Commit cdcc2343fec00b3b364c0518ef2bb91587319407

Authored by James Smart
Committed by James Bottomley
1 parent 92e3af663a

[SCSI] lpfc 8.3.34: Fixed leaking memory from pci dma pool

Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

Showing 2 changed files with 17 additions and 10 deletions
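
Editor's note: the hunks that actually change code fall outside the excerpt reproduced below, which is unchanged context from lpfc_hbadisc.c only. As a hedged illustration of the class of bug the subject line names — not the commit's actual hunk — this is the generic Linux dma_pool alloc/free pairing that a "leaking memory from pci dma pool" fix typically restores. example_ctx, example_issue_cmd, and example_send are hypothetical names, not lpfc symbols.

/*
 * Hedged sketch, not this commit's hunk: a buffer taken from a
 * struct dma_pool with dma_pool_alloc() leaks unless every exit
 * path, including error paths, returns it with dma_pool_free().
 */
#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

struct example_ctx {			/* hypothetical driver context */
	struct dma_pool *buf_pool;
};

/* hypothetical stand-in for whatever consumes the buffer */
static int example_send(struct example_ctx *ctx, void *virt, dma_addr_t phys)
{
	return -EIO;			/* pretend the send failed */
}

static int example_issue_cmd(struct example_ctx *ctx)
{
	dma_addr_t phys;
	void *virt;

	virt = dma_pool_alloc(ctx->buf_pool, GFP_KERNEL, &phys);
	if (!virt)
		return -ENOMEM;

	if (example_send(ctx, virt, phys)) {
		/* the leak: returning early here without this free */
		dma_pool_free(ctx->buf_pool, virt, phys);
		return -EIO;
	}
	return 0;	/* on success the completion path frees the buffer */
}

Per the diffstat, the real fix adds 17 lines and removes 10 across two files.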

drivers/scsi/lpfc/lpfc_hbadisc.c
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2012 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * * 8 * *
9 * This program is free software; you can redistribute it and/or * 9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General * 10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. * 11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. * 12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING * 18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21 21
22 #include <linux/blkdev.h> 22 #include <linux/blkdev.h>
23 #include <linux/delay.h> 23 #include <linux/delay.h>
24 #include <linux/slab.h> 24 #include <linux/slab.h>
25 #include <linux/pci.h> 25 #include <linux/pci.h>
26 #include <linux/kthread.h> 26 #include <linux/kthread.h>
27 #include <linux/interrupt.h> 27 #include <linux/interrupt.h>
28 28
29 #include <scsi/scsi.h> 29 #include <scsi/scsi.h>
30 #include <scsi/scsi_device.h> 30 #include <scsi/scsi_device.h>
31 #include <scsi/scsi_host.h> 31 #include <scsi/scsi_host.h>
32 #include <scsi/scsi_transport_fc.h> 32 #include <scsi/scsi_transport_fc.h>
33 33
34 #include "lpfc_hw4.h" 34 #include "lpfc_hw4.h"
35 #include "lpfc_hw.h" 35 #include "lpfc_hw.h"
36 #include "lpfc_nl.h" 36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h" 37 #include "lpfc_disc.h"
38 #include "lpfc_sli.h" 38 #include "lpfc_sli.h"
39 #include "lpfc_sli4.h" 39 #include "lpfc_sli4.h"
40 #include "lpfc_scsi.h" 40 #include "lpfc_scsi.h"
41 #include "lpfc.h" 41 #include "lpfc.h"
42 #include "lpfc_logmsg.h" 42 #include "lpfc_logmsg.h"
43 #include "lpfc_crtn.h" 43 #include "lpfc_crtn.h"
44 #include "lpfc_vport.h" 44 #include "lpfc_vport.h"
45 #include "lpfc_debugfs.h" 45 #include "lpfc_debugfs.h"
46 46
47 /* AlpaArray for assignment of scsid for scan-down and bind_method */ 47 /* AlpaArray for assignment of scsid for scan-down and bind_method */
48 static uint8_t lpfcAlpaArray[] = { 48 static uint8_t lpfcAlpaArray[] = {
49 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6, 49 0xEF, 0xE8, 0xE4, 0xE2, 0xE1, 0xE0, 0xDC, 0xDA, 0xD9, 0xD6,
50 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA, 50 0xD5, 0xD4, 0xD3, 0xD2, 0xD1, 0xCE, 0xCD, 0xCC, 0xCB, 0xCA,
51 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5, 51 0xC9, 0xC7, 0xC6, 0xC5, 0xC3, 0xBC, 0xBA, 0xB9, 0xB6, 0xB5,
52 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9, 52 0xB4, 0xB3, 0xB2, 0xB1, 0xAE, 0xAD, 0xAC, 0xAB, 0xAA, 0xA9,
53 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97, 53 0xA7, 0xA6, 0xA5, 0xA3, 0x9F, 0x9E, 0x9D, 0x9B, 0x98, 0x97,
54 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79, 54 0x90, 0x8F, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7C, 0x7A, 0x79,
55 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B, 55 0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6E, 0x6D, 0x6C, 0x6B,
56 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56, 56 0x6A, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5C, 0x5A, 0x59, 0x56,
57 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 57 0x55, 0x54, 0x53, 0x52, 0x51, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A,
58 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35, 58 0x49, 0x47, 0x46, 0x45, 0x43, 0x3C, 0x3A, 0x39, 0x36, 0x35,
59 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 59 0x34, 0x33, 0x32, 0x31, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29,
60 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17, 60 0x27, 0x26, 0x25, 0x23, 0x1F, 0x1E, 0x1D, 0x1B, 0x18, 0x17,
61 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01 61 0x10, 0x0F, 0x08, 0x04, 0x02, 0x01
62 }; 62 };
63 63
64 static void lpfc_disc_timeout_handler(struct lpfc_vport *); 64 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
65 static void lpfc_disc_flush_list(struct lpfc_vport *vport); 65 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
66 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); 66 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
67 static int lpfc_fcf_inuse(struct lpfc_hba *); 67 static int lpfc_fcf_inuse(struct lpfc_hba *);
68 68
69 void 69 void
70 lpfc_terminate_rport_io(struct fc_rport *rport) 70 lpfc_terminate_rport_io(struct fc_rport *rport)
71 { 71 {
72 struct lpfc_rport_data *rdata; 72 struct lpfc_rport_data *rdata;
73 struct lpfc_nodelist * ndlp; 73 struct lpfc_nodelist * ndlp;
74 struct lpfc_hba *phba; 74 struct lpfc_hba *phba;
75 75
76 rdata = rport->dd_data; 76 rdata = rport->dd_data;
77 ndlp = rdata->pnode; 77 ndlp = rdata->pnode;
78 78
79 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { 79 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
80 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 80 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET)
81 printk(KERN_ERR "Cannot find remote node" 81 printk(KERN_ERR "Cannot find remote node"
82 " to terminate I/O Data x%x\n", 82 " to terminate I/O Data x%x\n",
83 rport->port_id); 83 rport->port_id);
84 return; 84 return;
85 } 85 }
86 86
87 phba = ndlp->phba; 87 phba = ndlp->phba;
88 88
89 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, 89 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
90 "rport terminate: sid:x%x did:x%x flg:x%x", 90 "rport terminate: sid:x%x did:x%x flg:x%x",
91 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); 91 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
92 92
93 if (ndlp->nlp_sid != NLP_NO_SID) { 93 if (ndlp->nlp_sid != NLP_NO_SID) {
94 lpfc_sli_abort_iocb(ndlp->vport, 94 lpfc_sli_abort_iocb(ndlp->vport,
95 &phba->sli.ring[phba->sli.fcp_ring], 95 &phba->sli.ring[phba->sli.fcp_ring],
96 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 96 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
97 } 97 }
98 } 98 }
99 99
100 /* 100 /*
101 * This function will be called when dev_loss_tmo fire. 101 * This function will be called when dev_loss_tmo fire.
102 */ 102 */
103 void 103 void
104 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) 104 lpfc_dev_loss_tmo_callbk(struct fc_rport *rport)
105 { 105 {
106 struct lpfc_rport_data *rdata; 106 struct lpfc_rport_data *rdata;
107 struct lpfc_nodelist * ndlp; 107 struct lpfc_nodelist * ndlp;
108 struct lpfc_vport *vport; 108 struct lpfc_vport *vport;
109 struct lpfc_hba *phba; 109 struct lpfc_hba *phba;
110 struct lpfc_work_evt *evtp; 110 struct lpfc_work_evt *evtp;
111 int put_node; 111 int put_node;
112 int put_rport; 112 int put_rport;
113 113
114 rdata = rport->dd_data; 114 rdata = rport->dd_data;
115 ndlp = rdata->pnode; 115 ndlp = rdata->pnode;
116 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) 116 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
117 return; 117 return;
118 118
119 vport = ndlp->vport; 119 vport = ndlp->vport;
120 phba = vport->phba; 120 phba = vport->phba;
121 121
122 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 122 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
123 "rport devlosscb: sid:x%x did:x%x flg:x%x", 123 "rport devlosscb: sid:x%x did:x%x flg:x%x",
124 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); 124 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
125 125
126 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 126 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
127 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n", 127 "3181 dev_loss_callbk x%06x, rport %p flg x%x\n",
128 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); 128 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
129 129
130 /* Don't defer this if we are in the process of deleting the vport 130 /* Don't defer this if we are in the process of deleting the vport
131 * or unloading the driver. The unload will cleanup the node 131 * or unloading the driver. The unload will cleanup the node
132 * appropriately we just need to cleanup the ndlp rport info here. 132 * appropriately we just need to cleanup the ndlp rport info here.
133 */ 133 */
134 if (vport->load_flag & FC_UNLOADING) { 134 if (vport->load_flag & FC_UNLOADING) {
135 put_node = rdata->pnode != NULL; 135 put_node = rdata->pnode != NULL;
136 put_rport = ndlp->rport != NULL; 136 put_rport = ndlp->rport != NULL;
137 rdata->pnode = NULL; 137 rdata->pnode = NULL;
138 ndlp->rport = NULL; 138 ndlp->rport = NULL;
139 if (put_node) 139 if (put_node)
140 lpfc_nlp_put(ndlp); 140 lpfc_nlp_put(ndlp);
141 if (put_rport) 141 if (put_rport)
142 put_device(&rport->dev); 142 put_device(&rport->dev);
143 return; 143 return;
144 } 144 }
145 145
146 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) 146 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
147 return; 147 return;
148 148
149 if (ndlp->nlp_type & NLP_FABRIC) { 149 if (ndlp->nlp_type & NLP_FABRIC) {
150 150
151 /* If the WWPN of the rport and ndlp don't match, ignore it */ 151 /* If the WWPN of the rport and ndlp don't match, ignore it */
152 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) { 152 if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
153 put_device(&rport->dev); 153 put_device(&rport->dev);
154 return; 154 return;
155 } 155 }
156 } 156 }
157 157
158 evtp = &ndlp->dev_loss_evt; 158 evtp = &ndlp->dev_loss_evt;
159 159
160 if (!list_empty(&evtp->evt_listp)) 160 if (!list_empty(&evtp->evt_listp))
161 return; 161 return;
162 162
163 spin_lock_irq(&phba->hbalock); 163 spin_lock_irq(&phba->hbalock);
164 /* We need to hold the node by incrementing the reference 164 /* We need to hold the node by incrementing the reference
165 * count until this queued work is done 165 * count until this queued work is done
166 */ 166 */
167 evtp->evt_arg1 = lpfc_nlp_get(ndlp); 167 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
168 if (evtp->evt_arg1) { 168 if (evtp->evt_arg1) {
169 evtp->evt = LPFC_EVT_DEV_LOSS; 169 evtp->evt = LPFC_EVT_DEV_LOSS;
170 list_add_tail(&evtp->evt_listp, &phba->work_list); 170 list_add_tail(&evtp->evt_listp, &phba->work_list);
171 lpfc_worker_wake_up(phba); 171 lpfc_worker_wake_up(phba);
172 } 172 }
173 spin_unlock_irq(&phba->hbalock); 173 spin_unlock_irq(&phba->hbalock);
174 174
175 return; 175 return;
176 } 176 }
177 177
178 /** 178 /**
179 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler 179 * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
180 * @ndlp: Pointer to remote node object. 180 * @ndlp: Pointer to remote node object.
181 * 181 *
182 * This function is called from the worker thread when devloss timeout timer 182 * This function is called from the worker thread when devloss timeout timer
183 * expires. For SLI4 host, this routine shall return 1 when at lease one 183 * expires. For SLI4 host, this routine shall return 1 when at lease one
184 * remote node, including this @ndlp, is still in use of FCF; otherwise, this 184 * remote node, including this @ndlp, is still in use of FCF; otherwise, this
185 * routine shall return 0 when there is no remote node is still in use of FCF 185 * routine shall return 0 when there is no remote node is still in use of FCF
186 * when devloss timeout happened to this @ndlp. 186 * when devloss timeout happened to this @ndlp.
187 **/ 187 **/
188 static int 188 static int
189 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) 189 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
190 { 190 {
191 struct lpfc_rport_data *rdata; 191 struct lpfc_rport_data *rdata;
192 struct fc_rport *rport; 192 struct fc_rport *rport;
193 struct lpfc_vport *vport; 193 struct lpfc_vport *vport;
194 struct lpfc_hba *phba; 194 struct lpfc_hba *phba;
195 uint8_t *name; 195 uint8_t *name;
196 int put_node; 196 int put_node;
197 int put_rport; 197 int put_rport;
198 int warn_on = 0; 198 int warn_on = 0;
199 int fcf_inuse = 0; 199 int fcf_inuse = 0;
200 200
201 rport = ndlp->rport; 201 rport = ndlp->rport;
202 202
203 if (!rport) 203 if (!rport)
204 return fcf_inuse; 204 return fcf_inuse;
205 205
206 rdata = rport->dd_data; 206 rdata = rport->dd_data;
207 name = (uint8_t *) &ndlp->nlp_portname; 207 name = (uint8_t *) &ndlp->nlp_portname;
208 vport = ndlp->vport; 208 vport = ndlp->vport;
209 phba = vport->phba; 209 phba = vport->phba;
210 210
211 if (phba->sli_rev == LPFC_SLI_REV4) 211 if (phba->sli_rev == LPFC_SLI_REV4)
212 fcf_inuse = lpfc_fcf_inuse(phba); 212 fcf_inuse = lpfc_fcf_inuse(phba);
213 213
214 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 214 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
215 "rport devlosstmo:did:x%x type:x%x id:x%x", 215 "rport devlosstmo:did:x%x type:x%x id:x%x",
216 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); 216 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
217 217
218 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 218 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
219 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n", 219 "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n",
220 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); 220 ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag);
221 221
222 /* Don't defer this if we are in the process of deleting the vport 222 /* Don't defer this if we are in the process of deleting the vport
223 * or unloading the driver. The unload will cleanup the node 223 * or unloading the driver. The unload will cleanup the node
224 * appropriately we just need to cleanup the ndlp rport info here. 224 * appropriately we just need to cleanup the ndlp rport info here.
225 */ 225 */
226 if (vport->load_flag & FC_UNLOADING) { 226 if (vport->load_flag & FC_UNLOADING) {
227 if (ndlp->nlp_sid != NLP_NO_SID) { 227 if (ndlp->nlp_sid != NLP_NO_SID) {
228 /* flush the target */ 228 /* flush the target */
229 lpfc_sli_abort_iocb(vport, 229 lpfc_sli_abort_iocb(vport,
230 &phba->sli.ring[phba->sli.fcp_ring], 230 &phba->sli.ring[phba->sli.fcp_ring],
231 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 231 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
232 } 232 }
233 put_node = rdata->pnode != NULL; 233 put_node = rdata->pnode != NULL;
234 put_rport = ndlp->rport != NULL; 234 put_rport = ndlp->rport != NULL;
235 rdata->pnode = NULL; 235 rdata->pnode = NULL;
236 ndlp->rport = NULL; 236 ndlp->rport = NULL;
237 if (put_node) 237 if (put_node)
238 lpfc_nlp_put(ndlp); 238 lpfc_nlp_put(ndlp);
239 if (put_rport) 239 if (put_rport)
240 put_device(&rport->dev); 240 put_device(&rport->dev);
241 return fcf_inuse; 241 return fcf_inuse;
242 } 242 }
243 243
244 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) { 244 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
245 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 245 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
246 "0284 Devloss timeout Ignored on " 246 "0284 Devloss timeout Ignored on "
247 "WWPN %x:%x:%x:%x:%x:%x:%x:%x " 247 "WWPN %x:%x:%x:%x:%x:%x:%x:%x "
248 "NPort x%x\n", 248 "NPort x%x\n",
249 *name, *(name+1), *(name+2), *(name+3), 249 *name, *(name+1), *(name+2), *(name+3),
250 *(name+4), *(name+5), *(name+6), *(name+7), 250 *(name+4), *(name+5), *(name+6), *(name+7),
251 ndlp->nlp_DID); 251 ndlp->nlp_DID);
252 return fcf_inuse; 252 return fcf_inuse;
253 } 253 }
254 254
255 if (ndlp->nlp_type & NLP_FABRIC) { 255 if (ndlp->nlp_type & NLP_FABRIC) {
256 /* We will clean up these Nodes in linkup */ 256 /* We will clean up these Nodes in linkup */
257 put_node = rdata->pnode != NULL; 257 put_node = rdata->pnode != NULL;
258 put_rport = ndlp->rport != NULL; 258 put_rport = ndlp->rport != NULL;
259 rdata->pnode = NULL; 259 rdata->pnode = NULL;
260 ndlp->rport = NULL; 260 ndlp->rport = NULL;
261 if (put_node) 261 if (put_node)
262 lpfc_nlp_put(ndlp); 262 lpfc_nlp_put(ndlp);
263 if (put_rport) 263 if (put_rport)
264 put_device(&rport->dev); 264 put_device(&rport->dev);
265 return fcf_inuse; 265 return fcf_inuse;
266 } 266 }
267 267
268 if (ndlp->nlp_sid != NLP_NO_SID) { 268 if (ndlp->nlp_sid != NLP_NO_SID) {
269 warn_on = 1; 269 warn_on = 1;
270 /* flush the target */ 270 /* flush the target */
271 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring], 271 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
272 ndlp->nlp_sid, 0, LPFC_CTX_TGT); 272 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
273 } 273 }
274 274
275 if (warn_on) { 275 if (warn_on) {
276 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 276 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
277 "0203 Devloss timeout on " 277 "0203 Devloss timeout on "
278 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 278 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
279 "NPort x%06x Data: x%x x%x x%x\n", 279 "NPort x%06x Data: x%x x%x x%x\n",
280 *name, *(name+1), *(name+2), *(name+3), 280 *name, *(name+1), *(name+2), *(name+3),
281 *(name+4), *(name+5), *(name+6), *(name+7), 281 *(name+4), *(name+5), *(name+6), *(name+7),
282 ndlp->nlp_DID, ndlp->nlp_flag, 282 ndlp->nlp_DID, ndlp->nlp_flag,
283 ndlp->nlp_state, ndlp->nlp_rpi); 283 ndlp->nlp_state, ndlp->nlp_rpi);
284 } else { 284 } else {
285 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 285 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
286 "0204 Devloss timeout on " 286 "0204 Devloss timeout on "
287 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " 287 "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
288 "NPort x%06x Data: x%x x%x x%x\n", 288 "NPort x%06x Data: x%x x%x x%x\n",
289 *name, *(name+1), *(name+2), *(name+3), 289 *name, *(name+1), *(name+2), *(name+3),
290 *(name+4), *(name+5), *(name+6), *(name+7), 290 *(name+4), *(name+5), *(name+6), *(name+7),
291 ndlp->nlp_DID, ndlp->nlp_flag, 291 ndlp->nlp_DID, ndlp->nlp_flag,
292 ndlp->nlp_state, ndlp->nlp_rpi); 292 ndlp->nlp_state, ndlp->nlp_rpi);
293 } 293 }
294 294
295 put_node = rdata->pnode != NULL; 295 put_node = rdata->pnode != NULL;
296 put_rport = ndlp->rport != NULL; 296 put_rport = ndlp->rport != NULL;
297 rdata->pnode = NULL; 297 rdata->pnode = NULL;
298 ndlp->rport = NULL; 298 ndlp->rport = NULL;
299 if (put_node) 299 if (put_node)
300 lpfc_nlp_put(ndlp); 300 lpfc_nlp_put(ndlp);
301 if (put_rport) 301 if (put_rport)
302 put_device(&rport->dev); 302 put_device(&rport->dev);
303 303
304 if (!(vport->load_flag & FC_UNLOADING) && 304 if (!(vport->load_flag & FC_UNLOADING) &&
305 !(ndlp->nlp_flag & NLP_DELAY_TMO) && 305 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
306 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) && 306 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
307 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) && 307 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
308 (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) && 308 (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
309 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE)) 309 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
310 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); 310 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
311 311
312 return fcf_inuse; 312 return fcf_inuse;
313 } 313 }
314 314
315 /** 315 /**
316 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler 316 * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
317 * @phba: Pointer to hba context object. 317 * @phba: Pointer to hba context object.
318 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler. 318 * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
319 * @nlp_did: remote node identifer with devloss timeout. 319 * @nlp_did: remote node identifer with devloss timeout.
320 * 320 *
321 * This function is called from the worker thread after invoking devloss 321 * This function is called from the worker thread after invoking devloss
322 * timeout handler and releasing the reference count for the ndlp with 322 * timeout handler and releasing the reference count for the ndlp with
323 * which the devloss timeout was handled for SLI4 host. For the devloss 323 * which the devloss timeout was handled for SLI4 host. For the devloss
324 * timeout of the last remote node which had been in use of FCF, when this 324 * timeout of the last remote node which had been in use of FCF, when this
325 * routine is invoked, it shall be guaranteed that none of the remote are 325 * routine is invoked, it shall be guaranteed that none of the remote are
326 * in-use of FCF. When devloss timeout to the last remote using the FCF, 326 * in-use of FCF. When devloss timeout to the last remote using the FCF,
327 * if the FIP engine is neither in FCF table scan process nor roundrobin 327 * if the FIP engine is neither in FCF table scan process nor roundrobin
328 * failover process, the in-use FCF shall be unregistered. If the FIP 328 * failover process, the in-use FCF shall be unregistered. If the FIP
329 * engine is in FCF discovery process, the devloss timeout state shall 329 * engine is in FCF discovery process, the devloss timeout state shall
330 * be set for either the FCF table scan process or roundrobin failover 330 * be set for either the FCF table scan process or roundrobin failover
331 * process to unregister the in-use FCF. 331 * process to unregister the in-use FCF.
332 **/ 332 **/
333 static void 333 static void
334 lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse, 334 lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
335 uint32_t nlp_did) 335 uint32_t nlp_did)
336 { 336 {
337 /* If devloss timeout happened to a remote node when FCF had no 337 /* If devloss timeout happened to a remote node when FCF had no
338 * longer been in-use, do nothing. 338 * longer been in-use, do nothing.
339 */ 339 */
340 if (!fcf_inuse) 340 if (!fcf_inuse)
341 return; 341 return;
342 342
343 if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) { 343 if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
344 spin_lock_irq(&phba->hbalock); 344 spin_lock_irq(&phba->hbalock);
345 if (phba->fcf.fcf_flag & FCF_DISCOVERY) { 345 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
346 if (phba->hba_flag & HBA_DEVLOSS_TMO) { 346 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
347 spin_unlock_irq(&phba->hbalock); 347 spin_unlock_irq(&phba->hbalock);
348 return; 348 return;
349 } 349 }
350 phba->hba_flag |= HBA_DEVLOSS_TMO; 350 phba->hba_flag |= HBA_DEVLOSS_TMO;
351 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 351 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
352 "2847 Last remote node (x%x) using " 352 "2847 Last remote node (x%x) using "
353 "FCF devloss tmo\n", nlp_did); 353 "FCF devloss tmo\n", nlp_did);
354 } 354 }
355 if (phba->fcf.fcf_flag & FCF_REDISC_PROG) { 355 if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
356 spin_unlock_irq(&phba->hbalock); 356 spin_unlock_irq(&phba->hbalock);
357 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 357 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
358 "2868 Devloss tmo to FCF rediscovery " 358 "2868 Devloss tmo to FCF rediscovery "
359 "in progress\n"); 359 "in progress\n");
360 return; 360 return;
361 } 361 }
362 if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) { 362 if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
363 spin_unlock_irq(&phba->hbalock); 363 spin_unlock_irq(&phba->hbalock);
364 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 364 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
365 "2869 Devloss tmo to idle FIP engine, " 365 "2869 Devloss tmo to idle FIP engine, "
366 "unreg in-use FCF and rescan.\n"); 366 "unreg in-use FCF and rescan.\n");
367 /* Unregister in-use FCF and rescan */ 367 /* Unregister in-use FCF and rescan */
368 lpfc_unregister_fcf_rescan(phba); 368 lpfc_unregister_fcf_rescan(phba);
369 return; 369 return;
370 } 370 }
371 spin_unlock_irq(&phba->hbalock); 371 spin_unlock_irq(&phba->hbalock);
372 if (phba->hba_flag & FCF_TS_INPROG) 372 if (phba->hba_flag & FCF_TS_INPROG)
373 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 373 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
374 "2870 FCF table scan in progress\n"); 374 "2870 FCF table scan in progress\n");
375 if (phba->hba_flag & FCF_RR_INPROG) 375 if (phba->hba_flag & FCF_RR_INPROG)
376 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 376 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
377 "2871 FLOGI roundrobin FCF failover " 377 "2871 FLOGI roundrobin FCF failover "
378 "in progress\n"); 378 "in progress\n");
379 } 379 }
380 lpfc_unregister_unused_fcf(phba); 380 lpfc_unregister_unused_fcf(phba);
381 } 381 }
382 382
383 /** 383 /**
384 * lpfc_alloc_fast_evt - Allocates data structure for posting event 384 * lpfc_alloc_fast_evt - Allocates data structure for posting event
385 * @phba: Pointer to hba context object. 385 * @phba: Pointer to hba context object.
386 * 386 *
387 * This function is called from the functions which need to post 387 * This function is called from the functions which need to post
388 * events from interrupt context. This function allocates data 388 * events from interrupt context. This function allocates data
389 * structure required for posting event. It also keeps track of 389 * structure required for posting event. It also keeps track of
390 * number of events pending and prevent event storm when there are 390 * number of events pending and prevent event storm when there are
391 * too many events. 391 * too many events.
392 **/ 392 **/
393 struct lpfc_fast_path_event * 393 struct lpfc_fast_path_event *
394 lpfc_alloc_fast_evt(struct lpfc_hba *phba) { 394 lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
395 struct lpfc_fast_path_event *ret; 395 struct lpfc_fast_path_event *ret;
396 396
397 /* If there are lot of fast event do not exhaust memory due to this */ 397 /* If there are lot of fast event do not exhaust memory due to this */
398 if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT) 398 if (atomic_read(&phba->fast_event_count) > LPFC_MAX_EVT_COUNT)
399 return NULL; 399 return NULL;
400 400
401 ret = kzalloc(sizeof(struct lpfc_fast_path_event), 401 ret = kzalloc(sizeof(struct lpfc_fast_path_event),
402 GFP_ATOMIC); 402 GFP_ATOMIC);
403 if (ret) { 403 if (ret) {
404 atomic_inc(&phba->fast_event_count); 404 atomic_inc(&phba->fast_event_count);
405 INIT_LIST_HEAD(&ret->work_evt.evt_listp); 405 INIT_LIST_HEAD(&ret->work_evt.evt_listp);
406 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT; 406 ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
407 } 407 }
408 return ret; 408 return ret;
409 } 409 }
410 410
411 /** 411 /**
412 * lpfc_free_fast_evt - Frees event data structure 412 * lpfc_free_fast_evt - Frees event data structure
413 * @phba: Pointer to hba context object. 413 * @phba: Pointer to hba context object.
414 * @evt: Event object which need to be freed. 414 * @evt: Event object which need to be freed.
415 * 415 *
416 * This function frees the data structure required for posting 416 * This function frees the data structure required for posting
417 * events. 417 * events.
418 **/ 418 **/
419 void 419 void
420 lpfc_free_fast_evt(struct lpfc_hba *phba, 420 lpfc_free_fast_evt(struct lpfc_hba *phba,
421 struct lpfc_fast_path_event *evt) { 421 struct lpfc_fast_path_event *evt) {
422 422
423 atomic_dec(&phba->fast_event_count); 423 atomic_dec(&phba->fast_event_count);
424 kfree(evt); 424 kfree(evt);
425 } 425 }
426 426
427 /** 427 /**
428 * lpfc_send_fastpath_evt - Posts events generated from fast path 428 * lpfc_send_fastpath_evt - Posts events generated from fast path
429 * @phba: Pointer to hba context object. 429 * @phba: Pointer to hba context object.
430 * @evtp: Event data structure. 430 * @evtp: Event data structure.
431 * 431 *
432 * This function is called from worker thread, when the interrupt 432 * This function is called from worker thread, when the interrupt
433 * context need to post an event. This function posts the event 433 * context need to post an event. This function posts the event
434 * to fc transport netlink interface. 434 * to fc transport netlink interface.
435 **/ 435 **/
436 static void 436 static void
437 lpfc_send_fastpath_evt(struct lpfc_hba *phba, 437 lpfc_send_fastpath_evt(struct lpfc_hba *phba,
438 struct lpfc_work_evt *evtp) 438 struct lpfc_work_evt *evtp)
439 { 439 {
440 unsigned long evt_category, evt_sub_category; 440 unsigned long evt_category, evt_sub_category;
441 struct lpfc_fast_path_event *fast_evt_data; 441 struct lpfc_fast_path_event *fast_evt_data;
442 char *evt_data; 442 char *evt_data;
443 uint32_t evt_data_size; 443 uint32_t evt_data_size;
444 struct Scsi_Host *shost; 444 struct Scsi_Host *shost;
445 445
446 fast_evt_data = container_of(evtp, struct lpfc_fast_path_event, 446 fast_evt_data = container_of(evtp, struct lpfc_fast_path_event,
447 work_evt); 447 work_evt);
448 448
449 evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type; 449 evt_category = (unsigned long) fast_evt_data->un.fabric_evt.event_type;
450 evt_sub_category = (unsigned long) fast_evt_data->un. 450 evt_sub_category = (unsigned long) fast_evt_data->un.
451 fabric_evt.subcategory; 451 fabric_evt.subcategory;
452 shost = lpfc_shost_from_vport(fast_evt_data->vport); 452 shost = lpfc_shost_from_vport(fast_evt_data->vport);
453 if (evt_category == FC_REG_FABRIC_EVENT) { 453 if (evt_category == FC_REG_FABRIC_EVENT) {
454 if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) { 454 if (evt_sub_category == LPFC_EVENT_FCPRDCHKERR) {
455 evt_data = (char *) &fast_evt_data->un.read_check_error; 455 evt_data = (char *) &fast_evt_data->un.read_check_error;
456 evt_data_size = sizeof(fast_evt_data->un. 456 evt_data_size = sizeof(fast_evt_data->un.
457 read_check_error); 457 read_check_error);
458 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) || 458 } else if ((evt_sub_category == LPFC_EVENT_FABRIC_BUSY) ||
459 (evt_sub_category == LPFC_EVENT_PORT_BUSY)) { 459 (evt_sub_category == LPFC_EVENT_PORT_BUSY)) {
460 evt_data = (char *) &fast_evt_data->un.fabric_evt; 460 evt_data = (char *) &fast_evt_data->un.fabric_evt;
461 evt_data_size = sizeof(fast_evt_data->un.fabric_evt); 461 evt_data_size = sizeof(fast_evt_data->un.fabric_evt);
462 } else { 462 } else {
463 lpfc_free_fast_evt(phba, fast_evt_data); 463 lpfc_free_fast_evt(phba, fast_evt_data);
464 return; 464 return;
465 } 465 }
466 } else if (evt_category == FC_REG_SCSI_EVENT) { 466 } else if (evt_category == FC_REG_SCSI_EVENT) {
467 switch (evt_sub_category) { 467 switch (evt_sub_category) {
468 case LPFC_EVENT_QFULL: 468 case LPFC_EVENT_QFULL:
469 case LPFC_EVENT_DEVBSY: 469 case LPFC_EVENT_DEVBSY:
470 evt_data = (char *) &fast_evt_data->un.scsi_evt; 470 evt_data = (char *) &fast_evt_data->un.scsi_evt;
471 evt_data_size = sizeof(fast_evt_data->un.scsi_evt); 471 evt_data_size = sizeof(fast_evt_data->un.scsi_evt);
472 break; 472 break;
473 case LPFC_EVENT_CHECK_COND: 473 case LPFC_EVENT_CHECK_COND:
474 evt_data = (char *) &fast_evt_data->un.check_cond_evt; 474 evt_data = (char *) &fast_evt_data->un.check_cond_evt;
475 evt_data_size = sizeof(fast_evt_data->un. 475 evt_data_size = sizeof(fast_evt_data->un.
476 check_cond_evt); 476 check_cond_evt);
477 break; 477 break;
478 case LPFC_EVENT_VARQUEDEPTH: 478 case LPFC_EVENT_VARQUEDEPTH:
479 evt_data = (char *) &fast_evt_data->un.queue_depth_evt; 479 evt_data = (char *) &fast_evt_data->un.queue_depth_evt;
480 evt_data_size = sizeof(fast_evt_data->un. 480 evt_data_size = sizeof(fast_evt_data->un.
481 queue_depth_evt); 481 queue_depth_evt);
482 break; 482 break;
483 default: 483 default:
484 lpfc_free_fast_evt(phba, fast_evt_data); 484 lpfc_free_fast_evt(phba, fast_evt_data);
485 return; 485 return;
486 } 486 }
487 } else { 487 } else {
488 lpfc_free_fast_evt(phba, fast_evt_data); 488 lpfc_free_fast_evt(phba, fast_evt_data);
489 return; 489 return;
490 } 490 }
491 491
492 fc_host_post_vendor_event(shost, 492 fc_host_post_vendor_event(shost,
493 fc_get_event_number(), 493 fc_get_event_number(),
494 evt_data_size, 494 evt_data_size,
495 evt_data, 495 evt_data,
496 LPFC_NL_VENDOR_ID); 496 LPFC_NL_VENDOR_ID);
497 497
498 lpfc_free_fast_evt(phba, fast_evt_data); 498 lpfc_free_fast_evt(phba, fast_evt_data);
499 return; 499 return;
500 } 500 }
501 501
502 static void 502 static void
503 lpfc_work_list_done(struct lpfc_hba *phba) 503 lpfc_work_list_done(struct lpfc_hba *phba)
504 { 504 {
505 struct lpfc_work_evt *evtp = NULL; 505 struct lpfc_work_evt *evtp = NULL;
506 struct lpfc_nodelist *ndlp; 506 struct lpfc_nodelist *ndlp;
507 int free_evt; 507 int free_evt;
508 int fcf_inuse; 508 int fcf_inuse;
509 uint32_t nlp_did; 509 uint32_t nlp_did;
510 510
511 spin_lock_irq(&phba->hbalock); 511 spin_lock_irq(&phba->hbalock);
512 while (!list_empty(&phba->work_list)) { 512 while (!list_empty(&phba->work_list)) {
513 list_remove_head((&phba->work_list), evtp, typeof(*evtp), 513 list_remove_head((&phba->work_list), evtp, typeof(*evtp),
514 evt_listp); 514 evt_listp);
515 spin_unlock_irq(&phba->hbalock); 515 spin_unlock_irq(&phba->hbalock);
516 free_evt = 1; 516 free_evt = 1;
517 switch (evtp->evt) { 517 switch (evtp->evt) {
518 case LPFC_EVT_ELS_RETRY: 518 case LPFC_EVT_ELS_RETRY:
519 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1); 519 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
520 lpfc_els_retry_delay_handler(ndlp); 520 lpfc_els_retry_delay_handler(ndlp);
521 free_evt = 0; /* evt is part of ndlp */ 521 free_evt = 0; /* evt is part of ndlp */
522 /* decrement the node reference count held 522 /* decrement the node reference count held
523 * for this queued work 523 * for this queued work
524 */ 524 */
525 lpfc_nlp_put(ndlp); 525 lpfc_nlp_put(ndlp);
526 break; 526 break;
527 case LPFC_EVT_DEV_LOSS: 527 case LPFC_EVT_DEV_LOSS:
528 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1); 528 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
529 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp); 529 fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
530 free_evt = 0; 530 free_evt = 0;
531 /* decrement the node reference count held for 531 /* decrement the node reference count held for
532 * this queued work 532 * this queued work
533 */ 533 */
534 nlp_did = ndlp->nlp_DID; 534 nlp_did = ndlp->nlp_DID;
535 lpfc_nlp_put(ndlp); 535 lpfc_nlp_put(ndlp);
536 if (phba->sli_rev == LPFC_SLI_REV4) 536 if (phba->sli_rev == LPFC_SLI_REV4)
537 lpfc_sli4_post_dev_loss_tmo_handler(phba, 537 lpfc_sli4_post_dev_loss_tmo_handler(phba,
538 fcf_inuse, 538 fcf_inuse,
539 nlp_did); 539 nlp_did);
540 break; 540 break;
541 case LPFC_EVT_ONLINE: 541 case LPFC_EVT_ONLINE:
542 if (phba->link_state < LPFC_LINK_DOWN) 542 if (phba->link_state < LPFC_LINK_DOWN)
543 *(int *) (evtp->evt_arg1) = lpfc_online(phba); 543 *(int *) (evtp->evt_arg1) = lpfc_online(phba);
544 else 544 else
545 *(int *) (evtp->evt_arg1) = 0; 545 *(int *) (evtp->evt_arg1) = 0;
546 complete((struct completion *)(evtp->evt_arg2)); 546 complete((struct completion *)(evtp->evt_arg2));
547 break; 547 break;
548 case LPFC_EVT_OFFLINE_PREP: 548 case LPFC_EVT_OFFLINE_PREP:
549 if (phba->link_state >= LPFC_LINK_DOWN) 549 if (phba->link_state >= LPFC_LINK_DOWN)
550 lpfc_offline_prep(phba, LPFC_MBX_WAIT); 550 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
551 *(int *)(evtp->evt_arg1) = 0; 551 *(int *)(evtp->evt_arg1) = 0;
552 complete((struct completion *)(evtp->evt_arg2)); 552 complete((struct completion *)(evtp->evt_arg2));
553 break; 553 break;
554 case LPFC_EVT_OFFLINE: 554 case LPFC_EVT_OFFLINE:
555 lpfc_offline(phba); 555 lpfc_offline(phba);
556 lpfc_sli_brdrestart(phba); 556 lpfc_sli_brdrestart(phba);
557 *(int *)(evtp->evt_arg1) = 557 *(int *)(evtp->evt_arg1) =
558 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY); 558 lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY);
559 lpfc_unblock_mgmt_io(phba); 559 lpfc_unblock_mgmt_io(phba);
560 complete((struct completion *)(evtp->evt_arg2)); 560 complete((struct completion *)(evtp->evt_arg2));
561 break; 561 break;
562 case LPFC_EVT_WARM_START: 562 case LPFC_EVT_WARM_START:
563 lpfc_offline(phba); 563 lpfc_offline(phba);
564 lpfc_reset_barrier(phba); 564 lpfc_reset_barrier(phba);
565 lpfc_sli_brdreset(phba); 565 lpfc_sli_brdreset(phba);
566 lpfc_hba_down_post(phba); 566 lpfc_hba_down_post(phba);
567 *(int *)(evtp->evt_arg1) = 567 *(int *)(evtp->evt_arg1) =
568 lpfc_sli_brdready(phba, HS_MBRDY); 568 lpfc_sli_brdready(phba, HS_MBRDY);
569 lpfc_unblock_mgmt_io(phba); 569 lpfc_unblock_mgmt_io(phba);
570 complete((struct completion *)(evtp->evt_arg2)); 570 complete((struct completion *)(evtp->evt_arg2));
571 break; 571 break;
572 case LPFC_EVT_KILL: 572 case LPFC_EVT_KILL:
573 lpfc_offline(phba); 573 lpfc_offline(phba);
574 *(int *)(evtp->evt_arg1) 574 *(int *)(evtp->evt_arg1)
575 = (phba->pport->stopped) 575 = (phba->pport->stopped)
576 ? 0 : lpfc_sli_brdkill(phba); 576 ? 0 : lpfc_sli_brdkill(phba);
577 lpfc_unblock_mgmt_io(phba); 577 lpfc_unblock_mgmt_io(phba);
578 complete((struct completion *)(evtp->evt_arg2)); 578 complete((struct completion *)(evtp->evt_arg2));
579 break; 579 break;
580 case LPFC_EVT_FASTPATH_MGMT_EVT: 580 case LPFC_EVT_FASTPATH_MGMT_EVT:
581 lpfc_send_fastpath_evt(phba, evtp); 581 lpfc_send_fastpath_evt(phba, evtp);
582 free_evt = 0; 582 free_evt = 0;
583 break; 583 break;
584 case LPFC_EVT_RESET_HBA: 584 case LPFC_EVT_RESET_HBA:
585 if (!(phba->pport->load_flag & FC_UNLOADING)) 585 if (!(phba->pport->load_flag & FC_UNLOADING))
586 lpfc_reset_hba(phba); 586 lpfc_reset_hba(phba);
587 break; 587 break;
588 } 588 }
589 if (free_evt) 589 if (free_evt)
590 kfree(evtp); 590 kfree(evtp);
591 spin_lock_irq(&phba->hbalock); 591 spin_lock_irq(&phba->hbalock);
592 } 592 }
593 spin_unlock_irq(&phba->hbalock); 593 spin_unlock_irq(&phba->hbalock);
594 594
595 } 595 }
596 596
597 static void 597 static void
598 lpfc_work_done(struct lpfc_hba *phba) 598 lpfc_work_done(struct lpfc_hba *phba)
599 { 599 {
600 struct lpfc_sli_ring *pring; 600 struct lpfc_sli_ring *pring;
601 uint32_t ha_copy, status, control, work_port_events; 601 uint32_t ha_copy, status, control, work_port_events;
602 struct lpfc_vport **vports; 602 struct lpfc_vport **vports;
603 struct lpfc_vport *vport; 603 struct lpfc_vport *vport;
604 int i; 604 int i;
605 605
606 spin_lock_irq(&phba->hbalock); 606 spin_lock_irq(&phba->hbalock);
607 ha_copy = phba->work_ha; 607 ha_copy = phba->work_ha;
608 phba->work_ha = 0; 608 phba->work_ha = 0;
609 spin_unlock_irq(&phba->hbalock); 609 spin_unlock_irq(&phba->hbalock);
610 610
611 /* First, try to post the next mailbox command to SLI4 device */ 611 /* First, try to post the next mailbox command to SLI4 device */
612 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) 612 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
613 lpfc_sli4_post_async_mbox(phba); 613 lpfc_sli4_post_async_mbox(phba);
614 614
615 if (ha_copy & HA_ERATT) 615 if (ha_copy & HA_ERATT)
616 /* Handle the error attention event */ 616 /* Handle the error attention event */
617 lpfc_handle_eratt(phba); 617 lpfc_handle_eratt(phba);
618 618
619 if (ha_copy & HA_MBATT) 619 if (ha_copy & HA_MBATT)
620 lpfc_sli_handle_mb_event(phba); 620 lpfc_sli_handle_mb_event(phba);
621 621
622 if (ha_copy & HA_LATT) 622 if (ha_copy & HA_LATT)
623 lpfc_handle_latt(phba); 623 lpfc_handle_latt(phba);
624 624
625 /* Process SLI4 events */ 625 /* Process SLI4 events */
626 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) { 626 if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
627 if (phba->hba_flag & HBA_RRQ_ACTIVE) 627 if (phba->hba_flag & HBA_RRQ_ACTIVE)
628 lpfc_handle_rrq_active(phba); 628 lpfc_handle_rrq_active(phba);
629 if (phba->hba_flag & FCP_XRI_ABORT_EVENT) 629 if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
630 lpfc_sli4_fcp_xri_abort_event_proc(phba); 630 lpfc_sli4_fcp_xri_abort_event_proc(phba);
631 if (phba->hba_flag & ELS_XRI_ABORT_EVENT) 631 if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
632 lpfc_sli4_els_xri_abort_event_proc(phba); 632 lpfc_sli4_els_xri_abort_event_proc(phba);
633 if (phba->hba_flag & ASYNC_EVENT) 633 if (phba->hba_flag & ASYNC_EVENT)
634 lpfc_sli4_async_event_proc(phba); 634 lpfc_sli4_async_event_proc(phba);
635 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) { 635 if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
636 spin_lock_irq(&phba->hbalock); 636 spin_lock_irq(&phba->hbalock);
637 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER; 637 phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
638 spin_unlock_irq(&phba->hbalock); 638 spin_unlock_irq(&phba->hbalock);
639 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ); 639 lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
640 } 640 }
641 if (phba->fcf.fcf_flag & FCF_REDISC_EVT) 641 if (phba->fcf.fcf_flag & FCF_REDISC_EVT)
642 lpfc_sli4_fcf_redisc_event_proc(phba); 642 lpfc_sli4_fcf_redisc_event_proc(phba);
643 } 643 }
644 644
645 vports = lpfc_create_vport_work_array(phba); 645 vports = lpfc_create_vport_work_array(phba);
646 if (vports != NULL) 646 if (vports != NULL)
647 for (i = 0; i <= phba->max_vports; i++) { 647 for (i = 0; i <= phba->max_vports; i++) {
648 /* 648 /*
649 * We could have no vports in array if unloading, so if 649 * We could have no vports in array if unloading, so if
650 * this happens then just use the pport 650 * this happens then just use the pport
651 */ 651 */
652 if (vports[i] == NULL && i == 0) 652 if (vports[i] == NULL && i == 0)
653 vport = phba->pport; 653 vport = phba->pport;
654 else 654 else
655 vport = vports[i]; 655 vport = vports[i];
656 if (vport == NULL) 656 if (vport == NULL)
657 break; 657 break;
658 spin_lock_irq(&vport->work_port_lock); 658 spin_lock_irq(&vport->work_port_lock);
659 work_port_events = vport->work_port_events; 659 work_port_events = vport->work_port_events;
660 vport->work_port_events &= ~work_port_events; 660 vport->work_port_events &= ~work_port_events;
661 spin_unlock_irq(&vport->work_port_lock); 661 spin_unlock_irq(&vport->work_port_lock);
662 if (work_port_events & WORKER_DISC_TMO) 662 if (work_port_events & WORKER_DISC_TMO)
663 lpfc_disc_timeout_handler(vport); 663 lpfc_disc_timeout_handler(vport);
664 if (work_port_events & WORKER_ELS_TMO) 664 if (work_port_events & WORKER_ELS_TMO)
665 lpfc_els_timeout_handler(vport); 665 lpfc_els_timeout_handler(vport);
666 if (work_port_events & WORKER_HB_TMO) 666 if (work_port_events & WORKER_HB_TMO)
667 lpfc_hb_timeout_handler(phba); 667 lpfc_hb_timeout_handler(phba);
668 if (work_port_events & WORKER_MBOX_TMO) 668 if (work_port_events & WORKER_MBOX_TMO)
669 lpfc_mbox_timeout_handler(phba); 669 lpfc_mbox_timeout_handler(phba);
670 if (work_port_events & WORKER_FABRIC_BLOCK_TMO) 670 if (work_port_events & WORKER_FABRIC_BLOCK_TMO)
671 lpfc_unblock_fabric_iocbs(phba); 671 lpfc_unblock_fabric_iocbs(phba);
672 if (work_port_events & WORKER_FDMI_TMO) 672 if (work_port_events & WORKER_FDMI_TMO)
673 lpfc_fdmi_timeout_handler(vport); 673 lpfc_fdmi_timeout_handler(vport);
674 if (work_port_events & WORKER_RAMP_DOWN_QUEUE) 674 if (work_port_events & WORKER_RAMP_DOWN_QUEUE)
675 lpfc_ramp_down_queue_handler(phba); 675 lpfc_ramp_down_queue_handler(phba);
676 if (work_port_events & WORKER_RAMP_UP_QUEUE) 676 if (work_port_events & WORKER_RAMP_UP_QUEUE)
677 lpfc_ramp_up_queue_handler(phba); 677 lpfc_ramp_up_queue_handler(phba);
678 if (work_port_events & WORKER_DELAYED_DISC_TMO) 678 if (work_port_events & WORKER_DELAYED_DISC_TMO)
679 lpfc_delayed_disc_timeout_handler(vport); 679 lpfc_delayed_disc_timeout_handler(vport);
680 } 680 }
681 lpfc_destroy_vport_work_array(phba, vports); 681 lpfc_destroy_vport_work_array(phba, vports);
682 682
683 pring = &phba->sli.ring[LPFC_ELS_RING]; 683 pring = &phba->sli.ring[LPFC_ELS_RING];
684 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING))); 684 status = (ha_copy & (HA_RXMASK << (4*LPFC_ELS_RING)));
685 status >>= (4*LPFC_ELS_RING); 685 status >>= (4*LPFC_ELS_RING);
686 if ((status & HA_RXMASK) || 686 if ((status & HA_RXMASK) ||
687 (pring->flag & LPFC_DEFERRED_RING_EVENT) || 687 (pring->flag & LPFC_DEFERRED_RING_EVENT) ||
688 (phba->hba_flag & HBA_SP_QUEUE_EVT)) { 688 (phba->hba_flag & HBA_SP_QUEUE_EVT)) {
689 if (pring->flag & LPFC_STOP_IOCB_EVENT) { 689 if (pring->flag & LPFC_STOP_IOCB_EVENT) {
690 pring->flag |= LPFC_DEFERRED_RING_EVENT; 690 pring->flag |= LPFC_DEFERRED_RING_EVENT;
691 /* Set the lpfc data pending flag */ 691 /* Set the lpfc data pending flag */
692 set_bit(LPFC_DATA_READY, &phba->data_flags); 692 set_bit(LPFC_DATA_READY, &phba->data_flags);
693 } else { 693 } else {
694 pring->flag &= ~LPFC_DEFERRED_RING_EVENT; 694 pring->flag &= ~LPFC_DEFERRED_RING_EVENT;
695 lpfc_sli_handle_slow_ring_event(phba, pring, 695 lpfc_sli_handle_slow_ring_event(phba, pring,
696 (status & 696 (status &
697 HA_RXMASK)); 697 HA_RXMASK));
698 } 698 }
699 if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt) 699 if ((phba->sli_rev == LPFC_SLI_REV4) && pring->txq_cnt)
700 lpfc_drain_txq(phba); 700 lpfc_drain_txq(phba);
701 /* 701 /*
702 * Turn on Ring interrupts 702 * Turn on Ring interrupts
703 */ 703 */
704 if (phba->sli_rev <= LPFC_SLI_REV3) { 704 if (phba->sli_rev <= LPFC_SLI_REV3) {
705 spin_lock_irq(&phba->hbalock); 705 spin_lock_irq(&phba->hbalock);
706 control = readl(phba->HCregaddr); 706 control = readl(phba->HCregaddr);
707 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) { 707 if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
708 lpfc_debugfs_slow_ring_trc(phba, 708 lpfc_debugfs_slow_ring_trc(phba,
709 "WRK Enable ring: cntl:x%x hacopy:x%x", 709 "WRK Enable ring: cntl:x%x hacopy:x%x",
710 control, ha_copy, 0); 710 control, ha_copy, 0);
711 711
712 control |= (HC_R0INT_ENA << LPFC_ELS_RING); 712 control |= (HC_R0INT_ENA << LPFC_ELS_RING);
713 writel(control, phba->HCregaddr); 713 writel(control, phba->HCregaddr);
714 readl(phba->HCregaddr); /* flush */ 714 readl(phba->HCregaddr); /* flush */
715 } else { 715 } else {
716 lpfc_debugfs_slow_ring_trc(phba, 716 lpfc_debugfs_slow_ring_trc(phba,
717 "WRK Ring ok: cntl:x%x hacopy:x%x", 717 "WRK Ring ok: cntl:x%x hacopy:x%x",
718 control, ha_copy, 0); 718 control, ha_copy, 0);
719 } 719 }
720 spin_unlock_irq(&phba->hbalock); 720 spin_unlock_irq(&phba->hbalock);
721 } 721 }
722 } 722 }
723 lpfc_work_list_done(phba); 723 lpfc_work_list_done(phba);
724 } 724 }
725 725
726 int 726 int
727 lpfc_do_work(void *p) 727 lpfc_do_work(void *p)
728 { 728 {
729 struct lpfc_hba *phba = p; 729 struct lpfc_hba *phba = p;
730 int rc; 730 int rc;
731 731
732 set_user_nice(current, -20); 732 set_user_nice(current, -20);
733 current->flags |= PF_NOFREEZE; 733 current->flags |= PF_NOFREEZE;
734 phba->data_flags = 0; 734 phba->data_flags = 0;
735 735
736 while (!kthread_should_stop()) { 736 while (!kthread_should_stop()) {
737 /* wait and check worker queue activities */ 737 /* wait and check worker queue activities */
738 rc = wait_event_interruptible(phba->work_waitq, 738 rc = wait_event_interruptible(phba->work_waitq,
739 (test_and_clear_bit(LPFC_DATA_READY, 739 (test_and_clear_bit(LPFC_DATA_READY,
740 &phba->data_flags) 740 &phba->data_flags)
741 || kthread_should_stop())); 741 || kthread_should_stop()));
742 /* Signal wakeup shall terminate the worker thread */ 742 /* Signal wakeup shall terminate the worker thread */
743 if (rc) { 743 if (rc) {
744 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 744 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
745 "0433 Wakeup on signal: rc=x%x\n", rc); 745 "0433 Wakeup on signal: rc=x%x\n", rc);
746 break; 746 break;
747 } 747 }
748 748
749 /* Attend pending lpfc data processing */ 749 /* Attend pending lpfc data processing */
750 lpfc_work_done(phba); 750 lpfc_work_done(phba);
751 } 751 }
752 phba->worker_thread = NULL; 752 phba->worker_thread = NULL;
753 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 753 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
754 "0432 Worker thread stopped.\n"); 754 "0432 Worker thread stopped.\n");
755 return 0; 755 return 0;
756 } 756 }
757 757
758 /* 758 /*
759 * This is only called to handle FC worker events. Since this a rare 759 * This is only called to handle FC worker events. Since this a rare
760 * occurrence, we allocate a struct lpfc_work_evt structure here instead of 760 * occurrence, we allocate a struct lpfc_work_evt structure here instead of
761 * embedding it in the IOCB. 761 * embedding it in the IOCB.
762 */ 762 */
763 int 763 int
764 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2, 764 lpfc_workq_post_event(struct lpfc_hba *phba, void *arg1, void *arg2,
765 uint32_t evt) 765 uint32_t evt)
766 { 766 {
767 struct lpfc_work_evt *evtp; 767 struct lpfc_work_evt *evtp;
768 unsigned long flags; 768 unsigned long flags;
769 769
770 /* 770 /*
771 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will 771 * All Mailbox completions and LPFC_ELS_RING rcv ring IOCB events will
772 * be queued to worker thread for processing 772 * be queued to worker thread for processing
773 */ 773 */
774 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC); 774 evtp = kmalloc(sizeof(struct lpfc_work_evt), GFP_ATOMIC);
775 if (!evtp) 775 if (!evtp)
776 return 0; 776 return 0;
777 777
778 evtp->evt_arg1 = arg1; 778 evtp->evt_arg1 = arg1;
779 evtp->evt_arg2 = arg2; 779 evtp->evt_arg2 = arg2;
780 evtp->evt = evt; 780 evtp->evt = evt;
781 781
782 spin_lock_irqsave(&phba->hbalock, flags); 782 spin_lock_irqsave(&phba->hbalock, flags);
783 list_add_tail(&evtp->evt_listp, &phba->work_list); 783 list_add_tail(&evtp->evt_listp, &phba->work_list);
784 spin_unlock_irqrestore(&phba->hbalock, flags); 784 spin_unlock_irqrestore(&phba->hbalock, flags);
785 785
786 lpfc_worker_wake_up(phba); 786 lpfc_worker_wake_up(phba);
787 787
788 return 1; 788 return 1;
789 } 789 }
790 790
791 void 791 void
792 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove) 792 lpfc_cleanup_rpis(struct lpfc_vport *vport, int remove)
793 { 793 {
794 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 794 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
795 struct lpfc_hba *phba = vport->phba; 795 struct lpfc_hba *phba = vport->phba;
796 struct lpfc_nodelist *ndlp, *next_ndlp; 796 struct lpfc_nodelist *ndlp, *next_ndlp;
797 int rc; 797 int rc;
798 798
799 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { 799 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
800 if (!NLP_CHK_NODE_ACT(ndlp)) 800 if (!NLP_CHK_NODE_ACT(ndlp))
801 continue; 801 continue;
802 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 802 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
803 continue; 803 continue;
804 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) || 804 if ((phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) ||
805 ((vport->port_type == LPFC_NPIV_PORT) && 805 ((vport->port_type == LPFC_NPIV_PORT) &&
806 (ndlp->nlp_DID == NameServer_DID))) 806 (ndlp->nlp_DID == NameServer_DID)))
807 lpfc_unreg_rpi(vport, ndlp); 807 lpfc_unreg_rpi(vport, ndlp);
808 808
809 /* Leave Fabric nodes alone on link down */ 809 /* Leave Fabric nodes alone on link down */
810 if ((phba->sli_rev < LPFC_SLI_REV4) && 810 if ((phba->sli_rev < LPFC_SLI_REV4) &&
811 (!remove && ndlp->nlp_type & NLP_FABRIC)) 811 (!remove && ndlp->nlp_type & NLP_FABRIC))
812 continue; 812 continue;
813 rc = lpfc_disc_state_machine(vport, ndlp, NULL, 813 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
814 remove 814 remove
815 ? NLP_EVT_DEVICE_RM 815 ? NLP_EVT_DEVICE_RM
816 : NLP_EVT_DEVICE_RECOVERY); 816 : NLP_EVT_DEVICE_RECOVERY);
817 } 817 }
818 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) { 818 if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
819 if (phba->sli_rev == LPFC_SLI_REV4) 819 if (phba->sli_rev == LPFC_SLI_REV4)
820 lpfc_sli4_unreg_all_rpis(vport); 820 lpfc_sli4_unreg_all_rpis(vport);
821 lpfc_mbx_unreg_vpi(vport); 821 lpfc_mbx_unreg_vpi(vport);
822 spin_lock_irq(shost->host_lock); 822 spin_lock_irq(shost->host_lock);
823 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 823 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
824 spin_unlock_irq(shost->host_lock); 824 spin_unlock_irq(shost->host_lock);
825 } 825 }
826 } 826 }
827 827
828 void 828 void
829 lpfc_port_link_failure(struct lpfc_vport *vport) 829 lpfc_port_link_failure(struct lpfc_vport *vport)
830 { 830 {
	lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);

	/* Cleanup any outstanding received buffers */
	lpfc_cleanup_rcv_buffers(vport);

	/* Cleanup any outstanding RSCN activity */
	lpfc_els_flush_rscn(vport);

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_cmd(vport);

	lpfc_cleanup_rpis(vport, 0);

	/* Turn off discovery timer if it's running */
	lpfc_can_disctmo(vport);
}

void
lpfc_linkdown_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKDOWN, 0);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Down: state:x%x rtry:x%x flg:x%x",
		vport->port_state, vport->fc_ns_retry, vport->fc_flag);

	lpfc_port_link_failure(vport);

	/* Stop delayed Nport discovery */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_DISC_DELAYED;
	spin_unlock_irq(shost->host_lock);
	del_timer_sync(&vport->delayed_disc_tmo);
}

int
lpfc_linkdown(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *mb;
	int i;

	if (phba->link_state == LPFC_LINK_DOWN)
		return 0;

	/* Block all SCSI stack I/Os */
	lpfc_scsi_dev_block(phba);

	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	spin_unlock_irq(&phba->hbalock);
	if (phba->link_state > LPFC_LINK_DOWN) {
		phba->link_state = LPFC_LINK_DOWN;
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~FC_LBIT;
		spin_unlock_irq(shost->host_lock);
	}
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			/* Issue a LINK DOWN event to all nodes */
			lpfc_linkdown_port(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	/* Clean up any firmware default rpi's */
	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mb) {
		lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
		mb->vport = vport;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
		    == MBX_NOT_FINISHED) {
			mempool_free(mb, phba->mbox_mem_pool);
		}
	}

	/* Setup myDID for link up if we are in pt2pt mode */
	if (phba->pport->fc_flag & FC_PT2PT) {
		phba->pport->fc_myDID = 0;
		mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mb) {
			lpfc_config_link(phba, mb);
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			mb->vport = vport;
			if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT)
			    == MBX_NOT_FINISHED) {
				mempool_free(mb, phba->mbox_mem_pool);
			}
		}
		spin_lock_irq(shost->host_lock);
		phba->pport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI);
		spin_unlock_irq(shost->host_lock);
	}

	return 0;
}
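
/*
 * Editorial sketch (not part of the driver): both mailbox submissions
 * above follow the same fire-and-forget idiom. A mailbox is taken from
 * the mempool, mbox_cmpl is pointed at the default completion handler,
 * and the buffer is freed by the caller only when the SLI layer
 * refuses the command with MBX_NOT_FINISHED; once accepted, the
 * completion handler owns the buffer and returns it to the pool. A
 * minimal, hypothetical helper capturing the pattern (the helper name
 * and the setup callback are illustrative assumptions, not driver API):
 */
static int
lpfc_issue_def_mbox_sketch(struct lpfc_hba *phba, struct lpfc_vport *vport,
			   void (*setup)(struct lpfc_hba *, LPFC_MBOXQ_t *))
{
	LPFC_MBOXQ_t *mb;

	mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mb)
		return -ENOMEM;
	setup(phba, mb);			/* e.g. lpfc_config_link() */
	mb->vport = vport;
	mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (lpfc_sli_issue_mbox(phba, mb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		/* never queued: the buffer is still owned by the caller */
		mempool_free(mb, phba->mbox_mem_pool);
		return -EIO;
	}
	return 0;	/* lpfc_sli_def_mbox_cmpl frees mb on completion */
}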

static void
lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (!NLP_CHK_NODE_ACT(ndlp))
			continue;
		if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
			continue;
		if (ndlp->nlp_type & NLP_FABRIC) {
			/* On Linkup it's safe to clean up the ndlp
			 * from Fabric connections.
			 */
			if (ndlp->nlp_DID != Fabric_DID)
				lpfc_unreg_rpi(vport, ndlp);
			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		} else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
			/* Fail outstanding IO now since device is
			 * marked for PLOGI.
			 */
			lpfc_unreg_rpi(vport, ndlp);
		}
	}
}

static void
lpfc_linkup_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;

	if ((vport->load_flag & FC_UNLOADING) != 0)
		return;

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
		"Link Up: top:x%x speed:x%x flg:x%x",
		phba->fc_topology, phba->fc_linkspeed, phba->link_flag);

	/* If NPIV is not enabled, only bring the physical port up */
	if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (vport != phba->pport))
		return;

	fc_host_post_event(shost, fc_get_event_number(), FCH_EVT_LINKUP, 0);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~(FC_PT2PT | FC_PT2PT_PLOGI | FC_ABORT_DISCOVERY |
			    FC_RSCN_MODE | FC_NLP_MORE | FC_RSCN_DISCOVERY);
	vport->fc_flag |= FC_NDISC_ACTIVE;
	vport->fc_ns_retry = 0;
	spin_unlock_irq(shost->host_lock);

	if (vport->fc_flag & FC_LBIT)
		lpfc_linkup_cleanup_nodes(vport);

}

static int
lpfc_linkup(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	lpfc_cleanup_wt_rrqs(phba);
	phba->link_state = LPFC_LINK_UP;

	/* Unblock fabric iocbs if they are blocked */
	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
	del_timer_sync(&phba->fabric_block_timer);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_linkup_port(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    (phba->sli_rev < LPFC_SLI_REV4))
		lpfc_issue_clear_la(phba, phba->pport);

	return 0;
}

/*
 * This routine handles processing a CLEAR_LA mailbox
 * command upon completion. It is set up in the LPFC_MBOXQ
 * as the completion routine when the command is
 * handed off to the SLI layer.
 */
static void
lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *mb = &pmb->u.mb;
	uint32_t control;

	/* Since we don't do discovery right now, turn these off here */
	psli->ring[psli->extra_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->fcp_ring].flag &= ~LPFC_STOP_IOCB_EVENT;
	psli->ring[psli->next_ring].flag &= ~LPFC_STOP_IOCB_EVENT;

	/* Check for error */
	if ((mb->mbxStatus) && (mb->mbxStatus != 0x1601)) {
		/* CLEAR_LA mbox error <mbxStatus> state <hba_state> */
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "0320 CLEAR_LA mbxStatus error x%x hba "
				 "state x%x\n",
				 mb->mbxStatus, vport->port_state);
		phba->link_state = LPFC_HBA_ERROR;
		goto out;
	}

	if (vport->port_type == LPFC_PHYSICAL_PORT)
		phba->link_state = LPFC_HBA_READY;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);
	mempool_free(pmb, phba->mbox_mem_pool);
	return;

out:
	/* Device Discovery completes */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0225 Device Discovery completes\n");
	mempool_free(pmb, phba->mbox_mem_pool);

	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_ABORT_DISCOVERY;
	spin_unlock_irq(shost->host_lock);

	lpfc_can_disctmo(vport);

	/* turn on Link Attention interrupts */

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag |= LPFC_PROCESS_LA;
	control = readl(phba->HCregaddr);
	control |= HC_LAINT_ENA;
	writel(control, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	return;
}


static void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;

	if (pmb->u.mb.mbxStatus)
		goto out;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* don't perform discovery for SLI4 loopback diagnostic test */
	if ((phba->sli_rev == LPFC_SLI_REV4) &&
	    !(phba->hba_flag & HBA_FCOE_MODE) &&
	    (phba->link_flag & LS_LOOPBACK_MODE))
		return;

	if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
	    vport->fc_flag & FC_PUBLIC_LOOP &&
	    !(vport->fc_flag & FC_LBIT)) {
		/* Need to wait for FAN - use discovery timer
		 * for timeout. port_state is identically
		 * LPFC_LOCAL_CFG_LINK while waiting for FAN
		 */
		lpfc_set_disctmo(vport);
		return;
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl
	 */
	if (vport->port_state != LPFC_FLOGI || vport->fc_flag & FC_PT2PT_PLOGI)
		lpfc_initial_flogi(vport);
	return;

out:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);

	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
			 "0200 CONFIG_LINK bad hba state x%x\n",
			 vport->port_state);

	lpfc_issue_clear_la(phba, vport);
	return;
}

/**
 * lpfc_sli4_clear_fcf_rr_bmask
 * @phba: pointer to the struct lpfc_hba for this port.
 * This function resets the round robin bit mask and clears the
 * fcf priority list. The list deletions are done while holding the
 * hbalock. The ON_LIST flag and the FLOGI_FAILED flags are cleared
 * from the lpfc_fcf_pri record.
 **/
void
lpfc_sli4_clear_fcf_rr_bmask(struct lpfc_hba *phba)
{
	struct lpfc_fcf_pri *fcf_pri;
	struct lpfc_fcf_pri *next_fcf_pri;
	memset(phba->fcf.fcf_rr_bmask, 0, sizeof(*phba->fcf.fcf_rr_bmask));
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(fcf_pri, next_fcf_pri,
				 &phba->fcf.fcf_pri_list, list) {
		list_del_init(&fcf_pri->list);
		fcf_pri->fcf_rec.flag = 0;
	}
	spin_unlock_irq(&phba->hbalock);
}
static void
lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	struct lpfc_vport *vport = mboxq->vport;

	if (mboxq->u.mb.mbxStatus) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
				 "2017 REG_FCFI mbxStatus error x%x "
				 "HBA state x%x\n",
				 mboxq->u.mb.mbxStatus, vport->port_state);
		goto fail_out;
	}

	/* Start FCoE discovery by sending a FLOGI. */
	phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
	/* Set the FCFI registered flag */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= FCF_REGISTERED;
	spin_unlock_irq(&phba->hbalock);

	/* If there is a pending FCoE event, restart FCF table scan. */
	if ((!(phba->hba_flag & FCF_RR_INPROG)) &&
	    lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
		goto fail_out;

	/* Mark successful completion of FCF table scan */
	spin_lock_irq(&phba->hbalock);
	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
	phba->hba_flag &= ~FCF_TS_INPROG;
	if (vport->port_state != LPFC_FLOGI) {
		phba->hba_flag |= FCF_RR_INPROG;
		spin_unlock_irq(&phba->hbalock);
		lpfc_issue_init_vfi(vport);
		goto out;
	}
	spin_unlock_irq(&phba->hbalock);
	goto out;

fail_out:
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~FCF_RR_INPROG;
	spin_unlock_irq(&phba->hbalock);
out:
	mempool_free(mboxq, phba->mbox_mem_pool);
}

/**
 * lpfc_fab_name_match - Check if the fcf fabric name matches.
 * @fab_name: pointer to fabric name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's fabric name with the provided
 * fabric name. If the fabric names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
{
	if (fab_name[0] != bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record))
		return 0;
	if (fab_name[1] != bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record))
		return 0;
	if (fab_name[2] != bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record))
		return 0;
	if (fab_name[3] != bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record))
		return 0;
	if (fab_name[4] != bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record))
		return 0;
	if (fab_name[5] != bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record))
		return 0;
	if (fab_name[6] != bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record))
		return 0;
	if (fab_name[7] != bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_sw_name_match - Check if the fcf switch name matches.
 * @sw_name: pointer to switch name.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's switch name with the provided
 * switch name. If the switch names are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
{
	if (sw_name[0] != bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record))
		return 0;
	if (sw_name[1] != bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record))
		return 0;
	if (sw_name[2] != bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record))
		return 0;
	if (sw_name[3] != bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record))
		return 0;
	if (sw_name[4] != bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record))
		return 0;
	if (sw_name[5] != bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record))
		return 0;
	if (sw_name[6] != bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record))
		return 0;
	if (sw_name[7] != bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record))
		return 0;
	return 1;
}

/**
 * lpfc_mac_addr_match - Check if the fcf mac address matches.
 * @mac_addr: pointer to mac address.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine compares the fcf record's mac address with the HBA's
 * FCF mac address. If the mac addresses are identical this function
 * returns 1, else it returns 0.
 **/
static uint32_t
lpfc_mac_addr_match(uint8_t *mac_addr, struct fcf_record *new_fcf_record)
{
	if (mac_addr[0] != bf_get(lpfc_fcf_record_mac_0, new_fcf_record))
		return 0;
	if (mac_addr[1] != bf_get(lpfc_fcf_record_mac_1, new_fcf_record))
		return 0;
	if (mac_addr[2] != bf_get(lpfc_fcf_record_mac_2, new_fcf_record))
		return 0;
	if (mac_addr[3] != bf_get(lpfc_fcf_record_mac_3, new_fcf_record))
		return 0;
	if (mac_addr[4] != bf_get(lpfc_fcf_record_mac_4, new_fcf_record))
		return 0;
	if (mac_addr[5] != bf_get(lpfc_fcf_record_mac_5, new_fcf_record))
		return 0;
	return 1;
}
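
/*
 * Editorial sketch (not part of the driver): each matcher above
 * compares one stored identity field (an 8-byte fabric or switch name,
 * or a 6-byte MAC address) octet by octet against the same field
 * extracted from the new FCF record through the per-octet bf_get()
 * accessors. Assuming both sides were available as flat byte arrays,
 * the behaviour would be that of a memcmp (the helper name is an
 * illustrative assumption):
 */
static uint32_t
lpfc_bytes_match_sketch(const uint8_t *a, const uint8_t *b, size_t len)
{
	/* 1 when all len octets are identical, else 0 */
	return memcmp(a, b, len) == 0;
}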

static bool
lpfc_vlan_id_match(uint16_t curr_vlan_id, uint16_t new_vlan_id)
{
	return (curr_vlan_id == new_vlan_id);
}

/**
 * __lpfc_update_fcf_record_pri - update the lpfc_fcf_pri record.
 * @phba: pointer to lpfc hba data structure.
 * @fcf_index: Index for the lpfc_fcf_record.
 * @new_fcf_record: pointer to hba fcf record.
 *
 * This routine updates the driver FCF priority record from the new HBA FCF
 * record. This routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record_pri(struct lpfc_hba *phba, uint16_t fcf_index,
			     struct fcf_record *new_fcf_record)
{
	struct lpfc_fcf_pri *fcf_pri;

	fcf_pri = &phba->fcf.fcf_pri[fcf_index];
	fcf_pri->fcf_rec.fcf_index = fcf_index;
	/* FCF record priority */
	fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;

}

/**
 * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to fcf record.
 *
 * This routine copies the FCF information from the FCF
 * record to the lpfc_hba data structure.
 **/
static void
lpfc_copy_fcf_record(struct lpfc_fcf_rec *fcf_rec,
		     struct fcf_record *new_fcf_record)
{
	/* Fabric name */
	fcf_rec->fabric_name[0] =
		bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
	fcf_rec->fabric_name[1] =
		bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
	fcf_rec->fabric_name[2] =
		bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
	fcf_rec->fabric_name[3] =
		bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
	fcf_rec->fabric_name[4] =
		bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
	fcf_rec->fabric_name[5] =
		bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
	fcf_rec->fabric_name[6] =
		bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
	fcf_rec->fabric_name[7] =
		bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
	/* Mac address */
	fcf_rec->mac_addr[0] = bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
	fcf_rec->mac_addr[1] = bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
	fcf_rec->mac_addr[2] = bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
	fcf_rec->mac_addr[3] = bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
	fcf_rec->mac_addr[4] = bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
	fcf_rec->mac_addr[5] = bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
	/* FCF record index */
	fcf_rec->fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
	/* FCF record priority */
	fcf_rec->priority = new_fcf_record->fip_priority;
	/* Switch name */
	fcf_rec->switch_name[0] =
		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
	fcf_rec->switch_name[1] =
		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
	fcf_rec->switch_name[2] =
		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
	fcf_rec->switch_name[3] =
		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
	fcf_rec->switch_name[4] =
		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
	fcf_rec->switch_name[5] =
		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
	fcf_rec->switch_name[6] =
		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
	fcf_rec->switch_name[7] =
		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
}

/**
 * __lpfc_update_fcf_record - Update driver fcf record
 * @phba: pointer to lpfc hba data structure.
 * @fcf_rec: pointer to driver fcf record.
 * @new_fcf_record: pointer to hba fcf record.
 * @addr_mode: address mode to be set to the driver fcf record.
 * @vlan_id: vlan tag to be set to the driver fcf record.
 * @flag: flag bits to be set to the driver fcf record.
 *
 * This routine updates the driver FCF record from the new HBA FCF record
 * together with the address mode, vlan_id, and other information. This
 * routine is called with the host lock held.
 **/
static void
__lpfc_update_fcf_record(struct lpfc_hba *phba, struct lpfc_fcf_rec *fcf_rec,
			 struct fcf_record *new_fcf_record, uint32_t addr_mode,
			 uint16_t vlan_id, uint32_t flag)
{
	/* Copy the fields from the HBA's FCF record */
	lpfc_copy_fcf_record(fcf_rec, new_fcf_record);
	/* Update other fields of driver FCF record */
	fcf_rec->addr_mode = addr_mode;
	fcf_rec->vlan_id = vlan_id;
	fcf_rec->flag |= (flag | RECORD_VALID);
	__lpfc_update_fcf_record_pri(phba,
		bf_get(lpfc_fcf_record_fcf_index, new_fcf_record),
		new_fcf_record);
}

/**
 * lpfc_register_fcf - Register the FCF with hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine issues a register fcfi mailbox command to register
 * the fcf with HBA.
 **/
static void
lpfc_register_fcf(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* If the FCF is not available do nothing. */
	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* The FCF is already registered, start discovery */
	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
		phba->hba_flag &= ~FCF_TS_INPROG;
		if (phba->pport->port_state != LPFC_FLOGI) {
			phba->hba_flag |= FCF_RR_INPROG;
			spin_unlock_irq(&phba->hbalock);
			lpfc_initial_flogi(phba->pport);
			return;
		}
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	spin_unlock_irq(&phba->hbalock);

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_reg_fcfi(phba, fcf_mbxq);
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
		spin_unlock_irq(&phba->hbalock);
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
	}

	return;
}
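
/*
 * Editorial note: each failure path above (FCF not available, mailbox
 * allocation failure, MBX_NOT_FINISHED) clears FCF_TS_INPROG and
 * FCF_RR_INPROG under hbalock before returning, so a failed
 * registration never leaves a stale in-progress flag that would block
 * a later FCF table scan or round-robin failover.
 */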

/**
 * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
 * @phba: pointer to lpfc hba data structure.
 * @new_fcf_record: pointer to fcf record.
 * @boot_flag: Indicates if this record is used by the boot bios.
 * @addr_mode: The address mode to be used by this FCF.
 * @vlan_id: The vlan id to be used as vlan tagging by this FCF.
 *
 * This routine compares the fcf record with the connect list obtained from the
 * config region to decide if this FCF can be used for SAN discovery. It returns
 * 1 if this record can be used for SAN discovery, else it returns zero. If this
 * FCF record can be used for SAN discovery, boot_flag will indicate if this FCF
 * is used by the boot bios and addr_mode will indicate the addressing mode to
 * be used for this FCF when the function returns.
 * If the FCF record needs to be used with a particular vlan id, the vlan is
 * set in vlan_id on return from the function. If no VLAN tagging needs to
 * be used with the FCF, vlan_id will be set to LPFC_FCOE_NULL_VID.
 **/
static int
lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
			 struct fcf_record *new_fcf_record,
			 uint32_t *boot_flag, uint32_t *addr_mode,
			 uint16_t *vlan_id)
{
	struct lpfc_fcf_conn_entry *conn_entry;
	int i, j, fcf_vlan_id = 0;

	/* Find the lowest VLAN id in the FCF record */
	for (i = 0; i < 512; i++) {
		if (new_fcf_record->vlan_bitmap[i]) {
			fcf_vlan_id = i * 8;
			j = 0;
			while (!((new_fcf_record->vlan_bitmap[i] >> j) & 1)) {
				j++;
				fcf_vlan_id++;
			}
			break;
		}
	}
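
	/*
	 * Worked example (editorial): byte i of vlan_bitmap covers VLAN
	 * ids i*8 through i*8+7, with the lowest id at bit 0. If
	 * vlan_bitmap[12] is 0x30, the first set bit is bit 4, so
	 * fcf_vlan_id becomes 12 * 8 + 4 = 100; it stays 0 when no bit
	 * is set anywhere in the bitmap.
	 */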

	/* If FCF not available return 0 */
	if (!bf_get(lpfc_fcf_record_fcf_avail, new_fcf_record) ||
	    !bf_get(lpfc_fcf_record_fcf_valid, new_fcf_record))
		return 0;

	if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		if (phba->valid_vlan)
			*vlan_id = phba->vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	/*
	 * If there are no FCF connection table entries, the driver
	 * connects to all FCFs.
	 */
	if (list_empty(&phba->fcf_conn_rec_list)) {
		*boot_flag = 0;
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);

		/*
		 * When there are no FCF connect entries, use driver's default
		 * addressing mode - FPMA.
		 */
		if (*addr_mode & LPFC_FCF_FPMA)
			*addr_mode = LPFC_FCF_FPMA;

		/* If the FCF record reports a vlan id, use that vlan id */
		if (fcf_vlan_id)
			*vlan_id = fcf_vlan_id;
		else
			*vlan_id = LPFC_FCOE_NULL_VID;
		return 1;
	}

	list_for_each_entry(conn_entry,
			    &phba->fcf_conn_rec_list, list) {
		if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
			continue;

		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
		    !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
					 new_fcf_record))
			continue;
		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
		    !lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
					new_fcf_record))
			continue;
		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
			/*
			 * If the vlan bit map does not have the bit set for
			 * the vlan id to be used, then it is not a match.
			 */
			if (!(new_fcf_record->vlan_bitmap
				[conn_entry->conn_rec.vlan_tag / 8] &
				(1 << (conn_entry->conn_rec.vlan_tag % 8))))
				continue;
		}

		/*
		 * If the connection record does not support any addressing
		 * mode, skip the FCF record.
		 */
		if (!(bf_get(lpfc_fcf_record_mac_addr_prov, new_fcf_record)
		      & (LPFC_FCF_FPMA | LPFC_FCF_SPMA)))
			continue;

		/*
		 * Check if the connection record specifies a required
		 * addressing mode.
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {

			/*
			 * If SPMA is required but the FCF does not support
			 * it, skip this record.
			 */
			if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_SPMA))
				continue;

			/*
			 * If FPMA is required but the FCF does not support
			 * it, skip this record.
			 */
			if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			    !(bf_get(lpfc_fcf_record_mac_addr_prov,
				     new_fcf_record) & LPFC_FCF_FPMA))
				continue;
		}

		/*
		 * This fcf record matches the filtering criteria.
		 */
		if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
			*boot_flag = 1;
		else
			*boot_flag = 0;

		/*
		 * If the user did not specify any addressing mode, or if the
		 * preferred addressing mode specified by the user is not
		 * supported by the FCF, allow the fabric to pick the
		 * addressing mode.
		 */
		*addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
				    new_fcf_record);
		/*
		 * If the user specified a required address mode, assign that
		 * address mode
		 */
		if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
		    (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
			*addr_mode = (conn_entry->conn_rec.flags &
				      FCFCNCT_AM_SPMA) ?
				     LPFC_FCF_SPMA : LPFC_FCF_FPMA;
		/*
		 * If the user specified a preferred address mode, use that
		 * mode only if the FCF supports it.
		 */
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_SPMA))
			*addr_mode = LPFC_FCF_SPMA;
		else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
			 (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
			 !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
			 (*addr_mode & LPFC_FCF_FPMA))
			*addr_mode = LPFC_FCF_FPMA;
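
		/*
		 * Example (editorial): with a preferred-SPMA entry
		 * (FCFCNCT_AM_VALID | FCFCNCT_AM_PREFERRED |
		 * FCFCNCT_AM_SPMA) and an FCF advertising both FPMA and
		 * SPMA, *addr_mode narrows to LPFC_FCF_SPMA; if the FCF
		 * advertises FPMA only, the preference is ignored and
		 * the fabric-provided mode is kept. A required mode was
		 * already enforced above by skipping FCFs that cannot
		 * satisfy it.
		 */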
1643 1643
1644 /* If matching connect list has a vlan id, use it */ 1644 /* If matching connect list has a vlan id, use it */
1645 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) 1645 if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
1646 *vlan_id = conn_entry->conn_rec.vlan_tag; 1646 *vlan_id = conn_entry->conn_rec.vlan_tag;
1647 /* 1647 /*
1648 * If no vlan id is specified in connect list, use the vlan id 1648 * If no vlan id is specified in connect list, use the vlan id
1649 * in the FCF record 1649 * in the FCF record
1650 */ 1650 */
1651 else if (fcf_vlan_id) 1651 else if (fcf_vlan_id)
1652 *vlan_id = fcf_vlan_id; 1652 *vlan_id = fcf_vlan_id;
1653 else 1653 else
1654 *vlan_id = LPFC_FCOE_NULL_VID; 1654 *vlan_id = LPFC_FCOE_NULL_VID;
1655 1655
1656 return 1; 1656 return 1;
1657 } 1657 }
1658 1658
1659 return 0; 1659 return 0;
1660 } 1660 }
1661 1661
1662 /** 1662 /**
1663 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event. 1663 * lpfc_check_pending_fcoe_event - Check if there is pending fcoe event.
1664 * @phba: pointer to lpfc hba data structure. 1664 * @phba: pointer to lpfc hba data structure.
1665 * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned. 1665 * @unreg_fcf: Unregister FCF if FCF table need to be re-scaned.
1666 * 1666 *
1667 * This function check if there is any fcoe event pending while driver 1667 * This function check if there is any fcoe event pending while driver
1668 * scan FCF entries. If there is any pending event, it will restart the 1668 * scan FCF entries. If there is any pending event, it will restart the
1669 * FCF saning and return 1 else return 0. 1669 * FCF saning and return 1 else return 0.
1670 */ 1670 */
1671 int 1671 int
1672 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf) 1672 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
1673 { 1673 {
1674 /* 1674 /*
1675 * If the Link is up and no FCoE events while in the 1675 * If the Link is up and no FCoE events while in the
1676 * FCF discovery, no need to restart FCF discovery. 1676 * FCF discovery, no need to restart FCF discovery.
1677 */ 1677 */
1678 if ((phba->link_state >= LPFC_LINK_UP) && 1678 if ((phba->link_state >= LPFC_LINK_UP) &&
1679 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan)) 1679 (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
1680 return 0; 1680 return 0;
1681 1681
1682 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1682 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1683 "2768 Pending link or FCF event during current " 1683 "2768 Pending link or FCF event during current "
1684 "handling of the previous event: link_state:x%x, " 1684 "handling of the previous event: link_state:x%x, "
1685 "evt_tag_at_scan:x%x, evt_tag_current:x%x\n", 1685 "evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
1686 phba->link_state, phba->fcoe_eventtag_at_fcf_scan, 1686 phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
1687 phba->fcoe_eventtag); 1687 phba->fcoe_eventtag);
1688 1688
1689 spin_lock_irq(&phba->hbalock); 1689 spin_lock_irq(&phba->hbalock);
1690 phba->fcf.fcf_flag &= ~FCF_AVAILABLE; 1690 phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
1691 spin_unlock_irq(&phba->hbalock); 1691 spin_unlock_irq(&phba->hbalock);
1692 1692
1693 if (phba->link_state >= LPFC_LINK_UP) { 1693 if (phba->link_state >= LPFC_LINK_UP) {
1694 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1694 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1695 "2780 Restart FCF table scan due to " 1695 "2780 Restart FCF table scan due to "
1696 "pending FCF event:evt_tag_at_scan:x%x, " 1696 "pending FCF event:evt_tag_at_scan:x%x, "
1697 "evt_tag_current:x%x\n", 1697 "evt_tag_current:x%x\n",
1698 phba->fcoe_eventtag_at_fcf_scan, 1698 phba->fcoe_eventtag_at_fcf_scan,
1699 phba->fcoe_eventtag); 1699 phba->fcoe_eventtag);
1700 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 1700 lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
1701 } else { 1701 } else {
1702 /* 1702 /*
1703 * Do not continue FCF discovery and clear FCF_TS_INPROG 1703 * Do not continue FCF discovery and clear FCF_TS_INPROG
1704 * flag 1704 * flag
1705 */ 1705 */
1706 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 1706 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
1707 "2833 Stop FCF discovery process due to link " 1707 "2833 Stop FCF discovery process due to link "
1708 "state change (x%x)\n", phba->link_state); 1708 "state change (x%x)\n", phba->link_state);
1709 spin_lock_irq(&phba->hbalock); 1709 spin_lock_irq(&phba->hbalock);
1710 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG); 1710 phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
1711 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY); 1711 phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
1712 spin_unlock_irq(&phba->hbalock); 1712 spin_unlock_irq(&phba->hbalock);
1713 } 1713 }
1714 1714
1715 /* Unregister the currently registered FCF if required */ 1715 /* Unregister the currently registered FCF if required */
1716 if (unreg_fcf) { 1716 if (unreg_fcf) {
1717 spin_lock_irq(&phba->hbalock); 1717 spin_lock_irq(&phba->hbalock);
1718 phba->fcf.fcf_flag &= ~FCF_REGISTERED; 1718 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
1719 spin_unlock_irq(&phba->hbalock); 1719 spin_unlock_irq(&phba->hbalock);
1720 lpfc_sli4_unregister_fcf(phba); 1720 lpfc_sli4_unregister_fcf(phba);
1721 } 1721 }
1722 return 1; 1722 return 1;
1723 } 1723 }
1724 1724
1725 /** 1725 /**
1726 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record 1726 * lpfc_sli4_new_fcf_random_select - Randomly select an eligible new fcf record
1727 * @phba: pointer to lpfc hba data structure. 1727 * @phba: pointer to lpfc hba data structure.
1728 * @fcf_cnt: number of eligible fcf record seen so far. 1728 * @fcf_cnt: number of eligible fcf record seen so far.
1729 * 1729 *
1730 * This function makes a running random selection decision on the FCF record to 1730 * This function makes a running random selection decision on the FCF record to
1731 * use through a sequence of @fcf_cnt eligible FCF records with equal 1731 * use through a sequence of @fcf_cnt eligible FCF records with equal
1732 * probability. To perform integer manipulation of random numbers with 1732 * probability. To perform integer manipulation of random numbers with
1733 * size uint32_t, the lower 16 bits of the 32-bit random number returned 1733 * size uint32_t, the lower 16 bits of the 32-bit random number returned
1734 * from random32() are taken as the random number generated. 1734 * from random32() are taken as the random number generated.
1735 * 1735 *
1736 * Returns true when the outcome is that the newly read FCF record should be 1736 * Returns true when the outcome is that the newly read FCF record should be
1737 * chosen; otherwise, returns false to keep the previously 1737 * chosen; otherwise, returns false to keep the previously
1738 * chosen FCF record. 1738 * chosen FCF record.
1739 **/ 1739 **/
1740 static bool 1740 static bool
1741 lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt) 1741 lpfc_sli4_new_fcf_random_select(struct lpfc_hba *phba, uint32_t fcf_cnt)
1742 { 1742 {
1743 uint32_t rand_num; 1743 uint32_t rand_num;
1744 1744
1745 /* Get 16-bit uniform random number */ 1745 /* Get 16-bit uniform random number */
1746 rand_num = (0xFFFF & random32()); 1746 rand_num = (0xFFFF & random32());
1747 1747
1748 /* Decision with probability 1/fcf_cnt */ 1748 /* Decision with probability 1/fcf_cnt */
1749 if ((fcf_cnt * rand_num) < 0xFFFF) 1749 if ((fcf_cnt * rand_num) < 0xFFFF)
1750 return true; 1750 return true;
1751 else 1751 else
1752 return false; 1752 return false;
1753 } 1753 }
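
The decision rule above is a streaming ("reservoir") selection: keeping the k-th eligible record with probability 1/k leaves every record in the sequence equally likely to be the final pick, without knowing the total count in advance. A minimal user-space sketch of the same rule, assuming rand() as a stand-in for random32() and with all names illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Keep the k-th eligible record with probability ~1/k, mirroring
 * lpfc_sli4_new_fcf_random_select(): a 16-bit uniform sample r
 * satisfies (k * r) < 0xFFFF for roughly one value of r in k.
 */
static bool keep_new_record(uint32_t seen_cnt)
{
	uint32_t rand_num = (uint32_t)rand() & 0xFFFF; /* stand-in for random32() */

	return (seen_cnt * rand_num) < 0xFFFF;
}

int main(void)
{
	uint32_t k, chosen = 0;

	srand((unsigned int)time(NULL));
	/* After scanning ten eligible records, each index survives as
	 * the final choice with (approximately) equal probability. */
	for (k = 1; k <= 10; k++)
		if (keep_new_record(k))
			chosen = k;
	printf("chosen record: %u\n", chosen);
	return 0;
}

Since the comparison is against 0xFFFF rather than 0x10000, the keep probability is only approximately 1/k; the bias is negligible for practical FCF counts.
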
1754 1754
1755 /** 1755 /**
1756 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command. 1756 * lpfc_sli4_fcf_rec_mbox_parse - Parse read_fcf mbox command.
1757 * @phba: pointer to lpfc hba data structure. 1757 * @phba: pointer to lpfc hba data structure.
1758 * @mboxq: pointer to mailbox object. 1758 * @mboxq: pointer to mailbox object.
1759 * @next_fcf_index: pointer to holder of next fcf index. 1759 * @next_fcf_index: pointer to holder of next fcf index.
1760 * 1760 *
1761 * This routine parses the non-embedded fcf mailbox command by performing the 1761 * This routine parses the non-embedded fcf mailbox command by performing the
1762 * necessary error checking, non-embedded read FCF record mailbox command 1762 * necessary error checking, non-embedded read FCF record mailbox command
1763 * SGE parsing, and endianness swapping. 1763 * SGE parsing, and endianness swapping.
1764 * 1764 *
1765 * Returns the pointer to the new FCF record in the non-embedded mailbox 1765 * Returns the pointer to the new FCF record in the non-embedded mailbox
1766 * command DMA memory if successful, otherwise NULL. 1766 * command DMA memory if successful, otherwise NULL.
1767 */ 1767 */
1768 static struct fcf_record * 1768 static struct fcf_record *
1769 lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, 1769 lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
1770 uint16_t *next_fcf_index) 1770 uint16_t *next_fcf_index)
1771 { 1771 {
1772 void *virt_addr; 1772 void *virt_addr;
1773 dma_addr_t phys_addr; 1773 dma_addr_t phys_addr;
1774 struct lpfc_mbx_sge sge; 1774 struct lpfc_mbx_sge sge;
1775 struct lpfc_mbx_read_fcf_tbl *read_fcf; 1775 struct lpfc_mbx_read_fcf_tbl *read_fcf;
1776 uint32_t shdr_status, shdr_add_status; 1776 uint32_t shdr_status, shdr_add_status;
1777 union lpfc_sli4_cfg_shdr *shdr; 1777 union lpfc_sli4_cfg_shdr *shdr;
1778 struct fcf_record *new_fcf_record; 1778 struct fcf_record *new_fcf_record;
1779 1779
1780 /* Get the first SGE entry from the non-embedded DMA memory. This 1780 /* Get the first SGE entry from the non-embedded DMA memory. This
1781 * routine only uses a single SGE. 1781 * routine only uses a single SGE.
1782 */ 1782 */
1783 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge); 1783 lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
1784 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo); 1784 phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
1785 if (unlikely(!mboxq->sge_array)) { 1785 if (unlikely(!mboxq->sge_array)) {
1786 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 1786 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
1787 "2524 Failed to get the non-embedded SGE " 1787 "2524 Failed to get the non-embedded SGE "
1788 "virtual address\n"); 1788 "virtual address\n");
1789 return NULL; 1789 return NULL;
1790 } 1790 }
1791 virt_addr = mboxq->sge_array->addr[0]; 1791 virt_addr = mboxq->sge_array->addr[0];
1792 1792
1793 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr; 1793 shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
1794 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); 1794 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1795 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); 1795 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1796 if (shdr_status || shdr_add_status) { 1796 if (shdr_status || shdr_add_status) {
1797 if (shdr_status == STATUS_FCF_TABLE_EMPTY) 1797 if (shdr_status == STATUS_FCF_TABLE_EMPTY)
1798 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1798 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1799 "2726 READ_FCF_RECORD Indicates empty " 1799 "2726 READ_FCF_RECORD Indicates empty "
1800 "FCF table.\n"); 1800 "FCF table.\n");
1801 else 1801 else
1802 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 1802 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
1803 "2521 READ_FCF_RECORD mailbox failed " 1803 "2521 READ_FCF_RECORD mailbox failed "
1804 "with status x%x add_status x%x, " 1804 "with status x%x add_status x%x, "
1805 "mbx\n", shdr_status, shdr_add_status); 1805 "mbx\n", shdr_status, shdr_add_status);
1806 return NULL; 1806 return NULL;
1807 } 1807 }
1808 1808
1809 /* Interpreting the returned information of the FCF record */ 1809 /* Interpreting the returned information of the FCF record */
1810 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr; 1810 read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
1811 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf, 1811 lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
1812 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1812 sizeof(struct lpfc_mbx_read_fcf_tbl));
1813 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf); 1813 *next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
1814 new_fcf_record = (struct fcf_record *)(virt_addr + 1814 new_fcf_record = (struct fcf_record *)(virt_addr +
1815 sizeof(struct lpfc_mbx_read_fcf_tbl)); 1815 sizeof(struct lpfc_mbx_read_fcf_tbl));
1816 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record, 1816 lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
1817 offsetof(struct fcf_record, vlan_bitmap)); 1817 offsetof(struct fcf_record, vlan_bitmap));
1818 new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137); 1818 new_fcf_record->word137 = le32_to_cpu(new_fcf_record->word137);
1819 new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138); 1819 new_fcf_record->word138 = le32_to_cpu(new_fcf_record->word138);
1820 1820
1821 return new_fcf_record; 1821 return new_fcf_record;
1822 } 1822 }
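
The two lpfc_sli_pcimem_bcopy() calls above byte-swap the DMA'd response in place, one 32-bit word at a time, before the CPU reads its fields; the trailing word137/word138 conversions handle the part of the record past the vlan_bitmap boundary. A minimal sketch of that in-place little-endian-to-host pass, assuming glibc's <endian.h> for le32toh() and with the helper name illustrative:

#include <endian.h>	/* le32toh(); glibc-specific assumption */
#include <stddef.h>
#include <stdint.h>

/* Convert a little-endian DMA buffer to host byte order in place,
 * word by word, as done for the mailbox header and the fixed part
 * of the FCF record. A no-op on little-endian hosts. */
static void le32_buf_to_host(uint32_t *buf, size_t nwords)
{
	size_t i;

	for (i = 0; i < nwords; i++)
		buf[i] = le32toh(buf[i]);
}
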
1823 1823
1824 /** 1824 /**
1825 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record 1825 * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
1826 * @phba: pointer to lpfc hba data structure. 1826 * @phba: pointer to lpfc hba data structure.
1827 * @fcf_record: pointer to the fcf record. 1827 * @fcf_record: pointer to the fcf record.
1828 * @vlan_id: the lowest vlan identifier associated to this fcf record. 1828 * @vlan_id: the lowest vlan identifier associated to this fcf record.
1829 * @next_fcf_index: the index to the next fcf record in hba's fcf table. 1829 * @next_fcf_index: the index to the next fcf record in hba's fcf table.
1830 * 1830 *
1831 * This routine logs the detailed FCF record if the LOG_FIP logging is 1831 * This routine logs the detailed FCF record if the LOG_FIP logging is
1832 * enabled. 1832 * enabled.
1833 **/ 1833 **/
1834 static void 1834 static void
1835 lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba, 1835 lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
1836 struct fcf_record *fcf_record, 1836 struct fcf_record *fcf_record,
1837 uint16_t vlan_id, 1837 uint16_t vlan_id,
1838 uint16_t next_fcf_index) 1838 uint16_t next_fcf_index)
1839 { 1839 {
1840 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1840 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1841 "2764 READ_FCF_RECORD:\n" 1841 "2764 READ_FCF_RECORD:\n"
1842 "\tFCF_Index : x%x\n" 1842 "\tFCF_Index : x%x\n"
1843 "\tFCF_Avail : x%x\n" 1843 "\tFCF_Avail : x%x\n"
1844 "\tFCF_Valid : x%x\n" 1844 "\tFCF_Valid : x%x\n"
1845 "\tFIP_Priority : x%x\n" 1845 "\tFIP_Priority : x%x\n"
1846 "\tMAC_Provider : x%x\n" 1846 "\tMAC_Provider : x%x\n"
1847 "\tLowest VLANID : x%x\n" 1847 "\tLowest VLANID : x%x\n"
1848 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n" 1848 "\tFCF_MAC Addr : x%x:%x:%x:%x:%x:%x\n"
1849 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" 1849 "\tFabric_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1850 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n" 1850 "\tSwitch_Name : x%x:%x:%x:%x:%x:%x:%x:%x\n"
1851 "\tNext_FCF_Index: x%x\n", 1851 "\tNext_FCF_Index: x%x\n",
1852 bf_get(lpfc_fcf_record_fcf_index, fcf_record), 1852 bf_get(lpfc_fcf_record_fcf_index, fcf_record),
1853 bf_get(lpfc_fcf_record_fcf_avail, fcf_record), 1853 bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
1854 bf_get(lpfc_fcf_record_fcf_valid, fcf_record), 1854 bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
1855 fcf_record->fip_priority, 1855 fcf_record->fip_priority,
1856 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record), 1856 bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
1857 vlan_id, 1857 vlan_id,
1858 bf_get(lpfc_fcf_record_mac_0, fcf_record), 1858 bf_get(lpfc_fcf_record_mac_0, fcf_record),
1859 bf_get(lpfc_fcf_record_mac_1, fcf_record), 1859 bf_get(lpfc_fcf_record_mac_1, fcf_record),
1860 bf_get(lpfc_fcf_record_mac_2, fcf_record), 1860 bf_get(lpfc_fcf_record_mac_2, fcf_record),
1861 bf_get(lpfc_fcf_record_mac_3, fcf_record), 1861 bf_get(lpfc_fcf_record_mac_3, fcf_record),
1862 bf_get(lpfc_fcf_record_mac_4, fcf_record), 1862 bf_get(lpfc_fcf_record_mac_4, fcf_record),
1863 bf_get(lpfc_fcf_record_mac_5, fcf_record), 1863 bf_get(lpfc_fcf_record_mac_5, fcf_record),
1864 bf_get(lpfc_fcf_record_fab_name_0, fcf_record), 1864 bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
1865 bf_get(lpfc_fcf_record_fab_name_1, fcf_record), 1865 bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
1866 bf_get(lpfc_fcf_record_fab_name_2, fcf_record), 1866 bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
1867 bf_get(lpfc_fcf_record_fab_name_3, fcf_record), 1867 bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
1868 bf_get(lpfc_fcf_record_fab_name_4, fcf_record), 1868 bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
1869 bf_get(lpfc_fcf_record_fab_name_5, fcf_record), 1869 bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
1870 bf_get(lpfc_fcf_record_fab_name_6, fcf_record), 1870 bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
1871 bf_get(lpfc_fcf_record_fab_name_7, fcf_record), 1871 bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
1872 bf_get(lpfc_fcf_record_switch_name_0, fcf_record), 1872 bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
1873 bf_get(lpfc_fcf_record_switch_name_1, fcf_record), 1873 bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
1874 bf_get(lpfc_fcf_record_switch_name_2, fcf_record), 1874 bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
1875 bf_get(lpfc_fcf_record_switch_name_3, fcf_record), 1875 bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
1876 bf_get(lpfc_fcf_record_switch_name_4, fcf_record), 1876 bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
1877 bf_get(lpfc_fcf_record_switch_name_5, fcf_record), 1877 bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
1878 bf_get(lpfc_fcf_record_switch_name_6, fcf_record), 1878 bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
1879 bf_get(lpfc_fcf_record_switch_name_7, fcf_record), 1879 bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
1880 next_fcf_index); 1880 next_fcf_index);
1881 } 1881 }
1882 1882
1883 /** 1883 /**
1884 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF 1884 * lpfc_sli4_fcf_record_match - testing new FCF record for matching existing FCF
1885 * @phba: pointer to lpfc hba data structure. 1885 * @phba: pointer to lpfc hba data structure.
1886 * @fcf_rec: pointer to an existing FCF record. 1886 * @fcf_rec: pointer to an existing FCF record.
1887 * @new_fcf_record: pointer to a new FCF record. 1887 * @new_fcf_record: pointer to a new FCF record.
1888 * @new_vlan_id: vlan id from the new FCF record. 1888 * @new_vlan_id: vlan id from the new FCF record.
1889 * 1889 *
1890 * This function performs a matching test of a new FCF record against an existing 1890 * This function performs a matching test of a new FCF record against an existing
1891 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id 1891 * FCF record. If the new_vlan_id passed in is LPFC_FCOE_IGNORE_VID, vlan id
1892 * will not be used as part of the FCF record matching criteria. 1892 * will not be used as part of the FCF record matching criteria.
1893 * 1893 *
1894 * Returns true if all the fields match, otherwise returns false. 1894 * Returns true if all the fields match, otherwise returns false.
1895 */ 1895 */
1896 static bool 1896 static bool
1897 lpfc_sli4_fcf_record_match(struct lpfc_hba *phba, 1897 lpfc_sli4_fcf_record_match(struct lpfc_hba *phba,
1898 struct lpfc_fcf_rec *fcf_rec, 1898 struct lpfc_fcf_rec *fcf_rec,
1899 struct fcf_record *new_fcf_record, 1899 struct fcf_record *new_fcf_record,
1900 uint16_t new_vlan_id) 1900 uint16_t new_vlan_id)
1901 { 1901 {
1902 if (new_vlan_id != LPFC_FCOE_IGNORE_VID) 1902 if (new_vlan_id != LPFC_FCOE_IGNORE_VID)
1903 if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id)) 1903 if (!lpfc_vlan_id_match(fcf_rec->vlan_id, new_vlan_id))
1904 return false; 1904 return false;
1905 if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record)) 1905 if (!lpfc_mac_addr_match(fcf_rec->mac_addr, new_fcf_record))
1906 return false; 1906 return false;
1907 if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record)) 1907 if (!lpfc_sw_name_match(fcf_rec->switch_name, new_fcf_record))
1908 return false; 1908 return false;
1909 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record)) 1909 if (!lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record))
1910 return false; 1910 return false;
1911 if (fcf_rec->priority != new_fcf_record->fip_priority) 1911 if (fcf_rec->priority != new_fcf_record->fip_priority)
1912 return false; 1912 return false;
1913 return true; 1913 return true;
1914 } 1914 }
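
Every compared field must match for the two records to be considered the same, and LPFC_FCOE_IGNORE_VID turns the VLAN comparison into a wildcard. A self-contained sketch of the same short-circuit test, with the struct layout, the IGNORE_VID value, and all names illustrative rather than driver API:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define IGNORE_VID 0xFFFF	/* illustrative stand-in for LPFC_FCOE_IGNORE_VID */

struct rec {
	uint16_t vlan_id;
	uint8_t  mac[6];
	uint8_t  priority;
};

/* All fields must match; vlan_id participates only when the new
 * record does not carry the ignore-VID wildcard. */
static bool rec_match(const struct rec *old_rec, const struct rec *new_rec)
{
	if (new_rec->vlan_id != IGNORE_VID &&
	    old_rec->vlan_id != new_rec->vlan_id)
		return false;
	if (memcmp(old_rec->mac, new_rec->mac, sizeof(old_rec->mac)) != 0)
		return false;
	return old_rec->priority == new_rec->priority;
}
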
1915 1915
1916 /** 1916 /**
1917 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf 1917 * lpfc_sli4_fcf_rr_next_proc - processing next roundrobin fcf
1918 * @vport: Pointer to vport object. 1918 * @vport: Pointer to vport object.
1919 * @fcf_index: index to next fcf. 1919 * @fcf_index: index to next fcf.
1920 * 1920 *
1921 * This function processes the roundrobin fcf failover to the next fcf index. 1921 * This function processes the roundrobin fcf failover to the next fcf index.
1922 * When this function is invoked, there will be a current fcf registered 1922 * When this function is invoked, there will be a current fcf registered
1923 * for flogi. 1923 * for flogi.
1924 * Return: 0 to continue retrying flogi on the currently registered fcf; 1924 * Return: 0 to continue retrying flogi on the currently registered fcf;
1925 * 1 to stop flogi on the currently registered fcf. 1925 * 1 to stop flogi on the currently registered fcf.
1926 */ 1926 */
1927 int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index) 1927 int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
1928 { 1928 {
1929 struct lpfc_hba *phba = vport->phba; 1929 struct lpfc_hba *phba = vport->phba;
1930 int rc; 1930 int rc;
1931 1931
1932 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) { 1932 if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
1933 spin_lock_irq(&phba->hbalock); 1933 spin_lock_irq(&phba->hbalock);
1934 if (phba->hba_flag & HBA_DEVLOSS_TMO) { 1934 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
1935 spin_unlock_irq(&phba->hbalock); 1935 spin_unlock_irq(&phba->hbalock);
1936 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1936 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1937 "2872 Devloss tmo with no eligible " 1937 "2872 Devloss tmo with no eligible "
1938 "FCF, unregister in-use FCF (x%x) " 1938 "FCF, unregister in-use FCF (x%x) "
1939 "and rescan FCF table\n", 1939 "and rescan FCF table\n",
1940 phba->fcf.current_rec.fcf_indx); 1940 phba->fcf.current_rec.fcf_indx);
1941 lpfc_unregister_fcf_rescan(phba); 1941 lpfc_unregister_fcf_rescan(phba);
1942 goto stop_flogi_current_fcf; 1942 goto stop_flogi_current_fcf;
1943 } 1943 }
1944 /* Mark the end of FLOGI roundrobin failover */ 1944 /* Mark the end of FLOGI roundrobin failover */
1945 phba->hba_flag &= ~FCF_RR_INPROG; 1945 phba->hba_flag &= ~FCF_RR_INPROG;
1946 /* Allow action to new fcf asynchronous event */ 1946 /* Allow action to new fcf asynchronous event */
1947 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE); 1947 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
1948 spin_unlock_irq(&phba->hbalock); 1948 spin_unlock_irq(&phba->hbalock);
1949 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1949 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1950 "2865 No FCF available, stop roundrobin FCF " 1950 "2865 No FCF available, stop roundrobin FCF "
1951 "failover and change port state:x%x/x%x\n", 1951 "failover and change port state:x%x/x%x\n",
1952 phba->pport->port_state, LPFC_VPORT_UNKNOWN); 1952 phba->pport->port_state, LPFC_VPORT_UNKNOWN);
1953 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 1953 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1954 goto stop_flogi_current_fcf; 1954 goto stop_flogi_current_fcf;
1955 } else { 1955 } else {
1956 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS, 1956 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
1957 "2794 Try FLOGI roundrobin FCF failover to " 1957 "2794 Try FLOGI roundrobin FCF failover to "
1958 "(x%x)\n", fcf_index); 1958 "(x%x)\n", fcf_index);
1959 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index); 1959 rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
1960 if (rc) 1960 if (rc)
1961 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS, 1961 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
1962 "2761 FLOGI roundrobin FCF failover " 1962 "2761 FLOGI roundrobin FCF failover "
1963 "failed (rc:x%x) to read FCF (x%x)\n", 1963 "failed (rc:x%x) to read FCF (x%x)\n",
1964 rc, phba->fcf.current_rec.fcf_indx); 1964 rc, phba->fcf.current_rec.fcf_indx);
1965 else 1965 else
1966 goto stop_flogi_current_fcf; 1966 goto stop_flogi_current_fcf;
1967 } 1967 }
1968 return 0; 1968 return 0;
1969 1969
1970 stop_flogi_current_fcf: 1970 stop_flogi_current_fcf:
1971 lpfc_can_disctmo(vport); 1971 lpfc_can_disctmo(vport);
1972 return 1; 1972 return 1;
1973 } 1973 }
1974 1974
1975 /** 1975 /**
1976 * lpfc_sli4_fcf_pri_list_del 1976 * lpfc_sli4_fcf_pri_list_del
1977 * @phba: pointer to lpfc hba data structure. 1977 * @phba: pointer to lpfc hba data structure.
1978 * @fcf_index: the index of the fcf record to delete 1978 * @fcf_index: the index of the fcf record to delete
1979 * This routine checks the on-list flag of the fcf_index to be deleted. 1979 * This routine checks the on-list flag of the fcf_index to be deleted.
1980 * If it is on the list then it is removed from the list, and the flag 1980 * If it is on the list then it is removed from the list, and the flag
1981 * is cleared. This routine grabs the hbalock before removing the fcf 1981 * is cleared. This routine grabs the hbalock before removing the fcf
1982 * record from the list. 1982 * record from the list.
1983 **/ 1983 **/
1984 static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba, 1984 static void lpfc_sli4_fcf_pri_list_del(struct lpfc_hba *phba,
1985 uint16_t fcf_index) 1985 uint16_t fcf_index)
1986 { 1986 {
1987 struct lpfc_fcf_pri *new_fcf_pri; 1987 struct lpfc_fcf_pri *new_fcf_pri;
1988 1988
1989 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 1989 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
1990 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 1990 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
1991 "3058 deleting idx x%x pri x%x flg x%x\n", 1991 "3058 deleting idx x%x pri x%x flg x%x\n",
1992 fcf_index, new_fcf_pri->fcf_rec.priority, 1992 fcf_index, new_fcf_pri->fcf_rec.priority,
1993 new_fcf_pri->fcf_rec.flag); 1993 new_fcf_pri->fcf_rec.flag);
1994 spin_lock_irq(&phba->hbalock); 1994 spin_lock_irq(&phba->hbalock);
1995 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) { 1995 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) {
1996 if (phba->fcf.current_rec.priority == 1996 if (phba->fcf.current_rec.priority ==
1997 new_fcf_pri->fcf_rec.priority) 1997 new_fcf_pri->fcf_rec.priority)
1998 phba->fcf.eligible_fcf_cnt--; 1998 phba->fcf.eligible_fcf_cnt--;
1999 list_del_init(&new_fcf_pri->list); 1999 list_del_init(&new_fcf_pri->list);
2000 new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST; 2000 new_fcf_pri->fcf_rec.flag &= ~LPFC_FCF_ON_PRI_LIST;
2001 } 2001 }
2002 spin_unlock_irq(&phba->hbalock); 2002 spin_unlock_irq(&phba->hbalock);
2003 } 2003 }
2004 2004
2005 /** 2005 /**
2006 * lpfc_sli4_set_fcf_flogi_fail 2006 * lpfc_sli4_set_fcf_flogi_fail
2007 * @phba: pointer to lpfc hba data structure. 2007 * @phba: pointer to lpfc hba data structure.
2008 * @fcf_index: the index of the fcf record to update 2008 * @fcf_index: the index of the fcf record to update
2009 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED 2009 * This routine acquires the hbalock and then sets the LPFC_FCF_FLOGI_FAILED
2010 * flag so the roundrobin selection for the particular priority level 2010 * flag so the roundrobin selection for the particular priority level
2011 * will try a different fcf record that does not have this bit set. 2011 * will try a different fcf record that does not have this bit set.
2012 * If the fcf record is re-read for any reason this flag is cleared before 2012 * If the fcf record is re-read for any reason this flag is cleared before
2013 * adding it to the priority list. 2013 * adding it to the priority list.
2014 **/ 2014 **/
2015 void 2015 void
2016 lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index) 2016 lpfc_sli4_set_fcf_flogi_fail(struct lpfc_hba *phba, uint16_t fcf_index)
2017 { 2017 {
2018 struct lpfc_fcf_pri *new_fcf_pri; 2018 struct lpfc_fcf_pri *new_fcf_pri;
2019 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 2019 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2020 spin_lock_irq(&phba->hbalock); 2020 spin_lock_irq(&phba->hbalock);
2021 new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED; 2021 new_fcf_pri->fcf_rec.flag |= LPFC_FCF_FLOGI_FAILED;
2022 spin_unlock_irq(&phba->hbalock); 2022 spin_unlock_irq(&phba->hbalock);
2023 } 2023 }
2024 2024
2025 /** 2025 /**
2026 * lpfc_sli4_fcf_pri_list_add 2026 * lpfc_sli4_fcf_pri_list_add
2027 * @phba: pointer to lpfc hba data structure. 2027 * @phba: pointer to lpfc hba data structure.
2028 * @fcf_index: the index of the fcf record to add 2028 * @fcf_index: the index of the fcf record to add
2029 * This routine checks the priority of the fcf_index to be added. 2029 * This routine checks the priority of the fcf_index to be added.
2030 * If it is a lower priority than the current head of the fcf_pri list 2030 * If it is a lower priority than the current head of the fcf_pri list
2031 * then it is added to the list in the right order. 2031 * then it is added to the list in the right order.
2032 * If it is the same priority as the current head of the list then it 2032 * If it is the same priority as the current head of the list then it
2033 * is added to the head of the list and its bit in the rr_bmask is set. 2033 * is added to the head of the list and its bit in the rr_bmask is set.
2034 * If the fcf_index to be added is of a higher priority than the current 2034 * If the fcf_index to be added is of a higher priority than the current
2035 * head of the list then the rr_bmask is cleared, its bit is set in the 2035 * head of the list then the rr_bmask is cleared, its bit is set in the
2036 * rr_bmask and it is added to the head of the list. 2036 * rr_bmask and it is added to the head of the list.
2037 * returns: 2037 * returns:
2038 * 0=success 1=failure 2038 * 0=success 1=failure
2039 **/ 2039 **/
2040 int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index, 2040 int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
2041 struct fcf_record *new_fcf_record) 2041 struct fcf_record *new_fcf_record)
2042 { 2042 {
2043 uint16_t current_fcf_pri; 2043 uint16_t current_fcf_pri;
2044 uint16_t last_index; 2044 uint16_t last_index;
2045 struct lpfc_fcf_pri *fcf_pri; 2045 struct lpfc_fcf_pri *fcf_pri;
2046 struct lpfc_fcf_pri *next_fcf_pri; 2046 struct lpfc_fcf_pri *next_fcf_pri;
2047 struct lpfc_fcf_pri *new_fcf_pri; 2047 struct lpfc_fcf_pri *new_fcf_pri;
2048 int ret; 2048 int ret;
2049 2049
2050 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index]; 2050 new_fcf_pri = &phba->fcf.fcf_pri[fcf_index];
2051 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2051 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2052 "3059 adding idx x%x pri x%x flg x%x\n", 2052 "3059 adding idx x%x pri x%x flg x%x\n",
2053 fcf_index, new_fcf_record->fip_priority, 2053 fcf_index, new_fcf_record->fip_priority,
2054 new_fcf_pri->fcf_rec.flag); 2054 new_fcf_pri->fcf_rec.flag);
2055 spin_lock_irq(&phba->hbalock); 2055 spin_lock_irq(&phba->hbalock);
2056 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST) 2056 if (new_fcf_pri->fcf_rec.flag & LPFC_FCF_ON_PRI_LIST)
2057 list_del_init(&new_fcf_pri->list); 2057 list_del_init(&new_fcf_pri->list);
2058 new_fcf_pri->fcf_rec.fcf_index = fcf_index; 2058 new_fcf_pri->fcf_rec.fcf_index = fcf_index;
2059 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority; 2059 new_fcf_pri->fcf_rec.priority = new_fcf_record->fip_priority;
2060 if (list_empty(&phba->fcf.fcf_pri_list)) { 2060 if (list_empty(&phba->fcf.fcf_pri_list)) {
2061 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); 2061 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2062 ret = lpfc_sli4_fcf_rr_index_set(phba, 2062 ret = lpfc_sli4_fcf_rr_index_set(phba,
2063 new_fcf_pri->fcf_rec.fcf_index); 2063 new_fcf_pri->fcf_rec.fcf_index);
2064 goto out; 2064 goto out;
2065 } 2065 }
2066 2066
2067 last_index = find_first_bit(phba->fcf.fcf_rr_bmask, 2067 last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
2068 LPFC_SLI4_FCF_TBL_INDX_MAX); 2068 LPFC_SLI4_FCF_TBL_INDX_MAX);
2069 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { 2069 if (last_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
2070 ret = 0; /* Empty rr list */ 2070 ret = 0; /* Empty rr list */
2071 goto out; 2071 goto out;
2072 } 2072 }
2073 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority; 2073 current_fcf_pri = phba->fcf.fcf_pri[last_index].fcf_rec.priority;
2074 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) { 2074 if (new_fcf_pri->fcf_rec.priority <= current_fcf_pri) {
2075 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list); 2075 list_add(&new_fcf_pri->list, &phba->fcf.fcf_pri_list);
2076 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) { 2076 if (new_fcf_pri->fcf_rec.priority < current_fcf_pri) {
2077 memset(phba->fcf.fcf_rr_bmask, 0, 2077 memset(phba->fcf.fcf_rr_bmask, 0,
2078 sizeof(*phba->fcf.fcf_rr_bmask)); 2078 sizeof(*phba->fcf.fcf_rr_bmask));
2079 /* fcfs_at_this_priority_level = 1; */ 2079 /* fcfs_at_this_priority_level = 1; */
2080 phba->fcf.eligible_fcf_cnt = 1; 2080 phba->fcf.eligible_fcf_cnt = 1;
2081 } else 2081 } else
2082 /* fcfs_at_this_priority_level++; */ 2082 /* fcfs_at_this_priority_level++; */
2083 phba->fcf.eligible_fcf_cnt++; 2083 phba->fcf.eligible_fcf_cnt++;
2084 ret = lpfc_sli4_fcf_rr_index_set(phba, 2084 ret = lpfc_sli4_fcf_rr_index_set(phba,
2085 new_fcf_pri->fcf_rec.fcf_index); 2085 new_fcf_pri->fcf_rec.fcf_index);
2086 goto out; 2086 goto out;
2087 } 2087 }
2088 2088
2089 list_for_each_entry_safe(fcf_pri, next_fcf_pri, 2089 list_for_each_entry_safe(fcf_pri, next_fcf_pri,
2090 &phba->fcf.fcf_pri_list, list) { 2090 &phba->fcf.fcf_pri_list, list) {
2091 if (new_fcf_pri->fcf_rec.priority <= 2091 if (new_fcf_pri->fcf_rec.priority <=
2092 fcf_pri->fcf_rec.priority) { 2092 fcf_pri->fcf_rec.priority) {
2093 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list) 2093 if (fcf_pri->list.prev == &phba->fcf.fcf_pri_list)
2094 list_add(&new_fcf_pri->list, 2094 list_add(&new_fcf_pri->list,
2095 &phba->fcf.fcf_pri_list); 2095 &phba->fcf.fcf_pri_list);
2096 else 2096 else
2097 list_add(&new_fcf_pri->list, 2097 list_add(&new_fcf_pri->list,
2098 &((struct lpfc_fcf_pri *) 2098 &((struct lpfc_fcf_pri *)
2099 fcf_pri->list.prev)->list); 2099 fcf_pri->list.prev)->list);
2100 ret = 0; 2100 ret = 0;
2101 goto out; 2101 goto out;
2102 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list 2102 } else if (fcf_pri->list.next == &phba->fcf.fcf_pri_list
2103 || new_fcf_pri->fcf_rec.priority < 2103 || new_fcf_pri->fcf_rec.priority <
2104 next_fcf_pri->fcf_rec.priority) { 2104 next_fcf_pri->fcf_rec.priority) {
2105 list_add(&new_fcf_pri->list, &fcf_pri->list); 2105 list_add(&new_fcf_pri->list, &fcf_pri->list);
2106 ret = 0; 2106 ret = 0;
2107 goto out; 2107 goto out;
2108 } 2108 }
2109 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority) 2109 if (new_fcf_pri->fcf_rec.priority > fcf_pri->fcf_rec.priority)
2110 continue; 2110 continue;
2111 2111
2112 } 2112 }
2113 ret = 1; 2113 ret = 1;
2114 out: 2114 out:
2115 /* we use = instead of |= to clear the FLOGI_FAILED flag. */ 2115 /* we use = instead of |= to clear the FLOGI_FAILED flag. */
2116 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST; 2116 new_fcf_pri->fcf_rec.flag = LPFC_FCF_ON_PRI_LIST;
2117 spin_unlock_irq(&phba->hbalock); 2117 spin_unlock_irq(&phba->hbalock);
2118 return ret; 2118 return ret;
2119 } 2119 }
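
The walk above keeps fcf_pri_list sorted by ascending priority value (lower value = higher priority), placing an equal-priority entry ahead of the first existing entry at that level. A self-contained sketch of the same ordered insert over a minimal circular doubly linked list with a sentinel head, using illustrative types rather than the kernel's list_head API:

#include <stddef.h>
#include <stdint.h>

struct pri_node {
	uint16_t priority;
	struct pri_node *prev, *next;
};

/* Insert n so the list stays sorted by ascending priority; an
 * equal-priority entry lands before the first existing entry of
 * that priority, matching lpfc_sli4_fcf_pri_list_add(). The head
 * is a sentinel: head->next holds the lowest priority value. */
static void pri_insert(struct pri_node *head, struct pri_node *n)
{
	struct pri_node *pos;

	for (pos = head->next; pos != head; pos = pos->next)
		if (n->priority <= pos->priority)
			break;
	/* Link n immediately before pos (or at the tail if none matched). */
	n->next = pos;
	n->prev = pos->prev;
	pos->prev->next = n;
	pos->prev = n;
}
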
2120 2120
2121 /** 2121 /**
2122 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler. 2122 * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
2123 * @phba: pointer to lpfc hba data structure. 2123 * @phba: pointer to lpfc hba data structure.
2124 * @mboxq: pointer to mailbox object. 2124 * @mboxq: pointer to mailbox object.
2125 * 2125 *
2126 * This function iterates through all the fcf records available in 2126 * This function iterates through all the fcf records available in
2127 * the HBA and chooses the optimal FCF record for discovery. After finding 2127 * the HBA and chooses the optimal FCF record for discovery. After finding
2128 * the FCF for discovery it registers the FCF record and kick-starts 2128 * the FCF for discovery it registers the FCF record and kick-starts
2129 * discovery. 2129 * discovery.
2130 * If the FCF_IN_USE flag is set in the currently used FCF, the routine tries to 2130 * If the FCF_IN_USE flag is set in the currently used FCF, the routine tries to
2131 * use an FCF record which matches fabric name and mac address of the 2131 * use an FCF record which matches fabric name and mac address of the
2132 * currently used FCF record. 2132 * currently used FCF record.
2133 * If the driver supports only one FCF, it will try to use the FCF record 2133 * If the driver supports only one FCF, it will try to use the FCF record
2134 * used by BOOT_BIOS. 2134 * used by BOOT_BIOS.
2135 */ 2135 */
2136 void 2136 void
2137 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2137 lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2138 { 2138 {
2139 struct fcf_record *new_fcf_record; 2139 struct fcf_record *new_fcf_record;
2140 uint32_t boot_flag, addr_mode; 2140 uint32_t boot_flag, addr_mode;
2141 uint16_t fcf_index, next_fcf_index; 2141 uint16_t fcf_index, next_fcf_index;
2142 struct lpfc_fcf_rec *fcf_rec = NULL; 2142 struct lpfc_fcf_rec *fcf_rec = NULL;
2143 uint16_t vlan_id; 2143 uint16_t vlan_id;
2144 uint32_t seed; 2144 uint32_t seed;
2145 bool select_new_fcf; 2145 bool select_new_fcf;
2146 int rc; 2146 int rc;
2147 2147
2148 /* If there is pending FCoE event restart FCF table scan */ 2148 /* If there is pending FCoE event restart FCF table scan */
2149 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) { 2149 if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
2150 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2150 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2151 return; 2151 return;
2152 } 2152 }
2153 2153
2154 /* Parse the FCF record from the non-embedded mailbox command */ 2154 /* Parse the FCF record from the non-embedded mailbox command */
2155 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 2155 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2156 &next_fcf_index); 2156 &next_fcf_index);
2157 if (!new_fcf_record) { 2157 if (!new_fcf_record) {
2158 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2158 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2159 "2765 Mailbox command READ_FCF_RECORD " 2159 "2765 Mailbox command READ_FCF_RECORD "
2160 "failed to retrieve a FCF record.\n"); 2160 "failed to retrieve a FCF record.\n");
2161 /* Let next new FCF event trigger fast failover */ 2161 /* Let next new FCF event trigger fast failover */
2162 spin_lock_irq(&phba->hbalock); 2162 spin_lock_irq(&phba->hbalock);
2163 phba->hba_flag &= ~FCF_TS_INPROG; 2163 phba->hba_flag &= ~FCF_TS_INPROG;
2164 spin_unlock_irq(&phba->hbalock); 2164 spin_unlock_irq(&phba->hbalock);
2165 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2165 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2166 return; 2166 return;
2167 } 2167 }
2168 2168
2169 /* Check the FCF record against the connection list */ 2169 /* Check the FCF record against the connection list */
2170 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2170 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2171 &addr_mode, &vlan_id); 2171 &addr_mode, &vlan_id);
2172 2172
2173 /* Log the FCF record information if turned on */ 2173 /* Log the FCF record information if turned on */
2174 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2174 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2175 next_fcf_index); 2175 next_fcf_index);
2176 2176
2177 /* 2177 /*
2178 * If the fcf record does not match with connect list entries 2178 * If the fcf record does not match with connect list entries
2179 * read the next entry; otherwise, this is an eligible FCF 2179 * read the next entry; otherwise, this is an eligible FCF
2180 * record for roundrobin FCF failover. 2180 * record for roundrobin FCF failover.
2181 */ 2181 */
2182 if (!rc) { 2182 if (!rc) {
2183 lpfc_sli4_fcf_pri_list_del(phba, 2183 lpfc_sli4_fcf_pri_list_del(phba,
2184 bf_get(lpfc_fcf_record_fcf_index, 2184 bf_get(lpfc_fcf_record_fcf_index,
2185 new_fcf_record)); 2185 new_fcf_record));
2186 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2186 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2187 "2781 FCF (x%x) failed connection " 2187 "2781 FCF (x%x) failed connection "
2188 "list check: (x%x/x%x)\n", 2188 "list check: (x%x/x%x)\n",
2189 bf_get(lpfc_fcf_record_fcf_index, 2189 bf_get(lpfc_fcf_record_fcf_index,
2190 new_fcf_record), 2190 new_fcf_record),
2191 bf_get(lpfc_fcf_record_fcf_avail, 2191 bf_get(lpfc_fcf_record_fcf_avail,
2192 new_fcf_record), 2192 new_fcf_record),
2193 bf_get(lpfc_fcf_record_fcf_valid, 2193 bf_get(lpfc_fcf_record_fcf_valid,
2194 new_fcf_record)); 2194 new_fcf_record));
2195 if ((phba->fcf.fcf_flag & FCF_IN_USE) && 2195 if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
2196 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2196 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2197 new_fcf_record, LPFC_FCOE_IGNORE_VID)) { 2197 new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
2198 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) != 2198 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
2199 phba->fcf.current_rec.fcf_indx) { 2199 phba->fcf.current_rec.fcf_indx) {
2200 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2200 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2201 "2862 FCF (x%x) matches property " 2201 "2862 FCF (x%x) matches property "
2202 "of in-use FCF (x%x)\n", 2202 "of in-use FCF (x%x)\n",
2203 bf_get(lpfc_fcf_record_fcf_index, 2203 bf_get(lpfc_fcf_record_fcf_index,
2204 new_fcf_record), 2204 new_fcf_record),
2205 phba->fcf.current_rec.fcf_indx); 2205 phba->fcf.current_rec.fcf_indx);
2206 goto read_next_fcf; 2206 goto read_next_fcf;
2207 } 2207 }
2208 /* 2208 /*
2209 * In case the current in-use FCF record becomes 2209 * In case the current in-use FCF record becomes
2210 * invalid/unavailable during FCF discovery that 2210 * invalid/unavailable during FCF discovery that
2211 * was not triggered by the fast FCF failover process, 2211 * was not triggered by the fast FCF failover process,
2212 * treat it as a fast FCF failover. 2212 * treat it as a fast FCF failover.
2213 */ 2213 */
2214 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) && 2214 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND) &&
2215 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 2215 !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2216 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2216 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2217 "2835 Invalid in-use FCF " 2217 "2835 Invalid in-use FCF "
2218 "(x%x), enter FCF failover " 2218 "(x%x), enter FCF failover "
2219 "table scan.\n", 2219 "table scan.\n",
2220 phba->fcf.current_rec.fcf_indx); 2220 phba->fcf.current_rec.fcf_indx);
2221 spin_lock_irq(&phba->hbalock); 2221 spin_lock_irq(&phba->hbalock);
2222 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2222 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2223 spin_unlock_irq(&phba->hbalock); 2223 spin_unlock_irq(&phba->hbalock);
2224 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2224 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2225 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2225 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2226 LPFC_FCOE_FCF_GET_FIRST); 2226 LPFC_FCOE_FCF_GET_FIRST);
2227 return; 2227 return;
2228 } 2228 }
2229 } 2229 }
2230 goto read_next_fcf; 2230 goto read_next_fcf;
2231 } else { 2231 } else {
2232 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2232 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2233 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, 2233 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index,
2234 new_fcf_record); 2234 new_fcf_record);
2235 if (rc) 2235 if (rc)
2236 goto read_next_fcf; 2236 goto read_next_fcf;
2237 } 2237 }
2238 2238
2239 /* 2239 /*
2240 * If this is not the first FCF discovery of the HBA, use the last 2240 * If this is not the first FCF discovery of the HBA, use the last
2241 * FCF record for the discovery. The conditions for a rescan to 2241 * FCF record for the discovery. The conditions for a rescan to
2242 * match the in-use FCF record are: fabric name, switch name, mac 2242 * match the in-use FCF record are: fabric name, switch name, mac
2243 * address, and vlan_id. 2243 * address, and vlan_id.
2244 */ 2244 */
2245 spin_lock_irq(&phba->hbalock); 2245 spin_lock_irq(&phba->hbalock);
2246 if (phba->fcf.fcf_flag & FCF_IN_USE) { 2246 if (phba->fcf.fcf_flag & FCF_IN_USE) {
2247 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2247 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2248 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec, 2248 lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
2249 new_fcf_record, vlan_id)) { 2249 new_fcf_record, vlan_id)) {
2250 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) == 2250 if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
2251 phba->fcf.current_rec.fcf_indx) { 2251 phba->fcf.current_rec.fcf_indx) {
2252 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2252 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2253 if (phba->fcf.fcf_flag & FCF_REDISC_PEND) 2253 if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
2254 /* Stop FCF redisc wait timer */ 2254 /* Stop FCF redisc wait timer */
2255 __lpfc_sli4_stop_fcf_redisc_wait_timer( 2255 __lpfc_sli4_stop_fcf_redisc_wait_timer(
2256 phba); 2256 phba);
2257 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2257 else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2258 /* Fast failover, mark completed */ 2258 /* Fast failover, mark completed */
2259 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2259 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2260 spin_unlock_irq(&phba->hbalock); 2260 spin_unlock_irq(&phba->hbalock);
2261 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2261 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2262 "2836 New FCF matches in-use " 2262 "2836 New FCF matches in-use "
2263 "FCF (x%x)\n", 2263 "FCF (x%x)\n",
2264 phba->fcf.current_rec.fcf_indx); 2264 phba->fcf.current_rec.fcf_indx);
2265 goto out; 2265 goto out;
2266 } else 2266 } else
2267 lpfc_printf_log(phba, KERN_ERR, LOG_FIP, 2267 lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
2268 "2863 New FCF (x%x) matches " 2268 "2863 New FCF (x%x) matches "
2269 "property of in-use FCF (x%x)\n", 2269 "property of in-use FCF (x%x)\n",
2270 bf_get(lpfc_fcf_record_fcf_index, 2270 bf_get(lpfc_fcf_record_fcf_index,
2271 new_fcf_record), 2271 new_fcf_record),
2272 phba->fcf.current_rec.fcf_indx); 2272 phba->fcf.current_rec.fcf_indx);
2273 } 2273 }
2274 /* 2274 /*
2275 * Read next FCF record from HBA searching for the matching 2275 * Read next FCF record from HBA searching for the matching
2276 * with in-use record only if not during the fast failover 2276 * with in-use record only if not during the fast failover
2277 * period. In case of fast failover period, it shall try to 2277 * period. In case of fast failover period, it shall try to
2278 * determine whether the FCF record just read should be the 2278 * determine whether the FCF record just read should be the
2279 * next candidate. 2279 * next candidate.
2280 */ 2280 */
2281 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) { 2281 if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
2282 spin_unlock_irq(&phba->hbalock); 2282 spin_unlock_irq(&phba->hbalock);
2283 goto read_next_fcf; 2283 goto read_next_fcf;
2284 } 2284 }
2285 } 2285 }
2286 /* 2286 /*
2287 * Update on failover FCF record only if it's in FCF fast-failover 2287 * Update on failover FCF record only if it's in FCF fast-failover
2288 * period; otherwise, update on current FCF record. 2288 * period; otherwise, update on current FCF record.
2289 */ 2289 */
2290 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) 2290 if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
2291 fcf_rec = &phba->fcf.failover_rec; 2291 fcf_rec = &phba->fcf.failover_rec;
2292 else 2292 else
2293 fcf_rec = &phba->fcf.current_rec; 2293 fcf_rec = &phba->fcf.current_rec;
2294 2294
2295 if (phba->fcf.fcf_flag & FCF_AVAILABLE) { 2295 if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
2296 /* 2296 /*
2297 * If the driver FCF record does not have boot flag 2297 * If the driver FCF record does not have boot flag
2298 * set and new hba fcf record has boot flag set, use 2298 * set and new hba fcf record has boot flag set, use
2299 * the new hba fcf record. 2299 * the new hba fcf record.
2300 */ 2300 */
2301 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) { 2301 if (boot_flag && !(fcf_rec->flag & BOOT_ENABLE)) {
2302 /* Choose this FCF record */ 2302 /* Choose this FCF record */
2303 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2303 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2304 "2837 Update current FCF record " 2304 "2837 Update current FCF record "
2305 "(x%x) with new FCF record (x%x)\n", 2305 "(x%x) with new FCF record (x%x)\n",
2306 fcf_rec->fcf_indx, 2306 fcf_rec->fcf_indx,
2307 bf_get(lpfc_fcf_record_fcf_index, 2307 bf_get(lpfc_fcf_record_fcf_index,
2308 new_fcf_record)); 2308 new_fcf_record));
2309 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2309 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2310 addr_mode, vlan_id, BOOT_ENABLE); 2310 addr_mode, vlan_id, BOOT_ENABLE);
2311 spin_unlock_irq(&phba->hbalock); 2311 spin_unlock_irq(&phba->hbalock);
2312 goto read_next_fcf; 2312 goto read_next_fcf;
2313 } 2313 }
2314 /* 2314 /*
2315 * If the driver FCF record has boot flag set and the 2315 * If the driver FCF record has boot flag set and the
2316 * new hba FCF record does not have boot flag, read 2316 * new hba FCF record does not have boot flag, read
2317 * the next FCF record. 2317 * the next FCF record.
2318 */ 2318 */
2319 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) { 2319 if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
2320 spin_unlock_irq(&phba->hbalock); 2320 spin_unlock_irq(&phba->hbalock);
2321 goto read_next_fcf; 2321 goto read_next_fcf;
2322 } 2322 }
2323 /* 2323 /*
2324 * If the new hba FCF record has lower priority value 2324 * If the new hba FCF record has lower priority value
2325 * than the driver FCF record, use the new record. 2325 * than the driver FCF record, use the new record.
2326 */ 2326 */
2327 if (new_fcf_record->fip_priority < fcf_rec->priority) { 2327 if (new_fcf_record->fip_priority < fcf_rec->priority) {
2328 /* Choose the new FCF record with lower priority */ 2328 /* Choose the new FCF record with lower priority */
2329 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2329 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2330 "2838 Update current FCF record " 2330 "2838 Update current FCF record "
2331 "(x%x) with new FCF record (x%x)\n", 2331 "(x%x) with new FCF record (x%x)\n",
2332 fcf_rec->fcf_indx, 2332 fcf_rec->fcf_indx,
2333 bf_get(lpfc_fcf_record_fcf_index, 2333 bf_get(lpfc_fcf_record_fcf_index,
2334 new_fcf_record)); 2334 new_fcf_record));
2335 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2335 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2336 addr_mode, vlan_id, 0); 2336 addr_mode, vlan_id, 0);
2337 /* Reset running random FCF selection count */ 2337 /* Reset running random FCF selection count */
2338 phba->fcf.eligible_fcf_cnt = 1; 2338 phba->fcf.eligible_fcf_cnt = 1;
2339 } else if (new_fcf_record->fip_priority == fcf_rec->priority) { 2339 } else if (new_fcf_record->fip_priority == fcf_rec->priority) {
2340 /* Update running random FCF selection count */ 2340 /* Update running random FCF selection count */
2341 phba->fcf.eligible_fcf_cnt++; 2341 phba->fcf.eligible_fcf_cnt++;
2342 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba, 2342 select_new_fcf = lpfc_sli4_new_fcf_random_select(phba,
2343 phba->fcf.eligible_fcf_cnt); 2343 phba->fcf.eligible_fcf_cnt);
2344 if (select_new_fcf) { 2344 if (select_new_fcf) {
2345 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2345 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2346 "2839 Update current FCF record " 2346 "2839 Update current FCF record "
2347 "(x%x) with new FCF record (x%x)\n", 2347 "(x%x) with new FCF record (x%x)\n",
2348 fcf_rec->fcf_indx, 2348 fcf_rec->fcf_indx,
2349 bf_get(lpfc_fcf_record_fcf_index, 2349 bf_get(lpfc_fcf_record_fcf_index,
2350 new_fcf_record)); 2350 new_fcf_record));
2351 /* Choose the new FCF by random selection */ 2351 /* Choose the new FCF by random selection */
2352 __lpfc_update_fcf_record(phba, fcf_rec, 2352 __lpfc_update_fcf_record(phba, fcf_rec,
2353 new_fcf_record, 2353 new_fcf_record,
2354 addr_mode, vlan_id, 0); 2354 addr_mode, vlan_id, 0);
2355 } 2355 }
2356 } 2356 }
2357 spin_unlock_irq(&phba->hbalock); 2357 spin_unlock_irq(&phba->hbalock);
2358 goto read_next_fcf; 2358 goto read_next_fcf;
2359 } 2359 }
2360 /* 2360 /*
2361 * This is the first suitable FCF record, choose this record for 2361 * This is the first suitable FCF record, choose this record for
2362 * initial best-fit FCF. 2362 * initial best-fit FCF.
2363 */ 2363 */
2364 if (fcf_rec) { 2364 if (fcf_rec) {
2365 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2365 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2366 "2840 Update initial FCF candidate " 2366 "2840 Update initial FCF candidate "
2367 "with FCF (x%x)\n", 2367 "with FCF (x%x)\n",
2368 bf_get(lpfc_fcf_record_fcf_index, 2368 bf_get(lpfc_fcf_record_fcf_index,
2369 new_fcf_record)); 2369 new_fcf_record));
2370 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record, 2370 __lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
2371 addr_mode, vlan_id, (boot_flag ? 2371 addr_mode, vlan_id, (boot_flag ?
2372 BOOT_ENABLE : 0)); 2372 BOOT_ENABLE : 0));
2373 phba->fcf.fcf_flag |= FCF_AVAILABLE; 2373 phba->fcf.fcf_flag |= FCF_AVAILABLE;
2374 /* Setup initial running random FCF selection count */ 2374 /* Setup initial running random FCF selection count */
2375 phba->fcf.eligible_fcf_cnt = 1; 2375 phba->fcf.eligible_fcf_cnt = 1;
2376 /* Seeding the random number generator for random selection */ 2376 /* Seeding the random number generator for random selection */
2377 seed = (uint32_t)(0xFFFFFFFF & jiffies); 2377 seed = (uint32_t)(0xFFFFFFFF & jiffies);
2378 srandom32(seed); 2378 srandom32(seed);
2379 } 2379 }
2380 spin_unlock_irq(&phba->hbalock); 2380 spin_unlock_irq(&phba->hbalock);
2381 goto read_next_fcf; 2381 goto read_next_fcf;
2382 2382
2383 read_next_fcf: 2383 read_next_fcf:
2384 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2384 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2385 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) { 2385 if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0) {
2386 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) { 2386 if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
2387 /* 2387 /*
2388 * Case of FCF fast failover scan 2388 * Case of FCF fast failover scan
2389 */ 2389 */
2390 2390
2391 /* 2391 /*
2392 * It has not found any suitable FCF record, cancel the 2392 * It has not found any suitable FCF record, cancel the
2393 * FCF scan in progress, and do nothing 2393 * FCF scan in progress, and do nothing
2394 */ 2394 */
2395 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) { 2395 if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
2396 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2396 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2397 "2782 No suitable FCF found: " 2397 "2782 No suitable FCF found: "
2398 "(x%x/x%x)\n", 2398 "(x%x/x%x)\n",
2399 phba->fcoe_eventtag_at_fcf_scan, 2399 phba->fcoe_eventtag_at_fcf_scan,
2400 bf_get(lpfc_fcf_record_fcf_index, 2400 bf_get(lpfc_fcf_record_fcf_index,
2401 new_fcf_record)); 2401 new_fcf_record));
2402 spin_lock_irq(&phba->hbalock); 2402 spin_lock_irq(&phba->hbalock);
2403 if (phba->hba_flag & HBA_DEVLOSS_TMO) { 2403 if (phba->hba_flag & HBA_DEVLOSS_TMO) {
2404 phba->hba_flag &= ~FCF_TS_INPROG; 2404 phba->hba_flag &= ~FCF_TS_INPROG;
2405 spin_unlock_irq(&phba->hbalock); 2405 spin_unlock_irq(&phba->hbalock);
2406 /* Unregister in-use FCF and rescan */ 2406 /* Unregister in-use FCF and rescan */
2407 lpfc_printf_log(phba, KERN_INFO, 2407 lpfc_printf_log(phba, KERN_INFO,
2408 LOG_FIP, 2408 LOG_FIP,
2409 "2864 On devloss tmo " 2409 "2864 On devloss tmo "
2410 "unreg in-use FCF and " 2410 "unreg in-use FCF and "
2411 "rescan FCF table\n"); 2411 "rescan FCF table\n");
2412 lpfc_unregister_fcf_rescan(phba); 2412 lpfc_unregister_fcf_rescan(phba);
2413 return; 2413 return;
2414 } 2414 }
2415 /* 2415 /*
2416 * Let next new FCF event trigger fast failover 2416 * Let next new FCF event trigger fast failover
2417 */ 2417 */
2418 phba->hba_flag &= ~FCF_TS_INPROG; 2418 phba->hba_flag &= ~FCF_TS_INPROG;
2419 spin_unlock_irq(&phba->hbalock); 2419 spin_unlock_irq(&phba->hbalock);
2420 return; 2420 return;
2421 } 2421 }
2422 /* 2422 /*
2423 * It has found a suitable FCF record that is not 2423 * It has found a suitable FCF record that is not
2424 * the same as the in-use FCF record, unregister the 2424 * the same as the in-use FCF record, unregister the
2425 * in-use FCF record, replace the in-use FCF record 2425 * in-use FCF record, replace the in-use FCF record
2426 * with the new FCF record, mark FCF fast failover 2426 * with the new FCF record, mark FCF fast failover
2427 * completed, and then start registering the new FCF 2427 * completed, and then start registering the new FCF
2428 * record. 2428 * record.
2429 */ 2429 */
2430 2430
2431 /* Unregister the current in-use FCF record */ 2431 /* Unregister the current in-use FCF record */
2432 lpfc_unregister_fcf(phba); 2432 lpfc_unregister_fcf(phba);
2433 2433
2434 /* Replace in-use record with the new record */ 2434 /* Replace in-use record with the new record */
2435 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2435 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2436 "2842 Replace in-use FCF (x%x) " 2436 "2842 Replace in-use FCF (x%x) "
2437 "with failover FCF (x%x)\n", 2437 "with failover FCF (x%x)\n",
2438 phba->fcf.current_rec.fcf_indx, 2438 phba->fcf.current_rec.fcf_indx,
2439 phba->fcf.failover_rec.fcf_indx); 2439 phba->fcf.failover_rec.fcf_indx);
2440 memcpy(&phba->fcf.current_rec, 2440 memcpy(&phba->fcf.current_rec,
2441 &phba->fcf.failover_rec, 2441 &phba->fcf.failover_rec,
2442 sizeof(struct lpfc_fcf_rec)); 2442 sizeof(struct lpfc_fcf_rec));
2443 /* 2443 /*
2444 * Mark the fast FCF failover rediscovery completed 2444 * Mark the fast FCF failover rediscovery completed
2445 * and the start of the first round of the roundrobin 2445 * and the start of the first round of the roundrobin
2446 * FCF failover. 2446 * FCF failover.
2447 */ 2447 */
2448 spin_lock_irq(&phba->hbalock); 2448 spin_lock_irq(&phba->hbalock);
2449 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV; 2449 phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
2450 spin_unlock_irq(&phba->hbalock); 2450 spin_unlock_irq(&phba->hbalock);
2451 /* Register to the new FCF record */ 2451 /* Register to the new FCF record */
2452 lpfc_register_fcf(phba); 2452 lpfc_register_fcf(phba);
2453 } else { 2453 } else {
2454 /* 2454 /*
2455 * In case of a transition period to fast FCF failover, 2455 * In case of a transition period to fast FCF failover,
2456 * do nothing when the search reaches the end of the FCF table. 2456 * do nothing when the search reaches the end of the FCF table.
2457 */ 2457 */
2458 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) || 2458 if ((phba->fcf.fcf_flag & FCF_REDISC_EVT) ||
2459 (phba->fcf.fcf_flag & FCF_REDISC_PEND)) 2459 (phba->fcf.fcf_flag & FCF_REDISC_PEND))
2460 return; 2460 return;
2461 2461
2462 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV && 2462 if (phba->cfg_fcf_failover_policy == LPFC_FCF_FOV &&
2463 phba->fcf.fcf_flag & FCF_IN_USE) { 2463 phba->fcf.fcf_flag & FCF_IN_USE) {
2464 /* 2464 /*
2465 * In case the current in-use FCF record no 2465 * In case the current in-use FCF record no
2466 * longer exists during an FCF discovery that 2466 * longer exists during an FCF discovery that
2467 * was not triggered by the fast FCF failover 2467 * was not triggered by the fast FCF failover
2468 * process, treat it as a fast FCF failover. 2468 * process, treat it as a fast FCF failover.
2469 */ 2469 */
2470 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2470 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2471 "2841 In-use FCF record (x%x) " 2471 "2841 In-use FCF record (x%x) "
2472 "not reported, entering fast " 2472 "not reported, entering fast "
2473 "FCF failover mode scanning.\n", 2473 "FCF failover mode scanning.\n",
2474 phba->fcf.current_rec.fcf_indx); 2474 phba->fcf.current_rec.fcf_indx);
2475 spin_lock_irq(&phba->hbalock); 2475 spin_lock_irq(&phba->hbalock);
2476 phba->fcf.fcf_flag |= FCF_REDISC_FOV; 2476 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
2477 spin_unlock_irq(&phba->hbalock); 2477 spin_unlock_irq(&phba->hbalock);
2478 lpfc_sli4_fcf_scan_read_fcf_rec(phba, 2478 lpfc_sli4_fcf_scan_read_fcf_rec(phba,
2479 LPFC_FCOE_FCF_GET_FIRST); 2479 LPFC_FCOE_FCF_GET_FIRST);
2480 return; 2480 return;
2481 } 2481 }
2482 /* Register to the new FCF record */ 2482 /* Register to the new FCF record */
2483 lpfc_register_fcf(phba); 2483 lpfc_register_fcf(phba);
2484 } 2484 }
2485 } else 2485 } else
2486 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index); 2486 lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
2487 return; 2487 return;
2488 2488
2489 out: 2489 out:
2490 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2490 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2491 lpfc_register_fcf(phba); 2491 lpfc_register_fcf(phba);
2492 2492
2493 return; 2493 return;
2494 } 2494 }
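
Taken together, the candidate comparison in this handler reduces to a three-level policy: a boot-flagged record beats one without the flag, a numerically lower FIP priority beats a higher one, and equal priorities fall through to the running random selection. A compact sketch of that ordering, with the struct and all names illustrative:

#include <stdbool.h>
#include <stdint.h>

struct cand {
	bool	boot;		/* stands in for the BOOT_ENABLE flag */
	uint8_t	priority;	/* FIP priority: lower value wins */
};

/* Decide whether the newly read candidate should replace the current
 * one; equal boot flag and priority fall through to the 1/k running
 * random choice made elsewhere. */
static bool prefer_new(const struct cand *cur, const struct cand *cand_new,
		       bool random_pick)
{
	if (cand_new->boot != cur->boot)
		return cand_new->boot;		/* boot-enabled record wins */
	if (cand_new->priority != cur->priority)
		return cand_new->priority < cur->priority;
	return random_pick;			/* tie: running random select */
}
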
2495 2495
2496 /** 2496 /**
2497 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler 2497 * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
2498 * @phba: pointer to lpfc hba data structure. 2498 * @phba: pointer to lpfc hba data structure.
2499 * @mboxq: pointer to mailbox object. 2499 * @mboxq: pointer to mailbox object.
2500 * 2500 *
2501 * This is the callback function for FLOGI failure roundrobin FCF failover 2501 * This is the callback function for FLOGI failure roundrobin FCF failover
2502 * read FCF record mailbox command from the eligible FCF record bmask for 2502 * read FCF record mailbox command from the eligible FCF record bmask for
2503 * performing the failover. If the FCF read back is not valid/available, it 2503 * performing the failover. If the FCF read back is not valid/available, it
2504 * falls back to retrying FLOGI on the currently registered FCF. 2504 * falls back to retrying FLOGI on the currently registered FCF.
2505 * Otherwise, if the FCF read back is valid and available, it sets the 2505 * Otherwise, if the FCF read back is valid and available, it sets the
2506 * newly read FCF record as the failover FCF record, unregisters the 2506 * newly read FCF record as the failover FCF record, unregisters the
2507 * currently registered FCF record, copies the failover FCF record to 2507 * currently registered FCF record, copies the failover FCF record to
2508 * the current FCF record, and registers the current FCF record before 2508 * the current FCF record, and registers the current FCF record before
2509 * proceeding to try FLOGI on the new failover FCF. 2509 * proceeding to try FLOGI on the new failover FCF.
2510 */ 2510 */
2511 void 2511 void
2512 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2512 lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2513 { 2513 {
2514 struct fcf_record *new_fcf_record; 2514 struct fcf_record *new_fcf_record;
2515 uint32_t boot_flag, addr_mode; 2515 uint32_t boot_flag, addr_mode;
2516 uint16_t next_fcf_index, fcf_index; 2516 uint16_t next_fcf_index, fcf_index;
2517 uint16_t current_fcf_index; 2517 uint16_t current_fcf_index;
2518 uint16_t vlan_id; 2518 uint16_t vlan_id;
2519 int rc; 2519 int rc;
2520 2520
2521 /* If link state is not up, stop the roundrobin failover process */ 2521 /* If link state is not up, stop the roundrobin failover process */
2522 if (phba->link_state < LPFC_LINK_UP) { 2522 if (phba->link_state < LPFC_LINK_UP) {
2523 spin_lock_irq(&phba->hbalock); 2523 spin_lock_irq(&phba->hbalock);
2524 phba->fcf.fcf_flag &= ~FCF_DISCOVERY; 2524 phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
2525 phba->hba_flag &= ~FCF_RR_INPROG; 2525 phba->hba_flag &= ~FCF_RR_INPROG;
2526 spin_unlock_irq(&phba->hbalock); 2526 spin_unlock_irq(&phba->hbalock);
2527 goto out; 2527 goto out;
2528 } 2528 }
2529 2529
2530 /* Parse the FCF record from the non-embedded mailbox command */ 2530 /* Parse the FCF record from the non-embedded mailbox command */
2531 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 2531 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2532 &next_fcf_index); 2532 &next_fcf_index);
2533 if (!new_fcf_record) { 2533 if (!new_fcf_record) {
2534 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP, 2534 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
2535 "2766 Mailbox command READ_FCF_RECORD " 2535 "2766 Mailbox command READ_FCF_RECORD "
2536 "failed to retrieve a FCF record.\n"); 2536 "failed to retrieve a FCF record.\n");
2537 goto error_out; 2537 goto error_out;
2538 } 2538 }
2539 2539
2540 /* Get the needed parameters from FCF record */ 2540 /* Get the needed parameters from FCF record */
2541 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2541 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2542 &addr_mode, &vlan_id); 2542 &addr_mode, &vlan_id);
2543 2543
2544 /* Log the FCF record information if turned on */ 2544 /* Log the FCF record information if turned on */
2545 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2545 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2546 next_fcf_index); 2546 next_fcf_index);
2547 2547
2548 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2548 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2549 if (!rc) { 2549 if (!rc) {
2550 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2550 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2551 "2848 Remove ineligible FCF (x%x) from " 2551 "2848 Remove ineligible FCF (x%x) from "
2552 "from roundrobin bmask\n", fcf_index); 2552 "from roundrobin bmask\n", fcf_index);
2553 /* Clear roundrobin bmask bit for ineligible FCF */ 2553 /* Clear roundrobin bmask bit for ineligible FCF */
2554 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index); 2554 lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
2555 /* Perform next round of roundrobin FCF failover */ 2555 /* Perform next round of roundrobin FCF failover */
2556 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba); 2556 fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
2557 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index); 2557 rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
2558 if (rc) 2558 if (rc)
2559 goto out; 2559 goto out;
2560 goto error_out; 2560 goto error_out;
2561 } 2561 }
2562 2562
2563 if (fcf_index == phba->fcf.current_rec.fcf_indx) { 2563 if (fcf_index == phba->fcf.current_rec.fcf_indx) {
2564 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2564 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2565 "2760 Perform FLOGI roundrobin FCF failover: " 2565 "2760 Perform FLOGI roundrobin FCF failover: "
2566 "FCF (x%x) back to FCF (x%x)\n", 2566 "FCF (x%x) back to FCF (x%x)\n",
2567 phba->fcf.current_rec.fcf_indx, fcf_index); 2567 phba->fcf.current_rec.fcf_indx, fcf_index);
2568 /* Wait 500 ms before retrying FLOGI to current FCF */ 2568 /* Wait 500 ms before retrying FLOGI to current FCF */
2569 msleep(500); 2569 msleep(500);
2570 lpfc_issue_init_vfi(phba->pport); 2570 lpfc_issue_init_vfi(phba->pport);
2571 goto out; 2571 goto out;
2572 } 2572 }
2573 2573
2574 /* Upload new FCF record to the failover FCF record */ 2574 /* Upload new FCF record to the failover FCF record */
2575 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2575 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2576 "2834 Update current FCF (x%x) with new FCF (x%x)\n", 2576 "2834 Update current FCF (x%x) with new FCF (x%x)\n",
2577 phba->fcf.failover_rec.fcf_indx, fcf_index); 2577 phba->fcf.failover_rec.fcf_indx, fcf_index);
2578 spin_lock_irq(&phba->hbalock); 2578 spin_lock_irq(&phba->hbalock);
2579 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec, 2579 __lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
2580 new_fcf_record, addr_mode, vlan_id, 2580 new_fcf_record, addr_mode, vlan_id,
2581 (boot_flag ? BOOT_ENABLE : 0)); 2581 (boot_flag ? BOOT_ENABLE : 0));
2582 spin_unlock_irq(&phba->hbalock); 2582 spin_unlock_irq(&phba->hbalock);
2583 2583
2584 current_fcf_index = phba->fcf.current_rec.fcf_indx; 2584 current_fcf_index = phba->fcf.current_rec.fcf_indx;
2585 2585
2586 /* Unregister the current in-use FCF record */ 2586 /* Unregister the current in-use FCF record */
2587 lpfc_unregister_fcf(phba); 2587 lpfc_unregister_fcf(phba);
2588 2588
2589 /* Replace in-use record with the new record */ 2589 /* Replace in-use record with the new record */
2590 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec, 2590 memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
2591 sizeof(struct lpfc_fcf_rec)); 2591 sizeof(struct lpfc_fcf_rec));
2592 2592
2593 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2593 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2594 "2783 Perform FLOGI roundrobin FCF failover: FCF " 2594 "2783 Perform FLOGI roundrobin FCF failover: FCF "
2595 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index); 2595 "(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
2596 2596
2597 error_out: 2597 error_out:
2598 lpfc_register_fcf(phba); 2598 lpfc_register_fcf(phba);
2599 out: 2599 out:
2600 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2600 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2601 } 2601 }
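
The handler above funnels every exit through one of two labels so the non-embedded mailbox is freed exactly once. A condensed sketch of that discipline follows (not part of the diff; it reuses only names that appear in this file, and the elided eligibility work is an assumption):

/* Sketch only: condensed from lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(). */
void fcf_rr_cmpl_sketch(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        struct fcf_record *new_fcf_record;
        uint16_t next_fcf_index;

        if (phba->link_state < LPFC_LINK_UP)
                goto out;               /* stop failover; just free */
        new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
                                                      &next_fcf_index);
        if (!new_fcf_record)
                goto error_out;         /* parse failed: re-register */
        /* ... eligibility check and failover-record update elided ... */
error_out:
        lpfc_register_fcf(phba);        /* (re)register the chosen FCF */
out:
        lpfc_sli4_mbox_cmd_free(phba, mboxq);   /* freed on every path */
}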
2602 2602
2603 /** 2603 /**
2604 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler. 2604 * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
2605 * @phba: pointer to lpfc hba data structure. 2605 * @phba: pointer to lpfc hba data structure.
2606 * @mboxq: pointer to mailbox object. 2606 * @mboxq: pointer to mailbox object.
2607 * 2607 *
2608 * This is the callback function of read FCF record mailbox command for 2608 * This is the callback function of read FCF record mailbox command for
2609 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF 2609 * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
2610 * failover when a new FCF event happens. If the FCF read back is 2610 * failover when a new FCF event happens. If the FCF read back is
2611 * valid/available and it passes the connection list check, it updates 2611 * valid/available and it passes the connection list check, it updates
2612 * the bmask for the eligible FCF record for roundrobin failover. 2612 * the bmask for the eligible FCF record for roundrobin failover.
2613 */ 2613 */
2614 void 2614 void
2615 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2615 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2616 { 2616 {
2617 struct fcf_record *new_fcf_record; 2617 struct fcf_record *new_fcf_record;
2618 uint32_t boot_flag, addr_mode; 2618 uint32_t boot_flag, addr_mode;
2619 uint16_t fcf_index, next_fcf_index; 2619 uint16_t fcf_index, next_fcf_index;
2620 uint16_t vlan_id; 2620 uint16_t vlan_id;
2621 int rc; 2621 int rc;
2622 2622
2623 /* If link state is not up, no need to proceed */ 2623 /* If link state is not up, no need to proceed */
2624 if (phba->link_state < LPFC_LINK_UP) 2624 if (phba->link_state < LPFC_LINK_UP)
2625 goto out; 2625 goto out;
2626 2626
2627 /* If FCF discovery period is over, no need to proceed */ 2627 /* If FCF discovery period is over, no need to proceed */
2628 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY)) 2628 if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
2629 goto out; 2629 goto out;
2630 2630
2631 /* Parse the FCF record from the non-embedded mailbox command */ 2631 /* Parse the FCF record from the non-embedded mailbox command */
2632 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq, 2632 new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
2633 &next_fcf_index); 2633 &next_fcf_index);
2634 if (!new_fcf_record) { 2634 if (!new_fcf_record) {
2635 lpfc_printf_log(phba, KERN_INFO, LOG_FIP, 2635 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
2636 "2767 Mailbox command READ_FCF_RECORD " 2636 "2767 Mailbox command READ_FCF_RECORD "
2637 "failed to retrieve a FCF record.\n"); 2637 "failed to retrieve a FCF record.\n");
2638 goto out; 2638 goto out;
2639 } 2639 }
2640 2640
2641 /* Check the connection list for eligibility */ 2641 /* Check the connection list for eligibility */
2642 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag, 2642 rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
2643 &addr_mode, &vlan_id); 2643 &addr_mode, &vlan_id);
2644 2644
2645 /* Log the FCF record information if turned on */ 2645 /* Log the FCF record information if turned on */
2646 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id, 2646 lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
2647 next_fcf_index); 2647 next_fcf_index);
2648 2648
2649 if (!rc) 2649 if (!rc)
2650 goto out; 2650 goto out;
2651 2651
2652 /* Update the eligible FCF record index bmask */ 2652 /* Update the eligible FCF record index bmask */
2653 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record); 2653 fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
2654 2654
2655 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record); 2655 rc = lpfc_sli4_fcf_pri_list_add(phba, fcf_index, new_fcf_record);
2656 2656
2657 out: 2657 out:
2658 lpfc_sli4_mbox_cmd_free(phba, mboxq); 2658 lpfc_sli4_mbox_cmd_free(phba, mboxq);
2659 } 2659 }
2660 2660
2661 /** 2661 /**
2662 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command. 2662 * lpfc_init_vfi_cmpl - Completion handler for init_vfi mbox command.
2663 * @phba: pointer to lpfc hba data structure. 2663 * @phba: pointer to lpfc hba data structure.
2664 * @mboxq: pointer to mailbox data structure. 2664 * @mboxq: pointer to mailbox data structure.
2665 * 2665 *
2666 * This function handles completion of init vfi mailbox command. 2666 * This function handles completion of init vfi mailbox command.
2667 */ 2667 */
2668 void 2668 void
2669 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2669 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2670 { 2670 {
2671 struct lpfc_vport *vport = mboxq->vport; 2671 struct lpfc_vport *vport = mboxq->vport;
2672 2672
2673 /* 2673 /*
2674 * VFI not supported on interface type 0, so just do the FLOGI. 2674 * VFI not supported on interface type 0, so just do the FLOGI.
2675 * Also continue if the VFI is in use - just use the same one. 2675 * Also continue if the VFI is in use - just use the same one.
2676 */ 2676 */
2677 if (mboxq->u.mb.mbxStatus && 2677 if (mboxq->u.mb.mbxStatus &&
2678 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2678 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2679 LPFC_SLI_INTF_IF_TYPE_0) && 2679 LPFC_SLI_INTF_IF_TYPE_0) &&
2680 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 2680 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2681 lpfc_printf_vlog(vport, KERN_ERR, 2681 lpfc_printf_vlog(vport, KERN_ERR,
2682 LOG_MBOX, 2682 LOG_MBOX,
2683 "2891 Init VFI mailbox failed 0x%x\n", 2683 "2891 Init VFI mailbox failed 0x%x\n",
2684 mboxq->u.mb.mbxStatus); 2684 mboxq->u.mb.mbxStatus);
2685 mempool_free(mboxq, phba->mbox_mem_pool); 2685 mempool_free(mboxq, phba->mbox_mem_pool);
2686 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2686 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2687 return; 2687 return;
2688 } 2688 }
2689 2689
2690 lpfc_initial_flogi(vport); 2690 lpfc_initial_flogi(vport);
2691 mempool_free(mboxq, phba->mbox_mem_pool); 2691 mempool_free(mboxq, phba->mbox_mem_pool);
2692 return; 2692 return;
2693 } 2693 }
2694 2694
2695 /** 2695 /**
2696 * lpfc_issue_init_vfi - Issue init_vfi mailbox command. 2696 * lpfc_issue_init_vfi - Issue init_vfi mailbox command.
2697 * @vport: pointer to lpfc_vport data structure. 2697 * @vport: pointer to lpfc_vport data structure.
2698 * 2698 *
2699 * This function issues an init_vfi mailbox command to initialize the VFI and 2699 * This function issues an init_vfi mailbox command to initialize the VFI and
2700 * VPI for the physical port. 2700 * VPI for the physical port.
2701 */ 2701 */
2702 void 2702 void
2703 lpfc_issue_init_vfi(struct lpfc_vport *vport) 2703 lpfc_issue_init_vfi(struct lpfc_vport *vport)
2704 { 2704 {
2705 LPFC_MBOXQ_t *mboxq; 2705 LPFC_MBOXQ_t *mboxq;
2706 int rc; 2706 int rc;
2707 struct lpfc_hba *phba = vport->phba; 2707 struct lpfc_hba *phba = vport->phba;
2708 2708
2709 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 2709 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2710 if (!mboxq) { 2710 if (!mboxq) {
2711 lpfc_printf_vlog(vport, KERN_ERR, 2711 lpfc_printf_vlog(vport, KERN_ERR,
2712 LOG_MBOX, "2892 Failed to allocate " 2712 LOG_MBOX, "2892 Failed to allocate "
2713 "init_vfi mailbox\n"); 2713 "init_vfi mailbox\n");
2714 return; 2714 return;
2715 } 2715 }
2716 lpfc_init_vfi(mboxq, vport); 2716 lpfc_init_vfi(mboxq, vport);
2717 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl; 2717 mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;
2718 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT); 2718 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
2719 if (rc == MBX_NOT_FINISHED) { 2719 if (rc == MBX_NOT_FINISHED) {
2720 lpfc_printf_vlog(vport, KERN_ERR, 2720 lpfc_printf_vlog(vport, KERN_ERR,
2721 LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n"); 2721 LOG_MBOX, "2893 Failed to issue init_vfi mailbox\n");
2722 mempool_free(mboxq, vport->phba->mbox_mem_pool); 2722 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2723 } 2723 }
2724 } 2724 }
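
The init_vfi and init_vpi issuers above share one ownership rule: once lpfc_sli_issue_mbox() accepts the mailbox, the completion handler is responsible for freeing it; the caller frees it only when the submit returns MBX_NOT_FINISHED. A minimal sketch of that contract (not part of the diff; names mirror the code above):

/* Sketch of the mailbox ownership contract used by
 * lpfc_issue_init_vfi() and lpfc_issue_init_vpi() above.
 */
void issue_init_vfi_sketch(struct lpfc_vport *vport)
{
        struct lpfc_hba *phba = vport->phba;
        LPFC_MBOXQ_t *mboxq;

        mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!mboxq)
                return;                         /* nothing to undo yet */
        lpfc_init_vfi(mboxq, vport);
        mboxq->mbox_cmpl = lpfc_init_vfi_cmpl;  /* handler frees mboxq */
        if (lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT) == MBX_NOT_FINISHED)
                mempool_free(mboxq, phba->mbox_mem_pool); /* submit failed */
}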
2725 2725
2726 /** 2726 /**
2727 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command. 2727 * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
2728 * @phba: pointer to lpfc hba data structure. 2728 * @phba: pointer to lpfc hba data structure.
2729 * @mboxq: pointer to mailbox data structure. 2729 * @mboxq: pointer to mailbox data structure.
2730 * 2730 *
2731 * This function handles completion of init vpi mailbox command. 2731 * This function handles completion of init vpi mailbox command.
2732 */ 2732 */
2733 void 2733 void
2734 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2734 lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2735 { 2735 {
2736 struct lpfc_vport *vport = mboxq->vport; 2736 struct lpfc_vport *vport = mboxq->vport;
2737 struct lpfc_nodelist *ndlp; 2737 struct lpfc_nodelist *ndlp;
2738 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2738 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2739 2739
2740 if (mboxq->u.mb.mbxStatus) { 2740 if (mboxq->u.mb.mbxStatus) {
2741 lpfc_printf_vlog(vport, KERN_ERR, 2741 lpfc_printf_vlog(vport, KERN_ERR,
2742 LOG_MBOX, 2742 LOG_MBOX,
2743 "2609 Init VPI mailbox failed 0x%x\n", 2743 "2609 Init VPI mailbox failed 0x%x\n",
2744 mboxq->u.mb.mbxStatus); 2744 mboxq->u.mb.mbxStatus);
2745 mempool_free(mboxq, phba->mbox_mem_pool); 2745 mempool_free(mboxq, phba->mbox_mem_pool);
2746 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2746 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2747 return; 2747 return;
2748 } 2748 }
2749 spin_lock_irq(shost->host_lock); 2749 spin_lock_irq(shost->host_lock);
2750 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 2750 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2751 spin_unlock_irq(shost->host_lock); 2751 spin_unlock_irq(shost->host_lock);
2752 2752
2753 /* If this port is physical port or FDISC is done, do reg_vpi */ 2753 /* If this port is physical port or FDISC is done, do reg_vpi */
2754 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) { 2754 if ((phba->pport == vport) || (vport->port_state == LPFC_FDISC)) {
2755 ndlp = lpfc_findnode_did(vport, Fabric_DID); 2755 ndlp = lpfc_findnode_did(vport, Fabric_DID);
2756 if (!ndlp) 2756 if (!ndlp)
2757 lpfc_printf_vlog(vport, KERN_ERR, 2757 lpfc_printf_vlog(vport, KERN_ERR,
2758 LOG_DISCOVERY, 2758 LOG_DISCOVERY,
2759 "2731 Cannot find fabric " 2759 "2731 Cannot find fabric "
2760 "controller node\n"); 2760 "controller node\n");
2761 else 2761 else
2762 lpfc_register_new_vport(phba, vport, ndlp); 2762 lpfc_register_new_vport(phba, vport, ndlp);
2763 mempool_free(mboxq, phba->mbox_mem_pool); 2763 mempool_free(mboxq, phba->mbox_mem_pool);
2764 return; 2764 return;
2765 } 2765 }
2766 2766
2767 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 2767 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2768 lpfc_initial_fdisc(vport); 2768 lpfc_initial_fdisc(vport);
2769 else { 2769 else {
2770 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP); 2770 lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
2771 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 2771 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2772 "2606 No NPIV Fabric support\n"); 2772 "2606 No NPIV Fabric support\n");
2773 } 2773 }
2774 mempool_free(mboxq, phba->mbox_mem_pool); 2774 mempool_free(mboxq, phba->mbox_mem_pool);
2775 return; 2775 return;
2776 } 2776 }
2777 2777
2778 /** 2778 /**
2779 * lpfc_issue_init_vpi - Issue init_vpi mailbox command. 2779 * lpfc_issue_init_vpi - Issue init_vpi mailbox command.
2780 * @vport: pointer to lpfc_vport data structure. 2780 * @vport: pointer to lpfc_vport data structure.
2781 * 2781 *
2782 * This function issues an init_vpi mailbox command to initialize 2782 * This function issues an init_vpi mailbox command to initialize
2783 * the VPI for the vport. 2783 * the VPI for the vport.
2784 */ 2784 */
2785 void 2785 void
2786 lpfc_issue_init_vpi(struct lpfc_vport *vport) 2786 lpfc_issue_init_vpi(struct lpfc_vport *vport)
2787 { 2787 {
2788 LPFC_MBOXQ_t *mboxq; 2788 LPFC_MBOXQ_t *mboxq;
2789 int rc; 2789 int rc;
2790 2790
2791 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); 2791 mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL);
2792 if (!mboxq) { 2792 if (!mboxq) {
2793 lpfc_printf_vlog(vport, KERN_ERR, 2793 lpfc_printf_vlog(vport, KERN_ERR,
2794 LOG_MBOX, "2607 Failed to allocate " 2794 LOG_MBOX, "2607 Failed to allocate "
2795 "init_vpi mailbox\n"); 2795 "init_vpi mailbox\n");
2796 return; 2796 return;
2797 } 2797 }
2798 lpfc_init_vpi(vport->phba, mboxq, vport->vpi); 2798 lpfc_init_vpi(vport->phba, mboxq, vport->vpi);
2799 mboxq->vport = vport; 2799 mboxq->vport = vport;
2800 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl; 2800 mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
2801 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT); 2801 rc = lpfc_sli_issue_mbox(vport->phba, mboxq, MBX_NOWAIT);
2802 if (rc == MBX_NOT_FINISHED) { 2802 if (rc == MBX_NOT_FINISHED) {
2803 lpfc_printf_vlog(vport, KERN_ERR, 2803 lpfc_printf_vlog(vport, KERN_ERR,
2804 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n"); 2804 LOG_MBOX, "2608 Failed to issue init_vpi mailbox\n");
2805 mempool_free(mboxq, vport->phba->mbox_mem_pool); 2805 mempool_free(mboxq, vport->phba->mbox_mem_pool);
2806 } 2806 }
2807 } 2807 }
2808 2808
2809 /** 2809 /**
2810 * lpfc_start_fdiscs - send fdiscs for each vport on this port. 2810 * lpfc_start_fdiscs - send fdiscs for each vport on this port.
2811 * @phba: pointer to lpfc hba data structure. 2811 * @phba: pointer to lpfc hba data structure.
2812 * 2812 *
2813 * This function loops through the list of vports on the @phba and issues an 2813 * This function loops through the list of vports on the @phba and issues an
2814 * FDISC if possible. 2814 * FDISC if possible.
2815 */ 2815 */
2816 void 2816 void
2817 lpfc_start_fdiscs(struct lpfc_hba *phba) 2817 lpfc_start_fdiscs(struct lpfc_hba *phba)
2818 { 2818 {
2819 struct lpfc_vport **vports; 2819 struct lpfc_vport **vports;
2820 int i; 2820 int i;
2821 2821
2822 vports = lpfc_create_vport_work_array(phba); 2822 vports = lpfc_create_vport_work_array(phba);
2823 if (vports != NULL) { 2823 if (vports != NULL) {
2824 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 2824 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2825 if (vports[i]->port_type == LPFC_PHYSICAL_PORT) 2825 if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
2826 continue; 2826 continue;
2827 /* No VPI is available for this vport */ 2827 /* No VPI is available for this vport */
2828 if (vports[i]->vpi > phba->max_vpi) { 2828 if (vports[i]->vpi > phba->max_vpi) {
2829 lpfc_vport_set_state(vports[i], 2829 lpfc_vport_set_state(vports[i],
2830 FC_VPORT_FAILED); 2830 FC_VPORT_FAILED);
2831 continue; 2831 continue;
2832 } 2832 }
2833 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 2833 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2834 lpfc_vport_set_state(vports[i], 2834 lpfc_vport_set_state(vports[i],
2835 FC_VPORT_LINKDOWN); 2835 FC_VPORT_LINKDOWN);
2836 continue; 2836 continue;
2837 } 2837 }
2838 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) { 2838 if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
2839 lpfc_issue_init_vpi(vports[i]); 2839 lpfc_issue_init_vpi(vports[i]);
2840 continue; 2840 continue;
2841 } 2841 }
2842 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED) 2842 if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
2843 lpfc_initial_fdisc(vports[i]); 2843 lpfc_initial_fdisc(vports[i]);
2844 else { 2844 else {
2845 lpfc_vport_set_state(vports[i], 2845 lpfc_vport_set_state(vports[i],
2846 FC_VPORT_NO_FABRIC_SUPP); 2846 FC_VPORT_NO_FABRIC_SUPP);
2847 lpfc_printf_vlog(vports[i], KERN_ERR, 2847 lpfc_printf_vlog(vports[i], KERN_ERR,
2848 LOG_ELS, 2848 LOG_ELS,
2849 "0259 No NPIV " 2849 "0259 No NPIV "
2850 "Fabric support\n"); 2850 "Fabric support\n");
2851 } 2851 }
2852 } 2852 }
2853 } 2853 }
2854 lpfc_destroy_vport_work_array(phba, vports); 2854 lpfc_destroy_vport_work_array(phba, vports);
2855 } 2855 }
2856 2856
2857 void 2857 void
2858 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 2858 lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
2859 { 2859 {
2860 struct lpfc_dmabuf *dmabuf = mboxq->context1; 2860 struct lpfc_dmabuf *dmabuf = mboxq->context1;
2861 struct lpfc_vport *vport = mboxq->vport; 2861 struct lpfc_vport *vport = mboxq->vport;
2862 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 2862 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2863 2863
2864 /* 2864 /*
2865 * VFI not supported for interface type 0, so ignore any mailbox 2865 * VFI not supported for interface type 0, so ignore any mailbox
2866 * error (except VFI in use) and continue with the discovery. 2866 * error (except VFI in use) and continue with the discovery.
2867 */ 2867 */
2868 if (mboxq->u.mb.mbxStatus && 2868 if (mboxq->u.mb.mbxStatus &&
2869 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != 2869 (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2870 LPFC_SLI_INTF_IF_TYPE_0) && 2870 LPFC_SLI_INTF_IF_TYPE_0) &&
2871 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) { 2871 mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
2872 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 2872 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2873 "2018 REG_VFI mbxStatus error x%x " 2873 "2018 REG_VFI mbxStatus error x%x "
2874 "HBA state x%x\n", 2874 "HBA state x%x\n",
2875 mboxq->u.mb.mbxStatus, vport->port_state); 2875 mboxq->u.mb.mbxStatus, vport->port_state);
2876 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 2876 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2877 /* FLOGI failed, use loop map to make discovery list */ 2877 /* FLOGI failed, use loop map to make discovery list */
2878 lpfc_disc_list_loopmap(vport); 2878 lpfc_disc_list_loopmap(vport);
2879 /* Start discovery */ 2879 /* Start discovery */
2880 lpfc_disc_start(vport); 2880 lpfc_disc_start(vport);
2881 goto out_free_mem; 2881 goto out_free_mem;
2882 } 2882 }
2883 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 2883 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
2884 goto out_free_mem; 2884 goto out_free_mem;
2885 } 2885 }
2886 /* The VPI is implicitly registered when the VFI is registered */ 2886 /* The VPI is implicitly registered when the VFI is registered */
2887 spin_lock_irq(shost->host_lock); 2887 spin_lock_irq(shost->host_lock);
2888 vport->vpi_state |= LPFC_VPI_REGISTERED; 2888 vport->vpi_state |= LPFC_VPI_REGISTERED;
2889 vport->fc_flag |= FC_VFI_REGISTERED; 2889 vport->fc_flag |= FC_VFI_REGISTERED;
2890 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 2890 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
2891 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI; 2891 vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
2892 spin_unlock_irq(shost->host_lock); 2892 spin_unlock_irq(shost->host_lock);
2893 2893
2894 /* In case SLI4 FC loopback test, we are ready */ 2894 /* In case SLI4 FC loopback test, we are ready */
2895 if ((phba->sli_rev == LPFC_SLI_REV4) && 2895 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2896 (phba->link_flag & LS_LOOPBACK_MODE)) { 2896 (phba->link_flag & LS_LOOPBACK_MODE)) {
2897 phba->link_state = LPFC_HBA_READY; 2897 phba->link_state = LPFC_HBA_READY;
2898 goto out_free_mem; 2898 goto out_free_mem;
2899 } 2899 }
2900 2900
2901 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 2901 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
2902 /* 2902 /*
2903 * For private loop or for NPort pt2pt, 2903 * For private loop or for NPort pt2pt,
2904 * just start discovery and we are done. 2904 * just start discovery and we are done.
2905 */ 2905 */
2906 if ((vport->fc_flag & FC_PT2PT) || 2906 if ((vport->fc_flag & FC_PT2PT) ||
2907 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && 2907 ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
2908 !(vport->fc_flag & FC_PUBLIC_LOOP))) { 2908 !(vport->fc_flag & FC_PUBLIC_LOOP))) {
2909 2909
2910 /* Use loop map to make discovery list */ 2910 /* Use loop map to make discovery list */
2911 lpfc_disc_list_loopmap(vport); 2911 lpfc_disc_list_loopmap(vport);
2912 /* Start discovery */ 2912 /* Start discovery */
2913 lpfc_disc_start(vport); 2913 lpfc_disc_start(vport);
2914 } else { 2914 } else {
2915 lpfc_start_fdiscs(phba); 2915 lpfc_start_fdiscs(phba);
2916 lpfc_do_scr_ns_plogi(phba, vport); 2916 lpfc_do_scr_ns_plogi(phba, vport);
2917 } 2917 }
2918 } 2918 }
2919 2919
2920 out_free_mem: 2920 out_free_mem:
2921 mempool_free(mboxq, phba->mbox_mem_pool); 2921 mempool_free(mboxq, phba->mbox_mem_pool);
2922 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); 2922 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2923 kfree(dmabuf); 2923 kfree(dmabuf);
2924 return; 2924 return;
2925 } 2925 }
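
The out_free_mem block above illustrates the kind of cleanup the commit subject (a PCI DMA pool leak) refers to: the REG_VFI mailbox carries a dmabuf whose buffer comes from the mbuf DMA pool, so every exit must release the mailbox, the DMA buffer, and the dmabuf wrapper together. Reduced to a sketch (not part of the diff):

/* Sketch: the three-way cleanup lpfc_mbx_cmpl_reg_vfi() performs on
 * every exit path. Skipping lpfc_mbuf_free() here would leak memory
 * from the PCI DMA pool that backs dmabuf->virt/dmabuf->phys.
 */
void reg_vfi_cleanup_sketch(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
        struct lpfc_dmabuf *dmabuf = mboxq->context1;

        mempool_free(mboxq, phba->mbox_mem_pool);         /* mailbox */
        lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys); /* DMA buffer */
        kfree(dmabuf);                                    /* wrapper */
}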
2926 2926
2927 static void 2927 static void
2928 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 2928 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2929 { 2929 {
2930 MAILBOX_t *mb = &pmb->u.mb; 2930 MAILBOX_t *mb = &pmb->u.mb;
2931 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1; 2931 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
2932 struct lpfc_vport *vport = pmb->vport; 2932 struct lpfc_vport *vport = pmb->vport;
2933 2933
2934 2934
2935 /* Check for error */ 2935 /* Check for error */
2936 if (mb->mbxStatus) { 2936 if (mb->mbxStatus) {
2937 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */ 2937 /* READ_SPARAM mbox error <mbxStatus> state <hba_state> */
2938 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 2938 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
2939 "0319 READ_SPARAM mbxStatus error x%x " 2939 "0319 READ_SPARAM mbxStatus error x%x "
2940 "hba state x%x>\n", 2940 "hba state x%x>\n",
2941 mb->mbxStatus, vport->port_state); 2941 mb->mbxStatus, vport->port_state);
2942 lpfc_linkdown(phba); 2942 lpfc_linkdown(phba);
2943 goto out; 2943 goto out;
2944 } 2944 }
2945 2945
2946 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt, 2946 memcpy((uint8_t *) &vport->fc_sparam, (uint8_t *) mp->virt,
2947 sizeof (struct serv_parm)); 2947 sizeof (struct serv_parm));
2948 lpfc_update_vport_wwn(vport); 2948 lpfc_update_vport_wwn(vport);
2949 if (vport->port_type == LPFC_PHYSICAL_PORT) { 2949 if (vport->port_type == LPFC_PHYSICAL_PORT) {
2950 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn)); 2950 memcpy(&phba->wwnn, &vport->fc_nodename, sizeof(phba->wwnn));
2951 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn)); 2951 memcpy(&phba->wwpn, &vport->fc_portname, sizeof(phba->wwnn));
2952 } 2952 }
2953 2953
2954 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2954 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2955 kfree(mp); 2955 kfree(mp);
2956 mempool_free(pmb, phba->mbox_mem_pool); 2956 mempool_free(pmb, phba->mbox_mem_pool);
2957 return; 2957 return;
2958 2958
2959 out: 2959 out:
2960 pmb->context1 = NULL; 2960 pmb->context1 = NULL;
2961 lpfc_mbuf_free(phba, mp->virt, mp->phys); 2961 lpfc_mbuf_free(phba, mp->virt, mp->phys);
2962 kfree(mp); 2962 kfree(mp);
2963 lpfc_issue_clear_la(phba, vport); 2963 lpfc_issue_clear_la(phba, vport);
2964 mempool_free(pmb, phba->mbox_mem_pool); 2964 mempool_free(pmb, phba->mbox_mem_pool);
2965 return; 2965 return;
2966 } 2966 }
2967 2967
2968 static void 2968 static void
2969 lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) 2969 lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
2970 { 2970 {
2971 struct lpfc_vport *vport = phba->pport; 2971 struct lpfc_vport *vport = phba->pport;
2972 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL; 2972 LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
2973 struct Scsi_Host *shost; 2973 struct Scsi_Host *shost;
2974 int i; 2974 int i;
2975 struct lpfc_dmabuf *mp; 2975 struct lpfc_dmabuf *mp;
2976 int rc; 2976 int rc;
2977 struct fcf_record *fcf_record; 2977 struct fcf_record *fcf_record;
2978 2978
2979 spin_lock_irq(&phba->hbalock); 2979 spin_lock_irq(&phba->hbalock);
2980 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) { 2980 switch (bf_get(lpfc_mbx_read_top_link_spd, la)) {
2981 case LPFC_LINK_SPEED_1GHZ: 2981 case LPFC_LINK_SPEED_1GHZ:
2982 case LPFC_LINK_SPEED_2GHZ: 2982 case LPFC_LINK_SPEED_2GHZ:
2983 case LPFC_LINK_SPEED_4GHZ: 2983 case LPFC_LINK_SPEED_4GHZ:
2984 case LPFC_LINK_SPEED_8GHZ: 2984 case LPFC_LINK_SPEED_8GHZ:
2985 case LPFC_LINK_SPEED_10GHZ: 2985 case LPFC_LINK_SPEED_10GHZ:
2986 case LPFC_LINK_SPEED_16GHZ: 2986 case LPFC_LINK_SPEED_16GHZ:
2987 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); 2987 phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la);
2988 break; 2988 break;
2989 default: 2989 default:
2990 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN; 2990 phba->fc_linkspeed = LPFC_LINK_SPEED_UNKNOWN;
2991 break; 2991 break;
2992 } 2992 }
2993 2993
2994 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); 2994 phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
2995 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; 2995 phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
2996 2996
2997 shost = lpfc_shost_from_vport(vport); 2997 shost = lpfc_shost_from_vport(vport);
2998 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 2998 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
2999 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED; 2999 phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
3000 3000
3001 /* If NPIV is enabled and this adapter supports NPIV, log 3001 /* If NPIV is enabled and this adapter supports NPIV, log
3002 * a message that NPIV is not supported in this topology. 3002 * a message that NPIV is not supported in this topology.
3003 */ 3003 */
3004 if (phba->cfg_enable_npiv && phba->max_vpi) 3004 if (phba->cfg_enable_npiv && phba->max_vpi)
3005 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3005 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3006 "1309 Link Up Event npiv not supported in loop " 3006 "1309 Link Up Event npiv not supported in loop "
3007 "topology\n"); 3007 "topology\n");
3008 /* Get Loop Map information */ 3008 /* Get Loop Map information */
3009 if (bf_get(lpfc_mbx_read_top_il, la)) { 3009 if (bf_get(lpfc_mbx_read_top_il, la)) {
3010 spin_lock(shost->host_lock); 3010 spin_lock(shost->host_lock);
3011 vport->fc_flag |= FC_LBIT; 3011 vport->fc_flag |= FC_LBIT;
3012 spin_unlock(shost->host_lock); 3012 spin_unlock(shost->host_lock);
3013 } 3013 }
3014 3014
3015 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la); 3015 vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
3016 i = la->lilpBde64.tus.f.bdeSize; 3016 i = la->lilpBde64.tus.f.bdeSize;
3017 3017
3018 if (i == 0) { 3018 if (i == 0) {
3019 phba->alpa_map[0] = 0; 3019 phba->alpa_map[0] = 0;
3020 } else { 3020 } else {
3021 if (vport->cfg_log_verbose & LOG_LINK_EVENT) { 3021 if (vport->cfg_log_verbose & LOG_LINK_EVENT) {
3022 int numalpa, j, k; 3022 int numalpa, j, k;
3023 union { 3023 union {
3024 uint8_t pamap[16]; 3024 uint8_t pamap[16];
3025 struct { 3025 struct {
3026 uint32_t wd1; 3026 uint32_t wd1;
3027 uint32_t wd2; 3027 uint32_t wd2;
3028 uint32_t wd3; 3028 uint32_t wd3;
3029 uint32_t wd4; 3029 uint32_t wd4;
3030 } pa; 3030 } pa;
3031 } un; 3031 } un;
3032 numalpa = phba->alpa_map[0]; 3032 numalpa = phba->alpa_map[0];
3033 j = 0; 3033 j = 0;
3034 while (j < numalpa) { 3034 while (j < numalpa) {
3035 memset(un.pamap, 0, 16); 3035 memset(un.pamap, 0, 16);
3036 for (k = 1; j < numalpa; k++) { 3036 for (k = 1; j < numalpa; k++) {
3037 un.pamap[k - 1] = 3037 un.pamap[k - 1] =
3038 phba->alpa_map[j + 1]; 3038 phba->alpa_map[j + 1];
3039 j++; 3039 j++;
3040 if (k == 16) 3040 if (k == 16)
3041 break; 3041 break;
3042 } 3042 }
3043 /* Link Up Event ALPA map */ 3043 /* Link Up Event ALPA map */
3044 lpfc_printf_log(phba, 3044 lpfc_printf_log(phba,
3045 KERN_WARNING, 3045 KERN_WARNING,
3046 LOG_LINK_EVENT, 3046 LOG_LINK_EVENT,
3047 "1304 Link Up Event " 3047 "1304 Link Up Event "
3048 "ALPA map Data: x%x " 3048 "ALPA map Data: x%x "
3049 "x%x x%x x%x\n", 3049 "x%x x%x x%x\n",
3050 un.pa.wd1, un.pa.wd2, 3050 un.pa.wd1, un.pa.wd2,
3051 un.pa.wd3, un.pa.wd4); 3051 un.pa.wd3, un.pa.wd4);
3052 } 3052 }
3053 } 3053 }
3054 } 3054 }
3055 } else { 3055 } else {
3056 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) { 3056 if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
3057 if (phba->max_vpi && phba->cfg_enable_npiv && 3057 if (phba->max_vpi && phba->cfg_enable_npiv &&
3058 (phba->sli_rev >= LPFC_SLI_REV3)) 3058 (phba->sli_rev >= LPFC_SLI_REV3))
3059 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED; 3059 phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
3060 } 3060 }
3061 vport->fc_myDID = phba->fc_pref_DID; 3061 vport->fc_myDID = phba->fc_pref_DID;
3062 spin_lock(shost->host_lock); 3062 spin_lock(shost->host_lock);
3063 vport->fc_flag |= FC_LBIT; 3063 vport->fc_flag |= FC_LBIT;
3064 spin_unlock(shost->host_lock); 3064 spin_unlock(shost->host_lock);
3065 } 3065 }
3066 spin_unlock_irq(&phba->hbalock); 3066 spin_unlock_irq(&phba->hbalock);
3067 3067
3068 lpfc_linkup(phba); 3068 lpfc_linkup(phba);
3069 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3069 sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3070 if (!sparam_mbox) 3070 if (!sparam_mbox)
3071 goto out; 3071 goto out;
3072 3072
3073 rc = lpfc_read_sparam(phba, sparam_mbox, 0); 3073 rc = lpfc_read_sparam(phba, sparam_mbox, 0);
3074 if (rc) { 3074 if (rc) {
3075 mempool_free(sparam_mbox, phba->mbox_mem_pool); 3075 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3076 goto out; 3076 goto out;
3077 } 3077 }
3078 sparam_mbox->vport = vport; 3078 sparam_mbox->vport = vport;
3079 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; 3079 sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
3080 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT); 3080 rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
3081 if (rc == MBX_NOT_FINISHED) { 3081 if (rc == MBX_NOT_FINISHED) {
3082 mp = (struct lpfc_dmabuf *) sparam_mbox->context1; 3082 mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
3083 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3083 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3084 kfree(mp); 3084 kfree(mp);
3085 mempool_free(sparam_mbox, phba->mbox_mem_pool); 3085 mempool_free(sparam_mbox, phba->mbox_mem_pool);
3086 goto out; 3086 goto out;
3087 } 3087 }
3088 3088
3089 if (!(phba->hba_flag & HBA_FCOE_MODE)) { 3089 if (!(phba->hba_flag & HBA_FCOE_MODE)) {
3090 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3090 cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3091 if (!cfglink_mbox) 3091 if (!cfglink_mbox)
3092 goto out; 3092 goto out;
3093 vport->port_state = LPFC_LOCAL_CFG_LINK; 3093 vport->port_state = LPFC_LOCAL_CFG_LINK;
3094 lpfc_config_link(phba, cfglink_mbox); 3094 lpfc_config_link(phba, cfglink_mbox);
3095 cfglink_mbox->vport = vport; 3095 cfglink_mbox->vport = vport;
3096 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; 3096 cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
3097 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); 3097 rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
3098 if (rc == MBX_NOT_FINISHED) { 3098 if (rc == MBX_NOT_FINISHED) {
3099 mempool_free(cfglink_mbox, phba->mbox_mem_pool); 3099 mempool_free(cfglink_mbox, phba->mbox_mem_pool);
3100 goto out; 3100 goto out;
3101 } 3101 }
3102 } else { 3102 } else {
3103 vport->port_state = LPFC_VPORT_UNKNOWN; 3103 vport->port_state = LPFC_VPORT_UNKNOWN;
3104 /* 3104 /*
3105 * Add the driver's default FCF record at FCF index 0 now. This 3105 * Add the driver's default FCF record at FCF index 0 now. This
3106 * is the phase 1 implementation, which supports FCF index 0 and driver 3106 * is the phase 1 implementation, which supports FCF index 0 and driver
3107 * defaults. 3107 * defaults.
3108 */ 3108 */
3109 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) { 3109 if (!(phba->hba_flag & HBA_FIP_SUPPORT)) {
3110 fcf_record = kzalloc(sizeof(struct fcf_record), 3110 fcf_record = kzalloc(sizeof(struct fcf_record),
3111 GFP_KERNEL); 3111 GFP_KERNEL);
3112 if (unlikely(!fcf_record)) { 3112 if (unlikely(!fcf_record)) {
3113 lpfc_printf_log(phba, KERN_ERR, 3113 lpfc_printf_log(phba, KERN_ERR,
3114 LOG_MBOX | LOG_SLI, 3114 LOG_MBOX | LOG_SLI,
3115 "2554 Could not allocate memory for " 3115 "2554 Could not allocate memory for "
3116 "fcf record\n"); 3116 "fcf record\n");
3117 rc = -ENODEV; 3117 rc = -ENODEV;
3118 goto out; 3118 goto out;
3119 } 3119 }
3120 3120
3121 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record, 3121 lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
3122 LPFC_FCOE_FCF_DEF_INDEX); 3122 LPFC_FCOE_FCF_DEF_INDEX);
3123 rc = lpfc_sli4_add_fcf_record(phba, fcf_record); 3123 rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
3124 if (unlikely(rc)) { 3124 if (unlikely(rc)) {
3125 lpfc_printf_log(phba, KERN_ERR, 3125 lpfc_printf_log(phba, KERN_ERR,
3126 LOG_MBOX | LOG_SLI, 3126 LOG_MBOX | LOG_SLI,
3127 "2013 Could not manually add FCF " 3127 "2013 Could not manually add FCF "
3128 "record 0, status %d\n", rc); 3128 "record 0, status %d\n", rc);
3129 rc = -ENODEV; 3129 rc = -ENODEV;
3130 kfree(fcf_record); 3130 kfree(fcf_record);
3131 goto out; 3131 goto out;
3132 } 3132 }
3133 kfree(fcf_record); 3133 kfree(fcf_record);
3134 } 3134 }
3135 /* 3135 /*
3136 * The driver is expected to do FIP/FCF. Call the port 3136 * The driver is expected to do FIP/FCF. Call the port
3137 * and get the FCF Table. 3137 * and get the FCF Table.
3138 */ 3138 */
3139 spin_lock_irq(&phba->hbalock); 3139 spin_lock_irq(&phba->hbalock);
3140 if (phba->hba_flag & FCF_TS_INPROG) { 3140 if (phba->hba_flag & FCF_TS_INPROG) {
3141 spin_unlock_irq(&phba->hbalock); 3141 spin_unlock_irq(&phba->hbalock);
3142 return; 3142 return;
3143 } 3143 }
3144 /* This is the initial FCF discovery scan */ 3144 /* This is the initial FCF discovery scan */
3145 phba->fcf.fcf_flag |= FCF_INIT_DISC; 3145 phba->fcf.fcf_flag |= FCF_INIT_DISC;
3146 spin_unlock_irq(&phba->hbalock); 3146 spin_unlock_irq(&phba->hbalock);
3147 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, 3147 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
3148 "2778 Start FCF table scan at linkup\n"); 3148 "2778 Start FCF table scan at linkup\n");
3149 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, 3149 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
3150 LPFC_FCOE_FCF_GET_FIRST); 3150 LPFC_FCOE_FCF_GET_FIRST);
3151 if (rc) { 3151 if (rc) {
3152 spin_lock_irq(&phba->hbalock); 3152 spin_lock_irq(&phba->hbalock);
3153 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 3153 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
3154 spin_unlock_irq(&phba->hbalock); 3154 spin_unlock_irq(&phba->hbalock);
3155 goto out; 3155 goto out;
3156 } 3156 }
3157 /* Reset FCF roundrobin bmask for new discovery */ 3157 /* Reset FCF roundrobin bmask for new discovery */
3158 lpfc_sli4_clear_fcf_rr_bmask(phba); 3158 lpfc_sli4_clear_fcf_rr_bmask(phba);
3159 } 3159 }
3160 3160
3161 return; 3161 return;
3162 out: 3162 out:
3163 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3163 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3164 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3164 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3165 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n", 3165 "0263 Discovery Mailbox error: state: 0x%x : %p %p\n",
3166 vport->port_state, sparam_mbox, cfglink_mbox); 3166 vport->port_state, sparam_mbox, cfglink_mbox);
3167 lpfc_issue_clear_la(phba, vport); 3167 lpfc_issue_clear_la(phba, vport);
3168 return; 3168 return;
3169 } 3169 }
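
On the failure leg above (MBX_NOT_FINISHED), the READ_SPARAM mailbox still owns the dmabuf that lpfc_read_sparam() attached at context1, so the caller must free buffer, wrapper, and mailbox together. A sketch of that caller-side unwind (not part of the diff; names mirror the function above):

/* Sketch: caller-side unwind when submitting a READ_SPARAM mailbox
 * fails. The dmabuf at context1 also comes from the mbuf DMA pool
 * and must be released along with the mailbox.
 */
void read_sparam_issue_sketch(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
        LPFC_MBOXQ_t *sparam_mbox;
        struct lpfc_dmabuf *mp;

        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!sparam_mbox)
                return;
        if (lpfc_read_sparam(phba, sparam_mbox, 0)) {
                mempool_free(sparam_mbox, phba->mbox_mem_pool);
                return;
        }
        sparam_mbox->vport = vport;
        sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
        if (lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT)
            == MBX_NOT_FINISHED) {
                mp = (struct lpfc_dmabuf *)sparam_mbox->context1;
                lpfc_mbuf_free(phba, mp->virt, mp->phys); /* DMA buffer */
                kfree(mp);                                /* wrapper */
                mempool_free(sparam_mbox, phba->mbox_mem_pool);
        }
}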
3170 3170
3171 static void 3171 static void
3172 lpfc_enable_la(struct lpfc_hba *phba) 3172 lpfc_enable_la(struct lpfc_hba *phba)
3173 { 3173 {
3174 uint32_t control; 3174 uint32_t control;
3175 struct lpfc_sli *psli = &phba->sli; 3175 struct lpfc_sli *psli = &phba->sli;
3176 spin_lock_irq(&phba->hbalock); 3176 spin_lock_irq(&phba->hbalock);
3177 psli->sli_flag |= LPFC_PROCESS_LA; 3177 psli->sli_flag |= LPFC_PROCESS_LA;
3178 if (phba->sli_rev <= LPFC_SLI_REV3) { 3178 if (phba->sli_rev <= LPFC_SLI_REV3) {
3179 control = readl(phba->HCregaddr); 3179 control = readl(phba->HCregaddr);
3180 control |= HC_LAINT_ENA; 3180 control |= HC_LAINT_ENA;
3181 writel(control, phba->HCregaddr); 3181 writel(control, phba->HCregaddr);
3182 readl(phba->HCregaddr); /* flush */ 3182 readl(phba->HCregaddr); /* flush */
3183 } 3183 }
3184 spin_unlock_irq(&phba->hbalock); 3184 spin_unlock_irq(&phba->hbalock);
3185 } 3185 }
3186 3186
3187 static void 3187 static void
3188 lpfc_mbx_issue_link_down(struct lpfc_hba *phba) 3188 lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
3189 { 3189 {
3190 lpfc_linkdown(phba); 3190 lpfc_linkdown(phba);
3191 lpfc_enable_la(phba); 3191 lpfc_enable_la(phba);
3192 lpfc_unregister_unused_fcf(phba); 3192 lpfc_unregister_unused_fcf(phba);
3193 /* turn on Link Attention interrupts - no CLEAR_LA needed */ 3193 /* turn on Link Attention interrupts - no CLEAR_LA needed */
3194 } 3194 }
3195 3195
3196 3196
3197 /* 3197 /*
3198 * This routine handles processing a READ_TOPOLOGY mailbox 3198 * This routine handles processing a READ_TOPOLOGY mailbox
3199 * command upon completion. It is set up in the LPFC_MBOXQ 3199 * command upon completion. It is set up in the LPFC_MBOXQ
3200 * as the completion routine when the command is 3200 * as the completion routine when the command is
3201 * handed off to the SLI layer. 3201 * handed off to the SLI layer.
3202 */ 3202 */
3203 void 3203 void
3204 lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3204 lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3205 { 3205 {
3206 struct lpfc_vport *vport = pmb->vport; 3206 struct lpfc_vport *vport = pmb->vport;
3207 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3207 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3208 struct lpfc_mbx_read_top *la; 3208 struct lpfc_mbx_read_top *la;
3209 MAILBOX_t *mb = &pmb->u.mb; 3209 MAILBOX_t *mb = &pmb->u.mb;
3210 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3210 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3211 3211
3212 /* Unblock ELS traffic */ 3212 /* Unblock ELS traffic */
3213 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT; 3213 phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
3214 /* Check for error */ 3214 /* Check for error */
3215 if (mb->mbxStatus) { 3215 if (mb->mbxStatus) {
3216 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3216 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3217 "1307 READ_LA mbox error x%x state x%x\n", 3217 "1307 READ_LA mbox error x%x state x%x\n",
3218 mb->mbxStatus, vport->port_state); 3218 mb->mbxStatus, vport->port_state);
3219 lpfc_mbx_issue_link_down(phba); 3219 lpfc_mbx_issue_link_down(phba);
3220 phba->link_state = LPFC_HBA_ERROR; 3220 phba->link_state = LPFC_HBA_ERROR;
3221 goto lpfc_mbx_cmpl_read_topology_free_mbuf; 3221 goto lpfc_mbx_cmpl_read_topology_free_mbuf;
3222 } 3222 }
3223 3223
3224 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop; 3224 la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
3225 3225
3226 memcpy(&phba->alpa_map[0], mp->virt, 128); 3226 memcpy(&phba->alpa_map[0], mp->virt, 128);
3227 3227
3228 spin_lock_irq(shost->host_lock); 3228 spin_lock_irq(shost->host_lock);
3229 if (bf_get(lpfc_mbx_read_top_pb, la)) 3229 if (bf_get(lpfc_mbx_read_top_pb, la))
3230 vport->fc_flag |= FC_BYPASSED_MODE; 3230 vport->fc_flag |= FC_BYPASSED_MODE;
3231 else 3231 else
3232 vport->fc_flag &= ~FC_BYPASSED_MODE; 3232 vport->fc_flag &= ~FC_BYPASSED_MODE;
3233 spin_unlock_irq(shost->host_lock); 3233 spin_unlock_irq(shost->host_lock);
3234 3234
3235 if ((phba->fc_eventTag < la->eventTag) || 3235 if ((phba->fc_eventTag < la->eventTag) ||
3236 (phba->fc_eventTag == la->eventTag)) { 3236 (phba->fc_eventTag == la->eventTag)) {
3237 phba->fc_stat.LinkMultiEvent++; 3237 phba->fc_stat.LinkMultiEvent++;
3238 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) 3238 if (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)
3239 if (phba->fc_eventTag != 0) 3239 if (phba->fc_eventTag != 0)
3240 lpfc_linkdown(phba); 3240 lpfc_linkdown(phba);
3241 } 3241 }
3242 3242
3243 phba->fc_eventTag = la->eventTag; 3243 phba->fc_eventTag = la->eventTag;
3244 spin_lock_irq(&phba->hbalock); 3244 spin_lock_irq(&phba->hbalock);
3245 if (bf_get(lpfc_mbx_read_top_mm, la)) 3245 if (bf_get(lpfc_mbx_read_top_mm, la))
3246 phba->sli.sli_flag |= LPFC_MENLO_MAINT; 3246 phba->sli.sli_flag |= LPFC_MENLO_MAINT;
3247 else 3247 else
3248 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; 3248 phba->sli.sli_flag &= ~LPFC_MENLO_MAINT;
3249 spin_unlock_irq(&phba->hbalock); 3249 spin_unlock_irq(&phba->hbalock);
3250 3250
3251 phba->link_events++; 3251 phba->link_events++;
3252 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) && 3252 if ((bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP) &&
3253 (!bf_get(lpfc_mbx_read_top_mm, la))) { 3253 (!bf_get(lpfc_mbx_read_top_mm, la))) {
3254 phba->fc_stat.LinkUp++; 3254 phba->fc_stat.LinkUp++;
3255 if (phba->link_flag & LS_LOOPBACK_MODE) { 3255 if (phba->link_flag & LS_LOOPBACK_MODE) {
3256 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3256 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3257 "1306 Link Up Event in loop back mode " 3257 "1306 Link Up Event in loop back mode "
3258 "x%x received Data: x%x x%x x%x x%x\n", 3258 "x%x received Data: x%x x%x x%x x%x\n",
3259 la->eventTag, phba->fc_eventTag, 3259 la->eventTag, phba->fc_eventTag,
3260 bf_get(lpfc_mbx_read_top_alpa_granted, 3260 bf_get(lpfc_mbx_read_top_alpa_granted,
3261 la), 3261 la),
3262 bf_get(lpfc_mbx_read_top_link_spd, la), 3262 bf_get(lpfc_mbx_read_top_link_spd, la),
3263 phba->alpa_map[0]); 3263 phba->alpa_map[0]);
3264 } else { 3264 } else {
3265 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3265 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3266 "1303 Link Up Event x%x received " 3266 "1303 Link Up Event x%x received "
3267 "Data: x%x x%x x%x x%x x%x x%x %d\n", 3267 "Data: x%x x%x x%x x%x x%x x%x %d\n",
3268 la->eventTag, phba->fc_eventTag, 3268 la->eventTag, phba->fc_eventTag,
3269 bf_get(lpfc_mbx_read_top_alpa_granted, 3269 bf_get(lpfc_mbx_read_top_alpa_granted,
3270 la), 3270 la),
3271 bf_get(lpfc_mbx_read_top_link_spd, la), 3271 bf_get(lpfc_mbx_read_top_link_spd, la),
3272 phba->alpa_map[0], 3272 phba->alpa_map[0],
3273 bf_get(lpfc_mbx_read_top_mm, la), 3273 bf_get(lpfc_mbx_read_top_mm, la),
3274 bf_get(lpfc_mbx_read_top_fa, la), 3274 bf_get(lpfc_mbx_read_top_fa, la),
3275 phba->wait_4_mlo_maint_flg); 3275 phba->wait_4_mlo_maint_flg);
3276 } 3276 }
3277 lpfc_mbx_process_link_up(phba, la); 3277 lpfc_mbx_process_link_up(phba, la);
3278 } else if (bf_get(lpfc_mbx_read_top_att_type, la) == 3278 } else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
3279 LPFC_ATT_LINK_DOWN) { 3279 LPFC_ATT_LINK_DOWN) {
3280 phba->fc_stat.LinkDown++; 3280 phba->fc_stat.LinkDown++;
3281 if (phba->link_flag & LS_LOOPBACK_MODE) 3281 if (phba->link_flag & LS_LOOPBACK_MODE)
3282 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3282 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3283 "1308 Link Down Event in loop back mode " 3283 "1308 Link Down Event in loop back mode "
3284 "x%x received " 3284 "x%x received "
3285 "Data: x%x x%x x%x\n", 3285 "Data: x%x x%x x%x\n",
3286 la->eventTag, phba->fc_eventTag, 3286 la->eventTag, phba->fc_eventTag,
3287 phba->pport->port_state, vport->fc_flag); 3287 phba->pport->port_state, vport->fc_flag);
3288 else 3288 else
3289 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3289 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3290 "1305 Link Down Event x%x received " 3290 "1305 Link Down Event x%x received "
3291 "Data: x%x x%x x%x x%x x%x\n", 3291 "Data: x%x x%x x%x x%x x%x\n",
3292 la->eventTag, phba->fc_eventTag, 3292 la->eventTag, phba->fc_eventTag,
3293 phba->pport->port_state, vport->fc_flag, 3293 phba->pport->port_state, vport->fc_flag,
3294 bf_get(lpfc_mbx_read_top_mm, la), 3294 bf_get(lpfc_mbx_read_top_mm, la),
3295 bf_get(lpfc_mbx_read_top_fa, la)); 3295 bf_get(lpfc_mbx_read_top_fa, la));
3296 lpfc_mbx_issue_link_down(phba); 3296 lpfc_mbx_issue_link_down(phba);
3297 } 3297 }
3298 if ((bf_get(lpfc_mbx_read_top_mm, la)) && 3298 if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
3299 (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) { 3299 (bf_get(lpfc_mbx_read_top_att_type, la) == LPFC_ATT_LINK_UP)) {
3300 if (phba->link_state != LPFC_LINK_DOWN) { 3300 if (phba->link_state != LPFC_LINK_DOWN) {
3301 phba->fc_stat.LinkDown++; 3301 phba->fc_stat.LinkDown++;
3302 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3302 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3303 "1312 Link Down Event x%x received " 3303 "1312 Link Down Event x%x received "
3304 "Data: x%x x%x x%x\n", 3304 "Data: x%x x%x x%x\n",
3305 la->eventTag, phba->fc_eventTag, 3305 la->eventTag, phba->fc_eventTag,
3306 phba->pport->port_state, vport->fc_flag); 3306 phba->pport->port_state, vport->fc_flag);
3307 lpfc_mbx_issue_link_down(phba); 3307 lpfc_mbx_issue_link_down(phba);
3308 } else 3308 } else
3309 lpfc_enable_la(phba); 3309 lpfc_enable_la(phba);
3310 3310
3311 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, 3311 lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
3312 "1310 Menlo Maint Mode Link up Event x%x rcvd " 3312 "1310 Menlo Maint Mode Link up Event x%x rcvd "
3313 "Data: x%x x%x x%x\n", 3313 "Data: x%x x%x x%x\n",
3314 la->eventTag, phba->fc_eventTag, 3314 la->eventTag, phba->fc_eventTag,
3315 phba->pport->port_state, vport->fc_flag); 3315 phba->pport->port_state, vport->fc_flag);
3316 /* 3316 /*
3317 * The cmnd that triggered this will be waiting for this 3317 * The cmnd that triggered this will be waiting for this
3318 * signal. 3318 * signal.
3319 */ 3319 */
3320 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */ 3320 /* WAKEUP for MENLO_SET_MODE or MENLO_RESET command. */
3321 if (phba->wait_4_mlo_maint_flg) { 3321 if (phba->wait_4_mlo_maint_flg) {
3322 phba->wait_4_mlo_maint_flg = 0; 3322 phba->wait_4_mlo_maint_flg = 0;
3323 wake_up_interruptible(&phba->wait_4_mlo_m_q); 3323 wake_up_interruptible(&phba->wait_4_mlo_m_q);
3324 } 3324 }
3325 } 3325 }
3326 3326
3327 if (bf_get(lpfc_mbx_read_top_fa, la)) { 3327 if (bf_get(lpfc_mbx_read_top_fa, la)) {
3328 if (bf_get(lpfc_mbx_read_top_mm, la)) 3328 if (bf_get(lpfc_mbx_read_top_mm, la))
3329 lpfc_issue_clear_la(phba, vport); 3329 lpfc_issue_clear_la(phba, vport);
3330 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT, 3330 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
3331 "1311 fa %d\n", 3331 "1311 fa %d\n",
3332 bf_get(lpfc_mbx_read_top_fa, la)); 3332 bf_get(lpfc_mbx_read_top_fa, la));
3333 } 3333 }
3334 3334
3335 lpfc_mbx_cmpl_read_topology_free_mbuf: 3335 lpfc_mbx_cmpl_read_topology_free_mbuf:
3336 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3336 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3337 kfree(mp); 3337 kfree(mp);
3338 mempool_free(pmb, phba->mbox_mem_pool); 3338 mempool_free(pmb, phba->mbox_mem_pool);
3339 return; 3339 return;
3340 } 3340 }
3341 3341
3342 /* 3342 /*
3343 * This routine handles processing a REG_LOGIN mailbox 3343 * This routine handles processing a REG_LOGIN mailbox
3344 * command upon completion. It is set up in the LPFC_MBOXQ 3344 * command upon completion. It is set up in the LPFC_MBOXQ
3345 * as the completion routine when the command is 3345 * as the completion routine when the command is
3346 * handed off to the SLI layer. 3346 * handed off to the SLI layer.
3347 */ 3347 */
3348 void 3348 void
3349 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3349 lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3350 { 3350 {
3351 struct lpfc_vport *vport = pmb->vport; 3351 struct lpfc_vport *vport = pmb->vport;
3352 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3352 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3353 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3353 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3354 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3354 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3355 3355
3356 pmb->context1 = NULL; 3356 pmb->context1 = NULL;
3357 pmb->context2 = NULL; 3357 pmb->context2 = NULL;
3358 3358
3359 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) 3359 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
3360 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; 3360 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
3361 3361
3362 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || 3362 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
3363 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { 3363 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
3364 /* We received an RSCN after issuing this 3364 /* We received an RSCN after issuing this
3365 * mbox reg login; we may have cycled 3365 * mbox reg login; we may have cycled
3366 * back through the state machine and be 3366 * back through the state machine and be
3367 * back at the reg login state, so this 3367 * back at the reg login state, so this
3368 * mbox needs to be ignored because 3368 * mbox needs to be ignored because
3369 * there is another reg login in 3369 * there is another reg login in
3370 * progress. 3370 * progress.
3371 */ 3371 */
3372 spin_lock_irq(shost->host_lock); 3372 spin_lock_irq(shost->host_lock);
3373 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; 3373 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
3374 spin_unlock_irq(shost->host_lock); 3374 spin_unlock_irq(shost->host_lock);
3375 } else 3375 } else
3376 /* Good status, call state machine */ 3376 /* Good status, call state machine */
3377 lpfc_disc_state_machine(vport, ndlp, pmb, 3377 lpfc_disc_state_machine(vport, ndlp, pmb,
3378 NLP_EVT_CMPL_REG_LOGIN); 3378 NLP_EVT_CMPL_REG_LOGIN);
3379 3379
3380 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3380 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3381 kfree(mp); 3381 kfree(mp);
3382 mempool_free(pmb, phba->mbox_mem_pool); 3382 mempool_free(pmb, phba->mbox_mem_pool);
3383 /* decrement the node reference count held for this callback 3383 /* decrement the node reference count held for this callback
3384 * function. 3384 * function.
3385 */ 3385 */
3386 lpfc_nlp_put(ndlp); 3386 lpfc_nlp_put(ndlp);
3387 3387
3388 return; 3388 return;
3389 } 3389 }
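
For context, handlers like lpfc_mbx_cmpl_reg_login are installed on the mailbox before it is handed to the SLI layer, together with the resources the handler is expected to release (the DMA buffer in context1, the node reference in context2). A minimal sketch of that wiring, modeled on the lpfc_mbx_unreg_vpi path shown below; the actual REG_LOGIN issue path lives elsewhere in the driver, and 'mp' and 'ndlp' are assumed to be prepared by the caller:

    /* Sketch only: typical setup of a mailbox completion handler. */
    LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!mbox)
            return 1;
    mbox->vport = vport;
    mbox->context1 = mp;                    /* DMA buffer, freed by the handler */
    mbox->context2 = lpfc_nlp_get(ndlp);    /* node ref, dropped by the handler */
    mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
    if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
            /* On failure the handler never runs; undo both here. */
            lpfc_nlp_put(ndlp);
            lpfc_mbuf_free(phba, mp->virt, mp->phys);
            kfree(mp);
            mempool_free(mbox, phba->mbox_mem_pool);
    }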
3390 3390
3391 static void 3391 static void
3392 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3392 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3393 { 3393 {
3394 MAILBOX_t *mb = &pmb->u.mb; 3394 MAILBOX_t *mb = &pmb->u.mb;
3395 struct lpfc_vport *vport = pmb->vport; 3395 struct lpfc_vport *vport = pmb->vport;
3396 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3396 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3397 3397
3398 switch (mb->mbxStatus) { 3398 switch (mb->mbxStatus) {
3399 case 0x0011: 3399 case 0x0011:
3400 case 0x0020: 3400 case 0x0020:
3401 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3401 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3402 "0911 cmpl_unreg_vpi, mb status = 0x%x\n", 3402 "0911 cmpl_unreg_vpi, mb status = 0x%x\n",
3403 mb->mbxStatus); 3403 mb->mbxStatus);
3404 break; 3404 break;
3405 /* If VPI is busy, reset the HBA */ 3405 /* If VPI is busy, reset the HBA */
3406 case 0x9700: 3406 case 0x9700:
3407 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3407 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3408 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n", 3408 "2798 Unreg_vpi failed vpi 0x%x, mb status = 0x%x\n",
3409 vport->vpi, mb->mbxStatus); 3409 vport->vpi, mb->mbxStatus);
3410 if (!(phba->pport->load_flag & FC_UNLOADING)) 3410 if (!(phba->pport->load_flag & FC_UNLOADING))
3411 lpfc_workq_post_event(phba, NULL, NULL, 3411 lpfc_workq_post_event(phba, NULL, NULL,
3412 LPFC_EVT_RESET_HBA); 3412 LPFC_EVT_RESET_HBA);
3413 } 3413 }
3414 spin_lock_irq(shost->host_lock); 3414 spin_lock_irq(shost->host_lock);
3415 vport->vpi_state &= ~LPFC_VPI_REGISTERED; 3415 vport->vpi_state &= ~LPFC_VPI_REGISTERED;
3416 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI; 3416 vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
3417 spin_unlock_irq(shost->host_lock); 3417 spin_unlock_irq(shost->host_lock);
3418 vport->unreg_vpi_cmpl = VPORT_OK; 3418 vport->unreg_vpi_cmpl = VPORT_OK;
3419 mempool_free(pmb, phba->mbox_mem_pool); 3419 mempool_free(pmb, phba->mbox_mem_pool);
3420 lpfc_cleanup_vports_rrqs(vport, NULL); 3420 lpfc_cleanup_vports_rrqs(vport, NULL);
3421 /* 3421 /*
3422 * This shost reference might have been taken at the beginning of 3422 * This shost reference might have been taken at the beginning of
3423 * lpfc_vport_delete() 3423 * lpfc_vport_delete()
3424 */ 3424 */
3425 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport)) 3425 if ((vport->load_flag & FC_UNLOADING) && (vport != phba->pport))
3426 scsi_host_put(shost); 3426 scsi_host_put(shost);
3427 } 3427 }
3428 3428
3429 int 3429 int
3430 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport) 3430 lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
3431 { 3431 {
3432 struct lpfc_hba *phba = vport->phba; 3432 struct lpfc_hba *phba = vport->phba;
3433 LPFC_MBOXQ_t *mbox; 3433 LPFC_MBOXQ_t *mbox;
3434 int rc; 3434 int rc;
3435 3435
3436 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3436 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3437 if (!mbox) 3437 if (!mbox)
3438 return 1; 3438 return 1;
3439 3439
3440 lpfc_unreg_vpi(phba, vport->vpi, mbox); 3440 lpfc_unreg_vpi(phba, vport->vpi, mbox);
3441 mbox->vport = vport; 3441 mbox->vport = vport;
3442 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi; 3442 mbox->mbox_cmpl = lpfc_mbx_cmpl_unreg_vpi;
3443 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 3443 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
3444 if (rc == MBX_NOT_FINISHED) { 3444 if (rc == MBX_NOT_FINISHED) {
3445 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT, 3445 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
3446 "1800 Could not issue unreg_vpi\n"); 3446 "1800 Could not issue unreg_vpi\n");
3447 mempool_free(mbox, phba->mbox_mem_pool); 3447 mempool_free(mbox, phba->mbox_mem_pool);
3448 vport->unreg_vpi_cmpl = VPORT_ERROR; 3448 vport->unreg_vpi_cmpl = VPORT_ERROR;
3449 return rc; 3449 return rc;
3450 } 3450 }
3451 return 0; 3451 return 0;
3452 } 3452 }
3453 3453
3454 static void 3454 static void
3455 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3455 lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3456 { 3456 {
3457 struct lpfc_vport *vport = pmb->vport; 3457 struct lpfc_vport *vport = pmb->vport;
3458 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3458 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3459 MAILBOX_t *mb = &pmb->u.mb; 3459 MAILBOX_t *mb = &pmb->u.mb;
3460 3460
3461 switch (mb->mbxStatus) { 3461 switch (mb->mbxStatus) {
3462 case 0x0011: 3462 case 0x0011:
3463 case 0x9601: 3463 case 0x9601:
3464 case 0x9602: 3464 case 0x9602:
3465 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 3465 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
3466 "0912 cmpl_reg_vpi, mb status = 0x%x\n", 3466 "0912 cmpl_reg_vpi, mb status = 0x%x\n",
3467 mb->mbxStatus); 3467 mb->mbxStatus);
3468 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3468 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3469 spin_lock_irq(shost->host_lock); 3469 spin_lock_irq(shost->host_lock);
3470 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); 3470 vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
3471 spin_unlock_irq(shost->host_lock); 3471 spin_unlock_irq(shost->host_lock);
3472 vport->fc_myDID = 0; 3472 vport->fc_myDID = 0;
3473 goto out; 3473 goto out;
3474 } 3474 }
3475 3475
3476 spin_lock_irq(shost->host_lock); 3476 spin_lock_irq(shost->host_lock);
3477 vport->vpi_state |= LPFC_VPI_REGISTERED; 3477 vport->vpi_state |= LPFC_VPI_REGISTERED;
3478 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI; 3478 vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
3479 spin_unlock_irq(shost->host_lock); 3479 spin_unlock_irq(shost->host_lock);
3480 vport->num_disc_nodes = 0; 3480 vport->num_disc_nodes = 0;
3481 /* go thru NPR list and issue ELS PLOGIs */ 3481 /* go thru NPR list and issue ELS PLOGIs */
3482 if (vport->fc_npr_cnt) 3482 if (vport->fc_npr_cnt)
3483 lpfc_els_disc_plogi(vport); 3483 lpfc_els_disc_plogi(vport);
3484 3484
3485 if (!vport->num_disc_nodes) { 3485 if (!vport->num_disc_nodes) {
3486 spin_lock_irq(shost->host_lock); 3486 spin_lock_irq(shost->host_lock);
3487 vport->fc_flag &= ~FC_NDISC_ACTIVE; 3487 vport->fc_flag &= ~FC_NDISC_ACTIVE;
3488 spin_unlock_irq(shost->host_lock); 3488 spin_unlock_irq(shost->host_lock);
3489 lpfc_can_disctmo(vport); 3489 lpfc_can_disctmo(vport);
3490 } 3490 }
3491 vport->port_state = LPFC_VPORT_READY; 3491 vport->port_state = LPFC_VPORT_READY;
3492 3492
3493 out: 3493 out:
3494 mempool_free(pmb, phba->mbox_mem_pool); 3494 mempool_free(pmb, phba->mbox_mem_pool);
3495 return; 3495 return;
3496 } 3496 }
3497 3497
3498 /** 3498 /**
3499 * lpfc_create_static_vport - Read HBA config region to create static vports. 3499 * lpfc_create_static_vport - Read HBA config region to create static vports.
3500 * @phba: pointer to lpfc hba data structure. 3500 * @phba: pointer to lpfc hba data structure.
3501 * 3501 *
3502 * This routine issues a DUMP mailbox command for config region 22 to get 3502 * This routine issues a DUMP mailbox command for config region 22 to get
3503 * the list of static vports to be created. The function creates vports 3503 * the list of static vports to be created. The function creates vports
3504 * based on the information returned from the HBA. 3504 * based on the information returned from the HBA.
3505 **/ 3505 **/
3506 void 3506 void
3507 lpfc_create_static_vport(struct lpfc_hba *phba) 3507 lpfc_create_static_vport(struct lpfc_hba *phba)
3508 { 3508 {
3509 LPFC_MBOXQ_t *pmb = NULL; 3509 LPFC_MBOXQ_t *pmb = NULL;
3510 MAILBOX_t *mb; 3510 MAILBOX_t *mb;
3511 struct static_vport_info *vport_info; 3511 struct static_vport_info *vport_info;
3512 int rc = 0, i; 3512 int mbx_wait_rc = 0, i;
3513 struct fc_vport_identifiers vport_id; 3513 struct fc_vport_identifiers vport_id;
3514 struct fc_vport *new_fc_vport; 3514 struct fc_vport *new_fc_vport;
3515 struct Scsi_Host *shost; 3515 struct Scsi_Host *shost;
3516 struct lpfc_vport *vport; 3516 struct lpfc_vport *vport;
3517 uint16_t offset = 0; 3517 uint16_t offset = 0;
3518 uint8_t *vport_buff; 3518 uint8_t *vport_buff;
3519 struct lpfc_dmabuf *mp; 3519 struct lpfc_dmabuf *mp;
3520 uint32_t byte_count = 0; 3520 uint32_t byte_count = 0;
3521 3521
3522 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 3522 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3523 if (!pmb) { 3523 if (!pmb) {
3524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3524 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3525 "0542 lpfc_create_static_vport failed to" 3525 "0542 lpfc_create_static_vport failed to"
3526 " allocate mailbox memory\n"); 3526 " allocate mailbox memory\n");
3527 return; 3527 return;
3528 } 3528 }
3529 3529 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
3530 mb = &pmb->u.mb; 3530 mb = &pmb->u.mb;
3531 3531
3532 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL); 3532 vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
3533 if (!vport_info) { 3533 if (!vport_info) {
3534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3534 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3535 "0543 lpfc_create_static_vport failed to" 3535 "0543 lpfc_create_static_vport failed to"
3536 " allocate vport_info\n"); 3536 " allocate vport_info\n");
3537 mempool_free(pmb, phba->mbox_mem_pool); 3537 mempool_free(pmb, phba->mbox_mem_pool);
3538 return; 3538 return;
3539 } 3539 }
3540 3540
3541 vport_buff = (uint8_t *) vport_info; 3541 vport_buff = (uint8_t *) vport_info;
3542 do { 3542 do {
3543 /* free dma buffer from previous round */
3544 if (pmb->context1) {
3545 mp = (struct lpfc_dmabuf *)pmb->context1;
3546 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3547 kfree(mp);
3548 }
3543 if (lpfc_dump_static_vport(phba, pmb, offset)) 3549 if (lpfc_dump_static_vport(phba, pmb, offset))
3544 goto out; 3550 goto out;
3545 3551
3546 pmb->vport = phba->pport; 3552 pmb->vport = phba->pport;
3547 rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO); 3553 mbx_wait_rc = lpfc_sli_issue_mbox_wait(phba, pmb,
3554 LPFC_MBOX_TMO);
3548 3555
3549 if ((rc != MBX_SUCCESS) || mb->mbxStatus) { 3556 if ((mbx_wait_rc != MBX_SUCCESS) || mb->mbxStatus) {
3550 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3557 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3551 "0544 lpfc_create_static_vport failed to" 3558 "0544 lpfc_create_static_vport failed to"
3552 " issue dump mailbox command ret 0x%x " 3559 " issue dump mailbox command ret 0x%x "
3553 "status 0x%x\n", 3560 "status 0x%x\n",
3554 rc, mb->mbxStatus); 3561 mbx_wait_rc, mb->mbxStatus);
3555 goto out; 3562 goto out;
3556 } 3563 }
3557 3564
3558 if (phba->sli_rev == LPFC_SLI_REV4) { 3565 if (phba->sli_rev == LPFC_SLI_REV4) {
3559 byte_count = pmb->u.mqe.un.mb_words[5]; 3566 byte_count = pmb->u.mqe.un.mb_words[5];
3560 mp = (struct lpfc_dmabuf *) pmb->context2; 3567 mp = (struct lpfc_dmabuf *)pmb->context1;
3561 if (byte_count > sizeof(struct static_vport_info) - 3568 if (byte_count > sizeof(struct static_vport_info) -
3562 offset) 3569 offset)
3563 byte_count = sizeof(struct static_vport_info) 3570 byte_count = sizeof(struct static_vport_info)
3564 - offset; 3571 - offset;
3565 memcpy(vport_buff + offset, mp->virt, byte_count); 3572 memcpy(vport_buff + offset, mp->virt, byte_count);
3566 offset += byte_count; 3573 offset += byte_count;
3567 } else { 3574 } else {
3568 if (mb->un.varDmp.word_cnt > 3575 if (mb->un.varDmp.word_cnt >
3569 sizeof(struct static_vport_info) - offset) 3576 sizeof(struct static_vport_info) - offset)
3570 mb->un.varDmp.word_cnt = 3577 mb->un.varDmp.word_cnt =
3571 sizeof(struct static_vport_info) 3578 sizeof(struct static_vport_info)
3572 - offset; 3579 - offset;
3573 byte_count = mb->un.varDmp.word_cnt; 3580 byte_count = mb->un.varDmp.word_cnt;
3574 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET, 3581 lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
3575 vport_buff + offset, 3582 vport_buff + offset,
3576 byte_count); 3583 byte_count);
3577 3584
3578 offset += byte_count; 3585 offset += byte_count;
3579 } 3586 }
3580 3587
3581 } while (byte_count && 3588 } while (byte_count &&
3582 offset < sizeof(struct static_vport_info)); 3589 offset < sizeof(struct static_vport_info));
3583 3590
3584 3591
3585 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) || 3592 if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
3586 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK) 3593 ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
3587 != VPORT_INFO_REV)) { 3594 != VPORT_INFO_REV)) {
3588 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 3595 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3589 "0545 lpfc_create_static_vport bad" 3596 "0545 lpfc_create_static_vport bad"
3590 " information header 0x%x 0x%x\n", 3597 " information header 0x%x 0x%x\n",
3591 le32_to_cpu(vport_info->signature), 3598 le32_to_cpu(vport_info->signature),
3592 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK); 3599 le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
3593 3600
3594 goto out; 3601 goto out;
3595 } 3602 }
3596 3603
3597 shost = lpfc_shost_from_vport(phba->pport); 3604 shost = lpfc_shost_from_vport(phba->pport);
3598 3605
3599 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) { 3606 for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
3600 memset(&vport_id, 0, sizeof(vport_id)); 3607 memset(&vport_id, 0, sizeof(vport_id));
3601 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn); 3608 vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
3602 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn); 3609 vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
3603 if (!vport_id.port_name || !vport_id.node_name) 3610 if (!vport_id.port_name || !vport_id.node_name)
3604 continue; 3611 continue;
3605 3612
3606 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR; 3613 vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
3607 vport_id.vport_type = FC_PORTTYPE_NPIV; 3614 vport_id.vport_type = FC_PORTTYPE_NPIV;
3608 vport_id.disable = false; 3615 vport_id.disable = false;
3609 new_fc_vport = fc_vport_create(shost, 0, &vport_id); 3616 new_fc_vport = fc_vport_create(shost, 0, &vport_id);
3610 3617
3611 if (!new_fc_vport) { 3618 if (!new_fc_vport) {
3612 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, 3619 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3613 "0546 lpfc_create_static_vport failed to" 3620 "0546 lpfc_create_static_vport failed to"
3614 " create vport\n"); 3621 " create vport\n");
3615 continue; 3622 continue;
3616 } 3623 }
3617 3624
3618 vport = *(struct lpfc_vport **)new_fc_vport->dd_data; 3625 vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
3619 vport->vport_flag |= STATIC_VPORT; 3626 vport->vport_flag |= STATIC_VPORT;
3620 } 3627 }
3621 3628
3622 out: 3629 out:
3623 kfree(vport_info); 3630 kfree(vport_info);
3624 if (rc != MBX_TIMEOUT) { 3631 if (mbx_wait_rc != MBX_TIMEOUT) {
3625 if (pmb->context2) { 3632 if (pmb->context1) {
3626 mp = (struct lpfc_dmabuf *) pmb->context2; 3633 mp = (struct lpfc_dmabuf *)pmb->context1;
3627 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3634 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3628 kfree(mp); 3635 kfree(mp);
3629 } 3636 }
3630 mempool_free(pmb, phba->mbox_mem_pool); 3637 mempool_free(pmb, phba->mbox_mem_pool);
3631 } 3638 }
3632 3639
3633 return; 3640 return;
3634 } 3641 }
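
The reworked loop above is the substance of this commit's leak fix: lpfc_create_static_vport reuses a single mailbox across DUMP iterations, and each round can chain a fresh buffer from the PCI DMA pool at pmb->context1. Previously only the final buffer was freed (and the SLI4 path read it from context2), so each extra pass through the loop leaked one mbuf. The reusable pattern, as a hedged sketch ('more_data' stands in for the real loop test):

    /* When reissuing a mailbox in a loop, free the DMA buffer
     * chained by the previous round before setting up the next one.
     */
    do {
            if (pmb->context1) {
                    mp = (struct lpfc_dmabuf *)pmb->context1;
                    lpfc_mbuf_free(phba, mp->virt, mp->phys);
                    kfree(mp);
            }
            /* ... build and issue the next DUMP command ... */
    } while (more_data);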
3635 3642
3636 /* 3643 /*
3637 * This routine handles processing a Fabric REG_LOGIN mailbox 3644 * This routine handles processing a Fabric REG_LOGIN mailbox
3638 * command upon completion. It is set up in the LPFC_MBOXQ 3645 * command upon completion. It is set up in the LPFC_MBOXQ
3639 * as the completion routine when the command is 3646 * as the completion routine when the command is
3640 * handed off to the SLI layer. 3647 * handed off to the SLI layer.
3641 */ 3648 */
3642 void 3649 void
3643 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3650 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3644 { 3651 {
3645 struct lpfc_vport *vport = pmb->vport; 3652 struct lpfc_vport *vport = pmb->vport;
3646 MAILBOX_t *mb = &pmb->u.mb; 3653 MAILBOX_t *mb = &pmb->u.mb;
3647 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3654 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3648 struct lpfc_nodelist *ndlp; 3655 struct lpfc_nodelist *ndlp;
3649 struct Scsi_Host *shost; 3656 struct Scsi_Host *shost;
3650 3657
3651 ndlp = (struct lpfc_nodelist *) pmb->context2; 3658 ndlp = (struct lpfc_nodelist *) pmb->context2;
3652 pmb->context1 = NULL; 3659 pmb->context1 = NULL;
3653 pmb->context2 = NULL; 3660 pmb->context2 = NULL;
3654 3661
3655 if (mb->mbxStatus) { 3662 if (mb->mbxStatus) {
3656 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, 3663 lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
3657 "0258 Register Fabric login error: 0x%x\n", 3664 "0258 Register Fabric login error: 0x%x\n",
3658 mb->mbxStatus); 3665 mb->mbxStatus);
3659 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3666 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3660 kfree(mp); 3667 kfree(mp);
3661 mempool_free(pmb, phba->mbox_mem_pool); 3668 mempool_free(pmb, phba->mbox_mem_pool);
3662 3669
3663 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3670 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3664 /* FLOGI failed, use loop map to make discovery list */ 3671 /* FLOGI failed, use loop map to make discovery list */
3665 lpfc_disc_list_loopmap(vport); 3672 lpfc_disc_list_loopmap(vport);
3666 3673
3667 /* Start discovery */ 3674 /* Start discovery */
3668 lpfc_disc_start(vport); 3675 lpfc_disc_start(vport);
3669 /* Decrement the reference count to ndlp after the 3676 /* Decrement the reference count to ndlp after the
3670 * references to the ndlp are done. 3677 * references to the ndlp are done.
3671 */ 3678 */
3672 lpfc_nlp_put(ndlp); 3679 lpfc_nlp_put(ndlp);
3673 return; 3680 return;
3674 } 3681 }
3675 3682
3676 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3683 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3677 /* Decrement the reference count to ndlp after the references 3684 /* Decrement the reference count to ndlp after the references
3678 * to the ndlp are done. 3685 * to the ndlp are done.
3679 */ 3686 */
3680 lpfc_nlp_put(ndlp); 3687 lpfc_nlp_put(ndlp);
3681 return; 3688 return;
3682 } 3689 }
3683 3690
3684 if (phba->sli_rev < LPFC_SLI_REV4) 3691 if (phba->sli_rev < LPFC_SLI_REV4)
3685 ndlp->nlp_rpi = mb->un.varWords[0]; 3692 ndlp->nlp_rpi = mb->un.varWords[0];
3686 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3693 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3687 ndlp->nlp_type |= NLP_FABRIC; 3694 ndlp->nlp_type |= NLP_FABRIC;
3688 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3695 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3689 3696
3690 if (vport->port_state == LPFC_FABRIC_CFG_LINK) { 3697 if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
3691 /* when the physical port receives a LOGO, do not 3698 /* when the physical port receives a LOGO, do not
3692 * start vport discovery */ 3699 * start vport discovery */
3693 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) 3700 if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
3694 lpfc_start_fdiscs(phba); 3701 lpfc_start_fdiscs(phba);
3695 else { 3702 else {
3696 shost = lpfc_shost_from_vport(vport); 3703 shost = lpfc_shost_from_vport(vport);
3697 spin_lock_irq(shost->host_lock); 3704 spin_lock_irq(shost->host_lock);
3698 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG; 3705 vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG;
3699 spin_unlock_irq(shost->host_lock); 3706 spin_unlock_irq(shost->host_lock);
3700 } 3707 }
3701 lpfc_do_scr_ns_plogi(phba, vport); 3708 lpfc_do_scr_ns_plogi(phba, vport);
3702 } 3709 }
3703 3710
3704 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3711 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3705 kfree(mp); 3712 kfree(mp);
3706 mempool_free(pmb, phba->mbox_mem_pool); 3713 mempool_free(pmb, phba->mbox_mem_pool);
3707 3714
3708 /* Drop the reference count from the mbox at the end after 3715 /* Drop the reference count from the mbox at the end after
3709 * all the current reference to the ndlp have been done. 3716 * all the current reference to the ndlp have been done.
3710 */ 3717 */
3711 lpfc_nlp_put(ndlp); 3718 lpfc_nlp_put(ndlp);
3712 return; 3719 return;
3713 } 3720 }
3714 3721
3715 /* 3722 /*
3716 * This routine handles processing a NameServer REG_LOGIN mailbox 3723 * This routine handles processing a NameServer REG_LOGIN mailbox
3717 * command upon completion. It is setup in the LPFC_MBOXQ 3724 * command upon completion. It is setup in the LPFC_MBOXQ
3718 * as the completion routine when the command is 3725 * as the completion routine when the command is
3719 * handed off to the SLI layer. 3726 * handed off to the SLI layer.
3720 */ 3727 */
3721 void 3728 void
3722 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 3729 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3723 { 3730 {
3724 MAILBOX_t *mb = &pmb->u.mb; 3731 MAILBOX_t *mb = &pmb->u.mb;
3725 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 3732 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3726 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 3733 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3727 struct lpfc_vport *vport = pmb->vport; 3734 struct lpfc_vport *vport = pmb->vport;
3728 3735
3729 pmb->context1 = NULL; 3736 pmb->context1 = NULL;
3730 pmb->context2 = NULL; 3737 pmb->context2 = NULL;
3731 3738
3732 if (mb->mbxStatus) { 3739 if (mb->mbxStatus) {
3733 out: 3740 out:
3734 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, 3741 lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3735 "0260 Register NameServer error: 0x%x\n", 3742 "0260 Register NameServer error: 0x%x\n",
3736 mb->mbxStatus); 3743 mb->mbxStatus);
3737 /* decrement the node reference count held for this 3744 /* decrement the node reference count held for this
3738 * callback function. 3745 * callback function.
3739 */ 3746 */
3740 lpfc_nlp_put(ndlp); 3747 lpfc_nlp_put(ndlp);
3741 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3748 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3742 kfree(mp); 3749 kfree(mp);
3743 mempool_free(pmb, phba->mbox_mem_pool); 3750 mempool_free(pmb, phba->mbox_mem_pool);
3744 3751
3745 /* If no other thread is using the ndlp, free it */ 3752 /* If no other thread is using the ndlp, free it */
3746 lpfc_nlp_not_used(ndlp); 3753 lpfc_nlp_not_used(ndlp);
3747 3754
3748 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) { 3755 if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
3749 /* 3756 /*
3750 * RegLogin failed, use loop map to make discovery 3757 * RegLogin failed, use loop map to make discovery
3751 * list 3758 * list
3752 */ 3759 */
3753 lpfc_disc_list_loopmap(vport); 3760 lpfc_disc_list_loopmap(vport);
3754 3761
3755 /* Start discovery */ 3762 /* Start discovery */
3756 lpfc_disc_start(vport); 3763 lpfc_disc_start(vport);
3757 return; 3764 return;
3758 } 3765 }
3759 lpfc_vport_set_state(vport, FC_VPORT_FAILED); 3766 lpfc_vport_set_state(vport, FC_VPORT_FAILED);
3760 return; 3767 return;
3761 } 3768 }
3762 3769
3763 if (phba->sli_rev < LPFC_SLI_REV4) 3770 if (phba->sli_rev < LPFC_SLI_REV4)
3764 ndlp->nlp_rpi = mb->un.varWords[0]; 3771 ndlp->nlp_rpi = mb->un.varWords[0];
3765 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 3772 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
3766 ndlp->nlp_type |= NLP_FABRIC; 3773 ndlp->nlp_type |= NLP_FABRIC;
3767 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3774 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3768 3775
3769 if (vport->port_state < LPFC_VPORT_READY) { 3776 if (vport->port_state < LPFC_VPORT_READY) {
3770 /* Link up discovery requires Fabric registration. */ 3777 /* Link up discovery requires Fabric registration. */
3771 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */ 3778 lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, 0); /* Do this first! */
3772 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0); 3779 lpfc_ns_cmd(vport, SLI_CTNS_RNN_ID, 0, 0);
3773 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0); 3780 lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
3774 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0); 3781 lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
3775 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0); 3782 lpfc_ns_cmd(vport, SLI_CTNS_RFT_ID, 0, 0);
3776 3783
3777 /* Issue SCR just before NameServer GID_FT Query */ 3784 /* Issue SCR just before NameServer GID_FT Query */
3778 lpfc_issue_els_scr(vport, SCR_DID, 0); 3785 lpfc_issue_els_scr(vport, SCR_DID, 0);
3779 } 3786 }
3780 3787
3781 vport->fc_ns_retry = 0; 3788 vport->fc_ns_retry = 0;
3782 /* Good status, issue CT Request to NameServer */ 3789 /* Good status, issue CT Request to NameServer */
3783 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) { 3790 if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0)) {
3784 /* Cannot issue NameServer Query, so finish up discovery */ 3791 /* Cannot issue NameServer Query, so finish up discovery */
3785 goto out; 3792 goto out;
3786 } 3793 }
3787 3794
3788 /* decrement the node reference count held for this 3795 /* decrement the node reference count held for this
3789 * callback function. 3796 * callback function.
3790 */ 3797 */
3791 lpfc_nlp_put(ndlp); 3798 lpfc_nlp_put(ndlp);
3792 lpfc_mbuf_free(phba, mp->virt, mp->phys); 3799 lpfc_mbuf_free(phba, mp->virt, mp->phys);
3793 kfree(mp); 3800 kfree(mp);
3794 mempool_free(pmb, phba->mbox_mem_pool); 3801 mempool_free(pmb, phba->mbox_mem_pool);
3795 3802
3796 return; 3803 return;
3797 } 3804 }
3798 3805
3799 static void 3806 static void
3800 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 3807 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3801 { 3808 {
3802 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3809 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3803 struct fc_rport *rport; 3810 struct fc_rport *rport;
3804 struct lpfc_rport_data *rdata; 3811 struct lpfc_rport_data *rdata;
3805 struct fc_rport_identifiers rport_ids; 3812 struct fc_rport_identifiers rport_ids;
3806 struct lpfc_hba *phba = vport->phba; 3813 struct lpfc_hba *phba = vport->phba;
3807 3814
3808 /* Remote port has reappeared. Re-register w/ FC transport */ 3815 /* Remote port has reappeared. Re-register w/ FC transport */
3809 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 3816 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
3810 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 3817 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
3811 rport_ids.port_id = ndlp->nlp_DID; 3818 rport_ids.port_id = ndlp->nlp_DID;
3812 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN; 3819 rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
3813 3820
3814 /* 3821 /*
3815 * We leave our node pointer in rport->dd_data when we unregister a 3822 * We leave our node pointer in rport->dd_data when we unregister a
3816 * FCP target port. But fc_remote_port_add zeros the space to which 3823 * FCP target port. But fc_remote_port_add zeros the space to which
3817 * rport->dd_data points. So, if we're reusing a previously 3824 * rport->dd_data points. So, if we're reusing a previously
3818 * registered port, drop the reference that we took the last time we 3825 * registered port, drop the reference that we took the last time we
3819 * registered the port. 3826 * registered the port.
3820 */ 3827 */
3821 if (ndlp->rport && ndlp->rport->dd_data && 3828 if (ndlp->rport && ndlp->rport->dd_data &&
3822 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp) 3829 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
3823 lpfc_nlp_put(ndlp); 3830 lpfc_nlp_put(ndlp);
3824 3831
3825 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, 3832 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
3826 "rport add: did:x%x flg:x%x type x%x", 3833 "rport add: did:x%x flg:x%x type x%x",
3827 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3834 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3828 3835
3829 /* Don't add the remote port if unloading. */ 3836 /* Don't add the remote port if unloading. */
3830 if (vport->load_flag & FC_UNLOADING) 3837 if (vport->load_flag & FC_UNLOADING)
3831 return; 3838 return;
3832 3839
3833 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids); 3840 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
3834 if (!rport || !get_device(&rport->dev)) { 3841 if (!rport || !get_device(&rport->dev)) {
3835 dev_printk(KERN_WARNING, &phba->pcidev->dev, 3842 dev_printk(KERN_WARNING, &phba->pcidev->dev,
3836 "Warning: fc_remote_port_add failed\n"); 3843 "Warning: fc_remote_port_add failed\n");
3837 return; 3844 return;
3838 } 3845 }
3839 3846
3840 /* initialize static port data */ 3847 /* initialize static port data */
3841 rport->maxframe_size = ndlp->nlp_maxframe; 3848 rport->maxframe_size = ndlp->nlp_maxframe;
3842 rport->supported_classes = ndlp->nlp_class_sup; 3849 rport->supported_classes = ndlp->nlp_class_sup;
3843 rdata = rport->dd_data; 3850 rdata = rport->dd_data;
3844 rdata->pnode = lpfc_nlp_get(ndlp); 3851 rdata->pnode = lpfc_nlp_get(ndlp);
3845 3852
3846 if (ndlp->nlp_type & NLP_FCP_TARGET) 3853 if (ndlp->nlp_type & NLP_FCP_TARGET)
3847 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET; 3854 rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
3848 if (ndlp->nlp_type & NLP_FCP_INITIATOR) 3855 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
3849 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR; 3856 rport_ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
3850 3857
3851 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN) 3858 if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
3852 fc_remote_port_rolechg(rport, rport_ids.roles); 3859 fc_remote_port_rolechg(rport, rport_ids.roles);
3853 3860
3854 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3861 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3855 "3183 rport register x%06x, rport %p role x%x\n", 3862 "3183 rport register x%06x, rport %p role x%x\n",
3856 ndlp->nlp_DID, rport, rport_ids.roles); 3863 ndlp->nlp_DID, rport, rport_ids.roles);
3857 3864
3858 if ((rport->scsi_target_id != -1) && 3865 if ((rport->scsi_target_id != -1) &&
3859 (rport->scsi_target_id < LPFC_MAX_TARGET)) { 3866 (rport->scsi_target_id < LPFC_MAX_TARGET)) {
3860 ndlp->nlp_sid = rport->scsi_target_id; 3867 ndlp->nlp_sid = rport->scsi_target_id;
3861 } 3868 }
3862 return; 3869 return;
3863 } 3870 }
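
The registration above must keep node reference counts balanced across rport reuse: the old reference parked in rport->dd_data is dropped before fc_remote_port_add() zeroes that space, and a fresh one is taken for rdata->pnode. A sketch of the invariant, assuming the teardown path (not in this hunk) performs the matching put:

    rdata = rport->dd_data;
    rdata->pnode = lpfc_nlp_get(ndlp);      /* +1 ref held for this registration */
    /* ... rport lives on ... */
    lpfc_nlp_put(rdata->pnode);             /* -1 ref when the rport is torn down */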
3864 3871
3865 static void 3872 static void
3866 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) 3873 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
3867 { 3874 {
3868 struct fc_rport *rport = ndlp->rport; 3875 struct fc_rport *rport = ndlp->rport;
3869 3876
3870 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT, 3877 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
3871 "rport delete: did:x%x flg:x%x type x%x", 3878 "rport delete: did:x%x flg:x%x type x%x",
3872 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 3879 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3873 3880
3874 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 3881 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
3875 "3184 rport unregister x%06x, rport %p\n", 3882 "3184 rport unregister x%06x, rport %p\n",
3876 ndlp->nlp_DID, rport); 3883 ndlp->nlp_DID, rport);
3877 3884
3878 fc_remote_port_delete(rport); 3885 fc_remote_port_delete(rport);
3879 3886
3880 return; 3887 return;
3881 } 3888 }
3882 3889
3883 static void 3890 static void
3884 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) 3891 lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count)
3885 { 3892 {
3886 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3893 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3887 3894
3888 spin_lock_irq(shost->host_lock); 3895 spin_lock_irq(shost->host_lock);
3889 switch (state) { 3896 switch (state) {
3890 case NLP_STE_UNUSED_NODE: 3897 case NLP_STE_UNUSED_NODE:
3891 vport->fc_unused_cnt += count; 3898 vport->fc_unused_cnt += count;
3892 break; 3899 break;
3893 case NLP_STE_PLOGI_ISSUE: 3900 case NLP_STE_PLOGI_ISSUE:
3894 vport->fc_plogi_cnt += count; 3901 vport->fc_plogi_cnt += count;
3895 break; 3902 break;
3896 case NLP_STE_ADISC_ISSUE: 3903 case NLP_STE_ADISC_ISSUE:
3897 vport->fc_adisc_cnt += count; 3904 vport->fc_adisc_cnt += count;
3898 break; 3905 break;
3899 case NLP_STE_REG_LOGIN_ISSUE: 3906 case NLP_STE_REG_LOGIN_ISSUE:
3900 vport->fc_reglogin_cnt += count; 3907 vport->fc_reglogin_cnt += count;
3901 break; 3908 break;
3902 case NLP_STE_PRLI_ISSUE: 3909 case NLP_STE_PRLI_ISSUE:
3903 vport->fc_prli_cnt += count; 3910 vport->fc_prli_cnt += count;
3904 break; 3911 break;
3905 case NLP_STE_UNMAPPED_NODE: 3912 case NLP_STE_UNMAPPED_NODE:
3906 vport->fc_unmap_cnt += count; 3913 vport->fc_unmap_cnt += count;
3907 break; 3914 break;
3908 case NLP_STE_MAPPED_NODE: 3915 case NLP_STE_MAPPED_NODE:
3909 vport->fc_map_cnt += count; 3916 vport->fc_map_cnt += count;
3910 break; 3917 break;
3911 case NLP_STE_NPR_NODE: 3918 case NLP_STE_NPR_NODE:
3912 vport->fc_npr_cnt += count; 3919 vport->fc_npr_cnt += count;
3913 break; 3920 break;
3914 } 3921 }
3915 spin_unlock_irq(shost->host_lock); 3922 spin_unlock_irq(shost->host_lock);
3916 } 3923 }
3917 3924
3918 static void 3925 static void
3919 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 3926 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3920 int old_state, int new_state) 3927 int old_state, int new_state)
3921 { 3928 {
3922 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 3929 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3923 3930
3924 if (new_state == NLP_STE_UNMAPPED_NODE) { 3931 if (new_state == NLP_STE_UNMAPPED_NODE) {
3925 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 3932 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
3926 ndlp->nlp_type |= NLP_FC_NODE; 3933 ndlp->nlp_type |= NLP_FC_NODE;
3927 } 3934 }
3928 if (new_state == NLP_STE_MAPPED_NODE) 3935 if (new_state == NLP_STE_MAPPED_NODE)
3929 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; 3936 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
3930 if (new_state == NLP_STE_NPR_NODE) 3937 if (new_state == NLP_STE_NPR_NODE)
3931 ndlp->nlp_flag &= ~NLP_RCV_PLOGI; 3938 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
3932 3939
3933 /* Transport interface */ 3940 /* Transport interface */
3934 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE || 3941 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
3935 old_state == NLP_STE_UNMAPPED_NODE)) { 3942 old_state == NLP_STE_UNMAPPED_NODE)) {
3936 vport->phba->nport_event_cnt++; 3943 vport->phba->nport_event_cnt++;
3937 lpfc_unregister_remote_port(ndlp); 3944 lpfc_unregister_remote_port(ndlp);
3938 } 3945 }
3939 3946
3940 if (new_state == NLP_STE_MAPPED_NODE || 3947 if (new_state == NLP_STE_MAPPED_NODE ||
3941 new_state == NLP_STE_UNMAPPED_NODE) { 3948 new_state == NLP_STE_UNMAPPED_NODE) {
3942 vport->phba->nport_event_cnt++; 3949 vport->phba->nport_event_cnt++;
3943 /* 3950 /*
3944 * Tell the fc transport about the port, if we haven't 3951 * Tell the fc transport about the port, if we haven't
3945 * already. If we have, and it's a scsi entity, be 3952 * already. If we have, and it's a scsi entity, be
3946 * sure to unblock any attached scsi devices 3953 * sure to unblock any attached scsi devices
3947 */ 3954 */
3948 lpfc_register_remote_port(vport, ndlp); 3955 lpfc_register_remote_port(vport, ndlp);
3949 } 3956 }
3950 if ((new_state == NLP_STE_MAPPED_NODE) && 3957 if ((new_state == NLP_STE_MAPPED_NODE) &&
3951 (vport->stat_data_enabled)) { 3958 (vport->stat_data_enabled)) {
3952 /* 3959 /*
3953 * A new target has been discovered; if there is no buffer 3960 * A new target has been discovered; if there is no buffer
3954 * for statistical data collection, allocate one. 3961 * for statistical data collection, allocate one.
3955 */ 3962 */
3956 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT, 3963 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
3957 sizeof(struct lpfc_scsicmd_bkt), 3964 sizeof(struct lpfc_scsicmd_bkt),
3958 GFP_KERNEL); 3965 GFP_KERNEL);
3959 3966
3960 if (!ndlp->lat_data) 3967 if (!ndlp->lat_data)
3961 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, 3968 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
3962 "0286 lpfc_nlp_state_cleanup failed to " 3969 "0286 lpfc_nlp_state_cleanup failed to "
3963 "allocate statistical data buffer DID " 3970 "allocate statistical data buffer DID "
3964 "0x%x\n", ndlp->nlp_DID); 3971 "0x%x\n", ndlp->nlp_DID);
3965 } 3972 }
3966 /* 3973 /*
3967 * If we added the node to the Mapped list but the remote 3974 * If we added the node to the Mapped list but the remote
3968 * port registration failed, or it was assigned a target id 3975 * port registration failed, or it was assigned a target id
3969 * outside our presentable range, move the node to the 3976 * outside our presentable range, move the node to the
3970 * Unmapped list. 3977 * Unmapped list.
3971 */ 3978 */
3972 if (new_state == NLP_STE_MAPPED_NODE && 3979 if (new_state == NLP_STE_MAPPED_NODE &&
3973 (!ndlp->rport || 3980 (!ndlp->rport ||
3974 ndlp->rport->scsi_target_id == -1 || 3981 ndlp->rport->scsi_target_id == -1 ||
3975 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { 3982 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
3976 spin_lock_irq(shost->host_lock); 3983 spin_lock_irq(shost->host_lock);
3977 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; 3984 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
3978 spin_unlock_irq(shost->host_lock); 3985 spin_unlock_irq(shost->host_lock);
3979 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 3986 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3980 } 3987 }
3981 } 3988 }
3982 3989
3983 static char * 3990 static char *
3984 lpfc_nlp_state_name(char *buffer, size_t size, int state) 3991 lpfc_nlp_state_name(char *buffer, size_t size, int state)
3985 { 3992 {
3986 static char *states[] = { 3993 static char *states[] = {
3987 [NLP_STE_UNUSED_NODE] = "UNUSED", 3994 [NLP_STE_UNUSED_NODE] = "UNUSED",
3988 [NLP_STE_PLOGI_ISSUE] = "PLOGI", 3995 [NLP_STE_PLOGI_ISSUE] = "PLOGI",
3989 [NLP_STE_ADISC_ISSUE] = "ADISC", 3996 [NLP_STE_ADISC_ISSUE] = "ADISC",
3990 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN", 3997 [NLP_STE_REG_LOGIN_ISSUE] = "REGLOGIN",
3991 [NLP_STE_PRLI_ISSUE] = "PRLI", 3998 [NLP_STE_PRLI_ISSUE] = "PRLI",
3992 [NLP_STE_LOGO_ISSUE] = "LOGO", 3999 [NLP_STE_LOGO_ISSUE] = "LOGO",
3993 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED", 4000 [NLP_STE_UNMAPPED_NODE] = "UNMAPPED",
3994 [NLP_STE_MAPPED_NODE] = "MAPPED", 4001 [NLP_STE_MAPPED_NODE] = "MAPPED",
3995 [NLP_STE_NPR_NODE] = "NPR", 4002 [NLP_STE_NPR_NODE] = "NPR",
3996 }; 4003 };
3997 4004
3998 if (state < NLP_STE_MAX_STATE && states[state]) 4005 if (state < NLP_STE_MAX_STATE && states[state])
3999 strlcpy(buffer, states[state], size); 4006 strlcpy(buffer, states[state], size);
4000 else 4007 else
4001 snprintf(buffer, size, "unknown (%d)", state); 4008 snprintf(buffer, size, "unknown (%d)", state);
4002 return buffer; 4009 return buffer;
4003 } 4010 }
4004 4011
4005 void 4012 void
4006 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4013 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4007 int state) 4014 int state)
4008 { 4015 {
4009 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4016 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4010 int old_state = ndlp->nlp_state; 4017 int old_state = ndlp->nlp_state;
4011 char name1[16], name2[16]; 4018 char name1[16], name2[16];
4012 4019
4013 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 4020 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
4014 "0904 NPort state transition x%06x, %s -> %s\n", 4021 "0904 NPort state transition x%06x, %s -> %s\n",
4015 ndlp->nlp_DID, 4022 ndlp->nlp_DID,
4016 lpfc_nlp_state_name(name1, sizeof(name1), old_state), 4023 lpfc_nlp_state_name(name1, sizeof(name1), old_state),
4017 lpfc_nlp_state_name(name2, sizeof(name2), state)); 4024 lpfc_nlp_state_name(name2, sizeof(name2), state));
4018 4025
4019 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 4026 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4020 "node statechg did:x%x old:%d ste:%d", 4027 "node statechg did:x%x old:%d ste:%d",
4021 ndlp->nlp_DID, old_state, state); 4028 ndlp->nlp_DID, old_state, state);
4022 4029
4023 if (old_state == NLP_STE_NPR_NODE && 4030 if (old_state == NLP_STE_NPR_NODE &&
4024 state != NLP_STE_NPR_NODE) 4031 state != NLP_STE_NPR_NODE)
4025 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4032 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4026 if (old_state == NLP_STE_UNMAPPED_NODE) { 4033 if (old_state == NLP_STE_UNMAPPED_NODE) {
4027 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; 4034 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
4028 ndlp->nlp_type &= ~NLP_FC_NODE; 4035 ndlp->nlp_type &= ~NLP_FC_NODE;
4029 } 4036 }
4030 4037
4031 if (list_empty(&ndlp->nlp_listp)) { 4038 if (list_empty(&ndlp->nlp_listp)) {
4032 spin_lock_irq(shost->host_lock); 4039 spin_lock_irq(shost->host_lock);
4033 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 4040 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4034 spin_unlock_irq(shost->host_lock); 4041 spin_unlock_irq(shost->host_lock);
4035 } else if (old_state) 4042 } else if (old_state)
4036 lpfc_nlp_counters(vport, old_state, -1); 4043 lpfc_nlp_counters(vport, old_state, -1);
4037 4044
4038 ndlp->nlp_state = state; 4045 ndlp->nlp_state = state;
4039 lpfc_nlp_counters(vport, state, 1); 4046 lpfc_nlp_counters(vport, state, 1);
4040 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state); 4047 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
4041 } 4048 }
4042 4049
4043 void 4050 void
4044 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4051 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4045 { 4052 {
4046 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4053 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4047 4054
4048 if (list_empty(&ndlp->nlp_listp)) { 4055 if (list_empty(&ndlp->nlp_listp)) {
4049 spin_lock_irq(shost->host_lock); 4056 spin_lock_irq(shost->host_lock);
4050 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes); 4057 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
4051 spin_unlock_irq(shost->host_lock); 4058 spin_unlock_irq(shost->host_lock);
4052 } 4059 }
4053 } 4060 }
4054 4061
4055 void 4062 void
4056 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4063 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4057 { 4064 {
4058 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4065 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4059 4066
4060 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4067 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4061 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 4068 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4062 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 4069 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4063 spin_lock_irq(shost->host_lock); 4070 spin_lock_irq(shost->host_lock);
4064 list_del_init(&ndlp->nlp_listp); 4071 list_del_init(&ndlp->nlp_listp);
4065 spin_unlock_irq(shost->host_lock); 4072 spin_unlock_irq(shost->host_lock);
4066 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 4073 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4067 NLP_STE_UNUSED_NODE); 4074 NLP_STE_UNUSED_NODE);
4068 } 4075 }
4069 4076
4070 static void 4077 static void
4071 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4078 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4072 { 4079 {
4073 lpfc_cancel_retry_delay_tmo(vport, ndlp); 4080 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4074 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp)) 4081 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
4075 lpfc_nlp_counters(vport, ndlp->nlp_state, -1); 4082 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
4076 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state, 4083 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
4077 NLP_STE_UNUSED_NODE); 4084 NLP_STE_UNUSED_NODE);
4078 } 4085 }
4079 /** 4086 /**
4080 * lpfc_initialize_node - Initialize all fields of node object 4087 * lpfc_initialize_node - Initialize all fields of node object
4081 * @vport: Pointer to Virtual Port object. 4088 * @vport: Pointer to Virtual Port object.
4082 * @ndlp: Pointer to FC node object. 4089 * @ndlp: Pointer to FC node object.
4083 * @did: FC_ID of the node. 4090 * @did: FC_ID of the node.
4084 * 4091 *
4085 * This function is always called when a node object needs to be 4092 * This function is always called when a node object needs to be
4086 * initialized; it initializes all the fields of the node object. 4093 * initialized; it initializes all the fields of the node object.
4087 * Although the reference to phba from @ndlp can be obtained indirectly 4094 * Although the reference to phba from @ndlp can be obtained indirectly
4088 * through its reference to @vport, a direct reference to phba is taken 4095 * through its reference to @vport, a direct reference to phba is taken
4089 * here by @ndlp. This is because the life-span of @ndlp might go beyond 4096 * here by @ndlp. This is because the life-span of @ndlp might go beyond
4090 * the existence of @vport, as the final release of the ndlp is determined 4097 * the existence of @vport, as the final release of the ndlp is determined
4091 * by its reference count, and operations on @ndlp need the reference to phba. 4098 * by its reference count, and operations on @ndlp need the reference to phba.
4092 **/ 4099 **/
4093 static inline void 4100 static inline void
4094 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4101 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4095 uint32_t did) 4102 uint32_t did)
4096 { 4103 {
4097 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); 4104 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
4098 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); 4105 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
4099 init_timer(&ndlp->nlp_delayfunc); 4106 init_timer(&ndlp->nlp_delayfunc);
4100 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay; 4107 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
4101 ndlp->nlp_delayfunc.data = (unsigned long)ndlp; 4108 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
4102 ndlp->nlp_DID = did; 4109 ndlp->nlp_DID = did;
4103 ndlp->vport = vport; 4110 ndlp->vport = vport;
4104 ndlp->phba = vport->phba; 4111 ndlp->phba = vport->phba;
4105 ndlp->nlp_sid = NLP_NO_SID; 4112 ndlp->nlp_sid = NLP_NO_SID;
4106 kref_init(&ndlp->kref); 4113 kref_init(&ndlp->kref);
4107 NLP_INT_NODE_ACT(ndlp); 4114 NLP_INT_NODE_ACT(ndlp);
4108 atomic_set(&ndlp->cmd_pending, 0); 4115 atomic_set(&ndlp->cmd_pending, 0);
4109 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth; 4116 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
4110 if (vport->phba->sli_rev == LPFC_SLI_REV4) 4117 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4111 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba); 4118 ndlp->nlp_rpi = lpfc_sli4_alloc_rpi(vport->phba);
4112 } 4119 }
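
Note that kref_init() above starts the node at a reference count of one; every asynchronous user then brackets its access with lpfc_nlp_get()/lpfc_nlp_put(), which is why each mailbox completion handler in this file ends with a put. A short sketch of the convention, using names as they appear throughout this file:

    mbox->context2 = lpfc_nlp_get(ndlp);    /* +1 before handing the node off */
    /* ... completion handler runs asynchronously ... */
    lpfc_nlp_put(ndlp);                     /* -1 in the handler once done */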
4113 4120
4114 struct lpfc_nodelist * 4121 struct lpfc_nodelist *
4115 lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 4122 lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4116 int state) 4123 int state)
4117 { 4124 {
4118 struct lpfc_hba *phba = vport->phba; 4125 struct lpfc_hba *phba = vport->phba;
4119 uint32_t did; 4126 uint32_t did;
4120 unsigned long flags; 4127 unsigned long flags;
4121 4128
4122 if (!ndlp) 4129 if (!ndlp)
4123 return NULL; 4130 return NULL;
4124 4131
4125 spin_lock_irqsave(&phba->ndlp_lock, flags); 4132 spin_lock_irqsave(&phba->ndlp_lock, flags);
4126 /* The ndlp should not be in memory free mode */ 4133 /* The ndlp should not be in memory free mode */
4127 if (NLP_CHK_FREE_REQ(ndlp)) { 4134 if (NLP_CHK_FREE_REQ(ndlp)) {
4128 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4135 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4129 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 4136 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4130 "0277 lpfc_enable_node: ndlp:x%p " 4137 "0277 lpfc_enable_node: ndlp:x%p "
4131 "usgmap:x%x refcnt:%d\n", 4138 "usgmap:x%x refcnt:%d\n",
4132 (void *)ndlp, ndlp->nlp_usg_map, 4139 (void *)ndlp, ndlp->nlp_usg_map,
4133 atomic_read(&ndlp->kref.refcount)); 4140 atomic_read(&ndlp->kref.refcount));
4134 return NULL; 4141 return NULL;
4135 } 4142 }
4136 /* The ndlp should not already be in active mode */ 4143 /* The ndlp should not already be in active mode */
4137 if (NLP_CHK_NODE_ACT(ndlp)) { 4144 if (NLP_CHK_NODE_ACT(ndlp)) {
4138 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4145 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4139 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, 4146 lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
4140 "0278 lpfc_enable_node: ndlp:x%p " 4147 "0278 lpfc_enable_node: ndlp:x%p "
4141 "usgmap:x%x refcnt:%d\n", 4148 "usgmap:x%x refcnt:%d\n",
4142 (void *)ndlp, ndlp->nlp_usg_map, 4149 (void *)ndlp, ndlp->nlp_usg_map,
4143 atomic_read(&ndlp->kref.refcount)); 4150 atomic_read(&ndlp->kref.refcount));
4144 return NULL; 4151 return NULL;
4145 } 4152 }
4146 4153
4147 /* Keep the original DID */ 4154 /* Keep the original DID */
4148 did = ndlp->nlp_DID; 4155 did = ndlp->nlp_DID;
4149 4156
4150 /* re-initialize ndlp except for the ndlp linked list pointer */ 4157 /* re-initialize ndlp except for the ndlp linked list pointer */
4151 memset((((char *)ndlp) + sizeof (struct list_head)), 0, 4158 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
4152 sizeof (struct lpfc_nodelist) - sizeof (struct list_head)); 4159 sizeof (struct lpfc_nodelist) - sizeof (struct list_head));
4153 lpfc_initialize_node(vport, ndlp, did); 4160 lpfc_initialize_node(vport, ndlp, did);
4154 4161
4155 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 4162 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
4156 4163
4157 if (state != NLP_STE_UNUSED_NODE) 4164 if (state != NLP_STE_UNUSED_NODE)
4158 lpfc_nlp_set_state(vport, ndlp, state); 4165 lpfc_nlp_set_state(vport, ndlp, state);
4159 4166
4160 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 4167 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
4161 "node enable: did:x%x", 4168 "node enable: did:x%x",
4162 ndlp->nlp_DID, 0, 0); 4169 ndlp->nlp_DID, 0, 0);
4163 return ndlp; 4170 return ndlp;
4164 } 4171 }
4165 4172
4166 void 4173 void
4167 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) 4174 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4168 { 4175 {
4169 /* 4176 /*
4170 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should 4177 * Use of lpfc_drop_node and UNUSED list: lpfc_drop_node should
4171 * be used if we wish to issue the "last" lpfc_nlp_put() to remove 4178 * be used if we wish to issue the "last" lpfc_nlp_put() to remove
4172 * the ndlp from the vport. The ndlp stays marked UNUSED on the 4179 * the ndlp from the vport. The ndlp stays marked UNUSED on the
4173 * list until ALL other outstanding threads have completed. We 4180 * list until ALL other outstanding threads have completed. We
4174 * check that the ndlp is not already UNUSED before we proceed. 4181 * check that the ndlp is not already UNUSED before we proceed.
4175 */ 4182 */
4176 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) 4183 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
4177 return; 4184 return;
4178 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE); 4185 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
4179 if (vport->phba->sli_rev == LPFC_SLI_REV4) 4186 if (vport->phba->sli_rev == LPFC_SLI_REV4)
4180 lpfc_cleanup_vports_rrqs(vport, ndlp); 4187 lpfc_cleanup_vports_rrqs(vport, ndlp);
4181 lpfc_nlp_put(ndlp); 4188 lpfc_nlp_put(ndlp);
4182 return; 4189 return;
4183 } 4190 }
4184 4191
4185 /* 4192 /*
4186 * Start / ReStart rescue timer for Discovery / RSCN handling 4193 * Start / ReStart rescue timer for Discovery / RSCN handling
4187 */ 4194 */
4188 void 4195 void
4189 lpfc_set_disctmo(struct lpfc_vport *vport) 4196 lpfc_set_disctmo(struct lpfc_vport *vport)
4190 { 4197 {
4191 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4198 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4192 struct lpfc_hba *phba = vport->phba; 4199 struct lpfc_hba *phba = vport->phba;
4193 uint32_t tmo; 4200 uint32_t tmo;
4194 4201
4195 if (vport->port_state == LPFC_LOCAL_CFG_LINK) { 4202 if (vport->port_state == LPFC_LOCAL_CFG_LINK) {
4196 /* For FAN, timeout should be greater than edtov */ 4203 /* For FAN, timeout should be greater than edtov */
4197 tmo = (((phba->fc_edtov + 999) / 1000) + 1); 4204 tmo = (((phba->fc_edtov + 999) / 1000) + 1);
4198 } else { 4205 } else {
4199 /* Normal discovery timeout should be greater than the ELS/CT 4206 /* Normal discovery timeout should be greater than the ELS/CT
4200 * timeout; the FC spec states we need 3 * ratov for CT requests. 4207 * timeout; the FC spec states we need 3 * ratov for CT requests.
4201 */ 4208 */
4202 tmo = ((phba->fc_ratov * 3) + 3); 4209 tmo = ((phba->fc_ratov * 3) + 3);
4203 } 4210 }
4204 4211
4205 4212
4206 if (!timer_pending(&vport->fc_disctmo)) { 4213 if (!timer_pending(&vport->fc_disctmo)) {
4207 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4214 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4208 "set disc timer: tmo:x%x state:x%x flg:x%x", 4215 "set disc timer: tmo:x%x state:x%x flg:x%x",
4209 tmo, vport->port_state, vport->fc_flag); 4216 tmo, vport->port_state, vport->fc_flag);
4210 } 4217 }
4211 4218
4212 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); 4219 mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo);
4213 spin_lock_irq(shost->host_lock); 4220 spin_lock_irq(shost->host_lock);
4214 vport->fc_flag |= FC_DISC_TMO; 4221 vport->fc_flag |= FC_DISC_TMO;
4215 spin_unlock_irq(shost->host_lock); 4222 spin_unlock_irq(shost->host_lock);
4216 4223
4217 /* Start Discovery Timer state <hba_state> */ 4224 /* Start Discovery Timer state <hba_state> */
4218 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4225 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4219 "0247 Start Discovery Timer state x%x " 4226 "0247 Start Discovery Timer state x%x "
4220 "Data: x%x x%lx x%x x%x\n", 4227 "Data: x%x x%lx x%x x%x\n",
4221 vport->port_state, tmo, 4228 vport->port_state, tmo,
4222 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt, 4229 (unsigned long)&vport->fc_disctmo, vport->fc_plogi_cnt,
4223 vport->fc_adisc_cnt); 4230 vport->fc_adisc_cnt);
4224 4231
4225 return; 4232 return;
4226 } 4233 }
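
As a worked example of the timeout math above, assuming a typical R_A_TOV of 10 seconds and an E_D_TOV of 2000 milliseconds (both are fabric-negotiated, so actual values may differ):

    /* Normal discovery: tmo = fc_ratov * 3 + 3 = 10 * 3 + 3 = 33 seconds.
     * FAN case:         tmo = (fc_edtov + 999) / 1000 + 1
     *                       = (2000 + 999) / 1000 + 1 = 3 seconds
     * (integer division; the +999 rounds edtov up to whole seconds).
     */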
4227 4234
4228 /* 4235 /*
4229 * Cancel rescue timer for Discovery / RSCN handling 4236 * Cancel rescue timer for Discovery / RSCN handling
4230 */ 4237 */
4231 int 4238 int
4232 lpfc_can_disctmo(struct lpfc_vport *vport) 4239 lpfc_can_disctmo(struct lpfc_vport *vport)
4233 { 4240 {
4234 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 4241 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4235 unsigned long iflags; 4242 unsigned long iflags;
4236 4243
4237 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 4244 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
4238 "can disc timer: state:x%x rtry:x%x flg:x%x", 4245 "can disc timer: state:x%x rtry:x%x flg:x%x",
4239 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 4246 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
4240 4247
4241 /* Turn off discovery timer if it is running */ 4248 /* Turn off discovery timer if it is running */
4242 if (vport->fc_flag & FC_DISC_TMO) { 4249 if (vport->fc_flag & FC_DISC_TMO) {
4243 spin_lock_irqsave(shost->host_lock, iflags); 4250 spin_lock_irqsave(shost->host_lock, iflags);
4244 vport->fc_flag &= ~FC_DISC_TMO; 4251 vport->fc_flag &= ~FC_DISC_TMO;
4245 spin_unlock_irqrestore(shost->host_lock, iflags); 4252 spin_unlock_irqrestore(shost->host_lock, iflags);
4246 del_timer_sync(&vport->fc_disctmo); 4253 del_timer_sync(&vport->fc_disctmo);
4247 spin_lock_irqsave(&vport->work_port_lock, iflags); 4254 spin_lock_irqsave(&vport->work_port_lock, iflags);
4248 vport->work_port_events &= ~WORKER_DISC_TMO; 4255 vport->work_port_events &= ~WORKER_DISC_TMO;
4249 spin_unlock_irqrestore(&vport->work_port_lock, iflags); 4256 spin_unlock_irqrestore(&vport->work_port_lock, iflags);
4250 } 4257 }
4251 4258
4252 /* Cancel Discovery Timer state <hba_state> */ 4259 /* Cancel Discovery Timer state <hba_state> */
4253 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, 4260 lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4254 "0248 Cancel Discovery Timer state x%x " 4261 "0248 Cancel Discovery Timer state x%x "
4255 "Data: x%x x%x x%x\n", 4262 "Data: x%x x%x x%x\n",
4256 vport->port_state, vport->fc_flag, 4263 vport->port_state, vport->fc_flag,
4257 vport->fc_plogi_cnt, vport->fc_adisc_cnt); 4264 vport->fc_plogi_cnt, vport->fc_adisc_cnt);
4258 return 0; 4265 return 0;
4259 } 4266 }
4260 4267
/*
 * Check specified ring for outstanding IOCB on the SLI queue
 * Return true if iocb matches the specified nport
 */
int
lpfc_check_sli_ndlp(struct lpfc_hba *phba,
		    struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *iocb,
		    struct lpfc_nodelist *ndlp)
{
	struct lpfc_sli *psli = &phba->sli;
	IOCB_t *icmd = &iocb->iocb;
	struct lpfc_vport *vport = ndlp->vport;

	if (iocb->vport != vport)
		return 0;

	if (pring->ringno == LPFC_ELS_RING) {
		switch (icmd->ulpCommand) {
		case CMD_GEN_REQUEST64_CR:
			if (iocb->context_un.ndlp == ndlp)
				return 1;
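			/* Fall thru */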
		case CMD_ELS_REQUEST64_CR:
			if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
				return 1;
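			/* Fall thru */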
		case CMD_XMIT_ELS_RSP64_CX:
			if (iocb->context1 == (uint8_t *) ndlp)
				return 1;
		}
	} else if (pring->ringno == psli->extra_ring) {

	} else if (pring->ringno == psli->fcp_ring) {
		/* Skip match check if waiting to relogin to FCP target */
		if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
		    (ndlp->nlp_flag & NLP_DELAY_TMO)) {
			return 0;
		}
		if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
			return 1;
		}
	} else if (pring->ringno == psli->next_ring) {

	}
	return 0;
}

/*
 * Free resources / clean up outstanding I/Os
 * associated with nlp_rpi in the LPFC_NODELIST entry.
 */
static int
lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint32_t i;

	lpfc_fabric_abort_nport(ndlp);

	/*
	 * Everything that matches on txcmplq will be returned
	 * by firmware with a no rpi error.
	 */
	psli = &phba->sli;
	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		/* Now process each ring */
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->ring[i];

			spin_lock_irq(&phba->hbalock);
			list_for_each_entry_safe(iocb, next_iocb, &pring->txq,
						 list) {
				/*
				 * Check to see if iocb matches the nport we
				 * are looking for
				 */
				if ((lpfc_check_sli_ndlp(phba, pring, iocb,
							 ndlp))) {
					/* It matches, so dequeue and call
					   compl with an error */
					list_move_tail(&iocb->list,
						       &completions);
					pring->txq_cnt--;
				}
			}
			spin_unlock_irq(&phba->hbalock);
		}
	}

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);

	return 0;
}

/**
 * lpfc_nlp_logo_unreg - Unreg mailbox completion handler before LOGO
 * @phba: Pointer to HBA context object.
 * @pmb: Pointer to mailbox object.
 *
 * This function will issue an ELS LOGO command after completing
 * the UNREG_RPI.
 **/
void
lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	struct lpfc_nodelist *ndlp;

	ndlp = (struct lpfc_nodelist *)(pmb->context1);
	if (!ndlp)
		return;
	lpfc_issue_els_logo(vport, ndlp, 0);
}

/*
 * Free rpi associated with LPFC_NODELIST entry.
 * This routine is called from lpfc_freenode(), when we are removing
 * a LPFC_NODELIST entry. It is also called if the driver initiates a
 * LOGO that completes successfully, and we are waiting to PLOGI back
 * to the remote NPort. In addition, it is called after we receive
 * an unsolicited ELS cmd, send back a rsp, the rsp completes and
 * we are waiting to PLOGI back to the remote NPort.
 */
int
lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;
	uint16_t rpi;

	if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (mbox) {
			/* SLI4 ports require the physical rpi value. */
			rpi = ndlp->nlp_rpi;
			if (phba->sli_rev == LPFC_SLI_REV4)
				rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

			lpfc_unreg_login(phba, vport->vpi, rpi, mbox);
			mbox->vport = vport;
			if (ndlp->nlp_flag & NLP_ISSUE_LOGO) {
				mbox->context1 = ndlp;
				mbox->mbox_cmpl = lpfc_nlp_logo_unreg;
			} else {
				mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
			}

			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED)
				mempool_free(mbox, phba->mbox_mem_pool);
		}
		lpfc_no_rpi(phba, ndlp);

		if (phba->sli_rev != LPFC_SLI_REV4)
			ndlp->nlp_rpi = 0;
		ndlp->nlp_flag &= ~NLP_RPI_REGISTERED;
		ndlp->nlp_flag &= ~NLP_NPR_ADISC;
		return 1;
	}
	return 0;
}

/**
 * lpfc_unreg_hba_rpis - Unregister rpis registered to the hba.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unregister all the currently registered RPIs
 * to the HBA.
 **/
void
lpfc_unreg_hba_rpis(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (!vports) {
		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
				"2884 Vport array allocation failed\n");
		return;
	}
	for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
		shost = lpfc_shost_from_vport(vports[i]);
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
			if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
				/* The mempool_alloc might sleep */
				spin_unlock_irq(shost->host_lock);
				lpfc_unreg_rpi(vports[i], ndlp);
				spin_lock_irq(shost->host_lock);
			}
		}
		spin_unlock_irq(shost->host_lock);
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

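/*
 * Unregister all RPIs on a single vport. SLI4 ports have a dedicated
 * path (lpfc_sli4_unreg_all_rpis); earlier ports issue one UNREG_LOGIN
 * with the LPFC_UNREG_ALL_RPIS_VPORT wildcard.
 */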
void
lpfc_unreg_all_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli4_unreg_all_rpis(vport);
		return;
	}

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_login(phba, vport->vpi, LPFC_UNREG_ALL_RPIS_VPORT,
				 mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
				"1836 Could not issue "
				"unreg_login(all_rpis) status %d\n", rc);
	}
}

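/*
 * Unregister the default RPIs the firmware allocated for this vport,
 * using an UNREG_DID with the LPFC_UNREG_ALL_DFLT_RPIS wildcard.
 */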
void
lpfc_unreg_default_rpis(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox) {
		lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
			       mbox);
		mbox->vport = vport;
		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		mbox->context1 = NULL;
		rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
		if (rc != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);

		if ((rc == MBX_TIMEOUT) || (rc == MBX_NOT_FINISHED))
			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX | LOG_VPORT,
					 "1815 Could not issue "
					 "unreg_did (default rpis) status %d\n",
					 rc);
	}
}

/*
 * Free resources associated with LPFC_NODELIST entry
 * so it can be freed.
 */
static int
lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mb, *nextmb;
	struct lpfc_dmabuf *mp;

	/* Cleanup node for NPort <nlp_DID> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0900 Cleanup node for NPort x%x "
			 "Data: x%x x%x x%x\n",
			 ndlp->nlp_DID, ndlp->nlp_flag,
			 ndlp->nlp_state, ndlp->nlp_rpi);
	if (NLP_CHK_FREE_REQ(ndlp)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0280 lpfc_cleanup_node: ndlp:x%p "
				 "usgmap:x%x refcnt:%d\n",
				 (void *)ndlp, ndlp->nlp_usg_map,
				 atomic_read(&ndlp->kref.refcount));
		lpfc_dequeue_node(vport, ndlp);
	} else {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE,
				 "0281 lpfc_cleanup_node: ndlp:x%p "
				 "usgmap:x%x refcnt:%d\n",
				 (void *)ndlp, ndlp->nlp_usg_map,
				 atomic_read(&ndlp->kref.refcount));
		lpfc_disable_node(vport, ndlp);
	}


	/* Don't need to clean up REG_LOGIN64 cmds for Default RPI cleanup */

	/* cleanup any ndlp on mbox q waiting for reglogin cmpl */
	if ((mb = phba->sli.mbox_active)) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mb->context2 = NULL;
			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		}
	}

	spin_lock_irq(&phba->hbalock);
	/* Cleanup REG_LOGIN completions which are not yet processed */
	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
		    (mb->mbox_flag & LPFC_MBX_IMED_UNREG) ||
		    (ndlp != (struct lpfc_nodelist *) mb->context2))
			continue;

		mb->context2 = NULL;
		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	}

	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
		    !(mb->mbox_flag & LPFC_MBX_IMED_UNREG) &&
		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
			mp = (struct lpfc_dmabuf *) (mb->context1);
			if (mp) {
				__lpfc_mbuf_free(phba, mp->virt, mp->phys);
				kfree(mp);
			}
			list_del(&mb->list);
			mempool_free(mb, phba->mbox_mem_pool);
			/* We shall not invoke the lpfc_nlp_put to decrement
			 * the ndlp reference count as we are in the process
			 * of lpfc_nlp_release.
			 */
		}
	}
	spin_unlock_irq(&phba->hbalock);

	lpfc_els_abort(phba, ndlp);

	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	ndlp->nlp_last_elscmd = 0;
	del_timer_sync(&ndlp->nlp_delayfunc);

	list_del_init(&ndlp->els_retry_evt.evt_listp);
	list_del_init(&ndlp->dev_loss_evt.evt_listp);
	lpfc_cleanup_vports_rrqs(vport, ndlp);
	lpfc_unreg_rpi(vport, ndlp);

	return 0;
}

/*
 * Check to see if we can free the nlp back to the freelist.
 * If we are in the middle of using the nlp in the discovery state
 * machine, defer the free till we reach the end of the state machine.
 */
static void
lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata;
	LPFC_MBOXQ_t *mbox;
	int rc;

	lpfc_cancel_retry_delay_tmo(vport, ndlp);
	if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
	    !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
	    !(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
		/* For this case we need to cleanup the default rpi
		 * allocated by the firmware.
		 */
		if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
			!= NULL) {
			rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
			    (uint8_t *) &vport->fc_sparam, mbox, ndlp->nlp_rpi);
			if (rc) {
				mempool_free(mbox, phba->mbox_mem_pool);
			}
			else {
				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
				mbox->vport = vport;
				mbox->context2 = ndlp;
				rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
				if (rc == MBX_NOT_FINISHED) {
					mempool_free(mbox, phba->mbox_mem_pool);
				}
			}
		}
	}
	lpfc_cleanup_node(vport, ndlp);

	/*
	 * We can get here with a non-NULL ndlp->rport because when we
	 * unregister a rport we don't break the rport/node linkage. So if we
	 * do, make sure we don't leave any dangling pointers behind.
	 */
	if (ndlp->rport) {
		rdata = ndlp->rport->dd_data;
		rdata->pnode = NULL;
		ndlp->rport = NULL;
	}
}

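/*
 * Match a node against a DID. Besides a direct compare, this also
 * matches when only the low (ALPA) byte is significant, i.e. when
 * either side's domain and area bytes are zero, which covers
 * addresses learned while on a private loop before the fabric
 * assigned a full 24-bit address.
 */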
static int
lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      uint32_t did)
{
	D_ID mydid, ndlpdid, matchdid;

	if (did == Bcast_DID)
		return 0;

	/* First check for Direct match */
	if (ndlp->nlp_DID == did)
		return 1;

	/* Next check for area/domain identically equals 0 match */
	mydid.un.word = vport->fc_myDID;
	if ((mydid.un.b.domain == 0) && (mydid.un.b.area == 0)) {
		return 0;
	}

	matchdid.un.word = did;
	ndlpdid.un.word = ndlp->nlp_DID;
	if (matchdid.un.b.id == ndlpdid.un.b.id) {
		if ((mydid.un.b.domain == matchdid.un.b.domain) &&
		    (mydid.un.b.area == matchdid.un.b.area)) {
			if ((ndlpdid.un.b.domain == 0) &&
			    (ndlpdid.un.b.area == 0)) {
				if (ndlpdid.un.b.id)
					return 1;
			}
			return 0;
		}

		matchdid.un.word = ndlp->nlp_DID;
		if ((mydid.un.b.domain == ndlpdid.un.b.domain) &&
		    (mydid.un.b.area == ndlpdid.un.b.area)) {
			if ((matchdid.un.b.domain == 0) &&
			    (matchdid.un.b.area == 0)) {
				if (matchdid.un.b.id)
					return 1;
			}
		}
	}
	return 0;
}

/* Search for a nodelist entry */
static struct lpfc_nodelist *
__lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct lpfc_nodelist *ndlp;
	uint32_t data1;

	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
		if (lpfc_matchdid(vport, ndlp, did)) {
			data1 = (((uint32_t) ndlp->nlp_state << 24) |
				 ((uint32_t) ndlp->nlp_xri << 16) |
				 ((uint32_t) ndlp->nlp_type << 8) |
				 ((uint32_t) ndlp->nlp_rpi & 0xff));
			lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
					 "0929 FIND node DID "
					 "Data: x%p x%x x%x x%x\n",
					 ndlp, ndlp->nlp_DID,
					 ndlp->nlp_flag, data1);
			return ndlp;
		}
	}

	/* FIND node did <did> NOT FOUND */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
			 "0932 FIND node did x%x NOT FOUND.\n", did);
	return NULL;
}

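/* Locked wrapper: takes the host lock around __lpfc_findnode_did() */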
struct lpfc_nodelist *
lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;
	unsigned long iflags;

	spin_lock_irqsave(shost->host_lock, iflags);
	ndlp = __lpfc_findnode_did(vport, did);
	spin_unlock_irqrestore(shost->host_lock, iflags);
	return ndlp;
}

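/*
 * Set up a node for discovery: find (or allocate and init) the
 * nodelist entry for this DID and mark it NLP_NPR_2B_DISC. Returns
 * NULL when the DID need not be discovered, e.g. it is outside the
 * current RSCN payload or a PLOGI was already received from it.
 */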
struct lpfc_nodelist *
lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_nodelist *ndlp;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		if ((vport->fc_flag & FC_RSCN_MODE) != 0 &&
		    lpfc_rscn_payload_check(vport, did) == 0)
			return NULL;
		ndlp = (struct lpfc_nodelist *)
		     mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
		if (!ndlp)
			return NULL;
		lpfc_nlp_init(vport, ndlp, did);
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
		if (!ndlp)
			return NULL;
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
		return ndlp;
	}

	if ((vport->fc_flag & FC_RSCN_MODE) &&
	    !(vport->fc_flag & FC_NDISC_ACTIVE)) {
		if (lpfc_rscn_payload_check(vport, did)) {
			/* If we've already received a PLOGI from this NPort
			 * we don't need to try to discover it again.
			 */
			if (ndlp->nlp_flag & NLP_RCV_PLOGI)
				return NULL;

			/* Since this node is marked for discovery,
			 * delay timeout is not needed.
			 */
			lpfc_cancel_retry_delay_tmo(vport, ndlp);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_NPR_2B_DISC;
			spin_unlock_irq(shost->host_lock);
		} else
			ndlp = NULL;
	} else {
		/* If we've already received a PLOGI from this NPort,
		 * or we are already in the process of discovery on it,
		 * we don't need to try to discover it again.
		 */
		if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
		    ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
		    ndlp->nlp_flag & NLP_RCV_PLOGI)
			return NULL;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
		spin_lock_irq(shost->host_lock);
		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
		spin_unlock_irq(shost->host_lock);
	}
	return ndlp;
}

/* Build a list of nodes to discover based on the loopmap */
void
lpfc_disc_list_loopmap(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	int j;
	uint32_t alpa, index;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->fc_topology != LPFC_TOPOLOGY_LOOP)
		return;

	/* Check for loop map present or not */
	if (phba->alpa_map[0]) {
		for (j = 1; j <= phba->alpa_map[0]; j++) {
			alpa = phba->alpa_map[j];
			if (((vport->fc_myDID & 0xff) == alpa) || (alpa == 0))
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	} else {
		/* No alpamap, so try all alpa's */
		for (j = 0; j < FC_MAXLOOP; j++) {
			/* If cfg_scan_down is set, start from highest
			 * ALPA (0xef) to lowest (0x1).
			 */
			if (vport->cfg_scan_down)
				index = j;
			else
				index = FC_MAXLOOP - j - 1;
			alpa = lpfcAlpaArray[index];
			if ((vport->fc_myDID & 0xff) == alpa)
				continue;
			lpfc_setup_disc_node(vport, alpa);
		}
	}
	return;
}

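/*
 * Issue a CLEAR_LA mailbox command. Only sent once per link-up, and
 * only by the physical port on pre-SLI4 hardware; on failure the
 * discovery lists are flushed and the HBA is marked in error.
 */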
void
lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *extra_ring = &psli->ring[psli->extra_ring];
	struct lpfc_sli_ring *fcp_ring = &psli->ring[psli->fcp_ring];
	struct lpfc_sli_ring *next_ring = &psli->ring[psli->next_ring];
	int rc;

	/*
	 * if it's not a physical port or if we already sent
	 * clear_la then don't send it.
	 */
	if ((phba->link_state >= LPFC_CLEAR_LA) ||
	    (vport->port_type != LPFC_PHYSICAL_PORT) ||
	    (phba->sli_rev == LPFC_SLI_REV4))
		return;

	/* Link up discovery */
	if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL)) != NULL) {
		phba->link_state = LPFC_CLEAR_LA;
		lpfc_clear_la(phba, mbox);
		mbox->mbox_cmpl = lpfc_mbx_cmpl_clear_la;
		mbox->vport = vport;
		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(mbox, phba->mbox_mem_pool);
			lpfc_disc_flush_list(vport);
			extra_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			fcp_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			next_ring->flag &= ~LPFC_STOP_IOCB_EVENT;
			phba->link_state = LPFC_HBA_ERROR;
		}
	}
}

/* Reg_vpi to tell firmware to resume normal operations */
void
lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *regvpimbox;

	regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (regvpimbox) {
		lpfc_reg_vpi(vport, regvpimbox);
		regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
		regvpimbox->vport = vport;
		if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
					== MBX_NOT_FINISHED) {
			mempool_free(regvpimbox, phba->mbox_mem_pool);
		}
	}
}

/* Start Link up / RSCN discovery on NPR nodes */
void
lpfc_disc_start(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba *phba = vport->phba;
	uint32_t num_sent;
	uint32_t clear_la_pending;
	int did_changed;

	if (!lpfc_is_link_up(phba))
		return;

	if (phba->link_state == LPFC_CLEAR_LA)
		clear_la_pending = 1;
	else
		clear_la_pending = 0;

	if (vport->port_state < LPFC_VPORT_READY)
		vport->port_state = LPFC_DISC_AUTH;

	lpfc_set_disctmo(vport);

	if (vport->fc_prevDID == vport->fc_myDID)
		did_changed = 0;
	else
		did_changed = 1;

	vport->fc_prevDID = vport->fc_myDID;
	vport->num_disc_nodes = 0;

	/* Start Discovery state <hba_state> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0202 Start Discovery hba state x%x "
			 "Data: x%x x%x x%x\n",
			 vport->port_state, vport->fc_flag, vport->fc_plogi_cnt,
			 vport->fc_adisc_cnt);

	/* First do ADISCs - if any */
	num_sent = lpfc_els_disc_adisc(vport);

	if (num_sent)
		return;

	/* Register the VPI for SLI3, NON-NPIV only. */
	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
	    !(vport->fc_flag & FC_PT2PT) &&
	    !(vport->fc_flag & FC_RSCN_MODE) &&
	    (phba->sli_rev < LPFC_SLI_REV4)) {
		lpfc_issue_reg_vpi(phba, vport);
		return;
	}

	/*
	 * For SLI2, we need to set port_state to READY and continue
	 * discovery.
	 */
	if (vport->port_state < LPFC_VPORT_READY && !clear_la_pending) {
		/* If we get here, there is nothing to ADISC */
		if (vport->port_type == LPFC_PHYSICAL_PORT)
			lpfc_issue_clear_la(phba, vport);

		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
			vport->num_disc_nodes = 0;
			/* go thru NPR nodes and issue ELS PLOGIs */
			if (vport->fc_npr_cnt)
				lpfc_els_disc_plogi(vport);

			if (!vport->num_disc_nodes) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_NDISC_ACTIVE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			}
		}
		vport->port_state = LPFC_VPORT_READY;
	} else {
		/* Next do PLOGIs - if any */
		num_sent = lpfc_els_disc_plogi(vport);

		if (num_sent)
			return;

		if (vport->fc_flag & FC_RSCN_MODE) {
			/* Check to see if more RSCNs came in while we
			 * were processing this one.
			 */
			if ((vport->fc_rscn_id_cnt == 0) &&
			    (!(vport->fc_flag & FC_RSCN_DISCOVERY))) {
				spin_lock_irq(shost->host_lock);
				vport->fc_flag &= ~FC_RSCN_MODE;
				spin_unlock_irq(shost->host_lock);
				lpfc_can_disctmo(vport);
			} else
				lpfc_els_handle_rscn(vport);
		}
	}
	return;
}

/*
 * Ignore completion for all IOCBs on tx and txcmpl queue for ELS
 * ring that match the specified nodelist.
 */
static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
	LIST_HEAD(completions);
	struct lpfc_sli *psli;
	IOCB_t *icmd;
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_sli_ring *pring;

	psli = &phba->sli;
	pring = &psli->ring[LPFC_ELS_RING];

	/* Error matching iocb on txq or txcmplq
	 * First check the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if ((icmd->ulpCommand == CMD_ELS_REQUEST64_CR) ||
		    (icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX)) {

			list_move_tail(&iocb->list, &completions);
			pring->txq_cnt--;
		}
	}

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		if (iocb->context1 != ndlp) {
			continue;
		}
		icmd = &iocb->iocb;
		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR ||
		    icmd->ulpCommand == CMD_XMIT_ELS_RSP64_CX) {
			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
		}
	}
	spin_unlock_irq(&phba->hbalock);

	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_ABORTED);
}

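/*
 * Flush ELS traffic for every node on this vport that still has a
 * PLOGI or ADISC outstanding.
 */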
static void
lpfc_disc_flush_list(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	struct lpfc_hba *phba = vport->phba;

	if (vport->fc_plogi_cnt || vport->fc_adisc_cnt) {
		list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
					 nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
			    ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
				lpfc_free_tx(phba, ndlp);
			}
		}
	}
}

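/* Tear down all outstanding discovery state: RSCNs, ELS commands and
 * the in-flight PLOGI/ADISC list.
 */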
void
lpfc_cleanup_discovery_resources(struct lpfc_vport *vport)
{
	lpfc_els_flush_rscn(vport);
	lpfc_els_flush_cmd(vport);
	lpfc_disc_flush_list(vport);
}

/*****************************************************************************/
/*
 * NAME:     lpfc_disc_timeout
 *
 * FUNCTION: Fibre Channel driver discovery timeout routine.
 *
 * EXECUTION ENVIRONMENT: interrupt only
 *
 * CALLED FROM:
 *      Timer function
 *
 * RETURNS:
 *      none
 */
/*****************************************************************************/
void
lpfc_disc_timeout(unsigned long ptr)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
	struct lpfc_hba *phba = vport->phba;
	uint32_t tmo_posted;
	unsigned long flags = 0;

	if (unlikely(!phba))
		return;

	spin_lock_irqsave(&vport->work_port_lock, flags);
	tmo_posted = vport->work_port_events & WORKER_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, flags);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

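/*
 * Worker-thread handler for the discovery timeout: clears FC_DISC_TMO
 * and takes recovery action keyed off vport->port_state, i.e. the
 * phase in which discovery stalled.
 */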
5132 static void 5139 static void
5133 lpfc_disc_timeout_handler(struct lpfc_vport *vport) 5140 lpfc_disc_timeout_handler(struct lpfc_vport *vport)
5134 { 5141 {
5135 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5142 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5136 struct lpfc_hba *phba = vport->phba; 5143 struct lpfc_hba *phba = vport->phba;
5137 struct lpfc_sli *psli = &phba->sli; 5144 struct lpfc_sli *psli = &phba->sli;
5138 struct lpfc_nodelist *ndlp, *next_ndlp; 5145 struct lpfc_nodelist *ndlp, *next_ndlp;
5139 LPFC_MBOXQ_t *initlinkmbox; 5146 LPFC_MBOXQ_t *initlinkmbox;
5140 int rc, clrlaerr = 0; 5147 int rc, clrlaerr = 0;
5141 5148
5142 if (!(vport->fc_flag & FC_DISC_TMO)) 5149 if (!(vport->fc_flag & FC_DISC_TMO))
5143 return; 5150 return;
5144 5151
5145 spin_lock_irq(shost->host_lock); 5152 spin_lock_irq(shost->host_lock);
5146 vport->fc_flag &= ~FC_DISC_TMO; 5153 vport->fc_flag &= ~FC_DISC_TMO;
5147 spin_unlock_irq(shost->host_lock); 5154 spin_unlock_irq(shost->host_lock);
5148 5155
5149 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, 5156 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
5150 "disc timeout: state:x%x rtry:x%x flg:x%x", 5157 "disc timeout: state:x%x rtry:x%x flg:x%x",
5151 vport->port_state, vport->fc_ns_retry, vport->fc_flag); 5158 vport->port_state, vport->fc_ns_retry, vport->fc_flag);
5152 5159
5153 switch (vport->port_state) { 5160 switch (vport->port_state) {
5154 5161
5155 case LPFC_LOCAL_CFG_LINK: 5162 case LPFC_LOCAL_CFG_LINK:
5156 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for 5163 /* port_state is identically LPFC_LOCAL_CFG_LINK while waiting for
5157 * FAN 5164 * FAN
5158 */ 5165 */
5159 /* FAN timeout */ 5166 /* FAN timeout */
5160 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, 5167 lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
5161 "0221 FAN timeout\n"); 5168 "0221 FAN timeout\n");
5162 /* Start discovery by sending FLOGI, clean up old rpis */ 5169 /* Start discovery by sending FLOGI, clean up old rpis */
5163 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, 5170 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
5164 nlp_listp) { 5171 nlp_listp) {
5165 if (!NLP_CHK_NODE_ACT(ndlp)) 5172 if (!NLP_CHK_NODE_ACT(ndlp))
5166 continue; 5173 continue;
5167 if (ndlp->nlp_state != NLP_STE_NPR_NODE) 5174 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
5168 continue; 5175 continue;
5169 if (ndlp->nlp_type & NLP_FABRIC) { 5176 if (ndlp->nlp_type & NLP_FABRIC) {
5170 /* Clean up the ndlp on Fabric connections */ 5177 /* Clean up the ndlp on Fabric connections */
5171 lpfc_drop_node(vport, ndlp); 5178 lpfc_drop_node(vport, ndlp);
5172 5179
5173 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { 5180 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
5174 /* Fail outstanding IO now since device 5181 /* Fail outstanding IO now since device
5175 * is marked for PLOGI. 5182 * is marked for PLOGI.
5176 */ 5183 */
5177 lpfc_unreg_rpi(vport, ndlp); 5184 lpfc_unreg_rpi(vport, ndlp);
5178 } 5185 }
5179 } 5186 }
5180 if (vport->port_state != LPFC_FLOGI) { 5187 if (vport->port_state != LPFC_FLOGI) {
5181 if (phba->sli_rev <= LPFC_SLI_REV3) 5188 if (phba->sli_rev <= LPFC_SLI_REV3)
5182 lpfc_initial_flogi(vport); 5189 lpfc_initial_flogi(vport);
5183 else 5190 else
5184 lpfc_issue_init_vfi(vport); 5191 lpfc_issue_init_vfi(vport);
5185 return; 5192 return;
5186 } 5193 }
5187 break; 5194 break;
5188 5195
5189 case LPFC_FDISC: 5196 case LPFC_FDISC:
5190 case LPFC_FLOGI: 5197 case LPFC_FLOGI:
5191 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */ 5198 /* port_state is identically LPFC_FLOGI while waiting for FLOGI cmpl */
5192 /* Initial FLOGI timeout */ 5199 /* Initial FLOGI timeout */
5193 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5200 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5194 "0222 Initial %s timeout\n", 5201 "0222 Initial %s timeout\n",
5195 vport->vpi ? "FDISC" : "FLOGI"); 5202 vport->vpi ? "FDISC" : "FLOGI");
5196 5203
5197 /* Assume no Fabric and go on with discovery. 5204 /* Assume no Fabric and go on with discovery.
5198 * Check for outstanding ELS FLOGI to abort. 5205 * Check for outstanding ELS FLOGI to abort.
5199 */ 5206 */
5200 5207
5201 /* FLOGI failed, so just use loop map to make discovery list */ 5208 /* FLOGI failed, so just use loop map to make discovery list */
5202 lpfc_disc_list_loopmap(vport); 5209 lpfc_disc_list_loopmap(vport);
5203 5210
5204 /* Start discovery */ 5211 /* Start discovery */
5205 lpfc_disc_start(vport); 5212 lpfc_disc_start(vport);
5206 break; 5213 break;
5207 5214
5208 case LPFC_FABRIC_CFG_LINK: 5215 case LPFC_FABRIC_CFG_LINK:
5209 /* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for 5216 /* port_state is identically LPFC_FABRIC_CFG_LINK while waiting for
5210 NameServer login */ 5217 NameServer login */
5211 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5218 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5212 "0223 Timeout while waiting for " 5219 "0223 Timeout while waiting for "
5213 "NameServer login\n"); 5220 "NameServer login\n");
5214 /* Next look for NameServer ndlp */ 5221 /* Next look for NameServer ndlp */
5215 ndlp = lpfc_findnode_did(vport, NameServer_DID); 5222 ndlp = lpfc_findnode_did(vport, NameServer_DID);
5216 if (ndlp && NLP_CHK_NODE_ACT(ndlp)) 5223 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5217 lpfc_els_abort(phba, ndlp); 5224 lpfc_els_abort(phba, ndlp);
5218 5225
5219 /* Restart discovery */ 5226 /* Restart discovery */
5220 goto restart_disc; 5227 goto restart_disc;
5221 5228
5222 case LPFC_NS_QRY: 5229 case LPFC_NS_QRY:
5223 /* Check for wait for NameServer Rsp timeout */ 5230 /* Check for wait for NameServer Rsp timeout */
5224 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5231 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5225 "0224 NameServer Query timeout " 5232 "0224 NameServer Query timeout "
5226 "Data: x%x x%x\n", 5233 "Data: x%x x%x\n",
5227 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 5234 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5228 5235
5229 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { 5236 if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
5230 /* Try it one more time */ 5237 /* Try it one more time */
5231 vport->fc_ns_retry++; 5238 vport->fc_ns_retry++;
5232 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 5239 rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
5233 vport->fc_ns_retry, 0); 5240 vport->fc_ns_retry, 0);
5234 if (rc == 0) 5241 if (rc == 0)
5235 break; 5242 break;
5236 } 5243 }
5237 vport->fc_ns_retry = 0; 5244 vport->fc_ns_retry = 0;
5238 5245
5239 restart_disc: 5246 restart_disc:
5240 /* 5247 /*
5241 * Discovery is over. 5248 * Discovery is over.
5242 * set port_state to PORT_READY if SLI2. 5249 * set port_state to PORT_READY if SLI2.
5243 * cmpl_reg_vpi will set port_state to READY for SLI3. 5250 * cmpl_reg_vpi will set port_state to READY for SLI3.
5244 */ 5251 */
5245 if (phba->sli_rev < LPFC_SLI_REV4) { 5252 if (phba->sli_rev < LPFC_SLI_REV4) {
5246 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 5253 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5247 lpfc_issue_reg_vpi(phba, vport); 5254 lpfc_issue_reg_vpi(phba, vport);
5248 else { 5255 else {
5249 lpfc_issue_clear_la(phba, vport); 5256 lpfc_issue_clear_la(phba, vport);
5250 vport->port_state = LPFC_VPORT_READY; 5257 vport->port_state = LPFC_VPORT_READY;
5251 } 5258 }
5252 } 5259 }
5253 5260
5254 /* Setup and issue mailbox INITIALIZE LINK command */ 5261 /* Setup and issue mailbox INITIALIZE LINK command */
5255 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5262 initlinkmbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5256 if (!initlinkmbox) { 5263 if (!initlinkmbox) {
5257 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5264 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5258 "0206 Device Discovery " 5265 "0206 Device Discovery "
5259 "completion error\n"); 5266 "completion error\n");
5260 phba->link_state = LPFC_HBA_ERROR; 5267 phba->link_state = LPFC_HBA_ERROR;
5261 break; 5268 break;
5262 } 5269 }
5263 5270
5264 lpfc_linkdown(phba); 5271 lpfc_linkdown(phba);
5265 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology, 5272 lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
5266 phba->cfg_link_speed); 5273 phba->cfg_link_speed);
5267 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0; 5274 initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
5268 initlinkmbox->vport = vport; 5275 initlinkmbox->vport = vport;
5269 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 5276 initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5270 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT); 5277 rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
5271 lpfc_set_loopback_flag(phba); 5278 lpfc_set_loopback_flag(phba);
5272 if (rc == MBX_NOT_FINISHED) 5279 if (rc == MBX_NOT_FINISHED)
5273 mempool_free(initlinkmbox, phba->mbox_mem_pool); 5280 mempool_free(initlinkmbox, phba->mbox_mem_pool);
5274 5281
5275 break; 5282 break;
5276 5283
5277 case LPFC_DISC_AUTH: 5284 case LPFC_DISC_AUTH:
5278 /* Node Authentication timeout */ 5285 /* Node Authentication timeout */
5279 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5286 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5280 "0227 Node Authentication timeout\n"); 5287 "0227 Node Authentication timeout\n");
5281 lpfc_disc_flush_list(vport); 5288 lpfc_disc_flush_list(vport);
5282 5289
5283 /* 5290 /*
5284 * set port_state to PORT_READY if SLI2. 5291 * set port_state to PORT_READY if SLI2.
5285 * cmpl_reg_vpi will set port_state to READY for SLI3. 5292 * cmpl_reg_vpi will set port_state to READY for SLI3.
5286 */ 5293 */
5287 if (phba->sli_rev < LPFC_SLI_REV4) { 5294 if (phba->sli_rev < LPFC_SLI_REV4) {
5288 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) 5295 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
5289 lpfc_issue_reg_vpi(phba, vport); 5296 lpfc_issue_reg_vpi(phba, vport);
5290 else { /* NPIV Not enabled */ 5297 else { /* NPIV Not enabled */
5291 lpfc_issue_clear_la(phba, vport); 5298 lpfc_issue_clear_la(phba, vport);
5292 vport->port_state = LPFC_VPORT_READY; 5299 vport->port_state = LPFC_VPORT_READY;
5293 } 5300 }
5294 } 5301 }
5295 break; 5302 break;
5296 5303
5297 case LPFC_VPORT_READY: 5304 case LPFC_VPORT_READY:
5298 if (vport->fc_flag & FC_RSCN_MODE) { 5305 if (vport->fc_flag & FC_RSCN_MODE) {
5299 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5306 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5300 "0231 RSCN timeout Data: x%x " 5307 "0231 RSCN timeout Data: x%x "
5301 "x%x\n", 5308 "x%x\n",
5302 vport->fc_ns_retry, LPFC_MAX_NS_RETRY); 5309 vport->fc_ns_retry, LPFC_MAX_NS_RETRY);
5303 5310
5304 /* Cleanup any outstanding ELS commands */ 5311 /* Cleanup any outstanding ELS commands */
5305 lpfc_els_flush_cmd(vport); 5312 lpfc_els_flush_cmd(vport);
5306 5313
5307 lpfc_els_flush_rscn(vport); 5314 lpfc_els_flush_rscn(vport);
5308 lpfc_disc_flush_list(vport); 5315 lpfc_disc_flush_list(vport);
5309 } 5316 }
5310 break; 5317 break;
5311 5318
5312 default: 5319 default:
5313 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5320 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5314 "0273 Unexpected discovery timeout, " 5321 "0273 Unexpected discovery timeout, "
5315 "vport State x%x\n", vport->port_state); 5322 "vport State x%x\n", vport->port_state);
5316 break; 5323 break;
5317 } 5324 }
5318 5325
5319 switch (phba->link_state) { 5326 switch (phba->link_state) {
5320 case LPFC_CLEAR_LA: 5327 case LPFC_CLEAR_LA:
5321 /* CLEAR LA timeout */ 5328 /* CLEAR LA timeout */
5322 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5329 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5323 "0228 CLEAR LA timeout\n"); 5330 "0228 CLEAR LA timeout\n");
5324 clrlaerr = 1; 5331 clrlaerr = 1;
5325 break; 5332 break;
5326 5333
5327 case LPFC_LINK_UP: 5334 case LPFC_LINK_UP:
5328 lpfc_issue_clear_la(phba, vport); 5335 lpfc_issue_clear_la(phba, vport);
5329 /* Drop thru */ 5336 /* Drop thru */
5330 case LPFC_LINK_UNKNOWN: 5337 case LPFC_LINK_UNKNOWN:
5331 case LPFC_WARM_START: 5338 case LPFC_WARM_START:
5332 case LPFC_INIT_START: 5339 case LPFC_INIT_START:
5333 case LPFC_INIT_MBX_CMDS: 5340 case LPFC_INIT_MBX_CMDS:
5334 case LPFC_LINK_DOWN: 5341 case LPFC_LINK_DOWN:
5335 case LPFC_HBA_ERROR: 5342 case LPFC_HBA_ERROR:
5336 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, 5343 lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
5337 "0230 Unexpected timeout, hba link " 5344 "0230 Unexpected timeout, hba link "
5338 "state x%x\n", phba->link_state); 5345 "state x%x\n", phba->link_state);
5339 clrlaerr = 1; 5346 clrlaerr = 1;
5340 break; 5347 break;
5341 5348
5342 case LPFC_HBA_READY: 5349 case LPFC_HBA_READY:
5343 break; 5350 break;
5344 } 5351 }
5345 5352
5346 if (clrlaerr) { 5353 if (clrlaerr) {
5347 lpfc_disc_flush_list(vport); 5354 lpfc_disc_flush_list(vport);
5348 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 5355 psli->ring[(psli->extra_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5349 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 5356 psli->ring[(psli->fcp_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5350 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT; 5357 psli->ring[(psli->next_ring)].flag &= ~LPFC_STOP_IOCB_EVENT;
5351 vport->port_state = LPFC_VPORT_READY; 5358 vport->port_state = LPFC_VPORT_READY;
5352 } 5359 }
5353 5360
5354 return; 5361 return;
5355 } 5362 }
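
The init-link path in the timeout handler above illustrates the driver's ownership rule for mailbox objects: the issuing context frees the LPFC_MBOXQ_t back to mbox_mem_pool only when lpfc_sli_issue_mbox() returns MBX_NOT_FINISHED; on any other return the completion routine owns it. A minimal user-space sketch of that rule, assuming nothing about the real driver internals (issue_mbox, def_cmpl, and struct mbox are illustrative stand-ins, not kernel symbols):

#include <stdlib.h>

enum { MBX_SUCCESS, MBX_NOT_FINISHED };

struct mbox {
        void (*cmpl)(struct mbox *);
};

/* Completion routine owns and frees the mailbox once it runs. */
static void def_cmpl(struct mbox *m)
{
        free(m);
}

/* Models lpfc_sli_issue_mbox(..., MBX_NOWAIT): on success the
 * completion path (normally asynchronous) takes ownership.
 */
static int issue_mbox(struct mbox *m, int submit_fails)
{
        if (submit_fails)
                return MBX_NOT_FINISHED;   /* caller keeps ownership */
        m->cmpl(m);
        return MBX_SUCCESS;
}

int main(void)
{
        struct mbox *m = malloc(sizeof(*m));

        if (!m)
                return 1;
        m->cmpl = def_cmpl;
        if (issue_mbox(m, 0) == MBX_NOT_FINISHED)
                free(m);                   /* free only on submit failure */
        return 0;
}
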
5356 5363
5357 /* 5364 /*
5358 * This routine handles processing a NameServer REG_LOGIN mailbox 5365 * This routine handles processing a NameServer REG_LOGIN mailbox
5359 * command upon completion. It is set up in the LPFC_MBOXQ 5366 * command upon completion. It is set up in the LPFC_MBOXQ
5360 * as the completion routine when the command is 5367 * as the completion routine when the command is
5361 * handed off to the SLI layer. 5368 * handed off to the SLI layer.
5362 */ 5369 */
5363 void 5370 void
5364 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 5371 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
5365 { 5372 {
5366 MAILBOX_t *mb = &pmb->u.mb; 5373 MAILBOX_t *mb = &pmb->u.mb;
5367 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1); 5374 struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
5368 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2; 5375 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
5369 struct lpfc_vport *vport = pmb->vport; 5376 struct lpfc_vport *vport = pmb->vport;
5370 5377
5371 pmb->context1 = NULL; 5378 pmb->context1 = NULL;
5372 pmb->context2 = NULL; 5379 pmb->context2 = NULL;
5373 5380
5374 if (phba->sli_rev < LPFC_SLI_REV4) 5381 if (phba->sli_rev < LPFC_SLI_REV4)
5375 ndlp->nlp_rpi = mb->un.varWords[0]; 5382 ndlp->nlp_rpi = mb->un.varWords[0];
5376 ndlp->nlp_flag |= NLP_RPI_REGISTERED; 5383 ndlp->nlp_flag |= NLP_RPI_REGISTERED;
5377 ndlp->nlp_type |= NLP_FABRIC; 5384 ndlp->nlp_type |= NLP_FABRIC;
5378 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); 5385 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
5379 5386
5380 /* 5387 /*
5381 * Start issuing Fabric-Device Management Interface (FDMI) command to 5388 * Start issuing Fabric-Device Management Interface (FDMI) command to
5382 * 0xfffffa (FDMI well-known port) or delay issuing the FDMI command if 5389 * 0xfffffa (FDMI well-known port) or delay issuing the FDMI command if
5383 * fdmi-on=2 (supporting RPA/hostname) 5390 * fdmi-on=2 (supporting RPA/hostname)
5384 */ 5391 */
5385 5392
5386 if (vport->cfg_fdmi_on == 1) 5393 if (vport->cfg_fdmi_on == 1)
5387 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); 5394 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
5388 else 5395 else
5389 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); 5396 mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
5390 5397
5391 /* decrement the node reference count held for this callback 5398 /* decrement the node reference count held for this callback
5392 * function. 5399 * function.
5393 */ 5400 */
5394 lpfc_nlp_put(ndlp); 5401 lpfc_nlp_put(ndlp);
5395 lpfc_mbuf_free(phba, mp->virt, mp->phys); 5402 lpfc_mbuf_free(phba, mp->virt, mp->phys);
5396 kfree(mp); 5403 kfree(mp);
5397 mempool_free(pmb, phba->mbox_mem_pool); 5404 mempool_free(pmb, phba->mbox_mem_pool);
5398 5405
5399 return; 5406 return;
5400 } 5407 }
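
lpfc_mbx_cmpl_fdmi_reg_login releases three allocations in order: the node reference taken for the callback, the DMA buffer described by the lpfc_dmabuf wrapper, and the mailbox itself. Missing any one of these on an early-return path is the class of pci dma pool leak this commit addresses. A hedged user-space sketch of the same cleanup discipline (dmabuf, mbuf_free, and cmpl_cleanup are modeled names, not kernel APIs):

#include <stdlib.h>

/* Models struct lpfc_dmabuf: a kmalloc'd wrapper around a pool buffer. */
struct dmabuf {
        void *virt;     /* stands in for the pci dma pool allocation */
};

/* Stands in for lpfc_mbuf_free(): return the buffer to its pool. */
static void mbuf_free(void *virt)
{
        free(virt);
}

/* Every completion path must release all three allocations. */
static void cmpl_cleanup(struct dmabuf *mp, void *mbox)
{
        mbuf_free(mp->virt);   /* 1. DMA-pool buffer            */
        free(mp);              /* 2. wrapper (kfree in-kernel)  */
        free(mbox);            /* 3. mailbox back to its pool   */
}

int main(void)
{
        struct dmabuf *mp = malloc(sizeof(*mp));
        void *mbox = malloc(64);

        if (!mp || !mbox) {
                free(mp);
                free(mbox);
                return 1;
        }
        mp->virt = malloc(128);
        cmpl_cleanup(mp, mbox);
        return 0;
}
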
5401 5408
5402 static int 5409 static int
5403 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param) 5410 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
5404 { 5411 {
5405 uint16_t *rpi = param; 5412 uint16_t *rpi = param;
5406 5413
5407 /* check for active node */ 5414 /* check for active node */
5408 if (!NLP_CHK_NODE_ACT(ndlp)) 5415 if (!NLP_CHK_NODE_ACT(ndlp))
5409 return 0; 5416 return 0;
5410 5417
5411 return ndlp->nlp_rpi == *rpi; 5418 return ndlp->nlp_rpi == *rpi;
5412 } 5419 }
5413 5420
5414 static int 5421 static int
5415 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param) 5422 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
5416 { 5423 {
5417 return memcmp(&ndlp->nlp_portname, param, 5424 return memcmp(&ndlp->nlp_portname, param,
5418 sizeof(ndlp->nlp_portname)) == 0; 5425 sizeof(ndlp->nlp_portname)) == 0;
5419 } 5426 }
5420 5427
5421 static struct lpfc_nodelist * 5428 static struct lpfc_nodelist *
5422 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) 5429 __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param)
5423 { 5430 {
5424 struct lpfc_nodelist *ndlp; 5431 struct lpfc_nodelist *ndlp;
5425 5432
5426 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 5433 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
5427 if (filter(ndlp, param)) { 5434 if (filter(ndlp, param)) {
5428 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5435 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5429 "3185 FIND node filter %p DID " 5436 "3185 FIND node filter %p DID "
5430 "Data: x%p x%x x%x\n", 5437 "Data: x%p x%x x%x\n",
5431 filter, ndlp, ndlp->nlp_DID, 5438 filter, ndlp, ndlp->nlp_DID,
5432 ndlp->nlp_flag); 5439 ndlp->nlp_flag);
5433 return ndlp; 5440 return ndlp;
5434 } 5441 }
5435 } 5442 }
5436 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, 5443 lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE,
5437 "3186 FIND node filter %p NOT FOUND.\n", filter); 5444 "3186 FIND node filter %p NOT FOUND.\n", filter);
5438 return NULL; 5445 return NULL;
5439 } 5446 }
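
__lpfc_find_node factors the list walk out of the individual lookups: callers pass a node_filter predicate plus an opaque param, so matching by RPI and by WWPN share one traversal. A small stand-alone sketch of the same callback idiom (struct node, by_rpi, and find_node are illustrative, not driver symbols):

#include <stdio.h>

struct node {
        int rpi;
        const char *name;
};

typedef int (*node_filter)(const struct node *, const void *);

static int by_rpi(const struct node *n, const void *param)
{
        return n->rpi == *(const int *)param;
}

/* One traversal serves every lookup; the predicate decides the match. */
static const struct node *find_node(const struct node *list, int count,
                                    node_filter filter, const void *param)
{
        int i;

        for (i = 0; i < count; i++)
                if (filter(&list[i], param))
                        return &list[i];
        return NULL;
}

int main(void)
{
        struct node nodes[] = { { 1, "ns" }, { 7, "target" } };
        int rpi = 7;
        const struct node *n = find_node(nodes, 2, by_rpi, &rpi);

        printf("%s\n", n ? n->name : "not found");
        return 0;
}
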
5440 5447
5441 /* 5448 /*
5442 * This routine looks up the ndlp lists for the given RPI. If the rpi is 5449 * This routine looks up the ndlp lists for the given RPI. If the rpi is
5443 * found, it returns the node list element pointer; otherwise it returns NULL. 5450 * found, it returns the node list element pointer; otherwise it returns NULL.
5444 */ 5451 */
5445 struct lpfc_nodelist * 5452 struct lpfc_nodelist *
5446 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) 5453 __lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5447 { 5454 {
5448 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi); 5455 return __lpfc_find_node(vport, lpfc_filter_by_rpi, &rpi);
5449 } 5456 }
5450 5457
5451 /* 5458 /*
5452 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is 5459 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is
5453 * found, it returns the node list element pointer; otherwise it returns NULL. 5460 * found, it returns the node list element pointer; otherwise it returns NULL.
5454 */ 5461 */
5455 struct lpfc_nodelist * 5462 struct lpfc_nodelist *
5456 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn) 5463 lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
5457 { 5464 {
5458 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5465 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5459 struct lpfc_nodelist *ndlp; 5466 struct lpfc_nodelist *ndlp;
5460 5467
5461 spin_lock_irq(shost->host_lock); 5468 spin_lock_irq(shost->host_lock);
5462 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn); 5469 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
5463 spin_unlock_irq(shost->host_lock); 5470 spin_unlock_irq(shost->host_lock);
5464 return ndlp; 5471 return ndlp;
5465 } 5472 }
5466 5473
5467 /* 5474 /*
5468 * This routine looks up the ndlp lists for the given RPI. If the rpi 5475 * This routine looks up the ndlp lists for the given RPI. If the rpi
5469 * is found, the routine returns the node list element pointer; otherwise 5476 * is found, the routine returns the node list element pointer; otherwise
5470 * it returns NULL. 5477 * it returns NULL.
5471 */ 5478 */
5472 struct lpfc_nodelist * 5479 struct lpfc_nodelist *
5473 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) 5480 lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
5474 { 5481 {
5475 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5482 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5476 struct lpfc_nodelist *ndlp; 5483 struct lpfc_nodelist *ndlp;
5477 5484
5478 spin_lock_irq(shost->host_lock); 5485 spin_lock_irq(shost->host_lock);
5479 ndlp = __lpfc_findnode_rpi(vport, rpi); 5486 ndlp = __lpfc_findnode_rpi(vport, rpi);
5480 spin_unlock_irq(shost->host_lock); 5487 spin_unlock_irq(shost->host_lock);
5481 return ndlp; 5488 return ndlp;
5482 } 5489 }
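
The paired lookups above follow the kernel's double-underscore locking convention: __lpfc_findnode_rpi assumes the caller already holds host_lock, while lpfc_findnode_rpi is the public variant that takes and releases the lock around it. A minimal pthread analogue of the convention (read_value/__read_value are illustrative; build with -pthread):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_value;

/* Double-underscore variant: the caller must already hold the lock. */
static int __read_value(void)
{
        return shared_value;
}

/* Public variant: wraps the lockless helper in lock/unlock. */
static int read_value(void)
{
        int v;

        pthread_mutex_lock(&lock);
        v = __read_value();
        pthread_mutex_unlock(&lock);
        return v;
}

int main(void)
{
        return read_value();
}
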
5483 5490
5484 /** 5491 /**
5485 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier 5492 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
5486 * @phba: pointer to lpfc hba data structure. 5493 * @phba: pointer to lpfc hba data structure.
5487 * @vpi: the physical host virtual N_Port identifier. 5494 * @vpi: the physical host virtual N_Port identifier.
5488 * 5495 *
5489 * This routine finds a vport on a HBA (referred by @phba) through a 5496 * This routine finds a vport on a HBA (referred by @phba) through a
5490 * @vpi. The function walks the HBA's vport list and returns the address 5497 * @vpi. The function walks the HBA's vport list and returns the address
5491 * of the vport with the matching @vpi. 5498 * of the vport with the matching @vpi.
5492 * 5499 *
5493 * Return code 5500 * Return code
5494 * NULL - No vport with the matching @vpi found 5501 * NULL - No vport with the matching @vpi found
5495 * Otherwise - Address to the vport with the matching @vpi. 5502 * Otherwise - Address to the vport with the matching @vpi.
5496 **/ 5503 **/
5497 struct lpfc_vport * 5504 struct lpfc_vport *
5498 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi) 5505 lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5499 { 5506 {
5500 struct lpfc_vport *vport; 5507 struct lpfc_vport *vport;
5501 unsigned long flags; 5508 unsigned long flags;
5502 int i = 0; 5509 int i = 0;
5503 5510
5504 /* The physical ports are always vpi 0 - translation is unnecessary. */ 5511 /* The physical ports are always vpi 0 - translation is unnecessary. */
5505 if (vpi > 0) { 5512 if (vpi > 0) {
5506 /* 5513 /*
5507 * Translate the physical vpi to the logical vpi. The 5514 * Translate the physical vpi to the logical vpi. The
5508 * vport stores the logical vpi. 5515 * vport stores the logical vpi.
5509 */ 5516 */
5510 for (i = 0; i < phba->max_vpi; i++) { 5517 for (i = 0; i < phba->max_vpi; i++) {
5511 if (vpi == phba->vpi_ids[i]) 5518 if (vpi == phba->vpi_ids[i])
5512 break; 5519 break;
5513 } 5520 }
5514 5521
5515 if (i >= phba->max_vpi) { 5522 if (i >= phba->max_vpi) {
5516 lpfc_printf_log(phba, KERN_ERR, LOG_ELS, 5523 lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
5517 "2936 Could not find Vport mapped " 5524 "2936 Could not find Vport mapped "
5518 "to vpi %d\n", vpi); 5525 "to vpi %d\n", vpi);
5519 return NULL; 5526 return NULL;
5520 } 5527 }
5521 } 5528 }
5522 5529
5523 spin_lock_irqsave(&phba->hbalock, flags); 5530 spin_lock_irqsave(&phba->hbalock, flags);
5524 list_for_each_entry(vport, &phba->port_list, listentry) { 5531 list_for_each_entry(vport, &phba->port_list, listentry) {
5525 if (vport->vpi == i) { 5532 if (vport->vpi == i) {
5526 spin_unlock_irqrestore(&phba->hbalock, flags); 5533 spin_unlock_irqrestore(&phba->hbalock, flags);
5527 return vport; 5534 return vport;
5528 } 5535 }
5529 } 5536 }
5530 spin_unlock_irqrestore(&phba->hbalock, flags); 5537 spin_unlock_irqrestore(&phba->hbalock, flags);
5531 return NULL; 5538 return NULL;
5532 } 5539 }
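
lpfc_find_vport_by_vpid works in two steps: it first translates the physical vpi to the driver's logical index through the vpi_ids table, then matches vport->vpi under hbalock. A sketch of just the translation step, with a made-up vpi_ids table (the real one is populated by the firmware interface):

#include <stdio.h>

/* Models phba->vpi_ids: index = logical vpi, value = physical vpi. */
static const int vpi_ids[] = { 0, 17, 23, 42 };
#define MAX_VPI (int)(sizeof(vpi_ids) / sizeof(vpi_ids[0]))

/* Returns the logical vpi for a physical vpi, or -1 if unmapped. */
static int phys_to_logical(int vpi)
{
        int i;

        if (vpi == 0)
                return 0;       /* the physical port is always vpi 0 */
        for (i = 0; i < MAX_VPI; i++)
                if (vpi_ids[i] == vpi)
                        return i;
        return -1;
}

int main(void)
{
        printf("%d\n", phys_to_logical(23));    /* prints 2 */
        return 0;
}
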
5533 5540
5534 void 5541 void
5535 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, 5542 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
5536 uint32_t did) 5543 uint32_t did)
5537 { 5544 {
5538 memset(ndlp, 0, sizeof (struct lpfc_nodelist)); 5545 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
5539 5546
5540 lpfc_initialize_node(vport, ndlp, did); 5547 lpfc_initialize_node(vport, ndlp, did);
5541 INIT_LIST_HEAD(&ndlp->nlp_listp); 5548 INIT_LIST_HEAD(&ndlp->nlp_listp);
5542 5549
5543 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, 5550 lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE,
5544 "node init: did:x%x", 5551 "node init: did:x%x",
5545 ndlp->nlp_DID, 0, 0); 5552 ndlp->nlp_DID, 0, 0);
5546 5553
5547 return; 5554 return;
5548 } 5555 }
5549 5556
5550 /* This routine releases all resources associated with a specific NPort's ndlp 5557 /* This routine releases all resources associated with a specific NPort's ndlp
5551 * and mempool_free's the nodelist. 5558 * and mempool_free's the nodelist.
5552 */ 5559 */
5553 static void 5560 static void
5554 lpfc_nlp_release(struct kref *kref) 5561 lpfc_nlp_release(struct kref *kref)
5555 { 5562 {
5556 struct lpfc_hba *phba; 5563 struct lpfc_hba *phba;
5557 unsigned long flags; 5564 unsigned long flags;
5558 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist, 5565 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
5559 kref); 5566 kref);
5560 5567
5561 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 5568 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5562 "node release: did:x%x flg:x%x type:x%x", 5569 "node release: did:x%x flg:x%x type:x%x",
5563 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); 5570 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
5564 5571
5565 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, 5572 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
5566 "0279 lpfc_nlp_release: ndlp:x%p did %x " 5573 "0279 lpfc_nlp_release: ndlp:x%p did %x "
5567 "usgmap:x%x refcnt:%d\n", 5574 "usgmap:x%x refcnt:%d\n",
5568 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map, 5575 (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map,
5569 atomic_read(&ndlp->kref.refcount)); 5576 atomic_read(&ndlp->kref.refcount));
5570 5577
5571 /* remove ndlp from action. */ 5578 /* remove ndlp from action. */
5572 lpfc_nlp_remove(ndlp->vport, ndlp); 5579 lpfc_nlp_remove(ndlp->vport, ndlp);
5573 5580
5574 /* clear the ndlp active flag for all release cases */ 5581 /* clear the ndlp active flag for all release cases */
5575 phba = ndlp->phba; 5582 phba = ndlp->phba;
5576 spin_lock_irqsave(&phba->ndlp_lock, flags); 5583 spin_lock_irqsave(&phba->ndlp_lock, flags);
5577 NLP_CLR_NODE_ACT(ndlp); 5584 NLP_CLR_NODE_ACT(ndlp);
5578 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 5585 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5579 if (phba->sli_rev == LPFC_SLI_REV4) 5586 if (phba->sli_rev == LPFC_SLI_REV4)
5580 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); 5587 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
5581 5588
5582 /* free ndlp memory for final ndlp release */ 5589 /* free ndlp memory for final ndlp release */
5583 if (NLP_CHK_FREE_REQ(ndlp)) { 5590 if (NLP_CHK_FREE_REQ(ndlp)) {
5584 kfree(ndlp->lat_data); 5591 kfree(ndlp->lat_data);
5585 mempool_free(ndlp, ndlp->phba->nlp_mem_pool); 5592 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
5586 } 5593 }
5587 } 5594 }
5588 5595
5589 /* This routine bumps the reference count for an ndlp structure to ensure 5596 /* This routine bumps the reference count for an ndlp structure to ensure
5590 * that one discovery thread won't free an ndlp while another discovery thread 5597 * that one discovery thread won't free an ndlp while another discovery thread
5591 * is using it. 5598 * is using it.
5592 */ 5599 */
5593 struct lpfc_nodelist * 5600 struct lpfc_nodelist *
5594 lpfc_nlp_get(struct lpfc_nodelist *ndlp) 5601 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
5595 { 5602 {
5596 struct lpfc_hba *phba; 5603 struct lpfc_hba *phba;
5597 unsigned long flags; 5604 unsigned long flags;
5598 5605
5599 if (ndlp) { 5606 if (ndlp) {
5600 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 5607 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5601 "node get: did:x%x flg:x%x refcnt:x%x", 5608 "node get: did:x%x flg:x%x refcnt:x%x",
5602 ndlp->nlp_DID, ndlp->nlp_flag, 5609 ndlp->nlp_DID, ndlp->nlp_flag,
5603 atomic_read(&ndlp->kref.refcount)); 5610 atomic_read(&ndlp->kref.refcount));
5604 /* Check ndlp usage to prevent incrementing the reference 5611 /* Check ndlp usage to prevent incrementing the reference
5605 * count of an ndlp that is in the process of being 5612 * count of an ndlp that is in the process of being
5606 * released. 5613 * released.
5607 */ 5614 */
5608 phba = ndlp->phba; 5615 phba = ndlp->phba;
5609 spin_lock_irqsave(&phba->ndlp_lock, flags); 5616 spin_lock_irqsave(&phba->ndlp_lock, flags);
5610 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { 5617 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
5611 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 5618 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5612 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 5619 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5613 "0276 lpfc_nlp_get: ndlp:x%p " 5620 "0276 lpfc_nlp_get: ndlp:x%p "
5614 "usgmap:x%x refcnt:%d\n", 5621 "usgmap:x%x refcnt:%d\n",
5615 (void *)ndlp, ndlp->nlp_usg_map, 5622 (void *)ndlp, ndlp->nlp_usg_map,
5616 atomic_read(&ndlp->kref.refcount)); 5623 atomic_read(&ndlp->kref.refcount));
5617 return NULL; 5624 return NULL;
5618 } else 5625 } else
5619 kref_get(&ndlp->kref); 5626 kref_get(&ndlp->kref);
5620 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 5627 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5621 } 5628 }
5622 return ndlp; 5629 return ndlp;
5623 } 5630 }
5624 5631
5625 /* This routine decrements the reference count for an ndlp structure. If the 5632 /* This routine decrements the reference count for an ndlp structure. If the
5626 * count goes to 0, this indicates that the associated nodelist should be 5633 * count goes to 0, this indicates that the associated nodelist should be
5627 * freed. Returning 1 indicates the ndlp resource has been released; on the 5634 * freed. Returning 1 indicates the ndlp resource has been released; on the
5628 * other hand, returning 0 indicates the ndlp resource has not been released 5635 * other hand, returning 0 indicates the ndlp resource has not been released
5629 * yet. 5636 * yet.
5630 */ 5637 */
5631 int 5638 int
5632 lpfc_nlp_put(struct lpfc_nodelist *ndlp) 5639 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
5633 { 5640 {
5634 struct lpfc_hba *phba; 5641 struct lpfc_hba *phba;
5635 unsigned long flags; 5642 unsigned long flags;
5636 5643
5637 if (!ndlp) 5644 if (!ndlp)
5638 return 1; 5645 return 1;
5639 5646
5640 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 5647 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5641 "node put: did:x%x flg:x%x refcnt:x%x", 5648 "node put: did:x%x flg:x%x refcnt:x%x",
5642 ndlp->nlp_DID, ndlp->nlp_flag, 5649 ndlp->nlp_DID, ndlp->nlp_flag,
5643 atomic_read(&ndlp->kref.refcount)); 5650 atomic_read(&ndlp->kref.refcount));
5644 phba = ndlp->phba; 5651 phba = ndlp->phba;
5645 spin_lock_irqsave(&phba->ndlp_lock, flags); 5652 spin_lock_irqsave(&phba->ndlp_lock, flags);
5646 /* Check the ndlp memory free acknowledge flag to avoid the 5653 /* Check the ndlp memory free acknowledge flag to avoid the
5647 * possible race condition that kref_put got invoked again 5654 * possible race condition that kref_put got invoked again
5648 * after previous one has done ndlp memory free. 5655 * after previous one has done ndlp memory free.
5649 */ 5656 */
5650 if (NLP_CHK_FREE_ACK(ndlp)) { 5657 if (NLP_CHK_FREE_ACK(ndlp)) {
5651 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 5658 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5652 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 5659 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5653 "0274 lpfc_nlp_put: ndlp:x%p " 5660 "0274 lpfc_nlp_put: ndlp:x%p "
5654 "usgmap:x%x refcnt:%d\n", 5661 "usgmap:x%x refcnt:%d\n",
5655 (void *)ndlp, ndlp->nlp_usg_map, 5662 (void *)ndlp, ndlp->nlp_usg_map,
5656 atomic_read(&ndlp->kref.refcount)); 5663 atomic_read(&ndlp->kref.refcount));
5657 return 1; 5664 return 1;
5658 } 5665 }
5659 /* Check the ndlp inactivate log flag to avoid the possible 5666 /* Check the ndlp inactivate log flag to avoid the possible
5660 * race condition that kref_put got invoked again after ndlp 5667 * race condition that kref_put got invoked again after ndlp
5661 * is already in inactivating state. 5668 * is already in inactivating state.
5662 */ 5669 */
5663 if (NLP_CHK_IACT_REQ(ndlp)) { 5670 if (NLP_CHK_IACT_REQ(ndlp)) {
5664 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 5671 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5665 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, 5672 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
5666 "0275 lpfc_nlp_put: ndlp:x%p " 5673 "0275 lpfc_nlp_put: ndlp:x%p "
5667 "usgmap:x%x refcnt:%d\n", 5674 "usgmap:x%x refcnt:%d\n",
5668 (void *)ndlp, ndlp->nlp_usg_map, 5675 (void *)ndlp, ndlp->nlp_usg_map,
5669 atomic_read(&ndlp->kref.refcount)); 5676 atomic_read(&ndlp->kref.refcount));
5670 return 1; 5677 return 1;
5671 } 5678 }
5672 /* For last put, mark the ndlp usage flags to make sure no 5679 /* For last put, mark the ndlp usage flags to make sure no
5673 * other kref_get and kref_put on the same ndlp shall get 5680 * other kref_get and kref_put on the same ndlp shall get
5674 * in between the process when the final kref_put has been 5681 * in between the process when the final kref_put has been
5675 * invoked on this ndlp. 5682 * invoked on this ndlp.
5676 */ 5683 */
5677 if (atomic_read(&ndlp->kref.refcount) == 1) { 5684 if (atomic_read(&ndlp->kref.refcount) == 1) {
5678 /* Indicate ndlp is put to inactive state. */ 5685 /* Indicate ndlp is put to inactive state. */
5679 NLP_SET_IACT_REQ(ndlp); 5686 NLP_SET_IACT_REQ(ndlp);
5680 /* Acknowledge ndlp memory free has been seen. */ 5687 /* Acknowledge ndlp memory free has been seen. */
5681 if (NLP_CHK_FREE_REQ(ndlp)) 5688 if (NLP_CHK_FREE_REQ(ndlp))
5682 NLP_SET_FREE_ACK(ndlp); 5689 NLP_SET_FREE_ACK(ndlp);
5683 } 5690 }
5684 spin_unlock_irqrestore(&phba->ndlp_lock, flags); 5691 spin_unlock_irqrestore(&phba->ndlp_lock, flags);
5685 /* Note: kref_put decrements the reference count; when it 5692 /* Note: kref_put decrements the reference count; when it
5686 * decrements a count that was 1, it invokes the release 5693 * decrements a count that was 1, it invokes the release
5687 * callback function (which frees the object) and returns 5694 * callback function (which frees the object) and returns
5688 * 1. Otherwise, it simply decrements the reference count 5695 * 1. Otherwise, it simply decrements the reference count
5689 * and returns 0. 5696 * and returns 0.
5690 */ 5697 */
5691 return kref_put(&ndlp->kref, lpfc_nlp_release); 5698 return kref_put(&ndlp->kref, lpfc_nlp_release);
5692 } 5699 }
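
The note above about kref_put's return value is the key to lpfc_nlp_put's contract: the function returns 1 exactly when the release callback ran. A user-space model of that semantic (obj/obj_put are illustrative and not thread-safe, unlike the real atomic kref):

#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
};

static void release(struct obj *o)
{
        free(o);
}

/* Models the kref_put contract: drop one reference; when the count
 * reaches zero, run the release callback and return 1, else 0.
 */
static int obj_put(struct obj *o)
{
        if (--o->refcount == 0) {
                release(o);
                return 1;
        }
        return 0;
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 2;                 /* two holders */
        printf("%d\n", obj_put(o));      /* 0: still referenced */
        printf("%d\n", obj_put(o));      /* 1: release ran      */
        return 0;
}
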
5693 5700
5694 /* This routine frees the specified nodelist if it is not in use 5701 /* This routine frees the specified nodelist if it is not in use
5695 * by any other discovery thread. This routine returns 1 if the 5702 * by any other discovery thread. This routine returns 1 if the
5696 * ndlp has been freed. A return value of 0 indicates the ndlp 5703 * ndlp has been freed. A return value of 0 indicates the ndlp
5697 * has not yet been released. 5704 * has not yet been released.
5698 */ 5705 */
5699 int 5706 int
5700 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp) 5707 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
5701 { 5708 {
5702 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, 5709 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5703 "node not used: did:x%x flg:x%x refcnt:x%x", 5710 "node not used: did:x%x flg:x%x refcnt:x%x",
5704 ndlp->nlp_DID, ndlp->nlp_flag, 5711 ndlp->nlp_DID, ndlp->nlp_flag,
5705 atomic_read(&ndlp->kref.refcount)); 5712 atomic_read(&ndlp->kref.refcount));
5706 if (atomic_read(&ndlp->kref.refcount) == 1) 5713 if (atomic_read(&ndlp->kref.refcount) == 1)
5707 if (lpfc_nlp_put(ndlp)) 5714 if (lpfc_nlp_put(ndlp))
5708 return 1; 5715 return 1;
5709 return 0; 5716 return 0;
5710 } 5717 }
5711 5718
5712 /** 5719 /**
5713 * lpfc_fcf_inuse - Check if FCF can be unregistered. 5720 * lpfc_fcf_inuse - Check if FCF can be unregistered.
5714 * @phba: Pointer to hba context object. 5721 * @phba: Pointer to hba context object.
5715 * 5722 *
5716 * This function iterates through all FC nodes associated 5723 * This function iterates through all FC nodes associated
5717 * with all vports to check if there is any node with 5724 * with all vports to check if there is any node with
5718 * fc_rports associated with it. If there is an fc_rport 5725 * fc_rports associated with it. If there is an fc_rport
5719 * associated with the node, then the node is either in 5726 * associated with the node, then the node is either in
5720 * discovered state or its devloss_timer is pending. 5727 * discovered state or its devloss_timer is pending.
5721 */ 5728 */
5722 static int 5729 static int
5723 lpfc_fcf_inuse(struct lpfc_hba *phba) 5730 lpfc_fcf_inuse(struct lpfc_hba *phba)
5724 { 5731 {
5725 struct lpfc_vport **vports; 5732 struct lpfc_vport **vports;
5726 int i, ret = 0; 5733 int i, ret = 0;
5727 struct lpfc_nodelist *ndlp; 5734 struct lpfc_nodelist *ndlp;
5728 struct Scsi_Host *shost; 5735 struct Scsi_Host *shost;
5729 5736
5730 vports = lpfc_create_vport_work_array(phba); 5737 vports = lpfc_create_vport_work_array(phba);
5731 5738
5732 /* If driver cannot allocate memory, indicate fcf is in use */ 5739 /* If driver cannot allocate memory, indicate fcf is in use */
5733 if (!vports) 5740 if (!vports)
5734 return 1; 5741 return 1;
5735 5742
5736 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 5743 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5737 shost = lpfc_shost_from_vport(vports[i]); 5744 shost = lpfc_shost_from_vport(vports[i]);
5738 spin_lock_irq(shost->host_lock); 5745 spin_lock_irq(shost->host_lock);
5739 /* 5746 /*
5740 * If the CVL_RCVD bit is not set, then we have sent the 5747 * If the CVL_RCVD bit is not set, then we have sent the
5741 * flogi. 5748 * flogi.
5742 * If dev_loss fires while we are waiting we do not want to 5749 * If dev_loss fires while we are waiting we do not want to
5743 * unreg the fcf. 5750 * unreg the fcf.
5744 */ 5751 */
5745 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) { 5752 if (!(vports[i]->fc_flag & FC_VPORT_CVL_RCVD)) {
5746 spin_unlock_irq(shost->host_lock); 5753 spin_unlock_irq(shost->host_lock);
5747 ret = 1; 5754 ret = 1;
5748 goto out; 5755 goto out;
5749 } 5756 }
5750 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { 5757 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5751 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport && 5758 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
5752 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 5759 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
5753 ret = 1; 5760 ret = 1;
5754 spin_unlock_irq(shost->host_lock); 5761 spin_unlock_irq(shost->host_lock);
5755 goto out; 5762 goto out;
5756 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { 5763 } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) {
5757 ret = 1; 5764 ret = 1;
5758 lpfc_printf_log(phba, KERN_INFO, LOG_ELS, 5765 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
5759 "2624 RPI %x DID %x flag %x " 5766 "2624 RPI %x DID %x flag %x "
5760 "still logged in\n", 5767 "still logged in\n",
5761 ndlp->nlp_rpi, ndlp->nlp_DID, 5768 ndlp->nlp_rpi, ndlp->nlp_DID,
5762 ndlp->nlp_flag); 5769 ndlp->nlp_flag);
5763 } 5770 }
5764 } 5771 }
5765 spin_unlock_irq(shost->host_lock); 5772 spin_unlock_irq(shost->host_lock);
5766 } 5773 }
5767 out: 5774 out:
5768 lpfc_destroy_vport_work_array(phba, vports); 5775 lpfc_destroy_vport_work_array(phba, vports);
5769 return ret; 5776 return ret;
5770 } 5777 }
5771 5778
5772 /** 5779 /**
5773 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi. 5780 * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
5774 * @phba: Pointer to hba context object. 5781 * @phba: Pointer to hba context object.
5775 * @mboxq: Pointer to mailbox object. 5782 * @mboxq: Pointer to mailbox object.
5776 * 5783 *
5777 * This function frees memory associated with the mailbox command. 5784 * This function frees memory associated with the mailbox command.
5778 */ 5785 */
5779 void 5786 void
5780 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 5787 lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5781 { 5788 {
5782 struct lpfc_vport *vport = mboxq->vport; 5789 struct lpfc_vport *vport = mboxq->vport;
5783 struct Scsi_Host *shost = lpfc_shost_from_vport(vport); 5790 struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5784 5791
5785 if (mboxq->u.mb.mbxStatus) { 5792 if (mboxq->u.mb.mbxStatus) {
5786 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5793 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5787 "2555 UNREG_VFI mbxStatus error x%x " 5794 "2555 UNREG_VFI mbxStatus error x%x "
5788 "HBA state x%x\n", 5795 "HBA state x%x\n",
5789 mboxq->u.mb.mbxStatus, vport->port_state); 5796 mboxq->u.mb.mbxStatus, vport->port_state);
5790 } 5797 }
5791 spin_lock_irq(shost->host_lock); 5798 spin_lock_irq(shost->host_lock);
5792 phba->pport->fc_flag &= ~FC_VFI_REGISTERED; 5799 phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
5793 spin_unlock_irq(shost->host_lock); 5800 spin_unlock_irq(shost->host_lock);
5794 mempool_free(mboxq, phba->mbox_mem_pool); 5801 mempool_free(mboxq, phba->mbox_mem_pool);
5795 return; 5802 return;
5796 } 5803 }
5797 5804
5798 /** 5805 /**
5799 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi. 5806 * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
5800 * @phba: Pointer to hba context object. 5807 * @phba: Pointer to hba context object.
5801 * @mboxq: Pointer to mailbox object. 5808 * @mboxq: Pointer to mailbox object.
5802 * 5809 *
5803 * This function frees memory associated with the mailbox command. 5810 * This function frees memory associated with the mailbox command.
5804 */ 5811 */
5805 static void 5812 static void
5806 lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) 5813 lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
5807 { 5814 {
5808 struct lpfc_vport *vport = mboxq->vport; 5815 struct lpfc_vport *vport = mboxq->vport;
5809 5816
5810 if (mboxq->u.mb.mbxStatus) { 5817 if (mboxq->u.mb.mbxStatus) {
5811 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5818 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5812 "2550 UNREG_FCFI mbxStatus error x%x " 5819 "2550 UNREG_FCFI mbxStatus error x%x "
5813 "HBA state x%x\n", 5820 "HBA state x%x\n",
5814 mboxq->u.mb.mbxStatus, vport->port_state); 5821 mboxq->u.mb.mbxStatus, vport->port_state);
5815 } 5822 }
5816 mempool_free(mboxq, phba->mbox_mem_pool); 5823 mempool_free(mboxq, phba->mbox_mem_pool);
5817 return; 5824 return;
5818 } 5825 }
5819 5826
5820 /** 5827 /**
5821 * lpfc_unregister_fcf_prep - Unregister fcf record preparation 5828 * lpfc_unregister_fcf_prep - Unregister fcf record preparation
5822 * @phba: Pointer to hba context object. 5829 * @phba: Pointer to hba context object.
5823 * 5830 *
5824 * This function prepares the HBA for unregistering the currently registered 5831 * This function prepares the HBA for unregistering the currently registered
5825 * FCF from the HBA. It unregisters, in order, the RPIs, VPIs, and 5832 * FCF from the HBA. It unregisters, in order, the RPIs, VPIs, and
5826 * VFIs. 5833 * VFIs.
5827 */ 5834 */
5828 int 5835 int
5829 lpfc_unregister_fcf_prep(struct lpfc_hba *phba) 5836 lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
5830 { 5837 {
5831 struct lpfc_vport **vports; 5838 struct lpfc_vport **vports;
5832 struct lpfc_nodelist *ndlp; 5839 struct lpfc_nodelist *ndlp;
5833 struct Scsi_Host *shost; 5840 struct Scsi_Host *shost;
5834 int i, rc; 5841 int i, rc;
5835 5842
5836 /* Unregister RPIs */ 5843 /* Unregister RPIs */
5837 if (lpfc_fcf_inuse(phba)) 5844 if (lpfc_fcf_inuse(phba))
5838 lpfc_unreg_hba_rpis(phba); 5845 lpfc_unreg_hba_rpis(phba);
5839 5846
5840 /* At this point, all discovery is aborted */ 5847 /* At this point, all discovery is aborted */
5841 phba->pport->port_state = LPFC_VPORT_UNKNOWN; 5848 phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5842 5849
5843 /* Unregister VPIs */ 5850 /* Unregister VPIs */
5844 vports = lpfc_create_vport_work_array(phba); 5851 vports = lpfc_create_vport_work_array(phba);
5845 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) 5852 if (vports && (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
5846 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { 5853 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
5847 /* Stop FLOGI/FDISC retries */ 5854 /* Stop FLOGI/FDISC retries */
5848 ndlp = lpfc_findnode_did(vports[i], Fabric_DID); 5855 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
5849 if (ndlp) 5856 if (ndlp)
5850 lpfc_cancel_retry_delay_tmo(vports[i], ndlp); 5857 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
5851 lpfc_cleanup_pending_mbox(vports[i]); 5858 lpfc_cleanup_pending_mbox(vports[i]);
5852 if (phba->sli_rev == LPFC_SLI_REV4) 5859 if (phba->sli_rev == LPFC_SLI_REV4)
5853 lpfc_sli4_unreg_all_rpis(vports[i]); 5860 lpfc_sli4_unreg_all_rpis(vports[i]);
5854 lpfc_mbx_unreg_vpi(vports[i]); 5861 lpfc_mbx_unreg_vpi(vports[i]);
5855 shost = lpfc_shost_from_vport(vports[i]); 5862 shost = lpfc_shost_from_vport(vports[i]);
5856 spin_lock_irq(shost->host_lock); 5863 spin_lock_irq(shost->host_lock);
5857 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; 5864 vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
5858 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED; 5865 vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
5859 spin_unlock_irq(shost->host_lock); 5866 spin_unlock_irq(shost->host_lock);
5860 } 5867 }
5861 lpfc_destroy_vport_work_array(phba, vports); 5868 lpfc_destroy_vport_work_array(phba, vports);
5862 5869
5863 /* Cleanup any outstanding ELS commands */ 5870 /* Cleanup any outstanding ELS commands */
5864 lpfc_els_flush_all_cmd(phba); 5871 lpfc_els_flush_all_cmd(phba);
5865 5872
5866 /* Unregister the physical port VFI */ 5873 /* Unregister the physical port VFI */
5867 rc = lpfc_issue_unreg_vfi(phba->pport); 5874 rc = lpfc_issue_unreg_vfi(phba->pport);
5868 return rc; 5875 return rc;
5869 } 5876 }
5870 5877
5871 /** 5878 /**
5872 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record 5879 * lpfc_sli4_unregister_fcf - Unregister currently registered FCF record
5873 * @phba: Pointer to hba context object. 5880 * @phba: Pointer to hba context object.
5874 * 5881 *
5875 * This function issues an unregister FCF mailbox command to the HBA to 5882 * This function issues an unregister FCF mailbox command to the HBA to
5876 * unregister the currently registered FCF record. The driver does not reset 5883 * unregister the currently registered FCF record. The driver does not reset
5877 * the driver FCF usage state flags. 5884 * the driver FCF usage state flags.
5878 * 5885 *
5879 * Return 0 if successfully issued, non-zero otherwise. 5886 * Return 0 if successfully issued, non-zero otherwise.
5880 */ 5887 */
5881 int 5888 int
5882 lpfc_sli4_unregister_fcf(struct lpfc_hba *phba) 5889 lpfc_sli4_unregister_fcf(struct lpfc_hba *phba)
5883 { 5890 {
5884 LPFC_MBOXQ_t *mbox; 5891 LPFC_MBOXQ_t *mbox;
5885 int rc; 5892 int rc;
5886 5893
5887 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 5894 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5888 if (!mbox) { 5895 if (!mbox) {
5889 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5896 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5890 "2551 UNREG_FCFI mbox allocation failed" 5897 "2551 UNREG_FCFI mbox allocation failed"
5891 "HBA state x%x\n", phba->pport->port_state); 5898 "HBA state x%x\n", phba->pport->port_state);
5892 return -ENOMEM; 5899 return -ENOMEM;
5893 } 5900 }
5894 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi); 5901 lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
5895 mbox->vport = phba->pport; 5902 mbox->vport = phba->pport;
5896 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl; 5903 mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
5897 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 5904 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
5898 5905
5899 if (rc == MBX_NOT_FINISHED) { 5906 if (rc == MBX_NOT_FINISHED) {
5900 lpfc_printf_log(phba, KERN_ERR, LOG_SLI, 5907 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
5901 "2552 Unregister FCFI command failed rc x%x " 5908 "2552 Unregister FCFI command failed rc x%x "
5902 "HBA state x%x\n", 5909 "HBA state x%x\n",
5903 rc, phba->pport->port_state); 5910 rc, phba->pport->port_state);
5904 return -EINVAL; 5911 return -EINVAL;
5905 } 5912 }
5906 return 0; 5913 return 0;
5907 } 5914 }
5908 5915
5909 /** 5916 /**
5910 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan 5917 * lpfc_unregister_fcf_rescan - Unregister currently registered fcf and rescan
5911 * @phba: Pointer to hba context object. 5918 * @phba: Pointer to hba context object.
5912 * 5919 *
5913 * This function unregisters the currently registered FCF. This function 5920 * This function unregisters the currently registered FCF. This function
5914 * also tries to find another FCF for discovery by rescanning the HBA FCF table. 5921 * also tries to find another FCF for discovery by rescanning the HBA FCF table.
5915 */ 5922 */
5916 void 5923 void
5917 lpfc_unregister_fcf_rescan(struct lpfc_hba *phba) 5924 lpfc_unregister_fcf_rescan(struct lpfc_hba *phba)
5918 { 5925 {
5919 int rc; 5926 int rc;
5920 5927
5921 /* Preparation for unregistering fcf */ 5928 /* Preparation for unregistering fcf */
5922 rc = lpfc_unregister_fcf_prep(phba); 5929 rc = lpfc_unregister_fcf_prep(phba);
5923 if (rc) { 5930 if (rc) {
5924 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 5931 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
5925 "2748 Failed to prepare for unregistering " 5932 "2748 Failed to prepare for unregistering "
5926 "HBA's FCF record: rc=%d\n", rc); 5933 "HBA's FCF record: rc=%d\n", rc);
5927 return; 5934 return;
5928 } 5935 }
5929 5936
5930 /* Now, unregister FCF record and reset HBA FCF state */ 5937 /* Now, unregister FCF record and reset HBA FCF state */
5931 rc = lpfc_sli4_unregister_fcf(phba); 5938 rc = lpfc_sli4_unregister_fcf(phba);
5932 if (rc) 5939 if (rc)
5933 return; 5940 return;
5934 /* Reset HBA FCF states after successful unregister FCF */ 5941 /* Reset HBA FCF states after successful unregister FCF */
5935 phba->fcf.fcf_flag = 0; 5942 phba->fcf.fcf_flag = 0;
5936 phba->fcf.current_rec.flag = 0; 5943 phba->fcf.current_rec.flag = 0;
5937 5944
5938 /* 5945 /*
5939 * If driver is not unloading, check if there is any other 5946 * If driver is not unloading, check if there is any other
5940 * FCF record that can be used for discovery. 5947 * FCF record that can be used for discovery.
5941 */ 5948 */
5942 if ((phba->pport->load_flag & FC_UNLOADING) || 5949 if ((phba->pport->load_flag & FC_UNLOADING) ||
5943 (phba->link_state < LPFC_LINK_UP)) 5950 (phba->link_state < LPFC_LINK_UP))
5944 return; 5951 return;
5945 5952
5946 /* This is considered as the initial FCF discovery scan */ 5953 /* This is considered as the initial FCF discovery scan */
5947 spin_lock_irq(&phba->hbalock); 5954 spin_lock_irq(&phba->hbalock);
5948 phba->fcf.fcf_flag |= FCF_INIT_DISC; 5955 phba->fcf.fcf_flag |= FCF_INIT_DISC;
5949 spin_unlock_irq(&phba->hbalock); 5956 spin_unlock_irq(&phba->hbalock);
5950 5957
5951 /* Reset FCF roundrobin bmask for new discovery */ 5958 /* Reset FCF roundrobin bmask for new discovery */
5952 lpfc_sli4_clear_fcf_rr_bmask(phba); 5959 lpfc_sli4_clear_fcf_rr_bmask(phba);
5953 5960
5954 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); 5961 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
5955 5962
5956 if (rc) { 5963 if (rc) {
5957 spin_lock_irq(&phba->hbalock); 5964 spin_lock_irq(&phba->hbalock);
5958 phba->fcf.fcf_flag &= ~FCF_INIT_DISC; 5965 phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
5959 spin_unlock_irq(&phba->hbalock); 5966 spin_unlock_irq(&phba->hbalock);
5960 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX, 5967 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
5961 "2553 lpfc_unregister_unused_fcf failed " 5968 "2553 lpfc_unregister_unused_fcf failed "
5962 "to read FCF record HBA state x%x\n", 5969 "to read FCF record HBA state x%x\n",
5963 phba->pport->port_state); 5970 phba->pport->port_state);
5964 } 5971 }
5965 } 5972 }
5966 5973
5967 /** 5974 /**
5968 * lpfc_unregister_fcf - Unregister the currently registered fcf record 5975 * lpfc_unregister_fcf - Unregister the currently registered fcf record
5969 * @phba: Pointer to hba context object. 5976 * @phba: Pointer to hba context object.
5970 * 5977 *
5971 * This function just unregisters the currently registered FCF. It does not 5978 * This function just unregisters the currently registered FCF. It does not
5972 * try to find another FCF for discovery. 5979 * try to find another FCF for discovery.
5973 */ 5980 */
5974 void 5981 void
5975 lpfc_unregister_fcf(struct lpfc_hba *phba) 5982 lpfc_unregister_fcf(struct lpfc_hba *phba)
5976 { 5983 {
5977 int rc; 5984 int rc;
5978 5985
5979 /* Preparation for unregistering fcf */ 5986 /* Preparation for unregistering fcf */
5980 rc = lpfc_unregister_fcf_prep(phba); 5987 rc = lpfc_unregister_fcf_prep(phba);
5981 if (rc) { 5988 if (rc) {
5982 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, 5989 lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
5983 "2749 Failed to prepare for unregistering " 5990 "2749 Failed to prepare for unregistering "
5984 "HBA's FCF record: rc=%d\n", rc); 5991 "HBA's FCF record: rc=%d\n", rc);
5985 return; 5992 return;
5986 } 5993 }
5987 5994
5988 /* Now, unregister FCF record and reset HBA FCF state */ 5995 /* Now, unregister FCF record and reset HBA FCF state */
5989 rc = lpfc_sli4_unregister_fcf(phba); 5996 rc = lpfc_sli4_unregister_fcf(phba);
5990 if (rc) 5997 if (rc)
5991 return; 5998 return;
5992 /* Set proper HBA FCF states after successful unregister FCF */ 5999 /* Set proper HBA FCF states after successful unregister FCF */
5993 spin_lock_irq(&phba->hbalock); 6000 spin_lock_irq(&phba->hbalock);
5994 phba->fcf.fcf_flag &= ~FCF_REGISTERED; 6001 phba->fcf.fcf_flag &= ~FCF_REGISTERED;
5995 spin_unlock_irq(&phba->hbalock); 6002 spin_unlock_irq(&phba->hbalock);
5996 } 6003 }
5997 6004
5998 /** 6005 /**
5999 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected. 6006 * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
6000 * @phba: Pointer to hba context object. 6007 * @phba: Pointer to hba context object.
6001 * 6008 *
6002 * This function checks if there are any connected remote ports for the FCF; 6009 * This function checks if there are any connected remote ports for the FCF;
6003 * if all the devices are disconnected, this function unregisters the FCFI. 6010 * if all the devices are disconnected, this function unregisters the FCFI.
6004 * This function also tries to use another FCF for discovery. 6011 * This function also tries to use another FCF for discovery.
6005 */ 6012 */
6006 void 6013 void
6007 lpfc_unregister_unused_fcf(struct lpfc_hba *phba) 6014 lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
6008 { 6015 {
6009 /* 6016 /*
6010 * If HBA is not running in FIP mode, if HBA does not support 6017 * If HBA is not running in FIP mode, if HBA does not support
6011 * FCoE, if FCF discovery is ongoing, or if FCF has not been 6018 * FCoE, if FCF discovery is ongoing, or if FCF has not been
6012 * registered, do nothing. 6019 * registered, do nothing.
6013 */ 6020 */
6014 spin_lock_irq(&phba->hbalock); 6021 spin_lock_irq(&phba->hbalock);
6015 if (!(phba->hba_flag & HBA_FCOE_MODE) || 6022 if (!(phba->hba_flag & HBA_FCOE_MODE) ||
6016 !(phba->fcf.fcf_flag & FCF_REGISTERED) || 6023 !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
6017 !(phba->hba_flag & HBA_FIP_SUPPORT) || 6024 !(phba->hba_flag & HBA_FIP_SUPPORT) ||
6018 (phba->fcf.fcf_flag & FCF_DISCOVERY) || 6025 (phba->fcf.fcf_flag & FCF_DISCOVERY) ||
6019 (phba->pport->port_state == LPFC_FLOGI)) { 6026 (phba->pport->port_state == LPFC_FLOGI)) {
6020 spin_unlock_irq(&phba->hbalock); 6027 spin_unlock_irq(&phba->hbalock);
6021 return; 6028 return;
6022 } 6029 }
6023 spin_unlock_irq(&phba->hbalock); 6030 spin_unlock_irq(&phba->hbalock);
6024 6031
6025 if (lpfc_fcf_inuse(phba)) 6032 if (lpfc_fcf_inuse(phba))
6026 return; 6033 return;
6027 6034
6028 lpfc_unregister_fcf_rescan(phba); 6035 lpfc_unregister_fcf_rescan(phba);
6029 } 6036 }
6030 6037
6031 /** 6038 /**
6032 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table. 6039 * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
6033 * @phba: Pointer to hba context object. 6040 * @phba: Pointer to hba context object.
6034 * @buff: Buffer containing the FCF connection table as in the config 6041 * @buff: Buffer containing the FCF connection table as in the config
6035 * region. 6042 * region.
6036 * This function creates the driver data structure for the FCF connection 6043 * This function creates the driver data structure for the FCF connection
6037 * record table read from config region 23. 6044 * record table read from config region 23.
6038 */ 6045 */
6039 static void 6046 static void
6040 lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba, 6047 lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
6041 uint8_t *buff) 6048 uint8_t *buff)
6042 { 6049 {
6043 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; 6050 struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
6044 struct lpfc_fcf_conn_hdr *conn_hdr; 6051 struct lpfc_fcf_conn_hdr *conn_hdr;
6045 struct lpfc_fcf_conn_rec *conn_rec; 6052 struct lpfc_fcf_conn_rec *conn_rec;
6046 uint32_t record_count; 6053 uint32_t record_count;
6047 int i; 6054 int i;
6048 6055
6049 /* Free the current connect table */ 6056 /* Free the current connect table */
6050 list_for_each_entry_safe(conn_entry, next_conn_entry, 6057 list_for_each_entry_safe(conn_entry, next_conn_entry,
6051 &phba->fcf_conn_rec_list, list) { 6058 &phba->fcf_conn_rec_list, list) {
6052 list_del_init(&conn_entry->list); 6059 list_del_init(&conn_entry->list);
6053 kfree(conn_entry); 6060 kfree(conn_entry);
6054 } 6061 }
6055 6062
6056 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff; 6063 conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
6057 record_count = conn_hdr->length * sizeof(uint32_t)/ 6064 record_count = conn_hdr->length * sizeof(uint32_t)/
6058 sizeof(struct lpfc_fcf_conn_rec); 6065 sizeof(struct lpfc_fcf_conn_rec);
6059 6066
6060 conn_rec = (struct lpfc_fcf_conn_rec *) 6067 conn_rec = (struct lpfc_fcf_conn_rec *)
6061 (buff + sizeof(struct lpfc_fcf_conn_hdr)); 6068 (buff + sizeof(struct lpfc_fcf_conn_hdr));
6062 6069
6063 for (i = 0; i < record_count; i++) { 6070 for (i = 0; i < record_count; i++) {
6064 if (!(conn_rec[i].flags & FCFCNCT_VALID)) 6071 if (!(conn_rec[i].flags & FCFCNCT_VALID))
6065 continue; 6072 continue;
6066 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry), 6073 conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
6067 GFP_KERNEL); 6074 GFP_KERNEL);
6068 if (!conn_entry) { 6075 if (!conn_entry) {
6069 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6076 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6070 "2566 Failed to allocate connection" 6077 "2566 Failed to allocate connection"
6071 " table entry\n"); 6078 " table entry\n");
6072 return; 6079 return;
6073 } 6080 }
6074 6081
6075 memcpy(&conn_entry->conn_rec, &conn_rec[i], 6082 memcpy(&conn_entry->conn_rec, &conn_rec[i],
6076 sizeof(struct lpfc_fcf_conn_rec)); 6083 sizeof(struct lpfc_fcf_conn_rec));
6077 conn_entry->conn_rec.vlan_tag = 6084 conn_entry->conn_rec.vlan_tag =
6078 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF; 6085 le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
6079 conn_entry->conn_rec.flags = 6086 conn_entry->conn_rec.flags =
6080 le16_to_cpu(conn_entry->conn_rec.flags); 6087 le16_to_cpu(conn_entry->conn_rec.flags);
6081 list_add_tail(&conn_entry->list, 6088 list_add_tail(&conn_entry->list,
6082 &phba->fcf_conn_rec_list); 6089 &phba->fcf_conn_rec_list);
6083 } 6090 }
6084 } 6091 }
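
The record_count arithmetic above converts the header's length field, which counts 32-bit words of payload, into a number of fixed-size connection records. A self-contained check of that conversion (the conn_rec layout here is illustrative; the real struct lpfc_fcf_conn_rec is defined in the driver headers and may differ):

#include <stdio.h>
#include <stdint.h>

/* Illustrative fixed-size record standing in for the driver's
 * struct lpfc_fcf_conn_rec.
 */
struct conn_rec {
        uint16_t flags;
        uint16_t vlan_tag;
        uint8_t  fabric_name[8];
        uint8_t  switch_name[8];
};      /* 20 bytes with no padding on common ABIs */

int main(void)
{
        /* conn_hdr->length counts 32-bit words of record payload. */
        uint32_t length_in_words = 15;
        uint32_t record_count = length_in_words * sizeof(uint32_t) /
                                sizeof(struct conn_rec);

        printf("%u records\n", (unsigned)record_count);  /* 60/20 = 3 */
        return 0;
}
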
6085 6092
6086 /** 6093 /**
6087 * lpfc_read_fcoe_param - Read FCoE parameters from config region. 6094 * lpfc_read_fcoe_param - Read FCoE parameters from config region.
6088 * @phba: Pointer to hba context object. 6095 * @phba: Pointer to hba context object.
6089 * @buff: Buffer containing the FCoE parameter data structure. 6096 * @buff: Buffer containing the FCoE parameter data structure.
6090 * 6097 *
6091 * This function updates the driver data structure with config 6098 * This function updates the driver data structure with config
6092 * parameters read from config region 23. 6099 * parameters read from config region 23.
6093 */ 6100 */
6094 static void 6101 static void
6095 lpfc_read_fcoe_param(struct lpfc_hba *phba, 6102 lpfc_read_fcoe_param(struct lpfc_hba *phba,
6096 uint8_t *buff) 6103 uint8_t *buff)
6097 { 6104 {
6098 struct lpfc_fip_param_hdr *fcoe_param_hdr; 6105 struct lpfc_fip_param_hdr *fcoe_param_hdr;
6099 struct lpfc_fcoe_params *fcoe_param; 6106 struct lpfc_fcoe_params *fcoe_param;
6100 6107
6101 fcoe_param_hdr = (struct lpfc_fip_param_hdr *) 6108 fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
6102 buff; 6109 buff;
6103 fcoe_param = (struct lpfc_fcoe_params *) 6110 fcoe_param = (struct lpfc_fcoe_params *)
6104 (buff + sizeof(struct lpfc_fip_param_hdr)); 6111 (buff + sizeof(struct lpfc_fip_param_hdr));
6105 6112
6106 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) || 6113 if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
6107 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH)) 6114 (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
6108 return; 6115 return;
6109 6116
6110 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) { 6117 if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
6111 phba->valid_vlan = 1; 6118 phba->valid_vlan = 1;
6112 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) & 6119 phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
6113 0xFFF; 6120 0xFFF;
6114 } 6121 }
6115 6122
6116 phba->fc_map[0] = fcoe_param->fc_map[0]; 6123 phba->fc_map[0] = fcoe_param->fc_map[0];
6117 phba->fc_map[1] = fcoe_param->fc_map[1]; 6124 phba->fc_map[1] = fcoe_param->fc_map[1];
6118 phba->fc_map[2] = fcoe_param->fc_map[2]; 6125 phba->fc_map[2] = fcoe_param->fc_map[2];
6119 return; 6126 return;
6120 } 6127 }
6121 6128
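Only the low 12 bits of the byte-swapped tag are the VLAN ID; the upper four bits of the 16-bit TCI carry 802.1Q priority and CFI and are masked off, exactly as in the connection-table path above. A short worked example (the tag value is invented, and since le16_to_cpu is a no-op on little-endian hosts a trivial stand-in is used):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t le16_stub(uint16_t v) { return v; }  /* little-endian host */

    int main(void)
    {
        uint16_t tci = 0xA0C5;                  /* invented: PCP/DEI + VID    */
        uint16_t vid = le16_stub(tci) & 0xFFF;  /* keep 12-bit VLAN ID: 0x0C5 */
        printf("vlan id: 0x%03X\n", vid);
        return 0;
    }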
6122 /** 6129 /**
6123 * lpfc_get_rec_conf23 - Find a record of a given type in config region data. 6130 * lpfc_get_rec_conf23 - Find a record of a given type in config region data.
6124 * @buff: Buffer containing config region 23 data. 6131 * @buff: Buffer containing config region 23 data.
6125 * @size: Size of the data buffer. 6132 * @size: Size of the data buffer.
6126 * @rec_type: Record type to be searched. 6133 * @rec_type: Record type to be searched.
6127 * 6134 *
6128 * This function searches config region data to find the beginning 6135 * This function searches config region data to find the beginning
6129 * of the record specified by record_type. If the record is found, this 6136 * of the record specified by record_type. If the record is found, this
6130 * function returns a pointer to the record; otherwise it returns NULL. 6137 * function returns a pointer to the record; otherwise it returns NULL.
6131 */ 6138 */
6132 static uint8_t * 6139 static uint8_t *
6133 lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type) 6140 lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
6134 { 6141 {
6135 uint32_t offset = 0, rec_length; 6142 uint32_t offset = 0, rec_length;
6136 6143
6137 if ((buff[0] == LPFC_REGION23_LAST_REC) || 6144 if ((buff[0] == LPFC_REGION23_LAST_REC) ||
6138 (size < sizeof(uint32_t))) 6145 (size < sizeof(uint32_t)))
6139 return NULL; 6146 return NULL;
6140 6147
6141 rec_length = buff[offset + 1]; 6148 rec_length = buff[offset + 1];
6142 6149
6143 /* 6150 /*
6144 * Each TLV record has a one-word header plus the number of data words 6151 * Each TLV record has a one-word header plus the number of data words
6145 * specified in the rec_length field of the record header. 6152 * specified in the rec_length field of the record header.
6146 */ 6153 */
6147 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t)) 6154 while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
6148 <= size) { 6155 <= size) {
6149 if (buff[offset] == rec_type) 6156 if (buff[offset] == rec_type)
6150 return &buff[offset]; 6157 return &buff[offset];
6151 6158
6152 if (buff[offset] == LPFC_REGION23_LAST_REC) 6159 if (buff[offset] == LPFC_REGION23_LAST_REC)
6153 return NULL; 6160 return NULL;
6154 6161
6155 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t); 6162 offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
6156 rec_length = buff[offset + 1]; 6163 rec_length = buff[offset + 1];
6157 } 6164 }
6158 return NULL; 6165 return NULL;
6159 } 6166 }
6160 6167
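The scan above is byte-indexed over word-granular TLV records: buff[offset] holds the type, buff[offset + 1] the payload length in words, and each step advances (rec_length + 1) words. A minimal userspace sketch of the same walk over an invented two-record buffer (LAST_REC is a stand-in for LPFC_REGION23_LAST_REC, whose real value lives in the driver headers):

    #include <stdint.h>
    #include <stdio.h>

    #define LAST_REC 0xFF  /* stand-in terminator type */

    static uint8_t *get_rec(uint8_t *buff, uint32_t size, uint8_t rec_type)
    {
        uint32_t offset = 0, rec_length;

        if (buff[0] == LAST_REC || size < sizeof(uint32_t))
            return NULL;

        rec_length = buff[offset + 1];
        while (offset + (rec_length + 1) * sizeof(uint32_t) <= size) {
            if (buff[offset] == rec_type)
                return &buff[offset];
            if (buff[offset] == LAST_REC)
                return NULL;
            offset += (rec_length + 1) * sizeof(uint32_t);
            rec_length = buff[offset + 1];
        }
        return NULL;
    }

    int main(void)
    {
        /* Type 0xA0 with 1 data word, then type 0xB0 with 0 data words. */
        uint8_t buf[12] = { 0xA0, 1, 0, 0, 0xDE, 0xAD, 0xBE, 0xEF,
                            0xB0, 0, 0, 0 };
        uint8_t *rec = get_rec(buf, sizeof(buf), 0xB0);

        if (rec)
            printf("found at byte offset %td\n", rec - buf);
        return 0;
    }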
6161 /** 6168 /**
6162 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23. 6169 * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
6163 * @phba: Pointer to lpfc_hba data structure. 6170 * @phba: Pointer to lpfc_hba data structure.
6164 * @buff: Buffer containing config region 23 data. 6171 * @buff: Buffer containing config region 23 data.
6165 * @size: Size of the data buffer. 6172 * @size: Size of the data buffer.
6166 * 6173 *
6167 * This function parses the FCoE config parameters in config region 23 and 6174 * This function parses the FCoE config parameters in config region 23 and
6168 * populates the driver data structure with the parameters. 6175 * populates the driver data structure with the parameters.
6169 */ 6176 */
6170 void 6177 void
6171 lpfc_parse_fcoe_conf(struct lpfc_hba *phba, 6178 lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
6172 uint8_t *buff, 6179 uint8_t *buff,
6173 uint32_t size) 6180 uint32_t size)
6174 { 6181 {
6175 uint32_t offset = 0, rec_length; 6182 uint32_t offset = 0, rec_length;
6176 uint8_t *rec_ptr; 6183 uint8_t *rec_ptr;
6177 6184
6178 /* 6185 /*
6179 * If the data size is less than 2 words, the signature and version 6186 * If the data size is less than 2 words, the signature and version
6180 * cannot be verified. 6187 * cannot be verified.
6181 */ 6188 */
6182 if (size < 2*sizeof(uint32_t)) 6189 if (size < 2*sizeof(uint32_t))
6183 return; 6190 return;
6184 6191
6185 /* Check the region signature first */ 6192 /* Check the region signature first */
6186 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) { 6193 if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
6187 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6194 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6188 "2567 Config region 23 has bad signature\n"); 6195 "2567 Config region 23 has bad signature\n");
6189 return; 6196 return;
6190 } 6197 }
6191 6198
6192 offset += 4; 6199 offset += 4;
6193 6200
6194 /* Check the data structure version */ 6201 /* Check the data structure version */
6195 if (buff[offset] != LPFC_REGION23_VERSION) { 6202 if (buff[offset] != LPFC_REGION23_VERSION) {
6196 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 6203 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
6197 "2568 Config region 23 has bad version\n"); 6204 "2568 Config region 23 has bad version\n");
6198 return; 6205 return;
6199 } 6206 }
6200 offset += 4; 6207 offset += 4;
6201 6208
6202 rec_length = buff[offset + 1]; 6209 rec_length = buff[offset + 1];
6203 6210
6204 /* Read FCoE param record */ 6211 /* Read FCoE param record */
6205 rec_ptr = lpfc_get_rec_conf23(&buff[offset], 6212 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6206 size - offset, FCOE_PARAM_TYPE); 6213 size - offset, FCOE_PARAM_TYPE);
6207 if (rec_ptr) 6214 if (rec_ptr)
6208 lpfc_read_fcoe_param(phba, rec_ptr); 6215 lpfc_read_fcoe_param(phba, rec_ptr);
6209 6216
6210 /* Read FCF connection table */ 6217 /* Read FCF connection table */
6211 rec_ptr = lpfc_get_rec_conf23(&buff[offset], 6218 rec_ptr = lpfc_get_rec_conf23(&buff[offset],
6212 size - offset, FCOE_CONN_TBL_TYPE); 6219 size - offset, FCOE_CONN_TBL_TYPE);
6213 if (rec_ptr) 6220 if (rec_ptr)
6214 lpfc_read_fcf_conn_tbl(phba, rec_ptr); 6221 lpfc_read_fcf_conn_tbl(phba, rec_ptr);
6215 6222
6216 } 6223 }
6217 6224
drivers/scsi/lpfc/lpfc_mbox.c
1 /******************************************************************* 1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for * 2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. * 3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2009 Emulex. All rights reserved. * 4 * Copyright (C) 2004-2009 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. * 5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com * 6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig * 7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * * 8 * *
9 * This program is free software; you can redistribute it and/or * 9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General * 10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. * 11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. * 12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for * 17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING * 18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. * 19 * included with this package. *
20 *******************************************************************/ 20 *******************************************************************/
21 21
22 #include <linux/blkdev.h> 22 #include <linux/blkdev.h>
23 #include <linux/pci.h> 23 #include <linux/pci.h>
24 #include <linux/slab.h> 24 #include <linux/slab.h>
25 #include <linux/interrupt.h> 25 #include <linux/interrupt.h>
26 26
27 #include <scsi/scsi_device.h> 27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_transport_fc.h> 28 #include <scsi/scsi_transport_fc.h>
29 #include <scsi/scsi.h> 29 #include <scsi/scsi.h>
30 #include <scsi/fc/fc_fs.h> 30 #include <scsi/fc/fc_fs.h>
31 31
32 #include "lpfc_hw4.h" 32 #include "lpfc_hw4.h"
33 #include "lpfc_hw.h" 33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h" 34 #include "lpfc_sli.h"
35 #include "lpfc_sli4.h" 35 #include "lpfc_sli4.h"
36 #include "lpfc_nl.h" 36 #include "lpfc_nl.h"
37 #include "lpfc_disc.h" 37 #include "lpfc_disc.h"
38 #include "lpfc_scsi.h" 38 #include "lpfc_scsi.h"
39 #include "lpfc.h" 39 #include "lpfc.h"
40 #include "lpfc_logmsg.h" 40 #include "lpfc_logmsg.h"
41 #include "lpfc_crtn.h" 41 #include "lpfc_crtn.h"
42 #include "lpfc_compat.h" 42 #include "lpfc_compat.h"
43 43
44 /** 44 /**
45 * lpfc_dump_static_vport - Dump HBA's static vport information. 45 * lpfc_dump_static_vport - Dump HBA's static vport information.
46 * @phba: pointer to lpfc hba data structure. 46 * @phba: pointer to lpfc hba data structure.
47 * @pmb: pointer to the driver internal queue element for mailbox command. 47 * @pmb: pointer to the driver internal queue element for mailbox command.
48 * @offset: offset for dumping vport info. 48 * @offset: offset for dumping vport info.
49 * 49 *
50 * The dump mailbox command provides a method for the device driver to obtain 50 * The dump mailbox command provides a method for the device driver to obtain
51 * various types of information from the HBA device. 51 * various types of information from the HBA device.
52 * 52 *
53 * This routine prepares the mailbox command for dumping the list of static 53 * This routine prepares the mailbox command for dumping the list of static
54 * vports to be created. 54 * vports to be created.
55 **/ 55 **/
56 int 56 int
57 lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, 57 lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
58 uint16_t offset) 58 uint16_t offset)
59 { 59 {
60 MAILBOX_t *mb; 60 MAILBOX_t *mb;
61 struct lpfc_dmabuf *mp; 61 struct lpfc_dmabuf *mp;
62 62
63 mb = &pmb->u.mb; 63 mb = &pmb->u.mb;
64 64
65 /* Setup to dump vport info region */ 65 /* Setup to dump vport info region */
66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 66 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
67 mb->mbxCommand = MBX_DUMP_MEMORY; 67 mb->mbxCommand = MBX_DUMP_MEMORY;
68 mb->un.varDmp.type = DMP_NV_PARAMS; 68 mb->un.varDmp.type = DMP_NV_PARAMS;
69 mb->un.varDmp.entry_index = offset; 69 mb->un.varDmp.entry_index = offset;
70 mb->un.varDmp.region_id = DMP_REGION_VPORT; 70 mb->un.varDmp.region_id = DMP_REGION_VPORT;
71 mb->mbxOwner = OWN_HOST; 71 mb->mbxOwner = OWN_HOST;
72 72
73 /* For SLI3 HBAs data is embedded in mailbox */ 73 /* For SLI3 HBAs data is embedded in mailbox */
74 if (phba->sli_rev != LPFC_SLI_REV4) { 74 if (phba->sli_rev != LPFC_SLI_REV4) {
75 mb->un.varDmp.cv = 1; 75 mb->un.varDmp.cv = 1;
76 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t); 76 mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
77 return 0; 77 return 0;
78 } 78 }
79 79
80 /* For SLI4 HBAs the driver needs to allocate memory */ 80 /* For SLI4 HBAs the driver needs to allocate memory */
81 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL); 81 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
82 if (mp) 82 if (mp)
83 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 83 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
84 84
85 if (!mp || !mp->virt) { 85 if (!mp || !mp->virt) {
86 kfree(mp); 86 kfree(mp);
87 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX, 87 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
88 "2605 lpfc_dump_static_vport: memory" 88 "2605 lpfc_dump_static_vport: memory"
89 " allocation failed\n"); 89 " allocation failed\n");
90 return 1; 90 return 1;
91 } 91 }
92 memset(mp->virt, 0, LPFC_BPL_SIZE); 92 memset(mp->virt, 0, LPFC_BPL_SIZE);
93 INIT_LIST_HEAD(&mp->list); 93 INIT_LIST_HEAD(&mp->list);
94 /* save address for completion */ 94 /* save address for completion */
95 pmb->context2 = (uint8_t *) mp; 95 pmb->context1 = (uint8_t *)mp;
96 mb->un.varWords[3] = putPaddrLow(mp->phys); 96 mb->un.varWords[3] = putPaddrLow(mp->phys);
97 mb->un.varWords[4] = putPaddrHigh(mp->phys); 97 mb->un.varWords[4] = putPaddrHigh(mp->phys);
98 mb->un.varDmp.sli4_length = sizeof(struct static_vport_info); 98 mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
99 99
100 return 0; 100 return 0;
101 } 101 }
102 102
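This context1 save is the heart of the commit: every other buffer-carrying routine in this file (lpfc_read_topology, lpfc_read_sparam, lpfc_reg_rpi) parks its lpfc_dmabuf in context1, and the completion path frees the buffer from that slot, so the old line's use of context2 left the mbuf unreachable and leaked PCI DMA pool memory. A hedged sketch of the completion-side pairing, assuming a handler shaped like the driver's other mailbox completions (the handler name is illustrative, not the driver's):

    static void example_dump_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
    {
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->context1;

        if (mp) {
            lpfc_mbuf_free(phba, mp->virt, mp->phys);  /* back to the DMA pool */
            kfree(mp);
            pmb->context1 = NULL;
        }
        mempool_free(pmb, phba->mbox_mem_pool);
    }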
103 /** 103 /**
104 * lpfc_down_link - Bring down HBAs link. 104 * lpfc_down_link - Bring down HBAs link.
105 * @phba: pointer to lpfc hba data structure. 105 * @phba: pointer to lpfc hba data structure.
106 * @pmb: pointer to the driver internal queue element for mailbox command. 106 * @pmb: pointer to the driver internal queue element for mailbox command.
107 * 107 *
108 * This routine prepares a mailbox command to bring down HBA link. 108 * This routine prepares a mailbox command to bring down HBA link.
109 **/ 109 **/
110 void 110 void
111 lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 111 lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
112 { 112 {
113 MAILBOX_t *mb; 113 MAILBOX_t *mb;
114 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 114 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
115 mb = &pmb->u.mb; 115 mb = &pmb->u.mb;
116 mb->mbxCommand = MBX_DOWN_LINK; 116 mb->mbxCommand = MBX_DOWN_LINK;
117 mb->mbxOwner = OWN_HOST; 117 mb->mbxOwner = OWN_HOST;
118 } 118 }
119 119
120 /** 120 /**
121 * lpfc_dump_mem - Prepare a mailbox command for reading a region. 121 * lpfc_dump_mem - Prepare a mailbox command for reading a region.
122 * @phba: pointer to lpfc hba data structure. 122 * @phba: pointer to lpfc hba data structure.
123 * @pmb: pointer to the driver internal queue element for mailbox command. 123 * @pmb: pointer to the driver internal queue element for mailbox command.
124 * @offset: offset into the region. 124 * @offset: offset into the region.
125 * @region_id: config region id. 125 * @region_id: config region id.
126 * 126 *
127 * The dump mailbox command provides a method for the device driver to obtain 127 * The dump mailbox command provides a method for the device driver to obtain
128 * various types of information from the HBA device. 128 * various types of information from the HBA device.
129 * 129 *
130 * This routine prepares the mailbox command for dumping HBA's config region. 130 * This routine prepares the mailbox command for dumping HBA's config region.
131 **/ 131 **/
132 void 132 void
133 lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset, 133 lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
134 uint16_t region_id) 134 uint16_t region_id)
135 { 135 {
136 MAILBOX_t *mb; 136 MAILBOX_t *mb;
137 void *ctx; 137 void *ctx;
138 138
139 mb = &pmb->u.mb; 139 mb = &pmb->u.mb;
140 ctx = pmb->context2; 140 ctx = pmb->context2;
141 141
142 /* Setup to dump VPD region */ 142 /* Setup to dump VPD region */
143 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 143 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
144 mb->mbxCommand = MBX_DUMP_MEMORY; 144 mb->mbxCommand = MBX_DUMP_MEMORY;
145 mb->un.varDmp.cv = 1; 145 mb->un.varDmp.cv = 1;
146 mb->un.varDmp.type = DMP_NV_PARAMS; 146 mb->un.varDmp.type = DMP_NV_PARAMS;
147 mb->un.varDmp.entry_index = offset; 147 mb->un.varDmp.entry_index = offset;
148 mb->un.varDmp.region_id = region_id; 148 mb->un.varDmp.region_id = region_id;
149 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t)); 149 mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
150 mb->un.varDmp.co = 0; 150 mb->un.varDmp.co = 0;
151 mb->un.varDmp.resp_offset = 0; 151 mb->un.varDmp.resp_offset = 0;
152 pmb->context2 = ctx; 152 pmb->context2 = ctx;
153 mb->mbxOwner = OWN_HOST; 153 mb->mbxOwner = OWN_HOST;
154 return; 154 return;
155 } 155 }
156 156
157 /** 157 /**
158 * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params 158 * lpfc_dump_wakeup_param - Prepare mailbox command for retrieving wakeup params
159 * @phba: pointer to lpfc hba data structure. 159 * @phba: pointer to lpfc hba data structure.
160 * @pmb: pointer to the driver internal queue element for mailbox command. 160 * @pmb: pointer to the driver internal queue element for mailbox command.
161 * 161 *
162 * This function creates a dump memory mailbox command to dump wake-up 162 * This function creates a dump memory mailbox command to dump wake-up
163 * parameters. 163 * parameters.
164 */ 164 */
165 void 165 void
166 lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 166 lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
167 { 167 {
168 MAILBOX_t *mb; 168 MAILBOX_t *mb;
169 void *ctx; 169 void *ctx;
170 170
171 mb = &pmb->u.mb; 171 mb = &pmb->u.mb;
172 /* Save context so that we can restore after memset */ 172 /* Save context so that we can restore after memset */
173 ctx = pmb->context2; 173 ctx = pmb->context2;
174 174
175 /* Setup to dump VPD region */ 175 /* Setup to dump VPD region */
176 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 176 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
177 mb->mbxCommand = MBX_DUMP_MEMORY; 177 mb->mbxCommand = MBX_DUMP_MEMORY;
178 mb->mbxOwner = OWN_HOST; 178 mb->mbxOwner = OWN_HOST;
179 mb->un.varDmp.cv = 1; 179 mb->un.varDmp.cv = 1;
180 mb->un.varDmp.type = DMP_NV_PARAMS; 180 mb->un.varDmp.type = DMP_NV_PARAMS;
181 mb->un.varDmp.entry_index = 0; 181 mb->un.varDmp.entry_index = 0;
182 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID; 182 mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
183 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE; 183 mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
184 mb->un.varDmp.co = 0; 184 mb->un.varDmp.co = 0;
185 mb->un.varDmp.resp_offset = 0; 185 mb->un.varDmp.resp_offset = 0;
186 pmb->context2 = ctx; 186 pmb->context2 = ctx;
187 return; 187 return;
188 } 188 }
189 189
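The save/restore of context2 around the memset is worth calling out: memset() wipes the entire LPFC_MBOXQ_t, including any context pointer the caller has already attached, so the pointer is stashed first and put back afterwards. The same pattern appears in lpfc_dump_mem() above. Reduced to its essentials:

    void *ctx = pmb->context2;            /* caller-owned; must survive  */
    memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); /* zeroes every field, ctx too */
    /* ... fill in the mailbox fields ... */
    pmb->context2 = ctx;                  /* hand the pointer back       */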
190 /** 190 /**
191 * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param 191 * lpfc_read_nv - Prepare a mailbox command for reading HBA's NVRAM param
192 * @phba: pointer to lpfc hba data structure. 192 * @phba: pointer to lpfc hba data structure.
193 * @pmb: pointer to the driver internal queue element for mailbox command. 193 * @pmb: pointer to the driver internal queue element for mailbox command.
194 * 194 *
195 * The read NVRAM mailbox command returns the HBA's non-volatile parameters 195 * The read NVRAM mailbox command returns the HBA's non-volatile parameters
196 * that are used as defaults when the Fibre Channel link is brought on-line. 196 * that are used as defaults when the Fibre Channel link is brought on-line.
197 * 197 *
198 * This routine prepares the mailbox command for reading information stored 198 * This routine prepares the mailbox command for reading information stored
199 * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN. 199 * in the HBA's NVRAM. Specifically, the HBA's WWNN and WWPN.
200 **/ 200 **/
201 void 201 void
202 lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 202 lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
203 { 203 {
204 MAILBOX_t *mb; 204 MAILBOX_t *mb;
205 205
206 mb = &pmb->u.mb; 206 mb = &pmb->u.mb;
207 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 207 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
208 mb->mbxCommand = MBX_READ_NV; 208 mb->mbxCommand = MBX_READ_NV;
209 mb->mbxOwner = OWN_HOST; 209 mb->mbxOwner = OWN_HOST;
210 return; 210 return;
211 } 211 }
212 212
213 /** 213 /**
214 * lpfc_config_async - Prepare a mailbox command for enabling HBA async event 214 * lpfc_config_async - Prepare a mailbox command for enabling HBA async event
215 * @phba: pointer to lpfc hba data structure. 215 * @phba: pointer to lpfc hba data structure.
216 * @pmb: pointer to the driver internal queue element for mailbox command. 216 * @pmb: pointer to the driver internal queue element for mailbox command.
217 * @ring: ring number for the asynchronous event to be configured. 217 * @ring: ring number for the asynchronous event to be configured.
218 * 218 *
219 * The asynchronous event enable mailbox command is used to enable the 219 * The asynchronous event enable mailbox command is used to enable the
220 * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and 220 * asynchronous event posting via the ASYNC_STATUS_CN IOCB response and
221 * specifies the default ring to which events are posted. 221 * specifies the default ring to which events are posted.
222 * 222 *
223 * This routine prepares the mailbox command for enabling HBA asynchronous 223 * This routine prepares the mailbox command for enabling HBA asynchronous
224 * event support on a IOCB ring. 224 * event support on a IOCB ring.
225 **/ 225 **/
226 void 226 void
227 lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, 227 lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
228 uint32_t ring) 228 uint32_t ring)
229 { 229 {
230 MAILBOX_t *mb; 230 MAILBOX_t *mb;
231 231
232 mb = &pmb->u.mb; 232 mb = &pmb->u.mb;
233 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 233 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
234 mb->mbxCommand = MBX_ASYNCEVT_ENABLE; 234 mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
235 mb->un.varCfgAsyncEvent.ring = ring; 235 mb->un.varCfgAsyncEvent.ring = ring;
236 mb->mbxOwner = OWN_HOST; 236 mb->mbxOwner = OWN_HOST;
237 return; 237 return;
238 } 238 }
239 239
240 /** 240 /**
241 * lpfc_heart_beat - Prepare a mailbox command for heart beat 241 * lpfc_heart_beat - Prepare a mailbox command for heart beat
242 * @phba: pointer to lpfc hba data structure. 242 * @phba: pointer to lpfc hba data structure.
243 * @pmb: pointer to the driver internal queue element for mailbox command. 243 * @pmb: pointer to the driver internal queue element for mailbox command.
244 * 244 *
245 * The heart beat mailbox command is used to detect an unresponsive HBA, which 245 * The heart beat mailbox command is used to detect an unresponsive HBA, which
246 * is defined as any device where no error attention is sent and both mailbox 246 * is defined as any device where no error attention is sent and both mailbox
247 * and rings are not processed. 247 * and rings are not processed.
248 * 248 *
249 * This routine prepares the mailbox command for issuing a heart beat in the 249 * This routine prepares the mailbox command for issuing a heart beat in the
250 * form of mailbox command to the HBA. The timely completion of the heart 250 * form of mailbox command to the HBA. The timely completion of the heart
251 * beat mailbox command indicates the health of the HBA. 251 * beat mailbox command indicates the health of the HBA.
252 **/ 252 **/
253 void 253 void
254 lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 254 lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
255 { 255 {
256 MAILBOX_t *mb; 256 MAILBOX_t *mb;
257 257
258 mb = &pmb->u.mb; 258 mb = &pmb->u.mb;
259 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 259 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
260 mb->mbxCommand = MBX_HEARTBEAT; 260 mb->mbxCommand = MBX_HEARTBEAT;
261 mb->mbxOwner = OWN_HOST; 261 mb->mbxOwner = OWN_HOST;
262 return; 262 return;
263 } 263 }
264 264
265 /** 265 /**
266 * lpfc_read_topology - Prepare a mailbox command for reading HBA topology 266 * lpfc_read_topology - Prepare a mailbox command for reading HBA topology
267 * @phba: pointer to lpfc hba data structure. 267 * @phba: pointer to lpfc hba data structure.
268 * @pmb: pointer to the driver internal queue element for mailbox command. 268 * @pmb: pointer to the driver internal queue element for mailbox command.
269 * @mp: DMA buffer memory for reading the link attention information into. 269 * @mp: DMA buffer memory for reading the link attention information into.
270 * 270 *
271 * The read topology mailbox command is issued to read the link topology 271 * The read topology mailbox command is issued to read the link topology
272 * information indicated by the HBA port when the Link Event bit of the Host 272 * information indicated by the HBA port when the Link Event bit of the Host
273 * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link 273 * Attention (HSTATT) register is set to 1 (For SLI-3) or when an FC Link
274 * Attention ACQE is received from the port (For SLI-4). A Link Event 274 * Attention ACQE is received from the port (For SLI-4). A Link Event
275 * Attention occurs based on an exception detected at the Fibre Channel link 275 * Attention occurs based on an exception detected at the Fibre Channel link
276 * interface. 276 * interface.
277 * 277 *
278 * This routine prepares the mailbox command for reading HBA link topology 278 * This routine prepares the mailbox command for reading HBA link topology
279 * information. A DMA memory has been set aside and address passed to the 279 * information. A DMA memory has been set aside and address passed to the
280 * HBA through @mp for the HBA to DMA link attention information into the 280 * HBA through @mp for the HBA to DMA link attention information into the
281 * memory as part of the execution of the mailbox command. 281 * memory as part of the execution of the mailbox command.
282 * 282 *
283 * Return codes 283 * Return codes
284 * 0 - Success (currently always return 0) 284 * 0 - Success (currently always return 0)
285 **/ 285 **/
286 int 286 int
287 lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, 287 lpfc_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
288 struct lpfc_dmabuf *mp) 288 struct lpfc_dmabuf *mp)
289 { 289 {
290 MAILBOX_t *mb; 290 MAILBOX_t *mb;
291 struct lpfc_sli *psli; 291 struct lpfc_sli *psli;
292 292
293 psli = &phba->sli; 293 psli = &phba->sli;
294 mb = &pmb->u.mb; 294 mb = &pmb->u.mb;
295 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 295 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
296 296
297 INIT_LIST_HEAD(&mp->list); 297 INIT_LIST_HEAD(&mp->list);
298 mb->mbxCommand = MBX_READ_TOPOLOGY; 298 mb->mbxCommand = MBX_READ_TOPOLOGY;
299 mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE; 299 mb->un.varReadTop.lilpBde64.tus.f.bdeSize = LPFC_ALPA_MAP_SIZE;
300 mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys); 300 mb->un.varReadTop.lilpBde64.addrHigh = putPaddrHigh(mp->phys);
301 mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys); 301 mb->un.varReadTop.lilpBde64.addrLow = putPaddrLow(mp->phys);
302 302
303 /* Save address for later completion and set the owner to host so that 303 /* Save address for later completion and set the owner to host so that
304 * the FW knows this mailbox is available for processing. 304 * the FW knows this mailbox is available for processing.
305 */ 305 */
306 pmb->context1 = (uint8_t *)mp; 306 pmb->context1 = (uint8_t *)mp;
307 mb->mbxOwner = OWN_HOST; 307 mb->mbxOwner = OWN_HOST;
308 return (0); 308 return (0);
309 } 309 }
310 310
311 /** 311 /**
312 * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention 312 * lpfc_clear_la - Prepare a mailbox command for clearing HBA link attention
313 * @phba: pointer to lpfc hba data structure. 313 * @phba: pointer to lpfc hba data structure.
314 * @pmb: pointer to the driver internal queue element for mailbox command. 314 * @pmb: pointer to the driver internal queue element for mailbox command.
315 * 315 *
316 * The clear link attention mailbox command is issued to clear the link event 316 * The clear link attention mailbox command is issued to clear the link event
317 * attention condition indicated by the Link Event bit of the Host Attention 317 * attention condition indicated by the Link Event bit of the Host Attention
318 * (HSTATT) register. The link event attention condition is cleared only if 318 * (HSTATT) register. The link event attention condition is cleared only if
319 * the event tag specified matches that of the current link event counter. 319 * the event tag specified matches that of the current link event counter.
320 * The current event tag is read using the read link attention event mailbox 320 * The current event tag is read using the read link attention event mailbox
321 * command. 321 * command.
322 * 322 *
323 * This routine prepares the mailbox command for clearing HBA link attention 323 * This routine prepares the mailbox command for clearing HBA link attention
324 * information. 324 * information.
325 **/ 325 **/
326 void 326 void
327 lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 327 lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
328 { 328 {
329 MAILBOX_t *mb; 329 MAILBOX_t *mb;
330 330
331 mb = &pmb->u.mb; 331 mb = &pmb->u.mb;
332 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 332 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
333 333
334 mb->un.varClearLA.eventTag = phba->fc_eventTag; 334 mb->un.varClearLA.eventTag = phba->fc_eventTag;
335 mb->mbxCommand = MBX_CLEAR_LA; 335 mb->mbxCommand = MBX_CLEAR_LA;
336 mb->mbxOwner = OWN_HOST; 336 mb->mbxOwner = OWN_HOST;
337 return; 337 return;
338 } 338 }
339 339
340 /** 340 /**
341 * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA 341 * lpfc_config_link - Prepare a mailbox command for configuring link on a HBA
342 * @phba: pointer to lpfc hba data structure. 342 * @phba: pointer to lpfc hba data structure.
343 * @pmb: pointer to the driver internal queue element for mailbox command. 343 * @pmb: pointer to the driver internal queue element for mailbox command.
344 * 344 *
345 * The configure link mailbox command is used before the initialize link 345 * The configure link mailbox command is used before the initialize link
346 * mailbox command to override default values and to configure link-oriented 346 * mailbox command to override default values and to configure link-oriented
347 * parameters such as DID address and various timers. Typically, this 347 * parameters such as DID address and various timers. Typically, this
348 * command would be used after an F_Port login to set the returned DID address 348 * command would be used after an F_Port login to set the returned DID address
349 * and the fabric timeout values. This command is not valid before a configure 349 * and the fabric timeout values. This command is not valid before a configure
350 * port command has configured the HBA port. 350 * port command has configured the HBA port.
351 * 351 *
352 * This routine prepares the mailbox command for configuring link on a HBA. 352 * This routine prepares the mailbox command for configuring link on a HBA.
353 **/ 353 **/
354 void 354 void
355 lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 355 lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
356 { 356 {
357 struct lpfc_vport *vport = phba->pport; 357 struct lpfc_vport *vport = phba->pport;
358 MAILBOX_t *mb = &pmb->u.mb; 358 MAILBOX_t *mb = &pmb->u.mb;
359 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 359 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
360 360
361 /* NEW_FEATURE 361 /* NEW_FEATURE
362 * SLI-2, Coalescing Response Feature. 362 * SLI-2, Coalescing Response Feature.
363 */ 363 */
364 if (phba->cfg_cr_delay) { 364 if (phba->cfg_cr_delay) {
365 mb->un.varCfgLnk.cr = 1; 365 mb->un.varCfgLnk.cr = 1;
366 mb->un.varCfgLnk.ci = 1; 366 mb->un.varCfgLnk.ci = 1;
367 mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay; 367 mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
368 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count; 368 mb->un.varCfgLnk.cr_count = phba->cfg_cr_count;
369 } 369 }
370 370
371 mb->un.varCfgLnk.myId = vport->fc_myDID; 371 mb->un.varCfgLnk.myId = vport->fc_myDID;
372 mb->un.varCfgLnk.edtov = phba->fc_edtov; 372 mb->un.varCfgLnk.edtov = phba->fc_edtov;
373 mb->un.varCfgLnk.arbtov = phba->fc_arbtov; 373 mb->un.varCfgLnk.arbtov = phba->fc_arbtov;
374 mb->un.varCfgLnk.ratov = phba->fc_ratov; 374 mb->un.varCfgLnk.ratov = phba->fc_ratov;
375 mb->un.varCfgLnk.rttov = phba->fc_rttov; 375 mb->un.varCfgLnk.rttov = phba->fc_rttov;
376 mb->un.varCfgLnk.altov = phba->fc_altov; 376 mb->un.varCfgLnk.altov = phba->fc_altov;
377 mb->un.varCfgLnk.crtov = phba->fc_crtov; 377 mb->un.varCfgLnk.crtov = phba->fc_crtov;
378 mb->un.varCfgLnk.citov = phba->fc_citov; 378 mb->un.varCfgLnk.citov = phba->fc_citov;
379 379
380 if (phba->cfg_ack0) 380 if (phba->cfg_ack0)
381 mb->un.varCfgLnk.ack0_enable = 1; 381 mb->un.varCfgLnk.ack0_enable = 1;
382 382
383 mb->mbxCommand = MBX_CONFIG_LINK; 383 mb->mbxCommand = MBX_CONFIG_LINK;
384 mb->mbxOwner = OWN_HOST; 384 mb->mbxOwner = OWN_HOST;
385 return; 385 return;
386 } 386 }
387 387
388 /** 388 /**
389 * lpfc_config_msi - Prepare a mailbox command for configuring msi-x 389 * lpfc_config_msi - Prepare a mailbox command for configuring msi-x
390 * @phba: pointer to lpfc hba data structure. 390 * @phba: pointer to lpfc hba data structure.
391 * @pmb: pointer to the driver internal queue element for mailbox command. 391 * @pmb: pointer to the driver internal queue element for mailbox command.
392 * 392 *
393 * The configure MSI-X mailbox command is used to configure the HBA's SLI-3 393 * The configure MSI-X mailbox command is used to configure the HBA's SLI-3
394 * MSI-X multi-message interrupt vector association to interrupt attention 394 * MSI-X multi-message interrupt vector association to interrupt attention
395 * conditions. 395 * conditions.
396 * 396 *
397 * Return codes 397 * Return codes
398 * 0 - Success 398 * 0 - Success
399 * -EINVAL - Failure 399 * -EINVAL - Failure
400 **/ 400 **/
401 int 401 int
402 lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 402 lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
403 { 403 {
404 MAILBOX_t *mb = &pmb->u.mb; 404 MAILBOX_t *mb = &pmb->u.mb;
405 uint32_t attentionConditions[2]; 405 uint32_t attentionConditions[2];
406 406
407 /* Sanity check */ 407 /* Sanity check */
408 if (phba->cfg_use_msi != 2) { 408 if (phba->cfg_use_msi != 2) {
409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 409 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
410 "0475 Not configured for supporting MSI-X " 410 "0475 Not configured for supporting MSI-X "
411 "cfg_use_msi: 0x%x\n", phba->cfg_use_msi); 411 "cfg_use_msi: 0x%x\n", phba->cfg_use_msi);
412 return -EINVAL; 412 return -EINVAL;
413 } 413 }
414 414
415 if (phba->sli_rev < 3) { 415 if (phba->sli_rev < 3) {
416 lpfc_printf_log(phba, KERN_ERR, LOG_INIT, 416 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
417 "0476 HBA not supporting SLI-3 or later " 417 "0476 HBA not supporting SLI-3 or later "
418 "SLI Revision: 0x%x\n", phba->sli_rev); 418 "SLI Revision: 0x%x\n", phba->sli_rev);
419 return -EINVAL; 419 return -EINVAL;
420 } 420 }
421 421
422 /* Clear mailbox command fields */ 422 /* Clear mailbox command fields */
423 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 423 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
424 424
425 /* 425 /*
426 * SLI-3, Message Signaled Interrupt Feature. 426 * SLI-3, Message Signaled Interrupt Feature.
427 */ 427 */
428 428
429 /* Multi-message attention configuration */ 429 /* Multi-message attention configuration */
430 attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT | 430 attentionConditions[0] = (HA_R0ATT | HA_R1ATT | HA_R2ATT | HA_ERATT |
431 HA_LATT | HA_MBATT); 431 HA_LATT | HA_MBATT);
432 attentionConditions[1] = 0; 432 attentionConditions[1] = 0;
433 433
434 mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0]; 434 mb->un.varCfgMSI.attentionConditions[0] = attentionConditions[0];
435 mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1]; 435 mb->un.varCfgMSI.attentionConditions[1] = attentionConditions[1];
436 436
437 /* 437 /*
438 * Set up message number to HA bit association 438 * Set up message number to HA bit association
439 */ 439 */
440 #ifdef __BIG_ENDIAN_BITFIELD 440 #ifdef __BIG_ENDIAN_BITFIELD
441 /* RA0 (FCP Ring) */ 441 /* RA0 (FCP Ring) */
442 mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1; 442 mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS] = 1;
443 /* RA1 (Other Protocol Extra Ring) */ 443 /* RA1 (Other Protocol Extra Ring) */
444 mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1; 444 mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS] = 1;
445 #else /* __LITTLE_ENDIAN_BITFIELD */ 445 #else /* __LITTLE_ENDIAN_BITFIELD */
446 /* RA0 (FCP Ring) */ 446 /* RA0 (FCP Ring) */
447 mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1; 447 mb->un.varCfgMSI.messageNumberByHA[HA_R0_POS^3] = 1;
448 /* RA1 (Other Protocol Extra Ring) */ 448 /* RA1 (Other Protocol Extra Ring) */
449 mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1; 449 mb->un.varCfgMSI.messageNumberByHA[HA_R1_POS^3] = 1;
450 #endif 450 #endif
451 /* Multi-message interrupt autoclear configuration*/ 451 /* Multi-message interrupt autoclear configuration*/
452 mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0]; 452 mb->un.varCfgMSI.autoClearHA[0] = attentionConditions[0];
453 mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1]; 453 mb->un.varCfgMSI.autoClearHA[1] = attentionConditions[1];
454 454
455 /* For now, HBA autoclear does not work reliably, disable it */ 455 /* For now, HBA autoclear does not work reliably, disable it */
456 mb->un.varCfgMSI.autoClearHA[0] = 0; 456 mb->un.varCfgMSI.autoClearHA[0] = 0;
457 mb->un.varCfgMSI.autoClearHA[1] = 0; 457 mb->un.varCfgMSI.autoClearHA[1] = 0;
458 458
459 /* Set command and owner bit */ 459 /* Set command and owner bit */
460 mb->mbxCommand = MBX_CONFIG_MSI; 460 mb->mbxCommand = MBX_CONFIG_MSI;
461 mb->mbxOwner = OWN_HOST; 461 mb->mbxOwner = OWN_HOST;
462 462
463 return 0; 463 return 0;
464 } 464 }
465 465
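The `^ 3` in the little-endian branch is a byte-lane swizzle: the HA_R*_POS positions number bytes within a 32-bit word as seen big-endian, and XOR-ing an index with 3 maps that numbering onto a little-endian layout (0 and 3 swap, 1 and 2 swap). A standalone demonstration on a little-endian host:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        union { uint32_t w; uint8_t b[4]; } u = { .w = 0 };
        int be_pos = 0;             /* "byte 0" in big-endian numbering */

        u.b[be_pos ^ 3] = 0xAA;     /* lands in b[3]: the high byte     */
        printf("word = 0x%08X\n", (unsigned)u.w);  /* prints 0xAA000000 */
        return 0;
    }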
466 /** 466 /**
467 * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA 467 * lpfc_init_link - Prepare a mailbox command for initialize link on a HBA
468 * @phba: pointer to lpfc hba data structure. 468 * @phba: pointer to lpfc hba data structure.
469 * @pmb: pointer to the driver internal queue element for mailbox command. 469 * @pmb: pointer to the driver internal queue element for mailbox command.
470 * @topology: the link topology for the link to be initialized to. 470 * @topology: the link topology for the link to be initialized to.
471 * @linkspeed: the link speed for the link to be initialized to. 471 * @linkspeed: the link speed for the link to be initialized to.
472 * 472 *
473 * The initialize link mailbox command is used to initialize the Fibre 473 * The initialize link mailbox command is used to initialize the Fibre
474 * Channel link. This command must follow a configure port command that 474 * Channel link. This command must follow a configure port command that
475 * establishes the mode of operation. 475 * establishes the mode of operation.
476 * 476 *
477 * This routine prepares the mailbox command for initializing link on a HBA 477 * This routine prepares the mailbox command for initializing link on a HBA
478 * with the specified link topology and speed. 478 * with the specified link topology and speed.
479 **/ 479 **/
480 void 480 void
481 lpfc_init_link(struct lpfc_hba * phba, 481 lpfc_init_link(struct lpfc_hba * phba,
482 LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed) 482 LPFC_MBOXQ_t * pmb, uint32_t topology, uint32_t linkspeed)
483 { 483 {
484 lpfc_vpd_t *vpd; 484 lpfc_vpd_t *vpd;
485 struct lpfc_sli *psli; 485 struct lpfc_sli *psli;
486 MAILBOX_t *mb; 486 MAILBOX_t *mb;
487 487
488 mb = &pmb->u.mb; 488 mb = &pmb->u.mb;
489 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 489 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
490 490
491 psli = &phba->sli; 491 psli = &phba->sli;
492 switch (topology) { 492 switch (topology) {
493 case FLAGS_TOPOLOGY_MODE_LOOP_PT: 493 case FLAGS_TOPOLOGY_MODE_LOOP_PT:
494 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; 494 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
495 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; 495 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
496 break; 496 break;
497 case FLAGS_TOPOLOGY_MODE_PT_PT: 497 case FLAGS_TOPOLOGY_MODE_PT_PT:
498 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; 498 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
499 break; 499 break;
500 case FLAGS_TOPOLOGY_MODE_LOOP: 500 case FLAGS_TOPOLOGY_MODE_LOOP:
501 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP; 501 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_LOOP;
502 break; 502 break;
503 case FLAGS_TOPOLOGY_MODE_PT_LOOP: 503 case FLAGS_TOPOLOGY_MODE_PT_LOOP:
504 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; 504 mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT;
505 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER; 505 mb->un.varInitLnk.link_flags |= FLAGS_TOPOLOGY_FAILOVER;
506 break; 506 break;
507 case FLAGS_LOCAL_LB: 507 case FLAGS_LOCAL_LB:
508 mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB; 508 mb->un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
509 break; 509 break;
510 } 510 }
511 511
512 /* Enable asynchronous ABTS responses from firmware */ 512 /* Enable asynchronous ABTS responses from firmware */
513 mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT; 513 mb->un.varInitLnk.link_flags |= FLAGS_IMED_ABORT;
514 514
515 /* NEW_FEATURE 515 /* NEW_FEATURE
516 * Setting up the link speed 516 * Setting up the link speed
517 */ 517 */
518 vpd = &phba->vpd; 518 vpd = &phba->vpd;
519 if (vpd->rev.feaLevelHigh >= 0x02){ 519 if (vpd->rev.feaLevelHigh >= 0x02){
520 switch(linkspeed){ 520 switch(linkspeed){
521 case LPFC_USER_LINK_SPEED_1G: 521 case LPFC_USER_LINK_SPEED_1G:
522 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 522 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
523 mb->un.varInitLnk.link_speed = LINK_SPEED_1G; 523 mb->un.varInitLnk.link_speed = LINK_SPEED_1G;
524 break; 524 break;
525 case LPFC_USER_LINK_SPEED_2G: 525 case LPFC_USER_LINK_SPEED_2G:
526 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 526 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
527 mb->un.varInitLnk.link_speed = LINK_SPEED_2G; 527 mb->un.varInitLnk.link_speed = LINK_SPEED_2G;
528 break; 528 break;
529 case LPFC_USER_LINK_SPEED_4G: 529 case LPFC_USER_LINK_SPEED_4G:
530 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 530 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
531 mb->un.varInitLnk.link_speed = LINK_SPEED_4G; 531 mb->un.varInitLnk.link_speed = LINK_SPEED_4G;
532 break; 532 break;
533 case LPFC_USER_LINK_SPEED_8G: 533 case LPFC_USER_LINK_SPEED_8G:
534 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 534 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
535 mb->un.varInitLnk.link_speed = LINK_SPEED_8G; 535 mb->un.varInitLnk.link_speed = LINK_SPEED_8G;
536 break; 536 break;
537 case LPFC_USER_LINK_SPEED_10G: 537 case LPFC_USER_LINK_SPEED_10G:
538 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 538 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
539 mb->un.varInitLnk.link_speed = LINK_SPEED_10G; 539 mb->un.varInitLnk.link_speed = LINK_SPEED_10G;
540 break; 540 break;
541 case LPFC_USER_LINK_SPEED_16G: 541 case LPFC_USER_LINK_SPEED_16G:
542 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED; 542 mb->un.varInitLnk.link_flags |= FLAGS_LINK_SPEED;
543 mb->un.varInitLnk.link_speed = LINK_SPEED_16G; 543 mb->un.varInitLnk.link_speed = LINK_SPEED_16G;
544 break; 544 break;
545 case LPFC_USER_LINK_SPEED_AUTO: 545 case LPFC_USER_LINK_SPEED_AUTO:
546 default: 546 default:
547 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; 547 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
548 break; 548 break;
549 } 549 }
550 550
551 } 551 }
552 else 552 else
553 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO; 553 mb->un.varInitLnk.link_speed = LINK_SPEED_AUTO;
554 554
555 mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK; 555 mb->mbxCommand = (volatile uint8_t)MBX_INIT_LINK;
556 mb->mbxOwner = OWN_HOST; 556 mb->mbxOwner = OWN_HOST;
557 mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA; 557 mb->un.varInitLnk.fabric_AL_PA = phba->fc_pref_ALPA;
558 return; 558 return;
559 } 559 }
560 560
561 /** 561 /**
562 * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters 562 * lpfc_read_sparam - Prepare a mailbox command for reading HBA parameters
563 * @phba: pointer to lpfc hba data structure. 563 * @phba: pointer to lpfc hba data structure.
564 * @pmb: pointer to the driver internal queue element for mailbox command. 564 * @pmb: pointer to the driver internal queue element for mailbox command.
565 * @vpi: virtual N_Port identifier. 565 * @vpi: virtual N_Port identifier.
566 * 566 *
567 * The read service parameter mailbox command is used to read the HBA port 567 * The read service parameter mailbox command is used to read the HBA port
568 * service parameters. The service parameters are read into the buffer 568 * service parameters. The service parameters are read into the buffer
569 * specified directly by a BDE in the mailbox command. These service 569 * specified directly by a BDE in the mailbox command. These service
570 * parameters may then be used to build the payload of an N_Port/F_Port 570 * parameters may then be used to build the payload of an N_Port/F_Port
571 * login request and reply (LOGI/ACC). 571 * login request and reply (LOGI/ACC).
572 * 572 *
573 * This routine prepares the mailbox command for reading HBA port service 573 * This routine prepares the mailbox command for reading HBA port service
574 * parameters. The DMA memory is allocated in this function and the addresses 574 * parameters. The DMA memory is allocated in this function and the addresses
575 * are populated into the mailbox command for the HBA to DMA the service 575 * are populated into the mailbox command for the HBA to DMA the service
576 * parameters into. 576 * parameters into.
577 * 577 *
578 * Return codes 578 * Return codes
579 * 0 - Success 579 * 0 - Success
580 * 1 - DMA memory allocation failed 580 * 1 - DMA memory allocation failed
581 **/ 581 **/
582 int 582 int
583 lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi) 583 lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
584 { 584 {
585 struct lpfc_dmabuf *mp; 585 struct lpfc_dmabuf *mp;
586 MAILBOX_t *mb; 586 MAILBOX_t *mb;
587 struct lpfc_sli *psli; 587 struct lpfc_sli *psli;
588 588
589 psli = &phba->sli; 589 psli = &phba->sli;
590 mb = &pmb->u.mb; 590 mb = &pmb->u.mb;
591 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 591 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
592 592
593 mb->mbxOwner = OWN_HOST; 593 mb->mbxOwner = OWN_HOST;
594 594
595 /* Get a buffer to hold the HBAs Service Parameters */ 595 /* Get a buffer to hold the HBAs Service Parameters */
596 596
597 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 597 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
598 if (mp) 598 if (mp)
599 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 599 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
600 if (!mp || !mp->virt) { 600 if (!mp || !mp->virt) {
601 kfree(mp); 601 kfree(mp);
602 mb->mbxCommand = MBX_READ_SPARM64; 602 mb->mbxCommand = MBX_READ_SPARM64;
603 /* READ_SPARAM: no buffers */ 603 /* READ_SPARAM: no buffers */
604 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 604 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
605 "0301 READ_SPARAM: no buffers\n"); 605 "0301 READ_SPARAM: no buffers\n");
606 return (1); 606 return (1);
607 } 607 }
608 INIT_LIST_HEAD(&mp->list); 608 INIT_LIST_HEAD(&mp->list);
609 mb->mbxCommand = MBX_READ_SPARM64; 609 mb->mbxCommand = MBX_READ_SPARM64;
610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 610 mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys); 611 mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys); 612 mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
613 if (phba->sli_rev >= LPFC_SLI_REV3) 613 if (phba->sli_rev >= LPFC_SLI_REV3)
614 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi]; 614 mb->un.varRdSparm.vpi = phba->vpi_ids[vpi];
615 615
616 /* save address for completion */ 616 /* save address for completion */
617 pmb->context1 = mp; 617 pmb->context1 = mp;
618 618
619 return (0); 619 return (0);
620 } 620 }
621 621
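The two return codes define the caller's cleanup duties: on 1 no DMA buffer was ever attached, so only the mailbox element needs freeing; on 0 the buffer rides in context1 until the completion runs. A hedged caller sketch (my_sparam_cmpl is a hypothetical completion, and vpi 0 stands in for a real virtual port index):

    LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
    if (!pmb)
        return -ENOMEM;
    if (lpfc_read_sparam(phba, pmb, 0)) {
        /* rc 1: no DMA buffer attached, only the mailbox to free */
        mempool_free(pmb, phba->mbox_mem_pool);
        return -ENOMEM;
    }
    pmb->mbox_cmpl = my_sparam_cmpl;  /* must free the context1 buffer */
    if (lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT) == MBX_NOT_FINISHED) {
        /* completion will not run: unwind context1 and the mailbox here */
    }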
622 /** 622 /**
623 * lpfc_unreg_did - Prepare a mailbox command for unregistering DID 623 * lpfc_unreg_did - Prepare a mailbox command for unregistering DID
624 * @phba: pointer to lpfc hba data structure. 624 * @phba: pointer to lpfc hba data structure.
625 * @vpi: virtual N_Port identifier. 625 * @vpi: virtual N_Port identifier.
626 * @did: remote port identifier. 626 * @did: remote port identifier.
627 * @pmb: pointer to the driver internal queue element for mailbox command. 627 * @pmb: pointer to the driver internal queue element for mailbox command.
628 * 628 *
629 * The unregister DID mailbox command is used to unregister an N_Port/F_Port 629 * The unregister DID mailbox command is used to unregister an N_Port/F_Port
630 * login for an unknown RPI by specifying the DID of a remote port. This 630 * login for an unknown RPI by specifying the DID of a remote port. This
631 * command frees an RPI context in the HBA port. This has the effect of 631 * command frees an RPI context in the HBA port. This has the effect of
632 * performing an implicit N_Port/F_Port logout. 632 * performing an implicit N_Port/F_Port logout.
633 * 633 *
634 * This routine prepares the mailbox command for unregistering a remote 634 * This routine prepares the mailbox command for unregistering a remote
635 * N_Port/F_Port (DID) login. 635 * N_Port/F_Port (DID) login.
636 **/ 636 **/
637 void 637 void
638 lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did, 638 lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
639 LPFC_MBOXQ_t * pmb) 639 LPFC_MBOXQ_t * pmb)
640 { 640 {
641 MAILBOX_t *mb; 641 MAILBOX_t *mb;
642 642
643 mb = &pmb->u.mb; 643 mb = &pmb->u.mb;
644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 644 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
645 645
646 mb->un.varUnregDID.did = did; 646 mb->un.varUnregDID.did = did;
647 mb->un.varUnregDID.vpi = vpi; 647 mb->un.varUnregDID.vpi = vpi;
648 if ((vpi != 0xffff) && 648 if ((vpi != 0xffff) &&
649 (phba->sli_rev == LPFC_SLI_REV4)) 649 (phba->sli_rev == LPFC_SLI_REV4))
650 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi]; 650 mb->un.varUnregDID.vpi = phba->vpi_ids[vpi];
651 651
652 mb->mbxCommand = MBX_UNREG_D_ID; 652 mb->mbxCommand = MBX_UNREG_D_ID;
653 mb->mbxOwner = OWN_HOST; 653 mb->mbxOwner = OWN_HOST;
654 return; 654 return;
655 } 655 }
656 656
657 /** 657 /**
658 * lpfc_read_config - Prepare a mailbox command for reading HBA configuration 658 * lpfc_read_config - Prepare a mailbox command for reading HBA configuration
659 * @phba: pointer to lpfc hba data structure. 659 * @phba: pointer to lpfc hba data structure.
660 * @pmb: pointer to the driver internal queue element for mailbox command. 660 * @pmb: pointer to the driver internal queue element for mailbox command.
661 * 661 *
662 * The read configuration mailbox command is used to read the HBA port 662 * The read configuration mailbox command is used to read the HBA port
663 * configuration parameters. This mailbox command provides a method for 663 * configuration parameters. This mailbox command provides a method for
664 * seeing any parameters that may have changed via various configuration 664 * seeing any parameters that may have changed via various configuration
665 * mailbox commands. 665 * mailbox commands.
666 * 666 *
667 * This routine prepares the mailbox command for reading out HBA configuration 667 * This routine prepares the mailbox command for reading out HBA configuration
668 * parameters. 668 * parameters.
669 **/ 669 **/
670 void 670 void
671 lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 671 lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
672 { 672 {
673 MAILBOX_t *mb; 673 MAILBOX_t *mb;
674 674
675 mb = &pmb->u.mb; 675 mb = &pmb->u.mb;
676 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 676 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
677 677
678 mb->mbxCommand = MBX_READ_CONFIG; 678 mb->mbxCommand = MBX_READ_CONFIG;
679 mb->mbxOwner = OWN_HOST; 679 mb->mbxOwner = OWN_HOST;
680 return; 680 return;
681 } 681 }
682 682
683 /** 683 /**
684 * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats 684 * lpfc_read_lnk_stat - Prepare a mailbox command for reading HBA link stats
685 * @phba: pointer to lpfc hba data structure. 685 * @phba: pointer to lpfc hba data structure.
686 * @pmb: pointer to the driver internal queue element for mailbox command. 686 * @pmb: pointer to the driver internal queue element for mailbox command.
687 * 687 *
688 * The read link status mailbox command is used to read the link status from 688 * The read link status mailbox command is used to read the link status from
689 * the HBA. Link status includes all link-related error counters. These 689 * the HBA. Link status includes all link-related error counters. These
690 * counters are maintained by the HBA and originated in the link hardware 690 * counters are maintained by the HBA and originated in the link hardware
691 * unit. Note that all of these counters wrap. 691 * unit. Note that all of these counters wrap.
692 * 692 *
693 * This routine prepares the mailbox command for reading out HBA link status. 693 * This routine prepares the mailbox command for reading out HBA link status.
694 **/ 694 **/
695 void 695 void
696 lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 696 lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
697 { 697 {
698 MAILBOX_t *mb; 698 MAILBOX_t *mb;
699 699
700 mb = &pmb->u.mb; 700 mb = &pmb->u.mb;
701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 701 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
702 702
703 mb->mbxCommand = MBX_READ_LNK_STAT; 703 mb->mbxCommand = MBX_READ_LNK_STAT;
704 mb->mbxOwner = OWN_HOST; 704 mb->mbxOwner = OWN_HOST;
705 return; 705 return;
706 } 706 }
707 707
708 /** 708 /**
709 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login 709 * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
710 * @phba: pointer to lpfc hba data structure. 710 * @phba: pointer to lpfc hba data structure.
711 * @vpi: virtual N_Port identifier. 711 * @vpi: virtual N_Port identifier.
712 * @did: remote port identifier. 712 * @did: remote port identifier.
713 * @param: pointer to memory holding the server parameters. 713 * @param: pointer to memory holding the server parameters.
714 * @pmb: pointer to the driver internal queue element for mailbox command. 714 * @pmb: pointer to the driver internal queue element for mailbox command.
715 * @rpi: the rpi to use in the registration (usually only used for SLI4). 715 * @rpi: the rpi to use in the registration (usually only used for SLI4).
716 * 716 *
717 * The registration login mailbox command is used to register an N_Port or 717 * The registration login mailbox command is used to register an N_Port or
718 * F_Port login. This registration allows the HBA to cache the remote N_Port 718 * F_Port login. This registration allows the HBA to cache the remote N_Port
719 * service parameters internally and thereby make the appropriate FC-2 719 * service parameters internally and thereby make the appropriate FC-2
720 * decisions. The remote port service parameters are handed off by the driver 720 * decisions. The remote port service parameters are handed off by the driver
721 * to the HBA using a descriptor entry that directly identifies a buffer in 721 * to the HBA using a descriptor entry that directly identifies a buffer in
722 * host memory. In exchange, the HBA returns an RPI identifier. 722 * host memory. In exchange, the HBA returns an RPI identifier.
723 * 723 *
724 * This routine prepares the mailbox command for registering remote port login. 724 * This routine prepares the mailbox command for registering remote port login.
725 * The function allocates DMA buffer for passing the service parameters to the 725 * The function allocates DMA buffer for passing the service parameters to the
726 * HBA with the mailbox command. 726 * HBA with the mailbox command.
727 * 727 *
728 * Return codes 728 * Return codes
729 * 0 - Success 729 * 0 - Success
730 * 1 - DMA memory allocation failed 730 * 1 - DMA memory allocation failed
731 **/ 731 **/
732 int 732 int
733 lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did, 733 lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
734 uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi) 734 uint8_t *param, LPFC_MBOXQ_t *pmb, uint16_t rpi)
735 { 735 {
736 MAILBOX_t *mb = &pmb->u.mb; 736 MAILBOX_t *mb = &pmb->u.mb;
737 uint8_t *sparam; 737 uint8_t *sparam;
738 struct lpfc_dmabuf *mp; 738 struct lpfc_dmabuf *mp;
739 739
740 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 740 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
741 741
742 mb->un.varRegLogin.rpi = 0; 742 mb->un.varRegLogin.rpi = 0;
743 if (phba->sli_rev == LPFC_SLI_REV4) 743 if (phba->sli_rev == LPFC_SLI_REV4)
744 mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi]; 744 mb->un.varRegLogin.rpi = phba->sli4_hba.rpi_ids[rpi];
745 if (phba->sli_rev >= LPFC_SLI_REV3) 745 if (phba->sli_rev >= LPFC_SLI_REV3)
746 mb->un.varRegLogin.vpi = phba->vpi_ids[vpi]; 746 mb->un.varRegLogin.vpi = phba->vpi_ids[vpi];
747 mb->un.varRegLogin.did = did; 747 mb->un.varRegLogin.did = did;
748 mb->mbxOwner = OWN_HOST; 748 mb->mbxOwner = OWN_HOST;
749 /* Get a buffer to hold NPorts Service Parameters */ 749 /* Get a buffer to hold NPorts Service Parameters */
750 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL); 750 mp = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
751 if (mp) 751 if (mp)
752 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys); 752 mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
753 if (!mp || !mp->virt) { 753 if (!mp || !mp->virt) {
754 kfree(mp); 754 kfree(mp);
755 mb->mbxCommand = MBX_REG_LOGIN64; 755 mb->mbxCommand = MBX_REG_LOGIN64;
756 /* REG_LOGIN: no buffers */ 756 /* REG_LOGIN: no buffers */
757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX, 757 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, " 758 "0302 REG_LOGIN: no buffers, VPI:%d DID:x%x, "
759 "rpi x%x\n", vpi, did, rpi); 759 "rpi x%x\n", vpi, did, rpi);
760 return 1; 760 return 1;
761 } 761 }
762 INIT_LIST_HEAD(&mp->list); 762 INIT_LIST_HEAD(&mp->list);
763 sparam = mp->virt; 763 sparam = mp->virt;
764 764
765 /* Copy params into a new buffer */ 765 /* Copy params into a new buffer */
766 memcpy(sparam, param, sizeof (struct serv_parm)); 766 memcpy(sparam, param, sizeof (struct serv_parm));
767 767
768 /* save address for completion */ 768 /* save address for completion */
769 pmb->context1 = (uint8_t *) mp; 769 pmb->context1 = (uint8_t *) mp;
770 770
771 mb->mbxCommand = MBX_REG_LOGIN64; 771 mb->mbxCommand = MBX_REG_LOGIN64;
772 mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm); 772 mb->un.varRegLogin.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys); 773 mb->un.varRegLogin.un.sp64.addrHigh = putPaddrHigh(mp->phys);
774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys); 774 mb->un.varRegLogin.un.sp64.addrLow = putPaddrLow(mp->phys);
775 775
776 return 0; 776 return 0;
777 } 777 }
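/*
 * Completion-side sketch (illustrative, not part of this file): the
 * service-parameter DMA buffer saved in pmb->context1 above belongs to
 * the mailbox until completion. If the completion handler does not
 * return it to the mbuf pool, the pci dma pool leaks; a handler is
 * expected to do roughly the following before freeing the mailbox.
 */
static void example_reg_rpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)pmb->context1;

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		pmb->context1 = NULL;
	}
	mempool_free(pmb, phba->mbox_mem_pool);
}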
778 778
779 /** 779 /**
780 * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login 780 * lpfc_unreg_login - Prepare a mailbox command for unregistering remote login
781 * @phba: pointer to lpfc hba data structure. 781 * @phba: pointer to lpfc hba data structure.
782 * @vpi: virtual N_Port identifier. 782 * @vpi: virtual N_Port identifier.
783 * @rpi: remote port identifier 783 * @rpi: remote port identifier
784 * @pmb: pointer to the driver internal queue element for mailbox command. 784 * @pmb: pointer to the driver internal queue element for mailbox command.
785 * 785 *
786 * The unregistration login mailbox command is used to unregister an N_Port 786 * The unregistration login mailbox command is used to unregister an N_Port
787 * or F_Port login. This command frees an RPI context in the HBA. It has the 787 * or F_Port login. This command frees an RPI context in the HBA. It has the
788 * effect of performing an implicit N_Port/F_Port logout. 788 * effect of performing an implicit N_Port/F_Port logout.
789 * 789 *
790 * This routine prepares the mailbox command for unregistering remote port 790 * This routine prepares the mailbox command for unregistering remote port
791 * login. 791 * login.
792 * 792 *
793 * For SLI4 ports, the rpi passed to this function must be the physical 793 * For SLI4 ports, the rpi passed to this function must be the physical
794 * rpi value, not the logical index. 794 * rpi value, not the logical index.
795 **/ 795 **/
796 void 796 void
797 lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi, 797 lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
798 LPFC_MBOXQ_t * pmb) 798 LPFC_MBOXQ_t * pmb)
799 { 799 {
800 MAILBOX_t *mb; 800 MAILBOX_t *mb;
801 801
802 mb = &pmb->u.mb; 802 mb = &pmb->u.mb;
803 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 803 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
804 804
805 mb->un.varUnregLogin.rpi = rpi; 805 mb->un.varUnregLogin.rpi = rpi;
806 mb->un.varUnregLogin.rsvd1 = 0; 806 mb->un.varUnregLogin.rsvd1 = 0;
807 if (phba->sli_rev >= LPFC_SLI_REV3) 807 if (phba->sli_rev >= LPFC_SLI_REV3)
808 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi]; 808 mb->un.varUnregLogin.vpi = phba->vpi_ids[vpi];
809 809
810 mb->mbxCommand = MBX_UNREG_LOGIN; 810 mb->mbxCommand = MBX_UNREG_LOGIN;
811 mb->mbxOwner = OWN_HOST; 811 mb->mbxOwner = OWN_HOST;
812 812
813 return; 813 return;
814 } 814 }
815 815
816 /** 816 /**
817 * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA. 817 * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
818 * @vport: pointer to a vport object. 818 * @vport: pointer to a vport object.
819 * 819 *
820 * This routine sends a mailbox command to unregister all active RPIs for 820 * This routine sends a mailbox command to unregister all active RPIs for
821 * a vport. 821 * a vport.
822 **/ 822 **/
823 void 823 void
824 lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport) 824 lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
825 { 825 {
826 struct lpfc_hba *phba = vport->phba; 826 struct lpfc_hba *phba = vport->phba;
827 LPFC_MBOXQ_t *mbox; 827 LPFC_MBOXQ_t *mbox;
828 int rc; 828 int rc;
829 829
830 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 830 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
831 if (mbox) { 831 if (mbox) {
832 /* 832 /*
833 * For SLI4 functions, the rpi field is overloaded for 833 * For SLI4 functions, the rpi field is overloaded for
834 * the vport context unreg all. This routine passes 834 * the vport context unreg all. This routine passes
835 * 0 for the rpi field in lpfc_unreg_login for compatibility 835 * 0 for the rpi field in lpfc_unreg_login for compatibility
836 * with SLI3 and then overrides the rpi field with the 836 * with SLI3 and then overrides the rpi field with the
837 * expected value for SLI4. 837 * expected value for SLI4.
838 */ 838 */
839 lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi], 839 lpfc_unreg_login(phba, vport->vpi, phba->vpi_ids[vport->vpi],
840 mbox); 840 mbox);
841 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000; 841 mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
842 mbox->vport = vport; 842 mbox->vport = vport;
843 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; 843 mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
844 mbox->context1 = NULL; 844 mbox->context1 = NULL;
845 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); 845 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
846 if (rc == MBX_NOT_FINISHED) 846 if (rc == MBX_NOT_FINISHED)
847 mempool_free(mbox, phba->mbox_mem_pool); 847 mempool_free(mbox, phba->mbox_mem_pool);
848 } 848 }
849 } 849 }
850 850
851 /** 851 /**
852 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier 852 * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
853 * @phba: pointer to lpfc hba data structure. 853 * @phba: pointer to lpfc hba data structure.
854 * @vpi: virtual N_Port identifier. 854 * @vpi: virtual N_Port identifier.
855 * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port). 855 * @sid: Fibre Channel S_ID (N_Port_ID assigned to a virtual N_Port).
856 * @pmb: pointer to the driver internal queue element for mailbox command. 856 * @pmb: pointer to the driver internal queue element for mailbox command.
857 * 857 *
858 * The registration vport identifier mailbox command is used to activate a 858 * The registration vport identifier mailbox command is used to activate a
859 * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the 859 * virtual N_Port after it has acquired an N_Port_ID. The HBA validates the
860 * N_Port_ID against the information in the selected virtual N_Port context 860 * N_Port_ID against the information in the selected virtual N_Port context
861 * block and marks it active to allow normal processing of IOCB commands and 861 * block and marks it active to allow normal processing of IOCB commands and
862 * received unsolicited exchanges. 862 * received unsolicited exchanges.
863 * 863 *
864 * This routine prepares the mailbox command for registering a virtual N_Port. 864 * This routine prepares the mailbox command for registering a virtual N_Port.
865 **/ 865 **/
866 void 866 void
867 lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb) 867 lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
868 { 868 {
869 MAILBOX_t *mb = &pmb->u.mb; 869 MAILBOX_t *mb = &pmb->u.mb;
870 struct lpfc_hba *phba = vport->phba; 870 struct lpfc_hba *phba = vport->phba;
871 871
872 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 872 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
873 /* 873 /*
874 * Set the re-reg VPI bit for f/w to update the MAC address. 874 * Set the re-reg VPI bit for f/w to update the MAC address.
875 */ 875 */
876 if ((phba->sli_rev == LPFC_SLI_REV4) && 876 if ((phba->sli_rev == LPFC_SLI_REV4) &&
877 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) 877 !(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI))
878 mb->un.varRegVpi.upd = 1; 878 mb->un.varRegVpi.upd = 1;
879 879
880 mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi]; 880 mb->un.varRegVpi.vpi = phba->vpi_ids[vport->vpi];
881 mb->un.varRegVpi.sid = vport->fc_myDID; 881 mb->un.varRegVpi.sid = vport->fc_myDID;
882 if (phba->sli_rev == LPFC_SLI_REV4) 882 if (phba->sli_rev == LPFC_SLI_REV4)
883 mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi]; 883 mb->un.varRegVpi.vfi = phba->sli4_hba.vfi_ids[vport->vfi];
884 else 884 else
885 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base; 885 mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
886 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname, 886 memcpy(mb->un.varRegVpi.wwn, &vport->fc_portname,
887 sizeof(struct lpfc_name)); 887 sizeof(struct lpfc_name));
888 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]); 888 mb->un.varRegVpi.wwn[0] = cpu_to_le32(mb->un.varRegVpi.wwn[0]);
889 mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]); 889 mb->un.varRegVpi.wwn[1] = cpu_to_le32(mb->un.varRegVpi.wwn[1]);
890 890
891 mb->mbxCommand = MBX_REG_VPI; 891 mb->mbxCommand = MBX_REG_VPI;
892 mb->mbxOwner = OWN_HOST; 892 mb->mbxOwner = OWN_HOST;
893 return; 893 return;
894 894
895 } 895 }
896 896
897 /** 897 /**
898 * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id 898 * lpfc_unreg_vpi - Prepare a mailbox command for unregistering vport id
899 * @phba: pointer to lpfc hba data structure. 899 * @phba: pointer to lpfc hba data structure.
900 * @vpi: virtual N_Port identifier. 900 * @vpi: virtual N_Port identifier.
901 * @pmb: pointer to the driver internal queue element for mailbox command. 901 * @pmb: pointer to the driver internal queue element for mailbox command.
902 * 902 *
903 * The unregistration vport identifier mailbox command is used to inactivate 903 * The unregistration vport identifier mailbox command is used to inactivate
904 * a virtual N_Port. The driver must have logged out and unregistered all 904 * a virtual N_Port. The driver must have logged out and unregistered all
905 * remote N_Ports to abort any activity on the virtual N_Port. The HBA will 905 * remote N_Ports to abort any activity on the virtual N_Port. The HBA will
906 * unregister any default RPIs associated with the specified vpi, aborting 906 * unregister any default RPIs associated with the specified vpi, aborting
907 * any active exchanges. The HBA will post the mailbox response after making 907 * any active exchanges. The HBA will post the mailbox response after making
908 * the virtual N_Port inactive. 908 * the virtual N_Port inactive.
909 * 909 *
910 * This routine prepares the mailbox command for unregistering a virtual 910 * This routine prepares the mailbox command for unregistering a virtual
911 * N_Port. 911 * N_Port.
912 **/ 912 **/
913 void 913 void
914 lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb) 914 lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
915 { 915 {
916 MAILBOX_t *mb = &pmb->u.mb; 916 MAILBOX_t *mb = &pmb->u.mb;
917 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 917 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
918 918
919 if (phba->sli_rev == LPFC_SLI_REV3) 919 if (phba->sli_rev == LPFC_SLI_REV3)
920 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi]; 920 mb->un.varUnregVpi.vpi = phba->vpi_ids[vpi];
921 else if (phba->sli_rev >= LPFC_SLI_REV4) 921 else if (phba->sli_rev >= LPFC_SLI_REV4)
922 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi]; 922 mb->un.varUnregVpi.sli4_vpi = phba->vpi_ids[vpi];
923 923
924 mb->mbxCommand = MBX_UNREG_VPI; 924 mb->mbxCommand = MBX_UNREG_VPI;
925 mb->mbxOwner = OWN_HOST; 925 mb->mbxOwner = OWN_HOST;
926 return; 926 return;
927 927
928 } 928 }
929 929
930 /** 930 /**
931 * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB) 931 * lpfc_config_pcb_setup - Set up IOCB rings in the Port Control Block (PCB)
932 * @phba: pointer to lpfc hba data structure. 932 * @phba: pointer to lpfc hba data structure.
933 * 933 *
934 * This routine sets up and initializes the IOCB rings in the Port Control 934 * This routine sets up and initializes the IOCB rings in the Port Control
935 * Block (PCB). 935 * Block (PCB).
936 **/ 936 **/
937 static void 937 static void
938 lpfc_config_pcb_setup(struct lpfc_hba * phba) 938 lpfc_config_pcb_setup(struct lpfc_hba * phba)
939 { 939 {
940 struct lpfc_sli *psli = &phba->sli; 940 struct lpfc_sli *psli = &phba->sli;
941 struct lpfc_sli_ring *pring; 941 struct lpfc_sli_ring *pring;
942 PCB_t *pcbp = phba->pcb; 942 PCB_t *pcbp = phba->pcb;
943 dma_addr_t pdma_addr; 943 dma_addr_t pdma_addr;
944 uint32_t offset; 944 uint32_t offset;
945 uint32_t iocbCnt = 0; 945 uint32_t iocbCnt = 0;
946 int i; 946 int i;
947 947
948 pcbp->maxRing = (psli->num_rings - 1); 948 pcbp->maxRing = (psli->num_rings - 1);
949 949
950 for (i = 0; i < psli->num_rings; i++) { 950 for (i = 0; i < psli->num_rings; i++) {
951 pring = &psli->ring[i]; 951 pring = &psli->ring[i];
952 952
953 pring->sli.sli3.sizeCiocb = 953 pring->sli.sli3.sizeCiocb =
954 phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE : 954 phba->sli_rev == 3 ? SLI3_IOCB_CMD_SIZE :
955 SLI2_IOCB_CMD_SIZE; 955 SLI2_IOCB_CMD_SIZE;
956 pring->sli.sli3.sizeRiocb = 956 pring->sli.sli3.sizeRiocb =
957 phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE : 957 phba->sli_rev == 3 ? SLI3_IOCB_RSP_SIZE :
958 SLI2_IOCB_RSP_SIZE; 958 SLI2_IOCB_RSP_SIZE;
959 /* A ring MUST have both cmd and rsp entries defined to be 959 /* A ring MUST have both cmd and rsp entries defined to be
960 valid */ 960 valid */
961 if ((pring->sli.sli3.numCiocb == 0) || 961 if ((pring->sli.sli3.numCiocb == 0) ||
962 (pring->sli.sli3.numRiocb == 0)) { 962 (pring->sli.sli3.numRiocb == 0)) {
963 pcbp->rdsc[i].cmdEntries = 0; 963 pcbp->rdsc[i].cmdEntries = 0;
964 pcbp->rdsc[i].rspEntries = 0; 964 pcbp->rdsc[i].rspEntries = 0;
965 pcbp->rdsc[i].cmdAddrHigh = 0; 965 pcbp->rdsc[i].cmdAddrHigh = 0;
966 pcbp->rdsc[i].rspAddrHigh = 0; 966 pcbp->rdsc[i].rspAddrHigh = 0;
967 pcbp->rdsc[i].cmdAddrLow = 0; 967 pcbp->rdsc[i].cmdAddrLow = 0;
968 pcbp->rdsc[i].rspAddrLow = 0; 968 pcbp->rdsc[i].rspAddrLow = 0;
969 pring->sli.sli3.cmdringaddr = NULL; 969 pring->sli.sli3.cmdringaddr = NULL;
970 pring->sli.sli3.rspringaddr = NULL; 970 pring->sli.sli3.rspringaddr = NULL;
971 continue; 971 continue;
972 } 972 }
973 /* Command ring setup for ring */ 973 /* Command ring setup for ring */
974 pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt]; 974 pring->sli.sli3.cmdringaddr = (void *)&phba->IOCBs[iocbCnt];
975 pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb; 975 pcbp->rdsc[i].cmdEntries = pring->sli.sli3.numCiocb;
976 976
977 offset = (uint8_t *) &phba->IOCBs[iocbCnt] - 977 offset = (uint8_t *) &phba->IOCBs[iocbCnt] -
978 (uint8_t *) phba->slim2p.virt; 978 (uint8_t *) phba->slim2p.virt;
979 pdma_addr = phba->slim2p.phys + offset; 979 pdma_addr = phba->slim2p.phys + offset;
980 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr); 980 pcbp->rdsc[i].cmdAddrHigh = putPaddrHigh(pdma_addr);
981 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr); 981 pcbp->rdsc[i].cmdAddrLow = putPaddrLow(pdma_addr);
982 iocbCnt += pring->sli.sli3.numCiocb; 982 iocbCnt += pring->sli.sli3.numCiocb;
983 983
984 /* Response ring setup for ring */ 984 /* Response ring setup for ring */
985 pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt]; 985 pring->sli.sli3.rspringaddr = (void *) &phba->IOCBs[iocbCnt];
986 986
987 pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb; 987 pcbp->rdsc[i].rspEntries = pring->sli.sli3.numRiocb;
988 offset = (uint8_t *)&phba->IOCBs[iocbCnt] - 988 offset = (uint8_t *)&phba->IOCBs[iocbCnt] -
989 (uint8_t *)phba->slim2p.virt; 989 (uint8_t *)phba->slim2p.virt;
990 pdma_addr = phba->slim2p.phys + offset; 990 pdma_addr = phba->slim2p.phys + offset;
991 pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr); 991 pcbp->rdsc[i].rspAddrHigh = putPaddrHigh(pdma_addr);
992 pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr); 992 pcbp->rdsc[i].rspAddrLow = putPaddrLow(pdma_addr);
993 iocbCnt += pring->sli.sli3.numRiocb; 993 iocbCnt += pring->sli.sli3.numRiocb;
994 } 994 }
995 } 995 }
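/*
 * Illustrative helper (assumption, not in the driver): the ring setup
 * above repeatedly converts a host virtual address inside the slim2p
 * region into a bus address for the PCB by taking its byte offset from
 * the region base. Factored out, the pattern is:
 */
static dma_addr_t example_slim2p_bus_addr(struct lpfc_hba *phba, void *virt)
{
	uint32_t offset = (uint8_t *)virt - (uint8_t *)phba->slim2p.virt;

	return phba->slim2p.phys + offset;
}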
996 996
997 /** 997 /**
998 * lpfc_read_rev - Prepare a mailbox command for reading HBA revision 998 * lpfc_read_rev - Prepare a mailbox command for reading HBA revision
999 * @phba: pointer to lpfc hba data structure. 999 * @phba: pointer to lpfc hba data structure.
1000 * @pmb: pointer to the driver internal queue element for mailbox command. 1000 * @pmb: pointer to the driver internal queue element for mailbox command.
1001 * 1001 *
1002 * The read revision mailbox command is used to read the revision levels of 1002 * The read revision mailbox command is used to read the revision levels of
1003 * the HBA components. These components include hardware units, resident 1003 * the HBA components. These components include hardware units, resident
1004 * firmware, and available firmware. HBAs that support SLI-3 mode of 1004 * firmware, and available firmware. HBAs that support SLI-3 mode of
1005 * operation provide different response information depending on the version 1005 * operation provide different response information depending on the version
1006 * requested by the driver. 1006 * requested by the driver.
1007 * 1007 *
1008 * This routine prepares the mailbox command for reading HBA revision 1008 * This routine prepares the mailbox command for reading HBA revision
1009 * information. 1009 * information.
1010 **/ 1010 **/
1011 void 1011 void
1012 lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1012 lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1013 { 1013 {
1014 MAILBOX_t *mb = &pmb->u.mb; 1014 MAILBOX_t *mb = &pmb->u.mb;
1015 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1015 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1016 mb->un.varRdRev.cv = 1; 1016 mb->un.varRdRev.cv = 1;
1017 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */ 1017 mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
1018 mb->mbxCommand = MBX_READ_REV; 1018 mb->mbxCommand = MBX_READ_REV;
1019 mb->mbxOwner = OWN_HOST; 1019 mb->mbxOwner = OWN_HOST;
1020 return; 1020 return;
1021 } 1021 }
1022 1022
1023 void 1023 void
1024 lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1024 lpfc_sli4_swap_str(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1025 { 1025 {
1026 MAILBOX_t *mb = &pmb->u.mb; 1026 MAILBOX_t *mb = &pmb->u.mb;
1027 struct lpfc_mqe *mqe; 1027 struct lpfc_mqe *mqe;
1028 1028
1029 switch (mb->mbxCommand) { 1029 switch (mb->mbxCommand) {
1030 case MBX_READ_REV: 1030 case MBX_READ_REV:
1031 mqe = &pmb->u.mqe; 1031 mqe = &pmb->u.mqe;
1032 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name, 1032 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.fw_name,
1033 mqe->un.read_rev.fw_name, 16); 1033 mqe->un.read_rev.fw_name, 16);
1034 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name, 1034 lpfc_sli_pcimem_bcopy(mqe->un.read_rev.ulp_fw_name,
1035 mqe->un.read_rev.ulp_fw_name, 16); 1035 mqe->un.read_rev.ulp_fw_name, 16);
1036 break; 1036 break;
1037 default: 1037 default:
1038 break; 1038 break;
1039 } 1039 }
1040 return; 1040 return;
1041 } 1041 }
1042 1042
1043 /** 1043 /**
1044 * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2 1044 * lpfc_build_hbq_profile2 - Set up the HBQ Selection Profile 2
1045 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. 1045 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
1046 * @hbq_desc: pointer to the HBQ selection profile descriptor. 1046 * @hbq_desc: pointer to the HBQ selection profile descriptor.
1047 * 1047 *
1048 * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA 1048 * The Host Buffer Queue (HBQ) Selection Profile 2 specifies that the HBA
1049 * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs 1049 * tests the incoming frames' R_CTL/TYPE fields with words 10:15 and performs
1050 * the Sequence Length Test using the fields in the Selection Profile 2 1050 * the Sequence Length Test using the fields in the Selection Profile 2
1051 * extension in words 20:31. 1051 * extension in words 20:31.
1052 **/ 1052 **/
1053 static void 1053 static void
1054 lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb, 1054 lpfc_build_hbq_profile2(struct config_hbq_var *hbqmb,
1055 struct lpfc_hbq_init *hbq_desc) 1055 struct lpfc_hbq_init *hbq_desc)
1056 { 1056 {
1057 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt; 1057 hbqmb->profiles.profile2.seqlenbcnt = hbq_desc->seqlenbcnt;
1058 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen; 1058 hbqmb->profiles.profile2.maxlen = hbq_desc->maxlen;
1059 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff; 1059 hbqmb->profiles.profile2.seqlenoff = hbq_desc->seqlenoff;
1060 } 1060 }
1061 1061
1062 /** 1062 /**
1063 * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3 1063 * lpfc_build_hbq_profile3 - Set up the HBQ Selection Profile 3
1064 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. 1064 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
1065 * @hbq_desc: pointer to the HBQ selection profile descriptor. 1065 * @hbq_desc: pointer to the HBQ selection profile descriptor.
1066 * 1066 *
1067 * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA 1067 * The Host Buffer Queue (HBQ) Selection Profile 3 specifies that the HBA
1068 * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs 1068 * tests the incoming frame's R_CTL/TYPE fields with words 10:15 and performs
1069 * the Sequence Length Test and Byte Field Test using the fields in the 1069 * the Sequence Length Test and Byte Field Test using the fields in the
1070 * Selection Profile 3 extension in words 20:31. 1070 * Selection Profile 3 extension in words 20:31.
1071 **/ 1071 **/
1072 static void 1072 static void
1073 lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb, 1073 lpfc_build_hbq_profile3(struct config_hbq_var *hbqmb,
1074 struct lpfc_hbq_init *hbq_desc) 1074 struct lpfc_hbq_init *hbq_desc)
1075 { 1075 {
1076 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt; 1076 hbqmb->profiles.profile3.seqlenbcnt = hbq_desc->seqlenbcnt;
1077 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen; 1077 hbqmb->profiles.profile3.maxlen = hbq_desc->maxlen;
1078 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff; 1078 hbqmb->profiles.profile3.cmdcodeoff = hbq_desc->cmdcodeoff;
1079 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff; 1079 hbqmb->profiles.profile3.seqlenoff = hbq_desc->seqlenoff;
1080 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch, 1080 memcpy(&hbqmb->profiles.profile3.cmdmatch, hbq_desc->cmdmatch,
1081 sizeof(hbqmb->profiles.profile3.cmdmatch)); 1081 sizeof(hbqmb->profiles.profile3.cmdmatch));
1082 } 1082 }
1083 1083
1084 /** 1084 /**
1085 * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5 1085 * lpfc_build_hbq_profile5 - Set up the HBQ Selection Profile 5
1086 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command. 1086 * @hbqmb: pointer to the HBQ configuration data structure in mailbox command.
1087 * @hbq_desc: pointer to the HBQ selection profile descriptor. 1087 * @hbq_desc: pointer to the HBQ selection profile descriptor.
1088 * 1088 *
1089 * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The 1089 * The Host Buffer Queue (HBQ) Selection Profile 5 specifies a header HBQ. The
1090 * HBA tests the initial frame of an incoming sequence using the frame's 1090 * HBA tests the initial frame of an incoming sequence using the frame's
1091 * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test 1091 * R_CTL/TYPE fields with words 10:15 and performs the Sequence Length Test
1092 * and Byte Field Test using the fields in the Selection Profile 5 extension 1092 * and Byte Field Test using the fields in the Selection Profile 5 extension
1093 * words 20:31. 1093 * words 20:31.
1094 **/ 1094 **/
1095 static void 1095 static void
1096 lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb, 1096 lpfc_build_hbq_profile5(struct config_hbq_var *hbqmb,
1097 struct lpfc_hbq_init *hbq_desc) 1097 struct lpfc_hbq_init *hbq_desc)
1098 { 1098 {
1099 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt; 1099 hbqmb->profiles.profile5.seqlenbcnt = hbq_desc->seqlenbcnt;
1100 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen; 1100 hbqmb->profiles.profile5.maxlen = hbq_desc->maxlen;
1101 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff; 1101 hbqmb->profiles.profile5.cmdcodeoff = hbq_desc->cmdcodeoff;
1102 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff; 1102 hbqmb->profiles.profile5.seqlenoff = hbq_desc->seqlenoff;
1103 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch, 1103 memcpy(&hbqmb->profiles.profile5.cmdmatch, hbq_desc->cmdmatch,
1104 sizeof(hbqmb->profiles.profile5.cmdmatch)); 1104 sizeof(hbqmb->profiles.profile5.cmdmatch));
1105 } 1105 }
1106 1106
1107 /** 1107 /**
1108 * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ 1108 * lpfc_config_hbq - Prepare a mailbox command for configuring an HBQ
1109 * @phba: pointer to lpfc hba data structure. 1109 * @phba: pointer to lpfc hba data structure.
1110 * @id: HBQ identifier. 1110 * @id: HBQ identifier.
1111 * @hbq_desc: pointer to the HBA descriptor data structure. 1111 * @hbq_desc: pointer to the HBA descriptor data structure.
1112 * @hbq_entry_index: index of the HBQ entry data structures. 1112 * @hbq_entry_index: index of the HBQ entry data structures.
1113 * @pmb: pointer to the driver internal queue element for mailbox command. 1113 * @pmb: pointer to the driver internal queue element for mailbox command.
1114 * 1114 *
1115 * The configure HBQ (Host Buffer Queue) mailbox command is used to configure 1115 * The configure HBQ (Host Buffer Queue) mailbox command is used to configure
1116 * an HBQ. The configuration binds events that require buffers to a particular 1116 * an HBQ. The configuration binds events that require buffers to a particular
1117 * ring and HBQ based on a selection profile. 1117 * ring and HBQ based on a selection profile.
1118 * 1118 *
1119 * This routine prepares the mailbox command for configuring an HBQ. 1119 * This routine prepares the mailbox command for configuring an HBQ.
1120 **/ 1120 **/
1121 void 1121 void
1122 lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id, 1122 lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
1123 struct lpfc_hbq_init *hbq_desc, 1123 struct lpfc_hbq_init *hbq_desc,
1124 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb) 1124 uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
1125 { 1125 {
1126 int i; 1126 int i;
1127 MAILBOX_t *mb = &pmb->u.mb; 1127 MAILBOX_t *mb = &pmb->u.mb;
1128 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq; 1128 struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
1129 1129
1130 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1130 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1131 hbqmb->hbqId = id; 1131 hbqmb->hbqId = id;
1132 hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */ 1132 hbqmb->entry_count = hbq_desc->entry_count; /* # entries in HBQ */
1133 hbqmb->recvNotify = hbq_desc->rn; /* Receive 1133 hbqmb->recvNotify = hbq_desc->rn; /* Receive
1134 * Notification */ 1134 * Notification */
1135 hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks 1135 hbqmb->numMask = hbq_desc->mask_count; /* # R_CTL/TYPE masks
1136 * # in words 0-19 */ 1136 * # in words 0-19 */
1137 hbqmb->profile = hbq_desc->profile; /* Selection profile: 1137 hbqmb->profile = hbq_desc->profile; /* Selection profile:
1138 * 0 = all, 1138 * 0 = all,
1139 * 7 = logentry */ 1139 * 7 = logentry */
1140 hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring 1140 hbqmb->ringMask = hbq_desc->ring_mask; /* Binds HBQ to a ring
1141 * e.g. Ring0=b0001, 1141 * e.g. Ring0=b0001,
1142 * ring2=b0100 */ 1142 * ring2=b0100 */
1143 hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4 1143 hbqmb->headerLen = hbq_desc->headerLen; /* 0 if not profile 4
1144 * or 5 */ 1144 * or 5 */
1145 hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this 1145 hbqmb->logEntry = hbq_desc->logEntry; /* Set to 1 if this
1146 * HBQ will be used 1146 * HBQ will be used
1147 * for LogEntry 1147 * for LogEntry
1148 * buffers */ 1148 * buffers */
1149 hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) + 1149 hbqmb->hbqaddrLow = putPaddrLow(phba->hbqslimp.phys) +
1150 hbq_entry_index * sizeof(struct lpfc_hbq_entry); 1150 hbq_entry_index * sizeof(struct lpfc_hbq_entry);
1151 hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys); 1151 hbqmb->hbqaddrHigh = putPaddrHigh(phba->hbqslimp.phys);
1152 1152
1153 mb->mbxCommand = MBX_CONFIG_HBQ; 1153 mb->mbxCommand = MBX_CONFIG_HBQ;
1154 mb->mbxOwner = OWN_HOST; 1154 mb->mbxOwner = OWN_HOST;
1155 1155
1156 /* Copy info for profiles 2, 3, and 5. For other 1156 /* Copy info for profiles 2, 3, and 5. For other
1157 * profiles this area is reserved 1157 * profiles this area is reserved
1158 */ 1158 */
1159 if (hbq_desc->profile == 2) 1159 if (hbq_desc->profile == 2)
1160 lpfc_build_hbq_profile2(hbqmb, hbq_desc); 1160 lpfc_build_hbq_profile2(hbqmb, hbq_desc);
1161 else if (hbq_desc->profile == 3) 1161 else if (hbq_desc->profile == 3)
1162 lpfc_build_hbq_profile3(hbqmb, hbq_desc); 1162 lpfc_build_hbq_profile3(hbqmb, hbq_desc);
1163 else if (hbq_desc->profile == 5) 1163 else if (hbq_desc->profile == 5)
1164 lpfc_build_hbq_profile5(hbqmb, hbq_desc); 1164 lpfc_build_hbq_profile5(hbqmb, hbq_desc);
1165 1165
1166 /* Return if no rctl / type masks for this HBQ */ 1166 /* Return if no rctl / type masks for this HBQ */
1167 if (!hbq_desc->mask_count) 1167 if (!hbq_desc->mask_count)
1168 return; 1168 return;
1169 1169
1170 /* Otherwise we setup specific rctl / type masks for this HBQ */ 1170 /* Otherwise we setup specific rctl / type masks for this HBQ */
1171 for (i = 0; i < hbq_desc->mask_count; i++) { 1171 for (i = 0; i < hbq_desc->mask_count; i++) {
1172 hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch; 1172 hbqmb->hbqMasks[i].tmatch = hbq_desc->hbqMasks[i].tmatch;
1173 hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask; 1173 hbqmb->hbqMasks[i].tmask = hbq_desc->hbqMasks[i].tmask;
1174 hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch; 1174 hbqmb->hbqMasks[i].rctlmatch = hbq_desc->hbqMasks[i].rctlmatch;
1175 hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask; 1175 hbqmb->hbqMasks[i].rctlmask = hbq_desc->hbqMasks[i].rctlmask;
1176 } 1176 }
1177 1177
1178 return; 1178 return;
1179 } 1179 }
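/*
 * Illustrative descriptor sketch (hypothetical values, not a real HBQ
 * definition from this driver): a minimal profile-0 HBQ bound to ring 0
 * with no R_CTL/TYPE masks. Only the lpfc_hbq_init fields consumed by
 * lpfc_config_hbq() above are shown.
 */
static struct lpfc_hbq_init example_hbq_desc = {
	.entry_count = 256,	/* number of entries in the HBQ */
	.rn = 1,		/* receive notification enabled */
	.mask_count = 0,	/* no R_CTL/TYPE masks */
	.profile = 0,		/* selection profile 0 = all */
	.ring_mask = 0x1,	/* bind the HBQ to ring 0 */
	.headerLen = 0,		/* only meaningful for profiles 4 and 5 */
	.logEntry = 0,		/* not used for LogEntry buffers */
};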
1180 1180
1181 /** 1181 /**
1182 * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring 1182 * lpfc_config_ring - Prepare a mailbox command for configuring an IOCB ring
1183 * @phba: pointer to lpfc hba data structure. 1183 * @phba: pointer to lpfc hba data structure.
1184 * @ring: ring number for the IOCB ring to be configured. 1184 * @ring: ring number for the IOCB ring to be configured.
1185 * @pmb: pointer to the driver internal queue element for mailbox command. 1185 * @pmb: pointer to the driver internal queue element for mailbox command.
1186 * 1186 *
1187 * The configure ring mailbox command is used to configure an IOCB ring. This 1187 * The configure ring mailbox command is used to configure an IOCB ring. This
1188 * configuration binds from one to six of the HBA's R_CTL/TYPE mask entries to the 1188 * configuration binds from one to six of the HBA's R_CTL/TYPE mask entries to the
1189 * ring. This is used to map incoming sequences to a particular ring whose 1189 * ring. This is used to map incoming sequences to a particular ring whose
1190 * R_CTL/TYPE mask entry matches that of the sequence. The driver should not 1190 * R_CTL/TYPE mask entry matches that of the sequence. The driver should not
1191 * attempt to configure a ring whose number is greater than the number 1191 * attempt to configure a ring whose number is greater than the number
1192 * specified in the Port Control Block (PCB). It is an error to issue the 1192 * specified in the Port Control Block (PCB). It is an error to issue the
1193 * configure ring command more than once with the same ring number. The HBA 1193 * configure ring command more than once with the same ring number. The HBA
1194 * returns an error if the driver attempts this. 1194 * returns an error if the driver attempts this.
1195 * 1195 *
1196 * This routine prepares the mailbox command for configuring IOCB ring. 1196 * This routine prepares the mailbox command for configuring IOCB ring.
1197 **/ 1197 **/
1198 void 1198 void
1199 lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb) 1199 lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
1200 { 1200 {
1201 int i; 1201 int i;
1202 MAILBOX_t *mb = &pmb->u.mb; 1202 MAILBOX_t *mb = &pmb->u.mb;
1203 struct lpfc_sli *psli; 1203 struct lpfc_sli *psli;
1204 struct lpfc_sli_ring *pring; 1204 struct lpfc_sli_ring *pring;
1205 1205
1206 memset(pmb, 0, sizeof (LPFC_MBOXQ_t)); 1206 memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
1207 1207
1208 mb->un.varCfgRing.ring = ring; 1208 mb->un.varCfgRing.ring = ring;
1209 mb->un.varCfgRing.maxOrigXchg = 0; 1209 mb->un.varCfgRing.maxOrigXchg = 0;
1210 mb->un.varCfgRing.maxRespXchg = 0; 1210 mb->un.varCfgRing.maxRespXchg = 0;
1211 mb->un.varCfgRing.recvNotify = 1; 1211 mb->un.varCfgRing.recvNotify = 1;
1212 1212
1213 psli = &phba->sli; 1213 psli = &phba->sli;
1214 pring = &psli->ring[ring]; 1214 pring = &psli->ring[ring];
1215 mb->un.varCfgRing.numMask = pring->num_mask; 1215 mb->un.varCfgRing.numMask = pring->num_mask;
1216 mb->mbxCommand = MBX_CONFIG_RING; 1216 mb->mbxCommand = MBX_CONFIG_RING;
1217 mb->mbxOwner = OWN_HOST; 1217 mb->mbxOwner = OWN_HOST;
1218 1218
1219 /* Is this ring configured for a specific profile */ 1219 /* Is this ring configured for a specific profile */
1220 if (pring->prt[0].profile) { 1220 if (pring->prt[0].profile) {
1221 mb->un.varCfgRing.profile = pring->prt[0].profile; 1221 mb->un.varCfgRing.profile = pring->prt[0].profile;
1222 return; 1222 return;
1223 } 1223 }
1224 1224
1225 /* Otherwise we setup specific rctl / type masks for this ring */ 1225 /* Otherwise we setup specific rctl / type masks for this ring */
1226 for (i = 0; i < pring->num_mask; i++) { 1226 for (i = 0; i < pring->num_mask; i++) {
1227 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl; 1227 mb->un.varCfgRing.rrRegs[i].rval = pring->prt[i].rctl;
1228 if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ) 1228 if (mb->un.varCfgRing.rrRegs[i].rval != FC_RCTL_ELS_REQ)
1229 mb->un.varCfgRing.rrRegs[i].rmask = 0xff; 1229 mb->un.varCfgRing.rrRegs[i].rmask = 0xff;
1230 else 1230 else
1231 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe; 1231 mb->un.varCfgRing.rrRegs[i].rmask = 0xfe;
1232 mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type; 1232 mb->un.varCfgRing.rrRegs[i].tval = pring->prt[i].type;
1233 mb->un.varCfgRing.rrRegs[i].tmask = 0xff; 1233 mb->un.varCfgRing.rrRegs[i].tmask = 0xff;
1234 } 1234 }
1235 1235
1236 return; 1236 return;
1237 } 1237 }
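/*
 * Note on the 0xfe mask above: FC_RCTL_ELS_REQ is 0x22 and
 * FC_RCTL_ELS_REP is 0x23 (values per the FC-FS R_CTL definitions, an
 * assumption here), so masking the incoming R_CTL with 0xfe lets a
 * single mask entry match both an ELS request and its reply, while
 * 0xff requires an exact match.
 */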
1238 1238
1239 /** 1239 /**
1240 * lpfc_config_port - Prepare a mailbox command for configuring port 1240 * lpfc_config_port - Prepare a mailbox command for configuring port
1241 * @phba: pointer to lpfc hba data structure. 1241 * @phba: pointer to lpfc hba data structure.
1242 * @pmb: pointer to the driver internal queue element for mailbox command. 1242 * @pmb: pointer to the driver internal queue element for mailbox command.
1243 * 1243 *
1244 * The configure port mailbox command is used to identify the Port Control 1244 * The configure port mailbox command is used to identify the Port Control
1245 * Block (PCB) in the driver memory. After this command is issued, the 1245 * Block (PCB) in the driver memory. After this command is issued, the
1246 * driver must not access the mailbox in the HBA without first resetting 1246 * driver must not access the mailbox in the HBA without first resetting
1247 * the HBA. The HBA may copy the PCB information to internal storage for 1247 * the HBA. The HBA may copy the PCB information to internal storage for
1248 * subsequent use; the driver cannot change the PCB information unless it 1248 * subsequent use; the driver cannot change the PCB information unless it
1249 * resets the HBA. 1249 * resets the HBA.
1250 * 1250 *
1251 * This routine prepares the mailbox command for configuring port. 1251 * This routine prepares the mailbox command for configuring port.
1252 **/ 1252 **/
1253 void 1253 void
1254 lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) 1254 lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
1255 { 1255 {
1256 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr; 1256 MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
1257 MAILBOX_t *mb = &pmb->u.mb; 1257 MAILBOX_t *mb = &pmb->u.mb;
1258 dma_addr_t pdma_addr; 1258 dma_addr_t pdma_addr;
1259 uint32_t bar_low, bar_high; 1259 uint32_t bar_low, bar_high;
1260 size_t offset; 1260 size_t offset;
1261 struct lpfc_hgp hgp; 1261 struct lpfc_hgp hgp;
1262 int i; 1262 int i;
1263 uint32_t pgp_offset; 1263 uint32_t pgp_offset;
1264 1264
1265 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1265 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1266 mb->mbxCommand = MBX_CONFIG_PORT; 1266 mb->mbxCommand = MBX_CONFIG_PORT;
1267 mb->mbxOwner = OWN_HOST; 1267 mb->mbxOwner = OWN_HOST;
1268 1268
1269 mb->un.varCfgPort.pcbLen = sizeof(PCB_t); 1269 mb->un.varCfgPort.pcbLen = sizeof(PCB_t);
1270 1270
1271 offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt; 1271 offset = (uint8_t *)phba->pcb - (uint8_t *)phba->slim2p.virt;
1272 pdma_addr = phba->slim2p.phys + offset; 1272 pdma_addr = phba->slim2p.phys + offset;
1273 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr); 1273 mb->un.varCfgPort.pcbLow = putPaddrLow(pdma_addr);
1274 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr); 1274 mb->un.varCfgPort.pcbHigh = putPaddrHigh(pdma_addr);
1275 1275
1276 /* The Host Group Pointer is always in SLIM */ 1276 /* The Host Group Pointer is always in SLIM */
1277 mb->un.varCfgPort.hps = 1; 1277 mb->un.varCfgPort.hps = 1;
1278 1278
1279 /* If the HBA supports SLI-3, ask for it */ 1279 /* If the HBA supports SLI-3, ask for it */
1280 1280
1281 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) { 1281 if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
1282 if (phba->cfg_enable_bg) 1282 if (phba->cfg_enable_bg)
1283 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */ 1283 mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
1284 if (phba->cfg_enable_dss) 1284 if (phba->cfg_enable_dss)
1285 mb->un.varCfgPort.cdss = 1; /* Configure Security */ 1285 mb->un.varCfgPort.cdss = 1; /* Configure Security */
1286 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */ 1286 mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
1287 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */ 1287 mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
1288 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count(); 1288 mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
1289 if (phba->max_vpi && phba->cfg_enable_npiv && 1289 if (phba->max_vpi && phba->cfg_enable_npiv &&
1290 phba->vpd.sli3Feat.cmv) { 1290 phba->vpd.sli3Feat.cmv) {
1291 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI; 1291 mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
1292 mb->un.varCfgPort.cmv = 1; 1292 mb->un.varCfgPort.cmv = 1;
1293 } else 1293 } else
1294 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0; 1294 mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
1295 } else 1295 } else
1296 phba->sli_rev = LPFC_SLI_REV2; 1296 phba->sli_rev = LPFC_SLI_REV2;
1297 mb->un.varCfgPort.sli_mode = phba->sli_rev; 1297 mb->un.varCfgPort.sli_mode = phba->sli_rev;
1298 1298
1299 /* If this is an SLI3 port, configure async status notification. */ 1299 /* If this is an SLI3 port, configure async status notification. */
1300 if (phba->sli_rev == LPFC_SLI_REV3) 1300 if (phba->sli_rev == LPFC_SLI_REV3)
1301 mb->un.varCfgPort.casabt = 1; 1301 mb->un.varCfgPort.casabt = 1;
1302 1302
1303 /* Now setup pcb */ 1303 /* Now setup pcb */
1304 phba->pcb->type = TYPE_NATIVE_SLI2; 1304 phba->pcb->type = TYPE_NATIVE_SLI2;
1305 phba->pcb->feature = FEATURE_INITIAL_SLI2; 1305 phba->pcb->feature = FEATURE_INITIAL_SLI2;
1306 1306
1307 /* Setup Mailbox pointers */ 1307 /* Setup Mailbox pointers */
1308 phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE; 1308 phba->pcb->mailBoxSize = sizeof(MAILBOX_t) + MAILBOX_EXT_SIZE;
1309 offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt; 1309 offset = (uint8_t *)phba->mbox - (uint8_t *)phba->slim2p.virt;
1310 pdma_addr = phba->slim2p.phys + offset; 1310 pdma_addr = phba->slim2p.phys + offset;
1311 phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr); 1311 phba->pcb->mbAddrHigh = putPaddrHigh(pdma_addr);
1312 phba->pcb->mbAddrLow = putPaddrLow(pdma_addr); 1312 phba->pcb->mbAddrLow = putPaddrLow(pdma_addr);
1313 1313
1314 /* 1314 /*
1315 * Setup Host Group ring pointer. 1315 * Setup Host Group ring pointer.
1316 * 1316 *
1317 * For efficiency reasons, the ring get/put pointers can be 1317 * For efficiency reasons, the ring get/put pointers can be
1318 * placed in adapter memory (SLIM) rather than in host memory. 1318 * placed in adapter memory (SLIM) rather than in host memory.
1319 * This allows firmware to avoid PCI reads/writes when updating 1319 * This allows firmware to avoid PCI reads/writes when updating
1320 * and checking pointers. 1320 * and checking pointers.
1321 * 1321 *
1322 * The firmware recognizes the use of SLIM memory by comparing 1322 * The firmware recognizes the use of SLIM memory by comparing
1323 * the address of the get/put pointers structure with that of 1323 * the address of the get/put pointers structure with that of
1324 * the SLIM BAR (BAR0). 1324 * the SLIM BAR (BAR0).
1325 * 1325 *
1326 * Caution: be sure to use the PCI config space value of BAR0/BAR1 1326 * Caution: be sure to use the PCI config space value of BAR0/BAR1
1327 * (the hardware's view of the base address), not the OS's 1327 * (the hardware's view of the base address), not the OS's
1328 * value of pci_resource_start() as the OS value may be a cookie 1328 * value of pci_resource_start() as the OS value may be a cookie
1329 * for ioremap/iomap. 1329 * for ioremap/iomap.
1330 */ 1330 */
1331 1331
1332 1332
1333 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low); 1333 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_0, &bar_low);
1334 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high); 1334 pci_read_config_dword(phba->pcidev, PCI_BASE_ADDRESS_1, &bar_high);
1335 1335
1336 /* 1336 /*
1337 * Set up HGP - Port Memory 1337 * Set up HGP - Port Memory
1338 * 1338 *
1339 * The port expects the host get/put pointers to reside in memory 1339 * The port expects the host get/put pointers to reside in memory
1340 * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes) 1340 * following the "non-diagnostic" mode mailbox (32 words, 0x80 bytes)
1341 * area of SLIM. In SLI-2 mode, there's an additional 16 reserved 1341 * area of SLIM. In SLI-2 mode, there's an additional 16 reserved
1342 * words (0x40 bytes). This area is not reserved if HBQs are 1342 * words (0x40 bytes). This area is not reserved if HBQs are
1343 * configured in SLI-3. 1343 * configured in SLI-3.
1344 * 1344 *
1345 * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80 1345 * CR0Put - SLI2(no HBQs) = 0xc0, With HBQs = 0x80
1346 * RR0Get 0xc4 0x84 1346 * RR0Get 0xc4 0x84
1347 * CR1Put 0xc8 0x88 1347 * CR1Put 0xc8 0x88
1348 * RR1Get 0xcc 0x8c 1348 * RR1Get 0xcc 0x8c
1349 * CR2Put 0xd0 0x90 1349 * CR2Put 0xd0 0x90
1350 * RR2Get 0xd4 0x94 1350 * RR2Get 0xd4 0x94
1351 * CR3Put 0xd8 0x98 1351 * CR3Put 0xd8 0x98
1352 * RR3Get 0xdc 0x9c 1352 * RR3Get 0xdc 0x9c
1353 * 1353 *
1354 * Reserved 0xa0-0xbf 1354 * Reserved 0xa0-0xbf
1355 * If HBQs configured: 1355 * If HBQs configured:
1356 * HBQ 0 Put ptr 0xc0 1356 * HBQ 0 Put ptr 0xc0
1357 * HBQ 1 Put ptr 0xc4 1357 * HBQ 1 Put ptr 0xc4
1358 * HBQ 2 Put ptr 0xc8 1358 * HBQ 2 Put ptr 0xc8
1359 * ...... 1359 * ......
1360 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4 1360 * HBQ(M-1)Put Pointer 0xc0+(M-1)*4
1361 * 1361 *
1362 */ 1362 */
1363 1363
1364 if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) { 1364 if (phba->cfg_hostmem_hgp && phba->sli_rev != 3) {
1365 phba->host_gp = &phba->mbox->us.s2.host[0]; 1365 phba->host_gp = &phba->mbox->us.s2.host[0];
1366 phba->hbq_put = NULL; 1366 phba->hbq_put = NULL;
1367 offset = (uint8_t *)&phba->mbox->us.s2.host - 1367 offset = (uint8_t *)&phba->mbox->us.s2.host -
1368 (uint8_t *)phba->slim2p.virt; 1368 (uint8_t *)phba->slim2p.virt;
1369 pdma_addr = phba->slim2p.phys + offset; 1369 pdma_addr = phba->slim2p.phys + offset;
1370 phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr); 1370 phba->pcb->hgpAddrHigh = putPaddrHigh(pdma_addr);
1371 phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr); 1371 phba->pcb->hgpAddrLow = putPaddrLow(pdma_addr);
1372 } else { 1372 } else {
1373 /* The Host Group Pointer is always in SLIM */ 1373 /* The Host Group Pointer is always in SLIM */
1374 mb->un.varCfgPort.hps = 1; 1374 mb->un.varCfgPort.hps = 1;
1375 1375
1376 if (phba->sli_rev == 3) { 1376 if (phba->sli_rev == 3) {
1377 phba->host_gp = &mb_slim->us.s3.host[0]; 1377 phba->host_gp = &mb_slim->us.s3.host[0];
1378 phba->hbq_put = &mb_slim->us.s3.hbq_put[0]; 1378 phba->hbq_put = &mb_slim->us.s3.hbq_put[0];
1379 } else { 1379 } else {
1380 phba->host_gp = &mb_slim->us.s2.host[0]; 1380 phba->host_gp = &mb_slim->us.s2.host[0];
1381 phba->hbq_put = NULL; 1381 phba->hbq_put = NULL;
1382 } 1382 }
1383 1383
1384 /* mask off BAR0's flag bits 0 - 3 */ 1384 /* mask off BAR0's flag bits 0 - 3 */
1385 phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) + 1385 phba->pcb->hgpAddrLow = (bar_low & PCI_BASE_ADDRESS_MEM_MASK) +
1386 (void __iomem *)phba->host_gp - 1386 (void __iomem *)phba->host_gp -
1387 (void __iomem *)phba->MBslimaddr; 1387 (void __iomem *)phba->MBslimaddr;
1388 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64) 1388 if (bar_low & PCI_BASE_ADDRESS_MEM_TYPE_64)
1389 phba->pcb->hgpAddrHigh = bar_high; 1389 phba->pcb->hgpAddrHigh = bar_high;
1390 else 1390 else
1391 phba->pcb->hgpAddrHigh = 0; 1391 phba->pcb->hgpAddrHigh = 0;
1392 /* write HGP data to SLIM at the required longword offset */ 1392 /* write HGP data to SLIM at the required longword offset */
1393 memset(&hgp, 0, sizeof(struct lpfc_hgp)); 1393 memset(&hgp, 0, sizeof(struct lpfc_hgp));
1394 1394
1395 for (i = 0; i < phba->sli.num_rings; i++) { 1395 for (i = 0; i < phba->sli.num_rings; i++) {
1396 lpfc_memcpy_to_slim(phba->host_gp + i, &hgp, 1396 lpfc_memcpy_to_slim(phba->host_gp + i, &hgp,
1397 sizeof(*phba->host_gp)); 1397 sizeof(*phba->host_gp));
1398 } 1398 }
1399 } 1399 }
1400 1400
1401 /* Setup Port Group offset */ 1401 /* Setup Port Group offset */
1402 if (phba->sli_rev == 3) 1402 if (phba->sli_rev == 3)
1403 pgp_offset = offsetof(struct lpfc_sli2_slim, 1403 pgp_offset = offsetof(struct lpfc_sli2_slim,
1404 mbx.us.s3_pgp.port); 1404 mbx.us.s3_pgp.port);
1405 else 1405 else
1406 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port); 1406 pgp_offset = offsetof(struct lpfc_sli2_slim, mbx.us.s2.port);
1407 pdma_addr = phba->slim2p.phys + pgp_offset; 1407 pdma_addr = phba->slim2p.phys + pgp_offset;
1408 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr); 1408 phba->pcb->pgpAddrHigh = putPaddrHigh(pdma_addr);
1409 phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr); 1409 phba->pcb->pgpAddrLow = putPaddrLow(pdma_addr);
1410 1410
1411 /* Use helper routine to set up rings in the PCB */ 1411 /* Use helper routine to set up rings in the PCB */
1412 lpfc_config_pcb_setup(phba); 1412 lpfc_config_pcb_setup(phba);
1413 1413
1414 /* special handling for LC HBAs */ 1414 /* special handling for LC HBAs */
1415 if (lpfc_is_LC_HBA(phba->pcidev->device)) { 1415 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
1416 uint32_t hbainit[5]; 1416 uint32_t hbainit[5];
1417 1417
1418 lpfc_hba_init(phba, hbainit); 1418 lpfc_hba_init(phba, hbainit);
1419 1419
1420 memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20); 1420 memcpy(&mb->un.varCfgPort.hbainit, hbainit, 20);
1421 } 1421 }
1422 1422
1423 /* Swap PCB if needed */ 1423 /* Swap PCB if needed */
1424 lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t)); 1424 lpfc_sli_pcimem_bcopy(phba->pcb, phba->pcb, sizeof(PCB_t));
1425 } 1425 }
1426 1426
1427 /** 1427 /**
1428 * lpfc_kill_board - Prepare a mailbox command for killing board 1428 * lpfc_kill_board - Prepare a mailbox command for killing board
1429 * @phba: pointer to lpfc hba data structure. 1429 * @phba: pointer to lpfc hba data structure.
1430 * @pmb: pointer to the driver internal queue element for mailbox command. 1430 * @pmb: pointer to the driver internal queue element for mailbox command.
1431 * 1431 *
1432 * The kill board mailbox command is used to tell firmware to perform a 1432 * The kill board mailbox command is used to tell firmware to perform a
1433 * graceful shutdown of a channel on a specified board to prepare for reset. 1433 * graceful shutdown of a channel on a specified board to prepare for reset.
1434 * When the kill board mailbox command is received, the ER3 bit is set to 1 1434 * When the kill board mailbox command is received, the ER3 bit is set to 1
1435 * in the Host Status register and the ER Attention bit is set to 1 in the 1435 * in the Host Status register and the ER Attention bit is set to 1 in the
1436 * Host Attention register of the HBA function that received the kill board 1436 * Host Attention register of the HBA function that received the kill board
1437 * command. 1437 * command.
1438 * 1438 *
1439 * This routine prepares the mailbox command for killing the board in 1439 * This routine prepares the mailbox command for killing the board in
1440 * preparation for a graceful shutdown. 1440 * preparation for a graceful shutdown.
1441 **/ 1441 **/
1442 void 1442 void
1443 lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb) 1443 lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
1444 { 1444 {
1445 MAILBOX_t *mb = &pmb->u.mb; 1445 MAILBOX_t *mb = &pmb->u.mb;
1446 1446
1447 memset(pmb, 0, sizeof(LPFC_MBOXQ_t)); 1447 memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
1448 mb->mbxCommand = MBX_KILL_BOARD; 1448 mb->mbxCommand = MBX_KILL_BOARD;
1449 mb->mbxOwner = OWN_HOST; 1449 mb->mbxOwner = OWN_HOST;
1450 return; 1450 return;
1451 } 1451 }
1452 1452
1453 /** 1453 /**
1454 * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue 1454 * lpfc_mbox_put - Put a mailbox cmd into the tail of driver's mailbox queue
1455 * @phba: pointer to lpfc hba data structure. 1455 * @phba: pointer to lpfc hba data structure.
1456 * @mbq: pointer to the driver internal queue element for mailbox command. 1456 * @mbq: pointer to the driver internal queue element for mailbox command.
1457 * 1457 *
1458 * The driver maintains an internal mailbox command queue implemented as a 1458 * The driver maintains an internal mailbox command queue implemented as a
1459 * linked list. When a mailbox command is issued, it is put into the mailbox 1459 * linked list. When a mailbox command is issued, it is put into the mailbox
1460 * command queue so that commands are processed in order, since the HBA can 1460 * command queue so that commands are processed in order, since the HBA can
1461 * process only one mailbox command at a time. 1461 * process only one mailbox command at a time.
1462 **/ 1462 **/
1463 void 1463 void
1464 lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq) 1464 lpfc_mbox_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
1465 { 1465 {
1466 struct lpfc_sli *psli; 1466 struct lpfc_sli *psli;
1467 1467
1468 psli = &phba->sli; 1468 psli = &phba->sli;
1469 1469
1470 list_add_tail(&mbq->list, &psli->mboxq); 1470 list_add_tail(&mbq->list, &psli->mboxq);
1471 1471
1472 psli->mboxq_cnt++; 1472 psli->mboxq_cnt++;
1473 1473
1474 return; 1474 return;
1475 } 1475 }
1476 1476
1477 /** 1477 /**
1478 * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue 1478 * lpfc_mbox_get - Remove a mailbox cmd from the head of driver's mailbox queue
1479 * @phba: pointer to lpfc hba data structure. 1479 * @phba: pointer to lpfc hba data structure.
1480 * 1480 *
1481 * The driver maintains an internal mailbox command queue implemented as a 1481 * The driver maintains an internal mailbox command queue implemented as a
1482 * linked list. When a mailbox command is issued, it is put into the mailbox 1482 * linked list. When a mailbox command is issued, it is put into the mailbox
1483 * command queue so that commands are processed in order, since the HBA can 1483 * command queue so that commands are processed in order, since the HBA can
1484 * process only one mailbox command at a time. After the HBA finishes 1484 * process only one mailbox command at a time. After the HBA finishes
1485 * processing a mailbox command, the driver removes the next pending mailbox 1485 * processing a mailbox command, the driver removes the next pending mailbox
1486 * command from the head of the queue and sends it to the HBA for processing. 1486 * command from the head of the queue and sends it to the HBA for processing.
1487 * 1487 *
1488 * Return codes 1488 * Return codes
1489 * pointer to the driver internal queue element for mailbox command. 1489 * pointer to the driver internal queue element for mailbox command.
1490 **/ 1490 **/
1491 LPFC_MBOXQ_t * 1491 LPFC_MBOXQ_t *
1492 lpfc_mbox_get(struct lpfc_hba * phba) 1492 lpfc_mbox_get(struct lpfc_hba * phba)
1493 { 1493 {
1494 LPFC_MBOXQ_t *mbq = NULL; 1494 LPFC_MBOXQ_t *mbq = NULL;
1495 struct lpfc_sli *psli = &phba->sli; 1495 struct lpfc_sli *psli = &phba->sli;
1496 1496
1497 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list); 1497 list_remove_head((&psli->mboxq), mbq, LPFC_MBOXQ_t, list);
1498 if (mbq) 1498 if (mbq)
1499 psli->mboxq_cnt--; 1499 psli->mboxq_cnt--;
1500 1500
1501 return mbq; 1501 return mbq;
1502 } 1502 }
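/*
 * Illustrative usage sketch (assumption, not part of this file):
 * lpfc_mbox_put() and lpfc_mbox_get() take no locks themselves, so
 * callers are expected to serialize access to the queue, typically
 * under phba->hbalock, e.g.:
 */
static void example_queue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	lpfc_mbox_put(phba, mbq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}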

/**
 * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
 * @phba: pointer to lpfc hba data structure.
 * @mbq: pointer to the driver internal queue element for mailbox command.
 *
 * This routine puts the completed mailbox command into the mailbox command
 * complete list. This is the unlocked version of the routine. The mailbox
 * complete list is used by the driver worker thread to process mailbox
 * complete callback functions outside the driver interrupt handler.
 **/
void
__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
}

/**
 * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
 * @phba: pointer to lpfc hba data structure.
 * @mbq: pointer to the driver internal queue element for mailbox command.
 *
 * This routine puts the completed mailbox command into the mailbox command
 * complete list. This is the locked version of the routine. The mailbox
 * complete list is used by the driver worker thread to process mailbox
 * complete callback functions outside the driver interrupt handler.
 **/
void
lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	unsigned long iflag;

	/* This function expects to be called from interrupt context */
	spin_lock_irqsave(&phba->hbalock, iflag);
	__lpfc_mbox_cmpl_put(phba, mbq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return;
}
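
/*
 * Context sketch (illustrative only): the locked wrapper is meant for
 * interrupt handlers, while code already holding phba->hbalock calls the
 * unlocked __lpfc_mbox_cmpl_put() directly. example_isr_done() is
 * hypothetical.
 */
static void example_isr_done(struct lpfc_hba *phba, LPFC_MBOXQ_t *done_mbq)
{
	/* From the ISR: the wrapper takes hbalock internally. */
	lpfc_mbox_cmpl_put(phba, done_mbq);
	/* The worker thread will later invoke done_mbq->mbox_cmpl(). */
}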

/**
 * lpfc_mbox_cmd_check - Check the validity of a mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This routine checks whether a mailbox command is valid to be issued. The
 * check is performed by the mailbox issue API whenever a client is about to
 * issue a mailbox command to the mailbox transport.
 *
 * Return 0 - pass the check, -ENODEV - fail the check
 **/
int
lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	/* Mailbox commands that have a completion handler must also have a
	 * vport specified.
	 */
	if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
	    mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
		if (!mboxq->vport) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
					"1814 Mbox x%x failed, no vport\n",
					mboxq->u.mb.mbxCommand);
			dump_stack();
			return -ENODEV;
		}
	}
	return 0;
}

/**
 * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks whether the HBA device is ready for posting a mailbox
 * command. It is used by the mailbox transport API at the time it is about
 * to post a mailbox command to the device.
 *
 * Return 0 - pass the check, -ENODEV - fail the check
 **/
int
lpfc_mbox_dev_check(struct lpfc_hba *phba)
{
	/* If the PCI channel is in offline state, do not issue mbox */
	if (unlikely(pci_channel_offline(phba->pcidev)))
		return -ENODEV;

	/* If the HBA is in error state, do not issue mbox */
	if (phba->link_state == LPFC_HBA_ERROR)
		return -ENODEV;

	return 0;
}
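
/*
 * Pre-issue sketch (illustrative only): how an issue path might gate a
 * command on both checks before posting it to the HBA. example_issue()
 * is hypothetical and error handling is reduced to pass-through.
 */
static int example_issue(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	rc = lpfc_mbox_dev_check(phba);		/* device ready?        */
	if (rc)
		return rc;			/* -ENODEV              */

	rc = lpfc_mbox_cmd_check(phba, mboxq);	/* vport set if needed? */
	if (rc)
		return rc;

	/* Safe to post mboxq to the mailbox transport from here. */
	return 0;
}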

/**
 * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This routine retrieves the proper timeout value according to the mailbox
 * command code.
 *
 * Return codes
 *    Timeout value to be used for the given mailbox command
 **/
int
lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	MAILBOX_t *mbox = &mboxq->u.mb;
	uint8_t subsys, opcode;

	switch (mbox->mbxCommand) {
	case MBX_WRITE_NV:	/* 0x03 */
	case MBX_DUMP_MEMORY:	/* 0x17 */
	case MBX_UPDATE_CFG:	/* 0x1B */
	case MBX_DOWN_LOAD:	/* 0x1C */
	case MBX_DEL_LD_ENTRY:	/* 0x1D */
	case MBX_WRITE_VPARMS:	/* 0x32 */
	case MBX_LOAD_AREA:	/* 0x81 */
	case MBX_WRITE_WWN:	/* 0x98 */
	case MBX_LOAD_EXP_ROM:	/* 0x9C */
	case MBX_ACCESS_VDATA:	/* 0xA5 */
		return LPFC_MBOX_TMO_FLASH_CMD;
	case MBX_SLI4_CONFIG:	/* 0x9b */
		subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq);
		opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq);
		if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_READ_OBJECT:
			case LPFC_MBOX_OPCODE_WRITE_OBJECT:
			case LPFC_MBOX_OPCODE_READ_OBJECT_LIST:
			case LPFC_MBOX_OPCODE_DELETE_OBJECT:
			case LPFC_MBOX_OPCODE_GET_PROFILE_LIST:
			case LPFC_MBOX_OPCODE_SET_ACT_PROFILE:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG:
			case LPFC_MBOX_OPCODE_GET_PROFILE_CAPACITIES:
			case LPFC_MBOX_OPCODE_SEND_ACTIVATION:
			case LPFC_MBOX_OPCODE_RESET_LICENSES:
			case LPFC_MBOX_OPCODE_SET_BOOT_CONFIG:
			case LPFC_MBOX_OPCODE_GET_VPD_DATA:
			case LPFC_MBOX_OPCODE_SET_PHYSICAL_LINK_CONFIG:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) {
			switch (opcode) {
			case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS:
				return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO;
			}
		}
		return LPFC_MBOX_SLI4_CONFIG_TMO;
	}
	return LPFC_MBOX_TMO;
}
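
/*
 * Timeout sketch (illustrative only): assuming, as elsewhere in the driver,
 * that lpfc_mbox_tmo_val() returns a value in seconds, a synchronous caller
 * would scale it to jiffies before waiting. example_wait_tmo() is
 * hypothetical.
 */
static unsigned long example_wait_tmo(struct lpfc_hba *phba,
				      LPFC_MBOXQ_t *mboxq)
{
	int tmo_sec = lpfc_mbox_tmo_val(phba, mboxq);

	/* e.g. MBX_WRITE_NV gets the longer LPFC_MBOX_TMO_FLASH_CMD value */
	return msecs_to_jiffies(tmo_sec * 1000);
}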

/**
 * lpfc_sli4_mbx_sge_set - Set an sge entry in non-embedded mailbox command
 * @mbox: pointer to lpfc mbox command.
 * @sgentry: sge entry index.
 * @phyaddr: physical address for the sge.
 * @length: Length of the sge.
 *
 * This routine sets up an entry in the non-embedded mailbox command at the
 * sge index location.
 **/
void
lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
		      dma_addr_t phyaddr, uint32_t length)
{
	struct lpfc_mbx_nembed_cmd *nembed_sge;

	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
		     &mbox->u.mqe.un.nembed_cmd;
	nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
	nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
	nembed_sge->sge[sgentry].length = length;
}

/**
 * lpfc_sli4_mbx_sge_get - Get an sge entry from non-embedded mailbox command
 * @mbox: pointer to lpfc mbox command.
 * @sgentry: sge entry index.
 * @sge: pointer to the sge entry to be filled in.
 *
 * This routine gets an entry from the non-embedded mailbox command at the
 * sge index location.
 **/
void
lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
		      struct lpfc_mbx_sge *sge)
{
	struct lpfc_mbx_nembed_cmd *nembed_sge;

	nembed_sge = (struct lpfc_mbx_nembed_cmd *)
		     &mbox->u.mqe.un.nembed_cmd;
	sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
	sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
	sge->length = nembed_sge->sge[sgentry].length;
}

/**
 * lpfc_sli4_mbox_cmd_free - Free an sli4 mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command.
 *
 * This routine frees an SLI4-specific mailbox command used for sending an
 * IOCTL command.
 **/
void
lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_mbx_sli4_config *sli4_cfg;
	struct lpfc_mbx_sge sge;
	dma_addr_t phyaddr;
	uint32_t sgecount, sgentry;

	sli4_cfg = &mbox->u.mqe.un.sli4_config;

	/* For embedded mbox command, just free the mbox command */
	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}

	/* For non-embedded mbox command, we need to free the pages first */
	sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
	/* There is nothing we can do if there is no sge address array */
	if (unlikely(!mbox->sge_array)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return;
	}
	/* Each non-embedded DMA memory was allocated in the length of a page */
	for (sgentry = 0; sgentry < sgecount; sgentry++) {
		lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
		phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  mbox->sge_array->addr[sgentry], phyaddr);
	}
	/* Free the sge address array memory */
	kfree(mbox->sge_array);
	/* Finally, free the mailbox command itself */
	mempool_free(mbox, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_config - Initialize the SLI4 Config Mailbox command
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command.
 * @subsystem: The sli4 config sub mailbox subsystem.
 * @opcode: The sli4 config sub mailbox command opcode.
 * @length: Length of the sli4 config mailbox command (including sub-header).
 * @emb: true for LPFC_SLI4_MBX_EMBED, false for LPFC_SLI4_MBX_NEMBED.
 *
 * This routine sets up the header fields of an SLI4-specific mailbox command
 * for sending an IOCTL command.
 *
 * Return: the actual length of the mbox command allocated (mostly useful
 * for non-embedded mailbox commands).
 **/
int
lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
		 uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
{
	struct lpfc_mbx_sli4_config *sli4_config;
	union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
	uint32_t alloc_len;
	uint32_t resid_len;
	uint32_t pagen, pcount;
	void *viraddr;
	dma_addr_t phyaddr;

	/* Set up SLI4 mailbox command header fields */
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);

	/* Set up SLI4 ioctl command header fields */
	sli4_config = &mbox->u.mqe.un.sli4_config;

	/* Setup for the embedded mbox command */
	if (emb) {
		/* Set up main header fields */
		bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
		sli4_config->header.cfg_mhdr.payload_length = length;
		/* Set up sub-header fields following main header */
		bf_set(lpfc_mbox_hdr_opcode,
		       &sli4_config->header.cfg_shdr.request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem,
		       &sli4_config->header.cfg_shdr.request, subsystem);
		sli4_config->header.cfg_shdr.request.request_length =
			length - LPFC_MBX_CMD_HDR_LENGTH;
		return length;
	}

	/* Setup for the non-embedded mbox command */
	pcount = (SLI4_PAGE_ALIGN(length))/SLI4_PAGE_SIZE;
	pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
				LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
	/* Allocate record for keeping SGE virtual addresses */
	mbox->sge_array = kzalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
				  GFP_KERNEL);
	if (!mbox->sge_array) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2527 Failed to allocate non-embedded SGE "
				"array.\n");
		return 0;
	}
	for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
		/* The DMA memory is always allocated in the length of a
		 * page even though the last SGE might not fill up to a
		 * page, this is used as the a priori size of SLI4_PAGE_SIZE
		 * for the later DMA memory free.
		 */
		viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
					     &phyaddr, GFP_KERNEL);
		/* If the allocation fails, proceed with whatever we have */
		if (!viraddr)
			break;
		memset(viraddr, 0, SLI4_PAGE_SIZE);
		mbox->sge_array->addr[pagen] = viraddr;
		/* Keep the first page for later sub-header construction */
		if (pagen == 0)
			cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
		resid_len = length - alloc_len;
		if (resid_len > SLI4_PAGE_SIZE) {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      SLI4_PAGE_SIZE);
			alloc_len += SLI4_PAGE_SIZE;
		} else {
			lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
					      resid_len);
			alloc_len = length;
		}
	}

	/* Set up main header fields in mailbox command */
	sli4_config->header.cfg_mhdr.payload_length = alloc_len;
	bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);

	/* Set up sub-header fields into the first page */
	if (pagen > 0) {
		bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
		bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
		cfg_shdr->request.request_length =
			alloc_len - sizeof(union lpfc_sli4_cfg_shdr);
	}
	/* The sub-header is in DMA memory, which needs endian conversion */
	if (cfg_shdr)
		lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
				      sizeof(union lpfc_sli4_cfg_shdr));
	return alloc_len;
}
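
/*
 * Allocation sketch (illustrative only): pairing lpfc_sli4_config() with
 * lpfc_sli4_mbox_cmd_free() for a non-embedded command, which is the
 * pairing that keeps the per-page DMA memory from leaking. The opcode,
 * subsystem and req_len choices here are placeholders.
 */
static int example_nembed_cfg(struct lpfc_hba *phba, uint32_t req_len)
{
	LPFC_MBOXQ_t *mbox;
	uint32_t alloc_len;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
				     LPFC_MBOX_OPCODE_READ_OBJECT, req_len,
				     LPFC_SLI4_MBX_NEMBED);
	if (alloc_len < req_len) {
		/* Frees every DMA page, the sge_array and the mbox itself */
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* ... fill the payload and issue, then release the same way ... */
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return 0;
}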

/**
 * lpfc_sli4_mbox_rsrc_extent - Initialize the opcode resource extent.
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to an allocated lpfc mbox resource.
 * @exts_count: the number of extents, if required, to allocate.
 * @rsrc_type: the resource extent type.
 * @emb: true if LPFC_SLI4_MBX_EMBED. false if LPFC_SLI4_MBX_NEMBED.
 *
 * This routine completes the subcommand header for SLI4 resource extent
 * mailbox commands. It is called after lpfc_sli4_config. The caller must
 * pass an allocated mailbox and the attributes required to initialize the
 * mailbox correctly.
 *
 * Return: 0 on success, 1 on failure.
 **/
int
lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
			   uint16_t exts_count, uint16_t rsrc_type, bool emb)
{
	uint8_t opcode = 0;
	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc_extnt = NULL;
	void *virtaddr = NULL;

	/* Set up SLI4 ioctl command header fields */
	if (emb == LPFC_SLI4_MBX_NEMBED) {
		/* Get the first SGE entry from the non-embedded DMA memory */
		virtaddr = mbox->sge_array->addr[0];
		if (virtaddr == NULL)
			return 1;
		n_rsrc_extnt = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
	}

	/*
	 * The resource type is common to all extent Opcodes and resides in
	 * the same position.
	 */
	if (emb == LPFC_SLI4_MBX_EMBED)
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
		       rsrc_type);
	else {
		/* This is DMA data. Byteswap is required. */
		bf_set(lpfc_mbx_alloc_rsrc_extents_type,
		       n_rsrc_extnt, rsrc_type);
		lpfc_sli_pcimem_bcopy(&n_rsrc_extnt->word4,
				      &n_rsrc_extnt->word4,
				      sizeof(uint32_t));
	}

	/* Complete the initialization for the particular Opcode. */
	opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox);
	switch (opcode) {
	case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT:
		if (emb == LPFC_SLI4_MBX_EMBED)
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       &mbox->u.mqe.un.alloc_rsrc_extents.u.req,
			       exts_count);
		else
			bf_set(lpfc_mbx_alloc_rsrc_extents_cnt,
			       n_rsrc_extnt, exts_count);
		break;
	case LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT:
	case LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO:
	case LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT:
		/* Initialization is complete. */
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"2929 Resource Extent Opcode x%x is "
				"unsupported\n", opcode);
		return 1;
	}

	return 0;
}
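
/*
 * Extent setup sketch (illustrative only): lpfc_sli4_mbox_rsrc_extent() is
 * always called after lpfc_sli4_config() has laid the command out. The
 * length, count and type values below are placeholders, and the struct
 * used for sizing is an assumption about the embedded request layout.
 */
static int example_alloc_extents(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
				 uint16_t cnt, uint16_t type)
{
	uint32_t len;

	len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
			       LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
			       sizeof(struct lpfc_mbx_alloc_rsrc_extents),
			       LPFC_SLI4_MBX_EMBED);
	if (!len)
		return -ENOMEM;

	/* Fills type/count in the spot the chosen embedding dictates */
	return lpfc_sli4_mbox_rsrc_extent(phba, mbox, cnt, type,
					  LPFC_SLI4_MBX_EMBED);
}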

/**
 * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command queue entry.
 *
 * This routine gets the subsystem from an SLI4-specific SLI_CONFIG mailbox
 * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the
 * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall
 * be returned.
 **/
uint8_t
lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_sli4_config *sli4_cfg;
	union lpfc_sli4_cfg_shdr *cfg_shdr;

	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
		return LPFC_MBOX_SUBSYSTEM_NA;
	sli4_cfg = &mbox->u.mqe.un.sli4_config;

	/* For embedded mbox command, get subsystem from embedded sub-header */
	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
		return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
	}

	/* For non-embedded mbox command, get subsystem from first dma page */
	if (unlikely(!mbox->sge_array))
		return LPFC_MBOX_SUBSYSTEM_NA;
	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
	return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request);
}

/**
 * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @mbox: pointer to lpfc mbox command queue entry.
 *
 * This routine gets the opcode from an SLI4-specific SLI_CONFIG mailbox
 * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if
 * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) shall
 * be returned.
 **/
uint8_t
lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_mbx_sli4_config *sli4_cfg;
	union lpfc_sli4_cfg_shdr *cfg_shdr;

	if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
		return LPFC_MBOX_OPCODE_NA;
	sli4_cfg = &mbox->u.mqe.un.sli4_config;

	/* For embedded mbox command, get opcode from embedded sub-header */
	if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
		cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
		return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
	}

	/* For non-embedded mbox command, get opcode from first dma page */
	if (unlikely(!mbox->sge_array))
		return LPFC_MBOX_OPCODE_NA;
	cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
	return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
}

/**
 * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to lpfc mbox command.
 * @fcf_index: index to fcf table.
 *
 * This routine constructs the non-embedded mailbox command for reading an
 * FCF table entry referred to by @fcf_index.
 *
 * Return: 0 if successful, -ENOMEM if no mailbox was supplied or the DMA
 * memory allocation came up short.
 **/
int
lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
			   struct lpfcMboxq *mboxq,
			   uint16_t fcf_index)
{
	void *virt_addr;
	dma_addr_t phys_addr;
	uint8_t *bytep;
	struct lpfc_mbx_sge sge;
	uint32_t alloc_len, req_len;
	struct lpfc_mbx_read_fcf_tbl *read_fcf;

	if (!mboxq)
		return -ENOMEM;

	req_len = sizeof(struct fcf_record) +
		  sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);

	/* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
			LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
			LPFC_SLI4_MBX_NEMBED);

	if (alloc_len < req_len) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
				"0291 Allocated DMA memory size (x%x) is "
				"less than the requested DMA memory "
				"size (x%x)\n", alloc_len, req_len);
		return -ENOMEM;
	}

	/* Get the first SGE entry from the non-embedded DMA memory. This
	 * routine only uses a single SGE.
	 */
	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
	phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
	virt_addr = mboxq->sge_array->addr[0];
	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;

	/* Set up command fields */
	bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
	/* Perform necessary endian conversion */
	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
	lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));

	return 0;
}
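
/*
 * FCF read sketch (illustrative only): a caller allocates the mailbox from
 * the pool, lets the routine above build the non-embedded READ_FCF_TABLE
 * command, and issues it asynchronously. example_cmpl is a hypothetical
 * completion handler.
 */
static int example_read_fcf(struct lpfc_hba *phba, uint16_t fcf_index,
			    void (*example_cmpl)(struct lpfc_hba *,
						 LPFC_MBOXQ_t *))
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
	if (rc) {
		if (mboxq)
			lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return rc;
	}
	mboxq->vport = phba->pport;
	mboxq->mbox_cmpl = example_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED) {
		lpfc_sli4_mbox_cmd_free(phba, mboxq);
		return -EIO;
	}
	return 0;
}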

/**
 * lpfc_request_features - Configure SLI4 REQUEST_FEATURES mailbox
 * @phba: pointer to lpfc hba data structure.
 * @mboxq: pointer to lpfc mbox command.
 *
 * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
 * mailbox command.
 **/
void
lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
{
	/* Set up SLI4 mailbox command header fields */
	memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
	bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);

	/* Set up host requested features. */
	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
	bf_set(lpfc_mbx_rq_ftr_rq_perfh, &mboxq->u.mqe.un.req_ftrs, 1);

	/* Enable DIF (block guard) only if configured to do so. */
	if (phba->cfg_enable_bg)
		bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);

	/* Enable NPIV only if configured to do so. */
	if (phba->max_vpi && phba->cfg_enable_npiv)
		bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);

	return;
}
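
/*
 * Feature negotiation sketch (illustrative only): REQUEST_FEATURES would
 * typically be issued synchronously by polling during HBA setup; reading
 * back the granted feature bits is elided here.
 */
static int example_req_ftrs(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	int rc;

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;

	lpfc_request_features(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	mempool_free(mboxq, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}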

/**
 * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vport: Vport associated with the VF.
 *
 * This routine initializes @mbox to all zeros and then fills in the mailbox
 * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
 * in the context of an FCF. The driver issues this command to set up a VFI
 * before issuing a FLOGI to log in to the VSAN. The driver should also issue
 * a REG_VFI after a successful VSAN login.
 **/
void
lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
	struct lpfc_mbx_init_vfi *init_vfi;

	memset(mbox, 0, sizeof(*mbox));
	mbox->vport = vport;
	init_vfi = &mbox->u.mqe.un.init_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
	bf_set(lpfc_init_vfi_vr, init_vfi, 1);
	bf_set(lpfc_init_vfi_vt, init_vfi, 1);
	bf_set(lpfc_init_vfi_vp, init_vfi, 1);
	bf_set(lpfc_init_vfi_vfi, init_vfi,
	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
	bf_set(lpfc_init_vfi_vpi, init_vfi,
	       vport->phba->vpi_ids[vport->vpi]);
	bf_set(lpfc_init_vfi_fcfi, init_vfi,
	       vport->phba->fcf.fcfi);
}
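
/*
 * Discovery-order sketch (illustrative only): per the comment above, the
 * VFI is set up before the FLOGI and registered afterwards. Completion
 * handling and the FLOGI exchange itself are elided; example_start_vfi()
 * is hypothetical.
 */
static int example_start_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;

	lpfc_init_vfi(mbox, vport);	/* 1. INIT_VFI                     */
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
	/* 2. FLOGI into the VSAN                                          */
	/* 3. on FLOGI success: lpfc_reg_vfi() and issue REG_VFI           */
	return (rc == MBX_NOT_FINISHED) ? -EIO : 0;
}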

/**
 * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vport: vport associated with the VF.
 * @phys: BDE DMA bus address used to send the service parameters to the HBA.
 *
 * This routine initializes @mbox to all zeros and then fills in the mailbox
 * fields from @vport, using @phys as the DMA address of the buffer that
 * carries the vport's fc service parameters to the HBA for this VFI.
 * REG_VFI configures virtual fabrics identified by VFI in the context of
 * an FCF.
 **/
void
lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
{
	struct lpfc_mbx_reg_vfi *reg_vfi;

	memset(mbox, 0, sizeof(*mbox));
	reg_vfi = &mbox->u.mqe.un.reg_vfi;
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
	bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
	bf_set(lpfc_reg_vfi_vfi, reg_vfi,
	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
	bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
	bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->phba->vpi_ids[vport->vpi]);
	memcpy(reg_vfi->wwn, &vport->fc_portname, sizeof(struct lpfc_name));
	reg_vfi->wwn[0] = cpu_to_le32(reg_vfi->wwn[0]);
	reg_vfi->wwn[1] = cpu_to_le32(reg_vfi->wwn[1]);
	reg_vfi->e_d_tov = vport->phba->fc_edtov;
	reg_vfi->r_a_tov = vport->phba->fc_ratov;
	reg_vfi->bde.addrHigh = putPaddrHigh(phys);
	reg_vfi->bde.addrLow = putPaddrLow(phys);
	reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
	reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
	lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
			 "3134 Register VFI, mydid:x%x, fcfi:%d, "
			 " vfi:%d, vpi:%d, fc_pname:%x%x\n",
			 vport->fc_myDID,
			 vport->phba->fcf.fcfi,
			 vport->phba->sli4_hba.vfi_ids[vport->vfi],
			 vport->phba->vpi_ids[vport->vpi],
			 reg_vfi->wwn[0], reg_vfi->wwn[1]);
}

/**
 * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
 * @phba: pointer to the hba structure to init the VPI for.
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vpi: VPI to be initialized.
 *
 * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
 * command to activate a virtual N_Port. The HBA assigns a MAC address to use
 * with the virtual N_Port. The SLI Host issues this command before issuing a
 * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after
 * a successful virtual N_Port login.
 **/
void
lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
{
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
	       phba->vpi_ids[vpi]);
	bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
	       phba->sli4_hba.vfi_ids[phba->pport->vfi]);
}

/**
 * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
 * @mbox: pointer to lpfc mbox command to initialize.
 * @vport: vport associated with the VF.
 *
 * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
 * (logical N_Port) into the inactive state. The SLI Host must have logged out
 * and unregistered all remote N_Ports to abort any activity on the virtual
 * fabric. The SLI Port posts the mailbox response after marking the virtual
 * fabric inactive.
 **/
void
lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
{
	memset(mbox, 0, sizeof(*mbox));
	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
	bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi,
	       vport->phba->sli4_hba.vfi_ids[vport->vfi]);
}

/**
 * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
 * @phba: pointer to the hba structure.
 * @mbox: pointer to lpfc mbox command to initialize.
 *
 * This function creates an SLI4 dump mailbox command to dump config
 * region 23.
 *
 * Return 0 - success, 1 - memory allocation failed.
 **/
int
lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
	struct lpfc_dmabuf *mp = NULL;
	MAILBOX_t *mb;

	memset(mbox, 0, sizeof(*mbox));
	mb = &mbox->u.mb;

	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (mp)
		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);

	if (!mp || !mp->virt) {
		kfree(mp);
		/* dump config region 23 failed to allocate memory */
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"2569 lpfc dump config region 23: memory"
				" allocation failed\n");
		return 1;
	}

	memset(mp->virt, 0, LPFC_BPL_SIZE);
	INIT_LIST_HEAD(&mp->list);

	/* save address for completion */
	mbox->context1 = (uint8_t *) mp;

	mb->mbxCommand = MBX_DUMP_MEMORY;
	mb->un.varDmp.type = DMP_NV_PARAMS;
	mb->un.varDmp.region_id = DMP_REGION_23;
	mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
	mb->un.varWords[3] = putPaddrLow(mp->phys);
	mb->un.varWords[4] = putPaddrHigh(mp->phys);
	return 0;
}
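
/*
 * Cleanup sketch (illustrative only, in the spirit of this commit's leak
 * fix): the completion handler must release the lpfc_dmabuf stashed in
 * context1 as well as the mailbox itself, since the buffer comes out of
 * the PCI DMA pool and would otherwise be lost. example_rg23_cmpl is a
 * hypothetical completion handler.
 */
static void example_rg23_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)mbox->context1;

	/* ... consume the region 23 data in mp->virt here ... */

	lpfc_mbuf_free(phba, mp->virt, mp->phys);	/* DMA pool buffer */
	kfree(mp);					/* wrapper struct  */
	mempool_free(mbox, phba->mbox_mem_pool);	/* mailbox element */
}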
2245 2245
2246 /** 2246 /**
2247 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command 2247 * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
2248 * @phba: pointer to the hba structure containing the FCF index and RQ ID. 2248 * @phba: pointer to the hba structure containing the FCF index and RQ ID.
2249 * @mbox: pointer to lpfc mbox command to initialize. 2249 * @mbox: pointer to lpfc mbox command to initialize.
2250 * 2250 *
2251 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The 2251 * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
2252 * SLI Host uses the command to activate an FCF after it has acquired FCF 2252 * SLI Host uses the command to activate an FCF after it has acquired FCF
2253 * information via a READ_FCF mailbox command. This mailbox command also is used 2253 * information via a READ_FCF mailbox command. This mailbox command also is used
2254 * to indicate where received unsolicited frames from this FCF will be sent. By 2254 * to indicate where received unsolicited frames from this FCF will be sent. By
2255 * default this routine will set up the FCF to forward all unsolicited frames 2255 * default this routine will set up the FCF to forward all unsolicited frames
2256 * the the RQ ID passed in the @phba. This can be overridden by the caller for 2256 * the the RQ ID passed in the @phba. This can be overridden by the caller for
2257 * more complicated setups. 2257 * more complicated setups.
2258 **/ 2258 **/
2259 void 2259 void
2260 lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox) 2260 lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
2261 { 2261 {
2262 struct lpfc_mbx_reg_fcfi *reg_fcfi; 2262 struct lpfc_mbx_reg_fcfi *reg_fcfi;
2263 2263
2264 memset(mbox, 0, sizeof(*mbox)); 2264 memset(mbox, 0, sizeof(*mbox));
2265 reg_fcfi = &mbox->u.mqe.un.reg_fcfi; 2265 reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
2266 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI); 2266 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
2267 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id); 2267 bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
2268 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID); 2268 bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
2269 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID); 2269 bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
2270 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID); 2270 bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
2271 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, 2271 bf_set(lpfc_reg_fcfi_info_index, reg_fcfi,
2272 phba->fcf.current_rec.fcf_indx); 2272 phba->fcf.current_rec.fcf_indx);
2273 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */ 2273 /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
2274 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3); 2274 bf_set(lpfc_reg_fcfi_mam, reg_fcfi, (~phba->fcf.addr_mode) & 0x3);
2275 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) { 2275 if (phba->fcf.current_rec.vlan_id != LPFC_FCOE_NULL_VID) {
2276 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1); 2276 bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
2277 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, 2277 bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi,
2278 phba->fcf.current_rec.vlan_id); 2278 phba->fcf.current_rec.vlan_id);
2279 } 2279 }
2280 } 2280 }
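
A condensed caller sketch for the initializer above, modeled on the driver's lpfc_register_fcf() path: allocate a mailbox from the mempool, let lpfc_reg_fcfi() apply the defaults from @phba, then issue it asynchronously. Error paths are trimmed for brevity:

	LPFC_MBOXQ_t *fcf_mbxq;
	int rc;

	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!fcf_mbxq)
		return;
	lpfc_reg_fcfi(phba, fcf_mbxq);	/* defaults: hdr_rq ID, current FCF record */
	fcf_mbxq->vport = phba->pport;
	fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		mempool_free(fcf_mbxq, phba->mbox_mem_pool);	/* don't leak the mailbox */
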
2281 2281
2282 /** 2282 /**
2283 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command 2283 * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
2284 * @mbox: pointer to lpfc mbox command to initialize. 2284 * @mbox: pointer to lpfc mbox command to initialize.
2285 * @fcfi: FCFI to be unregistered. 2285 * @fcfi: FCFI to be unregistered.
2286 * 2286 *
2287 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). 2287 * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
2288 * The SLI Host uses the command to deactivate an FCFI. 2288 * The SLI Host uses the command to deactivate an FCFI.
2289 **/ 2289 **/
2290 void 2290 void
2291 lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi) 2291 lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
2292 { 2292 {
2293 memset(mbox, 0, sizeof(*mbox)); 2293 memset(mbox, 0, sizeof(*mbox));
2294 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI); 2294 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
2295 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi); 2295 bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
2296 } 2296 }
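
The tear-down mirror of the REG_FCFI sketch above, condensed from the driver's lpfc_unregister_fcf() path; the FCFI handed in is the one recorded when the FCF was registered:

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return;
	lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);	/* FCFI saved at REG_FCFI time */
	mbox->vport = phba->pport;
	mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
		mempool_free(mbox, phba->mbox_mem_pool);
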
2297 2297
2298 /** 2298 /**
2299 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command 2299 * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
2300 * @mbox: pointer to lpfc mbox command to initialize. 2300 * @mbox: pointer to lpfc mbox command to initialize.
2301 * @ndlp: The nodelist structure that describes the RPI to resume. 2301 * @ndlp: The nodelist structure that describes the RPI to resume.
2302 * 2302 *
2303 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a 2303 * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
2304 * link event. 2304 * link event.
2305 **/ 2305 **/
2306 void 2306 void
2307 lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp) 2307 lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
2308 { 2308 {
2309 struct lpfc_hba *phba = ndlp->phba; 2309 struct lpfc_hba *phba = ndlp->phba;
2310 struct lpfc_mbx_resume_rpi *resume_rpi; 2310 struct lpfc_mbx_resume_rpi *resume_rpi;
2311 2311
2312 memset(mbox, 0, sizeof(*mbox)); 2312 memset(mbox, 0, sizeof(*mbox));
2313 resume_rpi = &mbox->u.mqe.un.resume_rpi; 2313 resume_rpi = &mbox->u.mqe.un.resume_rpi;
2314 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI); 2314 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
2315 bf_set(lpfc_resume_rpi_index, resume_rpi, 2315 bf_set(lpfc_resume_rpi_index, resume_rpi,
2316 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]); 2316 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
2317 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI); 2317 bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
2318 resume_rpi->event_tag = ndlp->phba->fc_eventTag; 2318 resume_rpi->event_tag = ndlp->phba->fc_eventTag;
2319 } 2319 }
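
A short usage sketch for the initializer above; the driver's fuller wrapper is lpfc_sli4_resume_rpi(), which also attaches a completion routine. Note that lpfc_resume_rpi() translates the node's logical nlp_rpi through the rpi_ids[] table, so callers pass the ndlp rather than a raw RPI:

	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq)
		return -ENOMEM;
	lpfc_resume_rpi(mboxq, ndlp);	/* maps ndlp->nlp_rpi via rpi_ids[] */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		mempool_free(mboxq, phba->mbox_mem_pool);
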
2320 2320
2321 /** 2321 /**
2322 * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages 2322 * lpfc_supported_pages - Initialize the PORT_CAPABILITIES supported pages
2323 * mailbox command. 2323 * mailbox command.
2324 * @mbox: pointer to lpfc mbox command to initialize. 2324 * @mbox: pointer to lpfc mbox command to initialize.
2325 * 2325 *
2326 * The PORT_CAPABILITIES supported pages mailbox command is issued to 2326 * The PORT_CAPABILITIES supported pages mailbox command is issued to
2327 * retrieve the particular feature pages supported by the port. 2327 * retrieve the particular feature pages supported by the port.
2328 **/ 2328 **/
2329 void 2329 void
2330 lpfc_supported_pages(struct lpfcMboxq *mbox) 2330 lpfc_supported_pages(struct lpfcMboxq *mbox)
2331 { 2331 {
2332 struct lpfc_mbx_supp_pages *supp_pages; 2332 struct lpfc_mbx_supp_pages *supp_pages;
2333 2333
2334 memset(mbox, 0, sizeof(*mbox)); 2334 memset(mbox, 0, sizeof(*mbox));
2335 supp_pages = &mbox->u.mqe.un.supp_pages; 2335 supp_pages = &mbox->u.mqe.un.supp_pages;
2336 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES); 2336 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2337 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES); 2337 bf_set(cpn, supp_pages, LPFC_SUPP_PAGES);
2338 } 2338 }
2339 2339
2340 /** 2340 /**
2341 * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd. 2341 * lpfc_pc_sli4_params - Initialize the PORT_CAPABILITIES SLI4 Params mbox cmd.
2342 * @mbox: pointer to lpfc mbox command to initialize. 2342 * @mbox: pointer to lpfc mbox command to initialize.
2343 * 2343 *
2344 * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to 2344 * The PORT_CAPABILITIES SLI4 parameters mailbox command is issued to
2345 * retrieve the particular SLI4 features supported by the port. 2345 * retrieve the particular SLI4 features supported by the port.
2346 **/ 2346 **/
2347 void 2347 void
2348 lpfc_pc_sli4_params(struct lpfcMboxq *mbox) 2348 lpfc_pc_sli4_params(struct lpfcMboxq *mbox)
2349 { 2349 {
2350 struct lpfc_mbx_pc_sli4_params *sli4_params; 2350 struct lpfc_mbx_pc_sli4_params *sli4_params;
2351 2351
2352 memset(mbox, 0, sizeof(*mbox)); 2352 memset(mbox, 0, sizeof(*mbox));
2353 sli4_params = &mbox->u.mqe.un.sli4_params; 2353 sli4_params = &mbox->u.mqe.un.sli4_params;
2354 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES); 2354 bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_PORT_CAPABILITIES);
2355 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS); 2355 bf_set(cpn, sli4_params, LPFC_SLI4_PARAMETERS);
2356 } 2356 }
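
Both PORT_CAPABILITIES initializers above follow the same issue pattern. A hedged init-time sketch: these commands typically run before interrupts are enabled, so they are polled rather than completed asynchronously, and the response lands back in the same mqe to be read out with bf_get():

	lpfc_pc_sli4_params(mboxq);	/* or lpfc_supported_pages(mboxq) */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc == MBX_SUCCESS) {
		sli4_params = &mboxq->u.mqe.un.sli4_params;
		/* decode individual capability fields with bf_get() */
	}
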
2357 2357