Commit a82d3f6f7d70bf86eb32164e8e4534117015b338
Committed by James Bottomley
1 parent 5036f0a0ec
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
[SCSI] csiostor: remove unneeded memset()
No need to memset() this when we just copy over it on the next line.

Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Acked-by: Naresh Kumar Inna <naresh@chelsio.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
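For readers skimming the diff, a minimal sketch of the pattern being removed is shown below. This is illustrative code only, not the driver source, and the function name fill_value() is hypothetical; the point is simply that when the following memcpy() rewrites at least every byte the memset() just zeroed, the memset() does no useful work and can be deleted.

    #include <string.h>

    /* Hypothetical example, not csiostor code: zeroing a buffer that is
     * immediately and fully overwritten by memcpy() is redundant work.
     */
    static void fill_value(unsigned char *dst, const unsigned char *src, size_t len)
    {
            memset(dst, 0, len);    /* redundant: every byte is rewritten below */
            memcpy(dst, src, len);  /* keeping only this line gives the same result */
    }

In csio_append_attrib() below, the removed memset() cleared len - 4 bytes while the memcpy() on the next line writes len bytes into the same buffer, so the zeroed region is entirely overwritten and dropping the memset() changes no behaviour.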
Showing 1 changed file with 0 additions and 1 deletion (inline diff)
drivers/scsi/csiostor/csio_lnode.c
1 | /* | 1 | /* |
2 | * This file is part of the Chelsio FCoE driver for Linux. | 2 | * This file is part of the Chelsio FCoE driver for Linux. |
3 | * | 3 | * |
4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. | 4 | * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * This software is available to you under a choice of one of two | 6 | * This software is available to you under a choice of one of two |
7 | * licenses. You may choose to be licensed under the terms of the GNU | 7 | * licenses. You may choose to be licensed under the terms of the GNU |
8 | * General Public License (GPL) Version 2, available from the file | 8 | * General Public License (GPL) Version 2, available from the file |
9 | * COPYING in the main directory of this source tree, or the | 9 | * COPYING in the main directory of this source tree, or the |
10 | * OpenIB.org BSD license below: | 10 | * OpenIB.org BSD license below: |
11 | * | 11 | * |
12 | * Redistribution and use in source and binary forms, with or | 12 | * Redistribution and use in source and binary forms, with or |
13 | * without modification, are permitted provided that the following | 13 | * without modification, are permitted provided that the following |
14 | * conditions are met: | 14 | * conditions are met: |
15 | * | 15 | * |
16 | * - Redistributions of source code must retain the above | 16 | * - Redistributions of source code must retain the above |
17 | * copyright notice, this list of conditions and the following | 17 | * copyright notice, this list of conditions and the following |
18 | * disclaimer. | 18 | * disclaimer. |
19 | * | 19 | * |
20 | * - Redistributions in binary form must reproduce the above | 20 | * - Redistributions in binary form must reproduce the above |
21 | * copyright notice, this list of conditions and the following | 21 | * copyright notice, this list of conditions and the following |
22 | * disclaimer in the documentation and/or other materials | 22 | * disclaimer in the documentation and/or other materials |
23 | * provided with the distribution. | 23 | * provided with the distribution. |
24 | * | 24 | * |
25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 25 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 26 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | 27 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND |
28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | 28 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS |
29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | 29 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN |
30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | 30 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN |
31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 31 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
32 | * SOFTWARE. | 32 | * SOFTWARE. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | #include <linux/kernel.h> | 35 | #include <linux/kernel.h> |
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
38 | #include <linux/utsname.h> | 38 | #include <linux/utsname.h> |
39 | #include <scsi/scsi_device.h> | 39 | #include <scsi/scsi_device.h> |
40 | #include <scsi/scsi_transport_fc.h> | 40 | #include <scsi/scsi_transport_fc.h> |
41 | #include <asm/unaligned.h> | 41 | #include <asm/unaligned.h> |
42 | #include <scsi/fc/fc_els.h> | 42 | #include <scsi/fc/fc_els.h> |
43 | #include <scsi/fc/fc_fs.h> | 43 | #include <scsi/fc/fc_fs.h> |
44 | #include <scsi/fc/fc_gs.h> | 44 | #include <scsi/fc/fc_gs.h> |
45 | #include <scsi/fc/fc_ms.h> | 45 | #include <scsi/fc/fc_ms.h> |
46 | 46 | ||
47 | #include "csio_hw.h" | 47 | #include "csio_hw.h" |
48 | #include "csio_mb.h" | 48 | #include "csio_mb.h" |
49 | #include "csio_lnode.h" | 49 | #include "csio_lnode.h" |
50 | #include "csio_rnode.h" | 50 | #include "csio_rnode.h" |
51 | 51 | ||
52 | int csio_fcoe_rnodes = 1024; | 52 | int csio_fcoe_rnodes = 1024; |
53 | int csio_fdmi_enable = 1; | 53 | int csio_fdmi_enable = 1; |
54 | 54 | ||
55 | #define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1) | 55 | #define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1) |
56 | 56 | ||
57 | /* Lnode SM declarations */ | 57 | /* Lnode SM declarations */ |
58 | static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev); | 58 | static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev); |
59 | static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev); | 59 | static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev); |
60 | static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev); | 60 | static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev); |
61 | static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev); | 61 | static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev); |
62 | 62 | ||
63 | static int csio_ln_mgmt_submit_req(struct csio_ioreq *, | 63 | static int csio_ln_mgmt_submit_req(struct csio_ioreq *, |
64 | void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), | 64 | void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), |
65 | enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t); | 65 | enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t); |
66 | 66 | ||
67 | /* LN event mapping */ | 67 | /* LN event mapping */ |
68 | static enum csio_ln_ev fwevt_to_lnevt[] = { | 68 | static enum csio_ln_ev fwevt_to_lnevt[] = { |
69 | CSIO_LNE_NONE, /* None */ | 69 | CSIO_LNE_NONE, /* None */ |
70 | CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */ | 70 | CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */ |
71 | CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */ | 71 | CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */ |
72 | CSIO_LNE_NONE, /* PLOGI_RCVD */ | 72 | CSIO_LNE_NONE, /* PLOGI_RCVD */ |
73 | CSIO_LNE_NONE, /* PLOGO_RCVD */ | 73 | CSIO_LNE_NONE, /* PLOGO_RCVD */ |
74 | CSIO_LNE_NONE, /* PRLI_ACC_RCVD */ | 74 | CSIO_LNE_NONE, /* PRLI_ACC_RCVD */ |
75 | CSIO_LNE_NONE, /* PRLI_RJT_RCVD */ | 75 | CSIO_LNE_NONE, /* PRLI_RJT_RCVD */ |
76 | CSIO_LNE_NONE, /* PRLI_RCVD */ | 76 | CSIO_LNE_NONE, /* PRLI_RCVD */ |
77 | CSIO_LNE_NONE, /* PRLO_RCVD */ | 77 | CSIO_LNE_NONE, /* PRLO_RCVD */ |
78 | CSIO_LNE_NONE, /* NPORT_ID_CHGD */ | 78 | CSIO_LNE_NONE, /* NPORT_ID_CHGD */ |
79 | CSIO_LNE_LOGO, /* FLOGO_RCVD */ | 79 | CSIO_LNE_LOGO, /* FLOGO_RCVD */ |
80 | CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */ | 80 | CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */ |
81 | CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */ | 81 | CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */ |
82 | CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */ | 82 | CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */ |
83 | CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */ | 83 | CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */ |
84 | CSIO_LNE_NONE, /* FDISC_RJT_RCVD */ | 84 | CSIO_LNE_NONE, /* FDISC_RJT_RCVD */ |
85 | CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */ | 85 | CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */ |
86 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */ | 86 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */ |
87 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */ | 87 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */ |
88 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ | 88 | CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */ |
89 | CSIO_LNE_NONE, /* PRLI_TMO */ | 89 | CSIO_LNE_NONE, /* PRLI_TMO */ |
90 | CSIO_LNE_NONE, /* ADISC_TMO */ | 90 | CSIO_LNE_NONE, /* ADISC_TMO */ |
91 | CSIO_LNE_NONE, /* RSCN_DEV_LOST */ | 91 | CSIO_LNE_NONE, /* RSCN_DEV_LOST */ |
92 | CSIO_LNE_NONE, /* SCR_ACC_RCVD */ | 92 | CSIO_LNE_NONE, /* SCR_ACC_RCVD */ |
93 | CSIO_LNE_NONE, /* ADISC_RJT_RCVD */ | 93 | CSIO_LNE_NONE, /* ADISC_RJT_RCVD */ |
94 | CSIO_LNE_NONE, /* LOGO_SNT */ | 94 | CSIO_LNE_NONE, /* LOGO_SNT */ |
95 | CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */ | 95 | CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */ |
96 | }; | 96 | }; |
97 | 97 | ||
98 | #define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \ | 98 | #define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \ |
99 | CSIO_LNE_NONE : \ | 99 | CSIO_LNE_NONE : \ |
100 | fwevt_to_lnevt[_evt]) | 100 | fwevt_to_lnevt[_evt]) |
101 | 101 | ||
102 | #define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd) | 102 | #define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd) |
103 | #define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason) | 103 | #define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason) |
104 | #define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan) | 104 | #define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan) |
105 | #define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN)) | 105 | #define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN)) |
106 | 106 | ||
107 | /* | 107 | /* |
108 | * csio_ln_match_by_portid - lookup lnode using given portid. | 108 | * csio_ln_match_by_portid - lookup lnode using given portid. |
109 | * @hw: HW module | 109 | * @hw: HW module |
110 | * @portid: port-id. | 110 | * @portid: port-id. |
111 | * | 111 | * |
112 | * If found, returns lnode matching given portid otherwise returns NULL. | 112 | * If found, returns lnode matching given portid otherwise returns NULL. |
113 | */ | 113 | */ |
114 | static struct csio_lnode * | 114 | static struct csio_lnode * |
115 | csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) | 115 | csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid) |
116 | { | 116 | { |
117 | struct csio_lnode *ln = hw->rln; | 117 | struct csio_lnode *ln = hw->rln; |
118 | struct list_head *tmp; | 118 | struct list_head *tmp; |
119 | 119 | ||
120 | /* Match siblings lnode with portid */ | 120 | /* Match siblings lnode with portid */ |
121 | list_for_each(tmp, &hw->sln_head) { | 121 | list_for_each(tmp, &hw->sln_head) { |
122 | ln = (struct csio_lnode *) tmp; | 122 | ln = (struct csio_lnode *) tmp; |
123 | if (ln->portid == portid) | 123 | if (ln->portid == portid) |
124 | return ln; | 124 | return ln; |
125 | } | 125 | } |
126 | 126 | ||
127 | return NULL; | 127 | return NULL; |
128 | } | 128 | } |
129 | 129 | ||
130 | /* | 130 | /* |
131 | * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id. | 131 | * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id. |
132 | * @hw - HW module | 132 | * @hw - HW module |
133 | * @vnpi - vnp index. | 133 | * @vnpi - vnp index. |
134 | * Returns - If found, returns lnode matching given vnp id | 134 | * Returns - If found, returns lnode matching given vnp id |
135 | * otherwise returns NULL. | 135 | * otherwise returns NULL. |
136 | */ | 136 | */ |
137 | static struct csio_lnode * | 137 | static struct csio_lnode * |
138 | csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id) | 138 | csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id) |
139 | { | 139 | { |
140 | struct list_head *tmp1, *tmp2; | 140 | struct list_head *tmp1, *tmp2; |
141 | struct csio_lnode *sln = NULL, *cln = NULL; | 141 | struct csio_lnode *sln = NULL, *cln = NULL; |
142 | 142 | ||
143 | if (list_empty(&hw->sln_head)) { | 143 | if (list_empty(&hw->sln_head)) { |
144 | CSIO_INC_STATS(hw, n_lnlkup_miss); | 144 | CSIO_INC_STATS(hw, n_lnlkup_miss); |
145 | return NULL; | 145 | return NULL; |
146 | } | 146 | } |
147 | /* Traverse sibling lnodes */ | 147 | /* Traverse sibling lnodes */ |
148 | list_for_each(tmp1, &hw->sln_head) { | 148 | list_for_each(tmp1, &hw->sln_head) { |
149 | sln = (struct csio_lnode *) tmp1; | 149 | sln = (struct csio_lnode *) tmp1; |
150 | 150 | ||
151 | /* Match sibling lnode */ | 151 | /* Match sibling lnode */ |
152 | if (sln->vnp_flowid == vnp_id) | 152 | if (sln->vnp_flowid == vnp_id) |
153 | return sln; | 153 | return sln; |
154 | 154 | ||
155 | if (list_empty(&sln->cln_head)) | 155 | if (list_empty(&sln->cln_head)) |
156 | continue; | 156 | continue; |
157 | 157 | ||
158 | /* Traverse children lnodes */ | 158 | /* Traverse children lnodes */ |
159 | list_for_each(tmp2, &sln->cln_head) { | 159 | list_for_each(tmp2, &sln->cln_head) { |
160 | cln = (struct csio_lnode *) tmp2; | 160 | cln = (struct csio_lnode *) tmp2; |
161 | 161 | ||
162 | if (cln->vnp_flowid == vnp_id) | 162 | if (cln->vnp_flowid == vnp_id) |
163 | return cln; | 163 | return cln; |
164 | } | 164 | } |
165 | } | 165 | } |
166 | CSIO_INC_STATS(hw, n_lnlkup_miss); | 166 | CSIO_INC_STATS(hw, n_lnlkup_miss); |
167 | return NULL; | 167 | return NULL; |
168 | } | 168 | } |
169 | 169 | ||
170 | /** | 170 | /** |
171 | * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn. | 171 | * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn. |
172 | * @hw: HW module. | 172 | * @hw: HW module. |
173 | * @wwpn: WWPN. | 173 | * @wwpn: WWPN. |
174 | * | 174 | * |
175 | * If found, returns lnode matching given wwpn, returns NULL otherwise. | 175 | * If found, returns lnode matching given wwpn, returns NULL otherwise. |
176 | */ | 176 | */ |
177 | struct csio_lnode * | 177 | struct csio_lnode * |
178 | csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn) | 178 | csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn) |
179 | { | 179 | { |
180 | struct list_head *tmp1, *tmp2; | 180 | struct list_head *tmp1, *tmp2; |
181 | struct csio_lnode *sln = NULL, *cln = NULL; | 181 | struct csio_lnode *sln = NULL, *cln = NULL; |
182 | 182 | ||
183 | if (list_empty(&hw->sln_head)) { | 183 | if (list_empty(&hw->sln_head)) { |
184 | CSIO_INC_STATS(hw, n_lnlkup_miss); | 184 | CSIO_INC_STATS(hw, n_lnlkup_miss); |
185 | return NULL; | 185 | return NULL; |
186 | } | 186 | } |
187 | /* Traverse sibling lnodes */ | 187 | /* Traverse sibling lnodes */ |
188 | list_for_each(tmp1, &hw->sln_head) { | 188 | list_for_each(tmp1, &hw->sln_head) { |
189 | sln = (struct csio_lnode *) tmp1; | 189 | sln = (struct csio_lnode *) tmp1; |
190 | 190 | ||
191 | /* Match sibling lnode */ | 191 | /* Match sibling lnode */ |
192 | if (!memcmp(csio_ln_wwpn(sln), wwpn, 8)) | 192 | if (!memcmp(csio_ln_wwpn(sln), wwpn, 8)) |
193 | return sln; | 193 | return sln; |
194 | 194 | ||
195 | if (list_empty(&sln->cln_head)) | 195 | if (list_empty(&sln->cln_head)) |
196 | continue; | 196 | continue; |
197 | 197 | ||
198 | /* Traverse children lnodes */ | 198 | /* Traverse children lnodes */ |
199 | list_for_each(tmp2, &sln->cln_head) { | 199 | list_for_each(tmp2, &sln->cln_head) { |
200 | cln = (struct csio_lnode *) tmp2; | 200 | cln = (struct csio_lnode *) tmp2; |
201 | 201 | ||
202 | if (!memcmp(csio_ln_wwpn(cln), wwpn, 8)) | 202 | if (!memcmp(csio_ln_wwpn(cln), wwpn, 8)) |
203 | return cln; | 203 | return cln; |
204 | } | 204 | } |
205 | } | 205 | } |
206 | return NULL; | 206 | return NULL; |
207 | } | 207 | } |
208 | 208 | ||
209 | /* FDMI */ | 209 | /* FDMI */ |
210 | static void | 210 | static void |
211 | csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op) | 211 | csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op) |
212 | { | 212 | { |
213 | struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf; | 213 | struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf; |
214 | cmd->ct_rev = FC_CT_REV; | 214 | cmd->ct_rev = FC_CT_REV; |
215 | cmd->ct_fs_type = type; | 215 | cmd->ct_fs_type = type; |
216 | cmd->ct_fs_subtype = sub_type; | 216 | cmd->ct_fs_subtype = sub_type; |
217 | cmd->ct_cmd = htons(op); | 217 | cmd->ct_cmd = htons(op); |
218 | } | 218 | } |
219 | 219 | ||
220 | static int | 220 | static int |
221 | csio_hostname(uint8_t *buf, size_t buf_len) | 221 | csio_hostname(uint8_t *buf, size_t buf_len) |
222 | { | 222 | { |
223 | if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0) | 223 | if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0) |
224 | return 0; | 224 | return 0; |
225 | return -1; | 225 | return -1; |
226 | } | 226 | } |
227 | 227 | ||
228 | static int | 228 | static int |
229 | csio_osname(uint8_t *buf, size_t buf_len) | 229 | csio_osname(uint8_t *buf, size_t buf_len) |
230 | { | 230 | { |
231 | if (snprintf(buf, buf_len, "%s %s %s", | 231 | if (snprintf(buf, buf_len, "%s %s %s", |
232 | init_utsname()->sysname, | 232 | init_utsname()->sysname, |
233 | init_utsname()->release, | 233 | init_utsname()->release, |
234 | init_utsname()->version) > 0) | 234 | init_utsname()->version) > 0) |
235 | return 0; | 235 | return 0; |
236 | 236 | ||
237 | return -1; | 237 | return -1; |
238 | } | 238 | } |
239 | 239 | ||
240 | static inline void | 240 | static inline void |
241 | csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len) | 241 | csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len) |
242 | { | 242 | { |
243 | struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr; | 243 | struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr; |
244 | ae->type = htons(type); | 244 | ae->type = htons(type); |
245 | len += 4; /* includes attribute type and length */ | 245 | len += 4; /* includes attribute type and length */ |
246 | len = (len + 3) & ~3; /* should be multiple of 4 bytes */ | 246 | len = (len + 3) & ~3; /* should be multiple of 4 bytes */ |
247 | ae->len = htons(len); | 247 | ae->len = htons(len); |
248 | memset(ae->value, 0, len - 4); | ||
249 | memcpy(ae->value, val, len); | 248 | memcpy(ae->value, val, len); |
250 | *ptr += len; | 249 | *ptr += len; |
251 | } | 250 | } |
252 | 251 | ||
253 | /* | 252 | /* |
254 | * csio_ln_fdmi_done - FDMI registeration completion | 253 | * csio_ln_fdmi_done - FDMI registeration completion |
255 | * @hw: HW context | 254 | * @hw: HW context |
256 | * @fdmi_req: fdmi request | 255 | * @fdmi_req: fdmi request |
257 | */ | 256 | */ |
258 | static void | 257 | static void |
259 | csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req) | 258 | csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req) |
260 | { | 259 | { |
261 | void *cmd; | 260 | void *cmd; |
262 | struct csio_lnode *ln = fdmi_req->lnode; | 261 | struct csio_lnode *ln = fdmi_req->lnode; |
263 | 262 | ||
264 | if (fdmi_req->wr_status != FW_SUCCESS) { | 263 | if (fdmi_req->wr_status != FW_SUCCESS) { |
265 | csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n", | 264 | csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n", |
266 | fdmi_req->wr_status); | 265 | fdmi_req->wr_status); |
267 | CSIO_INC_STATS(ln, n_fdmi_err); | 266 | CSIO_INC_STATS(ln, n_fdmi_err); |
268 | } | 267 | } |
269 | 268 | ||
270 | cmd = fdmi_req->dma_buf.vaddr; | 269 | cmd = fdmi_req->dma_buf.vaddr; |
271 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { | 270 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { |
272 | csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n", | 271 | csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n", |
273 | csio_ct_reason(cmd), csio_ct_expl(cmd)); | 272 | csio_ct_reason(cmd), csio_ct_expl(cmd)); |
274 | } | 273 | } |
275 | } | 274 | } |
276 | 275 | ||
277 | /* | 276 | /* |
278 | * csio_ln_fdmi_rhba_cbfn - RHBA completion | 277 | * csio_ln_fdmi_rhba_cbfn - RHBA completion |
279 | * @hw: HW context | 278 | * @hw: HW context |
280 | * @fdmi_req: fdmi request | 279 | * @fdmi_req: fdmi request |
281 | */ | 280 | */ |
282 | static void | 281 | static void |
283 | csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) | 282 | csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) |
284 | { | 283 | { |
285 | void *cmd; | 284 | void *cmd; |
286 | uint8_t *pld; | 285 | uint8_t *pld; |
287 | uint32_t len = 0; | 286 | uint32_t len = 0; |
288 | __be32 val; | 287 | __be32 val; |
289 | __be16 mfs; | 288 | __be16 mfs; |
290 | uint32_t numattrs = 0; | 289 | uint32_t numattrs = 0; |
291 | struct csio_lnode *ln = fdmi_req->lnode; | 290 | struct csio_lnode *ln = fdmi_req->lnode; |
292 | struct fs_fdmi_attrs *attrib_blk; | 291 | struct fs_fdmi_attrs *attrib_blk; |
293 | struct fc_fdmi_port_name *port_name; | 292 | struct fc_fdmi_port_name *port_name; |
294 | uint8_t buf[64]; | 293 | uint8_t buf[64]; |
295 | uint8_t *fc4_type; | 294 | uint8_t *fc4_type; |
296 | 295 | ||
297 | if (fdmi_req->wr_status != FW_SUCCESS) { | 296 | if (fdmi_req->wr_status != FW_SUCCESS) { |
298 | csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", | 297 | csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", |
299 | fdmi_req->wr_status); | 298 | fdmi_req->wr_status); |
300 | CSIO_INC_STATS(ln, n_fdmi_err); | 299 | CSIO_INC_STATS(ln, n_fdmi_err); |
301 | } | 300 | } |
302 | 301 | ||
303 | cmd = fdmi_req->dma_buf.vaddr; | 302 | cmd = fdmi_req->dma_buf.vaddr; |
304 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { | 303 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { |
305 | csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n", | 304 | csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n", |
306 | csio_ct_reason(cmd), csio_ct_expl(cmd)); | 305 | csio_ct_reason(cmd), csio_ct_expl(cmd)); |
307 | } | 306 | } |
308 | 307 | ||
309 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { | 308 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { |
310 | CSIO_INC_STATS(ln, n_fdmi_err); | 309 | CSIO_INC_STATS(ln, n_fdmi_err); |
311 | return; | 310 | return; |
312 | } | 311 | } |
313 | 312 | ||
314 | /* Prepare CT hdr for RPA cmd */ | 313 | /* Prepare CT hdr for RPA cmd */ |
315 | memset(cmd, 0, FC_CT_HDR_LEN); | 314 | memset(cmd, 0, FC_CT_HDR_LEN); |
316 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA); | 315 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA); |
317 | 316 | ||
318 | /* Prepare RPA payload */ | 317 | /* Prepare RPA payload */ |
319 | pld = (uint8_t *)csio_ct_get_pld(cmd); | 318 | pld = (uint8_t *)csio_ct_get_pld(cmd); |
320 | port_name = (struct fc_fdmi_port_name *)pld; | 319 | port_name = (struct fc_fdmi_port_name *)pld; |
321 | memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); | 320 | memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); |
322 | pld += sizeof(*port_name); | 321 | pld += sizeof(*port_name); |
323 | 322 | ||
324 | /* Start appending Port attributes */ | 323 | /* Start appending Port attributes */ |
325 | attrib_blk = (struct fs_fdmi_attrs *)pld; | 324 | attrib_blk = (struct fs_fdmi_attrs *)pld; |
326 | attrib_blk->numattrs = 0; | 325 | attrib_blk->numattrs = 0; |
327 | len += sizeof(attrib_blk->numattrs); | 326 | len += sizeof(attrib_blk->numattrs); |
328 | pld += sizeof(attrib_blk->numattrs); | 327 | pld += sizeof(attrib_blk->numattrs); |
329 | 328 | ||
330 | fc4_type = &buf[0]; | 329 | fc4_type = &buf[0]; |
331 | memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); | 330 | memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); |
332 | fc4_type[2] = 1; | 331 | fc4_type[2] = 1; |
333 | fc4_type[7] = 1; | 332 | fc4_type[7] = 1; |
334 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES, | 333 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES, |
335 | fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); | 334 | fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN); |
336 | numattrs++; | 335 | numattrs++; |
337 | val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); | 336 | val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT); |
338 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, | 337 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED, |
339 | (uint8_t *)&val, | 338 | (uint8_t *)&val, |
340 | FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); | 339 | FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN); |
341 | numattrs++; | 340 | numattrs++; |
342 | 341 | ||
343 | if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G) | 342 | if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G) |
344 | val = htonl(FC_PORTSPEED_1GBIT); | 343 | val = htonl(FC_PORTSPEED_1GBIT); |
345 | else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G) | 344 | else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G) |
346 | val = htonl(FC_PORTSPEED_10GBIT); | 345 | val = htonl(FC_PORTSPEED_10GBIT); |
347 | else | 346 | else |
348 | val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN); | 347 | val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN); |
349 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, | 348 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED, |
350 | (uint8_t *)&val, | 349 | (uint8_t *)&val, |
351 | FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); | 350 | FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN); |
352 | numattrs++; | 351 | numattrs++; |
353 | 352 | ||
354 | mfs = ln->ln_sparm.csp.sp_bb_data; | 353 | mfs = ln->ln_sparm.csp.sp_bb_data; |
355 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE, | 354 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE, |
356 | (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN); | 355 | (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN); |
357 | numattrs++; | 356 | numattrs++; |
358 | 357 | ||
359 | strcpy(buf, "csiostor"); | 358 | strcpy(buf, "csiostor"); |
360 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf, | 359 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf, |
361 | (uint16_t)strlen(buf)); | 360 | (uint16_t)strlen(buf)); |
362 | numattrs++; | 361 | numattrs++; |
363 | 362 | ||
364 | if (!csio_hostname(buf, sizeof(buf))) { | 363 | if (!csio_hostname(buf, sizeof(buf))) { |
365 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME, | 364 | csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME, |
366 | buf, (uint16_t)strlen(buf)); | 365 | buf, (uint16_t)strlen(buf)); |
367 | numattrs++; | 366 | numattrs++; |
368 | } | 367 | } |
369 | attrib_blk->numattrs = htonl(numattrs); | 368 | attrib_blk->numattrs = htonl(numattrs); |
370 | len = (uint32_t)(pld - (uint8_t *)cmd); | 369 | len = (uint32_t)(pld - (uint8_t *)cmd); |
371 | 370 | ||
372 | /* Submit FDMI RPA request */ | 371 | /* Submit FDMI RPA request */ |
373 | spin_lock_irq(&hw->lock); | 372 | spin_lock_irq(&hw->lock); |
374 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, | 373 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, |
375 | FCOE_CT, &fdmi_req->dma_buf, len)) { | 374 | FCOE_CT, &fdmi_req->dma_buf, len)) { |
376 | CSIO_INC_STATS(ln, n_fdmi_err); | 375 | CSIO_INC_STATS(ln, n_fdmi_err); |
377 | csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); | 376 | csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); |
378 | } | 377 | } |
379 | spin_unlock_irq(&hw->lock); | 378 | spin_unlock_irq(&hw->lock); |
380 | } | 379 | } |
381 | 380 | ||
382 | /* | 381 | /* |
383 | * csio_ln_fdmi_dprt_cbfn - DPRT completion | 382 | * csio_ln_fdmi_dprt_cbfn - DPRT completion |
384 | * @hw: HW context | 383 | * @hw: HW context |
385 | * @fdmi_req: fdmi request | 384 | * @fdmi_req: fdmi request |
386 | */ | 385 | */ |
387 | static void | 386 | static void |
388 | csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) | 387 | csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) |
389 | { | 388 | { |
390 | void *cmd; | 389 | void *cmd; |
391 | uint8_t *pld; | 390 | uint8_t *pld; |
392 | uint32_t len = 0; | 391 | uint32_t len = 0; |
393 | uint32_t numattrs = 0; | 392 | uint32_t numattrs = 0; |
394 | __be32 maxpayload = htonl(65536); | 393 | __be32 maxpayload = htonl(65536); |
395 | struct fc_fdmi_hba_identifier *hbaid; | 394 | struct fc_fdmi_hba_identifier *hbaid; |
396 | struct csio_lnode *ln = fdmi_req->lnode; | 395 | struct csio_lnode *ln = fdmi_req->lnode; |
397 | struct fc_fdmi_rpl *reg_pl; | 396 | struct fc_fdmi_rpl *reg_pl; |
398 | struct fs_fdmi_attrs *attrib_blk; | 397 | struct fs_fdmi_attrs *attrib_blk; |
399 | uint8_t buf[64]; | 398 | uint8_t buf[64]; |
400 | 399 | ||
401 | if (fdmi_req->wr_status != FW_SUCCESS) { | 400 | if (fdmi_req->wr_status != FW_SUCCESS) { |
402 | csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n", | 401 | csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n", |
403 | fdmi_req->wr_status); | 402 | fdmi_req->wr_status); |
404 | CSIO_INC_STATS(ln, n_fdmi_err); | 403 | CSIO_INC_STATS(ln, n_fdmi_err); |
405 | } | 404 | } |
406 | 405 | ||
407 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { | 406 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { |
408 | CSIO_INC_STATS(ln, n_fdmi_err); | 407 | CSIO_INC_STATS(ln, n_fdmi_err); |
409 | return; | 408 | return; |
410 | } | 409 | } |
411 | cmd = fdmi_req->dma_buf.vaddr; | 410 | cmd = fdmi_req->dma_buf.vaddr; |
412 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { | 411 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { |
413 | csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n", | 412 | csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n", |
414 | csio_ct_reason(cmd), csio_ct_expl(cmd)); | 413 | csio_ct_reason(cmd), csio_ct_expl(cmd)); |
415 | } | 414 | } |
416 | 415 | ||
417 | /* Prepare CT hdr for RHBA cmd */ | 416 | /* Prepare CT hdr for RHBA cmd */ |
418 | memset(cmd, 0, FC_CT_HDR_LEN); | 417 | memset(cmd, 0, FC_CT_HDR_LEN); |
419 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA); | 418 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA); |
420 | len = FC_CT_HDR_LEN; | 419 | len = FC_CT_HDR_LEN; |
421 | 420 | ||
422 | /* Prepare RHBA payload */ | 421 | /* Prepare RHBA payload */ |
423 | pld = (uint8_t *)csio_ct_get_pld(cmd); | 422 | pld = (uint8_t *)csio_ct_get_pld(cmd); |
424 | hbaid = (struct fc_fdmi_hba_identifier *)pld; | 423 | hbaid = (struct fc_fdmi_hba_identifier *)pld; |
425 | memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifer */ | 424 | memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifer */ |
426 | pld += sizeof(*hbaid); | 425 | pld += sizeof(*hbaid); |
427 | 426 | ||
428 | /* Register one port per hba */ | 427 | /* Register one port per hba */ |
429 | reg_pl = (struct fc_fdmi_rpl *)pld; | 428 | reg_pl = (struct fc_fdmi_rpl *)pld; |
430 | reg_pl->numport = htonl(1); | 429 | reg_pl->numport = htonl(1); |
431 | memcpy(®_pl->port[0].portname, csio_ln_wwpn(ln), 8); | 430 | memcpy(®_pl->port[0].portname, csio_ln_wwpn(ln), 8); |
432 | pld += sizeof(*reg_pl); | 431 | pld += sizeof(*reg_pl); |
433 | 432 | ||
434 | /* Start appending HBA attributes hba */ | 433 | /* Start appending HBA attributes hba */ |
435 | attrib_blk = (struct fs_fdmi_attrs *)pld; | 434 | attrib_blk = (struct fs_fdmi_attrs *)pld; |
436 | attrib_blk->numattrs = 0; | 435 | attrib_blk->numattrs = 0; |
437 | len += sizeof(attrib_blk->numattrs); | 436 | len += sizeof(attrib_blk->numattrs); |
438 | pld += sizeof(attrib_blk->numattrs); | 437 | pld += sizeof(attrib_blk->numattrs); |
439 | 438 | ||
440 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln), | 439 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln), |
441 | FC_FDMI_HBA_ATTR_NODENAME_LEN); | 440 | FC_FDMI_HBA_ATTR_NODENAME_LEN); |
442 | numattrs++; | 441 | numattrs++; |
443 | 442 | ||
444 | memset(buf, 0, sizeof(buf)); | 443 | memset(buf, 0, sizeof(buf)); |
445 | 444 | ||
446 | strcpy(buf, "Chelsio Communications"); | 445 | strcpy(buf, "Chelsio Communications"); |
447 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf, | 446 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf, |
448 | (uint16_t)strlen(buf)); | 447 | (uint16_t)strlen(buf)); |
449 | numattrs++; | 448 | numattrs++; |
450 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER, | 449 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER, |
451 | hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn)); | 450 | hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn)); |
452 | numattrs++; | 451 | numattrs++; |
453 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id, | 452 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id, |
454 | (uint16_t)sizeof(hw->vpd.id)); | 453 | (uint16_t)sizeof(hw->vpd.id)); |
455 | numattrs++; | 454 | numattrs++; |
456 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION, | 455 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION, |
457 | hw->model_desc, (uint16_t)strlen(hw->model_desc)); | 456 | hw->model_desc, (uint16_t)strlen(hw->model_desc)); |
458 | numattrs++; | 457 | numattrs++; |
459 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION, | 458 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION, |
460 | hw->hw_ver, (uint16_t)sizeof(hw->hw_ver)); | 459 | hw->hw_ver, (uint16_t)sizeof(hw->hw_ver)); |
461 | numattrs++; | 460 | numattrs++; |
462 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION, | 461 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION, |
463 | hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str)); | 462 | hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str)); |
464 | numattrs++; | 463 | numattrs++; |
465 | 464 | ||
466 | if (!csio_osname(buf, sizeof(buf))) { | 465 | if (!csio_osname(buf, sizeof(buf))) { |
467 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION, | 466 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION, |
468 | buf, (uint16_t)strlen(buf)); | 467 | buf, (uint16_t)strlen(buf)); |
469 | numattrs++; | 468 | numattrs++; |
470 | } | 469 | } |
471 | 470 | ||
472 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD, | 471 | csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD, |
473 | (uint8_t *)&maxpayload, | 472 | (uint8_t *)&maxpayload, |
474 | FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); | 473 | FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN); |
475 | len = (uint32_t)(pld - (uint8_t *)cmd); | 474 | len = (uint32_t)(pld - (uint8_t *)cmd); |
476 | numattrs++; | 475 | numattrs++; |
477 | attrib_blk->numattrs = htonl(numattrs); | 476 | attrib_blk->numattrs = htonl(numattrs); |
478 | 477 | ||
479 | /* Submit FDMI RHBA request */ | 478 | /* Submit FDMI RHBA request */ |
480 | spin_lock_irq(&hw->lock); | 479 | spin_lock_irq(&hw->lock); |
481 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn, | 480 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn, |
482 | FCOE_CT, &fdmi_req->dma_buf, len)) { | 481 | FCOE_CT, &fdmi_req->dma_buf, len)) { |
483 | CSIO_INC_STATS(ln, n_fdmi_err); | 482 | CSIO_INC_STATS(ln, n_fdmi_err); |
484 | csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n"); | 483 | csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n"); |
485 | } | 484 | } |
486 | spin_unlock_irq(&hw->lock); | 485 | spin_unlock_irq(&hw->lock); |
487 | } | 486 | } |
488 | 487 | ||
489 | /* | 488 | /* |
490 | * csio_ln_fdmi_dhba_cbfn - DHBA completion | 489 | * csio_ln_fdmi_dhba_cbfn - DHBA completion |
491 | * @hw: HW context | 490 | * @hw: HW context |
492 | * @fdmi_req: fdmi request | 491 | * @fdmi_req: fdmi request |
493 | */ | 492 | */ |
494 | static void | 493 | static void |
495 | csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) | 494 | csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) |
496 | { | 495 | { |
497 | struct csio_lnode *ln = fdmi_req->lnode; | 496 | struct csio_lnode *ln = fdmi_req->lnode; |
498 | void *cmd; | 497 | void *cmd; |
499 | struct fc_fdmi_port_name *port_name; | 498 | struct fc_fdmi_port_name *port_name; |
500 | uint32_t len; | 499 | uint32_t len; |
501 | 500 | ||
502 | if (fdmi_req->wr_status != FW_SUCCESS) { | 501 | if (fdmi_req->wr_status != FW_SUCCESS) { |
503 | csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n", | 502 | csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n", |
504 | fdmi_req->wr_status); | 503 | fdmi_req->wr_status); |
505 | CSIO_INC_STATS(ln, n_fdmi_err); | 504 | CSIO_INC_STATS(ln, n_fdmi_err); |
506 | } | 505 | } |
507 | 506 | ||
508 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { | 507 | if (!csio_is_rnode_ready(fdmi_req->rnode)) { |
509 | CSIO_INC_STATS(ln, n_fdmi_err); | 508 | CSIO_INC_STATS(ln, n_fdmi_err); |
510 | return; | 509 | return; |
511 | } | 510 | } |
512 | cmd = fdmi_req->dma_buf.vaddr; | 511 | cmd = fdmi_req->dma_buf.vaddr; |
513 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { | 512 | if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) { |
514 | csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n", | 513 | csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n", |
515 | csio_ct_reason(cmd), csio_ct_expl(cmd)); | 514 | csio_ct_reason(cmd), csio_ct_expl(cmd)); |
516 | } | 515 | } |
517 | 516 | ||
518 | /* Send FDMI cmd to de-register any Port attributes if registered | 517 | /* Send FDMI cmd to de-register any Port attributes if registered |
519 | * before | 518 | * before |
520 | */ | 519 | */ |
521 | 520 | ||
522 | /* Prepare FDMI DPRT cmd */ | 521 | /* Prepare FDMI DPRT cmd */ |
523 | memset(cmd, 0, FC_CT_HDR_LEN); | 522 | memset(cmd, 0, FC_CT_HDR_LEN); |
524 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT); | 523 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT); |
525 | len = FC_CT_HDR_LEN; | 524 | len = FC_CT_HDR_LEN; |
526 | port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd); | 525 | port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd); |
527 | memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); | 526 | memcpy(&port_name->portname, csio_ln_wwpn(ln), 8); |
528 | len += sizeof(*port_name); | 527 | len += sizeof(*port_name); |
529 | 528 | ||
530 | /* Submit FDMI request */ | 529 | /* Submit FDMI request */ |
531 | spin_lock_irq(&hw->lock); | 530 | spin_lock_irq(&hw->lock); |
532 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, | 531 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, |
533 | FCOE_CT, &fdmi_req->dma_buf, len)) { | 532 | FCOE_CT, &fdmi_req->dma_buf, len)) { |
534 | CSIO_INC_STATS(ln, n_fdmi_err); | 533 | CSIO_INC_STATS(ln, n_fdmi_err); |
535 | csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); | 534 | csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); |
536 | } | 535 | } |
537 | spin_unlock_irq(&hw->lock); | 536 | spin_unlock_irq(&hw->lock); |
538 | } | 537 | } |
539 | 538 | ||
540 | /** | 539 | /** |
541 | * csio_ln_fdmi_start - Start an FDMI request. | 540 | * csio_ln_fdmi_start - Start an FDMI request. |
542 | * @ln: lnode | 541 | * @ln: lnode |
543 | * @context: session context | 542 | * @context: session context |
544 | * | 543 | * |
545 | * Issued with lock held. | 544 | * Issued with lock held. |
546 | */ | 545 | */ |
547 | int | 546 | int |
548 | csio_ln_fdmi_start(struct csio_lnode *ln, void *context) | 547 | csio_ln_fdmi_start(struct csio_lnode *ln, void *context) |
549 | { | 548 | { |
550 | struct csio_ioreq *fdmi_req; | 549 | struct csio_ioreq *fdmi_req; |
551 | struct csio_rnode *fdmi_rn = (struct csio_rnode *)context; | 550 | struct csio_rnode *fdmi_rn = (struct csio_rnode *)context; |
552 | void *cmd; | 551 | void *cmd; |
553 | struct fc_fdmi_hba_identifier *hbaid; | 552 | struct fc_fdmi_hba_identifier *hbaid; |
554 | uint32_t len; | 553 | uint32_t len; |
555 | 554 | ||
556 | if (!(ln->flags & CSIO_LNF_FDMI_ENABLE)) | 555 | if (!(ln->flags & CSIO_LNF_FDMI_ENABLE)) |
557 | return -EPROTONOSUPPORT; | 556 | return -EPROTONOSUPPORT; |
558 | 557 | ||
559 | if (!csio_is_rnode_ready(fdmi_rn)) | 558 | if (!csio_is_rnode_ready(fdmi_rn)) |
560 | CSIO_INC_STATS(ln, n_fdmi_err); | 559 | CSIO_INC_STATS(ln, n_fdmi_err); |
561 | 560 | ||
562 | /* Send FDMI cmd to de-register any HBA attributes if registered | 561 | /* Send FDMI cmd to de-register any HBA attributes if registered |
563 | * before | 562 | * before |
564 | */ | 563 | */ |
565 | 564 | ||
566 | fdmi_req = ln->mgmt_req; | 565 | fdmi_req = ln->mgmt_req; |
567 | fdmi_req->lnode = ln; | 566 | fdmi_req->lnode = ln; |
568 | fdmi_req->rnode = fdmi_rn; | 567 | fdmi_req->rnode = fdmi_rn; |
569 | 568 | ||
570 | /* Prepare FDMI DHBA cmd */ | 569 | /* Prepare FDMI DHBA cmd */ |
571 | cmd = fdmi_req->dma_buf.vaddr; | 570 | cmd = fdmi_req->dma_buf.vaddr; |
572 | memset(cmd, 0, FC_CT_HDR_LEN); | 571 | memset(cmd, 0, FC_CT_HDR_LEN); |
573 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA); | 572 | csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA); |
574 | len = FC_CT_HDR_LEN; | 573 | len = FC_CT_HDR_LEN; |
575 | 574 | ||
576 | hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd); | 575 | hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd); |
577 | memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); | 576 | memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); |
578 | len += sizeof(*hbaid); | 577 | len += sizeof(*hbaid); |
579 | 578 | ||
580 | /* Submit FDMI request */ | 579 | /* Submit FDMI request */ |
581 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn, | 580 | if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn, |
582 | FCOE_CT, &fdmi_req->dma_buf, len)) { | 581 | FCOE_CT, &fdmi_req->dma_buf, len)) { |
583 | CSIO_INC_STATS(ln, n_fdmi_err); | 582 | CSIO_INC_STATS(ln, n_fdmi_err); |
584 | csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n"); | 583 | csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n"); |
585 | } | 584 | } |
586 | 585 | ||
587 | return 0; | 586 | return 0; |
588 | } | 587 | } |
589 | 588 | ||
590 | /* | 589 | /* |
591 | * csio_ln_vnp_read_cbfn - vnp read completion handler. | 590 | * csio_ln_vnp_read_cbfn - vnp read completion handler. |
592 | * @hw: HW lnode | 591 | * @hw: HW lnode |
593 | * @cbfn: Completion handler. | 592 | * @cbfn: Completion handler. |
594 | * | 593 | * |
595 | * Reads vnp response and updates ln parameters. | 594 | * Reads vnp response and updates ln parameters. |
596 | */ | 595 | */ |
597 | static void | 596 | static void |
598 | csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp) | 597 | csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp) |
599 | { | 598 | { |
600 | struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv); | 599 | struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv); |
601 | struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); | 600 | struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb); |
602 | struct fc_els_csp *csp; | 601 | struct fc_els_csp *csp; |
603 | struct fc_els_cssp *clsp; | 602 | struct fc_els_cssp *clsp; |
604 | enum fw_retval retval; | 603 | enum fw_retval retval; |
605 | __be32 nport_id; | 604 | __be32 nport_id; |
606 | 605 | ||
607 | retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)); | 606 | retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)); |
608 | if (retval != FW_SUCCESS) { | 607 | if (retval != FW_SUCCESS) { |
609 | csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval); | 608 | csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval); |
610 | mempool_free(mbp, hw->mb_mempool); | 609 | mempool_free(mbp, hw->mb_mempool); |
611 | return; | 610 | return; |
612 | } | 611 | } |
613 | 612 | ||
614 | spin_lock_irq(&hw->lock); | 613 | spin_lock_irq(&hw->lock); |
615 | 614 | ||
616 | memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac)); | 615 | memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac)); |
617 | memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3); | 616 | memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3); |
618 | ln->nport_id = ntohl(nport_id); | 617 | ln->nport_id = ntohl(nport_id); |
619 | ln->nport_id = ln->nport_id >> 8; | 618 | ln->nport_id = ln->nport_id >> 8; |
620 | 619 | ||
621 | /* Update WWNs */ | 620 | /* Update WWNs */ |
622 | /* | 621 | /* |
623 | * This may look like a duplication of what csio_fcoe_enable_link() | 622 | * This may look like a duplication of what csio_fcoe_enable_link() |
624 | * does, but is absolutely necessary if the vnpi changes between | 623 | * does, but is absolutely necessary if the vnpi changes between |
625 | * a FCOE LINK UP and FCOE LINK DOWN. | 624 | * a FCOE LINK UP and FCOE LINK DOWN. |
626 | */ | 625 | */ |
627 | memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); | 626 | memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8); |
628 | memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); | 627 | memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8); |
629 | 628 | ||
630 | /* Copy common sparam */ | 629 | /* Copy common sparam */ |
631 | csp = (struct fc_els_csp *)rsp->cmn_srv_parms; | 630 | csp = (struct fc_els_csp *)rsp->cmn_srv_parms; |
632 | ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver; | 631 | ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver; |
633 | ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver; | 632 | ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver; |
634 | ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred; | 633 | ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred; |
635 | ln->ln_sparm.csp.sp_features = csp->sp_features; | 634 | ln->ln_sparm.csp.sp_features = csp->sp_features; |
636 | ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data; | 635 | ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data; |
637 | ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov; | 636 | ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov; |
638 | ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov; | 637 | ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov; |
639 | 638 | ||
640 | /* Copy word 0 & word 1 of class sparam */ | 639 | /* Copy word 0 & word 1 of class sparam */ |
641 | clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1; | 640 | clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1; |
642 | ln->ln_sparm.clsp[2].cp_class = clsp->cp_class; | 641 | ln->ln_sparm.clsp[2].cp_class = clsp->cp_class; |
643 | ln->ln_sparm.clsp[2].cp_init = clsp->cp_init; | 642 | ln->ln_sparm.clsp[2].cp_init = clsp->cp_init; |
644 | ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip; | 643 | ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip; |
645 | ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs; | 644 | ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs; |
646 | 645 | ||
647 | spin_unlock_irq(&hw->lock); | 646 | spin_unlock_irq(&hw->lock); |
648 | 647 | ||
649 | mempool_free(mbp, hw->mb_mempool); | 648 | mempool_free(mbp, hw->mb_mempool); |
650 | 649 | ||
651 | /* Send an event to update local attribs */ | 650 | /* Send an event to update local attribs */ |
652 | csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE); | 651 | csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE); |
653 | } | 652 | } |
654 | 653 | ||
655 | /* | 654 | /* |
656 | * csio_ln_vnp_read - Read vnp params. | 655 | * csio_ln_vnp_read - Read vnp params. |
657 | * @ln: lnode | 656 | * @ln: lnode |
658 | * @cbfn: Completion handler. | 657 | * @cbfn: Completion handler. |
659 | * | 658 | * |
660 | * Issued with lock held. | 659 | * Issued with lock held. |
661 | */ | 660 | */ |
662 | static int | 661 | static int |
663 | csio_ln_vnp_read(struct csio_lnode *ln, | 662 | csio_ln_vnp_read(struct csio_lnode *ln, |
664 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | 663 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) |
665 | { | 664 | { |
666 | struct csio_hw *hw = ln->hwp; | 665 | struct csio_hw *hw = ln->hwp; |
667 | struct csio_mb *mbp; | 666 | struct csio_mb *mbp; |
668 | 667 | ||
669 | /* Allocate Mbox request */ | 668 | /* Allocate Mbox request */ |
670 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | 669 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); |
671 | if (!mbp) { | 670 | if (!mbp) { |
672 | CSIO_INC_STATS(hw, n_err_nomem); | 671 | CSIO_INC_STATS(hw, n_err_nomem); |
673 | return -ENOMEM; | 672 | return -ENOMEM; |
674 | } | 673 | } |
675 | 674 | ||
676 | /* Prepare VNP Command */ | 675 | /* Prepare VNP Command */ |
677 | csio_fcoe_vnp_read_init_mb(ln, mbp, | 676 | csio_fcoe_vnp_read_init_mb(ln, mbp, |
678 | CSIO_MB_DEFAULT_TMO, | 677 | CSIO_MB_DEFAULT_TMO, |
679 | ln->fcf_flowid, | 678 | ln->fcf_flowid, |
680 | ln->vnp_flowid, | 679 | ln->vnp_flowid, |
681 | cbfn); | 680 | cbfn); |
682 | 681 | ||
683 | /* Issue MBOX cmd */ | 682 | /* Issue MBOX cmd */ |
684 | if (csio_mb_issue(hw, mbp)) { | 683 | if (csio_mb_issue(hw, mbp)) { |
685 | csio_err(hw, "Failed to issue mbox FCoE VNP command\n"); | 684 | csio_err(hw, "Failed to issue mbox FCoE VNP command\n"); |
686 | mempool_free(mbp, hw->mb_mempool); | 685 | mempool_free(mbp, hw->mb_mempool); |
687 | return -EINVAL; | 686 | return -EINVAL; |
688 | } | 687 | } |
689 | 688 | ||
690 | return 0; | 689 | return 0; |
691 | } | 690 | } |
692 | 691 | ||
693 | /* | 692 | /* |
694 | * csio_fcoe_enable_link - Enable fcoe link. | 693 | * csio_fcoe_enable_link - Enable fcoe link. |
695 | * @ln: lnode | 694 | * @ln: lnode |
696 | * @enable: enable/disable | 695 | * @enable: enable/disable |
697 | * Issued with lock held. | 696 | * Issued with lock held. |
698 | * Issues mbox cmd to bring up FCOE link on port associated with given ln. | 697 | * Issues mbox cmd to bring up FCOE link on port associated with given ln. |
699 | */ | 698 | */ |
700 | static int | 699 | static int |
701 | csio_fcoe_enable_link(struct csio_lnode *ln, bool enable) | 700 | csio_fcoe_enable_link(struct csio_lnode *ln, bool enable) |
702 | { | 701 | { |
703 | struct csio_hw *hw = ln->hwp; | 702 | struct csio_hw *hw = ln->hwp; |
704 | struct csio_mb *mbp; | 703 | struct csio_mb *mbp; |
705 | enum fw_retval retval; | 704 | enum fw_retval retval; |
706 | uint8_t portid; | 705 | uint8_t portid; |
707 | uint8_t sub_op; | 706 | uint8_t sub_op; |
708 | struct fw_fcoe_link_cmd *lcmd; | 707 | struct fw_fcoe_link_cmd *lcmd; |
709 | int i; | 708 | int i; |
710 | 709 | ||
711 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | 710 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); |
712 | if (!mbp) { | 711 | if (!mbp) { |
713 | CSIO_INC_STATS(hw, n_err_nomem); | 712 | CSIO_INC_STATS(hw, n_err_nomem); |
714 | return -ENOMEM; | 713 | return -ENOMEM; |
715 | } | 714 | } |
716 | 715 | ||
717 | portid = ln->portid; | 716 | portid = ln->portid; |
718 | sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN; | 717 | sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN; |
719 | 718 | ||
720 | csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n", | 719 | csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n", |
721 | sub_op ? "UP" : "DOWN", portid); | 720 | sub_op ? "UP" : "DOWN", portid); |
722 | 721 | ||
723 | csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, | 722 | csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, |
724 | portid, sub_op, 0, 0, 0, NULL); | 723 | portid, sub_op, 0, 0, 0, NULL); |
725 | 724 | ||
726 | if (csio_mb_issue(hw, mbp)) { | 725 | if (csio_mb_issue(hw, mbp)) { |
727 | csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n", | 726 | csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n", |
728 | portid); | 727 | portid); |
729 | mempool_free(mbp, hw->mb_mempool); | 728 | mempool_free(mbp, hw->mb_mempool); |
730 | return -EINVAL; | 729 | return -EINVAL; |
731 | } | 730 | } |
732 | 731 | ||
733 | retval = csio_mb_fw_retval(mbp); | 732 | retval = csio_mb_fw_retval(mbp); |
734 | if (retval != FW_SUCCESS) { | 733 | if (retval != FW_SUCCESS) { |
735 | csio_err(hw, | 734 | csio_err(hw, |
736 | "FCOE LINK %s cmd on port[%d] failed with " | 735 | "FCOE LINK %s cmd on port[%d] failed with " |
737 | "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval); | 736 | "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval); |
738 | mempool_free(mbp, hw->mb_mempool); | 737 | mempool_free(mbp, hw->mb_mempool); |
739 | return -EINVAL; | 738 | return -EINVAL; |
740 | } | 739 | } |
741 | 740 | ||
742 | if (!enable) | 741 | if (!enable) |
743 | goto out; | 742 | goto out; |
744 | 743 | ||
745 | lcmd = (struct fw_fcoe_link_cmd *)mbp->mb; | 744 | lcmd = (struct fw_fcoe_link_cmd *)mbp->mb; |
746 | 745 | ||
747 | memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8); | 746 | memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8); |
748 | memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8); | 747 | memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8); |
749 | 748 | ||
750 | for (i = 0; i < CSIO_MAX_PPORTS; i++) | 749 | for (i = 0; i < CSIO_MAX_PPORTS; i++) |
751 | if (hw->pport[i].portid == portid) | 750 | if (hw->pport[i].portid == portid) |
752 | memcpy(hw->pport[i].mac, lcmd->phy_mac, 6); | 751 | memcpy(hw->pport[i].mac, lcmd->phy_mac, 6); |
753 | 752 | ||
754 | out: | 753 | out: |
755 | mempool_free(mbp, hw->mb_mempool); | 754 | mempool_free(mbp, hw->mb_mempool); |
756 | return 0; | 755 | return 0; |
757 | } | 756 | } |
758 | 757 | ||
759 | /* | 758 | /* |
760 | * csio_ln_read_fcf_cbfn - Read fcf parameters | 759 | * csio_ln_read_fcf_cbfn - Read fcf parameters |
761 | * @ln: lnode | 760 | * @ln: lnode |
762 | * | 761 | * |
763 | * read fcf response and Update ln fcf information. | 762 | * read fcf response and Update ln fcf information. |
764 | */ | 763 | */ |
765 | static void | 764 | static void |
766 | csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp) | 765 | csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp) |
767 | { | 766 | { |
768 | struct csio_lnode *ln = (struct csio_lnode *)mbp->priv; | 767 | struct csio_lnode *ln = (struct csio_lnode *)mbp->priv; |
769 | struct csio_fcf_info *fcf_info; | 768 | struct csio_fcf_info *fcf_info; |
770 | struct fw_fcoe_fcf_cmd *rsp = | 769 | struct fw_fcoe_fcf_cmd *rsp = |
771 | (struct fw_fcoe_fcf_cmd *)(mbp->mb); | 770 | (struct fw_fcoe_fcf_cmd *)(mbp->mb); |
772 | enum fw_retval retval; | 771 | enum fw_retval retval; |
773 | 772 | ||
774 | retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); | 773 | retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16)); |
775 | if (retval != FW_SUCCESS) { | 774 | if (retval != FW_SUCCESS) { |
776 | csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n", | 775 | csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n", |
777 | retval); | 776 | retval); |
778 | mempool_free(mbp, hw->mb_mempool); | 777 | mempool_free(mbp, hw->mb_mempool); |
779 | return; | 778 | return; |
780 | } | 779 | } |
781 | 780 | ||
782 | spin_lock_irq(&hw->lock); | 781 | spin_lock_irq(&hw->lock); |
783 | fcf_info = ln->fcfinfo; | 782 | fcf_info = ln->fcfinfo; |
784 | fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET( | 783 | fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET( |
785 | ntohs(rsp->priority_pkd)); | 784 | ntohs(rsp->priority_pkd)); |
786 | fcf_info->vf_id = ntohs(rsp->vf_id); | 785 | fcf_info->vf_id = ntohs(rsp->vf_id); |
787 | fcf_info->vlan_id = rsp->vlan_id; | 786 | fcf_info->vlan_id = rsp->vlan_id; |
788 | fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size); | 787 | fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size); |
789 | fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv); | 788 | fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv); |
790 | fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi)); | 789 | fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi)); |
791 | fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid); | 790 | fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid); |
792 | fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid); | 791 | fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid); |
793 | fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid); | 792 | fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid); |
794 | fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid); | 793 | fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid); |
795 | memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map)); | 794 | memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map)); |
796 | memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac)); | 795 | memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac)); |
797 | memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id)); | 796 | memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id)); |
798 | memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric)); | 797 | memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric)); |
799 | memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac)); | 798 | memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac)); |
800 | 799 | ||
801 | spin_unlock_irq(&hw->lock); | 800 | spin_unlock_irq(&hw->lock); |
802 | 801 | ||
803 | mempool_free(mbp, hw->mb_mempool); | 802 | mempool_free(mbp, hw->mb_mempool); |
804 | } | 803 | } |
805 | 804 | ||
806 | /* | 805 | /* |
807 | * csio_ln_read_fcf_entry - Read fcf entry. | 806 | * csio_ln_read_fcf_entry - Read fcf entry. |
808 | * @ln: lnode | 807 | * @ln: lnode |
809 | * @cbfn: Completion handler. | 808 | * @cbfn: Completion handler. |
810 | * | 809 | * |
811 | * Issued with lock held. | 810 | * Issued with lock held. |
812 | */ | 811 | */ |
813 | static int | 812 | static int |
814 | csio_ln_read_fcf_entry(struct csio_lnode *ln, | 813 | csio_ln_read_fcf_entry(struct csio_lnode *ln, |
815 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) | 814 | void (*cbfn) (struct csio_hw *, struct csio_mb *)) |
816 | { | 815 | { |
817 | struct csio_hw *hw = ln->hwp; | 816 | struct csio_hw *hw = ln->hwp; |
818 | struct csio_mb *mbp; | 817 | struct csio_mb *mbp; |
819 | 818 | ||
820 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | 819 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); |
821 | if (!mbp) { | 820 | if (!mbp) { |
822 | CSIO_INC_STATS(hw, n_err_nomem); | 821 | CSIO_INC_STATS(hw, n_err_nomem); |
823 | return -ENOMEM; | 822 | return -ENOMEM; |
824 | } | 823 | } |
825 | 824 | ||
826 | /* Get FCoE FCF information */ | 825 | /* Get FCoE FCF information */ |
827 | csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, | 826 | csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO, |
828 | ln->portid, ln->fcf_flowid, cbfn); | 827 | ln->portid, ln->fcf_flowid, cbfn); |
829 | 828 | ||
830 | if (csio_mb_issue(hw, mbp)) { | 829 | if (csio_mb_issue(hw, mbp)) { |
831 | csio_err(hw, "failed to issue FCOE FCF cmd\n"); | 830 | csio_err(hw, "failed to issue FCOE FCF cmd\n"); |
832 | mempool_free(mbp, hw->mb_mempool); | 831 | mempool_free(mbp, hw->mb_mempool); |
833 | return -EINVAL; | 832 | return -EINVAL; |
834 | } | 833 | } |
835 | 834 | ||
836 | return 0; | 835 | return 0; |
837 | } | 836 | } |
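Editor's note: csio_ln_read_fcf_entry() and the other mailbox users in this file share the same alloc/init/issue/cleanup shape, where the caller frees the mailbox only on its own error paths and the completion callback owns it otherwise. A minimal standalone C sketch of that ownership rule; the mbox_* names are hypothetical stand-ins, not the driver's real API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's mailbox object and helpers. */
struct mbox { int opcode; };

static struct mbox *mbox_alloc(void) { return malloc(sizeof(struct mbox)); }
static void mbox_free(struct mbox *m) { free(m); }

static void read_entry_cbfn(struct mbox *m)
{
        printf("completion for opcode %d\n", m->opcode);
        mbox_free(m);                   /* on success the callback owns the mailbox */
}

static int mbox_issue(struct mbox *m, void (*cbfn)(struct mbox *))
{
        cbfn(m);                        /* pretend the firmware completed at once */
        return 0;
}

/* Same shape as csio_ln_read_fcf_entry(): free here only on error paths. */
static int read_entry(int opcode)
{
        struct mbox *m = mbox_alloc();

        if (!m)
                return -1;              /* -ENOMEM in the driver */

        m->opcode = opcode;             /* csio_fcoe_read_fcf_init_mb() analogue */

        if (mbox_issue(m, read_entry_cbfn)) {   /* csio_mb_issue() analogue */
                mbox_free(m);
                return -2;              /* -EINVAL in the driver */
        }
        return 0;
}

int main(void)
{
        return read_entry(42) ? 1 : 0;
}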
838 | 837 | ||
839 | /* | 838 | /* |
840 | * csio_handle_link_up - Logical Linkup event. | 839 | * csio_handle_link_up - Logical Linkup event. |
841 | * @hw - HW module. | 840 | * @hw - HW module. |
842 | * @portid - Physical port number | 841 | * @portid - Physical port number |
843 | * @fcfi - FCF index. | 842 | * @fcfi - FCF index. |
844 | * @vnpi - VNP index. | 843 | * @vnpi - VNP index. |
845 | * Returns - none. | 844 | * Returns - none. |
846 | * | 845 | * |
847 | * This event is received from FW, when virtual link is established between | 846 | * This event is received from FW, when virtual link is established between |
848 | * Physical port[ENode] and FCF. If it is a new vnpi, then a local node object | 847 | * Physical port[ENode] and FCF. If it is a new vnpi, then a local node object |
849 | * is created on this FCF and set to [ONLINE] state. | 848 | * is created on this FCF and set to [ONLINE] state. |
850 | * Lnode waits for FW_RDEV_CMD event to be received indicating that | 849 | * Lnode waits for FW_RDEV_CMD event to be received indicating that |
851 | * Fabric login is completed and lnode moves to [READY] state. | 850 | * Fabric login is completed and lnode moves to [READY] state. |
852 | * | 851 | * |
853 | * This is called with the hw lock held. | 852 | * This is called with the hw lock held. |
854 | */ | 853 | */ |
855 | static void | 854 | static void |
856 | csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, | 855 | csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, |
857 | uint32_t vnpi) | 856 | uint32_t vnpi) |
858 | { | 857 | { |
859 | struct csio_lnode *ln = NULL; | 858 | struct csio_lnode *ln = NULL; |
860 | 859 | ||
861 | /* Lookup lnode based on vnpi */ | 860 | /* Lookup lnode based on vnpi */ |
862 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); | 861 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); |
863 | if (!ln) { | 862 | if (!ln) { |
864 | /* Pick lnode based on portid */ | 863 | /* Pick lnode based on portid */ |
865 | ln = csio_ln_lookup_by_portid(hw, portid); | 864 | ln = csio_ln_lookup_by_portid(hw, portid); |
866 | if (!ln) { | 865 | if (!ln) { |
867 | csio_err(hw, "failed to lookup fcoe lnode on port:%d\n", | 866 | csio_err(hw, "failed to lookup fcoe lnode on port:%d\n", |
868 | portid); | 867 | portid); |
869 | CSIO_DB_ASSERT(0); | 868 | CSIO_DB_ASSERT(0); |
870 | return; | 869 | return; |
871 | } | 870 | } |
872 | 871 | ||
873 | /* Check if lnode has valid vnp flowid */ | 872 | /* Check if lnode has valid vnp flowid */ |
874 | if (ln->vnp_flowid != CSIO_INVALID_IDX) { | 873 | if (ln->vnp_flowid != CSIO_INVALID_IDX) { |
875 | /* New VN-Port */ | 874 | /* New VN-Port */ |
876 | spin_unlock_irq(&hw->lock); | 875 | spin_unlock_irq(&hw->lock); |
877 | ln = csio_lnode_alloc(hw); | 876 | ln = csio_lnode_alloc(hw); |
878 | spin_lock_irq(&hw->lock); | 877 | spin_lock_irq(&hw->lock); |
879 | if (!ln) { | 878 | if (!ln) { |
880 | csio_err(hw, | 879 | csio_err(hw, |
881 | "failed to allocate fcoe lnode " | 880 | "failed to allocate fcoe lnode " |
882 | "for port:%d vnpi:x%x\n", | 881 | "for port:%d vnpi:x%x\n", |
883 | portid, vnpi); | 882 | portid, vnpi); |
884 | CSIO_DB_ASSERT(0); | 883 | CSIO_DB_ASSERT(0); |
885 | return; | 884 | return; |
886 | } | 885 | } |
887 | ln->portid = portid; | 886 | ln->portid = portid; |
888 | } | 887 | } |
889 | ln->vnp_flowid = vnpi; | 888 | ln->vnp_flowid = vnpi; |
890 | ln->dev_num &= ~0xFFFF; | 889 | ln->dev_num &= ~0xFFFF; |
891 | ln->dev_num |= vnpi; | 890 | ln->dev_num |= vnpi; |
892 | } | 891 | } |
893 | 892 | ||
894 | /* Initialize fcfi */ | 893 | /* Initialize fcfi */ |
895 | ln->fcf_flowid = fcfi; | 894 | ln->fcf_flowid = fcfi; |
896 | 895 | ||
897 | csio_info(hw, "Port:%d - FCOE LINK UP\n", portid); | 896 | csio_info(hw, "Port:%d - FCOE LINK UP\n", portid); |
898 | 897 | ||
899 | CSIO_INC_STATS(ln, n_link_up); | 898 | CSIO_INC_STATS(ln, n_link_up); |
900 | 899 | ||
901 | /* Send LINKUP event to SM */ | 900 | /* Send LINKUP event to SM */ |
902 | csio_post_event(&ln->sm, CSIO_LNE_LINKUP); | 901 | csio_post_event(&ln->sm, CSIO_LNE_LINKUP); |
903 | } | 902 | } |
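Editor's note: a detail worth calling out in csio_handle_link_up() is that the HW spinlock is dropped around the lnode allocation (which may sleep) and retaken afterwards. Below is a standalone sketch of that lookup-or-allocate shape, using a pthread mutex and simplified structs as stand-ins for the driver's hw/lnode objects; all names here are illustrative only:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, simplified stand-ins for csio_hw/csio_lnode. */
struct lnode { unsigned int vnpi; unsigned int portid; };
struct hw { pthread_mutex_t lock; struct lnode *ln; };

static struct lnode *lookup_by_vnpi(struct hw *hw, unsigned int vnpi)
{
        return (hw->ln && hw->ln->vnpi == vnpi) ? hw->ln : NULL;
}

/* Called with hw->lock held, like csio_handle_link_up(). */
static void handle_link_up(struct hw *hw, unsigned int portid, unsigned int vnpi)
{
        struct lnode *ln = lookup_by_vnpi(hw, vnpi);

        if (!ln) {
                /* The allocation may sleep, so drop the lock around it,
                 * mirroring the spin_unlock_irq()/spin_lock_irq() pair above. */
                pthread_mutex_unlock(&hw->lock);
                ln = calloc(1, sizeof(*ln));    /* csio_lnode_alloc() analogue */
                pthread_mutex_lock(&hw->lock);
                if (!ln)
                        return;
                ln->portid = portid;
                hw->ln = ln;
        }
        ln->vnpi = vnpi;
        printf("Port:%u - FCOE LINK UP (vnpi 0x%x)\n", portid, ln->vnpi);
}

int main(void)
{
        struct hw hw = { .lock = PTHREAD_MUTEX_INITIALIZER, .ln = NULL };

        pthread_mutex_lock(&hw.lock);
        handle_link_up(&hw, 0, 0x10);
        pthread_mutex_unlock(&hw.lock);
        free(hw.ln);
        return 0;
}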
904 | 903 | ||
905 | /* | 904 | /* |
906 | * csio_post_event_rns | 905 | * csio_post_event_rns |
907 | * @ln - FCOE lnode | 906 | * @ln - FCOE lnode |
908 | * @evt - Given rnode event | 907 | * @evt - Given rnode event |
909 | * Returns - none | 908 | * Returns - none |
910 | * | 909 | * |
911 | * Posts the given rnode event to all FCOE rnodes connected to the given lnode. | 910 | * Posts the given rnode event to all FCOE rnodes connected to the given lnode. |
912 | * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE | 911 | * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE |
913 | * event. | 912 | * event. |
914 | * | 913 | * |
915 | * This is called with the hw lock held. | 914 | * This is called with the hw lock held. |
916 | */ | 915 | */ |
917 | static void | 916 | static void |
918 | csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt) | 917 | csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt) |
919 | { | 918 | { |
920 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; | 919 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; |
921 | struct list_head *tmp, *next; | 920 | struct list_head *tmp, *next; |
922 | struct csio_rnode *rn; | 921 | struct csio_rnode *rn; |
923 | 922 | ||
924 | list_for_each_safe(tmp, next, &rnhead->sm.sm_list) { | 923 | list_for_each_safe(tmp, next, &rnhead->sm.sm_list) { |
925 | rn = (struct csio_rnode *) tmp; | 924 | rn = (struct csio_rnode *) tmp; |
926 | csio_post_event(&rn->sm, evt); | 925 | csio_post_event(&rn->sm, evt); |
927 | } | 926 | } |
928 | } | 927 | } |
929 | 928 | ||
930 | /* | 929 | /* |
931 | * csio_cleanup_rns | 930 | * csio_cleanup_rns |
932 | * @ln - FCOE lnode | 931 | * @ln - FCOE lnode |
933 | * Returns - none | 932 | * Returns - none |
934 | * | 933 | * |
935 | * Frees all FCOE rnodes connected to the given lnode. | 934 | * Frees all FCOE rnodes connected to the given lnode. |
936 | * | 935 | * |
937 | * This is called with the hw lock held. | 936 | * This is called with the hw lock held. |
938 | */ | 937 | */ |
939 | static void | 938 | static void |
940 | csio_cleanup_rns(struct csio_lnode *ln) | 939 | csio_cleanup_rns(struct csio_lnode *ln) |
941 | { | 940 | { |
942 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; | 941 | struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead; |
943 | struct list_head *tmp, *next_rn; | 942 | struct list_head *tmp, *next_rn; |
944 | struct csio_rnode *rn; | 943 | struct csio_rnode *rn; |
945 | 944 | ||
946 | list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) { | 945 | list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) { |
947 | rn = (struct csio_rnode *) tmp; | 946 | rn = (struct csio_rnode *) tmp; |
948 | csio_put_rnode(ln, rn); | 947 | csio_put_rnode(ln, rn); |
949 | } | 948 | } |
950 | 949 | ||
951 | } | 950 | } |
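Editor's note: csio_cleanup_rns() walks the rnode list with list_for_each_safe() because csio_put_rnode() can release the node being visited; the "safe" variant caches the next pointer before the body runs. The same idea in a self-contained C sketch with a hand-rolled singly linked list (nothing below is the kernel's real list API):

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

/* Free every node: the next pointer must be read before the current node
 * is released, which is exactly what the _safe iterator guarantees. */
static void free_all(struct node *head)
{
        struct node *cur = head, *next;

        while (cur) {
                next = cur->next;       /* cache before freeing */
                printf("freeing rnode %d\n", cur->id);
                free(cur);              /* csio_put_rnode() analogue */
                cur = next;
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 2; i >= 0; i--) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->id = i;
                n->next = head;
                head = n;
        }
        free_all(head);
        return 0;
}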
952 | 951 | ||
953 | /* | 952 | /* |
954 | * csio_post_event_lns | 953 | * csio_post_event_lns |
955 | * @ln - FCOE lnode | 954 | * @ln - FCOE lnode |
956 | * @evt - Given lnode event | 955 | * @evt - Given lnode event |
957 | * Returns - none | 956 | * Returns - none |
958 | * | 957 | * |
959 | * Posts the given event to the given lnode and to its associated NPIV lnodes. | 958 | * Posts the given event to the given lnode and to its associated NPIV lnodes. |
960 | * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE | 959 | * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE |
961 | * event. | 960 | * event. |
962 | * | 961 | * |
963 | * This is called with the hw lock held. | 962 | * This is called with the hw lock held. |
964 | */ | 963 | */ |
965 | static void | 964 | static void |
966 | csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt) | 965 | csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt) |
967 | { | 966 | { |
968 | struct list_head *tmp; | 967 | struct list_head *tmp; |
969 | struct csio_lnode *cln, *sln; | 968 | struct csio_lnode *cln, *sln; |
970 | 969 | ||
971 | /* If NPIV lnode, send evt only to that and return */ | 970 | /* If NPIV lnode, send evt only to that and return */ |
972 | if (csio_is_npiv_ln(ln)) { | 971 | if (csio_is_npiv_ln(ln)) { |
973 | csio_post_event(&ln->sm, evt); | 972 | csio_post_event(&ln->sm, evt); |
974 | return; | 973 | return; |
975 | } | 974 | } |
976 | 975 | ||
977 | sln = ln; | 976 | sln = ln; |
978 | /* Traverse children lnodes list and send evt */ | 977 | /* Traverse children lnodes list and send evt */ |
979 | list_for_each(tmp, &sln->cln_head) { | 978 | list_for_each(tmp, &sln->cln_head) { |
980 | cln = (struct csio_lnode *) tmp; | 979 | cln = (struct csio_lnode *) tmp; |
981 | csio_post_event(&cln->sm, evt); | 980 | csio_post_event(&cln->sm, evt); |
982 | } | 981 | } |
983 | 982 | ||
984 | /* Send evt to parent lnode */ | 983 | /* Send evt to parent lnode */ |
985 | csio_post_event(&ln->sm, evt); | 984 | csio_post_event(&ln->sm, evt); |
986 | } | 985 | } |
987 | 986 | ||
988 | /* | 987 | /* |
989 | * csio_ln_down - Local nport is down | 988 | * csio_ln_down - Local nport is down |
990 | * @ln - FCOE Lnode | 989 | * @ln - FCOE Lnode |
991 | * Returns - none | 990 | * Returns - none |
992 | * | 991 | * |
993 | * Sends LINK_DOWN events to the lnode and its associated NPIV lnodes. | 992 | * Sends LINK_DOWN events to the lnode and its associated NPIV lnodes. |
994 | * | 993 | * |
995 | * This is called with the hw lock held. | 994 | * This is called with the hw lock held. |
996 | */ | 995 | */ |
997 | static void | 996 | static void |
998 | csio_ln_down(struct csio_lnode *ln) | 997 | csio_ln_down(struct csio_lnode *ln) |
999 | { | 998 | { |
1000 | csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN); | 999 | csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN); |
1001 | } | 1000 | } |
1002 | 1001 | ||
1003 | /* | 1002 | /* |
1004 | * csio_handle_link_down - Logical Linkdown event. | 1003 | * csio_handle_link_down - Logical Linkdown event. |
1005 | * @hw - HW module. | 1004 | * @hw - HW module. |
1006 | * @portid - Physical port number | 1005 | * @portid - Physical port number |
1007 | * @fcfi - FCF index. | 1006 | * @fcfi - FCF index. |
1008 | * @vnpi - VNP index. | 1007 | * @vnpi - VNP index. |
1009 | * Returns - none | 1008 | * Returns - none |
1010 | * | 1009 | * |
1011 | * This event is received from FW, when virtual link goes down between | 1010 | * This event is received from FW, when virtual link goes down between |
1012 | * Physical port[ENode] and FCF. The lnode and its associated NPIV lnodes hosted on | 1011 | * Physical port[ENode] and FCF. The lnode and its associated NPIV lnodes hosted on |
1013 | * this vnpi[VN-Port] will be de-instantiated. | 1012 | * this vnpi[VN-Port] will be de-instantiated. |
1014 | * | 1013 | * |
1015 | * This called with hw lock held | 1014 | * This called with hw lock held |
1016 | */ | 1015 | */ |
1017 | static void | 1016 | static void |
1018 | csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, | 1017 | csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi, |
1019 | uint32_t vnpi) | 1018 | uint32_t vnpi) |
1020 | { | 1019 | { |
1021 | struct csio_fcf_info *fp; | 1020 | struct csio_fcf_info *fp; |
1022 | struct csio_lnode *ln; | 1021 | struct csio_lnode *ln; |
1023 | 1022 | ||
1024 | /* Lookup lnode based on vnpi */ | 1023 | /* Lookup lnode based on vnpi */ |
1025 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); | 1024 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); |
1026 | if (ln) { | 1025 | if (ln) { |
1027 | fp = ln->fcfinfo; | 1026 | fp = ln->fcfinfo; |
1028 | CSIO_INC_STATS(ln, n_link_down); | 1027 | CSIO_INC_STATS(ln, n_link_down); |
1029 | 1028 | ||
1030 | /* Warn if a linkdown is received while the lnode is not in ready state */ | 1029 | /* Warn if a linkdown is received while the lnode is not in ready state */ |
1031 | if (!csio_is_lnode_ready(ln)) { | 1030 | if (!csio_is_lnode_ready(ln)) { |
1032 | csio_ln_warn(ln, | 1031 | csio_ln_warn(ln, |
1033 | "warn: FCOE link is already offline. " | 1032 | "warn: FCOE link is already offline. " |
1034 | "Ignoring FCOE linkdown event on portid %d\n", | 1033 | "Ignoring FCOE linkdown event on portid %d\n", |
1035 | portid); | 1034 | portid); |
1036 | CSIO_INC_STATS(ln, n_evt_drop); | 1035 | CSIO_INC_STATS(ln, n_evt_drop); |
1037 | return; | 1036 | return; |
1038 | } | 1037 | } |
1039 | 1038 | ||
1040 | /* Verify portid */ | 1039 | /* Verify portid */ |
1041 | if (fp->portid != portid) { | 1040 | if (fp->portid != portid) { |
1042 | csio_ln_warn(ln, | 1041 | csio_ln_warn(ln, |
1043 | "warn: FCOE linkdown recv with " | 1042 | "warn: FCOE linkdown recv with " |
1044 | "invalid port %d\n", portid); | 1043 | "invalid port %d\n", portid); |
1045 | CSIO_INC_STATS(ln, n_evt_drop); | 1044 | CSIO_INC_STATS(ln, n_evt_drop); |
1046 | return; | 1045 | return; |
1047 | } | 1046 | } |
1048 | 1047 | ||
1049 | /* Verify fcfi */ | 1048 | /* Verify fcfi */ |
1050 | if (ln->fcf_flowid != fcfi) { | 1049 | if (ln->fcf_flowid != fcfi) { |
1051 | csio_ln_warn(ln, | 1050 | csio_ln_warn(ln, |
1052 | "warn: FCOE linkdown recv with " | 1051 | "warn: FCOE linkdown recv with " |
1053 | "invalid fcfi x%x\n", fcfi); | 1052 | "invalid fcfi x%x\n", fcfi); |
1054 | CSIO_INC_STATS(ln, n_evt_drop); | 1053 | CSIO_INC_STATS(ln, n_evt_drop); |
1055 | return; | 1054 | return; |
1056 | } | 1055 | } |
1057 | 1056 | ||
1058 | csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid); | 1057 | csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid); |
1059 | 1058 | ||
1060 | /* Send LINK_DOWN event to lnode s/m */ | 1059 | /* Send LINK_DOWN event to lnode s/m */ |
1061 | csio_ln_down(ln); | 1060 | csio_ln_down(ln); |
1062 | 1061 | ||
1063 | return; | 1062 | return; |
1064 | } else { | 1063 | } else { |
1065 | csio_warn(hw, | 1064 | csio_warn(hw, |
1066 | "warn: FCOE linkdown recv with invalid vnpi x%x\n", | 1065 | "warn: FCOE linkdown recv with invalid vnpi x%x\n", |
1067 | vnpi); | 1066 | vnpi); |
1068 | CSIO_INC_STATS(hw, n_evt_drop); | 1067 | CSIO_INC_STATS(hw, n_evt_drop); |
1069 | } | 1068 | } |
1070 | } | 1069 | } |
1071 | 1070 | ||
1072 | /* | 1071 | /* |
1073 | * csio_is_lnode_ready - Checks if FCOE lnode is in ready state. | 1072 | * csio_is_lnode_ready - Checks if FCOE lnode is in ready state. |
1074 | * @ln: Lnode module | 1073 | * @ln: Lnode module |
1075 | * | 1074 | * |
1076 | * Returns True if FCOE lnode is in ready state. | 1075 | * Returns True if FCOE lnode is in ready state. |
1077 | */ | 1076 | */ |
1078 | int | 1077 | int |
1079 | csio_is_lnode_ready(struct csio_lnode *ln) | 1078 | csio_is_lnode_ready(struct csio_lnode *ln) |
1080 | { | 1079 | { |
1081 | return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)); | 1080 | return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)); |
1082 | } | 1081 | } |
1083 | 1082 | ||
1084 | /*****************************************************************************/ | 1083 | /*****************************************************************************/ |
1085 | /* START: Lnode SM */ | 1084 | /* START: Lnode SM */ |
1086 | /*****************************************************************************/ | 1085 | /*****************************************************************************/ |
1087 | /* | 1086 | /* |
1088 | * csio_lns_uninit - Lnode SM handler for the uninit state. | 1087 | * csio_lns_uninit - Lnode SM handler for the uninit state. |
1089 | * @ln - FCOE lnode. | 1088 | * @ln - FCOE lnode. |
1090 | * @evt - Event to be processed. | 1089 | * @evt - Event to be processed. |
1091 | * | 1090 | * |
1092 | * Process the given event for an lnode that is currently in the "uninit" state. | 1091 | * Process the given event for an lnode that is currently in the "uninit" state. |
1093 | * Invoked with HW lock held. | 1092 | * Invoked with HW lock held. |
1094 | * Return - none. | 1093 | * Return - none. |
1095 | */ | 1094 | */ |
1096 | static void | 1095 | static void |
1097 | csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt) | 1096 | csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt) |
1098 | { | 1097 | { |
1099 | struct csio_hw *hw = csio_lnode_to_hw(ln); | 1098 | struct csio_hw *hw = csio_lnode_to_hw(ln); |
1100 | struct csio_lnode *rln = hw->rln; | 1099 | struct csio_lnode *rln = hw->rln; |
1101 | int rv; | 1100 | int rv; |
1102 | 1101 | ||
1103 | CSIO_INC_STATS(ln, n_evt_sm[evt]); | 1102 | CSIO_INC_STATS(ln, n_evt_sm[evt]); |
1104 | switch (evt) { | 1103 | switch (evt) { |
1105 | case CSIO_LNE_LINKUP: | 1104 | case CSIO_LNE_LINKUP: |
1106 | csio_set_state(&ln->sm, csio_lns_online); | 1105 | csio_set_state(&ln->sm, csio_lns_online); |
1107 | /* Read FCF only for physical lnode */ | 1106 | /* Read FCF only for physical lnode */ |
1108 | if (csio_is_phys_ln(ln)) { | 1107 | if (csio_is_phys_ln(ln)) { |
1109 | rv = csio_ln_read_fcf_entry(ln, | 1108 | rv = csio_ln_read_fcf_entry(ln, |
1110 | csio_ln_read_fcf_cbfn); | 1109 | csio_ln_read_fcf_cbfn); |
1111 | if (rv != 0) { | 1110 | if (rv != 0) { |
1112 | /* TODO: Send HW RESET event */ | 1111 | /* TODO: Send HW RESET event */ |
1113 | CSIO_INC_STATS(ln, n_err); | 1112 | CSIO_INC_STATS(ln, n_err); |
1114 | break; | 1113 | break; |
1115 | } | 1114 | } |
1116 | 1115 | ||
1117 | /* Add FCF record */ | 1116 | /* Add FCF record */ |
1118 | list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); | 1117 | list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); |
1119 | } | 1118 | } |
1120 | 1119 | ||
1121 | rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); | 1120 | rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); |
1122 | if (rv != 0) { | 1121 | if (rv != 0) { |
1123 | /* TODO: Send HW RESET event */ | 1122 | /* TODO: Send HW RESET event */ |
1124 | CSIO_INC_STATS(ln, n_err); | 1123 | CSIO_INC_STATS(ln, n_err); |
1125 | } | 1124 | } |
1126 | break; | 1125 | break; |
1127 | 1126 | ||
1128 | case CSIO_LNE_DOWN_LINK: | 1127 | case CSIO_LNE_DOWN_LINK: |
1129 | break; | 1128 | break; |
1130 | 1129 | ||
1131 | default: | 1130 | default: |
1132 | csio_ln_dbg(ln, | 1131 | csio_ln_dbg(ln, |
1133 | "unexp ln event %d recv from did:x%x in " | 1132 | "unexp ln event %d recv from did:x%x in " |
1134 | "ln state[uninit].\n", evt, ln->nport_id); | 1133 | "ln state[uninit].\n", evt, ln->nport_id); |
1135 | CSIO_INC_STATS(ln, n_evt_unexp); | 1134 | CSIO_INC_STATS(ln, n_evt_unexp); |
1136 | break; | 1135 | break; |
1137 | } /* switch event */ | 1136 | } /* switch event */ |
1138 | } | 1137 | } |
1139 | 1138 | ||
1140 | /* | 1139 | /* |
1141 | * csio_lns_online - Lnode SM handler for the online state. | 1140 | * csio_lns_online - Lnode SM handler for the online state. |
1142 | * @ln - FCOE lnode. | 1141 | * @ln - FCOE lnode. |
1143 | * @evt - Event to be processed. | 1142 | * @evt - Event to be processed. |
1144 | * | 1143 | * |
1145 | * Process the given event for an lnode that is currently in the "online" state. | 1144 | * Process the given event for an lnode that is currently in the "online" state. |
1146 | * Invoked with HW lock held. | 1145 | * Invoked with HW lock held. |
1147 | * Return - none. | 1146 | * Return - none. |
1148 | */ | 1147 | */ |
1149 | static void | 1148 | static void |
1150 | csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) | 1149 | csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt) |
1151 | { | 1150 | { |
1152 | struct csio_hw *hw = csio_lnode_to_hw(ln); | 1151 | struct csio_hw *hw = csio_lnode_to_hw(ln); |
1153 | 1152 | ||
1154 | CSIO_INC_STATS(ln, n_evt_sm[evt]); | 1153 | CSIO_INC_STATS(ln, n_evt_sm[evt]); |
1155 | switch (evt) { | 1154 | switch (evt) { |
1156 | case CSIO_LNE_LINKUP: | 1155 | case CSIO_LNE_LINKUP: |
1157 | csio_ln_warn(ln, | 1156 | csio_ln_warn(ln, |
1158 | "warn: FCOE link is already up. " | 1157 | "warn: FCOE link is already up. " |
1159 | "Ignoring linkup on port:%d\n", ln->portid); | 1158 | "Ignoring linkup on port:%d\n", ln->portid); |
1160 | CSIO_INC_STATS(ln, n_evt_drop); | 1159 | CSIO_INC_STATS(ln, n_evt_drop); |
1161 | break; | 1160 | break; |
1162 | 1161 | ||
1163 | case CSIO_LNE_FAB_INIT_DONE: | 1162 | case CSIO_LNE_FAB_INIT_DONE: |
1164 | csio_set_state(&ln->sm, csio_lns_ready); | 1163 | csio_set_state(&ln->sm, csio_lns_ready); |
1165 | 1164 | ||
1166 | spin_unlock_irq(&hw->lock); | 1165 | spin_unlock_irq(&hw->lock); |
1167 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP); | 1166 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP); |
1168 | spin_lock_irq(&hw->lock); | 1167 | spin_lock_irq(&hw->lock); |
1169 | 1168 | ||
1170 | break; | 1169 | break; |
1171 | 1170 | ||
1172 | case CSIO_LNE_LINK_DOWN: | 1171 | case CSIO_LNE_LINK_DOWN: |
1173 | /* Fall through */ | 1172 | /* Fall through */ |
1174 | case CSIO_LNE_DOWN_LINK: | 1173 | case CSIO_LNE_DOWN_LINK: |
1175 | csio_set_state(&ln->sm, csio_lns_uninit); | 1174 | csio_set_state(&ln->sm, csio_lns_uninit); |
1176 | if (csio_is_phys_ln(ln)) { | 1175 | if (csio_is_phys_ln(ln)) { |
1177 | /* Remove FCF entry */ | 1176 | /* Remove FCF entry */ |
1178 | list_del_init(&ln->fcfinfo->list); | 1177 | list_del_init(&ln->fcfinfo->list); |
1179 | } | 1178 | } |
1180 | break; | 1179 | break; |
1181 | 1180 | ||
1182 | default: | 1181 | default: |
1183 | csio_ln_dbg(ln, | 1182 | csio_ln_dbg(ln, |
1184 | "unexp ln event %d recv from did:x%x in " | 1183 | "unexp ln event %d recv from did:x%x in " |
1185 | "ln state[online].\n", evt, ln->nport_id); | 1184 | "ln state[online].\n", evt, ln->nport_id); |
1186 | CSIO_INC_STATS(ln, n_evt_unexp); | 1185 | CSIO_INC_STATS(ln, n_evt_unexp); |
1187 | 1186 | ||
1188 | break; | 1187 | break; |
1189 | } /* switch event */ | 1188 | } /* switch event */ |
1190 | } | 1189 | } |
1191 | 1190 | ||
1192 | /* | 1191 | /* |
1193 | * csio_lns_ready - Lnode SM handler for the ready state. | 1192 | * csio_lns_ready - Lnode SM handler for the ready state. |
1194 | * @ln - FCOE lnode. | 1193 | * @ln - FCOE lnode. |
1195 | * @evt - Event to be processed. | 1194 | * @evt - Event to be processed. |
1196 | * | 1195 | * |
1197 | * Process the given event for an lnode that is currently in the "ready" state. | 1196 | * Process the given event for an lnode that is currently in the "ready" state. |
1198 | * Invoked with HW lock held. | 1197 | * Invoked with HW lock held. |
1199 | * Return - none. | 1198 | * Return - none. |
1200 | */ | 1199 | */ |
1201 | static void | 1200 | static void |
1202 | csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt) | 1201 | csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt) |
1203 | { | 1202 | { |
1204 | struct csio_hw *hw = csio_lnode_to_hw(ln); | 1203 | struct csio_hw *hw = csio_lnode_to_hw(ln); |
1205 | 1204 | ||
1206 | CSIO_INC_STATS(ln, n_evt_sm[evt]); | 1205 | CSIO_INC_STATS(ln, n_evt_sm[evt]); |
1207 | switch (evt) { | 1206 | switch (evt) { |
1208 | case CSIO_LNE_FAB_INIT_DONE: | 1207 | case CSIO_LNE_FAB_INIT_DONE: |
1209 | csio_ln_dbg(ln, | 1208 | csio_ln_dbg(ln, |
1210 | "ignoring event %d recv from did x%x " | 1209 | "ignoring event %d recv from did x%x " |
1211 | "in ln state[ready].\n", evt, ln->nport_id); | 1210 | "in ln state[ready].\n", evt, ln->nport_id); |
1212 | CSIO_INC_STATS(ln, n_evt_drop); | 1211 | CSIO_INC_STATS(ln, n_evt_drop); |
1213 | break; | 1212 | break; |
1214 | 1213 | ||
1215 | case CSIO_LNE_LINK_DOWN: | 1214 | case CSIO_LNE_LINK_DOWN: |
1216 | csio_set_state(&ln->sm, csio_lns_offline); | 1215 | csio_set_state(&ln->sm, csio_lns_offline); |
1217 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); | 1216 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); |
1218 | 1217 | ||
1219 | spin_unlock_irq(&hw->lock); | 1218 | spin_unlock_irq(&hw->lock); |
1220 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN); | 1219 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN); |
1221 | spin_lock_irq(&hw->lock); | 1220 | spin_lock_irq(&hw->lock); |
1222 | 1221 | ||
1223 | if (csio_is_phys_ln(ln)) { | 1222 | if (csio_is_phys_ln(ln)) { |
1224 | /* Remove FCF entry */ | 1223 | /* Remove FCF entry */ |
1225 | list_del_init(&ln->fcfinfo->list); | 1224 | list_del_init(&ln->fcfinfo->list); |
1226 | } | 1225 | } |
1227 | break; | 1226 | break; |
1228 | 1227 | ||
1229 | case CSIO_LNE_DOWN_LINK: | 1228 | case CSIO_LNE_DOWN_LINK: |
1230 | csio_set_state(&ln->sm, csio_lns_offline); | 1229 | csio_set_state(&ln->sm, csio_lns_offline); |
1231 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); | 1230 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); |
1232 | 1231 | ||
1233 | /* The host needs to issue aborts in case the FW has not returned | 1232 | /* The host needs to issue aborts in case the FW has not returned |
1234 | * WRs with status "ABORTED". | 1233 | * WRs with status "ABORTED". |
1235 | */ | 1234 | */ |
1236 | spin_unlock_irq(&hw->lock); | 1235 | spin_unlock_irq(&hw->lock); |
1237 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN); | 1236 | csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN); |
1238 | spin_lock_irq(&hw->lock); | 1237 | spin_lock_irq(&hw->lock); |
1239 | 1238 | ||
1240 | if (csio_is_phys_ln(ln)) { | 1239 | if (csio_is_phys_ln(ln)) { |
1241 | /* Remove FCF entry */ | 1240 | /* Remove FCF entry */ |
1242 | list_del_init(&ln->fcfinfo->list); | 1241 | list_del_init(&ln->fcfinfo->list); |
1243 | } | 1242 | } |
1244 | break; | 1243 | break; |
1245 | 1244 | ||
1246 | case CSIO_LNE_CLOSE: | 1245 | case CSIO_LNE_CLOSE: |
1247 | csio_set_state(&ln->sm, csio_lns_uninit); | 1246 | csio_set_state(&ln->sm, csio_lns_uninit); |
1248 | csio_post_event_rns(ln, CSIO_RNFE_CLOSE); | 1247 | csio_post_event_rns(ln, CSIO_RNFE_CLOSE); |
1249 | break; | 1248 | break; |
1250 | 1249 | ||
1251 | case CSIO_LNE_LOGO: | 1250 | case CSIO_LNE_LOGO: |
1252 | csio_set_state(&ln->sm, csio_lns_offline); | 1251 | csio_set_state(&ln->sm, csio_lns_offline); |
1253 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); | 1252 | csio_post_event_rns(ln, CSIO_RNFE_DOWN); |
1254 | break; | 1253 | break; |
1255 | 1254 | ||
1256 | default: | 1255 | default: |
1257 | csio_ln_dbg(ln, | 1256 | csio_ln_dbg(ln, |
1258 | "unexp ln event %d recv from did:x%x in " | 1257 | "unexp ln event %d recv from did:x%x in " |
1259 | "ln state[ready].\n", evt, ln->nport_id); | 1258 | "ln state[ready].\n", evt, ln->nport_id); |
1260 | CSIO_INC_STATS(ln, n_evt_unexp); | 1259 | CSIO_INC_STATS(ln, n_evt_unexp); |
1261 | CSIO_DB_ASSERT(0); | 1260 | CSIO_DB_ASSERT(0); |
1262 | break; | 1261 | break; |
1263 | } /* switch event */ | 1262 | } /* switch event */ |
1264 | } | 1263 | } |
1265 | 1264 | ||
1266 | /* | 1265 | /* |
1267 | * csio_lns_offline - Lnode SM handler for the offline state. | 1266 | * csio_lns_offline - Lnode SM handler for the offline state. |
1268 | * @ln - FCOE lnode. | 1267 | * @ln - FCOE lnode. |
1269 | * @evt - Event to be processed. | 1268 | * @evt - Event to be processed. |
1270 | * | 1269 | * |
1271 | * Process the given event for an lnode that is currently in the "offline" state. | 1270 | * Process the given event for an lnode that is currently in the "offline" state. |
1272 | * Invoked with HW lock held. | 1271 | * Invoked with HW lock held. |
1273 | * Return - none. | 1272 | * Return - none. |
1274 | */ | 1273 | */ |
1275 | static void | 1274 | static void |
1276 | csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt) | 1275 | csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt) |
1277 | { | 1276 | { |
1278 | struct csio_hw *hw = csio_lnode_to_hw(ln); | 1277 | struct csio_hw *hw = csio_lnode_to_hw(ln); |
1279 | struct csio_lnode *rln = hw->rln; | 1278 | struct csio_lnode *rln = hw->rln; |
1280 | int rv; | 1279 | int rv; |
1281 | 1280 | ||
1282 | CSIO_INC_STATS(ln, n_evt_sm[evt]); | 1281 | CSIO_INC_STATS(ln, n_evt_sm[evt]); |
1283 | switch (evt) { | 1282 | switch (evt) { |
1284 | case CSIO_LNE_LINKUP: | 1283 | case CSIO_LNE_LINKUP: |
1285 | csio_set_state(&ln->sm, csio_lns_online); | 1284 | csio_set_state(&ln->sm, csio_lns_online); |
1286 | /* Read FCF only for physical lnode */ | 1285 | /* Read FCF only for physical lnode */ |
1287 | if (csio_is_phys_ln(ln)) { | 1286 | if (csio_is_phys_ln(ln)) { |
1288 | rv = csio_ln_read_fcf_entry(ln, | 1287 | rv = csio_ln_read_fcf_entry(ln, |
1289 | csio_ln_read_fcf_cbfn); | 1288 | csio_ln_read_fcf_cbfn); |
1290 | if (rv != 0) { | 1289 | if (rv != 0) { |
1291 | /* TODO: Send HW RESET event */ | 1290 | /* TODO: Send HW RESET event */ |
1292 | CSIO_INC_STATS(ln, n_err); | 1291 | CSIO_INC_STATS(ln, n_err); |
1293 | break; | 1292 | break; |
1294 | } | 1293 | } |
1295 | 1294 | ||
1296 | /* Add FCF record */ | 1295 | /* Add FCF record */ |
1297 | list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); | 1296 | list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead); |
1298 | } | 1297 | } |
1299 | 1298 | ||
1300 | rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); | 1299 | rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn); |
1301 | if (rv != 0) { | 1300 | if (rv != 0) { |
1302 | /* TODO: Send HW RESET event */ | 1301 | /* TODO: Send HW RESET event */ |
1303 | CSIO_INC_STATS(ln, n_err); | 1302 | CSIO_INC_STATS(ln, n_err); |
1304 | } | 1303 | } |
1305 | break; | 1304 | break; |
1306 | 1305 | ||
1307 | case CSIO_LNE_LINK_DOWN: | 1306 | case CSIO_LNE_LINK_DOWN: |
1308 | case CSIO_LNE_DOWN_LINK: | 1307 | case CSIO_LNE_DOWN_LINK: |
1309 | case CSIO_LNE_LOGO: | 1308 | case CSIO_LNE_LOGO: |
1310 | csio_ln_dbg(ln, | 1309 | csio_ln_dbg(ln, |
1311 | "ignoring event %d recv from did x%x " | 1310 | "ignoring event %d recv from did x%x " |
1312 | "in ln state[offline].\n", evt, ln->nport_id); | 1311 | "in ln state[offline].\n", evt, ln->nport_id); |
1313 | CSIO_INC_STATS(ln, n_evt_drop); | 1312 | CSIO_INC_STATS(ln, n_evt_drop); |
1314 | break; | 1313 | break; |
1315 | 1314 | ||
1316 | case CSIO_LNE_CLOSE: | 1315 | case CSIO_LNE_CLOSE: |
1317 | csio_set_state(&ln->sm, csio_lns_uninit); | 1316 | csio_set_state(&ln->sm, csio_lns_uninit); |
1318 | csio_post_event_rns(ln, CSIO_RNFE_CLOSE); | 1317 | csio_post_event_rns(ln, CSIO_RNFE_CLOSE); |
1319 | break; | 1318 | break; |
1320 | 1319 | ||
1321 | default: | 1320 | default: |
1322 | csio_ln_dbg(ln, | 1321 | csio_ln_dbg(ln, |
1323 | "unexp ln event %d recv from did:x%x in " | 1322 | "unexp ln event %d recv from did:x%x in " |
1324 | "ln state[offline]\n", evt, ln->nport_id); | 1323 | "ln state[offline]\n", evt, ln->nport_id); |
1325 | CSIO_INC_STATS(ln, n_evt_unexp); | 1324 | CSIO_INC_STATS(ln, n_evt_unexp); |
1326 | CSIO_DB_ASSERT(0); | 1325 | CSIO_DB_ASSERT(0); |
1327 | break; | 1326 | break; |
1328 | } /* switch event */ | 1327 | } /* switch event */ |
1329 | } | 1328 | } |
1330 | 1329 | ||
1331 | /*****************************************************************************/ | 1330 | /*****************************************************************************/ |
1332 | /* END: Lnode SM */ | 1331 | /* END: Lnode SM */ |
1333 | /*****************************************************************************/ | 1332 | /*****************************************************************************/ |
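Editor's note: read together, the four handlers above define a small state machine. The table below is a condensed view derived by hand from the switch statements (events that are only logged or dropped leave the state unchanged); treat it as a reading aid, not driver code:

#include <stdio.h>

enum ln_state { UNINIT, ONLINE, READY, OFFLINE };
enum ln_event { LINKUP, FAB_INIT_DONE, LINK_DOWN, DOWN_LINK, CLOSE, LOGO, NR_EV };

/* next_state[current][event], transcribed from csio_lns_uninit/online/ready/offline. */
static const enum ln_state next_state[4][NR_EV] = {
        /*            LINKUP   FAB_INIT_DONE LINK_DOWN DOWN_LINK CLOSE   LOGO    */
        [UNINIT]  = { ONLINE,  UNINIT,       UNINIT,   UNINIT,   UNINIT, UNINIT  },
        [ONLINE]  = { ONLINE,  READY,        UNINIT,   UNINIT,   ONLINE, ONLINE  },
        [READY]   = { READY,   READY,        OFFLINE,  OFFLINE,  UNINIT, OFFLINE },
        [OFFLINE] = { ONLINE,  OFFLINE,      OFFLINE,  OFFLINE,  UNINIT, OFFLINE },
};

int main(void)
{
        enum ln_state s = UNINIT;

        s = next_state[s][LINKUP];              /* uninit  -> online  */
        s = next_state[s][FAB_INIT_DONE];       /* online  -> ready   */
        s = next_state[s][LINK_DOWN];           /* ready   -> offline */
        printf("state = %d (OFFLINE = %d)\n", s, OFFLINE);
        return 0;
}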
1334 | 1333 | ||
1335 | static void | 1334 | static void |
1336 | csio_free_fcfinfo(struct kref *kref) | 1335 | csio_free_fcfinfo(struct kref *kref) |
1337 | { | 1336 | { |
1338 | struct csio_fcf_info *fcfinfo = container_of(kref, | 1337 | struct csio_fcf_info *fcfinfo = container_of(kref, |
1339 | struct csio_fcf_info, kref); | 1338 | struct csio_fcf_info, kref); |
1340 | kfree(fcfinfo); | 1339 | kfree(fcfinfo); |
1341 | } | 1340 | } |
1342 | 1341 | ||
1343 | /* Helper routines for attributes */ | 1342 | /* Helper routines for attributes */ |
1344 | /* | 1343 | /* |
1345 | * csio_lnode_state_to_str - Get current state of FCOE lnode. | 1344 | * csio_lnode_state_to_str - Get current state of FCOE lnode. |
1346 | * @ln - lnode | 1345 | * @ln - lnode |
1347 | * @str - state of lnode. | 1346 | * @str - state of lnode. |
1348 | * | 1347 | * |
1349 | */ | 1348 | */ |
1350 | void | 1349 | void |
1351 | csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) | 1350 | csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str) |
1352 | { | 1351 | { |
1353 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { | 1352 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) { |
1354 | strcpy(str, "UNINIT"); | 1353 | strcpy(str, "UNINIT"); |
1355 | return; | 1354 | return; |
1356 | } | 1355 | } |
1357 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { | 1356 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) { |
1358 | strcpy(str, "READY"); | 1357 | strcpy(str, "READY"); |
1359 | return; | 1358 | return; |
1360 | } | 1359 | } |
1361 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { | 1360 | if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) { |
1362 | strcpy(str, "OFFLINE"); | 1361 | strcpy(str, "OFFLINE"); |
1363 | return; | 1362 | return; |
1364 | } | 1363 | } |
1365 | strcpy(str, "UNKNOWN"); | 1364 | strcpy(str, "UNKNOWN"); |
1366 | } /* csio_lnode_state_to_str */ | 1365 | } /* csio_lnode_state_to_str */ |
1367 | 1366 | ||
1368 | 1367 | ||
1369 | int | 1368 | int |
1370 | csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid, | 1369 | csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid, |
1371 | struct fw_fcoe_port_stats *port_stats) | 1370 | struct fw_fcoe_port_stats *port_stats) |
1372 | { | 1371 | { |
1373 | struct csio_mb *mbp; | 1372 | struct csio_mb *mbp; |
1374 | struct fw_fcoe_port_cmd_params portparams; | 1373 | struct fw_fcoe_port_cmd_params portparams; |
1375 | enum fw_retval retval; | 1374 | enum fw_retval retval; |
1376 | int idx; | 1375 | int idx; |
1377 | 1376 | ||
1378 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); | 1377 | mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC); |
1379 | if (!mbp) { | 1378 | if (!mbp) { |
1380 | csio_err(hw, "FCoE PORT PARAMS command out of memory!\n"); | 1379 | csio_err(hw, "FCoE PORT PARAMS command out of memory!\n"); |
1381 | return -EINVAL; | 1380 | return -EINVAL; |
1382 | } | 1381 | } |
1383 | portparams.portid = portid; | 1382 | portparams.portid = portid; |
1384 | 1383 | ||
1385 | for (idx = 1; idx <= 3; idx++) { | 1384 | for (idx = 1; idx <= 3; idx++) { |
1386 | portparams.idx = (idx-1)*6 + 1; | 1385 | portparams.idx = (idx-1)*6 + 1; |
1387 | portparams.nstats = 6; | 1386 | portparams.nstats = 6; |
1388 | if (idx == 3) | 1387 | if (idx == 3) |
1389 | portparams.nstats = 4; | 1388 | portparams.nstats = 4; |
1390 | csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, | 1389 | csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, |
1391 | &portparams, NULL); | 1390 | &portparams, NULL); |
1392 | if (csio_mb_issue(hw, mbp)) { | 1391 | if (csio_mb_issue(hw, mbp)) { |
1393 | csio_err(hw, "Issue of FCoE port params failed!\n"); | 1392 | csio_err(hw, "Issue of FCoE port params failed!\n"); |
1394 | mempool_free(mbp, hw->mb_mempool); | 1393 | mempool_free(mbp, hw->mb_mempool); |
1395 | return -EINVAL; | 1394 | return -EINVAL; |
1396 | } | 1395 | } |
1397 | csio_mb_process_portparams_rsp(hw, mbp, &retval, | 1396 | csio_mb_process_portparams_rsp(hw, mbp, &retval, |
1398 | &portparams, port_stats); | 1397 | &portparams, port_stats); |
1399 | } | 1398 | } |
1400 | 1399 | ||
1401 | mempool_free(mbp, hw->mb_mempool); | 1400 | mempool_free(mbp, hw->mb_mempool); |
1402 | return 0; | 1401 | return 0; |
1403 | } | 1402 | } |
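Editor's note: the loop in csio_get_phy_port_stats() fetches the per-port counters in three mailbox passes of 6, 6 and 4 statistics, which suggests (this is an inference from the split, not a documented fact) that the firmware exposes 16 counters per port. A quick standalone check of the (idx-1)*6 + 1 index arithmetic:

#include <stdio.h>

int main(void)
{
        int total = 0;

        for (int idx = 1; idx <= 3; idx++) {
                int start = (idx - 1) * 6 + 1;          /* 1, 7, 13 */
                int nstats = (idx == 3) ? 4 : 6;        /* 6, 6, 4  */

                printf("pass %d: stats %d..%d\n", idx, start, start + nstats - 1);
                total += nstats;
        }
        printf("total stats = %d\n", total);            /* 16 */
        return 0;
}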
1404 | 1403 | ||
1405 | /* | 1404 | /* |
1406 | * csio_ln_mgmt_wr_handler - Mgmt Work Request handler. | 1405 | * csio_ln_mgmt_wr_handler - Mgmt Work Request handler. |
1407 | * @wr - WR. | 1406 | * @wr - WR. |
1408 | * @len - WR len. | 1407 | * @len - WR len. |
1409 | * This handler is invoked when an outstanding mgmt WR is completed. | 1408 | * This handler is invoked when an outstanding mgmt WR is completed. |
1410 | * It is invoked in the context of the FW event worker thread for every | 1409 | * It is invoked in the context of the FW event worker thread for every |
1411 | * mgmt event received. | 1410 | * mgmt event received. |
1412 | * Return - none. | 1411 | * Return - none. |
1413 | */ | 1412 | */ |
1414 | 1413 | ||
1415 | static void | 1414 | static void |
1416 | csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len) | 1415 | csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len) |
1417 | { | 1416 | { |
1418 | struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); | 1417 | struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); |
1419 | struct csio_ioreq *io_req = NULL; | 1418 | struct csio_ioreq *io_req = NULL; |
1420 | struct fw_fcoe_els_ct_wr *wr_cmd; | 1419 | struct fw_fcoe_els_ct_wr *wr_cmd; |
1421 | 1420 | ||
1422 | 1421 | ||
1423 | wr_cmd = (struct fw_fcoe_els_ct_wr *) wr; | 1422 | wr_cmd = (struct fw_fcoe_els_ct_wr *) wr; |
1424 | 1423 | ||
1425 | if (len < sizeof(struct fw_fcoe_els_ct_wr)) { | 1424 | if (len < sizeof(struct fw_fcoe_els_ct_wr)) { |
1426 | csio_err(mgmtm->hw, | 1425 | csio_err(mgmtm->hw, |
1427 | "Invalid ELS CT WR length recvd, len:%x\n", len); | 1426 | "Invalid ELS CT WR length recvd, len:%x\n", len); |
1428 | mgmtm->stats.n_err++; | 1427 | mgmtm->stats.n_err++; |
1429 | return; | 1428 | return; |
1430 | } | 1429 | } |
1431 | 1430 | ||
1432 | io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie); | 1431 | io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie); |
1433 | io_req->wr_status = csio_wr_status(wr_cmd); | 1432 | io_req->wr_status = csio_wr_status(wr_cmd); |
1434 | 1433 | ||
1435 | /* Check that the ioreq exists in our active queue */ | 1434 | /* Check that the ioreq exists in our active queue */ |
1436 | spin_lock_irq(&hw->lock); | 1435 | spin_lock_irq(&hw->lock); |
1437 | if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) { | 1436 | if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) { |
1438 | csio_err(mgmtm->hw, | 1437 | csio_err(mgmtm->hw, |
1439 | "Error- Invalid IO handle recv in WR. handle: %p\n", | 1438 | "Error- Invalid IO handle recv in WR. handle: %p\n", |
1440 | io_req); | 1439 | io_req); |
1441 | mgmtm->stats.n_err++; | 1440 | mgmtm->stats.n_err++; |
1442 | spin_unlock_irq(&hw->lock); | 1441 | spin_unlock_irq(&hw->lock); |
1443 | return; | 1442 | return; |
1444 | } | 1443 | } |
1445 | 1444 | ||
1446 | mgmtm = csio_hw_to_mgmtm(hw); | 1445 | mgmtm = csio_hw_to_mgmtm(hw); |
1447 | 1446 | ||
1448 | /* Dequeue from active queue */ | 1447 | /* Dequeue from active queue */ |
1449 | list_del_init(&io_req->sm.sm_list); | 1448 | list_del_init(&io_req->sm.sm_list); |
1450 | mgmtm->stats.n_active--; | 1449 | mgmtm->stats.n_active--; |
1451 | spin_unlock_irq(&hw->lock); | 1450 | spin_unlock_irq(&hw->lock); |
1452 | 1451 | ||
1453 | /* io_req will be freed by completion handler */ | 1452 | /* io_req will be freed by completion handler */ |
1454 | if (io_req->io_cbfn) | 1453 | if (io_req->io_cbfn) |
1455 | io_req->io_cbfn(hw, io_req); | 1454 | io_req->io_cbfn(hw, io_req); |
1456 | } | 1455 | } |
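Editor's note: csio_ln_mgmt_wr_handler() recovers the csio_ioreq pointer from the cookie the driver stashed in the WR and validates it against the active queue before dereferencing it. A standalone sketch of that cookie round-trip, with simplified hypothetical types and req_lookup() standing in for csio_mgmt_req_lookup():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified io_req and "WR" carrying an opaque cookie. */
struct io_req { int id; };
struct wr { uint64_t cookie; int status; };

/* Only pointers we actually issued are considered valid. */
static struct io_req *active;

static int req_lookup(struct io_req *req)
{
        return (req && req == active) ? 0 : -1;
}

static void wr_handler(struct wr *w)
{
        struct io_req *req = (struct io_req *)(uintptr_t)w->cookie;

        if (req_lookup(req) != 0) {     /* never trust the echoed pointer blindly */
                printf("invalid handle %p\n", (void *)req);
                return;
        }
        printf("io_req %d completed, status %d\n", req->id, w->status);
}

int main(void)
{
        struct io_req req = { .id = 7 };
        struct wr w = { .cookie = (uintptr_t)&req, .status = 0 };

        active = &req;                  /* "issue" the request */
        wr_handler(&w);                 /* firmware echoes the cookie back */
        return 0;
}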
1457 | 1456 | ||
1458 | /** | 1457 | /** |
1459 | * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events. | 1458 | * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events. |
1460 | * @hw: HW module | 1459 | * @hw: HW module |
1461 | * @cpl_op: CPL opcode | 1460 | * @cpl_op: CPL opcode |
1462 | * @cmd: FW cmd/WR. | 1461 | * @cmd: FW cmd/WR. |
1463 | * | 1462 | * |
1464 | * Process received FCoE cmd/WR event from FW. | 1463 | * Process received FCoE cmd/WR event from FW. |
1465 | */ | 1464 | */ |
1466 | void | 1465 | void |
1467 | csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd) | 1466 | csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd) |
1468 | { | 1467 | { |
1469 | struct csio_lnode *ln; | 1468 | struct csio_lnode *ln; |
1470 | struct csio_rnode *rn; | 1469 | struct csio_rnode *rn; |
1471 | uint8_t portid, opcode = *(uint8_t *)cmd; | 1470 | uint8_t portid, opcode = *(uint8_t *)cmd; |
1472 | struct fw_fcoe_link_cmd *lcmd; | 1471 | struct fw_fcoe_link_cmd *lcmd; |
1473 | struct fw_wr_hdr *wr; | 1472 | struct fw_wr_hdr *wr; |
1474 | struct fw_rdev_wr *rdev_wr; | 1473 | struct fw_rdev_wr *rdev_wr; |
1475 | enum fw_fcoe_link_status lstatus; | 1474 | enum fw_fcoe_link_status lstatus; |
1476 | uint32_t fcfi, rdev_flowid, vnpi; | 1475 | uint32_t fcfi, rdev_flowid, vnpi; |
1477 | enum csio_ln_ev evt; | 1476 | enum csio_ln_ev evt; |
1478 | 1477 | ||
1479 | if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) { | 1478 | if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) { |
1480 | 1479 | ||
1481 | lcmd = (struct fw_fcoe_link_cmd *)cmd; | 1480 | lcmd = (struct fw_fcoe_link_cmd *)cmd; |
1482 | lstatus = lcmd->lstatus; | 1481 | lstatus = lcmd->lstatus; |
1483 | portid = FW_FCOE_LINK_CMD_PORTID_GET( | 1482 | portid = FW_FCOE_LINK_CMD_PORTID_GET( |
1484 | ntohl(lcmd->op_to_portid)); | 1483 | ntohl(lcmd->op_to_portid)); |
1485 | fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi)); | 1484 | fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi)); |
1486 | vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd)); | 1485 | vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd)); |
1487 | 1486 | ||
1488 | if (lstatus == FCOE_LINKUP) { | 1487 | if (lstatus == FCOE_LINKUP) { |
1489 | 1488 | ||
1490 | /* HW lock here */ | 1489 | /* HW lock here */ |
1491 | spin_lock_irq(&hw->lock); | 1490 | spin_lock_irq(&hw->lock); |
1492 | csio_handle_link_up(hw, portid, fcfi, vnpi); | 1491 | csio_handle_link_up(hw, portid, fcfi, vnpi); |
1493 | spin_unlock_irq(&hw->lock); | 1492 | spin_unlock_irq(&hw->lock); |
1494 | /* HW un lock here */ | 1493 | /* HW un lock here */ |
1495 | 1494 | ||
1496 | } else if (lstatus == FCOE_LINKDOWN) { | 1495 | } else if (lstatus == FCOE_LINKDOWN) { |
1497 | 1496 | ||
1498 | /* HW lock here */ | 1497 | /* HW lock here */ |
1499 | spin_lock_irq(&hw->lock); | 1498 | spin_lock_irq(&hw->lock); |
1500 | csio_handle_link_down(hw, portid, fcfi, vnpi); | 1499 | csio_handle_link_down(hw, portid, fcfi, vnpi); |
1501 | spin_unlock_irq(&hw->lock); | 1500 | spin_unlock_irq(&hw->lock); |
1502 | /* HW un lock here */ | 1501 | /* HW un lock here */ |
1503 | } else { | 1502 | } else { |
1504 | csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n", | 1503 | csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n", |
1505 | lcmd->lstatus); | 1504 | lcmd->lstatus); |
1506 | CSIO_INC_STATS(hw, n_cpl_unexp); | 1505 | CSIO_INC_STATS(hw, n_cpl_unexp); |
1507 | } | 1506 | } |
1508 | } else if (cpl_op == CPL_FW6_PLD) { | 1507 | } else if (cpl_op == CPL_FW6_PLD) { |
1509 | wr = (struct fw_wr_hdr *) (cmd + 4); | 1508 | wr = (struct fw_wr_hdr *) (cmd + 4); |
1510 | if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) | 1509 | if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) |
1511 | == FW_RDEV_WR) { | 1510 | == FW_RDEV_WR) { |
1512 | 1511 | ||
1513 | rdev_wr = (struct fw_rdev_wr *) (cmd + 4); | 1512 | rdev_wr = (struct fw_rdev_wr *) (cmd + 4); |
1514 | 1513 | ||
1515 | rdev_flowid = FW_RDEV_WR_FLOWID_GET( | 1514 | rdev_flowid = FW_RDEV_WR_FLOWID_GET( |
1516 | ntohl(rdev_wr->alloc_to_len16)); | 1515 | ntohl(rdev_wr->alloc_to_len16)); |
1517 | vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET( | 1516 | vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET( |
1518 | ntohl(rdev_wr->flags_to_assoc_flowid)); | 1517 | ntohl(rdev_wr->flags_to_assoc_flowid)); |
1519 | 1518 | ||
1520 | csio_dbg(hw, | 1519 | csio_dbg(hw, |
1521 | "FW_RDEV_WR: flowid:x%x ev_cause:x%x " | 1520 | "FW_RDEV_WR: flowid:x%x ev_cause:x%x " |
1522 | "vnpi:0x%x\n", rdev_flowid, | 1521 | "vnpi:0x%x\n", rdev_flowid, |
1523 | rdev_wr->event_cause, vnpi); | 1522 | rdev_wr->event_cause, vnpi); |
1524 | 1523 | ||
1525 | if (rdev_wr->protocol != PROT_FCOE) { | 1524 | if (rdev_wr->protocol != PROT_FCOE) { |
1526 | csio_err(hw, | 1525 | csio_err(hw, |
1527 | "FW_RDEV_WR: invalid proto:x%x " | 1526 | "FW_RDEV_WR: invalid proto:x%x " |
1528 | "received with flowid:x%x\n", | 1527 | "received with flowid:x%x\n", |
1529 | rdev_wr->protocol, | 1528 | rdev_wr->protocol, |
1530 | rdev_flowid); | 1529 | rdev_flowid); |
1531 | CSIO_INC_STATS(hw, n_evt_drop); | 1530 | CSIO_INC_STATS(hw, n_evt_drop); |
1532 | return; | 1531 | return; |
1533 | } | 1532 | } |
1534 | 1533 | ||
1535 | /* HW lock here */ | 1534 | /* HW lock here */ |
1536 | spin_lock_irq(&hw->lock); | 1535 | spin_lock_irq(&hw->lock); |
1537 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); | 1536 | ln = csio_ln_lookup_by_vnpi(hw, vnpi); |
1538 | if (!ln) { | 1537 | if (!ln) { |
1539 | csio_err(hw, | 1538 | csio_err(hw, |
1540 | "FW_RDEV_WR: invalid vnpi:x%x received " | 1539 | "FW_RDEV_WR: invalid vnpi:x%x received " |
1541 | "with flowid:x%x\n", vnpi, rdev_flowid); | 1540 | "with flowid:x%x\n", vnpi, rdev_flowid); |
1542 | CSIO_INC_STATS(hw, n_evt_drop); | 1541 | CSIO_INC_STATS(hw, n_evt_drop); |
1543 | goto out_pld; | 1542 | goto out_pld; |
1544 | } | 1543 | } |
1545 | 1544 | ||
1546 | rn = csio_confirm_rnode(ln, rdev_flowid, | 1545 | rn = csio_confirm_rnode(ln, rdev_flowid, |
1547 | &rdev_wr->u.fcoe_rdev); | 1546 | &rdev_wr->u.fcoe_rdev); |
1548 | if (!rn) { | 1547 | if (!rn) { |
1549 | csio_ln_dbg(ln, | 1548 | csio_ln_dbg(ln, |
1550 | "Failed to confirm rnode " | 1549 | "Failed to confirm rnode " |
1551 | "for flowid:x%x\n", rdev_flowid); | 1550 | "for flowid:x%x\n", rdev_flowid); |
1552 | CSIO_INC_STATS(hw, n_evt_drop); | 1551 | CSIO_INC_STATS(hw, n_evt_drop); |
1553 | goto out_pld; | 1552 | goto out_pld; |
1554 | } | 1553 | } |
1555 | 1554 | ||
1556 | /* save previous event for debugging */ | 1555 | /* save previous event for debugging */ |
1557 | ln->prev_evt = ln->cur_evt; | 1556 | ln->prev_evt = ln->cur_evt; |
1558 | ln->cur_evt = rdev_wr->event_cause; | 1557 | ln->cur_evt = rdev_wr->event_cause; |
1559 | CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]); | 1558 | CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]); |
1560 | 1559 | ||
1561 | /* Translate all the fabric events to lnode SM events */ | 1560 | /* Translate all the fabric events to lnode SM events */ |
1562 | evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause); | 1561 | evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause); |
1563 | if (evt) { | 1562 | if (evt) { |
1564 | csio_ln_dbg(ln, | 1563 | csio_ln_dbg(ln, |
1565 | "Posting event to lnode event:%d " | 1564 | "Posting event to lnode event:%d " |
1566 | "cause:%d flowid:x%x\n", evt, | 1565 | "cause:%d flowid:x%x\n", evt, |
1567 | rdev_wr->event_cause, rdev_flowid); | 1566 | rdev_wr->event_cause, rdev_flowid); |
1568 | csio_post_event(&ln->sm, evt); | 1567 | csio_post_event(&ln->sm, evt); |
1569 | } | 1568 | } |
1570 | 1569 | ||
1571 | /* Handover event to rn SM here. */ | 1570 | /* Handover event to rn SM here. */ |
1572 | csio_rnode_fwevt_handler(rn, rdev_wr->event_cause); | 1571 | csio_rnode_fwevt_handler(rn, rdev_wr->event_cause); |
1573 | out_pld: | 1572 | out_pld: |
1574 | spin_unlock_irq(&hw->lock); | 1573 | spin_unlock_irq(&hw->lock); |
1575 | return; | 1574 | return; |
1576 | } else { | 1575 | } else { |
1577 | csio_warn(hw, "unexpected WR op(0x%x) recv\n", | 1576 | csio_warn(hw, "unexpected WR op(0x%x) recv\n", |
1578 | FW_WR_OP_GET(be32_to_cpu((wr->hi)))); | 1577 | FW_WR_OP_GET(be32_to_cpu((wr->hi)))); |
1579 | CSIO_INC_STATS(hw, n_cpl_unexp); | 1578 | CSIO_INC_STATS(hw, n_cpl_unexp); |
1580 | } | 1579 | } |
1581 | } else if (cpl_op == CPL_FW6_MSG) { | 1580 | } else if (cpl_op == CPL_FW6_MSG) { |
1582 | wr = (struct fw_wr_hdr *) (cmd); | 1581 | wr = (struct fw_wr_hdr *) (cmd); |
1583 | if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { | 1582 | if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) { |
1584 | csio_ln_mgmt_wr_handler(hw, wr, | 1583 | csio_ln_mgmt_wr_handler(hw, wr, |
1585 | sizeof(struct fw_fcoe_els_ct_wr)); | 1584 | sizeof(struct fw_fcoe_els_ct_wr)); |
1586 | } else { | 1585 | } else { |
1587 | csio_warn(hw, "unexpected WR op(0x%x) recv\n", | 1586 | csio_warn(hw, "unexpected WR op(0x%x) recv\n", |
1588 | FW_WR_OP_GET(be32_to_cpu((wr->hi)))); | 1587 | FW_WR_OP_GET(be32_to_cpu((wr->hi)))); |
1589 | CSIO_INC_STATS(hw, n_cpl_unexp); | 1588 | CSIO_INC_STATS(hw, n_cpl_unexp); |
1590 | } | 1589 | } |
1591 | } else { | 1590 | } else { |
1592 | csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode); | 1591 | csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode); |
1593 | CSIO_INC_STATS(hw, n_cpl_unexp); | 1592 | CSIO_INC_STATS(hw, n_cpl_unexp); |
1594 | } | 1593 | } |
1595 | } | 1594 | } |
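Editor's note: in the CPL_FW6_PLD branch above, cmd is a pointer to 64-bit words, so cmd + 4 skips 32 bytes to reach the embedded fw_wr_hdr (that the header sits at that offset is what the code implies; the sketch only checks the pointer arithmetic):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t msg[8] = { 0 };
        uint64_t *cmd = msg;

        /* cmd + 4 advances 4 * 8 = 32 bytes, not 4 bytes. */
        printf("offset = %zu bytes\n",
               (size_t)((char *)(cmd + 4) - (char *)cmd));
        return 0;
}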
1596 | 1595 | ||
1597 | /** | 1596 | /** |
1598 | * csio_lnode_start - Kickstart lnode discovery. | 1597 | * csio_lnode_start - Kickstart lnode discovery. |
1599 | * @ln: lnode | 1598 | * @ln: lnode |
1600 | * | 1599 | * |
1601 | * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command. | 1600 | * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command. |
1602 | */ | 1601 | */ |
1603 | int | 1602 | int |
1604 | csio_lnode_start(struct csio_lnode *ln) | 1603 | csio_lnode_start(struct csio_lnode *ln) |
1605 | { | 1604 | { |
1606 | int rv = 0; | 1605 | int rv = 0; |
1607 | if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) { | 1606 | if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) { |
1608 | rv = csio_fcoe_enable_link(ln, 1); | 1607 | rv = csio_fcoe_enable_link(ln, 1); |
1609 | ln->flags |= CSIO_LNF_LINK_ENABLE; | 1608 | ln->flags |= CSIO_LNF_LINK_ENABLE; |
1610 | } | 1609 | } |
1611 | 1610 | ||
1612 | return rv; | 1611 | return rv; |
1613 | } | 1612 | } |
1614 | 1613 | ||
1615 | /** | 1614 | /** |
1616 | * csio_lnode_stop - Stop the lnode. | 1615 | * csio_lnode_stop - Stop the lnode. |
1617 | * @ln: lnode | 1616 | * @ln: lnode |
1618 | * | 1617 | * |
1619 | * This routine is invoked by HW module to stop lnode and its associated NPIV | 1618 | * This routine is invoked by HW module to stop lnode and its associated NPIV |
1620 | * lnodes. | 1619 | * lnodes. |
1621 | */ | 1620 | */ |
1622 | void | 1621 | void |
1623 | csio_lnode_stop(struct csio_lnode *ln) | 1622 | csio_lnode_stop(struct csio_lnode *ln) |
1624 | { | 1623 | { |
1625 | csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK); | 1624 | csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK); |
1626 | if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) { | 1625 | if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) { |
1627 | csio_fcoe_enable_link(ln, 0); | 1626 | csio_fcoe_enable_link(ln, 0); |
1628 | ln->flags &= ~CSIO_LNF_LINK_ENABLE; | 1627 | ln->flags &= ~CSIO_LNF_LINK_ENABLE; |
1629 | } | 1628 | } |
1630 | csio_ln_dbg(ln, "stopping ln :%p\n", ln); | 1629 | csio_ln_dbg(ln, "stopping ln :%p\n", ln); |
1631 | } | 1630 | } |
1632 | 1631 | ||
1633 | /** | 1632 | /** |
1634 | * csio_lnode_close - Close an lnode. | 1633 | * csio_lnode_close - Close an lnode. |
1635 | * @ln: lnode | 1634 | * @ln: lnode |
1636 | * | 1635 | * |
1637 | * This routine is invoked by HW module to close an lnode and its | 1636 | * This routine is invoked by HW module to close an lnode and its |
1638 | * associated NPIV lnodes. Lnode and its associated NPIV lnodes are | 1637 | * associated NPIV lnodes. Lnode and its associated NPIV lnodes are |
1639 | * set to uninitialized state. | 1638 | * set to uninitialized state. |
1640 | */ | 1639 | */ |
1641 | void | 1640 | void |
1642 | csio_lnode_close(struct csio_lnode *ln) | 1641 | csio_lnode_close(struct csio_lnode *ln) |
1643 | { | 1642 | { |
1644 | csio_post_event_lns(ln, CSIO_LNE_CLOSE); | 1643 | csio_post_event_lns(ln, CSIO_LNE_CLOSE); |
1645 | if (csio_is_phys_ln(ln)) | 1644 | if (csio_is_phys_ln(ln)) |
1646 | ln->vnp_flowid = CSIO_INVALID_IDX; | 1645 | ln->vnp_flowid = CSIO_INVALID_IDX; |
1647 | 1646 | ||
1648 | csio_ln_dbg(ln, "closed ln :%p\n", ln); | 1647 | csio_ln_dbg(ln, "closed ln :%p\n", ln); |
1649 | } | 1648 | } |
1650 | 1649 | ||
1651 | /* | 1650 | /* |
1652 | * csio_ln_prep_ecwr - Prepare ELS/CT WR. | 1651 | * csio_ln_prep_ecwr - Prepare ELS/CT WR. |
1653 | * @io_req - IO request. | 1652 | * @io_req - IO request. |
1654 | * @wr_len - WR len | 1653 | * @wr_len - WR len |
1655 | * @immd_len - WR immediate data | 1654 | * @immd_len - WR immediate data |
1656 | * @sub_op - Sub opcode | 1655 | * @sub_op - Sub opcode |
1657 | * @sid - source portid. | 1656 | * @sid - source portid. |
1658 | * @did - destination portid | 1657 | * @did - destination portid |
1659 | * @flow_id - flowid | 1658 | * @flow_id - flowid |
1660 | * @fw_wr - ELS/CT WR to be prepared. | 1659 | * @fw_wr - ELS/CT WR to be prepared. |
1661 | * Returns: 0 - on success | 1660 | * Returns: 0 - on success |
1662 | */ | 1661 | */ |
1663 | static int | 1662 | static int |
1664 | csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, | 1663 | csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len, |
1665 | uint32_t immd_len, uint8_t sub_op, uint32_t sid, | 1664 | uint32_t immd_len, uint8_t sub_op, uint32_t sid, |
1666 | uint32_t did, uint32_t flow_id, uint8_t *fw_wr) | 1665 | uint32_t did, uint32_t flow_id, uint8_t *fw_wr) |
1667 | { | 1666 | { |
1668 | struct fw_fcoe_els_ct_wr *wr; | 1667 | struct fw_fcoe_els_ct_wr *wr; |
1669 | __be32 port_id; | 1668 | __be32 port_id; |
1670 | 1669 | ||
1671 | wr = (struct fw_fcoe_els_ct_wr *)fw_wr; | 1670 | wr = (struct fw_fcoe_els_ct_wr *)fw_wr; |
1672 | wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) | | 1671 | wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) | |
1673 | FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len)); | 1672 | FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len)); |
1674 | 1673 | ||
1675 | wr_len = DIV_ROUND_UP(wr_len, 16); | 1674 | wr_len = DIV_ROUND_UP(wr_len, 16); |
1676 | wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) | | 1675 | wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) | |
1677 | FW_WR_LEN16(wr_len)); | 1676 | FW_WR_LEN16(wr_len)); |
1678 | wr->els_ct_type = sub_op; | 1677 | wr->els_ct_type = sub_op; |
1679 | wr->ctl_pri = 0; | 1678 | wr->ctl_pri = 0; |
1680 | wr->cp_en_class = 0; | 1679 | wr->cp_en_class = 0; |
1681 | wr->cookie = io_req->fw_handle; | 1680 | wr->cookie = io_req->fw_handle; |
1682 | wr->iqid = cpu_to_be16(csio_q_physiqid( | 1681 | wr->iqid = cpu_to_be16(csio_q_physiqid( |
1683 | io_req->lnode->hwp, io_req->iq_idx)); | 1682 | io_req->lnode->hwp, io_req->iq_idx)); |
1684 | wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1); | 1683 | wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1); |
1685 | wr->tmo_val = (uint8_t) io_req->tmo; | 1684 | wr->tmo_val = (uint8_t) io_req->tmo; |
1686 | port_id = htonl(sid); | 1685 | port_id = htonl(sid); |
1687 | memcpy(wr->l_id, PORT_ID_PTR(port_id), 3); | 1686 | memcpy(wr->l_id, PORT_ID_PTR(port_id), 3); |
1688 | port_id = htonl(did); | 1687 | port_id = htonl(did); |
1689 | memcpy(wr->r_id, PORT_ID_PTR(port_id), 3); | 1688 | memcpy(wr->r_id, PORT_ID_PTR(port_id), 3); |
1690 | 1689 | ||
1691 | /* Prepare RSP SGL */ | 1690 | /* Prepare RSP SGL */ |
1692 | wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len); | 1691 | wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len); |
1693 | wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr); | 1692 | wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr); |
1694 | return 0; | 1693 | return 0; |
1695 | } | 1694 | } |
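Editor's note: two details in csio_ln_prep_ecwr() are easy to miss: the WR length is expressed in 16-byte units via DIV_ROUND_UP(), and FC port IDs are 24-bit values copied as the three low-order bytes of a big-endian word (presumably what PORT_ID_PTR() points at; treat that reading as an assumption). A standalone sketch of both:

#include <arpa/inet.h>   /* htonl() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Kernel-style round-up division, as used for the WR length-in-16-byte units. */
#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
        uint32_t wr_len = 72;                   /* arbitrary example length */
        uint32_t be_id;
        uint8_t r_id[3];

        printf("len16 = %u\n", DIV_ROUND_UP(wr_len, 16));       /* 5 */

        /* After htonl() the three significant bytes of a 24-bit port ID sit
         * at offsets 1..3, which is what the 3-byte memcpy above relies on. */
        be_id = htonl(0x010203);                /* hypothetical D_ID */
        memcpy(r_id, (uint8_t *)&be_id + 1, 3);
        printf("r_id = %02x.%02x.%02x\n", r_id[0], r_id[1], r_id[2]);
        return 0;
}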
1696 | 1695 | ||
1697 | /* | 1696 | /* |
1698 | * csio_ln_mgmt_submit_wr - Post elsct work request. | 1697 | * csio_ln_mgmt_submit_wr - Post elsct work request. |
1699 | * @mgmtm - mgmtm | 1698 | * @mgmtm - mgmtm |
1700 | * @io_req - io request. | 1699 | * @io_req - io request. |
1701 | * @sub_op - ELS or CT request type | 1700 | * @sub_op - ELS or CT request type |
1702 | * @pld - Dma Payload buffer | 1701 | * @pld - Dma Payload buffer |
1703 | * @pld_len - Payload len | 1702 | * @pld_len - Payload len |
1704 | * Prepares an ELS/CT work request and sends it to the FW. | 1703 | * Prepares an ELS/CT work request and sends it to the FW. |
1705 | * Returns: 0 - on success | 1704 | * Returns: 0 - on success |
1706 | */ | 1705 | */ |
1707 | static int | 1706 | static int |
1708 | csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, | 1707 | csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req, |
1709 | uint8_t sub_op, struct csio_dma_buf *pld, | 1708 | uint8_t sub_op, struct csio_dma_buf *pld, |
1710 | uint32_t pld_len) | 1709 | uint32_t pld_len) |
1711 | { | 1710 | { |
1712 | struct csio_wr_pair wrp; | 1711 | struct csio_wr_pair wrp; |
1713 | struct csio_lnode *ln = io_req->lnode; | 1712 | struct csio_lnode *ln = io_req->lnode; |
1714 | struct csio_rnode *rn = io_req->rnode; | 1713 | struct csio_rnode *rn = io_req->rnode; |
1715 | struct csio_hw *hw = mgmtm->hw; | 1714 | struct csio_hw *hw = mgmtm->hw; |
1716 | uint8_t fw_wr[64]; | 1715 | uint8_t fw_wr[64]; |
1717 | struct ulptx_sgl dsgl; | 1716 | struct ulptx_sgl dsgl; |
1718 | uint32_t wr_size = 0; | 1717 | uint32_t wr_size = 0; |
1719 | uint8_t im_len = 0; | 1718 | uint8_t im_len = 0; |
1720 | uint32_t wr_off = 0; | 1719 | uint32_t wr_off = 0; |
1721 | 1720 | ||
1722 | int ret = 0; | 1721 | int ret = 0; |
1723 | 1722 | ||
1724 | /* Calculate WR Size for this ELS REQ */ | 1723 | /* Calculate WR Size for this ELS REQ */ |
1725 | wr_size = sizeof(struct fw_fcoe_els_ct_wr); | 1724 | wr_size = sizeof(struct fw_fcoe_els_ct_wr); |
1726 | 1725 | ||
1727 | /* Send as immediate data if pld < 256 */ | 1726 | /* Send as immediate data if pld < 256 */ |
1728 | if (pld_len < 256) { | 1727 | if (pld_len < 256) { |
1729 | wr_size += ALIGN(pld_len, 8); | 1728 | wr_size += ALIGN(pld_len, 8); |
1730 | im_len = (uint8_t)pld_len; | 1729 | im_len = (uint8_t)pld_len; |
1731 | } else | 1730 | } else |
1732 | wr_size += sizeof(struct ulptx_sgl); | 1731 | wr_size += sizeof(struct ulptx_sgl); |
1733 | 1732 | ||
1734 | /* Roundup WR size in units of 16 bytes */ | 1733 | /* Roundup WR size in units of 16 bytes */ |
1735 | wr_size = ALIGN(wr_size, 16); | 1734 | wr_size = ALIGN(wr_size, 16); |
1736 | 1735 | ||
1737 | /* Get WR to send ELS REQ */ | 1736 | /* Get WR to send ELS REQ */ |
1738 | ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp); | 1737 | ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp); |
1739 | if (ret != 0) { | 1738 | if (ret != 0) { |
1740 | csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n", | 1739 | csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n", |
1741 | io_req, ret); | 1740 | io_req, ret); |
1742 | return ret; | 1741 | return ret; |
1743 | } | 1742 | } |
1744 | 1743 | ||
1745 | /* Prepare Generic WR used by all ELS/CT cmd */ | 1744 | /* Prepare Generic WR used by all ELS/CT cmd */ |
1746 | csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op, | 1745 | csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op, |
1747 | ln->nport_id, rn->nport_id, | 1746 | ln->nport_id, rn->nport_id, |
1748 | csio_rn_flowid(rn), | 1747 | csio_rn_flowid(rn), |
1749 | &fw_wr[0]); | 1748 | &fw_wr[0]); |
1750 | 1749 | ||
1751 | /* Copy ELS/CT WR CMD */ | 1750 | /* Copy ELS/CT WR CMD */ |
1752 | csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off, | 1751 | csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off, |
1753 | sizeof(struct fw_fcoe_els_ct_wr)); | 1752 | sizeof(struct fw_fcoe_els_ct_wr)); |
1754 | wr_off += sizeof(struct fw_fcoe_els_ct_wr); | 1753 | wr_off += sizeof(struct fw_fcoe_els_ct_wr); |
1755 | 1754 | ||
1756 | /* Copy payload to Immediate section of WR */ | 1755 | /* Copy payload to Immediate section of WR */ |
1757 | if (im_len) | 1756 | if (im_len) |
1758 | csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); | 1757 | csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len); |
1759 | else { | 1758 | else { |
1760 | /* Program DSGL to dma payload */ | 1759 | /* Program DSGL to dma payload */ |
1761 | dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | | 1760 | dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | |
1762 | ULPTX_MORE | ULPTX_NSGE(1)); | 1761 | ULPTX_MORE | ULPTX_NSGE(1)); |
1763 | dsgl.len0 = cpu_to_be32(pld_len); | 1762 | dsgl.len0 = cpu_to_be32(pld_len); |
1764 | dsgl.addr0 = cpu_to_be64(pld->paddr); | 1763 | dsgl.addr0 = cpu_to_be64(pld->paddr); |
1765 | csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8), | 1764 | csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8), |
1766 | sizeof(struct ulptx_sgl)); | 1765 | sizeof(struct ulptx_sgl)); |
1767 | } | 1766 | } |
1768 | 1767 | ||
1769 | /* Issue work request to xmit ELS/CT req to FW */ | 1768 | /* Issue work request to xmit ELS/CT req to FW */ |
1770 | csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false); | 1769 | csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false); |
1771 | return ret; | 1770 | return ret; |
1772 | } | 1771 | } |
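
The sizing logic in csio_ln_mgmt_submit_wr() is compact but easy to misread: payloads under 256 bytes ride inline (padded to 8 bytes), larger payloads are described by a single ulptx_sgl, and the final WR length is rounded up to a 16-byte multiple. A minimal user-space model of just that rule follows; the two structure sizes are stand-ins, since the real definitions are not part of this hunk.

#include <stdio.h>

/* Illustrative stand-ins only; the real sizeof() values are not visible here. */
#define ELS_CT_WR_SIZE  64U   /* assumed sizeof(struct fw_fcoe_els_ct_wr) */
#define ULPTX_SGL_SIZE  16U   /* assumed sizeof(struct ulptx_sgl), one entry */

static unsigned int align_up(unsigned int v, unsigned int a)
{
        return (v + a - 1) / a * a;
}

static unsigned int elsct_wr_size(unsigned int pld_len)
{
        unsigned int wr_size = ELS_CT_WR_SIZE;

        if (pld_len < 256)                       /* immediate payload */
                wr_size += align_up(pld_len, 8);
        else                                     /* payload via DSGL */
                wr_size += ULPTX_SGL_SIZE;

        return align_up(wr_size, 16);            /* WR issued in 16B units */
}

int main(void)
{
        printf("%u %u %u\n",
               elsct_wr_size(116),    /* inline:   64 + 120 = 184 -> 192 */
               elsct_wr_size(255),    /* inline:   64 + 256 = 320       */
               elsct_wr_size(2048));  /* via SGL:  64 + 16  = 80        */
        return 0;
}
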
1773 | 1772 | ||
1774 | /* | 1773 | /* |
1775 | * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request. | 1774 | * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request. |
1776 | * @io_req - IO Request | 1775 | * @io_req - IO Request |
1777 | * @io_cbfn - Completion handler. | 1776 | * @io_cbfn - Completion handler. |
1778 | * @req_type - ELS or CT request type | 1777 | * @req_type - ELS or CT request type |
1779 | * @pld - Dma Payload buffer | 1778 | * @pld - Dma Payload buffer |
1780 | * @pld_len - Payload len | 1779 | * @pld_len - Payload len |
1781 | * | 1780 | * |
1782 | * | 1781 | * |
1783 | * This API is used to submit a management ELS/CT request. | 1782 | * This API is used to submit a management ELS/CT request. |
1784 | * This is called with the hw lock held. | 1783 | * This is called with the hw lock held. |
1785 | * Returns: 0 - on success | 1784 | * Returns: 0 - on success |
1786 | * -ENOMEM - on error. | 1785 | * -ENOMEM - on error. |
1787 | */ | 1786 | */ |
1788 | static int | 1787 | static int |
1789 | csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, | 1788 | csio_ln_mgmt_submit_req(struct csio_ioreq *io_req, |
1790 | void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), | 1789 | void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *), |
1791 | enum fcoe_cmn_type req_type, struct csio_dma_buf *pld, | 1790 | enum fcoe_cmn_type req_type, struct csio_dma_buf *pld, |
1792 | uint32_t pld_len) | 1791 | uint32_t pld_len) |
1793 | { | 1792 | { |
1794 | struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode); | 1793 | struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode); |
1795 | struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); | 1794 | struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw); |
1796 | int rv; | 1795 | int rv; |
1797 | 1796 | ||
1798 | io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */ | 1797 | io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */ |
1799 | io_req->fw_handle = (uintptr_t) (io_req); | 1798 | io_req->fw_handle = (uintptr_t) (io_req); |
1800 | io_req->eq_idx = mgmtm->eq_idx; | 1799 | io_req->eq_idx = mgmtm->eq_idx; |
1801 | io_req->iq_idx = mgmtm->iq_idx; | 1800 | io_req->iq_idx = mgmtm->iq_idx; |
1802 | 1801 | ||
1803 | rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len); | 1802 | rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len); |
1804 | if (rv == 0) { | 1803 | if (rv == 0) { |
1805 | list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q); | 1804 | list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q); |
1806 | mgmtm->stats.n_active++; | 1805 | mgmtm->stats.n_active++; |
1807 | } | 1806 | } |
1808 | return rv; | 1807 | return rv; |
1809 | } | 1808 | } |
1810 | 1809 | ||
1811 | /* | 1810 | /* |
1812 | * csio_ln_fdmi_init - FDMI Init entry point. | 1811 | * csio_ln_fdmi_init - FDMI Init entry point. |
1813 | * @ln: lnode | 1812 | * @ln: lnode |
1814 | */ | 1813 | */ |
1815 | static int | 1814 | static int |
1816 | csio_ln_fdmi_init(struct csio_lnode *ln) | 1815 | csio_ln_fdmi_init(struct csio_lnode *ln) |
1817 | { | 1816 | { |
1818 | struct csio_hw *hw = csio_lnode_to_hw(ln); | 1817 | struct csio_hw *hw = csio_lnode_to_hw(ln); |
1819 | struct csio_dma_buf *dma_buf; | 1818 | struct csio_dma_buf *dma_buf; |
1820 | 1819 | ||
1821 | /* Allocate MGMT request required for FDMI */ | 1820 | /* Allocate MGMT request required for FDMI */ |
1822 | ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); | 1821 | ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); |
1823 | if (!ln->mgmt_req) { | 1822 | if (!ln->mgmt_req) { |
1824 | csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n"); | 1823 | csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n"); |
1825 | CSIO_INC_STATS(hw, n_err_nomem); | 1824 | CSIO_INC_STATS(hw, n_err_nomem); |
1826 | return -ENOMEM; | 1825 | return -ENOMEM; |
1827 | } | 1826 | } |
1828 | 1827 | ||
1829 | /* Allocate Dma buffers for FDMI response Payload */ | 1828 | /* Allocate Dma buffers for FDMI response Payload */ |
1830 | dma_buf = &ln->mgmt_req->dma_buf; | 1829 | dma_buf = &ln->mgmt_req->dma_buf; |
1831 | dma_buf->len = 2048; | 1830 | dma_buf->len = 2048; |
1832 | dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len, | 1831 | dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len, |
1833 | &dma_buf->paddr); | 1832 | &dma_buf->paddr); |
1834 | if (!dma_buf->vaddr) { | 1833 | if (!dma_buf->vaddr) { |
1835 | csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n"); | 1834 | csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n"); |
1836 | kfree(ln->mgmt_req); | 1835 | kfree(ln->mgmt_req); |
1837 | ln->mgmt_req = NULL; | 1836 | ln->mgmt_req = NULL; |
1838 | return -ENOMEM; | 1837 | return -ENOMEM; |
1839 | } | 1838 | } |
1840 | 1839 | ||
1841 | ln->flags |= CSIO_LNF_FDMI_ENABLE; | 1840 | ln->flags |= CSIO_LNF_FDMI_ENABLE; |
1842 | return 0; | 1841 | return 0; |
1843 | } | 1842 | } |
1844 | 1843 | ||
1845 | /* | 1844 | /* |
1846 | * csio_ln_fdmi_exit - FDMI exit entry point. | 1845 | * csio_ln_fdmi_exit - FDMI exit entry point. |
1847 | * @ln: lnode | 1846 | * @ln: lnode |
1848 | */ | 1847 | */ |
1849 | static int | 1848 | static int |
1850 | csio_ln_fdmi_exit(struct csio_lnode *ln) | 1849 | csio_ln_fdmi_exit(struct csio_lnode *ln) |
1851 | { | 1850 | { |
1852 | struct csio_dma_buf *dma_buf; | 1851 | struct csio_dma_buf *dma_buf; |
1853 | struct csio_hw *hw = csio_lnode_to_hw(ln); | 1852 | struct csio_hw *hw = csio_lnode_to_hw(ln); |
1854 | 1853 | ||
1855 | if (!ln->mgmt_req) | 1854 | if (!ln->mgmt_req) |
1856 | return 0; | 1855 | return 0; |
1857 | 1856 | ||
1858 | dma_buf = &ln->mgmt_req->dma_buf; | 1857 | dma_buf = &ln->mgmt_req->dma_buf; |
1859 | if (dma_buf->vaddr) | 1858 | if (dma_buf->vaddr) |
1860 | pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr, | 1859 | pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr, |
1861 | dma_buf->paddr); | 1860 | dma_buf->paddr); |
1862 | 1861 | ||
1863 | kfree(ln->mgmt_req); | 1862 | kfree(ln->mgmt_req); |
1864 | return 0; | 1863 | return 0; |
1865 | } | 1864 | } |
1866 | 1865 | ||
1867 | int | 1866 | int |
1868 | csio_scan_done(struct csio_lnode *ln, unsigned long ticks, | 1867 | csio_scan_done(struct csio_lnode *ln, unsigned long ticks, |
1869 | unsigned long time, unsigned long max_scan_ticks, | 1868 | unsigned long time, unsigned long max_scan_ticks, |
1870 | unsigned long delta_scan_ticks) | 1869 | unsigned long delta_scan_ticks) |
1871 | { | 1870 | { |
1872 | int rv = 0; | 1871 | int rv = 0; |
1873 | 1872 | ||
1874 | if (time >= max_scan_ticks) | 1873 | if (time >= max_scan_ticks) |
1875 | return 1; | 1874 | return 1; |
1876 | 1875 | ||
1877 | if (!ln->tgt_scan_tick) | 1876 | if (!ln->tgt_scan_tick) |
1878 | ln->tgt_scan_tick = ticks; | 1877 | ln->tgt_scan_tick = ticks; |
1879 | 1878 | ||
1880 | if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) { | 1879 | if (((ticks - ln->tgt_scan_tick) >= delta_scan_ticks)) { |
1881 | if (!ln->last_scan_ntgts) | 1880 | if (!ln->last_scan_ntgts) |
1882 | ln->last_scan_ntgts = ln->n_scsi_tgts; | 1881 | ln->last_scan_ntgts = ln->n_scsi_tgts; |
1883 | else { | 1882 | else { |
1884 | if (ln->last_scan_ntgts == ln->n_scsi_tgts) | 1883 | if (ln->last_scan_ntgts == ln->n_scsi_tgts) |
1885 | return 1; | 1884 | return 1; |
1886 | 1885 | ||
1887 | ln->last_scan_ntgts = ln->n_scsi_tgts; | 1886 | ln->last_scan_ntgts = ln->n_scsi_tgts; |
1888 | } | 1887 | } |
1889 | ln->tgt_scan_tick = ticks; | 1888 | ln->tgt_scan_tick = ticks; |
1890 | } | 1889 | } |
1891 | return rv; | 1890 | return rv; |
1892 | } | 1891 | } |
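
csio_scan_done() encodes a two-part termination rule: stop once max_scan_ticks of total scan time has elapsed, or declare the scan complete once the number of discovered SCSI targets has stayed unchanged across a full delta_scan_ticks window. A self-contained user-space model of that rule, with made-up tick values (field and parameter names mirror the lnode code, none of the data is the driver's):

#include <stdio.h>

struct model_ln {
        unsigned long tgt_scan_tick;
        unsigned int  last_scan_ntgts;
        unsigned int  n_scsi_tgts;
};

static int scan_done(struct model_ln *ln, unsigned long ticks,
                     unsigned long time, unsigned long max_scan_ticks,
                     unsigned long delta_scan_ticks)
{
        if (time >= max_scan_ticks)            /* hard cap on total scan time */
                return 1;

        if (!ln->tgt_scan_tick)
                ln->tgt_scan_tick = ticks;     /* open the first delta window */

        if ((ticks - ln->tgt_scan_tick) >= delta_scan_ticks) {
                if (ln->last_scan_ntgts &&
                    ln->last_scan_ntgts == ln->n_scsi_tgts)
                        return 1;              /* target count stable: done */
                ln->last_scan_ntgts = ln->n_scsi_tgts;
                ln->tgt_scan_tick = ticks;     /* open the next window */
        }
        return 0;
}

int main(void)
{
        struct model_ln ln = { 0, 0, 0 };
        unsigned int tgts[] = { 2, 5, 5, 5 };  /* targets seen as time passes */
        unsigned long i;

        for (i = 0; i < 4; i++) {
                unsigned long t = 5 + i * 10;  /* ticks: 5, 15, 25, 35 */

                ln.n_scsi_tgts = tgts[i];
                printf("tick %lu: done=%d\n", t,
                       scan_done(&ln, t, t, 1000, 10));
        }
        return 0;   /* prints done=0, 0, 1, 1 */
}
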
1893 | 1892 | ||
1894 | /* | 1893 | /* |
1895 | * csio_notify_lnodes: | 1894 | * csio_notify_lnodes: |
1896 | * @hw: HW module | 1895 | * @hw: HW module |
1897 | * @note: Notification | 1896 | * @note: Notification |
1898 | * | 1897 | * |
1899 | * Called from the HW SM to fan out notifications to the | 1898 | * Called from the HW SM to fan out notifications to the |
1900 | * Lnode SM. Since the HW SM is entered with lock held, | 1899 | * Lnode SM. Since the HW SM is entered with lock held, |
1901 | * there is no need to hold locks here. | 1900 | * there is no need to hold locks here. |
1902 | * | 1901 | * |
1903 | */ | 1902 | */ |
1904 | void | 1903 | void |
1905 | csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note) | 1904 | csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note) |
1906 | { | 1905 | { |
1907 | struct list_head *tmp; | 1906 | struct list_head *tmp; |
1908 | struct csio_lnode *ln; | 1907 | struct csio_lnode *ln; |
1909 | 1908 | ||
1910 | csio_dbg(hw, "Notifying all nodes of event %d\n", note); | 1909 | csio_dbg(hw, "Notifying all nodes of event %d\n", note); |
1911 | 1910 | ||
1912 | /* Traverse sibling lnodes list and send evt */ | 1911 | /* Traverse sibling lnodes list and send evt */ |
1913 | list_for_each(tmp, &hw->sln_head) { | 1912 | list_for_each(tmp, &hw->sln_head) { |
1914 | ln = (struct csio_lnode *) tmp; | 1913 | ln = (struct csio_lnode *) tmp; |
1915 | 1914 | ||
1916 | switch (note) { | 1915 | switch (note) { |
1917 | case CSIO_LN_NOTIFY_HWREADY: | 1916 | case CSIO_LN_NOTIFY_HWREADY: |
1918 | csio_lnode_start(ln); | 1917 | csio_lnode_start(ln); |
1919 | break; | 1918 | break; |
1920 | 1919 | ||
1921 | case CSIO_LN_NOTIFY_HWRESET: | 1920 | case CSIO_LN_NOTIFY_HWRESET: |
1922 | case CSIO_LN_NOTIFY_HWREMOVE: | 1921 | case CSIO_LN_NOTIFY_HWREMOVE: |
1923 | csio_lnode_close(ln); | 1922 | csio_lnode_close(ln); |
1924 | break; | 1923 | break; |
1925 | 1924 | ||
1926 | case CSIO_LN_NOTIFY_HWSTOP: | 1925 | case CSIO_LN_NOTIFY_HWSTOP: |
1927 | csio_lnode_stop(ln); | 1926 | csio_lnode_stop(ln); |
1928 | break; | 1927 | break; |
1929 | 1928 | ||
1930 | default: | 1929 | default: |
1931 | break; | 1930 | break; |
1932 | 1931 | ||
1933 | } | 1932 | } |
1934 | } | 1933 | } |
1935 | } | 1934 | } |
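
Both traversal loops in this file cast the raw struct list_head pointer straight to a struct csio_lnode, which is safe only if the embedded list node sits at offset zero of the lnode (an assumption not visible in this hunk). The general idiom is list_entry()/container_of(); a small user-space model shows why the raw cast and container_of() agree only in the offset-zero case.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Same idea as the kernel's container_of()/list_entry() helpers. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct node {
        struct list_head list;   /* at offset 0, so the raw cast also works */
        int portid;
};

int main(void)
{
        struct node n = { { NULL, NULL }, 3 };
        struct list_head *tmp = &n.list;

        /* The driver-style cast and container_of() give the same pointer
         * here only because 'list' is the first member of struct node. */
        printf("%d %d\n", ((struct node *)tmp)->portid,
               container_of(tmp, struct node, list)->portid);
        return 0;
}
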
1936 | 1935 | ||
1937 | /* | 1936 | /* |
1938 | * csio_disable_lnodes: | 1937 | * csio_disable_lnodes: |
1939 | * @hw: HW module | 1938 | * @hw: HW module |
1940 | * @portid: port id | 1939 | * @portid: port id |
1941 | * @disable: disable/enable flag. | 1940 | * @disable: disable/enable flag. |
1942 | * If disable=1, disables all lnodes hosted on the given physical port; | 1941 | * If disable=1, disables all lnodes hosted on the given physical port; |
1943 | * otherwise enables all the lnodes on the given physical port. | 1942 | * otherwise enables all the lnodes on the given physical port. |
1944 | * This routine needs to be called with the hw lock held. | 1943 | * This routine needs to be called with the hw lock held. |
1945 | */ | 1944 | */ |
1946 | void | 1945 | void |
1947 | csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable) | 1946 | csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable) |
1948 | { | 1947 | { |
1949 | struct list_head *tmp; | 1948 | struct list_head *tmp; |
1950 | struct csio_lnode *ln; | 1949 | struct csio_lnode *ln; |
1951 | 1950 | ||
1952 | csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid); | 1951 | csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid); |
1953 | 1952 | ||
1954 | /* Traverse sibling lnodes list and send evt */ | 1953 | /* Traverse sibling lnodes list and send evt */ |
1955 | list_for_each(tmp, &hw->sln_head) { | 1954 | list_for_each(tmp, &hw->sln_head) { |
1956 | ln = (struct csio_lnode *) tmp; | 1955 | ln = (struct csio_lnode *) tmp; |
1957 | if (ln->portid != portid) | 1956 | if (ln->portid != portid) |
1958 | continue; | 1957 | continue; |
1959 | 1958 | ||
1960 | if (disable) | 1959 | if (disable) |
1961 | csio_lnode_stop(ln); | 1960 | csio_lnode_stop(ln); |
1962 | else | 1961 | else |
1963 | csio_lnode_start(ln); | 1962 | csio_lnode_start(ln); |
1964 | } | 1963 | } |
1965 | } | 1964 | } |
1966 | 1965 | ||
1967 | /* | 1966 | /* |
1968 | * csio_ln_init - Initialize an lnode. | 1967 | * csio_ln_init - Initialize an lnode. |
1969 | * @ln: lnode | 1968 | * @ln: lnode |
1970 | * | 1969 | * |
1971 | */ | 1970 | */ |
1972 | static int | 1971 | static int |
1973 | csio_ln_init(struct csio_lnode *ln) | 1972 | csio_ln_init(struct csio_lnode *ln) |
1974 | { | 1973 | { |
1975 | int rv = -EINVAL; | 1974 | int rv = -EINVAL; |
1976 | struct csio_lnode *rln, *pln; | 1975 | struct csio_lnode *rln, *pln; |
1977 | struct csio_hw *hw = csio_lnode_to_hw(ln); | 1976 | struct csio_hw *hw = csio_lnode_to_hw(ln); |
1978 | 1977 | ||
1979 | csio_init_state(&ln->sm, csio_lns_uninit); | 1978 | csio_init_state(&ln->sm, csio_lns_uninit); |
1980 | ln->vnp_flowid = CSIO_INVALID_IDX; | 1979 | ln->vnp_flowid = CSIO_INVALID_IDX; |
1981 | ln->fcf_flowid = CSIO_INVALID_IDX; | 1980 | ln->fcf_flowid = CSIO_INVALID_IDX; |
1982 | 1981 | ||
1983 | if (csio_is_root_ln(ln)) { | 1982 | if (csio_is_root_ln(ln)) { |
1984 | 1983 | ||
1985 | /* This is the lnode used during initialization */ | 1984 | /* This is the lnode used during initialization */ |
1986 | 1985 | ||
1987 | ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL); | 1986 | ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL); |
1988 | if (!ln->fcfinfo) { | 1987 | if (!ln->fcfinfo) { |
1989 | csio_ln_err(ln, "Failed to alloc FCF record\n"); | 1988 | csio_ln_err(ln, "Failed to alloc FCF record\n"); |
1990 | CSIO_INC_STATS(hw, n_err_nomem); | 1989 | CSIO_INC_STATS(hw, n_err_nomem); |
1991 | goto err; | 1990 | goto err; |
1992 | } | 1991 | } |
1993 | 1992 | ||
1994 | INIT_LIST_HEAD(&ln->fcf_lsthead); | 1993 | INIT_LIST_HEAD(&ln->fcf_lsthead); |
1995 | kref_init(&ln->fcfinfo->kref); | 1994 | kref_init(&ln->fcfinfo->kref); |
1996 | 1995 | ||
1997 | if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) | 1996 | if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) |
1998 | goto err; | 1997 | goto err; |
1999 | 1998 | ||
2000 | } else { /* Either a non-root physical or a virtual lnode */ | 1999 | } else { /* Either a non-root physical or a virtual lnode */ |
2001 | 2000 | ||
2002 | /* | 2001 | /* |
2003 | * The rest is common for non-root physical and NPIV lnodes. | 2002 | * The rest is common for non-root physical and NPIV lnodes. |
2004 | * Just get references to all other modules | 2003 | * Just get references to all other modules |
2005 | */ | 2004 | */ |
2006 | rln = csio_root_lnode(ln); | 2005 | rln = csio_root_lnode(ln); |
2007 | 2006 | ||
2008 | if (csio_is_npiv_ln(ln)) { | 2007 | if (csio_is_npiv_ln(ln)) { |
2009 | /* NPIV */ | 2008 | /* NPIV */ |
2010 | pln = csio_parent_lnode(ln); | 2009 | pln = csio_parent_lnode(ln); |
2011 | kref_get(&pln->fcfinfo->kref); | 2010 | kref_get(&pln->fcfinfo->kref); |
2012 | ln->fcfinfo = pln->fcfinfo; | 2011 | ln->fcfinfo = pln->fcfinfo; |
2013 | } else { | 2012 | } else { |
2014 | /* Another non-root physical lnode (FCF) */ | 2013 | /* Another non-root physical lnode (FCF) */ |
2015 | ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), | 2014 | ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), |
2016 | GFP_KERNEL); | 2015 | GFP_KERNEL); |
2017 | if (!ln->fcfinfo) { | 2016 | if (!ln->fcfinfo) { |
2018 | csio_ln_err(ln, "Failed to alloc FCF info\n"); | 2017 | csio_ln_err(ln, "Failed to alloc FCF info\n"); |
2019 | CSIO_INC_STATS(hw, n_err_nomem); | 2018 | CSIO_INC_STATS(hw, n_err_nomem); |
2020 | goto err; | 2019 | goto err; |
2021 | } | 2020 | } |
2022 | 2021 | ||
2023 | kref_init(&ln->fcfinfo->kref); | 2022 | kref_init(&ln->fcfinfo->kref); |
2024 | 2023 | ||
2025 | if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) | 2024 | if (csio_fdmi_enable && csio_ln_fdmi_init(ln)) |
2026 | goto err; | 2025 | goto err; |
2027 | } | 2026 | } |
2028 | 2027 | ||
2029 | } /* if (!csio_is_root_ln(ln)) */ | 2028 | } /* if (!csio_is_root_ln(ln)) */ |
2030 | 2029 | ||
2031 | return 0; | 2030 | return 0; |
2032 | err: | 2031 | err: |
2033 | return rv; | 2032 | return rv; |
2034 | } | 2033 | } |
2035 | 2034 | ||
2036 | static void | 2035 | static void |
2037 | csio_ln_exit(struct csio_lnode *ln) | 2036 | csio_ln_exit(struct csio_lnode *ln) |
2038 | { | 2037 | { |
2039 | struct csio_lnode *pln; | 2038 | struct csio_lnode *pln; |
2040 | 2039 | ||
2041 | csio_cleanup_rns(ln); | 2040 | csio_cleanup_rns(ln); |
2042 | if (csio_is_npiv_ln(ln)) { | 2041 | if (csio_is_npiv_ln(ln)) { |
2043 | pln = csio_parent_lnode(ln); | 2042 | pln = csio_parent_lnode(ln); |
2044 | kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo); | 2043 | kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo); |
2045 | } else { | 2044 | } else { |
2046 | kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo); | 2045 | kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo); |
2047 | if (csio_fdmi_enable) | 2046 | if (csio_fdmi_enable) |
2048 | csio_ln_fdmi_exit(ln); | 2047 | csio_ln_fdmi_exit(ln); |
2049 | } | 2048 | } |
2050 | ln->fcfinfo = NULL; | 2049 | ln->fcfinfo = NULL; |
2051 | } | 2050 | } |
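
csio_ln_init() and csio_ln_exit() share one csio_fcf_info between a physical lnode and its NPIV children through kref_get()/kref_put(), so the FCF record is freed only when the last lnode referencing it drops its reference. A stripped-down user-space model of that lifetime rule, using a plain counter instead of the kernel's struct kref (all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct fcf_info {
        int refcount;
        int fcfi;                       /* illustrative payload */
};

static struct fcf_info *fcf_alloc(int fcfi)
{
        struct fcf_info *f = calloc(1, sizeof(*f));

        if (f) {
                f->refcount = 1;        /* like kref_init(): owner holds one ref */
                f->fcfi = fcfi;
        }
        return f;
}

static void fcf_get(struct fcf_info *f)
{
        f->refcount++;                  /* like kref_get(): NPIV child shares it */
}

static void fcf_put(struct fcf_info *f)
{
        if (--f->refcount == 0) {       /* like kref_put()'s release callback */
                printf("freeing FCF record %d\n", f->fcfi);
                free(f);
        }
}

int main(void)
{
        struct fcf_info *fcf = fcf_alloc(7);   /* root/physical lnode init */

        fcf_get(fcf);                          /* NPIV child lnode init */
        fcf_put(fcf);                          /* child exits: record still alive */
        fcf_put(fcf);                          /* parent exits: record now freed */
        return 0;
}
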
2052 | 2051 | ||
2053 | /** | 2052 | /** |
2054 | * csio_lnode_init - Initialize the members of an lnode. | 2053 | * csio_lnode_init - Initialize the members of an lnode. |
2055 | * @ln: lnode | 2054 | * @ln: lnode |
2056 | * | 2055 | * |
2057 | */ | 2056 | */ |
2058 | int | 2057 | int |
2059 | csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw, | 2058 | csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw, |
2060 | struct csio_lnode *pln) | 2059 | struct csio_lnode *pln) |
2061 | { | 2060 | { |
2062 | int rv = -EINVAL; | 2061 | int rv = -EINVAL; |
2063 | 2062 | ||
2064 | /* Link this lnode to hw */ | 2063 | /* Link this lnode to hw */ |
2065 | csio_lnode_to_hw(ln) = hw; | 2064 | csio_lnode_to_hw(ln) = hw; |
2066 | 2065 | ||
2067 | /* Link child to parent if child lnode */ | 2066 | /* Link child to parent if child lnode */ |
2068 | if (pln) | 2067 | if (pln) |
2069 | ln->pln = pln; | 2068 | ln->pln = pln; |
2070 | else | 2069 | else |
2071 | ln->pln = NULL; | 2070 | ln->pln = NULL; |
2072 | 2071 | ||
2073 | /* Initialize scsi_tgt and timers to zero */ | 2072 | /* Initialize scsi_tgt and timers to zero */ |
2074 | ln->n_scsi_tgts = 0; | 2073 | ln->n_scsi_tgts = 0; |
2075 | ln->last_scan_ntgts = 0; | 2074 | ln->last_scan_ntgts = 0; |
2076 | ln->tgt_scan_tick = 0; | 2075 | ln->tgt_scan_tick = 0; |
2077 | 2076 | ||
2078 | /* Initialize rnode list */ | 2077 | /* Initialize rnode list */ |
2079 | INIT_LIST_HEAD(&ln->rnhead); | 2078 | INIT_LIST_HEAD(&ln->rnhead); |
2080 | INIT_LIST_HEAD(&ln->cln_head); | 2079 | INIT_LIST_HEAD(&ln->cln_head); |
2081 | 2080 | ||
2082 | /* Initialize log level for debug */ | 2081 | /* Initialize log level for debug */ |
2083 | ln->params.log_level = hw->params.log_level; | 2082 | ln->params.log_level = hw->params.log_level; |
2084 | 2083 | ||
2085 | if (csio_ln_init(ln)) | 2084 | if (csio_ln_init(ln)) |
2086 | goto err; | 2085 | goto err; |
2087 | 2086 | ||
2088 | /* Add lnode to list of sibling or child lnodes */ | 2087 | /* Add lnode to list of sibling or child lnodes */ |
2089 | spin_lock_irq(&hw->lock); | 2088 | spin_lock_irq(&hw->lock); |
2090 | list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head); | 2089 | list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head); |
2091 | if (pln) | 2090 | if (pln) |
2092 | pln->num_vports++; | 2091 | pln->num_vports++; |
2093 | spin_unlock_irq(&hw->lock); | 2092 | spin_unlock_irq(&hw->lock); |
2094 | 2093 | ||
2095 | hw->num_lns++; | 2094 | hw->num_lns++; |
2096 | 2095 | ||
2097 | return 0; | 2096 | return 0; |
2098 | err: | 2097 | err: |
2099 | csio_lnode_to_hw(ln) = NULL; | 2098 | csio_lnode_to_hw(ln) = NULL; |
2100 | return rv; | 2099 | return rv; |
2101 | } | 2100 | } |
2102 | 2101 | ||
2103 | /** | 2102 | /** |
2104 | * csio_lnode_exit - De-instantiate an lnode. | 2103 | * csio_lnode_exit - De-instantiate an lnode. |
2105 | * @ln: lnode | 2104 | * @ln: lnode |
2106 | * | 2105 | * |
2107 | */ | 2106 | */ |
2108 | void | 2107 | void |
2109 | csio_lnode_exit(struct csio_lnode *ln) | 2108 | csio_lnode_exit(struct csio_lnode *ln) |
2110 | { | 2109 | { |
2111 | struct csio_hw *hw = csio_lnode_to_hw(ln); | 2110 | struct csio_hw *hw = csio_lnode_to_hw(ln); |
2112 | 2111 | ||
2113 | csio_ln_exit(ln); | 2112 | csio_ln_exit(ln); |
2114 | 2113 | ||
2115 | /* Remove this lnode from hw->sln_head */ | 2114 | /* Remove this lnode from hw->sln_head */ |
2116 | spin_lock_irq(&hw->lock); | 2115 | spin_lock_irq(&hw->lock); |
2117 | 2116 | ||
2118 | list_del_init(&ln->sm.sm_list); | 2117 | list_del_init(&ln->sm.sm_list); |
2119 | 2118 | ||
2120 | /* If it is a child lnode, decrement the | 2119 | /* If it is a child lnode, decrement the |
2121 | * counter in its parent lnode | 2120 | * counter in its parent lnode |
2122 | */ | 2121 | */ |
2123 | if (ln->pln) | 2122 | if (ln->pln) |
2124 | ln->pln->num_vports--; | 2123 | ln->pln->num_vports--; |
2125 | 2124 | ||
2126 | /* Update root lnode pointer */ | 2125 | /* Update root lnode pointer */ |
2127 | if (list_empty(&hw->sln_head)) | 2126 | if (list_empty(&hw->sln_head)) |
2128 | hw->rln = NULL; | 2127 | hw->rln = NULL; |
2129 | else | 2128 | else |
2130 | hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head); | 2129 | hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head); |
2131 | 2130 | ||
2132 | spin_unlock_irq(&hw->lock); | 2131 | spin_unlock_irq(&hw->lock); |
2133 | 2132 | ||
2134 | csio_lnode_to_hw(ln) = NULL; | 2133 | csio_lnode_to_hw(ln) = NULL; |
2135 | hw->num_lns--; | 2134 | hw->num_lns--; |
2136 | } | 2135 | } |
2137 | 2136 |