Commit 22c0b963d7400971f4c5a1a67b083e3742996640

Authored by Hariprasad Shenai
Committed by David S. Miller
1 parent 6e36145d4e

cxgb4: Fix FW flash logic using ethtool

Use t4_fw_upgrade instead of t4_load_fw to write firmware into FLASH, since
t4_load_fw doesn't co-ordinate with the firmware and the adapter can get hosed
enough to require a power cycle of the system.

Based on original work by Casey Leedom <leedom@chelsio.com>

Signed-off-by: Hariprasad Shenai <hariprasad@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
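
The change itself is small: the driver's ethtool set_flash handler stops calling t4_load_fw and calls t4_fw_upgrade instead, so the flash write is negotiated with the running firmware. Below is a minimal sketch of the fixed handler, not a verbatim copy of the patch; it assumes the t4_fw_upgrade signature of this driver generation (adapter, mailbox, image, size, force flag) and elides the patch's mailbox-selection detail.

/* Sketch: flash firmware via ethtool using t4_fw_upgrade.  Unlike
 * t4_load_fw, t4_fw_upgrade coordinates with the running firmware
 * (halt, write FLASH, restart) so a failed write is far less likely
 * to leave the adapter in a state that needs a power cycle.
 */
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	/* Previously: ret = t4_load_fw(adap, fw->data, fw->size); */
	ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 1);

	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}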

Showing 3 changed files with 16 additions and 6 deletions

drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/io.h>
#include "cxgb4_uld.h"

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x0B
#define T4FW_VERSION_MICRO 0x1B
#define T4FW_VERSION_BUILD 0x00

#define T5FW_VERSION_MAJOR 0x01
#define T5FW_VERSION_MINOR 0x0B
#define T5FW_VERSION_MICRO 0x1B
#define T5FW_VERSION_BUILD 0x00

#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)

enum {
	MAX_NPORTS = 4,     /* max # of ports */
	SERNUM_LEN = 24,    /* Serial # length */
	EC_LEN     = 16,    /* E/C length */
	ID_LEN     = 16,    /* ID length */
	PN_LEN     = 16,    /* Part Number length */
};

enum {
	MEM_EDC0,
	MEM_EDC1,
	MEM_MC,
	MEM_MC0 = MEM_MC,
	MEM_MC1
};

enum {
	MEMWIN0_APERTURE = 2048,
	MEMWIN0_BASE     = 0x1b800,
	MEMWIN1_APERTURE = 32768,
	MEMWIN1_BASE     = 0x28000,
	MEMWIN1_BASE_T5  = 0x52000,
	MEMWIN2_APERTURE = 65536,
	MEMWIN2_BASE     = 0x30000,
	MEMWIN2_APERTURE_T5 = 131072,
	MEMWIN2_BASE_T5  = 0x60000,
};

enum dev_master {
	MASTER_CANT,
	MASTER_MAY,
	MASTER_MUST
};

enum dev_state {
	DEV_STATE_UNINIT,
	DEV_STATE_INIT,
	DEV_STATE_ERR
};

enum {
	PAUSE_RX      = 1 << 0,
	PAUSE_TX      = 1 << 1,
	PAUSE_AUTONEG = 1 << 2
};

struct port_stats {
	u64 tx_octets;            /* total # of octets in good frames */
	u64 tx_frames;            /* all good frames */
	u64 tx_bcast_frames;      /* all broadcast frames */
	u64 tx_mcast_frames;      /* all multicast frames */
	u64 tx_ucast_frames;      /* all unicast frames */
	u64 tx_error_frames;      /* all error frames */

	u64 tx_frames_64;         /* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 tx_drop;              /* # of dropped Tx frames */
	u64 tx_pause;             /* # of transmitted pause frames */
	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */

	u64 rx_octets;            /* total # of octets in good frames */
	u64 rx_frames;            /* all good frames */
	u64 rx_bcast_frames;      /* all broadcast frames */
	u64 rx_mcast_frames;      /* all multicast frames */
	u64 rx_ucast_frames;      /* all unicast frames */
	u64 rx_too_long;          /* # of frames exceeding MTU */
	u64 rx_jabber;            /* # of jabber frames */
	u64 rx_fcs_err;           /* # of received frames with bad FCS */
	u64 rx_len_err;           /* # of received frames with length error */
	u64 rx_symbol_err;        /* symbol errors */
	u64 rx_runt;              /* # of short frames */

	u64 rx_frames_64;         /* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;             /* # of received pause frames */
	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
	u64 rx_ppp7;              /* # of received PPP prio 7 frames */

	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
};

struct lb_port_stats {
	u64 octets;
	u64 frames;
	u64 bcast_frames;
	u64 mcast_frames;
	u64 ucast_frames;
	u64 error_frames;

	u64 frames_64;
	u64 frames_65_127;
	u64 frames_128_255;
	u64 frames_256_511;
	u64 frames_512_1023;
	u64 frames_1024_1518;
	u64 frames_1519_max;

	u64 drop;

	u64 ovflow0;
	u64 ovflow1;
	u64 ovflow2;
	u64 ovflow3;
	u64 trunc0;
	u64 trunc1;
	u64 trunc2;
	u64 trunc3;
};

struct tp_tcp_stats {
	u32 tcpOutRsts;
	u64 tcpInSegs;
	u64 tcpOutSegs;
	u64 tcpRetransSegs;
};

struct tp_err_stats {
	u32 macInErrs[4];
	u32 hdrInErrs[4];
	u32 tcpInErrs[4];
	u32 tnlCongDrops[4];
	u32 ofldChanDrops[4];
	u32 tnlTxDrops[4];
	u32 ofldVlanDrops[4];
	u32 tcp6InErrs[4];
	u32 ofldNoNeigh;
	u32 ofldCongDefer;
};

struct tp_params {
	unsigned int ntxchan;        /* # of Tx channels */
	unsigned int tre;            /* log2 of core clocks per TP tick */
	unsigned short tx_modq_map;  /* TX modulation scheduler queue to */
				     /* channel map */

	uint32_t dack_re;            /* DACK timer resolution */
	unsigned short tx_modq[NCHAN];  /* channel to modulation queue map */

	u32 vlan_pri_map;            /* cached TP_VLAN_PRI_MAP */
	u32 ingress_config;          /* cached TP_INGRESS_CONFIG */

	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int vlan_shift;
	int vnic_shift;
	int port_shift;
	int protocol_shift;
};

struct vpd_params {
	unsigned int cclk;
	u8 ec[EC_LEN + 1];
	u8 sn[SERNUM_LEN + 1];
	u8 id[ID_LEN + 1];
	u8 pn[PN_LEN + 1];
};

struct pci_params {
	unsigned char speed;
	unsigned char width;
};

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA          0x100
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5

enum chip_type {
	T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
	T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
	T4_FIRST_REV = T4_A1,
	T4_LAST_REV  = T4_A2,

	T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
	T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
	T5_FIRST_REV = T5_A0,
	T5_LAST_REV  = T5_A1,
};

struct adapter_params {
	struct tp_params  tp;
	struct vpd_params vpd;
	struct pci_params pci;

	unsigned int sf_size;             /* serial flash size in bytes */
	unsigned int sf_nsec;             /* # of flash sectors */
	unsigned int sf_fw_start;         /* start of FW image in flash */

	unsigned int fw_vers;
	unsigned int tp_vers;
	u8 api_vers[7];

	unsigned short mtus[NMTUS];
	unsigned short a_wnd[NCCTRL_WIN];
	unsigned short b_wnd[NCCTRL_WIN];

	unsigned char nports;             /* # of ethernet ports */
	unsigned char portvec;
	enum chip_type chip;              /* chip code */
	unsigned char offload;

	unsigned char bypass;

	unsigned int ofldq_wr_cred;
	bool ulptx_memwrite_dsgl;         /* use of T5 DSGL allowed */

	unsigned int max_ordird_qp;       /* Max read depth per RDMA QP */
	unsigned int max_ird_adapter;     /* Max read depth per adapter */
};

#include "t4fw_api.h"

#define FW_VERSION(chip) ( \
		FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
		FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
		FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
		FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)

struct fw_info {
	u8 chip;
	char *fs_name;
	char *fw_mod_name;
	struct fw_hdr fw_hdr;
};


struct trace_params {
	u32 data[TRACE_LEN / 4];
	u32 mask[TRACE_LEN / 4];
	unsigned short snap_len;
	unsigned short min_len;
	unsigned char skip_ofst;
	unsigned char skip_len;
	unsigned char invert;
	unsigned char port;
};

struct link_config {
	unsigned short supported;        /* link capabilities */
	unsigned short advertising;      /* advertised capabilities */
	unsigned short requested_speed;  /* speed user has requested */
	unsigned short speed;            /* actual link speed */
	unsigned char  requested_fc;     /* flow control user has requested */
	unsigned char  fc;               /* actual link flow control */
	unsigned char  autoneg;          /* autonegotiating? */
	unsigned char  link_ok;          /* link up? */
};

#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)

enum {
	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
	MAX_RDMA_CIQS = NCHAN,        /* # of RDMA concentrator IQs */
	MAX_ISCSI_QUEUES = NCHAN,     /* # of streaming iSCSI Rx queues */
};

enum {
	INGQ_EXTRAS = 2,        /* firmware event queue and */
				/* forwarded interrupts */
	MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
		   + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
		   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};

struct adapter;
struct sge_rspq;

#include "cxgb4_dcb.h"

struct port_info {
	struct adapter *adapter;
	u16    viid;
	s16    xact_addr_filt;        /* index of exact MAC address filter */
	u16    rss_size;              /* size of VI's RSS table slice */
	s8     mdio_addr;
	u8     port_type;
	u8     mod_type;
	u8     port_id;
	u8     tx_chan;
	u8     lport;                 /* associated offload logical port */
	u8     nqsets;                /* # of qsets */
	u8     first_qset;            /* index of first qset */
	u8     rss_mode;
	struct link_config link_cfg;
	u16   *rss;
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_dcb_info dcb;     /* Data Center Bridging support */
#endif
};

struct dentry;
struct work_struct;

enum {                                 /* adapter flags */
	FULL_INIT_DONE     = (1 << 0),
	DEV_ENABLED        = (1 << 1),
	USING_MSI          = (1 << 2),
	USING_MSIX         = (1 << 3),
	FW_OK              = (1 << 4),
	RSS_TNLALLLOOKUP   = (1 << 5),
	USING_SOFT_PARAMS  = (1 << 6),
	MASTER_PF          = (1 << 7),
	FW_OFLD_CONN       = (1 << 9),
};

struct rx_sw_desc;

struct sge_fl {                     /* SGE free-buffer queue state */
	unsigned int avail;         /* # of available Rx buffers */
	unsigned int pend_cred;     /* new buffers since last FL DB ring */
	unsigned int cidx;          /* consumer index */
	unsigned int pidx;          /* producer index */
	unsigned long alloc_failed; /* # of times buffer allocation failed */
	unsigned long large_alloc_failed;
	unsigned long starving;
	/* RO fields */
	unsigned int cntxt_id;      /* SGE context id for the free list */
	unsigned int size;          /* capacity of free list */
	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
	__be64 *desc;               /* address of HW Rx descriptor ring */
	dma_addr_t addr;            /* bus address of HW ring start */
	u64 udb;                    /* BAR2 offset of User Doorbell area */
};

/* A packet gather list */
struct pkt_gl {
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;                         /* virtual address of first byte */
	unsigned int nfrags;              /* # of fragments */
	unsigned int tot_len;             /* total length of fragments */
};

typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
			      const struct pkt_gl *gl);

struct sge_rspq {                   /* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;     /* current descriptor in queue */
	unsigned int cidx;          /* consumer index */
	u8 gen;                     /* current generation bit */
	u8 intr_params;             /* interrupt holdoff parameters */
	u8 next_intr_params;        /* holdoff params for next interrupt */
	u8 adaptive_rx;
	u8 pktcnt_idx;              /* interrupt packet threshold */
	u8 uld;                     /* ULD handling this queue */
	u8 idx;                     /* queue index within its group */
	int offset;                 /* offset into current Rx buffer */
	u16 cntxt_id;               /* SGE context id for the response q */
	u16 abs_id;                 /* absolute SGE id for the response q */
	__be64 *desc;               /* address of HW response ring */
	dma_addr_t phys_addr;       /* physical address of the ring */
	u64 udb;                    /* BAR2 offset of User Doorbell area */
	unsigned int iqe_len;       /* entry size */
	unsigned int size;          /* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;  /* associated net device */
	rspq_handler_t handler;
};

struct sge_eth_stats {              /* Ethernet queue statistics */
	unsigned long pkts;         /* # of ethernet packets */
	unsigned long lro_pkts;     /* # of LRO super packets */
	unsigned long lro_merged;   /* # of wire packets merged by LRO */
	unsigned long rx_cso;       /* # of Rx checksum offloads */
	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
	unsigned long rx_drops;     /* # of packets dropped due to no mem */
};

struct sge_eth_rxq {                /* SW Ethernet Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_eth_stats stats;
} ____cacheline_aligned_in_smp;

struct sge_ofld_stats {             /* offload queue statistics */
	unsigned long pkts;         /* # of packets */
	unsigned long imm;          /* # of immediate-data packets */
	unsigned long an;           /* # of asynchronous notifications */
	unsigned long nomem;        /* # of responses deferred due to no mem */
};

struct sge_ofld_rxq {               /* SW offload Rx queue */
	struct sge_rspq rspq;
	struct sge_fl fl;
	struct sge_ofld_stats stats;
} ____cacheline_aligned_in_smp;

struct tx_desc {
	__be64 flit[8];
};

struct tx_sw_desc;

struct sge_txq {
	unsigned int  in_use;       /* # of in-use Tx descriptors */
	unsigned int  size;         /* # of descriptors */
	unsigned int  cidx;         /* SW consumer index */
	unsigned int  pidx;         /* producer index */
	unsigned long stops;        /* # of times q has been stopped */
	unsigned long restarts;     /* # of queue restarts */
	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
	struct sge_qstat *stat;     /* queue status entry */
	dma_addr_t    phys_addr;    /* physical address of the ring */
	spinlock_t db_lock;
	int db_disabled;
	unsigned short db_pidx;
	unsigned short db_pidx_inc;
	u64 udb;                    /* BAR2 offset of User Doorbell area */
};

struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
	struct sge_txq q;
	struct netdev_queue *txq;   /* associated netdev TX queue */
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_prio;                /* DCB Priority bound to queue */
#endif
	unsigned long tso;          /* # of TSO requests */
	unsigned long tx_cso;       /* # of Tx checksum offloads */
	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;

struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;  /* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;                    /* the Tx ring is full */
} ____cacheline_aligned_in_smp;

struct sge {
	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];

	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
	struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

	struct sge_rspq intrq ____cacheline_aligned_in_smp;
	spinlock_t intrq_lock;

	u16 max_ethqsets;           /* # of available Ethernet queue sets */
	u16 ethqsets;               /* # of active Ethernet queue sets */
	u16 ethtxq_rover;           /* Tx queue to clean up next */
	u16 ofldqsets;              /* # of active offload queue sets */
	u16 rdmaqs;                 /* # of available RDMA Rx queues */
	u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
	u16 ofld_rxq[MAX_OFLD_QSETS];
	u16 rdma_rxq[NCHAN];
	u16 rdma_ciq[NCHAN];
	u16 timer_val[SGE_NTIMERS];
	u8 counter_val[SGE_NCOUNTERS];
	u32 fl_pg_order;            /* large page allocation size */
	u32 stat_len;               /* length of status page at ring end */
	u32 pktshift;               /* padding between CPL & packet data */
	u32 fl_align;               /* response queue message alignment */
	u32 fl_starve_thres;        /* Free List starvation threshold */

	/* State variables for detecting an SGE Ingress DMA hang */
	unsigned int idma_1s_thresh;/* SGE same State Counter 1s threshold */
	unsigned int idma_stalled[2];/* SGE synthesized stalled timers in HZ */
	unsigned int idma_state[2]; /* SGE IDMA Hang detect state */
	unsigned int idma_qid[2];   /* SGE IDMA Hung Ingress Queue ID */

	unsigned int egr_start;
	unsigned int ingr_start;
	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
	struct timer_list rx_timer; /* refills starving FLs */
	struct timer_list tx_timer; /* checks Tx queues */
};

#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)

struct l2t_data;

#ifdef CONFIG_PCI_IOV

/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4

#endif

struct adapter {
	void __iomem *regs;
	void __iomem *bar2;
	u32 t4_bar0;
	struct pci_dev *pdev;
	struct device *pdev_dev;
	unsigned int mbox;
	unsigned int fn;
	unsigned int flags;
	enum chip_type chip;

	int msg_enable;

	struct adapter_params params;
	struct cxgb4_virt_res vres;
	unsigned int swintr;

	unsigned int wol;

	struct {
		unsigned short vec;
		char desc[IFNAMSIZ + 10];
	} msix_info[MAX_INGQ + 1];

	struct sge sge;

	struct net_device *port[MAX_NPORTS];
	u8 chan_map[NCHAN];         /* channel -> port map */

	u32 filter_mode;
	unsigned int l2t_start;
	unsigned int l2t_end;
	struct l2t_data *l2t;
	void *uld_handle[CXGB4_ULD_MAX];
	struct list_head list_node;
	struct list_head rcu_node;

	struct tid_info tids;
	void **tid_release_head;
	spinlock_t tid_release_lock;
	struct workqueue_struct *workq;
	struct work_struct tid_release_task;
	struct work_struct db_full_task;
	struct work_struct db_drop_task;
	bool tid_release_task_busy;

	struct dentry *debugfs_root;

	spinlock_t stats_lock;
	spinlock_t win0_lock ____cacheline_aligned_in_smp;
};

/* Defined bit width of user definable filter tuples
 */
#define ETHTYPE_BITWIDTH 16
#define FRAG_BITWIDTH 1
#define MACIDX_BITWIDTH 9
#define FCOE_BITWIDTH 1
#define IPORT_BITWIDTH 3
#define MATCHTYPE_BITWIDTH 3
#define PROTO_BITWIDTH 8
#define TOS_BITWIDTH 8
#define PF_BITWIDTH 8
#define VF_BITWIDTH 8
#define IVLAN_BITWIDTH 16
#define OVLAN_BITWIDTH 16

/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual individual field
 * matching rules are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;      /* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;            /* IP fragmentation header */
	uint32_t ivlan_vld:1;                   /* inner VLAN valid */
	uint32_t ovlan_vld:1;                   /* outer VLAN valid */
	uint32_t pfvf_vld:1;                    /* PF/VF valid */
	uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH;  /* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;          /* protocol type */
	uint32_t tos:TOS_BITWIDTH;              /* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;                /* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */

	/* Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];        /* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];        /* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;         /* local port */
	uint16_t fport;         /* foreign port */
};

/* A filter ioctl command.
 */
struct ch_filter_specification {
	/* Administrative fields for filter.
	 */
	uint32_t hitcnts:1;     /* count filter hits in TCB */
	uint32_t prio:1;        /* filter has priority over active/server */

	/* Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;        /* 0 => IPv4, 1 => IPv6 */

	/* Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;      /* drop, pass, switch */

	uint32_t rpttid:1;      /* report TID in RSS hash field */

	uint32_t dirsteer:1;    /* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;         /* ingress queue */

	uint32_t maskhash:1;    /* dirsteer=0: store RSS hash in TCB */
	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
				/*             1 => TCB contains IQ ID */

	/* Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some Ethernet header rewriting.
	 */
	uint32_t eport:2;       /* egress port to switch packet out */
	uint32_t newdmac:1;     /* rewrite destination MAC address */
	uint32_t newsmac:1;     /* rewrite source MAC address */
	uint32_t newvlan:2;     /* rewrite VLAN Tag */
	uint8_t dmac[ETH_ALEN]; /* new destination MAC address */
	uint8_t smac[ETH_ALEN]; /* new source MAC address */
	uint16_t vlan;          /* VLAN Tag to insert */

	/* Filter rule value/mask pairs.
	 */
	struct ch_filter_tuple val;
	struct ch_filter_tuple mask;
};

enum {
	FILTER_PASS = 0,        /* default */
	FILTER_DROP,
	FILTER_SWITCH
};

enum {
	VLAN_NOCHANGE = 0,      /* default */
	VLAN_REMOVE,
	VLAN_INSERT,
	VLAN_REWRITE
};

static inline int is_t5(enum chip_type chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
}

static inline int is_t4(enum chip_type chip)
{
	return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}

static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
{
	return readl(adap->regs + reg_addr);
}

static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
{
	writel(val, adap->regs + reg_addr);
}

#ifndef readq
static inline u64 readq(const volatile void __iomem *addr)
{
	return readl(addr) + ((u64)readl(addr + 4) << 32);
}

static inline void writeq(u64 val, volatile void __iomem *addr)
{
	writel(val, addr);
	writel(val >> 32, addr + 4);
}
#endif

static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
{
	return readq(adap->regs + reg_addr);
}

static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
{
	writeq(val, adap->regs + reg_addr);
}

/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}

/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Return the port_info structure for the port of the given index.
 */
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
	return netdev_priv(adap->port[idx]);
}

/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct adapter associated with a net_device
 */
static inline struct adapter *netdev2adap(const struct net_device *dev)
{
	return netdev2pinfo(dev)->adapter;
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id);
void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);

void *t4_alloc_mem(size_t size);

void t4_free_sge_resources(struct adapter *adap);
void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
irq_handler_t t4_intr_handler(struct adapter *adap);
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
		     const struct pkt_gl *gl);
int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd);
int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
887 struct net_device *dev, struct netdev_queue *netdevq, 887 struct net_device *dev, struct netdev_queue *netdevq,
888 unsigned int iqid); 888 unsigned int iqid);
889 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, 889 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
890 struct net_device *dev, unsigned int iqid, 890 struct net_device *dev, unsigned int iqid,
891 unsigned int cmplqid); 891 unsigned int cmplqid);
892 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, 892 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
893 struct net_device *dev, unsigned int iqid); 893 struct net_device *dev, unsigned int iqid);
894 irqreturn_t t4_sge_intr_msix(int irq, void *cookie); 894 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
895 int t4_sge_init(struct adapter *adap); 895 int t4_sge_init(struct adapter *adap);
896 void t4_sge_start(struct adapter *adap); 896 void t4_sge_start(struct adapter *adap);
897 void t4_sge_stop(struct adapter *adap); 897 void t4_sge_stop(struct adapter *adap);
898 extern int dbfifo_int_thresh; 898 extern int dbfifo_int_thresh;
899 899
900 #define for_each_port(adapter, iter) \ 900 #define for_each_port(adapter, iter) \
901 for (iter = 0; iter < (adapter)->params.nports; ++iter) 901 for (iter = 0; iter < (adapter)->params.nports; ++iter)
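As a quick illustration of the iterator above paired with adap2pinfo(): a minimal sketch of walking every port on an adapter (example_dump_ports() and its message are hypothetical, purely for illustration):

    static void example_dump_ports(struct adapter *adap)
    {
            int i;

            /* Visit the adapter's ports in index order. */
            for_each_port(adap, i) {
                    struct port_info *pi = adap2pinfo(adap, i);

                    dev_info(adap->pdev_dev, "port %d has port_id %d\n",
                             i, pi->port_id);
            }
    }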
902 902
903 static inline int is_bypass(struct adapter *adap) 903 static inline int is_bypass(struct adapter *adap)
904 { 904 {
905 return adap->params.bypass; 905 return adap->params.bypass;
906 } 906 }
907 907
908 static inline int is_bypass_device(int device) 908 static inline int is_bypass_device(int device)
909 { 909 {
910 /* this should be set based upon device capabilities */ 910 /* this should be set based upon device capabilities */
911 switch (device) { 911 switch (device) {
912 case 0x440b: 912 case 0x440b:
913 case 0x440c: 913 case 0x440c:
914 return 1; 914 return 1;
915 default: 915 default:
916 return 0; 916 return 0;
917 } 917 }
918 } 918 }
919 919
920 static inline unsigned int core_ticks_per_usec(const struct adapter *adap) 920 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
921 { 921 {
922 return adap->params.vpd.cclk / 1000; 922 return adap->params.vpd.cclk / 1000;
923 } 923 }
924 924
925 static inline unsigned int us_to_core_ticks(const struct adapter *adap, 925 static inline unsigned int us_to_core_ticks(const struct adapter *adap,
926 unsigned int us) 926 unsigned int us)
927 { 927 {
928 return (us * adap->params.vpd.cclk) / 1000; 928 return (us * adap->params.vpd.cclk) / 1000;
929 } 929 }
930 930
931 static inline unsigned int core_ticks_to_us(const struct adapter *adapter, 931 static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
932 unsigned int ticks) 932 unsigned int ticks)
933 { 933 {
934 /* add Core Clock / 2 to round ticks to nearest us */ 934 /* add Core Clock / 2 to round ticks to nearest us */
935 return ((ticks * 1000 + adapter->params.vpd.cclk/2) / 935 return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
936 adapter->params.vpd.cclk); 936 adapter->params.vpd.cclk);
937 } 937 }
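A worked check of the three conversion helpers above, assuming params.vpd.cclk holds the core clock in kHz (which the /1000 scaling implies). With a hypothetical 250 MHz core clock, cclk = 250000:

    core_ticks_per_usec(adap);     /* 250000 / 1000 = 250 ticks per us */
    us_to_core_ticks(adap, 4);     /* (4 * 250000) / 1000 = 1000 ticks */
    core_ticks_to_us(adap, 1125);  /* (1125 * 1000 + 125000) / 250000 = 5,
                                    * i.e. 4.5 us rounded to nearest
                                    */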
938 938
939 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask, 939 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
940 u32 val); 940 u32 val);
941 941
942 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, 942 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
943 void *rpl, bool sleep_ok); 943 void *rpl, bool sleep_ok);
944 944
945 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd, 945 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
946 int size, void *rpl) 946 int size, void *rpl)
947 { 947 {
948 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true); 948 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
949 } 949 }
950 950
951 static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd, 951 static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
952 int size, void *rpl) 952 int size, void *rpl)
953 { 953 {
954 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false); 954 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
955 } 955 }
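The two wrappers above differ only in whether the mailbox write may sleep while waiting for the firmware's reply. A hedged sketch (the command's fields are left unset purely for brevity; fw_reset_cmd is just a convenient command struct from t4fw_api.h):

    struct fw_reset_cmd c = { 0 };
    int ret;

    /* Process context: t4_wr_mbox() may sleep on the mailbox. */
    ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);

    /* Atomic context (e.g. the DCB path later in this patch): must not sleep. */
    ret = t4_wr_mbox_ns(adap, adap->mbox, &c, sizeof(c), NULL);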
956 956
957 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 957 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
958 unsigned int data_reg, const u32 *vals, 958 unsigned int data_reg, const u32 *vals,
959 unsigned int nregs, unsigned int start_idx); 959 unsigned int nregs, unsigned int start_idx);
960 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 960 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
961 unsigned int data_reg, u32 *vals, unsigned int nregs, 961 unsigned int data_reg, u32 *vals, unsigned int nregs,
962 unsigned int start_idx); 962 unsigned int start_idx);
963 void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val); 963 void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);
964 964
965 struct fw_filter_wr; 965 struct fw_filter_wr;
966 966
967 void t4_intr_enable(struct adapter *adapter); 967 void t4_intr_enable(struct adapter *adapter);
968 void t4_intr_disable(struct adapter *adapter); 968 void t4_intr_disable(struct adapter *adapter);
969 int t4_slow_intr_handler(struct adapter *adapter); 969 int t4_slow_intr_handler(struct adapter *adapter);
970 970
971 int t4_wait_dev_ready(void __iomem *regs); 971 int t4_wait_dev_ready(void __iomem *regs);
972 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port, 972 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
973 struct link_config *lc); 973 struct link_config *lc);
974 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); 974 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
975 975
976 #define T4_MEMORY_WRITE 0 976 #define T4_MEMORY_WRITE 0
977 #define T4_MEMORY_READ 1 977 #define T4_MEMORY_READ 1
978 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, 978 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
979 __be32 *buf, int dir); 979 __be32 *buf, int dir);
980 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, 980 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
981 u32 len, __be32 *buf) 981 u32 len, __be32 *buf)
982 { 982 {
983 return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0); 983 return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
984 } 984 }
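t4_memory_write() above hard-codes window 0 and direction 0 (T4_MEMORY_WRITE). The read direction goes through t4_memory_rw() directly; a hedged sketch, assuming len is in bytes and covers whole 32-bit words:

    __be32 buf[16];
    int ret;

    /* Read 64 bytes of adapter memory of type mtype at addr. */
    ret = t4_memory_rw(adap, 0, mtype, addr, sizeof(buf), buf,
                       T4_MEMORY_READ);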
985 985
986 int t4_seeprom_wp(struct adapter *adapter, bool enable); 986 int t4_seeprom_wp(struct adapter *adapter, bool enable);
987 int get_vpd_params(struct adapter *adapter, struct vpd_params *p); 987 int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
988 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 988 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
989 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
990 const u8 *fw_data, unsigned int size, int force);
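The newly added t4_fw_upgrade() prototype takes a mailbox argument, i.e. the FLASH update is negotiated with the running firmware, whereas t4_load_fw() writes the image directly. A hedged sketch of a firmware-flash path built on the new prototype (fw_file, the request_firmware() plumbing, and force = 0 are illustrative assumptions, not the patch's actual call site):

    const struct firmware *fw;
    int ret;

    ret = request_firmware(&fw, fw_file, adap->pdev_dev);
    if (ret < 0)
            return ret;

    /* Let the running firmware cooperate in the FLASH update rather
     * than having the image written behind its back.
     */
    ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 0);
    release_firmware(fw);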
989 unsigned int t4_flash_cfg_addr(struct adapter *adapter); 991 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
990 int t4_get_fw_version(struct adapter *adapter, u32 *vers); 992 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
991 int t4_get_tp_version(struct adapter *adapter, u32 *vers); 993 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
992 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, 994 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
993 const u8 *fw_data, unsigned int fw_size, 995 const u8 *fw_data, unsigned int fw_size,
994 struct fw_hdr *card_fw, enum dev_state state, int *reset); 996 struct fw_hdr *card_fw, enum dev_state state, int *reset);
995 int t4_prep_adapter(struct adapter *adapter); 997 int t4_prep_adapter(struct adapter *adapter);
996 int t4_init_tp_params(struct adapter *adap); 998 int t4_init_tp_params(struct adapter *adap);
997 int t4_filter_field_shift(const struct adapter *adap, int filter_sel); 999 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
998 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 1000 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
999 void t4_fatal_err(struct adapter *adapter); 1001 void t4_fatal_err(struct adapter *adapter);
1000 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid, 1002 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1001 int start, int n, const u16 *rspq, unsigned int nrspq); 1003 int start, int n, const u16 *rspq, unsigned int nrspq);
1002 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 1004 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1003 unsigned int flags); 1005 unsigned int flags);
1004 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 1006 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
1005 u64 *parity); 1007 u64 *parity);
1006 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 1008 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
1007 u64 *parity); 1009 u64 *parity);
1008 const char *t4_get_port_type_description(enum fw_port_type port_type); 1010 const char *t4_get_port_type_description(enum fw_port_type port_type);
1009 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); 1011 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
1010 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log); 1012 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
1011 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr, 1013 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1012 unsigned int mask, unsigned int val); 1014 unsigned int mask, unsigned int val);
1013 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4, 1015 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1014 struct tp_tcp_stats *v6); 1016 struct tp_tcp_stats *v6);
1015 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, 1017 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1016 const unsigned short *alpha, const unsigned short *beta); 1018 const unsigned short *alpha, const unsigned short *beta);
1017 1019
1018 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); 1020 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
1019 1021
1020 void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 1022 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1021 const u8 *addr); 1023 const u8 *addr);
1022 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map, 1024 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1023 u64 mask0, u64 mask1, unsigned int crc, bool enable); 1025 u64 mask0, u64 mask1, unsigned int crc, bool enable);
1024 1026
1025 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 1027 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
1026 enum dev_master master, enum dev_state *state); 1028 enum dev_master master, enum dev_state *state);
1027 int t4_fw_bye(struct adapter *adap, unsigned int mbox); 1029 int t4_fw_bye(struct adapter *adap, unsigned int mbox);
1028 int t4_early_init(struct adapter *adap, unsigned int mbox); 1030 int t4_early_init(struct adapter *adap, unsigned int mbox);
1029 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset); 1031 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
1030 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, 1032 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
1031 unsigned int cache_line_size); 1033 unsigned int cache_line_size);
1032 int t4_fw_initialize(struct adapter *adap, unsigned int mbox); 1034 int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
1033 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 1035 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1034 unsigned int vf, unsigned int nparams, const u32 *params, 1036 unsigned int vf, unsigned int nparams, const u32 *params,
1035 u32 *val); 1037 u32 *val);
1036 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 1038 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1037 unsigned int vf, unsigned int nparams, const u32 *params, 1039 unsigned int vf, unsigned int nparams, const u32 *params,
1038 const u32 *val); 1040 const u32 *val);
1039 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, 1041 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
1040 unsigned int pf, unsigned int vf, 1042 unsigned int pf, unsigned int vf,
1041 unsigned int nparams, const u32 *params, 1043 unsigned int nparams, const u32 *params,
1042 const u32 *val); 1044 const u32 *val);
1043 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 1045 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
1044 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 1046 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
1045 unsigned int rxqi, unsigned int rxq, unsigned int tc, 1047 unsigned int rxqi, unsigned int rxq, unsigned int tc,
1046 unsigned int vi, unsigned int cmask, unsigned int pmask, 1048 unsigned int vi, unsigned int cmask, unsigned int pmask,
1047 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); 1049 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
1048 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 1050 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
1049 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 1051 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
1050 unsigned int *rss_size); 1052 unsigned int *rss_size);
1051 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 1053 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
1052 int mtu, int promisc, int all_multi, int bcast, int vlanex, 1054 int mtu, int promisc, int all_multi, int bcast, int vlanex,
1053 bool sleep_ok); 1055 bool sleep_ok);
1054 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 1056 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
1055 unsigned int viid, bool free, unsigned int naddr, 1057 unsigned int viid, bool free, unsigned int naddr,
1056 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); 1058 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
1057 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 1059 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
1058 int idx, const u8 *addr, bool persist, bool add_smt); 1060 int idx, const u8 *addr, bool persist, bool add_smt);
1059 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 1061 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
1060 bool ucast, u64 vec, bool sleep_ok); 1062 bool ucast, u64 vec, bool sleep_ok);
1061 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, 1063 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
1062 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en); 1064 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
1063 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 1065 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
1064 bool rx_en, bool tx_en); 1066 bool rx_en, bool tx_en);
1065 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid, 1067 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
1066 unsigned int nblinks); 1068 unsigned int nblinks);
1067 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 1069 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1068 unsigned int mmd, unsigned int reg, u16 *valp); 1070 unsigned int mmd, unsigned int reg, u16 *valp);
1069 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr, 1071 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
1070 unsigned int mmd, unsigned int reg, u16 val); 1072 unsigned int mmd, unsigned int reg, u16 val);
1071 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1073 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1072 unsigned int vf, unsigned int iqtype, unsigned int iqid, 1074 unsigned int vf, unsigned int iqtype, unsigned int iqid,
1073 unsigned int fl0id, unsigned int fl1id); 1075 unsigned int fl0id, unsigned int fl1id);
1074 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1076 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1075 unsigned int vf, unsigned int eqid); 1077 unsigned int vf, unsigned int eqid);
1076 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1078 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1077 unsigned int vf, unsigned int eqid); 1079 unsigned int vf, unsigned int eqid);
1078 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, 1080 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1079 unsigned int vf, unsigned int eqid); 1081 unsigned int vf, unsigned int eqid);
1080 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); 1082 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
1081 void t4_db_full(struct adapter *adapter); 1083 void t4_db_full(struct adapter *adapter);
1082 void t4_db_dropped(struct adapter *adapter); 1084 void t4_db_dropped(struct adapter *adapter);
1083 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox, 1085 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
1084 u32 addr, u32 val); 1086 u32 addr, u32 val);
1085 void t4_sge_decode_idma_state(struct adapter *adapter, int state); 1087 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
1086 #endif /* __CXGB4_H__ */ 1088 #endif /* __CXGB4_H__ */
1087 1089
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1 /* 1 /*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux. 2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 * 3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. 4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file 8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the 9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below: 10 * OpenIB.org BSD license below:
11 * 11 *
12 * Redistribution and use in source and binary forms, with or 12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following 13 * without modification, are permitted provided that the following
14 * conditions are met: 14 * conditions are met:
15 * 15 *
16 * - Redistributions of source code must retain the above 16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following 17 * copyright notice, this list of conditions and the following
18 * disclaimer. 18 * disclaimer.
19 * 19 *
20 * - Redistributions in binary form must reproduce the above 20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following 21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials 22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution. 23 * provided with the distribution.
24 * 24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE. 32 * SOFTWARE.
33 */ 33 */
34 34
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 36
37 #include <linux/bitmap.h> 37 #include <linux/bitmap.h>
38 #include <linux/crc32.h> 38 #include <linux/crc32.h>
39 #include <linux/ctype.h> 39 #include <linux/ctype.h>
40 #include <linux/debugfs.h> 40 #include <linux/debugfs.h>
41 #include <linux/err.h> 41 #include <linux/err.h>
42 #include <linux/etherdevice.h> 42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h> 43 #include <linux/firmware.h>
44 #include <linux/if.h> 44 #include <linux/if.h>
45 #include <linux/if_vlan.h> 45 #include <linux/if_vlan.h>
46 #include <linux/init.h> 46 #include <linux/init.h>
47 #include <linux/log2.h> 47 #include <linux/log2.h>
48 #include <linux/mdio.h> 48 #include <linux/mdio.h>
49 #include <linux/module.h> 49 #include <linux/module.h>
50 #include <linux/moduleparam.h> 50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h> 51 #include <linux/mutex.h>
52 #include <linux/netdevice.h> 52 #include <linux/netdevice.h>
53 #include <linux/pci.h> 53 #include <linux/pci.h>
54 #include <linux/aer.h> 54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h> 55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h> 56 #include <linux/sched.h>
57 #include <linux/seq_file.h> 57 #include <linux/seq_file.h>
58 #include <linux/sockios.h> 58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h> 59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h> 60 #include <linux/workqueue.h>
61 #include <net/neighbour.h> 61 #include <net/neighbour.h>
62 #include <net/netevent.h> 62 #include <net/netevent.h>
63 #include <net/addrconf.h> 63 #include <net/addrconf.h>
64 #include <asm/uaccess.h> 64 #include <asm/uaccess.h>
65 65
66 #include "cxgb4.h" 66 #include "cxgb4.h"
67 #include "t4_regs.h" 67 #include "t4_regs.h"
68 #include "t4_msg.h" 68 #include "t4_msg.h"
69 #include "t4fw_api.h" 69 #include "t4fw_api.h"
70 #include "cxgb4_dcb.h" 70 #include "cxgb4_dcb.h"
71 #include "l2t.h" 71 #include "l2t.h"
72 72
73 #include <../drivers/net/bonding/bonding.h> 73 #include <../drivers/net/bonding/bonding.h>
74 74
75 #ifdef DRV_VERSION 75 #ifdef DRV_VERSION
76 #undef DRV_VERSION 76 #undef DRV_VERSION
77 #endif 77 #endif
78 #define DRV_VERSION "2.0.0-ko" 78 #define DRV_VERSION "2.0.0-ko"
79 #define DRV_DESC "Chelsio T4/T5 Network Driver" 79 #define DRV_DESC "Chelsio T4/T5 Network Driver"
80 80
81 /* 81 /*
82 * Max interrupt hold-off timer value in us. Queues fall back to this value 82 * Max interrupt hold-off timer value in us. Queues fall back to this value
83 * under extreme memory pressure so it's largish to give the system time to 83 * under extreme memory pressure so it's largish to give the system time to
84 * recover. 84 * recover.
85 */ 85 */
86 #define MAX_SGE_TIMERVAL 200U 86 #define MAX_SGE_TIMERVAL 200U
87 87
88 enum { 88 enum {
89 /* 89 /*
90 * Physical Function provisioning constants. 90 * Physical Function provisioning constants.
91 */ 91 */
92 PFRES_NVI = 4, /* # of Virtual Interfaces */ 92 PFRES_NVI = 4, /* # of Virtual Interfaces */
93 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */ 93 PFRES_NETHCTRL = 128, /* # of EQs used for ETH or CTRL Qs */
94 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr 94 PFRES_NIQFLINT = 128, /* # of ingress Qs/w Free List(s)/intr
95 */ 95 */
96 PFRES_NEQ = 256, /* # of egress queues */ 96 PFRES_NEQ = 256, /* # of egress queues */
97 PFRES_NIQ = 0, /* # of ingress queues */ 97 PFRES_NIQ = 0, /* # of ingress queues */
98 PFRES_TC = 0, /* PCI-E traffic class */ 98 PFRES_TC = 0, /* PCI-E traffic class */
99 PFRES_NEXACTF = 128, /* # of exact MPS filters */ 99 PFRES_NEXACTF = 128, /* # of exact MPS filters */
100 100
101 PFRES_R_CAPS = FW_CMD_CAP_PF, 101 PFRES_R_CAPS = FW_CMD_CAP_PF,
102 PFRES_WX_CAPS = FW_CMD_CAP_PF, 102 PFRES_WX_CAPS = FW_CMD_CAP_PF,
103 103
104 #ifdef CONFIG_PCI_IOV 104 #ifdef CONFIG_PCI_IOV
105 /* 105 /*
106 * Virtual Function provisioning constants. We need two extra Ingress 106 * Virtual Function provisioning constants. We need two extra Ingress
107 * Queues with Interrupt capability to serve as the VF's Firmware 107 * Queues with Interrupt capability to serve as the VF's Firmware
108 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) -- 108 * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
109 * neither will have Free Lists associated with them. For each 109 * neither will have Free Lists associated with them. For each
110 * Ethernet/Control Egress Queue and for each Free List, we need an 110 * Ethernet/Control Egress Queue and for each Free List, we need an
111 * Egress Context. 111 * Egress Context.
112 */ 112 */
113 VFRES_NPORTS = 1, /* # of "ports" per VF */ 113 VFRES_NPORTS = 1, /* # of "ports" per VF */
114 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */ 114 VFRES_NQSETS = 2, /* # of "Queue Sets" per VF */
115 115
116 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */ 116 VFRES_NVI = VFRES_NPORTS, /* # of Virtual Interfaces */
117 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */ 117 VFRES_NETHCTRL = VFRES_NQSETS, /* # of EQs used for ETH or CTRL Qs */
118 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */ 118 VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
119 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */ 119 VFRES_NEQ = VFRES_NQSETS*2, /* # of egress queues */
120 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */ 120 VFRES_NIQ = 0, /* # of non-fl/int ingress queues */
121 VFRES_TC = 0, /* PCI-E traffic class */ 121 VFRES_TC = 0, /* PCI-E traffic class */
122 VFRES_NEXACTF = 16, /* # of exact MPS filters */ 122 VFRES_NEXACTF = 16, /* # of exact MPS filters */
123 123
124 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT, 124 VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
125 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF, 125 VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
126 #endif 126 #endif
127 }; 127 };
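Worked out for the VF defaults above: each VF gets VFRES_NQSETS = 2 queue sets, so VFRES_NIQFLINT = 2 + 2 = 4 ingress queues (one per queue set, plus the Firmware Event Queue and the Forwarded Interrupt Queue), and VFRES_NEQ = 2 * 2 = 4 egress contexts (an Ethernet TX queue and a Free List per queue set).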
128 128
129 /* 129 /*
130 * Provide a Port Access Rights Mask for the specified PF/VF. This is very 130 * Provide a Port Access Rights Mask for the specified PF/VF. This is very
131 * static and likely not to be useful in the long run. We really need to 131 * static and likely not to be useful in the long run. We really need to
132 * implement some form of persistent configuration which the firmware 132 * implement some form of persistent configuration which the firmware
133 * controls. 133 * controls.
134 */ 134 */
135 static unsigned int pfvfres_pmask(struct adapter *adapter, 135 static unsigned int pfvfres_pmask(struct adapter *adapter,
136 unsigned int pf, unsigned int vf) 136 unsigned int pf, unsigned int vf)
137 { 137 {
138 unsigned int portn, portvec; 138 unsigned int portn, portvec;
139 139
140 /* 140 /*
141 * Give PF's access to all of the ports. 141 * Give PF's access to all of the ports.
142 */ 142 */
143 if (vf == 0) 143 if (vf == 0)
144 return FW_PFVF_CMD_PMASK_MASK; 144 return FW_PFVF_CMD_PMASK_MASK;
145 145
146 /* 146 /*
147 * For VFs, we'll assign them access to the ports based purely on the 147 * For VFs, we'll assign them access to the ports based purely on the
148 * PF. We assign active ports in order, wrapping around if there are 148 * PF. We assign active ports in order, wrapping around if there are
149 * fewer active ports than PFs: e.g. active port[pf % nports]. 149 * fewer active ports than PFs: e.g. active port[pf % nports].
150 * Unfortunately the adapter's port_info structs haven't been 150 * Unfortunately the adapter's port_info structs haven't been
151 * initialized yet so we have to compute this. 151 * initialized yet so we have to compute this.
152 */ 152 */
153 if (adapter->params.nports == 0) 153 if (adapter->params.nports == 0)
154 return 0; 154 return 0;
155 155
156 portn = pf % adapter->params.nports; 156 portn = pf % adapter->params.nports;
157 portvec = adapter->params.portvec; 157 portvec = adapter->params.portvec;
158 for (;;) { 158 for (;;) {
159 /* 159 /*
160 * Isolate the lowest set bit in the port vector. If we're at 160 * Isolate the lowest set bit in the port vector. If we're at
161 * the port number that we want, return that as the pmask. 161 * the port number that we want, return that as the pmask.
162 * Otherwise mask that bit out of the port vector and 162 * Otherwise mask that bit out of the port vector and
163 * decrement our port number ... 163 * decrement our port number ...
164 */ 164 */
165 unsigned int pmask = portvec ^ (portvec & (portvec-1)); 165 unsigned int pmask = portvec ^ (portvec & (portvec-1));
166 if (portn == 0) 166 if (portn == 0)
167 return pmask; 167 return pmask;
168 portn--; 168 portn--;
169 portvec &= ~pmask; 169 portvec &= ~pmask;
170 } 170 }
171 /*NOTREACHED*/ 171 /*NOTREACHED*/
172 } 172 }
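The loop above relies on a classic bit trick: portvec & (portvec - 1) clears the lowest set bit, so portvec ^ (portvec & (portvec - 1)) isolates it (equivalently, portvec & -portvec). A worked example, assuming two active ports with portvec = 0b1010 (nports = 2):

    pf % nports == 0  ->  pmask = 0b0010  (first active port)
    pf % nports == 1  ->  pmask = 0b1000  (second active port)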
173 173
174 enum { 174 enum {
175 MAX_TXQ_ENTRIES = 16384, 175 MAX_TXQ_ENTRIES = 16384,
176 MAX_CTRL_TXQ_ENTRIES = 1024, 176 MAX_CTRL_TXQ_ENTRIES = 1024,
177 MAX_RSPQ_ENTRIES = 16384, 177 MAX_RSPQ_ENTRIES = 16384,
178 MAX_RX_BUFFERS = 16384, 178 MAX_RX_BUFFERS = 16384,
179 MIN_TXQ_ENTRIES = 32, 179 MIN_TXQ_ENTRIES = 32,
180 MIN_CTRL_TXQ_ENTRIES = 32, 180 MIN_CTRL_TXQ_ENTRIES = 32,
181 MIN_RSPQ_ENTRIES = 128, 181 MIN_RSPQ_ENTRIES = 128,
182 MIN_FL_ENTRIES = 16 182 MIN_FL_ENTRIES = 16
183 }; 183 };
184 184
185 /* Host shadow copy of ingress filter entry. This is in host native format 185 /* Host shadow copy of ingress filter entry. This is in host native format
186 * and doesn't match the ordering or bit order, etc. of the hardware or the 186 * and doesn't match the ordering or bit order, etc. of the hardware or the
187 * firmware command. The use of bit-field structure elements is purely to 187 * firmware command. The use of bit-field structure elements is purely to
188 * remind ourselves of the field size limitations and save memory in the case 188 * remind ourselves of the field size limitations and save memory in the case
189 * where the filter table is large. 189 * where the filter table is large.
190 */ 190 */
191 struct filter_entry { 191 struct filter_entry {
192 /* Administrative fields for filter. 192 /* Administrative fields for filter.
193 */ 193 */
194 u32 valid:1; /* filter allocated and valid */ 194 u32 valid:1; /* filter allocated and valid */
195 u32 locked:1; /* filter is administratively locked */ 195 u32 locked:1; /* filter is administratively locked */
196 196
197 u32 pending:1; /* filter action is pending firmware reply */ 197 u32 pending:1; /* filter action is pending firmware reply */
198 u32 smtidx:8; /* Source MAC Table index for smac */ 198 u32 smtidx:8; /* Source MAC Table index for smac */
199 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */ 199 struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
200 200
201 /* The filter itself. Most of this is a straight copy of information 201 /* The filter itself. Most of this is a straight copy of information
202 * provided by the extended ioctl(). Some fields are translated to 202 * provided by the extended ioctl(). Some fields are translated to
203 * internal forms -- for instance the Ingress Queue ID passed in from 203 * internal forms -- for instance the Ingress Queue ID passed in from
204 * the ioctl() is translated into the Absolute Ingress Queue ID. 204 * the ioctl() is translated into the Absolute Ingress Queue ID.
205 */ 205 */
206 struct ch_filter_specification fs; 206 struct ch_filter_specification fs;
207 }; 207 };
208 208
209 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 209 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
210 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\ 210 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
211 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) 211 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
212 212
213 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) } 213 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
214 214
215 static const struct pci_device_id cxgb4_pci_tbl[] = { 215 static const struct pci_device_id cxgb4_pci_tbl[] = {
216 CH_DEVICE(0xa000, 0), /* PE10K */ 216 CH_DEVICE(0xa000, 0), /* PE10K */
217 CH_DEVICE(0x4001, -1), 217 CH_DEVICE(0x4001, -1),
218 CH_DEVICE(0x4002, -1), 218 CH_DEVICE(0x4002, -1),
219 CH_DEVICE(0x4003, -1), 219 CH_DEVICE(0x4003, -1),
220 CH_DEVICE(0x4004, -1), 220 CH_DEVICE(0x4004, -1),
221 CH_DEVICE(0x4005, -1), 221 CH_DEVICE(0x4005, -1),
222 CH_DEVICE(0x4006, -1), 222 CH_DEVICE(0x4006, -1),
223 CH_DEVICE(0x4007, -1), 223 CH_DEVICE(0x4007, -1),
224 CH_DEVICE(0x4008, -1), 224 CH_DEVICE(0x4008, -1),
225 CH_DEVICE(0x4009, -1), 225 CH_DEVICE(0x4009, -1),
226 CH_DEVICE(0x400a, -1), 226 CH_DEVICE(0x400a, -1),
227 CH_DEVICE(0x400d, -1), 227 CH_DEVICE(0x400d, -1),
228 CH_DEVICE(0x400e, -1), 228 CH_DEVICE(0x400e, -1),
229 CH_DEVICE(0x4080, -1), 229 CH_DEVICE(0x4080, -1),
230 CH_DEVICE(0x4081, -1), 230 CH_DEVICE(0x4081, -1),
231 CH_DEVICE(0x4082, -1), 231 CH_DEVICE(0x4082, -1),
232 CH_DEVICE(0x4083, -1), 232 CH_DEVICE(0x4083, -1),
233 CH_DEVICE(0x4084, -1), 233 CH_DEVICE(0x4084, -1),
234 CH_DEVICE(0x4085, -1), 234 CH_DEVICE(0x4085, -1),
235 CH_DEVICE(0x4086, -1), 235 CH_DEVICE(0x4086, -1),
236 CH_DEVICE(0x4087, -1), 236 CH_DEVICE(0x4087, -1),
237 CH_DEVICE(0x4088, -1), 237 CH_DEVICE(0x4088, -1),
238 CH_DEVICE(0x4401, 4), 238 CH_DEVICE(0x4401, 4),
239 CH_DEVICE(0x4402, 4), 239 CH_DEVICE(0x4402, 4),
240 CH_DEVICE(0x4403, 4), 240 CH_DEVICE(0x4403, 4),
241 CH_DEVICE(0x4404, 4), 241 CH_DEVICE(0x4404, 4),
242 CH_DEVICE(0x4405, 4), 242 CH_DEVICE(0x4405, 4),
243 CH_DEVICE(0x4406, 4), 243 CH_DEVICE(0x4406, 4),
244 CH_DEVICE(0x4407, 4), 244 CH_DEVICE(0x4407, 4),
245 CH_DEVICE(0x4408, 4), 245 CH_DEVICE(0x4408, 4),
246 CH_DEVICE(0x4409, 4), 246 CH_DEVICE(0x4409, 4),
247 CH_DEVICE(0x440a, 4), 247 CH_DEVICE(0x440a, 4),
248 CH_DEVICE(0x440d, 4), 248 CH_DEVICE(0x440d, 4),
249 CH_DEVICE(0x440e, 4), 249 CH_DEVICE(0x440e, 4),
250 CH_DEVICE(0x4480, 4), 250 CH_DEVICE(0x4480, 4),
251 CH_DEVICE(0x4481, 4), 251 CH_DEVICE(0x4481, 4),
252 CH_DEVICE(0x4482, 4), 252 CH_DEVICE(0x4482, 4),
253 CH_DEVICE(0x4483, 4), 253 CH_DEVICE(0x4483, 4),
254 CH_DEVICE(0x4484, 4), 254 CH_DEVICE(0x4484, 4),
255 CH_DEVICE(0x4485, 4), 255 CH_DEVICE(0x4485, 4),
256 CH_DEVICE(0x4486, 4), 256 CH_DEVICE(0x4486, 4),
257 CH_DEVICE(0x4487, 4), 257 CH_DEVICE(0x4487, 4),
258 CH_DEVICE(0x4488, 4), 258 CH_DEVICE(0x4488, 4),
259 CH_DEVICE(0x5001, 4), 259 CH_DEVICE(0x5001, 4),
260 CH_DEVICE(0x5002, 4), 260 CH_DEVICE(0x5002, 4),
261 CH_DEVICE(0x5003, 4), 261 CH_DEVICE(0x5003, 4),
262 CH_DEVICE(0x5004, 4), 262 CH_DEVICE(0x5004, 4),
263 CH_DEVICE(0x5005, 4), 263 CH_DEVICE(0x5005, 4),
264 CH_DEVICE(0x5006, 4), 264 CH_DEVICE(0x5006, 4),
265 CH_DEVICE(0x5007, 4), 265 CH_DEVICE(0x5007, 4),
266 CH_DEVICE(0x5008, 4), 266 CH_DEVICE(0x5008, 4),
267 CH_DEVICE(0x5009, 4), 267 CH_DEVICE(0x5009, 4),
268 CH_DEVICE(0x500A, 4), 268 CH_DEVICE(0x500A, 4),
269 CH_DEVICE(0x500B, 4), 269 CH_DEVICE(0x500B, 4),
270 CH_DEVICE(0x500C, 4), 270 CH_DEVICE(0x500C, 4),
271 CH_DEVICE(0x500D, 4), 271 CH_DEVICE(0x500D, 4),
272 CH_DEVICE(0x500E, 4), 272 CH_DEVICE(0x500E, 4),
273 CH_DEVICE(0x500F, 4), 273 CH_DEVICE(0x500F, 4),
274 CH_DEVICE(0x5010, 4), 274 CH_DEVICE(0x5010, 4),
275 CH_DEVICE(0x5011, 4), 275 CH_DEVICE(0x5011, 4),
276 CH_DEVICE(0x5012, 4), 276 CH_DEVICE(0x5012, 4),
277 CH_DEVICE(0x5013, 4), 277 CH_DEVICE(0x5013, 4),
278 CH_DEVICE(0x5014, 4), 278 CH_DEVICE(0x5014, 4),
279 CH_DEVICE(0x5015, 4), 279 CH_DEVICE(0x5015, 4),
280 CH_DEVICE(0x5080, 4), 280 CH_DEVICE(0x5080, 4),
281 CH_DEVICE(0x5081, 4), 281 CH_DEVICE(0x5081, 4),
282 CH_DEVICE(0x5082, 4), 282 CH_DEVICE(0x5082, 4),
283 CH_DEVICE(0x5083, 4), 283 CH_DEVICE(0x5083, 4),
284 CH_DEVICE(0x5084, 4), 284 CH_DEVICE(0x5084, 4),
285 CH_DEVICE(0x5085, 4), 285 CH_DEVICE(0x5085, 4),
286 CH_DEVICE(0x5086, 4), 286 CH_DEVICE(0x5086, 4),
287 CH_DEVICE(0x5087, 4), 287 CH_DEVICE(0x5087, 4),
288 CH_DEVICE(0x5088, 4), 288 CH_DEVICE(0x5088, 4),
289 CH_DEVICE(0x5401, 4), 289 CH_DEVICE(0x5401, 4),
290 CH_DEVICE(0x5402, 4), 290 CH_DEVICE(0x5402, 4),
291 CH_DEVICE(0x5403, 4), 291 CH_DEVICE(0x5403, 4),
292 CH_DEVICE(0x5404, 4), 292 CH_DEVICE(0x5404, 4),
293 CH_DEVICE(0x5405, 4), 293 CH_DEVICE(0x5405, 4),
294 CH_DEVICE(0x5406, 4), 294 CH_DEVICE(0x5406, 4),
295 CH_DEVICE(0x5407, 4), 295 CH_DEVICE(0x5407, 4),
296 CH_DEVICE(0x5408, 4), 296 CH_DEVICE(0x5408, 4),
297 CH_DEVICE(0x5409, 4), 297 CH_DEVICE(0x5409, 4),
298 CH_DEVICE(0x540A, 4), 298 CH_DEVICE(0x540A, 4),
299 CH_DEVICE(0x540B, 4), 299 CH_DEVICE(0x540B, 4),
300 CH_DEVICE(0x540C, 4), 300 CH_DEVICE(0x540C, 4),
301 CH_DEVICE(0x540D, 4), 301 CH_DEVICE(0x540D, 4),
302 CH_DEVICE(0x540E, 4), 302 CH_DEVICE(0x540E, 4),
303 CH_DEVICE(0x540F, 4), 303 CH_DEVICE(0x540F, 4),
304 CH_DEVICE(0x5410, 4), 304 CH_DEVICE(0x5410, 4),
305 CH_DEVICE(0x5411, 4), 305 CH_DEVICE(0x5411, 4),
306 CH_DEVICE(0x5412, 4), 306 CH_DEVICE(0x5412, 4),
307 CH_DEVICE(0x5413, 4), 307 CH_DEVICE(0x5413, 4),
308 CH_DEVICE(0x5414, 4), 308 CH_DEVICE(0x5414, 4),
309 CH_DEVICE(0x5415, 4), 309 CH_DEVICE(0x5415, 4),
310 CH_DEVICE(0x5480, 4), 310 CH_DEVICE(0x5480, 4),
311 CH_DEVICE(0x5481, 4), 311 CH_DEVICE(0x5481, 4),
312 CH_DEVICE(0x5482, 4), 312 CH_DEVICE(0x5482, 4),
313 CH_DEVICE(0x5483, 4), 313 CH_DEVICE(0x5483, 4),
314 CH_DEVICE(0x5484, 4), 314 CH_DEVICE(0x5484, 4),
315 CH_DEVICE(0x5485, 4), 315 CH_DEVICE(0x5485, 4),
316 CH_DEVICE(0x5486, 4), 316 CH_DEVICE(0x5486, 4),
317 CH_DEVICE(0x5487, 4), 317 CH_DEVICE(0x5487, 4),
318 CH_DEVICE(0x5488, 4), 318 CH_DEVICE(0x5488, 4),
319 { 0, } 319 { 0, }
320 }; 320 };
321 321
322 #define FW4_FNAME "cxgb4/t4fw.bin" 322 #define FW4_FNAME "cxgb4/t4fw.bin"
323 #define FW5_FNAME "cxgb4/t5fw.bin" 323 #define FW5_FNAME "cxgb4/t5fw.bin"
324 #define FW4_CFNAME "cxgb4/t4-config.txt" 324 #define FW4_CFNAME "cxgb4/t4-config.txt"
325 #define FW5_CFNAME "cxgb4/t5-config.txt" 325 #define FW5_CFNAME "cxgb4/t5-config.txt"
326 326
327 MODULE_DESCRIPTION(DRV_DESC); 327 MODULE_DESCRIPTION(DRV_DESC);
328 MODULE_AUTHOR("Chelsio Communications"); 328 MODULE_AUTHOR("Chelsio Communications");
329 MODULE_LICENSE("Dual BSD/GPL"); 329 MODULE_LICENSE("Dual BSD/GPL");
330 MODULE_VERSION(DRV_VERSION); 330 MODULE_VERSION(DRV_VERSION);
331 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); 331 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
332 MODULE_FIRMWARE(FW4_FNAME); 332 MODULE_FIRMWARE(FW4_FNAME);
333 MODULE_FIRMWARE(FW5_FNAME); 333 MODULE_FIRMWARE(FW5_FNAME);
334 334
335 /* 335 /*
336 * Normally we're willing to become the firmware's Master PF but will be happy 336 * Normally we're willing to become the firmware's Master PF but will be happy
337 * if another PF has already become the Master and initialized the adapter. 337 * if another PF has already become the Master and initialized the adapter.
338 * Setting "force_init" will cause this driver to forcibly establish itself as 338 * Setting "force_init" will cause this driver to forcibly establish itself as
339 * the Master PF and initialize the adapter. 339 * the Master PF and initialize the adapter.
340 */ 340 */
341 static uint force_init; 341 static uint force_init;
342 342
343 module_param(force_init, uint, 0644); 343 module_param(force_init, uint, 0644);
344 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter"); 344 MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
345 345
346 /* 346 /*
347 * Normally if the firmware we connect to has Configuration File support, we 347 * Normally if the firmware we connect to has Configuration File support, we
348 * use that and only fall back to the old Driver-based initialization if the 348 * use that and only fall back to the old Driver-based initialization if the
349 * Configuration File fails for some reason. If force_old_init is set, then 349 * Configuration File fails for some reason. If force_old_init is set, then
350 * we'll always use the old Driver-based initialization sequence. 350 * we'll always use the old Driver-based initialization sequence.
351 */ 351 */
352 static uint force_old_init; 352 static uint force_old_init;
353 353
354 module_param(force_old_init, uint, 0644); 354 module_param(force_old_init, uint, 0644);
355 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence"); 355 MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");
356 356
357 static int dflt_msg_enable = DFLT_MSG_ENABLE; 357 static int dflt_msg_enable = DFLT_MSG_ENABLE;
358 358
359 module_param(dflt_msg_enable, int, 0644); 359 module_param(dflt_msg_enable, int, 0644);
360 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap"); 360 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
361 361
362 /* 362 /*
363 * The driver uses the best interrupt scheme available on a platform in the 363 * The driver uses the best interrupt scheme available on a platform in the
364 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which 364 * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
365 * of these schemes the driver may consider as follows: 365 * of these schemes the driver may consider as follows:
366 * 366 *
367 * msi = 2: choose from among all three options 367 * msi = 2: choose from among all three options
368 * msi = 1: only consider MSI and INTx interrupts 368 * msi = 1: only consider MSI and INTx interrupts
369 * msi = 0: force INTx interrupts 369 * msi = 0: force INTx interrupts
370 */ 370 */
371 static int msi = 2; 371 static int msi = 2;
372 372
373 module_param(msi, int, 0644); 373 module_param(msi, int, 0644);
374 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)"); 374 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
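A hedged sketch of how the msi parameter typically gates interrupt setup, mirroring the MSI-X -> MSI -> INTx ordering described above. pci_enable_msix_range() and pci_enable_msi() are the standard PCI helpers; the USING_MSIX/USING_MSI flags and the entries/need/want vector counts are assumptions for illustration:

    if (msi > 1 &&
        pci_enable_msix_range(adap->pdev, entries, need, want) > 0)
            adap->flags |= USING_MSIX;      /* assumed driver flag */
    else if (msi > 0 && pci_enable_msi(adap->pdev) == 0)
            adap->flags |= USING_MSI;       /* assumed driver flag */
    /* else fall back to legacy INTx */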
375 375
376 /* 376 /*
377 * Queue interrupt hold-off timer values. Queues default to the first of these 377 * Queue interrupt hold-off timer values. Queues default to the first of these
378 * upon creation. 378 * upon creation.
379 */ 379 */
380 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 }; 380 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
381 381
382 module_param_array(intr_holdoff, uint, NULL, 0644); 382 module_param_array(intr_holdoff, uint, NULL, 0644);
383 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers " 383 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
384 "0..4 in microseconds"); 384 "0..4 in microseconds");
385 385
386 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 }; 386 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
387 387
388 module_param_array(intr_cnt, uint, NULL, 0644); 388 module_param_array(intr_cnt, uint, NULL, 0644);
389 MODULE_PARM_DESC(intr_cnt, 389 MODULE_PARM_DESC(intr_cnt,
390 "thresholds 1..3 for queue interrupt packet counters"); 390 "thresholds 1..3 for queue interrupt packet counters");
391 391
392 /* 392 /*
393 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers 393 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
394 * offset by 2 bytes in order to have the IP headers line up on 4-byte 394 * offset by 2 bytes in order to have the IP headers line up on 4-byte
395 * boundaries. This is a requirement for many architectures which will throw 395 * boundaries. This is a requirement for many architectures which will throw
396 * a machine check fault if an attempt is made to access one of the 4-byte IP 396 * a machine check fault if an attempt is made to access one of the 4-byte IP
397 * header fields on a non-4-byte boundary. And it's a major performance issue 397 * header fields on a non-4-byte boundary. And it's a major performance issue
398 * even on some architectures which allow it like some implementations of the 398 * even on some architectures which allow it like some implementations of the
399 * x86 ISA. However, some architectures don't mind this and for some very 399 * x86 ISA. However, some architectures don't mind this and for some very
400 * edge-case performance sensitive applications (like forwarding large volumes 400 * edge-case performance sensitive applications (like forwarding large volumes
401 * of small packets), setting this DMA offset to 0 will decrease the number of 401 * of small packets), setting this DMA offset to 0 will decrease the number of
402 * PCI-E Bus transfers enough to measurably affect performance. 402 * PCI-E Bus transfers enough to measurably affect performance.
403 */ 403 */
404 static int rx_dma_offset = 2; 404 static int rx_dma_offset = 2;
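The default offset of 2 works because a standard Ethernet header is 14 bytes: with the packet DMA'd to offset 2, the IP header starts at byte 2 + 14 = 16 of the buffer, which is the 4-byte alignment the comment above calls for.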
405 405
406 static bool vf_acls; 406 static bool vf_acls;
407 407
408 #ifdef CONFIG_PCI_IOV 408 #ifdef CONFIG_PCI_IOV
409 module_param(vf_acls, bool, 0644); 409 module_param(vf_acls, bool, 0644);
410 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); 410 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
411 411
412 /* Configure the number of PCI-E Virtual Functions which are to be instantiated 412 /* Configure the number of PCI-E Virtual Functions which are to be instantiated
413 * on SR-IOV Capable Physical Functions. 413 * on SR-IOV Capable Physical Functions.
414 */ 414 */
415 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV]; 415 static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
416 416
417 module_param_array(num_vf, uint, NULL, 0644); 417 module_param_array(num_vf, uint, NULL, 0644);
418 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); 418 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
419 #endif 419 #endif
420 420
421 /* TX Queue select used to determine what algorithm to use for selecting TX 421 /* TX Queue select used to determine what algorithm to use for selecting TX
422 * queue. Select between the kernel provided function (select_queue=0) or user 422 * queue. Select between the kernel provided function (select_queue=0) or user
423 * cxgb_select_queue function (select_queue=1) 423 * cxgb_select_queue function (select_queue=1)
424 * 424 *
425 * Default: select_queue=0 425 * Default: select_queue=0
426 */ 426 */
427 static int select_queue; 427 static int select_queue;
428 module_param(select_queue, int, 0644); 428 module_param(select_queue, int, 0644);
429 MODULE_PARM_DESC(select_queue, 429 MODULE_PARM_DESC(select_queue,
430 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method."); 430 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
431 431
432 /* 432 /*
433 * The filter TCAM has a fixed portion and a variable portion. The fixed 433 * The filter TCAM has a fixed portion and a variable portion. The fixed
434 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP 434 * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
435 * ports. The variable portion is 36 bits which can include things like Exact 435 * ports. The variable portion is 36 bits which can include things like Exact
436 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits), 436 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
437 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would 437 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
438 * far exceed the 36-bit budget for this "compressed" header portion of the 438 * far exceed the 36-bit budget for this "compressed" header portion of the
439 * filter. Thus, we have a scarce resource which must be carefully managed. 439 * filter. Thus, we have a scarce resource which must be carefully managed.
440 * 440 *
441 * By default we set this up to mostly match the set of filter matching 441 * By default we set this up to mostly match the set of filter matching
442 * capabilities of T3 but with accommodations for some of T4's more 442 * capabilities of T3 but with accommodations for some of T4's more
443 * interesting features: 443 * interesting features:
444 * 444 *
445 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8), 445 * { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
446 * [Inner] VLAN (17), Port (3), FCoE (1) } 446 * [Inner] VLAN (17), Port (3), FCoE (1) }
447 */ 447 */
448 enum { 448 enum {
449 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC, 449 TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
450 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT, 450 TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
451 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT, 451 TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
452 }; 452 };
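As a sanity check on the 36-bit budget described above, the default selection adds up to 1 (IP Fragment) + 3 (MPS Match Type) + 8 (IP Protocol) + 17 ([Inner] VLAN) + 3 (Port) + 1 (FCoE) = 33 bits, leaving 3 bits of the compressed header unused.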
453 453
454 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; 454 static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
455 455
456 module_param(tp_vlan_pri_map, uint, 0644); 456 module_param(tp_vlan_pri_map, uint, 0644);
457 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration"); 457 MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration");
458 458
459 static struct dentry *cxgb4_debugfs_root; 459 static struct dentry *cxgb4_debugfs_root;
460 460
461 static LIST_HEAD(adapter_list); 461 static LIST_HEAD(adapter_list);
462 static DEFINE_MUTEX(uld_mutex); 462 static DEFINE_MUTEX(uld_mutex);
463 /* Adapter list to be accessed from atomic context */ 463 /* Adapter list to be accessed from atomic context */
464 static LIST_HEAD(adap_rcu_list); 464 static LIST_HEAD(adap_rcu_list);
465 static DEFINE_SPINLOCK(adap_rcu_lock); 465 static DEFINE_SPINLOCK(adap_rcu_lock);
466 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX]; 466 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
467 static const char *uld_str[] = { "RDMA", "iSCSI" }; 467 static const char *uld_str[] = { "RDMA", "iSCSI" };
468 468
469 static void link_report(struct net_device *dev) 469 static void link_report(struct net_device *dev)
470 { 470 {
471 if (!netif_carrier_ok(dev)) 471 if (!netif_carrier_ok(dev))
472 netdev_info(dev, "link down\n"); 472 netdev_info(dev, "link down\n");
473 else { 473 else {
474 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" }; 474 static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
475 475
476 const char *s = "10Mbps"; 476 const char *s = "10Mbps";
477 const struct port_info *p = netdev_priv(dev); 477 const struct port_info *p = netdev_priv(dev);
478 478
479 switch (p->link_cfg.speed) { 479 switch (p->link_cfg.speed) {
480 case 10000: 480 case 10000:
481 s = "10Gbps"; 481 s = "10Gbps";
482 break; 482 break;
483 case 1000: 483 case 1000:
484 s = "1000Mbps"; 484 s = "1000Mbps";
485 break; 485 break;
486 case 100: 486 case 100:
487 s = "100Mbps"; 487 s = "100Mbps";
488 break; 488 break;
489 case 40000: 489 case 40000:
490 s = "40Gbps"; 490 s = "40Gbps";
491 break; 491 break;
492 } 492 }
493 493
494 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, 494 netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
495 fc[p->link_cfg.fc]); 495 fc[p->link_cfg.fc]);
496 } 496 }
497 } 497 }
498 498
499 #ifdef CONFIG_CHELSIO_T4_DCB 499 #ifdef CONFIG_CHELSIO_T4_DCB
500 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */ 500 /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
501 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable) 501 static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
502 { 502 {
503 struct port_info *pi = netdev_priv(dev); 503 struct port_info *pi = netdev_priv(dev);
504 struct adapter *adap = pi->adapter; 504 struct adapter *adap = pi->adapter;
505 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; 505 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
506 int i; 506 int i;
507 507
508 /* We use a simple mapping of Port TX Queue Index to DCB 508 /* We use a simple mapping of Port TX Queue Index to DCB
509 * Priority when we're enabling DCB. 509 * Priority when we're enabling DCB.
510 */ 510 */
511 for (i = 0; i < pi->nqsets; i++, txq++) { 511 for (i = 0; i < pi->nqsets; i++, txq++) {
512 u32 name, value; 512 u32 name, value;
513 int err; 513 int err;
514 514
515 name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 515 name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
516 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) | 516 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
517 FW_PARAMS_PARAM_YZ(txq->q.cntxt_id)); 517 FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
518 value = enable ? i : 0xffffffff; 518 value = enable ? i : 0xffffffff;
519 519
520 /* Since we can be called while atomic (from "interrupt 520 /* Since we can be called while atomic (from "interrupt
521 * level") we need to issue the Set Parameters Commannd 521 * level") we need to issue the Set Parameters Commannd
522 * without sleeping (timeout < 0). 522 * without sleeping (timeout < 0).
523 */ 523 */
524 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1, 524 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
525 &name, &value); 525 &name, &value);
526 526
527 if (err) 527 if (err)
528 dev_err(adap->pdev_dev, 528 dev_err(adap->pdev_dev,
529 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n", 529 "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
530 enable ? "set" : "unset", pi->port_id, i, -err); 530 enable ? "set" : "unset", pi->port_id, i, -err);
531 else 531 else
532 txq->dcb_prio = value; 532 txq->dcb_prio = value;
533 } 533 }
534 } 534 }
535 #endif /* CONFIG_CHELSIO_T4_DCB */ 535 #endif /* CONFIG_CHELSIO_T4_DCB */
536 536
537 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat) 537 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
538 { 538 {
539 struct net_device *dev = adapter->port[port_id]; 539 struct net_device *dev = adapter->port[port_id];
540 540
541 /* Skip changes from disabled ports. */ 541 /* Skip changes from disabled ports. */
542 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) { 542 if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
543 if (link_stat) 543 if (link_stat)
544 netif_carrier_on(dev); 544 netif_carrier_on(dev);
545 else { 545 else {
546 #ifdef CONFIG_CHELSIO_T4_DCB 546 #ifdef CONFIG_CHELSIO_T4_DCB
547 cxgb4_dcb_state_init(dev); 547 cxgb4_dcb_state_init(dev);
548 dcb_tx_queue_prio_enable(dev, false); 548 dcb_tx_queue_prio_enable(dev, false);
549 #endif /* CONFIG_CHELSIO_T4_DCB */ 549 #endif /* CONFIG_CHELSIO_T4_DCB */
550 netif_carrier_off(dev); 550 netif_carrier_off(dev);
551 } 551 }
552 552
553 link_report(dev); 553 link_report(dev);
554 } 554 }
555 } 555 }
556 556
557 void t4_os_portmod_changed(const struct adapter *adap, int port_id) 557 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
558 { 558 {
559 static const char *mod_str[] = { 559 static const char *mod_str[] = {
560 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM" 560 NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
561 }; 561 };
562 562
563 const struct net_device *dev = adap->port[port_id]; 563 const struct net_device *dev = adap->port[port_id];
564 const struct port_info *pi = netdev_priv(dev); 564 const struct port_info *pi = netdev_priv(dev);
565 565
566 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 566 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
567 netdev_info(dev, "port module unplugged\n"); 567 netdev_info(dev, "port module unplugged\n");
568 else if (pi->mod_type < ARRAY_SIZE(mod_str)) 568 else if (pi->mod_type < ARRAY_SIZE(mod_str))
569 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]); 569 netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
570 } 570 }
571 571
572 /* 572 /*
573 * Configure the exact and hash address filters to handle a port's multicast 573 * Configure the exact and hash address filters to handle a port's multicast
574 * and secondary unicast MAC addresses. 574 * and secondary unicast MAC addresses.
575 */ 575 */
576 static int set_addr_filters(const struct net_device *dev, bool sleep) 576 static int set_addr_filters(const struct net_device *dev, bool sleep)
577 { 577 {
578 u64 mhash = 0; 578 u64 mhash = 0;
579 u64 uhash = 0; 579 u64 uhash = 0;
580 bool free = true; 580 bool free = true;
581 u16 filt_idx[7]; 581 u16 filt_idx[7];
582 const u8 *addr[7]; 582 const u8 *addr[7];
583 int ret, naddr = 0; 583 int ret, naddr = 0;
584 const struct netdev_hw_addr *ha; 584 const struct netdev_hw_addr *ha;
585 int uc_cnt = netdev_uc_count(dev); 585 int uc_cnt = netdev_uc_count(dev);
586 int mc_cnt = netdev_mc_count(dev); 586 int mc_cnt = netdev_mc_count(dev);
587 const struct port_info *pi = netdev_priv(dev); 587 const struct port_info *pi = netdev_priv(dev);
588 unsigned int mb = pi->adapter->fn; 588 unsigned int mb = pi->adapter->fn;
589 589
590 /* first do the secondary unicast addresses */ 590 /* first do the secondary unicast addresses */
591 netdev_for_each_uc_addr(ha, dev) { 591 netdev_for_each_uc_addr(ha, dev) {
592 addr[naddr++] = ha->addr; 592 addr[naddr++] = ha->addr;
593 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { 593 if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
594 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, 594 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
595 naddr, addr, filt_idx, &uhash, sleep); 595 naddr, addr, filt_idx, &uhash, sleep);
596 if (ret < 0) 596 if (ret < 0)
597 return ret; 597 return ret;
598 598
599 free = false; 599 free = false;
600 naddr = 0; 600 naddr = 0;
601 } 601 }
602 } 602 }
603 603
604 /* next set up the multicast addresses */ 604 /* next set up the multicast addresses */
605 netdev_for_each_mc_addr(ha, dev) { 605 netdev_for_each_mc_addr(ha, dev) {
606 addr[naddr++] = ha->addr; 606 addr[naddr++] = ha->addr;
607 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) { 607 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
608 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free, 608 ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
609 naddr, addr, filt_idx, &mhash, sleep); 609 naddr, addr, filt_idx, &mhash, sleep);
610 if (ret < 0) 610 if (ret < 0)
611 return ret; 611 return ret;
612 612
613 free = false; 613 free = false;
614 naddr = 0; 614 naddr = 0;
615 } 615 }
616 } 616 }
617 617
618 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0, 618 return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
619 uhash | mhash, sleep); 619 uhash | mhash, sleep);
620 } 620 }
621 621
622 int dbfifo_int_thresh = 10; /* 10 == 640-entry threshold */ 622 int dbfifo_int_thresh = 10; /* 10 == 640-entry threshold */
623 module_param(dbfifo_int_thresh, int, 0644); 623 module_param(dbfifo_int_thresh, int, 0644);
624 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold"); 624 MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
625 625
626 /* 626 /*
627 * usecs to sleep while draining the dbfifo 627 * usecs to sleep while draining the dbfifo
628 */ 628 */
629 static int dbfifo_drain_delay = 1000; 629 static int dbfifo_drain_delay = 1000;
630 module_param(dbfifo_drain_delay, int, 0644); 630 module_param(dbfifo_drain_delay, int, 0644);
631 MODULE_PARM_DESC(dbfifo_drain_delay, 631 MODULE_PARM_DESC(dbfifo_drain_delay,
632 "usecs to sleep while draining the dbfifo"); 632 "usecs to sleep while draining the dbfifo");
633 633
634 /* 634 /*
635 * Set Rx properties of a port, such as promiscuity, address filters, and MTU. 635 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
636 * If @mtu is -1 it is left unchanged. 636 * If @mtu is -1 it is left unchanged.
637 */ 637 */
638 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok) 638 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
639 { 639 {
640 int ret; 640 int ret;
641 struct port_info *pi = netdev_priv(dev); 641 struct port_info *pi = netdev_priv(dev);
642 642
643 ret = set_addr_filters(dev, sleep_ok); 643 ret = set_addr_filters(dev, sleep_ok);
644 if (ret == 0) 644 if (ret == 0)
645 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu, 645 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
646 (dev->flags & IFF_PROMISC) ? 1 : 0, 646 (dev->flags & IFF_PROMISC) ? 1 : 0,
647 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1, 647 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
648 sleep_ok); 648 sleep_ok);
649 return ret; 649 return ret;
650 } 650 }
651 651
652 /** 652 /**
653 * link_start - enable a port 653 * link_start - enable a port
654 * @dev: the port to enable 654 * @dev: the port to enable
655 * 655 *
656 * Performs the MAC and PHY actions needed to enable a port. 656 * Performs the MAC and PHY actions needed to enable a port.
657 */ 657 */
658 static int link_start(struct net_device *dev) 658 static int link_start(struct net_device *dev)
659 { 659 {
660 int ret; 660 int ret;
661 struct port_info *pi = netdev_priv(dev); 661 struct port_info *pi = netdev_priv(dev);
662 unsigned int mb = pi->adapter->fn; 662 unsigned int mb = pi->adapter->fn;
663 663
664 /* 664 /*
665 * We do not set address filters and promiscuity here; the stack does 665 * We do not set address filters and promiscuity here; the stack does
666 * that step explicitly. 666 * that step explicitly.
667 */ 667 */
668 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1, 668 ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
669 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true); 669 !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
670 if (ret == 0) { 670 if (ret == 0) {
671 ret = t4_change_mac(pi->adapter, mb, pi->viid, 671 ret = t4_change_mac(pi->adapter, mb, pi->viid,
672 pi->xact_addr_filt, dev->dev_addr, true, 672 pi->xact_addr_filt, dev->dev_addr, true,
673 true); 673 true);
674 if (ret >= 0) { 674 if (ret >= 0) {
675 pi->xact_addr_filt = ret; 675 pi->xact_addr_filt = ret;
676 ret = 0; 676 ret = 0;
677 } 677 }
678 } 678 }
679 if (ret == 0) 679 if (ret == 0)
680 ret = t4_link_start(pi->adapter, mb, pi->tx_chan, 680 ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
681 &pi->link_cfg); 681 &pi->link_cfg);
682 if (ret == 0) { 682 if (ret == 0) {
683 local_bh_disable(); 683 local_bh_disable();
684 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true, 684 ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
685 true, CXGB4_DCB_ENABLED); 685 true, CXGB4_DCB_ENABLED);
686 local_bh_enable(); 686 local_bh_enable();
687 } 687 }
688 688
689 return ret; 689 return ret;
690 } 690 }
691 691
692 int cxgb4_dcb_enabled(const struct net_device *dev) 692 int cxgb4_dcb_enabled(const struct net_device *dev)
693 { 693 {
694 #ifdef CONFIG_CHELSIO_T4_DCB 694 #ifdef CONFIG_CHELSIO_T4_DCB
695 struct port_info *pi = netdev_priv(dev); 695 struct port_info *pi = netdev_priv(dev);
696 696
697 return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED; 697 return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
698 #else 698 #else
699 return 0; 699 return 0;
700 #endif 700 #endif
701 } 701 }
702 EXPORT_SYMBOL(cxgb4_dcb_enabled); 702 EXPORT_SYMBOL(cxgb4_dcb_enabled);
703 703
704 #ifdef CONFIG_CHELSIO_T4_DCB 704 #ifdef CONFIG_CHELSIO_T4_DCB
705 /* Handle a Data Center Bridging update message from the firmware. */ 705 /* Handle a Data Center Bridging update message from the firmware. */
706 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) 706 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
707 { 707 {
708 int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid)); 708 int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
709 struct net_device *dev = adap->port[port]; 709 struct net_device *dev = adap->port[port];
710 int old_dcb_enabled = cxgb4_dcb_enabled(dev); 710 int old_dcb_enabled = cxgb4_dcb_enabled(dev);
711 int new_dcb_enabled; 711 int new_dcb_enabled;
712 712
713 cxgb4_dcb_handle_fw_update(adap, pcmd); 713 cxgb4_dcb_handle_fw_update(adap, pcmd);
714 new_dcb_enabled = cxgb4_dcb_enabled(dev); 714 new_dcb_enabled = cxgb4_dcb_enabled(dev);
715 715
716 /* If DCB has become enabled or disabled on the port then we're 716 /* If DCB has become enabled or disabled on the port then we're
717 * going to need to set up/tear down DCB Priority parameters for the 717 * going to need to set up/tear down DCB Priority parameters for the
718 * TX Queues associated with the port. 718 * TX Queues associated with the port.
719 */ 719 */
720 if (new_dcb_enabled != old_dcb_enabled) 720 if (new_dcb_enabled != old_dcb_enabled)
721 dcb_tx_queue_prio_enable(dev, new_dcb_enabled); 721 dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
722 } 722 }
723 #endif /* CONFIG_CHELSIO_T4_DCB */ 723 #endif /* CONFIG_CHELSIO_T4_DCB */
724 724
725 /* Clear a filter and release any of its resources that we own. This also 725 /* Clear a filter and release any of its resources that we own. This also
726 * clears the filter's "pending" status. 726 * clears the filter's "pending" status.
727 */ 727 */
728 static void clear_filter(struct adapter *adap, struct filter_entry *f) 728 static void clear_filter(struct adapter *adap, struct filter_entry *f)
729 { 729 {
730 /* If the new or old filter has loopback rewriting rules then we'll 730 /* If the new or old filter has loopback rewriting rules then we'll
731 * need to free any existing Layer Two Table (L2T) entries of the old 731 * need to free any existing Layer Two Table (L2T) entries of the old
732 * filter rule. The firmware will handle freeing up any Source MAC 732 * filter rule. The firmware will handle freeing up any Source MAC
733 * Table (SMT) entries used for rewriting Source MAC Addresses in 733 * Table (SMT) entries used for rewriting Source MAC Addresses in
734 * loopback rules. 734 * loopback rules.
735 */ 735 */
736 if (f->l2t) 736 if (f->l2t)
737 cxgb4_l2t_release(f->l2t); 737 cxgb4_l2t_release(f->l2t);
738 738
739 /* The zeroing of the filter rule below clears the filter valid, 739 /* The zeroing of the filter rule below clears the filter valid,
740 * pending, locked flags, l2t pointer, etc. so it's all we need for 740 * pending, locked flags, l2t pointer, etc. so it's all we need for
741 * this operation. 741 * this operation.
742 */ 742 */
743 memset(f, 0, sizeof(*f)); 743 memset(f, 0, sizeof(*f));
744 } 744 }
745 745
746 /* Handle a filter write/deletion reply. 746 /* Handle a filter write/deletion reply.
747 */ 747 */
748 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) 748 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
749 { 749 {
750 unsigned int idx = GET_TID(rpl); 750 unsigned int idx = GET_TID(rpl);
751 unsigned int nidx = idx - adap->tids.ftid_base; 751 unsigned int nidx = idx - adap->tids.ftid_base;
752 unsigned int ret; 752 unsigned int ret;
753 struct filter_entry *f; 753 struct filter_entry *f;
754 754
755 if (idx >= adap->tids.ftid_base && nidx < 755 if (idx >= adap->tids.ftid_base && nidx <
756 (adap->tids.nftids + adap->tids.nsftids)) { 756 (adap->tids.nftids + adap->tids.nsftids)) {
757 idx = nidx; 757 idx = nidx;
758 ret = GET_TCB_COOKIE(rpl->cookie); 758 ret = GET_TCB_COOKIE(rpl->cookie);
759 f = &adap->tids.ftid_tab[idx]; 759 f = &adap->tids.ftid_tab[idx];
760 760
761 if (ret == FW_FILTER_WR_FLT_DELETED) { 761 if (ret == FW_FILTER_WR_FLT_DELETED) {
762 /* Clear the filter when we get confirmation from the 762 /* Clear the filter when we get confirmation from the
763 * hardware that the filter has been deleted. 763 * hardware that the filter has been deleted.
764 */ 764 */
765 clear_filter(adap, f); 765 clear_filter(adap, f);
766 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) { 766 } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
767 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n", 767 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
768 idx); 768 idx);
769 clear_filter(adap, f); 769 clear_filter(adap, f);
770 } else if (ret == FW_FILTER_WR_FLT_ADDED) { 770 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
771 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff; 771 f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
772 f->pending = 0; /* asynchronous setup completed */ 772 f->pending = 0; /* asynchronous setup completed */
773 f->valid = 1; 773 f->valid = 1;
774 } else { 774 } else {
775 /* Something went wrong. Issue a warning about the 775 /* Something went wrong. Issue a warning about the
776 * problem and clear everything out. 776 * problem and clear everything out.
777 */ 777 */
778 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n", 778 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
779 idx, ret); 779 idx, ret);
780 clear_filter(adap, f); 780 clear_filter(adap, f);
781 } 781 }
782 } 782 }
783 } 783 }
784 784
785 /* Response queue handler for the FW event queue. 785 /* Response queue handler for the FW event queue.
786 */ 786 */
787 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, 787 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
788 const struct pkt_gl *gl) 788 const struct pkt_gl *gl)
789 { 789 {
790 u8 opcode = ((const struct rss_header *)rsp)->opcode; 790 u8 opcode = ((const struct rss_header *)rsp)->opcode;
791 791
792 rsp++; /* skip RSS header */ 792 rsp++; /* skip RSS header */
793 793
794 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG. 794 /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
795 */ 795 */
796 if (unlikely(opcode == CPL_FW4_MSG && 796 if (unlikely(opcode == CPL_FW4_MSG &&
797 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) { 797 ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
798 rsp++; 798 rsp++;
799 opcode = ((const struct rss_header *)rsp)->opcode; 799 opcode = ((const struct rss_header *)rsp)->opcode;
800 rsp++; 800 rsp++;
801 if (opcode != CPL_SGE_EGR_UPDATE) { 801 if (opcode != CPL_SGE_EGR_UPDATE) {
802 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n", 802 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
803 opcode); 803 opcode);
804 goto out; 804 goto out;
805 } 805 }
806 } 806 }
807 807
808 if (likely(opcode == CPL_SGE_EGR_UPDATE)) { 808 if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
809 const struct cpl_sge_egr_update *p = (void *)rsp; 809 const struct cpl_sge_egr_update *p = (void *)rsp;
810 unsigned int qid = EGR_QID(ntohl(p->opcode_qid)); 810 unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
811 struct sge_txq *txq; 811 struct sge_txq *txq;
812 812
813 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; 813 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
814 txq->restarts++; 814 txq->restarts++;
815 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) { 815 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
816 struct sge_eth_txq *eq; 816 struct sge_eth_txq *eq;
817 817
818 eq = container_of(txq, struct sge_eth_txq, q); 818 eq = container_of(txq, struct sge_eth_txq, q);
819 netif_tx_wake_queue(eq->txq); 819 netif_tx_wake_queue(eq->txq);
820 } else { 820 } else {
821 struct sge_ofld_txq *oq; 821 struct sge_ofld_txq *oq;
822 822
823 oq = container_of(txq, struct sge_ofld_txq, q); 823 oq = container_of(txq, struct sge_ofld_txq, q);
824 tasklet_schedule(&oq->qresume_tsk); 824 tasklet_schedule(&oq->qresume_tsk);
825 } 825 }
826 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) { 826 } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
827 const struct cpl_fw6_msg *p = (void *)rsp; 827 const struct cpl_fw6_msg *p = (void *)rsp;
828 828
829 #ifdef CONFIG_CHELSIO_T4_DCB 829 #ifdef CONFIG_CHELSIO_T4_DCB
830 const struct fw_port_cmd *pcmd = (const void *)p->data; 830 const struct fw_port_cmd *pcmd = (const void *)p->data;
831 unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid)); 831 unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
832 unsigned int action = 832 unsigned int action =
833 FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16)); 833 FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
834 834
835 if (cmd == FW_PORT_CMD && 835 if (cmd == FW_PORT_CMD &&
836 action == FW_PORT_ACTION_GET_PORT_INFO) { 836 action == FW_PORT_ACTION_GET_PORT_INFO) {
837 int port = FW_PORT_CMD_PORTID_GET( 837 int port = FW_PORT_CMD_PORTID_GET(
838 be32_to_cpu(pcmd->op_to_portid)); 838 be32_to_cpu(pcmd->op_to_portid));
839 struct net_device *dev = q->adap->port[port]; 839 struct net_device *dev = q->adap->port[port];
840 int state_input = ((pcmd->u.info.dcbxdis_pkd & 840 int state_input = ((pcmd->u.info.dcbxdis_pkd &
841 FW_PORT_CMD_DCBXDIS) 841 FW_PORT_CMD_DCBXDIS)
842 ? CXGB4_DCB_INPUT_FW_DISABLED 842 ? CXGB4_DCB_INPUT_FW_DISABLED
843 : CXGB4_DCB_INPUT_FW_ENABLED); 843 : CXGB4_DCB_INPUT_FW_ENABLED);
844 844
845 cxgb4_dcb_state_fsm(dev, state_input); 845 cxgb4_dcb_state_fsm(dev, state_input);
846 } 846 }
847 847
848 if (cmd == FW_PORT_CMD && 848 if (cmd == FW_PORT_CMD &&
849 action == FW_PORT_ACTION_L2_DCB_CFG) 849 action == FW_PORT_ACTION_L2_DCB_CFG)
850 dcb_rpl(q->adap, pcmd); 850 dcb_rpl(q->adap, pcmd);
851 else 851 else
852 #endif 852 #endif
853 if (p->type == 0) 853 if (p->type == 0)
854 t4_handle_fw_rpl(q->adap, p->data); 854 t4_handle_fw_rpl(q->adap, p->data);
855 } else if (opcode == CPL_L2T_WRITE_RPL) { 855 } else if (opcode == CPL_L2T_WRITE_RPL) {
856 const struct cpl_l2t_write_rpl *p = (void *)rsp; 856 const struct cpl_l2t_write_rpl *p = (void *)rsp;
857 857
858 do_l2t_write_rpl(q->adap, p); 858 do_l2t_write_rpl(q->adap, p);
859 } else if (opcode == CPL_SET_TCB_RPL) { 859 } else if (opcode == CPL_SET_TCB_RPL) {
860 const struct cpl_set_tcb_rpl *p = (void *)rsp; 860 const struct cpl_set_tcb_rpl *p = (void *)rsp;
861 861
862 filter_rpl(q->adap, p); 862 filter_rpl(q->adap, p);
863 } else 863 } else
864 dev_err(q->adap->pdev_dev, 864 dev_err(q->adap->pdev_dev,
865 "unexpected CPL %#x on FW event queue\n", opcode); 865 "unexpected CPL %#x on FW event queue\n", opcode);
866 out: 866 out:
867 return 0; 867 return 0;
868 } 868 }
869 869
870 /** 870 /**
871 * uldrx_handler - response queue handler for ULD queues 871 * uldrx_handler - response queue handler for ULD queues
872 * @q: the response queue that received the packet 872 * @q: the response queue that received the packet
873 * @rsp: the response queue descriptor holding the offload message 873 * @rsp: the response queue descriptor holding the offload message
874 * @gl: the gather list of packet fragments 874 * @gl: the gather list of packet fragments
875 * 875 *
876 * Deliver an ingress offload packet to a ULD. All processing is done by 876 * Deliver an ingress offload packet to a ULD. All processing is done by
877 * the ULD, we just maintain statistics. 877 * the ULD, we just maintain statistics.
878 */ 878 */
879 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, 879 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
880 const struct pkt_gl *gl) 880 const struct pkt_gl *gl)
881 { 881 {
882 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); 882 struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
883 883
884 /* FW can send CPLs encapsulated in a CPL_FW4_MSG. 884 /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
885 */ 885 */
886 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG && 886 if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
887 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL) 887 ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
888 rsp += 2; 888 rsp += 2;
889 889
890 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { 890 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
891 rxq->stats.nomem++; 891 rxq->stats.nomem++;
892 return -1; 892 return -1;
893 } 893 }
894 if (gl == NULL) 894 if (gl == NULL)
895 rxq->stats.imm++; 895 rxq->stats.imm++;
896 else if (gl == CXGB4_MSG_AN) 896 else if (gl == CXGB4_MSG_AN)
897 rxq->stats.an++; 897 rxq->stats.an++;
898 else 898 else
899 rxq->stats.pkts++; 899 rxq->stats.pkts++;
900 return 0; 900 return 0;
901 } 901 }
902 902
903 static void disable_msi(struct adapter *adapter) 903 static void disable_msi(struct adapter *adapter)
904 { 904 {
905 if (adapter->flags & USING_MSIX) { 905 if (adapter->flags & USING_MSIX) {
906 pci_disable_msix(adapter->pdev); 906 pci_disable_msix(adapter->pdev);
907 adapter->flags &= ~USING_MSIX; 907 adapter->flags &= ~USING_MSIX;
908 } else if (adapter->flags & USING_MSI) { 908 } else if (adapter->flags & USING_MSI) {
909 pci_disable_msi(adapter->pdev); 909 pci_disable_msi(adapter->pdev);
910 adapter->flags &= ~USING_MSI; 910 adapter->flags &= ~USING_MSI;
911 } 911 }
912 } 912 }
913 913
914 /* 914 /*
915 * Interrupt handler for non-data events used with MSI-X. 915 * Interrupt handler for non-data events used with MSI-X.
916 */ 916 */
917 static irqreturn_t t4_nondata_intr(int irq, void *cookie) 917 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
918 { 918 {
919 struct adapter *adap = cookie; 919 struct adapter *adap = cookie;
920 920
921 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE)); 921 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
922 if (v & PFSW) { 922 if (v & PFSW) {
923 adap->swintr = 1; 923 adap->swintr = 1;
924 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v); 924 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
925 } 925 }
926 t4_slow_intr_handler(adap); 926 t4_slow_intr_handler(adap);
927 return IRQ_HANDLED; 927 return IRQ_HANDLED;
928 } 928 }
929 929
930 /* 930 /*
931 * Name the MSI-X interrupts. 931 * Name the MSI-X interrupts.
932 */ 932 */
933 static void name_msix_vecs(struct adapter *adap) 933 static void name_msix_vecs(struct adapter *adap)
934 { 934 {
935 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); 935 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
936 936
937 /* non-data interrupts */ 937 /* non-data interrupts */
938 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); 938 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
939 939
940 /* FW events */ 940 /* FW events */
941 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", 941 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
942 adap->port[0]->name); 942 adap->port[0]->name);
943 943
944 /* Ethernet queues */ 944 /* Ethernet queues */
945 for_each_port(adap, j) { 945 for_each_port(adap, j) {
946 struct net_device *d = adap->port[j]; 946 struct net_device *d = adap->port[j];
947 const struct port_info *pi = netdev_priv(d); 947 const struct port_info *pi = netdev_priv(d);
948 948
949 for (i = 0; i < pi->nqsets; i++, msi_idx++) 949 for (i = 0; i < pi->nqsets; i++, msi_idx++)
950 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", 950 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
951 d->name, i); 951 d->name, i);
952 } 952 }
953 953
954 /* offload queues */ 954 /* offload queues */
955 for_each_ofldrxq(&adap->sge, i) 955 for_each_ofldrxq(&adap->sge, i)
956 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d", 956 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
957 adap->port[0]->name, i); 957 adap->port[0]->name, i);
958 958
959 for_each_rdmarxq(&adap->sge, i) 959 for_each_rdmarxq(&adap->sge, i)
960 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", 960 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
961 adap->port[0]->name, i); 961 adap->port[0]->name, i);
962 962
963 for_each_rdmaciq(&adap->sge, i) 963 for_each_rdmaciq(&adap->sge, i)
964 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d", 964 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
965 adap->port[0]->name, i); 965 adap->port[0]->name, i);
966 } 966 }
967 967
968 static int request_msix_queue_irqs(struct adapter *adap) 968 static int request_msix_queue_irqs(struct adapter *adap)
969 { 969 {
970 struct sge *s = &adap->sge; 970 struct sge *s = &adap->sge;
971 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0; 971 int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
972 int msi_index = 2; 972 int msi_index = 2;
973 973
974 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, 974 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
975 adap->msix_info[1].desc, &s->fw_evtq); 975 adap->msix_info[1].desc, &s->fw_evtq);
976 if (err) 976 if (err)
977 return err; 977 return err;
978 978
979 for_each_ethrxq(s, ethqidx) { 979 for_each_ethrxq(s, ethqidx) {
980 err = request_irq(adap->msix_info[msi_index].vec, 980 err = request_irq(adap->msix_info[msi_index].vec,
981 t4_sge_intr_msix, 0, 981 t4_sge_intr_msix, 0,
982 adap->msix_info[msi_index].desc, 982 adap->msix_info[msi_index].desc,
983 &s->ethrxq[ethqidx].rspq); 983 &s->ethrxq[ethqidx].rspq);
984 if (err) 984 if (err)
985 goto unwind; 985 goto unwind;
986 msi_index++; 986 msi_index++;
987 } 987 }
988 for_each_ofldrxq(s, ofldqidx) { 988 for_each_ofldrxq(s, ofldqidx) {
989 err = request_irq(adap->msix_info[msi_index].vec, 989 err = request_irq(adap->msix_info[msi_index].vec,
990 t4_sge_intr_msix, 0, 990 t4_sge_intr_msix, 0,
991 adap->msix_info[msi_index].desc, 991 adap->msix_info[msi_index].desc,
992 &s->ofldrxq[ofldqidx].rspq); 992 &s->ofldrxq[ofldqidx].rspq);
993 if (err) 993 if (err)
994 goto unwind; 994 goto unwind;
995 msi_index++; 995 msi_index++;
996 } 996 }
997 for_each_rdmarxq(s, rdmaqidx) { 997 for_each_rdmarxq(s, rdmaqidx) {
998 err = request_irq(adap->msix_info[msi_index].vec, 998 err = request_irq(adap->msix_info[msi_index].vec,
999 t4_sge_intr_msix, 0, 999 t4_sge_intr_msix, 0,
1000 adap->msix_info[msi_index].desc, 1000 adap->msix_info[msi_index].desc,
1001 &s->rdmarxq[rdmaqidx].rspq); 1001 &s->rdmarxq[rdmaqidx].rspq);
1002 if (err) 1002 if (err)
1003 goto unwind; 1003 goto unwind;
1004 msi_index++; 1004 msi_index++;
1005 } 1005 }
1006 for_each_rdmaciq(s, rdmaciqqidx) { 1006 for_each_rdmaciq(s, rdmaciqqidx) {
1007 err = request_irq(adap->msix_info[msi_index].vec, 1007 err = request_irq(adap->msix_info[msi_index].vec,
1008 t4_sge_intr_msix, 0, 1008 t4_sge_intr_msix, 0,
1009 adap->msix_info[msi_index].desc, 1009 adap->msix_info[msi_index].desc,
1010 &s->rdmaciq[rdmaciqqidx].rspq); 1010 &s->rdmaciq[rdmaciqqidx].rspq);
1011 if (err) 1011 if (err)
1012 goto unwind; 1012 goto unwind;
1013 msi_index++; 1013 msi_index++;
1014 } 1014 }
1015 return 0; 1015 return 0;
1016 1016
1017 unwind: 1017 unwind:
1018 while (--rdmaciqqidx >= 0) 1018 while (--rdmaciqqidx >= 0)
1019 free_irq(adap->msix_info[--msi_index].vec, 1019 free_irq(adap->msix_info[--msi_index].vec,
1020 &s->rdmaciq[rdmaciqqidx].rspq); 1020 &s->rdmaciq[rdmaciqqidx].rspq);
1021 while (--rdmaqidx >= 0) 1021 while (--rdmaqidx >= 0)
1022 free_irq(adap->msix_info[--msi_index].vec, 1022 free_irq(adap->msix_info[--msi_index].vec,
1023 &s->rdmarxq[rdmaqidx].rspq); 1023 &s->rdmarxq[rdmaqidx].rspq);
1024 while (--ofldqidx >= 0) 1024 while (--ofldqidx >= 0)
1025 free_irq(adap->msix_info[--msi_index].vec, 1025 free_irq(adap->msix_info[--msi_index].vec,
1026 &s->ofldrxq[ofldqidx].rspq); 1026 &s->ofldrxq[ofldqidx].rspq);
1027 while (--ethqidx >= 0) 1027 while (--ethqidx >= 0)
1028 free_irq(adap->msix_info[--msi_index].vec, 1028 free_irq(adap->msix_info[--msi_index].vec,
1029 &s->ethrxq[ethqidx].rspq); 1029 &s->ethrxq[ethqidx].rspq);
1030 free_irq(adap->msix_info[1].vec, &s->fw_evtq); 1030 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1031 return err; 1031 return err;
1032 } 1032 }
1033 1033
1034 static void free_msix_queue_irqs(struct adapter *adap) 1034 static void free_msix_queue_irqs(struct adapter *adap)
1035 { 1035 {
1036 int i, msi_index = 2; 1036 int i, msi_index = 2;
1037 struct sge *s = &adap->sge; 1037 struct sge *s = &adap->sge;
1038 1038
1039 free_irq(adap->msix_info[1].vec, &s->fw_evtq); 1039 free_irq(adap->msix_info[1].vec, &s->fw_evtq);
1040 for_each_ethrxq(s, i) 1040 for_each_ethrxq(s, i)
1041 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); 1041 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
1042 for_each_ofldrxq(s, i) 1042 for_each_ofldrxq(s, i)
1043 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); 1043 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
1044 for_each_rdmarxq(s, i) 1044 for_each_rdmarxq(s, i)
1045 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); 1045 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
1046 for_each_rdmaciq(s, i) 1046 for_each_rdmaciq(s, i)
1047 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq); 1047 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
1048 } 1048 }
1049 1049
1050 /** 1050 /**
1051 * write_rss - write the RSS table for a given port 1051 * write_rss - write the RSS table for a given port
1052 * @pi: the port 1052 * @pi: the port
1053 * @queues: array of queue indices for RSS 1053 * @queues: array of queue indices for RSS
1054 * 1054 *
1055 * Sets up the portion of the HW RSS table for the port's VI to distribute 1055 * Sets up the portion of the HW RSS table for the port's VI to distribute
1056 * packets to the Rx queues in @queues. 1056 * packets to the Rx queues in @queues.
1057 */ 1057 */
1058 static int write_rss(const struct port_info *pi, const u16 *queues) 1058 static int write_rss(const struct port_info *pi, const u16 *queues)
1059 { 1059 {
1060 u16 *rss; 1060 u16 *rss;
1061 int i, err; 1061 int i, err;
1062 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset]; 1062 const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
1063 1063
1064 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL); 1064 rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
1065 if (!rss) 1065 if (!rss)
1066 return -ENOMEM; 1066 return -ENOMEM;
1067 1067
1068 /* map the queue indices to queue ids */ 1068 /* map the queue indices to queue ids */
1069 for (i = 0; i < pi->rss_size; i++, queues++) 1069 for (i = 0; i < pi->rss_size; i++, queues++)
1070 rss[i] = q[*queues].rspq.abs_id; 1070 rss[i] = q[*queues].rspq.abs_id;
1071 1071
1072 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0, 1072 err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
1073 pi->rss_size, rss, pi->rss_size); 1073 pi->rss_size, rss, pi->rss_size);
1074 kfree(rss); 1074 kfree(rss);
1075 return err; 1075 return err;
1076 } 1076 }
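For context, a hedged sketch of how a caller might populate a port's indirection table before handing it to write_rss(); the round-robin spread is illustrative, not necessarily the driver's actual policy:

/* Hypothetical sketch: spread the RSS buckets round-robin across the
 * port's nqsets Rx queues, then push the table to hardware.
 */
static int example_init_rss(struct port_info *pi)
{
	int i;

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = i % pi->nqsets;
	return write_rss(pi, pi->rss);
}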
1077 1077
1078 /** 1078 /**
1079 * setup_rss - configure RSS 1079 * setup_rss - configure RSS
1080 * @adap: the adapter 1080 * @adap: the adapter
1081 * 1081 *
1082 * Sets up RSS for each port. 1082 * Sets up RSS for each port.
1083 */ 1083 */
1084 static int setup_rss(struct adapter *adap) 1084 static int setup_rss(struct adapter *adap)
1085 { 1085 {
1086 int i, err; 1086 int i, err;
1087 1087
1088 for_each_port(adap, i) { 1088 for_each_port(adap, i) {
1089 const struct port_info *pi = adap2pinfo(adap, i); 1089 const struct port_info *pi = adap2pinfo(adap, i);
1090 1090
1091 err = write_rss(pi, pi->rss); 1091 err = write_rss(pi, pi->rss);
1092 if (err) 1092 if (err)
1093 return err; 1093 return err;
1094 } 1094 }
1095 return 0; 1095 return 0;
1096 } 1096 }
1097 1097
1098 /* 1098 /*
1099 * Return the channel of the ingress queue with the given qid. 1099 * Return the channel of the ingress queue with the given qid.
1100 */ 1100 */
1101 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid) 1101 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
1102 { 1102 {
1103 qid -= p->ingr_start; 1103 qid -= p->ingr_start;
1104 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan; 1104 return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
1105 } 1105 }
1106 1106
1107 /* 1107 /*
1108 * Wait until all NAPI handlers are descheduled. 1108 * Wait until all NAPI handlers are descheduled.
1109 */ 1109 */
1110 static void quiesce_rx(struct adapter *adap) 1110 static void quiesce_rx(struct adapter *adap)
1111 { 1111 {
1112 int i; 1112 int i;
1113 1113
1114 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { 1114 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1115 struct sge_rspq *q = adap->sge.ingr_map[i]; 1115 struct sge_rspq *q = adap->sge.ingr_map[i];
1116 1116
1117 if (q && q->handler) 1117 if (q && q->handler)
1118 napi_disable(&q->napi); 1118 napi_disable(&q->napi);
1119 } 1119 }
1120 } 1120 }
1121 1121
1122 /* 1122 /*
1123 * Enable NAPI scheduling and interrupt generation for all Rx queues. 1123 * Enable NAPI scheduling and interrupt generation for all Rx queues.
1124 */ 1124 */
1125 static void enable_rx(struct adapter *adap) 1125 static void enable_rx(struct adapter *adap)
1126 { 1126 {
1127 int i; 1127 int i;
1128 1128
1129 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { 1129 for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
1130 struct sge_rspq *q = adap->sge.ingr_map[i]; 1130 struct sge_rspq *q = adap->sge.ingr_map[i];
1131 1131
1132 if (!q) 1132 if (!q)
1133 continue; 1133 continue;
1134 if (q->handler) 1134 if (q->handler)
1135 napi_enable(&q->napi); 1135 napi_enable(&q->napi);
1136 /* 0-increment GTS to start the timer and enable interrupts */ 1136 /* 0-increment GTS to start the timer and enable interrupts */
1137 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), 1137 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
1138 SEINTARM(q->intr_params) | 1138 SEINTARM(q->intr_params) |
1139 INGRESSQID(q->cntxt_id)); 1139 INGRESSQID(q->cntxt_id));
1140 } 1140 }
1141 } 1141 }
1142 1142
1143 /** 1143 /**
1144 * setup_sge_queues - configure SGE Tx/Rx/response queues 1144 * setup_sge_queues - configure SGE Tx/Rx/response queues
1145 * @adap: the adapter 1145 * @adap: the adapter
1146 * 1146 *
1147 * Determines how many sets of SGE queues to use and initializes them. 1147 * Determines how many sets of SGE queues to use and initializes them.
1148 * We support multiple queue sets per port if we have MSI-X, otherwise 1148 * We support multiple queue sets per port if we have MSI-X, otherwise
1149 * just one queue set per port. 1149 * just one queue set per port.
1150 */ 1150 */
1151 static int setup_sge_queues(struct adapter *adap) 1151 static int setup_sge_queues(struct adapter *adap)
1152 { 1152 {
1153 int err, msi_idx, i, j; 1153 int err, msi_idx, i, j;
1154 struct sge *s = &adap->sge; 1154 struct sge *s = &adap->sge;
1155 1155
1156 bitmap_zero(s->starving_fl, MAX_EGRQ); 1156 bitmap_zero(s->starving_fl, MAX_EGRQ);
1157 bitmap_zero(s->txq_maperr, MAX_EGRQ); 1157 bitmap_zero(s->txq_maperr, MAX_EGRQ);
1158 1158
1159 if (adap->flags & USING_MSIX) 1159 if (adap->flags & USING_MSIX)
1160 msi_idx = 1; /* vector 0 is for non-queue interrupts */ 1160 msi_idx = 1; /* vector 0 is for non-queue interrupts */
1161 else { 1161 else {
1162 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, 1162 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
1163 NULL, NULL); 1163 NULL, NULL);
1164 if (err) 1164 if (err)
1165 return err; 1165 return err;
1166 msi_idx = -((int)s->intrq.abs_id + 1); 1166 msi_idx = -((int)s->intrq.abs_id + 1);
1167 } 1167 }
1168 1168
1169 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], 1169 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
1170 msi_idx, NULL, fwevtq_handler); 1170 msi_idx, NULL, fwevtq_handler);
1171 if (err) { 1171 if (err) {
1172 freeout: t4_free_sge_resources(adap); 1172 freeout: t4_free_sge_resources(adap);
1173 return err; 1173 return err;
1174 } 1174 }
1175 1175
1176 for_each_port(adap, i) { 1176 for_each_port(adap, i) {
1177 struct net_device *dev = adap->port[i]; 1177 struct net_device *dev = adap->port[i];
1178 struct port_info *pi = netdev_priv(dev); 1178 struct port_info *pi = netdev_priv(dev);
1179 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; 1179 struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
1180 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; 1180 struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
1181 1181
1182 for (j = 0; j < pi->nqsets; j++, q++) { 1182 for (j = 0; j < pi->nqsets; j++, q++) {
1183 if (msi_idx > 0) 1183 if (msi_idx > 0)
1184 msi_idx++; 1184 msi_idx++;
1185 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, 1185 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
1186 msi_idx, &q->fl, 1186 msi_idx, &q->fl,
1187 t4_ethrx_handler); 1187 t4_ethrx_handler);
1188 if (err) 1188 if (err)
1189 goto freeout; 1189 goto freeout;
1190 q->rspq.idx = j; 1190 q->rspq.idx = j;
1191 memset(&q->stats, 0, sizeof(q->stats)); 1191 memset(&q->stats, 0, sizeof(q->stats));
1192 } 1192 }
1193 for (j = 0; j < pi->nqsets; j++, t++) { 1193 for (j = 0; j < pi->nqsets; j++, t++) {
1194 err = t4_sge_alloc_eth_txq(adap, t, dev, 1194 err = t4_sge_alloc_eth_txq(adap, t, dev,
1195 netdev_get_tx_queue(dev, j), 1195 netdev_get_tx_queue(dev, j),
1196 s->fw_evtq.cntxt_id); 1196 s->fw_evtq.cntxt_id);
1197 if (err) 1197 if (err)
1198 goto freeout; 1198 goto freeout;
1199 } 1199 }
1200 } 1200 }
1201 1201
1202 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ 1202 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
1203 for_each_ofldrxq(s, i) { 1203 for_each_ofldrxq(s, i) {
1204 struct sge_ofld_rxq *q = &s->ofldrxq[i]; 1204 struct sge_ofld_rxq *q = &s->ofldrxq[i];
1205 struct net_device *dev = adap->port[i / j]; 1205 struct net_device *dev = adap->port[i / j];
1206 1206
1207 if (msi_idx > 0) 1207 if (msi_idx > 0)
1208 msi_idx++; 1208 msi_idx++;
1209 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx, 1209 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
1210 q->fl.size ? &q->fl : NULL, 1210 q->fl.size ? &q->fl : NULL,
1211 uldrx_handler); 1211 uldrx_handler);
1212 if (err) 1212 if (err)
1213 goto freeout; 1213 goto freeout;
1214 memset(&q->stats, 0, sizeof(q->stats)); 1214 memset(&q->stats, 0, sizeof(q->stats));
1215 s->ofld_rxq[i] = q->rspq.abs_id; 1215 s->ofld_rxq[i] = q->rspq.abs_id;
1216 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev, 1216 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
1217 s->fw_evtq.cntxt_id); 1217 s->fw_evtq.cntxt_id);
1218 if (err) 1218 if (err)
1219 goto freeout; 1219 goto freeout;
1220 } 1220 }
1221 1221
1222 for_each_rdmarxq(s, i) { 1222 for_each_rdmarxq(s, i) {
1223 struct sge_ofld_rxq *q = &s->rdmarxq[i]; 1223 struct sge_ofld_rxq *q = &s->rdmarxq[i];
1224 1224
1225 if (msi_idx > 0) 1225 if (msi_idx > 0)
1226 msi_idx++; 1226 msi_idx++;
1227 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], 1227 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1228 msi_idx, q->fl.size ? &q->fl : NULL, 1228 msi_idx, q->fl.size ? &q->fl : NULL,
1229 uldrx_handler); 1229 uldrx_handler);
1230 if (err) 1230 if (err)
1231 goto freeout; 1231 goto freeout;
1232 memset(&q->stats, 0, sizeof(q->stats)); 1232 memset(&q->stats, 0, sizeof(q->stats));
1233 s->rdma_rxq[i] = q->rspq.abs_id; 1233 s->rdma_rxq[i] = q->rspq.abs_id;
1234 } 1234 }
1235 1235
1236 for_each_rdmaciq(s, i) { 1236 for_each_rdmaciq(s, i) {
1237 struct sge_ofld_rxq *q = &s->rdmaciq[i]; 1237 struct sge_ofld_rxq *q = &s->rdmaciq[i];
1238 1238
1239 if (msi_idx > 0) 1239 if (msi_idx > 0)
1240 msi_idx++; 1240 msi_idx++;
1241 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i], 1241 err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
1242 msi_idx, q->fl.size ? &q->fl : NULL, 1242 msi_idx, q->fl.size ? &q->fl : NULL,
1243 uldrx_handler); 1243 uldrx_handler);
1244 if (err) 1244 if (err)
1245 goto freeout; 1245 goto freeout;
1246 memset(&q->stats, 0, sizeof(q->stats)); 1246 memset(&q->stats, 0, sizeof(q->stats));
1247 s->rdma_ciq[i] = q->rspq.abs_id; 1247 s->rdma_ciq[i] = q->rspq.abs_id;
1248 } 1248 }
1249 1249
1250 for_each_port(adap, i) { 1250 for_each_port(adap, i) {
1251 /* 1251 /*
1252 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't 1252 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
1253 * have RDMA queues, and that's the right value. 1253 * have RDMA queues, and that's the right value.
1254 */ 1254 */
1255 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], 1255 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
1256 s->fw_evtq.cntxt_id, 1256 s->fw_evtq.cntxt_id,
1257 s->rdmarxq[i].rspq.cntxt_id); 1257 s->rdmarxq[i].rspq.cntxt_id);
1258 if (err) 1258 if (err)
1259 goto freeout; 1259 goto freeout;
1260 } 1260 }
1261 1261
1262 t4_write_reg(adap, is_t4(adap->params.chip) ? 1262 t4_write_reg(adap, is_t4(adap->params.chip) ?
1263 MPS_TRC_RSS_CONTROL : 1263 MPS_TRC_RSS_CONTROL :
1264 MPS_T5_TRC_RSS_CONTROL, 1264 MPS_T5_TRC_RSS_CONTROL,
1265 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | 1265 RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
1266 QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); 1266 QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
1267 return 0; 1267 return 0;
1268 } 1268 }
1269 1269
1270 /* 1270 /*
1271 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc. 1271 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
1272 * The allocated memory is cleared. 1272 * The allocated memory is cleared.
1273 */ 1273 */
1274 void *t4_alloc_mem(size_t size) 1274 void *t4_alloc_mem(size_t size)
1275 { 1275 {
1276 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); 1276 void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
1277 1277
1278 if (!p) 1278 if (!p)
1279 p = vzalloc(size); 1279 p = vzalloc(size);
1280 return p; 1280 return p;
1281 } 1281 }
1282 1282
1283 /* 1283 /*
1284 * Free memory allocated through t4_alloc_mem(). 1284 * Free memory allocated through t4_alloc_mem().
1285 */ 1285 */
1286 static void t4_free_mem(void *addr) 1286 static void t4_free_mem(void *addr)
1287 { 1287 {
1288 if (is_vmalloc_addr(addr)) 1288 if (is_vmalloc_addr(addr))
1289 vfree(addr); 1289 vfree(addr);
1290 else 1290 else
1291 kfree(addr); 1291 kfree(addr);
1292 } 1292 }
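As a usage sketch, the pair is symmetric: allocate with t4_alloc_mem() so large tables can fall back to vmalloc, and always release with t4_free_mem(), which chooses vfree() or kfree() by inspecting the address. The element type and the ntids count below are illustrative only:

/* Hypothetical sketch: a large, zeroed table that may exceed what
 * kmalloc can provide as physically contiguous memory.
 */
struct filter_entry *tab;

tab = t4_alloc_mem(ntids * sizeof(*tab));
if (!tab)
	return -ENOMEM;
/* ... use the table ... */
t4_free_mem(tab);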
1293 1293
1294 /* Send a Work Request to write the filter at a specified index. We construct 1294 /* Send a Work Request to write the filter at a specified index. We construct
1295 * a Firmware Filter Work Request to have the work done and put the indicated 1295 * a Firmware Filter Work Request to have the work done and put the indicated
1296 * filter into "pending" mode which will prevent any further actions against 1296 * filter into "pending" mode which will prevent any further actions against
1297 * it until we get a reply from the firmware on the completion status of the 1297 * it until we get a reply from the firmware on the completion status of the
1298 * request. 1298 * request.
1299 */ 1299 */
1300 static int set_filter_wr(struct adapter *adapter, int fidx) 1300 static int set_filter_wr(struct adapter *adapter, int fidx)
1301 { 1301 {
1302 struct filter_entry *f = &adapter->tids.ftid_tab[fidx]; 1302 struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1303 struct sk_buff *skb; 1303 struct sk_buff *skb;
1304 struct fw_filter_wr *fwr; 1304 struct fw_filter_wr *fwr;
1305 unsigned int ftid; 1305 unsigned int ftid;
1306 1306
1307 /* If the new filter requires loopback Destination MAC and/or VLAN 1307 /* If the new filter requires loopback Destination MAC and/or VLAN
1308 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for 1308 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1309 * the filter. 1309 * the filter.
1310 */ 1310 */
1311 if (f->fs.newdmac || f->fs.newvlan) { 1311 if (f->fs.newdmac || f->fs.newvlan) {
1312 /* allocate L2T entry for new filter */ 1312 /* allocate L2T entry for new filter */
1313 f->l2t = t4_l2t_alloc_switching(adapter->l2t); 1313 f->l2t = t4_l2t_alloc_switching(adapter->l2t);
1314 if (f->l2t == NULL) 1314 if (f->l2t == NULL)
1315 return -EAGAIN; 1315 return -EAGAIN;
1316 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan, 1316 if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
1317 f->fs.eport, f->fs.dmac)) { 1317 f->fs.eport, f->fs.dmac)) {
1318 cxgb4_l2t_release(f->l2t); 1318 cxgb4_l2t_release(f->l2t);
1319 f->l2t = NULL; 1319 f->l2t = NULL;
1320 return -ENOMEM; 1320 return -ENOMEM;
1321 } 1321 }
1322 } 1322 }
1323 1323
1324 ftid = adapter->tids.ftid_base + fidx; 1324 ftid = adapter->tids.ftid_base + fidx;
1325 1325
1326 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL); 1326 skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
1327 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr)); 1327 fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
1328 memset(fwr, 0, sizeof(*fwr)); 1328 memset(fwr, 0, sizeof(*fwr));
1329 1329
1330 /* It would be nice to put most of the following in t4_hw.c but most 1330 /* It would be nice to put most of the following in t4_hw.c but most
1331 * of the work is translating the cxgbtool ch_filter_specification 1331 * of the work is translating the cxgbtool ch_filter_specification
1332 * into the Work Request and the definition of that structure is 1332 * into the Work Request and the definition of that structure is
1333 * currently in cxgbtool.h which isn't appropriate to pull into the 1333 * currently in cxgbtool.h which isn't appropriate to pull into the
1334 * common code. We may eventually try to come up with a more neutral 1334 * common code. We may eventually try to come up with a more neutral
1335 * filter specification structure but for now it's easiest to simply 1335 * filter specification structure but for now it's easiest to simply
1336 * put this fairly direct code in line ... 1336 * put this fairly direct code in line ...
1337 */ 1337 */
1338 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR)); 1338 fwr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
1339 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16)); 1339 fwr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*fwr)/16));
1340 fwr->tid_to_iq = 1340 fwr->tid_to_iq =
1341 htonl(V_FW_FILTER_WR_TID(ftid) | 1341 htonl(V_FW_FILTER_WR_TID(ftid) |
1342 V_FW_FILTER_WR_RQTYPE(f->fs.type) | 1342 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
1343 V_FW_FILTER_WR_NOREPLY(0) | 1343 V_FW_FILTER_WR_NOREPLY(0) |
1344 V_FW_FILTER_WR_IQ(f->fs.iq)); 1344 V_FW_FILTER_WR_IQ(f->fs.iq));
1345 fwr->del_filter_to_l2tix = 1345 fwr->del_filter_to_l2tix =
1346 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) | 1346 htonl(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
1347 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) | 1347 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
1348 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) | 1348 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
1349 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) | 1349 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
1350 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) | 1350 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
1351 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) | 1351 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
1352 V_FW_FILTER_WR_DMAC(f->fs.newdmac) | 1352 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
1353 V_FW_FILTER_WR_SMAC(f->fs.newsmac) | 1353 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
1354 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT || 1354 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
1355 f->fs.newvlan == VLAN_REWRITE) | 1355 f->fs.newvlan == VLAN_REWRITE) |
1356 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE || 1356 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
1357 f->fs.newvlan == VLAN_REWRITE) | 1357 f->fs.newvlan == VLAN_REWRITE) |
1358 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) | 1358 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
1359 V_FW_FILTER_WR_TXCHAN(f->fs.eport) | 1359 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
1360 V_FW_FILTER_WR_PRIO(f->fs.prio) | 1360 V_FW_FILTER_WR_PRIO(f->fs.prio) |
1361 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0)); 1361 V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
1362 fwr->ethtype = htons(f->fs.val.ethtype); 1362 fwr->ethtype = htons(f->fs.val.ethtype);
1363 fwr->ethtypem = htons(f->fs.mask.ethtype); 1363 fwr->ethtypem = htons(f->fs.mask.ethtype);
1364 fwr->frag_to_ovlan_vldm = 1364 fwr->frag_to_ovlan_vldm =
1365 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) | 1365 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
1366 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) | 1366 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
1367 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) | 1367 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
1368 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) | 1368 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
1369 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) | 1369 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
1370 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld)); 1370 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
1371 fwr->smac_sel = 0; 1371 fwr->smac_sel = 0;
1372 fwr->rx_chan_rx_rpl_iq = 1372 fwr->rx_chan_rx_rpl_iq =
1373 htons(V_FW_FILTER_WR_RX_CHAN(0) | 1373 htons(V_FW_FILTER_WR_RX_CHAN(0) |
1374 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id)); 1374 V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
1375 fwr->maci_to_matchtypem = 1375 fwr->maci_to_matchtypem =
1376 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) | 1376 htonl(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
1377 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) | 1377 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
1378 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) | 1378 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
1379 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) | 1379 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
1380 V_FW_FILTER_WR_PORT(f->fs.val.iport) | 1380 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
1381 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) | 1381 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
1382 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) | 1382 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
1383 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype)); 1383 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
1384 fwr->ptcl = f->fs.val.proto; 1384 fwr->ptcl = f->fs.val.proto;
1385 fwr->ptclm = f->fs.mask.proto; 1385 fwr->ptclm = f->fs.mask.proto;
1386 fwr->ttyp = f->fs.val.tos; 1386 fwr->ttyp = f->fs.val.tos;
1387 fwr->ttypm = f->fs.mask.tos; 1387 fwr->ttypm = f->fs.mask.tos;
1388 fwr->ivlan = htons(f->fs.val.ivlan); 1388 fwr->ivlan = htons(f->fs.val.ivlan);
1389 fwr->ivlanm = htons(f->fs.mask.ivlan); 1389 fwr->ivlanm = htons(f->fs.mask.ivlan);
1390 fwr->ovlan = htons(f->fs.val.ovlan); 1390 fwr->ovlan = htons(f->fs.val.ovlan);
1391 fwr->ovlanm = htons(f->fs.mask.ovlan); 1391 fwr->ovlanm = htons(f->fs.mask.ovlan);
1392 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip)); 1392 memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
1393 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm)); 1393 memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
1394 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip)); 1394 memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
1395 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm)); 1395 memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
1396 fwr->lp = htons(f->fs.val.lport); 1396 fwr->lp = htons(f->fs.val.lport);
1397 fwr->lpm = htons(f->fs.mask.lport); 1397 fwr->lpm = htons(f->fs.mask.lport);
1398 fwr->fp = htons(f->fs.val.fport); 1398 fwr->fp = htons(f->fs.val.fport);
1399 fwr->fpm = htons(f->fs.mask.fport); 1399 fwr->fpm = htons(f->fs.mask.fport);
1400 if (f->fs.newsmac) 1400 if (f->fs.newsmac)
1401 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma)); 1401 memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
1402 1402
1403 /* Mark the filter as "pending" and ship off the Filter Work Request. 1403 /* Mark the filter as "pending" and ship off the Filter Work Request.
1404 * When we get the Work Request Reply we'll clear the pending status. 1404 * When we get the Work Request Reply we'll clear the pending status.
1405 */ 1405 */
1406 f->pending = 1; 1406 f->pending = 1;
1407 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3); 1407 set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1408 t4_ofld_send(adapter, skb); 1408 t4_ofld_send(adapter, skb);
1409 return 0; 1409 return 0;
1410 } 1410 }
1411 
1412 /* Delete the filter at a specified index.
1413  */
1414 static int del_filter_wr(struct adapter *adapter, int fidx)
1415 {
1416 	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
1417 	struct sk_buff *skb;
1418 	struct fw_filter_wr *fwr;
1419 	unsigned int len, ftid;
1420 
1421 	len = sizeof(*fwr);
1422 	ftid = adapter->tids.ftid_base + fidx;
1423 
1424 	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
1425 	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
1426 	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
1427 
1428 	/* Mark the filter as "pending" and ship off the Filter Work Request.
1429 	 * When we get the Work Request Reply we'll clear the pending status.
1430 	 */
1431 	f->pending = 1;
1432 	t4_mgmt_tx(adapter, skb);
1433 	return 0;
1434 }
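
A note on the lifecycle here: both set_filter_wr() and del_filter_wr() return as soon as the work request is queued; the hardware has not acted yet, and f->pending stays set until the firmware's reply arrives on the event queue. A minimal sketch of what the reply side has to do, assuming a CPL_SET_TCB_RPL-style handler (the names and error handling are illustrative, not the driver's exact code):

/* Hypothetical reply handler: clear the "pending" mark set above. */
static void example_filter_rpl(struct adapter *adap,
			       const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int ftid = GET_TID(rpl) - adap->tids.ftid_base;
	struct filter_entry *f = &adap->tids.ftid_tab[ftid];

	f->valid = (rpl->status == 0);	/* did the filter stick? */
	f->pending = 0;			/* work request acknowledged */
}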
1435 
1436 static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
1437 			     void *accel_priv, select_queue_fallback_t fallback)
1438 {
1439 	int txq;
1440 
1441 #ifdef CONFIG_CHELSIO_T4_DCB
1442 	/* If Data Center Bridging has been successfully negotiated on this
1443 	 * link then we'll use the skb's priority to map it to a TX Queue.
1444 	 * The skb's priority is determined via the VLAN Tag Priority Code
1445 	 * Point field.
1446 	 */
1447 	if (cxgb4_dcb_enabled(dev)) {
1448 		u16 vlan_tci;
1449 		int err;
1450 
1451 		err = vlan_get_tag(skb, &vlan_tci);
1452 		if (unlikely(err)) {
1453 			if (net_ratelimit())
1454 				netdev_warn(dev,
1455 					    "TX Packet without VLAN Tag on DCB Link\n");
1456 			txq = 0;
1457 		} else {
1458 			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
1459 		}
1460 		return txq;
1461 	}
1462 #endif /* CONFIG_CHELSIO_T4_DCB */
1463 
1464 	if (select_queue) {
1465 		txq = (skb_rx_queue_recorded(skb)
1466 			? skb_get_rx_queue(skb)
1467 			: smp_processor_id());
1468 
1469 		while (unlikely(txq >= dev->real_num_tx_queues))
1470 			txq -= dev->real_num_tx_queues;
1471 
1472 		return txq;
1473 	}
1474 
1475 	return fallback(dev, skb) % dev->real_num_tx_queues;
1476 }
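
The DCB branch above reduces to a mask-and-shift on the 802.1Q tag: the 3-bit Priority Code Point selects the TX queue. A worked example using the stock <linux/if_vlan.h> constants (VLAN_PRIO_MASK is 0xe000, VLAN_PRIO_SHIFT is 13):

	u16 vlan_tci = 0xa005;	/* PCP = 5, DEI = 0, VID = 5 */
	int txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
	/* (0xa005 & 0xe000) >> 13 == 0xa000 >> 13 == 5 */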
1477 
1478 static inline int is_offload(const struct adapter *adap)
1479 {
1480 	return adap->params.offload;
1481 }
1482 
1483 /*
1484  * Implementation of ethtool operations.
1485  */
1486 
1487 static u32 get_msglevel(struct net_device *dev)
1488 {
1489 	return netdev2adap(dev)->msg_enable;
1490 }
1491 
1492 static void set_msglevel(struct net_device *dev, u32 val)
1493 {
1494 	netdev2adap(dev)->msg_enable = val;
1495 }
1496 
1497 static char stats_strings[][ETH_GSTRING_LEN] = {
1498 	"TxOctetsOK ",
1499 	"TxFramesOK ",
1500 	"TxBroadcastFrames ",
1501 	"TxMulticastFrames ",
1502 	"TxUnicastFrames ",
1503 	"TxErrorFrames ",
1504 
1505 	"TxFrames64 ",
1506 	"TxFrames65To127 ",
1507 	"TxFrames128To255 ",
1508 	"TxFrames256To511 ",
1509 	"TxFrames512To1023 ",
1510 	"TxFrames1024To1518 ",
1511 	"TxFrames1519ToMax ",
1512 
1513 	"TxFramesDropped ",
1514 	"TxPauseFrames ",
1515 	"TxPPP0Frames ",
1516 	"TxPPP1Frames ",
1517 	"TxPPP2Frames ",
1518 	"TxPPP3Frames ",
1519 	"TxPPP4Frames ",
1520 	"TxPPP5Frames ",
1521 	"TxPPP6Frames ",
1522 	"TxPPP7Frames ",
1523 
1524 	"RxOctetsOK ",
1525 	"RxFramesOK ",
1526 	"RxBroadcastFrames ",
1527 	"RxMulticastFrames ",
1528 	"RxUnicastFrames ",
1529 
1530 	"RxFramesTooLong ",
1531 	"RxJabberErrors ",
1532 	"RxFCSErrors ",
1533 	"RxLengthErrors ",
1534 	"RxSymbolErrors ",
1535 	"RxRuntFrames ",
1536 
1537 	"RxFrames64 ",
1538 	"RxFrames65To127 ",
1539 	"RxFrames128To255 ",
1540 	"RxFrames256To511 ",
1541 	"RxFrames512To1023 ",
1542 	"RxFrames1024To1518 ",
1543 	"RxFrames1519ToMax ",
1544 
1545 	"RxPauseFrames ",
1546 	"RxPPP0Frames ",
1547 	"RxPPP1Frames ",
1548 	"RxPPP2Frames ",
1549 	"RxPPP3Frames ",
1550 	"RxPPP4Frames ",
1551 	"RxPPP5Frames ",
1552 	"RxPPP6Frames ",
1553 	"RxPPP7Frames ",
1554 
1555 	"RxBG0FramesDropped ",
1556 	"RxBG1FramesDropped ",
1557 	"RxBG2FramesDropped ",
1558 	"RxBG3FramesDropped ",
1559 	"RxBG0FramesTrunc ",
1560 	"RxBG1FramesTrunc ",
1561 	"RxBG2FramesTrunc ",
1562 	"RxBG3FramesTrunc ",
1563 
1564 	"TSO ",
1565 	"TxCsumOffload ",
1566 	"RxCsumGood ",
1567 	"VLANextractions ",
1568 	"VLANinsertions ",
1569 	"GROpackets ",
1570 	"GROmerged ",
1571 	"WriteCoalSuccess ",
1572 	"WriteCoalFail ",
1573 };
1574 
1575 static int get_sset_count(struct net_device *dev, int sset)
1576 {
1577 	switch (sset) {
1578 	case ETH_SS_STATS:
1579 		return ARRAY_SIZE(stats_strings);
1580 	default:
1581 		return -EOPNOTSUPP;
1582 	}
1583 }
1584 
1585 #define T4_REGMAP_SIZE (160 * 1024)
1586 #define T5_REGMAP_SIZE (332 * 1024)
1587 
1588 static int get_regs_len(struct net_device *dev)
1589 {
1590 	struct adapter *adap = netdev2adap(dev);
1591 	if (is_t4(adap->params.chip))
1592 		return T4_REGMAP_SIZE;
1593 	else
1594 		return T5_REGMAP_SIZE;
1595 }
1596 
1597 static int get_eeprom_len(struct net_device *dev)
1598 {
1599 	return EEPROMSIZE;
1600 }
1601 
1602 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1603 {
1604 	struct adapter *adapter = netdev2adap(dev);
1605 
1606 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1607 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1608 	strlcpy(info->bus_info, pci_name(adapter->pdev),
1609 		sizeof(info->bus_info));
1610 
1611 	if (adapter->params.fw_vers)
1612 		snprintf(info->fw_version, sizeof(info->fw_version),
1613 			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1614 			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1615 			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1616 			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1617 			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1618 			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1619 			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1620 			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1621 			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1622 }
1623 
1624 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1625 {
1626 	if (stringset == ETH_SS_STATS)
1627 		memcpy(data, stats_strings, sizeof(stats_strings));
1628 }
1629 
1630 /*
1631  * port stats maintained per queue of the port. They should be in the same
1632  * order as in stats_strings above.
1633  */
1634 struct queue_port_stats {
1635 	u64 tso;
1636 	u64 tx_csum;
1637 	u64 rx_csum;
1638 	u64 vlan_ex;
1639 	u64 vlan_ins;
1640 	u64 gro_pkts;
1641 	u64 gro_merged;
1642 };
1643 
1644 static void collect_sge_port_stats(const struct adapter *adap,
1645 		const struct port_info *p, struct queue_port_stats *s)
1646 {
1647 	int i;
1648 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1649 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1650 
1651 	memset(s, 0, sizeof(*s));
1652 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1653 		s->tso += tx->tso;
1654 		s->tx_csum += tx->tx_cso;
1655 		s->rx_csum += rx->stats.rx_cso;
1656 		s->vlan_ex += rx->stats.vlan_ex;
1657 		s->vlan_ins += tx->vlan_ins;
1658 		s->gro_pkts += rx->stats.lro_pkts;
1659 		s->gro_merged += rx->stats.lro_merged;
1660 	}
1661 }
1662 
1663 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1664 		      u64 *data)
1665 {
1666 	struct port_info *pi = netdev_priv(dev);
1667 	struct adapter *adapter = pi->adapter;
1668 	u32 val1, val2;
1669 
1670 	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1671 
1672 	data += sizeof(struct port_stats) / sizeof(u64);
1673 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1674 	data += sizeof(struct queue_port_stats) / sizeof(u64);
1675 	if (!is_t4(adapter->params.chip)) {
1676 		t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1677 		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1678 		val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1679 		*data = val1 - val2;
1680 		data++;
1681 		*data = val2;
1682 		data++;
1683 	} else {
1684 		memset(data, 0, 2 * sizeof(u64));
1685 		data += 2;	/* skip past the two zeroed write-coalesce slots */
1686 	}
1687 }
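
The u64 buffer filled by get_stats() must line up one-for-one with stats_strings: the port_stats block first, then the seven queue_port_stats counters, then the two write-coalesce slots. A compile-time guard along these lines (a sketch, not present in the driver; BUILD_BUG_ON and ARRAY_SIZE are the stock kernel macros) would pin that invariant down:

static inline void check_ethtool_stats_layout(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(stats_strings) !=
		     sizeof(struct port_stats) / sizeof(u64) +
		     sizeof(struct queue_port_stats) / sizeof(u64) + 2);
}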
1688 
1689 /*
1690  * Return a version number to identify the type of adapter. The scheme is:
1691  * - bits 0..9: chip version
1692  * - bits 10..15: chip revision
1693  * - bits 16..23: register dump version
1694  */
1695 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1696 {
1697 	return CHELSIO_CHIP_VERSION(ap->params.chip) |
1698 		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1699 }
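
Decoding follows directly from the bit layout in the comment; the field widths below are taken from that comment, not from any driver helper:

	unsigned int v = mk_adap_vers(ap);
	unsigned int chip_ver = v & 0x3ff;		/* bits 0..9 */
	unsigned int chip_rev = (v >> 10) & 0x3f;	/* bits 10..15 */
	unsigned int dump_ver = (v >> 16) & 0xff;	/* bits 16..23, currently always 1 */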
1700 
1701 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1702 			   unsigned int end)
1703 {
1704 	u32 *p = buf + start;
1705 
1706 	for ( ; start <= end; start += sizeof(u32))
1707 		*p++ = t4_read_reg(ap, start);
1708 }
1709 
1710 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1711 		     void *buf)
1712 {
1713 	static const unsigned int t4_reg_ranges[] = {
1714 		0x1008, 0x1108,
1715 		0x1180, 0x11b4,
1716 		0x11fc, 0x123c,
1717 		0x1300, 0x173c,
1718 		0x1800, 0x18fc,
1719 		0x3000, 0x30d8,
1720 		0x30e0, 0x5924,
1721 		0x5960, 0x59d4,
1722 		0x5a00, 0x5af8,
1723 		0x6000, 0x6098,
1724 		0x6100, 0x6150,
1725 		0x6200, 0x6208,
1726 		0x6240, 0x6248,
1727 		0x6280, 0x6338,
1728 		0x6370, 0x638c,
1729 		0x6400, 0x643c,
1730 		0x6500, 0x6524,
1731 		0x6a00, 0x6a38,
1732 		0x6a60, 0x6a78,
1733 		0x6b00, 0x6b84,
1734 		0x6bf0, 0x6c84,
1735 		0x6cf0, 0x6d84,
1736 		0x6df0, 0x6e84,
1737 		0x6ef0, 0x6f84,
1738 		0x6ff0, 0x7084,
1739 		0x70f0, 0x7184,
1740 		0x71f0, 0x7284,
1741 		0x72f0, 0x7384,
1742 		0x73f0, 0x7450,
1743 		0x7500, 0x7530,
1744 		0x7600, 0x761c,
1745 		0x7680, 0x76cc,
1746 		0x7700, 0x7798,
1747 		0x77c0, 0x77fc,
1748 		0x7900, 0x79fc,
1749 		0x7b00, 0x7c38,
1750 		0x7d00, 0x7efc,
1751 		0x8dc0, 0x8e1c,
1752 		0x8e30, 0x8e78,
1753 		0x8ea0, 0x8f6c,
1754 		0x8fc0, 0x9074,
1755 		0x90fc, 0x90fc,
1756 		0x9400, 0x9458,
1757 		0x9600, 0x96bc,
1758 		0x9800, 0x9808,
1759 		0x9820, 0x983c,
1760 		0x9850, 0x9864,
1761 		0x9c00, 0x9c6c,
1762 		0x9c80, 0x9cec,
1763 		0x9d00, 0x9d6c,
1764 		0x9d80, 0x9dec,
1765 		0x9e00, 0x9e6c,
1766 		0x9e80, 0x9eec,
1767 		0x9f00, 0x9f6c,
1768 		0x9f80, 0x9fec,
1769 		0xd004, 0xd03c,
1770 		0xdfc0, 0xdfe0,
1771 		0xe000, 0xea7c,
1772 		0xf000, 0x11110,
1773 		0x11118, 0x11190,
1774 		0x19040, 0x1906c,
1775 		0x19078, 0x19080,
1776 		0x1908c, 0x19124,
1777 		0x19150, 0x191b0,
1778 		0x191d0, 0x191e8,
1779 		0x19238, 0x1924c,
1780 		0x193f8, 0x19474,
1781 		0x19490, 0x194f8,
1782 		0x19800, 0x19f30,
1783 		0x1a000, 0x1a06c,
1784 		0x1a0b0, 0x1a120,
1785 		0x1a128, 0x1a138,
1786 		0x1a190, 0x1a1c4,
1787 		0x1a1fc, 0x1a1fc,
1788 		0x1e040, 0x1e04c,
1789 		0x1e284, 0x1e28c,
1790 		0x1e2c0, 0x1e2c0,
1791 		0x1e2e0, 0x1e2e0,
1792 		0x1e300, 0x1e384,
1793 		0x1e3c0, 0x1e3c8,
1794 		0x1e440, 0x1e44c,
1795 		0x1e684, 0x1e68c,
1796 		0x1e6c0, 0x1e6c0,
1797 		0x1e6e0, 0x1e6e0,
1798 		0x1e700, 0x1e784,
1799 		0x1e7c0, 0x1e7c8,
1800 		0x1e840, 0x1e84c,
1801 		0x1ea84, 0x1ea8c,
1802 		0x1eac0, 0x1eac0,
1803 		0x1eae0, 0x1eae0,
1804 		0x1eb00, 0x1eb84,
1805 		0x1ebc0, 0x1ebc8,
1806 		0x1ec40, 0x1ec4c,
1807 		0x1ee84, 0x1ee8c,
1808 		0x1eec0, 0x1eec0,
1809 		0x1eee0, 0x1eee0,
1810 		0x1ef00, 0x1ef84,
1811 		0x1efc0, 0x1efc8,
1812 		0x1f040, 0x1f04c,
1813 		0x1f284, 0x1f28c,
1814 		0x1f2c0, 0x1f2c0,
1815 		0x1f2e0, 0x1f2e0,
1816 		0x1f300, 0x1f384,
1817 		0x1f3c0, 0x1f3c8,
1818 		0x1f440, 0x1f44c,
1819 		0x1f684, 0x1f68c,
1820 		0x1f6c0, 0x1f6c0,
1821 		0x1f6e0, 0x1f6e0,
1822 		0x1f700, 0x1f784,
1823 		0x1f7c0, 0x1f7c8,
1824 		0x1f840, 0x1f84c,
1825 		0x1fa84, 0x1fa8c,
1826 		0x1fac0, 0x1fac0,
1827 		0x1fae0, 0x1fae0,
1828 		0x1fb00, 0x1fb84,
1829 		0x1fbc0, 0x1fbc8,
1830 		0x1fc40, 0x1fc4c,
1831 		0x1fe84, 0x1fe8c,
1832 		0x1fec0, 0x1fec0,
1833 		0x1fee0, 0x1fee0,
1834 		0x1ff00, 0x1ff84,
1835 		0x1ffc0, 0x1ffc8,
1836 		0x20000, 0x2002c,
1837 		0x20100, 0x2013c,
1838 		0x20190, 0x201c8,
1839 		0x20200, 0x20318,
1840 		0x20400, 0x20528,
1841 		0x20540, 0x20614,
1842 		0x21000, 0x21040,
1843 		0x2104c, 0x21060,
1844 		0x210c0, 0x210ec,
1845 		0x21200, 0x21268,
1846 		0x21270, 0x21284,
1847 		0x212fc, 0x21388,
1848 		0x21400, 0x21404,
1849 		0x21500, 0x21518,
1850 		0x2152c, 0x2153c,
1851 		0x21550, 0x21554,
1852 		0x21600, 0x21600,
1853 		0x21608, 0x21628,
1854 		0x21630, 0x2163c,
1855 		0x21700, 0x2171c,
1856 		0x21780, 0x2178c,
1857 		0x21800, 0x21c38,
1858 		0x21c80, 0x21d7c,
1859 		0x21e00, 0x21e04,
1860 		0x22000, 0x2202c,
1861 		0x22100, 0x2213c,
1862 		0x22190, 0x221c8,
1863 		0x22200, 0x22318,
1864 		0x22400, 0x22528,
1865 		0x22540, 0x22614,
1866 		0x23000, 0x23040,
1867 		0x2304c, 0x23060,
1868 		0x230c0, 0x230ec,
1869 		0x23200, 0x23268,
1870 		0x23270, 0x23284,
1871 		0x232fc, 0x23388,
1872 		0x23400, 0x23404,
1873 		0x23500, 0x23518,
1874 		0x2352c, 0x2353c,
1875 		0x23550, 0x23554,
1876 		0x23600, 0x23600,
1877 		0x23608, 0x23628,
1878 		0x23630, 0x2363c,
1879 		0x23700, 0x2371c,
1880 		0x23780, 0x2378c,
1881 		0x23800, 0x23c38,
1882 		0x23c80, 0x23d7c,
1883 		0x23e00, 0x23e04,
1884 		0x24000, 0x2402c,
1885 		0x24100, 0x2413c,
1886 		0x24190, 0x241c8,
1887 		0x24200, 0x24318,
1888 		0x24400, 0x24528,
1889 		0x24540, 0x24614,
1890 		0x25000, 0x25040,
1891 		0x2504c, 0x25060,
1892 		0x250c0, 0x250ec,
1893 		0x25200, 0x25268,
1894 		0x25270, 0x25284,
1895 		0x252fc, 0x25388,
1896 		0x25400, 0x25404,
1897 		0x25500, 0x25518,
1898 		0x2552c, 0x2553c,
1899 		0x25550, 0x25554,
1900 		0x25600, 0x25600,
1901 		0x25608, 0x25628,
1902 		0x25630, 0x2563c,
1903 		0x25700, 0x2571c,
1904 		0x25780, 0x2578c,
1905 		0x25800, 0x25c38,
1906 		0x25c80, 0x25d7c,
1907 		0x25e00, 0x25e04,
1908 		0x26000, 0x2602c,
1909 		0x26100, 0x2613c,
1910 		0x26190, 0x261c8,
1911 		0x26200, 0x26318,
1912 		0x26400, 0x26528,
1913 		0x26540, 0x26614,
1914 		0x27000, 0x27040,
1915 		0x2704c, 0x27060,
1916 		0x270c0, 0x270ec,
1917 		0x27200, 0x27268,
1918 		0x27270, 0x27284,
1919 		0x272fc, 0x27388,
1920 		0x27400, 0x27404,
1921 		0x27500, 0x27518,
1922 		0x2752c, 0x2753c,
1923 		0x27550, 0x27554,
1924 		0x27600, 0x27600,
1925 		0x27608, 0x27628,
1926 		0x27630, 0x2763c,
1927 		0x27700, 0x2771c,
1928 		0x27780, 0x2778c,
1929 		0x27800, 0x27c38,
1930 		0x27c80, 0x27d7c,
1931 		0x27e00, 0x27e04
1932 	};
1933 
1934 	static const unsigned int t5_reg_ranges[] = {
1935 		0x1008, 0x1148,
1936 		0x1180, 0x11b4,
1937 		0x11fc, 0x123c,
1938 		0x1280, 0x173c,
1939 		0x1800, 0x18fc,
1940 		0x3000, 0x3028,
1941 		0x3060, 0x30d8,
1942 		0x30e0, 0x30fc,
1943 		0x3140, 0x357c,
1944 		0x35a8, 0x35cc,
1945 		0x35ec, 0x35ec,
1946 		0x3600, 0x5624,
1947 		0x56cc, 0x575c,
1948 		0x580c, 0x5814,
1949 		0x5890, 0x58bc,
1950 		0x5940, 0x59dc,
1951 		0x59fc, 0x5a18,
1952 		0x5a60, 0x5a9c,
1953 		0x5b9c, 0x5bfc,
1954 		0x6000, 0x6040,
1955 		0x6058, 0x614c,
1956 		0x7700, 0x7798,
1957 		0x77c0, 0x78fc,
1958 		0x7b00, 0x7c54,
1959 		0x7d00, 0x7efc,
1960 		0x8dc0, 0x8de0,
1961 		0x8df8, 0x8e84,
1962 		0x8ea0, 0x8f84,
1963 		0x8fc0, 0x90f8,
1964 		0x9400, 0x9470,
1965 		0x9600, 0x96f4,
1966 		0x9800, 0x9808,
1967 		0x9820, 0x983c,
1968 		0x9850, 0x9864,
1969 		0x9c00, 0x9c6c,
1970 		0x9c80, 0x9cec,
1971 		0x9d00, 0x9d6c,
1972 		0x9d80, 0x9dec,
1973 		0x9e00, 0x9e6c,
1974 		0x9e80, 0x9eec,
1975 		0x9f00, 0x9f6c,
1976 		0x9f80, 0xa020,
1977 		0xd004, 0xd03c,
1978 		0xdfc0, 0xdfe0,
1979 		0xe000, 0x11088,
1980 		0x1109c, 0x11110,
1981 		0x11118, 0x1117c,
1982 		0x11190, 0x11204,
1983 		0x19040, 0x1906c,
1984 		0x19078, 0x19080,
1985 		0x1908c, 0x19124,
1986 		0x19150, 0x191b0,
1987 		0x191d0, 0x191e8,
1988 		0x19238, 0x19290,
1989 		0x193f8, 0x19474,
1990 		0x19490, 0x194cc,
1991 		0x194f0, 0x194f8,
1992 		0x19c00, 0x19c60,
1993 		0x19c94, 0x19e10,
1994 		0x19e50, 0x19f34,
1995 		0x19f40, 0x19f50,
1996 		0x19f90, 0x19fe4,
1997 		0x1a000, 0x1a06c,
1998 		0x1a0b0, 0x1a120,
1999 		0x1a128, 0x1a138,
2000 		0x1a190, 0x1a1c4,
2001 		0x1a1fc, 0x1a1fc,
2002 		0x1e008, 0x1e00c,
2003 		0x1e040, 0x1e04c,
2004 		0x1e284, 0x1e290,
2005 		0x1e2c0, 0x1e2c0,
2006 		0x1e2e0, 0x1e2e0,
2007 		0x1e300, 0x1e384,
2008 		0x1e3c0, 0x1e3c8,
2009 		0x1e408, 0x1e40c,
2010 		0x1e440, 0x1e44c,
2011 		0x1e684, 0x1e690,
2012 		0x1e6c0, 0x1e6c0,
2013 		0x1e6e0, 0x1e6e0,
2014 		0x1e700, 0x1e784,
2015 		0x1e7c0, 0x1e7c8,
2016 		0x1e808, 0x1e80c,
2017 		0x1e840, 0x1e84c,
2018 		0x1ea84, 0x1ea90,
2019 		0x1eac0, 0x1eac0,
2020 		0x1eae0, 0x1eae0,
2021 		0x1eb00, 0x1eb84,
2022 		0x1ebc0, 0x1ebc8,
2023 		0x1ec08, 0x1ec0c,
2024 		0x1ec40, 0x1ec4c,
2025 		0x1ee84, 0x1ee90,
2026 		0x1eec0, 0x1eec0,
2027 		0x1eee0, 0x1eee0,
2028 		0x1ef00, 0x1ef84,
2029 		0x1efc0, 0x1efc8,
2030 		0x1f008, 0x1f00c,
2031 		0x1f040, 0x1f04c,
2032 		0x1f284, 0x1f290,
2033 		0x1f2c0, 0x1f2c0,
2034 		0x1f2e0, 0x1f2e0,
2035 		0x1f300, 0x1f384,
2036 		0x1f3c0, 0x1f3c8,
2037 		0x1f408, 0x1f40c,
2038 		0x1f440, 0x1f44c,
2039 		0x1f684, 0x1f690,
2040 		0x1f6c0, 0x1f6c0,
2041 		0x1f6e0, 0x1f6e0,
2042 		0x1f700, 0x1f784,
2043 		0x1f7c0, 0x1f7c8,
2044 		0x1f808, 0x1f80c,
2045 		0x1f840, 0x1f84c,
2046 		0x1fa84, 0x1fa90,
2047 		0x1fac0, 0x1fac0,
2048 		0x1fae0, 0x1fae0,
2049 		0x1fb00, 0x1fb84,
2050 		0x1fbc0, 0x1fbc8,
2051 		0x1fc08, 0x1fc0c,
2052 		0x1fc40, 0x1fc4c,
2053 		0x1fe84, 0x1fe90,
2054 		0x1fec0, 0x1fec0,
2055 		0x1fee0, 0x1fee0,
2056 		0x1ff00, 0x1ff84,
2057 		0x1ffc0, 0x1ffc8,
2058 		0x30000, 0x30030,
2059 		0x30100, 0x30144,
2060 		0x30190, 0x301d0,
2061 		0x30200, 0x30318,
2062 		0x30400, 0x3052c,
2063 		0x30540, 0x3061c,
2064 		0x30800, 0x30834,
2065 		0x308c0, 0x30908,
2066 		0x30910, 0x309ac,
2067 		0x30a00, 0x30a04,
2068 		0x30a0c, 0x30a2c,
2069 		0x30a44, 0x30a50,
2070 		0x30a74, 0x30c24,
2071 		0x30d08, 0x30d14,
2072 		0x30d1c, 0x30d20,
2073 		0x30d3c, 0x30d50,
2074 		0x31200, 0x3120c,
2075 		0x31220, 0x31220,
2076 		0x31240, 0x31240,
2077 		0x31600, 0x31600,
2078 		0x31608, 0x3160c,
2079 		0x31a00, 0x31a1c,
2080 		0x31e04, 0x31e20,
2081 		0x31e38, 0x31e3c,
2082 		0x31e80, 0x31e80,
2083 		0x31e88, 0x31ea8,
2084 		0x31eb0, 0x31eb4,
2085 		0x31ec8, 0x31ed4,
2086 		0x31fb8, 0x32004,
2087 		0x32208, 0x3223c,
2088 		0x32600, 0x32630,
2089 		0x32a00, 0x32abc,
2090 		0x32b00, 0x32b70,
2091 		0x33000, 0x33048,
2092 		0x33060, 0x3309c,
2093 		0x330f0, 0x33148,
2094 		0x33160, 0x3319c,
2095 		0x331f0, 0x332e4,
2096 		0x332f8, 0x333e4,
2097 		0x333f8, 0x33448,
2098 		0x33460, 0x3349c,
2099 		0x334f0, 0x33548,
2100 		0x33560, 0x3359c,
2101 		0x335f0, 0x336e4,
2102 		0x336f8, 0x337e4,
2103 		0x337f8, 0x337fc,
2104 		0x33814, 0x33814,
2105 		0x3382c, 0x3382c,
2106 		0x33880, 0x3388c,
2107 		0x338e8, 0x338ec,
2108 		0x33900, 0x33948,
2109 		0x33960, 0x3399c,
2110 		0x339f0, 0x33ae4,
2111 		0x33af8, 0x33b10,
2112 		0x33b28, 0x33b28,
2113 		0x33b3c, 0x33b50,
2114 		0x33bf0, 0x33c10,
2115 		0x33c28, 0x33c28,
2116 		0x33c3c, 0x33c50,
2117 		0x33cf0, 0x33cfc,
2118 		0x34000, 0x34030,
2119 		0x34100, 0x34144,
2120 		0x34190, 0x341d0,
2121 		0x34200, 0x34318,
2122 		0x34400, 0x3452c,
2123 		0x34540, 0x3461c,
2124 		0x34800, 0x34834,
2125 		0x348c0, 0x34908,
2126 		0x34910, 0x349ac,
2127 		0x34a00, 0x34a04,
2128 		0x34a0c, 0x34a2c,
2129 		0x34a44, 0x34a50,
2130 		0x34a74, 0x34c24,
2131 		0x34d08, 0x34d14,
2132 		0x34d1c, 0x34d20,
2133 		0x34d3c, 0x34d50,
2134 		0x35200, 0x3520c,
2135 		0x35220, 0x35220,
2136 		0x35240, 0x35240,
2137 		0x35600, 0x35600,
2138 		0x35608, 0x3560c,
2139 		0x35a00, 0x35a1c,
2140 		0x35e04, 0x35e20,
2141 		0x35e38, 0x35e3c,
2142 		0x35e80, 0x35e80,
2143 		0x35e88, 0x35ea8,
2144 		0x35eb0, 0x35eb4,
2145 		0x35ec8, 0x35ed4,
2146 		0x35fb8, 0x36004,
2147 		0x36208, 0x3623c,
2148 		0x36600, 0x36630,
2149 		0x36a00, 0x36abc,
2150 		0x36b00, 0x36b70,
2151 		0x37000, 0x37048,
2152 		0x37060, 0x3709c,
2153 		0x370f0, 0x37148,
2154 		0x37160, 0x3719c,
2155 		0x371f0, 0x372e4,
2156 		0x372f8, 0x373e4,
2157 		0x373f8, 0x37448,
2158 		0x37460, 0x3749c,
2159 		0x374f0, 0x37548,
2160 		0x37560, 0x3759c,
2161 		0x375f0, 0x376e4,
2162 		0x376f8, 0x377e4,
2163 		0x377f8, 0x377fc,
2164 		0x37814, 0x37814,
2165 		0x3782c, 0x3782c,
2166 		0x37880, 0x3788c,
2167 		0x378e8, 0x378ec,
2168 		0x37900, 0x37948,
2169 		0x37960, 0x3799c,
2170 		0x379f0, 0x37ae4,
2171 		0x37af8, 0x37b10,
2172 		0x37b28, 0x37b28,
2173 		0x37b3c, 0x37b50,
2174 		0x37bf0, 0x37c10,
2175 		0x37c28, 0x37c28,
2176 		0x37c3c, 0x37c50,
2177 		0x37cf0, 0x37cfc,
2178 		0x38000, 0x38030,
2179 		0x38100, 0x38144,
2180 		0x38190, 0x381d0,
2181 		0x38200, 0x38318,
2182 		0x38400, 0x3852c,
2183 		0x38540, 0x3861c,
2184 		0x38800, 0x38834,
2185 		0x388c0, 0x38908,
2186 		0x38910, 0x389ac,
2187 		0x38a00, 0x38a04,
2188 		0x38a0c, 0x38a2c,
2189 		0x38a44, 0x38a50,
2190 		0x38a74, 0x38c24,
2191 		0x38d08, 0x38d14,
2192 		0x38d1c, 0x38d20,
2193 		0x38d3c, 0x38d50,
2194 		0x39200, 0x3920c,
2195 		0x39220, 0x39220,
2196 		0x39240, 0x39240,
2197 		0x39600, 0x39600,
2198 		0x39608, 0x3960c,
2199 		0x39a00, 0x39a1c,
2200 		0x39e04, 0x39e20,
2201 		0x39e38, 0x39e3c,
2202 		0x39e80, 0x39e80,
2203 		0x39e88, 0x39ea8,
2204 		0x39eb0, 0x39eb4,
2205 		0x39ec8, 0x39ed4,
2206 		0x39fb8, 0x3a004,
2207 		0x3a208, 0x3a23c,
2208 		0x3a600, 0x3a630,
2209 		0x3aa00, 0x3aabc,
2210 		0x3ab00, 0x3ab70,
2211 		0x3b000, 0x3b048,
2212 		0x3b060, 0x3b09c,
2213 		0x3b0f0, 0x3b148,
2214 		0x3b160, 0x3b19c,
2215 		0x3b1f0, 0x3b2e4,
2216 		0x3b2f8, 0x3b3e4,
2217 		0x3b3f8, 0x3b448,
2218 		0x3b460, 0x3b49c,
2219 		0x3b4f0, 0x3b548,
2220 		0x3b560, 0x3b59c,
2221 		0x3b5f0, 0x3b6e4,
2222 		0x3b6f8, 0x3b7e4,
2223 		0x3b7f8, 0x3b7fc,
2224 		0x3b814, 0x3b814,
2225 		0x3b82c, 0x3b82c,
2226 		0x3b880, 0x3b88c,
2227 		0x3b8e8, 0x3b8ec,
2228 		0x3b900, 0x3b948,
2229 		0x3b960, 0x3b99c,
2230 		0x3b9f0, 0x3bae4,
2231 		0x3baf8, 0x3bb10,
2232 		0x3bb28, 0x3bb28,
2233 		0x3bb3c, 0x3bb50,
2234 		0x3bbf0, 0x3bc10,
2235 		0x3bc28, 0x3bc28,
2236 		0x3bc3c, 0x3bc50,
2237 		0x3bcf0, 0x3bcfc,
2238 		0x3c000, 0x3c030,
2239 		0x3c100, 0x3c144,
2240 		0x3c190, 0x3c1d0,
2241 		0x3c200, 0x3c318,
2242 		0x3c400, 0x3c52c,
2243 		0x3c540, 0x3c61c,
2244 		0x3c800, 0x3c834,
2245 		0x3c8c0, 0x3c908,
2246 		0x3c910, 0x3c9ac,
2247 		0x3ca00, 0x3ca04,
2248 		0x3ca0c, 0x3ca2c,
2249 		0x3ca44, 0x3ca50,
2250 		0x3ca74, 0x3cc24,
2251 		0x3cd08, 0x3cd14,
2252 		0x3cd1c, 0x3cd20,
2253 		0x3cd3c, 0x3cd50,
2254 		0x3d200, 0x3d20c,
2255 		0x3d220, 0x3d220,
2256 		0x3d240, 0x3d240,
2257 		0x3d600, 0x3d600,
2258 		0x3d608, 0x3d60c,
2259 		0x3da00, 0x3da1c,
2260 		0x3de04, 0x3de20,
2261 		0x3de38, 0x3de3c,
2262 		0x3de80, 0x3de80,
2263 		0x3de88, 0x3dea8,
2264 		0x3deb0, 0x3deb4,
2265 		0x3dec8, 0x3ded4,
2266 		0x3dfb8, 0x3e004,
2267 		0x3e208, 0x3e23c,
2268 		0x3e600, 0x3e630,
2269 		0x3ea00, 0x3eabc,
2270 		0x3eb00, 0x3eb70,
2271 		0x3f000, 0x3f048,
2272 		0x3f060, 0x3f09c,
2273 		0x3f0f0, 0x3f148,
2274 		0x3f160, 0x3f19c,
2275 		0x3f1f0, 0x3f2e4,
2276 		0x3f2f8, 0x3f3e4,
2277 		0x3f3f8, 0x3f448,
2278 		0x3f460, 0x3f49c,
2279 		0x3f4f0, 0x3f548,
2280 		0x3f560, 0x3f59c,
2281 		0x3f5f0, 0x3f6e4,
2282 		0x3f6f8, 0x3f7e4,
2283 		0x3f7f8, 0x3f7fc,
2284 		0x3f814, 0x3f814,
2285 		0x3f82c, 0x3f82c,
2286 		0x3f880, 0x3f88c,
2287 		0x3f8e8, 0x3f8ec,
2288 		0x3f900, 0x3f948,
2289 		0x3f960, 0x3f99c,
2290 		0x3f9f0, 0x3fae4,
2291 		0x3faf8, 0x3fb10,
2292 		0x3fb28, 0x3fb28,
2293 		0x3fb3c, 0x3fb50,
2294 		0x3fbf0, 0x3fc10,
2295 		0x3fc28, 0x3fc28,
2296 		0x3fc3c, 0x3fc50,
2297 		0x3fcf0, 0x3fcfc,
2298 		0x40000, 0x4000c,
2299 		0x40040, 0x40068,
2300 		0x40080, 0x40144,
2301 		0x40180, 0x4018c,
2302 		0x40200, 0x40298,
2303 		0x402ac, 0x4033c,
2304 		0x403f8, 0x403fc,
2305 		0x41304, 0x413c4,
2306 		0x41400, 0x4141c,
2307 		0x41480, 0x414d0,
2308 		0x44000, 0x44078,
2309 		0x440c0, 0x44278,
2310 		0x442c0, 0x44478,
2311 		0x444c0, 0x44678,
2312 		0x446c0, 0x44878,
2313 		0x448c0, 0x449fc,
2314 		0x45000, 0x45068,
2315 		0x45080, 0x45084,
2316 		0x450a0, 0x450b0,
2317 		0x45200, 0x45268,
2318 		0x45280, 0x45284,
2319 		0x452a0, 0x452b0,
2320 		0x460c0, 0x460e4,
2321 		0x47000, 0x4708c,
2322 		0x47200, 0x47250,
2323 		0x47400, 0x47420,
2324 		0x47600, 0x47618,
2325 		0x47800, 0x47814,
2326 		0x48000, 0x4800c,
2327 		0x48040, 0x48068,
2328 		0x48080, 0x48144,
2329 		0x48180, 0x4818c,
2330 		0x48200, 0x48298,
2331 		0x482ac, 0x4833c,
2332 		0x483f8, 0x483fc,
2333 		0x49304, 0x493c4,
2334 		0x49400, 0x4941c,
2335 		0x49480, 0x494d0,
2336 		0x4c000, 0x4c078,
2337 		0x4c0c0, 0x4c278,
2338 		0x4c2c0, 0x4c478,
2339 		0x4c4c0, 0x4c678,
2340 		0x4c6c0, 0x4c878,
2341 		0x4c8c0, 0x4c9fc,
2342 		0x4d000, 0x4d068,
2343 		0x4d080, 0x4d084,
2344 		0x4d0a0, 0x4d0b0,
2345 		0x4d200, 0x4d268,
2346 		0x4d280, 0x4d284,
2347 		0x4d2a0, 0x4d2b0,
2348 		0x4e0c0, 0x4e0e4,
2349 		0x4f000, 0x4f08c,
2350 		0x4f200, 0x4f250,
2351 		0x4f400, 0x4f420,
2352 		0x4f600, 0x4f618,
2353 		0x4f800, 0x4f814,
2354 		0x50000, 0x500cc,
2355 		0x50400, 0x50400,
2356 		0x50800, 0x508cc,
2357 		0x50c00, 0x50c00,
2358 		0x51000, 0x5101c,
2359 		0x51300, 0x51308,
2360 	};
2361 
2362 	int i;
2363 	struct adapter *ap = netdev2adap(dev);
2364 	static const unsigned int *reg_ranges;
2365 	int arr_size = 0, buf_size = 0;
2366 
2367 	if (is_t4(ap->params.chip)) {
2368 		reg_ranges = &t4_reg_ranges[0];
2369 		arr_size = ARRAY_SIZE(t4_reg_ranges);
2370 		buf_size = T4_REGMAP_SIZE;
2371 	} else {
2372 		reg_ranges = &t5_reg_ranges[0];
2373 		arr_size = ARRAY_SIZE(t5_reg_ranges);
2374 		buf_size = T5_REGMAP_SIZE;
2375 	}
2376 
2377 	regs->version = mk_adap_vers(ap);
2378 
2379 	memset(buf, 0, buf_size);
2380 	for (i = 0; i < arr_size; i += 2)
2381 		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
2382 }
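
Note that reg_block_dump() stores each 32-bit register at buf + start, so the dump is indexed by register address rather than packed; addresses between ranges stay zero from the memset. A consumer of the blob (what `ethtool -d ethX` ultimately fetches via ETHTOOL_GREGS) can therefore index directly, as in this sketch:

	u32 *regmap = buf;	/* buf_size bytes, address-indexed */
	u32 sge_reg = regmap[0x1008 / sizeof(u32)];	/* register at address 0x1008 */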
2383 
2384 static int restart_autoneg(struct net_device *dev)
2385 {
2386 	struct port_info *p = netdev_priv(dev);
2387 
2388 	if (!netif_running(dev))
2389 		return -EAGAIN;
2390 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
2391 		return -EINVAL;
2392 	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
2393 	return 0;
2394 }
2395 
2396 static int identify_port(struct net_device *dev,
2397 			 enum ethtool_phys_id_state state)
2398 {
2399 	unsigned int val;
2400 	struct adapter *adap = netdev2adap(dev);
2401 
2402 	if (state == ETHTOOL_ID_ACTIVE)
2403 		val = 0xffff;
2404 	else if (state == ETHTOOL_ID_INACTIVE)
2405 		val = 0;
2406 	else
2407 		return -EINVAL;
2408 
2409 	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
2410 }
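
This is the backend of the ethtool port-identify operation: `ethtool -p ethX` (or `--identify`) ends up here, with ETHTOOL_ID_ACTIVE asking the firmware to blink the port LED (0xffff is passed as the blink count, effectively "blink until stopped") and ETHTOOL_ID_INACTIVE restoring normal LED behaviour.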
2411 
2412 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2413 {
2414 	unsigned int v = 0;
2415 
2416 	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
2417 	    type == FW_PORT_TYPE_BT_XAUI) {
2418 		v |= SUPPORTED_TP;
2419 		if (caps & FW_PORT_CAP_SPEED_100M)
2420 			v |= SUPPORTED_100baseT_Full;
2421 		if (caps & FW_PORT_CAP_SPEED_1G)
2422 			v |= SUPPORTED_1000baseT_Full;
2423 		if (caps & FW_PORT_CAP_SPEED_10G)
2424 			v |= SUPPORTED_10000baseT_Full;
2425 	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
2426 		v |= SUPPORTED_Backplane;
2427 		if (caps & FW_PORT_CAP_SPEED_1G)
2428 			v |= SUPPORTED_1000baseKX_Full;
2429 		if (caps & FW_PORT_CAP_SPEED_10G)
2430 			v |= SUPPORTED_10000baseKX4_Full;
2431 	} else if (type == FW_PORT_TYPE_KR)
2432 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
2433 	else if (type == FW_PORT_TYPE_BP_AP)
2434 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2435 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
2436 	else if (type == FW_PORT_TYPE_BP4_AP)
2437 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
2438 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2439 		     SUPPORTED_10000baseKX4_Full;
2440 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
2441 		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
2442 		v |= SUPPORTED_FIBRE;
2443 	else if (type == FW_PORT_TYPE_BP40_BA)
2444 		v |= SUPPORTED_40000baseSR4_Full;
2445 
2446 	if (caps & FW_PORT_CAP_ANEG)
2447 		v |= SUPPORTED_Autoneg;
2448 	return v;
2449 }
2450 
2451 static unsigned int to_fw_linkcaps(unsigned int caps)
2452 {
2453 	unsigned int v = 0;
2454 
2455 	if (caps & ADVERTISED_100baseT_Full)
2456 		v |= FW_PORT_CAP_SPEED_100M;
2457 	if (caps & ADVERTISED_1000baseT_Full)
2458 		v |= FW_PORT_CAP_SPEED_1G;
2459 	if (caps & ADVERTISED_10000baseT_Full)
2460 		v |= FW_PORT_CAP_SPEED_10G;
2461 	if (caps & ADVERTISED_40000baseSR4_Full)
2462 		v |= FW_PORT_CAP_SPEED_40G;
2463 	return v;
2464 }
2465 
2466 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2467 {
2468 	const struct port_info *p = netdev_priv(dev);
2469 
2470 	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
2471 	    p->port_type == FW_PORT_TYPE_BT_XFI ||
2472 	    p->port_type == FW_PORT_TYPE_BT_XAUI)
2473 		cmd->port = PORT_TP;
2474 	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
2475 		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
2476 		cmd->port = PORT_FIBRE;
2477 	else if (p->port_type == FW_PORT_TYPE_SFP ||
2478 		 p->port_type == FW_PORT_TYPE_QSFP_10G ||
2479 		 p->port_type == FW_PORT_TYPE_QSFP) {
2480 		if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
2481 		    p->mod_type == FW_PORT_MOD_TYPE_SR ||
2482 		    p->mod_type == FW_PORT_MOD_TYPE_ER ||
2483 		    p->mod_type == FW_PORT_MOD_TYPE_LRM)
2484 			cmd->port = PORT_FIBRE;
2485 		else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
2486 			 p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
2487 			cmd->port = PORT_DA;
2488 		else
2489 			cmd->port = PORT_OTHER;
2490 	} else
2491 		cmd->port = PORT_OTHER;
2492 
2493 	if (p->mdio_addr >= 0) {
2494 		cmd->phy_address = p->mdio_addr;
2495 		cmd->transceiver = XCVR_EXTERNAL;
2496 		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
2497 			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
2498 	} else {
2499 		cmd->phy_address = 0;  /* not really, but no better option */
2500 		cmd->transceiver = XCVR_INTERNAL;
2501 		cmd->mdio_support = 0;
2502 	}
2503 
2504 	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
2505 	cmd->advertising = from_fw_linkcaps(p->port_type,
2506 					    p->link_cfg.advertising);
2507 	ethtool_cmd_speed_set(cmd,
2508 			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
2509 	cmd->duplex = DUPLEX_FULL;
2510 	cmd->autoneg = p->link_cfg.autoneg;
2511 	cmd->maxtxpkt = 0;
2512 	cmd->maxrxpkt = 0;
2513 	return 0;
2514 }
2515 
2516 static unsigned int speed_to_caps(int speed)
2517 {
2518 	if (speed == 100)
2519 		return FW_PORT_CAP_SPEED_100M;
2520 	if (speed == 1000)
2521 		return FW_PORT_CAP_SPEED_1G;
2522 	if (speed == 10000)
2523 		return FW_PORT_CAP_SPEED_10G;
2524 	if (speed == 40000)
2525 		return FW_PORT_CAP_SPEED_40G;
2526 	return 0;
2527 }
2528 
2529 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2530 {
2531 	unsigned int cap;
2532 	struct port_info *p = netdev_priv(dev);
2533 	struct link_config *lc = &p->link_cfg;
2534 	u32 speed = ethtool_cmd_speed(cmd);
2535 
2536 	if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
2537 		return -EINVAL;
2538 
2539 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
2540 		/*
2541 		 * PHY offers a single speed. See if that's what's
2542 		 * being requested.
2543 		 */
2544 		if (cmd->autoneg == AUTONEG_DISABLE &&
2545 		    (lc->supported & speed_to_caps(speed)))
2546 			return 0;
2547 		return -EINVAL;
2548 	}
2549 
2550 	if (cmd->autoneg == AUTONEG_DISABLE) {
2551 		cap = speed_to_caps(speed);
2552 
2553 		if (!(lc->supported & cap) ||
2554 		    (speed == 1000) ||
2555 		    (speed == 10000) ||
2556 		    (speed == 40000))
2557 			return -EINVAL;
2558 		lc->requested_speed = cap;
2559 		lc->advertising = 0;
2560 	} else {
2561 		cap = to_fw_linkcaps(cmd->advertising);
2562 		if (!(lc->supported & cap))
2563 			return -EINVAL;
2564 		lc->requested_speed = 0;
2565 		lc->advertising = cap | FW_PORT_CAP_ANEG;
2566 	}
2567 	lc->autoneg = cmd->autoneg;
2568 
2569 	if (netif_running(dev))
2570 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2571 				     lc);
2572 	return 0;
2573 }
2574 2574
2575 static void get_pauseparam(struct net_device *dev, 2575 static void get_pauseparam(struct net_device *dev,
2576 struct ethtool_pauseparam *epause) 2576 struct ethtool_pauseparam *epause)
2577 { 2577 {
2578 struct port_info *p = netdev_priv(dev); 2578 struct port_info *p = netdev_priv(dev);
2579 2579
2580 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; 2580 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
2581 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; 2581 epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
2582 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; 2582 epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
2583 } 2583 }
2584 2584
2585 static int set_pauseparam(struct net_device *dev, 2585 static int set_pauseparam(struct net_device *dev,
2586 struct ethtool_pauseparam *epause) 2586 struct ethtool_pauseparam *epause)
2587 { 2587 {
2588 struct port_info *p = netdev_priv(dev); 2588 struct port_info *p = netdev_priv(dev);
2589 struct link_config *lc = &p->link_cfg; 2589 struct link_config *lc = &p->link_cfg;
2590 2590
2591 if (epause->autoneg == AUTONEG_DISABLE) 2591 if (epause->autoneg == AUTONEG_DISABLE)
2592 lc->requested_fc = 0; 2592 lc->requested_fc = 0;
2593 else if (lc->supported & FW_PORT_CAP_ANEG) 2593 else if (lc->supported & FW_PORT_CAP_ANEG)
2594 lc->requested_fc = PAUSE_AUTONEG; 2594 lc->requested_fc = PAUSE_AUTONEG;
2595 else 2595 else
2596 return -EINVAL; 2596 return -EINVAL;
2597 2597
2598 if (epause->rx_pause) 2598 if (epause->rx_pause)
2599 lc->requested_fc |= PAUSE_RX; 2599 lc->requested_fc |= PAUSE_RX;
2600 if (epause->tx_pause) 2600 if (epause->tx_pause)
2601 lc->requested_fc |= PAUSE_TX; 2601 lc->requested_fc |= PAUSE_TX;
2602 if (netif_running(dev)) 2602 if (netif_running(dev))
2603 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan, 2603 return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
2604 lc); 2604 lc);
2605 return 0; 2605 return 0;
2606 } 2606 }
2607 2607
2608 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 2608 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2609 { 2609 {
2610 const struct port_info *pi = netdev_priv(dev); 2610 const struct port_info *pi = netdev_priv(dev);
2611 const struct sge *s = &pi->adapter->sge; 2611 const struct sge *s = &pi->adapter->sge;
2612 2612
2613 e->rx_max_pending = MAX_RX_BUFFERS; 2613 e->rx_max_pending = MAX_RX_BUFFERS;
2614 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES; 2614 e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
2615 e->rx_jumbo_max_pending = 0; 2615 e->rx_jumbo_max_pending = 0;
2616 e->tx_max_pending = MAX_TXQ_ENTRIES; 2616 e->tx_max_pending = MAX_TXQ_ENTRIES;
2617 2617
2618 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8; 2618 e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
2619 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size; 2619 e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
2620 e->rx_jumbo_pending = 0; 2620 e->rx_jumbo_pending = 0;
2621 e->tx_pending = s->ethtxq[pi->first_qset].q.size; 2621 e->tx_pending = s->ethtxq[pi->first_qset].q.size;
2622 } 2622 }
2623 2623
2624 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) 2624 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
2625 { 2625 {
2626 int i; 2626 int i;
2627 const struct port_info *pi = netdev_priv(dev); 2627 const struct port_info *pi = netdev_priv(dev);
2628 struct adapter *adapter = pi->adapter; 2628 struct adapter *adapter = pi->adapter;
2629 struct sge *s = &adapter->sge; 2629 struct sge *s = &adapter->sge;
2630 2630
2631 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending || 2631 if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
2632 e->tx_pending > MAX_TXQ_ENTRIES || 2632 e->tx_pending > MAX_TXQ_ENTRIES ||
2633 e->rx_mini_pending > MAX_RSPQ_ENTRIES || 2633 e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
2634 e->rx_mini_pending < MIN_RSPQ_ENTRIES || 2634 e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
2635 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES) 2635 e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
2636 return -EINVAL; 2636 return -EINVAL;
2637 2637
2638 if (adapter->flags & FULL_INIT_DONE) 2638 if (adapter->flags & FULL_INIT_DONE)
2639 return -EBUSY; 2639 return -EBUSY;
2640 2640
2641 for (i = 0; i < pi->nqsets; ++i) { 2641 for (i = 0; i < pi->nqsets; ++i) {
2642 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; 2642 s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
2643 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8; 2643 s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
2644 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending; 2644 s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
2645 } 2645 }
2646 return 0; 2646 return 0;
2647 } 2647 }
2648 2648
2649 static int closest_timer(const struct sge *s, int time) 2649 static int closest_timer(const struct sge *s, int time)
2650 { 2650 {
2651 int i, delta, match = 0, min_delta = INT_MAX; 2651 int i, delta, match = 0, min_delta = INT_MAX;
2652 2652
2653 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) { 2653 for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
2654 delta = time - s->timer_val[i]; 2654 delta = time - s->timer_val[i];
2655 if (delta < 0) 2655 if (delta < 0)
2656 delta = -delta; 2656 delta = -delta;
2657 if (delta < min_delta) { 2657 if (delta < min_delta) {
2658 min_delta = delta; 2658 min_delta = delta;
2659 match = i; 2659 match = i;
2660 } 2660 }
2661 } 2661 }
2662 return match; 2662 return match;
2663 } 2663 }
2664 2664
2665 static int closest_thres(const struct sge *s, int thres) 2665 static int closest_thres(const struct sge *s, int thres)
2666 { 2666 {
2667 int i, delta, match = 0, min_delta = INT_MAX; 2667 int i, delta, match = 0, min_delta = INT_MAX;
2668 2668
2669 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) { 2669 for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
2670 delta = thres - s->counter_val[i]; 2670 delta = thres - s->counter_val[i];
2671 if (delta < 0) 2671 if (delta < 0)
2672 delta = -delta; 2672 delta = -delta;
2673 if (delta < min_delta) { 2673 if (delta < min_delta) {
2674 min_delta = delta; 2674 min_delta = delta;
2675 match = i; 2675 match = i;
2676 } 2676 }
2677 } 2677 }
2678 return match; 2678 return match;
2679 } 2679 }
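closest_timer() and closest_thres() above are the same linear nearest-value search specialized to two tables; a generic form of the idiom is sketched below (closest_idx() and its parameter type are illustrative, not driver code; the driver presumably keeps two copies because the tables have different element types).

#include <limits.h>

/* Return the index of the table entry closest to val. */
static int closest_idx(const unsigned int *tab, int len, int val)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < len; i++) {
		delta = val - (int)tab[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}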
2680 2680
2681 /* 2681 /*
2682 * Return a queue's interrupt hold-off time in us. 0 means no timer. 2682 * Return a queue's interrupt hold-off time in us. 0 means no timer.
2683 */ 2683 */
2684 static unsigned int qtimer_val(const struct adapter *adap, 2684 static unsigned int qtimer_val(const struct adapter *adap,
2685 const struct sge_rspq *q) 2685 const struct sge_rspq *q)
2686 { 2686 {
2687 unsigned int idx = q->intr_params >> 1; 2687 unsigned int idx = q->intr_params >> 1;
2688 2688
2689 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0; 2689 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
2690 } 2690 }
2691 2691
2692 /** 2692 /**
2693 * set_rspq_intr_params - set a queue's interrupt holdoff parameters 2693 * set_rspq_intr_params - set a queue's interrupt holdoff parameters
2694 * @q: the Rx queue 2694 * @q: the Rx queue
2695 * @us: the hold-off time in us, or 0 to disable timer 2695 * @us: the hold-off time in us, or 0 to disable timer
2696 * @cnt: the hold-off packet count, or 0 to disable counter 2696 * @cnt: the hold-off packet count, or 0 to disable counter
2697 * 2697 *
2698 * Sets an Rx queue's interrupt hold-off time and packet count. At least 2698 * Sets an Rx queue's interrupt hold-off time and packet count. At least
2699 * one of the two needs to be enabled for the queue to generate interrupts. 2699 * one of the two needs to be enabled for the queue to generate interrupts.
2700 */ 2700 */
2701 static int set_rspq_intr_params(struct sge_rspq *q, 2701 static int set_rspq_intr_params(struct sge_rspq *q,
2702 unsigned int us, unsigned int cnt) 2702 unsigned int us, unsigned int cnt)
2703 { 2703 {
2704 struct adapter *adap = q->adap; 2704 struct adapter *adap = q->adap;
2705 2705
2706 if ((us | cnt) == 0) 2706 if ((us | cnt) == 0)
2707 cnt = 1; 2707 cnt = 1;
2708 2708
2709 if (cnt) { 2709 if (cnt) {
2710 int err; 2710 int err;
2711 u32 v, new_idx; 2711 u32 v, new_idx;
2712 2712
2713 new_idx = closest_thres(&adap->sge, cnt); 2713 new_idx = closest_thres(&adap->sge, cnt);
2714 if (q->desc && q->pktcnt_idx != new_idx) { 2714 if (q->desc && q->pktcnt_idx != new_idx) {
2715 /* the queue has already been created, update it */ 2715 /* the queue has already been created, update it */
2716 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 2716 v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
2717 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) | 2717 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
2718 FW_PARAMS_PARAM_YZ(q->cntxt_id); 2718 FW_PARAMS_PARAM_YZ(q->cntxt_id);
2719 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, 2719 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
2720 &new_idx); 2720 &new_idx);
2721 if (err) 2721 if (err)
2722 return err; 2722 return err;
2723 } 2723 }
2724 q->pktcnt_idx = new_idx; 2724 q->pktcnt_idx = new_idx;
2725 } 2725 }
2726 2726
2727 us = us == 0 ? 6 : closest_timer(&adap->sge, us); 2727 us = us == 0 ? 6 : closest_timer(&adap->sge, us);
2728 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0); 2728 q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
2729 return 0; 2729 return 0;
2730 } 2730 }
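qtimer_val() above recovers the timer index as intr_params >> 1, and set_rspq_intr_params() builds the field as QINTR_TIMER_IDX(us) | QINTR_CNT_EN, which implies the counter-enable flag lives in bit 0 with the timer index in the bits above it. A minimal sketch of that packing, with SKETCH_* macros standing in for the driver's QINTR_* macros:

/* Holdoff parameter packing as implied by the code above:
 * bit 0 = counter enable, bits 1.. = timer index.
 */
#define SKETCH_QINTR_CNT_EN       0x1U
#define SKETCH_QINTR_TIMER_IDX(i) ((i) << 1)

static unsigned int pack_intr_params(unsigned int timer_idx, int cnt_en)
{
	return SKETCH_QINTR_TIMER_IDX(timer_idx) |
	       (cnt_en ? SKETCH_QINTR_CNT_EN : 0);
}

static unsigned int unpack_timer_idx(unsigned int intr_params)
{
	return intr_params >> 1;	/* matches qtimer_val() */
}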
2731 2731
2732 /** 2732 /**
2733 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters 2733 * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
2734 * @dev: the network device 2734 * @dev: the network device
2735 * @us: the hold-off time in us, or 0 to disable timer 2735 * @us: the hold-off time in us, or 0 to disable timer
2736 * @cnt: the hold-off packet count, or 0 to disable counter 2736 * @cnt: the hold-off packet count, or 0 to disable counter
2737 * 2737 *
2738 * Set the RX interrupt hold-off parameters for a network device. 2738 * Set the RX interrupt hold-off parameters for a network device.
2739 */ 2739 */
2740 static int set_rx_intr_params(struct net_device *dev, 2740 static int set_rx_intr_params(struct net_device *dev,
2741 unsigned int us, unsigned int cnt) 2741 unsigned int us, unsigned int cnt)
2742 { 2742 {
2743 int i, err; 2743 int i, err;
2744 struct port_info *pi = netdev_priv(dev); 2744 struct port_info *pi = netdev_priv(dev);
2745 struct adapter *adap = pi->adapter; 2745 struct adapter *adap = pi->adapter;
2746 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; 2746 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2747 2747
2748 for (i = 0; i < pi->nqsets; i++, q++) { 2748 for (i = 0; i < pi->nqsets; i++, q++) {
2749 err = set_rspq_intr_params(&q->rspq, us, cnt); 2749 err = set_rspq_intr_params(&q->rspq, us, cnt);
2750 if (err) 2750 if (err)
2751 return err; 2751 return err;
2752 } 2752 }
2753 return 0; 2753 return 0;
2754 } 2754 }
2755 2755
2756 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx) 2756 static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
2757 { 2757 {
2758 int i; 2758 int i;
2759 struct port_info *pi = netdev_priv(dev); 2759 struct port_info *pi = netdev_priv(dev);
2760 struct adapter *adap = pi->adapter; 2760 struct adapter *adap = pi->adapter;
2761 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; 2761 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2762 2762
2763 for (i = 0; i < pi->nqsets; i++, q++) 2763 for (i = 0; i < pi->nqsets; i++, q++)
2764 q->rspq.adaptive_rx = adaptive_rx; 2764 q->rspq.adaptive_rx = adaptive_rx;
2765 2765
2766 return 0; 2766 return 0;
2767 } 2767 }
2768 2768
2769 static int get_adaptive_rx_setting(struct net_device *dev) 2769 static int get_adaptive_rx_setting(struct net_device *dev)
2770 { 2770 {
2771 struct port_info *pi = netdev_priv(dev); 2771 struct port_info *pi = netdev_priv(dev);
2772 struct adapter *adap = pi->adapter; 2772 struct adapter *adap = pi->adapter;
2773 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; 2773 struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
2774 2774
2775 return q->rspq.adaptive_rx; 2775 return q->rspq.adaptive_rx;
2776 } 2776 }
2777 2777
2778 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2778 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2779 { 2779 {
2780 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce); 2780 set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
2781 return set_rx_intr_params(dev, c->rx_coalesce_usecs, 2781 return set_rx_intr_params(dev, c->rx_coalesce_usecs,
2782 c->rx_max_coalesced_frames); 2782 c->rx_max_coalesced_frames);
2783 } 2783 }
2784 2784
2785 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 2785 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
2786 { 2786 {
2787 const struct port_info *pi = netdev_priv(dev); 2787 const struct port_info *pi = netdev_priv(dev);
2788 const struct adapter *adap = pi->adapter; 2788 const struct adapter *adap = pi->adapter;
2789 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq; 2789 const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
2790 2790
2791 c->rx_coalesce_usecs = qtimer_val(adap, rq); 2791 c->rx_coalesce_usecs = qtimer_val(adap, rq);
2792 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ? 2792 c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
2793 adap->sge.counter_val[rq->pktcnt_idx] : 0; 2793 adap->sge.counter_val[rq->pktcnt_idx] : 0;
2794 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev); 2794 c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
2795 return 0; 2795 return 0;
2796 } 2796 }
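For orientation, get_coalesce() and set_coalesce() above back the standard ethtool interrupt-coalescing knobs; the correspondence below is a reading of this code rather than driver documentation:

/* ethtool -C ethN rx-usecs U     -> set_rx_intr_params(dev, U, frames)
 * ethtool -C ethN rx-frames F    -> set_rx_intr_params(dev, usecs, F)
 * ethtool -C ethN adaptive-rx on -> set_adaptive_rx_setting(dev, 1)
 * (ethN is a placeholder device name)
 */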
2797 2797
2798 /** 2798 /**
2799 * eeprom_ptov - translate a physical EEPROM address to virtual 2799 * eeprom_ptov - translate a physical EEPROM address to virtual
2800 * @phys_addr: the physical EEPROM address 2800 * @phys_addr: the physical EEPROM address
2801 * @fn: the PCI function number 2801 * @fn: the PCI function number
2802 * @sz: size of function-specific area 2802 * @sz: size of function-specific area
2803 * 2803 *
2804 * Translate a physical EEPROM address to virtual. The first 1K is 2804 * Translate a physical EEPROM address to virtual. The first 1K is
2805 * accessed through virtual addresses starting at 31K, while the rest is 2805 * accessed through virtual addresses starting at 31K, while the rest is
2806 * accessed through virtual addresses starting at 0. 2806 * accessed through virtual addresses starting at 0.
2807 * 2807 *
2808 * The mapping is as follows: 2808 * The mapping is as follows:
2809 * [0..1K) -> [31K..32K) 2809 * [0..1K) -> [31K..32K)
2810 * [1K..1K+A) -> [31K-A..31K) 2810 * [1K..1K+A) -> [31K-A..31K)
2811 * [1K+A..ES) -> [0..ES-A-1K) 2811 * [1K+A..ES) -> [0..ES-A-1K)
2812 * 2812 *
2813 * where A = @fn * @sz, and ES = EEPROM size. 2813 * where A = @fn * @sz, and ES = EEPROM size.
2814 */ 2814 */
2815 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) 2815 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2816 { 2816 {
2817 fn *= sz; 2817 fn *= sz;
2818 if (phys_addr < 1024) 2818 if (phys_addr < 1024)
2819 return phys_addr + (31 << 10); 2819 return phys_addr + (31 << 10);
2820 if (phys_addr < 1024 + fn) 2820 if (phys_addr < 1024 + fn)
2821 return 31744 - fn + phys_addr - 1024; 2821 return 31744 - fn + phys_addr - 1024;
2822 if (phys_addr < EEPROMSIZE) 2822 if (phys_addr < EEPROMSIZE)
2823 return phys_addr - 1024 - fn; 2823 return phys_addr - 1024 - fn;
2824 return -EINVAL; 2824 return -EINVAL;
2825 } 2825 }
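A standalone check of the translation above: ptov() copies eeprom_ptov() using plain types, and the constants 1024 for EEPROMPFSIZE and 17408 for EEPROMSIZE are assumptions chosen for illustration.

#include <assert.h>

static int ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < 17408)			/* EEPROMSIZE stand-in */
		return phys_addr - 1024 - fn;
	return -1;
}

int main(void)
{
	assert(ptov(0, 1, 1024)    == 31744);	/* [0..1K)    -> [31K..32K)   */
	assert(ptov(1024, 1, 1024) == 30720);	/* [1K..1K+A) -> [31K-A..31K) */
	assert(ptov(2048, 1, 1024) == 0);	/* [1K+A..ES) -> [0..ES-A-1K) */
	return 0;
}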
2826 2826
2827 /* 2827 /*
2828 * The next two routines implement eeprom read/write from physical addresses. 2828 * The next two routines implement eeprom read/write from physical addresses.
2829 */ 2829 */
2830 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) 2830 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
2831 { 2831 {
2832 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); 2832 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2833 2833
2834 if (vaddr >= 0) 2834 if (vaddr >= 0)
2835 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v); 2835 vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
2836 return vaddr < 0 ? vaddr : 0; 2836 return vaddr < 0 ? vaddr : 0;
2837 } 2837 }
2838 2838
2839 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) 2839 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
2840 { 2840 {
2841 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE); 2841 int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
2842 2842
2843 if (vaddr >= 0) 2843 if (vaddr >= 0)
2844 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v); 2844 vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
2845 return vaddr < 0 ? vaddr : 0; 2845 return vaddr < 0 ? vaddr : 0;
2846 } 2846 }
2847 2847
2848 #define EEPROM_MAGIC 0x38E2F10C 2848 #define EEPROM_MAGIC 0x38E2F10C
2849 2849
2850 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e, 2850 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
2851 u8 *data) 2851 u8 *data)
2852 { 2852 {
2853 int i, err = 0; 2853 int i, err = 0;
2854 struct adapter *adapter = netdev2adap(dev); 2854 struct adapter *adapter = netdev2adap(dev);
2855 2855
2856 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL); 2856 u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
2857 if (!buf) 2857 if (!buf)
2858 return -ENOMEM; 2858 return -ENOMEM;
2859 2859
2860 e->magic = EEPROM_MAGIC; 2860 e->magic = EEPROM_MAGIC;
2861 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4) 2861 for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
2862 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); 2862 err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
2863 2863
2864 if (!err) 2864 if (!err)
2865 memcpy(data, buf + e->offset, e->len); 2865 memcpy(data, buf + e->offset, e->len);
2866 kfree(buf); 2866 kfree(buf);
2867 return err; 2867 return err;
2868 } 2868 }
2869 2869
2870 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, 2870 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
2871 u8 *data) 2871 u8 *data)
2872 { 2872 {
2873 u8 *buf; 2873 u8 *buf;
2874 int err = 0; 2874 int err = 0;
2875 u32 aligned_offset, aligned_len, *p; 2875 u32 aligned_offset, aligned_len, *p;
2876 struct adapter *adapter = netdev2adap(dev); 2876 struct adapter *adapter = netdev2adap(dev);
2877 2877
2878 if (eeprom->magic != EEPROM_MAGIC) 2878 if (eeprom->magic != EEPROM_MAGIC)
2879 return -EINVAL; 2879 return -EINVAL;
2880 2880
2881 aligned_offset = eeprom->offset & ~3; 2881 aligned_offset = eeprom->offset & ~3;
2882 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3; 2882 aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
2883 2883
2884 if (adapter->fn > 0) { 2884 if (adapter->fn > 0) {
2885 u32 start = 1024 + adapter->fn * EEPROMPFSIZE; 2885 u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
2886 2886
2887 if (aligned_offset < start || 2887 if (aligned_offset < start ||
2888 aligned_offset + aligned_len > start + EEPROMPFSIZE) 2888 aligned_offset + aligned_len > start + EEPROMPFSIZE)
2889 return -EPERM; 2889 return -EPERM;
2890 } 2890 }
2891 2891
2892 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) { 2892 if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
2893 /* 2893 /*
2894 * RMW possibly needed for first or last words. 2894 * RMW possibly needed for first or last words.
2895 */ 2895 */
2896 buf = kmalloc(aligned_len, GFP_KERNEL); 2896 buf = kmalloc(aligned_len, GFP_KERNEL);
2897 if (!buf) 2897 if (!buf)
2898 return -ENOMEM; 2898 return -ENOMEM;
2899 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); 2899 err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
2900 if (!err && aligned_len > 4) 2900 if (!err && aligned_len > 4)
2901 err = eeprom_rd_phys(adapter, 2901 err = eeprom_rd_phys(adapter,
2902 aligned_offset + aligned_len - 4, 2902 aligned_offset + aligned_len - 4,
2903 (u32 *)&buf[aligned_len - 4]); 2903 (u32 *)&buf[aligned_len - 4]);
2904 if (err) 2904 if (err)
2905 goto out; 2905 goto out;
2906 memcpy(buf + (eeprom->offset & 3), data, eeprom->len); 2906 memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
2907 } else 2907 } else
2908 buf = data; 2908 buf = data;
2909 2909
2910 err = t4_seeprom_wp(adapter, false); 2910 err = t4_seeprom_wp(adapter, false);
2911 if (err) 2911 if (err)
2912 goto out; 2912 goto out;
2913 2913
2914 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { 2914 for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
2915 err = eeprom_wr_phys(adapter, aligned_offset, *p); 2915 err = eeprom_wr_phys(adapter, aligned_offset, *p);
2916 aligned_offset += 4; 2916 aligned_offset += 4;
2917 } 2917 }
2918 2918
2919 if (!err) 2919 if (!err)
2920 err = t4_seeprom_wp(adapter, true); 2920 err = t4_seeprom_wp(adapter, true);
2921 out: 2921 out:
2922 if (buf != data) 2922 if (buf != data)
2923 kfree(buf); 2923 kfree(buf);
2924 return err; 2924 return err;
2925 } 2925 }
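The alignment arithmetic at the top of set_eeprom() is what triggers the read-modify-write; a worked example:

/* A write of len = 5 bytes at offset = 6 becomes
 *
 *   aligned_offset = 6 & ~3                 = 4
 *   aligned_len    = (5 + (6 & 3) + 3) & ~3 = 8
 *
 * i.e. two whole 32-bit words covering bytes [4..12), with the
 * caller's data copied in at buf + (6 & 3) = buf + 2, hence the
 * read back of the first and last words before writing.
 */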
2926 2926
2927 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef) 2927 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
2928 { 2928 {
2929 int ret; 2929 int ret;
2930 const struct firmware *fw; 2930 const struct firmware *fw;
2931 struct adapter *adap = netdev2adap(netdev); 2931 struct adapter *adap = netdev2adap(netdev);
2932 unsigned int mbox = FW_PCIE_FW_MASTER_MASK + 1;
2932 2933
2933 ef->data[sizeof(ef->data) - 1] = '\0'; 2934 ef->data[sizeof(ef->data) - 1] = '\0';
2934 ret = request_firmware(&fw, ef->data, adap->pdev_dev); 2935 ret = request_firmware(&fw, ef->data, adap->pdev_dev);
2935 if (ret < 0) 2936 if (ret < 0)
2936 return ret; 2937 return ret;
2937 2938
2938 ret = t4_load_fw(adap, fw->data, fw->size); 2939 /* If the adapter has been fully initialized then we'll go ahead and
2940 * try to get the firmware's cooperation in upgrading to the new
2941 * firmware image otherwise we'll try to do the entire job from the
2942 * host ... and we always "force" the operation in this path.
2943 */
2944 if (adap->flags & FULL_INIT_DONE)
2945 mbox = adap->mbox;
2946
2947 ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
2939 release_firmware(fw); 2948 release_firmware(fw);
2940 if (!ret) 2949 if (!ret)
2941 dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data); 2950 dev_info(adap->pdev_dev, "loaded firmware %s,"
2951 " reload cxgb4 driver\n", ef->data);
2942 return ret; 2952 return ret;
2943 } 2953 }
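From user space this path is reached through ethtool's flash operation (for example ethtool -f ethN t4fw.bin, where the device and image names are placeholders), after which the new dev_info() message asks the operator to reload the cxgb4 driver. The mailbox choice made by the new code can be read as a small helper; this is a sketch of the logic above, not driver code:

/* A fully initialized adapter gets a real mailbox so t4_fw_upgrade()
 * can coordinate with the running firmware; otherwise
 * FW_PCIE_FW_MASTER_MASK + 1 is an out-of-range value that appears
 * to select the purely host-driven path.  The final argument to
 * t4_fw_upgrade() in the diff is the "force" flag, always set here.
 */
static unsigned int fw_upgrade_mbox(const struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE)
		return adap->mbox;
	return FW_PCIE_FW_MASTER_MASK + 1;
}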
2944 2954
2945 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC) 2955 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
2946 #define BCAST_CRC 0xa0ccc1a6 2956 #define BCAST_CRC 0xa0ccc1a6
2947 2957
2948 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2958 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2949 { 2959 {
2950 wol->supported = WAKE_BCAST | WAKE_MAGIC; 2960 wol->supported = WAKE_BCAST | WAKE_MAGIC;
2951 wol->wolopts = netdev2adap(dev)->wol; 2961 wol->wolopts = netdev2adap(dev)->wol;
2952 memset(&wol->sopass, 0, sizeof(wol->sopass)); 2962 memset(&wol->sopass, 0, sizeof(wol->sopass));
2953 } 2963 }
2954 2964
2955 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 2965 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2956 { 2966 {
2957 int err = 0; 2967 int err = 0;
2958 struct port_info *pi = netdev_priv(dev); 2968 struct port_info *pi = netdev_priv(dev);
2959 2969
2960 if (wol->wolopts & ~WOL_SUPPORTED) 2970 if (wol->wolopts & ~WOL_SUPPORTED)
2961 return -EINVAL; 2971 return -EINVAL;
2962 t4_wol_magic_enable(pi->adapter, pi->tx_chan, 2972 t4_wol_magic_enable(pi->adapter, pi->tx_chan,
2963 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL); 2973 (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
2964 if (wol->wolopts & WAKE_BCAST) { 2974 if (wol->wolopts & WAKE_BCAST) {
2965 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL, 2975 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
2966 ~0ULL, 0, false); 2976 ~0ULL, 0, false);
2967 if (!err) 2977 if (!err)
2968 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1, 2978 err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
2969 ~6ULL, ~0ULL, BCAST_CRC, true); 2979 ~6ULL, ~0ULL, BCAST_CRC, true);
2970 } else 2980 } else
2971 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false); 2981 t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
2972 return err; 2982 return err;
2973 } 2983 }
2974 2984
2975 static int cxgb_set_features(struct net_device *dev, netdev_features_t features) 2985 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
2976 { 2986 {
2977 const struct port_info *pi = netdev_priv(dev); 2987 const struct port_info *pi = netdev_priv(dev);
2978 netdev_features_t changed = dev->features ^ features; 2988 netdev_features_t changed = dev->features ^ features;
2979 int err; 2989 int err;
2980 2990
2981 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX)) 2991 if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
2982 return 0; 2992 return 0;
2983 2993
2984 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1, 2994 err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
2985 -1, -1, -1, 2995 -1, -1, -1,
2986 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true); 2996 !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
2987 if (unlikely(err)) 2997 if (unlikely(err))
2988 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX; 2998 dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
2989 return err; 2999 return err;
2990 } 3000 }
2991 3001
2992 static u32 get_rss_table_size(struct net_device *dev) 3002 static u32 get_rss_table_size(struct net_device *dev)
2993 { 3003 {
2994 const struct port_info *pi = netdev_priv(dev); 3004 const struct port_info *pi = netdev_priv(dev);
2995 3005
2996 return pi->rss_size; 3006 return pi->rss_size;
2997 } 3007 }
2998 3008
2999 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key) 3009 static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
3000 { 3010 {
3001 const struct port_info *pi = netdev_priv(dev); 3011 const struct port_info *pi = netdev_priv(dev);
3002 unsigned int n = pi->rss_size; 3012 unsigned int n = pi->rss_size;
3003 3013
3004 while (n--) 3014 while (n--)
3005 p[n] = pi->rss[n]; 3015 p[n] = pi->rss[n];
3006 return 0; 3016 return 0;
3007 } 3017 }
3008 3018
3009 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key) 3019 static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
3010 { 3020 {
3011 unsigned int i; 3021 unsigned int i;
3012 struct port_info *pi = netdev_priv(dev); 3022 struct port_info *pi = netdev_priv(dev);
3013 3023
3014 for (i = 0; i < pi->rss_size; i++) 3024 for (i = 0; i < pi->rss_size; i++)
3015 pi->rss[i] = p[i]; 3025 pi->rss[i] = p[i];
3016 if (pi->adapter->flags & FULL_INIT_DONE) 3026 if (pi->adapter->flags & FULL_INIT_DONE)
3017 return write_rss(pi, pi->rss); 3027 return write_rss(pi, pi->rss);
3018 return 0; 3028 return 0;
3019 } 3029 }
3020 3030
3021 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, 3031 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
3022 u32 *rules) 3032 u32 *rules)
3023 { 3033 {
3024 const struct port_info *pi = netdev_priv(dev); 3034 const struct port_info *pi = netdev_priv(dev);
3025 3035
3026 switch (info->cmd) { 3036 switch (info->cmd) {
3027 case ETHTOOL_GRXFH: { 3037 case ETHTOOL_GRXFH: {
3028 unsigned int v = pi->rss_mode; 3038 unsigned int v = pi->rss_mode;
3029 3039
3030 info->data = 0; 3040 info->data = 0;
3031 switch (info->flow_type) { 3041 switch (info->flow_type) {
3032 case TCP_V4_FLOW: 3042 case TCP_V4_FLOW:
3033 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) 3043 if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
3034 info->data = RXH_IP_SRC | RXH_IP_DST | 3044 info->data = RXH_IP_SRC | RXH_IP_DST |
3035 RXH_L4_B_0_1 | RXH_L4_B_2_3; 3045 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3036 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 3046 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3037 info->data = RXH_IP_SRC | RXH_IP_DST; 3047 info->data = RXH_IP_SRC | RXH_IP_DST;
3038 break; 3048 break;
3039 case UDP_V4_FLOW: 3049 case UDP_V4_FLOW:
3040 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) && 3050 if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
3041 (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) 3051 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3042 info->data = RXH_IP_SRC | RXH_IP_DST | 3052 info->data = RXH_IP_SRC | RXH_IP_DST |
3043 RXH_L4_B_0_1 | RXH_L4_B_2_3; 3053 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3044 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 3054 else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3045 info->data = RXH_IP_SRC | RXH_IP_DST; 3055 info->data = RXH_IP_SRC | RXH_IP_DST;
3046 break; 3056 break;
3047 case SCTP_V4_FLOW: 3057 case SCTP_V4_FLOW:
3048 case AH_ESP_V4_FLOW: 3058 case AH_ESP_V4_FLOW:
3049 case IPV4_FLOW: 3059 case IPV4_FLOW:
3050 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN) 3060 if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
3051 info->data = RXH_IP_SRC | RXH_IP_DST; 3061 info->data = RXH_IP_SRC | RXH_IP_DST;
3052 break; 3062 break;
3053 case TCP_V6_FLOW: 3063 case TCP_V6_FLOW:
3054 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) 3064 if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
3055 info->data = RXH_IP_SRC | RXH_IP_DST | 3065 info->data = RXH_IP_SRC | RXH_IP_DST |
3056 RXH_L4_B_0_1 | RXH_L4_B_2_3; 3066 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3057 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 3067 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3058 info->data = RXH_IP_SRC | RXH_IP_DST; 3068 info->data = RXH_IP_SRC | RXH_IP_DST;
3059 break; 3069 break;
3060 case UDP_V6_FLOW: 3070 case UDP_V6_FLOW:
3061 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) && 3071 if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
3062 (v & FW_RSS_VI_CONFIG_CMD_UDPEN)) 3072 (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
3063 info->data = RXH_IP_SRC | RXH_IP_DST | 3073 info->data = RXH_IP_SRC | RXH_IP_DST |
3064 RXH_L4_B_0_1 | RXH_L4_B_2_3; 3074 RXH_L4_B_0_1 | RXH_L4_B_2_3;
3065 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 3075 else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3066 info->data = RXH_IP_SRC | RXH_IP_DST; 3076 info->data = RXH_IP_SRC | RXH_IP_DST;
3067 break; 3077 break;
3068 case SCTP_V6_FLOW: 3078 case SCTP_V6_FLOW:
3069 case AH_ESP_V6_FLOW: 3079 case AH_ESP_V6_FLOW:
3070 case IPV6_FLOW: 3080 case IPV6_FLOW:
3071 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN) 3081 if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
3072 info->data = RXH_IP_SRC | RXH_IP_DST; 3082 info->data = RXH_IP_SRC | RXH_IP_DST;
3073 break; 3083 break;
3074 } 3084 }
3075 return 0; 3085 return 0;
3076 } 3086 }
3077 case ETHTOOL_GRXRINGS: 3087 case ETHTOOL_GRXRINGS:
3078 info->data = pi->nqsets; 3088 info->data = pi->nqsets;
3079 return 0; 3089 return 0;
3080 } 3090 }
3081 return -EOPNOTSUPP; 3091 return -EOPNOTSUPP;
3082 } 3092 }
3083 3093
3084 static const struct ethtool_ops cxgb_ethtool_ops = { 3094 static const struct ethtool_ops cxgb_ethtool_ops = {
3085 .get_settings = get_settings, 3095 .get_settings = get_settings,
3086 .set_settings = set_settings, 3096 .set_settings = set_settings,
3087 .get_drvinfo = get_drvinfo, 3097 .get_drvinfo = get_drvinfo,
3088 .get_msglevel = get_msglevel, 3098 .get_msglevel = get_msglevel,
3089 .set_msglevel = set_msglevel, 3099 .set_msglevel = set_msglevel,
3090 .get_ringparam = get_sge_param, 3100 .get_ringparam = get_sge_param,
3091 .set_ringparam = set_sge_param, 3101 .set_ringparam = set_sge_param,
3092 .get_coalesce = get_coalesce, 3102 .get_coalesce = get_coalesce,
3093 .set_coalesce = set_coalesce, 3103 .set_coalesce = set_coalesce,
3094 .get_eeprom_len = get_eeprom_len, 3104 .get_eeprom_len = get_eeprom_len,
3095 .get_eeprom = get_eeprom, 3105 .get_eeprom = get_eeprom,
3096 .set_eeprom = set_eeprom, 3106 .set_eeprom = set_eeprom,
3097 .get_pauseparam = get_pauseparam, 3107 .get_pauseparam = get_pauseparam,
3098 .set_pauseparam = set_pauseparam, 3108 .set_pauseparam = set_pauseparam,
3099 .get_link = ethtool_op_get_link, 3109 .get_link = ethtool_op_get_link,
3100 .get_strings = get_strings, 3110 .get_strings = get_strings,
3101 .set_phys_id = identify_port, 3111 .set_phys_id = identify_port,
3102 .nway_reset = restart_autoneg, 3112 .nway_reset = restart_autoneg,
3103 .get_sset_count = get_sset_count, 3113 .get_sset_count = get_sset_count,
3104 .get_ethtool_stats = get_stats, 3114 .get_ethtool_stats = get_stats,
3105 .get_regs_len = get_regs_len, 3115 .get_regs_len = get_regs_len,
3106 .get_regs = get_regs, 3116 .get_regs = get_regs,
3107 .get_wol = get_wol, 3117 .get_wol = get_wol,
3108 .set_wol = set_wol, 3118 .set_wol = set_wol,
3109 .get_rxnfc = get_rxnfc, 3119 .get_rxnfc = get_rxnfc,
3110 .get_rxfh_indir_size = get_rss_table_size, 3120 .get_rxfh_indir_size = get_rss_table_size,
3111 .get_rxfh = get_rss_table, 3121 .get_rxfh = get_rss_table,
3112 .set_rxfh = set_rss_table, 3122 .set_rxfh = set_rss_table,
3113 .flash_device = set_flash, 3123 .flash_device = set_flash,
3114 }; 3124 };
3115 3125
3116 /* 3126 /*
3117 * debugfs support 3127 * debugfs support
3118 */ 3128 */
3119 static ssize_t mem_read(struct file *file, char __user *buf, size_t count, 3129 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
3120 loff_t *ppos) 3130 loff_t *ppos)
3121 { 3131 {
3122 loff_t pos = *ppos; 3132 loff_t pos = *ppos;
3123 loff_t avail = file_inode(file)->i_size; 3133 loff_t avail = file_inode(file)->i_size;
3124 unsigned int mem = (uintptr_t)file->private_data & 3; 3134 unsigned int mem = (uintptr_t)file->private_data & 3;
3125 struct adapter *adap = file->private_data - mem; 3135 struct adapter *adap = file->private_data - mem;
3126 __be32 *data; 3136 __be32 *data;
3127 int ret; 3137 int ret;
3128 3138
3129 if (pos < 0) 3139 if (pos < 0)
3130 return -EINVAL; 3140 return -EINVAL;
3131 if (pos >= avail) 3141 if (pos >= avail)
3132 return 0; 3142 return 0;
3133 if (count > avail - pos) 3143 if (count > avail - pos)
3134 count = avail - pos; 3144 count = avail - pos;
3135 3145
3136 data = t4_alloc_mem(count); 3146 data = t4_alloc_mem(count);
3137 if (!data) 3147 if (!data)
3138 return -ENOMEM; 3148 return -ENOMEM;
3139 3149
3140 spin_lock(&adap->win0_lock); 3150 spin_lock(&adap->win0_lock);
3141 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ); 3151 ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
3142 spin_unlock(&adap->win0_lock); 3152 spin_unlock(&adap->win0_lock);
3143 if (ret) { 3153 if (ret) {
3144 t4_free_mem(data); 3154 t4_free_mem(data);
3145 return ret; 3155 return ret;
3146 } 3156 }
3147 ret = copy_to_user(buf, data, count); 3157 ret = copy_to_user(buf, data, count);
3148 3158
3149 t4_free_mem(data); 3159 t4_free_mem(data);
3150 if (ret) 3160 if (ret)
3151 return -EFAULT; 3161 return -EFAULT;
3152 3162
3153 *ppos = pos + count; 3163 *ppos = pos + count;
3154 return count; 3164 return count;
3155 } 3165 }
3156 3166
3157 static const struct file_operations mem_debugfs_fops = { 3167 static const struct file_operations mem_debugfs_fops = {
3158 .owner = THIS_MODULE, 3168 .owner = THIS_MODULE,
3159 .open = simple_open, 3169 .open = simple_open,
3160 .read = mem_read, 3170 .read = mem_read,
3161 .llseek = default_llseek, 3171 .llseek = default_llseek,
3162 }; 3172 };
3163 3173
3164 static void add_debugfs_mem(struct adapter *adap, const char *name, 3174 static void add_debugfs_mem(struct adapter *adap, const char *name,
3165 unsigned int idx, unsigned int size_mb) 3175 unsigned int idx, unsigned int size_mb)
3166 { 3176 {
3167 struct dentry *de; 3177 struct dentry *de;
3168 3178
3169 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root, 3179 de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
3170 (void *)adap + idx, &mem_debugfs_fops); 3180 (void *)adap + idx, &mem_debugfs_fops);
3171 if (de && de->d_inode) 3181 if (de && de->d_inode)
3172 de->d_inode->i_size = size_mb << 20; 3182 de->d_inode->i_size = size_mb << 20;
3173 } 3183 }
3174 3184
3175 static int setup_debugfs(struct adapter *adap) 3185 static int setup_debugfs(struct adapter *adap)
3176 { 3186 {
3177 int i; 3187 int i;
3178 u32 size; 3188 u32 size;
3179 3189
3180 if (IS_ERR_OR_NULL(adap->debugfs_root)) 3190 if (IS_ERR_OR_NULL(adap->debugfs_root))
3181 return -1; 3191 return -1;
3182 3192
3183 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); 3193 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
3184 if (i & EDRAM0_ENABLE) { 3194 if (i & EDRAM0_ENABLE) {
3185 size = t4_read_reg(adap, MA_EDRAM0_BAR); 3195 size = t4_read_reg(adap, MA_EDRAM0_BAR);
3186 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size)); 3196 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
3187 } 3197 }
3188 if (i & EDRAM1_ENABLE) { 3198 if (i & EDRAM1_ENABLE) {
3189 size = t4_read_reg(adap, MA_EDRAM1_BAR); 3199 size = t4_read_reg(adap, MA_EDRAM1_BAR);
3190 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size)); 3200 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
3191 } 3201 }
3192 if (is_t4(adap->params.chip)) { 3202 if (is_t4(adap->params.chip)) {
3193 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); 3203 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3194 if (i & EXT_MEM_ENABLE) 3204 if (i & EXT_MEM_ENABLE)
3195 add_debugfs_mem(adap, "mc", MEM_MC, 3205 add_debugfs_mem(adap, "mc", MEM_MC,
3196 EXT_MEM_SIZE_GET(size)); 3206 EXT_MEM_SIZE_GET(size));
3197 } else { 3207 } else {
3198 if (i & EXT_MEM_ENABLE) { 3208 if (i & EXT_MEM_ENABLE) {
3199 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR); 3209 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
3200 add_debugfs_mem(adap, "mc0", MEM_MC0, 3210 add_debugfs_mem(adap, "mc0", MEM_MC0,
3201 EXT_MEM_SIZE_GET(size)); 3211 EXT_MEM_SIZE_GET(size));
3202 } 3212 }
3203 if (i & EXT_MEM1_ENABLE) { 3213 if (i & EXT_MEM1_ENABLE) {
3204 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR); 3214 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
3205 add_debugfs_mem(adap, "mc1", MEM_MC1, 3215 add_debugfs_mem(adap, "mc1", MEM_MC1,
3206 EXT_MEM_SIZE_GET(size)); 3216 EXT_MEM_SIZE_GET(size));
3207 } 3217 }
3208 } 3218 }
3209 if (adap->l2t) 3219 if (adap->l2t)
3210 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, 3220 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
3211 &t4_l2t_fops); 3221 &t4_l2t_fops);
3212 return 0; 3222 return 0;
3213 } 3223 }
3214 3224
3215 /* 3225 /*
3216 * upper-layer driver support 3226 * upper-layer driver support
3217 */ 3227 */
3218 3228
3219 /* 3229 /*
3220 * Allocate an active-open TID and set it to the supplied value. 3230 * Allocate an active-open TID and set it to the supplied value.
3221 */ 3231 */
3222 int cxgb4_alloc_atid(struct tid_info *t, void *data) 3232 int cxgb4_alloc_atid(struct tid_info *t, void *data)
3223 { 3233 {
3224 int atid = -1; 3234 int atid = -1;
3225 3235
3226 spin_lock_bh(&t->atid_lock); 3236 spin_lock_bh(&t->atid_lock);
3227 if (t->afree) { 3237 if (t->afree) {
3228 union aopen_entry *p = t->afree; 3238 union aopen_entry *p = t->afree;
3229 3239
3230 atid = (p - t->atid_tab) + t->atid_base; 3240 atid = (p - t->atid_tab) + t->atid_base;
3231 t->afree = p->next; 3241 t->afree = p->next;
3232 p->data = data; 3242 p->data = data;
3233 t->atids_in_use++; 3243 t->atids_in_use++;
3234 } 3244 }
3235 spin_unlock_bh(&t->atid_lock); 3245 spin_unlock_bh(&t->atid_lock);
3236 return atid; 3246 return atid;
3237 } 3247 }
3238 EXPORT_SYMBOL(cxgb4_alloc_atid); 3248 EXPORT_SYMBOL(cxgb4_alloc_atid);
3239 3249
3240 /* 3250 /*
3241 * Release an active-open TID. 3251 * Release an active-open TID.
3242 */ 3252 */
3243 void cxgb4_free_atid(struct tid_info *t, unsigned int atid) 3253 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
3244 { 3254 {
3245 union aopen_entry *p = &t->atid_tab[atid - t->atid_base]; 3255 union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
3246 3256
3247 spin_lock_bh(&t->atid_lock); 3257 spin_lock_bh(&t->atid_lock);
3248 p->next = t->afree; 3258 p->next = t->afree;
3249 t->afree = p; 3259 t->afree = p;
3250 t->atids_in_use--; 3260 t->atids_in_use--;
3251 spin_unlock_bh(&t->atid_lock); 3261 spin_unlock_bh(&t->atid_lock);
3252 } 3262 }
3253 EXPORT_SYMBOL(cxgb4_free_atid); 3263 EXPORT_SYMBOL(cxgb4_free_atid);
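The atid table above is a classic union free-list: a slot holds the caller's data while allocated and the next-free pointer while free, so no side storage is needed. A self-contained sketch of the idiom follows; the union layout is an assumption mirroring aopen_entry, and locking is omitted:

union slot {
	void *data;		/* while allocated */
	union slot *next;	/* while on the free list */
};

static union slot *freelist;

static union slot *slot_alloc(void *data)
{
	union slot *p = freelist;

	if (p) {
		freelist = p->next;
		p->data = data;
	}
	return p;		/* NULL when the list is exhausted */
}

static void slot_free(union slot *p)
{
	p->next = freelist;
	freelist = p;
}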
3254 3264
3255 /* 3265 /*
3256 * Allocate a server TID and set it to the supplied value. 3266 * Allocate a server TID and set it to the supplied value.
3257 */ 3267 */
3258 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data) 3268 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
3259 { 3269 {
3260 int stid; 3270 int stid;
3261 3271
3262 spin_lock_bh(&t->stid_lock); 3272 spin_lock_bh(&t->stid_lock);
3263 if (family == PF_INET) { 3273 if (family == PF_INET) {
3264 stid = find_first_zero_bit(t->stid_bmap, t->nstids); 3274 stid = find_first_zero_bit(t->stid_bmap, t->nstids);
3265 if (stid < t->nstids) 3275 if (stid < t->nstids)
3266 __set_bit(stid, t->stid_bmap); 3276 __set_bit(stid, t->stid_bmap);
3267 else 3277 else
3268 stid = -1; 3278 stid = -1;
3269 } else { 3279 } else {
3270 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2); 3280 stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
3271 if (stid < 0) 3281 if (stid < 0)
3272 stid = -1; 3282 stid = -1;
3273 } 3283 }
3274 if (stid >= 0) { 3284 if (stid >= 0) {
3275 t->stid_tab[stid].data = data; 3285 t->stid_tab[stid].data = data;
3276 stid += t->stid_base; 3286 stid += t->stid_base;
3277 /* IPv6 requires a max of 520 bits or 16 cells in TCAM. 3287 /* IPv6 requires a max of 520 bits or 16 cells in TCAM.
3278 * This is equivalent to 4 TIDs. With CLIP enabled it 3288 * This is equivalent to 4 TIDs. With CLIP enabled it
3279 * needs 2 TIDs. 3289 * needs 2 TIDs.
3280 */ 3290 */
3281 if (family == PF_INET) 3291 if (family == PF_INET)
3282 t->stids_in_use++; 3292 t->stids_in_use++;
3283 else 3293 else
3284 t->stids_in_use += 4; 3294 t->stids_in_use += 4;
3285 } 3295 }
3286 spin_unlock_bh(&t->stid_lock); 3296 spin_unlock_bh(&t->stid_lock);
3287 return stid; 3297 return stid;
3288 } 3298 }
3289 EXPORT_SYMBOL(cxgb4_alloc_stid); 3299 EXPORT_SYMBOL(cxgb4_alloc_stid);
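The IPv6 branch above claims four consecutive stids at once: bitmap_find_free_region() with order 2 finds a naturally aligned run of 2^2 = 4 bits, matching the four TIDs an IPv6 server entry consumes, and cxgb4_free_stid() below releases the run with bitmap_release_region() at the same order. A minimal kernel-context sketch (the helper name and parameters are illustrative):

#include <linux/bitmap.h>

static int claim_ipv6_stid(unsigned long *bmap, unsigned int nstids)
{
	/* order 2 -> an aligned region of 2^2 = 4 bits */
	return bitmap_find_free_region(bmap, nstids, 2);
}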
3290 3300
3291 /* Allocate a server filter TID and set it to the supplied value. 3301 /* Allocate a server filter TID and set it to the supplied value.
3292 */ 3302 */
3293 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data) 3303 int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
3294 { 3304 {
3295 int stid; 3305 int stid;
3296 3306
3297 spin_lock_bh(&t->stid_lock); 3307 spin_lock_bh(&t->stid_lock);
3298 if (family == PF_INET) { 3308 if (family == PF_INET) {
3299 stid = find_next_zero_bit(t->stid_bmap, 3309 stid = find_next_zero_bit(t->stid_bmap,
3300 t->nstids + t->nsftids, t->nstids); 3310 t->nstids + t->nsftids, t->nstids);
3301 if (stid < (t->nstids + t->nsftids)) 3311 if (stid < (t->nstids + t->nsftids))
3302 __set_bit(stid, t->stid_bmap); 3312 __set_bit(stid, t->stid_bmap);
3303 else 3313 else
3304 stid = -1; 3314 stid = -1;
3305 } else { 3315 } else {
3306 stid = -1; 3316 stid = -1;
3307 } 3317 }
3308 if (stid >= 0) { 3318 if (stid >= 0) {
3309 t->stid_tab[stid].data = data; 3319 t->stid_tab[stid].data = data;
3310 stid -= t->nstids; 3320 stid -= t->nstids;
3311 stid += t->sftid_base; 3321 stid += t->sftid_base;
3312 t->stids_in_use++; 3322 t->stids_in_use++;
3313 } 3323 }
3314 spin_unlock_bh(&t->stid_lock); 3324 spin_unlock_bh(&t->stid_lock);
3315 return stid; 3325 return stid;
3316 } 3326 }
3317 EXPORT_SYMBOL(cxgb4_alloc_sftid); 3327 EXPORT_SYMBOL(cxgb4_alloc_sftid);
3318 3328
3319 /* Release a server TID. 3329 /* Release a server TID.
3320 */ 3330 */
3321 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family) 3331 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
3322 { 3332 {
3323 /* Is it a server filter TID? */ 3333 /* Is it a server filter TID? */
3324 if (t->nsftids && (stid >= t->sftid_base)) { 3334 if (t->nsftids && (stid >= t->sftid_base)) {
3325 stid -= t->sftid_base; 3335 stid -= t->sftid_base;
3326 stid += t->nstids; 3336 stid += t->nstids;
3327 } else { 3337 } else {
3328 stid -= t->stid_base; 3338 stid -= t->stid_base;
3329 } 3339 }
3330 3340
3331 spin_lock_bh(&t->stid_lock); 3341 spin_lock_bh(&t->stid_lock);
3332 if (family == PF_INET) 3342 if (family == PF_INET)
3333 __clear_bit(stid, t->stid_bmap); 3343 __clear_bit(stid, t->stid_bmap);
3334 else 3344 else
3335 bitmap_release_region(t->stid_bmap, stid, 2); 3345 bitmap_release_region(t->stid_bmap, stid, 2);
3336 t->stid_tab[stid].data = NULL; 3346 t->stid_tab[stid].data = NULL;
3337 if (family == PF_INET) 3347 if (family == PF_INET)
3338 t->stids_in_use--; 3348 t->stids_in_use--;
3339 else 3349 else
3340 t->stids_in_use -= 4; 3350 t->stids_in_use -= 4;
3341 spin_unlock_bh(&t->stid_lock); 3351 spin_unlock_bh(&t->stid_lock);
3342 } 3352 }
3343 EXPORT_SYMBOL(cxgb4_free_stid); 3353 EXPORT_SYMBOL(cxgb4_free_stid);
3344 3354
3345 /* 3355 /*
3346 * Populate a TID_RELEASE WR. Caller must properly size the skb. 3356 * Populate a TID_RELEASE WR. Caller must properly size the skb.
3347 */ 3357 */
3348 static void mk_tid_release(struct sk_buff *skb, unsigned int chan, 3358 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
3349 unsigned int tid) 3359 unsigned int tid)
3350 { 3360 {
3351 struct cpl_tid_release *req; 3361 struct cpl_tid_release *req;
3352 3362
3353 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan); 3363 set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
3354 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req)); 3364 req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
3355 INIT_TP_WR(req, tid); 3365 INIT_TP_WR(req, tid);
3356 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid)); 3366 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
3357 } 3367 }
3358 3368
3359 /* 3369 /*
3360 * Queue a TID release request and if necessary schedule a work queue to 3370 * Queue a TID release request and if necessary schedule a work queue to
3361 * process it. 3371 * process it.
3362 */ 3372 */
3363 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan, 3373 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
3364 unsigned int tid) 3374 unsigned int tid)
3365 { 3375 {
3366 void **p = &t->tid_tab[tid]; 3376 void **p = &t->tid_tab[tid];
3367 struct adapter *adap = container_of(t, struct adapter, tids); 3377 struct adapter *adap = container_of(t, struct adapter, tids);
3368 3378
3369 spin_lock_bh(&adap->tid_release_lock); 3379 spin_lock_bh(&adap->tid_release_lock);
3370 *p = adap->tid_release_head; 3380 *p = adap->tid_release_head;
3371 /* Low 2 bits encode the Tx channel number */ 3381 /* Low 2 bits encode the Tx channel number */
3372 adap->tid_release_head = (void **)((uintptr_t)p | chan); 3382 adap->tid_release_head = (void **)((uintptr_t)p | chan);
3373 if (!adap->tid_release_task_busy) { 3383 if (!adap->tid_release_task_busy) {
3374 adap->tid_release_task_busy = true; 3384 adap->tid_release_task_busy = true;
3375 queue_work(adap->workq, &adap->tid_release_task); 3385 queue_work(adap->workq, &adap->tid_release_task);
3376 } 3386 }
3377 spin_unlock_bh(&adap->tid_release_lock); 3387 spin_unlock_bh(&adap->tid_release_lock);
3378 } 3388 }
3379 3389
3380 /* 3390 /*
3381 * Process the list of pending TID release requests. 3391 * Process the list of pending TID release requests.
3382 */ 3392 */
3383 static void process_tid_release_list(struct work_struct *work) 3393 static void process_tid_release_list(struct work_struct *work)
3384 { 3394 {
3385 struct sk_buff *skb; 3395 struct sk_buff *skb;
3386 struct adapter *adap; 3396 struct adapter *adap;
3387 3397
3388 adap = container_of(work, struct adapter, tid_release_task); 3398 adap = container_of(work, struct adapter, tid_release_task);
3389 3399
3390 spin_lock_bh(&adap->tid_release_lock); 3400 spin_lock_bh(&adap->tid_release_lock);
3391 while (adap->tid_release_head) { 3401 while (adap->tid_release_head) {
3392 void **p = adap->tid_release_head; 3402 void **p = adap->tid_release_head;
3393 unsigned int chan = (uintptr_t)p & 3; 3403 unsigned int chan = (uintptr_t)p & 3;
3394 p = (void *)p - chan; 3404 p = (void *)p - chan;
3395 3405
3396 adap->tid_release_head = *p; 3406 adap->tid_release_head = *p;
3397 *p = NULL; 3407 *p = NULL;
3398 spin_unlock_bh(&adap->tid_release_lock); 3408 spin_unlock_bh(&adap->tid_release_lock);
3399 3409
3400 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release), 3410 while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
3401 GFP_KERNEL))) 3411 GFP_KERNEL)))
3402 schedule_timeout_uninterruptible(1); 3412 schedule_timeout_uninterruptible(1);
3403 3413
3404 mk_tid_release(skb, chan, p - adap->tids.tid_tab); 3414 mk_tid_release(skb, chan, p - adap->tids.tid_tab);
3405 t4_ofld_send(adap, skb); 3415 t4_ofld_send(adap, skb);
3406 spin_lock_bh(&adap->tid_release_lock); 3416 spin_lock_bh(&adap->tid_release_lock);
3407 } 3417 }
3408 adap->tid_release_task_busy = false; 3418 adap->tid_release_task_busy = false;
3409 spin_unlock_bh(&adap->tid_release_lock); 3419 spin_unlock_bh(&adap->tid_release_lock);
3410 } 3420 }
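The deferred-release list above stores the Tx channel in the low two bits of each table-slot pointer, which is safe because the slots are at least 4-byte aligned; mem_read() earlier plays the same trick with its private_data. A small sketch of the tagging:

#include <stdint.h>

static void **tag_ptr(void **p, unsigned int chan)
{
	return (void **)((uintptr_t)p | (chan & 3));
}

static void **untag_ptr(void **p, unsigned int *chan)
{
	*chan = (uintptr_t)p & 3;
	return (void **)((uintptr_t)p & ~(uintptr_t)3);
}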
3411 3421
3412 /* 3422 /*
3413 * Release a TID and inform HW. If we are unable to allocate the release 3423 * Release a TID and inform HW. If we are unable to allocate the release
3414 * message we defer to a work queue. 3424 * message we defer to a work queue.
3415 */ 3425 */
3416 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid) 3426 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
3417 { 3427 {
3418 void *old; 3428 void *old;
3419 struct sk_buff *skb; 3429 struct sk_buff *skb;
3420 struct adapter *adap = container_of(t, struct adapter, tids); 3430 struct adapter *adap = container_of(t, struct adapter, tids);
3421 3431
3422 old = t->tid_tab[tid]; 3432 old = t->tid_tab[tid];
3423 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC); 3433 skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
3424 if (likely(skb)) { 3434 if (likely(skb)) {
3425 t->tid_tab[tid] = NULL; 3435 t->tid_tab[tid] = NULL;
3426 mk_tid_release(skb, chan, tid); 3436 mk_tid_release(skb, chan, tid);
3427 t4_ofld_send(adap, skb); 3437 t4_ofld_send(adap, skb);
3428 } else 3438 } else
3429 cxgb4_queue_tid_release(t, chan, tid); 3439 cxgb4_queue_tid_release(t, chan, tid);
3430 if (old) 3440 if (old)
3431 atomic_dec(&t->tids_in_use); 3441 atomic_dec(&t->tids_in_use);
3432 } 3442 }
3433 EXPORT_SYMBOL(cxgb4_remove_tid); 3443 EXPORT_SYMBOL(cxgb4_remove_tid);
3434 3444
3435 /* 3445 /*
3436 * Allocate and initialize the TID tables. Returns 0 on success. 3446 * Allocate and initialize the TID tables. Returns 0 on success.
3437 */ 3447 */
3438 static int tid_init(struct tid_info *t) 3448 static int tid_init(struct tid_info *t)
3439 { 3449 {
3440 size_t size; 3450 size_t size;
3441 unsigned int stid_bmap_size; 3451 unsigned int stid_bmap_size;
3442 unsigned int natids = t->natids; 3452 unsigned int natids = t->natids;
3443 struct adapter *adap = container_of(t, struct adapter, tids); 3453 struct adapter *adap = container_of(t, struct adapter, tids);
3444 3454
3445 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids); 3455 stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
3446 size = t->ntids * sizeof(*t->tid_tab) + 3456 size = t->ntids * sizeof(*t->tid_tab) +
3447 natids * sizeof(*t->atid_tab) + 3457 natids * sizeof(*t->atid_tab) +
3448 t->nstids * sizeof(*t->stid_tab) + 3458 t->nstids * sizeof(*t->stid_tab) +
3449 t->nsftids * sizeof(*t->stid_tab) + 3459 t->nsftids * sizeof(*t->stid_tab) +
3450 stid_bmap_size * sizeof(long) + 3460 stid_bmap_size * sizeof(long) +
3451 t->nftids * sizeof(*t->ftid_tab) + 3461 t->nftids * sizeof(*t->ftid_tab) +
3452 t->nsftids * sizeof(*t->ftid_tab); 3462 t->nsftids * sizeof(*t->ftid_tab);
3453 3463
3454 t->tid_tab = t4_alloc_mem(size); 3464 t->tid_tab = t4_alloc_mem(size);
3455 if (!t->tid_tab) 3465 if (!t->tid_tab)
3456 return -ENOMEM; 3466 return -ENOMEM;
3457 3467
3458 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; 3468 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
3459 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids]; 3469 t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
3460 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids]; 3470 t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
3461 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size]; 3471 t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
3462 spin_lock_init(&t->stid_lock); 3472 spin_lock_init(&t->stid_lock);
3463 spin_lock_init(&t->atid_lock); 3473 spin_lock_init(&t->atid_lock);
3464 3474
3465 t->stids_in_use = 0; 3475 t->stids_in_use = 0;
3466 t->afree = NULL; 3476 t->afree = NULL;
3467 t->atids_in_use = 0; 3477 t->atids_in_use = 0;
3468 atomic_set(&t->tids_in_use, 0); 3478 atomic_set(&t->tids_in_use, 0);
3469 3479
3470 /* Setup the free list for atid_tab and clear the stid bitmap. */ 3480 /* Setup the free list for atid_tab and clear the stid bitmap. */
3471 if (natids) { 3481 if (natids) {
3472 while (--natids) 3482 while (--natids)
3473 t->atid_tab[natids - 1].next = &t->atid_tab[natids]; 3483 t->atid_tab[natids - 1].next = &t->atid_tab[natids];
3474 t->afree = t->atid_tab; 3484 t->afree = t->atid_tab;
3475 } 3485 }
3476 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids); 3486 bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
3477 /* Reserve stid 0 for T4/T5 adapters */ 3487 /* Reserve stid 0 for T4/T5 adapters */
3478 if (!t->stid_base && 3488 if (!t->stid_base &&
3479 (is_t4(adap->params.chip) || is_t5(adap->params.chip))) 3489 (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
3480 __set_bit(0, t->stid_bmap); 3490 __set_bit(0, t->stid_bmap);
3481 3491
3482 return 0; 3492 return 0;
3483 } 3493 }
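tid_init() sizes one buffer for every table and then carves the individual table pointers out of it, so a single allocation and a single free cover them all. A minimal userspace sketch of the carving, with illustrative types and counts:

#include <stdlib.h>

struct tables {
	void **tid_tab;			/* first table in the buffer */
	unsigned long *stid_bmap;	/* carved from the same buffer */
};

static int tables_init(struct tables *t, size_t ntids, size_t bmap_longs)
{
	size_t size = ntids * sizeof(void *) +
		      bmap_longs * sizeof(long);

	t->tid_tab = calloc(1, size);
	if (!t->tid_tab)
		return -1;
	t->stid_bmap = (unsigned long *)&t->tid_tab[ntids];
	return 0;
}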
3484 3494
3485 int cxgb4_clip_get(const struct net_device *dev, 3495 int cxgb4_clip_get(const struct net_device *dev,
3486 const struct in6_addr *lip) 3496 const struct in6_addr *lip)
3487 { 3497 {
3488 struct adapter *adap; 3498 struct adapter *adap;
3489 struct fw_clip_cmd c; 3499 struct fw_clip_cmd c;
3490 3500
3491 adap = netdev2adap(dev); 3501 adap = netdev2adap(dev);
3492 memset(&c, 0, sizeof(c)); 3502 memset(&c, 0, sizeof(c));
3493 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | 3503 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3494 FW_CMD_REQUEST | FW_CMD_WRITE); 3504 FW_CMD_REQUEST | FW_CMD_WRITE);
3495 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); 3505 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
3496 c.ip_hi = *(__be64 *)(lip->s6_addr); 3506 c.ip_hi = *(__be64 *)(lip->s6_addr);
3497 c.ip_lo = *(__be64 *)(lip->s6_addr + 8); 3507 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3498 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); 3508 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3499 } 3509 }
3500 EXPORT_SYMBOL(cxgb4_clip_get); 3510 EXPORT_SYMBOL(cxgb4_clip_get);
3501 3511
3502 int cxgb4_clip_release(const struct net_device *dev, 3512 int cxgb4_clip_release(const struct net_device *dev,
3503 const struct in6_addr *lip) 3513 const struct in6_addr *lip)
3504 { 3514 {
3505 struct adapter *adap; 3515 struct adapter *adap;
3506 struct fw_clip_cmd c; 3516 struct fw_clip_cmd c;
3507 3517
3508 adap = netdev2adap(dev); 3518 adap = netdev2adap(dev);
3509 memset(&c, 0, sizeof(c)); 3519 memset(&c, 0, sizeof(c));
3510 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) | 3520 c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
3511 FW_CMD_REQUEST | FW_CMD_READ); 3521 FW_CMD_REQUEST | FW_CMD_READ);
3512 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); 3522 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
3513 c.ip_hi = *(__be64 *)(lip->s6_addr); 3523 c.ip_hi = *(__be64 *)(lip->s6_addr);
3514 c.ip_lo = *(__be64 *)(lip->s6_addr + 8); 3524 c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
3515 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false); 3525 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
3516 } 3526 }
3517 EXPORT_SYMBOL(cxgb4_clip_release); 3527 EXPORT_SYMBOL(cxgb4_clip_release);
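
Both commands above load the 128-bit IPv6 address as two big-endian 64-bit halves and differ only in the ALLOC/FREE flag. A hypothetical caller (with_clip_entry is illustrative) would normally pair the two entry points around the lifetime of an offloaded endpoint:

    static int with_clip_entry(struct net_device *dev,
                               const struct in6_addr *lip)
    {
            int ret = cxgb4_clip_get(dev, lip);     /* take a CLIP reference */

            if (ret < 0)
                    return ret;
            /* ... address is now usable by hardware offload ... */
            cxgb4_clip_release(dev, lip);           /* drop it on teardown */
            return 0;
    }
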
3518 3528
3519 /** 3529 /**
3520 * cxgb4_create_server - create an IP server 3530 * cxgb4_create_server - create an IP server
3521 * @dev: the device 3531 * @dev: the device
3522 * @stid: the server TID 3532 * @stid: the server TID
3523 * @sip: local IP address to bind server to 3533 * @sip: local IP address to bind server to
3524 * @sport: the server's TCP port 3534 * @sport: the server's TCP port
3525 * @queue: queue to direct messages from this server to 3535 * @queue: queue to direct messages from this server to
3526 * 3536 *
3527 * Create an IP server for the given port and address. 3537 * Create an IP server for the given port and address.
3528 * Returns <0 on error and one of the %NET_XMIT_* values on success. 3538 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3529 */ 3539 */
3530 int cxgb4_create_server(const struct net_device *dev, unsigned int stid, 3540 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
3531 __be32 sip, __be16 sport, __be16 vlan, 3541 __be32 sip, __be16 sport, __be16 vlan,
3532 unsigned int queue) 3542 unsigned int queue)
3533 { 3543 {
3534 unsigned int chan; 3544 unsigned int chan;
3535 struct sk_buff *skb; 3545 struct sk_buff *skb;
3536 struct adapter *adap; 3546 struct adapter *adap;
3537 struct cpl_pass_open_req *req; 3547 struct cpl_pass_open_req *req;
3538 int ret; 3548 int ret;
3539 3549
3540 skb = alloc_skb(sizeof(*req), GFP_KERNEL); 3550 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3541 if (!skb) 3551 if (!skb)
3542 return -ENOMEM; 3552 return -ENOMEM;
3543 3553
3544 adap = netdev2adap(dev); 3554 adap = netdev2adap(dev);
3545 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req)); 3555 req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
3546 INIT_TP_WR(req, 0); 3556 INIT_TP_WR(req, 0);
3547 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid)); 3557 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
3548 req->local_port = sport; 3558 req->local_port = sport;
3549 req->peer_port = htons(0); 3559 req->peer_port = htons(0);
3550 req->local_ip = sip; 3560 req->local_ip = sip;
3551 req->peer_ip = htonl(0); 3561 req->peer_ip = htonl(0);
3552 chan = rxq_to_chan(&adap->sge, queue); 3562 chan = rxq_to_chan(&adap->sge, queue);
3553 req->opt0 = cpu_to_be64(TX_CHAN(chan)); 3563 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3554 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 3564 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3555 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 3565 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3556 ret = t4_mgmt_tx(adap, skb); 3566 ret = t4_mgmt_tx(adap, skb);
3557 return net_xmit_eval(ret); 3567 return net_xmit_eval(ret);
3558 } 3568 }
3559 EXPORT_SYMBOL(cxgb4_create_server); 3569 EXPORT_SYMBOL(cxgb4_create_server);
3560 3570
3561 /* cxgb4_create_server6 - create an IPv6 server 3571 /* cxgb4_create_server6 - create an IPv6 server
3562 * @dev: the device 3572 * @dev: the device
3563 * @stid: the server TID 3573 * @stid: the server TID
3564 * @sip: local IPv6 address to bind server to 3574 * @sip: local IPv6 address to bind server to
3565 * @sport: the server's TCP port 3575 * @sport: the server's TCP port
3566 * @queue: queue to direct messages from this server to 3576 * @queue: queue to direct messages from this server to
3567 * 3577 *
3568 * Create an IPv6 server for the given port and address. 3578 * Create an IPv6 server for the given port and address.
3569 * Returns <0 on error and one of the %NET_XMIT_* values on success. 3579 * Returns <0 on error and one of the %NET_XMIT_* values on success.
3570 */ 3580 */
3571 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid, 3581 int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
3572 const struct in6_addr *sip, __be16 sport, 3582 const struct in6_addr *sip, __be16 sport,
3573 unsigned int queue) 3583 unsigned int queue)
3574 { 3584 {
3575 unsigned int chan; 3585 unsigned int chan;
3576 struct sk_buff *skb; 3586 struct sk_buff *skb;
3577 struct adapter *adap; 3587 struct adapter *adap;
3578 struct cpl_pass_open_req6 *req; 3588 struct cpl_pass_open_req6 *req;
3579 int ret; 3589 int ret;
3580 3590
3581 skb = alloc_skb(sizeof(*req), GFP_KERNEL); 3591 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3582 if (!skb) 3592 if (!skb)
3583 return -ENOMEM; 3593 return -ENOMEM;
3584 3594
3585 adap = netdev2adap(dev); 3595 adap = netdev2adap(dev);
3586 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req)); 3596 req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
3587 INIT_TP_WR(req, 0); 3597 INIT_TP_WR(req, 0);
3588 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid)); 3598 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
3589 req->local_port = sport; 3599 req->local_port = sport;
3590 req->peer_port = htons(0); 3600 req->peer_port = htons(0);
3591 req->local_ip_hi = *(__be64 *)(sip->s6_addr); 3601 req->local_ip_hi = *(__be64 *)(sip->s6_addr);
3592 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8); 3602 req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
3593 req->peer_ip_hi = cpu_to_be64(0); 3603 req->peer_ip_hi = cpu_to_be64(0);
3594 req->peer_ip_lo = cpu_to_be64(0); 3604 req->peer_ip_lo = cpu_to_be64(0);
3595 chan = rxq_to_chan(&adap->sge, queue); 3605 chan = rxq_to_chan(&adap->sge, queue);
3596 req->opt0 = cpu_to_be64(TX_CHAN(chan)); 3606 req->opt0 = cpu_to_be64(TX_CHAN(chan));
3597 req->opt1 = cpu_to_be64(CONN_POLICY_ASK | 3607 req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
3598 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue)); 3608 SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
3599 ret = t4_mgmt_tx(adap, skb); 3609 ret = t4_mgmt_tx(adap, skb);
3600 return net_xmit_eval(ret); 3610 return net_xmit_eval(ret);
3601 } 3611 }
3602 EXPORT_SYMBOL(cxgb4_create_server6); 3612 EXPORT_SYMBOL(cxgb4_create_server6);
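
For reference, a hypothetical ULD-side sequence for starting an IPv4 wildcard listener on an stid the caller already owns (start_listener, the port number and the queue are illustrative):

    static int start_listener(struct net_device *dev, unsigned int stid,
                              unsigned int rxq)
    {
            int ret;

            /* bind to 0.0.0.0:8000, no VLAN, SYNs steered to rxq */
            ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
                                      htons(8000), 0, rxq);
            /* <0 on failure, otherwise one of the NET_XMIT_* values */
            return ret;
    }
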
3603 3613
3604 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid, 3614 int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
3605 unsigned int queue, bool ipv6) 3615 unsigned int queue, bool ipv6)
3606 { 3616 {
3607 struct sk_buff *skb; 3617 struct sk_buff *skb;
3608 struct adapter *adap; 3618 struct adapter *adap;
3609 struct cpl_close_listsvr_req *req; 3619 struct cpl_close_listsvr_req *req;
3610 int ret; 3620 int ret;
3611 3621
3612 adap = netdev2adap(dev); 3622 adap = netdev2adap(dev);
3613 3623
3614 skb = alloc_skb(sizeof(*req), GFP_KERNEL); 3624 skb = alloc_skb(sizeof(*req), GFP_KERNEL);
3615 if (!skb) 3625 if (!skb)
3616 return -ENOMEM; 3626 return -ENOMEM;
3617 3627
3618 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req)); 3628 req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
3619 INIT_TP_WR(req, 0); 3629 INIT_TP_WR(req, 0);
3620 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid)); 3630 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
3621 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) : 3631 req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
3622 LISTSVR_IPV6(0)) | QUEUENO(queue)); 3632 LISTSVR_IPV6(0)) | QUEUENO(queue));
3623 ret = t4_mgmt_tx(adap, skb); 3633 ret = t4_mgmt_tx(adap, skb);
3624 return net_xmit_eval(ret); 3634 return net_xmit_eval(ret);
3625 } 3635 }
3626 EXPORT_SYMBOL(cxgb4_remove_server); 3636 EXPORT_SYMBOL(cxgb4_remove_server);
3627 3637
3628 /** 3638 /**
3629 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU 3639 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
3630 * @mtus: the HW MTU table 3640 * @mtus: the HW MTU table
3631 * @mtu: the target MTU 3641 * @mtu: the target MTU
3632 * @idx: index of selected entry in the MTU table 3642 * @idx: index of selected entry in the MTU table
3633 * 3643 *
3634 * Returns the index and the value in the HW MTU table that is closest to 3644 * Returns the index and the value in the HW MTU table that is closest to
3635 * but does not exceed @mtu, unless @mtu is smaller than any value in the 3645 * but does not exceed @mtu, unless @mtu is smaller than any value in the
3636 * table, in which case that smallest available value is selected. 3646 * table, in which case that smallest available value is selected.
3637 */ 3647 */
3638 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu, 3648 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
3639 unsigned int *idx) 3649 unsigned int *idx)
3640 { 3650 {
3641 unsigned int i = 0; 3651 unsigned int i = 0;
3642 3652
3643 while (i < NMTUS - 1 && mtus[i + 1] <= mtu) 3653 while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
3644 ++i; 3654 ++i;
3645 if (idx) 3655 if (idx)
3646 *idx = i; 3656 *idx = i;
3647 return mtus[i]; 3657 return mtus[i];
3648 } 3658 }
3649 EXPORT_SYMBOL(cxgb4_best_mtu); 3659 EXPORT_SYMBOL(cxgb4_best_mtu);
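
A worked example against a hypothetical ascending 16-entry table (the hardware table holds NMTUS entries):

    static const unsigned short mtus[NMTUS] = {
            88, 256, 512, 576, 808, 1024, 1280, 1488,
            1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
    };
    unsigned int idx;

    cxgb4_best_mtu(mtus, 4000, &idx);   /* returns 2048, idx = 10 */
    cxgb4_best_mtu(mtus, 64, &idx);     /* 64 is below every entry,
                                         * so mtus[0] = 88 is returned */
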
3650 3660
3651 /** 3661 /**
3652 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned 3662 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
3653 * @mtus: the HW MTU table 3663 * @mtus: the HW MTU table
3654 * @header_size: Header Size 3664 * @header_size: Header Size
3655 * @data_size_max: maximum Data Segment Size 3665 * @data_size_max: maximum Data Segment Size
3656 * @data_size_align: desired Data Segment Size Alignment (2^N) 3666 * @data_size_align: desired Data Segment Size Alignment (2^N)
3657 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL) 3667 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
3658 * 3668 *
3659 * Similar to cxgb4_best_mtu() but instead of searching the Hardware 3669 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
3660 * MTU Table based solely on a Maximum MTU parameter, we break that 3670 * MTU Table based solely on a Maximum MTU parameter, we break that
3661 * parameter up into a Header Size and Maximum Data Segment Size, and 3671 * parameter up into a Header Size and Maximum Data Segment Size, and
3662 * provide a desired Data Segment Size Alignment. If we find an MTU in 3672 * provide a desired Data Segment Size Alignment. If we find an MTU in
3663 * the Hardware MTU Table which will result in a Data Segment Size with 3673 * the Hardware MTU Table which will result in a Data Segment Size with
3664 * the requested alignment _and_ that MTU isn't "too far" from the 3674 * the requested alignment _and_ that MTU isn't "too far" from the
3665 * closest MTU, then we'll return that rather than the closest MTU. 3675 * closest MTU, then we'll return that rather than the closest MTU.
3666 */ 3676 */
3667 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus, 3677 unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
3668 unsigned short header_size, 3678 unsigned short header_size,
3669 unsigned short data_size_max, 3679 unsigned short data_size_max,
3670 unsigned short data_size_align, 3680 unsigned short data_size_align,
3671 unsigned int *mtu_idxp) 3681 unsigned int *mtu_idxp)
3672 { 3682 {
3673 unsigned short max_mtu = header_size + data_size_max; 3683 unsigned short max_mtu = header_size + data_size_max;
3674 unsigned short data_size_align_mask = data_size_align - 1; 3684 unsigned short data_size_align_mask = data_size_align - 1;
3675 int mtu_idx, aligned_mtu_idx; 3685 int mtu_idx, aligned_mtu_idx;
3676 3686
3677 /* Scan the MTU Table till we find an MTU which is larger than our 3687 /* Scan the MTU Table till we find an MTU which is larger than our
3678 * Maximum MTU or we reach the end of the table. Along the way, 3688 * Maximum MTU or we reach the end of the table. Along the way,
3679 * record the last MTU found, if any, which will result in a Data 3689 * record the last MTU found, if any, which will result in a Data
3680 * Segment Length matching the requested alignment. 3690 * Segment Length matching the requested alignment.
3681 */ 3691 */
3682 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) { 3692 for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
3683 unsigned short data_size = mtus[mtu_idx] - header_size; 3693 unsigned short data_size = mtus[mtu_idx] - header_size;
3684 3694
3685 /* If this MTU minus the Header Size would result in a 3695 /* If this MTU minus the Header Size would result in a
3686 * Data Segment Size of the desired alignment, remember it. 3696 * Data Segment Size of the desired alignment, remember it.
3687 */ 3697 */
3688 if ((data_size & data_size_align_mask) == 0) 3698 if ((data_size & data_size_align_mask) == 0)
3689 aligned_mtu_idx = mtu_idx; 3699 aligned_mtu_idx = mtu_idx;
3690 3700
3691 /* If we're not at the end of the Hardware MTU Table and the 3701 /* If we're not at the end of the Hardware MTU Table and the
3692 * next element is larger than our Maximum MTU, drop out of 3702 * next element is larger than our Maximum MTU, drop out of
3693 * the loop. 3703 * the loop.
3694 */ 3704 */
3695 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu) 3705 if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
3696 break; 3706 break;
3697 } 3707 }
3698 3708
3699 /* If we fell out of the loop because we ran to the end of the table, 3709 /* If we fell out of the loop because we ran to the end of the table,
3700 * then we just have to use the last [largest] entry. 3710 * then we just have to use the last [largest] entry.
3701 */ 3711 */
3702 if (mtu_idx == NMTUS) 3712 if (mtu_idx == NMTUS)
3703 mtu_idx--; 3713 mtu_idx--;
3704 3714
3705 /* If we found an MTU which resulted in the requested Data Segment 3715 /* If we found an MTU which resulted in the requested Data Segment
3706 * Length alignment and that's "not far" from the largest MTU which is 3716 * Length alignment and that's "not far" from the largest MTU which is
3707 * less than or equal to the maximum MTU, then use that. 3717 * less than or equal to the maximum MTU, then use that.
3708 */ 3718 */
3709 if (aligned_mtu_idx >= 0 && 3719 if (aligned_mtu_idx >= 0 &&
3710 mtu_idx - aligned_mtu_idx <= 1) 3720 mtu_idx - aligned_mtu_idx <= 1)
3711 mtu_idx = aligned_mtu_idx; 3721 mtu_idx = aligned_mtu_idx;
3712 3722
3713 /* If the caller has passed in an MTU Index pointer, pass the 3723 /* If the caller has passed in an MTU Index pointer, pass the
3714 * MTU Index back. Return the MTU value. 3724 * MTU Index back. Return the MTU value.
3715 */ 3725 */
3716 if (mtu_idxp) 3726 if (mtu_idxp)
3717 *mtu_idxp = mtu_idx; 3727 *mtu_idxp = mtu_idx;
3718 return mtus[mtu_idx]; 3728 return mtus[mtu_idx];
3719 } 3729 }
3720 EXPORT_SYMBOL(cxgb4_best_aligned_mtu); 3730 EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
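
Continuing the hypothetical table from the previous example, asking for a 40-byte header, at most 1000 data bytes and 256-byte data alignment shows the "nearby aligned entry wins" behaviour:

    unsigned int idx;
    unsigned int mtu;

    mtu = cxgb4_best_aligned_mtu(mtus, 40, 1000, 256, &idx);
    /*
     * max_mtu = 40 + 1000 = 1040, so the closest entry is 1024 (idx 5),
     * but 808 (idx 4) gives a data segment of 768 = 3 * 256 and is only
     * one slot away, so mtu = 808 and idx = 4.
     */
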
3721 3731
3722 /** 3732 /**
3723 * cxgb4_port_chan - get the HW channel of a port 3733 * cxgb4_port_chan - get the HW channel of a port
3724 * @dev: the net device for the port 3734 * @dev: the net device for the port
3725 * 3735 *
3726 * Return the HW Tx channel of the given port. 3736 * Return the HW Tx channel of the given port.
3727 */ 3737 */
3728 unsigned int cxgb4_port_chan(const struct net_device *dev) 3738 unsigned int cxgb4_port_chan(const struct net_device *dev)
3729 { 3739 {
3730 return netdev2pinfo(dev)->tx_chan; 3740 return netdev2pinfo(dev)->tx_chan;
3731 } 3741 }
3732 EXPORT_SYMBOL(cxgb4_port_chan); 3742 EXPORT_SYMBOL(cxgb4_port_chan);
3733 3743
3734 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) 3744 unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
3735 { 3745 {
3736 struct adapter *adap = netdev2adap(dev); 3746 struct adapter *adap = netdev2adap(dev);
3737 u32 v1, v2, lp_count, hp_count; 3747 u32 v1, v2, lp_count, hp_count;
3738 3748
3739 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3749 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3740 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 3750 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3741 if (is_t4(adap->params.chip)) { 3751 if (is_t4(adap->params.chip)) {
3742 lp_count = G_LP_COUNT(v1); 3752 lp_count = G_LP_COUNT(v1);
3743 hp_count = G_HP_COUNT(v1); 3753 hp_count = G_HP_COUNT(v1);
3744 } else { 3754 } else {
3745 lp_count = G_LP_COUNT_T5(v1); 3755 lp_count = G_LP_COUNT_T5(v1);
3746 hp_count = G_HP_COUNT_T5(v2); 3756 hp_count = G_HP_COUNT_T5(v2);
3747 } 3757 }
3748 return lpfifo ? lp_count : hp_count; 3758 return lpfifo ? lp_count : hp_count;
3749 } 3759 }
3750 EXPORT_SYMBOL(cxgb4_dbfifo_count); 3760 EXPORT_SYMBOL(cxgb4_dbfifo_count);
3751 3761
3752 /** 3762 /**
3753 * cxgb4_port_viid - get the VI id of a port 3763 * cxgb4_port_viid - get the VI id of a port
3754 * @dev: the net device for the port 3764 * @dev: the net device for the port
3755 * 3765 *
3756 * Return the VI id of the given port. 3766 * Return the VI id of the given port.
3757 */ 3767 */
3758 unsigned int cxgb4_port_viid(const struct net_device *dev) 3768 unsigned int cxgb4_port_viid(const struct net_device *dev)
3759 { 3769 {
3760 return netdev2pinfo(dev)->viid; 3770 return netdev2pinfo(dev)->viid;
3761 } 3771 }
3762 EXPORT_SYMBOL(cxgb4_port_viid); 3772 EXPORT_SYMBOL(cxgb4_port_viid);
3763 3773
3764 /** 3774 /**
3765 * cxgb4_port_idx - get the index of a port 3775 * cxgb4_port_idx - get the index of a port
3766 * @dev: the net device for the port 3776 * @dev: the net device for the port
3767 * 3777 *
3768 * Return the index of the given port. 3778 * Return the index of the given port.
3769 */ 3779 */
3770 unsigned int cxgb4_port_idx(const struct net_device *dev) 3780 unsigned int cxgb4_port_idx(const struct net_device *dev)
3771 { 3781 {
3772 return netdev2pinfo(dev)->port_id; 3782 return netdev2pinfo(dev)->port_id;
3773 } 3783 }
3774 EXPORT_SYMBOL(cxgb4_port_idx); 3784 EXPORT_SYMBOL(cxgb4_port_idx);
3775 3785
3776 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4, 3786 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
3777 struct tp_tcp_stats *v6) 3787 struct tp_tcp_stats *v6)
3778 { 3788 {
3779 struct adapter *adap = pci_get_drvdata(pdev); 3789 struct adapter *adap = pci_get_drvdata(pdev);
3780 3790
3781 spin_lock(&adap->stats_lock); 3791 spin_lock(&adap->stats_lock);
3782 t4_tp_get_tcp_stats(adap, v4, v6); 3792 t4_tp_get_tcp_stats(adap, v4, v6);
3783 spin_unlock(&adap->stats_lock); 3793 spin_unlock(&adap->stats_lock);
3784 } 3794 }
3785 EXPORT_SYMBOL(cxgb4_get_tcp_stats); 3795 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
3786 3796
3787 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask, 3797 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
3788 const unsigned int *pgsz_order) 3798 const unsigned int *pgsz_order)
3789 { 3799 {
3790 struct adapter *adap = netdev2adap(dev); 3800 struct adapter *adap = netdev2adap(dev);
3791 3801
3792 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask); 3802 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
3793 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) | 3803 t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
3794 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) | 3804 HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
3795 HPZ3(pgsz_order[3])); 3805 HPZ3(pgsz_order[3]));
3796 } 3806 }
3797 EXPORT_SYMBOL(cxgb4_iscsi_init); 3807 EXPORT_SYMBOL(cxgb4_iscsi_init);
3798 3808
3799 int cxgb4_flush_eq_cache(struct net_device *dev) 3809 int cxgb4_flush_eq_cache(struct net_device *dev)
3800 { 3810 {
3801 struct adapter *adap = netdev2adap(dev); 3811 struct adapter *adap = netdev2adap(dev);
3802 int ret; 3812 int ret;
3803 3813
3804 ret = t4_fwaddrspace_write(adap, adap->mbox, 3814 ret = t4_fwaddrspace_write(adap, adap->mbox,
3805 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000); 3815 0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
3806 return ret; 3816 return ret;
3807 } 3817 }
3808 EXPORT_SYMBOL(cxgb4_flush_eq_cache); 3818 EXPORT_SYMBOL(cxgb4_flush_eq_cache);
3809 3819
3810 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) 3820 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
3811 { 3821 {
3812 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8; 3822 u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
3813 __be64 indices; 3823 __be64 indices;
3814 int ret; 3824 int ret;
3815 3825
3816 spin_lock(&adap->win0_lock); 3826 spin_lock(&adap->win0_lock);
3817 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, 3827 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
3818 sizeof(indices), (__be32 *)&indices, 3828 sizeof(indices), (__be32 *)&indices,
3819 T4_MEMORY_READ); 3829 T4_MEMORY_READ);
3820 spin_unlock(&adap->win0_lock); 3830 spin_unlock(&adap->win0_lock);
3821 if (!ret) { 3831 if (!ret) {
3822 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff; 3832 *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
3823 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff; 3833 *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
3824 } 3834 }
3825 return ret; 3835 return ret;
3826 } 3836 }
3827 3837
3828 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, 3838 int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
3829 u16 size) 3839 u16 size)
3830 { 3840 {
3831 struct adapter *adap = netdev2adap(dev); 3841 struct adapter *adap = netdev2adap(dev);
3832 u16 hw_pidx, hw_cidx; 3842 u16 hw_pidx, hw_cidx;
3833 int ret; 3843 int ret;
3834 3844
3835 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); 3845 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
3836 if (ret) 3846 if (ret)
3837 goto out; 3847 goto out;
3838 3848
3839 if (pidx != hw_pidx) { 3849 if (pidx != hw_pidx) {
3840 u16 delta; 3850 u16 delta;
3841 3851
3842 if (pidx >= hw_pidx) 3852 if (pidx >= hw_pidx)
3843 delta = pidx - hw_pidx; 3853 delta = pidx - hw_pidx;
3844 else 3854 else
3845 delta = size - hw_pidx + pidx; 3855 delta = size - hw_pidx + pidx;
3846 wmb(); 3856 wmb();
3847 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 3857 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
3848 QID(qid) | PIDX(delta)); 3858 QID(qid) | PIDX(delta));
3849 } 3859 }
3850 out: 3860 out:
3851 return ret; 3861 return ret;
3852 } 3862 }
3853 EXPORT_SYMBOL(cxgb4_sync_txq_pidx); 3863 EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
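
The delta computation above accounts for producer-index wraparound; a worked example with hypothetical values:

    /* ring of size 1024: the driver's pidx has wrapped past the
     * value the hardware last saw, so the doorbell increment is the
     * distance the long way round the ring.
     */
    u16 size = 1024, hw_pidx = 1020, pidx = 4;
    u16 delta = (pidx >= hw_pidx) ? pidx - hw_pidx
                                  : size - hw_pidx + pidx;   /* 8 */
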
3854 3864
3855 void cxgb4_disable_db_coalescing(struct net_device *dev) 3865 void cxgb4_disable_db_coalescing(struct net_device *dev)
3856 { 3866 {
3857 struct adapter *adap; 3867 struct adapter *adap;
3858 3868
3859 adap = netdev2adap(dev); 3869 adap = netdev2adap(dev);
3860 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 3870 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3861 F_NOCOALESCE); 3871 F_NOCOALESCE);
3862 } 3872 }
3863 EXPORT_SYMBOL(cxgb4_disable_db_coalescing); 3873 EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3864 3874
3865 void cxgb4_enable_db_coalescing(struct net_device *dev) 3875 void cxgb4_enable_db_coalescing(struct net_device *dev)
3866 { 3876 {
3867 struct adapter *adap; 3877 struct adapter *adap;
3868 3878
3869 adap = netdev2adap(dev); 3879 adap = netdev2adap(dev);
3870 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0); 3880 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3871 } 3881 }
3872 EXPORT_SYMBOL(cxgb4_enable_db_coalescing); 3882 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3873 3883
3874 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte) 3884 int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
3875 { 3885 {
3876 struct adapter *adap; 3886 struct adapter *adap;
3877 u32 offset, memtype, memaddr; 3887 u32 offset, memtype, memaddr;
3878 u32 edc0_size, edc1_size, mc0_size, mc1_size; 3888 u32 edc0_size, edc1_size, mc0_size, mc1_size;
3879 u32 edc0_end, edc1_end, mc0_end, mc1_end; 3889 u32 edc0_end, edc1_end, mc0_end, mc1_end;
3880 int ret; 3890 int ret;
3881 3891
3882 adap = netdev2adap(dev); 3892 adap = netdev2adap(dev);
3883 3893
3884 offset = ((stag >> 8) * 32) + adap->vres.stag.start; 3894 offset = ((stag >> 8) * 32) + adap->vres.stag.start;
3885 3895
3886 /* Figure out where the offset lands in the Memory Type/Address scheme. 3896 /* Figure out where the offset lands in the Memory Type/Address scheme.
3887 * This code assumes that the memory is laid out starting at offset 0 3897 * This code assumes that the memory is laid out starting at offset 0
3888 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0 3898 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
3889 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have 3899 * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
3890 * MC0, and some have both MC0 and MC1. 3900 * MC0, and some have both MC0 and MC1.
3891 */ 3901 */
3892 edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20; 3902 edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
3893 edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20; 3903 edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
3894 mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20; 3904 mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
3895 3905
3896 edc0_end = edc0_size; 3906 edc0_end = edc0_size;
3897 edc1_end = edc0_end + edc1_size; 3907 edc1_end = edc0_end + edc1_size;
3898 mc0_end = edc1_end + mc0_size; 3908 mc0_end = edc1_end + mc0_size;
3899 3909
3900 if (offset < edc0_end) { 3910 if (offset < edc0_end) {
3901 memtype = MEM_EDC0; 3911 memtype = MEM_EDC0;
3902 memaddr = offset; 3912 memaddr = offset;
3903 } else if (offset < edc1_end) { 3913 } else if (offset < edc1_end) {
3904 memtype = MEM_EDC1; 3914 memtype = MEM_EDC1;
3905 memaddr = offset - edc0_end; 3915 memaddr = offset - edc0_end;
3906 } else { 3916 } else {
3907 if (offset < mc0_end) { 3917 if (offset < mc0_end) {
3908 memtype = MEM_MC0; 3918 memtype = MEM_MC0;
3909 memaddr = offset - edc1_end; 3919 memaddr = offset - edc1_end;
3910 } else if (is_t4(adap->params.chip)) { 3920 } else if (is_t4(adap->params.chip)) {
3911 /* T4 only has a single memory channel */ 3921 /* T4 only has a single memory channel */
3912 goto err; 3922 goto err;
3913 } else { 3923 } else {
3914 mc1_size = EXT_MEM_SIZE_GET( 3924 mc1_size = EXT_MEM_SIZE_GET(
3915 t4_read_reg(adap, 3925 t4_read_reg(adap,
3916 MA_EXT_MEMORY1_BAR)) << 20; 3926 MA_EXT_MEMORY1_BAR)) << 20;
3917 mc1_end = mc0_end + mc1_size; 3927 mc1_end = mc0_end + mc1_size;
3918 if (offset < mc1_end) { 3928 if (offset < mc1_end) {
3919 memtype = MEM_MC1; 3929 memtype = MEM_MC1;
3920 memaddr = offset - mc0_end; 3930 memaddr = offset - mc0_end;
3921 } else { 3931 } else {
3922 /* offset beyond the end of any memory */ 3932 /* offset beyond the end of any memory */
3923 goto err; 3933 goto err;
3924 } 3934 }
3925 } 3935 }
3926 } 3936 }
3927 3937
3928 spin_lock(&adap->win0_lock); 3938 spin_lock(&adap->win0_lock);
3929 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); 3939 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
3930 spin_unlock(&adap->win0_lock); 3940 spin_unlock(&adap->win0_lock);
3931 return ret; 3941 return ret;
3932 3942
3933 err: 3943 err:
3934 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", 3944 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
3935 stag, offset); 3945 stag, offset);
3936 return -EINVAL; 3946 return -EINVAL;
3937 } 3947 }
3938 EXPORT_SYMBOL(cxgb4_read_tpte); 3948 EXPORT_SYMBOL(cxgb4_read_tpte);
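
A standalone sketch of the offset-to-memory-channel mapping above, with hypothetical region sizes (EDC0 and EDC1 of 256 MB each, a 2 GB MC0, no MC1; map_offset is illustrative):

    enum mem_channel { EDC0, EDC1, MC0 };

    static int map_offset(unsigned long long offset,
                          enum mem_channel *memtype,
                          unsigned long long *memaddr)
    {
            const unsigned long long edc0_end = 256ULL << 20;
            const unsigned long long edc1_end = edc0_end + (256ULL << 20);
            const unsigned long long mc0_end = edc1_end + (2048ULL << 20);

            if (offset < edc0_end) {            /* e.g. 128 MB -> EDC0 + 128 MB */
                    *memtype = EDC0;
                    *memaddr = offset;
            } else if (offset < edc1_end) {     /* e.g. 384 MB -> EDC1 + 128 MB */
                    *memtype = EDC1;
                    *memaddr = offset - edc0_end;
            } else if (offset < mc0_end) {      /* e.g. 768 MB -> MC0 + 256 MB */
                    *memtype = MC0;
                    *memaddr = offset - edc1_end;
            } else {
                    return -1;                  /* beyond the end of any memory */
            }
            return 0;
    }
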
3939 3949
3940 u64 cxgb4_read_sge_timestamp(struct net_device *dev) 3950 u64 cxgb4_read_sge_timestamp(struct net_device *dev)
3941 { 3951 {
3942 u32 hi, lo; 3952 u32 hi, lo;
3943 struct adapter *adap; 3953 struct adapter *adap;
3944 3954
3945 adap = netdev2adap(dev); 3955 adap = netdev2adap(dev);
3946 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO); 3956 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
3947 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI)); 3957 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
3948 3958
3949 return ((u64)hi << 32) | (u64)lo; 3959 return ((u64)hi << 32) | (u64)lo;
3950 } 3960 }
3951 EXPORT_SYMBOL(cxgb4_read_sge_timestamp); 3961 EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
3952 3962
3953 static struct pci_driver cxgb4_driver; 3963 static struct pci_driver cxgb4_driver;
3954 3964
3955 static void check_neigh_update(struct neighbour *neigh) 3965 static void check_neigh_update(struct neighbour *neigh)
3956 { 3966 {
3957 const struct device *parent; 3967 const struct device *parent;
3958 const struct net_device *netdev = neigh->dev; 3968 const struct net_device *netdev = neigh->dev;
3959 3969
3960 if (netdev->priv_flags & IFF_802_1Q_VLAN) 3970 if (netdev->priv_flags & IFF_802_1Q_VLAN)
3961 netdev = vlan_dev_real_dev(netdev); 3971 netdev = vlan_dev_real_dev(netdev);
3962 parent = netdev->dev.parent; 3972 parent = netdev->dev.parent;
3963 if (parent && parent->driver == &cxgb4_driver.driver) 3973 if (parent && parent->driver == &cxgb4_driver.driver)
3964 t4_l2t_update(dev_get_drvdata(parent), neigh); 3974 t4_l2t_update(dev_get_drvdata(parent), neigh);
3965 } 3975 }
3966 3976
3967 static int netevent_cb(struct notifier_block *nb, unsigned long event, 3977 static int netevent_cb(struct notifier_block *nb, unsigned long event,
3968 void *data) 3978 void *data)
3969 { 3979 {
3970 switch (event) { 3980 switch (event) {
3971 case NETEVENT_NEIGH_UPDATE: 3981 case NETEVENT_NEIGH_UPDATE:
3972 check_neigh_update(data); 3982 check_neigh_update(data);
3973 break; 3983 break;
3974 case NETEVENT_REDIRECT: 3984 case NETEVENT_REDIRECT:
3975 default: 3985 default:
3976 break; 3986 break;
3977 } 3987 }
3978 return 0; 3988 return 0;
3979 } 3989 }
3980 3990
3981 static bool netevent_registered; 3991 static bool netevent_registered;
3982 static struct notifier_block cxgb4_netevent_nb = { 3992 static struct notifier_block cxgb4_netevent_nb = {
3983 .notifier_call = netevent_cb 3993 .notifier_call = netevent_cb
3984 }; 3994 };
3985 3995
3986 static void drain_db_fifo(struct adapter *adap, int usecs) 3996 static void drain_db_fifo(struct adapter *adap, int usecs)
3987 { 3997 {
3988 u32 v1, v2, lp_count, hp_count; 3998 u32 v1, v2, lp_count, hp_count;
3989 3999
3990 do { 4000 do {
3991 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 4001 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3992 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2); 4002 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3993 if (is_t4(adap->params.chip)) { 4003 if (is_t4(adap->params.chip)) {
3994 lp_count = G_LP_COUNT(v1); 4004 lp_count = G_LP_COUNT(v1);
3995 hp_count = G_HP_COUNT(v1); 4005 hp_count = G_HP_COUNT(v1);
3996 } else { 4006 } else {
3997 lp_count = G_LP_COUNT_T5(v1); 4007 lp_count = G_LP_COUNT_T5(v1);
3998 hp_count = G_HP_COUNT_T5(v2); 4008 hp_count = G_HP_COUNT_T5(v2);
3999 } 4009 }
4000 4010
4001 if (lp_count == 0 && hp_count == 0) 4011 if (lp_count == 0 && hp_count == 0)
4002 break; 4012 break;
4003 set_current_state(TASK_UNINTERRUPTIBLE); 4013 set_current_state(TASK_UNINTERRUPTIBLE);
4004 schedule_timeout(usecs_to_jiffies(usecs)); 4014 schedule_timeout(usecs_to_jiffies(usecs));
4005 } while (1); 4015 } while (1);
4006 } 4016 }
4007 4017
4008 static void disable_txq_db(struct sge_txq *q) 4018 static void disable_txq_db(struct sge_txq *q)
4009 { 4019 {
4010 unsigned long flags; 4020 unsigned long flags;
4011 4021
4012 spin_lock_irqsave(&q->db_lock, flags); 4022 spin_lock_irqsave(&q->db_lock, flags);
4013 q->db_disabled = 1; 4023 q->db_disabled = 1;
4014 spin_unlock_irqrestore(&q->db_lock, flags); 4024 spin_unlock_irqrestore(&q->db_lock, flags);
4015 } 4025 }
4016 4026
4017 static void enable_txq_db(struct adapter *adap, struct sge_txq *q) 4027 static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
4018 { 4028 {
4019 spin_lock_irq(&q->db_lock); 4029 spin_lock_irq(&q->db_lock);
4020 if (q->db_pidx_inc) { 4030 if (q->db_pidx_inc) {
4021 /* Make sure that all writes to the TX descriptors 4031 /* Make sure that all writes to the TX descriptors
4022 * are committed before we tell HW about them. 4032 * are committed before we tell HW about them.
4023 */ 4033 */
4024 wmb(); 4034 wmb();
4025 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 4035 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4026 QID(q->cntxt_id) | PIDX(q->db_pidx_inc)); 4036 QID(q->cntxt_id) | PIDX(q->db_pidx_inc));
4027 q->db_pidx_inc = 0; 4037 q->db_pidx_inc = 0;
4028 } 4038 }
4029 q->db_disabled = 0; 4039 q->db_disabled = 0;
4030 spin_unlock_irq(&q->db_lock); 4040 spin_unlock_irq(&q->db_lock);
4031 } 4041 }
4032 4042
4033 static void disable_dbs(struct adapter *adap) 4043 static void disable_dbs(struct adapter *adap)
4034 { 4044 {
4035 int i; 4045 int i;
4036 4046
4037 for_each_ethrxq(&adap->sge, i) 4047 for_each_ethrxq(&adap->sge, i)
4038 disable_txq_db(&adap->sge.ethtxq[i].q); 4048 disable_txq_db(&adap->sge.ethtxq[i].q);
4039 for_each_ofldrxq(&adap->sge, i) 4049 for_each_ofldrxq(&adap->sge, i)
4040 disable_txq_db(&adap->sge.ofldtxq[i].q); 4050 disable_txq_db(&adap->sge.ofldtxq[i].q);
4041 for_each_port(adap, i) 4051 for_each_port(adap, i)
4042 disable_txq_db(&adap->sge.ctrlq[i].q); 4052 disable_txq_db(&adap->sge.ctrlq[i].q);
4043 } 4053 }
4044 4054
4045 static void enable_dbs(struct adapter *adap) 4055 static void enable_dbs(struct adapter *adap)
4046 { 4056 {
4047 int i; 4057 int i;
4048 4058
4049 for_each_ethrxq(&adap->sge, i) 4059 for_each_ethrxq(&adap->sge, i)
4050 enable_txq_db(adap, &adap->sge.ethtxq[i].q); 4060 enable_txq_db(adap, &adap->sge.ethtxq[i].q);
4051 for_each_ofldrxq(&adap->sge, i) 4061 for_each_ofldrxq(&adap->sge, i)
4052 enable_txq_db(adap, &adap->sge.ofldtxq[i].q); 4062 enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
4053 for_each_port(adap, i) 4063 for_each_port(adap, i)
4054 enable_txq_db(adap, &adap->sge.ctrlq[i].q); 4064 enable_txq_db(adap, &adap->sge.ctrlq[i].q);
4055 } 4065 }
4056 4066
4057 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) 4067 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
4058 { 4068 {
4059 if (adap->uld_handle[CXGB4_ULD_RDMA]) 4069 if (adap->uld_handle[CXGB4_ULD_RDMA])
4060 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], 4070 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
4061 cmd); 4071 cmd);
4062 } 4072 }
4063 4073
4064 static void process_db_full(struct work_struct *work) 4074 static void process_db_full(struct work_struct *work)
4065 { 4075 {
4066 struct adapter *adap; 4076 struct adapter *adap;
4067 4077
4068 adap = container_of(work, struct adapter, db_full_task); 4078 adap = container_of(work, struct adapter, db_full_task);
4069 4079
4070 drain_db_fifo(adap, dbfifo_drain_delay); 4080 drain_db_fifo(adap, dbfifo_drain_delay);
4071 enable_dbs(adap); 4081 enable_dbs(adap);
4072 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); 4082 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4073 t4_set_reg_field(adap, SGE_INT_ENABLE3, 4083 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4074 DBFIFO_HP_INT | DBFIFO_LP_INT, 4084 DBFIFO_HP_INT | DBFIFO_LP_INT,
4075 DBFIFO_HP_INT | DBFIFO_LP_INT); 4085 DBFIFO_HP_INT | DBFIFO_LP_INT);
4076 } 4086 }
4077 4087
4078 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) 4088 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
4079 { 4089 {
4080 u16 hw_pidx, hw_cidx; 4090 u16 hw_pidx, hw_cidx;
4081 int ret; 4091 int ret;
4082 4092
4083 spin_lock_irq(&q->db_lock); 4093 spin_lock_irq(&q->db_lock);
4084 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); 4094 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
4085 if (ret) 4095 if (ret)
4086 goto out; 4096 goto out;
4087 if (q->db_pidx != hw_pidx) { 4097 if (q->db_pidx != hw_pidx) {
4088 u16 delta; 4098 u16 delta;
4089 4099
4090 if (q->db_pidx >= hw_pidx) 4100 if (q->db_pidx >= hw_pidx)
4091 delta = q->db_pidx - hw_pidx; 4101 delta = q->db_pidx - hw_pidx;
4092 else 4102 else
4093 delta = q->size - hw_pidx + q->db_pidx; 4103 delta = q->size - hw_pidx + q->db_pidx;
4094 wmb(); 4104 wmb();
4095 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 4105 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
4096 QID(q->cntxt_id) | PIDX(delta)); 4106 QID(q->cntxt_id) | PIDX(delta));
4097 } 4107 }
4098 out: 4108 out:
4099 q->db_disabled = 0; 4109 q->db_disabled = 0;
4100 q->db_pidx_inc = 0; 4110 q->db_pidx_inc = 0;
4101 spin_unlock_irq(&q->db_lock); 4111 spin_unlock_irq(&q->db_lock);
4102 if (ret) 4112 if (ret)
4103 CH_WARN(adap, "DB drop recovery failed.\n"); 4113 CH_WARN(adap, "DB drop recovery failed.\n");
4104 } 4114 }
4105 static void recover_all_queues(struct adapter *adap) 4115 static void recover_all_queues(struct adapter *adap)
4106 { 4116 {
4107 int i; 4117 int i;
4108 4118
4109 for_each_ethrxq(&adap->sge, i) 4119 for_each_ethrxq(&adap->sge, i)
4110 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); 4120 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
4111 for_each_ofldrxq(&adap->sge, i) 4121 for_each_ofldrxq(&adap->sge, i)
4112 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); 4122 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
4113 for_each_port(adap, i) 4123 for_each_port(adap, i)
4114 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); 4124 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
4115 } 4125 }
4116 4126
4117 static void process_db_drop(struct work_struct *work) 4127 static void process_db_drop(struct work_struct *work)
4118 { 4128 {
4119 struct adapter *adap; 4129 struct adapter *adap;
4120 4130
4121 adap = container_of(work, struct adapter, db_drop_task); 4131 adap = container_of(work, struct adapter, db_drop_task);
4122 4132
4123 if (is_t4(adap->params.chip)) { 4133 if (is_t4(adap->params.chip)) {
4124 drain_db_fifo(adap, dbfifo_drain_delay); 4134 drain_db_fifo(adap, dbfifo_drain_delay);
4125 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); 4135 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
4126 drain_db_fifo(adap, dbfifo_drain_delay); 4136 drain_db_fifo(adap, dbfifo_drain_delay);
4127 recover_all_queues(adap); 4137 recover_all_queues(adap);
4128 drain_db_fifo(adap, dbfifo_drain_delay); 4138 drain_db_fifo(adap, dbfifo_drain_delay);
4129 enable_dbs(adap); 4139 enable_dbs(adap);
4130 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); 4140 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
4131 } else { 4141 } else {
4132 u32 dropped_db = t4_read_reg(adap, 0x010ac); 4142 u32 dropped_db = t4_read_reg(adap, 0x010ac);
4133 u16 qid = (dropped_db >> 15) & 0x1ffff; 4143 u16 qid = (dropped_db >> 15) & 0x1ffff;
4134 u16 pidx_inc = dropped_db & 0x1fff; 4144 u16 pidx_inc = dropped_db & 0x1fff;
4135 unsigned int s_qpp; 4145 unsigned int s_qpp;
4136 unsigned short udb_density; 4146 unsigned short udb_density;
4137 unsigned long qpshift; 4147 unsigned long qpshift;
4138 int page; 4148 int page;
4139 u32 udb; 4149 u32 udb;
4140 4150
4141 dev_warn(adap->pdev_dev, 4151 dev_warn(adap->pdev_dev,
4142 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n", 4152 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
4143 dropped_db, qid, 4153 dropped_db, qid,
4144 (dropped_db >> 14) & 1, 4154 (dropped_db >> 14) & 1,
4145 (dropped_db >> 13) & 1, 4155 (dropped_db >> 13) & 1,
4146 pidx_inc); 4156 pidx_inc);
4147 4157
4148 drain_db_fifo(adap, 1); 4158 drain_db_fifo(adap, 1);
4149 4159
4150 s_qpp = QUEUESPERPAGEPF1 * adap->fn; 4160 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
4151 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap, 4161 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
4152 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); 4162 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
4153 qpshift = PAGE_SHIFT - ilog2(udb_density); 4163 qpshift = PAGE_SHIFT - ilog2(udb_density);
4154 udb = qid << qpshift; 4164 udb = qid << qpshift;
4155 udb &= PAGE_MASK; 4165 udb &= PAGE_MASK;
4156 page = udb / PAGE_SIZE; 4166 page = udb / PAGE_SIZE;
4157 udb += (qid - (page * udb_density)) * 128; 4167 udb += (qid - (page * udb_density)) * 128;
4158 4168
4159 writel(PIDX(pidx_inc), adap->bar2 + udb + 8); 4169 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
4160 4170
4161 /* Re-enable BAR2 WC */ 4171 /* Re-enable BAR2 WC */
4162 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); 4172 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
4163 } 4173 }
4164 4174
4165 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); 4175 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
4166 } 4176 }
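
A worked example of the T5 BAR2 address arithmetic above, assuming a 4 KB page and a hypothetical density of 16 user doorbell regions per page:

    unsigned long qpshift = 12 - ilog2(16);     /* PAGE_SHIFT - 4 = 8 */
    u32 qid = 0x123;
    u32 udb = (qid << qpshift) & PAGE_MASK;     /* 0x12300 -> page base 0x12000 */
    int page = udb / 4096;                      /* page 18 */

    udb += (qid - page * 16) * 128;             /* queue 3 in its page: 0x12180 */
    /* the PIDX kick is then written at BAR2 offset udb + 8 */
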
4167 4177
4168 void t4_db_full(struct adapter *adap) 4178 void t4_db_full(struct adapter *adap)
4169 { 4179 {
4170 if (is_t4(adap->params.chip)) { 4180 if (is_t4(adap->params.chip)) {
4171 disable_dbs(adap); 4181 disable_dbs(adap);
4172 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); 4182 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4173 t4_set_reg_field(adap, SGE_INT_ENABLE3, 4183 t4_set_reg_field(adap, SGE_INT_ENABLE3,
4174 DBFIFO_HP_INT | DBFIFO_LP_INT, 0); 4184 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
4175 queue_work(adap->workq, &adap->db_full_task); 4185 queue_work(adap->workq, &adap->db_full_task);
4176 } 4186 }
4177 } 4187 }
4178 4188
4179 void t4_db_dropped(struct adapter *adap) 4189 void t4_db_dropped(struct adapter *adap)
4180 { 4190 {
4181 if (is_t4(adap->params.chip)) { 4191 if (is_t4(adap->params.chip)) {
4182 disable_dbs(adap); 4192 disable_dbs(adap);
4183 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); 4193 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
4184 } 4194 }
4185 queue_work(adap->workq, &adap->db_drop_task); 4195 queue_work(adap->workq, &adap->db_drop_task);
4186 } 4196 }
4187 4197
4188 static void uld_attach(struct adapter *adap, unsigned int uld) 4198 static void uld_attach(struct adapter *adap, unsigned int uld)
4189 { 4199 {
4190 void *handle; 4200 void *handle;
4191 struct cxgb4_lld_info lli; 4201 struct cxgb4_lld_info lli;
4192 unsigned short i; 4202 unsigned short i;
4193 4203
4194 lli.pdev = adap->pdev; 4204 lli.pdev = adap->pdev;
4195 lli.pf = adap->fn; 4205 lli.pf = adap->fn;
4196 lli.l2t = adap->l2t; 4206 lli.l2t = adap->l2t;
4197 lli.tids = &adap->tids; 4207 lli.tids = &adap->tids;
4198 lli.ports = adap->port; 4208 lli.ports = adap->port;
4199 lli.vr = &adap->vres; 4209 lli.vr = &adap->vres;
4200 lli.mtus = adap->params.mtus; 4210 lli.mtus = adap->params.mtus;
4201 if (uld == CXGB4_ULD_RDMA) { 4211 if (uld == CXGB4_ULD_RDMA) {
4202 lli.rxq_ids = adap->sge.rdma_rxq; 4212 lli.rxq_ids = adap->sge.rdma_rxq;
4203 lli.ciq_ids = adap->sge.rdma_ciq; 4213 lli.ciq_ids = adap->sge.rdma_ciq;
4204 lli.nrxq = adap->sge.rdmaqs; 4214 lli.nrxq = adap->sge.rdmaqs;
4205 lli.nciq = adap->sge.rdmaciqs; 4215 lli.nciq = adap->sge.rdmaciqs;
4206 } else if (uld == CXGB4_ULD_ISCSI) { 4216 } else if (uld == CXGB4_ULD_ISCSI) {
4207 lli.rxq_ids = adap->sge.ofld_rxq; 4217 lli.rxq_ids = adap->sge.ofld_rxq;
4208 lli.nrxq = adap->sge.ofldqsets; 4218 lli.nrxq = adap->sge.ofldqsets;
4209 } 4219 }
4210 lli.ntxq = adap->sge.ofldqsets; 4220 lli.ntxq = adap->sge.ofldqsets;
4211 lli.nchan = adap->params.nports; 4221 lli.nchan = adap->params.nports;
4212 lli.nports = adap->params.nports; 4222 lli.nports = adap->params.nports;
4213 lli.wr_cred = adap->params.ofldq_wr_cred; 4223 lli.wr_cred = adap->params.ofldq_wr_cred;
4214 lli.adapter_type = adap->params.chip; 4224 lli.adapter_type = adap->params.chip;
4215 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2)); 4225 lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
4216 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; 4226 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
4217 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET( 4227 lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
4218 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >> 4228 t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
4219 (adap->fn * 4)); 4229 (adap->fn * 4));
4220 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET( 4230 lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
4221 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >> 4231 t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
4222 (adap->fn * 4)); 4232 (adap->fn * 4));
4223 lli.filt_mode = adap->params.tp.vlan_pri_map; 4233 lli.filt_mode = adap->params.tp.vlan_pri_map;
4224 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ 4234 /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
4225 for (i = 0; i < NCHAN; i++) 4235 for (i = 0; i < NCHAN; i++)
4226 lli.tx_modq[i] = i; 4236 lli.tx_modq[i] = i;
4227 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS); 4237 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
4228 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL); 4238 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
4229 lli.fw_vers = adap->params.fw_vers; 4239 lli.fw_vers = adap->params.fw_vers;
4230 lli.dbfifo_int_thresh = dbfifo_int_thresh; 4240 lli.dbfifo_int_thresh = dbfifo_int_thresh;
4231 lli.sge_ingpadboundary = adap->sge.fl_align; 4241 lli.sge_ingpadboundary = adap->sge.fl_align;
4232 lli.sge_egrstatuspagesize = adap->sge.stat_len; 4242 lli.sge_egrstatuspagesize = adap->sge.stat_len;
4233 lli.sge_pktshift = adap->sge.pktshift; 4243 lli.sge_pktshift = adap->sge.pktshift;
4234 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; 4244 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
4235 lli.max_ordird_qp = adap->params.max_ordird_qp; 4245 lli.max_ordird_qp = adap->params.max_ordird_qp;
4236 lli.max_ird_adapter = adap->params.max_ird_adapter; 4246 lli.max_ird_adapter = adap->params.max_ird_adapter;
4237 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; 4247 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
4238 4248
4239 handle = ulds[uld].add(&lli); 4249 handle = ulds[uld].add(&lli);
4240 if (IS_ERR(handle)) { 4250 if (IS_ERR(handle)) {
4241 dev_warn(adap->pdev_dev, 4251 dev_warn(adap->pdev_dev,
4242 "could not attach to the %s driver, error %ld\n", 4252 "could not attach to the %s driver, error %ld\n",
4243 uld_str[uld], PTR_ERR(handle)); 4253 uld_str[uld], PTR_ERR(handle));
4244 return; 4254 return;
4245 } 4255 }
4246 4256
4247 adap->uld_handle[uld] = handle; 4257 adap->uld_handle[uld] = handle;
4248 4258
4249 if (!netevent_registered) { 4259 if (!netevent_registered) {
4250 register_netevent_notifier(&cxgb4_netevent_nb); 4260 register_netevent_notifier(&cxgb4_netevent_nb);
4251 netevent_registered = true; 4261 netevent_registered = true;
4252 } 4262 }
4253 4263
4254 if (adap->flags & FULL_INIT_DONE) 4264 if (adap->flags & FULL_INIT_DONE)
4255 ulds[uld].state_change(handle, CXGB4_STATE_UP); 4265 ulds[uld].state_change(handle, CXGB4_STATE_UP);
4256 } 4266 }
4257 4267
4258 static void attach_ulds(struct adapter *adap) 4268 static void attach_ulds(struct adapter *adap)
4259 { 4269 {
4260 unsigned int i; 4270 unsigned int i;
4261 4271
4262 spin_lock(&adap_rcu_lock); 4272 spin_lock(&adap_rcu_lock);
4263 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list); 4273 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
4264 spin_unlock(&adap_rcu_lock); 4274 spin_unlock(&adap_rcu_lock);
4265 4275
4266 mutex_lock(&uld_mutex); 4276 mutex_lock(&uld_mutex);
4267 list_add_tail(&adap->list_node, &adapter_list); 4277 list_add_tail(&adap->list_node, &adapter_list);
4268 for (i = 0; i < CXGB4_ULD_MAX; i++) 4278 for (i = 0; i < CXGB4_ULD_MAX; i++)
4269 if (ulds[i].add) 4279 if (ulds[i].add)
4270 uld_attach(adap, i); 4280 uld_attach(adap, i);
4271 mutex_unlock(&uld_mutex); 4281 mutex_unlock(&uld_mutex);
4272 } 4282 }
4273 4283
4274 static void detach_ulds(struct adapter *adap) 4284 static void detach_ulds(struct adapter *adap)
4275 { 4285 {
4276 unsigned int i; 4286 unsigned int i;
4277 4287
4278 mutex_lock(&uld_mutex); 4288 mutex_lock(&uld_mutex);
4279 list_del(&adap->list_node); 4289 list_del(&adap->list_node);
4280 for (i = 0; i < CXGB4_ULD_MAX; i++) 4290 for (i = 0; i < CXGB4_ULD_MAX; i++)
4281 if (adap->uld_handle[i]) { 4291 if (adap->uld_handle[i]) {
4282 ulds[i].state_change(adap->uld_handle[i], 4292 ulds[i].state_change(adap->uld_handle[i],
4283 CXGB4_STATE_DETACH); 4293 CXGB4_STATE_DETACH);
4284 adap->uld_handle[i] = NULL; 4294 adap->uld_handle[i] = NULL;
4285 } 4295 }
4286 if (netevent_registered && list_empty(&adapter_list)) { 4296 if (netevent_registered && list_empty(&adapter_list)) {
4287 unregister_netevent_notifier(&cxgb4_netevent_nb); 4297 unregister_netevent_notifier(&cxgb4_netevent_nb);
4288 netevent_registered = false; 4298 netevent_registered = false;
4289 } 4299 }
4290 mutex_unlock(&uld_mutex); 4300 mutex_unlock(&uld_mutex);
4291 4301
4292 spin_lock(&adap_rcu_lock); 4302 spin_lock(&adap_rcu_lock);
4293 list_del_rcu(&adap->rcu_node); 4303 list_del_rcu(&adap->rcu_node);
4294 spin_unlock(&adap_rcu_lock); 4304 spin_unlock(&adap_rcu_lock);
4295 } 4305 }
4296 4306
4297 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) 4307 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
4298 { 4308 {
4299 unsigned int i; 4309 unsigned int i;
4300 4310
4301 mutex_lock(&uld_mutex); 4311 mutex_lock(&uld_mutex);
4302 for (i = 0; i < CXGB4_ULD_MAX; i++) 4312 for (i = 0; i < CXGB4_ULD_MAX; i++)
4303 if (adap->uld_handle[i]) 4313 if (adap->uld_handle[i])
4304 ulds[i].state_change(adap->uld_handle[i], new_state); 4314 ulds[i].state_change(adap->uld_handle[i], new_state);
4305 mutex_unlock(&uld_mutex); 4315 mutex_unlock(&uld_mutex);
4306 } 4316 }
4307 4317
4308 /** 4318 /**
4309 * cxgb4_register_uld - register an upper-layer driver 4319 * cxgb4_register_uld - register an upper-layer driver
4310 * @type: the ULD type 4320 * @type: the ULD type
4311 * @p: the ULD methods 4321 * @p: the ULD methods
4312 * 4322 *
4313 * Registers an upper-layer driver with this driver and notifies the ULD 4323 * Registers an upper-layer driver with this driver and notifies the ULD
4314 * about any presently available devices that support its type. Returns 4324 * about any presently available devices that support its type. Returns
4315 * %-EBUSY if a ULD of the same type is already registered. 4325 * %-EBUSY if a ULD of the same type is already registered.
4316 */ 4326 */
4317 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p) 4327 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
4318 { 4328 {
4319 int ret = 0; 4329 int ret = 0;
4320 struct adapter *adap; 4330 struct adapter *adap;
4321 4331
4322 if (type >= CXGB4_ULD_MAX) 4332 if (type >= CXGB4_ULD_MAX)
4323 return -EINVAL; 4333 return -EINVAL;
4324 mutex_lock(&uld_mutex); 4334 mutex_lock(&uld_mutex);
4325 if (ulds[type].add) { 4335 if (ulds[type].add) {
4326 ret = -EBUSY; 4336 ret = -EBUSY;
4327 goto out; 4337 goto out;
4328 } 4338 }
4329 ulds[type] = *p; 4339 ulds[type] = *p;
4330 list_for_each_entry(adap, &adapter_list, list_node) 4340 list_for_each_entry(adap, &adapter_list, list_node)
4331 uld_attach(adap, type); 4341 uld_attach(adap, type);
4332 out: mutex_unlock(&uld_mutex); 4342 out: mutex_unlock(&uld_mutex);
4333 return ret; 4343 return ret;
4334 } 4344 }
4335 EXPORT_SYMBOL(cxgb4_register_uld); 4345 EXPORT_SYMBOL(cxgb4_register_uld);
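
A minimal hypothetical registration from an upper-layer driver's module init (the my_uld_* names are illustrative, and the real cxgb4_uld_info carries more methods, e.g. an rx_handler):

    static void *my_uld_add(const struct cxgb4_lld_info *lli)
    {
            /* allocate a per-adapter handle; ERR_PTR() on failure */
            return kzalloc(64, GFP_KERNEL) ?: ERR_PTR(-ENOMEM);
    }

    static int my_uld_state_change(void *handle, enum cxgb4_state state)
    {
            return 0;
    }

    static const struct cxgb4_uld_info my_uld_info = {
            .name           = "my_uld",
            .add            = my_uld_add,
            .state_change   = my_uld_state_change,
    };

    static int __init my_uld_init(void)
    {
            return cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
    }
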
4336 4346
4337 /** 4347 /**
4338 * cxgb4_unregister_uld - unregister an upper-layer driver 4348 * cxgb4_unregister_uld - unregister an upper-layer driver
4339 * @type: the ULD type 4349 * @type: the ULD type
4340 * 4350 *
4341 * Unregisters an existing upper-layer driver. 4351 * Unregisters an existing upper-layer driver.
4342 */ 4352 */
4343 int cxgb4_unregister_uld(enum cxgb4_uld type) 4353 int cxgb4_unregister_uld(enum cxgb4_uld type)
4344 { 4354 {
4345 struct adapter *adap; 4355 struct adapter *adap;
4346 4356
4347 if (type >= CXGB4_ULD_MAX) 4357 if (type >= CXGB4_ULD_MAX)
4348 return -EINVAL; 4358 return -EINVAL;
4349 mutex_lock(&uld_mutex); 4359 mutex_lock(&uld_mutex);
4350 list_for_each_entry(adap, &adapter_list, list_node) 4360 list_for_each_entry(adap, &adapter_list, list_node)
4351 adap->uld_handle[type] = NULL; 4361 adap->uld_handle[type] = NULL;
4352 ulds[type].add = NULL; 4362 ulds[type].add = NULL;
4353 mutex_unlock(&uld_mutex); 4363 mutex_unlock(&uld_mutex);
4354 return 0; 4364 return 0;
4355 } 4365 }
4356 EXPORT_SYMBOL(cxgb4_unregister_uld); 4366 EXPORT_SYMBOL(cxgb4_unregister_uld);
4357 4367
4358 /* Check whether the netdev on which the event occurred belongs to us. Return 4368 /* Check whether the netdev on which the event occurred belongs to us. Return
4359 * success (true) if it does, failure (false) otherwise. 4369 * success (true) if it does, failure (false) otherwise.
4360 * Called with rcu_read_lock() held. 4370 * Called with rcu_read_lock() held.
4361 */ 4371 */
4362 static bool cxgb4_netdev(const struct net_device *netdev) 4372 static bool cxgb4_netdev(const struct net_device *netdev)
4363 { 4373 {
4364 struct adapter *adap; 4374 struct adapter *adap;
4365 int i; 4375 int i;
4366 4376
4367 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node) 4377 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4368 for (i = 0; i < MAX_NPORTS; i++) 4378 for (i = 0; i < MAX_NPORTS; i++)
4369 if (adap->port[i] == netdev) 4379 if (adap->port[i] == netdev)
4370 return true; 4380 return true;
4371 return false; 4381 return false;
4372 } 4382 }
4373 4383
4374 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, 4384 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
4375 unsigned long event) 4385 unsigned long event)
4376 { 4386 {
4377 int ret = NOTIFY_DONE; 4387 int ret = NOTIFY_DONE;
4378 4388
4379 rcu_read_lock(); 4389 rcu_read_lock();
4380 if (cxgb4_netdev(event_dev)) { 4390 if (cxgb4_netdev(event_dev)) {
4381 switch (event) { 4391 switch (event) {
4382 case NETDEV_UP: 4392 case NETDEV_UP:
4383 ret = cxgb4_clip_get(event_dev, 4393 ret = cxgb4_clip_get(event_dev,
4384 (const struct in6_addr *)ifa->addr.s6_addr); 4394 (const struct in6_addr *)ifa->addr.s6_addr);
4385 if (ret < 0) { 4395 if (ret < 0) {
4386 rcu_read_unlock(); 4396 rcu_read_unlock();
4387 return ret; 4397 return ret;
4388 } 4398 }
4389 ret = NOTIFY_OK; 4399 ret = NOTIFY_OK;
4390 break; 4400 break;
4391 case NETDEV_DOWN: 4401 case NETDEV_DOWN:
4392 cxgb4_clip_release(event_dev, 4402 cxgb4_clip_release(event_dev,
4393 (const struct in6_addr *)ifa->addr.s6_addr); 4403 (const struct in6_addr *)ifa->addr.s6_addr);
4394 ret = NOTIFY_OK; 4404 ret = NOTIFY_OK;
4395 break; 4405 break;
4396 default: 4406 default:
4397 break; 4407 break;
4398 } 4408 }
4399 } 4409 }
4400 rcu_read_unlock(); 4410 rcu_read_unlock();
4401 return ret; 4411 return ret;
4402 } 4412 }
4403 4413
4404 static int cxgb4_inet6addr_handler(struct notifier_block *this, 4414 static int cxgb4_inet6addr_handler(struct notifier_block *this,
4405 unsigned long event, void *data) 4415 unsigned long event, void *data)
4406 { 4416 {
4407 struct inet6_ifaddr *ifa = data; 4417 struct inet6_ifaddr *ifa = data;
4408 struct net_device *event_dev; 4418 struct net_device *event_dev;
4409 int ret = NOTIFY_DONE; 4419 int ret = NOTIFY_DONE;
4410 struct bonding *bond = netdev_priv(ifa->idev->dev); 4420 struct bonding *bond = netdev_priv(ifa->idev->dev);
4411 struct list_head *iter; 4421 struct list_head *iter;
4412 struct slave *slave; 4422 struct slave *slave;
4413 struct pci_dev *first_pdev = NULL; 4423 struct pci_dev *first_pdev = NULL;
4414 4424
4415 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) { 4425 if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
4416 event_dev = vlan_dev_real_dev(ifa->idev->dev); 4426 event_dev = vlan_dev_real_dev(ifa->idev->dev);
4417 ret = clip_add(event_dev, ifa, event); 4427 ret = clip_add(event_dev, ifa, event);
4418 } else if (ifa->idev->dev->flags & IFF_MASTER) { 4428 } else if (ifa->idev->dev->flags & IFF_MASTER) {
4419 /* It is possible that two different adapters are bonded in one 4429 /* It is possible that two different adapters are bonded in one
4420 * bond. We need to find each such adapter and add the CLIP 4430 * bond. We need to find each such adapter and add the CLIP
4421 * entry to it exactly once. 4431 * entry to it exactly once.
4422 */ 4432 */
		bond_for_each_slave(bond, slave, iter) {
			if (!first_pdev) {
				ret = clip_add(slave->dev, ifa, event);
				/* Only initialize first_pdev if clip_add
				 * succeeds, since that means the slave is
				 * one of our devices.
				 */
				if (ret == NOTIFY_OK)
					first_pdev = to_pci_dev(
						slave->dev->dev.parent);
			} else if (first_pdev !=
				   to_pci_dev(slave->dev->dev.parent))
				ret = clip_add(slave->dev, ifa, event);
		}
	} else
		ret = clip_add(ifa->idev->dev, ifa, event);

	return ret;
}

static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};
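
/* Illustrative sketch (not part of this commit): a notifier block like the
 * one above is normally wired up in the driver's module init/exit paths via
 * the standard inet6addr notifier API, e.g.:
 *
 *	register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
 *	...
 *	unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
 *
 * The actual cxgb4 call sites live outside the hunk shown here.
 */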

/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
 * a physical device.
 * The physical device reference is needed to send the actual CLIP command.
 */
static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
{
	struct inet6_dev *idev = NULL;
	struct inet6_ifaddr *ifa;
	int ret = 0;

	idev = __in6_dev_get(root_dev);
	if (!idev)
		return ret;

	read_lock_bh(&idev->lock);
	list_for_each_entry(ifa, &idev->addr_list, if_list) {
		ret = cxgb4_clip_get(dev,
				(const struct in6_addr *)ifa->addr.s6_addr);
		if (ret < 0)
			break;
	}
	read_unlock_bh(&idev->lock);

	return ret;
}

static int update_root_dev_clip(struct net_device *dev)
{
	struct net_device *root_dev = NULL;
	int i, ret = 0;

	/* First populate the real net device's IPv6 addresses */
	ret = update_dev_clip(dev, dev);
	if (ret)
		return ret;

	/* Parse all bond and vlan devices layered on top of the physical dev */
	for (i = 0; i < VLAN_N_VID; i++) {
		root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
		if (!root_dev)
			continue;

		ret = update_dev_clip(root_dev, dev);
		if (ret)
			break;
	}
	return ret;
}

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}

/**
 *	cxgb_up - enable the adapter
 *	@adap: adapter being enabled
 *
 *	Called when the first port is enabled, this function performs the
 *	actions necessary to make an adapter operational, such as completing
 *	the initialization of HW modules, and enabling interrupts.
 *
 *	Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
	update_clip(adap);
 out:
	return err;
 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
	goto out;
}

static void cxgb_down(struct adapter *adapter)
{
	t4_intr_disable(adapter);
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	if (adapter->flags & USING_MSIX) {
		free_msix_queue_irqs(adapter);
		free_irq(adapter->msix_info[0].vec, adapter);
	} else
		free_irq(adapter->pdev->irq, adapter);
	quiesce_rx(adapter);
	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}

/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
}

/* Return an error number if the indicated filter isn't writable ...
 */
static int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid), checking for all
 * the common problems with doing this, like the filter being locked or
 * currently pending in another operation.
 */
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
		return -EINVAL;

	f = &adapter->tids.ftid_tab[fidx];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
		__be32 sip, __be16 sport, __be16 vlan,
		unsigned int queue, unsigned char port, unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ...
	 */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & F_PORT) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);
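
/* Illustrative sketch (not part of this commit): an upper-layer driver that
 * owns a server TID can use the pair of exported helpers above to steer
 * incoming TCP connections for a listening server to a chosen ingress queue.
 * The stid, rxq_id and port_id names below are hypothetical placeholders:
 *
 *	err = cxgb4_create_server_filter(netdev, stid, sip, htons(80),
 *					 0, rxq_id, port_id, ~0);
 *	if (err)
 *		return err;
 *	...
 *	cxgb4_remove_server_filter(netdev, stid, rxq_id, false);
 */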

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
		unsigned int queue, bool ipv6)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	ret = delete_filter(adap, stid);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);

static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
						struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return ns;
	}
	t4_get_port_stats(adapter, p->tx_chan, &stats);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
	return ns;
}

static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->fn;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}

static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
		return -EINVAL;
	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
};

void t4_fatal_err(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
	t4_intr_disable(adap);
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}

/* Return the specified PCI-E Configuration Space register from our Physical
 * Function.  We try first via a Firmware LDST Command since we prefer to let
 * the firmware own all of these registers, but if that fails we go for it
 * directly ourselves.
 */
static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
{
	struct fw_ldst_cmd ldst_cmd;
	u32 val;
	int ret;

	/* Construct and send the Firmware LDST Command to retrieve the
	 * specified PCI-E Configuration Space register.
	 */
	memset(&ldst_cmd, 0, sizeof(ldst_cmd));
	ldst_cmd.op_to_addrspace =
		htonl(FW_CMD_OP(FW_LDST_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ |
		      FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
	ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
	ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
	ldst_cmd.u.pcie.ctrl_to_fn =
		(FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
	ldst_cmd.u.pcie.r = reg;
	ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
			 &ldst_cmd);

	/* If the LDST Command succeeded, extract the returned register
	 * value.  Otherwise read it directly ourselves.
	 */
	if (ret == 0)
		val = ntohl(ldst_cmd.u.pcie.data[0]);
	else
		t4_hw_pci_read_cfg4(adap, reg, &val);

	return val;
}

static void setup_memwin(struct adapter *adap)
{
	u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;

	if (is_t4(adap->params.chip)) {
		u32 bar0;

		/* Truncation intentional: we only read the bottom 32-bits of
		 * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
		 * mechanism to read BAR0 instead of using
		 * pci_resource_start() because we could be operating from
		 * within a Virtual Machine which is trapping our accesses to
		 * our Configuration Space and we need to set up the PCI-E
		 * Memory Window decoders with the actual addresses which will
		 * be coming across the PCI-E link.
		 */
		bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
		bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
		adap->t4_bar0 = bar0;

		mem_win0_base = bar0 + MEMWIN0_BASE;
		mem_win1_base = bar0 + MEMWIN1_BASE;
		mem_win2_base = bar0 + MEMWIN2_BASE;
		mem_win2_aperture = MEMWIN2_APERTURE;
	} else {
		/* For T5, only relative offset inside the PCIe BAR is passed */
		mem_win0_base = MEMWIN0_BASE;
		mem_win1_base = MEMWIN1_BASE;
		mem_win2_base = MEMWIN2_BASE_T5;
		mem_win2_aperture = MEMWIN2_APERTURE_T5;
	}
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
		     mem_win0_base | BIR(0) |
		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
		     mem_win1_base | BIR(0) |
		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
		     mem_win2_base | BIR(0) |
		     WINDOW(ilog2(mem_win2_aperture) - 10));
	t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
}
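
/* Illustrative note (not part of this commit): judging from the two call
 * styles above and in setup_memwin_rdma() below, WINDOW() appears to encode
 * the aperture as log2 of its size in 1KB units, i.e. ilog2(bytes) - 10.
 * For example, assuming a 64KB aperture:
 *
 *	ilog2(65536) - 10 == 16 - 10 == 6
 *
 * so WINDOW(6) would describe a 64KB decode window.
 */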

static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
	}
}

static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_READ);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	/* select capabilities we'll be using */
	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
		if (!vf_acls)
			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
		else
			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
	} else if (vf_acls) {
		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
		return ret;
	}
	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE);
	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->fn,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
	v = t4_read_reg(adap, TP_PIO_DATA);
	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
	t4_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
		     V_TX_MOD_QUEUE_REQ_MAP(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_HDR);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_FIFO);
	t4_write_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			  &v, 1, A_TP_TX_SCHED_PCMD);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, A_TP_TX_MOD_CHANNEL_WEIGHT,
			     V_TX_MODQ_WEIGHT0(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT1(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT2(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     V_TX_MODQ_WEIGHT3(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->fn);
}

/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL,
			 PKTSHIFT_MASK,
			 PKTSHIFT(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
			       CSUM_HAS_PSEUDO_HDR, 0);

	return 0;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE | PIORST);
		if (ret < 0)
			goto bye;
	}

	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->fn, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
				maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST |
			      FW_CMD_READ);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/*
	 * Return successfully and note that we're operating with parameters
	 * not supplied by the driver, rather than from hard-wired
	 * initialization constants buried in the driver.
	 */
5347 adapter->flags |= USING_SOFT_PARAMS; 5357 adapter->flags |= USING_SOFT_PARAMS;
5348 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ 5358 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
5349 "Configuration File \"%s\", version %#x, computed checksum %#x\n", 5359 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
5350 config_name, finiver, cfcsum); 5360 config_name, finiver, cfcsum);
5351 return 0; 5361 return 0;
5352 5362
5353 /* 5363 /*
5354 * Something bad happened. Return the error ... (If the "error" 5364 * Something bad happened. Return the error ... (If the "error"
5355 * is that there's no Configuration File on the adapter we don't 5365 * is that there's no Configuration File on the adapter we don't
5356 * want to issue a warning since this is fairly common.) 5366 * want to issue a warning since this is fairly common.)
5357 */ 5367 */
5358 bye: 5368 bye:
5359 if (config_issued && ret != -ENOENT) 5369 if (config_issued && ret != -ENOENT)
5360 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n", 5370 dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
5361 config_name, -ret); 5371 config_name, -ret);
5362 return ret; 5372 return ret;
5363 } 5373 }
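
[Editorial aside] Every Capabilities Configuration exchange in this file follows the same READ-then-WRITE mailbox shape. Below is a minimal sketch of that pattern, assuming only the t4_wr_mbox()/FW_CAPS_CONFIG_CMD definitions already used above; the helper name caps_read_modify_write is hypothetical, not driver code:

static int caps_read_modify_write(struct adapter *adapter)
{
	struct fw_caps_config_cmd caps_cmd;
	int ret;

	/* READ: ask the firmware which capabilities are on offer */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		return ret;

	/* ... trim any capability fields we don't want here ... */

	/* WRITE: hand the (possibly reduced) set back to the firmware */
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_WRITE);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	return t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			  NULL);
}
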
5364 5374
5365 /* 5375 /*
5366 * Attempt to initialize the adapter via hard-coded, driver supplied 5376 * Attempt to initialize the adapter via hard-coded, driver supplied
5367 * parameters ... 5377 * parameters ...
5368 */ 5378 */
5369 static int adap_init0_no_config(struct adapter *adapter, int reset) 5379 static int adap_init0_no_config(struct adapter *adapter, int reset)
5370 { 5380 {
5371 struct sge *s = &adapter->sge; 5381 struct sge *s = &adapter->sge;
5372 struct fw_caps_config_cmd caps_cmd; 5382 struct fw_caps_config_cmd caps_cmd;
5373 u32 v; 5383 u32 v;
5374 int i, ret; 5384 int i, ret;
5375 5385
5376 /* 5386 /*
5377 * Reset device if necessary 5387 * Reset device if necessary
5378 */ 5388 */
5379 if (reset) { 5389 if (reset) {
5380 ret = t4_fw_reset(adapter, adapter->mbox, 5390 ret = t4_fw_reset(adapter, adapter->mbox,
5381 PIORSTMODE | PIORST); 5391 PIORSTMODE | PIORST);
5382 if (ret < 0) 5392 if (ret < 0)
5383 goto bye; 5393 goto bye;
5384 } 5394 }
5385 5395
5386 /* 5396 /*
5387 * Get device capabilities and select which we'll be using. 5397 * Get device capabilities and select which we'll be using.
5388 */ 5398 */
5389 memset(&caps_cmd, 0, sizeof(caps_cmd)); 5399 memset(&caps_cmd, 0, sizeof(caps_cmd));
5390 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 5400 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5391 FW_CMD_REQUEST | FW_CMD_READ); 5401 FW_CMD_REQUEST | FW_CMD_READ);
5392 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); 5402 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5393 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 5403 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5394 &caps_cmd); 5404 &caps_cmd);
5395 if (ret < 0) 5405 if (ret < 0)
5396 goto bye; 5406 goto bye;
5397 5407
5398 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { 5408 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
5399 if (!vf_acls) 5409 if (!vf_acls)
5400 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); 5410 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
5401 else 5411 else
5402 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); 5412 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
5403 } else if (vf_acls) { 5413 } else if (vf_acls) {
5404 dev_err(adapter->pdev_dev, "virtualization ACLs not supported"); 5414 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
5405 goto bye; 5415 goto bye;
5406 } 5416 }
5407 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 5417 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5408 FW_CMD_REQUEST | FW_CMD_WRITE); 5418 FW_CMD_REQUEST | FW_CMD_WRITE);
5409 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 5419 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
5410 NULL); 5420 NULL);
5411 if (ret < 0) 5421 if (ret < 0)
5412 goto bye; 5422 goto bye;
5413 5423
5414 /* 5424 /*
5415 * Tweak configuration based on system architecture, module 5425 * Tweak configuration based on system architecture, module
5416 * parameters, etc. 5426 * parameters, etc.
5417 */ 5427 */
5418 ret = adap_init0_tweaks(adapter); 5428 ret = adap_init0_tweaks(adapter);
5419 if (ret < 0) 5429 if (ret < 0)
5420 goto bye; 5430 goto bye;
5421 5431
5422 /* 5432 /*
5423 * Select RSS Global Mode we want to use. We use "Basic Virtual" 5433 * Select RSS Global Mode we want to use. We use "Basic Virtual"
5424 * mode which maps each Virtual Interface to its own section of 5434 * mode which maps each Virtual Interface to its own section of
5425 * the RSS Table and we turn on all map and hash enables ... 5435 * the RSS Table and we turn on all map and hash enables ...
5426 */ 5436 */
5427 adapter->flags |= RSS_TNLALLLOOKUP; 5437 adapter->flags |= RSS_TNLALLLOOKUP;
5428 ret = t4_config_glbl_rss(adapter, adapter->mbox, 5438 ret = t4_config_glbl_rss(adapter, adapter->mbox,
5429 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 5439 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
5430 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | 5440 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
5431 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | 5441 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
5432 ((adapter->flags & RSS_TNLALLLOOKUP) ? 5442 ((adapter->flags & RSS_TNLALLLOOKUP) ?
5433 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0)); 5443 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
5434 if (ret < 0) 5444 if (ret < 0)
5435 goto bye; 5445 goto bye;
5436 5446
5437 /* 5447 /*
5438 * Set up our own fundamental resource provisioning ... 5448 * Set up our own fundamental resource provisioning ...
5439 */ 5449 */
5440 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0, 5450 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
5441 PFRES_NEQ, PFRES_NETHCTRL, 5451 PFRES_NEQ, PFRES_NETHCTRL,
5442 PFRES_NIQFLINT, PFRES_NIQ, 5452 PFRES_NIQFLINT, PFRES_NIQ,
5443 PFRES_TC, PFRES_NVI, 5453 PFRES_TC, PFRES_NVI,
5444 FW_PFVF_CMD_CMASK_MASK, 5454 FW_PFVF_CMD_CMASK_MASK,
5445 pfvfres_pmask(adapter, adapter->fn, 0), 5455 pfvfres_pmask(adapter, adapter->fn, 0),
5446 PFRES_NEXACTF, 5456 PFRES_NEXACTF,
5447 PFRES_R_CAPS, PFRES_WX_CAPS); 5457 PFRES_R_CAPS, PFRES_WX_CAPS);
5448 if (ret < 0) 5458 if (ret < 0)
5449 goto bye; 5459 goto bye;
5450 5460
5451 /* 5461 /*
5452 * Perform low level SGE initialization. We need to do this before we 5462 * Perform low level SGE initialization. We need to do this before we
5453 * send the firmware the INITIALIZE command because that will cause 5463 * send the firmware the INITIALIZE command because that will cause
5454 * any other PF Drivers which are waiting for the Master 5464 * any other PF Drivers which are waiting for the Master
5455 * Initialization to proceed forward. 5465 * Initialization to proceed forward.
5456 */ 5466 */
5457 for (i = 0; i < SGE_NTIMERS - 1; i++) 5467 for (i = 0; i < SGE_NTIMERS - 1; i++)
5458 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL); 5468 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
5459 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; 5469 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
5460 s->counter_val[0] = 1; 5470 s->counter_val[0] = 1;
5461 for (i = 1; i < SGE_NCOUNTERS; i++) 5471 for (i = 1; i < SGE_NCOUNTERS; i++)
5462 s->counter_val[i] = min(intr_cnt[i - 1], 5472 s->counter_val[i] = min(intr_cnt[i - 1],
5463 THRESHOLD_0_GET(THRESHOLD_0_MASK)); 5473 THRESHOLD_0_GET(THRESHOLD_0_MASK));
5464 t4_sge_init(adapter); 5474 t4_sge_init(adapter);
5465 5475
5466 #ifdef CONFIG_PCI_IOV 5476 #ifdef CONFIG_PCI_IOV
5467 /* 5477 /*
5468 * Provision resource limits for Virtual Functions. We currently 5478 * Provision resource limits for Virtual Functions. We currently
5469 * grant them all the same static resource limits except for the Port 5479 * grant them all the same static resource limits except for the Port
5470 * Access Rights Mask which we're assigning based on the PF. All of 5480 * Access Rights Mask which we're assigning based on the PF. All of
5471 * the static provisioning stuff for both the PF and VF really needs 5481 * the static provisioning stuff for both the PF and VF really needs
5472 * to be managed in a persistent manner for each device which the 5482 * to be managed in a persistent manner for each device which the
5473 * firmware controls. 5483 * firmware controls.
5474 */ 5484 */
5475 { 5485 {
5476 int pf, vf; 5486 int pf, vf;
5477 5487
5478 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { 5488 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
5479 if (num_vf[pf] <= 0) 5489 if (num_vf[pf] <= 0)
5480 continue; 5490 continue;
5481 5491
5482 /* VF numbering starts at 1! */ 5492 /* VF numbering starts at 1! */
5483 for (vf = 1; vf <= num_vf[pf]; vf++) { 5493 for (vf = 1; vf <= num_vf[pf]; vf++) {
5484 ret = t4_cfg_pfvf(adapter, adapter->mbox, 5494 ret = t4_cfg_pfvf(adapter, adapter->mbox,
5485 pf, vf, 5495 pf, vf,
5486 VFRES_NEQ, VFRES_NETHCTRL, 5496 VFRES_NEQ, VFRES_NETHCTRL,
5487 VFRES_NIQFLINT, VFRES_NIQ, 5497 VFRES_NIQFLINT, VFRES_NIQ,
5488 VFRES_TC, VFRES_NVI, 5498 VFRES_TC, VFRES_NVI,
5489 FW_PFVF_CMD_CMASK_MASK, 5499 FW_PFVF_CMD_CMASK_MASK,
5490 pfvfres_pmask( 5500 pfvfres_pmask(
5491 adapter, pf, vf), 5501 adapter, pf, vf),
5492 VFRES_NEXACTF, 5502 VFRES_NEXACTF,
5493 VFRES_R_CAPS, VFRES_WX_CAPS); 5503 VFRES_R_CAPS, VFRES_WX_CAPS);
5494 if (ret < 0) 5504 if (ret < 0)
5495 dev_warn(adapter->pdev_dev, 5505 dev_warn(adapter->pdev_dev,
5496 "failed to "\ 5506 "failed to "\
5497 "provision pf/vf=%d/%d; " 5507 "provision pf/vf=%d/%d; "
5498 "err=%d\n", pf, vf, ret); 5508 "err=%d\n", pf, vf, ret);
5499 } 5509 }
5500 } 5510 }
5501 } 5511 }
5502 #endif 5512 #endif
5503 5513
5504 /* 5514 /*
5505 * Set up the default filter mode. Later we'll want to implement this 5515 * Set up the default filter mode. Later we'll want to implement this
5506 * via a firmware command, etc. ... This needs to be done before the 5516 * via a firmware command, etc. ... This needs to be done before the
5507 * firmware initialization command ... If the selected set of fields 5517 * firmware initialization command ... If the selected set of fields
5508 * isn't equal to the default value, we'll need to make sure that the 5518 * isn't equal to the default value, we'll need to make sure that the
5509 * field selections will fit in the 36-bit budget. 5519 * field selections will fit in the 36-bit budget.
5510 */ 5520 */
5511 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) { 5521 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
5512 int j, bits = 0; 5522 int j, bits = 0;
5513 5523
5514 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++) 5524 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
5515 switch (tp_vlan_pri_map & (1 << j)) { 5525 switch (tp_vlan_pri_map & (1 << j)) {
5516 case 0: 5526 case 0:
5517 /* compressed filter field not enabled */ 5527 /* compressed filter field not enabled */
5518 break; 5528 break;
5519 case FCOE_MASK: 5529 case FCOE_MASK:
5520 bits += 1; 5530 bits += 1;
5521 break; 5531 break;
5522 case PORT_MASK: 5532 case PORT_MASK:
5523 bits += 3; 5533 bits += 3;
5524 break; 5534 break;
5525 case VNIC_ID_MASK: 5535 case VNIC_ID_MASK:
5526 bits += 17; 5536 bits += 17;
5527 break; 5537 break;
5528 case VLAN_MASK: 5538 case VLAN_MASK:
5529 bits += 17; 5539 bits += 17;
5530 break; 5540 break;
5531 case TOS_MASK: 5541 case TOS_MASK:
5532 bits += 8; 5542 bits += 8;
5533 break; 5543 break;
5534 case PROTOCOL_MASK: 5544 case PROTOCOL_MASK:
5535 bits += 8; 5545 bits += 8;
5536 break; 5546 break;
5537 case ETHERTYPE_MASK: 5547 case ETHERTYPE_MASK:
5538 bits += 16; 5548 bits += 16;
5539 break; 5549 break;
5540 case MACMATCH_MASK: 5550 case MACMATCH_MASK:
5541 bits += 9; 5551 bits += 9;
5542 break; 5552 break;
5543 case MPSHITTYPE_MASK: 5553 case MPSHITTYPE_MASK:
5544 bits += 3; 5554 bits += 3;
5545 break; 5555 break;
5546 case FRAGMENTATION_MASK: 5556 case FRAGMENTATION_MASK:
5547 bits += 1; 5557 bits += 1;
5548 break; 5558 break;
5549 } 5559 }
5550 5560
5551 if (bits > 36) { 5561 if (bits > 36) {
5552 dev_err(adapter->pdev_dev, 5562 dev_err(adapter->pdev_dev,
5553 "tp_vlan_pri_map=%#x needs %d bits > 36;"\ 5563 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
5554 " using %#x\n", tp_vlan_pri_map, bits, 5564 " using %#x\n", tp_vlan_pri_map, bits,
5555 TP_VLAN_PRI_MAP_DEFAULT); 5565 TP_VLAN_PRI_MAP_DEFAULT);
5556 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; 5566 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
5557 } 5567 }
5558 } 5568 }
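/* [Editorial worked example, hypothetical map value] Selecting
 * PORT (3 bits) + VLAN (17) + PROTOCOL (8) + MPSHITTYPE (3) needs
 * 3 + 17 + 8 + 3 = 31 bits and fits the 36-bit budget; adding
 * ETHERTYPE (16) would raise that to 47 bits and trigger the
 * fallback to TP_VLAN_PRI_MAP_DEFAULT above.
 */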
5559 v = tp_vlan_pri_map; 5569 v = tp_vlan_pri_map;
5560 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA, 5570 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
5561 &v, 1, TP_VLAN_PRI_MAP); 5571 &v, 1, TP_VLAN_PRI_MAP);
5562 5572
5563 /* 5573 /*
5564 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order 5574 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
5565 * to support any of the compressed filter fields above. Newer 5575 * to support any of the compressed filter fields above. Newer
5566 * versions of the firmware do this automatically but it doesn't hurt 5576 * versions of the firmware do this automatically but it doesn't hurt
5567 * to set it here. Meanwhile, we do _not_ need to set Lookup Every 5577 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
5568 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets 5578 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
5569 * since the firmware automatically turns this on and off when we have 5579 * since the firmware automatically turns this on and off when we have
5570 * a non-zero number of filters active (since it does have a 5580 * a non-zero number of filters active (since it does have a
5571 * performance impact). 5581 * performance impact).
5572 */ 5582 */
5573 if (tp_vlan_pri_map) 5583 if (tp_vlan_pri_map)
5574 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG, 5584 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
5575 FIVETUPLELOOKUP_MASK, 5585 FIVETUPLELOOKUP_MASK,
5576 FIVETUPLELOOKUP_MASK); 5586 FIVETUPLELOOKUP_MASK);
5577 5587
5578 /* 5588 /*
5579 * Tweak some settings. 5589 * Tweak some settings.
5580 */ 5590 */
5581 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) | 5591 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
5582 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) | 5592 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
5583 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) | 5593 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
5584 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9)); 5594 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
5585 5595
5586 /* 5596 /*
5587 * Get basic stuff going by issuing the Firmware Initialize command. 5597 * Get basic stuff going by issuing the Firmware Initialize command.
5588 * Note that this _must_ be after all PFVF commands ... 5598 * Note that this _must_ be after all PFVF commands ...
5589 */ 5599 */
5590 ret = t4_fw_initialize(adapter, adapter->mbox); 5600 ret = t4_fw_initialize(adapter, adapter->mbox);
5591 if (ret < 0) 5601 if (ret < 0)
5592 goto bye; 5602 goto bye;
5593 5603
5594 /* 5604 /*
5595 * Return successfully! 5605 * Return successfully!
5596 */ 5606 */
5597 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\ 5607 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
5598 "driver parameters\n"); 5608 "driver parameters\n");
5599 return 0; 5609 return 0;
5600 5610
5601 /* 5611 /*
5602 * Something bad happened. Return the error ... 5612 * Something bad happened. Return the error ...
5603 */ 5613 */
5604 bye: 5614 bye:
5605 return ret; 5615 return ret;
5606 } 5616 }
5607 5617
5608 static struct fw_info fw_info_array[] = { 5618 static struct fw_info fw_info_array[] = {
5609 { 5619 {
5610 .chip = CHELSIO_T4, 5620 .chip = CHELSIO_T4,
5611 .fs_name = FW4_CFNAME, 5621 .fs_name = FW4_CFNAME,
5612 .fw_mod_name = FW4_FNAME, 5622 .fw_mod_name = FW4_FNAME,
5613 .fw_hdr = { 5623 .fw_hdr = {
5614 .chip = FW_HDR_CHIP_T4, 5624 .chip = FW_HDR_CHIP_T4,
5615 .fw_ver = __cpu_to_be32(FW_VERSION(T4)), 5625 .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
5616 .intfver_nic = FW_INTFVER(T4, NIC), 5626 .intfver_nic = FW_INTFVER(T4, NIC),
5617 .intfver_vnic = FW_INTFVER(T4, VNIC), 5627 .intfver_vnic = FW_INTFVER(T4, VNIC),
5618 .intfver_ri = FW_INTFVER(T4, RI), 5628 .intfver_ri = FW_INTFVER(T4, RI),
5619 .intfver_iscsi = FW_INTFVER(T4, ISCSI), 5629 .intfver_iscsi = FW_INTFVER(T4, ISCSI),
5620 .intfver_fcoe = FW_INTFVER(T4, FCOE), 5630 .intfver_fcoe = FW_INTFVER(T4, FCOE),
5621 }, 5631 },
5622 }, { 5632 }, {
5623 .chip = CHELSIO_T5, 5633 .chip = CHELSIO_T5,
5624 .fs_name = FW5_CFNAME, 5634 .fs_name = FW5_CFNAME,
5625 .fw_mod_name = FW5_FNAME, 5635 .fw_mod_name = FW5_FNAME,
5626 .fw_hdr = { 5636 .fw_hdr = {
5627 .chip = FW_HDR_CHIP_T5, 5637 .chip = FW_HDR_CHIP_T5,
5628 .fw_ver = __cpu_to_be32(FW_VERSION(T5)), 5638 .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
5629 .intfver_nic = FW_INTFVER(T5, NIC), 5639 .intfver_nic = FW_INTFVER(T5, NIC),
5630 .intfver_vnic = FW_INTFVER(T5, VNIC), 5640 .intfver_vnic = FW_INTFVER(T5, VNIC),
5631 .intfver_ri = FW_INTFVER(T5, RI), 5641 .intfver_ri = FW_INTFVER(T5, RI),
5632 .intfver_iscsi = FW_INTFVER(T5, ISCSI), 5642 .intfver_iscsi = FW_INTFVER(T5, ISCSI),
5633 .intfver_fcoe = FW_INTFVER(T5, FCOE), 5643 .intfver_fcoe = FW_INTFVER(T5, FCOE),
5634 }, 5644 },
5635 } 5645 }
5636 }; 5646 };
5637 5647
5638 static struct fw_info *find_fw_info(int chip) 5648 static struct fw_info *find_fw_info(int chip)
5639 { 5649 {
5640 int i; 5650 int i;
5641 5651
5642 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) { 5652 for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
5643 if (fw_info_array[i].chip == chip) 5653 if (fw_info_array[i].chip == chip)
5644 return &fw_info_array[i]; 5654 return &fw_info_array[i];
5645 } 5655 }
5646 return NULL; 5656 return NULL;
5647 } 5657 }
5648 5658
5649 /* 5659 /*
5650 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 5660 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
5651 */ 5661 */
5652 static int adap_init0(struct adapter *adap) 5662 static int adap_init0(struct adapter *adap)
5653 { 5663 {
5654 int ret; 5664 int ret;
5655 u32 v, port_vec; 5665 u32 v, port_vec;
5656 enum dev_state state; 5666 enum dev_state state;
5657 u32 params[7], val[7]; 5667 u32 params[7], val[7];
5658 struct fw_caps_config_cmd caps_cmd; 5668 struct fw_caps_config_cmd caps_cmd;
5659 int reset = 1; 5669 int reset = 1;
5660 5670
5661 /* 5671 /*
5662 * Contact FW, advertising Master capability (and potentially forcing 5672 * Contact FW, advertising Master capability (and potentially forcing
5663 * ourselves as the Master PF if our module parameter force_init is 5673 * ourselves as the Master PF if our module parameter force_init is
5664 * set). 5674 * set).
5665 */ 5675 */
5666 ret = t4_fw_hello(adap, adap->mbox, adap->fn, 5676 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
5667 force_init ? MASTER_MUST : MASTER_MAY, 5677 force_init ? MASTER_MUST : MASTER_MAY,
5668 &state); 5678 &state);
5669 if (ret < 0) { 5679 if (ret < 0) {
5670 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", 5680 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
5671 ret); 5681 ret);
5672 return ret; 5682 return ret;
5673 } 5683 }
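/* [Editorial note] t4_fw_hello() returns the mailbox of the PF that
 * was elected Master, so matching our own mailbox below means this
 * function is (or was forced to become) the Master PF.
 */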
5674 if (ret == adap->mbox) 5684 if (ret == adap->mbox)
5675 adap->flags |= MASTER_PF; 5685 adap->flags |= MASTER_PF;
5676 if (force_init && state == DEV_STATE_INIT) 5686 if (force_init && state == DEV_STATE_INIT)
5677 state = DEV_STATE_UNINIT; 5687 state = DEV_STATE_UNINIT;
5678 5688
5679 /* 5689 /*
5680 * If we're the Master PF Driver and the device is uninitialized, 5690 * If we're the Master PF Driver and the device is uninitialized,
5681 * then let's consider upgrading the firmware ... (We always want 5691 * then let's consider upgrading the firmware ... (We always want
5682 * to check the firmware version number in order to A. get it for 5692 * to check the firmware version number in order to A. get it for
5683 * later reporting and B. to warn if the currently loaded firmware 5693 * later reporting and B. to warn if the currently loaded firmware
5684 * is excessively mismatched relative to the driver.) 5694 * is excessively mismatched relative to the driver.)
5685 */ 5695 */
5686 t4_get_fw_version(adap, &adap->params.fw_vers); 5696 t4_get_fw_version(adap, &adap->params.fw_vers);
5687 t4_get_tp_version(adap, &adap->params.tp_vers); 5697 t4_get_tp_version(adap, &adap->params.tp_vers);
5688 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { 5698 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
5689 struct fw_info *fw_info; 5699 struct fw_info *fw_info;
5690 struct fw_hdr *card_fw; 5700 struct fw_hdr *card_fw;
5691 const struct firmware *fw; 5701 const struct firmware *fw;
5692 const u8 *fw_data = NULL; 5702 const u8 *fw_data = NULL;
5693 unsigned int fw_size = 0; 5703 unsigned int fw_size = 0;
5694 5704
5695 /* This is the firmware whose headers the driver was compiled 5705 /* This is the firmware whose headers the driver was compiled
5696 * against 5706 * against
5697 */ 5707 */
5698 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); 5708 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
5699 if (fw_info == NULL) { 5709 if (fw_info == NULL) {
5700 dev_err(adap->pdev_dev, 5710 dev_err(adap->pdev_dev,
5701 "unable to get firmware info for chip %d.\n", 5711 "unable to get firmware info for chip %d.\n",
5702 CHELSIO_CHIP_VERSION(adap->params.chip)); 5712 CHELSIO_CHIP_VERSION(adap->params.chip));
5703 return -EINVAL; 5713 return -EINVAL;
5704 } 5714 }
5705 5715
5706 /* allocate memory to read the header of the firmware on the 5716 /* allocate memory to read the header of the firmware on the
5707 * card 5717 * card
5708 */ 5718 */
5709 card_fw = t4_alloc_mem(sizeof(*card_fw)); 5719 card_fw = t4_alloc_mem(sizeof(*card_fw));
5710 5720
5711 /* Get FW from /lib/firmware/ */ 5721 /* Get FW from /lib/firmware/ */
5712 ret = request_firmware(&fw, fw_info->fw_mod_name, 5722 ret = request_firmware(&fw, fw_info->fw_mod_name,
5713 adap->pdev_dev); 5723 adap->pdev_dev);
5714 if (ret < 0) { 5724 if (ret < 0) {
5715 dev_err(adap->pdev_dev, 5725 dev_err(adap->pdev_dev,
5716 "unable to load firmware image %s, error %d\n", 5726 "unable to load firmware image %s, error %d\n",
5717 fw_info->fw_mod_name, ret); 5727 fw_info->fw_mod_name, ret);
5718 } else { 5728 } else {
5719 fw_data = fw->data; 5729 fw_data = fw->data;
5720 fw_size = fw->size; 5730 fw_size = fw->size;
5721 } 5731 }
5722 5732
5723 /* upgrade FW logic */ 5733 /* upgrade FW logic */
5724 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, 5734 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
5725 state, &reset); 5735 state, &reset);
5726 5736
5727 /* Cleaning up */ 5737 /* Cleaning up */
5728 if (fw != NULL) 5738 if (fw != NULL)
5729 release_firmware(fw); 5739 release_firmware(fw);
5730 t4_free_mem(card_fw); 5740 t4_free_mem(card_fw);
5731 5741
5732 if (ret < 0) 5742 if (ret < 0)
5733 goto bye; 5743 goto bye;
5734 } 5744 }
5735 5745
5736 /* 5746 /*
5737 * Grab VPD parameters. This should be done after we establish a 5747 * Grab VPD parameters. This should be done after we establish a
5738 * connection to the firmware since some of the VPD parameters 5748 * connection to the firmware since some of the VPD parameters
5739 * (notably the Core Clock frequency) are retrieved via requests to 5749 * (notably the Core Clock frequency) are retrieved via requests to
5740 * the firmware. On the other hand, we need these fairly early on 5750 * the firmware. On the other hand, we need these fairly early on
5741 * so we do this right after getting ahold of the firmware. 5751 * so we do this right after getting ahold of the firmware.
5742 */ 5752 */
5743 ret = get_vpd_params(adap, &adap->params.vpd); 5753 ret = get_vpd_params(adap, &adap->params.vpd);
5744 if (ret < 0) 5754 if (ret < 0)
5745 goto bye; 5755 goto bye;
5746 5756
5747 /* 5757 /*
5748 * Find out what ports are available to us. Note that we need to do 5758 * Find out what ports are available to us. Note that we need to do
5749 * this before calling adap_init0_no_config() since it needs nports 5759 * this before calling adap_init0_no_config() since it needs nports
5750 * and portvec ... 5760 * and portvec ...
5751 */ 5761 */
5752 v = 5762 v =
5753 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5763 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5754 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); 5764 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
5755 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); 5765 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
5756 if (ret < 0) 5766 if (ret < 0)
5757 goto bye; 5767 goto bye;
5758 5768
5759 adap->params.nports = hweight32(port_vec); 5769 adap->params.nports = hweight32(port_vec);
5760 adap->params.portvec = port_vec; 5770 adap->params.portvec = port_vec;
5761 5771
5762 /* 5772 /*
5763 * If the firmware is initialized already (and we're not forcing a 5773 * If the firmware is initialized already (and we're not forcing a
5764 * master initialization), note that we're living with existing 5774 * master initialization), note that we're living with existing
5765 * adapter parameters. Otherwise, it's time to try initializing the 5775 * adapter parameters. Otherwise, it's time to try initializing the
5766 * adapter ... 5776 * adapter ...
5767 */ 5777 */
5768 if (state == DEV_STATE_INIT) { 5778 if (state == DEV_STATE_INIT) {
5769 dev_info(adap->pdev_dev, "Coming up as %s: "\ 5779 dev_info(adap->pdev_dev, "Coming up as %s: "\
5770 "Adapter already initialized\n", 5780 "Adapter already initialized\n",
5771 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); 5781 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
5772 adap->flags |= USING_SOFT_PARAMS; 5782 adap->flags |= USING_SOFT_PARAMS;
5773 } else { 5783 } else {
5774 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ 5784 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
5775 "Initializing adapter\n"); 5785 "Initializing adapter\n");
5776 5786
5777 /* 5787 /*
5778 * If the firmware doesn't support Configuration 5788 * If the firmware doesn't support Configuration
5779 * Files, warn the user and exit. 5789 * Files, warn the user and exit.
5780 */ 5790 */
5781 if (ret < 0) 5791 if (ret < 0)
5782 dev_warn(adap->pdev_dev, "Firmware doesn't support " 5792 dev_warn(adap->pdev_dev, "Firmware doesn't support "
5783 "configuration file.\n"); 5793 "configuration file.\n");
5784 if (force_old_init) 5794 if (force_old_init)
5785 ret = adap_init0_no_config(adap, reset); 5795 ret = adap_init0_no_config(adap, reset);
5786 else { 5796 else {
5787 /* 5797 /*
5788 * Find out whether we're dealing with a version of 5798 * Find out whether we're dealing with a version of
5789 * the firmware which has configuration file support. 5799 * the firmware which has configuration file support.
5790 */ 5800 */
5791 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 5801 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
5792 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); 5802 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
5793 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, 5803 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
5794 params, val); 5804 params, val);
5795 5805
5796 /* 5806 /*
5797 * If the firmware doesn't support Configuration 5807 * If the firmware doesn't support Configuration
5798 * Files, use the old Driver-based, hard-wired 5808 * Files, use the old Driver-based, hard-wired
5799 * initialization. Otherwise, try using the 5809 * initialization. Otherwise, try using the
5800 * Configuration File support and fall back to the 5810 * Configuration File support and fall back to the
5801 * Driver-based initialization if there's no 5811 * Driver-based initialization if there's no
5802 * Configuration File found. 5812 * Configuration File found.
5803 */ 5813 */
5804 if (ret < 0) 5814 if (ret < 0)
5805 ret = adap_init0_no_config(adap, reset); 5815 ret = adap_init0_no_config(adap, reset);
5806 else { 5816 else {
5807 /* 5817 /*
5808 * The firmware provides us with a memory 5818 * The firmware provides us with a memory
5809 * buffer where we can load a Configuration 5819 * buffer where we can load a Configuration
5810 * File from the host if we want to override 5820 * File from the host if we want to override
5811 * the Configuration File in flash. 5821 * the Configuration File in flash.
5812 */ 5822 */
5813 5823
5814 ret = adap_init0_config(adap, reset); 5824 ret = adap_init0_config(adap, reset);
5815 if (ret == -ENOENT) { 5825 if (ret == -ENOENT) {
5816 dev_info(adap->pdev_dev, 5826 dev_info(adap->pdev_dev,
5817 "No Configuration File present " 5827 "No Configuration File present "
5818 "on adapter. Using hard-wired " 5828 "on adapter. Using hard-wired "
5819 "configuration parameters.\n"); 5829 "configuration parameters.\n");
5820 ret = adap_init0_no_config(adap, reset); 5830 ret = adap_init0_no_config(adap, reset);
5821 } 5831 }
5822 } 5832 }
5823 } 5833 }
5824 if (ret < 0) { 5834 if (ret < 0) {
5825 dev_err(adap->pdev_dev, 5835 dev_err(adap->pdev_dev,
5826 "could not initialize adapter, error %d\n", 5836 "could not initialize adapter, error %d\n",
5827 -ret); 5837 -ret);
5828 goto bye; 5838 goto bye;
5829 } 5839 }
5830 } 5840 }
5831 5841
5832 /* 5842 /*
5833 * If we're living with non-hard-coded parameters (either from a 5843 * If we're living with non-hard-coded parameters (either from a
5834 * Firmware Configuration File or values programmed by a different PF 5844 * Firmware Configuration File or values programmed by a different PF
5835 * Driver), give the SGE code a chance to pull in anything that it 5845 * Driver), give the SGE code a chance to pull in anything that it
5836 * needs ... Note that this must be called after we retrieve our VPD 5846 * needs ... Note that this must be called after we retrieve our VPD
5837 * parameters in order to know how to convert core ticks to seconds. 5847 * parameters in order to know how to convert core ticks to seconds.
5838 */ 5848 */
5839 if (adap->flags & USING_SOFT_PARAMS) { 5849 if (adap->flags & USING_SOFT_PARAMS) {
5840 ret = t4_sge_init(adap); 5850 ret = t4_sge_init(adap);
5841 if (ret < 0) 5851 if (ret < 0)
5842 goto bye; 5852 goto bye;
5843 } 5853 }
5844 5854
5845 if (is_bypass_device(adap->pdev->device)) 5855 if (is_bypass_device(adap->pdev->device))
5846 adap->params.bypass = 1; 5856 adap->params.bypass = 1;
5847 5857
5848 /* 5858 /*
5849 * Grab some of our basic fundamental operating parameters. 5859 * Grab some of our basic fundamental operating parameters.
5850 */ 5860 */
5851 #define FW_PARAM_DEV(param) \ 5861 #define FW_PARAM_DEV(param) \
5852 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 5862 (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
5853 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 5863 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
5854 5864
5855 #define FW_PARAM_PFVF(param) \ 5865 #define FW_PARAM_PFVF(param) \
5856 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 5866 FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
5857 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \ 5867 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
5858 FW_PARAMS_PARAM_Y(0) | \ 5868 FW_PARAMS_PARAM_Y(0) | \
5859 FW_PARAMS_PARAM_Z(0) 5869 FW_PARAMS_PARAM_Z(0)
5860 5870
5861 params[0] = FW_PARAM_PFVF(EQ_START); 5871 params[0] = FW_PARAM_PFVF(EQ_START);
5862 params[1] = FW_PARAM_PFVF(L2T_START); 5872 params[1] = FW_PARAM_PFVF(L2T_START);
5863 params[2] = FW_PARAM_PFVF(L2T_END); 5873 params[2] = FW_PARAM_PFVF(L2T_END);
5864 params[3] = FW_PARAM_PFVF(FILTER_START); 5874 params[3] = FW_PARAM_PFVF(FILTER_START);
5865 params[4] = FW_PARAM_PFVF(FILTER_END); 5875 params[4] = FW_PARAM_PFVF(FILTER_END);
5866 params[5] = FW_PARAM_PFVF(IQFLINT_START); 5876 params[5] = FW_PARAM_PFVF(IQFLINT_START);
5867 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val); 5877 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
5868 if (ret < 0) 5878 if (ret < 0)
5869 goto bye; 5879 goto bye;
5870 adap->sge.egr_start = val[0]; 5880 adap->sge.egr_start = val[0];
5871 adap->l2t_start = val[1]; 5881 adap->l2t_start = val[1];
5872 adap->l2t_end = val[2]; 5882 adap->l2t_end = val[2];
5873 adap->tids.ftid_base = val[3]; 5883 adap->tids.ftid_base = val[3];
5874 adap->tids.nftids = val[4] - val[3] + 1; 5884 adap->tids.nftids = val[4] - val[3] + 1;
5875 adap->sge.ingr_start = val[5]; 5885 adap->sge.ingr_start = val[5];
5876 5886
5877 /* query params related to active filter region */ 5887 /* query params related to active filter region */
5878 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START); 5888 params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
5879 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END); 5889 params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
5880 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); 5890 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
5881 /* If the Active Filter region is non-empty, we enable establishing 5891 /* If the Active Filter region is non-empty, we enable establishing
5882 * offload connections through firmware work requests. 5892 * offload connections through firmware work requests.
5883 */ 5893 */
5884 if ((val[0] != val[1]) && (ret >= 0)) { 5894 if ((val[0] != val[1]) && (ret >= 0)) {
5885 adap->flags |= FW_OFLD_CONN; 5895 adap->flags |= FW_OFLD_CONN;
5886 adap->tids.aftid_base = val[0]; 5896 adap->tids.aftid_base = val[0];
5887 adap->tids.aftid_end = val[1]; 5897 adap->tids.aftid_end = val[1];
5888 } 5898 }
5889 5899
5890 /* If we're running on newer firmware, let it know that we're 5900 /* If we're running on newer firmware, let it know that we're
5891 * prepared to deal with encapsulated CPL messages. Older 5901 * prepared to deal with encapsulated CPL messages. Older
5892 * firmware won't understand this and we'll just get 5902 * firmware won't understand this and we'll just get
5893 * unencapsulated messages ... 5903 * unencapsulated messages ...
5894 */ 5904 */
5895 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 5905 params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
5896 val[0] = 1; 5906 val[0] = 1;
5897 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val); 5907 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
5898 5908
5899 /* 5909 /*
5900 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL 5910 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
5901 * capability. Earlier versions of the firmware didn't have the 5911 * capability. Earlier versions of the firmware didn't have the
5902 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no 5912 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
5903 * permission to use ULPTX MEMWRITE DSGL. 5913 * permission to use ULPTX MEMWRITE DSGL.
5904 */ 5914 */
5905 if (is_t4(adap->params.chip)) { 5915 if (is_t4(adap->params.chip)) {
5906 adap->params.ulptx_memwrite_dsgl = false; 5916 adap->params.ulptx_memwrite_dsgl = false;
5907 } else { 5917 } else {
5908 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL); 5918 params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
5909 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 5919 ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
5910 1, params, val); 5920 1, params, val);
5911 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); 5921 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
5912 } 5922 }
5913 5923
5914 /* 5924 /*
5915 * Get device capabilities so we can determine what resources we need 5925 * Get device capabilities so we can determine what resources we need
5916 * to manage. 5926 * to manage.
5917 */ 5927 */
5918 memset(&caps_cmd, 0, sizeof(caps_cmd)); 5928 memset(&caps_cmd, 0, sizeof(caps_cmd));
5919 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 5929 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
5920 FW_CMD_REQUEST | FW_CMD_READ); 5930 FW_CMD_REQUEST | FW_CMD_READ);
5921 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd)); 5931 caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
5922 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), 5932 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
5923 &caps_cmd); 5933 &caps_cmd);
5924 if (ret < 0) 5934 if (ret < 0)
5925 goto bye; 5935 goto bye;
5926 5936
5927 if (caps_cmd.ofldcaps) { 5937 if (caps_cmd.ofldcaps) {
5928 /* query offload-related parameters */ 5938 /* query offload-related parameters */
5929 params[0] = FW_PARAM_DEV(NTID); 5939 params[0] = FW_PARAM_DEV(NTID);
5930 params[1] = FW_PARAM_PFVF(SERVER_START); 5940 params[1] = FW_PARAM_PFVF(SERVER_START);
5931 params[2] = FW_PARAM_PFVF(SERVER_END); 5941 params[2] = FW_PARAM_PFVF(SERVER_END);
5932 params[3] = FW_PARAM_PFVF(TDDP_START); 5942 params[3] = FW_PARAM_PFVF(TDDP_START);
5933 params[4] = FW_PARAM_PFVF(TDDP_END); 5943 params[4] = FW_PARAM_PFVF(TDDP_END);
5934 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 5944 params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
5935 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, 5945 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5936 params, val); 5946 params, val);
5937 if (ret < 0) 5947 if (ret < 0)
5938 goto bye; 5948 goto bye;
5939 adap->tids.ntids = val[0]; 5949 adap->tids.ntids = val[0];
5940 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); 5950 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
5941 adap->tids.stid_base = val[1]; 5951 adap->tids.stid_base = val[1];
5942 adap->tids.nstids = val[2] - val[1] + 1; 5952 adap->tids.nstids = val[2] - val[1] + 1;
5943 /* 5953 /*
5944 * Set up the server filter region. Divide the available filter 5954 * Set up the server filter region. Divide the available filter
5945 * region into two parts. Regular filters get 1/3rd and server 5955 * region into two parts. Regular filters get 1/3rd and server
5946 * filters get 2/3rd. This is only enabled if the workaround 5956 * filters get 2/3rd. This is only enabled if the workaround
5947 * path is enabled. 5957 * path is enabled.
5948 * 1. For regular filters. 5958 * 1. For regular filters.
5949 * 2. Server filters: These are special filters which are used 5959 * 2. Server filters: These are special filters which are used
5950 * to redirect SYN packets to the offload queue. 5960 * to redirect SYN packets to the offload queue.
5951 */ 5961 */
5952 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { 5962 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
5953 adap->tids.sftid_base = adap->tids.ftid_base + 5963 adap->tids.sftid_base = adap->tids.ftid_base +
5954 DIV_ROUND_UP(adap->tids.nftids, 3); 5964 DIV_ROUND_UP(adap->tids.nftids, 3);
5955 adap->tids.nsftids = adap->tids.nftids - 5965 adap->tids.nsftids = adap->tids.nftids -
5956 DIV_ROUND_UP(adap->tids.nftids, 3); 5966 DIV_ROUND_UP(adap->tids.nftids, 3);
5957 adap->tids.nftids = adap->tids.sftid_base - 5967 adap->tids.nftids = adap->tids.sftid_base -
5958 adap->tids.ftid_base; 5968 adap->tids.ftid_base;
5959 } 5969 }
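/* [Editorial worked example, hypothetical count] With nftids == 120:
 * DIV_ROUND_UP(120, 3) == 40, so sftid_base = ftid_base + 40,
 * nsftids == 120 - 40 == 80 server filter IDs, and nftids shrinks
 * to sftid_base - ftid_base == 40 regular filter IDs.
 */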
5960 adap->vres.ddp.start = val[3]; 5970 adap->vres.ddp.start = val[3];
5961 adap->vres.ddp.size = val[4] - val[3] + 1; 5971 adap->vres.ddp.size = val[4] - val[3] + 1;
5962 adap->params.ofldq_wr_cred = val[5]; 5972 adap->params.ofldq_wr_cred = val[5];
5963 5973
5964 adap->params.offload = 1; 5974 adap->params.offload = 1;
5965 } 5975 }
5966 if (caps_cmd.rdmacaps) { 5976 if (caps_cmd.rdmacaps) {
5967 params[0] = FW_PARAM_PFVF(STAG_START); 5977 params[0] = FW_PARAM_PFVF(STAG_START);
5968 params[1] = FW_PARAM_PFVF(STAG_END); 5978 params[1] = FW_PARAM_PFVF(STAG_END);
5969 params[2] = FW_PARAM_PFVF(RQ_START); 5979 params[2] = FW_PARAM_PFVF(RQ_START);
5970 params[3] = FW_PARAM_PFVF(RQ_END); 5980 params[3] = FW_PARAM_PFVF(RQ_END);
5971 params[4] = FW_PARAM_PFVF(PBL_START); 5981 params[4] = FW_PARAM_PFVF(PBL_START);
5972 params[5] = FW_PARAM_PFVF(PBL_END); 5982 params[5] = FW_PARAM_PFVF(PBL_END);
5973 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, 5983 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
5974 params, val); 5984 params, val);
5975 if (ret < 0) 5985 if (ret < 0)
5976 goto bye; 5986 goto bye;
5977 adap->vres.stag.start = val[0]; 5987 adap->vres.stag.start = val[0];
5978 adap->vres.stag.size = val[1] - val[0] + 1; 5988 adap->vres.stag.size = val[1] - val[0] + 1;
5979 adap->vres.rq.start = val[2]; 5989 adap->vres.rq.start = val[2];
5980 adap->vres.rq.size = val[3] - val[2] + 1; 5990 adap->vres.rq.size = val[3] - val[2] + 1;
5981 adap->vres.pbl.start = val[4]; 5991 adap->vres.pbl.start = val[4];
5982 adap->vres.pbl.size = val[5] - val[4] + 1; 5992 adap->vres.pbl.size = val[5] - val[4] + 1;
5983 5993
5984 params[0] = FW_PARAM_PFVF(SQRQ_START); 5994 params[0] = FW_PARAM_PFVF(SQRQ_START);
5985 params[1] = FW_PARAM_PFVF(SQRQ_END); 5995 params[1] = FW_PARAM_PFVF(SQRQ_END);
5986 params[2] = FW_PARAM_PFVF(CQ_START); 5996 params[2] = FW_PARAM_PFVF(CQ_START);
5987 params[3] = FW_PARAM_PFVF(CQ_END); 5997 params[3] = FW_PARAM_PFVF(CQ_END);
5988 params[4] = FW_PARAM_PFVF(OCQ_START); 5998 params[4] = FW_PARAM_PFVF(OCQ_START);
5989 params[5] = FW_PARAM_PFVF(OCQ_END); 5999 params[5] = FW_PARAM_PFVF(OCQ_END);
5990 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, 6000 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
5991 val); 6001 val);
5992 if (ret < 0) 6002 if (ret < 0)
5993 goto bye; 6003 goto bye;
5994 adap->vres.qp.start = val[0]; 6004 adap->vres.qp.start = val[0];
5995 adap->vres.qp.size = val[1] - val[0] + 1; 6005 adap->vres.qp.size = val[1] - val[0] + 1;
5996 adap->vres.cq.start = val[2]; 6006 adap->vres.cq.start = val[2];
5997 adap->vres.cq.size = val[3] - val[2] + 1; 6007 adap->vres.cq.size = val[3] - val[2] + 1;
5998 adap->vres.ocq.start = val[4]; 6008 adap->vres.ocq.start = val[4];
5999 adap->vres.ocq.size = val[5] - val[4] + 1; 6009 adap->vres.ocq.size = val[5] - val[4] + 1;
6000 6010
6001 params[0] = FW_PARAM_DEV(MAXORDIRD_QP); 6011 params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
6002 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); 6012 params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
6003 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, 6013 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
6004 val); 6014 val);
6005 if (ret < 0) { 6015 if (ret < 0) {
6006 adap->params.max_ordird_qp = 8; 6016 adap->params.max_ordird_qp = 8;
6007 adap->params.max_ird_adapter = 32 * adap->tids.ntids; 6017 adap->params.max_ird_adapter = 32 * adap->tids.ntids;
6008 ret = 0; 6018 ret = 0;
6009 } else { 6019 } else {
6010 adap->params.max_ordird_qp = val[0]; 6020 adap->params.max_ordird_qp = val[0];
6011 adap->params.max_ird_adapter = val[1]; 6021 adap->params.max_ird_adapter = val[1];
6012 } 6022 }
6013 dev_info(adap->pdev_dev, 6023 dev_info(adap->pdev_dev,
6014 "max_ordird_qp %d max_ird_adapter %d\n", 6024 "max_ordird_qp %d max_ird_adapter %d\n",
6015 adap->params.max_ordird_qp, 6025 adap->params.max_ordird_qp,
6016 adap->params.max_ird_adapter); 6026 adap->params.max_ird_adapter);
6017 } 6027 }
6018 if (caps_cmd.iscsicaps) { 6028 if (caps_cmd.iscsicaps) {
6019 params[0] = FW_PARAM_PFVF(ISCSI_START); 6029 params[0] = FW_PARAM_PFVF(ISCSI_START);
6020 params[1] = FW_PARAM_PFVF(ISCSI_END); 6030 params[1] = FW_PARAM_PFVF(ISCSI_END);
6021 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, 6031 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
6022 params, val); 6032 params, val);
6023 if (ret < 0) 6033 if (ret < 0)
6024 goto bye; 6034 goto bye;
6025 adap->vres.iscsi.start = val[0]; 6035 adap->vres.iscsi.start = val[0];
6026 adap->vres.iscsi.size = val[1] - val[0] + 1; 6036 adap->vres.iscsi.size = val[1] - val[0] + 1;
6027 } 6037 }
6028 #undef FW_PARAM_PFVF 6038 #undef FW_PARAM_PFVF
6029 #undef FW_PARAM_DEV 6039 #undef FW_PARAM_DEV
6030 6040
6031 /* The MTU/MSS Table is initialized by now, so load its values. If 6041 /* The MTU/MSS Table is initialized by now, so load its values. If
6032 * we're initializing the adapter, then we'll make any modifications 6042 * we're initializing the adapter, then we'll make any modifications
6033 * we want to the MTU/MSS Table and also initialize the congestion 6043 * we want to the MTU/MSS Table and also initialize the congestion
6034 * parameters. 6044 * parameters.
6035 */ 6045 */
6036 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); 6046 t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
6037 if (state != DEV_STATE_INIT) { 6047 if (state != DEV_STATE_INIT) {
6038 int i; 6048 int i;
6039 6049
6040 /* The default MTU Table contains values 1492 and 1500. 6050 /* The default MTU Table contains values 1492 and 1500.
6041 * However, for TCP, it's better to have two values which are 6051 * However, for TCP, it's better to have two values which are
6042 * a multiple of 8 +/- 4 bytes apart near this popular MTU. 6052 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
6043 * This allows us to have a TCP Data Payload which is a 6053 * This allows us to have a TCP Data Payload which is a
6044 * multiple of 8 regardless of what combination of TCP Options 6054 * multiple of 8 regardless of what combination of TCP Options
6045 * are in use (always a multiple of 4 bytes) which is 6055 * are in use (always a multiple of 4 bytes) which is
6046 * important for performance reasons. For instance, if no 6056 * important for performance reasons. For instance, if no
6047 * options are in use, then we have a 20-byte IP header and a 6057 * options are in use, then we have a 20-byte IP header and a
6048 * 20-byte TCP header. In this case, a 1500-byte MSS would 6058 * 20-byte TCP header. In this case, a 1500-byte MSS would
6049 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes 6059 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
6050 * which is not a multiple of 8. So using an MSS of 1488 in 6060 * which is not a multiple of 8. So using an MSS of 1488 in
6051 * this case results in a TCP Data Payload of 1448 bytes which 6061 * this case results in a TCP Data Payload of 1448 bytes which
6052 * is a multiple of 8. On the other hand, if 12-byte TCP Time 6062 * is a multiple of 8. On the other hand, if 12-byte TCP Time
6053 * Stamps have been negotiated, then an MTU of 1500 bytes 6063 * Stamps have been negotiated, then an MTU of 1500 bytes
6054 * results in a TCP Data Payload of 1448 bytes which, as 6064 * results in a TCP Data Payload of 1448 bytes which, as
6055 * above, is a multiple of 8 bytes ... 6065 * above, is a multiple of 8 bytes ...
6056 */ 6066 */
6057 for (i = 0; i < NMTUS; i++) 6067 for (i = 0; i < NMTUS; i++)
6058 if (adap->params.mtus[i] == 1492) { 6068 if (adap->params.mtus[i] == 1492) {
6059 adap->params.mtus[i] = 1488; 6069 adap->params.mtus[i] = 1488;
6060 break; 6070 break;
6061 } 6071 }
6062 6072
6063 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 6073 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6064 adap->params.b_wnd); 6074 adap->params.b_wnd);
6065 } 6075 }
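/* [Editorial sketch of the arithmetic above] With 20-byte IP and
 * 20-byte TCP headers, payload = MTU - 40 - options:
 * 1488 - 40 - 0 == 1448 and 1500 - 40 - 12 == 1448, both multiples
 * of 8, whereas 1500 - 40 == 1460 is not.
 */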
6066 t4_init_tp_params(adap); 6076 t4_init_tp_params(adap);
6067 adap->flags |= FW_OK; 6077 adap->flags |= FW_OK;
6068 return 0; 6078 return 0;
6069 6079
6070 /* 6080 /*
6071 * Something bad happened. If a command timed out or failed with EIO 6081 * Something bad happened. If a command timed out or failed with EIO
6072 * FW does not operate within its spec or something catastrophic 6082 * FW does not operate within its spec or something catastrophic
6073 * happened to HW/FW, stop issuing commands. 6083 * happened to HW/FW, stop issuing commands.
6074 */ 6084 */
6075 bye: 6085 bye:
6076 if (ret != -ETIMEDOUT && ret != -EIO) 6086 if (ret != -ETIMEDOUT && ret != -EIO)
6077 t4_fw_bye(adap, adap->mbox); 6087 t4_fw_bye(adap, adap->mbox);
6078 return ret; 6088 return ret;
6079 } 6089 }
6080 6090
6081 /* EEH callbacks */ 6091 /* EEH callbacks */
6082 6092
6083 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, 6093 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
6084 pci_channel_state_t state) 6094 pci_channel_state_t state)
6085 { 6095 {
6086 int i; 6096 int i;
6087 struct adapter *adap = pci_get_drvdata(pdev); 6097 struct adapter *adap = pci_get_drvdata(pdev);
6088 6098
6089 if (!adap) 6099 if (!adap)
6090 goto out; 6100 goto out;
6091 6101
6092 rtnl_lock(); 6102 rtnl_lock();
6093 adap->flags &= ~FW_OK; 6103 adap->flags &= ~FW_OK;
6094 notify_ulds(adap, CXGB4_STATE_START_RECOVERY); 6104 notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
6095 spin_lock(&adap->stats_lock); 6105 spin_lock(&adap->stats_lock);
6096 for_each_port(adap, i) { 6106 for_each_port(adap, i) {
6097 struct net_device *dev = adap->port[i]; 6107 struct net_device *dev = adap->port[i];
6098 6108
6099 netif_device_detach(dev); 6109 netif_device_detach(dev);
6100 netif_carrier_off(dev); 6110 netif_carrier_off(dev);
6101 } 6111 }
6102 spin_unlock(&adap->stats_lock); 6112 spin_unlock(&adap->stats_lock);
6103 if (adap->flags & FULL_INIT_DONE) 6113 if (adap->flags & FULL_INIT_DONE)
6104 cxgb_down(adap); 6114 cxgb_down(adap);
6105 rtnl_unlock(); 6115 rtnl_unlock();
6106 if ((adap->flags & DEV_ENABLED)) { 6116 if ((adap->flags & DEV_ENABLED)) {
6107 pci_disable_device(pdev); 6117 pci_disable_device(pdev);
6108 adap->flags &= ~DEV_ENABLED; 6118 adap->flags &= ~DEV_ENABLED;
6109 } 6119 }
6110 out: return state == pci_channel_io_perm_failure ? 6120 out: return state == pci_channel_io_perm_failure ?
6111 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET; 6121 PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
6112 } 6122 }
6113 6123
6114 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) 6124 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
6115 { 6125 {
6116 int i, ret; 6126 int i, ret;
6117 struct fw_caps_config_cmd c; 6127 struct fw_caps_config_cmd c;
6118 struct adapter *adap = pci_get_drvdata(pdev); 6128 struct adapter *adap = pci_get_drvdata(pdev);
6119 6129
6120 if (!adap) { 6130 if (!adap) {
6121 pci_restore_state(pdev); 6131 pci_restore_state(pdev);
6122 pci_save_state(pdev); 6132 pci_save_state(pdev);
6123 return PCI_ERS_RESULT_RECOVERED; 6133 return PCI_ERS_RESULT_RECOVERED;
6124 } 6134 }
6125 6135
6126 if (!(adap->flags & DEV_ENABLED)) { 6136 if (!(adap->flags & DEV_ENABLED)) {
6127 if (pci_enable_device(pdev)) { 6137 if (pci_enable_device(pdev)) {
6128 dev_err(&pdev->dev, "Cannot reenable PCI " 6138 dev_err(&pdev->dev, "Cannot reenable PCI "
6129 "device after reset\n"); 6139 "device after reset\n");
6130 return PCI_ERS_RESULT_DISCONNECT; 6140 return PCI_ERS_RESULT_DISCONNECT;
6131 } 6141 }
6132 adap->flags |= DEV_ENABLED; 6142 adap->flags |= DEV_ENABLED;
6133 } 6143 }
6134 6144
6135 pci_set_master(pdev); 6145 pci_set_master(pdev);
6136 pci_restore_state(pdev); 6146 pci_restore_state(pdev);
6137 pci_save_state(pdev); 6147 pci_save_state(pdev);
6138 pci_cleanup_aer_uncorrect_error_status(pdev); 6148 pci_cleanup_aer_uncorrect_error_status(pdev);
6139 6149
6140 if (t4_wait_dev_ready(adap->regs) < 0) 6150 if (t4_wait_dev_ready(adap->regs) < 0)
6141 return PCI_ERS_RESULT_DISCONNECT; 6151 return PCI_ERS_RESULT_DISCONNECT;
6142 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) 6152 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
6143 return PCI_ERS_RESULT_DISCONNECT; 6153 return PCI_ERS_RESULT_DISCONNECT;
6144 adap->flags |= FW_OK; 6154 adap->flags |= FW_OK;
6145 if (adap_init1(adap, &c)) 6155 if (adap_init1(adap, &c))
6146 return PCI_ERS_RESULT_DISCONNECT; 6156 return PCI_ERS_RESULT_DISCONNECT;
6147 6157
6148 for_each_port(adap, i) { 6158 for_each_port(adap, i) {
6149 struct port_info *p = adap2pinfo(adap, i); 6159 struct port_info *p = adap2pinfo(adap, i);
6150 6160
6151 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, 6161 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
6152 NULL, NULL); 6162 NULL, NULL);
6153 if (ret < 0) 6163 if (ret < 0)
6154 return PCI_ERS_RESULT_DISCONNECT; 6164 return PCI_ERS_RESULT_DISCONNECT;
6155 p->viid = ret; 6165 p->viid = ret;
6156 p->xact_addr_filt = -1; 6166 p->xact_addr_filt = -1;
6157 } 6167 }
6158 6168
6159 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, 6169 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
6160 adap->params.b_wnd); 6170 adap->params.b_wnd);
6161 setup_memwin(adap); 6171 setup_memwin(adap);
6162 if (cxgb_up(adap)) 6172 if (cxgb_up(adap))
6163 return PCI_ERS_RESULT_DISCONNECT; 6173 return PCI_ERS_RESULT_DISCONNECT;
6164 return PCI_ERS_RESULT_RECOVERED; 6174 return PCI_ERS_RESULT_RECOVERED;
6165 } 6175 }
6166 6176
6167 static void eeh_resume(struct pci_dev *pdev) 6177 static void eeh_resume(struct pci_dev *pdev)
6168 { 6178 {
6169 int i; 6179 int i;
6170 struct adapter *adap = pci_get_drvdata(pdev); 6180 struct adapter *adap = pci_get_drvdata(pdev);
6171 6181
6172 if (!adap) 6182 if (!adap)
6173 return; 6183 return;
6174 6184
6175 rtnl_lock(); 6185 rtnl_lock();
6176 for_each_port(adap, i) { 6186 for_each_port(adap, i) {
6177 struct net_device *dev = adap->port[i]; 6187 struct net_device *dev = adap->port[i];
6178 6188
6179 if (netif_running(dev)) { 6189 if (netif_running(dev)) {
6180 link_start(dev); 6190 link_start(dev);
6181 cxgb_set_rxmode(dev); 6191 cxgb_set_rxmode(dev);
6182 } 6192 }
6183 netif_device_attach(dev); 6193 netif_device_attach(dev);
6184 } 6194 }
6185 rtnl_unlock(); 6195 rtnl_unlock();
6186 } 6196 }
6187 6197
6188 static const struct pci_error_handlers cxgb4_eeh = { 6198 static const struct pci_error_handlers cxgb4_eeh = {
6189 .error_detected = eeh_err_detected, 6199 .error_detected = eeh_err_detected,
6190 .slot_reset = eeh_slot_reset, 6200 .slot_reset = eeh_slot_reset,
6191 .resume = eeh_resume, 6201 .resume = eeh_resume,
6192 }; 6202 };
6193 6203
6194 static inline bool is_x_10g_port(const struct link_config *lc) 6204 static inline bool is_x_10g_port(const struct link_config *lc)
6195 { 6205 {
6196 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 || 6206 return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
6197 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; 6207 (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
6198 } 6208 }
6199 6209
6200 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, 6210 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
6201 unsigned int us, unsigned int cnt, 6211 unsigned int us, unsigned int cnt,
6202 unsigned int size, unsigned int iqe_size) 6212 unsigned int size, unsigned int iqe_size)
6203 { 6213 {
6204 q->adap = adap; 6214 q->adap = adap;
6205 set_rspq_intr_params(q, us, cnt); 6215 set_rspq_intr_params(q, us, cnt);
6206 q->iqe_len = iqe_size; 6216 q->iqe_len = iqe_size;
6207 q->size = size; 6217 q->size = size;
6208 } 6218 }
6209 6219
6210 /* 6220 /*
6211 * Perform default configuration of DMA queues depending on the number and type 6221 * Perform default configuration of DMA queues depending on the number and type
6212 * of ports we found and the number of available CPUs. Most settings can be 6222 * of ports we found and the number of available CPUs. Most settings can be
6213 * modified by the admin prior to actual use. 6223 * modified by the admin prior to actual use.
6214 */ 6224 */
6215 static void cfg_queues(struct adapter *adap) 6225 static void cfg_queues(struct adapter *adap)
6216 { 6226 {
6217 struct sge *s = &adap->sge; 6227 struct sge *s = &adap->sge;
6218 int i, n10g = 0, qidx = 0; 6228 int i, n10g = 0, qidx = 0;
6219 #ifndef CONFIG_CHELSIO_T4_DCB 6229 #ifndef CONFIG_CHELSIO_T4_DCB
6220 int q10g = 0; 6230 int q10g = 0;
6221 #endif 6231 #endif
6222 int ciq_size; 6232 int ciq_size;
6223 6233
6224 for_each_port(adap, i) 6234 for_each_port(adap, i)
6225 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); 6235 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
6226 #ifdef CONFIG_CHELSIO_T4_DCB 6236 #ifdef CONFIG_CHELSIO_T4_DCB
6227 /* For Data Center Bridging support we need to be able to support up 6237 /* For Data Center Bridging support we need to be able to support up
6228 * to 8 Traffic Priorities; each of which will be assigned to its 6238 * to 8 Traffic Priorities; each of which will be assigned to its
6229 * own TX Queue in order to prevent Head-Of-Line Blocking. 6239 * own TX Queue in order to prevent Head-Of-Line Blocking.
6230 */ 6240 */
6231 if (adap->params.nports * 8 > MAX_ETH_QSETS) { 6241 if (adap->params.nports * 8 > MAX_ETH_QSETS) {
6232 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", 6242 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
6233 MAX_ETH_QSETS, adap->params.nports * 8); 6243 MAX_ETH_QSETS, adap->params.nports * 8);
6234 BUG_ON(1); 6244 BUG_ON(1);
6235 } 6245 }
6236 6246
6237 for_each_port(adap, i) { 6247 for_each_port(adap, i) {
6238 struct port_info *pi = adap2pinfo(adap, i); 6248 struct port_info *pi = adap2pinfo(adap, i);
6239 6249
6240 pi->first_qset = qidx; 6250 pi->first_qset = qidx;
6241 pi->nqsets = 8; 6251 pi->nqsets = 8;
6242 qidx += pi->nqsets; 6252 qidx += pi->nqsets;
6243 } 6253 }
6244 #else /* !CONFIG_CHELSIO_T4_DCB */ 6254 #else /* !CONFIG_CHELSIO_T4_DCB */
6245 /* 6255 /*
6246 * We default to 1 queue per non-10G port and up to # of cores queues 6256 * We default to 1 queue per non-10G port and up to # of cores queues
6247 * per 10G port. 6257 * per 10G port.
6248 */ 6258 */
6249 if (n10g) 6259 if (n10g)
6250 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; 6260 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
6251 if (q10g > netif_get_num_default_rss_queues()) 6261 if (q10g > netif_get_num_default_rss_queues())
6252 q10g = netif_get_num_default_rss_queues(); 6262 q10g = netif_get_num_default_rss_queues();
6253 6263
6254 for_each_port(adap, i) { 6264 for_each_port(adap, i) {
6255 struct port_info *pi = adap2pinfo(adap, i); 6265 struct port_info *pi = adap2pinfo(adap, i);
6256 6266
6257 pi->first_qset = qidx; 6267 pi->first_qset = qidx;
6258 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1; 6268 pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
6259 qidx += pi->nqsets; 6269 qidx += pi->nqsets;
6260 } 6270 }
6261 #endif /* !CONFIG_CHELSIO_T4_DCB */ 6271 #endif /* !CONFIG_CHELSIO_T4_DCB */
6262 6272
6263 s->ethqsets = qidx; 6273 s->ethqsets = qidx;
6264 s->max_ethqsets = qidx; /* MSI-X may lower it later */ 6274 s->max_ethqsets = qidx; /* MSI-X may lower it later */
6265 6275
6266 if (is_offload(adap)) { 6276 if (is_offload(adap)) {
6267 /* 6277 /*
6268 * For offload we use 1 queue/channel if all ports are up to 1G, 6278 * For offload we use 1 queue/channel if all ports are up to 1G,
6269 * otherwise we divide all available queues amongst the channels 6279 * otherwise we divide all available queues amongst the channels
6270 * capped by the number of available cores. 6280 * capped by the number of available cores.
6271 */ 6281 */
6272 if (n10g) { 6282 if (n10g) {
6273 i = min_t(int, ARRAY_SIZE(s->ofldrxq), 6283 i = min_t(int, ARRAY_SIZE(s->ofldrxq),
6274 num_online_cpus()); 6284 num_online_cpus());
6275 s->ofldqsets = roundup(i, adap->params.nports); 6285 s->ofldqsets = roundup(i, adap->params.nports);
6276 } else 6286 } else
6277 s->ofldqsets = adap->params.nports; 6287 s->ofldqsets = adap->params.nports;
6278 /* For RDMA one Rx queue per channel suffices */ 6288 /* For RDMA one Rx queue per channel suffices */
6279 s->rdmaqs = adap->params.nports; 6289 s->rdmaqs = adap->params.nports;
6280 s->rdmaciqs = adap->params.nports; 6290 s->rdmaciqs = adap->params.nports;
6281 } 6291 }
6282 6292
6283 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) { 6293 for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
6284 struct sge_eth_rxq *r = &s->ethrxq[i]; 6294 struct sge_eth_rxq *r = &s->ethrxq[i];
6285 6295
6286 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); 6296 init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
6287 r->fl.size = 72; 6297 r->fl.size = 72;
6288 } 6298 }
6289 6299
6290 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++) 6300 for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
6291 s->ethtxq[i].q.size = 1024; 6301 s->ethtxq[i].q.size = 1024;
6292 6302
6293 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) 6303 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
6294 s->ctrlq[i].q.size = 512; 6304 s->ctrlq[i].q.size = 512;
6295 6305
6296 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) 6306 for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
6297 s->ofldtxq[i].q.size = 1024; 6307 s->ofldtxq[i].q.size = 1024;
6298 6308
6299 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) { 6309 for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
6300 struct sge_ofld_rxq *r = &s->ofldrxq[i]; 6310 struct sge_ofld_rxq *r = &s->ofldrxq[i];
6301 6311
6302 init_rspq(adap, &r->rspq, 5, 1, 1024, 64); 6312 init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
6303 r->rspq.uld = CXGB4_ULD_ISCSI; 6313 r->rspq.uld = CXGB4_ULD_ISCSI;
6304 r->fl.size = 72; 6314 r->fl.size = 72;
6305 } 6315 }
6306 6316
6307 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) { 6317 for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
6308 struct sge_ofld_rxq *r = &s->rdmarxq[i]; 6318 struct sge_ofld_rxq *r = &s->rdmarxq[i];
6309 6319
6310 init_rspq(adap, &r->rspq, 5, 1, 511, 64); 6320 init_rspq(adap, &r->rspq, 5, 1, 511, 64);
6311 r->rspq.uld = CXGB4_ULD_RDMA; 6321 r->rspq.uld = CXGB4_ULD_RDMA;
6312 r->fl.size = 72; 6322 r->fl.size = 72;
6313 } 6323 }
6314 6324
6315 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; 6325 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
6316 if (ciq_size > SGE_MAX_IQ_SIZE) { 6326 if (ciq_size > SGE_MAX_IQ_SIZE) {
6317 CH_WARN(adap, "CIQ size too small for available IQs\n"); 6327 CH_WARN(adap, "CIQ size too small for available IQs\n");
6318 ciq_size = SGE_MAX_IQ_SIZE; 6328 ciq_size = SGE_MAX_IQ_SIZE;
6319 } 6329 }
6320 6330
6321 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) { 6331 for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
6322 struct sge_ofld_rxq *r = &s->rdmaciq[i]; 6332 struct sge_ofld_rxq *r = &s->rdmaciq[i];
6323 6333
6324 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64); 6334 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
6325 r->rspq.uld = CXGB4_ULD_RDMA; 6335 r->rspq.uld = CXGB4_ULD_RDMA;
6326 } 6336 }
6327 6337
6328 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); 6338 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
6329 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64); 6339 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
6330 } 6340 }
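
A worked example of the non-DCB split above, as a standalone sketch. MAX_ETH_QSETS = 32 and an RSS ceiling of 8 are assumed values here (check both against your tree):

	#include <stdio.h>

	int main(void)
	{
		const int max_eth_qsets = 32;	/* assumed MAX_ETH_QSETS */
		const int rss_ceiling = 8;	/* assumed netif_get_num_default_rss_queues() */
		const int nports = 4, n10g = 2;	/* two 10G ports, two 1G ports */

		/* One queue set per 1G port, the remainder split across 10G ports. */
		int q10g = (max_eth_qsets - (nports - n10g)) / n10g;	/* (32 - 2) / 2 = 15 */

		if (q10g > rss_ceiling)
			q10g = rss_ceiling;	/* capped to 8 */

		printf("q10g=%d, ethqsets=%d\n", q10g,
		       n10g * q10g + (nports - n10g));	/* q10g=8, ethqsets=18 */
		return 0;
	}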
6331 6341
6332 /* 6342 /*
6333 * Reduce the number of Ethernet queues across all ports to at most n. 6343 * Reduce the number of Ethernet queues across all ports to at most n.
6334 * The caller guarantees that n allows at least one queue per port. 6344 * The caller guarantees that n allows at least one queue per port.
6335 */ 6345 */
6336 static void reduce_ethqs(struct adapter *adap, int n) 6346 static void reduce_ethqs(struct adapter *adap, int n)
6337 { 6347 {
6338 int i; 6348 int i;
6339 struct port_info *pi; 6349 struct port_info *pi;
6340 6350
6341 while (n < adap->sge.ethqsets) 6351 while (n < adap->sge.ethqsets)
6342 for_each_port(adap, i) { 6352 for_each_port(adap, i) {
6343 pi = adap2pinfo(adap, i); 6353 pi = adap2pinfo(adap, i);
6344 if (pi->nqsets > 1) { 6354 if (pi->nqsets > 1) {
6345 pi->nqsets--; 6355 pi->nqsets--;
6346 adap->sge.ethqsets--; 6356 adap->sge.ethqsets--;
6347 if (adap->sge.ethqsets <= n) 6357 if (adap->sge.ethqsets <= n)
6348 break; 6358 break;
6349 } 6359 }
6350 } 6360 }
6351 6361
6352 n = 0; 6362 n = 0;
6353 for_each_port(adap, i) { 6363 for_each_port(adap, i) {
6354 pi = adap2pinfo(adap, i); 6364 pi = adap2pinfo(adap, i);
6355 pi->first_qset = n; 6365 pi->first_qset = n;
6356 n += pi->nqsets; 6366 n += pi->nqsets;
6357 } 6367 }
6358 } 6368 }
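
To make the trimming order concrete, here is a hand-run of reduce_ethqs() on hypothetical per-port queue counts:

	/* Trimming nqsets = {8, 8, 1, 1} (ethqsets = 18) down to n = 10
	 * removes one set from each multi-queue port per pass:
	 *   pass 1: {7, 7, 1, 1}  total 16
	 *   pass 2: {6, 6, 1, 1}  total 14
	 *   pass 3: {5, 5, 1, 1}  total 12
	 *   pass 4: {4, 4, 1, 1}  total 10  (<= n, stop)
	 * first_qset is then rebuilt as a running sum: 0, 4, 8, 9.
	 */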
6359 6369
6360 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ 6370 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
6361 #define EXTRA_VECS 2 6371 #define EXTRA_VECS 2
6362 6372
6363 static int enable_msix(struct adapter *adap) 6373 static int enable_msix(struct adapter *adap)
6364 { 6374 {
6365 int ofld_need = 0; 6375 int ofld_need = 0;
6366 int i, want, need; 6376 int i, want, need;
6367 struct sge *s = &adap->sge; 6377 struct sge *s = &adap->sge;
6368 unsigned int nchan = adap->params.nports; 6378 unsigned int nchan = adap->params.nports;
6369 struct msix_entry entries[MAX_INGQ + 1]; 6379 struct msix_entry entries[MAX_INGQ + 1];
6370 6380
6371 for (i = 0; i < ARRAY_SIZE(entries); ++i) 6381 for (i = 0; i < ARRAY_SIZE(entries); ++i)
6372 entries[i].entry = i; 6382 entries[i].entry = i;
6373 6383
6374 want = s->max_ethqsets + EXTRA_VECS; 6384 want = s->max_ethqsets + EXTRA_VECS;
6375 if (is_offload(adap)) { 6385 if (is_offload(adap)) {
6376 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets; 6386 want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
6377 /* need nchan for each possible ULD */ 6387 /* need nchan for each possible ULD */
6378 ofld_need = 3 * nchan; 6388 ofld_need = 3 * nchan;
6379 } 6389 }
6380 #ifdef CONFIG_CHELSIO_T4_DCB 6390 #ifdef CONFIG_CHELSIO_T4_DCB
6381 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for 6391 /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
6382 * each port. 6392 * each port.
6383 */ 6393 */
6384 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need; 6394 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
6385 #else 6395 #else
6386 need = adap->params.nports + EXTRA_VECS + ofld_need; 6396 need = adap->params.nports + EXTRA_VECS + ofld_need;
6387 #endif 6397 #endif
6388 want = pci_enable_msix_range(adap->pdev, entries, need, want); 6398 want = pci_enable_msix_range(adap->pdev, entries, need, want);
6389 if (want < 0) 6399 if (want < 0)
6390 return want; 6400 return want;
6391 6401
6392 /* 6402 /*
6393 * Distribute available vectors to the various queue groups. 6403 * Distribute available vectors to the various queue groups.
6394 * Every group gets its minimum requirement and NIC gets top 6404 * Every group gets its minimum requirement and NIC gets top
6395 * priority for leftovers. 6405 * priority for leftovers.
6396 */ 6406 */
6397 i = want - EXTRA_VECS - ofld_need; 6407 i = want - EXTRA_VECS - ofld_need;
6398 if (i < s->max_ethqsets) { 6408 if (i < s->max_ethqsets) {
6399 s->max_ethqsets = i; 6409 s->max_ethqsets = i;
6400 if (i < s->ethqsets) 6410 if (i < s->ethqsets)
6401 reduce_ethqs(adap, i); 6411 reduce_ethqs(adap, i);
6402 } 6412 }
6403 if (is_offload(adap)) { 6413 if (is_offload(adap)) {
6404 i = want - EXTRA_VECS - s->max_ethqsets; 6414 i = want - EXTRA_VECS - s->max_ethqsets;
6405 i -= ofld_need - nchan; 6415 i -= ofld_need - nchan;
6406 s->ofldqsets = (i / nchan) * nchan; /* round down */ 6416 s->ofldqsets = (i / nchan) * nchan; /* round down */
6407 } 6417 }
6408 for (i = 0; i < want; ++i) 6418 for (i = 0; i < want; ++i)
6409 adap->msix_info[i].vec = entries[i].vector; 6419 adap->msix_info[i].vec = entries[i].vector;
6410 6420
6411 return 0; 6421 return 0;
6412 } 6422 }
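
Plugging illustrative numbers (not taken from any particular adapter) into the budget above:

	/* A 4-port offload-capable adapter with max_ethqsets = 18 and
	 * rdmaqs = rdmaciqs = ofldqsets = 4 requests
	 *   want = 18 + 2 + (4 + 4 + 4) = 32 vectors
	 * and, without DCB, insists on at least
	 *   need = 4 + 2 + 3 * 4 = 18 vectors.
	 * pci_enable_msix_range() grants something in [need, want]; any
	 * shortfall against want is absorbed by shrinking the Ethernet
	 * and offload queue counts as shown above.
	 */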
6413 6423
6414 #undef EXTRA_VECS 6424 #undef EXTRA_VECS
6415 6425
6416 static int init_rss(struct adapter *adap) 6426 static int init_rss(struct adapter *adap)
6417 { 6427 {
6418 unsigned int i, j; 6428 unsigned int i, j;
6419 6429
6420 for_each_port(adap, i) { 6430 for_each_port(adap, i) {
6421 struct port_info *pi = adap2pinfo(adap, i); 6431 struct port_info *pi = adap2pinfo(adap, i);
6422 6432
6423 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL); 6433 pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
6424 if (!pi->rss) 6434 if (!pi->rss)
6425 return -ENOMEM; 6435 return -ENOMEM;
6426 for (j = 0; j < pi->rss_size; j++) 6436 for (j = 0; j < pi->rss_size; j++)
6427 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets); 6437 pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
6428 } 6438 }
6429 return 0; 6439 return 0;
6430 } 6440 }
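
ethtool_rxfh_indir_default() (a static inline in include/linux/ethtool.h) is just a modulo spread, so each port starts with a round-robin indirection table:

	/* ethtool_rxfh_indir_default(j, nqsets) returns j % nqsets, so a
	 * port with rss_size = 8 and nqsets = 3 starts out with the
	 * table {0, 1, 2, 0, 1, 2, 0, 1}.
	 */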
6431 6441
6432 static void print_port_info(const struct net_device *dev) 6442 static void print_port_info(const struct net_device *dev)
6433 { 6443 {
6434 char buf[80]; 6444 char buf[80];
6435 char *bufp = buf; 6445 char *bufp = buf;
6436 const char *spd = ""; 6446 const char *spd = "";
6437 const struct port_info *pi = netdev_priv(dev); 6447 const struct port_info *pi = netdev_priv(dev);
6438 const struct adapter *adap = pi->adapter; 6448 const struct adapter *adap = pi->adapter;
6439 6449
6440 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) 6450 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
6441 spd = " 2.5 GT/s"; 6451 spd = " 2.5 GT/s";
6442 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) 6452 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
6443 spd = " 5 GT/s"; 6453 spd = " 5 GT/s";
6444 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) 6454 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
6445 spd = " 8 GT/s"; 6455 spd = " 8 GT/s";
6446 6456
6447 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) 6457 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
6448 bufp += sprintf(bufp, "100/"); 6458 bufp += sprintf(bufp, "100/");
6449 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) 6459 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
6450 bufp += sprintf(bufp, "1000/"); 6460 bufp += sprintf(bufp, "1000/");
6451 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) 6461 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
6452 bufp += sprintf(bufp, "10G/"); 6462 bufp += sprintf(bufp, "10G/");
6453 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) 6463 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
6454 bufp += sprintf(bufp, "40G/"); 6464 bufp += sprintf(bufp, "40G/");
6455 if (bufp != buf) 6465 if (bufp != buf)
6456 --bufp; 6466 --bufp;
6457 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); 6467 sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
6458 6468
6459 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", 6469 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
6460 adap->params.vpd.id, 6470 adap->params.vpd.id,
6461 CHELSIO_CHIP_RELEASE(adap->params.chip), buf, 6471 CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
6462 is_offload(adap) ? "R" : "", adap->params.pci.width, spd, 6472 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
6463 (adap->flags & USING_MSIX) ? " MSI-X" : 6473 (adap->flags & USING_MSIX) ? " MSI-X" :
6464 (adap->flags & USING_MSI) ? " MSI" : ""); 6474 (adap->flags & USING_MSI) ? " MSI" : "");
6465 netdev_info(dev, "S/N: %s, P/N: %s\n", 6475 netdev_info(dev, "S/N: %s, P/N: %s\n",
6466 adap->params.vpd.sn, adap->params.vpd.pn); 6476 adap->params.vpd.sn, adap->params.vpd.pn);
6467 } 6477 }
6468 6478
6469 static void enable_pcie_relaxed_ordering(struct pci_dev *dev) 6479 static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
6470 { 6480 {
6471 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); 6481 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
6472 } 6482 }
6473 6483
6474 /* 6484 /*
6475 * Free the following resources: 6485 * Free the following resources:
6476 * - memory used for tables 6486 * - memory used for tables
6477 * - MSI/MSI-X 6487 * - MSI/MSI-X
6478 * - net devices 6488 * - net devices
6479 * - resources FW is holding for us 6489 * - resources FW is holding for us
6480 */ 6490 */
6481 static void free_some_resources(struct adapter *adapter) 6491 static void free_some_resources(struct adapter *adapter)
6482 { 6492 {
6483 unsigned int i; 6493 unsigned int i;
6484 6494
6485 t4_free_mem(adapter->l2t); 6495 t4_free_mem(adapter->l2t);
6486 t4_free_mem(adapter->tids.tid_tab); 6496 t4_free_mem(adapter->tids.tid_tab);
6487 disable_msi(adapter); 6497 disable_msi(adapter);
6488 6498
6489 for_each_port(adapter, i) 6499 for_each_port(adapter, i)
6490 if (adapter->port[i]) { 6500 if (adapter->port[i]) {
6491 kfree(adap2pinfo(adapter, i)->rss); 6501 kfree(adap2pinfo(adapter, i)->rss);
6492 free_netdev(adapter->port[i]); 6502 free_netdev(adapter->port[i]);
6493 } 6503 }
6494 if (adapter->flags & FW_OK) 6504 if (adapter->flags & FW_OK)
6495 t4_fw_bye(adapter, adapter->fn); 6505 t4_fw_bye(adapter, adapter->fn);
6496 } 6506 }
6497 6507
6498 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 6508 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
6499 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ 6509 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
6500 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) 6510 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
6501 #define SEGMENT_SIZE 128 6511 #define SEGMENT_SIZE 128
6502 6512
6503 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 6513 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6504 { 6514 {
6505 int func, i, err, s_qpp, qpp, num_seg; 6515 int func, i, err, s_qpp, qpp, num_seg;
6506 struct port_info *pi; 6516 struct port_info *pi;
6507 bool highdma = false; 6517 bool highdma = false;
6508 struct adapter *adapter = NULL; 6518 struct adapter *adapter = NULL;
6509 void __iomem *regs; 6519 void __iomem *regs;
6510 6520
6511 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION); 6521 printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
6512 6522
6513 err = pci_request_regions(pdev, KBUILD_MODNAME); 6523 err = pci_request_regions(pdev, KBUILD_MODNAME);
6514 if (err) { 6524 if (err) {
6515 /* Just info, some other driver may have claimed the device. */ 6525 /* Just info, some other driver may have claimed the device. */
6516 dev_info(&pdev->dev, "cannot obtain PCI resources\n"); 6526 dev_info(&pdev->dev, "cannot obtain PCI resources\n");
6517 return err; 6527 return err;
6518 } 6528 }
6519 6529
6520 err = pci_enable_device(pdev); 6530 err = pci_enable_device(pdev);
6521 if (err) { 6531 if (err) {
6522 dev_err(&pdev->dev, "cannot enable PCI device\n"); 6532 dev_err(&pdev->dev, "cannot enable PCI device\n");
6523 goto out_release_regions; 6533 goto out_release_regions;
6524 } 6534 }
6525 6535
6526 regs = pci_ioremap_bar(pdev, 0); 6536 regs = pci_ioremap_bar(pdev, 0);
6527 if (!regs) { 6537 if (!regs) {
6528 dev_err(&pdev->dev, "cannot map device registers\n"); 6538 dev_err(&pdev->dev, "cannot map device registers\n");
6529 err = -ENOMEM; 6539 err = -ENOMEM;
6530 goto out_disable_device; 6540 goto out_disable_device;
6531 } 6541 }
6532 6542
6533 err = t4_wait_dev_ready(regs); 6543 err = t4_wait_dev_ready(regs);
6534 if (err < 0) 6544 if (err < 0)
6535 goto out_unmap_bar0; 6545 goto out_unmap_bar0;
6536 6546
6537 /* We control everything through one PF */ 6547 /* We control everything through one PF */
6538 func = SOURCEPF_GET(readl(regs + PL_WHOAMI)); 6548 func = SOURCEPF_GET(readl(regs + PL_WHOAMI));
6539 if (func != ent->driver_data) { 6549 if (func != ent->driver_data) {
6540 iounmap(regs); 6550 iounmap(regs);
6541 pci_disable_device(pdev); 6551 pci_disable_device(pdev);
6542 pci_save_state(pdev); /* to restore SR-IOV later */ 6552 pci_save_state(pdev); /* to restore SR-IOV later */
6543 goto sriov; 6553 goto sriov;
6544 } 6554 }
6545 6555
6546 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 6556 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
6547 highdma = true; 6557 highdma = true;
6548 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 6558 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
6549 if (err) { 6559 if (err) {
6550 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " 6560 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
6551 "coherent allocations\n"); 6561 "coherent allocations\n");
6552 goto out_unmap_bar0; 6562 goto out_unmap_bar0;
6553 } 6563 }
6554 } else { 6564 } else {
6555 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6565 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6556 if (err) { 6566 if (err) {
6557 dev_err(&pdev->dev, "no usable DMA configuration\n"); 6567 dev_err(&pdev->dev, "no usable DMA configuration\n");
6558 goto out_unmap_bar0; 6568 goto out_unmap_bar0;
6559 } 6569 }
6560 } 6570 }
6561 6571
6562 pci_enable_pcie_error_reporting(pdev); 6572 pci_enable_pcie_error_reporting(pdev);
6563 enable_pcie_relaxed_ordering(pdev); 6573 enable_pcie_relaxed_ordering(pdev);
6564 pci_set_master(pdev); 6574 pci_set_master(pdev);
6565 pci_save_state(pdev); 6575 pci_save_state(pdev);
6566 6576
6567 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 6577 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
6568 if (!adapter) { 6578 if (!adapter) {
6569 err = -ENOMEM; 6579 err = -ENOMEM;
6570 goto out_unmap_bar0; 6580 goto out_unmap_bar0;
6571 } 6581 }
6572 6582
6573 adapter->workq = create_singlethread_workqueue("cxgb4"); 6583 adapter->workq = create_singlethread_workqueue("cxgb4");
6574 if (!adapter->workq) { 6584 if (!adapter->workq) {
6575 err = -ENOMEM; 6585 err = -ENOMEM;
6576 goto out_free_adapter; 6586 goto out_free_adapter;
6577 } 6587 }
6578 6588
6579 /* PCI device has been enabled */ 6589 /* PCI device has been enabled */
6580 adapter->flags |= DEV_ENABLED; 6590 adapter->flags |= DEV_ENABLED;
6581 6591
6582 adapter->regs = regs; 6592 adapter->regs = regs;
6583 adapter->pdev = pdev; 6593 adapter->pdev = pdev;
6584 adapter->pdev_dev = &pdev->dev; 6594 adapter->pdev_dev = &pdev->dev;
6585 adapter->mbox = func; 6595 adapter->mbox = func;
6586 adapter->fn = func; 6596 adapter->fn = func;
6587 adapter->msg_enable = dflt_msg_enable; 6597 adapter->msg_enable = dflt_msg_enable;
6588 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); 6598 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
6589 6599
6590 spin_lock_init(&adapter->stats_lock); 6600 spin_lock_init(&adapter->stats_lock);
6591 spin_lock_init(&adapter->tid_release_lock); 6601 spin_lock_init(&adapter->tid_release_lock);
6592 6602
6593 INIT_WORK(&adapter->tid_release_task, process_tid_release_list); 6603 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6594 INIT_WORK(&adapter->db_full_task, process_db_full); 6604 INIT_WORK(&adapter->db_full_task, process_db_full);
6595 INIT_WORK(&adapter->db_drop_task, process_db_drop); 6605 INIT_WORK(&adapter->db_drop_task, process_db_drop);
6596 6606
6597 err = t4_prep_adapter(adapter); 6607 err = t4_prep_adapter(adapter);
6598 if (err) 6608 if (err)
6599 goto out_free_adapter; 6609 goto out_free_adapter;
6600 6610
6601 6611
6602 if (!is_t4(adapter->params.chip)) { 6612 if (!is_t4(adapter->params.chip)) {
6603 s_qpp = QUEUESPERPAGEPF1 * adapter->fn; 6613 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
6604 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter, 6614 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
6605 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp); 6615 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
6606 num_seg = PAGE_SIZE / SEGMENT_SIZE; 6616 num_seg = PAGE_SIZE / SEGMENT_SIZE;
6607 6617
6608 /* Each segment size is 128B. Write coalescing is enabled only 6618 /* Each segment size is 128B. Write coalescing is enabled only
6609 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the 6619 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value for the
6610 * queue is less than the number of segments that can be 6620 * queue is less than the number of segments that can be
6611 * accommodated in a page. 6621 * accommodated in a page.
6612 */ 6622 */
6613 if (qpp > num_seg) { 6623 if (qpp > num_seg) {
6614 dev_err(&pdev->dev, 6624 dev_err(&pdev->dev,
6615 "Incorrect number of egress queues per page\n"); 6625 "Incorrect number of egress queues per page\n");
6616 err = -EINVAL; 6626 err = -EINVAL;
6617 goto out_free_adapter; 6627 goto out_free_adapter;
6618 } 6628 }
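
	/* Worked numbers, assuming 4KB pages (PAGE_SIZE varies by arch):
	 * num_seg = 4096 / 128 = 32. Write-combined BAR2 doorbell writes
	 * are only safe while every egress queue owns at least one full
	 * 128B segment, i.e. qpp <= 32; qpp = 64 would leave each queue
	 * only 64 bytes and is rejected above.
	 */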
6619 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2), 6629 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
6620 pci_resource_len(pdev, 2)); 6630 pci_resource_len(pdev, 2));
6621 if (!adapter->bar2) { 6631 if (!adapter->bar2) {
6622 dev_err(&pdev->dev, "cannot map device bar2 region\n"); 6632 dev_err(&pdev->dev, "cannot map device bar2 region\n");
6623 err = -ENOMEM; 6633 err = -ENOMEM;
6624 goto out_free_adapter; 6634 goto out_free_adapter;
6625 } 6635 }
6626 } 6636 }
6627 6637
6628 setup_memwin(adapter); 6638 setup_memwin(adapter);
6629 err = adap_init0(adapter); 6639 err = adap_init0(adapter);
6630 setup_memwin_rdma(adapter); 6640 setup_memwin_rdma(adapter);
6631 if (err) 6641 if (err)
6632 goto out_unmap_bar; 6642 goto out_unmap_bar;
6633 6643
6634 for_each_port(adapter, i) { 6644 for_each_port(adapter, i) {
6635 struct net_device *netdev; 6645 struct net_device *netdev;
6636 6646
6637 netdev = alloc_etherdev_mq(sizeof(struct port_info), 6647 netdev = alloc_etherdev_mq(sizeof(struct port_info),
6638 MAX_ETH_QSETS); 6648 MAX_ETH_QSETS);
6639 if (!netdev) { 6649 if (!netdev) {
6640 err = -ENOMEM; 6650 err = -ENOMEM;
6641 goto out_free_dev; 6651 goto out_free_dev;
6642 } 6652 }
6643 6653
6644 SET_NETDEV_DEV(netdev, &pdev->dev); 6654 SET_NETDEV_DEV(netdev, &pdev->dev);
6645 6655
6646 adapter->port[i] = netdev; 6656 adapter->port[i] = netdev;
6647 pi = netdev_priv(netdev); 6657 pi = netdev_priv(netdev);
6648 pi->adapter = adapter; 6658 pi->adapter = adapter;
6649 pi->xact_addr_filt = -1; 6659 pi->xact_addr_filt = -1;
6650 pi->port_id = i; 6660 pi->port_id = i;
6651 netdev->irq = pdev->irq; 6661 netdev->irq = pdev->irq;
6652 6662
6653 netdev->hw_features = NETIF_F_SG | TSO_FLAGS | 6663 netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
6654 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 6664 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6655 NETIF_F_RXCSUM | NETIF_F_RXHASH | 6665 NETIF_F_RXCSUM | NETIF_F_RXHASH |
6656 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; 6666 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
6657 if (highdma) 6667 if (highdma)
6658 netdev->hw_features |= NETIF_F_HIGHDMA; 6668 netdev->hw_features |= NETIF_F_HIGHDMA;
6659 netdev->features |= netdev->hw_features; 6669 netdev->features |= netdev->hw_features;
6660 netdev->vlan_features = netdev->features & VLAN_FEAT; 6670 netdev->vlan_features = netdev->features & VLAN_FEAT;
6661 6671
6662 netdev->priv_flags |= IFF_UNICAST_FLT; 6672 netdev->priv_flags |= IFF_UNICAST_FLT;
6663 6673
6664 netdev->netdev_ops = &cxgb4_netdev_ops; 6674 netdev->netdev_ops = &cxgb4_netdev_ops;
6665 #ifdef CONFIG_CHELSIO_T4_DCB 6675 #ifdef CONFIG_CHELSIO_T4_DCB
6666 netdev->dcbnl_ops = &cxgb4_dcb_ops; 6676 netdev->dcbnl_ops = &cxgb4_dcb_ops;
6667 cxgb4_dcb_state_init(netdev); 6677 cxgb4_dcb_state_init(netdev);
6668 #endif 6678 #endif
6669 netdev->ethtool_ops = &cxgb_ethtool_ops; 6679 netdev->ethtool_ops = &cxgb_ethtool_ops;
6670 } 6680 }
6671 6681
6672 pci_set_drvdata(pdev, adapter); 6682 pci_set_drvdata(pdev, adapter);
6673 6683
6674 if (adapter->flags & FW_OK) { 6684 if (adapter->flags & FW_OK) {
6675 err = t4_port_init(adapter, func, func, 0); 6685 err = t4_port_init(adapter, func, func, 0);
6676 if (err) 6686 if (err)
6677 goto out_free_dev; 6687 goto out_free_dev;
6678 } 6688 }
6679 6689
6680 /* 6690 /*
6681 * Configure queues and allocate tables now, they can be needed as 6691 * Configure queues and allocate tables now, they can be needed as
6682 * soon as the first register_netdev completes. 6692 * soon as the first register_netdev completes.
6683 */ 6693 */
6684 cfg_queues(adapter); 6694 cfg_queues(adapter);
6685 6695
6686 adapter->l2t = t4_init_l2t(); 6696 adapter->l2t = t4_init_l2t();
6687 if (!adapter->l2t) { 6697 if (!adapter->l2t) {
6688 /* We tolerate a lack of L2T, giving up some functionality */ 6698 /* We tolerate a lack of L2T, giving up some functionality */
6689 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n"); 6699 dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
6690 adapter->params.offload = 0; 6700 adapter->params.offload = 0;
6691 } 6701 }
6692 6702
6693 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) { 6703 if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
6694 dev_warn(&pdev->dev, "could not allocate TID table, " 6704 dev_warn(&pdev->dev, "could not allocate TID table, "
6695 "continuing\n"); 6705 "continuing\n");
6696 adapter->params.offload = 0; 6706 adapter->params.offload = 0;
6697 } 6707 }
6698 6708
6699 /* See what interrupts we'll be using */ 6709 /* See what interrupts we'll be using */
6700 if (msi > 1 && enable_msix(adapter) == 0) 6710 if (msi > 1 && enable_msix(adapter) == 0)
6701 adapter->flags |= USING_MSIX; 6711 adapter->flags |= USING_MSIX;
6702 else if (msi > 0 && pci_enable_msi(pdev) == 0) 6712 else if (msi > 0 && pci_enable_msi(pdev) == 0)
6703 adapter->flags |= USING_MSI; 6713 adapter->flags |= USING_MSI;
6704 6714
6705 err = init_rss(adapter); 6715 err = init_rss(adapter);
6706 if (err) 6716 if (err)
6707 goto out_free_dev; 6717 goto out_free_dev;
6708 6718
6709 /* 6719 /*
6710 * The card is now ready to go. If any errors occur during device 6720 * The card is now ready to go. If any errors occur during device
6711 * registration we do not fail the whole card but rather proceed only 6721 * registration we do not fail the whole card but rather proceed only
6712 * with the ports we manage to register successfully. However we must 6722 * with the ports we manage to register successfully. However we must
6713 * register at least one net device. 6723 * register at least one net device.
6714 */ 6724 */
6715 for_each_port(adapter, i) { 6725 for_each_port(adapter, i) {
6716 pi = adap2pinfo(adapter, i); 6726 pi = adap2pinfo(adapter, i);
6717 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets); 6727 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
6718 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets); 6728 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
6719 6729
6720 err = register_netdev(adapter->port[i]); 6730 err = register_netdev(adapter->port[i]);
6721 if (err) 6731 if (err)
6722 break; 6732 break;
6723 adapter->chan_map[pi->tx_chan] = i; 6733 adapter->chan_map[pi->tx_chan] = i;
6724 print_port_info(adapter->port[i]); 6734 print_port_info(adapter->port[i]);
6725 } 6735 }
6726 if (i == 0) { 6736 if (i == 0) {
6727 dev_err(&pdev->dev, "could not register any net devices\n"); 6737 dev_err(&pdev->dev, "could not register any net devices\n");
6728 goto out_free_dev; 6738 goto out_free_dev;
6729 } 6739 }
6730 if (err) { 6740 if (err) {
6731 dev_warn(&pdev->dev, "only %d net devices registered\n", i); 6741 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
6732 err = 0; 6742 err = 0;
6733 } 6743 }
6734 6744
6735 if (cxgb4_debugfs_root) { 6745 if (cxgb4_debugfs_root) {
6736 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), 6746 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
6737 cxgb4_debugfs_root); 6747 cxgb4_debugfs_root);
6738 setup_debugfs(adapter); 6748 setup_debugfs(adapter);
6739 } 6749 }
6740 6750
6741 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ 6751 /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
6742 pdev->needs_freset = 1; 6752 pdev->needs_freset = 1;
6743 6753
6744 if (is_offload(adapter)) 6754 if (is_offload(adapter))
6745 attach_ulds(adapter); 6755 attach_ulds(adapter);
6746 6756
6747 sriov: 6757 sriov:
6748 #ifdef CONFIG_PCI_IOV 6758 #ifdef CONFIG_PCI_IOV
6749 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) 6759 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
6750 if (pci_enable_sriov(pdev, num_vf[func]) == 0) 6760 if (pci_enable_sriov(pdev, num_vf[func]) == 0)
6751 dev_info(&pdev->dev, 6761 dev_info(&pdev->dev,
6752 "instantiated %u virtual functions\n", 6762 "instantiated %u virtual functions\n",
6753 num_vf[func]); 6763 num_vf[func]);
6754 #endif 6764 #endif
6755 return 0; 6765 return 0;
6756 6766
6757 out_free_dev: 6767 out_free_dev:
6758 free_some_resources(adapter); 6768 free_some_resources(adapter);
6759 out_unmap_bar: 6769 out_unmap_bar:
6760 if (!is_t4(adapter->params.chip)) 6770 if (!is_t4(adapter->params.chip))
6761 iounmap(adapter->bar2); 6771 iounmap(adapter->bar2);
6762 out_free_adapter: 6772 out_free_adapter:
6763 if (adapter->workq) 6773 if (adapter->workq)
6764 destroy_workqueue(adapter->workq); 6774 destroy_workqueue(adapter->workq);
6765 6775
6766 kfree(adapter); 6776 kfree(adapter);
6767 out_unmap_bar0: 6777 out_unmap_bar0:
6768 iounmap(regs); 6778 iounmap(regs);
6769 out_disable_device: 6779 out_disable_device:
6770 pci_disable_pcie_error_reporting(pdev); 6780 pci_disable_pcie_error_reporting(pdev);
6771 pci_disable_device(pdev); 6781 pci_disable_device(pdev);
6772 out_release_regions: 6782 out_release_regions:
6773 pci_release_regions(pdev); 6783 pci_release_regions(pdev);
6774 return err; 6784 return err;
6775 } 6785 }
6776 6786
6777 static void remove_one(struct pci_dev *pdev) 6787 static void remove_one(struct pci_dev *pdev)
6778 { 6788 {
6779 struct adapter *adapter = pci_get_drvdata(pdev); 6789 struct adapter *adapter = pci_get_drvdata(pdev);
6780 6790
6781 #ifdef CONFIG_PCI_IOV 6791 #ifdef CONFIG_PCI_IOV
6782 pci_disable_sriov(pdev); 6792 pci_disable_sriov(pdev);
6783 6793
6784 #endif 6794 #endif
6785 6795
6786 if (adapter) { 6796 if (adapter) {
6787 int i; 6797 int i;
6788 6798
6789 /* Tear down per-adapter Work Queue first since it can contain 6799 /* Tear down per-adapter Work Queue first since it can contain
6790 * references to our adapter data structure. 6800 * references to our adapter data structure.
6791 */ 6801 */
6792 destroy_workqueue(adapter->workq); 6802 destroy_workqueue(adapter->workq);
6793 6803
6794 if (is_offload(adapter)) 6804 if (is_offload(adapter))
6795 detach_ulds(adapter); 6805 detach_ulds(adapter);
6796 6806
6797 for_each_port(adapter, i) 6807 for_each_port(adapter, i)
6798 if (adapter->port[i]->reg_state == NETREG_REGISTERED) 6808 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
6799 unregister_netdev(adapter->port[i]); 6809 unregister_netdev(adapter->port[i]);
6800 6810
6801 debugfs_remove_recursive(adapter->debugfs_root); 6811 debugfs_remove_recursive(adapter->debugfs_root);
6802 6812
6803 /* If we allocated filters, free up state associated with any 6813 /* If we allocated filters, free up state associated with any
6804 * valid filters ... 6814 * valid filters ...
6805 */ 6815 */
6806 if (adapter->tids.ftid_tab) { 6816 if (adapter->tids.ftid_tab) {
6807 struct filter_entry *f = &adapter->tids.ftid_tab[0]; 6817 struct filter_entry *f = &adapter->tids.ftid_tab[0];
6808 for (i = 0; i < (adapter->tids.nftids + 6818 for (i = 0; i < (adapter->tids.nftids +
6809 adapter->tids.nsftids); i++, f++) 6819 adapter->tids.nsftids); i++, f++)
6810 if (f->valid) 6820 if (f->valid)
6811 clear_filter(adapter, f); 6821 clear_filter(adapter, f);
6812 } 6822 }
6813 6823
6814 if (adapter->flags & FULL_INIT_DONE) 6824 if (adapter->flags & FULL_INIT_DONE)
6815 cxgb_down(adapter); 6825 cxgb_down(adapter);
6816 6826
6817 free_some_resources(adapter); 6827 free_some_resources(adapter);
6818 iounmap(adapter->regs); 6828 iounmap(adapter->regs);
6819 if (!is_t4(adapter->params.chip)) 6829 if (!is_t4(adapter->params.chip))
6820 iounmap(adapter->bar2); 6830 iounmap(adapter->bar2);
6821 pci_disable_pcie_error_reporting(pdev); 6831 pci_disable_pcie_error_reporting(pdev);
6822 if ((adapter->flags & DEV_ENABLED)) { 6832 if ((adapter->flags & DEV_ENABLED)) {
6823 pci_disable_device(pdev); 6833 pci_disable_device(pdev);
6824 adapter->flags &= ~DEV_ENABLED; 6834 adapter->flags &= ~DEV_ENABLED;
6825 } 6835 }
6826 pci_release_regions(pdev); 6836 pci_release_regions(pdev);
6827 synchronize_rcu(); 6837 synchronize_rcu();
6828 kfree(adapter); 6838 kfree(adapter);
6829 } else 6839 } else
6830 pci_release_regions(pdev); 6840 pci_release_regions(pdev);
6831 } 6841 }
6832 6842
6833 static struct pci_driver cxgb4_driver = { 6843 static struct pci_driver cxgb4_driver = {
6834 .name = KBUILD_MODNAME, 6844 .name = KBUILD_MODNAME,
6835 .id_table = cxgb4_pci_tbl, 6845 .id_table = cxgb4_pci_tbl,
6836 .probe = init_one, 6846 .probe = init_one,
6837 .remove = remove_one, 6847 .remove = remove_one,
6838 .shutdown = remove_one, 6848 .shutdown = remove_one,
6839 .err_handler = &cxgb4_eeh, 6849 .err_handler = &cxgb4_eeh,
6840 }; 6850 };
6841 6851
6842 static int __init cxgb4_init_module(void) 6852 static int __init cxgb4_init_module(void)
6843 { 6853 {
6844 int ret; 6854 int ret;
6845 6855
6846 /* Debugfs support is optional, just warn if this fails */ 6856 /* Debugfs support is optional, just warn if this fails */
6847 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); 6857 cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
6848 if (!cxgb4_debugfs_root) 6858 if (!cxgb4_debugfs_root)
6849 pr_warn("could not create debugfs entry, continuing\n"); 6859 pr_warn("could not create debugfs entry, continuing\n");
6850 6860
6851 ret = pci_register_driver(&cxgb4_driver); 6861 ret = pci_register_driver(&cxgb4_driver);
6852 if (ret < 0) 6862 if (ret < 0)
6853 debugfs_remove(cxgb4_debugfs_root); 6863 debugfs_remove(cxgb4_debugfs_root);
6854 6864
6855 register_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6865 register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6856 6866
6857 return ret; 6867 return ret;
6858 } 6868 }
6859 6869
6860 static void __exit cxgb4_cleanup_module(void) 6870 static void __exit cxgb4_cleanup_module(void)
6861 { 6871 {
6862 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier); 6872 unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
6863 pci_unregister_driver(&cxgb4_driver); 6873 pci_unregister_driver(&cxgb4_driver);
6864 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */ 6874 debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
6865 } 6875 }
6866 6876
6867 module_init(cxgb4_init_module); 6877 module_init(cxgb4_init_module);
6868 module_exit(cxgb4_cleanup_module); 6878 module_exit(cxgb4_cleanup_module);
6869 6879
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
1 /* 1 /*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux. 2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 * 3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved. 4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 * 5 *
6 * This software is available to you under a choice of one of two 6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU 7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file 8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the 9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below: 10 * OpenIB.org BSD license below:
11 * 11 *
12 * Redistribution and use in source and binary forms, with or 12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following 13 * without modification, are permitted provided that the following
14 * conditions are met: 14 * conditions are met:
15 * 15 *
16 * - Redistributions of source code must retain the above 16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following 17 * copyright notice, this list of conditions and the following
18 * disclaimer. 18 * disclaimer.
19 * 19 *
20 * - Redistributions in binary form must reproduce the above 20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following 21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials 22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution. 23 * provided with the distribution.
24 * 24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE. 32 * SOFTWARE.
33 */ 33 */
34 34
35 #include <linux/delay.h> 35 #include <linux/delay.h>
36 #include "cxgb4.h" 36 #include "cxgb4.h"
37 #include "t4_regs.h" 37 #include "t4_regs.h"
38 #include "t4fw_api.h" 38 #include "t4fw_api.h"
39 39
40 static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
41 const u8 *fw_data, unsigned int size, int force);
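
This forward declaration lets earlier code in this file reach t4_fw_upgrade(), which wraps the raw t4_load_fw() FLASH write in a halt/restart handshake with the running firmware. A rough sketch of that sequence, assuming the t4_fw_halt()/t4_fw_restart() helpers defined elsewhere in this file (the function name below is hypothetical; the real definition appears later in the file):

	static int t4_fw_upgrade_sketch(struct adapter *adap, unsigned int mbox,
					const u8 *fw_data, unsigned int size,
					int force)
	{
		int ret;

		ret = t4_fw_halt(adap, mbox, force);	/* park the running firmware */
		if (ret < 0 && !force)
			return ret;

		ret = t4_load_fw(adap, fw_data, size);	/* FLASH write is now safe */
		if (ret < 0)
			return ret;

		return t4_fw_restart(adap, mbox, 1);	/* bring the firmware back up */
	}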
42 /** 40 /**
43 * t4_wait_op_done_val - wait until an operation is completed 41 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation 42 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion 43 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion 44 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed 45 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations 46 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations 47 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time 48 * @valp: where to store the value of the register at completion time
51 * 49 *
52 * Wait until an operation is completed by checking a bit in a register 50 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register 51 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the 52 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise. 53 * operation completes and -EAGAIN otherwise.
56 */ 54 */
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 55 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 int polarity, int attempts, int delay, u32 *valp) 56 int polarity, int attempts, int delay, u32 *valp)
59 { 57 {
60 while (1) { 58 while (1) {
61 u32 val = t4_read_reg(adapter, reg); 59 u32 val = t4_read_reg(adapter, reg);
62 60
63 if (!!(val & mask) == polarity) { 61 if (!!(val & mask) == polarity) {
64 if (valp) 62 if (valp)
65 *valp = val; 63 *valp = val;
66 return 0; 64 return 0;
67 } 65 }
68 if (--attempts == 0) 66 if (--attempts == 0)
69 return -EAGAIN; 67 return -EAGAIN;
70 if (delay) 68 if (delay)
71 udelay(delay); 69 udelay(delay);
72 } 70 }
73 } 71 }
74 72
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask, 73 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 int polarity, int attempts, int delay) 74 int polarity, int attempts, int delay)
77 { 75 {
78 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts, 76 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
79 delay, NULL); 77 delay, NULL);
80 } 78 }
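
A minimal usage sketch; MY_CTL_REG and MY_BUSY_BIT are placeholder names, not real driver symbols:

	/* Poll until the (hypothetical) busy bit deasserts: at most
	 * 5 attempts, 10us apart, so roughly 50us before giving up.
	 */
	if (t4_wait_op_done(adap, MY_CTL_REG, MY_BUSY_BIT, 0, 5, 10))
		return -ETIMEDOUT;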
81 79
82 /** 80 /**
83 * t4_set_reg_field - set a register field to a value 81 * t4_set_reg_field - set a register field to a value
84 * @adapter: the adapter to program 82 * @adapter: the adapter to program
85 * @addr: the register address 83 * @addr: the register address
86 * @mask: specifies the portion of the register to modify 84 * @mask: specifies the portion of the register to modify
87 * @val: the new value for the register field 85 * @val: the new value for the register field
88 * 86 *
89 * Sets a register field specified by the supplied mask to the 87 * Sets a register field specified by the supplied mask to the
90 * given value. 88 * given value.
91 */ 89 */
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask, 90 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
93 u32 val) 91 u32 val)
94 { 92 {
95 u32 v = t4_read_reg(adapter, addr) & ~mask; 93 u32 v = t4_read_reg(adapter, addr) & ~mask;
96 94
97 t4_write_reg(adapter, addr, v | val); 95 t4_write_reg(adapter, addr, v | val);
98 (void) t4_read_reg(adapter, addr); /* flush */ 96 (void) t4_read_reg(adapter, addr); /* flush */
99 } 97 }
100 98
101 /** 99 /**
102 * t4_read_indirect - read indirectly addressed registers 100 * t4_read_indirect - read indirectly addressed registers
103 * @adap: the adapter 101 * @adap: the adapter
104 * @addr_reg: register holding the indirect address 102 * @addr_reg: register holding the indirect address
105 * @data_reg: register holding the value of the indirect register 103 * @data_reg: register holding the value of the indirect register
106 * @vals: where the read register values are stored 104 * @vals: where the read register values are stored
107 * @nregs: how many indirect registers to read 105 * @nregs: how many indirect registers to read
108 * @start_idx: index of first indirect register to read 106 * @start_idx: index of first indirect register to read
109 * 107 *
110 * Reads registers that are accessed indirectly through an address/data 108 * Reads registers that are accessed indirectly through an address/data
111 * register pair. 109 * register pair.
112 */ 110 */
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 111 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 unsigned int data_reg, u32 *vals, 112 unsigned int data_reg, u32 *vals,
115 unsigned int nregs, unsigned int start_idx) 113 unsigned int nregs, unsigned int start_idx)
116 { 114 {
117 while (nregs--) { 115 while (nregs--) {
118 t4_write_reg(adap, addr_reg, start_idx); 116 t4_write_reg(adap, addr_reg, start_idx);
119 *vals++ = t4_read_reg(adap, data_reg); 117 *vals++ = t4_read_reg(adap, data_reg);
120 start_idx++; 118 start_idx++;
121 } 119 }
122 } 120 }
123 121
124 /** 122 /**
125 * t4_write_indirect - write indirectly addressed registers 123 * t4_write_indirect - write indirectly addressed registers
126 * @adap: the adapter 124 * @adap: the adapter
127 * @addr_reg: register holding the indirect addresses 125 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers 126 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write 127 * @vals: values to write
130 * @nregs: how many indirect registers to write 128 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write 129 * @start_idx: address of first indirect register to write
132 * 130 *
133 * Writes a sequential block of registers that are accessed indirectly 131 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair. 132 * through an address/data register pair.
135 */ 133 */
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 134 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals, 135 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx) 136 unsigned int nregs, unsigned int start_idx)
139 { 137 {
140 while (nregs--) { 138 while (nregs--) {
141 t4_write_reg(adap, addr_reg, start_idx++); 139 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++); 140 t4_write_reg(adap, data_reg, *vals++);
143 } 141 }
144 } 142 }
145 143
146 /* 144 /*
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor 145 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're 146 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our 147 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses. 148 * Configuration Space accesses.
151 */ 149 */
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) 150 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
153 { 151 {
154 u32 req = ENABLE | FUNCTION(adap->fn) | reg; 152 u32 req = ENABLE | FUNCTION(adap->fn) | reg;
155 153
156 if (is_t4(adap->params.chip)) 154 if (is_t4(adap->params.chip))
157 req |= F_LOCALCFG; 155 req |= F_LOCALCFG;
158 156
159 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req); 157 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
160 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA); 158 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
161 159
162 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a 160 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
163 * Configuration Space read. (None of the other fields matter when 161 * Configuration Space read. (None of the other fields matter when
164 * ENABLE is 0 so a simple register write is easier than a 162 * ENABLE is 0 so a simple register write is easier than a
165 * read-modify-write via t4_set_reg_field().) 163 * read-modify-write via t4_set_reg_field().)
166 */ 164 */
167 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0); 165 t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
168 } 166 }
169 167
170 /* 168 /*
171 * t4_report_fw_error - report firmware error 169 * t4_report_fw_error - report firmware error
172 * @adap: the adapter 170 * @adap: the adapter
173 * 171 *
174 * The adapter firmware can indicate error conditions to the host. 172 * The adapter firmware can indicate error conditions to the host.
175 * If the firmware has indicated an error, print out the reason for 173 * If the firmware has indicated an error, print out the reason for
176 * the firmware error. 174 * the firmware error.
177 */ 175 */
178 static void t4_report_fw_error(struct adapter *adap) 176 static void t4_report_fw_error(struct adapter *adap)
179 { 177 {
180 static const char *const reason[] = { 178 static const char *const reason[] = {
181 "Crash", /* PCIE_FW_EVAL_CRASH */ 179 "Crash", /* PCIE_FW_EVAL_CRASH */
182 "During Device Preparation", /* PCIE_FW_EVAL_PREP */ 180 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
183 "During Device Configuration", /* PCIE_FW_EVAL_CONF */ 181 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
184 "During Device Initialization", /* PCIE_FW_EVAL_INIT */ 182 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
185 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ 183 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
186 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ 184 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
187 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ 185 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
188 "Reserved", /* reserved */ 186 "Reserved", /* reserved */
189 }; 187 };
190 u32 pcie_fw; 188 u32 pcie_fw;
191 189
192 pcie_fw = t4_read_reg(adap, MA_PCIE_FW); 190 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
193 if (pcie_fw & FW_PCIE_FW_ERR) 191 if (pcie_fw & FW_PCIE_FW_ERR)
194 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", 192 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
195 reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]); 193 reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]);
196 } 194 }
197 195
198 /* 196 /*
199 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 197 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
200 */ 198 */
201 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, 199 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
202 u32 mbox_addr) 200 u32 mbox_addr)
203 { 201 {
204 for ( ; nflit; nflit--, mbox_addr += 8) 202 for ( ; nflit; nflit--, mbox_addr += 8)
205 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr)); 203 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
206 } 204 }
207 205
208 /* 206 /*
209 * Handle a FW assertion reported in a mailbox. 207 * Handle a FW assertion reported in a mailbox.
210 */ 208 */
211 static void fw_asrt(struct adapter *adap, u32 mbox_addr) 209 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
212 { 210 {
213 struct fw_debug_cmd asrt; 211 struct fw_debug_cmd asrt;
214 212
215 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr); 213 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
216 dev_alert(adap->pdev_dev, 214 dev_alert(adap->pdev_dev,
217 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n", 215 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
218 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line), 216 asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
219 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y)); 217 ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
220 } 218 }
221 219
222 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg) 220 static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
223 { 221 {
224 dev_err(adap->pdev_dev, 222 dev_err(adap->pdev_dev,
225 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox, 223 "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
226 (unsigned long long)t4_read_reg64(adap, data_reg), 224 (unsigned long long)t4_read_reg64(adap, data_reg),
227 (unsigned long long)t4_read_reg64(adap, data_reg + 8), 225 (unsigned long long)t4_read_reg64(adap, data_reg + 8),
228 (unsigned long long)t4_read_reg64(adap, data_reg + 16), 226 (unsigned long long)t4_read_reg64(adap, data_reg + 16),
229 (unsigned long long)t4_read_reg64(adap, data_reg + 24), 227 (unsigned long long)t4_read_reg64(adap, data_reg + 24),
230 (unsigned long long)t4_read_reg64(adap, data_reg + 32), 228 (unsigned long long)t4_read_reg64(adap, data_reg + 32),
231 (unsigned long long)t4_read_reg64(adap, data_reg + 40), 229 (unsigned long long)t4_read_reg64(adap, data_reg + 40),
232 (unsigned long long)t4_read_reg64(adap, data_reg + 48), 230 (unsigned long long)t4_read_reg64(adap, data_reg + 48),
233 (unsigned long long)t4_read_reg64(adap, data_reg + 56)); 231 (unsigned long long)t4_read_reg64(adap, data_reg + 56));
234 } 232 }
235 233
236 /** 234 /**
237 * t4_wr_mbox_meat - send a command to FW through the given mailbox 235 * t4_wr_mbox_meat - send a command to FW through the given mailbox
238 * @adap: the adapter 236 * @adap: the adapter
239 * @mbox: index of the mailbox to use 237 * @mbox: index of the mailbox to use
240 * @cmd: the command to write 238 * @cmd: the command to write
241 * @size: command length in bytes 239 * @size: command length in bytes
242 * @rpl: where to optionally store the reply 240 * @rpl: where to optionally store the reply
243 * @sleep_ok: if true we may sleep while awaiting command completion 241 * @sleep_ok: if true we may sleep while awaiting command completion
244 * 242 *
245 * Sends the given command to FW through the selected mailbox and waits 243 * Sends the given command to FW through the selected mailbox and waits
246 * for the FW to execute the command. If @rpl is not %NULL it is used to 244 * for the FW to execute the command. If @rpl is not %NULL it is used to
247 * store the FW's reply to the command. The command and its optional 245 * store the FW's reply to the command. The command and its optional
248 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms 246 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
249 * to respond. @sleep_ok determines whether we may sleep while awaiting 247 * to respond. @sleep_ok determines whether we may sleep while awaiting
250 * the response. If sleeping is allowed we use progressive backoff 248 * the response. If sleeping is allowed we use progressive backoff
251 * otherwise we spin. 249 * otherwise we spin.
252 * 250 *
253 * The return value is 0 on success or a negative errno on failure. A 251 * The return value is 0 on success or a negative errno on failure. A
254 * failure can happen either because we are not able to execute the 252 * failure can happen either because we are not able to execute the
255 * command or FW executes it but signals an error. In the latter case 253 * command or FW executes it but signals an error. In the latter case
256 * the return value is the error code indicated by FW (negated). 254 * the return value is the error code indicated by FW (negated).
257 */ 255 */
258 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, 256 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
259 void *rpl, bool sleep_ok) 257 void *rpl, bool sleep_ok)
260 { 258 {
261 static const int delay[] = { 259 static const int delay[] = {
262 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 260 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
263 }; 261 };
264 262
265 u32 v; 263 u32 v;
266 u64 res; 264 u64 res;
267 int i, ms, delay_idx; 265 int i, ms, delay_idx;
268 const __be64 *p = cmd; 266 const __be64 *p = cmd;
269 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA); 267 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
270 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL); 268 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
271 269
272 if ((size & 15) || size > MBOX_LEN) 270 if ((size & 15) || size > MBOX_LEN)
273 return -EINVAL; 271 return -EINVAL;
274 272
275 /* 273 /*
276 * If the device is off-line, as in EEH, commands will time out. 274 * If the device is off-line, as in EEH, commands will time out.
277 * Fail them early so we don't waste time waiting. 275 * Fail them early so we don't waste time waiting.
278 */ 276 */
279 if (adap->pdev->error_state != pci_channel_io_normal) 277 if (adap->pdev->error_state != pci_channel_io_normal)
280 return -EIO; 278 return -EIO;
281 279
282 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); 280 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
283 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++) 281 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
284 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg)); 282 v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
285 283
286 if (v != MBOX_OWNER_DRV) 284 if (v != MBOX_OWNER_DRV)
287 return v ? -EBUSY : -ETIMEDOUT; 285 return v ? -EBUSY : -ETIMEDOUT;
288 286
289 for (i = 0; i < size; i += 8) 287 for (i = 0; i < size; i += 8)
290 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++)); 288 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
291 289
292 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 290 t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
293 t4_read_reg(adap, ctl_reg); /* flush write */ 291 t4_read_reg(adap, ctl_reg); /* flush write */
294 292
295 delay_idx = 0; 293 delay_idx = 0;
296 ms = delay[0]; 294 ms = delay[0];
297 295
298 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { 296 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
299 if (sleep_ok) { 297 if (sleep_ok) {
300 ms = delay[delay_idx]; /* last element may repeat */ 298 ms = delay[delay_idx]; /* last element may repeat */
301 if (delay_idx < ARRAY_SIZE(delay) - 1) 299 if (delay_idx < ARRAY_SIZE(delay) - 1)
302 delay_idx++; 300 delay_idx++;
303 msleep(ms); 301 msleep(ms);
304 } else 302 } else
305 mdelay(ms); 303 mdelay(ms);
306 304
307 v = t4_read_reg(adap, ctl_reg); 305 v = t4_read_reg(adap, ctl_reg);
308 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) { 306 if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
309 if (!(v & MBMSGVALID)) { 307 if (!(v & MBMSGVALID)) {
310 t4_write_reg(adap, ctl_reg, 0); 308 t4_write_reg(adap, ctl_reg, 0);
311 continue; 309 continue;
312 } 310 }
313 311
314 res = t4_read_reg64(adap, data_reg); 312 res = t4_read_reg64(adap, data_reg);
315 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) { 313 if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
316 fw_asrt(adap, data_reg); 314 fw_asrt(adap, data_reg);
317 res = FW_CMD_RETVAL(EIO); 315 res = FW_CMD_RETVAL(EIO);
318 } else if (rpl) 316 } else if (rpl)
319 get_mbox_rpl(adap, rpl, size / 8, data_reg); 317 get_mbox_rpl(adap, rpl, size / 8, data_reg);
320 318
321 if (FW_CMD_RETVAL_GET((int)res)) 319 if (FW_CMD_RETVAL_GET((int)res))
322 dump_mbox(adap, mbox, data_reg); 320 dump_mbox(adap, mbox, data_reg);
323 t4_write_reg(adap, ctl_reg, 0); 321 t4_write_reg(adap, ctl_reg, 0);
324 return -FW_CMD_RETVAL_GET((int)res); 322 return -FW_CMD_RETVAL_GET((int)res);
325 } 323 }
326 } 324 }
327 325
328 dump_mbox(adap, mbox, data_reg); 326 dump_mbox(adap, mbox, data_reg);
329 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", 327 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
330 *(const u8 *)cmd, mbox); 328 *(const u8 *)cmd, mbox);
331 t4_report_fw_error(adap); 329 t4_report_fw_error(adap);
332 return -ETIMEDOUT; 330 return -ETIMEDOUT;
333 } 331 }
334 332
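For illustration, a minimal caller sketch (not part of this commit; the helper name example_fw_cmd_rpl() is hypothetical) showing how t4_wr_mbox_meat() above is meant to be used:

	/* Hypothetical helper: issue a firmware command from the driver's own
	 * mailbox and overwrite the command buffer with the firmware's reply.
	 * Per the size check above, len must be a multiple of 16 bytes and no
	 * larger than MBOX_LEN. sleep_ok = true selects msleep() backoff over
	 * mdelay() spinning while waiting for the mailbox.
	 */
	static int example_fw_cmd_rpl(struct adapter *adap, void *cmd, int len)
	{
		return t4_wr_mbox_meat(adap, adap->mbox, cmd, len, cmd, true);
	}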
335 /** 333 /**
336 * t4_mc_read - read from MC through backdoor accesses 334 * t4_mc_read - read from MC through backdoor accesses
337 * @adap: the adapter 335 * @adap: the adapter
338 * @addr: address of first byte requested 336 * @addr: address of first byte requested
339 * @idx: which MC to access 337 * @idx: which MC to access
340 * @data: buffer for the 64 bytes of data that cover the requested address 338 * @data: buffer for the 64 bytes of data that cover the requested address
341 * @ecc: where to store the corresponding 64-bit ECC word 339 * @ecc: where to store the corresponding 64-bit ECC word
342 * 340 *
343 * Read 64 bytes of data from MC starting at a 64-byte-aligned address 341 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
344 * that covers the requested address @addr. If @ecc is not %NULL it 342 * that covers the requested address @addr. If @ecc is not %NULL it
345 * is assigned the 64-bit ECC word for the read data. 343 * is assigned the 64-bit ECC word for the read data.
346 */ 344 */
347 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 345 int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
348 { 346 {
349 int i; 347 int i;
350 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len; 348 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
351 u32 mc_bist_status_rdata, mc_bist_data_pattern; 349 u32 mc_bist_status_rdata, mc_bist_data_pattern;
352 350
353 if (is_t4(adap->params.chip)) { 351 if (is_t4(adap->params.chip)) {
354 mc_bist_cmd = MC_BIST_CMD; 352 mc_bist_cmd = MC_BIST_CMD;
355 mc_bist_cmd_addr = MC_BIST_CMD_ADDR; 353 mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
356 mc_bist_cmd_len = MC_BIST_CMD_LEN; 354 mc_bist_cmd_len = MC_BIST_CMD_LEN;
357 mc_bist_status_rdata = MC_BIST_STATUS_RDATA; 355 mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
358 mc_bist_data_pattern = MC_BIST_DATA_PATTERN; 356 mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
359 } else { 357 } else {
360 mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx); 358 mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
361 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx); 359 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
362 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx); 360 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
363 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx); 361 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
364 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx); 362 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
365 } 363 }
366 364
367 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST) 365 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
368 return -EBUSY; 366 return -EBUSY;
369 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU); 367 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
370 t4_write_reg(adap, mc_bist_cmd_len, 64); 368 t4_write_reg(adap, mc_bist_cmd_len, 64);
371 t4_write_reg(adap, mc_bist_data_pattern, 0xc); 369 t4_write_reg(adap, mc_bist_data_pattern, 0xc);
372 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST | 370 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
373 BIST_CMD_GAP(1)); 371 BIST_CMD_GAP(1));
374 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1); 372 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
375 if (i) 373 if (i)
376 return i; 374 return i;
377 375
378 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i) 376 #define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
379 377
380 for (i = 15; i >= 0; i--) 378 for (i = 15; i >= 0; i--)
381 *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); 379 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
382 if (ecc) 380 if (ecc)
383 *ecc = t4_read_reg64(adap, MC_DATA(16)); 381 *ecc = t4_read_reg64(adap, MC_DATA(16));
384 #undef MC_DATA 382 #undef MC_DATA
385 return 0; 383 return 0;
386 } 384 }
387 385
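A short usage sketch (hypothetical helper name, assuming the surrounding driver context); t4_edc_read() below follows the same calling pattern with an EDC index instead:

	/* Read one 64-byte line from MC 0 and fetch its ECC word. t4_mc_read()
	 * aligns the address down to a 64-byte boundary itself (addr & ~0x3fU),
	 * so any address within the line works.
	 */
	static int example_read_mc_line(struct adapter *adap, u32 addr)
	{
		__be32 data[16];	/* one 64-byte line, as big-endian words */
		u64 ecc;
		int ret;

		ret = t4_mc_read(adap, 0, addr, data, &ecc);
		if (ret)
			return ret;	/* e.g. -EBUSY if a BIST is already running */
		/* data[] now holds the line covering addr; ecc holds its ECC word */
		return 0;
	}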
388 /** 386 /**
389 * t4_edc_read - read from EDC through backdoor accesses 387 * t4_edc_read - read from EDC through backdoor accesses
390 * @adap: the adapter 388 * @adap: the adapter
391 * @idx: which EDC to access 389 * @idx: which EDC to access
392 * @addr: address of first byte requested 390 * @addr: address of first byte requested
393 * @data: buffer for the 64 bytes of data that cover the requested address 391 * @data: buffer for the 64 bytes of data that cover the requested address
394 * @ecc: where to store the corresponding 64-bit ECC word 392 * @ecc: where to store the corresponding 64-bit ECC word
395 * 393 *
396 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address 394 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
397 * that covers the requested address @addr. If @ecc is not %NULL it 395 * that covers the requested address @addr. If @ecc is not %NULL it
398 * is assigned the 64-bit ECC word for the read data. 396 * is assigned the 64-bit ECC word for the read data.
399 */ 397 */
400 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 398 int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
401 { 399 {
402 int i; 400 int i;
403 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len; 401 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
404 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata; 402 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
405 403
406 if (is_t4(adap->params.chip)) { 404 if (is_t4(adap->params.chip)) {
407 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx); 405 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
408 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx); 406 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
409 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx); 407 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
410 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN, 408 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
411 idx); 409 idx);
412 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA, 410 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
413 idx); 411 idx);
414 } else { 412 } else {
415 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx); 413 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
416 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx); 414 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
417 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx); 415 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
418 edc_bist_cmd_data_pattern = 416 edc_bist_cmd_data_pattern =
419 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx); 417 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
420 edc_bist_status_rdata = 418 edc_bist_status_rdata =
421 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx); 419 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
422 } 420 }
423 421
424 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST) 422 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
425 return -EBUSY; 423 return -EBUSY;
426 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU); 424 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
427 t4_write_reg(adap, edc_bist_cmd_len, 64); 425 t4_write_reg(adap, edc_bist_cmd_len, 64);
428 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc); 426 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
429 t4_write_reg(adap, edc_bist_cmd, 427 t4_write_reg(adap, edc_bist_cmd,
430 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); 428 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
431 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1); 429 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
432 if (i) 430 if (i)
433 return i; 431 return i;
434 432
435 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i)) 433 #define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
436 434
437 for (i = 15; i >= 0; i--) 435 for (i = 15; i >= 0; i--)
438 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); 436 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
439 if (ecc) 437 if (ecc)
440 *ecc = t4_read_reg64(adap, EDC_DATA(16)); 438 *ecc = t4_read_reg64(adap, EDC_DATA(16));
441 #undef EDC_DATA 439 #undef EDC_DATA
442 return 0; 440 return 0;
443 } 441 }
444 442
445 /** 443 /**
446 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window 444 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
447 * @adap: the adapter 445 * @adap: the adapter
448 * @win: PCI-E Memory Window to use 446 * @win: PCI-E Memory Window to use
449 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC 447 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
450 * @addr: address within indicated memory type 448 * @addr: address within indicated memory type
451 * @len: amount of memory to transfer 449 * @len: amount of memory to transfer
452 * @buf: host memory buffer 450 * @buf: host memory buffer
453 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) 451 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
454 * 452 *
455 * Reads/writes an [almost] arbitrary memory region in the firmware: the 453 * Reads/writes an [almost] arbitrary memory region in the firmware: the
456 * firmware memory address and host buffer must be aligned on 32-bit 454 * firmware memory address and host buffer must be aligned on 32-bit
457 * boundaries; the length may be arbitrary. The memory is transferred as 455 * boundaries; the length may be arbitrary. The memory is transferred as
458 * a raw byte sequence from/to the firmware's memory. If this memory 456 * a raw byte sequence from/to the firmware's memory. If this memory
459 * contains data structures which contain multi-byte integers, it's the 457 * contains data structures which contain multi-byte integers, it's the
460 * caller's responsibility to perform appropriate byte order conversions. 458 * caller's responsibility to perform appropriate byte order conversions.
461 */ 459 */
462 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, 460 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
463 u32 len, __be32 *buf, int dir) 461 u32 len, __be32 *buf, int dir)
464 { 462 {
465 u32 pos, offset, resid, memoffset; 463 u32 pos, offset, resid, memoffset;
466 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; 464 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
467 465
468 /* Argument sanity checks ... 466 /* Argument sanity checks ...
469 */ 467 */
470 if (addr & 0x3) 468 if (addr & 0x3)
471 return -EINVAL; 469 return -EINVAL;
472 470
473 /* It's convenient to be able to handle lengths which aren't a 471 /* It's convenient to be able to handle lengths which aren't a
474 * multiple of 32-bits because we often end up transferring files to 472 * multiple of 32-bits because we often end up transferring files to
475 * the firmware. So we'll handle that by normalizing the length here 473 * the firmware. So we'll handle that by normalizing the length here
476 * and then handling any residual transfer at the end. 474 * and then handling any residual transfer at the end.
477 */ 475 */
478 resid = len & 0x3; 476 resid = len & 0x3;
479 len -= resid; 477 len -= resid;
480 478
481 /* Offset into the region of memory which is being accessed 479 /* Offset into the region of memory which is being accessed
482 * MEM_EDC0 = 0 480 * MEM_EDC0 = 0
483 * MEM_EDC1 = 1 481 * MEM_EDC1 = 1
484 * MEM_MC = 2 -- T4 482 * MEM_MC = 2 -- T4
485 * MEM_MC0 = 2 -- For T5 483 * MEM_MC0 = 2 -- For T5
486 * MEM_MC1 = 3 -- For T5 484 * MEM_MC1 = 3 -- For T5
487 */ 485 */
488 edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)); 486 edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
489 if (mtype != MEM_MC1) 487 if (mtype != MEM_MC1)
490 memoffset = (mtype * (edc_size * 1024 * 1024)); 488 memoffset = (mtype * (edc_size * 1024 * 1024));
491 else { 489 else {
492 mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, 490 mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
493 MA_EXT_MEMORY_BAR)); 491 MA_EXT_MEMORY_BAR));
494 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024; 492 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
495 } 493 }
496 494
497 /* Determine the PCIE_MEM_ACCESS_OFFSET */ 495 /* Determine the PCIE_MEM_ACCESS_OFFSET */
498 addr = addr + memoffset; 496 addr = addr + memoffset;
499 497
500 /* Each PCI-E Memory Window is programmed with a window size -- or 498 /* Each PCI-E Memory Window is programmed with a window size -- or
501 * "aperture" -- which controls the granularity of its mapping onto 499 * "aperture" -- which controls the granularity of its mapping onto
502 * adapter memory. We need to grab that aperture in order to know 500 * adapter memory. We need to grab that aperture in order to know
503 * how to use the specified window. The window is also programmed 501 * how to use the specified window. The window is also programmed
504 * with the base address of the Memory Window in BAR0's address 502 * with the base address of the Memory Window in BAR0's address
505 * space. For T4 this is an absolute PCI-E Bus Address. For T5 503 * space. For T4 this is an absolute PCI-E Bus Address. For T5
506 * the address is relative to BAR0. 504 * the address is relative to BAR0.
507 */ 505 */
508 mem_reg = t4_read_reg(adap, 506 mem_reg = t4_read_reg(adap,
509 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 507 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
510 win)); 508 win));
511 mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10); 509 mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
512 mem_base = GET_PCIEOFST(mem_reg) << 10; 510 mem_base = GET_PCIEOFST(mem_reg) << 10;
513 if (is_t4(adap->params.chip)) 511 if (is_t4(adap->params.chip))
514 mem_base -= adap->t4_bar0; 512 mem_base -= adap->t4_bar0;
515 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn); 513 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
516 514
517 /* Calculate our initial PCI-E Memory Window Position and Offset into 515 /* Calculate our initial PCI-E Memory Window Position and Offset into
518 * that Window. 516 * that Window.
519 */ 517 */
520 pos = addr & ~(mem_aperture-1); 518 pos = addr & ~(mem_aperture-1);
521 offset = addr - pos; 519 offset = addr - pos;
522 520
523 /* Set up initial PCI-E Memory Window to cover the start of our 521 /* Set up initial PCI-E Memory Window to cover the start of our
524 * transfer. (Read it back to ensure that changes propagate before we 522 * transfer. (Read it back to ensure that changes propagate before we
525 * attempt to use the new value.) 523 * attempt to use the new value.)
526 */ 524 */
527 t4_write_reg(adap, 525 t4_write_reg(adap,
528 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win), 526 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
529 pos | win_pf); 527 pos | win_pf);
530 t4_read_reg(adap, 528 t4_read_reg(adap,
531 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win)); 529 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
532 530
533 /* Transfer data to/from the adapter as long as there's an integral 531 /* Transfer data to/from the adapter as long as there's an integral
534 * number of 32-bit transfers to complete. 532 * number of 32-bit transfers to complete.
535 */ 533 */
536 while (len > 0) { 534 while (len > 0) {
537 if (dir == T4_MEMORY_READ) 535 if (dir == T4_MEMORY_READ)
538 *buf++ = (__force __be32) t4_read_reg(adap, 536 *buf++ = (__force __be32) t4_read_reg(adap,
539 mem_base + offset); 537 mem_base + offset);
540 else 538 else
541 t4_write_reg(adap, mem_base + offset, 539 t4_write_reg(adap, mem_base + offset,
542 (__force u32) *buf++); 540 (__force u32) *buf++);
543 offset += sizeof(__be32); 541 offset += sizeof(__be32);
544 len -= sizeof(__be32); 542 len -= sizeof(__be32);
545 543
546 /* If we've reached the end of our current window aperture, 544 /* If we've reached the end of our current window aperture,
547 * move the PCI-E Memory Window on to the next. Note that 545 * move the PCI-E Memory Window on to the next. Note that
548 * doing this here, even after "len" has dropped to 0, lets us set up 546 * doing this here, even after "len" has dropped to 0, lets us set up
549 * the PCI-E Memory Window for a possible final residual 547 * the PCI-E Memory Window for a possible final residual
550 * transfer below ... 548 * transfer below ...
551 */ 549 */
552 if (offset == mem_aperture) { 550 if (offset == mem_aperture) {
553 pos += mem_aperture; 551 pos += mem_aperture;
554 offset = 0; 552 offset = 0;
555 t4_write_reg(adap, 553 t4_write_reg(adap,
556 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 554 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
557 win), pos | win_pf); 555 win), pos | win_pf);
558 t4_read_reg(adap, 556 t4_read_reg(adap,
559 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 557 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
560 win)); 558 win));
561 } 559 }
562 } 560 }
563 561
564 /* If the original transfer had a length which wasn't a multiple of 562 /* If the original transfer had a length which wasn't a multiple of
565 * 32-bits, now's where we need to finish off the transfer of the 563 * 32-bits, now's where we need to finish off the transfer of the
566 * residual amount. The PCI-E Memory Window has already been moved 564 * residual amount. The PCI-E Memory Window has already been moved
567 * above (if necessary) to cover this final transfer. 565 * above (if necessary) to cover this final transfer.
568 */ 566 */
569 if (resid) { 567 if (resid) {
570 union { 568 union {
571 __be32 word; 569 __be32 word;
572 char byte[4]; 570 char byte[4];
573 } last; 571 } last;
574 unsigned char *bp; 572 unsigned char *bp;
575 int i; 573 int i;
576 574
577 if (dir == T4_MEMORY_READ) { 575 if (dir == T4_MEMORY_READ) {
578 last.word = (__force __be32) t4_read_reg(adap, 576 last.word = (__force __be32) t4_read_reg(adap,
579 mem_base + offset); 577 mem_base + offset);
580 for (bp = (unsigned char *)buf, i = resid; i < 4; i++) 578 for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
581 bp[i] = last.byte[i]; 579 bp[i] = last.byte[i];
582 } else { 580 } else {
583 last.word = *buf; 581 last.word = *buf;
584 for (i = resid; i < 4; i++) 582 for (i = resid; i < 4; i++)
585 last.byte[i] = 0; 583 last.byte[i] = 0;
586 t4_write_reg(adap, mem_base + offset, 584 t4_write_reg(adap, mem_base + offset,
587 (__force u32) last.word); 585 (__force u32) last.word);
588 } 586 }
589 } 587 }
590 588
591 return 0; 589 return 0;
592 } 590 }
593 591
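A minimal sketch of calling t4_memory_rw() (helper name hypothetical; window 0 and MEM_EDC0 are illustrative choices), exercising the arbitrary-length support described above:

	/* Read 'len' bytes from EDC0 into a 32-bit aligned host buffer. addr
	 * must be 32-bit aligned or t4_memory_rw() returns -EINVAL; len need
	 * not be a multiple of 4 thanks to the residual handling above.
	 */
	static int example_read_edc0(struct adapter *adap, u32 addr, u32 len,
				     __be32 *buf)
	{
		return t4_memory_rw(adap, 0, MEM_EDC0, addr, len, buf,
				    T4_MEMORY_READ);
	}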
594 #define EEPROM_STAT_ADDR 0x7bfc 592 #define EEPROM_STAT_ADDR 0x7bfc
595 #define VPD_BASE 0x400 593 #define VPD_BASE 0x400
596 #define VPD_BASE_OLD 0 594 #define VPD_BASE_OLD 0
597 #define VPD_LEN 1024 595 #define VPD_LEN 1024
598 #define CHELSIO_VPD_UNIQUE_ID 0x82 596 #define CHELSIO_VPD_UNIQUE_ID 0x82
599 597
600 /** 598 /**
601 * t4_seeprom_wp - enable/disable EEPROM write protection 599 * t4_seeprom_wp - enable/disable EEPROM write protection
602 * @adapter: the adapter 600 * @adapter: the adapter
603 * @enable: whether to enable or disable write protection 601 * @enable: whether to enable or disable write protection
604 * 602 *
605 * Enables or disables write protection on the serial EEPROM. 603 * Enables or disables write protection on the serial EEPROM.
606 */ 604 */
607 int t4_seeprom_wp(struct adapter *adapter, bool enable) 605 int t4_seeprom_wp(struct adapter *adapter, bool enable)
608 { 606 {
609 unsigned int v = enable ? 0xc : 0; 607 unsigned int v = enable ? 0xc : 0;
610 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v); 608 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
611 return ret < 0 ? ret : 0; 609 return ret < 0 ? ret : 0;
612 } 610 }
613 611
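A sketch of the usual pattern around t4_seeprom_wp() (hypothetical helper; offset semantics as in the pci_write_vpd() call above): drop write protection, perform the VPD write, then restore protection.

	static int example_vpd_update(struct adapter *adapter, int addr, u32 val)
	{
		int ret = t4_seeprom_wp(adapter, false);	/* drop protection */

		if (!ret) {
			/* pci_write_vpd() returns bytes written or -errno */
			ret = pci_write_vpd(adapter->pdev, addr, 4, &val);
			ret = ret < 0 ? ret : 0;
		}
		if (!ret)
			ret = t4_seeprom_wp(adapter, true);	/* restore it */
		return ret;
	}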
614 /** 612 /**
615 * get_vpd_params - read VPD parameters from VPD EEPROM 613 * get_vpd_params - read VPD parameters from VPD EEPROM
616 * @adapter: adapter to read 614 * @adapter: adapter to read
617 * @p: where to store the parameters 615 * @p: where to store the parameters
618 * 616 *
619 * Reads card parameters stored in VPD EEPROM. 617 * Reads card parameters stored in VPD EEPROM.
620 */ 618 */
621 int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 619 int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
622 { 620 {
623 u32 cclk_param, cclk_val; 621 u32 cclk_param, cclk_val;
624 int i, ret, addr; 622 int i, ret, addr;
625 int ec, sn, pn; 623 int ec, sn, pn;
626 u8 *vpd, csum; 624 u8 *vpd, csum;
627 unsigned int vpdr_len, kw_offset, id_len; 625 unsigned int vpdr_len, kw_offset, id_len;
628 626
629 vpd = vmalloc(VPD_LEN); 627 vpd = vmalloc(VPD_LEN);
630 if (!vpd) 628 if (!vpd)
631 return -ENOMEM; 629 return -ENOMEM;
632 630
633 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); 631 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
634 if (ret < 0) 632 if (ret < 0)
635 goto out; 633 goto out;
636 634
637 /* The VPD shall have a unique identifier specified by the PCI SIG. 635 /* The VPD shall have a unique identifier specified by the PCI SIG.
638 * For Chelsio adapters, the first byte of the VPD shall be 636 * For Chelsio adapters, the first byte of the VPD shall be
639 * CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software 637 * CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
640 * is expected to automatically put this entry at the 638 * is expected to automatically put this entry at the
641 * beginning of the VPD. 639 * beginning of the VPD.
642 */ 640 */
643 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; 641 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
644 642
645 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); 643 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
646 if (ret < 0) 644 if (ret < 0)
647 goto out; 645 goto out;
648 646
649 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) { 647 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
650 dev_err(adapter->pdev_dev, "missing VPD ID string\n"); 648 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
651 ret = -EINVAL; 649 ret = -EINVAL;
652 goto out; 650 goto out;
653 } 651 }
654 652
655 id_len = pci_vpd_lrdt_size(vpd); 653 id_len = pci_vpd_lrdt_size(vpd);
656 if (id_len > ID_LEN) 654 if (id_len > ID_LEN)
657 id_len = ID_LEN; 655 id_len = ID_LEN;
658 656
659 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA); 657 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
660 if (i < 0) { 658 if (i < 0) {
661 dev_err(adapter->pdev_dev, "missing VPD-R section\n"); 659 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
662 ret = -EINVAL; 660 ret = -EINVAL;
663 goto out; 661 goto out;
664 } 662 }
665 663
666 vpdr_len = pci_vpd_lrdt_size(&vpd[i]); 664 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
667 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE; 665 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
668 if (vpdr_len + kw_offset > VPD_LEN) { 666 if (vpdr_len + kw_offset > VPD_LEN) {
669 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len); 667 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
670 ret = -EINVAL; 668 ret = -EINVAL;
671 goto out; 669 goto out;
672 } 670 }
673 671
674 #define FIND_VPD_KW(var, name) do { \ 672 #define FIND_VPD_KW(var, name) do { \
675 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \ 673 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
676 if (var < 0) { \ 674 if (var < 0) { \
677 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \ 675 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
678 ret = -EINVAL; \ 676 ret = -EINVAL; \
679 goto out; \ 677 goto out; \
680 } \ 678 } \
681 var += PCI_VPD_INFO_FLD_HDR_SIZE; \ 679 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
682 } while (0) 680 } while (0)
683 681
684 FIND_VPD_KW(i, "RV"); 682 FIND_VPD_KW(i, "RV");
685 for (csum = 0; i >= 0; i--) 683 for (csum = 0; i >= 0; i--)
686 csum += vpd[i]; 684 csum += vpd[i];
687 685
688 if (csum) { 686 if (csum) {
689 dev_err(adapter->pdev_dev, 687 dev_err(adapter->pdev_dev,
690 "corrupted VPD EEPROM, actual csum %u\n", csum); 688 "corrupted VPD EEPROM, actual csum %u\n", csum);
691 ret = -EINVAL; 689 ret = -EINVAL;
692 goto out; 690 goto out;
693 } 691 }
694 692
695 FIND_VPD_KW(ec, "EC"); 693 FIND_VPD_KW(ec, "EC");
696 FIND_VPD_KW(sn, "SN"); 694 FIND_VPD_KW(sn, "SN");
697 FIND_VPD_KW(pn, "PN"); 695 FIND_VPD_KW(pn, "PN");
698 #undef FIND_VPD_KW 696 #undef FIND_VPD_KW
699 697
700 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len); 698 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
701 strim(p->id); 699 strim(p->id);
702 memcpy(p->ec, vpd + ec, EC_LEN); 700 memcpy(p->ec, vpd + ec, EC_LEN);
703 strim(p->ec); 701 strim(p->ec);
704 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); 702 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
705 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); 703 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
706 strim(p->sn); 704 strim(p->sn);
707 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE); 705 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
708 memcpy(p->pn, vpd + pn, min(i, PN_LEN)); 706 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
709 strim(p->pn); 707 strim(p->pn);
710 708
711 /* 709 /*
712 * Ask firmware for the Core Clock since it knows how to translate the 710 * Ask firmware for the Core Clock since it knows how to translate the
713 * Reference Clock ('V2') VPD field into a Core Clock value ... 711 * Reference Clock ('V2') VPD field into a Core Clock value ...
714 */ 712 */
715 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 713 cclk_param = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
716 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK)); 714 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK));
717 ret = t4_query_params(adapter, adapter->mbox, 0, 0, 715 ret = t4_query_params(adapter, adapter->mbox, 0, 0,
718 1, &cclk_param, &cclk_val); 716 1, &cclk_param, &cclk_val);
719 717
720 out: 718 out:
721 vfree(vpd); 719 vfree(vpd);
722 if (ret) 720 if (ret)
723 return ret; 721 return ret;
724 p->cclk = cclk_val; 722 p->cclk = cclk_val;
725 723
726 return 0; 724 return 0;
727 } 725 }
728 726
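The checksum rule enforced by the "RV" loop in get_vpd_params() above, isolated as a sketch (hypothetical helper; rv is the offset of the RV data byte, as FIND_VPD_KW() computes it): the RV byte is chosen so that all VPD bytes from the start up to and including it sum to zero mod 256.

	static bool example_vpd_csum_ok(const u8 *vpd, int rv)
	{
		u8 csum = 0;
		int i;

		for (i = rv; i >= 0; i--)
			csum += vpd[i];
		return csum == 0;	/* non-zero means corrupted VPD EEPROM */
	}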
729 /* serial flash and firmware constants */ 727 /* serial flash and firmware constants */
730 enum { 728 enum {
731 SF_ATTEMPTS = 10, /* max retries for SF operations */ 729 SF_ATTEMPTS = 10, /* max retries for SF operations */
732 730
733 /* flash command opcodes */ 731 /* flash command opcodes */
734 SF_PROG_PAGE = 2, /* program page */ 732 SF_PROG_PAGE = 2, /* program page */
735 SF_WR_DISABLE = 4, /* disable writes */ 733 SF_WR_DISABLE = 4, /* disable writes */
736 SF_RD_STATUS = 5, /* read status register */ 734 SF_RD_STATUS = 5, /* read status register */
737 SF_WR_ENABLE = 6, /* enable writes */ 735 SF_WR_ENABLE = 6, /* enable writes */
738 SF_RD_DATA_FAST = 0xb, /* read flash */ 736 SF_RD_DATA_FAST = 0xb, /* read flash */
739 SF_RD_ID = 0x9f, /* read ID */ 737 SF_RD_ID = 0x9f, /* read ID */
740 SF_ERASE_SECTOR = 0xd8, /* erase sector */ 738 SF_ERASE_SECTOR = 0xd8, /* erase sector */
741 739
742 FW_MAX_SIZE = 16 * SF_SEC_SIZE, 740 FW_MAX_SIZE = 16 * SF_SEC_SIZE,
743 }; 741 };
744 742
745 /** 743 /**
746 * sf1_read - read data from the serial flash 744 * sf1_read - read data from the serial flash
747 * @adapter: the adapter 745 * @adapter: the adapter
748 * @byte_cnt: number of bytes to read 746 * @byte_cnt: number of bytes to read
749 * @cont: whether another operation will be chained 747 * @cont: whether another operation will be chained
750 * @lock: whether to lock SF for PL access only 748 * @lock: whether to lock SF for PL access only
751 * @valp: where to store the read data 749 * @valp: where to store the read data
752 * 750 *
753 * Reads up to 4 bytes of data from the serial flash. The location of 751 * Reads up to 4 bytes of data from the serial flash. The location of
754 * the read needs to be specified prior to calling this by issuing the 752 * the read needs to be specified prior to calling this by issuing the
755 * appropriate commands to the serial flash. 753 * appropriate commands to the serial flash.
756 */ 754 */
757 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont, 755 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
758 int lock, u32 *valp) 756 int lock, u32 *valp)
759 { 757 {
760 int ret; 758 int ret;
761 759
762 if (!byte_cnt || byte_cnt > 4) 760 if (!byte_cnt || byte_cnt > 4)
763 return -EINVAL; 761 return -EINVAL;
764 if (t4_read_reg(adapter, SF_OP) & SF_BUSY) 762 if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
765 return -EBUSY; 763 return -EBUSY;
766 cont = cont ? SF_CONT : 0; 764 cont = cont ? SF_CONT : 0;
767 lock = lock ? SF_LOCK : 0; 765 lock = lock ? SF_LOCK : 0;
768 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1)); 766 t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
769 ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5); 767 ret = t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
770 if (!ret) 768 if (!ret)
771 *valp = t4_read_reg(adapter, SF_DATA); 769 *valp = t4_read_reg(adapter, SF_DATA);
772 return ret; 770 return ret;
773 } 771 }
774 772
775 /** 773 /**
776 * sf1_write - write data to the serial flash 774 * sf1_write - write data to the serial flash
777 * @adapter: the adapter 775 * @adapter: the adapter
778 * @byte_cnt: number of bytes to write 776 * @byte_cnt: number of bytes to write
779 * @cont: whether another operation will be chained 777 * @cont: whether another operation will be chained
780 * @lock: whether to lock SF for PL access only 778 * @lock: whether to lock SF for PL access only
781 * @val: value to write 779 * @val: value to write
782 * 780 *
783 * Writes up to 4 bytes of data to the serial flash. The location of 781 * Writes up to 4 bytes of data to the serial flash. The location of
784 * the write needs to be specified prior to calling this by issuing the 782 * the write needs to be specified prior to calling this by issuing the
785 * appropriate commands to the serial flash. 783 * appropriate commands to the serial flash.
786 */ 784 */
787 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont, 785 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
788 int lock, u32 val) 786 int lock, u32 val)
789 { 787 {
790 if (!byte_cnt || byte_cnt > 4) 788 if (!byte_cnt || byte_cnt > 4)
791 return -EINVAL; 789 return -EINVAL;
792 if (t4_read_reg(adapter, SF_OP) & SF_BUSY) 790 if (t4_read_reg(adapter, SF_OP) & SF_BUSY)
793 return -EBUSY; 791 return -EBUSY;
794 cont = cont ? SF_CONT : 0; 792 cont = cont ? SF_CONT : 0;
795 lock = lock ? SF_LOCK : 0; 793 lock = lock ? SF_LOCK : 0;
796 t4_write_reg(adapter, SF_DATA, val); 794 t4_write_reg(adapter, SF_DATA, val);
797 t4_write_reg(adapter, SF_OP, lock | 795 t4_write_reg(adapter, SF_OP, lock |
798 cont | BYTECNT(byte_cnt - 1) | OP_WR); 796 cont | BYTECNT(byte_cnt - 1) | OP_WR);
799 return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5); 797 return t4_wait_op_done(adapter, SF_OP, SF_BUSY, 0, SF_ATTEMPTS, 5);
800 } 798 }
801 799
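A plausible read-ID sequence chaining the two primitives above (hypothetical helper; reading 3 ID bytes is an assumption about the flash part, not something this commit does): write the SF_RD_ID opcode with @cont set, read the ID with @cont clear, then release the lock, mirroring the pattern in flash_wait_op() below.

	static int example_read_flash_id(struct adapter *adapter, u32 *valp)
	{
		int ret;

		/* lock=1 holds the SF interface across the chained operations */
		ret = sf1_write(adapter, 1, 1, 1, SF_RD_ID);
		if (!ret)
			ret = sf1_read(adapter, 3, 0, 1, valp);
		t4_write_reg(adapter, SF_OP, 0);	/* unlock SF */
		return ret;
	}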
802 /** 800 /**
803 * flash_wait_op - wait for a flash operation to complete 801 * flash_wait_op - wait for a flash operation to complete
804 * @adapter: the adapter 802 * @adapter: the adapter
805 * @attempts: max number of polls of the status register 803 * @attempts: max number of polls of the status register
806 * @delay: delay between polls in ms 804 * @delay: delay between polls in ms
807 * 805 *
808 * Wait for a flash operation to complete by polling the status register. 806 * Wait for a flash operation to complete by polling the status register.
809 */ 807 */
810 static int flash_wait_op(struct adapter *adapter, int attempts, int delay) 808 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
811 { 809 {
812 int ret; 810 int ret;
813 u32 status; 811 u32 status;
814 812
815 while (1) { 813 while (1) {
816 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 || 814 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
817 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0) 815 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
818 return ret; 816 return ret;
819 if (!(status & 1)) 817 if (!(status & 1))
820 return 0; 818 return 0;
821 if (--attempts == 0) 819 if (--attempts == 0)
822 return -EAGAIN; 820 return -EAGAIN;
823 if (delay) 821 if (delay)
824 msleep(delay); 822 msleep(delay);
825 } 823 }
826 } 824 }
827 825
828 /** 826 /**
829 * t4_read_flash - read words from serial flash 827 * t4_read_flash - read words from serial flash
830 * @adapter: the adapter 828 * @adapter: the adapter
831 * @addr: the start address for the read 829 * @addr: the start address for the read
832 * @nwords: how many 32-bit words to read 830 * @nwords: how many 32-bit words to read
833 * @data: where to store the read data 831 * @data: where to store the read data
834 * @byte_oriented: whether to store data as bytes or as words 832 * @byte_oriented: whether to store data as bytes or as words
835 * 833 *
836 * Read the specified number of 32-bit words from the serial flash. 834 * Read the specified number of 32-bit words from the serial flash.
837 * If @byte_oriented is set the read data is stored as a byte array 835 * If @byte_oriented is set the read data is stored as a byte array
838 * (i.e., big-endian), otherwise as 32-bit words in the platform's 836 * (i.e., big-endian), otherwise as 32-bit words in the platform's
839 * natural endianness. 837 * natural endianness.
840 */ 838 */
841 static int t4_read_flash(struct adapter *adapter, unsigned int addr, 839 static int t4_read_flash(struct adapter *adapter, unsigned int addr,
842 unsigned int nwords, u32 *data, int byte_oriented) 840 unsigned int nwords, u32 *data, int byte_oriented)
843 { 841 {
844 int ret; 842 int ret;
845 843
846 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3)) 844 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
847 return -EINVAL; 845 return -EINVAL;
848 846
849 addr = swab32(addr) | SF_RD_DATA_FAST; 847 addr = swab32(addr) | SF_RD_DATA_FAST;
850 848
851 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 || 849 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
852 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0) 850 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
853 return ret; 851 return ret;
854 852
855 for ( ; nwords; nwords--, data++) { 853 for ( ; nwords; nwords--, data++) {
856 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data); 854 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
857 if (nwords == 1) 855 if (nwords == 1)
858 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 856 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
859 if (ret) 857 if (ret)
860 return ret; 858 return ret;
861 if (byte_oriented) 859 if (byte_oriented)
862 *data = (__force __u32) (htonl(*data)); 860 *data = (__force __u32) (htonl(*data));
863 } 861 }
864 return 0; 862 return 0;
865 } 863 }
866 864
867 /** 865 /**
868 * t4_write_flash - write up to a page of data to the serial flash 866 * t4_write_flash - write up to a page of data to the serial flash
869 * @adapter: the adapter 867 * @adapter: the adapter
870 * @addr: the start address to write 868 * @addr: the start address to write
871 * @n: length of data to write in bytes 869 * @n: length of data to write in bytes
872 * @data: the data to write 870 * @data: the data to write
873 * 871 *
874 * Writes up to a page of data (256 bytes) to the serial flash starting 872 * Writes up to a page of data (256 bytes) to the serial flash starting
875 * at the given address. All the data must be written to the same page. 873 * at the given address. All the data must be written to the same page.
876 */ 874 */
877 static int t4_write_flash(struct adapter *adapter, unsigned int addr, 875 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
878 unsigned int n, const u8 *data) 876 unsigned int n, const u8 *data)
879 { 877 {
880 int ret; 878 int ret;
881 u32 buf[64]; 879 u32 buf[64];
882 unsigned int i, c, left, val, offset = addr & 0xff; 880 unsigned int i, c, left, val, offset = addr & 0xff;
883 881
884 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE) 882 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
885 return -EINVAL; 883 return -EINVAL;
886 884
887 val = swab32(addr) | SF_PROG_PAGE; 885 val = swab32(addr) | SF_PROG_PAGE;
888 886
889 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 887 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
890 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0) 888 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
891 goto unlock; 889 goto unlock;
892 890
893 for (left = n; left; left -= c) { 891 for (left = n; left; left -= c) {
894 c = min(left, 4U); 892 c = min(left, 4U);
895 for (val = 0, i = 0; i < c; ++i) 893 for (val = 0, i = 0; i < c; ++i)
896 val = (val << 8) + *data++; 894 val = (val << 8) + *data++;
897 895
898 ret = sf1_write(adapter, c, c != left, 1, val); 896 ret = sf1_write(adapter, c, c != left, 1, val);
899 if (ret) 897 if (ret)
900 goto unlock; 898 goto unlock;
901 } 899 }
902 ret = flash_wait_op(adapter, 8, 1); 900 ret = flash_wait_op(adapter, 8, 1);
903 if (ret) 901 if (ret)
904 goto unlock; 902 goto unlock;
905 903
906 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 904 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
907 905
908 /* Read the page to verify the write succeeded */ 906 /* Read the page to verify the write succeeded */
909 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1); 907 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
910 if (ret) 908 if (ret)
911 return ret; 909 return ret;
912 910
913 if (memcmp(data - n, (u8 *)buf + offset, n)) { 911 if (memcmp(data - n, (u8 *)buf + offset, n)) {
914 dev_err(adapter->pdev_dev, 912 dev_err(adapter->pdev_dev,
915 "failed to correctly write the flash page at %#x\n", 913 "failed to correctly write the flash page at %#x\n",
916 addr); 914 addr);
917 return -EIO; 915 return -EIO;
918 } 916 }
919 return 0; 917 return 0;
920 918
921 unlock: 919 unlock:
922 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 920 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
923 return ret; 921 return ret;
924 } 922 }
925 923
926 /** 924 /**
927 * t4_get_fw_version - read the firmware version 925 * t4_get_fw_version - read the firmware version
928 * @adapter: the adapter 926 * @adapter: the adapter
929 * @vers: where to place the version 927 * @vers: where to place the version
930 * 928 *
931 * Reads the FW version from flash. 929 * Reads the FW version from flash.
932 */ 930 */
933 int t4_get_fw_version(struct adapter *adapter, u32 *vers) 931 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
934 { 932 {
935 return t4_read_flash(adapter, FLASH_FW_START + 933 return t4_read_flash(adapter, FLASH_FW_START +
936 offsetof(struct fw_hdr, fw_ver), 1, 934 offsetof(struct fw_hdr, fw_ver), 1,
937 vers, 0); 935 vers, 0);
938 } 936 }
939 937
940 /** 938 /**
941 * t4_get_tp_version - read the TP microcode version 939 * t4_get_tp_version - read the TP microcode version
942 * @adapter: the adapter 940 * @adapter: the adapter
943 * @vers: where to place the version 941 * @vers: where to place the version
944 * 942 *
945 * Reads the TP microcode version from flash. 943 * Reads the TP microcode version from flash.
946 */ 944 */
947 int t4_get_tp_version(struct adapter *adapter, u32 *vers) 945 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
948 { 946 {
949 return t4_read_flash(adapter, FLASH_FW_START + 947 return t4_read_flash(adapter, FLASH_FW_START +
950 offsetof(struct fw_hdr, tp_microcode_ver), 948 offsetof(struct fw_hdr, tp_microcode_ver),
951 1, vers, 0); 949 1, vers, 0);
952 } 950 }
953 951
954 /* Is the given firmware API compatible with the one the driver was compiled 952 /* Is the given firmware API compatible with the one the driver was compiled
955 * with? 953 * with?
956 */ 954 */
957 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2) 955 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
958 { 956 {
959 957
960 /* short circuit if it's the exact same firmware version */ 958 /* short circuit if it's the exact same firmware version */
961 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver) 959 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
962 return 1; 960 return 1;
963 961
964 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x) 962 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
965 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) && 963 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
966 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe)) 964 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
967 return 1; 965 return 1;
968 #undef SAME_INTF 966 #undef SAME_INTF
969 967
970 return 0; 968 return 0;
971 } 969 }
972 970
973 /* The firmware in the filesystem is usable, but should it be installed? 971 /* The firmware in the filesystem is usable, but should it be installed?
974 * This routine prints a detailed explanation whenever it decides the filesystem 972 * This routine prints a detailed explanation whenever it decides the filesystem
975 * firmware should be installed. 973 * firmware should be installed.
976 */ 974 */
977 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable, 975 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
978 int k, int c) 976 int k, int c)
979 { 977 {
980 const char *reason; 978 const char *reason;
981 979
982 if (!card_fw_usable) { 980 if (!card_fw_usable) {
983 reason = "incompatible or unusable"; 981 reason = "incompatible or unusable";
984 goto install; 982 goto install;
985 } 983 }
986 984
987 if (k > c) { 985 if (k > c) {
988 reason = "older than the version supported with this driver"; 986 reason = "older than the version supported with this driver";
989 goto install; 987 goto install;
990 } 988 }
991 989
992 return 0; 990 return 0;
993 991
994 install: 992 install:
995 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, " 993 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
996 "installing firmware %u.%u.%u.%u on card.\n", 994 "installing firmware %u.%u.%u.%u on card.\n",
997 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), 995 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
998 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason, 996 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
999 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), 997 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
1000 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); 998 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1001 999
1002 return 1; 1000 return 1;
1003 } 1001 }
1004 1002
1005 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info, 1003 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1006 const u8 *fw_data, unsigned int fw_size, 1004 const u8 *fw_data, unsigned int fw_size,
1007 struct fw_hdr *card_fw, enum dev_state state, 1005 struct fw_hdr *card_fw, enum dev_state state,
1008 int *reset) 1006 int *reset)
1009 { 1007 {
1010 int ret, card_fw_usable, fs_fw_usable; 1008 int ret, card_fw_usable, fs_fw_usable;
1011 const struct fw_hdr *fs_fw; 1009 const struct fw_hdr *fs_fw;
1012 const struct fw_hdr *drv_fw; 1010 const struct fw_hdr *drv_fw;
1013 1011
1014 drv_fw = &fw_info->fw_hdr; 1012 drv_fw = &fw_info->fw_hdr;
1015 1013
1016 /* Read the header of the firmware on the card */ 1014 /* Read the header of the firmware on the card */
1017 ret = -t4_read_flash(adap, FLASH_FW_START, 1015 ret = -t4_read_flash(adap, FLASH_FW_START,
1018 sizeof(*card_fw) / sizeof(uint32_t), 1016 sizeof(*card_fw) / sizeof(uint32_t),
1019 (uint32_t *)card_fw, 1); 1017 (uint32_t *)card_fw, 1);
1020 if (ret == 0) { 1018 if (ret == 0) {
1021 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw); 1019 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
1022 } else { 1020 } else {
1023 dev_err(adap->pdev_dev, 1021 dev_err(adap->pdev_dev,
1024 "Unable to read card's firmware header: %d\n", ret); 1022 "Unable to read card's firmware header: %d\n", ret);
1025 card_fw_usable = 0; 1023 card_fw_usable = 0;
1026 } 1024 }
1027 1025
1028 if (fw_data != NULL) { 1026 if (fw_data != NULL) {
1029 fs_fw = (const void *)fw_data; 1027 fs_fw = (const void *)fw_data;
1030 fs_fw_usable = fw_compatible(drv_fw, fs_fw); 1028 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
1031 } else { 1029 } else {
1032 fs_fw = NULL; 1030 fs_fw = NULL;
1033 fs_fw_usable = 0; 1031 fs_fw_usable = 0;
1034 } 1032 }
1035 1033
1036 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver && 1034 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
1037 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) { 1035 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
1038 /* Common case: the firmware on the card is an exact match and 1036 /* Common case: the firmware on the card is an exact match and
1039 * the filesystem one is an exact match too, or the filesystem 1037 * the filesystem one is an exact match too, or the filesystem
1040 * one is absent/incompatible. 1038 * one is absent/incompatible.
1041 */ 1039 */
1042 } else if (fs_fw_usable && state == DEV_STATE_UNINIT && 1040 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
1043 should_install_fs_fw(adap, card_fw_usable, 1041 should_install_fs_fw(adap, card_fw_usable,
1044 be32_to_cpu(fs_fw->fw_ver), 1042 be32_to_cpu(fs_fw->fw_ver),
1045 be32_to_cpu(card_fw->fw_ver))) { 1043 be32_to_cpu(card_fw->fw_ver))) {
1046 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data, 1044 ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
1047 fw_size, 0); 1045 fw_size, 0);
1048 if (ret != 0) { 1046 if (ret != 0) {
1049 dev_err(adap->pdev_dev, 1047 dev_err(adap->pdev_dev,
1050 "failed to install firmware: %d\n", ret); 1048 "failed to install firmware: %d\n", ret);
1051 goto bye; 1049 goto bye;
1052 } 1050 }
1053 1051
1054 /* Installed successfully, update the cached header too. */ 1052 /* Installed successfully, update the cached header too. */
1055 memcpy(card_fw, fs_fw, sizeof(*card_fw)); 1053 memcpy(card_fw, fs_fw, sizeof(*card_fw));
1056 card_fw_usable = 1; 1054 card_fw_usable = 1;
1057 *reset = 0; /* already reset as part of load_fw */ 1055 *reset = 0; /* already reset as part of load_fw */
1058 } 1056 }
1059 1057
1060 if (!card_fw_usable) { 1058 if (!card_fw_usable) {
1061 uint32_t d, c, k; 1059 uint32_t d, c, k;
1062 1060
1063 d = be32_to_cpu(drv_fw->fw_ver); 1061 d = be32_to_cpu(drv_fw->fw_ver);
1064 c = be32_to_cpu(card_fw->fw_ver); 1062 c = be32_to_cpu(card_fw->fw_ver);
1065 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0; 1063 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
1066 1064
1067 dev_err(adap->pdev_dev, "Cannot find a usable firmware: " 1065 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
1068 "chip state %d, " 1066 "chip state %d, "
1069 "driver compiled with %d.%d.%d.%d, " 1067 "driver compiled with %d.%d.%d.%d, "
1070 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n", 1068 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
1071 state, 1069 state,
1072 FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d), 1070 FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
1073 FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d), 1071 FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
1074 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c), 1072 FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
1075 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), 1073 FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
1076 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k), 1074 FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
1077 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k)); 1075 FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
1078 ret = EINVAL; 1076 ret = EINVAL;
1079 goto bye; 1077 goto bye;
1080 } 1078 }
1081 1079
1082 /* We're using whatever's on the card and it's known to be good. */ 1080 /* We're using whatever's on the card and it's known to be good. */
1083 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver); 1081 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
1084 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver); 1082 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
1085 1083
1086 bye: 1084 bye:
1087 return ret; 1085 return ret;
1088 } 1086 }
1089 1087
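For illustration, a small sketch of decoding a firmware version word the way the messages in t4_prep_fw() above do (hypothetical helper; @vers must already be in host order, i.e. after be32_to_cpu() as in the code above):

	static void example_show_fw_ver(struct adapter *adap, u32 vers)
	{
		/* the GET macros each extract one version field from vers */
		dev_info(adap->pdev_dev, "firmware %u.%u.%u.%u\n",
			 FW_HDR_FW_VER_MAJOR_GET(vers),
			 FW_HDR_FW_VER_MINOR_GET(vers),
			 FW_HDR_FW_VER_MICRO_GET(vers),
			 FW_HDR_FW_VER_BUILD_GET(vers));
	}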
1090 /** 1088 /**
1091 * t4_flash_erase_sectors - erase a range of flash sectors 1089 * t4_flash_erase_sectors - erase a range of flash sectors
1092 * @adapter: the adapter 1090 * @adapter: the adapter
1093 * @start: the first sector to erase 1091 * @start: the first sector to erase
1094 * @end: the last sector to erase 1092 * @end: the last sector to erase
1095 * 1093 *
1096 * Erases the sectors in the given inclusive range. 1094 * Erases the sectors in the given inclusive range.
1097 */ 1095 */
1098 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end) 1096 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
1099 { 1097 {
1100 int ret = 0; 1098 int ret = 0;
1101 1099
1102 if (end >= adapter->params.sf_nsec) 1100 if (end >= adapter->params.sf_nsec)
1103 return -EINVAL; 1101 return -EINVAL;
1104 1102
1105 while (start <= end) { 1103 while (start <= end) {
1106 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 || 1104 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
1107 (ret = sf1_write(adapter, 4, 0, 1, 1105 (ret = sf1_write(adapter, 4, 0, 1,
1108 SF_ERASE_SECTOR | (start << 8))) != 0 || 1106 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1109 (ret = flash_wait_op(adapter, 14, 500)) != 0) { 1107 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
1110 dev_err(adapter->pdev_dev, 1108 dev_err(adapter->pdev_dev,
1111 "erase of flash sector %d failed, error %d\n", 1109 "erase of flash sector %d failed, error %d\n",
1112 start, ret); 1110 start, ret);
1113 break; 1111 break;
1114 } 1112 }
1115 start++; 1113 start++;
1116 } 1114 }
1117 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */ 1115 t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
1118 return ret; 1116 return ret;
1119 } 1117 }
1120 1118
1121 /** 1119 /**
1122 * t4_flash_cfg_addr - return the address of the flash configuration file 1120 * t4_flash_cfg_addr - return the address of the flash configuration file
1123 * @adapter: the adapter 1121 * @adapter: the adapter
1124 * 1122 *
1125 * Return the address within the flash where the Firmware Configuration 1123 * Return the address within the flash where the Firmware Configuration
1126 * File is stored. 1124 * File is stored.
1127 */ 1125 */
1128 unsigned int t4_flash_cfg_addr(struct adapter *adapter) 1126 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
1129 { 1127 {
1130 if (adapter->params.sf_size == 0x100000) 1128 if (adapter->params.sf_size == 0x100000)
1131 return FLASH_FPGA_CFG_START; 1129 return FLASH_FPGA_CFG_START;
1132 else 1130 else
1133 return FLASH_CFG_START; 1131 return FLASH_CFG_START;
1134 } 1132 }
1135 1133
1136 /** 1134 /**
1137 * t4_load_fw - download firmware 1135 * t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const __be32 *p = (const __be32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size);	/* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}
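
/*
 * Illustrative host-side sketch (not part of the driver): the integrity
 * check above passes only when the image's big-endian 32-bit words sum,
 * with wrap-around, to 0xffffffff.  A hypothetical image-validation tool
 * could apply the same rule and reject a corrupt file before it is ever
 * handed to ethtool.  The helper below is an example under that assumption.
 */
#include <stdint.h>
#include <arpa/inet.h>		/* ntohl() */

static int fw_image_csum_ok(const void *img, unsigned int size)
{
	const uint32_t *p = img;
	uint32_t csum = 0;
	unsigned int i;

	if (!size || (size & 511))	/* same 512-byte granularity rule */
		return 0;
	for (i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);	/* wrap-around 32-bit addition */
	return csum == 0xffffffff;
}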

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
		     FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
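
/*
 * Illustrative sketch (hypothetical caller, not part of the driver): forcing
 * a port to a fixed speed with symmetric pause would fill in a struct
 * link_config roughly like this before calling t4_link_start().  Field and
 * constant names follow the code above; requested_speed is assumed to be in
 * the FW_PORT_CAP_* encoding, as the rcap assignment suggests.
 */
static int example_force_1g(struct adapter *adap, unsigned int mbox,
			    unsigned int port, struct link_config *lc)
{
	lc->autoneg = AUTONEG_DISABLE;			/* skip autonegotiation */
	lc->requested_speed = FW_PORT_CAP_SPEED_1G;	/* fixed 1 Gb/s */
	lc->requested_fc = PAUSE_RX | PAUSE_TX;		/* symmetric pause */
	return t4_link_start(adap, mbox, port, lc);	/* issue L1_CFG */
}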

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

typedef void (*int_handler_t)(struct adapter *adap);

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is
 *	terminated by an entry specifying mask 0.  Returns the number of
 *	fatal interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		if (acts->int_handler)
			acts->int_handler(adapter);
		mask |= acts->mask;
	}
	status &= mask;
	if (status)	/* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
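
/*
 * Illustrative sketch (hypothetical module, not part of the driver): a new
 * interrupt source plugs into the table-driven scheme above with nothing
 * more than a zero-terminated table and one call.  The FOO_* names below
 * are invented for the example; a real table would use the module's cause
 * register and bit definitions.
 */
static void foo_intr_handler(struct adapter *adapter)
{
	static const struct intr_info foo_intr_info[] = {
		{ 0x1, "FOO FIFO parity error", -1, 1 },	/* fatal */
		{ 0x2, "FOO dropped request", -1, 0 },		/* warning only */
		{ 0 }						/* terminator */
	};

	/* FOO_INT_CAUSE is a placeholder register offset */
	if (t4_handle_intr_status(adapter, FOO_INT_CAUSE, foo_intr_info))
		t4_fatal_err(adapter);
}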

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
		{ 0 }
	};

	static struct intr_info t5_pcie_intr_info[] = {
		{ MSTGRPPERR, "Master Response Read Queue parity error",
		  -1, 1 },
		{ MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
		{ MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
		  -1, 1 },
		{ PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
		  -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DREQWRPERR, "PCI DMA channel write request parity error",
		  -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HREQWRPERR, "PCI HMA channel write request parity error",
		  -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ VFIDPERR, "PCI VFID SRAM parity error", -1, 1 },
		{ MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
		  -1, 1 },
		{ IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
		{ RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
		{ IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
		{ TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
		{ READRSPERR, "Outbound read error", -1, 0 },
		{ 0 }
	};

	int fat;

	if (is_t4(adapter->params.chip))
		fat = t4_handle_intr_status(adapter,
				PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				sysbus_intr_info) +
		      t4_handle_intr_status(adapter,
				PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				pcie_port_intr_info) +
		      t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
					    pcie_intr_info);
	else
		fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
					    t5_pcie_intr_info);

	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static const struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ DBFIFO_LP_INT, NULL, -1, 0, t4_db_full },
		{ DBFIFO_HP_INT, NULL, -1, 0, t4_db_full },
		{ ERR_DROPPED_DB, NULL, -1, 0, t4_db_dropped },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR)
		t4_report_fw_error(adapter);

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error",
		  -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error",
		  -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	static const struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE);	/* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = MC_INT_CAUSE;
			cnt_addr = MC_ECC_STATUS;
		} else {
			addr = MC_P_INT_CAUSE;
			cnt_addr = MC_P_ECC_STATUS;
		}
	} else {
		addr = MC_REG(MC_P_INT_CAUSE, 1);
		cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE) {
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
		if (is_t5(adap->params.chip))
			dev_alert(adap->pdev_dev,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adap,
					      MA_PARITY_ERROR_STATUS2));
	}
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static const struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static const struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v, int_cause_reg;

	if (is_t4(adap->params.chip))
		int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
	else
		int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);

	v = t4_read_reg(adap, int_cause_reg);

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & RXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
			  port);
	t4_write_reg(adap, int_cause_reg, v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static const struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}

#define PF_INTR_MASK (PFSW)
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (!is_t4(adapter->params.chip) && (cause & MC1))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}
1986 1984
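/*
 * Illustrative sketch (not part of the driver): a non-data interrupt
 * handler would typically just forward to t4_slow_intr_handler() and use
 * its return value to tell the kernel whether the interrupt was ours.
 * The handler name and the use of the adapter as the IRQ cookie are
 * assumptions made for this example; linux/interrupt.h is assumed to be
 * in scope for irqreturn_t.
 */
static irqreturn_t example_slow_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        /* t4_slow_intr_handler() returns 1 if it serviced something */
        return t4_slow_intr_handler(adap) ? IRQ_HANDLED : IRQ_NONE;
}
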
/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts.  Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules.  Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
        u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

        t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
                     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
                     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
                     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
                     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
                     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
                     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
                     DBFIFO_HP_INT | DBFIFO_LP_INT |
                     EGRESS_SIZE_ERR);
        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
        t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}

/**
 * t4_intr_disable - disable interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrators.  The caller must be a PCI function managing global
 * interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
        u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

        t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
        t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}

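/*
 * Illustrative only (the functions are the ones above; the scenario is
 * assumed): the enable/disable pair brackets the period during which one
 * PF owns the global interrupt hierarchy:
 *
 *      t4_intr_enable(adap);       master PF takes the interrupt roots
 *      ... run, servicing errors via t4_slow_intr_handler() ...
 *      t4_intr_disable(adap);      quiesce before reset or unload
 */
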
/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by HW inexact
 * (hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
        u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
        u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

        a ^= b;
        a ^= (a >> 12);
        a ^= (a >> 6);
        return a & 0x3f;
}

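/*
 * Worked example (illustrative, address chosen arbitrarily): for the MAC
 * address 00:07:43:12:34:56, a = 0x000743 and b = 0x123456, and the folds
 * proceed as
 *
 *      a ^= b;         a == 0x123315
 *      a ^= a >> 12;   a == 0x123236
 *      a ^= a >> 6;    a == 0x127afe
 *      a & 0x3f        == 0x3e
 *
 * so this address selects bucket 62 of the 64-entry inexact-match hash.
 */
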
/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the response queue lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values.  If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
                        int start, int n, const u16 *rspq, unsigned int nrspq)
{
        int ret;
        const u16 *rsp = rspq;
        const u16 *rsp_end = rspq + nrspq;
        struct fw_rss_ind_tbl_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
                               FW_CMD_REQUEST | FW_CMD_WRITE |
                               FW_RSS_IND_TBL_CMD_VIID(viid));
        cmd.retval_len16 = htonl(FW_LEN16(cmd));

        /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
        while (n > 0) {
                int nq = min(n, 32);
                __be32 *qp = &cmd.iq0_to_iq2;

                cmd.niqid = htons(nq);
                cmd.startidx = htons(start);

                start += nq;
                n -= nq;

                while (nq > 0) {
                        unsigned int v;

                        v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
                        if (++rsp >= rsp_end)
                                rsp = rspq;
                        v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
                        if (++rsp >= rsp_end)
                                rsp = rspq;
                        v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
                        if (++rsp >= rsp_end)
                                rsp = rspq;

                        *qp++ = htonl(v);
                        nq -= 3;
                }

                ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
                if (ret)
                        return ret;
        }
        return 0;
}

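/*
 * Illustrative sketch (the IQ IDs and table size are assumptions, and
 * using adap->fn as the mailbox follows the driver's usual pattern):
 * spread a 128-entry RSS subtable over four response queues.  Since
 * nrspq (4) < n (128) the queue IDs repeat until the range is full, and
 * because each FW command carries at most 32 entries (packed three per
 * 32-bit word) this issues four mailbox commands.
 */
static int example_setup_rss(struct adapter *adap, unsigned int viid)
{
        static const u16 rspq[] = { 64, 65, 66, 67 };

        return t4_config_rss_range(adap, adap->fn, viid, 0, 128,
                                   rspq, ARRAY_SIZE(rspq));
}
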
/**
 * t4_config_glbl_rss - configure the global RSS mode
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @mode: global RSS mode
 * @flags: mode-specific flags
 *
 * Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
                       unsigned int flags)
{
        struct fw_rss_glb_config_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
                              FW_CMD_REQUEST | FW_CMD_WRITE);
        c.retval_len16 = htonl(FW_LEN16(c));
        if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
                c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
        } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
                c.u.basicvirtual.mode_pkd =
                        htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
                c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
        } else {
                return -EINVAL;
        }
        return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
                         struct tp_tcp_stats *v6)
{
        u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x)     val[STAT_IDX(x)]
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

        if (v4) {
                t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
                                 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
                v4->tcpOutRsts = STAT(OUT_RST);
                v4->tcpInSegs = STAT64(IN_SEG);
                v4->tcpOutSegs = STAT64(OUT_SEG);
                v4->tcpRetransSegs = STAT64(RXT_SEG);
        }
        if (v6) {
                t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
                                 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
                v6->tcpOutRsts = STAT(OUT_RST);
                v6->tcpInSegs = STAT64(IN_SEG);
                v6->tcpOutSegs = STAT64(OUT_SEG);
                v6->tcpRetransSegs = STAT64(RXT_SEG);
        }
#undef STAT64
#undef STAT
#undef STAT_IDX
}

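/*
 * Note (illustrative): TP exports each 64-bit MIB counter as adjacent
 * HI/LO 32-bit words, which is what the STAT64() macro above recombines.
 * For instance tcpInSegs expands to
 *
 *      ((u64)val[STAT_IDX(IN_SEG_HI)] << 32) | val[STAT_IDX(IN_SEG_LO)]
 *
 * after a single t4_read_indirect() burst has filled val[] with the whole
 * OUT_RST..RXT_SEG_LO register range.
 */
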
/**
 * t4_read_mtu_tbl - returns the values in the HW path MTU table
 * @adap: the adapter
 * @mtus: where to store the MTU values
 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
 *
 * Reads the HW path MTU table.
 */
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
{
        u32 v;
        int i;

        for (i = 0; i < NMTUS; ++i) {
                t4_write_reg(adap, TP_MTU_TABLE,
                             MTUINDEX(0xff) | MTUVALUE(i));
                v = t4_read_reg(adap, TP_MTU_TABLE);
                mtus[i] = MTUVALUE_GET(v);
                if (mtu_log)
                        mtu_log[i] = MTUWIDTH_GET(v);
        }
}

/**
 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
 * @adap: the adapter
 * @addr: the indirect TP register address
 * @mask: specifies the field within the register to modify
 * @val: new value for the field
 *
 * Sets a field of an indirect TP register to the given value.
 */
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
                            unsigned int mask, unsigned int val)
{
        t4_write_reg(adap, TP_PIO_ADDR, addr);
        val |= t4_read_reg(adap, TP_PIO_DATA) & ~mask;
        t4_write_reg(adap, TP_PIO_DATA, val);
}

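/*
 * Illustrative usage (the register and field names are assumptions for
 * the example): a read-modify-write of a single field in an indirect TP
 * register, leaving the register's other bits untouched.
 */
static void example_clear_tp_field(struct adapter *adap)
{
        /* clear CSUM_HAS_PSEUDO_HDR in TP_INGRESS_CONFIG, keep the rest */
        t4_tp_wr_bits_indirect(adap, TP_INGRESS_CONFIG,
                               CSUM_HAS_PSEUDO_HDR, 0);
}
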
/**
 * init_cong_ctrl - initialize congestion control parameters
 * @a: the alpha values for congestion control
 * @b: the beta values for congestion control
 *
 * Initialize the congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
        a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
        a[9] = 2;
        a[10] = 3;
        a[11] = 4;
        a[12] = 5;
        a[13] = 6;
        a[14] = 7;
        a[15] = 8;
        a[16] = 9;
        a[17] = 10;
        a[18] = 14;
        a[19] = 17;
        a[20] = 21;
        a[21] = 25;
        a[22] = 30;
        a[23] = 35;
        a[24] = 45;
        a[25] = 60;
        a[26] = 80;
        a[27] = 100;
        a[28] = 200;
        a[29] = 300;
        a[30] = 400;
        a[31] = 500;

        b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
        b[9] = b[10] = 1;
        b[11] = b[12] = 2;
        b[13] = b[14] = b[15] = b[16] = 3;
        b[17] = b[18] = b[19] = b[20] = b[21] = 4;
        b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
        b[28] = b[29] = 6;
        b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t4_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 *
 * Write the HW MTU table with the supplied MTUs and the high-speed
 * congestion control table with the supplied alpha, beta, and MTUs.
 * We write the two tables together because the additive increments
 * depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
                  const unsigned short *alpha, const unsigned short *beta)
{
        static const unsigned int avg_pkts[NCCTRL_WIN] = {
                2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
                896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
                28672, 40960, 57344, 81920, 114688, 163840, 229376
        };

        unsigned int i, w;

        for (i = 0; i < NMTUS; ++i) {
                unsigned int mtu = mtus[i];
                unsigned int log2 = fls(mtu);

                if (!(mtu & ((1 << log2) >> 2)))     /* round */
                        log2--;
                t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
                             MTUWIDTH(log2) | MTUVALUE(mtu));

                for (w = 0; w < NCCTRL_WIN; ++w) {
                        unsigned int inc;

                        inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
                                  CC_MIN_INCR);

                        t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
                                     (w << 16) | (beta[w] << 13) | inc);
                }
        }
}

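/*
 * Worked example (illustrative): for mtu = 1500 in congestion window
 * w = 9, the tables above give alpha[9] == 2 and avg_pkts[9] == 112, so
 * the additive increment programmed into TP_CCTRL_TABLE is
 *
 *      inc = max(((1500 - 40) * 2) / 112, CC_MIN_INCR)
 *          = max(2920 / 112, 2U) = 26
 *
 * The 40 bytes subtracted approximate TCP/IP header overhead, so the
 * increment scales with payload size rather than frame size.
 */
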
/**
 * get_mps_bg_map - return the buffer groups associated with a port
 * @adap: the adapter
 * @idx: the port index
 *
 * Returns a bitmap indicating which MPS buffer groups are associated
 * with the given port.  Bit i is set if buffer group i is used by the
 * port.
 */
static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
{
        u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));

        if (n == 0)
                return idx == 0 ? 0xf : 0;
        if (n == 1)
                return idx < 2 ? (3 << (2 * idx)) : 0;
        return 1 << idx;
}

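/*
 * Illustrative decode of the mapping above: a single-port configuration
 * (n == 0) gives port 0 all four buffer groups (0xf); a dual-port one
 * (n == 1) gives port 0 groups 0-1 (0x3) and port 1 groups 2-3 (0xc);
 * otherwise each of the four ports owns just its own group (1 << idx).
 */
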
/**
 * t4_get_port_type_description - return Port Type string description
 * @port_type: firmware Port Type enumeration
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
{
        static const char *const port_type_description[] = {
                "R XFI",
                "R XAUI",
                "T SGMII",
                "T XFI",
                "T XAUI",
                "KX4",
                "CX4",
                "KX",
                "KR",
                "R SFP+",
                "KR/KX",
                "KR/KX/KX4",
                "R QSFP_10G",
                "",
                "R QSFP",
                "R BP40_BA",
        };

        if (port_type < ARRAY_SIZE(port_type_description))
                return port_type_description[port_type];
        return "UNKNOWN";
}

/**
 * t4_get_port_stats - collect port statistics
 * @adap: the adapter
 * @idx: the port index
 * @p: the stats structure to fill
 *
 * Collect statistics related to the given port from HW.
 */
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
        u32 bgmap = get_mps_bg_map(adap, idx);

#define GET_STAT(name) \
        t4_read_reg64(adap, \
        (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
         T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)

        p->tx_octets = GET_STAT(TX_PORT_BYTES);
        p->tx_frames = GET_STAT(TX_PORT_FRAMES);
        p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
        p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
        p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
        p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
        p->tx_frames_64 = GET_STAT(TX_PORT_64B);
        p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
        p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
        p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
        p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
        p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
        p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
        p->tx_drop = GET_STAT(TX_PORT_DROP);
        p->tx_pause = GET_STAT(TX_PORT_PAUSE);
        p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
        p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
        p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
        p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
        p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
        p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
        p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
        p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);

        p->rx_octets = GET_STAT(RX_PORT_BYTES);
        p->rx_frames = GET_STAT(RX_PORT_FRAMES);
        p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
        p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
        p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
        p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
        p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
        p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
        p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
        p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
        p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
        p->rx_frames_64 = GET_STAT(RX_PORT_64B);
        p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
        p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
        p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
        p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
        p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
        p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
        p->rx_pause = GET_STAT(RX_PORT_PAUSE);
        p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
        p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
        p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
        p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
        p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
        p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
        p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
        p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);

        p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
        p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
        p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
        p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
        p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
        p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
        p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
        p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;

#undef GET_STAT
#undef GET_STAT_COM
}

/**
 * t4_wol_magic_enable - enable/disable magic packet WoL
 * @adap: the adapter
 * @port: the physical port index
 * @addr: MAC address expected in magic packets, %NULL to disable
 *
 * Enables/disables magic packet wake-on-LAN for the selected port.
 */
void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
                         const u8 *addr)
{
        u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;

        if (is_t4(adap->params.chip)) {
                mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
                mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
                port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
        } else {
                mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
                mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
                port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
        }

        if (addr) {
                t4_write_reg(adap, mag_id_reg_l,
                             (addr[2] << 24) | (addr[3] << 16) |
                             (addr[4] << 8) | addr[5]);
                t4_write_reg(adap, mag_id_reg_h,
                             (addr[0] << 8) | addr[1]);
        }
        t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
                         addr ? MAGICEN : 0);
}

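/*
 * Worked example (illustrative, address chosen arbitrarily): enabling
 * magic-packet WoL for 00:07:43:12:34:56 splits the address across the
 * two MAGIC_MACID registers as
 *
 *      mag_id_reg_l = 0x43123456    (bytes 2-5)
 *      mag_id_reg_h = 0x00000007    (bytes 0-1)
 *
 * while passing addr == NULL leaves the registers alone and merely clears
 * MAGICEN in the port configuration register.
 */
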
/**
 * t4_wol_pat_enable - enable/disable pattern-based WoL
 * @adap: the adapter
 * @port: the physical port index
 * @map: bitmap of which HW pattern filters to set
 * @mask0: byte mask for bytes 0-63 of a packet
 * @mask1: byte mask for bytes 64-127 of a packet
 * @crc: Ethernet CRC for selected bytes
 * @enable: enable/disable switch
 *
 * Sets the pattern filters indicated in @map to mask out the bytes
 * specified in @mask0/@mask1 in received packets and compare the CRC of
 * the resulting packet against @crc.  If @enable is %true pattern-based
 * WoL is enabled, otherwise disabled.
 */
int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
                      u64 mask0, u64 mask1, unsigned int crc, bool enable)
{
        int i;
        u32 port_cfg_reg;

        if (is_t4(adap->params.chip))
                port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
        else
                port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);

        if (!enable) {
                t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
                return 0;
        }
        if (map > 0xff)
                return -EINVAL;

#define EPIO_REG(name) \
        (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
         T5_PORT_REG(port, MAC_PORT_EPIO_##name))

        t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
        t4_write_reg(adap, EPIO_REG(DATA2), mask1);
        t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);

        for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
                if (!(map & 1))
                        continue;

                /* write byte masks */
                t4_write_reg(adap, EPIO_REG(DATA0), mask0);
                t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
                t4_read_reg(adap, EPIO_REG(OP)); /* flush */
                if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
                        return -ETIMEDOUT;

                /* write CRC */
                t4_write_reg(adap, EPIO_REG(DATA0), crc);
                t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
                t4_read_reg(adap, EPIO_REG(OP)); /* flush */
                if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY)
                        return -ETIMEDOUT;
        }
#undef EPIO_REG

        /* use the chip-appropriate config register computed above, not the
         * T4-only PORT_REG(), so T5 adapters enable the right port too
         */
        t4_set_reg_field(adap, port_cfg_reg, 0, PATEN);
        return 0;
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
        memset(wr, 0, sizeof(*wr));
        wr->op_pkd = htonl(FW_WR_OP(FW_FILTER_WR));
        wr->len16_pkd = htonl(FW_WR_LEN16(sizeof(*wr) / 16));
        wr->tid_to_iq = htonl(V_FW_FILTER_WR_TID(ftid) |
                              V_FW_FILTER_WR_NOREPLY(qid < 0));
        wr->del_filter_to_l2tix = htonl(F_FW_FILTER_WR_DEL_FILTER);
        if (qid >= 0)
                wr->rx_chan_rx_rpl_iq = htons(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

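/*
 * Illustrative usage (the filter ID and queue ID are assumptions): build
 * a work request that deletes filter 100 and delivers the completion to
 * ingress queue 0; passing a negative qid instead would suppress the
 * notification entirely.
 */
static void example_del_filter(struct fw_filter_wr *wr)
{
        t4_mk_filtdelwr(100, wr, 0);
}
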
#define INIT_CMD(var, cmd, rd_wr) do { \
        (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
                                  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
        (var).retval_len16 = htonl(FW_LEN16(var)); \
} while (0)

int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val)
{
        struct fw_ldst_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
                                  FW_CMD_WRITE |
                                  FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FIRMWARE));
        c.cycles_to_len16 = htonl(FW_LEN16(c));
        c.u.addrval.addr = htonl(addr);
        c.u.addrval.val = htonl(val);

        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_mdio_rd - read a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to read
 * @valp: where to store the value
 *
 * Issues a FW command through the given mailbox to read a PHY register.
 */
int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 *valp)
{
        int ret;
        struct fw_ldst_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
                                  FW_CMD_READ |
                                  FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
        c.cycles_to_len16 = htonl(FW_LEN16(c));
        c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
                                   FW_LDST_CMD_MMD(mmd));
        c.u.mdio.raddr = htons(reg);

        ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
        if (ret == 0)
                *valp = ntohs(c.u.mdio.rval);
        return ret;
}

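/*
 * Illustrative usage (the PHY address and mailbox choice are assumptions;
 * MDIO_MMD_PMAPMD and MDIO_STAT1 come from linux/mdio.h): read the
 * clause-45 PMA/PMD status register of the PHY at address 0 through the
 * firmware mailbox.
 */
static int example_read_phy_status(struct adapter *adap, u16 *status)
{
        return t4_mdio_rd(adap, adap->fn, 0, MDIO_MMD_PMAPMD,
                          MDIO_STAT1, status);
}
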
/**
 * t4_mdio_wr - write a PHY register through MDIO
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @phy_addr: the PHY address
 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
 * @reg: the register to write
 * @val: value to write
 *
 * Issues a FW command through the given mailbox to write a PHY register.
 */
int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
               unsigned int mmd, unsigned int reg, u16 val)
{
        struct fw_ldst_cmd c;

        memset(&c, 0, sizeof(c));
        c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
                                  FW_CMD_WRITE |
                                  FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
        c.cycles_to_len16 = htonl(FW_LEN16(c));
        c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
                                   FW_LDST_CMD_MMD(mmd));
        c.u.mdio.raddr = htons(reg);
        c.u.mdio.rval = htons(val);

        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_sge_decode_idma_state - decode the idma state
 * @adapter: the adapter
 * @state: the state idma is stuck in
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
{
        static const char * const t4_decode[] = {
                "IDMA_IDLE",
                "IDMA_PUSH_MORE_CPL_FIFO",
                "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
                "Not used",
                "IDMA_PHYSADDR_SEND_PCIEHDR",
                "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
                "IDMA_PHYSADDR_SEND_PAYLOAD",
                "IDMA_SEND_FIFO_TO_IMSG",
                "IDMA_FL_REQ_DATA_FL_PREP",
                "IDMA_FL_REQ_DATA_FL",
                "IDMA_FL_DROP",
                "IDMA_FL_H_REQ_HEADER_FL",
                "IDMA_FL_H_SEND_PCIEHDR",
                "IDMA_FL_H_PUSH_CPL_FIFO",
                "IDMA_FL_H_SEND_CPL",
                "IDMA_FL_H_SEND_IP_HDR_FIRST",
                "IDMA_FL_H_SEND_IP_HDR",
                "IDMA_FL_H_REQ_NEXT_HEADER_FL",
                "IDMA_FL_H_SEND_NEXT_PCIEHDR",
                "IDMA_FL_H_SEND_IP_HDR_PADDING",
                "IDMA_FL_D_SEND_PCIEHDR",
                "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
                "IDMA_FL_D_REQ_NEXT_DATA_FL",
                "IDMA_FL_SEND_PCIEHDR",
                "IDMA_FL_PUSH_CPL_FIFO",
                "IDMA_FL_SEND_CPL",
                "IDMA_FL_SEND_PAYLOAD_FIRST",
                "IDMA_FL_SEND_PAYLOAD",
                "IDMA_FL_REQ_NEXT_DATA_FL",
                "IDMA_FL_SEND_NEXT_PCIEHDR",
                "IDMA_FL_SEND_PADDING",
                "IDMA_FL_SEND_COMPLETION_TO_IMSG",
                "IDMA_FL_SEND_FIFO_TO_IMSG",
                "IDMA_FL_REQ_DATAFL_DONE",
                "IDMA_FL_REQ_HEADERFL_DONE",
        };
        static const char * const t5_decode[] = {
                "IDMA_IDLE",
                "IDMA_ALMOST_IDLE",
                "IDMA_PUSH_MORE_CPL_FIFO",
                "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
                "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
                "IDMA_PHYSADDR_SEND_PCIEHDR",
                "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
                "IDMA_PHYSADDR_SEND_PAYLOAD",
                "IDMA_SEND_FIFO_TO_IMSG",
                "IDMA_FL_REQ_DATA_FL",
                "IDMA_FL_DROP",
                "IDMA_FL_DROP_SEND_INC",
                "IDMA_FL_H_REQ_HEADER_FL",
                "IDMA_FL_H_SEND_PCIEHDR",
                "IDMA_FL_H_PUSH_CPL_FIFO",
                "IDMA_FL_H_SEND_CPL",
                "IDMA_FL_H_SEND_IP_HDR_FIRST",
                "IDMA_FL_H_SEND_IP_HDR",
                "IDMA_FL_H_REQ_NEXT_HEADER_FL",
                "IDMA_FL_H_SEND_NEXT_PCIEHDR",
                "IDMA_FL_H_SEND_IP_HDR_PADDING",
                "IDMA_FL_D_SEND_PCIEHDR",
                "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
                "IDMA_FL_D_REQ_NEXT_DATA_FL",
                "IDMA_FL_SEND_PCIEHDR",
                "IDMA_FL_PUSH_CPL_FIFO",
                "IDMA_FL_SEND_CPL",
                "IDMA_FL_SEND_PAYLOAD_FIRST",
                "IDMA_FL_SEND_PAYLOAD",
                "IDMA_FL_REQ_NEXT_DATA_FL",
                "IDMA_FL_SEND_NEXT_PCIEHDR",
                "IDMA_FL_SEND_PADDING",
                "IDMA_FL_SEND_COMPLETION_TO_IMSG",
        };
        static const u32 sge_regs[] = {
                SGE_DEBUG_DATA_LOW_INDEX_2,
                SGE_DEBUG_DATA_LOW_INDEX_3,
                SGE_DEBUG_DATA_HIGH_INDEX_10,
        };
        const char **sge_idma_decode;
        int sge_idma_decode_nstates;
        int i;

        if (is_t4(adapter->params.chip)) {
                sge_idma_decode = (const char **)t4_decode;
                sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
        } else {
                sge_idma_decode = (const char **)t5_decode;
                sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
        }

        if (state < sge_idma_decode_nstates)
                CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
        else
                CH_WARN(adapter, "idma state %d unknown\n", state);

        for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
                CH_WARN(adapter, "SGE register %#x value %#x\n",
                        sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
}

2754 /** 2752 /**
2755 * t4_fw_hello - establish communication with FW 2753 * t4_fw_hello - establish communication with FW
2756 * @adap: the adapter 2754 * @adap: the adapter
2757 * @mbox: mailbox to use for the FW command 2755 * @mbox: mailbox to use for the FW command
2758 * @evt_mbox: mailbox to receive async FW events 2756 * @evt_mbox: mailbox to receive async FW events
2759 * @master: specifies the caller's willingness to be the device master 2757 * @master: specifies the caller's willingness to be the device master
2760 * @state: returns the current device state (if non-NULL) 2758 * @state: returns the current device state (if non-NULL)
2761 * 2759 *
2762 * Issues a command to establish communication with FW. Returns either 2760 * Issues a command to establish communication with FW. Returns either
2763 * an error (negative integer) or the mailbox of the Master PF. 2761 * an error (negative integer) or the mailbox of the Master PF.
2764 */ 2762 */
2765 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox, 2763 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2766 enum dev_master master, enum dev_state *state) 2764 enum dev_master master, enum dev_state *state)
2767 { 2765 {
2768 int ret; 2766 int ret;
2769 struct fw_hello_cmd c; 2767 struct fw_hello_cmd c;
2770 u32 v; 2768 u32 v;
2771 unsigned int master_mbox; 2769 unsigned int master_mbox;
2772 int retries = FW_CMD_HELLO_RETRIES; 2770 int retries = FW_CMD_HELLO_RETRIES;
2773 2771
2774 retry: 2772 retry:
2775 memset(&c, 0, sizeof(c)); 2773 memset(&c, 0, sizeof(c));
2776 INIT_CMD(c, HELLO, WRITE); 2774 INIT_CMD(c, HELLO, WRITE);
2777 c.err_to_clearinit = htonl( 2775 c.err_to_clearinit = htonl(
2778 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) | 2776 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2779 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) | 2777 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2780 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 2778 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox :
2781 FW_HELLO_CMD_MBMASTER_MASK) | 2779 FW_HELLO_CMD_MBMASTER_MASK) |
2782 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) | 2780 FW_HELLO_CMD_MBASYNCNOT(evt_mbox) |
2783 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) | 2781 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
2784 FW_HELLO_CMD_CLEARINIT); 2782 FW_HELLO_CMD_CLEARINIT);
2785 2783
2786 /* 2784 /*
2787 * Issue the HELLO command to the firmware. If it's not successful 2785 * Issue the HELLO command to the firmware. If it's not successful
2788 * but indicates that we got a "busy" or "timeout" condition, retry 2786 * but indicates that we got a "busy" or "timeout" condition, retry
2789 * the HELLO until we exhaust our retry limit. If we do exceed our 2787 * the HELLO until we exhaust our retry limit. If we do exceed our
2790 * retry limit, check to see if the firmware left us any error 2788 * retry limit, check to see if the firmware left us any error
2791 * information and report that if so. 2789 * information and report that if so.
2792 */ 2790 */
2793 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 2791 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2794 if (ret < 0) { 2792 if (ret < 0) {
2795 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) 2793 if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
2796 goto retry; 2794 goto retry;
2797 if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR) 2795 if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR)
2798 t4_report_fw_error(adap); 2796 t4_report_fw_error(adap);
2799 return ret; 2797 return ret;
2800 } 2798 }
2801 2799
2802 v = ntohl(c.err_to_clearinit); 2800 v = ntohl(c.err_to_clearinit);
2803 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v); 2801 master_mbox = FW_HELLO_CMD_MBMASTER_GET(v);
2804 if (state) { 2802 if (state) {
2805 if (v & FW_HELLO_CMD_ERR) 2803 if (v & FW_HELLO_CMD_ERR)
2806 *state = DEV_STATE_ERR; 2804 *state = DEV_STATE_ERR;
2807 else if (v & FW_HELLO_CMD_INIT) 2805 else if (v & FW_HELLO_CMD_INIT)
2808 *state = DEV_STATE_INIT; 2806 *state = DEV_STATE_INIT;
2809 else 2807 else
2810 *state = DEV_STATE_UNINIT; 2808 *state = DEV_STATE_UNINIT;
2811 } 2809 }
2812 2810
2813 /* 2811 /*
2814 * If we're not the Master PF then we need to wait around for the 2812 * If we're not the Master PF then we need to wait around for the
2815 * Master PF Driver to finish setting up the adapter. 2813 * Master PF Driver to finish setting up the adapter.
2816 * 2814 *
2817 * Note that we also do this wait if we're a non-Master-capable PF and 2815 * Note that we also do this wait if we're a non-Master-capable PF and
2818 * there is no current Master PF; a Master PF may show up momentarily 2816 * there is no current Master PF; a Master PF may show up momentarily
2819 * and we wouldn't want to fail pointlessly. (This can happen when an 2817 * and we wouldn't want to fail pointlessly. (This can happen when an
2820 * OS loads lots of different drivers rapidly at the same time). In 2818 * OS loads lots of different drivers rapidly at the same time). In
2821 * this case, the Master PF returned by the firmware will be 2819 * this case, the Master PF returned by the firmware will be
2822 * FW_PCIE_FW_MASTER_MASK so the test below will work ... 2820 * FW_PCIE_FW_MASTER_MASK so the test below will work ...
2823 */ 2821 */
2824 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 && 2822 if ((v & (FW_HELLO_CMD_ERR|FW_HELLO_CMD_INIT)) == 0 &&
2825 master_mbox != mbox) { 2823 master_mbox != mbox) {
2826 int waiting = FW_CMD_HELLO_TIMEOUT; 2824 int waiting = FW_CMD_HELLO_TIMEOUT;
2827 2825
2828 /* 2826 /*
2829 * Wait for the firmware to either indicate an error or 2827 * Wait for the firmware to either indicate an error or
2830 * initialized state. If we see either of these we bail out 2828 * initialized state. If we see either of these we bail out
2831 * and report the issue to the caller. If we exhaust the 2829 * and report the issue to the caller. If we exhaust the
2832 * "hello timeout" and we haven't exhausted our retries, try 2830 * "hello timeout" and we haven't exhausted our retries, try
2833 * again. Otherwise bail with a timeout error. 2831 * again. Otherwise bail with a timeout error.
2834 */ 2832 */
2835 for (;;) { 2833 for (;;) {
2836 u32 pcie_fw; 2834 u32 pcie_fw;
2837 2835
2838 msleep(50); 2836 msleep(50);
2839 waiting -= 50; 2837 waiting -= 50;
2840 2838
2841 /* 2839 /*
2842 * If neither Error nor Initialized are indicated 2840 * If neither Error nor Initialized are indicated
2843 * by the firmware, keep waiting till we exhaust our 2841 * by the firmware, keep waiting till we exhaust our
2844 * timeout ... and then retry if we haven't exhausted 2842 * timeout ... and then retry if we haven't exhausted
2845 * our retries ... 2843 * our retries ...
2846 */ 2844 */
2847 pcie_fw = t4_read_reg(adap, MA_PCIE_FW); 2845 pcie_fw = t4_read_reg(adap, MA_PCIE_FW);
2848 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) { 2846 if (!(pcie_fw & (FW_PCIE_FW_ERR|FW_PCIE_FW_INIT))) {
2849 if (waiting <= 0) { 2847 if (waiting <= 0) {
2850 if (retries-- > 0) 2848 if (retries-- > 0)
2851 goto retry; 2849 goto retry;
2852 2850
2853 return -ETIMEDOUT; 2851 return -ETIMEDOUT;
2854 } 2852 }
2855 continue; 2853 continue;
2856 } 2854 }
2857 2855
2858 /* 2856 /*
2859 * We either have an Error or Initialized condition; 2857 * We either have an Error or Initialized condition;
2860 * report errors preferentially. 2858 * report errors preferentially.
2861 */ 2859 */
2862 if (state) { 2860 if (state) {
2863 if (pcie_fw & FW_PCIE_FW_ERR) 2861 if (pcie_fw & FW_PCIE_FW_ERR)
2864 *state = DEV_STATE_ERR; 2862 *state = DEV_STATE_ERR;
2865 else if (pcie_fw & FW_PCIE_FW_INIT) 2863 else if (pcie_fw & FW_PCIE_FW_INIT)
2866 *state = DEV_STATE_INIT; 2864 *state = DEV_STATE_INIT;
2867 } 2865 }
2868 2866
2869 /* 2867 /*
2870 * If the HELLO reply didn't identify a Master PF but 2868 * If the HELLO reply didn't identify a Master PF but
2871 * one has since become valid, grab its identity 2869 * one has since become valid, grab its identity
2872 * for our caller. 2870 * for our caller.
2873 */ 2871 */
2874 if (master_mbox == FW_PCIE_FW_MASTER_MASK && 2872 if (master_mbox == FW_PCIE_FW_MASTER_MASK &&
2875 (pcie_fw & FW_PCIE_FW_MASTER_VLD)) 2873 (pcie_fw & FW_PCIE_FW_MASTER_VLD))
2876 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw); 2874 master_mbox = FW_PCIE_FW_MASTER_GET(pcie_fw);
2877 break; 2875 break;
2878 } 2876 }
2879 } 2877 }
2880 2878
2881 return master_mbox; 2879 return master_mbox;
2882 } 2880 }
2883 2881
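For reference, here is a minimal sketch of how a PF driver might drive this negotiation at attach time. The helper is hypothetical; MASTER_MAY and the DEV_STATE_* values are the cxgb4.h enums used above, and adap->mbox / adap->pdev_dev are the usual adapter fields.

/* Hypothetical attach-time HELLO (sketch; error handling trimmed). */
static int example_attach_hello(struct adapter *adap)
{
        enum dev_state state;
        int master;

        master = t4_fw_hello(adap, adap->mbox, adap->mbox,
                             MASTER_MAY, &state);
        if (master < 0)
                return master;          /* the HELLO itself failed */
        if (state == DEV_STATE_ERR)
                return -ENXIO;          /* firmware is in an error state */

        if (master == adap->mbox)
                dev_info(adap->pdev_dev, "we are the Master PF\n");
        else
                dev_info(adap->pdev_dev,
                         "Master PF owns mailbox %d\n", master);
        return 0;
}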
2884 /** 2882 /**
2885 * t4_fw_bye - end communication with FW 2883 * t4_fw_bye - end communication with FW
2886 * @adap: the adapter 2884 * @adap: the adapter
2887 * @mbox: mailbox to use for the FW command 2885 * @mbox: mailbox to use for the FW command
2888 * 2886 *
2889 * Issues a command to terminate communication with FW. 2887 * Issues a command to terminate communication with FW.
2890 */ 2888 */
2891 int t4_fw_bye(struct adapter *adap, unsigned int mbox) 2889 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2892 { 2890 {
2893 struct fw_bye_cmd c; 2891 struct fw_bye_cmd c;
2894 2892
2895 memset(&c, 0, sizeof(c)); 2893 memset(&c, 0, sizeof(c));
2896 INIT_CMD(c, BYE, WRITE); 2894 INIT_CMD(c, BYE, WRITE);
2897 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2895 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2898 } 2896 }
2899 2897
2900 /** 2898 /**
2901 * t4_early_init - ask FW to initialize the device 2899 * t4_early_init - ask FW to initialize the device
2902 * @adap: the adapter 2900 * @adap: the adapter
2903 * @mbox: mailbox to use for the FW command 2901 * @mbox: mailbox to use for the FW command
2904 * 2902 *
2905 * Issues a command to FW to partially initialize the device. This 2903 * Issues a command to FW to partially initialize the device. This
2906 * performs initialization that generally doesn't depend on user input. 2904 * performs initialization that generally doesn't depend on user input.
2907 */ 2905 */
2908 int t4_early_init(struct adapter *adap, unsigned int mbox) 2906 int t4_early_init(struct adapter *adap, unsigned int mbox)
2909 { 2907 {
2910 struct fw_initialize_cmd c; 2908 struct fw_initialize_cmd c;
2911 2909
2912 memset(&c, 0, sizeof(c)); 2910 memset(&c, 0, sizeof(c));
2913 INIT_CMD(c, INITIALIZE, WRITE); 2911 INIT_CMD(c, INITIALIZE, WRITE);
2914 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2912 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2915 } 2913 }
2916 2914
2917 /** 2915 /**
2918 * t4_fw_reset - issue a reset to FW 2916 * t4_fw_reset - issue a reset to FW
2919 * @adap: the adapter 2917 * @adap: the adapter
2920 * @mbox: mailbox to use for the FW command 2918 * @mbox: mailbox to use for the FW command
2921 * @reset: specifies the type of reset to perform 2919 * @reset: specifies the type of reset to perform
2922 * 2920 *
2923 * Issues a reset command of the specified type to FW. 2921 * Issues a reset command of the specified type to FW.
2924 */ 2922 */
2925 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset) 2923 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2926 { 2924 {
2927 struct fw_reset_cmd c; 2925 struct fw_reset_cmd c;
2928 2926
2929 memset(&c, 0, sizeof(c)); 2927 memset(&c, 0, sizeof(c));
2930 INIT_CMD(c, RESET, WRITE); 2928 INIT_CMD(c, RESET, WRITE);
2931 c.val = htonl(reset); 2929 c.val = htonl(reset);
2932 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2930 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2933 } 2931 }
2934 2932
2935 /** 2933 /**
2936 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET 2934 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
2937 * @adap: the adapter 2935 * @adap: the adapter
2938 * @mbox: mailbox to use for the FW RESET command (if desired) 2936 * @mbox: mailbox to use for the FW RESET command (if desired)
2939 * @force: force uP into RESET even if FW RESET command fails 2937 * @force: force uP into RESET even if FW RESET command fails
2940 * 2938 *
2941 * Issues a RESET command to firmware (if desired) with a HALT indication 2939 * Issues a RESET command to firmware (if desired) with a HALT indication
2942 * and then puts the microprocessor into RESET state. The RESET command 2940 * and then puts the microprocessor into RESET state. The RESET command
2943 * will only be issued if a legitimate mailbox is provided (mbox <= 2941 * will only be issued if a legitimate mailbox is provided (mbox <=
2944 * FW_PCIE_FW_MASTER_MASK). 2942 * FW_PCIE_FW_MASTER_MASK).
2945 * 2943 *
2946 * This is generally used in order for the host to safely manipulate the 2944 * This is generally used in order for the host to safely manipulate the
2947 * adapter without fear of conflicting with whatever the firmware might 2945 * adapter without fear of conflicting with whatever the firmware might
2948 * be doing. The only way out of this state is to RESTART the firmware 2946 * be doing. The only way out of this state is to RESTART the firmware
2949 * ... 2947 * ...
2950 */ 2948 */
2951 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force) 2949 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
2952 { 2950 {
2953 int ret = 0; 2951 int ret = 0;
2954 2952
2955 /* 2953 /*
2956 * If a legitimate mailbox is provided, issue a RESET command 2954 * If a legitimate mailbox is provided, issue a RESET command
2957 * with a HALT indication. 2955 * with a HALT indication.
2958 */ 2956 */
2959 if (mbox <= FW_PCIE_FW_MASTER_MASK) { 2957 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
2960 struct fw_reset_cmd c; 2958 struct fw_reset_cmd c;
2961 2959
2962 memset(&c, 0, sizeof(c)); 2960 memset(&c, 0, sizeof(c));
2963 INIT_CMD(c, RESET, WRITE); 2961 INIT_CMD(c, RESET, WRITE);
2964 c.val = htonl(PIORST | PIORSTMODE); 2962 c.val = htonl(PIORST | PIORSTMODE);
2965 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U)); 2963 c.halt_pkd = htonl(FW_RESET_CMD_HALT(1U));
2966 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 2964 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2967 } 2965 }
2968 2966
2969 /* 2967 /*
2970 * Normally we won't complete the operation if the firmware RESET 2968 * Normally we won't complete the operation if the firmware RESET
2971 * command fails but if our caller insists we'll go ahead and put the 2969 * command fails but if our caller insists we'll go ahead and put the
2972 * uP into RESET. This can be useful if the firmware is hung or even 2970 * uP into RESET. This can be useful if the firmware is hung or even
2973 * missing ... We'll have to take the risk of putting the uP into 2971 * missing ... We'll have to take the risk of putting the uP into
2974 * RESET without the cooperation of firmware in that case. 2972 * RESET without the cooperation of firmware in that case.
2975 * 2973 *
2976 * We also force the firmware's HALT flag to be on in case we bypassed 2974 * We also force the firmware's HALT flag to be on in case we bypassed
2977 * the firmware RESET command above or we're dealing with old firmware 2975 * the firmware RESET command above or we're dealing with old firmware
2978 * which doesn't have the HALT capability. This will serve as a flag 2976 * which doesn't have the HALT capability. This will serve as a flag
2979 * for the incoming firmware to know that it's coming out of a HALT 2977 * for the incoming firmware to know that it's coming out of a HALT
2980 * rather than a RESET ... if it's new enough to understand that ... 2978 * rather than a RESET ... if it's new enough to understand that ...
2981 */ 2979 */
2982 if (ret == 0 || force) { 2980 if (ret == 0 || force) {
2983 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST); 2981 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, UPCRST);
2984 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 2982 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT,
2985 FW_PCIE_FW_HALT); 2983 FW_PCIE_FW_HALT);
2986 } 2984 }
2987 2985
2988 /* 2986 /*
2989 * And we always return the result of the firmware RESET command 2987 * And we always return the result of the firmware RESET command
2990 * even when we force the uP into RESET ... 2988 * even when we force the uP into RESET ...
2991 */ 2989 */
2992 return ret; 2990 return ret;
2993 } 2991 }
2994 2992
2995 /** 2993 /**
2996 * t4_fw_restart - restart the firmware by taking the uP out of RESET 2994 * t4_fw_restart - restart the firmware by taking the uP out of RESET
2997 * @adap: the adapter 2995 * @adap: the adapter
2998 * @reset: if we want to do a RESET to restart things 2996 * @reset: if we want to do a RESET to restart things
2999 * 2997 *
3000 * Restart firmware previously halted by t4_fw_halt(). On successful 2998 * Restart firmware previously halted by t4_fw_halt(). On successful
3001 * return the previous PF Master remains as the new PF Master and there 2999 * return the previous PF Master remains as the new PF Master and there
3002 * is no need to issue a new HELLO command, etc. 3000 * is no need to issue a new HELLO command, etc.
3003 * 3001 *
3004 * We do this in two ways: 3002 * We do this in two ways:
3005 * 3003 *
3006 * 1. If we're dealing with newer firmware we'll simply want to take 3004 * 1. If we're dealing with newer firmware we'll simply want to take
3007 * the chip's microprocessor out of RESET. This will cause the 3005 * the chip's microprocessor out of RESET. This will cause the
3008 * firmware to start up from its start vector. And then we'll loop 3006 * firmware to start up from its start vector. And then we'll loop
3009 * until the firmware indicates it's started again (PCIE_FW.HALT 3007 * until the firmware indicates it's started again (PCIE_FW.HALT
3010 * reset to 0) or we time out. 3008 * reset to 0) or we time out.
3011 * 3009 *
3012 * 2. If we're dealing with older firmware then we'll need to RESET 3010 * 2. If we're dealing with older firmware then we'll need to RESET
3013 * the chip since older firmware won't recognize the PCIE_FW.HALT 3011 * the chip since older firmware won't recognize the PCIE_FW.HALT
3014 * flag and automatically RESET itself on startup. 3012 * flag and automatically RESET itself on startup.
3015 */ 3013 */
3016 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset) 3014 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
3017 { 3015 {
3018 if (reset) { 3016 if (reset) {
3019 /* 3017 /*
3020 * Since we're directing the RESET instead of the firmware 3018 * Since we're directing the RESET instead of the firmware
3021 * doing it automatically, we need to clear the PCIE_FW.HALT 3019 * doing it automatically, we need to clear the PCIE_FW.HALT
3022 * bit. 3020 * bit.
3023 */ 3021 */
3024 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0); 3022 t4_set_reg_field(adap, PCIE_FW, FW_PCIE_FW_HALT, 0);
3025 3023
3026 /* 3024 /*
3027 * If we've been given a valid mailbox, first try to get the 3025 * If we've been given a valid mailbox, first try to get the
3028 * firmware to do the RESET. If that works, great and we can 3026 * firmware to do the RESET. If that works, great and we can
3029 * return success. Otherwise, if we haven't been given a 3027 * return success. Otherwise, if we haven't been given a
3030 * valid mailbox or the RESET command failed, fall back to 3028 * valid mailbox or the RESET command failed, fall back to
3031 * hitting the chip with a hammer. 3029 * hitting the chip with a hammer.
3032 */ 3030 */
3033 if (mbox <= FW_PCIE_FW_MASTER_MASK) { 3031 if (mbox <= FW_PCIE_FW_MASTER_MASK) {
3034 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 3032 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3035 msleep(100); 3033 msleep(100);
3036 if (t4_fw_reset(adap, mbox, 3034 if (t4_fw_reset(adap, mbox,
3037 PIORST | PIORSTMODE) == 0) 3035 PIORST | PIORSTMODE) == 0)
3038 return 0; 3036 return 0;
3039 } 3037 }
3040 3038
3041 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE); 3039 t4_write_reg(adap, PL_RST, PIORST | PIORSTMODE);
3042 msleep(2000); 3040 msleep(2000);
3043 } else { 3041 } else {
3044 int ms; 3042 int ms;
3045 3043
3046 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0); 3044 t4_set_reg_field(adap, CIM_BOOT_CFG, UPCRST, 0);
3047 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) { 3045 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
3048 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT)) 3046 if (!(t4_read_reg(adap, PCIE_FW) & FW_PCIE_FW_HALT))
3049 return 0; 3047 return 0;
3050 msleep(100); 3048 msleep(100);
3051 ms += 100; 3049 ms += 100;
3052 } 3050 }
3053 return -ETIMEDOUT; 3051 return -ETIMEDOUT;
3054 } 3052 }
3055 return 0; 3053 return 0;
3056 } 3054 }
3057 3055
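Both helpers are static, so this pairing only ever appears inside t4_hw.c; the intended shape is a HALT ... RESTART bracket around host-driven manipulation, which is essentially what t4_fw_upgrade() below does around its FLASH write. A sketch, with the surrounding context assumed:

/* Sketch: hold the uP in RESET while the host pokes the adapter. */
int ret = t4_fw_halt(adap, adap->mbox, 0 /* don't force */);
if (ret < 0)
        return ret;
/* ... manipulate the adapter safely here ... */
ret = t4_fw_restart(adap, adap->mbox, 1 /* RESET to restart */);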
3058 /** 3056 /**
3059 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW 3057 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
3060 * @adap: the adapter 3058 * @adap: the adapter
3061 * @mbox: mailbox to use for the FW RESET command (if desired) 3059 * @mbox: mailbox to use for the FW RESET command (if desired)
3062 * @fw_data: the firmware image to write 3060 * @fw_data: the firmware image to write
3063 * @size: image size 3061 * @size: image size
3064 * @force: force upgrade even if firmware doesn't cooperate 3062 * @force: force upgrade even if firmware doesn't cooperate
3065 * 3063 *
3066 * Perform all of the steps necessary for upgrading an adapter's 3064 * Perform all of the steps necessary for upgrading an adapter's
3067 * firmware image. Normally this requires the cooperation of the 3065 * firmware image. Normally this requires the cooperation of the
3068 * existing firmware in order to halt all existing activities 3066 * existing firmware in order to halt all existing activities
3069 * but if an invalid mailbox token is passed in we skip that step 3067 * but if an invalid mailbox token is passed in we skip that step
3070 * (though we'll still put the adapter microprocessor into RESET in 3068 * (though we'll still put the adapter microprocessor into RESET in
3071 * that case). 3069 * that case).
3072 * 3070 *
3073 * On successful return the new firmware will have been loaded and 3071 * On successful return the new firmware will have been loaded and
3074 * the adapter will have been fully RESET losing all previous setup 3072 * the adapter will have been fully RESET losing all previous setup
3075 * state. On unsuccessful return the adapter may be completely hosed ... 3073 * state. On unsuccessful return the adapter may be completely hosed ...
3076 * positive errno indicates that the adapter is ~probably~ intact, a 3074 * positive errno indicates that the adapter is ~probably~ intact, a
3077 * negative errno indicates that things are looking bad ... 3075 * negative errno indicates that things are looking bad ...
3078 */ 3076 */
3079 static int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, 3077 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
3080 const u8 *fw_data, unsigned int size, int force) 3078 const u8 *fw_data, unsigned int size, int force)
3081 { 3079 {
3082 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data; 3080 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
3083 int reset, ret; 3081 int reset, ret;
3084 3082
3085 ret = t4_fw_halt(adap, mbox, force); 3083 ret = t4_fw_halt(adap, mbox, force);
3086 if (ret < 0 && !force) 3084 if (ret < 0 && !force)
3087 return ret; 3085 return ret;
3088 3086
3089 ret = t4_load_fw(adap, fw_data, size); 3087 ret = t4_load_fw(adap, fw_data, size);
3090 if (ret < 0) 3088 if (ret < 0)
3091 return ret; 3089 return ret;
3092 3090
3093 /* 3091 /*
3094 * Older versions of the firmware don't understand the new 3092 * Older versions of the firmware don't understand the new
3095 * PCIE_FW.HALT flag and so won't know to perform a RESET when they 3093 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
3096 * restart. So for newly loaded older firmware we'll have to do the 3094 * restart. So for newly loaded older firmware we'll have to do the
3097 * RESET for it so it starts up on a clean slate. We can tell if 3095 * RESET for it so it starts up on a clean slate. We can tell if
3098 * the newly loaded firmware will handle this right by checking 3096 * the newly loaded firmware will handle this right by checking
3099 * its header flags to see if it advertises the capability. 3097 * its header flags to see if it advertises the capability.
3100 */ 3098 */
3101 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); 3099 reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
3102 return t4_fw_restart(adap, mbox, reset); 3100 return t4_fw_restart(adap, mbox, reset);
3103 } 3101 }
3104 3102
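This is the function the commit exports (note the dropped static qualifier in the hunk above) so that the ethtool FLASH path can use it in place of a bare t4_load_fw(). Below is a simplified sketch of the resulting set_flash(); the real cxgb4_main.c hunk is not shown here and may differ in detail, including how the force flag and the mailbox are chosen:

/* Sketch: ethtool ->flash_device after this change. */
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
        const struct firmware *fw;
        struct adapter *adap = netdev2adap(netdev);
        int ret;

        ef->data[sizeof(ef->data) - 1] = '\0';
        ret = request_firmware(&fw, ef->data, adap->pdev_dev);
        if (ret < 0)
                return ret;

        /* Co-ordinate with the running firmware (HALT, load, RESTART)
         * instead of writing FLASH behind its back.
         */
        ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
                            1 /* force */);
        release_firmware(fw);
        if (!ret)
                dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
        return ret;
}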
3105 /** 3103 /**
3106 * t4_fixup_host_params - fix up host-dependent parameters 3104 * t4_fixup_host_params - fix up host-dependent parameters
3107 * @adap: the adapter 3105 * @adap: the adapter
3108 * @page_size: the host's Base Page Size 3106 * @page_size: the host's Base Page Size
3109 * @cache_line_size: the host's Cache Line Size 3107 * @cache_line_size: the host's Cache Line Size
3110 * 3108 *
3111 * Various registers in T4 contain values which are dependent on the 3109 * Various registers in T4 contain values which are dependent on the
3112 * host's Base Page and Cache Line Sizes. This function will fix all of 3110 * host's Base Page and Cache Line Sizes. This function will fix all of
3113 * those registers with the appropriate values as passed in ... 3111 * those registers with the appropriate values as passed in ...
3114 */ 3112 */
3115 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, 3113 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3116 unsigned int cache_line_size) 3114 unsigned int cache_line_size)
3117 { 3115 {
3118 unsigned int page_shift = fls(page_size) - 1; 3116 unsigned int page_shift = fls(page_size) - 1;
3119 unsigned int sge_hps = page_shift - 10; 3117 unsigned int sge_hps = page_shift - 10;
3120 unsigned int stat_len = cache_line_size > 64 ? 128 : 64; 3118 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
3121 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; 3119 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
3122 unsigned int fl_align_log = fls(fl_align) - 1; 3120 unsigned int fl_align_log = fls(fl_align) - 1;
3123 3121
3124 t4_write_reg(adap, SGE_HOST_PAGE_SIZE, 3122 t4_write_reg(adap, SGE_HOST_PAGE_SIZE,
3125 HOSTPAGESIZEPF0(sge_hps) | 3123 HOSTPAGESIZEPF0(sge_hps) |
3126 HOSTPAGESIZEPF1(sge_hps) | 3124 HOSTPAGESIZEPF1(sge_hps) |
3127 HOSTPAGESIZEPF2(sge_hps) | 3125 HOSTPAGESIZEPF2(sge_hps) |
3128 HOSTPAGESIZEPF3(sge_hps) | 3126 HOSTPAGESIZEPF3(sge_hps) |
3129 HOSTPAGESIZEPF4(sge_hps) | 3127 HOSTPAGESIZEPF4(sge_hps) |
3130 HOSTPAGESIZEPF5(sge_hps) | 3128 HOSTPAGESIZEPF5(sge_hps) |
3131 HOSTPAGESIZEPF6(sge_hps) | 3129 HOSTPAGESIZEPF6(sge_hps) |
3132 HOSTPAGESIZEPF7(sge_hps)); 3130 HOSTPAGESIZEPF7(sge_hps));
3133 3131
3134 t4_set_reg_field(adap, SGE_CONTROL, 3132 t4_set_reg_field(adap, SGE_CONTROL,
3135 INGPADBOUNDARY_MASK | 3133 INGPADBOUNDARY_MASK |
3136 EGRSTATUSPAGESIZE_MASK, 3134 EGRSTATUSPAGESIZE_MASK,
3137 INGPADBOUNDARY(fl_align_log - 5) | 3135 INGPADBOUNDARY(fl_align_log - 5) |
3138 EGRSTATUSPAGESIZE(stat_len != 64)); 3136 EGRSTATUSPAGESIZE(stat_len != 64));
3139 3137
3140 /* 3138 /*
3141 * Adjust various SGE Free List Host Buffer Sizes. 3139 * Adjust various SGE Free List Host Buffer Sizes.
3142 * 3140 *
3143 * This is something of a crock since we're using fixed indices into 3141 * This is something of a crock since we're using fixed indices into
3144 * the array which are also known by the sge.c code and the T4 3142 * the array which are also known by the sge.c code and the T4
3145 * Firmware Configuration File. We need to come up with a much better 3143 * Firmware Configuration File. We need to come up with a much better
3146 * approach to managing this array. For now, the first four entries 3144 * approach to managing this array. For now, the first four entries
3147 * are: 3145 * are:
3148 * 3146 *
3149 * 0: Host Page Size 3147 * 0: Host Page Size
3150 * 1: 64KB 3148 * 1: 64KB
3151 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode) 3149 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
3152 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode) 3150 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
3153 * 3151 *
3154 * For the single-MTU buffers in unpacked mode we need to include 3152 * For the single-MTU buffers in unpacked mode we need to include
3155 * space for the SGE Control Packet Shift, 14 byte Ethernet header, 3153 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
3156 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet 3154 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
3157 * Padding boundary. All of these are accommodated in the Factory 3155 * Padding boundary. All of these are accommodated in the Factory
3158 * Default Firmware Configuration File but we need to adjust it for 3156 * Default Firmware Configuration File but we need to adjust it for
3159 * this host's cache line size. 3157 * this host's cache line size.
3160 */ 3158 */
3161 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size); 3159 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, page_size);
3162 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2, 3160 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2,
3163 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1) 3161 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2) + fl_align-1)
3164 & ~(fl_align-1)); 3162 & ~(fl_align-1));
3165 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3, 3163 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3,
3166 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1) 3164 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3) + fl_align-1)
3167 & ~(fl_align-1)); 3165 & ~(fl_align-1));
3168 3166
3169 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12)); 3167 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(page_shift - 12));
3170 3168
3171 return 0; 3169 return 0;
3172 } 3170 }
3173 3171
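In practice the two host-dependent inputs are simply the kernel's page and L1 cache-line sizes:

/* Typical call during PF initialization. */
int ret = t4_fixup_host_params(adap, PAGE_SIZE, L1_CACHE_BYTES);

With a 64-byte cache line this leaves the egress status page at 64 bytes and rounds the free-list buffer sizes up to a 64-byte boundary, per the code above.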
3174 /** 3172 /**
3175 * t4_fw_initialize - ask FW to initialize the device 3173 * t4_fw_initialize - ask FW to initialize the device
3176 * @adap: the adapter 3174 * @adap: the adapter
3177 * @mbox: mailbox to use for the FW command 3175 * @mbox: mailbox to use for the FW command
3178 * 3176 *
3179 * Issues a command to FW to partially initialize the device. This 3177 * Issues a command to FW to partially initialize the device. This
3180 * performs initialization that generally doesn't depend on user input. 3178 * performs initialization that generally doesn't depend on user input.
3181 */ 3179 */
3182 int t4_fw_initialize(struct adapter *adap, unsigned int mbox) 3180 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
3183 { 3181 {
3184 struct fw_initialize_cmd c; 3182 struct fw_initialize_cmd c;
3185 3183
3186 memset(&c, 0, sizeof(c)); 3184 memset(&c, 0, sizeof(c));
3187 INIT_CMD(c, INITIALIZE, WRITE); 3185 INIT_CMD(c, INITIALIZE, WRITE);
3188 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3186 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3189 } 3187 }
3190 3188
3191 /** 3189 /**
3192 * t4_query_params - query FW or device parameters 3190 * t4_query_params - query FW or device parameters
3193 * @adap: the adapter 3191 * @adap: the adapter
3194 * @mbox: mailbox to use for the FW command 3192 * @mbox: mailbox to use for the FW command
3195 * @pf: the PF 3193 * @pf: the PF
3196 * @vf: the VF 3194 * @vf: the VF
3197 * @nparams: the number of parameters 3195 * @nparams: the number of parameters
3198 * @params: the parameter names 3196 * @params: the parameter names
3199 * @val: the parameter values 3197 * @val: the parameter values
3200 * 3198 *
3201 * Reads the value of FW or device parameters. Up to 7 parameters can be 3199 * Reads the value of FW or device parameters. Up to 7 parameters can be
3202 * queried at once. 3200 * queried at once.
3203 */ 3201 */
3204 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3202 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3205 unsigned int vf, unsigned int nparams, const u32 *params, 3203 unsigned int vf, unsigned int nparams, const u32 *params,
3206 u32 *val) 3204 u32 *val)
3207 { 3205 {
3208 int i, ret; 3206 int i, ret;
3209 struct fw_params_cmd c; 3207 struct fw_params_cmd c;
3210 __be32 *p = &c.param[0].mnem; 3208 __be32 *p = &c.param[0].mnem;
3211 3209
3212 if (nparams > 7) 3210 if (nparams > 7)
3213 return -EINVAL; 3211 return -EINVAL;
3214 3212
3215 memset(&c, 0, sizeof(c)); 3213 memset(&c, 0, sizeof(c));
3216 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | 3214 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3217 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) | 3215 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
3218 FW_PARAMS_CMD_VFN(vf)); 3216 FW_PARAMS_CMD_VFN(vf));
3219 c.retval_len16 = htonl(FW_LEN16(c)); 3217 c.retval_len16 = htonl(FW_LEN16(c));
3220 for (i = 0; i < nparams; i++, p += 2) 3218 for (i = 0; i < nparams; i++, p += 2)
3221 *p = htonl(*params++); 3219 *p = htonl(*params++);
3222 3220
3223 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3221 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3224 if (ret == 0) 3222 if (ret == 0)
3225 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2) 3223 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
3226 *val++ = ntohl(*p); 3224 *val++ = ntohl(*p);
3227 return ret; 3225 return ret;
3228 } 3226 }
3229 3227
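As a usage example, reading the firmware-revision device parameter. The FW_PARAMS_* encoding macros come from t4fw_api.h; treat their exact spelling here as an assumption to be checked against that header:

/* Sketch: query a single device parameter -- the firmware revision. */
u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
            FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWREV);
u32 val;
int ret;

ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &param, &val);
if (ret == 0)
        dev_info(adap->pdev_dev, "firmware rev 0x%08x\n", val);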
3230 /** 3228 /**
3231 * t4_set_params_nosleep - sets FW or device parameters 3229 * t4_set_params_nosleep - sets FW or device parameters
3232 * @adap: the adapter 3230 * @adap: the adapter
3233 * @mbox: mailbox to use for the FW command 3231 * @mbox: mailbox to use for the FW command
3234 * @pf: the PF 3232 * @pf: the PF
3235 * @vf: the VF 3233 * @vf: the VF
3236 * @nparams: the number of parameters 3234 * @nparams: the number of parameters
3237 * @params: the parameter names 3235 * @params: the parameter names
3238 * @val: the parameter values 3236 * @val: the parameter values
3239 * 3237 *
3240 * Never sleeps. 3238 * Never sleeps.
3241 * Sets the value of FW or device parameters. Up to 7 parameters can be 3239 * Sets the value of FW or device parameters. Up to 7 parameters can be
3242 * specified at once. 3240 * specified at once.
3243 */ 3241 */
3244 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, 3242 int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
3245 unsigned int pf, unsigned int vf, 3243 unsigned int pf, unsigned int vf,
3246 unsigned int nparams, const u32 *params, 3244 unsigned int nparams, const u32 *params,
3247 const u32 *val) 3245 const u32 *val)
3248 { 3246 {
3249 struct fw_params_cmd c; 3247 struct fw_params_cmd c;
3250 __be32 *p = &c.param[0].mnem; 3248 __be32 *p = &c.param[0].mnem;
3251 3249
3252 if (nparams > 7) 3250 if (nparams > 7)
3253 return -EINVAL; 3251 return -EINVAL;
3254 3252
3255 memset(&c, 0, sizeof(c)); 3253 memset(&c, 0, sizeof(c));
3256 c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) | 3254 c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
3257 FW_CMD_REQUEST | FW_CMD_WRITE | 3255 FW_CMD_REQUEST | FW_CMD_WRITE |
3258 FW_PARAMS_CMD_PFN(pf) | 3256 FW_PARAMS_CMD_PFN(pf) |
3259 FW_PARAMS_CMD_VFN(vf)); 3257 FW_PARAMS_CMD_VFN(vf));
3260 c.retval_len16 = cpu_to_be32(FW_LEN16(c)); 3258 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3261 3259
3262 while (nparams--) { 3260 while (nparams--) {
3263 *p++ = cpu_to_be32(*params++); 3261 *p++ = cpu_to_be32(*params++);
3264 *p++ = cpu_to_be32(*val++); 3262 *p++ = cpu_to_be32(*val++);
3265 } 3263 }
3266 3264
3267 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 3265 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3268 } 3266 }
3269 3267
3270 /** 3268 /**
3271 * t4_set_params - sets FW or device parameters 3269 * t4_set_params - sets FW or device parameters
3272 * @adap: the adapter 3270 * @adap: the adapter
3273 * @mbox: mailbox to use for the FW command 3271 * @mbox: mailbox to use for the FW command
3274 * @pf: the PF 3272 * @pf: the PF
3275 * @vf: the VF 3273 * @vf: the VF
3276 * @nparams: the number of parameters 3274 * @nparams: the number of parameters
3277 * @params: the parameter names 3275 * @params: the parameter names
3278 * @val: the parameter values 3276 * @val: the parameter values
3279 * 3277 *
3280 * Sets the value of FW or device parameters. Up to 7 parameters can be 3278 * Sets the value of FW or device parameters. Up to 7 parameters can be
3281 * specified at once. 3279 * specified at once.
3282 */ 3280 */
3283 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, 3281 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
3284 unsigned int vf, unsigned int nparams, const u32 *params, 3282 unsigned int vf, unsigned int nparams, const u32 *params,
3285 const u32 *val) 3283 const u32 *val)
3286 { 3284 {
3287 struct fw_params_cmd c; 3285 struct fw_params_cmd c;
3288 __be32 *p = &c.param[0].mnem; 3286 __be32 *p = &c.param[0].mnem;
3289 3287
3290 if (nparams > 7) 3288 if (nparams > 7)
3291 return -EINVAL; 3289 return -EINVAL;
3292 3290
3293 memset(&c, 0, sizeof(c)); 3291 memset(&c, 0, sizeof(c));
3294 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST | 3292 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
3295 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) | 3293 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
3296 FW_PARAMS_CMD_VFN(vf)); 3294 FW_PARAMS_CMD_VFN(vf));
3297 c.retval_len16 = htonl(FW_LEN16(c)); 3295 c.retval_len16 = htonl(FW_LEN16(c));
3298 while (nparams--) { 3296 while (nparams--) {
3299 *p++ = htonl(*params++); 3297 *p++ = htonl(*params++);
3300 *p++ = htonl(*val++); 3298 *p++ = htonl(*val++);
3301 } 3299 }
3302 3300
3303 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3301 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3304 } 3302 }
3305 3303
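The write-side twin takes the same encoded names plus a parallel array of values. A sketch; the zeroed names are placeholders, not real parameters:

/* Sketch: set two parameters in one mailbox command. */
u32 params[2] = { 0, 0 };  /* FW_PARAMS_* encoded names (placeholders) */
u32 vals[2]   = { 0, 0 };  /* matching new values */
int ret;

ret = t4_set_params(adap, adap->mbox, adap->fn, 0, 2, params, vals);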
3306 /** 3304 /**
3307 * t4_cfg_pfvf - configure PF/VF resource limits 3305 * t4_cfg_pfvf - configure PF/VF resource limits
3308 * @adap: the adapter 3306 * @adap: the adapter
3309 * @mbox: mailbox to use for the FW command 3307 * @mbox: mailbox to use for the FW command
3310 * @pf: the PF being configured 3308 * @pf: the PF being configured
3311 * @vf: the VF being configured 3309 * @vf: the VF being configured
3312 * @txq: the max number of egress queues 3310 * @txq: the max number of egress queues
3313 * @txq_eth_ctrl: the max number of egress Ethernet or control queues 3311 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
3314 * @rxqi: the max number of interrupt-capable ingress queues 3312 * @rxqi: the max number of interrupt-capable ingress queues
3315 * @rxq: the max number of interruptless ingress queues 3313 * @rxq: the max number of interruptless ingress queues
3316 * @tc: the PCI traffic class 3314 * @tc: the PCI traffic class
3317 * @vi: the max number of virtual interfaces 3315 * @vi: the max number of virtual interfaces
3318 * @cmask: the channel access rights mask for the PF/VF 3316 * @cmask: the channel access rights mask for the PF/VF
3319 * @pmask: the port access rights mask for the PF/VF 3317 * @pmask: the port access rights mask for the PF/VF
3320 * @nexact: the maximum number of exact MPS filters 3318 * @nexact: the maximum number of exact MPS filters
3321 * @rcaps: read capabilities 3319 * @rcaps: read capabilities
3322 * @wxcaps: write/execute capabilities 3320 * @wxcaps: write/execute capabilities
3323 * 3321 *
3324 * Configures resource limits and capabilities for a physical or virtual 3322 * Configures resource limits and capabilities for a physical or virtual
3325 * function. 3323 * function.
3326 */ 3324 */
3327 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, 3325 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
3328 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl, 3326 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
3329 unsigned int rxqi, unsigned int rxq, unsigned int tc, 3327 unsigned int rxqi, unsigned int rxq, unsigned int tc,
3330 unsigned int vi, unsigned int cmask, unsigned int pmask, 3328 unsigned int vi, unsigned int cmask, unsigned int pmask,
3331 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps) 3329 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
3332 { 3330 {
3333 struct fw_pfvf_cmd c; 3331 struct fw_pfvf_cmd c;
3334 3332
3335 memset(&c, 0, sizeof(c)); 3333 memset(&c, 0, sizeof(c));
3336 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST | 3334 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
3337 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) | 3335 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
3338 FW_PFVF_CMD_VFN(vf)); 3336 FW_PFVF_CMD_VFN(vf));
3339 c.retval_len16 = htonl(FW_LEN16(c)); 3337 c.retval_len16 = htonl(FW_LEN16(c));
3340 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) | 3338 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
3341 FW_PFVF_CMD_NIQ(rxq)); 3339 FW_PFVF_CMD_NIQ(rxq));
3342 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) | 3340 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
3343 FW_PFVF_CMD_PMASK(pmask) | 3341 FW_PFVF_CMD_PMASK(pmask) |
3344 FW_PFVF_CMD_NEQ(txq)); 3342 FW_PFVF_CMD_NEQ(txq));
3345 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) | 3343 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
3346 FW_PFVF_CMD_NEXACTF(nexact)); 3344 FW_PFVF_CMD_NEXACTF(nexact));
3347 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) | 3345 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
3348 FW_PFVF_CMD_WX_CAPS(wxcaps) | 3346 FW_PFVF_CMD_WX_CAPS(wxcaps) |
3349 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl)); 3347 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
3350 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 3348 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
3351 } 3349 }
3352 3350
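A hedged sketch of provisioning a PF's own resources; the numeric limits are illustrative only, and FW_CMD_CAP_PF is assumed to be the PF capability mask from t4fw_api.h:

/* Sketch: configure PF adap->fn itself (vf == 0) with example limits. */
int ret = t4_cfg_pfvf(adap, adap->mbox, adap->fn, 0,
                      64,   /* txq */
                      68,   /* txq_eth_ctrl */
                      64,   /* rxqi */
                      64,   /* rxq */
                      0,    /* tc */
                      4,    /* vi */
                      0xf,  /* cmask: all channels */
                      0xf,  /* pmask: all ports */
                      16,   /* nexact */
                      FW_CMD_CAP_PF, FW_CMD_CAP_PF);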
3353 /** 3351 /**
3354 * t4_alloc_vi - allocate a virtual interface 3352 * t4_alloc_vi - allocate a virtual interface
3355 * @adap: the adapter 3353 * @adap: the adapter
3356 * @mbox: mailbox to use for the FW command 3354 * @mbox: mailbox to use for the FW command
3357 * @port: physical port associated with the VI 3355 * @port: physical port associated with the VI
3358 * @pf: the PF owning the VI 3356 * @pf: the PF owning the VI
3359 * @vf: the VF owning the VI 3357 * @vf: the VF owning the VI
3360 * @nmac: number of MAC addresses needed (1 to 5) 3358 * @nmac: number of MAC addresses needed (1 to 5)
3361 * @mac: the MAC addresses of the VI 3359 * @mac: the MAC addresses of the VI
3362 * @rss_size: size of RSS table slice associated with this VI 3360 * @rss_size: size of RSS table slice associated with this VI
3363 * 3361 *
3364 * Allocates a virtual interface for the given physical port. If @mac is 3362 * Allocates a virtual interface for the given physical port. If @mac is
3365 * not %NULL it contains the MAC addresses of the VI as assigned by FW. 3363 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
3366 * @mac should be large enough to hold @nmac Ethernet addresses, they are 3364 * @mac should be large enough to hold @nmac Ethernet addresses, they are
3367 * stored consecutively so the space needed is @nmac * 6 bytes. 3365 * stored consecutively so the space needed is @nmac * 6 bytes.
3368 * Returns a negative error number or the non-negative VI id. 3366 * Returns a negative error number or the non-negative VI id.
3369 */ 3367 */
3370 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, 3368 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
3371 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, 3369 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
3372 unsigned int *rss_size) 3370 unsigned int *rss_size)
3373 { 3371 {
3374 int ret; 3372 int ret;
3375 struct fw_vi_cmd c; 3373 struct fw_vi_cmd c;
3376 3374
3377 memset(&c, 0, sizeof(c)); 3375 memset(&c, 0, sizeof(c));
3378 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST | 3376 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
3379 FW_CMD_WRITE | FW_CMD_EXEC | 3377 FW_CMD_WRITE | FW_CMD_EXEC |
3380 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf)); 3378 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
3381 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c)); 3379 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
3382 c.portid_pkd = FW_VI_CMD_PORTID(port); 3380 c.portid_pkd = FW_VI_CMD_PORTID(port);
3383 c.nmac = nmac - 1; 3381 c.nmac = nmac - 1;
3384 3382
3385 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3383 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3386 if (ret) 3384 if (ret)
3387 return ret; 3385 return ret;
3388 3386
3389 if (mac) { 3387 if (mac) {
3390 memcpy(mac, c.mac, sizeof(c.mac)); 3388 memcpy(mac, c.mac, sizeof(c.mac));
3391 switch (nmac) { 3389 switch (nmac) {
3392 case 5: 3390 case 5:
3393 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); 3391 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
3394 case 4: 3392 case 4:
3395 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); 3393 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
3396 case 3: 3394 case 3:
3397 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); 3395 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
3398 case 2: 3396 case 2:
3399 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0)); 3397 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
3400 } 3398 }
3401 } 3399 }
3402 if (rss_size) 3400 if (rss_size)
3403 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd)); 3401 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
3404 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid)); 3402 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
3405 } 3403 }
3406 3404
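For example, allocating one VI on physical port 0 and retrieving its firmware-assigned MAC (sketch):

/* Sketch: allocate a VI with a single MAC address. */
u8 mac[ETH_ALEN];
unsigned int rss_size;
int viid;

viid = t4_alloc_vi(adap, adap->mbox, 0 /* port */, adap->fn, 0 /* vf */,
                   1 /* nmac */, mac, &rss_size);
if (viid < 0)
        return viid;    /* negative errno */
/* mac[] now holds the FW-assigned address; viid identifies the VI */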
3407 /** 3405 /**
3408 * t4_set_rxmode - set Rx properties of a virtual interface 3406 * t4_set_rxmode - set Rx properties of a virtual interface
3409 * @adap: the adapter 3407 * @adap: the adapter
3410 * @mbox: mailbox to use for the FW command 3408 * @mbox: mailbox to use for the FW command
3411 * @viid: the VI id 3409 * @viid: the VI id
3412 * @mtu: the new MTU or -1 3410 * @mtu: the new MTU or -1
3413 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 3411 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
3414 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 3412 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
3415 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 3413 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
3416 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change 3414 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
3417 * @sleep_ok: if true we may sleep while awaiting command completion 3415 * @sleep_ok: if true we may sleep while awaiting command completion
3418 * 3416 *
3419 * Sets Rx properties of a virtual interface. 3417 * Sets Rx properties of a virtual interface.
3420 */ 3418 */
3421 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 3419 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
3422 int mtu, int promisc, int all_multi, int bcast, int vlanex, 3420 int mtu, int promisc, int all_multi, int bcast, int vlanex,
3423 bool sleep_ok) 3421 bool sleep_ok)
3424 { 3422 {
3425 struct fw_vi_rxmode_cmd c; 3423 struct fw_vi_rxmode_cmd c;
3426 3424
3427 /* convert to FW values */ 3425 /* convert to FW values */
3428 if (mtu < 0) 3426 if (mtu < 0)
3429 mtu = FW_RXMODE_MTU_NO_CHG; 3427 mtu = FW_RXMODE_MTU_NO_CHG;
3430 if (promisc < 0) 3428 if (promisc < 0)
3431 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK; 3429 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
3432 if (all_multi < 0) 3430 if (all_multi < 0)
3433 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; 3431 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
3434 if (bcast < 0) 3432 if (bcast < 0)
3435 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; 3433 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
3436 if (vlanex < 0) 3434 if (vlanex < 0)
3437 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK; 3435 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
3438 3436
3439 memset(&c, 0, sizeof(c)); 3437 memset(&c, 0, sizeof(c));
3440 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | 3438 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
3441 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); 3439 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
3442 c.retval_len16 = htonl(FW_LEN16(c)); 3440 c.retval_len16 = htonl(FW_LEN16(c));
3443 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | 3441 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
3444 FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 3442 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
3445 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 3443 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
3446 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) | 3444 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
3447 FW_VI_RXMODE_CMD_VLANEXEN(vlanex)); 3445 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
3448 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 3446 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3449 } 3447 }
3450 3448
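The -1 "no change" convention makes single-knob updates cheap, e.g. toggling only promiscuous mode (sketch):

/* Sketch: enable promiscuous mode; leave every other knob alone. */
int ret = t4_set_rxmode(adap, adap->mbox, viid,
                        -1 /* mtu */, 1 /* promisc */, -1 /* all_multi */,
                        -1 /* bcast */, -1 /* vlanex */, true /* sleep_ok */);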
3451 /** 3449 /**
3452 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses 3450 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
3453 * @adap: the adapter 3451 * @adap: the adapter
3454 * @mbox: mailbox to use for the FW command 3452 * @mbox: mailbox to use for the FW command
3455 * @viid: the VI id 3453 * @viid: the VI id
3456 * @free: if true any existing filters for this VI id are first removed 3454 * @free: if true any existing filters for this VI id are first removed
3457 * @naddr: the number of MAC addresses to allocate filters for (up to 7) 3455 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
3458 * @addr: the MAC address(es) 3456 * @addr: the MAC address(es)
3459 * @idx: where to store the index of each allocated filter 3457 * @idx: where to store the index of each allocated filter
3460 * @hash: pointer to hash address filter bitmap 3458 * @hash: pointer to hash address filter bitmap
3461 * @sleep_ok: call is allowed to sleep 3459 * @sleep_ok: call is allowed to sleep
3462 * 3460 *
3463 * Allocates an exact-match filter for each of the supplied addresses and 3461 * Allocates an exact-match filter for each of the supplied addresses and
3464 * sets it to the corresponding address. If @idx is not %NULL it should 3462 * sets it to the corresponding address. If @idx is not %NULL it should
3465 * have at least @naddr entries, each of which will be set to the index of 3463 * have at least @naddr entries, each of which will be set to the index of
3466 * the filter allocated for the corresponding MAC address. If a filter 3464 * the filter allocated for the corresponding MAC address. If a filter
3467 * could not be allocated for an address its index is set to 0xffff. 3465 * could not be allocated for an address its index is set to 0xffff.
3468 * If @hash is not %NULL addresses that fail to allocate an exact filter 3466 * If @hash is not %NULL addresses that fail to allocate an exact filter
3469 * are hashed and used to update the hash filter bitmap pointed at by @hash. 3467 * are hashed and used to update the hash filter bitmap pointed at by @hash.
3470 * 3468 *
3471 * Returns a negative error number or the number of filters allocated. 3469 * Returns a negative error number or the number of filters allocated.
3472 */ 3470 */
3473 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 3471 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3474 unsigned int viid, bool free, unsigned int naddr, 3472 unsigned int viid, bool free, unsigned int naddr,
3475 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok) 3473 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
3476 { 3474 {
3477 int i, ret; 3475 int i, ret;
3478 struct fw_vi_mac_cmd c; 3476 struct fw_vi_mac_cmd c;
3479 struct fw_vi_mac_exact *p; 3477 struct fw_vi_mac_exact *p;
3480 unsigned int max_naddr = is_t4(adap->params.chip) ? 3478 unsigned int max_naddr = is_t4(adap->params.chip) ?
3481 NUM_MPS_CLS_SRAM_L_INSTANCES : 3479 NUM_MPS_CLS_SRAM_L_INSTANCES :
3482 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3480 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3483 3481
3484 if (naddr > 7) 3482 if (naddr > 7)
3485 return -EINVAL; 3483 return -EINVAL;
3486 3484
3487 memset(&c, 0, sizeof(c)); 3485 memset(&c, 0, sizeof(c));
3488 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | 3486 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3489 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) | 3487 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
3490 FW_VI_MAC_CMD_VIID(viid)); 3488 FW_VI_MAC_CMD_VIID(viid));
3491 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) | 3489 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
3492 FW_CMD_LEN16((naddr + 2) / 2)); 3490 FW_CMD_LEN16((naddr + 2) / 2));
3493 3491
3494 for (i = 0, p = c.u.exact; i < naddr; i++, p++) { 3492 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3495 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | 3493 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3496 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 3494 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
3497 memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); 3495 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
3498 } 3496 }
3499 3497
3500 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok); 3498 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
3501 if (ret) 3499 if (ret)
3502 return ret; 3500 return ret;
3503 3501
3504 for (i = 0, p = c.u.exact; i < naddr; i++, p++) { 3502 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
3505 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); 3503 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3506 3504
3507 if (idx) 3505 if (idx)
3508 idx[i] = index >= max_naddr ? 0xffff : index; 3506 idx[i] = index >= max_naddr ? 0xffff : index;
3509 if (index < max_naddr) 3507 if (index < max_naddr)
3510 ret++; 3508 ret++;
3511 else if (hash) 3509 else if (hash)
3512 *hash |= (1ULL << hash_mac_addr(addr[i])); 3510 *hash |= (1ULL << hash_mac_addr(addr[i]));
3513 } 3511 }
3514 return ret; 3512 return ret;
3515 } 3513 }
3516 3514
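A sketch of programming two unicast addresses, spilling into the inexact hash when the exact filters run out; addr2 is an illustrative address, not anything real:

/* Sketch: exact-match filters first, hash bitmap as the fallback. */
static const u8 addr2[ETH_ALEN] = { 0x00, 0x07, 0x43, 0x00, 0x00, 0x01 };
const u8 *addrs[2] = { dev->dev_addr, addr2 };
u16 idx[2];
u64 hash = 0;
int nfilters;

nfilters = t4_alloc_mac_filt(adap, adap->mbox, viid, false /* free */,
                             2, addrs, idx, &hash, true /* sleep_ok */);
/* nfilters = exact filters installed; hash covers the overflow */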
3517 /** 3515 /**
3518 * t4_change_mac - modifies the exact-match filter for a MAC address 3516 * t4_change_mac - modifies the exact-match filter for a MAC address
3519 * @adap: the adapter 3517 * @adap: the adapter
3520 * @mbox: mailbox to use for the FW command 3518 * @mbox: mailbox to use for the FW command
3521 * @viid: the VI id 3519 * @viid: the VI id
3522 * @idx: index of existing filter for old value of MAC address, or -1 3520 * @idx: index of existing filter for old value of MAC address, or -1
3523 * @addr: the new MAC address value 3521 * @addr: the new MAC address value
3524 * @persist: whether a new MAC allocation should be persistent 3522 * @persist: whether a new MAC allocation should be persistent
3525 * @add_smt: if true also add the address to the HW SMT 3523 * @add_smt: if true also add the address to the HW SMT
3526 * 3524 *
3527 * Modifies an exact-match filter and sets it to the new MAC address. 3525 * Modifies an exact-match filter and sets it to the new MAC address.
3528 * Note that in general it is not possible to modify the value of a given 3526 * Note that in general it is not possible to modify the value of a given
3529 * filter, so the generic way to modify an address filter is to free the one 3527 * filter, so the generic way to modify an address filter is to free the one
3530 * being used by the old address value and allocate a new filter for the 3528 * being used by the old address value and allocate a new filter for the
3531 * new address value. @idx can be -1 if the address is a new addition. 3529 * new address value. @idx can be -1 if the address is a new addition.
3532 * 3530 *
3533 * Returns a negative error number or the index of the filter with the new 3531 * Returns a negative error number or the index of the filter with the new
3534 * MAC value. 3532 * MAC value.
3535 */ 3533 */
3536 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, 3534 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3537 int idx, const u8 *addr, bool persist, bool add_smt) 3535 int idx, const u8 *addr, bool persist, bool add_smt)
3538 { 3536 {
3539 int ret, mode; 3537 int ret, mode;
3540 struct fw_vi_mac_cmd c; 3538 struct fw_vi_mac_cmd c;
3541 struct fw_vi_mac_exact *p = c.u.exact; 3539 struct fw_vi_mac_exact *p = c.u.exact;
3542 unsigned int max_mac_addr = is_t4(adap->params.chip) ? 3540 unsigned int max_mac_addr = is_t4(adap->params.chip) ?
3543 NUM_MPS_CLS_SRAM_L_INSTANCES : 3541 NUM_MPS_CLS_SRAM_L_INSTANCES :
3544 NUM_MPS_T5_CLS_SRAM_L_INSTANCES; 3542 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3545 3543
3546 if (idx < 0) /* new allocation */ 3544 if (idx < 0) /* new allocation */
3547 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 3545 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
3548 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; 3546 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
3549 3547
3550 memset(&c, 0, sizeof(c)); 3548 memset(&c, 0, sizeof(c));
3551 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | 3549 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3552 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid)); 3550 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
3553 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1)); 3551 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
3554 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID | 3552 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
3555 FW_VI_MAC_CMD_SMAC_RESULT(mode) | 3553 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
3556 FW_VI_MAC_CMD_IDX(idx)); 3554 FW_VI_MAC_CMD_IDX(idx));
3557 memcpy(p->macaddr, addr, sizeof(p->macaddr)); 3555 memcpy(p->macaddr, addr, sizeof(p->macaddr));
3558 3556
3559 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3557 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3560 if (ret == 0) { 3558 if (ret == 0) {
3561 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); 3559 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3562 if (ret >= max_mac_addr) 3560 if (ret >= max_mac_addr)
3563 ret = -ENOMEM; 3561 ret = -ENOMEM;
3564 } 3562 }
3565 return ret; 3563 return ret;
3566 } 3564 }
3567 3565
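This is how a station-address change is typically applied: pass the previously returned index so the old filter is replaced, or -1 for a fresh allocation. The xact_addr_filt field here is assumed to be the usual struct port_info bookkeeping slot (sketch):

/* Sketch: update the interface's station MAC. */
int ret = t4_change_mac(adap, adap->mbox, pi->viid, pi->xact_addr_filt,
                        dev->dev_addr, true /* persist */, true /* add_smt */);
if (ret >= 0)
        pi->xact_addr_filt = ret;   /* remember the new filter index */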
3568 /** 3566 /**
3569 * t4_set_addr_hash - program the MAC inexact-match hash filter 3567 * t4_set_addr_hash - program the MAC inexact-match hash filter
3570 * @adap: the adapter 3568 * @adap: the adapter
3571 * @mbox: mailbox to use for the FW command 3569 * @mbox: mailbox to use for the FW command
3572 * @viid: the VI id 3570 * @viid: the VI id
3573 * @ucast: whether the hash filter should also match unicast addresses 3571 * @ucast: whether the hash filter should also match unicast addresses
3574 * @vec: the value to be written to the hash filter 3572 * @vec: the value to be written to the hash filter
3575 * @sleep_ok: call is allowed to sleep 3573 * @sleep_ok: call is allowed to sleep
3576 * 3574 *
3577 * Sets the 64-bit inexact-match hash filter for a virtual interface. 3575 * Sets the 64-bit inexact-match hash filter for a virtual interface.
3578 */ 3576 */
3579 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, 3577 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
3580 bool ucast, u64 vec, bool sleep_ok) 3578 bool ucast, u64 vec, bool sleep_ok)
3581 { 3579 {
3582 struct fw_vi_mac_cmd c; 3580 struct fw_vi_mac_cmd c;
3583 3581
3584 memset(&c, 0, sizeof(c)); 3582 memset(&c, 0, sizeof(c));
3585 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST | 3583 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
3586 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid)); 3584 FW_CMD_WRITE | FW_VI_ENABLE_CMD_VIID(viid));
3587 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN | 3585 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
3588 FW_VI_MAC_CMD_HASHUNIEN(ucast) | 3586 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
3589 FW_CMD_LEN16(1)); 3587 FW_CMD_LEN16(1));
3590 c.u.hash.hashvec = cpu_to_be64(vec); 3588 c.u.hash.hashvec = cpu_to_be64(vec);
3591 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 3589 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
3592 } 3590 }
3593 3591
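For instance, clearing the whole 64-bit vector drops all inexact matches (sketch):

/* Sketch: clear the inexact-match hash filter for this VI. */
int ret = t4_set_addr_hash(adap, adap->mbox, viid, false /* ucast */,
                           0ULL, true /* sleep_ok */);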
3594 /** 3592 /**
3595 * t4_enable_vi_params - enable/disable a virtual interface 3593 * t4_enable_vi_params - enable/disable a virtual interface
3596 * @adap: the adapter 3594 * @adap: the adapter
3597 * @mbox: mailbox to use for the FW command 3595 * @mbox: mailbox to use for the FW command
3598 * @viid: the VI id 3596 * @viid: the VI id
3599 * @rx_en: 1=enable Rx, 0=disable Rx 3597 * @rx_en: 1=enable Rx, 0=disable Rx
3600 * @tx_en: 1=enable Tx, 0=disable Tx 3598 * @tx_en: 1=enable Tx, 0=disable Tx
3601 * @dcb_en: 1=enable delivery of Data Center Bridging messages. 3599 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
3602 * 3600 *
3603 * Enables/disables a virtual interface. Note that setting DCB Enable 3601 * Enables/disables a virtual interface. Note that setting DCB Enable
3604 * only makes sense when enabling a Virtual Interface ... 3602 * only makes sense when enabling a Virtual Interface ...
3605 */ 3603 */
3606 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, 3604 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
3607 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en) 3605 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
3608 { 3606 {
3609 struct fw_vi_enable_cmd c; 3607 struct fw_vi_enable_cmd c;
3610 3608
3611 memset(&c, 0, sizeof(c)); 3609 memset(&c, 0, sizeof(c));
3612 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST | 3610 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
3613 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid)); 3611 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
3614 3612
3615 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) | 3613 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
3616 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) | 3614 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
3617 FW_VI_ENABLE_CMD_DCB_INFO(dcb_en)); 3615 FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
3618 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL); 3616 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
3619 } 3617 }
3620 3618
3621 /** 3619 /**
3622 * t4_enable_vi - enable/disable a virtual interface 3620 * t4_enable_vi - enable/disable a virtual interface
3623 * @adap: the adapter 3621 * @adap: the adapter
3624 * @mbox: mailbox to use for the FW command 3622 * @mbox: mailbox to use for the FW command
3625 * @viid: the VI id 3623 * @viid: the VI id
3626 * @rx_en: 1=enable Rx, 0=disable Rx 3624 * @rx_en: 1=enable Rx, 0=disable Rx
3627 * @tx_en: 1=enable Tx, 0=disable Tx 3625 * @tx_en: 1=enable Tx, 0=disable Tx
3628 * 3626 *
3629 * Enables/disables a virtual interface. 3627 * Enables/disables a virtual interface.
3630 */ 3628 */
3631 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid, 3629 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
3632 bool rx_en, bool tx_en) 3630 bool rx_en, bool tx_en)
3633 { 3631 {
3634 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0); 3632 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
3635 } 3633 }
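
/*
 * Illustrative usage sketch (not part of this commit): bringing a port up
 * amounts to enabling both directions on its VI; link teardown disables
 * them the same way.  pi->viid is assigned in t4_port_init() below.
 */
static int example_link_start(struct adapter *adap, unsigned int mbox,
			      struct port_info *pi)
{
	return t4_enable_vi(adap, mbox, pi->viid, true, true);
}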

/**
 * t4_identify_port - identify a VI's port by blinking its LED
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @viid: the VI id
 * @nblinks: how many times to blink LED at 2.5 Hz
 *
 * Identifies a VI's port by blinking its LED.
 */
int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
		     unsigned int nblinks)
{
	struct fw_vi_enable_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
	c.blinkdur = htons(nblinks);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_iq_free - free an ingress queue and its FLs
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queues
 * @vf: the VF owning the queues
 * @iqtype: the ingress queue type
 * @iqid: ingress queue id
 * @fl0id: FL0 queue id or 0xffff if no attached FL0
 * @fl1id: FL1 queue id or 0xffff if no attached FL1
 *
 * Frees an ingress queue and its associated FLs, if any.
 */
int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
	       unsigned int fl0id, unsigned int fl1id)
{
	struct fw_iq_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
			    FW_IQ_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
	c.iqid = htons(iqid);
	c.fl0id = htons(fl0id);
	c.fl1id = htons(fl1id);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
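
/*
 * Illustrative usage sketch (not part of this commit): freeing an Rx queue
 * that has one free list attached.  0xffff marks the absent FL1, and
 * FW_IQ_TYPE_FL_INT_CAP is the usual type for an interrupt-capable
 * ingress queue with free lists.
 */
static int example_free_rxq(struct adapter *adap, unsigned int mbox,
			    unsigned int pf, unsigned int vf,
			    unsigned int iqid, unsigned int fl0id)
{
	return t4_iq_free(adap, mbox, pf, vf, FW_IQ_TYPE_FL_INT_CAP,
			  iqid, fl0id, 0xffff);
}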

/**
 * t4_eth_eq_free - free an Ethernet egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an Ethernet egress queue.
 */
int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		   unsigned int vf, unsigned int eqid)
{
	struct fw_eq_eth_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
			    FW_EQ_ETH_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ctrl_eq_free - free a control egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees a control egress queue.
 */
int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ctrl_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
			    FW_EQ_CTRL_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 * t4_ofld_eq_free - free an offload egress queue
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @pf: the PF owning the queue
 * @vf: the VF owning the queue
 * @eqid: egress queue id
 *
 * Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
			    FW_EQ_OFLD_CMD_VFN(vf));
	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
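
/*
 * Illustrative note (not part of this commit): the three *_eq_free()
 * helpers above are deliberately parallel; teardown code simply calls the
 * one matching the egress queue's type, e.g. for an Ethernet Tx queue:
 */
static int example_free_txq(struct adapter *adap, unsigned int mbox,
			    unsigned int pf, unsigned int vf,
			    unsigned int eqid)
{
	return t4_eth_eq_free(adap, mbox, pf, vf, eqid);
}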

/**
 * t4_handle_fw_rpl - process a FW reply message
 * @adap: the adapter
 * @rpl: start of the FW message
 *
 * Processes a FW message, such as link state change messages.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	if (opcode == FW_PORT_CMD) {	/* link/module state change message */
		int speed = 0, fc = 0;
		const struct fw_port_cmd *p = (void *)rpl;
		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
		int port = adap->chan_map[chan];
		struct port_info *pi = adap2pinfo(adap, port);
		struct link_config *lc = &pi->link_cfg;
		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);

		if (stat & FW_PORT_CMD_RXPAUSE)
			fc |= PAUSE_RX;
		if (stat & FW_PORT_CMD_TXPAUSE)
			fc |= PAUSE_TX;
		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
			speed = 100;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
			speed = 1000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
			speed = 10000;
		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
			speed = 40000;

		if (link_ok != lc->link_ok || speed != lc->speed ||
		    fc != lc->fc) {	/* something changed */
			lc->link_ok = link_ok;
			lc->speed = speed;
			lc->fc = fc;
			lc->supported = be16_to_cpu(p->u.info.pcap);
			t4_os_link_changed(adap, port, link_ok);
		}
		if (mod != pi->mod_type) {
			pi->mod_type = mod;
			t4_os_portmod_changed(adap, port);
		}
	}
	return 0;
}
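
/*
 * Illustrative usage sketch (not part of this commit): the driver's
 * firmware event queue handler is the natural caller, handing over the
 * 64-bit payload of a CPL_FW6_MSG once it is identified as a firmware
 * command reply.  The exact dispatch condition here is an assumption for
 * the example.
 */
static void example_fw_event(struct adapter *adap,
			     const struct cpl_fw6_msg *msg)
{
	if (msg->type == FW6_TYPE_CMD_RPL)
		t4_handle_fw_rpl(adap, msg->data);
}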

static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;

	if (pci_is_pcie(adapter->pdev)) {
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: link capabilities
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/flow-control/autonegotiation settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = 0;
	lc->speed = 0;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & FW_PORT_CAP_ANEG) {
		lc->advertising = lc->supported & ADVERT_MASK;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

#define CIM_PF_NOACCESS 0xeeeeeeee

int t4_wait_dev_ready(void __iomem *regs)
{
	u32 whoami;

	whoami = readl(regs + PL_WHOAMI);
	if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
		return 0;

	msleep(500);
	whoami = readl(regs + PL_WHOAMI);
	return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
}
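
/*
 * Illustrative usage sketch (not part of this commit): probe code should
 * wait for the chip to come out of reset before its first register or
 * mailbox access, typically right after mapping BAR0.
 */
static int example_probe_ready(struct adapter *adap)
{
	int ret = t4_wait_dev_ready(adap->regs);

	if (ret < 0)
		dev_err(adap->pdev_dev, "device did not become ready\n");
	return ret;
}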

struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

static int get_flash_params(struct adapter *adap)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are
	 * left to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },	/* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 info;

	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP, 0);	/* unlock SF */
	if (ret)
		return ret;

	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
		if (supported_flash[ret].vendor_and_model_id == info) {
			adap->params.sf_size = supported_flash[ret].size_mb;
			adap->params.sf_nsec =
				adap->params.sf_size / SF_SEC_SIZE;
			return 0;
		}

	if ((info & 0xff) != 0x20)	/* not a Numonix flash */
		return -EINVAL;
	info >>= 16;	/* log2 of size */
	if (info >= 0x14 && info < 0x18)
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;

	if (adap->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
			 adap->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}
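
/*
 * Worked example (not part of this commit) of the Numonix decoding above:
 * an RDID reply with manufacturer byte 0x20 and density byte 0x17 leaves
 * info == 0x17 after the shift, so sf_size = 1 << 0x17 = 8MB and
 * sf_nsec = 1 << (0x17 - 16) = 128 sectors, consistent with 64KB sectors
 * (128 * 64KB == 8MB).
 */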

/**
 * t4_prep_adapter - prepare SW and HW for operation
 * @adapter: the adapter
 *
 * Initialize adapter SW state for the various HW modules, set initial
 * values for some adapter tunables, take PHYs out of reset, and
 * initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = G_REV(t4_read_reg(adapter, PL_REV));

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	ver = device_id >> 12;
	adapter->params.chip = 0;
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;
	return 0;
}

/**
 * t4_init_tp_params - initialize adap->params.tp
 * @adap: the adapter
 *
 * Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.
	 */
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->params.tp.vlan_pri_map, 1,
			 TP_VLAN_PRI_MAP);
	t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
			 &adap->params.tp.ingress_config, 1,
			 TP_INGRESS_CONFIG);

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       F_PROTOCOL);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & F_VNIC) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}

/**
 * t4_filter_field_shift - calculate filter field shift
 * @adap: the adapter
 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
 *
 * Return the shift position of a filter field within the Compressed
 * Filter Tuple.  The filter field is specified via its selection bit
 * within TP_VLAN_PRI_MAP (filter mode).  E.g. F_VLAN.
 */
int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
{
	unsigned int filter_mode = adap->params.tp.vlan_pri_map;
	unsigned int sel;
	int field_shift;

	if ((filter_mode & filter_sel) == 0)
		return -1;

	for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
		switch (filter_mode & sel) {
		case F_FCOE:
			field_shift += W_FT_FCOE;
			break;
		case F_PORT:
			field_shift += W_FT_PORT;
			break;
		case F_VNIC_ID:
			field_shift += W_FT_VNIC_ID;
			break;
		case F_VLAN:
			field_shift += W_FT_VLAN;
			break;
		case F_TOS:
			field_shift += W_FT_TOS;
			break;
		case F_PROTOCOL:
			field_shift += W_FT_PROTOCOL;
			break;
		case F_ETHERTYPE:
			field_shift += W_FT_ETHERTYPE;
			break;
		case F_MACMATCH:
			field_shift += W_FT_MACMATCH;
			break;
		case F_MPSHITTYPE:
			field_shift += W_FT_MPSHITTYPE;
			break;
		case F_FRAGMENTATION:
			field_shift += W_FT_FRAGMENTATION;
			break;
		}
	}
	return field_shift;
}
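
/*
 * Worked example (not part of this commit): with a filter mode of
 * F_PORT | F_VLAN | F_PROTOCOL, the loop above sums the widths of the
 * enabled fields whose selection bits lie below F_PROTOCOL, so
 *
 *	t4_filter_field_shift(adap, F_PROTOCOL) == W_FT_PORT + W_FT_VLAN
 *
 * while a field absent from the mode (e.g. F_TOS here) returns -1.
 */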

int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;
	struct fw_port_cmd c;
	struct fw_rss_vi_config_cmd rvc;

	memset(&c, 0, sizeof(c));
	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		unsigned int rss_size;
		struct port_info *p = adap2pinfo(adap, i);

		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_PORT_CMD_PORTID(j));
		c.action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_LEN16(c));
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
		if (ret)
			return ret;

		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
		if (ret < 0)
			return ret;

		p->viid = ret;
		p->tx_chan = j;
		p->lport = j;
		p->rss_size = rss_size;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		adap->port[i]->dev_port = j;

		ret = ntohl(c.u.info.lstatus_to_modtype);
		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
		p->mod_type = FW_PORT_MOD_TYPE_NA;

		rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
				       FW_CMD_REQUEST | FW_CMD_READ |
				       FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
		rvc.retval_len16 = htonl(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);

		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
		j++;
	}
	return 0;
}
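
/*
 * Illustrative usage sketch (not part of this commit): the expected
 * probe-time ordering of the helpers in this file.  The mbox/pf values
 * are placeholders supplied by the caller.
 */
static int example_bringup(struct adapter *adap, int mbox, int pf)
{
	int ret;

	ret = t4_prep_adapter(adap);	/* chip and flash identification */
	if (ret)
		return ret;
	ret = t4_init_tp_params(adap);	/* cache TP filter-mode state */
	if (ret)
		return ret;
	return t4_port_init(adap, mbox, pf, 0);	/* VF 0: we are the PF */
}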