Commit e3c98512780ae2cfb90be2152ab35294439bb7bb

Authored by Vipul Pandya
Committed by David S. Miller
1 parent aa731872f7

cxgb4: Fix unable to get UP event from the LLD

If the T4 configuration file was loaded from the /lib/firmware/cxgb4/
directory, the card's offload capabilities were disabled during
initialization, so the ULDs never received an UP event from the LLD.

Signed-off-by: Jay Hernandez <jay@chelsio.com>
Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
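
For context, a minimal sketch (not part of this commit) of how an ULD hooks
into the LLD's notifications. The callback names follow the cxgb4_uld.h API of
this kernel generation; the demo_* identifiers are hypothetical, and a real
ULD (e.g. iw_cxgb4) also supplies .rx_handler:

#include "cxgb4_uld.h"

/* The LLD invokes state_change(handle, CXGB4_STATE_UP) once the adapter
 * is fully initialized; with the offload capabilities disabled, that
 * call never arrived, which is the symptom this commit fixes.
 */
static void *demo_uld_add(const struct cxgb4_lld_info *lld)
{
        return (void *)lld;     /* would return the ULD's per-adapter state */
}

static int demo_uld_state_change(void *handle, enum cxgb4_state new_state)
{
        if (new_state == CXGB4_STATE_UP)
                pr_info("demo ULD: got UP event from the LLD\n");
        return 0;
}

static struct cxgb4_uld_info demo_uld_info = {
        .name         = "demo",
        .add          = demo_uld_add,
        .state_change = demo_uld_state_change,
};

/* registration, e.g. from the ULD's module_init():
 *      cxgb4_register_uld(CXGB4_ULD_RDMA, &demo_uld_info);
 */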

Showing 1 changed file with 0 additions and 10 deletions

drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "l2t.h"

#define DRV_VERSION "1.3.0-ko"
#define DRV_DESC "Chelsio T4 Network Driver"

/*
 * Max interrupt hold-off timer value in us.  Queues fall back to this value
 * under extreme memory pressure so it's largish to give the system time to
 * recover.
 */
#define MAX_SGE_TIMERVAL 200U

enum {
        /*
         * Physical Function provisioning constants.
         */
        PFRES_NVI = 4,                  /* # of Virtual Interfaces */
        PFRES_NETHCTRL = 128,           /* # of EQs used for ETH or CTRL Qs */
        PFRES_NIQFLINT = 128,           /* # of ingress Qs/w Free List(s)/intr */
        PFRES_NEQ = 256,                /* # of egress queues */
        PFRES_NIQ = 0,                  /* # of ingress queues */
        PFRES_TC = 0,                   /* PCI-E traffic class */
        PFRES_NEXACTF = 128,            /* # of exact MPS filters */

        PFRES_R_CAPS = FW_CMD_CAP_PF,
        PFRES_WX_CAPS = FW_CMD_CAP_PF,

#ifdef CONFIG_PCI_IOV
        /*
         * Virtual Function provisioning constants.  We need two extra Ingress
         * Queues with Interrupt capability to serve as the VF's Firmware
         * Event Queue and Forwarded Interrupt Queue (when using MSI mode) --
         * neither will have Free Lists associated with them.  For each
         * Ethernet/Control Egress Queue and for each Free List, we need an
         * Egress Context.
         */
        VFRES_NPORTS = 1,               /* # of "ports" per VF */
        VFRES_NQSETS = 2,               /* # of "Queue Sets" per VF */

        VFRES_NVI = VFRES_NPORTS,       /* # of Virtual Interfaces */
        VFRES_NETHCTRL = VFRES_NQSETS,  /* # of EQs used for ETH or CTRL Qs */
        VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs/w Free List(s)/intr */
        VFRES_NEQ = VFRES_NQSETS*2,     /* # of egress queues */
        VFRES_NIQ = 0,                  /* # of non-fl/int ingress queues */
        VFRES_TC = 0,                   /* PCI-E traffic class */
        VFRES_NEXACTF = 16,             /* # of exact MPS filters */

        VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
        VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
#endif
};

/*
 * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
 * static and likely not to be useful in the long run.  We really need to
 * implement some form of persistent configuration which the firmware
 * controls.
 */
static unsigned int pfvfres_pmask(struct adapter *adapter,
                                  unsigned int pf, unsigned int vf)
{
        unsigned int portn, portvec;

        /*
         * Give PFs access to all of the ports.
         */
        if (vf == 0)
                return FW_PFVF_CMD_PMASK_MASK;

        /*
         * For VFs, we'll assign them access to the ports based purely on the
         * PF.  We assign active ports in order, wrapping around if there are
         * fewer active ports than PFs: e.g. active port[pf % nports].
         * Unfortunately the adapter's port_info structs haven't been
         * initialized yet so we have to compute this.
         */
        if (adapter->params.nports == 0)
                return 0;

        portn = pf % adapter->params.nports;
        portvec = adapter->params.portvec;
        for (;;) {
                /*
                 * Isolate the lowest set bit in the port vector.  If we're at
                 * the port number that we want, return that as the pmask.
                 * Otherwise mask that bit out of the port vector and
                 * decrement our port number ...
                 */
                unsigned int pmask = portvec ^ (portvec & (portvec-1));
                if (portn == 0)
                        return pmask;
                portn--;
                portvec &= ~pmask;
        }
        /*NOTREACHED*/
}
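
/*
 * Worked example (illustrative, not in the original source): with
 * portvec = 0b1101 (ports 0, 2 and 3 active) and pf = 5 on a 3-port
 * configuration, portn = 5 % 3 = 2.  Each pass isolates the lowest set
 * bit -- 0b0001, then 0b0100 -- and the third pass returns
 * pmask = 0b1000, i.e. the VFs of PF 5 may only use port 3.
 */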

enum {
        MAX_TXQ_ENTRIES      = 16384,
        MAX_CTRL_TXQ_ENTRIES = 1024,
        MAX_RSPQ_ENTRIES     = 16384,
        MAX_RX_BUFFERS       = 16384,
        MIN_TXQ_ENTRIES      = 32,
        MIN_CTRL_TXQ_ENTRIES = 32,
        MIN_RSPQ_ENTRIES     = 128,
        MIN_FL_ENTRIES       = 16
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
                         NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
                         NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

#define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }

static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0xa000, 0),  /* PE10K */
        CH_DEVICE(0x4001, -1),
        CH_DEVICE(0x4002, -1),
        CH_DEVICE(0x4003, -1),
        CH_DEVICE(0x4004, -1),
        CH_DEVICE(0x4005, -1),
        CH_DEVICE(0x4006, -1),
        CH_DEVICE(0x4007, -1),
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
        CH_DEVICE(0x4404, 4),
        CH_DEVICE(0x4405, 4),
        CH_DEVICE(0x4406, 4),
        CH_DEVICE(0x4407, 4),
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
        { 0, }
};

#define FW_FNAME "cxgb4/t4fw.bin"
#define FW_CFNAME "cxgb4/t4-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
                 "0..4 in microseconds");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
                 "thresholds 1..3 for queue interrupt packet counters");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
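
/*
 * Worked example (illustrative): with the default offset of 2 and a
 * 14-byte Ethernet header, the IP header starts at buffer offset
 * 2 + 14 = 16, which is 4-byte aligned; with an offset of 0 it would
 * start at offset 14, which is not.
 */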

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");

static unsigned int num_vf[4];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/*
 * The filter TCAM has a fixed portion and a variable portion.  The fixed
 * portion can match on source/destination IPv4/IPv6 addresses and TCP/UDP
 * ports.  The variable portion is 36 bits which can include things like Exact
 * Match MAC Index (9 bits), Ether Type (16 bits), IP Protocol (8 bits),
 * [Inner] VLAN Tag (17 bits), etc. which, if all were somehow selected, would
 * far exceed the 36-bit budget for this "compressed" header portion of the
 * filter.  Thus, we have a scarce resource which must be carefully managed.
 *
 * By default we set this up to mostly match the set of filter matching
 * capabilities of T3 but with accommodations for some of T4's more
 * interesting features:
 *
 *     { IP Fragment (1), MPS Match Type (3), IP Protocol (8),
 *       [Inner] VLAN (17), Port (3), FCoE (1) }
 */
enum {
        TP_VLAN_PRI_MAP_DEFAULT = HW_TPL_FR_MT_PR_IV_P_FC,
        TP_VLAN_PRI_MAP_FIRST = FCOE_SHIFT,
        TP_VLAN_PRI_MAP_LAST = FRAGMENTATION_SHIFT,
};
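
/*
 * Budget check (illustrative): the default selection above consumes
 * 1 + 3 + 8 + 17 + 3 + 1 = 33 of the 36 available compressed-header
 * bits, leaving 3 bits of headroom.
 */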

static unsigned int tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
        if (!netif_carrier_ok(dev))
                netdev_info(dev, "link down\n");
        else {
                static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

                const char *s = "10Mbps";
                const struct port_info *p = netdev_priv(dev);

                switch (p->link_cfg.speed) {
                case SPEED_10000:
                        s = "10Gbps";
                        break;
                case SPEED_1000:
                        s = "1000Mbps";
                        break;
                case SPEED_100:
                        s = "100Mbps";
                        break;
                }

                netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
                            fc[p->link_cfg.fc]);
        }
}

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
        struct net_device *dev = adapter->port[port_id];

        /* Skip changes from disabled ports. */
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
                else
                        netif_carrier_off(dev);

                link_report(dev);
        }
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
        static const char *mod_str[] = {
                NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
        };

        const struct net_device *dev = adap->port[port_id];
        const struct port_info *pi = netdev_priv(dev);

        if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
                netdev_info(dev, "port module unplugged\n");
        else if (pi->mod_type < ARRAY_SIZE(mod_str))
                netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
        u64 mhash = 0;
        u64 uhash = 0;
        bool free = true;
        u16 filt_idx[7];
        const u8 *addr[7];
        int ret, naddr = 0;
        const struct netdev_hw_addr *ha;
        int uc_cnt = netdev_uc_count(dev);
        int mc_cnt = netdev_mc_count(dev);
        const struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /* first do the secondary unicast addresses */
        netdev_for_each_uc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &uhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        /* next set up the multicast addresses */
        netdev_for_each_mc_addr(ha, dev) {
                addr[naddr++] = ha->addr;
                if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
                        ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
                                        naddr, addr, filt_idx, &mhash, sleep);
                        if (ret < 0)
                                return ret;

                        free = false;
                        naddr = 0;
                }
        }

        return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
                                uhash | mhash, sleep);
}
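
/*
 * Note (illustrative): addresses are pushed to the firmware in batches
 * of up to ARRAY_SIZE(addr) = 7 per t4_alloc_mac_filt() call; "free" is
 * true only on the first call so the previous filter set is released
 * exactly once, and addresses that could not get an exact filter are
 * folded into the uhash/mhash hash filters programmed at the end.
 */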

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
                 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);

        ret = set_addr_filters(dev, sleep_ok);
        if (ret == 0)
                ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
                                    (dev->flags & IFF_PROMISC) ? 1 : 0,
                                    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
                                    sleep_ok);
        return ret;
}

static struct workqueue_struct *workq;

/**
 *      link_start - enable a port
 *      @dev: the port to enable
 *
 *      Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
        int ret;
        struct port_info *pi = netdev_priv(dev);
        unsigned int mb = pi->adapter->fn;

        /*
         * We do not set address filters and promiscuity here, the stack does
         * that step explicitly.
         */
        ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
                            !!(dev->features & NETIF_F_HW_VLAN_RX), true);
        if (ret == 0) {
                ret = t4_change_mac(pi->adapter, mb, pi->viid,
                                    pi->xact_addr_filt, dev->dev_addr, true,
                                    true);
                if (ret >= 0) {
                        pi->xact_addr_filt = ret;
                        ret = 0;
                }
        }
        if (ret == 0)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
                ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
        return ret;
}

/*
 * Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                          const struct pkt_gl *gl)
{
        u8 opcode = ((const struct rss_header *)rsp)->opcode;

        rsp++;                                          /* skip RSS header */
        if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
                const struct cpl_sge_egr_update *p = (void *)rsp;
                unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
                struct sge_txq *txq;

                txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
                txq->restarts++;
                if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
                        struct sge_eth_txq *eq;

                        eq = container_of(txq, struct sge_eth_txq, q);
                        netif_tx_wake_queue(eq->txq);
                } else {
                        struct sge_ofld_txq *oq;

                        oq = container_of(txq, struct sge_ofld_txq, q);
                        tasklet_schedule(&oq->qresume_tsk);
                }
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;

                if (p->type == 0)
                        t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;

                do_l2t_write_rpl(q->adap, p);
        } else
                dev_err(q->adap->pdev_dev,
                        "unexpected CPL %#x on FW event queue\n", opcode);
        return 0;
}

/**
 *      uldrx_handler - response queue handler for ULD queues
 *      @q: the response queue that received the packet
 *      @rsp: the response queue descriptor holding the offload message
 *      @gl: the gather list of packet fragments
 *
 *      Deliver an ingress offload packet to a ULD.  All processing is done by
 *      the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
                         const struct pkt_gl *gl)
{
        struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

        if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
                rxq->stats.nomem++;
                return -1;
        }
        if (gl == NULL)
                rxq->stats.imm++;
        else if (gl == CXGB4_MSG_AN)
                rxq->stats.an++;
        else
                rxq->stats.pkts++;
        return 0;
}
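
/*
 * Note (illustrative): ulds[] above is populated by cxgb4_register_uld()
 * and uld_handle[] by the ULD's add() callback, so this handler is the
 * data-path counterpart of the UP/DOWN state notifications referred to
 * in the commit message.
 */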

static void disable_msi(struct adapter *adapter)
{
        if (adapter->flags & USING_MSIX) {
                pci_disable_msix(adapter->pdev);
                adapter->flags &= ~USING_MSIX;
        } else if (adapter->flags & USING_MSI) {
                pci_disable_msi(adapter->pdev);
                adapter->flags &= ~USING_MSI;
        }
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
        struct adapter *adap = cookie;

        u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
        if (v & PFSW) {
                adap->swintr = 1;
                t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
        }
        t4_slow_intr_handler(adap);
        return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
        int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

        /* non-data interrupts */
        snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

        /* FW events */
        snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
                 adap->port[0]->name);

        /* Ethernet queues */
        for_each_port(adap, j) {
                struct net_device *d = adap->port[j];
                const struct port_info *pi = netdev_priv(d);

                for (i = 0; i < pi->nqsets; i++, msi_idx++)
                        snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
                                 d->name, i);
        }

        /* offload queues */
        for_each_ofldrxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
                         adap->port[0]->name, i);

        for_each_rdmarxq(&adap->sge, i)
                snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
                         adap->port[0]->name, i);
}
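
/*
 * Note (illustrative): the MSI-X vector layout implied here is fixed --
 * vector 0 handles non-data events, vector 1 the firmware event queue,
 * and vectors 2 and up the Ethernet, offload and RDMA Rx queues in that
 * order; request_msix_queue_irqs() below walks the same layout.
 */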

static int request_msix_queue_irqs(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;

        err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
                          adap->msix_info[1].desc, &s->fw_evtq);
        if (err)
                return err;

        for_each_ethrxq(s, ethqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ethrxq[ethqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_ofldrxq(s, ofldqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->ofldrxq[ofldqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        for_each_rdmarxq(s, rdmaqidx) {
                err = request_irq(adap->msix_info[msi_index].vec,
                                  t4_sge_intr_msix, 0,
                                  adap->msix_info[msi_index].desc,
                                  &s->rdmarxq[rdmaqidx].rspq);
                if (err)
                        goto unwind;
                msi_index++;
        }
        return 0;

unwind:
        while (--rdmaqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->rdmarxq[rdmaqidx].rspq);
        while (--ofldqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ofldrxq[ofldqidx].rspq);
        while (--ethqidx >= 0)
                free_irq(adap->msix_info[--msi_index].vec,
                         &s->ethrxq[ethqidx].rspq);
        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
        int i, msi_index = 2;
        struct sge *s = &adap->sge;

        free_irq(adap->msix_info[1].vec, &s->fw_evtq);
        for_each_ethrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
        for_each_ofldrxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
        for_each_rdmarxq(s, i)
                free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
}

/**
 *      write_rss - write the RSS table for a given port
 *      @pi: the port
 *      @queues: array of queue indices for RSS
 *
 *      Sets up the portion of the HW RSS table for the port's VI to distribute
 *      packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
        u16 *rss;
        int i, err;
        const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

        rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
        if (!rss)
                return -ENOMEM;

        /* map the queue indices to queue ids */
        for (i = 0; i < pi->rss_size; i++, queues++)
                rss[i] = q[*queues].rspq.abs_id;

        err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
                                  pi->rss_size, rss, pi->rss_size);
        kfree(rss);
        return err;
}

/**
 *      setup_rss - configure RSS
 *      @adap: the adapter
 *
 *      Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
        int i, err;

        for_each_port(adap, i) {
                const struct port_info *pi = adap2pinfo(adap, i);

                err = write_rss(pi, pi->rss);
                if (err)
                        return err;
        }
        return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
        qid -= p->ingr_start;
        return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (q && q->handler)
                        napi_disable(&q->napi);
        }
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
                struct sge_rspq *q = adap->sge.ingr_map[i];

                if (!q)
                        continue;
                if (q->handler)
                        napi_enable(&q->napi);
                /* 0-increment GTS to start the timer and enable interrupts */
                t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
                             SEINTARM(q->intr_params) |
                             INGRESSQID(q->cntxt_id));
        }
}

/**
 *      setup_sge_queues - configure SGE Tx/Rx/response queues
 *      @adap: the adapter
 *
 *      Determines how many sets of SGE queues to use and initializes them.
 *      We support multiple queue sets per port if we have MSI-X, otherwise
 *      just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
        int err, msi_idx, i, j;
        struct sge *s = &adap->sge;

        bitmap_zero(s->starving_fl, MAX_EGRQ);
        bitmap_zero(s->txq_maperr, MAX_EGRQ);

        if (adap->flags & USING_MSIX)
                msi_idx = 1;         /* vector 0 is for non-queue interrupts */
        else {
                err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
                                       NULL, NULL);
                if (err)
                        return err;
                msi_idx = -((int)s->intrq.abs_id + 1);
        }

        err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
                               msi_idx, NULL, fwevtq_handler);
        if (err) {
freeout:        t4_free_sge_resources(adap);
                return err;
        }

        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];
                struct port_info *pi = netdev_priv(dev);
                struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
                struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

                for (j = 0; j < pi->nqsets; j++, q++) {
                        if (msi_idx > 0)
                                msi_idx++;
                        err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
                                               msi_idx, &q->fl,
                                               t4_ethrx_handler);
                        if (err)
                                goto freeout;
                        q->rspq.idx = j;
                        memset(&q->stats, 0, sizeof(q->stats));
                }
                for (j = 0; j < pi->nqsets; j++, t++) {
                        err = t4_sge_alloc_eth_txq(adap, t, dev,
                                        netdev_get_tx_queue(dev, j),
                                        s->fw_evtq.cntxt_id);
                        if (err)
                                goto freeout;
                }
        }

        j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
        for_each_ofldrxq(s, i) {
                struct sge_ofld_rxq *q = &s->ofldrxq[i];
                struct net_device *dev = adap->port[i / j];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
                                       &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->ofld_rxq[i] = q->rspq.abs_id;
                err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
                                            s->fw_evtq.cntxt_id);
                if (err)
                        goto freeout;
        }

        for_each_rdmarxq(s, i) {
                struct sge_ofld_rxq *q = &s->rdmarxq[i];

                if (msi_idx > 0)
                        msi_idx++;
                err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
                                       msi_idx, &q->fl, uldrx_handler);
                if (err)
                        goto freeout;
                memset(&q->stats, 0, sizeof(q->stats));
                s->rdma_rxq[i] = q->rspq.abs_id;
        }

        for_each_port(adap, i) {
                /*
                 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
                 * have RDMA queues, and that's the right value.
                 */
                err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
                                            s->fw_evtq.cntxt_id,
                                            s->rdmarxq[i].rspq.cntxt_id);
                if (err)
                        goto freeout;
        }

        t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
                     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
                     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
        return 0;
}
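
/*
 * Note (illustrative): the sign of msi_idx above encodes the interrupt
 * mode -- a positive value selects a dedicated MSI-X vector for the
 * queue, while the negative value derived from the INTx/MSI interrupt
 * queue's abs_id tells t4_sge_alloc_rxq() to forward the queue's
 * interrupts to that single shared queue instead.
 */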

/*
 * Returns 0 if new FW was successfully loaded, a positive errno if a load was
 * started but failed, and a negative errno if flash load couldn't start.
 */
static int upgrade_fw(struct adapter *adap)
{
	int ret;
	u32 vers;
	const struct fw_hdr *hdr;
	const struct firmware *fw;
	struct device *dev = adap->pdev_dev;

	ret = request_firmware(&fw, FW_FNAME, dev);
	if (ret < 0) {
		dev_err(dev, "unable to load firmware image " FW_FNAME
			", error %d\n", ret);
		return ret;
	}

	hdr = (const struct fw_hdr *)fw->data;
	vers = ntohl(hdr->fw_ver);
	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
		ret = -EINVAL;		/* wrong major version, won't do */
		goto out;
	}

	/*
	 * If the flash FW is unusable or we found something newer, load it.
	 */
	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
	    vers > adap->params.fw_vers) {
		dev_info(dev, "upgrading firmware ...\n");
		ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
				    /*force=*/false);
		if (!ret)
			dev_info(dev, "firmware successfully upgraded to "
				 FW_FNAME " (%d.%d.%d.%d)\n",
				 FW_HDR_FW_VER_MAJOR_GET(vers),
				 FW_HDR_FW_VER_MINOR_GET(vers),
				 FW_HDR_FW_VER_MICRO_GET(vers),
				 FW_HDR_FW_VER_BUILD_GET(vers));
		else
			dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
	} else {
		/*
		 * Tell our caller that we didn't upgrade the firmware.
		 */
		ret = -EINVAL;
	}

out:	release_firmware(fw);
	return ret;
}
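upgrade_fw() compares the packed 32-bit version word from the image header against the version read from flash; packing all four components into one word makes `vers > adap->params.fw_vers` a cheap lexicographic compare. A standalone sketch of the decomposition, assuming the conventional one-byte-per-component packing behind the FW_HDR_FW_VER_*_GET macros:

#include <stdio.h>
#include <stdint.h>

/* Assumed packing (mirroring FW_HDR_FW_VER_*_GET): one byte each for
 * major.minor.micro.build, with major in the top byte. */
#define VER_MAJOR(x) (((x) >> 24) & 0xffU)
#define VER_MINOR(x) (((x) >> 16) & 0xffU)
#define VER_MICRO(x) (((x) >> 8) & 0xffU)
#define VER_BUILD(x) ((x) & 0xffU)

int main(void)
{
	uint32_t vers = 0x01040a00;	/* example image version 1.4.10.0 */

	printf("image fw %u.%u.%u.%u\n", VER_MAJOR(vers), VER_MINOR(vers),
	       VER_MICRO(vers), VER_BUILD(vers));
	return 0;
}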

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through t4_alloc_mem().
 */
static void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
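t4_alloc_mem() tries the cheap contiguous allocator first and falls back to vmalloc, and t4_free_mem() routes the free by asking which allocator owns the address (is_vmalloc_addr()). A userspace analogue of the same try-fast-then-fallback pattern, with calloc standing in for kzalloc and mmap for vzalloc; lacking is_vmalloc_addr(), it carries an explicit flag instead:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

struct blob {
	void *p;
	size_t size;
	int mapped;	/* plays the role of is_vmalloc_addr() */
};

static int blob_alloc(struct blob *b, size_t size)
{
	b->size = size;
	b->mapped = 0;
	b->p = calloc(1, size);			/* "kzalloc" analogue */
	if (!b->p) {				/* fall back: "vzalloc" */
		b->p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (b->p == MAP_FAILED)
			return -1;
		b->mapped = 1;
	}
	return 0;
}

static void blob_free(struct blob *b)
{
	if (b->mapped)
		munmap(b->p, b->size);
	else
		free(b->p);
}

int main(void)
{
	struct blob b;

	if (!blob_alloc(&b, 1 << 20)) {
		printf("allocated via %s\n", b.mapped ? "mmap" : "calloc");
		blob_free(&b);
	}
	return 0;
}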

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxUnicastFrames    ",
	"TxErrorFrames      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"TxFramesDropped    ",
	"TxPauseFrames      ",
	"TxPPP0Frames       ",
	"TxPPP1Frames       ",
	"TxPPP2Frames       ",
	"TxPPP3Frames       ",
	"TxPPP4Frames       ",
	"TxPPP5Frames       ",
	"TxPPP6Frames       ",
	"TxPPP7Frames       ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxUnicastFrames    ",

	"RxFramesTooLong    ",
	"RxJabberErrors     ",
	"RxFCSErrors        ",
	"RxLengthErrors     ",
	"RxSymbolErrors     ",
	"RxRuntFrames       ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"RxPauseFrames      ",
	"RxPPP0Frames       ",
	"RxPPP1Frames       ",
	"RxPPP2Frames       ",
	"RxPPP3Frames       ",
	"RxPPP4Frames       ",
	"RxPPP5Frames       ",
	"RxPPP6Frames       ",
	"RxPPP7Frames       ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",

	"TSO                ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"GROpackets         ",
	"GROmerged          ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)

static int get_regs_len(struct net_device *dev)
{
	return T4_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}

static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));

	if (adapter->params.fw_vers)
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "%u.%u.%u.%u, TP %u.%u.%u.%u",
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
			 FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
			 FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
}

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
}
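get_stats() fills one flat u64 array: the hardware port counters first, then the per-queue software block, which is why stats_strings lists the MAC names before the seven software counters. A toy version of the pointer-advance idiom, with small stand-in structs:

#include <stdio.h>
#include <stdint.h>

struct mac_stats { uint64_t v[4]; };	/* stand-in for struct port_stats */
struct q_stats { uint64_t tso; };	/* stand-in for queue_port_stats */

int main(void)
{
	uint64_t data[5] = { 0 };
	uint64_t *p = data;

	/* MAC block first, then advance in u64 units as get_stats() does */
	p += sizeof(struct mac_stats) / sizeof(uint64_t);
	((struct q_stats *)p)->tso = 42;
	printf("queue block starts at data[%zu] = %llu\n",
	       sizeof(struct mac_stats) / sizeof(uint64_t),
	       (unsigned long long)data[4]);
	return 0;
}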

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return 4 | (ap->params.rev << 10) | (1 << 16);
}
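Given that layout, unpacking a regs->version word is a pair of shifts and masks; a minimal check with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int v = 4 | (2u << 10) | (1u << 16);	/* chip 4, rev 2, dump 1 */

	printf("chip %u rev %u regdump v%u\n",
	       v & 0x3ffu, (v >> 10) & 0x3fu, (v >> 16) & 0xffu);
	return 0;
}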

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}

static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	static const unsigned int reg_ranges[] = {
		0x1008, 0x1108,
		0x1180, 0x11b4,
		0x11fc, 0x123c,
		0x1300, 0x173c,
		0x1800, 0x18fc,
		0x3000, 0x30d8,
		0x30e0, 0x5924,
		0x5960, 0x59d4,
		0x5a00, 0x5af8,
		0x6000, 0x6098,
		0x6100, 0x6150,
		0x6200, 0x6208,
		0x6240, 0x6248,
		0x6280, 0x6338,
		0x6370, 0x638c,
		0x6400, 0x643c,
		0x6500, 0x6524,
		0x6a00, 0x6a38,
		0x6a60, 0x6a78,
		0x6b00, 0x6b84,
		0x6bf0, 0x6c84,
		0x6cf0, 0x6d84,
		0x6df0, 0x6e84,
		0x6ef0, 0x6f84,
		0x6ff0, 0x7084,
		0x70f0, 0x7184,
		0x71f0, 0x7284,
		0x72f0, 0x7384,
		0x73f0, 0x7450,
		0x7500, 0x7530,
		0x7600, 0x761c,
		0x7680, 0x76cc,
		0x7700, 0x7798,
		0x77c0, 0x77fc,
		0x7900, 0x79fc,
		0x7b00, 0x7c38,
		0x7d00, 0x7efc,
		0x8dc0, 0x8e1c,
		0x8e30, 0x8e78,
		0x8ea0, 0x8f6c,
		0x8fc0, 0x9074,
		0x90fc, 0x90fc,
		0x9400, 0x9458,
		0x9600, 0x96bc,
		0x9800, 0x9808,
		0x9820, 0x983c,
		0x9850, 0x9864,
		0x9c00, 0x9c6c,
		0x9c80, 0x9cec,
		0x9d00, 0x9d6c,
		0x9d80, 0x9dec,
		0x9e00, 0x9e6c,
		0x9e80, 0x9eec,
		0x9f00, 0x9f6c,
		0x9f80, 0x9fec,
		0xd004, 0xd03c,
		0xdfc0, 0xdfe0,
		0xe000, 0xea7c,
		0xf000, 0x11190,
		0x19040, 0x1906c,
		0x19078, 0x19080,
		0x1908c, 0x19124,
		0x19150, 0x191b0,
		0x191d0, 0x191e8,
		0x19238, 0x1924c,
		0x193f8, 0x19474,
		0x19490, 0x194f8,
		0x19800, 0x19f30,
		0x1a000, 0x1a06c,
		0x1a0b0, 0x1a120,
		0x1a128, 0x1a138,
		0x1a190, 0x1a1c4,
		0x1a1fc, 0x1a1fc,
		0x1e040, 0x1e04c,
		0x1e284, 0x1e28c,
		0x1e2c0, 0x1e2c0,
		0x1e2e0, 0x1e2e0,
		0x1e300, 0x1e384,
		0x1e3c0, 0x1e3c8,
		0x1e440, 0x1e44c,
		0x1e684, 0x1e68c,
		0x1e6c0, 0x1e6c0,
		0x1e6e0, 0x1e6e0,
		0x1e700, 0x1e784,
		0x1e7c0, 0x1e7c8,
		0x1e840, 0x1e84c,
		0x1ea84, 0x1ea8c,
		0x1eac0, 0x1eac0,
		0x1eae0, 0x1eae0,
		0x1eb00, 0x1eb84,
		0x1ebc0, 0x1ebc8,
		0x1ec40, 0x1ec4c,
		0x1ee84, 0x1ee8c,
		0x1eec0, 0x1eec0,
		0x1eee0, 0x1eee0,
		0x1ef00, 0x1ef84,
		0x1efc0, 0x1efc8,
		0x1f040, 0x1f04c,
		0x1f284, 0x1f28c,
		0x1f2c0, 0x1f2c0,
		0x1f2e0, 0x1f2e0,
		0x1f300, 0x1f384,
		0x1f3c0, 0x1f3c8,
		0x1f440, 0x1f44c,
		0x1f684, 0x1f68c,
		0x1f6c0, 0x1f6c0,
		0x1f6e0, 0x1f6e0,
		0x1f700, 0x1f784,
		0x1f7c0, 0x1f7c8,
		0x1f840, 0x1f84c,
		0x1fa84, 0x1fa8c,
		0x1fac0, 0x1fac0,
		0x1fae0, 0x1fae0,
		0x1fb00, 0x1fb84,
		0x1fbc0, 0x1fbc8,
		0x1fc40, 0x1fc4c,
		0x1fe84, 0x1fe8c,
		0x1fec0, 0x1fec0,
		0x1fee0, 0x1fee0,
		0x1ff00, 0x1ff84,
		0x1ffc0, 0x1ffc8,
		0x20000, 0x2002c,
		0x20100, 0x2013c,
		0x20190, 0x201c8,
		0x20200, 0x20318,
		0x20400, 0x20528,
		0x20540, 0x20614,
		0x21000, 0x21040,
		0x2104c, 0x21060,
		0x210c0, 0x210ec,
		0x21200, 0x21268,
		0x21270, 0x21284,
		0x212fc, 0x21388,
		0x21400, 0x21404,
		0x21500, 0x21518,
		0x2152c, 0x2153c,
		0x21550, 0x21554,
		0x21600, 0x21600,
		0x21608, 0x21628,
		0x21630, 0x2163c,
		0x21700, 0x2171c,
		0x21780, 0x2178c,
		0x21800, 0x21c38,
		0x21c80, 0x21d7c,
		0x21e00, 0x21e04,
		0x22000, 0x2202c,
		0x22100, 0x2213c,
		0x22190, 0x221c8,
		0x22200, 0x22318,
		0x22400, 0x22528,
		0x22540, 0x22614,
		0x23000, 0x23040,
		0x2304c, 0x23060,
		0x230c0, 0x230ec,
		0x23200, 0x23268,
		0x23270, 0x23284,
		0x232fc, 0x23388,
		0x23400, 0x23404,
		0x23500, 0x23518,
		0x2352c, 0x2353c,
		0x23550, 0x23554,
		0x23600, 0x23600,
		0x23608, 0x23628,
		0x23630, 0x2363c,
		0x23700, 0x2371c,
		0x23780, 0x2378c,
		0x23800, 0x23c38,
		0x23c80, 0x23d7c,
		0x23e00, 0x23e04,
		0x24000, 0x2402c,
		0x24100, 0x2413c,
		0x24190, 0x241c8,
		0x24200, 0x24318,
		0x24400, 0x24528,
		0x24540, 0x24614,
		0x25000, 0x25040,
		0x2504c, 0x25060,
		0x250c0, 0x250ec,
		0x25200, 0x25268,
		0x25270, 0x25284,
		0x252fc, 0x25388,
		0x25400, 0x25404,
		0x25500, 0x25518,
		0x2552c, 0x2553c,
		0x25550, 0x25554,
		0x25600, 0x25600,
		0x25608, 0x25628,
		0x25630, 0x2563c,
		0x25700, 0x2571c,
		0x25780, 0x2578c,
		0x25800, 0x25c38,
		0x25c80, 0x25d7c,
		0x25e00, 0x25e04,
		0x26000, 0x2602c,
		0x26100, 0x2613c,
		0x26190, 0x261c8,
		0x26200, 0x26318,
		0x26400, 0x26528,
		0x26540, 0x26614,
		0x27000, 0x27040,
		0x2704c, 0x27060,
		0x270c0, 0x270ec,
		0x27200, 0x27268,
		0x27270, 0x27284,
		0x272fc, 0x27388,
		0x27400, 0x27404,
		0x27500, 0x27518,
		0x2752c, 0x2753c,
		0x27550, 0x27554,
		0x27600, 0x27600,
		0x27608, 0x27628,
		0x27630, 0x2763c,
		0x27700, 0x2771c,
		0x27780, 0x2778c,
		0x27800, 0x27c38,
		0x27c80, 0x27d7c,
		0x27e00, 0x27e04
	};

	int i;
	struct adapter *ap = netdev2adap(dev);

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, T4_REGMAP_SIZE);
	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
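reg_ranges is a flat array of inclusive [start, end] pairs walked two entries at a time; each pair covers (end - start) / 4 + 1 32-bit registers, and reg_block_dump() stores them at buf + start, so the dump buffer is effectively indexed by register address. A quick standalone check using the first two ranges from the table:

#include <stdio.h>

int main(void)
{
	unsigned int ranges[] = { 0x1008, 0x1108, 0x1180, 0x11b4 };
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i += 2)
		printf("0x%x-0x%x: %u words\n", ranges[i], ranges[i + 1],
		       (ranges[i + 1] - ranges[i]) / 4 + 1);
	return 0;
}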

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}

static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
{
	unsigned int v = 0;

	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
	    type == FW_PORT_TYPE_BT_XAUI) {
		v |= SUPPORTED_TP;
		if (caps & FW_PORT_CAP_SPEED_100M)
			v |= SUPPORTED_100baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseT_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseT_Full;
	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
		v |= SUPPORTED_Backplane;
		if (caps & FW_PORT_CAP_SPEED_1G)
			v |= SUPPORTED_1000baseKX_Full;
		if (caps & FW_PORT_CAP_SPEED_10G)
			v |= SUPPORTED_10000baseKX4_Full;
	} else if (type == FW_PORT_TYPE_KR)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
	else if (type == FW_PORT_TYPE_BP_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
	else if (type == FW_PORT_TYPE_BP4_AP)
		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
		     SUPPORTED_10000baseKX4_Full;
	else if (type == FW_PORT_TYPE_FIBER_XFI ||
		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
		v |= SUPPORTED_FIBRE;

	if (caps & FW_PORT_CAP_ANEG)
		v |= SUPPORTED_Autoneg;
	return v;
}

static unsigned int to_fw_linkcaps(unsigned int caps)
{
	unsigned int v = 0;

	if (caps & ADVERTISED_100baseT_Full)
		v |= FW_PORT_CAP_SPEED_100M;
	if (caps & ADVERTISED_1000baseT_Full)
		v |= FW_PORT_CAP_SPEED_1G;
	if (caps & ADVERTISED_10000baseT_Full)
		v |= FW_PORT_CAP_SPEED_10G;
	return v;
}

static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	const struct port_info *p = netdev_priv(dev);

	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
	    p->port_type == FW_PORT_TYPE_BT_XFI ||
	    p->port_type == FW_PORT_TYPE_BT_XAUI)
		cmd->port = PORT_TP;
	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
		cmd->port = PORT_FIBRE;
	else if (p->port_type == FW_PORT_TYPE_SFP) {
		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
			cmd->port = PORT_DA;
		else
			cmd->port = PORT_FIBRE;
	} else
		cmd->port = PORT_OTHER;

	if (p->mdio_addr >= 0) {
		cmd->phy_address = p->mdio_addr;
		cmd->transceiver = XCVR_EXTERNAL;
		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
	} else {
		cmd->phy_address = 0;  /* not really, but no better option */
		cmd->transceiver = XCVR_INTERNAL;
		cmd->mdio_support = 0;
	}

	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
	cmd->advertising = from_fw_linkcaps(p->port_type,
					    p->link_cfg.advertising);
	ethtool_cmd_speed_set(cmd,
			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
	cmd->duplex = DUPLEX_FULL;
	cmd->autoneg = p->link_cfg.autoneg;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}

static unsigned int speed_to_caps(int speed)
{
	if (speed == SPEED_100)
		return FW_PORT_CAP_SPEED_100M;
	if (speed == SPEED_1000)
		return FW_PORT_CAP_SPEED_1G;
	if (speed == SPEED_10000)
		return FW_PORT_CAP_SPEED_10G;
	return 0;
}

static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	unsigned int cap;
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;
	u32 speed = ethtool_cmd_speed(cmd);

	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
		return -EINVAL;

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		/*
		 * PHY offers a single speed.  See if that's what's
		 * being requested.
		 */
		if (cmd->autoneg == AUTONEG_DISABLE &&
		    (lc->supported & speed_to_caps(speed)))
			return 0;
		return -EINVAL;
	}

	if (cmd->autoneg == AUTONEG_DISABLE) {
		cap = speed_to_caps(speed);

		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
		    (speed == SPEED_10000))
			return -EINVAL;
		lc->requested_speed = cap;
		lc->advertising = 0;
	} else {
		cap = to_fw_linkcaps(cmd->advertising);
		if (!(lc->supported & cap))
			return -EINVAL;
		lc->requested_speed = 0;
		lc->advertising = cap | FW_PORT_CAP_ANEG;
	}
	lc->autoneg = cmd->autoneg;

	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_pauseparam(struct net_device *dev,
			   struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);

	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
}

static int set_pauseparam(struct net_device *dev,
			  struct ethtool_pauseparam *epause)
{
	struct port_info *p = netdev_priv(dev);
	struct link_config *lc = &p->link_cfg;

	if (epause->autoneg == AUTONEG_DISABLE)
		lc->requested_fc = 0;
	else if (lc->supported & FW_PORT_CAP_ANEG)
		lc->requested_fc = PAUSE_AUTONEG;
	else
		return -EINVAL;

	if (epause->rx_pause)
		lc->requested_fc |= PAUSE_RX;
	if (epause->tx_pause)
		lc->requested_fc |= PAUSE_TX;
	if (netif_running(dev))
		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
				     lc);
	return 0;
}

static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct sge *s = &pi->adapter->sge;

	e->rx_max_pending = MAX_RX_BUFFERS;
	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
	e->rx_jumbo_max_pending = 0;
	e->tx_max_pending = MAX_TXQ_ENTRIES;

	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
	e->rx_jumbo_pending = 0;
	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
}

static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
{
	int i;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct sge *s = &adapter->sge;

	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
	    e->tx_pending > MAX_TXQ_ENTRIES ||
	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
		return -EINVAL;

	if (adapter->flags & FULL_INIT_DONE)
		return -EBUSY;

	for (i = 0; i < pi->nqsets; ++i) {
		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
	}
	return 0;
}

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}
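closest_timer() and closest_thres() are the same linear nearest-match scan over a small hardware table, differing only in which table they walk. A self-contained version of the idea with example values:

#include <stdio.h>
#include <stdlib.h>

/* Return the index of the table entry nearest to target, as the two
 * helpers above do for the SGE timer and counter tables. */
static int closest(const int *tbl, int n, int target)
{
	int i, match = 0, min_delta = abs(target - tbl[0]);

	for (i = 1; i < n; i++) {
		int delta = abs(target - tbl[i]);

		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

int main(void)
{
	int timers[] = { 1, 5, 10, 50, 100, 200 };	/* example table */

	/* 60 is nearest to 50, so this prints index 3 */
	printf("closest to 60 -> index %d\n", closest(timers, 6, 60));
	return 0;
}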

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adap,
			       const struct sge_rspq *q)
{
	unsigned int idx = q->intr_params >> 1;

	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adap: the adapter
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
					    &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
	return 0;
}
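qtimer_val() recovers the timer index from bit 1 upward of intr_params, and set_rxq_intr_params() writes it back alongside the packet-count enable. A sketch of that encoding, where the bit-0 flag and left-shift-by-one are assumptions mirroring QINTR_CNT_EN and QINTR_TIMER_IDX():

#include <stdio.h>

int main(void)
{
	unsigned int timer_idx = 3, cnt_en = 1;
	unsigned int intr_params = (timer_idx << 1) | cnt_en;	/* assumed layout */

	printf("timer idx %u, counter %s\n", intr_params >> 1,
	       (intr_params & 1) ? "enabled" : "disabled");
	return 0;
}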

static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
}

static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
{
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;
	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;

	c->rx_coalesce_usecs = qtimer_val(adap, rq);
	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
		adap->sge.counter_val[rq->pktcnt_idx] : 0;
	return 0;
}

/**
 *	eeprom_ptov - translate a physical EEPROM address to virtual
 *	@phys_addr: the physical EEPROM address
 *	@fn: the PCI function number
 *	@sz: size of function-specific area
 *
 *	Translate a physical EEPROM address to virtual.  The first 1K is
 *	accessed through virtual addresses starting at 31K, the rest is
 *	accessed through virtual addresses starting at 0.
 *
 *	The mapping is as follows:
 *	[0..1K) -> [31K..32K)
 *	[1K..1K+A) -> [31K-A..31K)
 *	[1K+A..ES) -> [0..ES-A-1K)
 *
 *	where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
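The mapping is easy to sanity-check standalone. Below, the function body is copied verbatim and driven with fn = 1 and sz = 1024 (so A = 1024); the 16K EEPROMSIZE is only an illustrative stand-in for the real constant:

#include <stdio.h>

#define EEPROMSIZE (16 * 1024)	/* illustrative size, not the driver's value */

static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -1;
}

int main(void)
{
	printf("0    -> %d\n", eeprom_ptov(0, 1, 1024));	/* 31744: [0..1K) maps to 31K */
	printf("1024 -> %d\n", eeprom_ptov(1024, 1, 1024));	/* 30720: [1K..2K) maps below 31K */
	printf("2048 -> %d\n", eeprom_ptov(2048, 1, 1024));	/* 0: the rest maps from 0 */
	return 0;
}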

/*
 * The next two routines implement eeprom read/write from physical addresses.
 */
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
	return vaddr < 0 ? vaddr : 0;
}

static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);

	if (vaddr >= 0)
		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
	return vaddr < 0 ? vaddr : 0;
}

#define EEPROM_MAGIC 0x38E2F10C

static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i, err = 0;
	struct adapter *adapter = netdev2adap(dev);

	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	e->magic = EEPROM_MAGIC;
	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);

	if (!err)
		memcpy(data, buf + e->offset, e->len);
	kfree(buf);
	return err;
}

static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		      u8 *data)
{
	u8 *buf;
	int err = 0;
	u32 aligned_offset, aligned_len, *p;
	struct adapter *adapter = netdev2adap(dev);

	if (eeprom->magic != EEPROM_MAGIC)
		return -EINVAL;

	aligned_offset = eeprom->offset & ~3;
	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;

	if (adapter->fn > 0) {
		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;

		if (aligned_offset < start ||
		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
			return -EPERM;
	}

	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
		/*
		 * RMW possibly needed for first or last words.
		 */
		buf = kmalloc(aligned_len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
		if (!err && aligned_len > 4)
			err = eeprom_rd_phys(adapter,
					     aligned_offset + aligned_len - 4,
					     (u32 *)&buf[aligned_len - 4]);
		if (err)
			goto out;
		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
	} else
		buf = data;

	err = t4_seeprom_wp(adapter, false);
	if (err)
		goto out;

	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
		err = eeprom_wr_phys(adapter, aligned_offset, *p);
		aligned_offset += 4;
	}

	if (!err)
		err = t4_seeprom_wp(adapter, true);
out:
	if (buf != data)
		kfree(buf);
	return err;
}
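The 4-byte alignment arithmetic at the top of set_eeprom() rounds the offset down and the length up so partial first and last words can be read-modify-written; a worked check with example values:

#include <stdio.h>

int main(void)
{
	unsigned int offset = 6, len = 9;	/* example unaligned request */
	unsigned int aligned_offset = offset & ~3u;
	unsigned int aligned_len = (len + (offset & 3) + 3) & ~3u;

	/* prints: [6..15) -> aligned [4..16) */
	printf("[%u..%u) -> aligned [%u..%u)\n", offset, offset + len,
	       aligned_offset, aligned_offset + aligned_len);
	return 0;
}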
1910 1910
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
{
	int ret;
	const struct firmware *fw;
	struct adapter *adap = netdev2adap(netdev);

	ef->data[sizeof(ef->data) - 1] = '\0';
	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
	if (ret < 0)
		return ret;

	ret = t4_load_fw(adap, fw->data, fw->size);
	release_firmware(fw);
	if (!ret)
		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
	return ret;
}

#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
#define BCAST_CRC 0xa0ccc1a6

static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	wol->supported = WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = netdev2adap(dev)->wol;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

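/*
 * WAKE_MAGIC is handled by the dedicated magic-packet filter via
 * t4_wol_magic_enable(); WAKE_BCAST is built from two t4_wol_pat_enable()
 * pattern entries (BCAST_CRC is apparently the match CRC the hardware
 * expects for the broadcast pattern).
 */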
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int err = 0;
	struct port_info *pi = netdev_priv(dev);

	if (wol->wolopts & ~WOL_SUPPORTED)
		return -EINVAL;
	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
	if (wol->wolopts & WAKE_BCAST) {
		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
					~0ULL, 0, false);
		if (!err)
			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
						~6ULL, ~0ULL, BCAST_CRC, true);
	} else
		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
	return err;
}

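/*
 * Only NETIF_F_HW_VLAN_RX is handled here; it is applied by reprogramming
 * the VI's Rx mode through the firmware.  On failure the feature bit is
 * flipped back so the stack sees the device's real state.
 */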
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_RX;
	return err;
}

static u32 get_rss_table_size(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return pi->rss_size;
}

static int get_rss_table(struct net_device *dev, u32 *p)
{
	const struct port_info *pi = netdev_priv(dev);
	unsigned int n = pi->rss_size;

	while (n--)
		p[n] = pi->rss[n];
	return 0;
}

static int set_rss_table(struct net_device *dev, const u32 *p)
{
	unsigned int i;
	struct port_info *pi = netdev_priv(dev);

	for (i = 0; i < pi->rss_size; i++)
		pi->rss[i] = p[i];
	if (pi->adapter->flags & FULL_INIT_DONE)
		return write_rss(pi, pi->rss);
	return 0;
}

static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		     u32 *rules)
{
	const struct port_info *pi = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXFH: {
		unsigned int v = pi->rss_mode;

		info->data = 0;
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V4_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V4_FLOW:
		case AH_ESP_V4_FLOW:
		case IPV4_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case TCP_V6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case UDP_V6_FLOW:
			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
				info->data = RXH_IP_SRC | RXH_IP_DST |
					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		case SCTP_V6_FLOW:
		case AH_ESP_V6_FLOW:
		case IPV6_FLOW:
			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
				info->data = RXH_IP_SRC | RXH_IP_DST;
			break;
		}
		return 0;
	}
	case ETHTOOL_GRXRINGS:
		info->data = pi->nqsets;
		return 0;
	}
	return -EOPNOTSUPP;
}

static const struct ethtool_ops cxgb_ethtool_ops = {
	.get_settings      = get_settings,
	.set_settings      = set_settings,
	.get_drvinfo       = get_drvinfo,
	.get_msglevel      = get_msglevel,
	.set_msglevel      = set_msglevel,
	.get_ringparam     = get_sge_param,
	.set_ringparam     = set_sge_param,
	.get_coalesce      = get_coalesce,
	.set_coalesce      = set_coalesce,
	.get_eeprom_len    = get_eeprom_len,
	.get_eeprom        = get_eeprom,
	.set_eeprom        = set_eeprom,
	.get_pauseparam    = get_pauseparam,
	.set_pauseparam    = set_pauseparam,
	.get_link          = ethtool_op_get_link,
	.get_strings       = get_strings,
	.set_phys_id       = identify_port,
	.nway_reset        = restart_autoneg,
	.get_sset_count    = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len      = get_regs_len,
	.get_regs          = get_regs,
	.get_wol           = get_wol,
	.set_wol           = set_wol,
	.get_rxnfc         = get_rxnfc,
	.get_rxfh_indir_size = get_rss_table_size,
	.get_rxfh_indir    = get_rss_table,
	.set_rxfh_indir    = set_rss_table,
	.flash_device      = set_flash,
};

/*
 * debugfs support
 */
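/*
 * Read adapter memory (EDC0, EDC1 or MC, selected by the low bits of the
 * file's private_data) in 64-byte chunks and copy it out to user space.
 */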
static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
			loff_t *ppos)
{
	loff_t pos = *ppos;
	loff_t avail = file->f_path.dentry->d_inode->i_size;
	unsigned int mem = (uintptr_t)file->private_data & 3;
	struct adapter *adap = file->private_data - mem;

	if (pos < 0)
		return -EINVAL;
	if (pos >= avail)
		return 0;
	if (count > avail - pos)
		count = avail - pos;

	while (count) {
		size_t len;
		int ret, ofst;
		__be32 data[16];

		if (mem == MEM_MC)
			ret = t4_mc_read(adap, pos, data, NULL);
		else
			ret = t4_edc_read(adap, mem, pos, data, NULL);
		if (ret)
			return ret;

		ofst = pos % sizeof(data);
		len = min(count, sizeof(data) - ofst);
		if (copy_to_user(buf, (u8 *)data + ofst, len))
			return -EFAULT;

		buf += len;
		pos += len;
		count -= len;
	}
	count = pos - *ppos;
	*ppos = pos;
	return count;
}

static const struct file_operations mem_debugfs_fops = {
	.owner  = THIS_MODULE,
	.open   = simple_open,
	.read   = mem_read,
	.llseek = default_llseek,
};

static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
				      unsigned int idx, unsigned int size_mb)
{
	struct dentry *de;

	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
				 (void *)adap + idx, &mem_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = size_mb << 20;
}

static int __devinit setup_debugfs(struct adapter *adap)
{
	int i;

	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
	if (i & EDRAM0_ENABLE)
		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
	if (i & EDRAM1_ENABLE)
		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
	if (i & EXT_MEM_ENABLE)
		add_debugfs_mem(adap, "mc", MEM_MC,
			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
	if (adap->l2t)
		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
				    &t4_l2t_fops);
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = p - t->atid_tab;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);

/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/*
 * Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	stid -= t->stid_base;
	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int natids = t->natids;

	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       BITS_TO_LONGS(t->nstids) * sizeof(long);
	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids);
	return 0;
}

/**
 * cxgb4_create_server - create an IP server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IP address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IP server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
	return t4_mgmt_tx(adap, skb);
}
EXPORT_SYMBOL(cxgb4_create_server);

/**
 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 * @mtus: the HW MTU table
 * @mtu: the target MTU
 * @idx: index of selected entry in the MTU table
 *
 * Returns the index and the value in the HW MTU table that is closest to
 * but does not exceed @mtu, unless @mtu is smaller than any value in the
 * table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);

/**
 * cxgb4_port_chan - get the HW channel of a port
 * @dev: the net device for the port
 *
 * Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

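/**
 * cxgb4_dbfifo_count - read the SGE doorbell FIFO occupancy
 * @dev: the net device for the port
 * @lpfifo: nonzero to read the low-priority FIFO, else the high-priority one
 *
 * Return the current entry count of the selected doorbell FIFO.
 */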
unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v;

	v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
	return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v);
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 * cxgb4_port_viid - get the VI id of a port
 * @dev: the net device for the port
 *
 * Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 * cxgb4_port_idx - get the index of a port
 * @dev: the net device for the port
 *
 * Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

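/*
 * Snapshot the adapter's TP TCP statistics for IPv4 and IPv6 under the
 * stats lock so both counter sets are read consistently.
 */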
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
		     HPZ3(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

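/*
 * Request a flush of the SGE egress-queue context cache; the command is
 * issued through the firmware address space (the literal address and value
 * are opaque hardware constants).
 */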
int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);
	int ret;

	ret = t4_fwaddrspace_write(adap, adap->mbox,
				   0xe1000000 + A_SGE_CTXT_CMD, 0x20000000);
	return ret;
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

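/*
 * Read the hardware producer and consumer indices of egress queue @qid
 * from its doorbell-queue context via the memory window.
 */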
static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, A_SGE_DBQ_CTXT_BADDR) + 24 * qid + 8;
	__be64 indices;
	int ret;

	ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(qid) | PIDX(delta));
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);

static struct pci_driver cxgb4_driver;

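/*
 * On a neighbour update, prod the L2T if the (possibly VLAN-encapsulated)
 * device hangs off one of our adapters.
 */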
static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

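/*
 * Sleep until both SGE doorbell FIFOs have drained, re-checking the FIFO
 * status every @usecs microseconds.
 */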
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v;

	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
		v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
		if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
			break;
	} while (1);
}

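/*
 * Helpers to gate a Tx queue's doorbell under its db_lock.  All queues are
 * disabled around DB-drop recovery (see process_db_drop() below) and
 * re-enabled once sync_txq_pidx() has replayed any lost producer-index
 * updates.
 */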
static void disable_txq_db(struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	q->db_disabled = 1;
	spin_unlock_irq(&q->db_lock);
}

static void enable_txq_db(struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(&adap->sge.ctrlq[i].q);
}

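/*
 * Bring the hardware's producer index for a Tx queue back in sync with the
 * driver's after doorbells may have been dropped, then clear the queue's
 * db_disabled flag.
 */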
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_bh(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
			     QID(q->cntxt_id) | PIDX(delta));
	}
out:
	q->db_disabled = 0;
	spin_unlock_bh(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	mutex_lock(&uld_mutex);
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
					     cmd);
	mutex_unlock(&uld_mutex);
}

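/*
 * Work handler for a doorbell-FIFO-full condition: tell the RDMA ULD to
 * back off, wait for the FIFOs to drain, re-arm the FIFO interrupts and
 * signal the ULD that the FIFOs are empty again.
 */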
static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	drain_db_fifo(adap, dbfifo_drain_delay);
	t4_set_reg_field(adap, SGE_INT_ENABLE3,
			 DBFIFO_HP_INT | DBFIFO_LP_INT,
			 DBFIFO_HP_INT | DBFIFO_LP_INT);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
}

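/*
 * Work handler for a dropped-doorbell condition: clear the DROPPED_DB
 * status, freeze all Tx doorbells, notify the RDMA ULD, drain the FIFOs,
 * then resynchronize every queue's producer index and re-enable doorbells.
 */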
static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
	disable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
	drain_db_fifo(adap, 1);
	recover_all_queues(adap);
	enable_dbs(adap);
}

void t4_db_full(struct adapter *adap)
{
	t4_set_reg_field(adap, SGE_INT_ENABLE3,
			 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
	queue_work(workq, &adap->db_full_task);
}

void t4_db_dropped(struct adapter *adap)
{
	queue_work(workq, &adap->db_drop_task);
}

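/*
 * Fill in a cxgb4_lld_info for one upper-layer driver and call its add()
 * method.  If the adapter is already fully initialized, the ULD is
 * immediately told that the device is up.
 */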
static void uld_attach(struct adapter *adap, unsigned int uld)
{
	void *handle;
	struct cxgb4_lld_info lli;

	lli.pdev = adap->pdev;
	lli.l2t = adap->l2t;
	lli.tids = &adap->tids;
	lli.ports = adap->port;
	lli.vr = &adap->vres;
	lli.mtus = adap->params.mtus;
	if (uld == CXGB4_ULD_RDMA) {
		lli.rxq_ids = adap->sge.rdma_rxq;
		lli.nrxq = adap->sge.rdmaqs;
	} else if (uld == CXGB4_ULD_ISCSI) {
		lli.rxq_ids = adap->sge.ofld_rxq;
		lli.nrxq = adap->sge.ofldqsets;
	}
	lli.ntxq = adap->sge.ofldqsets;
	lli.nchan = adap->params.nports;
	lli.nports = adap->params.nports;
	lli.wr_cred = adap->params.ofldq_wr_cred;
	lli.adapter_type = adap->params.rev;
	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
			(adap->fn * 4));
	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
	lli.fw_vers = adap->params.fw_vers;
	lli.dbfifo_int_thresh = dbfifo_int_thresh;

	handle = ulds[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 uld_str[uld], PTR_ERR(handle));
		return;
	}

	adap->uld_handle[uld] = handle;

	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}

	if (adap->flags & FULL_INIT_DONE)
		ulds[uld].state_change(handle, CXGB4_STATE_UP);
}

static void attach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (ulds[i].add)
			uld_attach(adap, i);
	mutex_unlock(&uld_mutex);
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i]) {
			ulds[i].state_change(adap->uld_handle[i],
					     CXGB4_STATE_DETACH);
			adap->uld_handle[i] = NULL;
		}
	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld_handle[i])
			ulds[i].state_change(adap->uld_handle[i], new_state);
	mutex_unlock(&uld_mutex);
}

/**
 * cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.  Returns
 * %-EBUSY if a ULD of the same type is already registered.
 */
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
{
	int ret = 0;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	if (ulds[type].add) {
		ret = -EBUSY;
		goto out;
	}
	ulds[type] = *p;
	list_for_each_entry(adap, &adapter_list, list_node)
		uld_attach(adap, type);
out:	mutex_unlock(&uld_mutex);
	return ret;
}
EXPORT_SYMBOL(cxgb4_register_uld);

/**
 * cxgb4_unregister_uld - unregister an upper-layer driver
 * @type: the ULD type
 *
 * Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		adap->uld_handle[type] = NULL;
	ulds[type].add = NULL;
	mutex_unlock(&uld_mutex);
	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);

/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	err = setup_sge_queues(adap);
	if (err)
		goto out;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;

		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}
	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	notify_ulds(adap, CXGB4_STATE_UP);
out:
	return err;
irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
freeq:
	t4_free_sge_resources(adap);
	goto out;
}

2949 static void cxgb_down(struct adapter *adapter) 2949 static void cxgb_down(struct adapter *adapter)
2950 { 2950 {
2951 t4_intr_disable(adapter); 2951 t4_intr_disable(adapter);
2952 cancel_work_sync(&adapter->tid_release_task); 2952 cancel_work_sync(&adapter->tid_release_task);
2953 cancel_work_sync(&adapter->db_full_task); 2953 cancel_work_sync(&adapter->db_full_task);
2954 cancel_work_sync(&adapter->db_drop_task); 2954 cancel_work_sync(&adapter->db_drop_task);
2955 adapter->tid_release_task_busy = false; 2955 adapter->tid_release_task_busy = false;
2956 adapter->tid_release_head = NULL; 2956 adapter->tid_release_head = NULL;
2957 2957
2958 if (adapter->flags & USING_MSIX) { 2958 if (adapter->flags & USING_MSIX) {
2959 free_msix_queue_irqs(adapter); 2959 free_msix_queue_irqs(adapter);
2960 free_irq(adapter->msix_info[0].vec, adapter); 2960 free_irq(adapter->msix_info[0].vec, adapter);
2961 } else 2961 } else
2962 free_irq(adapter->pdev->irq, adapter); 2962 free_irq(adapter->pdev->irq, adapter);
2963 quiesce_rx(adapter); 2963 quiesce_rx(adapter);
2964 t4_sge_stop(adapter); 2964 t4_sge_stop(adapter);
2965 t4_free_sge_resources(adapter); 2965 t4_free_sge_resources(adapter);
2966 adapter->flags &= ~FULL_INIT_DONE; 2966 adapter->flags &= ~FULL_INIT_DONE;
2967 } 2967 }
2968 2968
2969 /* 2969 /*
2970 * net_device operations 2970 * net_device operations
2971 */ 2971 */
2972 static int cxgb_open(struct net_device *dev) 2972 static int cxgb_open(struct net_device *dev)
2973 { 2973 {
2974 int err; 2974 int err;
2975 struct port_info *pi = netdev_priv(dev); 2975 struct port_info *pi = netdev_priv(dev);
2976 struct adapter *adapter = pi->adapter; 2976 struct adapter *adapter = pi->adapter;
2977 2977
2978 netif_carrier_off(dev); 2978 netif_carrier_off(dev);
2979 2979
2980 if (!(adapter->flags & FULL_INIT_DONE)) { 2980 if (!(adapter->flags & FULL_INIT_DONE)) {
2981 err = cxgb_up(adapter); 2981 err = cxgb_up(adapter);
2982 if (err < 0) 2982 if (err < 0)
2983 return err; 2983 return err;
2984 } 2984 }
2985 2985
2986 err = link_start(dev); 2986 err = link_start(dev);
2987 if (!err) 2987 if (!err)
2988 netif_tx_start_all_queues(dev); 2988 netif_tx_start_all_queues(dev);
2989 return err; 2989 return err;
2990 } 2990 }
2991 2991
2992 static int cxgb_close(struct net_device *dev) 2992 static int cxgb_close(struct net_device *dev)
2993 { 2993 {
2994 struct port_info *pi = netdev_priv(dev); 2994 struct port_info *pi = netdev_priv(dev);
2995 struct adapter *adapter = pi->adapter; 2995 struct adapter *adapter = pi->adapter;
2996 2996
2997 netif_tx_stop_all_queues(dev); 2997 netif_tx_stop_all_queues(dev);
2998 netif_carrier_off(dev); 2998 netif_carrier_off(dev);
2999 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false); 2999 return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
3000 } 3000 }
3001 3001
3002 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev, 3002 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
3003 struct rtnl_link_stats64 *ns) 3003 struct rtnl_link_stats64 *ns)
3004 { 3004 {
3005 struct port_stats stats; 3005 struct port_stats stats;
3006 struct port_info *p = netdev_priv(dev); 3006 struct port_info *p = netdev_priv(dev);
3007 struct adapter *adapter = p->adapter; 3007 struct adapter *adapter = p->adapter;
3008 3008
3009 spin_lock(&adapter->stats_lock); 3009 spin_lock(&adapter->stats_lock);
3010 t4_get_port_stats(adapter, p->tx_chan, &stats); 3010 t4_get_port_stats(adapter, p->tx_chan, &stats);
3011 spin_unlock(&adapter->stats_lock); 3011 spin_unlock(&adapter->stats_lock);
3012 3012
3013 ns->tx_bytes = stats.tx_octets; 3013 ns->tx_bytes = stats.tx_octets;
3014 ns->tx_packets = stats.tx_frames; 3014 ns->tx_packets = stats.tx_frames;
3015 ns->rx_bytes = stats.rx_octets; 3015 ns->rx_bytes = stats.rx_octets;
3016 ns->rx_packets = stats.rx_frames; 3016 ns->rx_packets = stats.rx_frames;
3017 ns->multicast = stats.rx_mcast_frames; 3017 ns->multicast = stats.rx_mcast_frames;
3018 3018
3019 /* detailed rx_errors */ 3019 /* detailed rx_errors */
3020 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long + 3020 ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
3021 stats.rx_runt; 3021 stats.rx_runt;
3022 ns->rx_over_errors = 0; 3022 ns->rx_over_errors = 0;
3023 ns->rx_crc_errors = stats.rx_fcs_err; 3023 ns->rx_crc_errors = stats.rx_fcs_err;
3024 ns->rx_frame_errors = stats.rx_symbol_err; 3024 ns->rx_frame_errors = stats.rx_symbol_err;
3025 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 + 3025 ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
3026 stats.rx_ovflow2 + stats.rx_ovflow3 + 3026 stats.rx_ovflow2 + stats.rx_ovflow3 +
3027 stats.rx_trunc0 + stats.rx_trunc1 + 3027 stats.rx_trunc0 + stats.rx_trunc1 +
3028 stats.rx_trunc2 + stats.rx_trunc3; 3028 stats.rx_trunc2 + stats.rx_trunc3;
3029 ns->rx_missed_errors = 0; 3029 ns->rx_missed_errors = 0;
3030 3030
3031 /* detailed tx_errors */ 3031 /* detailed tx_errors */
3032 ns->tx_aborted_errors = 0; 3032 ns->tx_aborted_errors = 0;
3033 ns->tx_carrier_errors = 0; 3033 ns->tx_carrier_errors = 0;
3034 ns->tx_fifo_errors = 0; 3034 ns->tx_fifo_errors = 0;
3035 ns->tx_heartbeat_errors = 0; 3035 ns->tx_heartbeat_errors = 0;
3036 ns->tx_window_errors = 0; 3036 ns->tx_window_errors = 0;
3037 3037
3038 ns->tx_errors = stats.tx_error_frames; 3038 ns->tx_errors = stats.tx_error_frames;
3039 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err + 3039 ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
3040 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors; 3040 ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
3041 return ns; 3041 return ns;
3042 } 3042 }
3043 3043
3044 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 3044 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
3045 { 3045 {
3046 unsigned int mbox; 3046 unsigned int mbox;
3047 int ret = 0, prtad, devad; 3047 int ret = 0, prtad, devad;
3048 struct port_info *pi = netdev_priv(dev); 3048 struct port_info *pi = netdev_priv(dev);
3049 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data; 3049 struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
3050 3050
3051 switch (cmd) { 3051 switch (cmd) {
3052 case SIOCGMIIPHY: 3052 case SIOCGMIIPHY:
3053 if (pi->mdio_addr < 0) 3053 if (pi->mdio_addr < 0)
3054 return -EOPNOTSUPP; 3054 return -EOPNOTSUPP;
3055 data->phy_id = pi->mdio_addr; 3055 data->phy_id = pi->mdio_addr;
3056 break; 3056 break;
3057 case SIOCGMIIREG: 3057 case SIOCGMIIREG:
3058 case SIOCSMIIREG: 3058 case SIOCSMIIREG:
3059 if (mdio_phy_id_is_c45(data->phy_id)) { 3059 if (mdio_phy_id_is_c45(data->phy_id)) {
3060 prtad = mdio_phy_id_prtad(data->phy_id); 3060 prtad = mdio_phy_id_prtad(data->phy_id);
3061 devad = mdio_phy_id_devad(data->phy_id); 3061 devad = mdio_phy_id_devad(data->phy_id);
3062 } else if (data->phy_id < 32) { 3062 } else if (data->phy_id < 32) {
3063 prtad = data->phy_id; 3063 prtad = data->phy_id;
3064 devad = 0; 3064 devad = 0;
3065 data->reg_num &= 0x1f; 3065 data->reg_num &= 0x1f;
3066 } else 3066 } else
3067 return -EINVAL; 3067 return -EINVAL;
3068 3068
3069 mbox = pi->adapter->fn; 3069 mbox = pi->adapter->fn;
3070 if (cmd == SIOCGMIIREG) 3070 if (cmd == SIOCGMIIREG)
3071 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad, 3071 ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
3072 data->reg_num, &data->val_out); 3072 data->reg_num, &data->val_out);
3073 else 3073 else
3074 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad, 3074 ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
3075 data->reg_num, data->val_in); 3075 data->reg_num, data->val_in);
3076 break; 3076 break;
3077 default: 3077 default:
3078 return -EOPNOTSUPP; 3078 return -EOPNOTSUPP;
3079 } 3079 }
3080 return ret; 3080 return ret;
3081 } 3081 }
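The SIOCGMIIREG/SIOCSMIIREG branch above splits data->phy_id into a port address and device address when it carries a Clause 45 encoding. The constants below mirror the mdio_phy_id_* helpers from linux/mdio.h; the standalone program is just an illustration of the decode, not driver code.

	#include <stdio.h>

	#define MDIO_PHY_ID_C45		0x8000	/* flag: phy_id is Clause 45 */
	#define MDIO_PHY_ID_PRTAD	0x03e0	/* port address, bits 9:5 */
	#define MDIO_PHY_ID_DEVAD	0x001f	/* device address, bits 4:0 */

	int main(void)
	{
		int phy_id = MDIO_PHY_ID_C45 | (3 << 5) | 1;	/* prtad 3, devad 1 */

		if (phy_id & MDIO_PHY_ID_C45)
			printf("c45: prtad=%d devad=%d\n",
			       (phy_id & MDIO_PHY_ID_PRTAD) >> 5,
			       phy_id & MDIO_PHY_ID_DEVAD);
		else if (phy_id < 32)
			printf("c22: prtad=%d, devad=0, reg_num masked to 0x1f\n",
			       phy_id);
		return 0;
	}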
3082 3082
3083 static void cxgb_set_rxmode(struct net_device *dev) 3083 static void cxgb_set_rxmode(struct net_device *dev)
3084 { 3084 {
3085 /* unfortunately we can't return errors to the stack */ 3085 /* unfortunately we can't return errors to the stack */
3086 set_rxmode(dev, -1, false); 3086 set_rxmode(dev, -1, false);
3087 } 3087 }
3088 3088
3089 static int cxgb_change_mtu(struct net_device *dev, int new_mtu) 3089 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
3090 { 3090 {
3091 int ret; 3091 int ret;
3092 struct port_info *pi = netdev_priv(dev); 3092 struct port_info *pi = netdev_priv(dev);
3093 3093
3094 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ 3094 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
3095 return -EINVAL; 3095 return -EINVAL;
3096 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1, 3096 ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
3097 -1, -1, -1, true); 3097 -1, -1, -1, true);
3098 if (!ret) 3098 if (!ret)
3099 dev->mtu = new_mtu; 3099 dev->mtu = new_mtu;
3100 return ret; 3100 return ret;
3101 } 3101 }
3102 3102
3103 static int cxgb_set_mac_addr(struct net_device *dev, void *p) 3103 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
3104 { 3104 {
3105 int ret; 3105 int ret;
3106 struct sockaddr *addr = p; 3106 struct sockaddr *addr = p;
3107 struct port_info *pi = netdev_priv(dev); 3107 struct port_info *pi = netdev_priv(dev);
3108 3108
3109 if (!is_valid_ether_addr(addr->sa_data)) 3109 if (!is_valid_ether_addr(addr->sa_data))
3110 return -EADDRNOTAVAIL; 3110 return -EADDRNOTAVAIL;
3111 3111
3112 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid, 3112 ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
3113 pi->xact_addr_filt, addr->sa_data, true, true); 3113 pi->xact_addr_filt, addr->sa_data, true, true);
3114 if (ret < 0) 3114 if (ret < 0)
3115 return ret; 3115 return ret;
3116 3116
3117 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 3117 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3118 pi->xact_addr_filt = ret; 3118 pi->xact_addr_filt = ret;
3119 return 0; 3119 return 0;
3120 } 3120 }
3121 3121
3122 #ifdef CONFIG_NET_POLL_CONTROLLER 3122 #ifdef CONFIG_NET_POLL_CONTROLLER
3123 static void cxgb_netpoll(struct net_device *dev) 3123 static void cxgb_netpoll(struct net_device *dev)
3124 { 3124 {
3125 struct port_info *pi = netdev_priv(dev); 3125 struct port_info *pi = netdev_priv(dev);
3126 struct adapter *adap = pi->adapter; 3126 struct adapter *adap = pi->adapter;
3127 3127
3128 if (adap->flags & USING_MSIX) { 3128 if (adap->flags & USING_MSIX) {
3129 int i; 3129 int i;
3130 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; 3130 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
3131 3131
3132 for (i = pi->nqsets; i; i--, rx++) 3132 for (i = pi->nqsets; i; i--, rx++)
3133 t4_sge_intr_msix(0, &rx->rspq); 3133 t4_sge_intr_msix(0, &rx->rspq);
3134 } else 3134 } else
3135 t4_intr_handler(adap)(0, adap); 3135 t4_intr_handler(adap)(0, adap);
3136 } 3136 }
3137 #endif 3137 #endif
3138 3138
3139 static const struct net_device_ops cxgb4_netdev_ops = { 3139 static const struct net_device_ops cxgb4_netdev_ops = {
3140 .ndo_open = cxgb_open, 3140 .ndo_open = cxgb_open,
3141 .ndo_stop = cxgb_close, 3141 .ndo_stop = cxgb_close,
3142 .ndo_start_xmit = t4_eth_xmit, 3142 .ndo_start_xmit = t4_eth_xmit,
3143 .ndo_get_stats64 = cxgb_get_stats, 3143 .ndo_get_stats64 = cxgb_get_stats,
3144 .ndo_set_rx_mode = cxgb_set_rxmode, 3144 .ndo_set_rx_mode = cxgb_set_rxmode,
3145 .ndo_set_mac_address = cxgb_set_mac_addr, 3145 .ndo_set_mac_address = cxgb_set_mac_addr,
3146 .ndo_set_features = cxgb_set_features, 3146 .ndo_set_features = cxgb_set_features,
3147 .ndo_validate_addr = eth_validate_addr, 3147 .ndo_validate_addr = eth_validate_addr,
3148 .ndo_do_ioctl = cxgb_ioctl, 3148 .ndo_do_ioctl = cxgb_ioctl,
3149 .ndo_change_mtu = cxgb_change_mtu, 3149 .ndo_change_mtu = cxgb_change_mtu,
3150 #ifdef CONFIG_NET_POLL_CONTROLLER 3150 #ifdef CONFIG_NET_POLL_CONTROLLER
3151 .ndo_poll_controller = cxgb_netpoll, 3151 .ndo_poll_controller = cxgb_netpoll,
3152 #endif 3152 #endif
3153 }; 3153 };
3154 3154
3155 void t4_fatal_err(struct adapter *adap) 3155 void t4_fatal_err(struct adapter *adap)
3156 { 3156 {
3157 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0); 3157 t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
3158 t4_intr_disable(adap); 3158 t4_intr_disable(adap);
3159 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); 3159 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
3160 } 3160 }
3161 3161
3162 static void setup_memwin(struct adapter *adap) 3162 static void setup_memwin(struct adapter *adap)
3163 { 3163 {
3164 u32 bar0; 3164 u32 bar0;
3165 3165
3166 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ 3166 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
3167 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), 3167 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
3168 (bar0 + MEMWIN0_BASE) | BIR(0) | 3168 (bar0 + MEMWIN0_BASE) | BIR(0) |
3169 WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 3169 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
3170 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), 3170 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
3171 (bar0 + MEMWIN1_BASE) | BIR(0) | 3171 (bar0 + MEMWIN1_BASE) | BIR(0) |
3172 WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 3172 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
3173 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), 3173 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
3174 (bar0 + MEMWIN2_BASE) | BIR(0) | 3174 (bar0 + MEMWIN2_BASE) | BIR(0) |
3175 WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 3175 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
3176 } 3176 }
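setup_memwin() programs each PCIe memory access window with its BAR0-relative base plus a WINDOW size field of ilog2(aperture) - 10, which suggests the field encodes log2 of the aperture in 1KB units. A small hypothetical helper (not driver code) makes the encoding explicit:

	#include <stdio.h>

	/* Mirrors WINDOW(ilog2(aperture) - 10) above, assuming the field
	 * encodes log2 of the aperture expressed in 1KB units. */
	static unsigned int window_field(unsigned long aperture_bytes)
	{
		unsigned int lg = 0;

		while ((1UL << (lg + 1)) <= aperture_bytes)
			lg++;			/* lg = ilog2(aperture_bytes) */
		return lg - 10;
	}

	int main(void)
	{
		printf("64KB aperture -> WINDOW field %u\n",
		       window_field(64 * 1024));	/* prints 6 */
		return 0;
	}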
3177 3177
3178 static void setup_memwin_rdma(struct adapter *adap) 3178 static void setup_memwin_rdma(struct adapter *adap)
3179 { 3179 {
3180 if (adap->vres.ocq.size) { 3180 if (adap->vres.ocq.size) {
3181 unsigned int start, sz_kb; 3181 unsigned int start, sz_kb;
3182 3182
3183 start = pci_resource_start(adap->pdev, 2) + 3183 start = pci_resource_start(adap->pdev, 2) +
3184 OCQ_WIN_OFFSET(adap->pdev, &adap->vres); 3184 OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
3185 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; 3185 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
3186 t4_write_reg(adap, 3186 t4_write_reg(adap,
3187 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3), 3187 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
3188 start | BIR(1) | WINDOW(ilog2(sz_kb))); 3188 start | BIR(1) | WINDOW(ilog2(sz_kb)));
3189 t4_write_reg(adap, 3189 t4_write_reg(adap,
3190 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3), 3190 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
3191 adap->vres.ocq.start); 3191 adap->vres.ocq.start);
3192 t4_read_reg(adap, 3192 t4_read_reg(adap,
3193 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3)); 3193 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
3194 } 3194 }
3195 } 3195 }
3196 3196
3197 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) 3197 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
3198 { 3198 {
3199 u32 v; 3199 u32 v;
3200 int ret; 3200 int ret;
3201 3201
3202 /* get device capabilities */ 3202 /* get device capabilities */
3203 memset(c, 0, sizeof(*c)); 3203 memset(c, 0, sizeof(*c));
3204 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3204 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3205 FW_CMD_REQUEST | FW_CMD_READ); 3205 FW_CMD_REQUEST | FW_CMD_READ);
3206 c->retval_len16 = htonl(FW_LEN16(*c)); 3206 c->retval_len16 = htonl(FW_LEN16(*c));
3207 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); 3207 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
3208 if (ret < 0) 3208 if (ret < 0)
3209 return ret; 3209 return ret;
3210 3210
3211 /* select capabilities we'll be using */ 3211 /* select capabilities we'll be using */
3212 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { 3212 if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3213 if (!vf_acls) 3213 if (!vf_acls)
3214 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); 3214 c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3215 else 3215 else
3216 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM); 3216 c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3217 } else if (vf_acls) { 3217 } else if (vf_acls) {
3218 dev_err(adap->pdev_dev, "virtualization ACLs not supported"); 3218 dev_err(adap->pdev_dev, "virtualization ACLs not supported");
3219 return ret; 3219 return ret;
3220 } 3220 }
3221 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3221 c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3222 FW_CMD_REQUEST | FW_CMD_WRITE); 3222 FW_CMD_REQUEST | FW_CMD_WRITE);
3223 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); 3223 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
3224 if (ret < 0) 3224 if (ret < 0)
3225 return ret; 3225 return ret;
3226 3226
3227 ret = t4_config_glbl_rss(adap, adap->fn, 3227 ret = t4_config_glbl_rss(adap, adap->fn,
3228 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 3228 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3229 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | 3229 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3230 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP); 3230 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
3231 if (ret < 0) 3231 if (ret < 0)
3232 return ret; 3232 return ret;
3233 3233
3234 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ, 3234 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
3235 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF); 3235 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
3236 if (ret < 0) 3236 if (ret < 0)
3237 return ret; 3237 return ret;
3238 3238
3239 t4_sge_init(adap); 3239 t4_sge_init(adap);
3240 3240
3241 /* tweak some settings */ 3241 /* tweak some settings */
3242 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849); 3242 t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
3243 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12)); 3243 t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
3244 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG); 3244 t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
3245 v = t4_read_reg(adap, TP_PIO_DATA); 3245 v = t4_read_reg(adap, TP_PIO_DATA);
3246 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR); 3246 t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
3247 3247
3248 /* get basic stuff going */ 3248 /* get basic stuff going */
3249 return t4_early_init(adap, adap->fn); 3249 return t4_early_init(adap, adap->fn);
3250 } 3250 }
3251 3251
3252 /* 3252 /*
3253 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower. 3253 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
3254 */ 3254 */
3255 #define MAX_ATIDS 8192U 3255 #define MAX_ATIDS 8192U
3256 3256
3257 /* 3257 /*
3258 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 3258 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3259 * 3259 *
3260 * If the firmware we're dealing with has Configuration File support, then 3260 * If the firmware we're dealing with has Configuration File support, then
3261 * we use that to perform all configuration. 3261 * we use that to perform all configuration.
3262 */ 3262 */
3263 3263
3264 /* 3264 /*
3265 * Tweak configuration based on module parameters, etc. Most of these have 3265 * Tweak configuration based on module parameters, etc. Most of these have
3266 * defaults assigned to them by Firmware Configuration Files (if we're using 3266 * defaults assigned to them by Firmware Configuration Files (if we're using
3267 * them) but need to be explicitly set if we're using hard-coded 3267 * them) but need to be explicitly set if we're using hard-coded
3268 * initialization. But even in the case of using Firmware Configuration 3268 * initialization. But even in the case of using Firmware Configuration
3269 * Files, we'd like to expose the ability to change these via module 3269 * Files, we'd like to expose the ability to change these via module
3270 * parameters so these are essentially common tweaks/settings for 3270 * parameters so these are essentially common tweaks/settings for
3271 * Configuration Files and hard-coded initialization ... 3271 * Configuration Files and hard-coded initialization ...
3272 */ 3272 */
3273 static int adap_init0_tweaks(struct adapter *adapter) 3273 static int adap_init0_tweaks(struct adapter *adapter)
3274 { 3274 {
3275 /* 3275 /*
3276 * Fix up various Host-Dependent Parameters like Page Size, Cache 3276 * Fix up various Host-Dependent Parameters like Page Size, Cache
3277 * Line Size, etc. The firmware default is for a 4KB Page Size and 3277 * Line Size, etc. The firmware default is for a 4KB Page Size and
3278 * 64B Cache Line Size ... 3278 * 64B Cache Line Size ...
3279 */ 3279 */
3280 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES); 3280 t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
3281 3281
3282 /* 3282 /*
3283 * Process module parameters which affect early initialization. 3283 * Process module parameters which affect early initialization.
3284 */ 3284 */
3285 if (rx_dma_offset != 2 && rx_dma_offset != 0) { 3285 if (rx_dma_offset != 2 && rx_dma_offset != 0) {
3286 dev_err(&adapter->pdev->dev, 3286 dev_err(&adapter->pdev->dev,
3287 "Ignoring illegal rx_dma_offset=%d, using 2\n", 3287 "Ignoring illegal rx_dma_offset=%d, using 2\n",
3288 rx_dma_offset); 3288 rx_dma_offset);
3289 rx_dma_offset = 2; 3289 rx_dma_offset = 2;
3290 } 3290 }
3291 t4_set_reg_field(adapter, SGE_CONTROL, 3291 t4_set_reg_field(adapter, SGE_CONTROL,
3292 PKTSHIFT_MASK, 3292 PKTSHIFT_MASK,
3293 PKTSHIFT(rx_dma_offset)); 3293 PKTSHIFT(rx_dma_offset));
3294 3294
3295 /* 3295 /*
3296 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux 3296 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
3297 * adds the pseudo header itself. 3297 * adds the pseudo header itself.
3298 */ 3298 */
3299 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG, 3299 t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG,
3300 CSUM_HAS_PSEUDO_HDR, 0); 3300 CSUM_HAS_PSEUDO_HDR, 0);
3301 3301
3302 return 0; 3302 return 0;
3303 } 3303 }
3304 3304
3305 /* 3305 /*
3306 * Attempt to initialize the adapter via a Firmware Configuration File. 3306 * Attempt to initialize the adapter via a Firmware Configuration File.
3307 */ 3307 */
3308 static int adap_init0_config(struct adapter *adapter, int reset) 3308 static int adap_init0_config(struct adapter *adapter, int reset)
3309 { 3309 {
3310 struct fw_caps_config_cmd caps_cmd; 3310 struct fw_caps_config_cmd caps_cmd;
3311 const struct firmware *cf; 3311 const struct firmware *cf;
3312 unsigned long mtype = 0, maddr = 0; 3312 unsigned long mtype = 0, maddr = 0;
3313 u32 finiver, finicsum, cfcsum; 3313 u32 finiver, finicsum, cfcsum;
3314 int ret, using_flash; 3314 int ret, using_flash;
3315 3315
3316 /* 3316 /*
3317 * Reset device if necessary. 3317 * Reset device if necessary.
3318 */ 3318 */
3319 if (reset) { 3319 if (reset) {
3320 ret = t4_fw_reset(adapter, adapter->mbox, 3320 ret = t4_fw_reset(adapter, adapter->mbox,
3321 PIORSTMODE | PIORST); 3321 PIORSTMODE | PIORST);
3322 if (ret < 0) 3322 if (ret < 0)
3323 goto bye; 3323 goto bye;
3324 } 3324 }
3325 3325
3326 /* 3326 /*
3327 * If we have a T4 configuration file under /lib/firmware/cxgb4/, 3327 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
3328 * then use that. Otherwise, use the configuration file stored 3328 * then use that. Otherwise, use the configuration file stored
3329 * in the adapter flash ... 3329 * in the adapter flash ...
3330 */ 3330 */
3331 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev); 3331 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev);
3332 if (ret < 0) { 3332 if (ret < 0) {
3333 using_flash = 1; 3333 using_flash = 1;
3334 mtype = FW_MEMTYPE_CF_FLASH; 3334 mtype = FW_MEMTYPE_CF_FLASH;
3335 maddr = t4_flash_cfg_addr(adapter); 3335 maddr = t4_flash_cfg_addr(adapter);
3336 } else { 3336 } else {
3337 u32 params[7], val[7]; 3337 u32 params[7], val[7];
3338 3338
3339 using_flash = 0; 3339 using_flash = 0;
3340 if (cf->size >= FLASH_CFG_MAX_SIZE) 3340 if (cf->size >= FLASH_CFG_MAX_SIZE)
3341 ret = -ENOMEM; 3341 ret = -ENOMEM;
3342 else { 3342 else {
3343 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3343 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3344 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF)); 3344 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
3345 ret = t4_query_params(adapter, adapter->mbox, 3345 ret = t4_query_params(adapter, adapter->mbox,
3346 adapter->fn, 0, 1, params, val); 3346 adapter->fn, 0, 1, params, val);
3347 if (ret == 0) { 3347 if (ret == 0) {
3348 /* 3348 /*
3349 * For t4_memory_write() below addresses and 3349 * For t4_memory_write() below addresses and
3350 * sizes have to be in terms of multiples of 4 3350 * sizes have to be in terms of multiples of 4
3351 * bytes. So, if the Configuration File isn't 3351 * bytes. So, if the Configuration File isn't
3352 * a multiple of 4 bytes in length we'll have 3352 * a multiple of 4 bytes in length we'll have
3353 * to write that out separately since we can't 3353 * to write that out separately since we can't
3354 * guarantee that the bytes following the 3354 * guarantee that the bytes following the
3355 * residual byte in the buffer returned by 3355 * residual byte in the buffer returned by
3356 * request_firmware() are zeroed out ... 3356 * request_firmware() are zeroed out ...
3357 */ 3357 */
3358 size_t resid = cf->size & 0x3; 3358 size_t resid = cf->size & 0x3;
3359 size_t size = cf->size & ~0x3; 3359 size_t size = cf->size & ~0x3;
3360 __be32 *data = (__be32 *)cf->data; 3360 __be32 *data = (__be32 *)cf->data;
3361 3361
3362 mtype = FW_PARAMS_PARAM_Y_GET(val[0]); 3362 mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
3363 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16; 3363 maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
3364 3364
3365 ret = t4_memory_write(adapter, mtype, maddr, 3365 ret = t4_memory_write(adapter, mtype, maddr,
3366 size, data); 3366 size, data);
3367 if (ret == 0 && resid != 0) { 3367 if (ret == 0 && resid != 0) {
3368 union { 3368 union {
3369 __be32 word; 3369 __be32 word;
3370 char buf[4]; 3370 char buf[4];
3371 } last; 3371 } last;
3372 int i; 3372 int i;
3373 3373
3374 last.word = data[size >> 2]; 3374 last.word = data[size >> 2];
3375 for (i = resid; i < 4; i++) 3375 for (i = resid; i < 4; i++)
3376 last.buf[i] = 0; 3376 last.buf[i] = 0;
3377 ret = t4_memory_write(adapter, mtype, 3377 ret = t4_memory_write(adapter, mtype,
3378 maddr + size, 3378 maddr + size,
3379 4, &last.word); 3379 4, &last.word);
3380 } 3380 }
3381 } 3381 }
3382 } 3382 }
3383 3383
3384 release_firmware(cf); 3384 release_firmware(cf);
3385 if (ret) 3385 if (ret)
3386 goto bye; 3386 goto bye;
3387 } 3387 }
3388 3388
3389 /* 3389 /*
3390 * Issue a Capability Configuration command to the firmware to get it 3390 * Issue a Capability Configuration command to the firmware to get it
3391 * to parse the Configuration File. We don't use t4_fw_config_file() 3391 * to parse the Configuration File. We don't use t4_fw_config_file()
3392 * because we want the ability to modify various features after we've 3392 * because we want the ability to modify various features after we've
3393 * processed the configuration file ... 3393 * processed the configuration file ...
3394 */ 3394 */
3395 memset(&caps_cmd, 0, sizeof(caps_cmd)); 3395 memset(&caps_cmd, 0, sizeof(caps_cmd));
3396 caps_cmd.op_to_write = 3396 caps_cmd.op_to_write =
3397 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3397 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3398 FW_CMD_REQUEST | 3398 FW_CMD_REQUEST |
3399 FW_CMD_READ); 3399 FW_CMD_READ);
3400 caps_cmd.retval_len16 = 3400 caps_cmd.retval_len16 =
3401 htonl(FW_CAPS_CONFIG_CMD_CFVALID | 3401 htonl(FW_CAPS_CONFIG_CMD_CFVALID |
3402 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 3402 FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
3403 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | 3403 FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
3404 FW_LEN16(caps_cmd)); 3404 FW_LEN16(caps_cmd));
3405 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 3405 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3406 &caps_cmd); 3406 &caps_cmd);
3407 if (ret < 0) 3407 if (ret < 0)
3408 goto bye; 3408 goto bye;
3409 3409
3410 finiver = ntohl(caps_cmd.finiver); 3410 finiver = ntohl(caps_cmd.finiver);
3411 finicsum = ntohl(caps_cmd.finicsum); 3411 finicsum = ntohl(caps_cmd.finicsum);
3412 cfcsum = ntohl(caps_cmd.cfcsum); 3412 cfcsum = ntohl(caps_cmd.cfcsum);
3413 if (finicsum != cfcsum) 3413 if (finicsum != cfcsum)
3414 dev_warn(adapter->pdev_dev, "Configuration File checksum "\ 3414 dev_warn(adapter->pdev_dev, "Configuration File checksum "\
3415 "mismatch: [fini] csum=%#x, computed csum=%#x\n", 3415 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
3416 finicsum, cfcsum); 3416 finicsum, cfcsum);
3417 3417
3418 /* 3418 /*
3419 * If we're a pure NIC driver then disable all offloading facilities.
3420 * This will allow the firmware to optimize aspects of the hardware
3421 * configuration which will result in improved performance.
3422 */
3423 caps_cmd.ofldcaps = 0;
3424 caps_cmd.iscsicaps = 0;
3425 caps_cmd.rdmacaps = 0;
3426 caps_cmd.fcoecaps = 0;
3427
3428 /*
3429 * And now tell the firmware to use the configuration we just loaded. 3419 * And now tell the firmware to use the configuration we just loaded.
3430 */ 3420 */
3431 caps_cmd.op_to_write = 3421 caps_cmd.op_to_write =
3432 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3422 htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3433 FW_CMD_REQUEST | 3423 FW_CMD_REQUEST |
3434 FW_CMD_WRITE); 3424 FW_CMD_WRITE);
3435 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); 3425 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3436 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 3426 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3437 NULL); 3427 NULL);
3438 if (ret < 0) 3428 if (ret < 0)
3439 goto bye; 3429 goto bye;
3440 3430
3441 /* 3431 /*
3442 * Tweak configuration based on system architecture, module 3432 * Tweak configuration based on system architecture, module
3443 * parameters, etc. 3433 * parameters, etc.
3444 */ 3434 */
3445 ret = adap_init0_tweaks(adapter); 3435 ret = adap_init0_tweaks(adapter);
3446 if (ret < 0) 3436 if (ret < 0)
3447 goto bye; 3437 goto bye;
3448 3438
3449 /* 3439 /*
3450 * And finally tell the firmware to initialize itself using the 3440 * And finally tell the firmware to initialize itself using the
3451 * parameters from the Configuration File. 3441 * parameters from the Configuration File.
3452 */ 3442 */
3453 ret = t4_fw_initialize(adapter, adapter->mbox); 3443 ret = t4_fw_initialize(adapter, adapter->mbox);
3454 if (ret < 0) 3444 if (ret < 0)
3455 goto bye; 3445 goto bye;
3456 3446
3457 /* 3447 /*
3458 * Return successfully and note that we're operating with parameters 3448 * Return successfully and note that we're operating with parameters
3459 * not supplied by the driver, rather than from hard-wired 3449 * not supplied by the driver, rather than from hard-wired
3460 * initialization constants buried in the driver. 3450 * initialization constants buried in the driver.
3461 */ 3451 */
3462 adapter->flags |= USING_SOFT_PARAMS; 3452 adapter->flags |= USING_SOFT_PARAMS;
3463 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\ 3453 dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
3464 "Configuration File %s, version %#x, computed checksum %#x\n", 3454 "Configuration File %s, version %#x, computed checksum %#x\n",
3465 (using_flash 3455 (using_flash
3466 ? "in device FLASH" 3456 ? "in device FLASH"
3467 : "/lib/firmware/" FW_CFNAME), 3457 : "/lib/firmware/" FW_CFNAME),
3468 finiver, cfcsum); 3458 finiver, cfcsum);
3469 return 0; 3459 return 0;
3470 3460
3471 /* 3461 /*
3472 * Something bad happened. Return the error ... (If the "error" 3462 * Something bad happened. Return the error ... (If the "error"
3473 * is that there's no Configuration File on the adapter we don't 3463 * is that there's no Configuration File on the adapter we don't
3474 * want to issue a warning since this is fairly common.) 3464 * want to issue a warning since this is fairly common.)
3475 */ 3465 */
3476 bye: 3466 bye:
3477 if (ret != -ENOENT) 3467 if (ret != -ENOENT)
3478 dev_warn(adapter->pdev_dev, "Configuration file error %d\n", 3468 dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
3479 -ret); 3469 -ret);
3480 return ret; 3470 return ret;
3481 } 3471 }
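The residual handling in adap_init0_config() above (the union of a __be32 and a 4-byte buffer) pads a Configuration File whose length is not a multiple of 4 out to a full zero-filled word before the final write. Here is a standalone sketch of the same idea, with demo_write() standing in for t4_memory_write() and copying the residual bytes explicitly rather than reading past the file's end:

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <stdio.h>

	/* Stand-in for t4_memory_write(): lengths must be multiples of 4. */
	static int demo_write(unsigned long addr, size_t len, const void *buf)
	{
		printf("write %zu bytes at %#lx\n", len, addr);
		return 0;
	}

	static int write_padded(unsigned long addr, const uint8_t *data, size_t size)
	{
		size_t resid = size & 0x3;	    /* bytes beyond the last word */
		size_t whole = size & ~(size_t)0x3; /* largest 4-byte multiple */
		int ret = demo_write(addr, whole, data);

		if (ret == 0 && resid != 0) {
			union {
				uint32_t word;
				uint8_t buf[4];
			} last = { .word = 0 };

			/* residual bytes first, the rest stays zeroed */
			memcpy(last.buf, data + whole, resid);
			ret = demo_write(addr + whole, 4, &last.word);
		}
		return ret;
	}

	int main(void)
	{
		uint8_t cfg[10] = "123456789";	/* 10 bytes: 2 residual bytes */
		return write_padded(0x10000, cfg, sizeof(cfg));
	}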
3482 3472
3483 /* 3473 /*
3484 * Attempt to initialize the adapter via hard-coded, driver supplied 3474 * Attempt to initialize the adapter via hard-coded, driver supplied
3485 * parameters ... 3475 * parameters ...
3486 */ 3476 */
3487 static int adap_init0_no_config(struct adapter *adapter, int reset) 3477 static int adap_init0_no_config(struct adapter *adapter, int reset)
3488 { 3478 {
3489 struct sge *s = &adapter->sge; 3479 struct sge *s = &adapter->sge;
3490 struct fw_caps_config_cmd caps_cmd; 3480 struct fw_caps_config_cmd caps_cmd;
3491 u32 v; 3481 u32 v;
3492 int i, ret; 3482 int i, ret;
3493 3483
3494 /* 3484 /*
3495 * Reset device if necessary 3485 * Reset device if necessary
3496 */ 3486 */
3497 if (reset) { 3487 if (reset) {
3498 ret = t4_fw_reset(adapter, adapter->mbox, 3488 ret = t4_fw_reset(adapter, adapter->mbox,
3499 PIORSTMODE | PIORST); 3489 PIORSTMODE | PIORST);
3500 if (ret < 0) 3490 if (ret < 0)
3501 goto bye; 3491 goto bye;
3502 } 3492 }
3503 3493
3504 /* 3494 /*
3505 * Get device capabilities and select which we'll be using. 3495 * Get device capabilities and select which we'll be using.
3506 */ 3496 */
3507 memset(&caps_cmd, 0, sizeof(caps_cmd)); 3497 memset(&caps_cmd, 0, sizeof(caps_cmd));
3508 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3498 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3509 FW_CMD_REQUEST | FW_CMD_READ); 3499 FW_CMD_REQUEST | FW_CMD_READ);
3510 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd)); 3500 caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
3511 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 3501 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3512 &caps_cmd); 3502 &caps_cmd);
3513 if (ret < 0) 3503 if (ret < 0)
3514 goto bye; 3504 goto bye;
3515 3505
3516 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) { 3506 if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
3517 if (!vf_acls) 3507 if (!vf_acls)
3518 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); 3508 caps_cmd.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
3519 else 3509 else
3520 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM); 3510 caps_cmd.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
3521 } else if (vf_acls) { 3511 } else if (vf_acls) {
3522 dev_err(adapter->pdev_dev, "virtualization ACLs not supported"); 3512 dev_err(adapter->pdev_dev, "virtualization ACLs not supported");
3523 goto bye; 3513 goto bye;
3524 } 3514 }
3525 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 3515 caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
3526 FW_CMD_REQUEST | FW_CMD_WRITE); 3516 FW_CMD_REQUEST | FW_CMD_WRITE);
3527 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd), 3517 ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
3528 NULL); 3518 NULL);
3529 if (ret < 0) 3519 if (ret < 0)
3530 goto bye; 3520 goto bye;
3531 3521
3532 /* 3522 /*
3533 * Tweak configuration based on system architecture, module 3523 * Tweak configuration based on system architecture, module
3534 * parameters, etc. 3524 * parameters, etc.
3535 */ 3525 */
3536 ret = adap_init0_tweaks(adapter); 3526 ret = adap_init0_tweaks(adapter);
3537 if (ret < 0) 3527 if (ret < 0)
3538 goto bye; 3528 goto bye;
3539 3529
3540 /* 3530 /*
3541 * Select RSS Global Mode we want to use. We use "Basic Virtual" 3531 * Select RSS Global Mode we want to use. We use "Basic Virtual"
3542 * mode which maps each Virtual Interface to its own section of 3532 * mode which maps each Virtual Interface to its own section of
3543 * the RSS Table and we turn on all map and hash enables ... 3533 * the RSS Table and we turn on all map and hash enables ...
3544 */ 3534 */
3545 adapter->flags |= RSS_TNLALLLOOKUP; 3535 adapter->flags |= RSS_TNLALLLOOKUP;
3546 ret = t4_config_glbl_rss(adapter, adapter->mbox, 3536 ret = t4_config_glbl_rss(adapter, adapter->mbox,
3547 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL, 3537 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
3548 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN | 3538 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
3549 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ | 3539 FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
3550 ((adapter->flags & RSS_TNLALLLOOKUP) ? 3540 ((adapter->flags & RSS_TNLALLLOOKUP) ?
3551 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0)); 3541 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP : 0));
3552 if (ret < 0) 3542 if (ret < 0)
3553 goto bye; 3543 goto bye;
3554 3544
3555 /* 3545 /*
3556 * Set up our own fundamental resource provisioning ... 3546 * Set up our own fundamental resource provisioning ...
3557 */ 3547 */
3558 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0, 3548 ret = t4_cfg_pfvf(adapter, adapter->mbox, adapter->fn, 0,
3559 PFRES_NEQ, PFRES_NETHCTRL, 3549 PFRES_NEQ, PFRES_NETHCTRL,
3560 PFRES_NIQFLINT, PFRES_NIQ, 3550 PFRES_NIQFLINT, PFRES_NIQ,
3561 PFRES_TC, PFRES_NVI, 3551 PFRES_TC, PFRES_NVI,
3562 FW_PFVF_CMD_CMASK_MASK, 3552 FW_PFVF_CMD_CMASK_MASK,
3563 pfvfres_pmask(adapter, adapter->fn, 0), 3553 pfvfres_pmask(adapter, adapter->fn, 0),
3564 PFRES_NEXACTF, 3554 PFRES_NEXACTF,
3565 PFRES_R_CAPS, PFRES_WX_CAPS); 3555 PFRES_R_CAPS, PFRES_WX_CAPS);
3566 if (ret < 0) 3556 if (ret < 0)
3567 goto bye; 3557 goto bye;
3568 3558
3569 /* 3559 /*
3570 * Perform low level SGE initialization. We need to do this before we 3560 * Perform low level SGE initialization. We need to do this before we
3571 * send the firmware the INITIALIZE command because that will cause 3561 * send the firmware the INITIALIZE command because that will cause
3572 * any other PF Drivers which are waiting for the Master 3562 * any other PF Drivers which are waiting for the Master
3573 * Initialization to proceed forward. 3563 * Initialization to proceed forward.
3574 */ 3564 */
3575 for (i = 0; i < SGE_NTIMERS - 1; i++) 3565 for (i = 0; i < SGE_NTIMERS - 1; i++)
3576 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL); 3566 s->timer_val[i] = min(intr_holdoff[i], MAX_SGE_TIMERVAL);
3577 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL; 3567 s->timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
3578 s->counter_val[0] = 1; 3568 s->counter_val[0] = 1;
3579 for (i = 1; i < SGE_NCOUNTERS; i++) 3569 for (i = 1; i < SGE_NCOUNTERS; i++)
3580 s->counter_val[i] = min(intr_cnt[i - 1], 3570 s->counter_val[i] = min(intr_cnt[i - 1],
3581 THRESHOLD_0_GET(THRESHOLD_0_MASK)); 3571 THRESHOLD_0_GET(THRESHOLD_0_MASK));
3582 t4_sge_init(adapter); 3572 t4_sge_init(adapter);
3583 3573
3584 #ifdef CONFIG_PCI_IOV 3574 #ifdef CONFIG_PCI_IOV
3585 /* 3575 /*
3586 * Provision resource limits for Virtual Functions. We currently 3576 * Provision resource limits for Virtual Functions. We currently
3587 * grant them all the same static resource limits except for the Port 3577 * grant them all the same static resource limits except for the Port
3588 * Access Rights Mask which we're assigning based on the PF. All of 3578 * Access Rights Mask which we're assigning based on the PF. All of
3589 * the static provisioning stuff for both the PF and VF really needs 3579 * the static provisioning stuff for both the PF and VF really needs
3590 * to be managed in a persistent manner for each device which the 3580 * to be managed in a persistent manner for each device which the
3591 * firmware controls. 3581 * firmware controls.
3592 */ 3582 */
3593 { 3583 {
3594 int pf, vf; 3584 int pf, vf;
3595 3585
3596 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) { 3586 for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3597 if (num_vf[pf] <= 0) 3587 if (num_vf[pf] <= 0)
3598 continue; 3588 continue;
3599 3589
3600 /* VF numbering starts at 1! */ 3590 /* VF numbering starts at 1! */
3601 for (vf = 1; vf <= num_vf[pf]; vf++) { 3591 for (vf = 1; vf <= num_vf[pf]; vf++) {
3602 ret = t4_cfg_pfvf(adapter, adapter->mbox, 3592 ret = t4_cfg_pfvf(adapter, adapter->mbox,
3603 pf, vf, 3593 pf, vf,
3604 VFRES_NEQ, VFRES_NETHCTRL, 3594 VFRES_NEQ, VFRES_NETHCTRL,
3605 VFRES_NIQFLINT, VFRES_NIQ, 3595 VFRES_NIQFLINT, VFRES_NIQ,
3606 VFRES_TC, VFRES_NVI, 3596 VFRES_TC, VFRES_NVI,
3607 FW_PFVF_CMD_CMASK_GET( 3597 FW_PFVF_CMD_CMASK_GET(
3608 FW_PFVF_CMD_CMASK_MASK), 3598 FW_PFVF_CMD_CMASK_MASK),
3609 pfvfres_pmask( 3599 pfvfres_pmask(
3610 adapter, pf, vf), 3600 adapter, pf, vf),
3611 VFRES_NEXACTF, 3601 VFRES_NEXACTF,
3612 VFRES_R_CAPS, VFRES_WX_CAPS); 3602 VFRES_R_CAPS, VFRES_WX_CAPS);
3613 if (ret < 0) 3603 if (ret < 0)
3614 dev_warn(adapter->pdev_dev, 3604 dev_warn(adapter->pdev_dev,
3615 "failed to "\ 3605 "failed to "\
3616 "provision pf/vf=%d/%d; " 3606 "provision pf/vf=%d/%d; "
3617 "err=%d\n", pf, vf, ret); 3607 "err=%d\n", pf, vf, ret);
3618 } 3608 }
3619 } 3609 }
3620 } 3610 }
3621 #endif 3611 #endif
3622 3612
3623 /* 3613 /*
3624 * Set up the default filter mode. Later we'll want to implement this 3614 * Set up the default filter mode. Later we'll want to implement this
3625 * via a firmware command, etc. ... This needs to be done before the 3615 * via a firmware command, etc. ... This needs to be done before the
3626 * firmware initialization command ... If the selected set of fields 3616 * firmware initialization command ... If the selected set of fields
3627 * isn't equal to the default value, we'll need to make sure that the 3617 * isn't equal to the default value, we'll need to make sure that the
3628 * field selections will fit in the 36-bit budget. 3618 * field selections will fit in the 36-bit budget.
3629 */ 3619 */
3630 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) { 3620 if (tp_vlan_pri_map != TP_VLAN_PRI_MAP_DEFAULT) {
3631 int j, bits = 0; 3621 int j, bits = 0;
3632 3622
3633 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++) 3623 for (j = TP_VLAN_PRI_MAP_FIRST; j <= TP_VLAN_PRI_MAP_LAST; j++)
3634 switch (tp_vlan_pri_map & (1 << j)) { 3624 switch (tp_vlan_pri_map & (1 << j)) {
3635 case 0: 3625 case 0:
3636 /* compressed filter field not enabled */ 3626 /* compressed filter field not enabled */
3637 break; 3627 break;
3638 case FCOE_MASK: 3628 case FCOE_MASK:
3639 bits += 1; 3629 bits += 1;
3640 break; 3630 break;
3641 case PORT_MASK: 3631 case PORT_MASK:
3642 bits += 3; 3632 bits += 3;
3643 break; 3633 break;
3644 case VNIC_ID_MASK: 3634 case VNIC_ID_MASK:
3645 bits += 17; 3635 bits += 17;
3646 break; 3636 break;
3647 case VLAN_MASK: 3637 case VLAN_MASK:
3648 bits += 17; 3638 bits += 17;
3649 break; 3639 break;
3650 case TOS_MASK: 3640 case TOS_MASK:
3651 bits += 8; 3641 bits += 8;
3652 break; 3642 break;
3653 case PROTOCOL_MASK: 3643 case PROTOCOL_MASK:
3654 bits += 8; 3644 bits += 8;
3655 break; 3645 break;
3656 case ETHERTYPE_MASK: 3646 case ETHERTYPE_MASK:
3657 bits += 16; 3647 bits += 16;
3658 break; 3648 break;
3659 case MACMATCH_MASK: 3649 case MACMATCH_MASK:
3660 bits += 9; 3650 bits += 9;
3661 break; 3651 break;
3662 case MPSHITTYPE_MASK: 3652 case MPSHITTYPE_MASK:
3663 bits += 3; 3653 bits += 3;
3664 break; 3654 break;
3665 case FRAGMENTATION_MASK: 3655 case FRAGMENTATION_MASK:
3666 bits += 1; 3656 bits += 1;
3667 break; 3657 break;
3668 } 3658 }
3669 3659
3670 if (bits > 36) { 3660 if (bits > 36) {
3671 dev_err(adapter->pdev_dev, 3661 dev_err(adapter->pdev_dev,
3672 "tp_vlan_pri_map=%#x needs %d bits > 36;"\ 3662 "tp_vlan_pri_map=%#x needs %d bits > 36;"\
3673 " using %#x\n", tp_vlan_pri_map, bits, 3663 " using %#x\n", tp_vlan_pri_map, bits,
3674 TP_VLAN_PRI_MAP_DEFAULT); 3664 TP_VLAN_PRI_MAP_DEFAULT);
3675 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT; 3665 tp_vlan_pri_map = TP_VLAN_PRI_MAP_DEFAULT;
3676 } 3666 }
3677 } 3667 }
3678 v = tp_vlan_pri_map; 3668 v = tp_vlan_pri_map;
3679 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA, 3669 t4_write_indirect(adapter, TP_PIO_ADDR, TP_PIO_DATA,
3680 &v, 1, TP_VLAN_PRI_MAP); 3670 &v, 1, TP_VLAN_PRI_MAP);
3681 3671
3682 /* 3672 /*
3683 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order 3673 * We need Five Tuple Lookup mode to be set in TP_GLOBAL_CONFIG in order
3684 * to support any of the compressed filter fields above. Newer 3674 * to support any of the compressed filter fields above. Newer
3685 * versions of the firmware do this automatically but it doesn't hurt 3675 * versions of the firmware do this automatically but it doesn't hurt
3686 * to set it here. Meanwhile, we do _not_ need to set Lookup Every 3676 * to set it here. Meanwhile, we do _not_ need to set Lookup Every
3687 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets 3677 * Packet in TP_INGRESS_CONFIG to support matching non-TCP packets
3688 * since the firmware automatically turns this on and off when we have 3678 * since the firmware automatically turns this on and off when we have
3689 * a non-zero number of filters active (since it does have a 3679 * a non-zero number of filters active (since it does have a
3690 * performance impact). 3680 * performance impact).
3691 */ 3681 */
3692 if (tp_vlan_pri_map) 3682 if (tp_vlan_pri_map)
3693 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG, 3683 t4_set_reg_field(adapter, TP_GLOBAL_CONFIG,
3694 FIVETUPLELOOKUP_MASK, 3684 FIVETUPLELOOKUP_MASK,
3695 FIVETUPLELOOKUP_MASK); 3685 FIVETUPLELOOKUP_MASK);
3696 3686
3697 /* 3687 /*
3698 * Tweak some settings. 3688 * Tweak some settings.
3699 */ 3689 */
3700 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) | 3690 t4_write_reg(adapter, TP_SHIFT_CNT, SYNSHIFTMAX(6) |
3701 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) | 3691 RXTSHIFTMAXR1(4) | RXTSHIFTMAXR2(15) |
3702 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) | 3692 PERSHIFTBACKOFFMAX(8) | PERSHIFTMAX(8) |
3703 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9)); 3693 KEEPALIVEMAXR1(4) | KEEPALIVEMAXR2(9));
3704 3694
3705 /* 3695 /*
3706 * Get basic stuff going by issuing the Firmware Initialize command. 3696 * Get basic stuff going by issuing the Firmware Initialize command.
3707 * Note that this _must_ be after all PFVF commands ... 3697 * Note that this _must_ be after all PFVF commands ...
3708 */ 3698 */
3709 ret = t4_fw_initialize(adapter, adapter->mbox); 3699 ret = t4_fw_initialize(adapter, adapter->mbox);
3710 if (ret < 0) 3700 if (ret < 0)
3711 goto bye; 3701 goto bye;
3712 3702
3713 /* 3703 /*
3714 * Return successfully! 3704 * Return successfully!
3715 */ 3705 */
3716 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\ 3706 dev_info(adapter->pdev_dev, "Successfully configured using built-in "\
3717 "driver parameters\n"); 3707 "driver parameters\n");
3718 return 0; 3708 return 0;
3719 3709
3720 /* 3710 /*
3721 * Something bad happened. Return the error ... 3711 * Something bad happened. Return the error ...
3722 */ 3712 */
3723 bye: 3713 bye:
3724 return ret; 3714 return ret;
3725 } 3715 }
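The 36-bit budget check in adap_init0_no_config() sums a fixed width per enabled compressed filter field. The standalone sketch below reuses those widths (the mask names are the driver's; the bit positions in the table are illustrative, not the hardware layout) to show how a field selection can overflow the budget:

	#include <stdio.h>

	/* Field widths taken from the switch in adap_init0_no_config() above. */
	static const struct { const char *name; int bits; } fields[] = {
		{ "FCOE", 1 },       { "PORT", 3 },     { "VNIC_ID", 17 },
		{ "VLAN", 17 },      { "TOS", 8 },      { "PROTOCOL", 8 },
		{ "ETHERTYPE", 16 }, { "MACMATCH", 9 }, { "MPSHITTYPE", 3 },
		{ "FRAGMENTATION", 1 },
	};

	int main(void)
	{
		/* Pick VLAN + PROTOCOL + ETHERTYPE: 17 + 8 + 16 = 41 bits. */
		unsigned int map = (1u << 3) | (1u << 5) | (1u << 6);
		int i, bits = 0;

		for (i = 0; i < 10; i++)
			if (map & (1u << i))
				bits += fields[i].bits;
		printf("selection needs %d bits; budget is 36 -> %s\n", bits,
		       bits <= 36 ? "ok" : "fall back to the default map");
		return 0;
	}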
3726 3716
3727 /* 3717 /*
3728 * Phase 0 of initialization: contact FW, obtain config, perform basic init. 3718 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
3729 */ 3719 */
3730 static int adap_init0(struct adapter *adap) 3720 static int adap_init0(struct adapter *adap)
3731 { 3721 {
3732 int ret; 3722 int ret;
3733 u32 v, port_vec; 3723 u32 v, port_vec;
3734 enum dev_state state; 3724 enum dev_state state;
3735 u32 params[7], val[7]; 3725 u32 params[7], val[7];
3736 struct fw_caps_config_cmd caps_cmd; 3726 struct fw_caps_config_cmd caps_cmd;
3737 int reset = 1, j; 3727 int reset = 1, j;
3738 3728
3739 /* 3729 /*
3740 * Contact FW, advertising Master capability (and potentially forcing 3730 * Contact FW, advertising Master capability (and potentially forcing
3741 * ourselves as the Master PF if our module parameter force_init is 3731 * ourselves as the Master PF if our module parameter force_init is
3742 * set). 3732 * set).
3743 */ 3733 */
3744 ret = t4_fw_hello(adap, adap->mbox, adap->fn, 3734 ret = t4_fw_hello(adap, adap->mbox, adap->fn,
3745 force_init ? MASTER_MUST : MASTER_MAY, 3735 force_init ? MASTER_MUST : MASTER_MAY,
3746 &state); 3736 &state);
3747 if (ret < 0) { 3737 if (ret < 0) {
3748 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", 3738 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
3749 ret); 3739 ret);
3750 return ret; 3740 return ret;
3751 } 3741 }
3752 if (ret == adap->mbox) 3742 if (ret == adap->mbox)
3753 adap->flags |= MASTER_PF; 3743 adap->flags |= MASTER_PF;
3754 if (force_init && state == DEV_STATE_INIT) 3744 if (force_init && state == DEV_STATE_INIT)
3755 state = DEV_STATE_UNINIT; 3745 state = DEV_STATE_UNINIT;
3756 3746
3757 /* 3747 /*
3758 * If we're the Master PF Driver and the device is uninitialized, 3748 * If we're the Master PF Driver and the device is uninitialized,
3759 * then let's consider upgrading the firmware ... (We always want 3749 * then let's consider upgrading the firmware ... (We always want
3760 * to check the firmware version number in order to A. get it for 3750 * to check the firmware version number in order to A. get it for
3761 * later reporting and B. to warn if the currently loaded firmware 3751 * later reporting and B. to warn if the currently loaded firmware
3762 * is excessively mismatched relative to the driver.) 3752 * is excessively mismatched relative to the driver.)
3763 */ 3753 */
3764 ret = t4_check_fw_version(adap); 3754 ret = t4_check_fw_version(adap);
3765 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { 3755 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
3766 if (ret == -EINVAL || ret > 0) { 3756 if (ret == -EINVAL || ret > 0) {
3767 if (upgrade_fw(adap) >= 0) { 3757 if (upgrade_fw(adap) >= 0) {
3768 /* 3758 /*
3769 * Note that the chip was reset as part of the 3759 * Note that the chip was reset as part of the
3770 * firmware upgrade so we don't reset it again 3760 * firmware upgrade so we don't reset it again
3771 * below and grab the new firmware version. 3761 * below and grab the new firmware version.
3772 */ 3762 */
3773 reset = 0; 3763 reset = 0;
3774 ret = t4_check_fw_version(adap); 3764 ret = t4_check_fw_version(adap);
3775 } 3765 }
3776 } 3766 }
3777 if (ret < 0) 3767 if (ret < 0)
3778 return ret; 3768 return ret;
3779 } 3769 }
3780 3770
3781 /* 3771 /*
3782 * Grab VPD parameters. This should be done after we establish a 3772 * Grab VPD parameters. This should be done after we establish a
3783 * connection to the firmware since some of the VPD parameters 3773 * connection to the firmware since some of the VPD parameters
3784 * (notably the Core Clock frequency) are retrieved via requests to 3774 * (notably the Core Clock frequency) are retrieved via requests to
3785 * the firmware. On the other hand, we need these fairly early on 3775 * the firmware. On the other hand, we need these fairly early on
3786 * so we do this right after getting ahold of the firmware. 3776 * so we do this right after getting ahold of the firmware.
3787 */ 3777 */
3788 ret = get_vpd_params(adap, &adap->params.vpd); 3778 ret = get_vpd_params(adap, &adap->params.vpd);
3789 if (ret < 0) 3779 if (ret < 0)
3790 goto bye; 3780 goto bye;
3791 3781
3792 /* 3782 /*
3793 * Find out what ports are available to us. Note that we need to do 3783 * Find out what ports are available to us. Note that we need to do
3794 * this before calling adap_init0_no_config() since it needs nports 3784 * this before calling adap_init0_no_config() since it needs nports
3795 * and portvec ... 3785 * and portvec ...
3796 */ 3786 */
3797 v = 3787 v =
3798 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | 3788 FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
3799 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC); 3789 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
3800 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); 3790 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
3801 if (ret < 0) 3791 if (ret < 0)
3802 goto bye; 3792 goto bye;
3803 3793
3804 adap->params.nports = hweight32(port_vec); 3794 adap->params.nports = hweight32(port_vec);
3805 adap->params.portvec = port_vec; 3795 adap->params.portvec = port_vec;
3806 3796
3807 /* 3797 /*
3808 * If the firmware is initialized already (and we're not forcing a 3798 * If the firmware is initialized already (and we're not forcing a
3809 * master initialization), note that we're living with existing 3799 * master initialization), note that we're living with existing
3810 * adapter parameters. Otherwise, it's time to try initializing the 3800 * adapter parameters. Otherwise, it's time to try initializing the
3811 * adapter ... 3801 * adapter ...
3812 */ 3802 */
3813 if (state == DEV_STATE_INIT) { 3803 if (state == DEV_STATE_INIT) {
3814 dev_info(adap->pdev_dev, "Coming up as %s: "\ 3804 dev_info(adap->pdev_dev, "Coming up as %s: "\
3815 "Adapter already initialized\n", 3805 "Adapter already initialized\n",
3816 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); 3806 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
3817 adap->flags |= USING_SOFT_PARAMS; 3807 adap->flags |= USING_SOFT_PARAMS;
3818 } else { 3808 } else {
3819 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ 3809 dev_info(adap->pdev_dev, "Coming up as MASTER: "\
3820 "Initializing adapter\n"); 3810 "Initializing adapter\n");
3821 3811
3822 /* 3812 /*
3823 * If the firmware doesn't support Configuration 3813 * If the firmware doesn't support Configuration
3824 * Files warn user and exit, 3814 * Files warn user and exit,
3825 */ 3815 */
		if (ret < 0)
			dev_warn(adap->pdev_dev, "Firmware doesn't support "
				 "configuration file.\n");
		if (force_old_init)
			ret = adap_init0_no_config(adap, reset);
		else {
			/*
			 * Find out whether we're dealing with a version of
			 * the firmware which has configuration file support.
			 */
			params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
				     FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
					      params, val);

			/*
			 * If the firmware doesn't support Configuration
			 * Files, use the old Driver-based, hard-wired
			 * initialization.  Otherwise, try using the
			 * Configuration File support and fall back to the
			 * Driver-based initialization if there's no
			 * Configuration File found.
			 */
			if (ret < 0)
				ret = adap_init0_no_config(adap, reset);
			else {
				/*
				 * The firmware provides us with a memory
				 * buffer where we can load a Configuration
				 * File from the host if we want to override
				 * the Configuration File in flash.
				 */

				ret = adap_init0_config(adap, reset);
				if (ret == -ENOENT) {
					dev_info(adap->pdev_dev,
						 "No Configuration File present "
						 "on adapter. Using hard-wired "
						 "configuration parameters.\n");
					ret = adap_init0_no_config(adap, reset);
				}
			}
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"could not initialize adapter, error %d\n",
				-ret);
			goto bye;
		}
	}

	/*
	 * If we're living with non-hard-coded parameters (either from a
	 * Firmware Configuration File or values programmed by a different PF
	 * Driver), give the SGE code a chance to pull in anything that it
	 * needs ...  Note that this must be called after we retrieve our VPD
	 * parameters in order to know how to convert core ticks to seconds.
	 */
	if (adap->flags & USING_SOFT_PARAMS) {
		ret = t4_sge_init(adap);
		if (ret < 0)
			goto bye;
	}

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
	FW_PARAMS_PARAM_Y(0) | \
	FW_PARAMS_PARAM_Z(0)

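	/*
	 * For example, FW_PARAM_PFVF(L2T_START) builds the 32-bit parameter
	 * identifier for this function's L2T start index (with the Y/Z
	 * sub-indices zeroed); t4_query_params() sends an array of such
	 * identifiers to the firmware and fills val[] with the answers.
	 */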
	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
	if (ret < 0)
		goto bye;
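	/*
	 * The results come back in val[] in the same order as params[].
	 * The *_END values are inclusive, hence the "+ 1" whenever a range
	 * size is computed from a START/END pair.
	 */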
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
	/* If a non-empty active filter region is provided, we can establish
	 * offload connections through firmware work requests.
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST | FW_CMD_READ);
	caps_cmd.retval_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

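	/*
	 * Each non-zero capability field in the response (offload, RDMA,
	 * iSCSI) gates the corresponding block of resource queries below.
	 */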
	if (caps_cmd.ofldcaps) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Set up the server filter region.  Divide the available
		 * filter region into two parts:
		 * 1. Regular filters, which get 1/3rd of the region.
		 * 2. Server filters, which get the remaining 2/3rd; these
		 *    are special filters used to redirect SYN packets to
		 *    the offload queue.
		 * The split is only done when the workaround path
		 * (FW_OFLD_CONN) is enabled.
		 */
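		/*
		 * For example, if nftids were 96 after the query above:
		 * regular filters keep DIV_ROUND_UP(96, 3) = 32 IDs, and
		 * the server filter region gets the remaining 64 starting
		 * at ftid_base + 32.
		 */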
		if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
			adap->tids.sftid_base = adap->tids.ftid_base +
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nsftids = adap->tids.nftids -
				DIV_ROUND_UP(adap->tids.nftids, 3);
			adap->tids.nftids = adap->tids.sftid_base -
				adap->tids.ftid_base;
		}
		adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		adap->params.offload = 1;
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

	/*
	 * These are finalized by FW initialization, load their values now.
	 */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (j = 0; j < NCHAN; j++)
		adap->params.tp.tx_modq[j] = j;

	adap->flags |= FW_OK;
	return 0;

	/*
	 * Something bad happened.  If a command timed out or failed with
	 * EIO, the firmware is either not operating within its spec or
	 * something catastrophic happened to the HW/FW; stop issuing
	 * commands.
	 */
bye:
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

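/*
 * Called by the PCI error-recovery core when an I/O error is detected on
 * the device's PCI channel: quiesce the adapter, detach its net devices,
 * and tell the core whether a slot reset should be attempted.
 */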
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		netif_device_detach(dev);
		netif_carrier_off(dev);
	}
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	pci_disable_device(pdev);
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

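/*
 * Re-initialize the device after a slot reset: bring the PCI function back
 * up, re-contact the firmware as the master PF, and re-allocate the
 * virtual interfaces before restarting the queues.
 */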
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (netif_running(dev)) {
			link_start(dev);
			cxgb_set_rxmode(dev);
		}
		netif_device_attach(dev);
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset = eeh_slot_reset,
	.resume = eeh_resume,
};

static inline bool is_10g_port(const struct link_config *lc)
{
	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}

static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
			     unsigned int size, unsigned int iqe_size)
{
	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
	q->iqe_len = iqe_size;
	q->size = size;
}
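
/*
 * For example, init_rspq(&q, 6, 0, 512, 64) asks for holdoff timer index 6
 * and packet-count counter 0 (counting is enabled whenever pkt_cnt_idx is
 * below SGE_NCOUNTERS), with a 512-entry queue of 64-byte entries.
 */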

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void __devinit cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i, q10g = 0, n10g = 0, qidx = 0;

	for_each_port(adap, i)
		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);

	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();
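	/*
	 * E.g., with two 10G ports, no slower ports, and a MAX_ETH_QSETS of
	 * 32, this gives q10g = (32 - 0) / 2 = 16, which is then capped to
	 * the kernel's default per-device RSS queue count.
	 */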

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_offload(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to 1G,
		 * otherwise we divide all available queues amongst the channels
		 * capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
				  num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else
			s->ofldqsets = adap->params.nports;
		/* For RDMA one Rx queue per channel suffices */
		s->rdmaqs = adap->params.nports;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
		s->ofldtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
		struct sge_ofld_rxq *r = &s->ofldrxq[i];

		init_rspq(&r->rspq, 0, 0, 1024, 64);
		r->rspq.uld = CXGB4_ULD_ISCSI;
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
		struct sge_ofld_rxq *r = &s->rdmarxq[i];

		init_rspq(&r->rspq, 0, 0, 511, 64);
		r->rspq.uld = CXGB4_ULD_RDMA;
		r->fl.size = 72;
	}

	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void __devinit reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2

static int __devinit enable_msix(struct adapter *adap)
{
	int ofld_need = 0;
	int i, err, want, need;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry entries[MAX_INGQ + 1];

	for (i = 0; i < ARRAY_SIZE(entries); ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += s->rdmaqs + s->ofldqsets;
		/* need nchan for each possible ULD */
		ofld_need = 2 * nchan;
	}
	need = adap->params.nports + EXTRA_VECS + ofld_need;

	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
		want = err;
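	/*
	 * pci_enable_msix() returns 0 on success or, when fewer vectors are
	 * available, the number it could provide; the loop above retries
	 * with that smaller count until it either succeeds or the count
	 * drops below the minimum "need".
	 */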

	if (!err) {
		/*
		 * Distribute available vectors to the various queue groups.
		 * Every group gets its minimum requirement and NIC gets top
		 * priority for leftovers.
		 */
		i = want - EXTRA_VECS - ofld_need;
		if (i < s->max_ethqsets) {
			s->max_ethqsets = i;
			if (i < s->ethqsets)
				reduce_ethqs(adap, i);
		}
		if (is_offload(adap)) {
			i = want - EXTRA_VECS - s->max_ethqsets;
			i -= ofld_need - nchan;
			s->ofldqsets = (i / nchan) * nchan;  /* round down */
		}
		for (i = 0; i < want; ++i)
			adap->msix_info[i].vec = entries[i].vector;
	} else if (err > 0)
		dev_info(adap->pdev_dev,
			 "only %d MSI-X vectors left, not using MSI-X\n", err);
	return err;
}

#undef EXTRA_VECS

static int __devinit init_rss(struct adapter *adap)
{
	unsigned int i, j;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
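		/*
		 * Default indirection table: spread entries round-robin
		 * across the port's queue sets (entry j maps to queue
		 * j % nqsets).
		 */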
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
	}
	return 0;
}

static void __devinit print_port_info(const struct net_device *dev)
{
	static const char *base[] = {
		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
	};

	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";

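	/*
	 * Build a "100/1000/10G" style speed list; the trailing '/' left
	 * by the last sprintf() is backed over and overwritten when the
	 * "BASE-..." suffix is appended below.
	 */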
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
		bufp += sprintf(bufp, "1000/");
	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", base[pi->port_type]);

	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
		    adap->params.vpd.id, adap->params.rev, buf,
		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
		    (adap->flags & USING_MSIX) ? " MSI-X" :
		    (adap->flags & USING_MSI) ? " MSI" : "");
	netdev_info(dev, "S/N: %s, E/C: %s\n",
		    adap->params.vpd.sn, adap->params.vpd.ec);
}

static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	t4_free_mem(adapter->l2t);
	t4_free_mem(adapter->tids.tid_tab);
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)

static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);	/* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->mbox = func;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	setup_memwin(adapter);
	err = adap_init0(adapter);
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

out_free_dev:
	free_some_resources(adapter);
out_unmap_bar:
	iounmap(adapter->regs);
out_free_adapter:
	kfree(adapter);
out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

#ifdef CONFIG_PCI_IOV
	pci_disable_sriov(pdev);
#endif

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe = init_one,
	.remove = __devexit_p(remove_one),
	.err_handler = &cxgb4_eeh,
};

static int __init cxgb4_init_module(void)
{
	int ret;

	workq = create_singlethread_workqueue("cxgb4");
	if (!workq)
		return -ENOMEM;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
	flush_workqueue(workq);
	destroy_workqueue(workq);
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);