Commit d215697fe14a0c5a96765c6279b4751e632587a5
Committed by: David S. Miller
Parent: d028023281
Exists in: master and 7 other branches
sfc: make functions static

Make local functions and variables static. Do some rearrangement of the
string table stuff to put it where it gets used.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
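The change itself is simple: giving a file-local symbol the static keyword gives it internal linkage, so it is invisible outside its translation unit, cannot collide with same-named symbols elsewhere in the kernel, and lets the compiler warn when it becomes unused. The sketch below uses hypothetical names, not code from this commit; it shows the pattern for both a designated-initializer string table (the same shape as efx_loopback_mode_names in the diff) and a file-local helper. Symbols that other files reference through an extern declaration in a header, such as efx_loopback_mode_names here, are the ones that must stay non-static.

	enum widget_mode { WIDGET_FAST, WIDGET_SLOW, WIDGET_MODE_MAX };

	/* String table indexed by enum value, using designated
	 * initializers (the same pattern as efx_loopback_mode_names). */
	static const char *const widget_mode_names[] = {
		[WIDGET_FAST] = "fast",
		[WIDGET_SLOW] = "slow",
	};

	/* File-local helper: 'static' gives it internal linkage, so
	 * its name cannot clash with other translation units. */
	static const char *widget_mode_name(enum widget_mode mode)
	{
		if (mode >= WIDGET_MODE_MAX || !widget_mode_names[mode])
			return "unknown";
		return widget_mode_names[mode];
	}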
Showing 12 changed files with 26 additions and 41 deletions

Inline diff:
drivers/net/sfc/efx.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2005-2009 Solarflare Communications Inc. | 4 | * Copyright 2005-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
13 | #include <linux/netdevice.h> | 13 | #include <linux/netdevice.h> |
14 | #include <linux/etherdevice.h> | 14 | #include <linux/etherdevice.h> |
15 | #include <linux/delay.h> | 15 | #include <linux/delay.h> |
16 | #include <linux/notifier.h> | 16 | #include <linux/notifier.h> |
17 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
18 | #include <linux/tcp.h> | 18 | #include <linux/tcp.h> |
19 | #include <linux/in.h> | 19 | #include <linux/in.h> |
20 | #include <linux/crc32.h> | 20 | #include <linux/crc32.h> |
21 | #include <linux/ethtool.h> | 21 | #include <linux/ethtool.h> |
22 | #include <linux/topology.h> | 22 | #include <linux/topology.h> |
23 | #include <linux/gfp.h> | 23 | #include <linux/gfp.h> |
24 | #include "net_driver.h" | 24 | #include "net_driver.h" |
25 | #include "efx.h" | 25 | #include "efx.h" |
26 | #include "mdio_10g.h" | 26 | #include "mdio_10g.h" |
27 | #include "nic.h" | 27 | #include "nic.h" |
28 | 28 | ||
29 | #include "mcdi.h" | 29 | #include "mcdi.h" |
30 | #include "workarounds.h" | 30 | #include "workarounds.h" |
31 | 31 | ||
32 | /************************************************************************** | 32 | /************************************************************************** |
33 | * | 33 | * |
34 | * Type name strings | 34 | * Type name strings |
35 | * | 35 | * |
36 | ************************************************************************** | 36 | ************************************************************************** |
37 | */ | 37 | */ |
38 | 38 | ||
39 | /* Loopback mode names (see LOOPBACK_MODE()) */ | 39 | /* Loopback mode names (see LOOPBACK_MODE()) */ |
40 | const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; | 40 | const unsigned int efx_loopback_mode_max = LOOPBACK_MAX; |
41 | const char *efx_loopback_mode_names[] = { | 41 | const char *efx_loopback_mode_names[] = { |
42 | [LOOPBACK_NONE] = "NONE", | 42 | [LOOPBACK_NONE] = "NONE", |
43 | [LOOPBACK_DATA] = "DATAPATH", | 43 | [LOOPBACK_DATA] = "DATAPATH", |
44 | [LOOPBACK_GMAC] = "GMAC", | 44 | [LOOPBACK_GMAC] = "GMAC", |
45 | [LOOPBACK_XGMII] = "XGMII", | 45 | [LOOPBACK_XGMII] = "XGMII", |
46 | [LOOPBACK_XGXS] = "XGXS", | 46 | [LOOPBACK_XGXS] = "XGXS", |
47 | [LOOPBACK_XAUI] = "XAUI", | 47 | [LOOPBACK_XAUI] = "XAUI", |
48 | [LOOPBACK_GMII] = "GMII", | 48 | [LOOPBACK_GMII] = "GMII", |
49 | [LOOPBACK_SGMII] = "SGMII", | 49 | [LOOPBACK_SGMII] = "SGMII", |
50 | [LOOPBACK_XGBR] = "XGBR", | 50 | [LOOPBACK_XGBR] = "XGBR", |
51 | [LOOPBACK_XFI] = "XFI", | 51 | [LOOPBACK_XFI] = "XFI", |
52 | [LOOPBACK_XAUI_FAR] = "XAUI_FAR", | 52 | [LOOPBACK_XAUI_FAR] = "XAUI_FAR", |
53 | [LOOPBACK_GMII_FAR] = "GMII_FAR", | 53 | [LOOPBACK_GMII_FAR] = "GMII_FAR", |
54 | [LOOPBACK_SGMII_FAR] = "SGMII_FAR", | 54 | [LOOPBACK_SGMII_FAR] = "SGMII_FAR", |
55 | [LOOPBACK_XFI_FAR] = "XFI_FAR", | 55 | [LOOPBACK_XFI_FAR] = "XFI_FAR", |
56 | [LOOPBACK_GPHY] = "GPHY", | 56 | [LOOPBACK_GPHY] = "GPHY", |
57 | [LOOPBACK_PHYXS] = "PHYXS", | 57 | [LOOPBACK_PHYXS] = "PHYXS", |
58 | [LOOPBACK_PCS] = "PCS", | 58 | [LOOPBACK_PCS] = "PCS", |
59 | [LOOPBACK_PMAPMD] = "PMA/PMD", | 59 | [LOOPBACK_PMAPMD] = "PMA/PMD", |
60 | [LOOPBACK_XPORT] = "XPORT", | 60 | [LOOPBACK_XPORT] = "XPORT", |
61 | [LOOPBACK_XGMII_WS] = "XGMII_WS", | 61 | [LOOPBACK_XGMII_WS] = "XGMII_WS", |
62 | [LOOPBACK_XAUI_WS] = "XAUI_WS", | 62 | [LOOPBACK_XAUI_WS] = "XAUI_WS", |
63 | [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", | 63 | [LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR", |
64 | [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", | 64 | [LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR", |
65 | [LOOPBACK_GMII_WS] = "GMII_WS", | 65 | [LOOPBACK_GMII_WS] = "GMII_WS", |
66 | [LOOPBACK_XFI_WS] = "XFI_WS", | 66 | [LOOPBACK_XFI_WS] = "XFI_WS", |
67 | [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", | 67 | [LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR", |
68 | [LOOPBACK_PHYXS_WS] = "PHYXS_WS", | 68 | [LOOPBACK_PHYXS_WS] = "PHYXS_WS", |
69 | }; | 69 | }; |
70 | 70 | ||
71 | /* Interrupt mode names (see INT_MODE())) */ | ||
72 | const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; | ||
73 | const char *efx_interrupt_mode_names[] = { | ||
74 | [EFX_INT_MODE_MSIX] = "MSI-X", | ||
75 | [EFX_INT_MODE_MSI] = "MSI", | ||
76 | [EFX_INT_MODE_LEGACY] = "legacy", | ||
77 | }; | ||
78 | |||
79 | const unsigned int efx_reset_type_max = RESET_TYPE_MAX; | 71 | const unsigned int efx_reset_type_max = RESET_TYPE_MAX; |
80 | const char *efx_reset_type_names[] = { | 72 | const char *efx_reset_type_names[] = { |
81 | [RESET_TYPE_INVISIBLE] = "INVISIBLE", | 73 | [RESET_TYPE_INVISIBLE] = "INVISIBLE", |
82 | [RESET_TYPE_ALL] = "ALL", | 74 | [RESET_TYPE_ALL] = "ALL", |
83 | [RESET_TYPE_WORLD] = "WORLD", | 75 | [RESET_TYPE_WORLD] = "WORLD", |
84 | [RESET_TYPE_DISABLE] = "DISABLE", | 76 | [RESET_TYPE_DISABLE] = "DISABLE", |
85 | [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", | 77 | [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", |
86 | [RESET_TYPE_INT_ERROR] = "INT_ERROR", | 78 | [RESET_TYPE_INT_ERROR] = "INT_ERROR", |
87 | [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", | 79 | [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", |
88 | [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", | 80 | [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", |
89 | [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", | 81 | [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", |
90 | [RESET_TYPE_TX_SKIP] = "TX_SKIP", | 82 | [RESET_TYPE_TX_SKIP] = "TX_SKIP", |
91 | [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", | 83 | [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", |
92 | }; | 84 | }; |
93 | 85 | ||
94 | #define EFX_MAX_MTU (9 * 1024) | 86 | #define EFX_MAX_MTU (9 * 1024) |
95 | 87 | ||
96 | /* Reset workqueue. If any NIC has a hardware failure then a reset will be | 88 | /* Reset workqueue. If any NIC has a hardware failure then a reset will be |
97 | * queued onto this work queue. This is not a per-nic work queue, because | 89 | * queued onto this work queue. This is not a per-nic work queue, because |
98 | * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. | 90 | * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. |
99 | */ | 91 | */ |
100 | static struct workqueue_struct *reset_workqueue; | 92 | static struct workqueue_struct *reset_workqueue; |
101 | 93 | ||
102 | /************************************************************************** | 94 | /************************************************************************** |
103 | * | 95 | * |
104 | * Configurable values | 96 | * Configurable values |
105 | * | 97 | * |
106 | *************************************************************************/ | 98 | *************************************************************************/ |
107 | 99 | ||
108 | /* | 100 | /* |
109 | * Use separate channels for TX and RX events | 101 | * Use separate channels for TX and RX events |
110 | * | 102 | * |
111 | * Set this to 1 to use separate channels for TX and RX. It allows us | 103 | * Set this to 1 to use separate channels for TX and RX. It allows us |
112 | * to control interrupt affinity separately for TX and RX. | 104 | * to control interrupt affinity separately for TX and RX. |
113 | * | 105 | * |
114 | * This is only used in MSI-X interrupt mode | 106 | * This is only used in MSI-X interrupt mode |
115 | */ | 107 | */ |
116 | static unsigned int separate_tx_channels; | 108 | static unsigned int separate_tx_channels; |
117 | module_param(separate_tx_channels, uint, 0444); | 109 | module_param(separate_tx_channels, uint, 0444); |
118 | MODULE_PARM_DESC(separate_tx_channels, | 110 | MODULE_PARM_DESC(separate_tx_channels, |
119 | "Use separate channels for TX and RX"); | 111 | "Use separate channels for TX and RX"); |
120 | 112 | ||
121 | /* This is the weight assigned to each of the (per-channel) virtual | 113 | /* This is the weight assigned to each of the (per-channel) virtual |
122 | * NAPI devices. | 114 | * NAPI devices. |
123 | */ | 115 | */ |
124 | static int napi_weight = 64; | 116 | static int napi_weight = 64; |
125 | 117 | ||
126 | /* This is the time (in jiffies) between invocations of the hardware | 118 | /* This is the time (in jiffies) between invocations of the hardware |
127 | * monitor. On Falcon-based NICs, this will: | 119 | * monitor. On Falcon-based NICs, this will: |
128 | * - Check the on-board hardware monitor; | 120 | * - Check the on-board hardware monitor; |
129 | * - Poll the link state and reconfigure the hardware as necessary. | 121 | * - Poll the link state and reconfigure the hardware as necessary. |
130 | */ | 122 | */ |
131 | unsigned int efx_monitor_interval = 1 * HZ; | 123 | static unsigned int efx_monitor_interval = 1 * HZ; |
132 | 124 | ||
133 | /* This controls whether or not the driver will initialise devices | 125 | /* This controls whether or not the driver will initialise devices |
134 | * with invalid MAC addresses stored in the EEPROM or flash. If true, | 126 | * with invalid MAC addresses stored in the EEPROM or flash. If true, |
135 | * such devices will be initialised with a random locally-generated | 127 | * such devices will be initialised with a random locally-generated |
136 | * MAC address. This allows for loading the sfc_mtd driver to | 128 | * MAC address. This allows for loading the sfc_mtd driver to |
137 | * reprogram the flash, even if the flash contents (including the MAC | 129 | * reprogram the flash, even if the flash contents (including the MAC |
138 | * address) have previously been erased. | 130 | * address) have previously been erased. |
139 | */ | 131 | */ |
140 | static unsigned int allow_bad_hwaddr; | 132 | static unsigned int allow_bad_hwaddr; |
141 | 133 | ||
142 | /* Initial interrupt moderation settings. They can be modified after | 134 | /* Initial interrupt moderation settings. They can be modified after |
143 | * module load with ethtool. | 135 | * module load with ethtool. |
144 | * | 136 | * |
145 | * The default for RX should strike a balance between increasing the | 137 | * The default for RX should strike a balance between increasing the |
146 | * round-trip latency and reducing overhead. | 138 | * round-trip latency and reducing overhead. |
147 | */ | 139 | */ |
148 | static unsigned int rx_irq_mod_usec = 60; | 140 | static unsigned int rx_irq_mod_usec = 60; |
149 | 141 | ||
150 | /* Initial interrupt moderation settings. They can be modified after | 142 | /* Initial interrupt moderation settings. They can be modified after |
151 | * module load with ethtool. | 143 | * module load with ethtool. |
152 | * | 144 | * |
153 | * This default is chosen to ensure that a 10G link does not go idle | 145 | * This default is chosen to ensure that a 10G link does not go idle |
154 | * while a TX queue is stopped after it has become full. A queue is | 146 | * while a TX queue is stopped after it has become full. A queue is |
155 | * restarted when it drops below half full. The time this takes (assuming | 147 | * restarted when it drops below half full. The time this takes (assuming |
156 | * worst case 3 descriptors per packet and 1024 descriptors) is | 148 | * worst case 3 descriptors per packet and 1024 descriptors) is |
157 | * 512 / 3 * 1.2 = 205 usec. | 149 | * 512 / 3 * 1.2 = 205 usec. |
158 | */ | 150 | */ |
159 | static unsigned int tx_irq_mod_usec = 150; | 151 | static unsigned int tx_irq_mod_usec = 150; |
160 | 152 | ||
161 | /* This is the first interrupt mode to try out of: | 153 | /* This is the first interrupt mode to try out of: |
162 | * 0 => MSI-X | 154 | * 0 => MSI-X |
163 | * 1 => MSI | 155 | * 1 => MSI |
164 | * 2 => legacy | 156 | * 2 => legacy |
165 | */ | 157 | */ |
166 | static unsigned int interrupt_mode; | 158 | static unsigned int interrupt_mode; |
167 | 159 | ||
168 | /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), | 160 | /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), |
169 | * i.e. the number of CPUs among which we may distribute simultaneous | 161 | * i.e. the number of CPUs among which we may distribute simultaneous |
170 | * interrupt handling. | 162 | * interrupt handling. |
171 | * | 163 | * |
172 | * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. | 164 | * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. |
173 | * The default (0) means to assign an interrupt to each package (level II cache) | 165 | * The default (0) means to assign an interrupt to each package (level II cache) |
174 | */ | 166 | */ |
175 | static unsigned int rss_cpus; | 167 | static unsigned int rss_cpus; |
176 | module_param(rss_cpus, uint, 0444); | 168 | module_param(rss_cpus, uint, 0444); |
177 | MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); | 169 | MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); |
178 | 170 | ||
179 | static int phy_flash_cfg; | 171 | static int phy_flash_cfg; |
180 | module_param(phy_flash_cfg, int, 0644); | 172 | module_param(phy_flash_cfg, int, 0644); |
181 | MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); | 173 | MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially"); |
182 | 174 | ||
183 | static unsigned irq_adapt_low_thresh = 10000; | 175 | static unsigned irq_adapt_low_thresh = 10000; |
184 | module_param(irq_adapt_low_thresh, uint, 0644); | 176 | module_param(irq_adapt_low_thresh, uint, 0644); |
185 | MODULE_PARM_DESC(irq_adapt_low_thresh, | 177 | MODULE_PARM_DESC(irq_adapt_low_thresh, |
186 | "Threshold score for reducing IRQ moderation"); | 178 | "Threshold score for reducing IRQ moderation"); |
187 | 179 | ||
188 | static unsigned irq_adapt_high_thresh = 20000; | 180 | static unsigned irq_adapt_high_thresh = 20000; |
189 | module_param(irq_adapt_high_thresh, uint, 0644); | 181 | module_param(irq_adapt_high_thresh, uint, 0644); |
190 | MODULE_PARM_DESC(irq_adapt_high_thresh, | 182 | MODULE_PARM_DESC(irq_adapt_high_thresh, |
191 | "Threshold score for increasing IRQ moderation"); | 183 | "Threshold score for increasing IRQ moderation"); |
192 | 184 | ||
193 | static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | | 185 | static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE | |
194 | NETIF_MSG_LINK | NETIF_MSG_IFDOWN | | 186 | NETIF_MSG_LINK | NETIF_MSG_IFDOWN | |
195 | NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | | 187 | NETIF_MSG_IFUP | NETIF_MSG_RX_ERR | |
196 | NETIF_MSG_TX_ERR | NETIF_MSG_HW); | 188 | NETIF_MSG_TX_ERR | NETIF_MSG_HW); |
197 | module_param(debug, uint, 0); | 189 | module_param(debug, uint, 0); |
198 | MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); | 190 | MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value"); |
199 | 191 | ||
200 | /************************************************************************** | 192 | /************************************************************************** |
201 | * | 193 | * |
202 | * Utility functions and prototypes | 194 | * Utility functions and prototypes |
203 | * | 195 | * |
204 | *************************************************************************/ | 196 | *************************************************************************/ |
205 | 197 | ||
206 | static void efx_remove_channels(struct efx_nic *efx); | 198 | static void efx_remove_channels(struct efx_nic *efx); |
207 | static void efx_remove_port(struct efx_nic *efx); | 199 | static void efx_remove_port(struct efx_nic *efx); |
208 | static void efx_fini_napi(struct efx_nic *efx); | 200 | static void efx_fini_napi(struct efx_nic *efx); |
209 | static void efx_fini_struct(struct efx_nic *efx); | 201 | static void efx_fini_struct(struct efx_nic *efx); |
210 | static void efx_start_all(struct efx_nic *efx); | 202 | static void efx_start_all(struct efx_nic *efx); |
211 | static void efx_stop_all(struct efx_nic *efx); | 203 | static void efx_stop_all(struct efx_nic *efx); |
212 | 204 | ||
213 | #define EFX_ASSERT_RESET_SERIALISED(efx) \ | 205 | #define EFX_ASSERT_RESET_SERIALISED(efx) \ |
214 | do { \ | 206 | do { \ |
215 | if ((efx->state == STATE_RUNNING) || \ | 207 | if ((efx->state == STATE_RUNNING) || \ |
216 | (efx->state == STATE_DISABLED)) \ | 208 | (efx->state == STATE_DISABLED)) \ |
217 | ASSERT_RTNL(); \ | 209 | ASSERT_RTNL(); \ |
218 | } while (0) | 210 | } while (0) |
219 | 211 | ||
220 | /************************************************************************** | 212 | /************************************************************************** |
221 | * | 213 | * |
222 | * Event queue processing | 214 | * Event queue processing |
223 | * | 215 | * |
224 | *************************************************************************/ | 216 | *************************************************************************/ |
225 | 217 | ||
226 | /* Process channel's event queue | 218 | /* Process channel's event queue |
227 | * | 219 | * |
228 | * This function is responsible for processing the event queue of a | 220 | * This function is responsible for processing the event queue of a |
229 | * single channel. The caller must guarantee that this function will | 221 | * single channel. The caller must guarantee that this function will |
230 | * never be concurrently called more than once on the same channel, | 222 | * never be concurrently called more than once on the same channel, |
231 | * though different channels may be being processed concurrently. | 223 | * though different channels may be being processed concurrently. |
232 | */ | 224 | */ |
233 | static int efx_process_channel(struct efx_channel *channel, int budget) | 225 | static int efx_process_channel(struct efx_channel *channel, int budget) |
234 | { | 226 | { |
235 | struct efx_nic *efx = channel->efx; | 227 | struct efx_nic *efx = channel->efx; |
236 | int spent; | 228 | int spent; |
237 | 229 | ||
238 | if (unlikely(efx->reset_pending != RESET_TYPE_NONE || | 230 | if (unlikely(efx->reset_pending != RESET_TYPE_NONE || |
239 | !channel->enabled)) | 231 | !channel->enabled)) |
240 | return 0; | 232 | return 0; |
241 | 233 | ||
242 | spent = efx_nic_process_eventq(channel, budget); | 234 | spent = efx_nic_process_eventq(channel, budget); |
243 | if (spent == 0) | 235 | if (spent == 0) |
244 | return 0; | 236 | return 0; |
245 | 237 | ||
246 | /* Deliver last RX packet. */ | 238 | /* Deliver last RX packet. */ |
247 | if (channel->rx_pkt) { | 239 | if (channel->rx_pkt) { |
248 | __efx_rx_packet(channel, channel->rx_pkt, | 240 | __efx_rx_packet(channel, channel->rx_pkt, |
249 | channel->rx_pkt_csummed); | 241 | channel->rx_pkt_csummed); |
250 | channel->rx_pkt = NULL; | 242 | channel->rx_pkt = NULL; |
251 | } | 243 | } |
252 | 244 | ||
253 | efx_rx_strategy(channel); | 245 | efx_rx_strategy(channel); |
254 | 246 | ||
255 | efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); | 247 | efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); |
256 | 248 | ||
257 | return spent; | 249 | return spent; |
258 | } | 250 | } |
259 | 251 | ||
260 | /* Mark channel as finished processing | 252 | /* Mark channel as finished processing |
261 | * | 253 | * |
262 | * Note that since we will not receive further interrupts for this | 254 | * Note that since we will not receive further interrupts for this |
263 | * channel before we finish processing and call the eventq_read_ack() | 255 | * channel before we finish processing and call the eventq_read_ack() |
264 | * method, there is no need to use the interrupt hold-off timers. | 256 | * method, there is no need to use the interrupt hold-off timers. |
265 | */ | 257 | */ |
266 | static inline void efx_channel_processed(struct efx_channel *channel) | 258 | static inline void efx_channel_processed(struct efx_channel *channel) |
267 | { | 259 | { |
268 | /* The interrupt handler for this channel may set work_pending | 260 | /* The interrupt handler for this channel may set work_pending |
269 | * as soon as we acknowledge the events we've seen. Make sure | 261 | * as soon as we acknowledge the events we've seen. Make sure |
270 | * it's cleared before then. */ | 262 | * it's cleared before then. */ |
271 | channel->work_pending = false; | 263 | channel->work_pending = false; |
272 | smp_wmb(); | 264 | smp_wmb(); |
273 | 265 | ||
274 | efx_nic_eventq_read_ack(channel); | 266 | efx_nic_eventq_read_ack(channel); |
275 | } | 267 | } |
276 | 268 | ||
277 | /* NAPI poll handler | 269 | /* NAPI poll handler |
278 | * | 270 | * |
279 | * NAPI guarantees serialisation of polls of the same device, which | 271 | * NAPI guarantees serialisation of polls of the same device, which |
280 | * provides the guarantee required by efx_process_channel(). | 272 | * provides the guarantee required by efx_process_channel(). |
281 | */ | 273 | */ |
282 | static int efx_poll(struct napi_struct *napi, int budget) | 274 | static int efx_poll(struct napi_struct *napi, int budget) |
283 | { | 275 | { |
284 | struct efx_channel *channel = | 276 | struct efx_channel *channel = |
285 | container_of(napi, struct efx_channel, napi_str); | 277 | container_of(napi, struct efx_channel, napi_str); |
286 | struct efx_nic *efx = channel->efx; | 278 | struct efx_nic *efx = channel->efx; |
287 | int spent; | 279 | int spent; |
288 | 280 | ||
289 | netif_vdbg(efx, intr, efx->net_dev, | 281 | netif_vdbg(efx, intr, efx->net_dev, |
290 | "channel %d NAPI poll executing on CPU %d\n", | 282 | "channel %d NAPI poll executing on CPU %d\n", |
291 | channel->channel, raw_smp_processor_id()); | 283 | channel->channel, raw_smp_processor_id()); |
292 | 284 | ||
293 | spent = efx_process_channel(channel, budget); | 285 | spent = efx_process_channel(channel, budget); |
294 | 286 | ||
295 | if (spent < budget) { | 287 | if (spent < budget) { |
296 | if (channel->channel < efx->n_rx_channels && | 288 | if (channel->channel < efx->n_rx_channels && |
297 | efx->irq_rx_adaptive && | 289 | efx->irq_rx_adaptive && |
298 | unlikely(++channel->irq_count == 1000)) { | 290 | unlikely(++channel->irq_count == 1000)) { |
299 | if (unlikely(channel->irq_mod_score < | 291 | if (unlikely(channel->irq_mod_score < |
300 | irq_adapt_low_thresh)) { | 292 | irq_adapt_low_thresh)) { |
301 | if (channel->irq_moderation > 1) { | 293 | if (channel->irq_moderation > 1) { |
302 | channel->irq_moderation -= 1; | 294 | channel->irq_moderation -= 1; |
303 | efx->type->push_irq_moderation(channel); | 295 | efx->type->push_irq_moderation(channel); |
304 | } | 296 | } |
305 | } else if (unlikely(channel->irq_mod_score > | 297 | } else if (unlikely(channel->irq_mod_score > |
306 | irq_adapt_high_thresh)) { | 298 | irq_adapt_high_thresh)) { |
307 | if (channel->irq_moderation < | 299 | if (channel->irq_moderation < |
308 | efx->irq_rx_moderation) { | 300 | efx->irq_rx_moderation) { |
309 | channel->irq_moderation += 1; | 301 | channel->irq_moderation += 1; |
310 | efx->type->push_irq_moderation(channel); | 302 | efx->type->push_irq_moderation(channel); |
311 | } | 303 | } |
312 | } | 304 | } |
313 | channel->irq_count = 0; | 305 | channel->irq_count = 0; |
314 | channel->irq_mod_score = 0; | 306 | channel->irq_mod_score = 0; |
315 | } | 307 | } |
316 | 308 | ||
317 | /* There is no race here; although napi_disable() will | 309 | /* There is no race here; although napi_disable() will |
318 | * only wait for napi_complete(), this isn't a problem | 310 | * only wait for napi_complete(), this isn't a problem |
319 | * since efx_channel_processed() will have no effect if | 311 | * since efx_channel_processed() will have no effect if |
320 | * interrupts have already been disabled. | 312 | * interrupts have already been disabled. |
321 | */ | 313 | */ |
322 | napi_complete(napi); | 314 | napi_complete(napi); |
323 | efx_channel_processed(channel); | 315 | efx_channel_processed(channel); |
324 | } | 316 | } |
325 | 317 | ||
326 | return spent; | 318 | return spent; |
327 | } | 319 | } |
328 | 320 | ||
329 | /* Process the eventq of the specified channel immediately on this CPU | 321 | /* Process the eventq of the specified channel immediately on this CPU |
330 | * | 322 | * |
331 | * Disable hardware generated interrupts, wait for any existing | 323 | * Disable hardware generated interrupts, wait for any existing |
332 | * processing to finish, then directly poll (and ack ) the eventq. | 324 | * processing to finish, then directly poll (and ack ) the eventq. |
333 | * Finally reenable NAPI and interrupts. | 325 | * Finally reenable NAPI and interrupts. |
334 | * | 326 | * |
335 | * Since we are touching interrupts the caller should hold the suspend lock | 327 | * Since we are touching interrupts the caller should hold the suspend lock |
336 | */ | 328 | */ |
337 | void efx_process_channel_now(struct efx_channel *channel) | 329 | void efx_process_channel_now(struct efx_channel *channel) |
338 | { | 330 | { |
339 | struct efx_nic *efx = channel->efx; | 331 | struct efx_nic *efx = channel->efx; |
340 | 332 | ||
341 | BUG_ON(channel->channel >= efx->n_channels); | 333 | BUG_ON(channel->channel >= efx->n_channels); |
342 | BUG_ON(!channel->enabled); | 334 | BUG_ON(!channel->enabled); |
343 | 335 | ||
344 | /* Disable interrupts and wait for ISRs to complete */ | 336 | /* Disable interrupts and wait for ISRs to complete */ |
345 | efx_nic_disable_interrupts(efx); | 337 | efx_nic_disable_interrupts(efx); |
346 | if (efx->legacy_irq) | 338 | if (efx->legacy_irq) |
347 | synchronize_irq(efx->legacy_irq); | 339 | synchronize_irq(efx->legacy_irq); |
348 | if (channel->irq) | 340 | if (channel->irq) |
349 | synchronize_irq(channel->irq); | 341 | synchronize_irq(channel->irq); |
350 | 342 | ||
351 | /* Wait for any NAPI processing to complete */ | 343 | /* Wait for any NAPI processing to complete */ |
352 | napi_disable(&channel->napi_str); | 344 | napi_disable(&channel->napi_str); |
353 | 345 | ||
354 | /* Poll the channel */ | 346 | /* Poll the channel */ |
355 | efx_process_channel(channel, channel->eventq_mask + 1); | 347 | efx_process_channel(channel, channel->eventq_mask + 1); |
356 | 348 | ||
357 | /* Ack the eventq. This may cause an interrupt to be generated | 349 | /* Ack the eventq. This may cause an interrupt to be generated |
358 | * when they are reenabled */ | 350 | * when they are reenabled */ |
359 | efx_channel_processed(channel); | 351 | efx_channel_processed(channel); |
360 | 352 | ||
361 | napi_enable(&channel->napi_str); | 353 | napi_enable(&channel->napi_str); |
362 | efx_nic_enable_interrupts(efx); | 354 | efx_nic_enable_interrupts(efx); |
363 | } | 355 | } |
364 | 356 | ||
365 | /* Create event queue | 357 | /* Create event queue |
366 | * Event queue memory allocations are done only once. If the channel | 358 | * Event queue memory allocations are done only once. If the channel |
367 | * is reset, the memory buffer will be reused; this guards against | 359 | * is reset, the memory buffer will be reused; this guards against |
368 | * errors during channel reset and also simplifies interrupt handling. | 360 | * errors during channel reset and also simplifies interrupt handling. |
369 | */ | 361 | */ |
370 | static int efx_probe_eventq(struct efx_channel *channel) | 362 | static int efx_probe_eventq(struct efx_channel *channel) |
371 | { | 363 | { |
372 | struct efx_nic *efx = channel->efx; | 364 | struct efx_nic *efx = channel->efx; |
373 | unsigned long entries; | 365 | unsigned long entries; |
374 | 366 | ||
375 | netif_dbg(channel->efx, probe, channel->efx->net_dev, | 367 | netif_dbg(channel->efx, probe, channel->efx->net_dev, |
376 | "chan %d create event queue\n", channel->channel); | 368 | "chan %d create event queue\n", channel->channel); |
377 | 369 | ||
378 | /* Build an event queue with room for one event per tx and rx buffer, | 370 | /* Build an event queue with room for one event per tx and rx buffer, |
379 | * plus some extra for link state events and MCDI completions. */ | 371 | * plus some extra for link state events and MCDI completions. */ |
380 | entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); | 372 | entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128); |
381 | EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); | 373 | EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE); |
382 | channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; | 374 | channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1; |
383 | 375 | ||
384 | return efx_nic_probe_eventq(channel); | 376 | return efx_nic_probe_eventq(channel); |
385 | } | 377 | } |
386 | 378 | ||
387 | /* Prepare channel's event queue */ | 379 | /* Prepare channel's event queue */ |
388 | static void efx_init_eventq(struct efx_channel *channel) | 380 | static void efx_init_eventq(struct efx_channel *channel) |
389 | { | 381 | { |
390 | netif_dbg(channel->efx, drv, channel->efx->net_dev, | 382 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
391 | "chan %d init event queue\n", channel->channel); | 383 | "chan %d init event queue\n", channel->channel); |
392 | 384 | ||
393 | channel->eventq_read_ptr = 0; | 385 | channel->eventq_read_ptr = 0; |
394 | 386 | ||
395 | efx_nic_init_eventq(channel); | 387 | efx_nic_init_eventq(channel); |
396 | } | 388 | } |
397 | 389 | ||
398 | static void efx_fini_eventq(struct efx_channel *channel) | 390 | static void efx_fini_eventq(struct efx_channel *channel) |
399 | { | 391 | { |
400 | netif_dbg(channel->efx, drv, channel->efx->net_dev, | 392 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
401 | "chan %d fini event queue\n", channel->channel); | 393 | "chan %d fini event queue\n", channel->channel); |
402 | 394 | ||
403 | efx_nic_fini_eventq(channel); | 395 | efx_nic_fini_eventq(channel); |
404 | } | 396 | } |
405 | 397 | ||
406 | static void efx_remove_eventq(struct efx_channel *channel) | 398 | static void efx_remove_eventq(struct efx_channel *channel) |
407 | { | 399 | { |
408 | netif_dbg(channel->efx, drv, channel->efx->net_dev, | 400 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
409 | "chan %d remove event queue\n", channel->channel); | 401 | "chan %d remove event queue\n", channel->channel); |
410 | 402 | ||
411 | efx_nic_remove_eventq(channel); | 403 | efx_nic_remove_eventq(channel); |
412 | } | 404 | } |
413 | 405 | ||
414 | /************************************************************************** | 406 | /************************************************************************** |
415 | * | 407 | * |
416 | * Channel handling | 408 | * Channel handling |
417 | * | 409 | * |
418 | *************************************************************************/ | 410 | *************************************************************************/ |
419 | 411 | ||
420 | /* Allocate and initialise a channel structure, optionally copying | 412 | /* Allocate and initialise a channel structure, optionally copying |
421 | * parameters (but not resources) from an old channel structure. */ | 413 | * parameters (but not resources) from an old channel structure. */ |
422 | static struct efx_channel * | 414 | static struct efx_channel * |
423 | efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) | 415 | efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel) |
424 | { | 416 | { |
425 | struct efx_channel *channel; | 417 | struct efx_channel *channel; |
426 | struct efx_rx_queue *rx_queue; | 418 | struct efx_rx_queue *rx_queue; |
427 | struct efx_tx_queue *tx_queue; | 419 | struct efx_tx_queue *tx_queue; |
428 | int j; | 420 | int j; |
429 | 421 | ||
430 | if (old_channel) { | 422 | if (old_channel) { |
431 | channel = kmalloc(sizeof(*channel), GFP_KERNEL); | 423 | channel = kmalloc(sizeof(*channel), GFP_KERNEL); |
432 | if (!channel) | 424 | if (!channel) |
433 | return NULL; | 425 | return NULL; |
434 | 426 | ||
435 | *channel = *old_channel; | 427 | *channel = *old_channel; |
436 | 428 | ||
437 | memset(&channel->eventq, 0, sizeof(channel->eventq)); | 429 | memset(&channel->eventq, 0, sizeof(channel->eventq)); |
438 | 430 | ||
439 | rx_queue = &channel->rx_queue; | 431 | rx_queue = &channel->rx_queue; |
440 | rx_queue->buffer = NULL; | 432 | rx_queue->buffer = NULL; |
441 | memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); | 433 | memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd)); |
442 | 434 | ||
443 | for (j = 0; j < EFX_TXQ_TYPES; j++) { | 435 | for (j = 0; j < EFX_TXQ_TYPES; j++) { |
444 | tx_queue = &channel->tx_queue[j]; | 436 | tx_queue = &channel->tx_queue[j]; |
445 | if (tx_queue->channel) | 437 | if (tx_queue->channel) |
446 | tx_queue->channel = channel; | 438 | tx_queue->channel = channel; |
447 | tx_queue->buffer = NULL; | 439 | tx_queue->buffer = NULL; |
448 | memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); | 440 | memset(&tx_queue->txd, 0, sizeof(tx_queue->txd)); |
449 | } | 441 | } |
450 | } else { | 442 | } else { |
451 | channel = kzalloc(sizeof(*channel), GFP_KERNEL); | 443 | channel = kzalloc(sizeof(*channel), GFP_KERNEL); |
452 | if (!channel) | 444 | if (!channel) |
453 | return NULL; | 445 | return NULL; |
454 | 446 | ||
455 | channel->efx = efx; | 447 | channel->efx = efx; |
456 | channel->channel = i; | 448 | channel->channel = i; |
457 | 449 | ||
458 | for (j = 0; j < EFX_TXQ_TYPES; j++) { | 450 | for (j = 0; j < EFX_TXQ_TYPES; j++) { |
459 | tx_queue = &channel->tx_queue[j]; | 451 | tx_queue = &channel->tx_queue[j]; |
460 | tx_queue->efx = efx; | 452 | tx_queue->efx = efx; |
461 | tx_queue->queue = i * EFX_TXQ_TYPES + j; | 453 | tx_queue->queue = i * EFX_TXQ_TYPES + j; |
462 | tx_queue->channel = channel; | 454 | tx_queue->channel = channel; |
463 | } | 455 | } |
464 | } | 456 | } |
465 | 457 | ||
466 | spin_lock_init(&channel->tx_stop_lock); | 458 | spin_lock_init(&channel->tx_stop_lock); |
467 | atomic_set(&channel->tx_stop_count, 1); | 459 | atomic_set(&channel->tx_stop_count, 1); |
468 | 460 | ||
469 | rx_queue = &channel->rx_queue; | 461 | rx_queue = &channel->rx_queue; |
470 | rx_queue->efx = efx; | 462 | rx_queue->efx = efx; |
471 | setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, | 463 | setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, |
472 | (unsigned long)rx_queue); | 464 | (unsigned long)rx_queue); |
473 | 465 | ||
474 | return channel; | 466 | return channel; |
475 | } | 467 | } |
476 | 468 | ||
477 | static int efx_probe_channel(struct efx_channel *channel) | 469 | static int efx_probe_channel(struct efx_channel *channel) |
478 | { | 470 | { |
479 | struct efx_tx_queue *tx_queue; | 471 | struct efx_tx_queue *tx_queue; |
480 | struct efx_rx_queue *rx_queue; | 472 | struct efx_rx_queue *rx_queue; |
481 | int rc; | 473 | int rc; |
482 | 474 | ||
483 | netif_dbg(channel->efx, probe, channel->efx->net_dev, | 475 | netif_dbg(channel->efx, probe, channel->efx->net_dev, |
484 | "creating channel %d\n", channel->channel); | 476 | "creating channel %d\n", channel->channel); |
485 | 477 | ||
486 | rc = efx_probe_eventq(channel); | 478 | rc = efx_probe_eventq(channel); |
487 | if (rc) | 479 | if (rc) |
488 | goto fail1; | 480 | goto fail1; |
489 | 481 | ||
490 | efx_for_each_channel_tx_queue(tx_queue, channel) { | 482 | efx_for_each_channel_tx_queue(tx_queue, channel) { |
491 | rc = efx_probe_tx_queue(tx_queue); | 483 | rc = efx_probe_tx_queue(tx_queue); |
492 | if (rc) | 484 | if (rc) |
493 | goto fail2; | 485 | goto fail2; |
494 | } | 486 | } |
495 | 487 | ||
496 | efx_for_each_channel_rx_queue(rx_queue, channel) { | 488 | efx_for_each_channel_rx_queue(rx_queue, channel) { |
497 | rc = efx_probe_rx_queue(rx_queue); | 489 | rc = efx_probe_rx_queue(rx_queue); |
498 | if (rc) | 490 | if (rc) |
499 | goto fail3; | 491 | goto fail3; |
500 | } | 492 | } |
501 | 493 | ||
502 | channel->n_rx_frm_trunc = 0; | 494 | channel->n_rx_frm_trunc = 0; |
503 | 495 | ||
504 | return 0; | 496 | return 0; |
505 | 497 | ||
506 | fail3: | 498 | fail3: |
507 | efx_for_each_channel_rx_queue(rx_queue, channel) | 499 | efx_for_each_channel_rx_queue(rx_queue, channel) |
508 | efx_remove_rx_queue(rx_queue); | 500 | efx_remove_rx_queue(rx_queue); |
509 | fail2: | 501 | fail2: |
510 | efx_for_each_channel_tx_queue(tx_queue, channel) | 502 | efx_for_each_channel_tx_queue(tx_queue, channel) |
511 | efx_remove_tx_queue(tx_queue); | 503 | efx_remove_tx_queue(tx_queue); |
512 | fail1: | 504 | fail1: |
513 | return rc; | 505 | return rc; |
514 | } | 506 | } |
515 | 507 | ||
516 | 508 | ||
517 | static void efx_set_channel_names(struct efx_nic *efx) | 509 | static void efx_set_channel_names(struct efx_nic *efx) |
518 | { | 510 | { |
519 | struct efx_channel *channel; | 511 | struct efx_channel *channel; |
520 | const char *type = ""; | 512 | const char *type = ""; |
521 | int number; | 513 | int number; |
522 | 514 | ||
523 | efx_for_each_channel(channel, efx) { | 515 | efx_for_each_channel(channel, efx) { |
524 | number = channel->channel; | 516 | number = channel->channel; |
525 | if (efx->n_channels > efx->n_rx_channels) { | 517 | if (efx->n_channels > efx->n_rx_channels) { |
526 | if (channel->channel < efx->n_rx_channels) { | 518 | if (channel->channel < efx->n_rx_channels) { |
527 | type = "-rx"; | 519 | type = "-rx"; |
528 | } else { | 520 | } else { |
529 | type = "-tx"; | 521 | type = "-tx"; |
530 | number -= efx->n_rx_channels; | 522 | number -= efx->n_rx_channels; |
531 | } | 523 | } |
532 | } | 524 | } |
533 | snprintf(efx->channel_name[channel->channel], | 525 | snprintf(efx->channel_name[channel->channel], |
534 | sizeof(efx->channel_name[0]), | 526 | sizeof(efx->channel_name[0]), |
535 | "%s%s-%d", efx->name, type, number); | 527 | "%s%s-%d", efx->name, type, number); |
536 | } | 528 | } |
537 | } | 529 | } |
538 | 530 | ||
539 | static int efx_probe_channels(struct efx_nic *efx) | 531 | static int efx_probe_channels(struct efx_nic *efx) |
540 | { | 532 | { |
541 | struct efx_channel *channel; | 533 | struct efx_channel *channel; |
542 | int rc; | 534 | int rc; |
543 | 535 | ||
544 | /* Restart special buffer allocation */ | 536 | /* Restart special buffer allocation */ |
545 | efx->next_buffer_table = 0; | 537 | efx->next_buffer_table = 0; |
546 | 538 | ||
547 | efx_for_each_channel(channel, efx) { | 539 | efx_for_each_channel(channel, efx) { |
548 | rc = efx_probe_channel(channel); | 540 | rc = efx_probe_channel(channel); |
549 | if (rc) { | 541 | if (rc) { |
550 | netif_err(efx, probe, efx->net_dev, | 542 | netif_err(efx, probe, efx->net_dev, |
551 | "failed to create channel %d\n", | 543 | "failed to create channel %d\n", |
552 | channel->channel); | 544 | channel->channel); |
553 | goto fail; | 545 | goto fail; |
554 | } | 546 | } |
555 | } | 547 | } |
556 | efx_set_channel_names(efx); | 548 | efx_set_channel_names(efx); |
557 | 549 | ||
558 | return 0; | 550 | return 0; |
559 | 551 | ||
560 | fail: | 552 | fail: |
561 | efx_remove_channels(efx); | 553 | efx_remove_channels(efx); |
562 | return rc; | 554 | return rc; |
563 | } | 555 | } |
564 | 556 | ||
565 | /* Channels are shutdown and reinitialised whilst the NIC is running | 557 | /* Channels are shutdown and reinitialised whilst the NIC is running |
566 | * to propagate configuration changes (mtu, checksum offload), or | 558 | * to propagate configuration changes (mtu, checksum offload), or |
567 | * to clear hardware error conditions | 559 | * to clear hardware error conditions |
568 | */ | 560 | */ |
569 | static void efx_init_channels(struct efx_nic *efx) | 561 | static void efx_init_channels(struct efx_nic *efx) |
570 | { | 562 | { |
571 | struct efx_tx_queue *tx_queue; | 563 | struct efx_tx_queue *tx_queue; |
572 | struct efx_rx_queue *rx_queue; | 564 | struct efx_rx_queue *rx_queue; |
573 | struct efx_channel *channel; | 565 | struct efx_channel *channel; |
574 | 566 | ||
575 | /* Calculate the rx buffer allocation parameters required to | 567 | /* Calculate the rx buffer allocation parameters required to |
576 | * support the current MTU, including padding for header | 568 | * support the current MTU, including padding for header |
577 | * alignment and overruns. | 569 | * alignment and overruns. |
578 | */ | 570 | */ |
579 | efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | 571 | efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + |
580 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | 572 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + |
581 | efx->type->rx_buffer_hash_size + | 573 | efx->type->rx_buffer_hash_size + |
582 | efx->type->rx_buffer_padding); | 574 | efx->type->rx_buffer_padding); |
583 | efx->rx_buffer_order = get_order(efx->rx_buffer_len + | 575 | efx->rx_buffer_order = get_order(efx->rx_buffer_len + |
584 | sizeof(struct efx_rx_page_state)); | 576 | sizeof(struct efx_rx_page_state)); |
585 | 577 | ||
586 | /* Initialise the channels */ | 578 | /* Initialise the channels */ |
587 | efx_for_each_channel(channel, efx) { | 579 | efx_for_each_channel(channel, efx) { |
588 | netif_dbg(channel->efx, drv, channel->efx->net_dev, | 580 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
589 | "init chan %d\n", channel->channel); | 581 | "init chan %d\n", channel->channel); |
590 | 582 | ||
591 | efx_init_eventq(channel); | 583 | efx_init_eventq(channel); |
592 | 584 | ||
593 | efx_for_each_channel_tx_queue(tx_queue, channel) | 585 | efx_for_each_channel_tx_queue(tx_queue, channel) |
594 | efx_init_tx_queue(tx_queue); | 586 | efx_init_tx_queue(tx_queue); |
595 | 587 | ||
596 | /* The rx buffer allocation strategy is MTU dependent */ | 588 | /* The rx buffer allocation strategy is MTU dependent */ |
597 | efx_rx_strategy(channel); | 589 | efx_rx_strategy(channel); |
598 | 590 | ||
599 | efx_for_each_channel_rx_queue(rx_queue, channel) | 591 | efx_for_each_channel_rx_queue(rx_queue, channel) |
600 | efx_init_rx_queue(rx_queue); | 592 | efx_init_rx_queue(rx_queue); |
601 | 593 | ||
602 | WARN_ON(channel->rx_pkt != NULL); | 594 | WARN_ON(channel->rx_pkt != NULL); |
603 | efx_rx_strategy(channel); | 595 | efx_rx_strategy(channel); |
604 | } | 596 | } |
605 | } | 597 | } |
606 | 598 | ||
607 | /* This enables event queue processing and packet transmission. | 599 | /* This enables event queue processing and packet transmission. |
608 | * | 600 | * |
609 | * Note that this function is not allowed to fail, since that would | 601 | * Note that this function is not allowed to fail, since that would |
610 | * introduce too much complexity into the suspend/resume path. | 602 | * introduce too much complexity into the suspend/resume path. |
611 | */ | 603 | */ |
612 | static void efx_start_channel(struct efx_channel *channel) | 604 | static void efx_start_channel(struct efx_channel *channel) |
613 | { | 605 | { |
614 | struct efx_rx_queue *rx_queue; | 606 | struct efx_rx_queue *rx_queue; |
615 | 607 | ||
616 | netif_dbg(channel->efx, ifup, channel->efx->net_dev, | 608 | netif_dbg(channel->efx, ifup, channel->efx->net_dev, |
617 | "starting chan %d\n", channel->channel); | 609 | "starting chan %d\n", channel->channel); |
618 | 610 | ||
619 | /* The interrupt handler for this channel may set work_pending | 611 | /* The interrupt handler for this channel may set work_pending |
620 | * as soon as we enable it. Make sure it's cleared before | 612 | * as soon as we enable it. Make sure it's cleared before |
621 | * then. Similarly, make sure it sees the enabled flag set. */ | 613 | * then. Similarly, make sure it sees the enabled flag set. */ |
622 | channel->work_pending = false; | 614 | channel->work_pending = false; |
623 | channel->enabled = true; | 615 | channel->enabled = true; |
624 | smp_wmb(); | 616 | smp_wmb(); |
625 | 617 | ||
626 | /* Fill the queues before enabling NAPI */ | 618 | /* Fill the queues before enabling NAPI */ |
627 | efx_for_each_channel_rx_queue(rx_queue, channel) | 619 | efx_for_each_channel_rx_queue(rx_queue, channel) |
628 | efx_fast_push_rx_descriptors(rx_queue); | 620 | efx_fast_push_rx_descriptors(rx_queue); |
629 | 621 | ||
630 | napi_enable(&channel->napi_str); | 622 | napi_enable(&channel->napi_str); |
631 | } | 623 | } |
632 | 624 | ||
633 | /* This disables event queue processing and packet transmission. | 625 | /* This disables event queue processing and packet transmission. |
634 | * This function does not guarantee that all queue processing | 626 | * This function does not guarantee that all queue processing |
635 | * (e.g. RX refill) is complete. | 627 | * (e.g. RX refill) is complete. |
636 | */ | 628 | */ |
637 | static void efx_stop_channel(struct efx_channel *channel) | 629 | static void efx_stop_channel(struct efx_channel *channel) |
638 | { | 630 | { |
639 | if (!channel->enabled) | 631 | if (!channel->enabled) |
640 | return; | 632 | return; |
641 | 633 | ||
642 | netif_dbg(channel->efx, ifdown, channel->efx->net_dev, | 634 | netif_dbg(channel->efx, ifdown, channel->efx->net_dev, |
643 | "stop chan %d\n", channel->channel); | 635 | "stop chan %d\n", channel->channel); |
644 | 636 | ||
645 | channel->enabled = false; | 637 | channel->enabled = false; |
646 | napi_disable(&channel->napi_str); | 638 | napi_disable(&channel->napi_str); |
647 | } | 639 | } |
648 | 640 | ||
649 | static void efx_fini_channels(struct efx_nic *efx) | 641 | static void efx_fini_channels(struct efx_nic *efx) |
650 | { | 642 | { |
651 | struct efx_channel *channel; | 643 | struct efx_channel *channel; |
652 | struct efx_tx_queue *tx_queue; | 644 | struct efx_tx_queue *tx_queue; |
653 | struct efx_rx_queue *rx_queue; | 645 | struct efx_rx_queue *rx_queue; |
654 | int rc; | 646 | int rc; |
655 | 647 | ||
656 | EFX_ASSERT_RESET_SERIALISED(efx); | 648 | EFX_ASSERT_RESET_SERIALISED(efx); |
657 | BUG_ON(efx->port_enabled); | 649 | BUG_ON(efx->port_enabled); |
658 | 650 | ||
659 | rc = efx_nic_flush_queues(efx); | 651 | rc = efx_nic_flush_queues(efx); |
660 | if (rc && EFX_WORKAROUND_7803(efx)) { | 652 | if (rc && EFX_WORKAROUND_7803(efx)) { |
661 | /* Schedule a reset to recover from the flush failure. The | 653 | /* Schedule a reset to recover from the flush failure. The |
662 | * descriptor caches reference memory we're about to free, | 654 | * descriptor caches reference memory we're about to free, |
663 | * but falcon_reconfigure_mac_wrapper() won't reconnect | 655 | * but falcon_reconfigure_mac_wrapper() won't reconnect |
664 | * the MACs because of the pending reset. */ | 656 | * the MACs because of the pending reset. */ |
665 | netif_err(efx, drv, efx->net_dev, | 657 | netif_err(efx, drv, efx->net_dev, |
666 | "Resetting to recover from flush failure\n"); | 658 | "Resetting to recover from flush failure\n"); |
667 | efx_schedule_reset(efx, RESET_TYPE_ALL); | 659 | efx_schedule_reset(efx, RESET_TYPE_ALL); |
668 | } else if (rc) { | 660 | } else if (rc) { |
669 | netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); | 661 | netif_err(efx, drv, efx->net_dev, "failed to flush queues\n"); |
670 | } else { | 662 | } else { |
671 | netif_dbg(efx, drv, efx->net_dev, | 663 | netif_dbg(efx, drv, efx->net_dev, |
672 | "successfully flushed all queues\n"); | 664 | "successfully flushed all queues\n"); |
673 | } | 665 | } |
674 | 666 | ||
675 | efx_for_each_channel(channel, efx) { | 667 | efx_for_each_channel(channel, efx) { |
676 | netif_dbg(channel->efx, drv, channel->efx->net_dev, | 668 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
677 | "shut down chan %d\n", channel->channel); | 669 | "shut down chan %d\n", channel->channel); |
678 | 670 | ||
679 | efx_for_each_channel_rx_queue(rx_queue, channel) | 671 | efx_for_each_channel_rx_queue(rx_queue, channel) |
680 | efx_fini_rx_queue(rx_queue); | 672 | efx_fini_rx_queue(rx_queue); |
681 | efx_for_each_channel_tx_queue(tx_queue, channel) | 673 | efx_for_each_channel_tx_queue(tx_queue, channel) |
682 | efx_fini_tx_queue(tx_queue); | 674 | efx_fini_tx_queue(tx_queue); |
683 | efx_fini_eventq(channel); | 675 | efx_fini_eventq(channel); |
684 | } | 676 | } |
685 | } | 677 | } |
686 | 678 | ||
687 | static void efx_remove_channel(struct efx_channel *channel) | 679 | static void efx_remove_channel(struct efx_channel *channel) |
688 | { | 680 | { |
689 | struct efx_tx_queue *tx_queue; | 681 | struct efx_tx_queue *tx_queue; |
690 | struct efx_rx_queue *rx_queue; | 682 | struct efx_rx_queue *rx_queue; |
691 | 683 | ||
692 | netif_dbg(channel->efx, drv, channel->efx->net_dev, | 684 | netif_dbg(channel->efx, drv, channel->efx->net_dev, |
693 | "destroy chan %d\n", channel->channel); | 685 | "destroy chan %d\n", channel->channel); |
694 | 686 | ||
695 | efx_for_each_channel_rx_queue(rx_queue, channel) | 687 | efx_for_each_channel_rx_queue(rx_queue, channel) |
696 | efx_remove_rx_queue(rx_queue); | 688 | efx_remove_rx_queue(rx_queue); |
697 | efx_for_each_channel_tx_queue(tx_queue, channel) | 689 | efx_for_each_channel_tx_queue(tx_queue, channel) |
698 | efx_remove_tx_queue(tx_queue); | 690 | efx_remove_tx_queue(tx_queue); |
699 | efx_remove_eventq(channel); | 691 | efx_remove_eventq(channel); |
700 | } | 692 | } |
701 | 693 | ||
702 | static void efx_remove_channels(struct efx_nic *efx) | 694 | static void efx_remove_channels(struct efx_nic *efx) |
703 | { | 695 | { |
704 | struct efx_channel *channel; | 696 | struct efx_channel *channel; |
705 | 697 | ||
706 | efx_for_each_channel(channel, efx) | 698 | efx_for_each_channel(channel, efx) |
707 | efx_remove_channel(channel); | 699 | efx_remove_channel(channel); |
708 | } | 700 | } |
709 | 701 | ||
710 | int | 702 | int |
711 | efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) | 703 | efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries) |
712 | { | 704 | { |
713 | struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; | 705 | struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel; |
714 | u32 old_rxq_entries, old_txq_entries; | 706 | u32 old_rxq_entries, old_txq_entries; |
715 | unsigned i; | 707 | unsigned i; |
716 | int rc; | 708 | int rc; |
717 | 709 | ||
718 | efx_stop_all(efx); | 710 | efx_stop_all(efx); |
719 | efx_fini_channels(efx); | 711 | efx_fini_channels(efx); |
720 | 712 | ||
721 | /* Clone channels */ | 713 | /* Clone channels */ |
722 | memset(other_channel, 0, sizeof(other_channel)); | 714 | memset(other_channel, 0, sizeof(other_channel)); |
723 | for (i = 0; i < efx->n_channels; i++) { | 715 | for (i = 0; i < efx->n_channels; i++) { |
724 | channel = efx_alloc_channel(efx, i, efx->channel[i]); | 716 | channel = efx_alloc_channel(efx, i, efx->channel[i]); |
725 | if (!channel) { | 717 | if (!channel) { |
726 | rc = -ENOMEM; | 718 | rc = -ENOMEM; |
727 | goto out; | 719 | goto out; |
728 | } | 720 | } |
729 | other_channel[i] = channel; | 721 | other_channel[i] = channel; |
730 | } | 722 | } |
731 | 723 | ||
732 | /* Swap entry counts and channel pointers */ | 724 | /* Swap entry counts and channel pointers */ |
733 | old_rxq_entries = efx->rxq_entries; | 725 | old_rxq_entries = efx->rxq_entries; |
734 | old_txq_entries = efx->txq_entries; | 726 | old_txq_entries = efx->txq_entries; |
735 | efx->rxq_entries = rxq_entries; | 727 | efx->rxq_entries = rxq_entries; |
736 | efx->txq_entries = txq_entries; | 728 | efx->txq_entries = txq_entries; |
737 | for (i = 0; i < efx->n_channels; i++) { | 729 | for (i = 0; i < efx->n_channels; i++) { |
738 | channel = efx->channel[i]; | 730 | channel = efx->channel[i]; |
739 | efx->channel[i] = other_channel[i]; | 731 | efx->channel[i] = other_channel[i]; |
740 | other_channel[i] = channel; | 732 | other_channel[i] = channel; |
741 | } | 733 | } |
742 | 734 | ||
743 | rc = efx_probe_channels(efx); | 735 | rc = efx_probe_channels(efx); |
744 | if (rc) | 736 | if (rc) |
745 | goto rollback; | 737 | goto rollback; |
746 | 738 | ||
747 | /* Destroy old channels */ | 739 | /* Destroy old channels */ |
748 | for (i = 0; i < efx->n_channels; i++) | 740 | for (i = 0; i < efx->n_channels; i++) |
749 | efx_remove_channel(other_channel[i]); | 741 | efx_remove_channel(other_channel[i]); |
750 | out: | 742 | out: |
751 | /* Free unused channel structures */ | 743 | /* Free unused channel structures */ |
752 | for (i = 0; i < efx->n_channels; i++) | 744 | for (i = 0; i < efx->n_channels; i++) |
753 | kfree(other_channel[i]); | 745 | kfree(other_channel[i]); |
754 | 746 | ||
755 | efx_init_channels(efx); | 747 | efx_init_channels(efx); |
756 | efx_start_all(efx); | 748 | efx_start_all(efx); |
757 | return rc; | 749 | return rc; |
758 | 750 | ||
759 | rollback: | 751 | rollback: |
760 | /* Swap back */ | 752 | /* Swap back */ |
761 | efx->rxq_entries = old_rxq_entries; | 753 | efx->rxq_entries = old_rxq_entries; |
762 | efx->txq_entries = old_txq_entries; | 754 | efx->txq_entries = old_txq_entries; |
763 | for (i = 0; i < efx->n_channels; i++) { | 755 | for (i = 0; i < efx->n_channels; i++) { |
764 | channel = efx->channel[i]; | 756 | channel = efx->channel[i]; |
765 | efx->channel[i] = other_channel[i]; | 757 | efx->channel[i] = other_channel[i]; |
766 | other_channel[i] = channel; | 758 | other_channel[i] = channel; |
767 | } | 759 | } |
768 | goto out; | 760 | goto out; |
769 | } | 761 | } |
770 | 762 | ||
771 | void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) | 763 | void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue) |
772 | { | 764 | { |
773 | mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); | 765 | mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100)); |
774 | } | 766 | } |
775 | 767 | ||
776 | /************************************************************************** | 768 | /************************************************************************** |
777 | * | 769 | * |
778 | * Port handling | 770 | * Port handling |
779 | * | 771 | * |
780 | **************************************************************************/ | 772 | **************************************************************************/ |
781 | 773 | ||
782 | /* This ensures that the kernel is kept informed (via | 774 | /* This ensures that the kernel is kept informed (via |
783 | * netif_carrier_on/off) of the link status, and also maintains the | 775 | * netif_carrier_on/off) of the link status, and also maintains the |
784 | * link status's stop on the port's TX queue. | 776 | * link status's stop on the port's TX queue. |
785 | */ | 777 | */ |
786 | void efx_link_status_changed(struct efx_nic *efx) | 778 | void efx_link_status_changed(struct efx_nic *efx) |
787 | { | 779 | { |
788 | struct efx_link_state *link_state = &efx->link_state; | 780 | struct efx_link_state *link_state = &efx->link_state; |
789 | 781 | ||
790 | /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure | 782 | /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure |
791 | * that no events are triggered between unregister_netdev() and the | 783 | * that no events are triggered between unregister_netdev() and the |
792 | * driver unloading. A more general condition is that NETDEV_CHANGE | 784 | * driver unloading. A more general condition is that NETDEV_CHANGE |
793 | * can only be generated between NETDEV_UP and NETDEV_DOWN */ | 785 | * can only be generated between NETDEV_UP and NETDEV_DOWN */ |
794 | if (!netif_running(efx->net_dev)) | 786 | if (!netif_running(efx->net_dev)) |
795 | return; | 787 | return; |
796 | 788 | ||
797 | if (efx->port_inhibited) { | 789 | if (efx->port_inhibited) { |
798 | netif_carrier_off(efx->net_dev); | 790 | netif_carrier_off(efx->net_dev); |
799 | return; | 791 | return; |
800 | } | 792 | } |
801 | 793 | ||
802 | if (link_state->up != netif_carrier_ok(efx->net_dev)) { | 794 | if (link_state->up != netif_carrier_ok(efx->net_dev)) { |
803 | efx->n_link_state_changes++; | 795 | efx->n_link_state_changes++; |
804 | 796 | ||
805 | if (link_state->up) | 797 | if (link_state->up) |
806 | netif_carrier_on(efx->net_dev); | 798 | netif_carrier_on(efx->net_dev); |
807 | else | 799 | else |
808 | netif_carrier_off(efx->net_dev); | 800 | netif_carrier_off(efx->net_dev); |
809 | } | 801 | } |
810 | 802 | ||
811 | /* Status message for kernel log */ | 803 | /* Status message for kernel log */ |
812 | if (link_state->up) { | 804 | if (link_state->up) { |
813 | netif_info(efx, link, efx->net_dev, | 805 | netif_info(efx, link, efx->net_dev, |
814 | "link up at %uMbps %s-duplex (MTU %d)%s\n", | 806 | "link up at %uMbps %s-duplex (MTU %d)%s\n", |
815 | link_state->speed, link_state->fd ? "full" : "half", | 807 | link_state->speed, link_state->fd ? "full" : "half", |
816 | efx->net_dev->mtu, | 808 | efx->net_dev->mtu, |
817 | (efx->promiscuous ? " [PROMISC]" : "")); | 809 | (efx->promiscuous ? " [PROMISC]" : "")); |
818 | } else { | 810 | } else { |
819 | netif_info(efx, link, efx->net_dev, "link down\n"); | 811 | netif_info(efx, link, efx->net_dev, "link down\n"); |
820 | } | 812 | } |
821 | 813 | ||
822 | } | 814 | } |
823 | 815 | ||
824 | void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) | 816 | void efx_link_set_advertising(struct efx_nic *efx, u32 advertising) |
825 | { | 817 | { |
826 | efx->link_advertising = advertising; | 818 | efx->link_advertising = advertising; |
827 | if (advertising) { | 819 | if (advertising) { |
828 | if (advertising & ADVERTISED_Pause) | 820 | if (advertising & ADVERTISED_Pause) |
829 | efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); | 821 | efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX); |
830 | else | 822 | else |
831 | efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); | 823 | efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX); |
832 | if (advertising & ADVERTISED_Asym_Pause) | 824 | if (advertising & ADVERTISED_Asym_Pause) |
833 | efx->wanted_fc ^= EFX_FC_TX; | 825 | efx->wanted_fc ^= EFX_FC_TX; |
834 | } | 826 | } |
835 | } | 827 | } |
836 | 828 | ||
837 | void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc) | 829 | void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type wanted_fc) |
838 | { | 830 | { |
839 | efx->wanted_fc = wanted_fc; | 831 | efx->wanted_fc = wanted_fc; |
840 | if (efx->link_advertising) { | 832 | if (efx->link_advertising) { |
841 | if (wanted_fc & EFX_FC_RX) | 833 | if (wanted_fc & EFX_FC_RX) |
842 | efx->link_advertising |= (ADVERTISED_Pause | | 834 | efx->link_advertising |= (ADVERTISED_Pause | |
843 | ADVERTISED_Asym_Pause); | 835 | ADVERTISED_Asym_Pause); |
844 | else | 836 | else |
845 | efx->link_advertising &= ~(ADVERTISED_Pause | | 837 | efx->link_advertising &= ~(ADVERTISED_Pause | |
846 | ADVERTISED_Asym_Pause); | 838 | ADVERTISED_Asym_Pause); |
847 | if (wanted_fc & EFX_FC_TX) | 839 | if (wanted_fc & EFX_FC_TX) |
848 | efx->link_advertising ^= ADVERTISED_Asym_Pause; | 840 | efx->link_advertising ^= ADVERTISED_Asym_Pause; |
849 | } | 841 | } |
850 | } | 842 | } |
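
The paired |= / ^= logic in these two helpers encodes the IEEE 802.3 pause-advertising rules: symmetric flow control advertises Pause alone, RX-only advertises Pause plus Asym_Pause, and TX-only advertises Asym_Pause alone, so the final XOR flips the asymmetric bit whenever TX pause is wanted. A minimal standalone sketch of the same mapping (the EFX_FC_* and ADVERTISED_* values below are local stand-ins, not the driver's real definitions):

    /* Standalone sketch of the pause-advertising mapping above; the
     * EFX_FC_* and ADVERTISED_* values are local stand-ins, not the
     * driver's real definitions. */
    #include <stdio.h>

    #define EFX_FC_RX             0x1
    #define EFX_FC_TX             0x2
    #define ADVERTISED_Pause      0x2000
    #define ADVERTISED_Asym_Pause 0x4000

    static unsigned int fc_to_advertising(unsigned int wanted_fc)
    {
            unsigned int adv = 0;

            if (wanted_fc & EFX_FC_RX)
                    adv |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
            if (wanted_fc & EFX_FC_TX)
                    adv ^= ADVERTISED_Asym_Pause;   /* TX flips the asym bit */
            return adv;
    }

    int main(void)
    {
            unsigned int fc;

            for (fc = 0; fc <= (EFX_FC_RX | EFX_FC_TX); fc++)
                    printf("fc=%u -> Pause=%d Asym_Pause=%d\n", fc,
                           !!(fc_to_advertising(fc) & ADVERTISED_Pause),
                           !!(fc_to_advertising(fc) & ADVERTISED_Asym_Pause));
            return 0;
    }

Running the sketch prints the four cases: none (neither bit), RX-only (both bits), TX-only (Asym_Pause alone), and RX+TX (Pause alone), which is exactly the table the standard prescribes.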
851 | 843 | ||
852 | static void efx_fini_port(struct efx_nic *efx); | 844 | static void efx_fini_port(struct efx_nic *efx); |
853 | 845 | ||
854 | /* Push loopback/power/transmit disable settings to the PHY, and reconfigure | 846 | /* Push loopback/power/transmit disable settings to the PHY, and reconfigure |
855 | * the MAC appropriately. All other PHY configuration changes are pushed | 847 | * the MAC appropriately. All other PHY configuration changes are pushed |
856 | * through phy_op->set_settings(), and pushed asynchronously to the MAC | 848 | * through phy_op->set_settings(), and pushed asynchronously to the MAC |
857 | * through efx_monitor(). | 849 | * through efx_monitor(). |
858 | * | 850 | * |
859 | * Callers must hold the mac_lock | 851 | * Callers must hold the mac_lock |
860 | */ | 852 | */ |
861 | int __efx_reconfigure_port(struct efx_nic *efx) | 853 | int __efx_reconfigure_port(struct efx_nic *efx) |
862 | { | 854 | { |
863 | enum efx_phy_mode phy_mode; | 855 | enum efx_phy_mode phy_mode; |
864 | int rc; | 856 | int rc; |
865 | 857 | ||
866 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | 858 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); |
867 | 859 | ||
868 | /* Serialise the promiscuous flag with efx_set_multicast_list. */ | 860 | /* Serialise the promiscuous flag with efx_set_multicast_list. */ |
869 | if (efx_dev_registered(efx)) { | 861 | if (efx_dev_registered(efx)) { |
870 | netif_addr_lock_bh(efx->net_dev); | 862 | netif_addr_lock_bh(efx->net_dev); |
871 | netif_addr_unlock_bh(efx->net_dev); | 863 | netif_addr_unlock_bh(efx->net_dev); |
872 | } | 864 | } |
873 | 865 | ||
874 | /* Disable PHY transmit in mac level loopbacks */ | 866 | /* Disable PHY transmit in mac level loopbacks */ |
875 | phy_mode = efx->phy_mode; | 867 | phy_mode = efx->phy_mode; |
876 | if (LOOPBACK_INTERNAL(efx)) | 868 | if (LOOPBACK_INTERNAL(efx)) |
877 | efx->phy_mode |= PHY_MODE_TX_DISABLED; | 869 | efx->phy_mode |= PHY_MODE_TX_DISABLED; |
878 | else | 870 | else |
879 | efx->phy_mode &= ~PHY_MODE_TX_DISABLED; | 871 | efx->phy_mode &= ~PHY_MODE_TX_DISABLED; |
880 | 872 | ||
881 | rc = efx->type->reconfigure_port(efx); | 873 | rc = efx->type->reconfigure_port(efx); |
882 | 874 | ||
883 | if (rc) | 875 | if (rc) |
884 | efx->phy_mode = phy_mode; | 876 | efx->phy_mode = phy_mode; |
885 | 877 | ||
886 | return rc; | 878 | return rc; |
887 | } | 879 | } |
888 | 880 | ||
889 | /* Reinitialise the MAC to pick up new PHY settings, even if the port is | 881 | /* Reinitialise the MAC to pick up new PHY settings, even if the port is |
890 | * disabled. */ | 882 | * disabled. */ |
891 | int efx_reconfigure_port(struct efx_nic *efx) | 883 | int efx_reconfigure_port(struct efx_nic *efx) |
892 | { | 884 | { |
893 | int rc; | 885 | int rc; |
894 | 886 | ||
895 | EFX_ASSERT_RESET_SERIALISED(efx); | 887 | EFX_ASSERT_RESET_SERIALISED(efx); |
896 | 888 | ||
897 | mutex_lock(&efx->mac_lock); | 889 | mutex_lock(&efx->mac_lock); |
898 | rc = __efx_reconfigure_port(efx); | 890 | rc = __efx_reconfigure_port(efx); |
899 | mutex_unlock(&efx->mac_lock); | 891 | mutex_unlock(&efx->mac_lock); |
900 | 892 | ||
901 | return rc; | 893 | return rc; |
902 | } | 894 | } |
903 | 895 | ||
904 | /* Asynchronous work item for changing MAC promiscuity and multicast | 896 | /* Asynchronous work item for changing MAC promiscuity and multicast |
905 | * hash. Avoid a drain/rx_ingress enable by reconfiguring the current | 897 | * hash. Avoid a drain/rx_ingress enable by reconfiguring the current |
906 | * MAC directly. */ | 898 | * MAC directly. */ |
907 | static void efx_mac_work(struct work_struct *data) | 899 | static void efx_mac_work(struct work_struct *data) |
908 | { | 900 | { |
909 | struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); | 901 | struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); |
910 | 902 | ||
911 | mutex_lock(&efx->mac_lock); | 903 | mutex_lock(&efx->mac_lock); |
912 | if (efx->port_enabled) { | 904 | if (efx->port_enabled) { |
913 | efx->type->push_multicast_hash(efx); | 905 | efx->type->push_multicast_hash(efx); |
914 | efx->mac_op->reconfigure(efx); | 906 | efx->mac_op->reconfigure(efx); |
915 | } | 907 | } |
916 | mutex_unlock(&efx->mac_lock); | 908 | mutex_unlock(&efx->mac_lock); |
917 | } | 909 | } |
918 | 910 | ||
919 | static int efx_probe_port(struct efx_nic *efx) | 911 | static int efx_probe_port(struct efx_nic *efx) |
920 | { | 912 | { |
921 | int rc; | 913 | int rc; |
922 | 914 | ||
923 | netif_dbg(efx, probe, efx->net_dev, "create port\n"); | 915 | netif_dbg(efx, probe, efx->net_dev, "create port\n"); |
924 | 916 | ||
925 | if (phy_flash_cfg) | 917 | if (phy_flash_cfg) |
926 | efx->phy_mode = PHY_MODE_SPECIAL; | 918 | efx->phy_mode = PHY_MODE_SPECIAL; |
927 | 919 | ||
928 | /* Connect up MAC/PHY operations table */ | 920 | /* Connect up MAC/PHY operations table */ |
929 | rc = efx->type->probe_port(efx); | 921 | rc = efx->type->probe_port(efx); |
930 | if (rc) | 922 | if (rc) |
931 | return rc; | 923 | return rc; |
932 | 924 | ||
933 | /* Sanity check MAC address */ | 925 | /* Sanity check MAC address */ |
934 | if (is_valid_ether_addr(efx->mac_address)) { | 926 | if (is_valid_ether_addr(efx->mac_address)) { |
935 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); | 927 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); |
936 | } else { | 928 | } else { |
937 | netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", | 929 | netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", |
938 | efx->mac_address); | 930 | efx->mac_address); |
939 | if (!allow_bad_hwaddr) { | 931 | if (!allow_bad_hwaddr) { |
940 | rc = -EINVAL; | 932 | rc = -EINVAL; |
941 | goto err; | 933 | goto err; |
942 | } | 934 | } |
943 | random_ether_addr(efx->net_dev->dev_addr); | 935 | random_ether_addr(efx->net_dev->dev_addr); |
944 | netif_info(efx, probe, efx->net_dev, | 936 | netif_info(efx, probe, efx->net_dev, |
945 | "using locally-generated MAC %pM\n", | 937 | "using locally-generated MAC %pM\n", |
946 | efx->net_dev->dev_addr); | 938 | efx->net_dev->dev_addr); |
947 | } | 939 | } |
948 | 940 | ||
949 | return 0; | 941 | return 0; |
950 | 942 | ||
951 | err: | 943 | err: |
952 | efx->type->remove_port(efx); | 944 | efx->type->remove_port(efx); |
953 | return rc; | 945 | return rc; |
954 | } | 946 | } |
955 | 947 | ||
956 | static int efx_init_port(struct efx_nic *efx) | 948 | static int efx_init_port(struct efx_nic *efx) |
957 | { | 949 | { |
958 | int rc; | 950 | int rc; |
959 | 951 | ||
960 | netif_dbg(efx, drv, efx->net_dev, "init port\n"); | 952 | netif_dbg(efx, drv, efx->net_dev, "init port\n"); |
961 | 953 | ||
962 | mutex_lock(&efx->mac_lock); | 954 | mutex_lock(&efx->mac_lock); |
963 | 955 | ||
964 | rc = efx->phy_op->init(efx); | 956 | rc = efx->phy_op->init(efx); |
965 | if (rc) | 957 | if (rc) |
966 | goto fail1; | 958 | goto fail1; |
967 | 959 | ||
968 | efx->port_initialized = true; | 960 | efx->port_initialized = true; |
969 | 961 | ||
970 | /* Reconfigure the MAC before creating dma queues (required for | 962 | /* Reconfigure the MAC before creating dma queues (required for |
971 | * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ | 963 | * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ |
972 | efx->mac_op->reconfigure(efx); | 964 | efx->mac_op->reconfigure(efx); |
973 | 965 | ||
974 | /* Ensure the PHY advertises the correct flow control settings */ | 966 | /* Ensure the PHY advertises the correct flow control settings */ |
975 | rc = efx->phy_op->reconfigure(efx); | 967 | rc = efx->phy_op->reconfigure(efx); |
976 | if (rc) | 968 | if (rc) |
977 | goto fail2; | 969 | goto fail2; |
978 | 970 | ||
979 | mutex_unlock(&efx->mac_lock); | 971 | mutex_unlock(&efx->mac_lock); |
980 | return 0; | 972 | return 0; |
981 | 973 | ||
982 | fail2: | 974 | fail2: |
983 | efx->phy_op->fini(efx); | 975 | efx->phy_op->fini(efx); |
984 | fail1: | 976 | fail1: |
985 | mutex_unlock(&efx->mac_lock); | 977 | mutex_unlock(&efx->mac_lock); |
986 | return rc; | 978 | return rc; |
987 | } | 979 | } |
988 | 980 | ||
989 | static void efx_start_port(struct efx_nic *efx) | 981 | static void efx_start_port(struct efx_nic *efx) |
990 | { | 982 | { |
991 | netif_dbg(efx, ifup, efx->net_dev, "start port\n"); | 983 | netif_dbg(efx, ifup, efx->net_dev, "start port\n"); |
992 | BUG_ON(efx->port_enabled); | 984 | BUG_ON(efx->port_enabled); |
993 | 985 | ||
994 | mutex_lock(&efx->mac_lock); | 986 | mutex_lock(&efx->mac_lock); |
995 | efx->port_enabled = true; | 987 | efx->port_enabled = true; |
996 | 988 | ||
997 | /* efx_mac_work() might have been scheduled after efx_stop_port(), | 989 | /* efx_mac_work() might have been scheduled after efx_stop_port(), |
998 | * and then cancelled by efx_flush_all() */ | 990 | * and then cancelled by efx_flush_all() */ |
999 | efx->type->push_multicast_hash(efx); | 991 | efx->type->push_multicast_hash(efx); |
1000 | efx->mac_op->reconfigure(efx); | 992 | efx->mac_op->reconfigure(efx); |
1001 | 993 | ||
1002 | mutex_unlock(&efx->mac_lock); | 994 | mutex_unlock(&efx->mac_lock); |
1003 | } | 995 | } |
1004 | 996 | ||
1005 | /* Prevent efx_mac_work() and efx_monitor() from working */ | 997 | /* Prevent efx_mac_work() and efx_monitor() from working */ |
1006 | static void efx_stop_port(struct efx_nic *efx) | 998 | static void efx_stop_port(struct efx_nic *efx) |
1007 | { | 999 | { |
1008 | netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); | 1000 | netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); |
1009 | 1001 | ||
1010 | mutex_lock(&efx->mac_lock); | 1002 | mutex_lock(&efx->mac_lock); |
1011 | efx->port_enabled = false; | 1003 | efx->port_enabled = false; |
1012 | mutex_unlock(&efx->mac_lock); | 1004 | mutex_unlock(&efx->mac_lock); |
1013 | 1005 | ||
1014 | /* Serialise against efx_set_multicast_list() */ | 1006 | /* Serialise against efx_set_multicast_list() */ |
1015 | if (efx_dev_registered(efx)) { | 1007 | if (efx_dev_registered(efx)) { |
1016 | netif_addr_lock_bh(efx->net_dev); | 1008 | netif_addr_lock_bh(efx->net_dev); |
1017 | netif_addr_unlock_bh(efx->net_dev); | 1009 | netif_addr_unlock_bh(efx->net_dev); |
1018 | } | 1010 | } |
1019 | } | 1011 | } |
1020 | 1012 | ||
1021 | static void efx_fini_port(struct efx_nic *efx) | 1013 | static void efx_fini_port(struct efx_nic *efx) |
1022 | { | 1014 | { |
1023 | netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); | 1015 | netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); |
1024 | 1016 | ||
1025 | if (!efx->port_initialized) | 1017 | if (!efx->port_initialized) |
1026 | return; | 1018 | return; |
1027 | 1019 | ||
1028 | efx->phy_op->fini(efx); | 1020 | efx->phy_op->fini(efx); |
1029 | efx->port_initialized = false; | 1021 | efx->port_initialized = false; |
1030 | 1022 | ||
1031 | efx->link_state.up = false; | 1023 | efx->link_state.up = false; |
1032 | efx_link_status_changed(efx); | 1024 | efx_link_status_changed(efx); |
1033 | } | 1025 | } |
1034 | 1026 | ||
1035 | static void efx_remove_port(struct efx_nic *efx) | 1027 | static void efx_remove_port(struct efx_nic *efx) |
1036 | { | 1028 | { |
1037 | netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); | 1029 | netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); |
1038 | 1030 | ||
1039 | efx->type->remove_port(efx); | 1031 | efx->type->remove_port(efx); |
1040 | } | 1032 | } |
1041 | 1033 | ||
1042 | /************************************************************************** | 1034 | /************************************************************************** |
1043 | * | 1035 | * |
1044 | * NIC handling | 1036 | * NIC handling |
1045 | * | 1037 | * |
1046 | **************************************************************************/ | 1038 | **************************************************************************/ |
1047 | 1039 | ||
1048 | /* This configures the PCI device to enable I/O and DMA. */ | 1040 | /* This configures the PCI device to enable I/O and DMA. */ |
1049 | static int efx_init_io(struct efx_nic *efx) | 1041 | static int efx_init_io(struct efx_nic *efx) |
1050 | { | 1042 | { |
1051 | struct pci_dev *pci_dev = efx->pci_dev; | 1043 | struct pci_dev *pci_dev = efx->pci_dev; |
1052 | dma_addr_t dma_mask = efx->type->max_dma_mask; | 1044 | dma_addr_t dma_mask = efx->type->max_dma_mask; |
1053 | int rc; | 1045 | int rc; |
1054 | 1046 | ||
1055 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); | 1047 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); |
1056 | 1048 | ||
1057 | rc = pci_enable_device(pci_dev); | 1049 | rc = pci_enable_device(pci_dev); |
1058 | if (rc) { | 1050 | if (rc) { |
1059 | netif_err(efx, probe, efx->net_dev, | 1051 | netif_err(efx, probe, efx->net_dev, |
1060 | "failed to enable PCI device\n"); | 1052 | "failed to enable PCI device\n"); |
1061 | goto fail1; | 1053 | goto fail1; |
1062 | } | 1054 | } |
1063 | 1055 | ||
1064 | pci_set_master(pci_dev); | 1056 | pci_set_master(pci_dev); |
1065 | 1057 | ||
1066 | /* Set the PCI DMA mask. Try all possibilities from our | 1058 | /* Set the PCI DMA mask. Try all possibilities from our |
1067 | * genuine mask down to 32 bits, because some architectures | 1059 | * genuine mask down to 32 bits, because some architectures |
1068 | * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit | 1060 | * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit |
1069 | * masks even though they reject 46 bit masks. | 1061 | * masks even though they reject 46 bit masks. |
1070 | */ | 1062 | */ |
1071 | while (dma_mask > 0x7fffffffUL) { | 1063 | while (dma_mask > 0x7fffffffUL) { |
1072 | if (pci_dma_supported(pci_dev, dma_mask) && | 1064 | if (pci_dma_supported(pci_dev, dma_mask) && |
1073 | ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) | 1065 | ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) |
1074 | break; | 1066 | break; |
1075 | dma_mask >>= 1; | 1067 | dma_mask >>= 1; |
1076 | } | 1068 | } |
1077 | if (rc) { | 1069 | if (rc) { |
1078 | netif_err(efx, probe, efx->net_dev, | 1070 | netif_err(efx, probe, efx->net_dev, |
1079 | "could not find a suitable DMA mask\n"); | 1071 | "could not find a suitable DMA mask\n"); |
1080 | goto fail2; | 1072 | goto fail2; |
1081 | } | 1073 | } |
1082 | netif_dbg(efx, probe, efx->net_dev, | 1074 | netif_dbg(efx, probe, efx->net_dev, |
1083 | "using DMA mask %llx\n", (unsigned long long) dma_mask); | 1075 | "using DMA mask %llx\n", (unsigned long long) dma_mask); |
1084 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); | 1076 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); |
1085 | if (rc) { | 1077 | if (rc) { |
1086 | /* pci_set_consistent_dma_mask() is not *allowed* to | 1078 | /* pci_set_consistent_dma_mask() is not *allowed* to |
1087 | * fail with a mask that pci_set_dma_mask() accepted, | 1079 | * fail with a mask that pci_set_dma_mask() accepted, |
1088 | * but just in case... | 1080 | * but just in case... |
1089 | */ | 1081 | */ |
1090 | netif_err(efx, probe, efx->net_dev, | 1082 | netif_err(efx, probe, efx->net_dev, |
1091 | "failed to set consistent DMA mask\n"); | 1083 | "failed to set consistent DMA mask\n"); |
1092 | goto fail2; | 1084 | goto fail2; |
1093 | } | 1085 | } |
1094 | 1086 | ||
1095 | efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); | 1087 | efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); |
1096 | rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); | 1088 | rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); |
1097 | if (rc) { | 1089 | if (rc) { |
1098 | netif_err(efx, probe, efx->net_dev, | 1090 | netif_err(efx, probe, efx->net_dev, |
1099 | "request for memory BAR failed\n"); | 1091 | "request for memory BAR failed\n"); |
1100 | rc = -EIO; | 1092 | rc = -EIO; |
1101 | goto fail3; | 1093 | goto fail3; |
1102 | } | 1094 | } |
1103 | efx->membase = ioremap_nocache(efx->membase_phys, | 1095 | efx->membase = ioremap_nocache(efx->membase_phys, |
1104 | efx->type->mem_map_size); | 1096 | efx->type->mem_map_size); |
1105 | if (!efx->membase) { | 1097 | if (!efx->membase) { |
1106 | netif_err(efx, probe, efx->net_dev, | 1098 | netif_err(efx, probe, efx->net_dev, |
1107 | "could not map memory BAR at %llx+%x\n", | 1099 | "could not map memory BAR at %llx+%x\n", |
1108 | (unsigned long long)efx->membase_phys, | 1100 | (unsigned long long)efx->membase_phys, |
1109 | efx->type->mem_map_size); | 1101 | efx->type->mem_map_size); |
1110 | rc = -ENOMEM; | 1102 | rc = -ENOMEM; |
1111 | goto fail4; | 1103 | goto fail4; |
1112 | } | 1104 | } |
1113 | netif_dbg(efx, probe, efx->net_dev, | 1105 | netif_dbg(efx, probe, efx->net_dev, |
1114 | "memory BAR at %llx+%x (virtual %p)\n", | 1106 | "memory BAR at %llx+%x (virtual %p)\n", |
1115 | (unsigned long long)efx->membase_phys, | 1107 | (unsigned long long)efx->membase_phys, |
1116 | efx->type->mem_map_size, efx->membase); | 1108 | efx->type->mem_map_size, efx->membase); |
1117 | 1109 | ||
1118 | return 0; | 1110 | return 0; |
1119 | 1111 | ||
1120 | fail4: | 1112 | fail4: |
1121 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); | 1113 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); |
1122 | fail3: | 1114 | fail3: |
1123 | efx->membase_phys = 0; | 1115 | efx->membase_phys = 0; |
1124 | fail2: | 1116 | fail2: |
1125 | pci_disable_device(efx->pci_dev); | 1117 | pci_disable_device(efx->pci_dev); |
1126 | fail1: | 1118 | fail1: |
1127 | return rc; | 1119 | return rc; |
1128 | } | 1120 | } |
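
The while loop in efx_init_io() is the classic DMA-mask negotiation pattern: start from the device's genuine limit and halve the mask until the platform accepts one, falling back as far as 32 bits. A condensed kernel-context sketch of just that negotiation, assuming a 46-bit starting mask as on Falcon-class hardware (logging and the consistent-mask step trimmed):

    /* Sketch only: same fallback as efx_init_io(), minus logging.
     * pci_set_dma_mask() returns 0 once the platform accepts a mask. */
    static int sketch_set_dma_mask(struct pci_dev *pci_dev)
    {
            u64 dma_mask = DMA_BIT_MASK(46);        /* device's genuine limit */
            int rc = -EIO;

            while (dma_mask > 0x7fffffffUL) {
                    rc = pci_set_dma_mask(pci_dev, dma_mask);
                    if (rc == 0)
                            break;                  /* platform accepted it */
                    dma_mask >>= 1;                 /* try a narrower mask */
            }
            return rc;
    }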
1129 | 1121 | ||
1130 | static void efx_fini_io(struct efx_nic *efx) | 1122 | static void efx_fini_io(struct efx_nic *efx) |
1131 | { | 1123 | { |
1132 | netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); | 1124 | netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); |
1133 | 1125 | ||
1134 | if (efx->membase) { | 1126 | if (efx->membase) { |
1135 | iounmap(efx->membase); | 1127 | iounmap(efx->membase); |
1136 | efx->membase = NULL; | 1128 | efx->membase = NULL; |
1137 | } | 1129 | } |
1138 | 1130 | ||
1139 | if (efx->membase_phys) { | 1131 | if (efx->membase_phys) { |
1140 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); | 1132 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); |
1141 | efx->membase_phys = 0; | 1133 | efx->membase_phys = 0; |
1142 | } | 1134 | } |
1143 | 1135 | ||
1144 | pci_disable_device(efx->pci_dev); | 1136 | pci_disable_device(efx->pci_dev); |
1145 | } | 1137 | } |
1146 | 1138 | ||
1147 | /* Get number of channels wanted. Each channel will have its own IRQ, | 1139 | /* Get number of channels wanted. Each channel will have its own IRQ, |
1148 | * 1 RX queue and/or 2 TX queues. */ | 1140 | * 1 RX queue and/or 2 TX queues. */ |
1149 | static int efx_wanted_channels(void) | 1141 | static int efx_wanted_channels(void) |
1150 | { | 1142 | { |
1151 | cpumask_var_t core_mask; | 1143 | cpumask_var_t core_mask; |
1152 | int count; | 1144 | int count; |
1153 | int cpu; | 1145 | int cpu; |
1154 | 1146 | ||
1155 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { | 1147 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { |
1156 | printk(KERN_WARNING | 1148 | printk(KERN_WARNING |
1157 | "sfc: RSS disabled due to allocation failure\n"); | 1149 | "sfc: RSS disabled due to allocation failure\n"); |
1158 | return 1; | 1150 | return 1; |
1159 | } | 1151 | } |
1160 | 1152 | ||
1161 | count = 0; | 1153 | count = 0; |
1162 | for_each_online_cpu(cpu) { | 1154 | for_each_online_cpu(cpu) { |
1163 | if (!cpumask_test_cpu(cpu, core_mask)) { | 1155 | if (!cpumask_test_cpu(cpu, core_mask)) { |
1164 | ++count; | 1156 | ++count; |
1165 | cpumask_or(core_mask, core_mask, | 1157 | cpumask_or(core_mask, core_mask, |
1166 | topology_core_cpumask(cpu)); | 1158 | topology_core_cpumask(cpu)); |
1167 | } | 1159 | } |
1168 | } | 1160 | } |
1169 | 1161 | ||
1170 | free_cpumask_var(core_mask); | 1162 | free_cpumask_var(core_mask); |
1171 | return count; | 1163 | return count; |
1172 | } | 1164 | } |
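
The core mask here deduplicates hyperthread siblings: each online CPU is counted only if no CPU sharing its physical core has been seen yet, after which the whole core's cpumask is merged in. On a 4-core/8-thread machine, for example, the loop returns 4 rather than 8, so RSS spreads one channel per physical core rather than one per logical CPU.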
1173 | 1165 | ||
1174 | /* Probe the number and type of interrupts we are able to obtain, and | 1166 | /* Probe the number and type of interrupts we are able to obtain, and |
1175 | * the resulting numbers of channels and RX queues. | 1167 | * the resulting numbers of channels and RX queues. |
1176 | */ | 1168 | */ |
1177 | static void efx_probe_interrupts(struct efx_nic *efx) | 1169 | static void efx_probe_interrupts(struct efx_nic *efx) |
1178 | { | 1170 | { |
1179 | int max_channels = | 1171 | int max_channels = |
1180 | min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); | 1172 | min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); |
1181 | int rc, i; | 1173 | int rc, i; |
1182 | 1174 | ||
1183 | if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { | 1175 | if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { |
1184 | struct msix_entry xentries[EFX_MAX_CHANNELS]; | 1176 | struct msix_entry xentries[EFX_MAX_CHANNELS]; |
1185 | int n_channels; | 1177 | int n_channels; |
1186 | 1178 | ||
1187 | n_channels = efx_wanted_channels(); | 1179 | n_channels = efx_wanted_channels(); |
1188 | if (separate_tx_channels) | 1180 | if (separate_tx_channels) |
1189 | n_channels *= 2; | 1181 | n_channels *= 2; |
1190 | n_channels = min(n_channels, max_channels); | 1182 | n_channels = min(n_channels, max_channels); |
1191 | 1183 | ||
1192 | for (i = 0; i < n_channels; i++) | 1184 | for (i = 0; i < n_channels; i++) |
1193 | xentries[i].entry = i; | 1185 | xentries[i].entry = i; |
1194 | rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); | 1186 | rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); |
1195 | if (rc > 0) { | 1187 | if (rc > 0) { |
1196 | netif_err(efx, drv, efx->net_dev, | 1188 | netif_err(efx, drv, efx->net_dev, |
1197 | "WARNING: Insufficient MSI-X vectors" | 1189 | "WARNING: Insufficient MSI-X vectors" |
1198 | " available (%d < %d).\n", rc, n_channels); | 1190 | " available (%d < %d).\n", rc, n_channels); |
1199 | netif_err(efx, drv, efx->net_dev, | 1191 | netif_err(efx, drv, efx->net_dev, |
1200 | "WARNING: Performance may be reduced.\n"); | 1192 | "WARNING: Performance may be reduced.\n"); |
1201 | EFX_BUG_ON_PARANOID(rc >= n_channels); | 1193 | EFX_BUG_ON_PARANOID(rc >= n_channels); |
1202 | n_channels = rc; | 1194 | n_channels = rc; |
1203 | rc = pci_enable_msix(efx->pci_dev, xentries, | 1195 | rc = pci_enable_msix(efx->pci_dev, xentries, |
1204 | n_channels); | 1196 | n_channels); |
1205 | } | 1197 | } |
1206 | 1198 | ||
1207 | if (rc == 0) { | 1199 | if (rc == 0) { |
1208 | efx->n_channels = n_channels; | 1200 | efx->n_channels = n_channels; |
1209 | if (separate_tx_channels) { | 1201 | if (separate_tx_channels) { |
1210 | efx->n_tx_channels = | 1202 | efx->n_tx_channels = |
1211 | max(efx->n_channels / 2, 1U); | 1203 | max(efx->n_channels / 2, 1U); |
1212 | efx->n_rx_channels = | 1204 | efx->n_rx_channels = |
1213 | max(efx->n_channels - | 1205 | max(efx->n_channels - |
1214 | efx->n_tx_channels, 1U); | 1206 | efx->n_tx_channels, 1U); |
1215 | } else { | 1207 | } else { |
1216 | efx->n_tx_channels = efx->n_channels; | 1208 | efx->n_tx_channels = efx->n_channels; |
1217 | efx->n_rx_channels = efx->n_channels; | 1209 | efx->n_rx_channels = efx->n_channels; |
1218 | } | 1210 | } |
1219 | for (i = 0; i < n_channels; i++) | 1211 | for (i = 0; i < n_channels; i++) |
1220 | efx_get_channel(efx, i)->irq = | 1212 | efx_get_channel(efx, i)->irq = |
1221 | xentries[i].vector; | 1213 | xentries[i].vector; |
1222 | } else { | 1214 | } else { |
1223 | /* Fall back to single channel MSI */ | 1215 | /* Fall back to single channel MSI */ |
1224 | efx->interrupt_mode = EFX_INT_MODE_MSI; | 1216 | efx->interrupt_mode = EFX_INT_MODE_MSI; |
1225 | netif_err(efx, drv, efx->net_dev, | 1217 | netif_err(efx, drv, efx->net_dev, |
1226 | "could not enable MSI-X\n"); | 1218 | "could not enable MSI-X\n"); |
1227 | } | 1219 | } |
1228 | } | 1220 | } |
1229 | 1221 | ||
1230 | /* Try single interrupt MSI */ | 1222 | /* Try single interrupt MSI */ |
1231 | if (efx->interrupt_mode == EFX_INT_MODE_MSI) { | 1223 | if (efx->interrupt_mode == EFX_INT_MODE_MSI) { |
1232 | efx->n_channels = 1; | 1224 | efx->n_channels = 1; |
1233 | efx->n_rx_channels = 1; | 1225 | efx->n_rx_channels = 1; |
1234 | efx->n_tx_channels = 1; | 1226 | efx->n_tx_channels = 1; |
1235 | rc = pci_enable_msi(efx->pci_dev); | 1227 | rc = pci_enable_msi(efx->pci_dev); |
1236 | if (rc == 0) { | 1228 | if (rc == 0) { |
1237 | efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; | 1229 | efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; |
1238 | } else { | 1230 | } else { |
1239 | netif_err(efx, drv, efx->net_dev, | 1231 | netif_err(efx, drv, efx->net_dev, |
1240 | "could not enable MSI\n"); | 1232 | "could not enable MSI\n"); |
1241 | efx->interrupt_mode = EFX_INT_MODE_LEGACY; | 1233 | efx->interrupt_mode = EFX_INT_MODE_LEGACY; |
1242 | } | 1234 | } |
1243 | } | 1235 | } |
1244 | 1236 | ||
1245 | /* Assume legacy interrupts */ | 1237 | /* Assume legacy interrupts */ |
1246 | if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { | 1238 | if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { |
1247 | efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); | 1239 | efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); |
1248 | efx->n_rx_channels = 1; | 1240 | efx->n_rx_channels = 1; |
1249 | efx->n_tx_channels = 1; | 1241 | efx->n_tx_channels = 1; |
1250 | efx->legacy_irq = efx->pci_dev->irq; | 1242 | efx->legacy_irq = efx->pci_dev->irq; |
1251 | } | 1243 | } |
1252 | } | 1244 | } |
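
The probe walks a three-step fallback ladder (MSI-X, then single-vector MSI, then the legacy line interrupt) and leans on the old pci_enable_msix() contract, where a positive return value means only that many vectors are available, prompting a shrink-and-retry. A condensed sketch of that retry idiom (pci_enable_msix() was later replaced by pci_enable_msix_range(); this mirrors the pre-3.14 API used in the diff):

    /* Sketch of the shrink-and-retry idiom above. Returns 0 on success,
     * or a negative errno if even the reduced request fails. */
    static int sketch_enable_msix(struct pci_dev *pdev,
                                  struct msix_entry *entries, int want)
    {
            int rc = pci_enable_msix(pdev, entries, want);

            if (rc > 0)                     /* only rc vectors available */
                    rc = pci_enable_msix(pdev, entries, rc);
            return rc;
    }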
1253 | 1245 | ||
1254 | static void efx_remove_interrupts(struct efx_nic *efx) | 1246 | static void efx_remove_interrupts(struct efx_nic *efx) |
1255 | { | 1247 | { |
1256 | struct efx_channel *channel; | 1248 | struct efx_channel *channel; |
1257 | 1249 | ||
1258 | /* Remove MSI/MSI-X interrupts */ | 1250 | /* Remove MSI/MSI-X interrupts */ |
1259 | efx_for_each_channel(channel, efx) | 1251 | efx_for_each_channel(channel, efx) |
1260 | channel->irq = 0; | 1252 | channel->irq = 0; |
1261 | pci_disable_msi(efx->pci_dev); | 1253 | pci_disable_msi(efx->pci_dev); |
1262 | pci_disable_msix(efx->pci_dev); | 1254 | pci_disable_msix(efx->pci_dev); |
1263 | 1255 | ||
1264 | /* Remove legacy interrupt */ | 1256 | /* Remove legacy interrupt */ |
1265 | efx->legacy_irq = 0; | 1257 | efx->legacy_irq = 0; |
1266 | } | 1258 | } |
1267 | 1259 | ||
1268 | struct efx_tx_queue * | 1260 | struct efx_tx_queue * |
1269 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) | 1261 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) |
1270 | { | 1262 | { |
1271 | unsigned tx_channel_offset = | 1263 | unsigned tx_channel_offset = |
1272 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; | 1264 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; |
1273 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || | 1265 | EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || |
1274 | type >= EFX_TXQ_TYPES); | 1266 | type >= EFX_TXQ_TYPES); |
1275 | return &efx->channel[tx_channel_offset + index]->tx_queue[type]; | 1267 | return &efx->channel[tx_channel_offset + index]->tx_queue[type]; |
1276 | } | 1268 | } |
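
Worked example of the offset arithmetic in efx_get_tx_queue(): with separate_tx_channels set, n_channels = 4 and n_tx_channels = 2 give tx_channel_offset = 4 - 2 = 2, so TX queue index 0 maps to channel 2 and index 1 to channel 3, leaving channels 0 and 1 RX-only; without separate_tx_channels the offset is 0 and every channel carries both RX and TX.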
1277 | 1269 | ||
1278 | static void efx_set_channels(struct efx_nic *efx) | 1270 | static void efx_set_channels(struct efx_nic *efx) |
1279 | { | 1271 | { |
1280 | struct efx_channel *channel; | 1272 | struct efx_channel *channel; |
1281 | struct efx_tx_queue *tx_queue; | 1273 | struct efx_tx_queue *tx_queue; |
1282 | unsigned tx_channel_offset = | 1274 | unsigned tx_channel_offset = |
1283 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; | 1275 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; |
1284 | 1276 | ||
1285 | /* Channel pointers were set in efx_init_struct() but we now | 1277 | /* Channel pointers were set in efx_init_struct() but we now |
1286 | * need to clear them for TX queues in any RX-only channels. */ | 1278 | * need to clear them for TX queues in any RX-only channels. */ |
1287 | efx_for_each_channel(channel, efx) { | 1279 | efx_for_each_channel(channel, efx) { |
1288 | if (channel->channel - tx_channel_offset >= | 1280 | if (channel->channel - tx_channel_offset >= |
1289 | efx->n_tx_channels) { | 1281 | efx->n_tx_channels) { |
1290 | efx_for_each_channel_tx_queue(tx_queue, channel) | 1282 | efx_for_each_channel_tx_queue(tx_queue, channel) |
1291 | tx_queue->channel = NULL; | 1283 | tx_queue->channel = NULL; |
1292 | } | 1284 | } |
1293 | } | 1285 | } |
1294 | } | 1286 | } |
1295 | 1287 | ||
1296 | static int efx_probe_nic(struct efx_nic *efx) | 1288 | static int efx_probe_nic(struct efx_nic *efx) |
1297 | { | 1289 | { |
1298 | size_t i; | 1290 | size_t i; |
1299 | int rc; | 1291 | int rc; |
1300 | 1292 | ||
1301 | netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); | 1293 | netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); |
1302 | 1294 | ||
1303 | /* Carry out hardware-type specific initialisation */ | 1295 | /* Carry out hardware-type specific initialisation */ |
1304 | rc = efx->type->probe(efx); | 1296 | rc = efx->type->probe(efx); |
1305 | if (rc) | 1297 | if (rc) |
1306 | return rc; | 1298 | return rc; |
1307 | 1299 | ||
1308 | /* Determine the number of channels and queues by trying to hook | 1300 | /* Determine the number of channels and queues by trying to hook |
1309 | * in MSI-X interrupts. */ | 1301 | * in MSI-X interrupts. */ |
1310 | efx_probe_interrupts(efx); | 1302 | efx_probe_interrupts(efx); |
1311 | 1303 | ||
1312 | if (efx->n_channels > 1) | 1304 | if (efx->n_channels > 1) |
1313 | get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); | 1305 | get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); |
1314 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) | 1306 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) |
1315 | efx->rx_indir_table[i] = i % efx->n_rx_channels; | 1307 | efx->rx_indir_table[i] = i % efx->n_rx_channels; |
1316 | 1308 | ||
1317 | efx_set_channels(efx); | 1309 | efx_set_channels(efx); |
1318 | netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); | 1310 | netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); |
1319 | netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); | 1311 | netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); |
1320 | 1312 | ||
1321 | /* Initialise the interrupt moderation settings */ | 1313 | /* Initialise the interrupt moderation settings */ |
1322 | efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); | 1314 | efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); |
1323 | 1315 | ||
1324 | return 0; | 1316 | return 0; |
1325 | } | 1317 | } |
1326 | 1318 | ||
1327 | static void efx_remove_nic(struct efx_nic *efx) | 1319 | static void efx_remove_nic(struct efx_nic *efx) |
1328 | { | 1320 | { |
1329 | netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); | 1321 | netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); |
1330 | 1322 | ||
1331 | efx_remove_interrupts(efx); | 1323 | efx_remove_interrupts(efx); |
1332 | efx->type->remove(efx); | 1324 | efx->type->remove(efx); |
1333 | } | 1325 | } |
1334 | 1326 | ||
1335 | /************************************************************************** | 1327 | /************************************************************************** |
1336 | * | 1328 | * |
1337 | * NIC startup/shutdown | 1329 | * NIC startup/shutdown |
1338 | * | 1330 | * |
1339 | *************************************************************************/ | 1331 | *************************************************************************/ |
1340 | 1332 | ||
1341 | static int efx_probe_all(struct efx_nic *efx) | 1333 | static int efx_probe_all(struct efx_nic *efx) |
1342 | { | 1334 | { |
1343 | int rc; | 1335 | int rc; |
1344 | 1336 | ||
1345 | rc = efx_probe_nic(efx); | 1337 | rc = efx_probe_nic(efx); |
1346 | if (rc) { | 1338 | if (rc) { |
1347 | netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); | 1339 | netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); |
1348 | goto fail1; | 1340 | goto fail1; |
1349 | } | 1341 | } |
1350 | 1342 | ||
1351 | rc = efx_probe_port(efx); | 1343 | rc = efx_probe_port(efx); |
1352 | if (rc) { | 1344 | if (rc) { |
1353 | netif_err(efx, probe, efx->net_dev, "failed to create port\n"); | 1345 | netif_err(efx, probe, efx->net_dev, "failed to create port\n"); |
1354 | goto fail2; | 1346 | goto fail2; |
1355 | } | 1347 | } |
1356 | 1348 | ||
1357 | efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; | 1349 | efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; |
1358 | rc = efx_probe_channels(efx); | 1350 | rc = efx_probe_channels(efx); |
1359 | if (rc) | 1351 | if (rc) |
1360 | goto fail3; | 1352 | goto fail3; |
1361 | 1353 | ||
1362 | rc = efx_probe_filters(efx); | 1354 | rc = efx_probe_filters(efx); |
1363 | if (rc) { | 1355 | if (rc) { |
1364 | netif_err(efx, probe, efx->net_dev, | 1356 | netif_err(efx, probe, efx->net_dev, |
1365 | "failed to create filter tables\n"); | 1357 | "failed to create filter tables\n"); |
1366 | goto fail4; | 1358 | goto fail4; |
1367 | } | 1359 | } |
1368 | 1360 | ||
1369 | return 0; | 1361 | return 0; |
1370 | 1362 | ||
1371 | fail4: | 1363 | fail4: |
1372 | efx_remove_channels(efx); | 1364 | efx_remove_channels(efx); |
1373 | fail3: | 1365 | fail3: |
1374 | efx_remove_port(efx); | 1366 | efx_remove_port(efx); |
1375 | fail2: | 1367 | fail2: |
1376 | efx_remove_nic(efx); | 1368 | efx_remove_nic(efx); |
1377 | fail1: | 1369 | fail1: |
1378 | return rc; | 1370 | return rc; |
1379 | } | 1371 | } |
1380 | 1372 | ||
1381 | /* Called after previous invocation(s) of efx_stop_all, restarts the | 1373 | /* Called after previous invocation(s) of efx_stop_all, restarts the |
1382 | * port, kernel transmit queue, NAPI processing and hardware interrupts, | 1374 | * port, kernel transmit queue, NAPI processing and hardware interrupts, |
1383 | * and ensures that the port is scheduled to be reconfigured. | 1375 | * and ensures that the port is scheduled to be reconfigured. |
1384 | * This function is safe to call multiple times when the NIC is in any | 1376 | * This function is safe to call multiple times when the NIC is in any |
1385 | * state. */ | 1377 | * state. */ |
1386 | static void efx_start_all(struct efx_nic *efx) | 1378 | static void efx_start_all(struct efx_nic *efx) |
1387 | { | 1379 | { |
1388 | struct efx_channel *channel; | 1380 | struct efx_channel *channel; |
1389 | 1381 | ||
1390 | EFX_ASSERT_RESET_SERIALISED(efx); | 1382 | EFX_ASSERT_RESET_SERIALISED(efx); |
1391 | 1383 | ||
1392 | /* Check that it is appropriate to restart the interface. All | 1384 | /* Check that it is appropriate to restart the interface. All |
1393 | * of these flags are safe to read under just the rtnl lock */ | 1385 | * of these flags are safe to read under just the rtnl lock */ |
1394 | if (efx->port_enabled) | 1386 | if (efx->port_enabled) |
1395 | return; | 1387 | return; |
1396 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) | 1388 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) |
1397 | return; | 1389 | return; |
1398 | if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) | 1390 | if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) |
1399 | return; | 1391 | return; |
1400 | 1392 | ||
1401 | /* Mark the port as enabled so port reconfigurations can start, then | 1393 | /* Mark the port as enabled so port reconfigurations can start, then |
1402 | * restart the transmit interface early so the watchdog timer stops */ | 1394 | * restart the transmit interface early so the watchdog timer stops */ |
1403 | efx_start_port(efx); | 1395 | efx_start_port(efx); |
1404 | 1396 | ||
1405 | efx_for_each_channel(channel, efx) { | 1397 | efx_for_each_channel(channel, efx) { |
1406 | if (efx_dev_registered(efx)) | 1398 | if (efx_dev_registered(efx)) |
1407 | efx_wake_queue(channel); | 1399 | efx_wake_queue(channel); |
1408 | efx_start_channel(channel); | 1400 | efx_start_channel(channel); |
1409 | } | 1401 | } |
1410 | 1402 | ||
1411 | efx_nic_enable_interrupts(efx); | 1403 | efx_nic_enable_interrupts(efx); |
1412 | 1404 | ||
1413 | /* Switch to event based MCDI completions after enabling interrupts. | 1405 | /* Switch to event based MCDI completions after enabling interrupts. |
1414 | * If a reset has been scheduled, then we need to stay in polled mode. | 1406 | * If a reset has been scheduled, then we need to stay in polled mode. |
1415 | * Rather than serialising efx_mcdi_mode_event() [which sleeps] and | 1407 | * Rather than serialising efx_mcdi_mode_event() [which sleeps] and |
1416 | * reset_pending [modified from an atomic context], we instead guarantee | 1408 | * reset_pending [modified from an atomic context], we instead guarantee |
1417 | * that efx_mcdi_mode_poll() isn't reverted erroneously */ | 1409 | * that efx_mcdi_mode_poll() isn't reverted erroneously */ |
1418 | efx_mcdi_mode_event(efx); | 1410 | efx_mcdi_mode_event(efx); |
1419 | if (efx->reset_pending != RESET_TYPE_NONE) | 1411 | if (efx->reset_pending != RESET_TYPE_NONE) |
1420 | efx_mcdi_mode_poll(efx); | 1412 | efx_mcdi_mode_poll(efx); |
1421 | 1413 | ||
1422 | /* Start the hardware monitor if there is one. Otherwise (we're link | 1414 | /* Start the hardware monitor if there is one. Otherwise (we're link |
1423 | * event driven), we have to poll the PHY because after an event queue | 1415 | * event driven), we have to poll the PHY because after an event queue |
1424 | * flush, we could have missed a link state change */ | 1416 | * flush, we could have missed a link state change */ |
1425 | if (efx->type->monitor != NULL) { | 1417 | if (efx->type->monitor != NULL) { |
1426 | queue_delayed_work(efx->workqueue, &efx->monitor_work, | 1418 | queue_delayed_work(efx->workqueue, &efx->monitor_work, |
1427 | efx_monitor_interval); | 1419 | efx_monitor_interval); |
1428 | } else { | 1420 | } else { |
1429 | mutex_lock(&efx->mac_lock); | 1421 | mutex_lock(&efx->mac_lock); |
1430 | if (efx->phy_op->poll(efx)) | 1422 | if (efx->phy_op->poll(efx)) |
1431 | efx_link_status_changed(efx); | 1423 | efx_link_status_changed(efx); |
1432 | mutex_unlock(&efx->mac_lock); | 1424 | mutex_unlock(&efx->mac_lock); |
1433 | } | 1425 | } |
1434 | 1426 | ||
1435 | efx->type->start_stats(efx); | 1427 | efx->type->start_stats(efx); |
1436 | } | 1428 | } |
1437 | 1429 | ||
1438 | /* Flush all delayed work. Should only be called when no more delayed work | 1430 | /* Flush all delayed work. Should only be called when no more delayed work |
1439 | * will be scheduled. This doesn't flush pending online resets (efx_reset), | 1431 | * will be scheduled. This doesn't flush pending online resets (efx_reset), |
1440 | * since we're holding the rtnl_lock at this point. */ | 1432 | * since we're holding the rtnl_lock at this point. */ |
1441 | static void efx_flush_all(struct efx_nic *efx) | 1433 | static void efx_flush_all(struct efx_nic *efx) |
1442 | { | 1434 | { |
1443 | /* Make sure the hardware monitor is stopped */ | 1435 | /* Make sure the hardware monitor is stopped */ |
1444 | cancel_delayed_work_sync(&efx->monitor_work); | 1436 | cancel_delayed_work_sync(&efx->monitor_work); |
1445 | /* Stop scheduled port reconfigurations */ | 1437 | /* Stop scheduled port reconfigurations */ |
1446 | cancel_work_sync(&efx->mac_work); | 1438 | cancel_work_sync(&efx->mac_work); |
1447 | } | 1439 | } |
1448 | 1440 | ||
1449 | /* Quiesce hardware and software without bringing the link down. | 1441 | /* Quiesce hardware and software without bringing the link down. |
1450 | * Safe to call multiple times, when the nic and interface are in any | 1442 | * Safe to call multiple times, when the nic and interface are in any |
1451 | * state. The caller is guaranteed to subsequently be in a position | 1443 | * state. The caller is guaranteed to subsequently be in a position |
1452 | * to modify any hardware and software state they see fit without | 1444 | * to modify any hardware and software state they see fit without |
1453 | * taking locks. */ | 1445 | * taking locks. */ |
1454 | static void efx_stop_all(struct efx_nic *efx) | 1446 | static void efx_stop_all(struct efx_nic *efx) |
1455 | { | 1447 | { |
1456 | struct efx_channel *channel; | 1448 | struct efx_channel *channel; |
1457 | 1449 | ||
1458 | EFX_ASSERT_RESET_SERIALISED(efx); | 1450 | EFX_ASSERT_RESET_SERIALISED(efx); |
1459 | 1451 | ||
1460 | /* port_enabled can be read safely under the rtnl lock */ | 1452 | /* port_enabled can be read safely under the rtnl lock */ |
1461 | if (!efx->port_enabled) | 1453 | if (!efx->port_enabled) |
1462 | return; | 1454 | return; |
1463 | 1455 | ||
1464 | efx->type->stop_stats(efx); | 1456 | efx->type->stop_stats(efx); |
1465 | 1457 | ||
1466 | /* Switch to MCDI polling on Siena before disabling interrupts */ | 1458 | /* Switch to MCDI polling on Siena before disabling interrupts */ |
1467 | efx_mcdi_mode_poll(efx); | 1459 | efx_mcdi_mode_poll(efx); |
1468 | 1460 | ||
1469 | /* Disable interrupts and wait for ISR to complete */ | 1461 | /* Disable interrupts and wait for ISR to complete */ |
1470 | efx_nic_disable_interrupts(efx); | 1462 | efx_nic_disable_interrupts(efx); |
1471 | if (efx->legacy_irq) | 1463 | if (efx->legacy_irq) |
1472 | synchronize_irq(efx->legacy_irq); | 1464 | synchronize_irq(efx->legacy_irq); |
1473 | efx_for_each_channel(channel, efx) { | 1465 | efx_for_each_channel(channel, efx) { |
1474 | if (channel->irq) | 1466 | if (channel->irq) |
1475 | synchronize_irq(channel->irq); | 1467 | synchronize_irq(channel->irq); |
1476 | } | 1468 | } |
1477 | 1469 | ||
1478 | /* Stop all NAPI processing and synchronous rx refills */ | 1470 | /* Stop all NAPI processing and synchronous rx refills */ |
1479 | efx_for_each_channel(channel, efx) | 1471 | efx_for_each_channel(channel, efx) |
1480 | efx_stop_channel(channel); | 1472 | efx_stop_channel(channel); |
1481 | 1473 | ||
1482 | /* Stop all asynchronous port reconfigurations. Since all | 1474 | /* Stop all asynchronous port reconfigurations. Since all |
1483 | * event processing has already been stopped, there is no | 1475 | * event processing has already been stopped, there is no |
1484 | * window to lose phy events */ | 1476 | * window to lose phy events */ |
1485 | efx_stop_port(efx); | 1477 | efx_stop_port(efx); |
1486 | 1478 | ||
1487 | /* Flush efx_mac_work(), refill_workqueue, monitor_work */ | 1479 | /* Flush efx_mac_work(), refill_workqueue, monitor_work */ |
1488 | efx_flush_all(efx); | 1480 | efx_flush_all(efx); |
1489 | 1481 | ||
1490 | /* Stop the kernel transmit interface late, so the watchdog | 1482 | /* Stop the kernel transmit interface late, so the watchdog |
1491 | * timer isn't ticking over the flush */ | 1483 | * timer isn't ticking over the flush */ |
1492 | if (efx_dev_registered(efx)) { | 1484 | if (efx_dev_registered(efx)) { |
1493 | struct efx_channel *channel; | 1485 | struct efx_channel *channel; |
1494 | efx_for_each_channel(channel, efx) | 1486 | efx_for_each_channel(channel, efx) |
1495 | efx_stop_queue(channel); | 1487 | efx_stop_queue(channel); |
1496 | netif_tx_lock_bh(efx->net_dev); | 1488 | netif_tx_lock_bh(efx->net_dev); |
1497 | netif_tx_unlock_bh(efx->net_dev); | 1489 | netif_tx_unlock_bh(efx->net_dev); |
1498 | } | 1490 | } |
1499 | } | 1491 | } |
1500 | 1492 | ||
1501 | static void efx_remove_all(struct efx_nic *efx) | 1493 | static void efx_remove_all(struct efx_nic *efx) |
1502 | { | 1494 | { |
1503 | efx_remove_filters(efx); | 1495 | efx_remove_filters(efx); |
1504 | efx_remove_channels(efx); | 1496 | efx_remove_channels(efx); |
1505 | efx_remove_port(efx); | 1497 | efx_remove_port(efx); |
1506 | efx_remove_nic(efx); | 1498 | efx_remove_nic(efx); |
1507 | } | 1499 | } |
1508 | 1500 | ||
1509 | /************************************************************************** | 1501 | /************************************************************************** |
1510 | * | 1502 | * |
1511 | * Interrupt moderation | 1503 | * Interrupt moderation |
1512 | * | 1504 | * |
1513 | **************************************************************************/ | 1505 | **************************************************************************/ |
1514 | 1506 | ||
1515 | static unsigned irq_mod_ticks(int usecs, int resolution) | 1507 | static unsigned irq_mod_ticks(int usecs, int resolution) |
1516 | { | 1508 | { |
1517 | if (usecs <= 0) | 1509 | if (usecs <= 0) |
1518 | return 0; /* cannot receive interrupts ahead of time :-) */ | 1510 | return 0; /* cannot receive interrupts ahead of time :-) */ |
1519 | if (usecs < resolution) | 1511 | if (usecs < resolution) |
1520 | return 1; /* never round down to 0 */ | 1512 | return 1; /* never round down to 0 */ |
1521 | return usecs / resolution; | 1513 | return usecs / resolution; |
1522 | } | 1514 | } |
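
For concreteness, assuming the hardware's EFX_IRQ_MOD_RESOLUTION is 5 µs (the Falcon-era value; treat it as illustrative): irq_mod_ticks(100, 5) yields 20 ticks, irq_mod_ticks(3, 5) rounds up to 1 tick rather than down to 0, and irq_mod_ticks(0, 5) returns 0, which disables moderation entirely.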
1523 | 1515 | ||
1524 | /* Set interrupt moderation parameters */ | 1516 | /* Set interrupt moderation parameters */ |
1525 | void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, | 1517 | void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, |
1526 | bool rx_adaptive) | 1518 | bool rx_adaptive) |
1527 | { | 1519 | { |
1528 | struct efx_channel *channel; | 1520 | struct efx_channel *channel; |
1529 | unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); | 1521 | unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); |
1530 | unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); | 1522 | unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); |
1531 | 1523 | ||
1532 | EFX_ASSERT_RESET_SERIALISED(efx); | 1524 | EFX_ASSERT_RESET_SERIALISED(efx); |
1533 | 1525 | ||
1534 | efx->irq_rx_adaptive = rx_adaptive; | 1526 | efx->irq_rx_adaptive = rx_adaptive; |
1535 | efx->irq_rx_moderation = rx_ticks; | 1527 | efx->irq_rx_moderation = rx_ticks; |
1536 | efx_for_each_channel(channel, efx) { | 1528 | efx_for_each_channel(channel, efx) { |
1537 | if (efx_channel_get_rx_queue(channel)) | 1529 | if (efx_channel_get_rx_queue(channel)) |
1538 | channel->irq_moderation = rx_ticks; | 1530 | channel->irq_moderation = rx_ticks; |
1539 | else if (efx_channel_get_tx_queue(channel, 0)) | 1531 | else if (efx_channel_get_tx_queue(channel, 0)) |
1540 | channel->irq_moderation = tx_ticks; | 1532 | channel->irq_moderation = tx_ticks; |
1541 | } | 1533 | } |
1542 | } | 1534 | } |
1543 | 1535 | ||
1544 | /************************************************************************** | 1536 | /************************************************************************** |
1545 | * | 1537 | * |
1546 | * Hardware monitor | 1538 | * Hardware monitor |
1547 | * | 1539 | * |
1548 | **************************************************************************/ | 1540 | **************************************************************************/ |
1549 | 1541 | ||
1550 | /* Run periodically off the general workqueue */ | 1542 | /* Run periodically off the general workqueue */ |
1551 | static void efx_monitor(struct work_struct *data) | 1543 | static void efx_monitor(struct work_struct *data) |
1552 | { | 1544 | { |
1553 | struct efx_nic *efx = container_of(data, struct efx_nic, | 1545 | struct efx_nic *efx = container_of(data, struct efx_nic, |
1554 | monitor_work.work); | 1546 | monitor_work.work); |
1555 | 1547 | ||
1556 | netif_vdbg(efx, timer, efx->net_dev, | 1548 | netif_vdbg(efx, timer, efx->net_dev, |
1557 | "hardware monitor executing on CPU %d\n", | 1549 | "hardware monitor executing on CPU %d\n", |
1558 | raw_smp_processor_id()); | 1550 | raw_smp_processor_id()); |
1559 | BUG_ON(efx->type->monitor == NULL); | 1551 | BUG_ON(efx->type->monitor == NULL); |
1560 | 1552 | ||
1561 | /* If the mac_lock is already held then it is likely a port | 1553 | /* If the mac_lock is already held then it is likely a port |
1562 | * reconfiguration is already in place, which will do | 1554 | * reconfiguration is already in place, which will do |
1563 | * most of the work of monitor() anyway. */ | 1555 | * most of the work of monitor() anyway. */ |
1564 | if (mutex_trylock(&efx->mac_lock)) { | 1556 | if (mutex_trylock(&efx->mac_lock)) { |
1565 | if (efx->port_enabled) | 1557 | if (efx->port_enabled) |
1566 | efx->type->monitor(efx); | 1558 | efx->type->monitor(efx); |
1567 | mutex_unlock(&efx->mac_lock); | 1559 | mutex_unlock(&efx->mac_lock); |
1568 | } | 1560 | } |
1569 | 1561 | ||
1570 | queue_delayed_work(efx->workqueue, &efx->monitor_work, | 1562 | queue_delayed_work(efx->workqueue, &efx->monitor_work, |
1571 | efx_monitor_interval); | 1563 | efx_monitor_interval); |
1572 | } | 1564 | } |
1573 | 1565 | ||
1574 | /************************************************************************** | 1566 | /************************************************************************** |
1575 | * | 1567 | * |
1576 | * ioctls | 1568 | * ioctls |
1577 | * | 1569 | * |
1578 | *************************************************************************/ | 1570 | *************************************************************************/ |
1579 | 1571 | ||
1580 | /* Net device ioctl | 1572 | /* Net device ioctl |
1581 | * Context: process, rtnl_lock() held. | 1573 | * Context: process, rtnl_lock() held. |
1582 | */ | 1574 | */ |
1583 | static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) | 1575 | static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) |
1584 | { | 1576 | { |
1585 | struct efx_nic *efx = netdev_priv(net_dev); | 1577 | struct efx_nic *efx = netdev_priv(net_dev); |
1586 | struct mii_ioctl_data *data = if_mii(ifr); | 1578 | struct mii_ioctl_data *data = if_mii(ifr); |
1587 | 1579 | ||
1588 | EFX_ASSERT_RESET_SERIALISED(efx); | 1580 | EFX_ASSERT_RESET_SERIALISED(efx); |
1589 | 1581 | ||
1590 | /* Convert phy_id from older PRTAD/DEVAD format */ | 1582 | /* Convert phy_id from older PRTAD/DEVAD format */ |
1591 | if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && | 1583 | if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && |
            (data->phy_id & 0xfc00) == 0x0400)
                data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

        return mdio_mii_ioctl(&efx->mdio, data, cmd);
}

/**************************************************************************
 *
 * NAPI interface
 *
 **************************************************************************/

static int efx_init_napi(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx) {
                channel->napi_dev = efx->net_dev;
                netif_napi_add(channel->napi_dev, &channel->napi_str,
                               efx_poll, napi_weight);
        }
        return 0;
}

static void efx_fini_napi(struct efx_nic *efx)
{
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx) {
                if (channel->napi_dev)
                        netif_napi_del(&channel->napi_str);
                channel->napi_dev = NULL;
        }
}

/**************************************************************************
 *
 * Kernel netpoll interface
 *
 *************************************************************************/

#ifdef CONFIG_NET_POLL_CONTROLLER

/* Although in the common case interrupts will be disabled, this is not
 * guaranteed. However, all our work happens inside the NAPI callback,
 * so no locking is required.
 */
static void efx_netpoll(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_channel *channel;

        efx_for_each_channel(channel, efx)
                efx_schedule_channel(channel);
}

#endif
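
For orientation, a minimal sketch of the poll-callback shape that netif_napi_add() registers above. efx_poll itself is defined earlier in this file; the body below is illustrative only, following the 2010-era NAPI contract, and the name and budget handling are invented.

/* Illustrative kernel fragment, not part of this driver or commit. */
static int example_poll(struct napi_struct *napi, int budget)
{
        int spent = 0;

        /* ... process up to 'budget' received packets here ... */

        if (spent < budget) {
                /* Work exhausted: leave polling mode and re-enable the
                 * channel's interrupt (a real driver must also handle the
                 * race with events arriving here). */
                napi_complete(napi);
        }
        return spent;
}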

/**************************************************************************
 *
 * Kernel net device interface
 *
 *************************************************************************/

/* Context: process, rtnl_lock() held. */
static int efx_net_open(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        EFX_ASSERT_RESET_SERIALISED(efx);

        netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
                  raw_smp_processor_id());

        if (efx->state == STATE_DISABLED)
                return -EIO;
        if (efx->phy_mode & PHY_MODE_SPECIAL)
                return -EBUSY;
        if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
                return -EIO;

        /* Notify the kernel of the link state polled during driver load,
         * before the monitor starts running */
        efx_link_status_changed(efx);

        efx_start_all(efx);
        return 0;
}

/* Context: process, rtnl_lock() held.
 * Note that the kernel will ignore our return code; this method
 * should really be a void.
 */
static int efx_net_stop(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
                  raw_smp_processor_id());

        if (efx->state != STATE_DISABLED) {
                /* Stop the device and flush all the channels */
                efx_stop_all(efx);
                efx_fini_channels(efx);
                efx_init_channels(efx);
        }

        return 0;
}

/* Context: process, dev_base_lock or RTNL held, non-blocking. */
static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
                                               struct rtnl_link_stats64 *stats)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct efx_mac_stats *mac_stats = &efx->mac_stats;

        spin_lock_bh(&efx->stats_lock);
        efx->type->update_stats(efx);
        spin_unlock_bh(&efx->stats_lock);

        stats->rx_packets = mac_stats->rx_packets;
        stats->tx_packets = mac_stats->tx_packets;
        stats->rx_bytes = mac_stats->rx_bytes;
        stats->tx_bytes = mac_stats->tx_bytes;
        stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
        stats->multicast = mac_stats->rx_multicast;
        stats->collisions = mac_stats->tx_collision;
        stats->rx_length_errors = (mac_stats->rx_gtjumbo +
                                   mac_stats->rx_length_error);
        stats->rx_crc_errors = mac_stats->rx_bad;
        stats->rx_frame_errors = mac_stats->rx_align_error;
        stats->rx_fifo_errors = mac_stats->rx_overflow;
        stats->rx_missed_errors = mac_stats->rx_missed;
        stats->tx_window_errors = mac_stats->tx_late_collision;

        stats->rx_errors = (stats->rx_length_errors +
                            stats->rx_crc_errors +
                            stats->rx_frame_errors +
                            mac_stats->rx_symbol_error);
        stats->tx_errors = (stats->tx_window_errors +
                            mac_stats->tx_bad);

        return stats;
}
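
The error roll-ups above are plain sums over the MAC counters. A standalone sketch of the same arithmetic (counter values invented for illustration):

#include <stdio.h>
#include <stdint.h>

/* Standalone sketch, not driver code: mirrors how efx_net_stats() rolls
 * individual MAC counters up into rtnl rx_errors/tx_errors. */
int main(void)
{
        uint64_t rx_gtjumbo = 2, rx_length_error = 3, rx_bad = 5;
        uint64_t rx_align_error = 1, rx_symbol_error = 4;
        uint64_t tx_late_collision = 6, tx_bad = 7;

        uint64_t rx_length_errors = rx_gtjumbo + rx_length_error;
        uint64_t rx_errors = rx_length_errors + rx_bad +
                             rx_align_error + rx_symbol_error;
        uint64_t tx_errors = tx_late_collision + tx_bad;

        printf("rx_errors=%llu tx_errors=%llu\n",       /* 15 and 13 */
               (unsigned long long)rx_errors,
               (unsigned long long)tx_errors);
        return 0;
}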

/* Context: netif_tx_lock held, BHs disabled. */
static void efx_watchdog(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);

        netif_err(efx, tx_err, efx->net_dev,
                  "TX stuck with port_enabled=%d: resetting channels\n",
                  efx->port_enabled);

        efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
}

/* Context: process, rtnl_lock() held. */
static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        int rc = 0;

        EFX_ASSERT_RESET_SERIALISED(efx);

        if (new_mtu > EFX_MAX_MTU)
                return -EINVAL;

        efx_stop_all(efx);

        netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);

        efx_fini_channels(efx);

        mutex_lock(&efx->mac_lock);
        /* Reconfigure the MAC before enabling the dma queues so that
         * the RX buffers don't overflow */
        net_dev->mtu = new_mtu;
        efx->mac_op->reconfigure(efx);
        mutex_unlock(&efx->mac_lock);

        efx_init_channels(efx);

        efx_start_all(efx);
        return rc;
}

static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct sockaddr *addr = data;
        char *new_addr = addr->sa_data;

        EFX_ASSERT_RESET_SERIALISED(efx);

        if (!is_valid_ether_addr(new_addr)) {
                netif_err(efx, drv, efx->net_dev,
                          "invalid ethernet MAC address requested: %pM\n",
                          new_addr);
                return -EINVAL;
        }

        memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);

        /* Reconfigure the MAC */
        mutex_lock(&efx->mac_lock);
        efx->mac_op->reconfigure(efx);
        mutex_unlock(&efx->mac_lock);

        return 0;
}

/* Context: netif_addr_lock held, BHs disabled. */
static void efx_set_multicast_list(struct net_device *net_dev)
{
        struct efx_nic *efx = netdev_priv(net_dev);
        struct netdev_hw_addr *ha;
        union efx_multicast_hash *mc_hash = &efx->multicast_hash;
        u32 crc;
        int bit;

        efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);

        /* Build multicast hash table */
        if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
                memset(mc_hash, 0xff, sizeof(*mc_hash));
        } else {
                memset(mc_hash, 0x00, sizeof(*mc_hash));
                netdev_for_each_mc_addr(ha, net_dev) {
                        crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
                        set_bit_le(bit, mc_hash->byte);
                }

                /* Broadcast packets go through the multicast hash filter.
                 * ether_crc_le() of the broadcast address is 0xbe2612ff
                 * so we always add bit 0xff to the mask.
                 */
                set_bit_le(0xff, mc_hash->byte);
        }

        if (efx->port_enabled)
                queue_work(efx->workqueue, &efx->mac_work);
        /* Otherwise efx_start_port() will do this */
}
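
The 0xbe2612ff claim in the comment above is easy to check. Below is a standalone userspace re-implementation of the kernel's ether_crc_le() (CRC-32 with the reflected polynomial 0xedb88320, initial value 0xffffffff, no final inversion), assuming EFX_MCAST_HASH_ENTRIES is 256 so the bit mask is 0xff:

#include <stdio.h>
#include <stdint.h>

/* Standalone sketch, not driver code: re-implements ether_crc_le() to
 * verify that the broadcast address hashes to 0xbe2612ff, i.e. that
 * bit 0xff must always be set in a 256-entry multicast filter. */
static uint32_t ether_crc_le(int length, const unsigned char *data)
{
        uint32_t crc = 0xffffffff;      /* initial value */

        while (--length >= 0) {
                unsigned char octet = *data++;
                int bit;

                for (bit = 8; --bit >= 0; octet >>= 1) {
                        if ((crc ^ octet) & 1)
                                crc = (crc >> 1) ^ 0xedb88320;
                        else
                                crc >>= 1;
                }
        }
        return crc;
}

int main(void)
{
        const unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
        uint32_t crc = ether_crc_le(6, bcast);

        /* Expect crc=0xbe2612ff, bit=255 (0xff) */
        printf("crc=0x%08x bit=%u\n", crc, crc & 0xff);
        return 0;
}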

static const struct net_device_ops efx_netdev_ops = {
        .ndo_open               = efx_net_open,
        .ndo_stop               = efx_net_stop,
        .ndo_get_stats64        = efx_net_stats,
        .ndo_tx_timeout         = efx_watchdog,
        .ndo_start_xmit         = efx_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_do_ioctl           = efx_ioctl,
        .ndo_change_mtu         = efx_change_mtu,
        .ndo_set_mac_address    = efx_set_mac_address,
        .ndo_set_multicast_list = efx_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = efx_netpoll,
#endif
};

static void efx_update_name(struct efx_nic *efx)
{
        strcpy(efx->name, efx->net_dev->name);
        efx_mtd_rename(efx);
        efx_set_channel_names(efx);
}

static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
{
        struct net_device *net_dev = ptr;

        if (net_dev->netdev_ops == &efx_netdev_ops &&
            event == NETDEV_CHANGENAME)
                efx_update_name(netdev_priv(net_dev));

        return NOTIFY_DONE;
}

static struct notifier_block efx_netdev_notifier = {
        .notifier_call = efx_netdev_event,
};
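
A notifier_block like this only fires once it is registered with the netdev notifier chain; the driver does that in its module init/exit paths, which fall outside this excerpt. A minimal sketch of that standard wiring (function names here are invented):

/* Illustrative kernel fragment, not part of this driver or commit. */
static int __init example_init(void)
{
        /* Deliver NETDEV_* events (e.g. NETDEV_CHANGENAME) to our callback */
        return register_netdevice_notifier(&efx_netdev_notifier);
}

static void __exit example_exit(void)
{
        unregister_netdevice_notifier(&efx_netdev_notifier);
}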

static ssize_t
show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
        return sprintf(buf, "%d\n", efx->phy_type);
}
static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);

static int efx_register_netdev(struct efx_nic *efx)
{
        struct net_device *net_dev = efx->net_dev;
        int rc;

        net_dev->watchdog_timeo = 5 * HZ;
        net_dev->irq = efx->pci_dev->irq;
        net_dev->netdev_ops = &efx_netdev_ops;
        SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);

        /* Clear MAC statistics */
        efx->mac_op->update_stats(efx);
        memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));

        rtnl_lock();

        rc = dev_alloc_name(net_dev, net_dev->name);
        if (rc < 0)
                goto fail_locked;
        efx_update_name(efx);

        rc = register_netdevice(net_dev);
        if (rc)
                goto fail_locked;

        /* Always start with carrier off; PHY events will detect the link */
        netif_carrier_off(efx->net_dev);

        rtnl_unlock();

        rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
        if (rc) {
                netif_err(efx, drv, efx->net_dev,
                          "failed to init net dev attributes\n");
                goto fail_registered;
        }

        return 0;

fail_locked:
        rtnl_unlock();
        netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
        return rc;

fail_registered:
        unregister_netdev(net_dev);
        return rc;
}

static void efx_unregister_netdev(struct efx_nic *efx)
{
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;

        if (!efx->net_dev)
                return;

        BUG_ON(netdev_priv(efx->net_dev) != efx);

        /* Free up any skbs still remaining. This has to happen before
         * we try to unregister the netdev as running their destructors
         * may be needed to get the device ref. count to 0. */
        efx_for_each_channel(channel, efx) {
                efx_for_each_channel_tx_queue(tx_queue, channel)
                        efx_release_tx_buffers(tx_queue);
        }

        if (efx_dev_registered(efx)) {
                strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
                device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
                unregister_netdev(efx->net_dev);
        }
}

/**************************************************************************
 *
 * Device reset and suspend
 *
 **************************************************************************/

/* Tears down the entire software state and most of the hardware state
 * before reset. */
void efx_reset_down(struct efx_nic *efx, enum reset_type method)
{
        EFX_ASSERT_RESET_SERIALISED(efx);

        efx_stop_all(efx);
        mutex_lock(&efx->mac_lock);
        mutex_lock(&efx->spi_lock);

        efx_fini_channels(efx);
        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
                efx->phy_op->fini(efx);
        efx->type->fini(efx);
}

/* This function will always ensure that the locks acquired in
 * efx_reset_down() are released. A failure return code indicates
 * that we were unable to reinitialise the hardware, and the
 * driver should be disabled. If ok is false, then the rx and tx
 * engines are not restarted, pending a RESET_DISABLE. */
int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
{
        int rc;

        EFX_ASSERT_RESET_SERIALISED(efx);

        rc = efx->type->init(efx);
        if (rc) {
                netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
                goto fail;
        }

        if (!ok)
                goto fail;

        if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) {
                rc = efx->phy_op->init(efx);
                if (rc)
                        goto fail;
                if (efx->phy_op->reconfigure(efx))
                        netif_err(efx, drv, efx->net_dev,
                                  "could not restore PHY settings\n");
        }

        efx->mac_op->reconfigure(efx);

        efx_init_channels(efx);
        efx_restore_filters(efx);

        mutex_unlock(&efx->spi_lock);
        mutex_unlock(&efx->mac_lock);

        efx_start_all(efx);

        return 0;

fail:
        efx->port_initialized = false;

        mutex_unlock(&efx->spi_lock);
        mutex_unlock(&efx->mac_lock);

        return rc;
}

/* Reset the NIC using the specified method. Note that the reset may
 * fail, in which case the card will be left in an unusable state.
 *
 * Caller must hold the rtnl_lock.
 */
int efx_reset(struct efx_nic *efx, enum reset_type method)
{
        int rc, rc2;
        bool disabled;

        netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
                   RESET_TYPE(method));

        efx_reset_down(efx, method);

        rc = efx->type->reset(efx, method);
        if (rc) {
                netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
                goto out;
        }

        /* Allow resets to be rescheduled. */
        efx->reset_pending = RESET_TYPE_NONE;

        /* Reinitialise bus-mastering, which may have been turned off before
         * the reset was scheduled. This is still appropriate, even in the
         * RESET_TYPE_DISABLE since this driver generally assumes the hardware
         * can respond to requests. */
        pci_set_master(efx->pci_dev);

out:
        /* Leave device stopped if necessary */
        disabled = rc || method == RESET_TYPE_DISABLE;
        rc2 = efx_reset_up(efx, method, !disabled);
        if (rc2) {
                disabled = true;
                if (!rc)
                        rc = rc2;
        }

        if (disabled) {
                dev_close(efx->net_dev);
                netif_err(efx, drv, efx->net_dev, "has been disabled\n");
                efx->state = STATE_DISABLED;
        } else {
                netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
        }
        return rc;
}

/* The worker thread exists so that code that cannot sleep can
 * schedule a reset for later.
 */
static void efx_reset_work(struct work_struct *data)
{
        struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);

        if (efx->reset_pending == RESET_TYPE_NONE)
                return;

        /* If we're not RUNNING then don't reset. Leave the reset_pending
         * flag set so that efx_pci_probe_main will be retried */
        if (efx->state != STATE_RUNNING) {
                netif_info(efx, drv, efx->net_dev,
                           "scheduled reset quenched. NIC not RUNNING\n");
                return;
        }

        rtnl_lock();
        (void)efx_reset(efx, efx->reset_pending);
        rtnl_unlock();
}

void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{
        enum reset_type method;

        if (efx->reset_pending != RESET_TYPE_NONE) {
                netif_info(efx, drv, efx->net_dev,
                           "quenching already scheduled reset\n");
                return;
        }

        switch (type) {
        case RESET_TYPE_INVISIBLE:
        case RESET_TYPE_ALL:
        case RESET_TYPE_WORLD:
        case RESET_TYPE_DISABLE:
                method = type;
                break;
        case RESET_TYPE_RX_RECOVERY:
        case RESET_TYPE_RX_DESC_FETCH:
        case RESET_TYPE_TX_DESC_FETCH:
        case RESET_TYPE_TX_SKIP:
                method = RESET_TYPE_INVISIBLE;
                break;
        case RESET_TYPE_MC_FAILURE:
        default:
                method = RESET_TYPE_ALL;
                break;
        }

        if (method != type)
                netif_dbg(efx, drv, efx->net_dev,
                          "scheduling %s reset for %s\n",
                          RESET_TYPE(method), RESET_TYPE(type));
        else
                netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
                          RESET_TYPE(method));

        efx->reset_pending = method;

        /* efx_process_channel() will no longer read events once a
         * reset is scheduled. So switch back to poll'd MCDI completions. */
        efx_mcdi_mode_poll(efx);

        queue_work(reset_workqueue, &efx->reset_work);
}
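
The switch above collapses the many schedulable reset types onto a few actual reset methods. A standalone, table-driven sketch of the same mapping (enum values and their ordering are invented for illustration):

#include <stdio.h>

/* Standalone sketch, not driver code: a table-driven version of the
 * type -> method collapse performed by efx_schedule_reset() above. */
enum reset_type {
        RESET_TYPE_INVISIBLE, RESET_TYPE_ALL, RESET_TYPE_WORLD,
        RESET_TYPE_DISABLE, RESET_TYPE_RX_RECOVERY, RESET_TYPE_RX_DESC_FETCH,
        RESET_TYPE_TX_DESC_FETCH, RESET_TYPE_TX_SKIP, RESET_TYPE_MC_FAILURE,
        RESET_TYPE_MAX
};

static const enum reset_type method_for[RESET_TYPE_MAX] = {
        [RESET_TYPE_INVISIBLE]     = RESET_TYPE_INVISIBLE,
        [RESET_TYPE_ALL]           = RESET_TYPE_ALL,
        [RESET_TYPE_WORLD]         = RESET_TYPE_WORLD,
        [RESET_TYPE_DISABLE]       = RESET_TYPE_DISABLE,
        [RESET_TYPE_RX_RECOVERY]   = RESET_TYPE_INVISIBLE,
        [RESET_TYPE_RX_DESC_FETCH] = RESET_TYPE_INVISIBLE,
        [RESET_TYPE_TX_DESC_FETCH] = RESET_TYPE_INVISIBLE,
        [RESET_TYPE_TX_SKIP]       = RESET_TYPE_INVISIBLE,
        [RESET_TYPE_MC_FAILURE]    = RESET_TYPE_ALL,
};

int main(void)
{
        enum reset_type type = RESET_TYPE_TX_SKIP;

        printf("type %d -> method %d\n", type, method_for[type]);
        return 0;
}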

/**************************************************************************
 *
 * List of NICs we support
 *
 **************************************************************************/

/* PCI device ID table */
static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
        {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
         .driver_data = (unsigned long) &falcon_a1_nic_type},
        {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
         .driver_data = (unsigned long) &falcon_b0_nic_type},
        {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID),
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID),
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {0}     /* end of list */
};

/**************************************************************************
 *
 * Dummy PHY/MAC operations
 *
 * Can be used for some unimplemented operations
 * Needed so all function pointers are valid and do not have to be tested
 * before use
 *
 **************************************************************************/
int efx_port_dummy_op_int(struct efx_nic *efx)
{
        return 0;
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
-void efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
-{
-}
-bool efx_port_dummy_op_poll(struct efx_nic *efx)
+
+static bool efx_port_dummy_op_poll(struct efx_nic *efx)
{
        return false;
}

static struct efx_phy_operations efx_dummy_phy_operations = {
        .init           = efx_port_dummy_op_int,
        .reconfigure    = efx_port_dummy_op_int,
        .poll           = efx_port_dummy_op_poll,
        .fini           = efx_port_dummy_op_void,
};
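
The dummy ops above are a C "null object": every function pointer gets a harmless default so callers never need a NULL check. A standalone sketch of the same pattern:

#include <stdio.h>
#include <stdbool.h>

/* Standalone sketch, not driver code: fill every function pointer with a
 * harmless default so callers can invoke them unconditionally. */
struct phy_ops {
        int  (*init)(void);
        bool (*poll)(void);
};

static int  dummy_int(void)  { return 0; }
static bool dummy_poll(void) { return false; }

static const struct phy_ops dummy_ops = {
        .init = dummy_int,
        .poll = dummy_poll,
};

int main(void)
{
        /* Until a real PHY driver is bound, point at the dummy ops */
        const struct phy_ops *ops = &dummy_ops;

        if (ops->init() == 0)
                printf("poll -> %d\n", ops->poll()); /* pointers always valid */
        return 0;
}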

/**************************************************************************
 *
 * Data housekeeping
 *
 **************************************************************************/

/* This zeroes out and then fills in the invariants in a struct
 * efx_nic (including all sub-structures).
 */
static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
                           struct pci_dev *pci_dev, struct net_device *net_dev)
{
        int i;

        /* Initialise common structures */
        memset(efx, 0, sizeof(*efx));
        spin_lock_init(&efx->biu_lock);
        mutex_init(&efx->mdio_lock);
        mutex_init(&efx->spi_lock);
#ifdef CONFIG_SFC_MTD
        INIT_LIST_HEAD(&efx->mtd_list);
#endif
        INIT_WORK(&efx->reset_work, efx_reset_work);
        INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
        efx->pci_dev = pci_dev;
        efx->msg_enable = debug;
        efx->state = STATE_INIT;
        efx->reset_pending = RESET_TYPE_NONE;
        strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));

        efx->net_dev = net_dev;
        efx->rx_checksum_enabled = true;
        spin_lock_init(&efx->stats_lock);
        mutex_init(&efx->mac_lock);
        efx->mac_op = type->default_mac_ops;
        efx->phy_op = &efx_dummy_phy_operations;
        efx->mdio.dev = net_dev;
        INIT_WORK(&efx->mac_work, efx_mac_work);

        for (i = 0; i < EFX_MAX_CHANNELS; i++) {
                efx->channel[i] = efx_alloc_channel(efx, i, NULL);
                if (!efx->channel[i])
                        goto fail;
        }

        efx->type = type;

        EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);

        /* Higher numbered interrupt modes are less capable! */
        efx->interrupt_mode = max(efx->type->max_interrupt_mode,
                                  interrupt_mode);

        /* Would be good to use the net_dev name, but we're too early */
        snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
                 pci_name(pci_dev));
        efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
        if (!efx->workqueue)
                goto fail;

        return 0;

fail:
        efx_fini_struct(efx);
        return -ENOMEM;
}

static void efx_fini_struct(struct efx_nic *efx)
{
        int i;

        for (i = 0; i < EFX_MAX_CHANNELS; i++)
                kfree(efx->channel[i]);

        if (efx->workqueue) {
                destroy_workqueue(efx->workqueue);
                efx->workqueue = NULL;
        }
}
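
Note the pairing above: a failing efx_init_struct() calls the same efx_fini_struct() used on normal teardown, so fini must tolerate a partially constructed object (kfree(NULL) is a no-op, and the workqueue pointer is checked). A standalone sketch of that idiom:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Standalone sketch, not driver code: fini is safe to call on a
 * partially initialised object because the object is zeroed first and
 * free(NULL) is a no-op. */
struct obj {
        int *a;
        int *b;
};

static void obj_fini(struct obj *o)
{
        free(o->a);
        o->a = NULL;
        free(o->b);
        o->b = NULL;
}

static int obj_init(struct obj *o)
{
        memset(o, 0, sizeof(*o));       /* like memset(efx, 0, ...) above */
        o->a = malloc(sizeof(*o->a));
        if (!o->a)
                goto fail;
        o->b = malloc(sizeof(*o->b));
        if (!o->b)
                goto fail;
        return 0;

fail:
        obj_fini(o);                    /* safe on partial init */
        return -1;
}

int main(void)
{
        struct obj o;

        printf("init -> %d\n", obj_init(&o));
        obj_fini(&o);
        return 0;
}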

/**************************************************************************
 *
 * PCI interface
 *
 **************************************************************************/

/* Main body of final NIC shutdown code
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove_main(struct efx_nic *efx)
{
        efx_nic_fini_interrupt(efx);
        efx_fini_channels(efx);
        efx_fini_port(efx);
        efx->type->fini(efx);
        efx_fini_napi(efx);
        efx_remove_all(efx);
}

/* Final NIC shutdown
 * This is called only at module unload (or hotplug removal).
 */
static void efx_pci_remove(struct pci_dev *pci_dev)
{
        struct efx_nic *efx;

        efx = pci_get_drvdata(pci_dev);
        if (!efx)
                return;

        /* Mark the NIC as fini, then stop the interface */
        rtnl_lock();
        efx->state = STATE_FINI;
        dev_close(efx->net_dev);

        /* Allow any queued efx_resets() to complete */
        rtnl_unlock();

        efx_unregister_netdev(efx);

        efx_mtd_remove(efx);

        /* Wait for any scheduled resets to complete. No more will be
         * scheduled from this point because efx_stop_all() has been
         * called, we are no longer registered with driverlink, and
         * the net_device's have been removed. */
        cancel_work_sync(&efx->reset_work);

        efx_pci_remove_main(efx);

        efx_fini_io(efx);
        netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");

        pci_set_drvdata(pci_dev, NULL);
        efx_fini_struct(efx);
        free_netdev(efx->net_dev);
};

/* Main body of NIC initialisation
 * This is called at module load (or hotplug insertion, theoretically).
 */
static int efx_pci_probe_main(struct efx_nic *efx)
{
        int rc;

        /* Do start-of-day initialisation */
        rc = efx_probe_all(efx);
        if (rc)
                goto fail1;

        rc = efx_init_napi(efx);
        if (rc)
                goto fail2;

        rc = efx->type->init(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise NIC\n");
                goto fail3;
        }

        rc = efx_init_port(efx);
        if (rc) {
                netif_err(efx, probe, efx->net_dev,
                          "failed to initialise port\n");
                goto fail4;
        }

        efx_init_channels(efx);

        rc = efx_nic_init_interrupt(efx);
        if (rc)
                goto fail5;

        return 0;

fail5:
        efx_fini_channels(efx);
        efx_fini_port(efx);
fail4:
        efx->type->fini(efx);
fail3:
        efx_fini_napi(efx);
fail2:
        efx_remove_all(efx);
fail1:
        return rc;
}

/* NIC initialisation
 *
 * This is called at module load (or hotplug insertion,
 * theoretically). It sets up PCI mappings, tests and resets the NIC,
 * sets up and registers the network devices with the kernel and hooks
 * the interrupt service routine. It does not prepare the device for
 * transmission; this is left to the first time one of the network
 * interfaces is brought up (i.e. efx_net_open).
 */
static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
                                   const struct pci_device_id *entry)
{
        struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
        struct net_device *net_dev;
        struct efx_nic *efx;
        int i, rc;

        /* Allocate and initialise a struct net_device and struct efx_nic */
        net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
        if (!net_dev)
                return -ENOMEM;
        net_dev->features |= (type->offload_features | NETIF_F_SG |
                              NETIF_F_HIGHDMA | NETIF_F_TSO |
                              NETIF_F_GRO);
        if (type->offload_features & NETIF_F_V6_CSUM)
                net_dev->features |= NETIF_F_TSO6;
        /* Mask for features that also apply to VLAN devices */
        net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
                                   NETIF_F_HIGHDMA | NETIF_F_TSO);
        efx = netdev_priv(net_dev);
        pci_set_drvdata(pci_dev, efx);
        SET_NETDEV_DEV(net_dev, &pci_dev->dev);
        rc = efx_init_struct(efx, type, pci_dev, net_dev);
        if (rc)
                goto fail1;

        netif_info(efx, probe, efx->net_dev,
                   "Solarflare Communications NIC detected\n");

        /* Set up basic I/O (BAR mappings etc) */
        rc = efx_init_io(efx);
        if (rc)
                goto fail2;

        /* No serialisation is required with the reset path because
         * we're in STATE_INIT. */
        for (i = 0; i < 5; i++) {
                rc = efx_pci_probe_main(efx);

                /* Serialise against efx_reset(). No more resets will be
                 * scheduled since efx_stop_all() has been called, and we
                 * have not and never have been registered with either
                 * the rtnetlink or driverlink layers. */
                cancel_work_sync(&efx->reset_work);

                if (rc == 0) {
                        if (efx->reset_pending != RESET_TYPE_NONE) {
                                /* If there was a scheduled reset during
                                 * probe, the NIC is probably hosed anyway */
                                efx_pci_remove_main(efx);
                                rc = -EIO;
                        } else {
                                break;
                        }
                }

                /* Retry if a recoverable reset event has been scheduled */
                if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
                    (efx->reset_pending != RESET_TYPE_ALL))
                        goto fail3;

                efx->reset_pending = RESET_TYPE_NONE;
        }

        if (rc) {
                netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n");
                goto fail4;
        }

        /* Switch to the running state before we expose the device to the OS,
         * so that dev_open()|efx_start_all() will actually start the device */
        efx->state = STATE_RUNNING;

        rc = efx_register_netdev(efx);
        if (rc)
                goto fail5;

        netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");

        rtnl_lock();
        efx_mtd_probe(efx); /* allowed to fail */
        rtnl_unlock();
        return 0;

fail5:
        efx_pci_remove_main(efx);
fail4:
fail3:
        efx_fini_io(efx);
fail2:
        efx_fini_struct(efx);
fail1:
        WARN_ON(rc > 0);
        netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
        free_netdev(net_dev);
        return rc;
}
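
The probe loop above retries the bring-up a bounded number of times, but only while the pending reset is one of the recoverable kinds. A standalone sketch of that control flow (probe_once() and recoverable_reset_pending() are invented stand-ins for efx_pci_probe_main() and the reset_pending checks):

#include <stdio.h>
#include <stdbool.h>

/* Standalone sketch, not driver code: bounded retry, giving up early on
 * an unrecoverable failure. */
static int attempts;

static int probe_once(void)
{
        return ++attempts < 3 ? -1 : 0; /* pretend the first two tries fail */
}

static bool recoverable_reset_pending(void)
{
        return attempts < 3;            /* pretend the failures were recoverable */
}

int main(void)
{
        int i, rc = -1;

        for (i = 0; i < 5; i++) {
                rc = probe_once();
                if (rc == 0)
                        break;
                if (!recoverable_reset_pending())
                        break;          /* unrecoverable: give up early */
        }
        printf("rc=%d after %d attempt(s)\n", rc, attempts);
        return rc;
}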

static int efx_pm_freeze(struct device *dev)
{
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

        efx->state = STATE_FINI;

        netif_device_detach(efx->net_dev);

        efx_stop_all(efx);
        efx_fini_channels(efx);

        return 0;
}

static int efx_pm_thaw(struct device *dev)
{
        struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));

        efx->state = STATE_INIT;

        efx_init_channels(efx);

        mutex_lock(&efx->mac_lock);
        efx->phy_op->reconfigure(efx);
        mutex_unlock(&efx->mac_lock);

        efx_start_all(efx);

        netif_device_attach(efx->net_dev);

        efx->state = STATE_RUNNING;

        efx->type->resume_wol(efx);

        /* Reschedule any quenched resets scheduled during efx_pm_freeze() */
        queue_work(reset_workqueue, &efx->reset_work);

        return 0;
}

static int efx_pm_poweroff(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct efx_nic *efx = pci_get_drvdata(pci_dev);

        efx->type->fini(efx);

        efx->reset_pending = RESET_TYPE_NONE;

        pci_save_state(pci_dev);
        return pci_set_power_state(pci_dev, PCI_D3hot);
}
2547 | 2537 | ||
2548 | /* Used for both resume and restore */ | 2538 | /* Used for both resume and restore */ |
2549 | static int efx_pm_resume(struct device *dev) | 2539 | static int efx_pm_resume(struct device *dev) |
2550 | { | 2540 | { |
2551 | struct pci_dev *pci_dev = to_pci_dev(dev); | 2541 | struct pci_dev *pci_dev = to_pci_dev(dev); |
2552 | struct efx_nic *efx = pci_get_drvdata(pci_dev); | 2542 | struct efx_nic *efx = pci_get_drvdata(pci_dev); |
2553 | int rc; | 2543 | int rc; |
2554 | 2544 | ||
2555 | rc = pci_set_power_state(pci_dev, PCI_D0); | 2545 | rc = pci_set_power_state(pci_dev, PCI_D0); |
2556 | if (rc) | 2546 | if (rc) |
2557 | return rc; | 2547 | return rc; |
2558 | pci_restore_state(pci_dev); | 2548 | pci_restore_state(pci_dev); |
2559 | rc = pci_enable_device(pci_dev); | 2549 | rc = pci_enable_device(pci_dev); |
2560 | if (rc) | 2550 | if (rc) |
2561 | return rc; | 2551 | return rc; |
2562 | pci_set_master(efx->pci_dev); | 2552 | pci_set_master(efx->pci_dev); |
2563 | rc = efx->type->reset(efx, RESET_TYPE_ALL); | 2553 | rc = efx->type->reset(efx, RESET_TYPE_ALL); |
2564 | if (rc) | 2554 | if (rc) |
2565 | return rc; | 2555 | return rc; |
2566 | rc = efx->type->init(efx); | 2556 | rc = efx->type->init(efx); |
2567 | if (rc) | 2557 | if (rc) |
2568 | return rc; | 2558 | return rc; |
2569 | efx_pm_thaw(dev); | 2559 | efx_pm_thaw(dev); |
2570 | return 0; | 2560 | return 0; |
2571 | } | 2561 | } |
2572 | 2562 | ||
2573 | static int efx_pm_suspend(struct device *dev) | 2563 | static int efx_pm_suspend(struct device *dev) |
2574 | { | 2564 | { |
2575 | int rc; | 2565 | int rc; |
2576 | 2566 | ||
2577 | efx_pm_freeze(dev); | 2567 | efx_pm_freeze(dev); |
2578 | rc = efx_pm_poweroff(dev); | 2568 | rc = efx_pm_poweroff(dev); |
2579 | if (rc) | 2569 | if (rc) |
2580 | efx_pm_resume(dev); | 2570 | efx_pm_resume(dev); |
2581 | return rc; | 2571 | return rc; |
2582 | } | 2572 | } |
2583 | 2573 | ||
2584 | static struct dev_pm_ops efx_pm_ops = { | 2574 | static struct dev_pm_ops efx_pm_ops = { |
2585 | .suspend = efx_pm_suspend, | 2575 | .suspend = efx_pm_suspend, |
2586 | .resume = efx_pm_resume, | 2576 | .resume = efx_pm_resume, |
2587 | .freeze = efx_pm_freeze, | 2577 | .freeze = efx_pm_freeze, |
2588 | .thaw = efx_pm_thaw, | 2578 | .thaw = efx_pm_thaw, |
2589 | .poweroff = efx_pm_poweroff, | 2579 | .poweroff = efx_pm_poweroff, |
2590 | .restore = efx_pm_resume, | 2580 | .restore = efx_pm_resume, |
2591 | }; | 2581 | }; |
2592 | 2582 | ||
2593 | static struct pci_driver efx_pci_driver = { | 2583 | static struct pci_driver efx_pci_driver = { |
2594 | .name = KBUILD_MODNAME, | 2584 | .name = KBUILD_MODNAME, |
2595 | .id_table = efx_pci_table, | 2585 | .id_table = efx_pci_table, |
2596 | .probe = efx_pci_probe, | 2586 | .probe = efx_pci_probe, |
2597 | .remove = efx_pci_remove, | 2587 | .remove = efx_pci_remove, |
2598 | .driver.pm = &efx_pm_ops, | 2588 | .driver.pm = &efx_pm_ops, |
2599 | }; | 2589 | }; |
2600 | 2590 | ||
2601 | /************************************************************************** | 2591 | /************************************************************************** |
2602 | * | 2592 | * |
2603 | * Kernel module interface | 2593 | * Kernel module interface |
2604 | * | 2594 | * |
2605 | *************************************************************************/ | 2595 | *************************************************************************/ |
2606 | 2596 | ||
2607 | module_param(interrupt_mode, uint, 0444); | 2597 | module_param(interrupt_mode, uint, 0444); |
2608 | MODULE_PARM_DESC(interrupt_mode, | 2598 | MODULE_PARM_DESC(interrupt_mode, |
2609 | "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); | 2599 | "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); |
2610 | 2600 | ||
2611 | static int __init efx_init_module(void) | 2601 | static int __init efx_init_module(void) |
2612 | { | 2602 | { |
2613 | int rc; | 2603 | int rc; |
2614 | 2604 | ||
2615 | printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); | 2605 | printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); |
2616 | 2606 | ||
2617 | rc = register_netdevice_notifier(&efx_netdev_notifier); | 2607 | rc = register_netdevice_notifier(&efx_netdev_notifier); |
2618 | if (rc) | 2608 | if (rc) |
2619 | goto err_notifier; | 2609 | goto err_notifier; |
2620 | 2610 | ||
2621 | reset_workqueue = create_singlethread_workqueue("sfc_reset"); | 2611 | reset_workqueue = create_singlethread_workqueue("sfc_reset"); |
2622 | if (!reset_workqueue) { | 2612 | if (!reset_workqueue) { |
2623 | rc = -ENOMEM; | 2613 | rc = -ENOMEM; |
2624 | goto err_reset; | 2614 | goto err_reset; |
2625 | } | 2615 | } |
2626 | 2616 | ||
2627 | rc = pci_register_driver(&efx_pci_driver); | 2617 | rc = pci_register_driver(&efx_pci_driver); |
2628 | if (rc < 0) | 2618 | if (rc < 0) |
2629 | goto err_pci; | 2619 | goto err_pci; |
2630 | 2620 | ||
2631 | return 0; | 2621 | return 0; |
2632 | 2622 | ||
2633 | err_pci: | 2623 | err_pci: |
2634 | destroy_workqueue(reset_workqueue); | 2624 | destroy_workqueue(reset_workqueue); |
2635 | err_reset: | 2625 | err_reset: |
2636 | unregister_netdevice_notifier(&efx_netdev_notifier); | 2626 | unregister_netdevice_notifier(&efx_netdev_notifier); |
2637 | err_notifier: | 2627 | err_notifier: |
2638 | return rc; | 2628 | return rc; |
2639 | } | 2629 | } |
2640 | 2630 | ||
2641 | static void __exit efx_exit_module(void) | 2631 | static void __exit efx_exit_module(void) |
2642 | { | 2632 | { |
2643 | printk(KERN_INFO "Solarflare NET driver unloading\n"); | 2633 | printk(KERN_INFO "Solarflare NET driver unloading\n"); |
2644 | 2634 | ||
2645 | pci_unregister_driver(&efx_pci_driver); | 2635 | pci_unregister_driver(&efx_pci_driver); |
2646 | destroy_workqueue(reset_workqueue); | 2636 | destroy_workqueue(reset_workqueue); |
2647 | unregister_netdevice_notifier(&efx_netdev_notifier); | 2637 | unregister_netdevice_notifier(&efx_netdev_notifier); |
2648 | 2638 | ||
2649 | } | 2639 | } |
2650 | 2640 | ||
2651 | module_init(efx_init_module); | 2641 | module_init(efx_init_module); |
2652 | module_exit(efx_exit_module); | 2642 | module_exit(efx_exit_module); |
2653 | 2643 | ||
2654 | MODULE_AUTHOR("Solarflare Communications and " | 2644 | MODULE_AUTHOR("Solarflare Communications and " |
2655 | "Michael Brown <mbrown@fensystems.co.uk>"); | 2645 | "Michael Brown <mbrown@fensystems.co.uk>"); |
2656 | MODULE_DESCRIPTION("Solarflare Communications network driver"); | 2646 | MODULE_DESCRIPTION("Solarflare Communications network driver"); |
2657 | MODULE_LICENSE("GPL"); | 2647 | MODULE_LICENSE("GPL"); |
2658 | MODULE_DEVICE_TABLE(pci, efx_pci_table); | 2648 | MODULE_DEVICE_TABLE(pci, efx_pci_table); |
2659 | 2649 |
drivers/net/sfc/efx.h
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2009 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef EFX_EFX_H | 11 | #ifndef EFX_EFX_H |
12 | #define EFX_EFX_H | 12 | #define EFX_EFX_H |
13 | 13 | ||
14 | #include "net_driver.h" | 14 | #include "net_driver.h" |
15 | #include "filter.h" | 15 | #include "filter.h" |
16 | 16 | ||
17 | /* PCI IDs */ | 17 | /* PCI IDs */ |
18 | #define EFX_VENDID_SFC 0x1924 | 18 | #define EFX_VENDID_SFC 0x1924 |
19 | #define FALCON_A_P_DEVID 0x0703 | 19 | #define FALCON_A_P_DEVID 0x0703 |
20 | #define FALCON_A_S_DEVID 0x6703 | 20 | #define FALCON_A_S_DEVID 0x6703 |
21 | #define FALCON_B_P_DEVID 0x0710 | 21 | #define FALCON_B_P_DEVID 0x0710 |
22 | #define BETHPAGE_A_P_DEVID 0x0803 | 22 | #define BETHPAGE_A_P_DEVID 0x0803 |
23 | #define SIENA_A_P_DEVID 0x0813 | 23 | #define SIENA_A_P_DEVID 0x0813 |
24 | 24 | ||
25 | /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ | 25 | /* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */ |
26 | #define EFX_MEM_BAR 2 | 26 | #define EFX_MEM_BAR 2 |
27 | 27 | ||
28 | /* TX */ | 28 | /* TX */ |
29 | extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); | 29 | extern int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); |
30 | extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); | 30 | extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); |
31 | extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); | 31 | extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue); |
32 | extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); | 32 | extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); |
33 | extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); | 33 | extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); |
34 | extern netdev_tx_t | 34 | extern netdev_tx_t |
35 | efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); | 35 | efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); |
36 | extern netdev_tx_t | 36 | extern netdev_tx_t |
37 | efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); | 37 | efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); |
38 | extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); | 38 | extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); |
39 | extern void efx_stop_queue(struct efx_channel *channel); | 39 | extern void efx_stop_queue(struct efx_channel *channel); |
40 | extern void efx_wake_queue(struct efx_channel *channel); | 40 | extern void efx_wake_queue(struct efx_channel *channel); |
41 | 41 | ||
42 | /* RX */ | 42 | /* RX */ |
43 | extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); | 43 | extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); |
44 | extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); | 44 | extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); |
45 | extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); | 45 | extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); |
46 | extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); | 46 | extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); |
47 | extern void efx_rx_strategy(struct efx_channel *channel); | 47 | extern void efx_rx_strategy(struct efx_channel *channel); |
48 | extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); | 48 | extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); |
49 | extern void efx_rx_slow_fill(unsigned long context); | 49 | extern void efx_rx_slow_fill(unsigned long context); |
50 | extern void __efx_rx_packet(struct efx_channel *channel, | 50 | extern void __efx_rx_packet(struct efx_channel *channel, |
51 | struct efx_rx_buffer *rx_buf, bool checksummed); | 51 | struct efx_rx_buffer *rx_buf, bool checksummed); |
52 | extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | 52 | extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, |
53 | unsigned int len, bool checksummed, bool discard); | 53 | unsigned int len, bool checksummed, bool discard); |
54 | extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); | 54 | extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); |
55 | 55 | ||
56 | #define EFX_MAX_DMAQ_SIZE 4096UL | 56 | #define EFX_MAX_DMAQ_SIZE 4096UL |
57 | #define EFX_DEFAULT_DMAQ_SIZE 1024UL | 57 | #define EFX_DEFAULT_DMAQ_SIZE 1024UL |
58 | #define EFX_MIN_DMAQ_SIZE 512UL | 58 | #define EFX_MIN_DMAQ_SIZE 512UL |
59 | 59 | ||
60 | #define EFX_MAX_EVQ_SIZE 16384UL | 60 | #define EFX_MAX_EVQ_SIZE 16384UL |
61 | #define EFX_MIN_EVQ_SIZE 512UL | 61 | #define EFX_MIN_EVQ_SIZE 512UL |
62 | 62 | ||
63 | /* The smallest [rt]xq_entries that the driver supports. Callers of | 63 | /* The smallest [rt]xq_entries that the driver supports. Callers of |
64 | * efx_wake_queue() assume that they can subsequently send at least one | 64 | * efx_wake_queue() assume that they can subsequently send at least one |
65 | * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ | 65 | * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ |
66 | #define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) | 66 | #define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) |
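As a worked example of this bound (assuming MAX_SKB_FRAGS works out to 18, its usual value with 4 KiB pages in kernels of this era): 2 * 3 * 18 = 108 descriptors, which roundup_pow_of_two() raises to a minimum ring size of 128 entries.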
67 | 67 | ||
68 | /* Filters */ | 68 | /* Filters */ |
69 | extern int efx_probe_filters(struct efx_nic *efx); | 69 | extern int efx_probe_filters(struct efx_nic *efx); |
70 | extern void efx_restore_filters(struct efx_nic *efx); | 70 | extern void efx_restore_filters(struct efx_nic *efx); |
71 | extern void efx_remove_filters(struct efx_nic *efx); | 71 | extern void efx_remove_filters(struct efx_nic *efx); |
72 | extern int efx_filter_insert_filter(struct efx_nic *efx, | 72 | extern int efx_filter_insert_filter(struct efx_nic *efx, |
73 | struct efx_filter_spec *spec, | 73 | struct efx_filter_spec *spec, |
74 | bool replace); | 74 | bool replace); |
75 | extern int efx_filter_remove_filter(struct efx_nic *efx, | 75 | extern int efx_filter_remove_filter(struct efx_nic *efx, |
76 | struct efx_filter_spec *spec); | 76 | struct efx_filter_spec *spec); |
77 | extern void efx_filter_table_clear(struct efx_nic *efx, | 77 | extern void efx_filter_table_clear(struct efx_nic *efx, |
78 | enum efx_filter_table_id table_id, | 78 | enum efx_filter_table_id table_id, |
79 | enum efx_filter_priority priority); | 79 | enum efx_filter_priority priority); |
80 | 80 | ||
81 | /* Channels */ | 81 | /* Channels */ |
82 | extern void efx_process_channel_now(struct efx_channel *channel); | 82 | extern void efx_process_channel_now(struct efx_channel *channel); |
83 | extern int | 83 | extern int |
84 | efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); | 84 | efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries); |
85 | 85 | ||
86 | /* Ports */ | 86 | /* Ports */ |
87 | extern int efx_reconfigure_port(struct efx_nic *efx); | 87 | extern int efx_reconfigure_port(struct efx_nic *efx); |
88 | extern int __efx_reconfigure_port(struct efx_nic *efx); | 88 | extern int __efx_reconfigure_port(struct efx_nic *efx); |
89 | 89 | ||
90 | /* Ethtool support */ | 90 | /* Ethtool support */ |
91 | extern int efx_ethtool_get_settings(struct net_device *net_dev, | ||
92 | struct ethtool_cmd *ecmd); | ||
93 | extern int efx_ethtool_set_settings(struct net_device *net_dev, | ||
94 | struct ethtool_cmd *ecmd); | ||
95 | extern const struct ethtool_ops efx_ethtool_ops; | 91 | extern const struct ethtool_ops efx_ethtool_ops; |
96 | 92 | ||
97 | /* Reset handling */ | 93 | /* Reset handling */ |
98 | extern int efx_reset(struct efx_nic *efx, enum reset_type method); | 94 | extern int efx_reset(struct efx_nic *efx, enum reset_type method); |
99 | extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); | 95 | extern void efx_reset_down(struct efx_nic *efx, enum reset_type method); |
100 | extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); | 96 | extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok); |
101 | 97 | ||
102 | /* Global */ | 98 | /* Global */ |
103 | extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); | 99 | extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); |
104 | extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, | 100 | extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, |
105 | int rx_usecs, bool rx_adaptive); | 101 | int rx_usecs, bool rx_adaptive); |
106 | 102 | ||
107 | /* Dummy PHY ops for PHY drivers */ | 103 | /* Dummy PHY ops for PHY drivers */ |
108 | extern int efx_port_dummy_op_int(struct efx_nic *efx); | 104 | extern int efx_port_dummy_op_int(struct efx_nic *efx); |
109 | extern void efx_port_dummy_op_void(struct efx_nic *efx); | 105 | extern void efx_port_dummy_op_void(struct efx_nic *efx); |
110 | extern void | ||
111 | efx_port_dummy_op_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); | ||
112 | extern bool efx_port_dummy_op_poll(struct efx_nic *efx); | ||
113 | 106 | ||
107 | |||
114 | /* MTD */ | 108 | /* MTD */ |
115 | #ifdef CONFIG_SFC_MTD | 109 | #ifdef CONFIG_SFC_MTD |
116 | extern int efx_mtd_probe(struct efx_nic *efx); | 110 | extern int efx_mtd_probe(struct efx_nic *efx); |
117 | extern void efx_mtd_rename(struct efx_nic *efx); | 111 | extern void efx_mtd_rename(struct efx_nic *efx); |
118 | extern void efx_mtd_remove(struct efx_nic *efx); | 112 | extern void efx_mtd_remove(struct efx_nic *efx); |
119 | #else | 113 | #else |
120 | static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } | 114 | static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; } |
121 | static inline void efx_mtd_rename(struct efx_nic *efx) {} | 115 | static inline void efx_mtd_rename(struct efx_nic *efx) {} |
122 | static inline void efx_mtd_remove(struct efx_nic *efx) {} | 116 | static inline void efx_mtd_remove(struct efx_nic *efx) {} |
123 | #endif | 117 | #endif |
124 | |||
125 | extern unsigned int efx_monitor_interval; | ||
126 | 118 | ||
127 | static inline void efx_schedule_channel(struct efx_channel *channel) | 119 | static inline void efx_schedule_channel(struct efx_channel *channel) |
128 | { | 120 | { |
129 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, | 121 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, |
130 | "channel %d scheduling NAPI poll on CPU%d\n", | 122 | "channel %d scheduling NAPI poll on CPU%d\n", |
131 | channel->channel, raw_smp_processor_id()); | 123 | channel->channel, raw_smp_processor_id()); |
132 | channel->work_pending = true; | 124 | channel->work_pending = true; |
133 | 125 | ||
134 | napi_schedule(&channel->napi_str); | 126 | napi_schedule(&channel->napi_str); |
135 | } | 127 | } |
136 | 128 | ||
137 | extern void efx_link_status_changed(struct efx_nic *efx); | 129 | extern void efx_link_status_changed(struct efx_nic *efx); |
138 | extern void efx_link_set_advertising(struct efx_nic *efx, u32); | 130 | extern void efx_link_set_advertising(struct efx_nic *efx, u32); |
139 | extern void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type); | 131 | extern void efx_link_set_wanted_fc(struct efx_nic *efx, enum efx_fc_type); |
140 | 132 | ||
141 | #endif /* EFX_EFX_H */ | 133 | #endif /* EFX_EFX_H */ |
drivers/net/sfc/ethtool.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2009 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/netdevice.h> | 11 | #include <linux/netdevice.h> |
12 | #include <linux/ethtool.h> | 12 | #include <linux/ethtool.h> |
13 | #include <linux/rtnetlink.h> | 13 | #include <linux/rtnetlink.h> |
14 | #include "net_driver.h" | 14 | #include "net_driver.h" |
15 | #include "workarounds.h" | 15 | #include "workarounds.h" |
16 | #include "selftest.h" | 16 | #include "selftest.h" |
17 | #include "efx.h" | 17 | #include "efx.h" |
18 | #include "filter.h" | 18 | #include "filter.h" |
19 | #include "nic.h" | 19 | #include "nic.h" |
20 | #include "spi.h" | 20 | #include "spi.h" |
21 | #include "mdio_10g.h" | 21 | #include "mdio_10g.h" |
22 | 22 | ||
23 | struct ethtool_string { | 23 | struct ethtool_string { |
24 | char name[ETH_GSTRING_LEN]; | 24 | char name[ETH_GSTRING_LEN]; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | struct efx_ethtool_stat { | 27 | struct efx_ethtool_stat { |
28 | const char *name; | 28 | const char *name; |
29 | enum { | 29 | enum { |
30 | EFX_ETHTOOL_STAT_SOURCE_mac_stats, | 30 | EFX_ETHTOOL_STAT_SOURCE_mac_stats, |
31 | EFX_ETHTOOL_STAT_SOURCE_nic, | 31 | EFX_ETHTOOL_STAT_SOURCE_nic, |
32 | EFX_ETHTOOL_STAT_SOURCE_channel | 32 | EFX_ETHTOOL_STAT_SOURCE_channel |
33 | } source; | 33 | } source; |
34 | unsigned offset; | 34 | unsigned offset; |
35 | u64(*get_stat) (void *field); /* Reader function */ | 35 | u64(*get_stat) (void *field); /* Reader function */ |
36 | }; | 36 | }; |
37 | 37 | ||
38 | /* Initialiser for a struct #efx_ethtool_stat with type-checking */ | 38 | /* Initialiser for a struct #efx_ethtool_stat with type-checking */ |
39 | #define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ | 39 | #define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ |
40 | get_stat_function) { \ | 40 | get_stat_function) { \ |
41 | .name = #stat_name, \ | 41 | .name = #stat_name, \ |
42 | .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ | 42 | .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ |
43 | .offset = ((((field_type *) 0) == \ | 43 | .offset = ((((field_type *) 0) == \ |
44 | &((struct efx_##source_name *)0)->field) ? \ | 44 | &((struct efx_##source_name *)0)->field) ? \ |
45 | offsetof(struct efx_##source_name, field) : \ | 45 | offsetof(struct efx_##source_name, field) : \ |
46 | offsetof(struct efx_##source_name, field)), \ | 46 | offsetof(struct efx_##source_name, field)), \ |
47 | .get_stat = get_stat_function, \ | 47 | .get_stat = get_stat_function, \ |
48 | } | 48 | } |
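The .offset initialiser above is deliberately odd: both arms of the ternary are the same offsetof(), and the comparison exists only so the compiler will reject the initialiser when field is not actually of field_type (incompatible pointer comparison). As a rough sketch — not the exact preprocessor output — EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), built from the helpers below, expands to:

	{
		.name = "tx_packets",
		.source = EFX_ETHTOOL_STAT_SOURCE_mac_stats,
		/* Compiles only if struct efx_mac_stats.tx_packets
		 * really is an unsigned long; both arms are equal. */
		.offset = ((((unsigned long *) 0) ==
			    &((struct efx_mac_stats *) 0)->tx_packets) ?
			   offsetof(struct efx_mac_stats, tx_packets) :
			   offsetof(struct efx_mac_stats, tx_packets)),
		.get_stat = efx_get_ulong_stat,
	}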
49 | 49 | ||
50 | static u64 efx_get_uint_stat(void *field) | 50 | static u64 efx_get_uint_stat(void *field) |
51 | { | 51 | { |
52 | return *(unsigned int *)field; | 52 | return *(unsigned int *)field; |
53 | } | 53 | } |
54 | 54 | ||
55 | static u64 efx_get_ulong_stat(void *field) | 55 | static u64 efx_get_ulong_stat(void *field) |
56 | { | 56 | { |
57 | return *(unsigned long *)field; | 57 | return *(unsigned long *)field; |
58 | } | 58 | } |
59 | 59 | ||
60 | static u64 efx_get_u64_stat(void *field) | 60 | static u64 efx_get_u64_stat(void *field) |
61 | { | 61 | { |
62 | return *(u64 *) field; | 62 | return *(u64 *) field; |
63 | } | 63 | } |
64 | 64 | ||
65 | static u64 efx_get_atomic_stat(void *field) | 65 | static u64 efx_get_atomic_stat(void *field) |
66 | { | 66 | { |
67 | return atomic_read((atomic_t *) field); | 67 | return atomic_read((atomic_t *) field); |
68 | } | 68 | } |
69 | 69 | ||
70 | #define EFX_ETHTOOL_ULONG_MAC_STAT(field) \ | 70 | #define EFX_ETHTOOL_ULONG_MAC_STAT(field) \ |
71 | EFX_ETHTOOL_STAT(field, mac_stats, field, \ | 71 | EFX_ETHTOOL_STAT(field, mac_stats, field, \ |
72 | unsigned long, efx_get_ulong_stat) | 72 | unsigned long, efx_get_ulong_stat) |
73 | 73 | ||
74 | #define EFX_ETHTOOL_U64_MAC_STAT(field) \ | 74 | #define EFX_ETHTOOL_U64_MAC_STAT(field) \ |
75 | EFX_ETHTOOL_STAT(field, mac_stats, field, \ | 75 | EFX_ETHTOOL_STAT(field, mac_stats, field, \ |
76 | u64, efx_get_u64_stat) | 76 | u64, efx_get_u64_stat) |
77 | 77 | ||
78 | #define EFX_ETHTOOL_UINT_NIC_STAT(name) \ | 78 | #define EFX_ETHTOOL_UINT_NIC_STAT(name) \ |
79 | EFX_ETHTOOL_STAT(name, nic, n_##name, \ | 79 | EFX_ETHTOOL_STAT(name, nic, n_##name, \ |
80 | unsigned int, efx_get_uint_stat) | 80 | unsigned int, efx_get_uint_stat) |
81 | 81 | ||
82 | #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ | 82 | #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ |
83 | EFX_ETHTOOL_STAT(field, nic, field, \ | 83 | EFX_ETHTOOL_STAT(field, nic, field, \ |
84 | atomic_t, efx_get_atomic_stat) | 84 | atomic_t, efx_get_atomic_stat) |
85 | 85 | ||
86 | #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ | 86 | #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ |
87 | EFX_ETHTOOL_STAT(field, channel, n_##field, \ | 87 | EFX_ETHTOOL_STAT(field, channel, n_##field, \ |
88 | unsigned int, efx_get_uint_stat) | 88 | unsigned int, efx_get_uint_stat) |
89 | 89 | ||
90 | static struct efx_ethtool_stat efx_ethtool_stats[] = { | 90 | static struct efx_ethtool_stat efx_ethtool_stats[] = { |
91 | EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), | 91 | EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), |
92 | EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), | 92 | EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), |
93 | EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), | 93 | EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), |
94 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), | 94 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), |
95 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad), | 95 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad), |
96 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause), | 96 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause), |
97 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_control), | 97 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_control), |
98 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast), | 98 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast), |
99 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast), | 99 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast), |
100 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast), | 100 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast), |
101 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64), | 101 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64), |
102 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_64), | 102 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_64), |
103 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127), | 103 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127), |
104 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255), | 104 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255), |
105 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511), | 105 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511), |
106 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023), | 106 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023), |
107 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx), | 107 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx), |
108 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo), | 108 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo), |
109 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo), | 109 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo), |
110 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision), | 110 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision), |
111 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision), | 111 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision), |
112 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision), | 112 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision), |
113 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision), | 113 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision), |
114 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred), | 114 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred), |
115 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision), | 115 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision), |
116 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred), | 116 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred), |
117 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), | 117 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), |
118 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), | 118 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), |
119 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), | 119 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), |
120 | EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), | 120 | EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), |
121 | EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), | 121 | EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), |
122 | EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), | 122 | EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), |
123 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets), | 123 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets), |
124 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_good), | 124 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_good), |
125 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad), | 125 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad), |
126 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause), | 126 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause), |
127 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_control), | 127 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_control), |
128 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast), | 128 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast), |
129 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast), | 129 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast), |
130 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast), | 130 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast), |
131 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64), | 131 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64), |
132 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_64), | 132 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_64), |
133 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127), | 133 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127), |
134 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255), | 134 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255), |
135 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511), | 135 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511), |
136 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023), | 136 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023), |
137 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx), | 137 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx), |
138 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo), | 138 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo), |
139 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo), | 139 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo), |
140 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64), | 140 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64), |
141 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx), | 141 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx), |
142 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo), | 142 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo), |
143 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo), | 143 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo), |
144 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow), | 144 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow), |
145 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed), | 145 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed), |
146 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier), | 146 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier), |
147 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error), | 147 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error), |
148 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error), | 148 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error), |
149 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error), | 149 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error), |
150 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error), | 150 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error), |
151 | EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), | 151 | EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), |
152 | EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), | 152 | EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), |
153 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), | 153 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), |
154 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), | 154 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), |
155 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), | 155 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), |
156 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), | 156 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), |
157 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), | 157 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), |
158 | }; | 158 | }; |
159 | 159 | ||
160 | /* Number of ethtool statistics */ | 160 | /* Number of ethtool statistics */ |
161 | #define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) | 161 | #define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) |
162 | 162 | ||
163 | #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB | 163 | #define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB |
164 | 164 | ||
165 | /************************************************************************** | 165 | /************************************************************************** |
166 | * | 166 | * |
167 | * Ethtool operations | 167 | * Ethtool operations |
168 | * | 168 | * |
169 | ************************************************************************** | 169 | ************************************************************************** |
170 | */ | 170 | */ |
171 | 171 | ||
172 | /* Identify device by flashing LEDs */ | 172 | /* Identify device by flashing LEDs */ |
173 | static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count) | 173 | static int efx_ethtool_phys_id(struct net_device *net_dev, u32 count) |
174 | { | 174 | { |
175 | struct efx_nic *efx = netdev_priv(net_dev); | 175 | struct efx_nic *efx = netdev_priv(net_dev); |
176 | 176 | ||
177 | do { | 177 | do { |
178 | efx->type->set_id_led(efx, EFX_LED_ON); | 178 | efx->type->set_id_led(efx, EFX_LED_ON); |
179 | schedule_timeout_interruptible(HZ / 2); | 179 | schedule_timeout_interruptible(HZ / 2); |
180 | 180 | ||
181 | efx->type->set_id_led(efx, EFX_LED_OFF); | 181 | efx->type->set_id_led(efx, EFX_LED_OFF); |
182 | schedule_timeout_interruptible(HZ / 2); | 182 | schedule_timeout_interruptible(HZ / 2); |
183 | } while (!signal_pending(current) && --count != 0); | 183 | } while (!signal_pending(current) && --count != 0); |
184 | 184 | ||
185 | efx->type->set_id_led(efx, EFX_LED_DEFAULT); | 185 | efx->type->set_id_led(efx, EFX_LED_DEFAULT); |
186 | return 0; | 186 | return 0; |
187 | } | 187 | } |
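Note the pre-decrement in the loop condition: a count of 0 wraps on the first --count and so never terminates the loop, leaving the LEDs blinking until a signal arrives — matching the ethtool convention that a duration of 0 means "blink indefinitely".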
188 | 188 | ||
189 | /* This must be called with rtnl_lock held. */ | 189 | /* This must be called with rtnl_lock held. */ |
190 | int efx_ethtool_get_settings(struct net_device *net_dev, | 190 | static int efx_ethtool_get_settings(struct net_device *net_dev, |
191 | struct ethtool_cmd *ecmd) | 191 | struct ethtool_cmd *ecmd) |
192 | { | 192 | { |
193 | struct efx_nic *efx = netdev_priv(net_dev); | 193 | struct efx_nic *efx = netdev_priv(net_dev); |
194 | struct efx_link_state *link_state = &efx->link_state; | 194 | struct efx_link_state *link_state = &efx->link_state; |
195 | 195 | ||
196 | mutex_lock(&efx->mac_lock); | 196 | mutex_lock(&efx->mac_lock); |
197 | efx->phy_op->get_settings(efx, ecmd); | 197 | efx->phy_op->get_settings(efx, ecmd); |
198 | mutex_unlock(&efx->mac_lock); | 198 | mutex_unlock(&efx->mac_lock); |
199 | 199 | ||
200 | /* GMAC does not support 1000Mbps HD */ | 200 | /* GMAC does not support 1000Mbps HD */ |
201 | ecmd->supported &= ~SUPPORTED_1000baseT_Half; | 201 | ecmd->supported &= ~SUPPORTED_1000baseT_Half; |
202 | /* Both MACs support pause frames (bidirectional and respond-only) */ | 202 | /* Both MACs support pause frames (bidirectional and respond-only) */ |
203 | ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | 203 | ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; |
204 | 204 | ||
205 | if (LOOPBACK_INTERNAL(efx)) { | 205 | if (LOOPBACK_INTERNAL(efx)) { |
206 | ecmd->speed = link_state->speed; | 206 | ecmd->speed = link_state->speed; |
207 | ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; | 207 | ecmd->duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF; |
208 | } | 208 | } |
209 | 209 | ||
210 | return 0; | 210 | return 0; |
211 | } | 211 | } |
212 | 212 | ||
213 | /* This must be called with rtnl_lock held. */ | 213 | /* This must be called with rtnl_lock held. */ |
214 | int efx_ethtool_set_settings(struct net_device *net_dev, | 214 | static int efx_ethtool_set_settings(struct net_device *net_dev, |
215 | struct ethtool_cmd *ecmd) | 215 | struct ethtool_cmd *ecmd) |
216 | { | 216 | { |
217 | struct efx_nic *efx = netdev_priv(net_dev); | 217 | struct efx_nic *efx = netdev_priv(net_dev); |
218 | int rc; | 218 | int rc; |
219 | 219 | ||
220 | /* GMAC does not support 1000Mbps HD */ | 220 | /* GMAC does not support 1000Mbps HD */ |
221 | if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { | 221 | if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { |
222 | netif_dbg(efx, drv, efx->net_dev, | 222 | netif_dbg(efx, drv, efx->net_dev, |
223 | "rejecting unsupported 1000Mbps HD setting\n"); | 223 | "rejecting unsupported 1000Mbps HD setting\n"); |
224 | return -EINVAL; | 224 | return -EINVAL; |
225 | } | 225 | } |
226 | 226 | ||
227 | mutex_lock(&efx->mac_lock); | 227 | mutex_lock(&efx->mac_lock); |
228 | rc = efx->phy_op->set_settings(efx, ecmd); | 228 | rc = efx->phy_op->set_settings(efx, ecmd); |
229 | mutex_unlock(&efx->mac_lock); | 229 | mutex_unlock(&efx->mac_lock); |
230 | return rc; | 230 | return rc; |
231 | } | 231 | } |
232 | 232 | ||
233 | static void efx_ethtool_get_drvinfo(struct net_device *net_dev, | 233 | static void efx_ethtool_get_drvinfo(struct net_device *net_dev, |
234 | struct ethtool_drvinfo *info) | 234 | struct ethtool_drvinfo *info) |
235 | { | 235 | { |
236 | struct efx_nic *efx = netdev_priv(net_dev); | 236 | struct efx_nic *efx = netdev_priv(net_dev); |
237 | 237 | ||
238 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); | 238 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
239 | strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); | 239 | strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); |
240 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | 240 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) |
241 | siena_print_fwver(efx, info->fw_version, | 241 | siena_print_fwver(efx, info->fw_version, |
242 | sizeof(info->fw_version)); | 242 | sizeof(info->fw_version)); |
243 | strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); | 243 | strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); |
244 | } | 244 | } |
245 | 245 | ||
246 | static int efx_ethtool_get_regs_len(struct net_device *net_dev) | 246 | static int efx_ethtool_get_regs_len(struct net_device *net_dev) |
247 | { | 247 | { |
248 | return efx_nic_get_regs_len(netdev_priv(net_dev)); | 248 | return efx_nic_get_regs_len(netdev_priv(net_dev)); |
249 | } | 249 | } |
250 | 250 | ||
251 | static void efx_ethtool_get_regs(struct net_device *net_dev, | 251 | static void efx_ethtool_get_regs(struct net_device *net_dev, |
252 | struct ethtool_regs *regs, void *buf) | 252 | struct ethtool_regs *regs, void *buf) |
253 | { | 253 | { |
254 | struct efx_nic *efx = netdev_priv(net_dev); | 254 | struct efx_nic *efx = netdev_priv(net_dev); |
255 | 255 | ||
256 | regs->version = efx->type->revision; | 256 | regs->version = efx->type->revision; |
257 | efx_nic_get_regs(efx, buf); | 257 | efx_nic_get_regs(efx, buf); |
258 | } | 258 | } |
259 | 259 | ||
260 | static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) | 260 | static u32 efx_ethtool_get_msglevel(struct net_device *net_dev) |
261 | { | 261 | { |
262 | struct efx_nic *efx = netdev_priv(net_dev); | 262 | struct efx_nic *efx = netdev_priv(net_dev); |
263 | return efx->msg_enable; | 263 | return efx->msg_enable; |
264 | } | 264 | } |
265 | 265 | ||
266 | static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) | 266 | static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable) |
267 | { | 267 | { |
268 | struct efx_nic *efx = netdev_priv(net_dev); | 268 | struct efx_nic *efx = netdev_priv(net_dev); |
269 | efx->msg_enable = msg_enable; | 269 | efx->msg_enable = msg_enable; |
270 | } | 270 | } |
271 | 271 | ||
272 | /** | 272 | /** |
273 | * efx_fill_test - fill in an individual self-test entry | 273 | * efx_fill_test - fill in an individual self-test entry |
274 | * @test_index: Index of the test | 274 | * @test_index: Index of the test |
275 | * @strings: Ethtool strings, or %NULL | 275 | * @strings: Ethtool strings, or %NULL |
276 | * @data: Ethtool test results, or %NULL | 276 | * @data: Ethtool test results, or %NULL |
277 | * @test: Pointer to test result (used only if data != %NULL) | 277 | * @test: Pointer to test result (used only if data != %NULL) |
278 | * @unit_format: Unit name format (e.g. "chan\%d") | 278 | * @unit_format: Unit name format (e.g. "chan\%d") |
279 | * @unit_id: Unit id (e.g. 0 for "chan0") | 279 | * @unit_id: Unit id (e.g. 0 for "chan0") |
280 | * @test_format: Test name format (e.g. "loopback.\%s.tx_sent") | 280 | * @test_format: Test name format (e.g. "loopback.\%s.tx_sent") |
281 | * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent") | 281 | * @test_id: Test id (e.g. "PHYXS" for "loopback.PHYXS.tx_sent") |
282 | * | 282 | * |
283 | * Fill in an individual self-test entry. | 283 | * Fill in an individual self-test entry. |
284 | */ | 284 | */ |
285 | static void efx_fill_test(unsigned int test_index, | 285 | static void efx_fill_test(unsigned int test_index, |
286 | struct ethtool_string *strings, u64 *data, | 286 | struct ethtool_string *strings, u64 *data, |
287 | int *test, const char *unit_format, int unit_id, | 287 | int *test, const char *unit_format, int unit_id, |
288 | const char *test_format, const char *test_id) | 288 | const char *test_format, const char *test_id) |
289 | { | 289 | { |
290 | struct ethtool_string unit_str, test_str; | 290 | struct ethtool_string unit_str, test_str; |
291 | 291 | ||
292 | /* Fill data value, if applicable */ | 292 | /* Fill data value, if applicable */ |
293 | if (data) | 293 | if (data) |
294 | data[test_index] = *test; | 294 | data[test_index] = *test; |
295 | 295 | ||
296 | /* Fill string, if applicable */ | 296 | /* Fill string, if applicable */ |
297 | if (strings) { | 297 | if (strings) { |
298 | if (strchr(unit_format, '%')) | 298 | if (strchr(unit_format, '%')) |
299 | snprintf(unit_str.name, sizeof(unit_str.name), | 299 | snprintf(unit_str.name, sizeof(unit_str.name), |
300 | unit_format, unit_id); | 300 | unit_format, unit_id); |
301 | else | 301 | else |
302 | strcpy(unit_str.name, unit_format); | 302 | strcpy(unit_str.name, unit_format); |
303 | snprintf(test_str.name, sizeof(test_str.name), | 303 | snprintf(test_str.name, sizeof(test_str.name), |
304 | test_format, test_id); | 304 | test_format, test_id); |
305 | snprintf(strings[test_index].name, | 305 | snprintf(strings[test_index].name, |
306 | sizeof(strings[test_index].name), | 306 | sizeof(strings[test_index].name), |
307 | "%-6s %-24s", unit_str.name, test_str.name); | 307 | "%-6s %-24s", unit_str.name, test_str.name); |
308 | } | 308 | } |
309 | } | 309 | } |
310 | 310 | ||
311 | #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel | 311 | #define EFX_CHANNEL_NAME(_channel) "chan%d", _channel->channel |
312 | #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue | 312 | #define EFX_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue |
313 | #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue | 313 | #define EFX_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue |
314 | #define EFX_LOOPBACK_NAME(_mode, _counter) \ | 314 | #define EFX_LOOPBACK_NAME(_mode, _counter) \ |
315 | "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) | 315 | "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, efx_loopback_mode) |
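Each of these macros expands to a format string plus its argument(s), so a single macro fills both members of efx_fill_test()'s unit_format/unit_id (or test_format/test_id) pair. For instance, the eventq.dma entry built in efx_ethtool_fill_self_tests() below:

	/* EFX_CHANNEL_NAME(channel) expands to the pair
	 * "chan%d", channel->channel ... */
	efx_fill_test(n++, strings, data,
		      &tests->eventq_dma[channel->channel],
		      EFX_CHANNEL_NAME(channel),
		      "eventq.dma", NULL);
	/* ... so for channel 0 this yields the ethtool string
	 * "chan0  eventq.dma" (padded by the "%-6s %-24s" format above). */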
316 | 316 | ||
317 | /** | 317 | /** |
318 | * efx_fill_loopback_test - fill in a block of loopback self-test entries | 318 | * efx_fill_loopback_test - fill in a block of loopback self-test entries |
319 | * @efx: Efx NIC | 319 | * @efx: Efx NIC |
320 | * @lb_tests: Efx loopback self-test results structure | 320 | * @lb_tests: Efx loopback self-test results structure |
321 | * @mode: Loopback test mode | 321 | * @mode: Loopback test mode |
322 | * @test_index: Starting index of the test | 322 | * @test_index: Starting index of the test |
323 | * @strings: Ethtool strings, or %NULL | 323 | * @strings: Ethtool strings, or %NULL |
324 | * @data: Ethtool test results, or %NULL | 324 | * @data: Ethtool test results, or %NULL |
325 | */ | 325 | */ |
326 | static int efx_fill_loopback_test(struct efx_nic *efx, | 326 | static int efx_fill_loopback_test(struct efx_nic *efx, |
327 | struct efx_loopback_self_tests *lb_tests, | 327 | struct efx_loopback_self_tests *lb_tests, |
328 | enum efx_loopback_mode mode, | 328 | enum efx_loopback_mode mode, |
329 | unsigned int test_index, | 329 | unsigned int test_index, |
330 | struct ethtool_string *strings, u64 *data) | 330 | struct ethtool_string *strings, u64 *data) |
331 | { | 331 | { |
332 | struct efx_channel *channel = efx_get_channel(efx, 0); | 332 | struct efx_channel *channel = efx_get_channel(efx, 0); |
333 | struct efx_tx_queue *tx_queue; | 333 | struct efx_tx_queue *tx_queue; |
334 | 334 | ||
335 | efx_for_each_channel_tx_queue(tx_queue, channel) { | 335 | efx_for_each_channel_tx_queue(tx_queue, channel) { |
336 | efx_fill_test(test_index++, strings, data, | 336 | efx_fill_test(test_index++, strings, data, |
337 | &lb_tests->tx_sent[tx_queue->queue], | 337 | &lb_tests->tx_sent[tx_queue->queue], |
338 | EFX_TX_QUEUE_NAME(tx_queue), | 338 | EFX_TX_QUEUE_NAME(tx_queue), |
339 | EFX_LOOPBACK_NAME(mode, "tx_sent")); | 339 | EFX_LOOPBACK_NAME(mode, "tx_sent")); |
340 | efx_fill_test(test_index++, strings, data, | 340 | efx_fill_test(test_index++, strings, data, |
341 | &lb_tests->tx_done[tx_queue->queue], | 341 | &lb_tests->tx_done[tx_queue->queue], |
342 | EFX_TX_QUEUE_NAME(tx_queue), | 342 | EFX_TX_QUEUE_NAME(tx_queue), |
343 | EFX_LOOPBACK_NAME(mode, "tx_done")); | 343 | EFX_LOOPBACK_NAME(mode, "tx_done")); |
344 | } | 344 | } |
345 | efx_fill_test(test_index++, strings, data, | 345 | efx_fill_test(test_index++, strings, data, |
346 | &lb_tests->rx_good, | 346 | &lb_tests->rx_good, |
347 | "rx", 0, | 347 | "rx", 0, |
348 | EFX_LOOPBACK_NAME(mode, "rx_good")); | 348 | EFX_LOOPBACK_NAME(mode, "rx_good")); |
349 | efx_fill_test(test_index++, strings, data, | 349 | efx_fill_test(test_index++, strings, data, |
350 | &lb_tests->rx_bad, | 350 | &lb_tests->rx_bad, |
351 | "rx", 0, | 351 | "rx", 0, |
352 | EFX_LOOPBACK_NAME(mode, "rx_bad")); | 352 | EFX_LOOPBACK_NAME(mode, "rx_bad")); |
353 | 353 | ||
354 | return test_index; | 354 | return test_index; |
355 | } | 355 | } |
356 | 356 | ||
357 | /** | 357 | /** |
358 | * efx_ethtool_fill_self_tests - get self-test details | 358 | * efx_ethtool_fill_self_tests - get self-test details |
359 | * @efx: Efx NIC | 359 | * @efx: Efx NIC |
360 | * @tests: Efx self-test results structure, or %NULL | 360 | * @tests: Efx self-test results structure, or %NULL |
361 | * @strings: Ethtool strings, or %NULL | 361 | * @strings: Ethtool strings, or %NULL |
362 | * @data: Ethtool test results, or %NULL | 362 | * @data: Ethtool test results, or %NULL |
363 | */ | 363 | */ |
364 | static int efx_ethtool_fill_self_tests(struct efx_nic *efx, | 364 | static int efx_ethtool_fill_self_tests(struct efx_nic *efx, |
365 | struct efx_self_tests *tests, | 365 | struct efx_self_tests *tests, |
366 | struct ethtool_string *strings, | 366 | struct ethtool_string *strings, |
367 | u64 *data) | 367 | u64 *data) |
368 | { | 368 | { |
369 | struct efx_channel *channel; | 369 | struct efx_channel *channel; |
370 | unsigned int n = 0, i; | 370 | unsigned int n = 0, i; |
371 | enum efx_loopback_mode mode; | 371 | enum efx_loopback_mode mode; |
372 | 372 | ||
373 | efx_fill_test(n++, strings, data, &tests->phy_alive, | 373 | efx_fill_test(n++, strings, data, &tests->phy_alive, |
374 | "phy", 0, "alive", NULL); | 374 | "phy", 0, "alive", NULL); |
375 | efx_fill_test(n++, strings, data, &tests->nvram, | 375 | efx_fill_test(n++, strings, data, &tests->nvram, |
376 | "core", 0, "nvram", NULL); | 376 | "core", 0, "nvram", NULL); |
377 | efx_fill_test(n++, strings, data, &tests->interrupt, | 377 | efx_fill_test(n++, strings, data, &tests->interrupt, |
378 | "core", 0, "interrupt", NULL); | 378 | "core", 0, "interrupt", NULL); |
379 | 379 | ||
380 | /* Event queues */ | 380 | /* Event queues */ |
381 | efx_for_each_channel(channel, efx) { | 381 | efx_for_each_channel(channel, efx) { |
382 | efx_fill_test(n++, strings, data, | 382 | efx_fill_test(n++, strings, data, |
383 | &tests->eventq_dma[channel->channel], | 383 | &tests->eventq_dma[channel->channel], |
384 | EFX_CHANNEL_NAME(channel), | 384 | EFX_CHANNEL_NAME(channel), |
385 | "eventq.dma", NULL); | 385 | "eventq.dma", NULL); |
386 | efx_fill_test(n++, strings, data, | 386 | efx_fill_test(n++, strings, data, |
387 | &tests->eventq_int[channel->channel], | 387 | &tests->eventq_int[channel->channel], |
388 | EFX_CHANNEL_NAME(channel), | 388 | EFX_CHANNEL_NAME(channel), |
389 | "eventq.int", NULL); | 389 | "eventq.int", NULL); |
390 | efx_fill_test(n++, strings, data, | 390 | efx_fill_test(n++, strings, data, |
391 | &tests->eventq_poll[channel->channel], | 391 | &tests->eventq_poll[channel->channel], |
392 | EFX_CHANNEL_NAME(channel), | 392 | EFX_CHANNEL_NAME(channel), |
393 | "eventq.poll", NULL); | 393 | "eventq.poll", NULL); |
394 | } | 394 | } |
395 | 395 | ||
396 | efx_fill_test(n++, strings, data, &tests->registers, | 396 | efx_fill_test(n++, strings, data, &tests->registers, |
397 | "core", 0, "registers", NULL); | 397 | "core", 0, "registers", NULL); |
398 | 398 | ||
399 | if (efx->phy_op->run_tests != NULL) { | 399 | if (efx->phy_op->run_tests != NULL) { |
400 | EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL); | 400 | EFX_BUG_ON_PARANOID(efx->phy_op->test_name == NULL); |
401 | 401 | ||
402 | for (i = 0; true; ++i) { | 402 | for (i = 0; true; ++i) { |
403 | const char *name; | 403 | const char *name; |
404 | 404 | ||
405 | EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); | 405 | EFX_BUG_ON_PARANOID(i >= EFX_MAX_PHY_TESTS); |
406 | name = efx->phy_op->test_name(efx, i); | 406 | name = efx->phy_op->test_name(efx, i); |
407 | if (name == NULL) | 407 | if (name == NULL) |
408 | break; | 408 | break; |
409 | 409 | ||
410 | efx_fill_test(n++, strings, data, &tests->phy_ext[i], | 410 | efx_fill_test(n++, strings, data, &tests->phy_ext[i], |
411 | "phy", 0, name, NULL); | 411 | "phy", 0, name, NULL); |
412 | } | 412 | } |
413 | } | 413 | } |
414 | 414 | ||
415 | /* Loopback tests */ | 415 | /* Loopback tests */ |
416 | for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { | 416 | for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { |
417 | if (!(efx->loopback_modes & (1 << mode))) | 417 | if (!(efx->loopback_modes & (1 << mode))) |
418 | continue; | 418 | continue; |
419 | n = efx_fill_loopback_test(efx, | 419 | n = efx_fill_loopback_test(efx, |
420 | &tests->loopback[mode], mode, n, | 420 | &tests->loopback[mode], mode, n, |
421 | strings, data); | 421 | strings, data); |
422 | } | 422 | } |
423 | 423 | ||
424 | return n; | 424 | return n; |
425 | } | 425 | } |
426 | 426 | ||
427 | static int efx_ethtool_get_sset_count(struct net_device *net_dev, | 427 | static int efx_ethtool_get_sset_count(struct net_device *net_dev, |
428 | int string_set) | 428 | int string_set) |
429 | { | 429 | { |
430 | switch (string_set) { | 430 | switch (string_set) { |
431 | case ETH_SS_STATS: | 431 | case ETH_SS_STATS: |
432 | return EFX_ETHTOOL_NUM_STATS; | 432 | return EFX_ETHTOOL_NUM_STATS; |
433 | case ETH_SS_TEST: | 433 | case ETH_SS_TEST: |
434 | return efx_ethtool_fill_self_tests(netdev_priv(net_dev), | 434 | return efx_ethtool_fill_self_tests(netdev_priv(net_dev), |
435 | NULL, NULL, NULL); | 435 | NULL, NULL, NULL); |
436 | default: | 436 | default: |
437 | return -EINVAL; | 437 | return -EINVAL; |
438 | } | 438 | } |
439 | } | 439 | } |
440 | 440 | ||
441 | static void efx_ethtool_get_strings(struct net_device *net_dev, | 441 | static void efx_ethtool_get_strings(struct net_device *net_dev, |
442 | u32 string_set, u8 *strings) | 442 | u32 string_set, u8 *strings) |
443 | { | 443 | { |
444 | struct efx_nic *efx = netdev_priv(net_dev); | 444 | struct efx_nic *efx = netdev_priv(net_dev); |
445 | struct ethtool_string *ethtool_strings = | 445 | struct ethtool_string *ethtool_strings = |
446 | (struct ethtool_string *)strings; | 446 | (struct ethtool_string *)strings; |
447 | int i; | 447 | int i; |
448 | 448 | ||
449 | switch (string_set) { | 449 | switch (string_set) { |
450 | case ETH_SS_STATS: | 450 | case ETH_SS_STATS: |
451 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) | 451 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) |
452 | strncpy(ethtool_strings[i].name, | 452 | strncpy(ethtool_strings[i].name, |
453 | efx_ethtool_stats[i].name, | 453 | efx_ethtool_stats[i].name, |
454 | sizeof(ethtool_strings[i].name)); | 454 | sizeof(ethtool_strings[i].name)); |
455 | break; | 455 | break; |
456 | case ETH_SS_TEST: | 456 | case ETH_SS_TEST: |
457 | efx_ethtool_fill_self_tests(efx, NULL, | 457 | efx_ethtool_fill_self_tests(efx, NULL, |
458 | ethtool_strings, NULL); | 458 | ethtool_strings, NULL); |
459 | break; | 459 | break; |
460 | default: | 460 | default: |
461 | /* No other string sets */ | 461 | /* No other string sets */ |
462 | break; | 462 | break; |
463 | } | 463 | } |
464 | } | 464 | } |
465 | 465 | ||
466 | static void efx_ethtool_get_stats(struct net_device *net_dev, | 466 | static void efx_ethtool_get_stats(struct net_device *net_dev, |
467 | struct ethtool_stats *stats, | 467 | struct ethtool_stats *stats, |
468 | u64 *data) | 468 | u64 *data) |
469 | { | 469 | { |
470 | struct efx_nic *efx = netdev_priv(net_dev); | 470 | struct efx_nic *efx = netdev_priv(net_dev); |
471 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 471 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
472 | struct efx_ethtool_stat *stat; | 472 | struct efx_ethtool_stat *stat; |
473 | struct efx_channel *channel; | 473 | struct efx_channel *channel; |
474 | struct rtnl_link_stats64 temp; | 474 | struct rtnl_link_stats64 temp; |
475 | int i; | 475 | int i; |
476 | 476 | ||
477 | EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); | 477 | EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); |
478 | 478 | ||
479 | /* Update MAC and NIC statistics */ | 479 | /* Update MAC and NIC statistics */ |
480 | dev_get_stats(net_dev, &temp); | 480 | dev_get_stats(net_dev, &temp); |
481 | 481 | ||
482 | /* Fill detailed statistics buffer */ | 482 | /* Fill detailed statistics buffer */ |
483 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { | 483 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { |
484 | stat = &efx_ethtool_stats[i]; | 484 | stat = &efx_ethtool_stats[i]; |
485 | switch (stat->source) { | 485 | switch (stat->source) { |
486 | case EFX_ETHTOOL_STAT_SOURCE_mac_stats: | 486 | case EFX_ETHTOOL_STAT_SOURCE_mac_stats: |
487 | data[i] = stat->get_stat((void *)mac_stats + | 487 | data[i] = stat->get_stat((void *)mac_stats + |
488 | stat->offset); | 488 | stat->offset); |
489 | break; | 489 | break; |
490 | case EFX_ETHTOOL_STAT_SOURCE_nic: | 490 | case EFX_ETHTOOL_STAT_SOURCE_nic: |
491 | data[i] = stat->get_stat((void *)efx + stat->offset); | 491 | data[i] = stat->get_stat((void *)efx + stat->offset); |
492 | break; | 492 | break; |
493 | case EFX_ETHTOOL_STAT_SOURCE_channel: | 493 | case EFX_ETHTOOL_STAT_SOURCE_channel: |
494 | data[i] = 0; | 494 | data[i] = 0; |
495 | efx_for_each_channel(channel, efx) | 495 | efx_for_each_channel(channel, efx) |
496 | data[i] += stat->get_stat((void *)channel + | 496 | data[i] += stat->get_stat((void *)channel + |
497 | stat->offset); | 497 | stat->offset); |
498 | break; | 498 | break; |
499 | } | 499 | } |
500 | } | 500 | } |
501 | } | 501 | } |
502 | 502 | ||
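The stat loop above is table-driven: each efx_ethtool_stat names a source object (MAC stats, the NIC itself, or a per-channel sum) plus a byte offset, and get_stat() reads through that offset, so adding a statistic is one table row rather than new code. A standalone sketch of the offsetof()-based table pattern, simplified to a single source (the types here are illustrative stand-ins):

    #include <stddef.h>
    #include <stdio.h>

    struct mac_stats { unsigned long tx_packets, rx_packets; };

    struct stat_desc {
        const char *name;
        size_t offset;                  /* offsetof() into mac_stats */
    };

    static const struct stat_desc stats[] = {
        { "tx_packets", offsetof(struct mac_stats, tx_packets) },
        { "rx_packets", offsetof(struct mac_stats, rx_packets) },
    };

    int main(void)
    {
        struct mac_stats mac = { .tx_packets = 42, .rx_packets = 7 };
        size_t i;

        /* Read each counter through its recorded byte offset. */
        for (i = 0; i < sizeof(stats) / sizeof(stats[0]); i++)
            printf("%s = %lu\n", stats[i].name,
                   *(unsigned long *)((char *)&mac + stats[i].offset));
        return 0;
    }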
503 | static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) | 503 | static int efx_ethtool_set_tso(struct net_device *net_dev, u32 enable) |
504 | { | 504 | { |
505 | struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); | 505 | struct efx_nic *efx __attribute__ ((unused)) = netdev_priv(net_dev); |
506 | unsigned long features; | 506 | unsigned long features; |
507 | 507 | ||
508 | features = NETIF_F_TSO; | 508 | features = NETIF_F_TSO; |
509 | if (efx->type->offload_features & NETIF_F_V6_CSUM) | 509 | if (efx->type->offload_features & NETIF_F_V6_CSUM) |
510 | features |= NETIF_F_TSO6; | 510 | features |= NETIF_F_TSO6; |
511 | 511 | ||
512 | if (enable) | 512 | if (enable) |
513 | net_dev->features |= features; | 513 | net_dev->features |= features; |
514 | else | 514 | else |
515 | net_dev->features &= ~features; | 515 | net_dev->features &= ~features; |
516 | 516 | ||
517 | return 0; | 517 | return 0; |
518 | } | 518 | } |
519 | 519 | ||
520 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) | 520 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) |
521 | { | 521 | { |
522 | struct efx_nic *efx = netdev_priv(net_dev); | 522 | struct efx_nic *efx = netdev_priv(net_dev); |
523 | unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; | 523 | unsigned long features = efx->type->offload_features & NETIF_F_ALL_CSUM; |
524 | 524 | ||
525 | if (enable) | 525 | if (enable) |
526 | net_dev->features |= features; | 526 | net_dev->features |= features; |
527 | else | 527 | else |
528 | net_dev->features &= ~features; | 528 | net_dev->features &= ~features; |
529 | 529 | ||
530 | return 0; | 530 | return 0; |
531 | } | 531 | } |
532 | 532 | ||
533 | static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable) | 533 | static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable) |
534 | { | 534 | { |
535 | struct efx_nic *efx = netdev_priv(net_dev); | 535 | struct efx_nic *efx = netdev_priv(net_dev); |
536 | 536 | ||
537 | /* No way to stop the hardware doing the checks; we just | 537 | /* No way to stop the hardware doing the checks; we just |
538 | * ignore the result. | 538 | * ignore the result. |
539 | */ | 539 | */ |
540 | efx->rx_checksum_enabled = !!enable; | 540 | efx->rx_checksum_enabled = !!enable; |
541 | 541 | ||
542 | return 0; | 542 | return 0; |
543 | } | 543 | } |
544 | 544 | ||
545 | static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) | 545 | static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) |
546 | { | 546 | { |
547 | struct efx_nic *efx = netdev_priv(net_dev); | 547 | struct efx_nic *efx = netdev_priv(net_dev); |
548 | 548 | ||
549 | return efx->rx_checksum_enabled; | 549 | return efx->rx_checksum_enabled; |
550 | } | 550 | } |
551 | 551 | ||
552 | static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data) | 552 | static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data) |
553 | { | 553 | { |
554 | struct efx_nic *efx = netdev_priv(net_dev); | 554 | struct efx_nic *efx = netdev_priv(net_dev); |
555 | u32 supported = (efx->type->offload_features & | 555 | u32 supported = (efx->type->offload_features & |
556 | (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE)); | 556 | (ETH_FLAG_RXHASH | ETH_FLAG_NTUPLE)); |
557 | int rc; | 557 | int rc; |
558 | 558 | ||
559 | rc = ethtool_op_set_flags(net_dev, data, supported); | 559 | rc = ethtool_op_set_flags(net_dev, data, supported); |
560 | if (rc) | 560 | if (rc) |
561 | return rc; | 561 | return rc; |
562 | 562 | ||
563 | if (!(data & ETH_FLAG_NTUPLE)) { | 563 | if (!(data & ETH_FLAG_NTUPLE)) { |
564 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, | 564 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, |
565 | EFX_FILTER_PRI_MANUAL); | 565 | EFX_FILTER_PRI_MANUAL); |
566 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, | 566 | efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, |
567 | EFX_FILTER_PRI_MANUAL); | 567 | EFX_FILTER_PRI_MANUAL); |
568 | } | 568 | } |
569 | 569 | ||
570 | return 0; | 570 | return 0; |
571 | } | 571 | } |
572 | 572 | ||
573 | static void efx_ethtool_self_test(struct net_device *net_dev, | 573 | static void efx_ethtool_self_test(struct net_device *net_dev, |
574 | struct ethtool_test *test, u64 *data) | 574 | struct ethtool_test *test, u64 *data) |
575 | { | 575 | { |
576 | struct efx_nic *efx = netdev_priv(net_dev); | 576 | struct efx_nic *efx = netdev_priv(net_dev); |
577 | struct efx_self_tests efx_tests; | 577 | struct efx_self_tests efx_tests; |
578 | int already_up; | 578 | int already_up; |
579 | int rc; | 579 | int rc; |
580 | 580 | ||
581 | ASSERT_RTNL(); | 581 | ASSERT_RTNL(); |
582 | if (efx->state != STATE_RUNNING) { | 582 | if (efx->state != STATE_RUNNING) { |
583 | rc = -EIO; | 583 | rc = -EIO; |
584 | goto fail1; | 584 | goto fail1; |
585 | } | 585 | } |
586 | 586 | ||
587 | /* We need rx buffers and interrupts. */ | 587 | /* We need rx buffers and interrupts. */ |
588 | already_up = (efx->net_dev->flags & IFF_UP); | 588 | already_up = (efx->net_dev->flags & IFF_UP); |
589 | if (!already_up) { | 589 | if (!already_up) { |
590 | rc = dev_open(efx->net_dev); | 590 | rc = dev_open(efx->net_dev); |
591 | if (rc) { | 591 | if (rc) { |
592 | netif_err(efx, drv, efx->net_dev, | 592 | netif_err(efx, drv, efx->net_dev, |
593 | "failed opening device.\n"); | 593 | "failed opening device.\n"); |
594 | goto fail2; | 594 | goto fail2; |
595 | } | 595 | } |
596 | } | 596 | } |
597 | 597 | ||
598 | memset(&efx_tests, 0, sizeof(efx_tests)); | 598 | memset(&efx_tests, 0, sizeof(efx_tests)); |
599 | 599 | ||
600 | rc = efx_selftest(efx, &efx_tests, test->flags); | 600 | rc = efx_selftest(efx, &efx_tests, test->flags); |
601 | 601 | ||
602 | if (!already_up) | 602 | if (!already_up) |
603 | dev_close(efx->net_dev); | 603 | dev_close(efx->net_dev); |
604 | 604 | ||
605 | netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n", | 605 | netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n", |
606 | rc == 0 ? "passed" : "failed", | 606 | rc == 0 ? "passed" : "failed", |
607 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); | 607 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); |
608 | 608 | ||
609 | fail2: | 609 | fail2: |
610 | fail1: | 610 | fail1: |
611 | /* Fill ethtool results structures */ | 611 | /* Fill ethtool results structures */ |
612 | efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); | 612 | efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); |
613 | if (rc) | 613 | if (rc) |
614 | test->flags |= ETH_TEST_FL_FAILED; | 614 | test->flags |= ETH_TEST_FL_FAILED; |
615 | } | 615 | } |
616 | 616 | ||
617 | /* Restart autonegotiation */ | 617 | /* Restart autonegotiation */ |
618 | static int efx_ethtool_nway_reset(struct net_device *net_dev) | 618 | static int efx_ethtool_nway_reset(struct net_device *net_dev) |
619 | { | 619 | { |
620 | struct efx_nic *efx = netdev_priv(net_dev); | 620 | struct efx_nic *efx = netdev_priv(net_dev); |
621 | 621 | ||
622 | return mdio45_nway_restart(&efx->mdio); | 622 | return mdio45_nway_restart(&efx->mdio); |
623 | } | 623 | } |
624 | 624 | ||
625 | static u32 efx_ethtool_get_link(struct net_device *net_dev) | 625 | static u32 efx_ethtool_get_link(struct net_device *net_dev) |
626 | { | 626 | { |
627 | struct efx_nic *efx = netdev_priv(net_dev); | 627 | struct efx_nic *efx = netdev_priv(net_dev); |
628 | 628 | ||
629 | return efx->link_state.up; | 629 | return efx->link_state.up; |
630 | } | 630 | } |
631 | 631 | ||
632 | static int efx_ethtool_get_eeprom_len(struct net_device *net_dev) | 632 | static int efx_ethtool_get_eeprom_len(struct net_device *net_dev) |
633 | { | 633 | { |
634 | struct efx_nic *efx = netdev_priv(net_dev); | 634 | struct efx_nic *efx = netdev_priv(net_dev); |
635 | struct efx_spi_device *spi = efx->spi_eeprom; | 635 | struct efx_spi_device *spi = efx->spi_eeprom; |
636 | 636 | ||
637 | if (!spi) | 637 | if (!spi) |
638 | return 0; | 638 | return 0; |
639 | return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) - | 639 | return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) - |
640 | min(spi->size, EFX_EEPROM_BOOTCONFIG_START); | 640 | min(spi->size, EFX_EEPROM_BOOTCONFIG_START); |
641 | } | 641 | } |
642 | 642 | ||
643 | static int efx_ethtool_get_eeprom(struct net_device *net_dev, | 643 | static int efx_ethtool_get_eeprom(struct net_device *net_dev, |
644 | struct ethtool_eeprom *eeprom, u8 *buf) | 644 | struct ethtool_eeprom *eeprom, u8 *buf) |
645 | { | 645 | { |
646 | struct efx_nic *efx = netdev_priv(net_dev); | 646 | struct efx_nic *efx = netdev_priv(net_dev); |
647 | struct efx_spi_device *spi = efx->spi_eeprom; | 647 | struct efx_spi_device *spi = efx->spi_eeprom; |
648 | size_t len; | 648 | size_t len; |
649 | int rc; | 649 | int rc; |
650 | 650 | ||
651 | rc = mutex_lock_interruptible(&efx->spi_lock); | 651 | rc = mutex_lock_interruptible(&efx->spi_lock); |
652 | if (rc) | 652 | if (rc) |
653 | return rc; | 653 | return rc; |
654 | rc = falcon_spi_read(efx, spi, | 654 | rc = falcon_spi_read(efx, spi, |
655 | eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, | 655 | eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, |
656 | eeprom->len, &len, buf); | 656 | eeprom->len, &len, buf); |
657 | mutex_unlock(&efx->spi_lock); | 657 | mutex_unlock(&efx->spi_lock); |
658 | 658 | ||
659 | eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC; | 659 | eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC; |
660 | eeprom->len = len; | 660 | eeprom->len = len; |
661 | return rc; | 661 | return rc; |
662 | } | 662 | } |
663 | 663 | ||
664 | static int efx_ethtool_set_eeprom(struct net_device *net_dev, | 664 | static int efx_ethtool_set_eeprom(struct net_device *net_dev, |
665 | struct ethtool_eeprom *eeprom, u8 *buf) | 665 | struct ethtool_eeprom *eeprom, u8 *buf) |
666 | { | 666 | { |
667 | struct efx_nic *efx = netdev_priv(net_dev); | 667 | struct efx_nic *efx = netdev_priv(net_dev); |
668 | struct efx_spi_device *spi = efx->spi_eeprom; | 668 | struct efx_spi_device *spi = efx->spi_eeprom; |
669 | size_t len; | 669 | size_t len; |
670 | int rc; | 670 | int rc; |
671 | 671 | ||
672 | if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC) | 672 | if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC) |
673 | return -EINVAL; | 673 | return -EINVAL; |
674 | 674 | ||
675 | rc = mutex_lock_interruptible(&efx->spi_lock); | 675 | rc = mutex_lock_interruptible(&efx->spi_lock); |
676 | if (rc) | 676 | if (rc) |
677 | return rc; | 677 | return rc; |
678 | rc = falcon_spi_write(efx, spi, | 678 | rc = falcon_spi_write(efx, spi, |
679 | eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, | 679 | eeprom->offset + EFX_EEPROM_BOOTCONFIG_START, |
680 | eeprom->len, &len, buf); | 680 | eeprom->len, &len, buf); |
681 | mutex_unlock(&efx->spi_lock); | 681 | mutex_unlock(&efx->spi_lock); |
682 | 682 | ||
683 | eeprom->len = len; | 683 | eeprom->len = len; |
684 | return rc; | 684 | return rc; |
685 | } | 685 | } |
686 | 686 | ||
687 | static int efx_ethtool_get_coalesce(struct net_device *net_dev, | 687 | static int efx_ethtool_get_coalesce(struct net_device *net_dev, |
688 | struct ethtool_coalesce *coalesce) | 688 | struct ethtool_coalesce *coalesce) |
689 | { | 689 | { |
690 | struct efx_nic *efx = netdev_priv(net_dev); | 690 | struct efx_nic *efx = netdev_priv(net_dev); |
691 | struct efx_channel *channel; | 691 | struct efx_channel *channel; |
692 | 692 | ||
693 | memset(coalesce, 0, sizeof(*coalesce)); | 693 | memset(coalesce, 0, sizeof(*coalesce)); |
694 | 694 | ||
695 | /* Find lowest IRQ moderation across all used TX queues */ | 695 | /* Find lowest IRQ moderation across all used TX queues */ |
696 | coalesce->tx_coalesce_usecs_irq = ~((u32) 0); | 696 | coalesce->tx_coalesce_usecs_irq = ~((u32) 0); |
697 | efx_for_each_channel(channel, efx) { | 697 | efx_for_each_channel(channel, efx) { |
698 | if (!efx_channel_get_tx_queue(channel, 0)) | 698 | if (!efx_channel_get_tx_queue(channel, 0)) |
699 | continue; | 699 | continue; |
700 | if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { | 700 | if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { |
701 | if (channel->channel < efx->n_rx_channels) | 701 | if (channel->channel < efx->n_rx_channels) |
702 | coalesce->tx_coalesce_usecs_irq = | 702 | coalesce->tx_coalesce_usecs_irq = |
703 | channel->irq_moderation; | 703 | channel->irq_moderation; |
704 | else | 704 | else |
705 | coalesce->tx_coalesce_usecs_irq = 0; | 705 | coalesce->tx_coalesce_usecs_irq = 0; |
706 | } | 706 | } |
707 | } | 707 | } |
708 | 708 | ||
709 | coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; | 709 | coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; |
710 | coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; | 710 | coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; |
711 | 711 | ||
712 | coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION; | 712 | coalesce->tx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION; |
713 | coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION; | 713 | coalesce->rx_coalesce_usecs_irq *= EFX_IRQ_MOD_RESOLUTION; |
714 | 714 | ||
715 | return 0; | 715 | return 0; |
716 | } | 716 | } |
717 | 717 | ||
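A unit detail worth noting in get_coalesce(): channel->irq_moderation is kept in hardware ticks and only scaled into microseconds at the end, by multiplying with EFX_IRQ_MOD_RESOLUTION. A worked standalone example of that conversion (the resolution value below is an assumed stand-in, not the driver's real constant):

    #include <stdio.h>

    #define IRQ_MOD_RESOLUTION 5    /* assumed: us per moderation tick */

    int main(void)
    {
        unsigned int irq_moderation = 12;   /* ticks, as stored per channel */
        unsigned int usecs = irq_moderation * IRQ_MOD_RESOLUTION;

        printf("%u ticks -> %u us between interrupts\n",
               irq_moderation, usecs);
        return 0;
    }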
718 | /* Set coalescing parameters | 718 | /* Set coalescing parameters |
719 | * The difficulty is with channels shared between RX and TX | 719 | * The difficulty is with channels shared between RX and TX |
720 | */ | 720 | */ |
721 | static int efx_ethtool_set_coalesce(struct net_device *net_dev, | 721 | static int efx_ethtool_set_coalesce(struct net_device *net_dev, |
722 | struct ethtool_coalesce *coalesce) | 722 | struct ethtool_coalesce *coalesce) |
723 | { | 723 | { |
724 | struct efx_nic *efx = netdev_priv(net_dev); | 724 | struct efx_nic *efx = netdev_priv(net_dev); |
725 | struct efx_channel *channel; | 725 | struct efx_channel *channel; |
726 | unsigned tx_usecs, rx_usecs, adaptive; | 726 | unsigned tx_usecs, rx_usecs, adaptive; |
727 | 727 | ||
728 | if (coalesce->use_adaptive_tx_coalesce) | 728 | if (coalesce->use_adaptive_tx_coalesce) |
729 | return -EOPNOTSUPP; | 729 | return -EOPNOTSUPP; |
730 | 730 | ||
731 | if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { | 731 | if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { |
732 | netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. " | 732 | netif_err(efx, drv, efx->net_dev, "invalid coalescing setting. " |
733 | "Only rx/tx_coalesce_usecs_irq are supported\n"); | 733 | "Only rx/tx_coalesce_usecs_irq are supported\n"); |
734 | return -EOPNOTSUPP; | 734 | return -EOPNOTSUPP; |
735 | } | 735 | } |
736 | 736 | ||
737 | rx_usecs = coalesce->rx_coalesce_usecs_irq; | 737 | rx_usecs = coalesce->rx_coalesce_usecs_irq; |
738 | tx_usecs = coalesce->tx_coalesce_usecs_irq; | 738 | tx_usecs = coalesce->tx_coalesce_usecs_irq; |
739 | adaptive = coalesce->use_adaptive_rx_coalesce; | 739 | adaptive = coalesce->use_adaptive_rx_coalesce; |
740 | 740 | ||
741 | /* If the channel is shared only allow RX parameters to be set */ | 741 | /* If the channel is shared only allow RX parameters to be set */ |
742 | efx_for_each_channel(channel, efx) { | 742 | efx_for_each_channel(channel, efx) { |
743 | if (efx_channel_get_rx_queue(channel) && | 743 | if (efx_channel_get_rx_queue(channel) && |
744 | efx_channel_get_tx_queue(channel, 0) && | 744 | efx_channel_get_tx_queue(channel, 0) && |
745 | tx_usecs) { | 745 | tx_usecs) { |
746 | netif_err(efx, drv, efx->net_dev, "Channel is shared. " | 746 | netif_err(efx, drv, efx->net_dev, "Channel is shared. " |
747 | "Only RX coalescing may be set\n"); | 747 | "Only RX coalescing may be set\n"); |
748 | return -EOPNOTSUPP; | 748 | return -EOPNOTSUPP; |
749 | } | 749 | } |
750 | } | 750 | } |
751 | 751 | ||
752 | efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); | 752 | efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); |
753 | efx_for_each_channel(channel, efx) | 753 | efx_for_each_channel(channel, efx) |
754 | efx->type->push_irq_moderation(channel); | 754 | efx->type->push_irq_moderation(channel); |
755 | 755 | ||
756 | return 0; | 756 | return 0; |
757 | } | 757 | } |
758 | 758 | ||
759 | static void efx_ethtool_get_ringparam(struct net_device *net_dev, | 759 | static void efx_ethtool_get_ringparam(struct net_device *net_dev, |
760 | struct ethtool_ringparam *ring) | 760 | struct ethtool_ringparam *ring) |
761 | { | 761 | { |
762 | struct efx_nic *efx = netdev_priv(net_dev); | 762 | struct efx_nic *efx = netdev_priv(net_dev); |
763 | 763 | ||
764 | ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; | 764 | ring->rx_max_pending = EFX_MAX_DMAQ_SIZE; |
765 | ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; | 765 | ring->tx_max_pending = EFX_MAX_DMAQ_SIZE; |
766 | ring->rx_mini_max_pending = 0; | 766 | ring->rx_mini_max_pending = 0; |
767 | ring->rx_jumbo_max_pending = 0; | 767 | ring->rx_jumbo_max_pending = 0; |
768 | ring->rx_pending = efx->rxq_entries; | 768 | ring->rx_pending = efx->rxq_entries; |
769 | ring->tx_pending = efx->txq_entries; | 769 | ring->tx_pending = efx->txq_entries; |
770 | ring->rx_mini_pending = 0; | 770 | ring->rx_mini_pending = 0; |
771 | ring->rx_jumbo_pending = 0; | 771 | ring->rx_jumbo_pending = 0; |
772 | } | 772 | } |
773 | 773 | ||
774 | static int efx_ethtool_set_ringparam(struct net_device *net_dev, | 774 | static int efx_ethtool_set_ringparam(struct net_device *net_dev, |
775 | struct ethtool_ringparam *ring) | 775 | struct ethtool_ringparam *ring) |
776 | { | 776 | { |
777 | struct efx_nic *efx = netdev_priv(net_dev); | 777 | struct efx_nic *efx = netdev_priv(net_dev); |
778 | 778 | ||
779 | if (ring->rx_mini_pending || ring->rx_jumbo_pending || | 779 | if (ring->rx_mini_pending || ring->rx_jumbo_pending || |
780 | ring->rx_pending > EFX_MAX_DMAQ_SIZE || | 780 | ring->rx_pending > EFX_MAX_DMAQ_SIZE || |
781 | ring->tx_pending > EFX_MAX_DMAQ_SIZE) | 781 | ring->tx_pending > EFX_MAX_DMAQ_SIZE) |
782 | return -EINVAL; | 782 | return -EINVAL; |
783 | 783 | ||
784 | if (ring->rx_pending < EFX_MIN_RING_SIZE || | 784 | if (ring->rx_pending < EFX_MIN_RING_SIZE || |
785 | ring->tx_pending < EFX_MIN_RING_SIZE) { | 785 | ring->tx_pending < EFX_MIN_RING_SIZE) { |
786 | netif_err(efx, drv, efx->net_dev, | 786 | netif_err(efx, drv, efx->net_dev, |
787 | "TX and RX queues cannot be smaller than %ld\n", | 787 | "TX and RX queues cannot be smaller than %ld\n", |
788 | EFX_MIN_RING_SIZE); | 788 | EFX_MIN_RING_SIZE); |
789 | return -EINVAL; | 789 | return -EINVAL; |
790 | } | 790 | } |
791 | 791 | ||
792 | return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); | 792 | return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); |
793 | } | 793 | } |
794 | 794 | ||
795 | static int efx_ethtool_set_pauseparam(struct net_device *net_dev, | 795 | static int efx_ethtool_set_pauseparam(struct net_device *net_dev, |
796 | struct ethtool_pauseparam *pause) | 796 | struct ethtool_pauseparam *pause) |
797 | { | 797 | { |
798 | struct efx_nic *efx = netdev_priv(net_dev); | 798 | struct efx_nic *efx = netdev_priv(net_dev); |
799 | enum efx_fc_type wanted_fc, old_fc; | 799 | enum efx_fc_type wanted_fc, old_fc; |
800 | u32 old_adv; | 800 | u32 old_adv; |
801 | bool reset; | 801 | bool reset; |
802 | int rc = 0; | 802 | int rc = 0; |
803 | 803 | ||
804 | mutex_lock(&efx->mac_lock); | 804 | mutex_lock(&efx->mac_lock); |
805 | 805 | ||
806 | wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | | 806 | wanted_fc = ((pause->rx_pause ? EFX_FC_RX : 0) | |
807 | (pause->tx_pause ? EFX_FC_TX : 0) | | 807 | (pause->tx_pause ? EFX_FC_TX : 0) | |
808 | (pause->autoneg ? EFX_FC_AUTO : 0)); | 808 | (pause->autoneg ? EFX_FC_AUTO : 0)); |
809 | 809 | ||
810 | if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { | 810 | if ((wanted_fc & EFX_FC_TX) && !(wanted_fc & EFX_FC_RX)) { |
811 | netif_dbg(efx, drv, efx->net_dev, | 811 | netif_dbg(efx, drv, efx->net_dev, |
812 | "Flow control unsupported: tx ON rx OFF\n"); | 812 | "Flow control unsupported: tx ON rx OFF\n"); |
813 | rc = -EINVAL; | 813 | rc = -EINVAL; |
814 | goto out; | 814 | goto out; |
815 | } | 815 | } |
816 | 816 | ||
817 | if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { | 817 | if ((wanted_fc & EFX_FC_AUTO) && !efx->link_advertising) { |
818 | netif_dbg(efx, drv, efx->net_dev, | 818 | netif_dbg(efx, drv, efx->net_dev, |
819 | "Autonegotiation is disabled\n"); | 819 | "Autonegotiation is disabled\n"); |
820 | rc = -EINVAL; | 820 | rc = -EINVAL; |
821 | goto out; | 821 | goto out; |
822 | } | 822 | } |
823 | 823 | ||
824 | /* TX flow control may automatically turn itself off if the | 824 | /* TX flow control may automatically turn itself off if the |
825 | * link partner (intermittently) stops responding to pause | 825 | * link partner (intermittently) stops responding to pause |
826 | * frames. There isn't any indication that this has happened, | 826 | * frames. There isn't any indication that this has happened, |
827 | * so the best we can do is leave it up to the user to spot this | 827 | * so the best we can do is leave it up to the user to spot this |
828 | * and fix it by cycling transmit flow control on this end. */ | 828 | * and fix it by cycling transmit flow control on this end. */ |
829 | reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); | 829 | reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX); |
830 | if (EFX_WORKAROUND_11482(efx) && reset) { | 830 | if (EFX_WORKAROUND_11482(efx) && reset) { |
831 | if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) { | 831 | if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) { |
832 | /* Recover by resetting the EM block */ | 832 | /* Recover by resetting the EM block */ |
833 | falcon_stop_nic_stats(efx); | 833 | falcon_stop_nic_stats(efx); |
834 | falcon_drain_tx_fifo(efx); | 834 | falcon_drain_tx_fifo(efx); |
835 | efx->mac_op->reconfigure(efx); | 835 | efx->mac_op->reconfigure(efx); |
836 | falcon_start_nic_stats(efx); | 836 | falcon_start_nic_stats(efx); |
837 | } else { | 837 | } else { |
838 | /* Schedule a reset to recover */ | 838 | /* Schedule a reset to recover */ |
839 | efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); | 839 | efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); |
840 | } | 840 | } |
841 | } | 841 | } |
842 | 842 | ||
843 | old_adv = efx->link_advertising; | 843 | old_adv = efx->link_advertising; |
844 | old_fc = efx->wanted_fc; | 844 | old_fc = efx->wanted_fc; |
845 | efx_link_set_wanted_fc(efx, wanted_fc); | 845 | efx_link_set_wanted_fc(efx, wanted_fc); |
846 | if (efx->link_advertising != old_adv || | 846 | if (efx->link_advertising != old_adv || |
847 | (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { | 847 | (efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) { |
848 | rc = efx->phy_op->reconfigure(efx); | 848 | rc = efx->phy_op->reconfigure(efx); |
849 | if (rc) { | 849 | if (rc) { |
850 | netif_err(efx, drv, efx->net_dev, | 850 | netif_err(efx, drv, efx->net_dev, |
851 | "Unable to advertise requested flow " | 851 | "Unable to advertise requested flow " |
852 | "control setting\n"); | 852 | "control setting\n"); |
853 | goto out; | 853 | goto out; |
854 | } | 854 | } |
855 | } | 855 | } |
856 | 856 | ||
857 | /* Reconfigure the MAC. The PHY *may* generate a link state change event | 857 | /* Reconfigure the MAC. The PHY *may* generate a link state change event |
858 | * if the user just changed the advertised capabilities, but there's no | 858 | * if the user just changed the advertised capabilities, but there's no |
859 | * harm doing this twice */ | 859 | * harm doing this twice */ |
860 | efx->mac_op->reconfigure(efx); | 860 | efx->mac_op->reconfigure(efx); |
861 | 861 | ||
862 | out: | 862 | out: |
863 | mutex_unlock(&efx->mac_lock); | 863 | mutex_unlock(&efx->mac_lock); |
864 | 864 | ||
865 | return rc; | 865 | return rc; |
866 | } | 866 | } |
867 | 867 | ||
868 | static void efx_ethtool_get_pauseparam(struct net_device *net_dev, | 868 | static void efx_ethtool_get_pauseparam(struct net_device *net_dev, |
869 | struct ethtool_pauseparam *pause) | 869 | struct ethtool_pauseparam *pause) |
870 | { | 870 | { |
871 | struct efx_nic *efx = netdev_priv(net_dev); | 871 | struct efx_nic *efx = netdev_priv(net_dev); |
872 | 872 | ||
873 | pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); | 873 | pause->rx_pause = !!(efx->wanted_fc & EFX_FC_RX); |
874 | pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); | 874 | pause->tx_pause = !!(efx->wanted_fc & EFX_FC_TX); |
875 | pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); | 875 | pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO); |
876 | } | 876 | } |
877 | 877 | ||
878 | 878 | ||
879 | static void efx_ethtool_get_wol(struct net_device *net_dev, | 879 | static void efx_ethtool_get_wol(struct net_device *net_dev, |
880 | struct ethtool_wolinfo *wol) | 880 | struct ethtool_wolinfo *wol) |
881 | { | 881 | { |
882 | struct efx_nic *efx = netdev_priv(net_dev); | 882 | struct efx_nic *efx = netdev_priv(net_dev); |
883 | return efx->type->get_wol(efx, wol); | 883 | return efx->type->get_wol(efx, wol); |
884 | } | 884 | } |
885 | 885 | ||
886 | 886 | ||
887 | static int efx_ethtool_set_wol(struct net_device *net_dev, | 887 | static int efx_ethtool_set_wol(struct net_device *net_dev, |
888 | struct ethtool_wolinfo *wol) | 888 | struct ethtool_wolinfo *wol) |
889 | { | 889 | { |
890 | struct efx_nic *efx = netdev_priv(net_dev); | 890 | struct efx_nic *efx = netdev_priv(net_dev); |
891 | return efx->type->set_wol(efx, wol->wolopts); | 891 | return efx->type->set_wol(efx, wol->wolopts); |
892 | } | 892 | } |
893 | 893 | ||
894 | extern int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) | 894 | static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags) |
895 | { | 895 | { |
896 | struct efx_nic *efx = netdev_priv(net_dev); | 896 | struct efx_nic *efx = netdev_priv(net_dev); |
897 | enum reset_type method; | 897 | enum reset_type method; |
898 | enum { | 898 | enum { |
899 | ETH_RESET_EFX_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER | | 899 | ETH_RESET_EFX_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER | |
900 | ETH_RESET_OFFLOAD | ETH_RESET_MAC) | 900 | ETH_RESET_OFFLOAD | ETH_RESET_MAC) |
901 | }; | 901 | }; |
902 | 902 | ||
903 | /* Check for minimal reset flags */ | 903 | /* Check for minimal reset flags */ |
904 | if ((*flags & ETH_RESET_EFX_INVISIBLE) != ETH_RESET_EFX_INVISIBLE) | 904 | if ((*flags & ETH_RESET_EFX_INVISIBLE) != ETH_RESET_EFX_INVISIBLE) |
905 | return -EINVAL; | 905 | return -EINVAL; |
906 | *flags ^= ETH_RESET_EFX_INVISIBLE; | 906 | *flags ^= ETH_RESET_EFX_INVISIBLE; |
907 | method = RESET_TYPE_INVISIBLE; | 907 | method = RESET_TYPE_INVISIBLE; |
908 | 908 | ||
909 | if (*flags & ETH_RESET_PHY) { | 909 | if (*flags & ETH_RESET_PHY) { |
910 | *flags ^= ETH_RESET_PHY; | 910 | *flags ^= ETH_RESET_PHY; |
911 | method = RESET_TYPE_ALL; | 911 | method = RESET_TYPE_ALL; |
912 | } | 912 | } |
913 | 913 | ||
914 | if ((*flags & efx->type->reset_world_flags) == | 914 | if ((*flags & efx->type->reset_world_flags) == |
915 | efx->type->reset_world_flags) { | 915 | efx->type->reset_world_flags) { |
916 | *flags ^= efx->type->reset_world_flags; | 916 | *flags ^= efx->type->reset_world_flags; |
917 | method = RESET_TYPE_WORLD; | 917 | method = RESET_TYPE_WORLD; |
918 | } | 918 | } |
919 | 919 | ||
920 | return efx_reset(efx, method); | 920 | return efx_reset(efx, method); |
921 | } | 921 | } |
922 | 922 | ||
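efx_ethtool_reset() above follows the ETHTOOL_RESET contract: the handler XORs away each flag bit it is going to honour, so whatever is still set in *flags on return tells the caller which reset components were not performed. A standalone sketch of that consume-the-flags protocol (the RST_* values are illustrative stand-ins for the real ETH_RESET_* constants):

    #include <stdio.h>

    #define RST_DMA    0x1
    #define RST_FILTER 0x2
    #define RST_PHY    0x4

    static int do_reset(unsigned int *flags)
    {
        unsigned int handled = RST_DMA | RST_FILTER;  /* what we support */

        *flags &= ~handled;     /* consume only the bits we acted on */
        return 0;
    }

    int main(void)
    {
        unsigned int flags = RST_DMA | RST_FILTER | RST_PHY;

        do_reset(&flags);
        if (flags)              /* RST_PHY was not honoured */
            printf("unhandled reset flags: %#x\n", flags);
        return 0;
    }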
923 | static int | 923 | static int |
924 | efx_ethtool_get_rxnfc(struct net_device *net_dev, | 924 | efx_ethtool_get_rxnfc(struct net_device *net_dev, |
925 | struct ethtool_rxnfc *info, void *rules __always_unused) | 925 | struct ethtool_rxnfc *info, void *rules __always_unused) |
926 | { | 926 | { |
927 | struct efx_nic *efx = netdev_priv(net_dev); | 927 | struct efx_nic *efx = netdev_priv(net_dev); |
928 | 928 | ||
929 | switch (info->cmd) { | 929 | switch (info->cmd) { |
930 | case ETHTOOL_GRXRINGS: | 930 | case ETHTOOL_GRXRINGS: |
931 | info->data = efx->n_rx_channels; | 931 | info->data = efx->n_rx_channels; |
932 | return 0; | 932 | return 0; |
933 | 933 | ||
934 | case ETHTOOL_GRXFH: { | 934 | case ETHTOOL_GRXFH: { |
935 | unsigned min_revision = 0; | 935 | unsigned min_revision = 0; |
936 | 936 | ||
937 | info->data = 0; | 937 | info->data = 0; |
938 | switch (info->flow_type) { | 938 | switch (info->flow_type) { |
939 | case TCP_V4_FLOW: | 939 | case TCP_V4_FLOW: |
940 | info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; | 940 | info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; |
941 | /* fall through */ | 941 | /* fall through */ |
942 | case UDP_V4_FLOW: | 942 | case UDP_V4_FLOW: |
943 | case SCTP_V4_FLOW: | 943 | case SCTP_V4_FLOW: |
944 | case AH_ESP_V4_FLOW: | 944 | case AH_ESP_V4_FLOW: |
945 | case IPV4_FLOW: | 945 | case IPV4_FLOW: |
946 | info->data |= RXH_IP_SRC | RXH_IP_DST; | 946 | info->data |= RXH_IP_SRC | RXH_IP_DST; |
947 | min_revision = EFX_REV_FALCON_B0; | 947 | min_revision = EFX_REV_FALCON_B0; |
948 | break; | 948 | break; |
949 | case TCP_V6_FLOW: | 949 | case TCP_V6_FLOW: |
950 | info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; | 950 | info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; |
951 | /* fall through */ | 951 | /* fall through */ |
952 | case UDP_V6_FLOW: | 952 | case UDP_V6_FLOW: |
953 | case SCTP_V6_FLOW: | 953 | case SCTP_V6_FLOW: |
954 | case AH_ESP_V6_FLOW: | 954 | case AH_ESP_V6_FLOW: |
955 | case IPV6_FLOW: | 955 | case IPV6_FLOW: |
956 | info->data |= RXH_IP_SRC | RXH_IP_DST; | 956 | info->data |= RXH_IP_SRC | RXH_IP_DST; |
957 | min_revision = EFX_REV_SIENA_A0; | 957 | min_revision = EFX_REV_SIENA_A0; |
958 | break; | 958 | break; |
959 | default: | 959 | default: |
960 | break; | 960 | break; |
961 | } | 961 | } |
962 | if (efx_nic_rev(efx) < min_revision) | 962 | if (efx_nic_rev(efx) < min_revision) |
963 | info->data = 0; | 963 | info->data = 0; |
964 | return 0; | 964 | return 0; |
965 | } | 965 | } |
966 | 966 | ||
967 | default: | 967 | default: |
968 | return -EOPNOTSUPP; | 968 | return -EOPNOTSUPP; |
969 | } | 969 | } |
970 | } | 970 | } |
971 | 971 | ||
972 | static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, | 972 | static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev, |
973 | struct ethtool_rx_ntuple *ntuple) | 973 | struct ethtool_rx_ntuple *ntuple) |
974 | { | 974 | { |
975 | struct efx_nic *efx = netdev_priv(net_dev); | 975 | struct efx_nic *efx = netdev_priv(net_dev); |
976 | struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec; | 976 | struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec; |
977 | struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec; | 977 | struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec; |
978 | struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; | 978 | struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; |
979 | struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; | 979 | struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; |
980 | struct efx_filter_spec filter; | 980 | struct efx_filter_spec filter; |
981 | 981 | ||
982 | /* Range-check action */ | 982 | /* Range-check action */ |
983 | if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || | 983 | if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || |
984 | ntuple->fs.action >= (s32)efx->n_rx_channels) | 984 | ntuple->fs.action >= (s32)efx->n_rx_channels) |
985 | return -EINVAL; | 985 | return -EINVAL; |
986 | 986 | ||
987 | if (~ntuple->fs.data_mask) | 987 | if (~ntuple->fs.data_mask) |
988 | return -EINVAL; | 988 | return -EINVAL; |
989 | 989 | ||
990 | switch (ntuple->fs.flow_type) { | 990 | switch (ntuple->fs.flow_type) { |
991 | case TCP_V4_FLOW: | 991 | case TCP_V4_FLOW: |
992 | case UDP_V4_FLOW: | 992 | case UDP_V4_FLOW: |
993 | /* Must match all of destination, */ | 993 | /* Must match all of destination, */ |
994 | if (ip_mask->ip4dst | ip_mask->pdst) | 994 | if (ip_mask->ip4dst | ip_mask->pdst) |
995 | return -EINVAL; | 995 | return -EINVAL; |
996 | /* all or none of source, */ | 996 | /* all or none of source, */ |
997 | if ((ip_mask->ip4src | ip_mask->psrc) && | 997 | if ((ip_mask->ip4src | ip_mask->psrc) && |
998 | ((__force u32)~ip_mask->ip4src | | 998 | ((__force u32)~ip_mask->ip4src | |
999 | (__force u16)~ip_mask->psrc)) | 999 | (__force u16)~ip_mask->psrc)) |
1000 | return -EINVAL; | 1000 | return -EINVAL; |
1001 | /* and nothing else */ | 1001 | /* and nothing else */ |
1002 | if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) | 1002 | if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) |
1003 | return -EINVAL; | 1003 | return -EINVAL; |
1004 | break; | 1004 | break; |
1005 | case ETHER_FLOW: | 1005 | case ETHER_FLOW: |
1006 | /* Must match all of destination, */ | 1006 | /* Must match all of destination, */ |
1007 | if (!is_zero_ether_addr(mac_mask->h_dest)) | 1007 | if (!is_zero_ether_addr(mac_mask->h_dest)) |
1008 | return -EINVAL; | 1008 | return -EINVAL; |
1009 | /* all or none of VID, */ | 1009 | /* all or none of VID, */ |
1010 | if (ntuple->fs.vlan_tag_mask != 0xf000 && | 1010 | if (ntuple->fs.vlan_tag_mask != 0xf000 && |
1011 | ntuple->fs.vlan_tag_mask != 0xffff) | 1011 | ntuple->fs.vlan_tag_mask != 0xffff) |
1012 | return -EINVAL; | 1012 | return -EINVAL; |
1013 | /* and nothing else */ | 1013 | /* and nothing else */ |
1014 | if (!is_broadcast_ether_addr(mac_mask->h_source) || | 1014 | if (!is_broadcast_ether_addr(mac_mask->h_source) || |
1015 | mac_mask->h_proto != htons(0xffff)) | 1015 | mac_mask->h_proto != htons(0xffff)) |
1016 | return -EINVAL; | 1016 | return -EINVAL; |
1017 | break; | 1017 | break; |
1018 | default: | 1018 | default: |
1019 | return -EINVAL; | 1019 | return -EINVAL; |
1020 | } | 1020 | } |
1021 | 1021 | ||
1022 | filter.priority = EFX_FILTER_PRI_MANUAL; | 1022 | filter.priority = EFX_FILTER_PRI_MANUAL; |
1023 | filter.flags = 0; | 1023 | filter.flags = 0; |
1024 | 1024 | ||
1025 | switch (ntuple->fs.flow_type) { | 1025 | switch (ntuple->fs.flow_type) { |
1026 | case TCP_V4_FLOW: | 1026 | case TCP_V4_FLOW: |
1027 | if (!ip_mask->ip4src) | 1027 | if (!ip_mask->ip4src) |
1028 | efx_filter_set_rx_tcp_full(&filter, | 1028 | efx_filter_set_rx_tcp_full(&filter, |
1029 | htonl(ip_entry->ip4src), | 1029 | htonl(ip_entry->ip4src), |
1030 | htons(ip_entry->psrc), | 1030 | htons(ip_entry->psrc), |
1031 | htonl(ip_entry->ip4dst), | 1031 | htonl(ip_entry->ip4dst), |
1032 | htons(ip_entry->pdst)); | 1032 | htons(ip_entry->pdst)); |
1033 | else | 1033 | else |
1034 | efx_filter_set_rx_tcp_wild(&filter, | 1034 | efx_filter_set_rx_tcp_wild(&filter, |
1035 | htonl(ip_entry->ip4dst), | 1035 | htonl(ip_entry->ip4dst), |
1036 | htons(ip_entry->pdst)); | 1036 | htons(ip_entry->pdst)); |
1037 | break; | 1037 | break; |
1038 | case UDP_V4_FLOW: | 1038 | case UDP_V4_FLOW: |
1039 | if (!ip_mask->ip4src) | 1039 | if (!ip_mask->ip4src) |
1040 | efx_filter_set_rx_udp_full(&filter, | 1040 | efx_filter_set_rx_udp_full(&filter, |
1041 | htonl(ip_entry->ip4src), | 1041 | htonl(ip_entry->ip4src), |
1042 | htons(ip_entry->psrc), | 1042 | htons(ip_entry->psrc), |
1043 | htonl(ip_entry->ip4dst), | 1043 | htonl(ip_entry->ip4dst), |
1044 | htons(ip_entry->pdst)); | 1044 | htons(ip_entry->pdst)); |
1045 | else | 1045 | else |
1046 | efx_filter_set_rx_udp_wild(&filter, | 1046 | efx_filter_set_rx_udp_wild(&filter, |
1047 | htonl(ip_entry->ip4dst), | 1047 | htonl(ip_entry->ip4dst), |
1048 | htons(ip_entry->pdst)); | 1048 | htons(ip_entry->pdst)); |
1049 | break; | 1049 | break; |
1050 | case ETHER_FLOW: | 1050 | case ETHER_FLOW: |
1051 | if (ntuple->fs.vlan_tag_mask == 0xf000) | 1051 | if (ntuple->fs.vlan_tag_mask == 0xf000) |
1052 | efx_filter_set_rx_mac_full(&filter, | 1052 | efx_filter_set_rx_mac_full(&filter, |
1053 | ntuple->fs.vlan_tag & 0xfff, | 1053 | ntuple->fs.vlan_tag & 0xfff, |
1054 | mac_entry->h_dest); | 1054 | mac_entry->h_dest); |
1055 | else | 1055 | else |
1056 | efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest); | 1056 | efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest); |
1057 | break; | 1057 | break; |
1058 | } | 1058 | } |
1059 | 1059 | ||
1060 | if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) { | 1060 | if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) { |
1061 | return efx_filter_remove_filter(efx, &filter); | 1061 | return efx_filter_remove_filter(efx, &filter); |
1062 | } else { | 1062 | } else { |
1063 | if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) | 1063 | if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) |
1064 | filter.dmaq_id = 0xfff; | 1064 | filter.dmaq_id = 0xfff; |
1065 | else | 1065 | else |
1066 | filter.dmaq_id = ntuple->fs.action; | 1066 | filter.dmaq_id = ntuple->fs.action; |
1067 | return efx_filter_insert_filter(efx, &filter, true); | 1067 | return efx_filter_insert_filter(efx, &filter, true); |
1068 | } | 1068 | } |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, | 1071 | static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, |
1072 | struct ethtool_rxfh_indir *indir) | 1072 | struct ethtool_rxfh_indir *indir) |
1073 | { | 1073 | { |
1074 | struct efx_nic *efx = netdev_priv(net_dev); | 1074 | struct efx_nic *efx = netdev_priv(net_dev); |
1075 | size_t copy_size = | 1075 | size_t copy_size = |
1076 | min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table)); | 1076 | min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table)); |
1077 | 1077 | ||
1078 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | 1078 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) |
1079 | return -EOPNOTSUPP; | 1079 | return -EOPNOTSUPP; |
1080 | 1080 | ||
1081 | indir->size = ARRAY_SIZE(efx->rx_indir_table); | 1081 | indir->size = ARRAY_SIZE(efx->rx_indir_table); |
1082 | memcpy(indir->ring_index, efx->rx_indir_table, | 1082 | memcpy(indir->ring_index, efx->rx_indir_table, |
1083 | copy_size * sizeof(indir->ring_index[0])); | 1083 | copy_size * sizeof(indir->ring_index[0])); |
1084 | return 0; | 1084 | return 0; |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, | 1087 | static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev, |
1088 | const struct ethtool_rxfh_indir *indir) | 1088 | const struct ethtool_rxfh_indir *indir) |
1089 | { | 1089 | { |
1090 | struct efx_nic *efx = netdev_priv(net_dev); | 1090 | struct efx_nic *efx = netdev_priv(net_dev); |
1091 | size_t i; | 1091 | size_t i; |
1092 | 1092 | ||
1093 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | 1093 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) |
1094 | return -EOPNOTSUPP; | 1094 | return -EOPNOTSUPP; |
1095 | 1095 | ||
1096 | /* Validate size and indices */ | 1096 | /* Validate size and indices */ |
1097 | if (indir->size != ARRAY_SIZE(efx->rx_indir_table)) | 1097 | if (indir->size != ARRAY_SIZE(efx->rx_indir_table)) |
1098 | return -EINVAL; | 1098 | return -EINVAL; |
1099 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) | 1099 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) |
1100 | if (indir->ring_index[i] >= efx->n_rx_channels) | 1100 | if (indir->ring_index[i] >= efx->n_rx_channels) |
1101 | return -EINVAL; | 1101 | return -EINVAL; |
1102 | 1102 | ||
1103 | memcpy(efx->rx_indir_table, indir->ring_index, | 1103 | memcpy(efx->rx_indir_table, indir->ring_index, |
1104 | sizeof(efx->rx_indir_table)); | 1104 | sizeof(efx->rx_indir_table)); |
1105 | efx_nic_push_rx_indir_table(efx); | 1105 | efx_nic_push_rx_indir_table(efx); |
1106 | return 0; | 1106 | return 0; |
1107 | } | 1107 | } |
1108 | 1108 | ||
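set_rxfh_indir() above insists that the table is exactly ARRAY_SIZE(efx->rx_indir_table) entries and that every entry names an existing RX channel. A caller wanting an even RSS spread would typically fill it round-robin; a standalone sketch under those assumptions (the table size and channel count here are arbitrary, not the hardware's):

    #include <stdio.h>

    #define TABLE_SIZE 128

    int main(void)
    {
        unsigned int table[TABLE_SIZE];
        unsigned int n_rx_channels = 4;
        unsigned int i;

        for (i = 0; i < TABLE_SIZE; i++)
            table[i] = i % n_rx_channels;   /* round-robin spread */

        printf("entry 0 -> ring %u, entry 5 -> ring %u\n",
               table[0], table[5]);
        return 0;
    }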
1109 | const struct ethtool_ops efx_ethtool_ops = { | 1109 | const struct ethtool_ops efx_ethtool_ops = { |
1110 | .get_settings = efx_ethtool_get_settings, | 1110 | .get_settings = efx_ethtool_get_settings, |
1111 | .set_settings = efx_ethtool_set_settings, | 1111 | .set_settings = efx_ethtool_set_settings, |
1112 | .get_drvinfo = efx_ethtool_get_drvinfo, | 1112 | .get_drvinfo = efx_ethtool_get_drvinfo, |
1113 | .get_regs_len = efx_ethtool_get_regs_len, | 1113 | .get_regs_len = efx_ethtool_get_regs_len, |
1114 | .get_regs = efx_ethtool_get_regs, | 1114 | .get_regs = efx_ethtool_get_regs, |
1115 | .get_msglevel = efx_ethtool_get_msglevel, | 1115 | .get_msglevel = efx_ethtool_get_msglevel, |
1116 | .set_msglevel = efx_ethtool_set_msglevel, | 1116 | .set_msglevel = efx_ethtool_set_msglevel, |
1117 | .nway_reset = efx_ethtool_nway_reset, | 1117 | .nway_reset = efx_ethtool_nway_reset, |
1118 | .get_link = efx_ethtool_get_link, | 1118 | .get_link = efx_ethtool_get_link, |
1119 | .get_eeprom_len = efx_ethtool_get_eeprom_len, | 1119 | .get_eeprom_len = efx_ethtool_get_eeprom_len, |
1120 | .get_eeprom = efx_ethtool_get_eeprom, | 1120 | .get_eeprom = efx_ethtool_get_eeprom, |
1121 | .set_eeprom = efx_ethtool_set_eeprom, | 1121 | .set_eeprom = efx_ethtool_set_eeprom, |
1122 | .get_coalesce = efx_ethtool_get_coalesce, | 1122 | .get_coalesce = efx_ethtool_get_coalesce, |
1123 | .set_coalesce = efx_ethtool_set_coalesce, | 1123 | .set_coalesce = efx_ethtool_set_coalesce, |
1124 | .get_ringparam = efx_ethtool_get_ringparam, | 1124 | .get_ringparam = efx_ethtool_get_ringparam, |
1125 | .set_ringparam = efx_ethtool_set_ringparam, | 1125 | .set_ringparam = efx_ethtool_set_ringparam, |
1126 | .get_pauseparam = efx_ethtool_get_pauseparam, | 1126 | .get_pauseparam = efx_ethtool_get_pauseparam, |
1127 | .set_pauseparam = efx_ethtool_set_pauseparam, | 1127 | .set_pauseparam = efx_ethtool_set_pauseparam, |
1128 | .get_rx_csum = efx_ethtool_get_rx_csum, | 1128 | .get_rx_csum = efx_ethtool_get_rx_csum, |
1129 | .set_rx_csum = efx_ethtool_set_rx_csum, | 1129 | .set_rx_csum = efx_ethtool_set_rx_csum, |
1130 | .get_tx_csum = ethtool_op_get_tx_csum, | 1130 | .get_tx_csum = ethtool_op_get_tx_csum, |
1131 | /* Need to enable/disable IPv6 too */ | 1131 | /* Need to enable/disable IPv6 too */ |
1132 | .set_tx_csum = efx_ethtool_set_tx_csum, | 1132 | .set_tx_csum = efx_ethtool_set_tx_csum, |
1133 | .get_sg = ethtool_op_get_sg, | 1133 | .get_sg = ethtool_op_get_sg, |
1134 | .set_sg = ethtool_op_set_sg, | 1134 | .set_sg = ethtool_op_set_sg, |
1135 | .get_tso = ethtool_op_get_tso, | 1135 | .get_tso = ethtool_op_get_tso, |
1136 | /* Need to enable/disable TSO-IPv6 too */ | 1136 | /* Need to enable/disable TSO-IPv6 too */ |
1137 | .set_tso = efx_ethtool_set_tso, | 1137 | .set_tso = efx_ethtool_set_tso, |
1138 | .get_flags = ethtool_op_get_flags, | 1138 | .get_flags = ethtool_op_get_flags, |
1139 | .set_flags = efx_ethtool_set_flags, | 1139 | .set_flags = efx_ethtool_set_flags, |
1140 | .get_sset_count = efx_ethtool_get_sset_count, | 1140 | .get_sset_count = efx_ethtool_get_sset_count, |
1141 | .self_test = efx_ethtool_self_test, | 1141 | .self_test = efx_ethtool_self_test, |
1142 | .get_strings = efx_ethtool_get_strings, | 1142 | .get_strings = efx_ethtool_get_strings, |
1143 | .phys_id = efx_ethtool_phys_id, | 1143 | .phys_id = efx_ethtool_phys_id, |
1144 | .get_ethtool_stats = efx_ethtool_get_stats, | 1144 | .get_ethtool_stats = efx_ethtool_get_stats, |
1145 | .get_wol = efx_ethtool_get_wol, | 1145 | .get_wol = efx_ethtool_get_wol, |
1146 | .set_wol = efx_ethtool_set_wol, | 1146 | .set_wol = efx_ethtool_set_wol, |
1147 | .reset = efx_ethtool_reset, | 1147 | .reset = efx_ethtool_reset, |
1148 | .get_rxnfc = efx_ethtool_get_rxnfc, | 1148 | .get_rxnfc = efx_ethtool_get_rxnfc, |
1149 | .set_rx_ntuple = efx_ethtool_set_rx_ntuple, | 1149 | .set_rx_ntuple = efx_ethtool_set_rx_ntuple, |
1150 | .get_rxfh_indir = efx_ethtool_get_rxfh_indir, | 1150 | .get_rxfh_indir = efx_ethtool_get_rxfh_indir, |
1151 | .set_rxfh_indir = efx_ethtool_set_rxfh_indir, | 1151 | .set_rxfh_indir = efx_ethtool_set_rxfh_indir, |
1152 | }; | 1152 | }; |
1153 | 1153 |
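Everything in efx_ethtool_ops is reached from userspace through the SIOCETHTOOL ioctl; for instance ETHTOOL_GLINK lands in efx_ethtool_get_link() above. A small userspace example of that path (the interface name "eth0" is assumed; error handling trimmed for brevity):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
        struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_data = (char *)&ev;

        /* Kernel routes this to the driver's .get_link op. */
        if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
            printf("link is %s\n", ev.data ? "up" : "down");
        if (fd >= 0)
            close(fd);
        return 0;
    }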
drivers/net/sfc/falcon_xmac.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2009 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/delay.h> | 11 | #include <linux/delay.h> |
12 | #include "net_driver.h" | 12 | #include "net_driver.h" |
13 | #include "efx.h" | 13 | #include "efx.h" |
14 | #include "nic.h" | 14 | #include "nic.h" |
15 | #include "regs.h" | 15 | #include "regs.h" |
16 | #include "io.h" | 16 | #include "io.h" |
17 | #include "mac.h" | 17 | #include "mac.h" |
18 | #include "mdio_10g.h" | 18 | #include "mdio_10g.h" |
19 | #include "phy.h" | 19 | #include "phy.h" |
20 | #include "workarounds.h" | 20 | #include "workarounds.h" |
21 | 21 | ||
22 | /************************************************************************** | 22 | /************************************************************************** |
23 | * | 23 | * |
24 | * MAC operations | 24 | * MAC operations |
25 | * | 25 | * |
26 | *************************************************************************/ | 26 | *************************************************************************/ |
27 | 27 | ||
28 | /* Configure the XAUI driver that is an output from Falcon */ | 28 | /* Configure the XAUI driver that is an output from Falcon */ |
29 | void falcon_setup_xaui(struct efx_nic *efx) | 29 | void falcon_setup_xaui(struct efx_nic *efx) |
30 | { | 30 | { |
31 | efx_oword_t sdctl, txdrv; | 31 | efx_oword_t sdctl, txdrv; |
32 | 32 | ||
33 | /* Move the XAUI into low power, unless there is no PHY, in | 33 | /* Move the XAUI into low power, unless there is no PHY, in |
34 | * which case the XAUI will have to drive a cable. */ | 34 | * which case the XAUI will have to drive a cable. */ |
35 | if (efx->phy_type == PHY_TYPE_NONE) | 35 | if (efx->phy_type == PHY_TYPE_NONE) |
36 | return; | 36 | return; |
37 | 37 | ||
38 | efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL); | 38 | efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL); |
39 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF); | 39 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF); |
40 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF); | 40 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF); |
41 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF); | 41 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF); |
42 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF); | 42 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF); |
43 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF); | 43 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF); |
44 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF); | 44 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF); |
45 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF); | 45 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF); |
46 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF); | 46 | EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF); |
47 | efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL); | 47 | efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL); |
48 | 48 | ||
49 | EFX_POPULATE_OWORD_8(txdrv, | 49 | EFX_POPULATE_OWORD_8(txdrv, |
50 | FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF, | 50 | FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF, |
51 | FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF, | 51 | FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF, |
52 | FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF, | 52 | FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF, |
53 | FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF, | 53 | FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF, |
54 | FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF, | 54 | FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF, |
55 | FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF, | 55 | FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF, |
56 | FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF, | 56 | FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF, |
57 | FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF); | 57 | FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF); |
58 | efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL); | 58 | efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL); |
59 | } | 59 | } |
60 | 60 | ||
61 | int falcon_reset_xaui(struct efx_nic *efx) | 61 | int falcon_reset_xaui(struct efx_nic *efx) |
62 | { | 62 | { |
63 | struct falcon_nic_data *nic_data = efx->nic_data; | 63 | struct falcon_nic_data *nic_data = efx->nic_data; |
64 | efx_oword_t reg; | 64 | efx_oword_t reg; |
65 | int count; | 65 | int count; |
66 | 66 | ||
67 | /* Don't fetch MAC statistics over an XMAC reset */ | 67 | /* Don't fetch MAC statistics over an XMAC reset */ |
68 | WARN_ON(nic_data->stats_disable_count == 0); | 68 | WARN_ON(nic_data->stats_disable_count == 0); |
69 | 69 | ||
70 | /* Start reset sequence */ | 70 | /* Start reset sequence */ |
71 | EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1); | 71 | EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1); |
72 | efx_writeo(efx, &reg, FR_AB_XX_PWR_RST); | 72 | efx_writeo(efx, &reg, FR_AB_XX_PWR_RST); |
73 | 73 | ||
74 | /* Wait up to 10 ms for completion, then reinitialise */ | 74 | /* Wait up to 10 ms for completion, then reinitialise */ |
75 | for (count = 0; count < 1000; count++) { | 75 | for (count = 0; count < 1000; count++) { |
76 | efx_reado(efx, &reg, FR_AB_XX_PWR_RST); | 76 | efx_reado(efx, &reg, FR_AB_XX_PWR_RST); |
77 | if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 && | 77 | if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 && |
78 | EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) { | 78 | EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) { |
79 | falcon_setup_xaui(efx); | 79 | falcon_setup_xaui(efx); |
80 | return 0; | 80 | return 0; |
81 | } | 81 | } |
82 | udelay(10); | 82 | udelay(10); |
83 | } | 83 | } |
84 | netif_err(efx, hw, efx->net_dev, | 84 | netif_err(efx, hw, efx->net_dev, |
85 | "timed out waiting for XAUI/XGXS reset\n"); | 85 | "timed out waiting for XAUI/XGXS reset\n"); |
86 | return -ETIMEDOUT; | 86 | return -ETIMEDOUT; |
87 | } | 87 | } |
88 | 88 | ||
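The reset poll in falcon_reset_xaui() budgets 1000 iterations of udelay(10), i.e. 10,000 us, which is exactly the 10 ms the comment promises. The same poll-or-timeout shape in a standalone sketch (done() stands in for the hardware status read, and the delay is elided):

    #include <stdbool.h>
    #include <stdio.h>

    static bool done(int tick) { return tick >= 3; }   /* fake hardware */

    static int poll_until(int max_iters)
    {
        int count;

        for (count = 0; count < max_iters; count++) {
            if (done(count))
                return 0;       /* completed within the budget */
            /* udelay(10) would sit here in the driver */
        }
        return -1;              /* -ETIMEDOUT in the driver */
    }

    int main(void)
    {
        printf("poll result: %d\n", poll_until(1000));
        return 0;
    }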
89 | static void falcon_ack_status_intr(struct efx_nic *efx) | 89 | static void falcon_ack_status_intr(struct efx_nic *efx) |
90 | { | 90 | { |
91 | efx_oword_t reg; | 91 | efx_oword_t reg; |
92 | 92 | ||
93 | if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) | 93 | if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) |
94 | return; | 94 | return; |
95 | 95 | ||
96 | /* We expect xgmii faults if the wireside link is down */ | 96 | /* We expect xgmii faults if the wireside link is down */ |
97 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up) | 97 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up) |
98 | return; | 98 | return; |
99 | 99 | ||
100 | /* We can only use this interrupt to signal the negative edge of | 100 | /* We can only use this interrupt to signal the negative edge of |
101 | * xaui_align [we have to poll the positive edge]. */ | 101 | * xaui_align [we have to poll the positive edge]. */ |
102 | if (efx->xmac_poll_required) | 102 | if (efx->xmac_poll_required) |
103 | return; | 103 | return; |
104 | 104 | ||
105 | efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK); | 105 | efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK); |
106 | } | 106 | } |
107 | 107 | ||
108 | static bool falcon_xgxs_link_ok(struct efx_nic *efx) | 108 | static bool falcon_xgxs_link_ok(struct efx_nic *efx) |
109 | { | 109 | { |
110 | efx_oword_t reg; | 110 | efx_oword_t reg; |
111 | bool align_done, link_ok = false; | 111 | bool align_done, link_ok = false; |
112 | int sync_status; | 112 | int sync_status; |
113 | 113 | ||
114 | /* Read link status */ | 114 | /* Read link status */ |
115 | efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); | 115 | efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); |
116 | 116 | ||
117 | align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE); | 117 | align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE); |
118 | sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT); | 118 | sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT); |
119 | if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES)) | 119 | if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES)) |
120 | link_ok = true; | 120 | link_ok = true; |
121 | 121 | ||
122 | /* Clear link status ready for next read */ | 122 | /* Clear link status ready for next read */ |
123 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES); | 123 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES); |
124 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES); | 124 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES); |
125 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); | 125 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); |
126 | efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); | 126 | efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); |
127 | 127 | ||
128 | return link_ok; | 128 | return link_ok; |
129 | } | 129 | } |
130 | 130 | ||
131 | static bool falcon_xmac_link_ok(struct efx_nic *efx) | 131 | static bool falcon_xmac_link_ok(struct efx_nic *efx) |
132 | { | 132 | { |
133 | /* | 133 | /* |
134 | * Check MAC's XGXS link status except when using XGMII loopback | 134 | * Check MAC's XGXS link status except when using XGMII loopback |
135 | * which bypasses the XGXS block. | 135 | * which bypasses the XGXS block. |
136 | * If possible, check PHY's XGXS link status except when using | 136 | * If possible, check PHY's XGXS link status except when using |
137 | * MAC loopback. | 137 | * MAC loopback. |
138 | */ | 138 | */ |
139 | return (efx->loopback_mode == LOOPBACK_XGMII || | 139 | return (efx->loopback_mode == LOOPBACK_XGMII || |
140 | falcon_xgxs_link_ok(efx)) && | 140 | falcon_xgxs_link_ok(efx)) && |
141 | (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || | 141 | (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || |
142 | LOOPBACK_INTERNAL(efx) || | 142 | LOOPBACK_INTERNAL(efx) || |
143 | efx_mdio_phyxgxs_lane_sync(efx)); | 143 | efx_mdio_phyxgxs_lane_sync(efx)); |
144 | } | 144 | } |
145 | 145 | ||
146 | void falcon_reconfigure_xmac_core(struct efx_nic *efx) | 146 | static void falcon_reconfigure_xmac_core(struct efx_nic *efx) |
147 | { | 147 | { |
148 | unsigned int max_frame_len; | 148 | unsigned int max_frame_len; |
149 | efx_oword_t reg; | 149 | efx_oword_t reg; |
150 | bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX); | 150 | bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX); |
151 | bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX); | 151 | bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX); |
152 | 152 | ||
153 | /* Configure MAC - cut-thru mode is hard wired on */ | 153 | /* Configure MAC - cut-thru mode is hard wired on */ |
154 | EFX_POPULATE_OWORD_3(reg, | 154 | EFX_POPULATE_OWORD_3(reg, |
155 | FRF_AB_XM_RX_JUMBO_MODE, 1, | 155 | FRF_AB_XM_RX_JUMBO_MODE, 1, |
156 | FRF_AB_XM_TX_STAT_EN, 1, | 156 | FRF_AB_XM_TX_STAT_EN, 1, |
157 | FRF_AB_XM_RX_STAT_EN, 1); | 157 | FRF_AB_XM_RX_STAT_EN, 1); |
158 | efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG); | 158 | efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG); |
159 | 159 | ||
160 | /* Configure TX */ | 160 | /* Configure TX */ |
161 | EFX_POPULATE_OWORD_6(reg, | 161 | EFX_POPULATE_OWORD_6(reg, |
162 | FRF_AB_XM_TXEN, 1, | 162 | FRF_AB_XM_TXEN, 1, |
163 | FRF_AB_XM_TX_PRMBL, 1, | 163 | FRF_AB_XM_TX_PRMBL, 1, |
164 | FRF_AB_XM_AUTO_PAD, 1, | 164 | FRF_AB_XM_AUTO_PAD, 1, |
165 | FRF_AB_XM_TXCRC, 1, | 165 | FRF_AB_XM_TXCRC, 1, |
166 | FRF_AB_XM_FCNTL, tx_fc, | 166 | FRF_AB_XM_FCNTL, tx_fc, |
167 | FRF_AB_XM_IPG, 0x3); | 167 | FRF_AB_XM_IPG, 0x3); |
168 | efx_writeo(efx, &reg, FR_AB_XM_TX_CFG); | 168 | efx_writeo(efx, &reg, FR_AB_XM_TX_CFG); |
169 | 169 | ||
170 | /* Configure RX */ | 170 | /* Configure RX */ |
171 | EFX_POPULATE_OWORD_5(reg, | 171 | EFX_POPULATE_OWORD_5(reg, |
172 | FRF_AB_XM_RXEN, 1, | 172 | FRF_AB_XM_RXEN, 1, |
173 | FRF_AB_XM_AUTO_DEPAD, 0, | 173 | FRF_AB_XM_AUTO_DEPAD, 0, |
174 | FRF_AB_XM_ACPT_ALL_MCAST, 1, | 174 | FRF_AB_XM_ACPT_ALL_MCAST, 1, |
175 | FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous, | 175 | FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous, |
176 | FRF_AB_XM_PASS_CRC_ERR, 1); | 176 | FRF_AB_XM_PASS_CRC_ERR, 1); |
177 | efx_writeo(efx, &reg, FR_AB_XM_RX_CFG); | 177 | efx_writeo(efx, &reg, FR_AB_XM_RX_CFG); |
178 | 178 | ||
179 | /* Set frame length */ | 179 | /* Set frame length */ |
180 | max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); | 180 | max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); |
181 | EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len); | 181 | EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len); |
182 | efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM); | 182 | efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM); |
183 | EFX_POPULATE_OWORD_2(reg, | 183 | EFX_POPULATE_OWORD_2(reg, |
184 | FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len, | 184 | FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len, |
185 | FRF_AB_XM_TX_JUMBO_MODE, 1); | 185 | FRF_AB_XM_TX_JUMBO_MODE, 1); |
186 | efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM); | 186 | efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM); |
187 | 187 | ||
188 | EFX_POPULATE_OWORD_2(reg, | 188 | EFX_POPULATE_OWORD_2(reg, |
189 | FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ | 189 | FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ |
190 | FRF_AB_XM_DIS_FCNTL, !rx_fc); | 190 | FRF_AB_XM_DIS_FCNTL, !rx_fc); |
191 | efx_writeo(efx, &reg, FR_AB_XM_FC); | 191 | efx_writeo(efx, &reg, FR_AB_XM_FC); |
192 | 192 | ||
193 | /* Set MAC address */ | 193 | /* Set MAC address */ |
194 | memcpy(&reg, &efx->net_dev->dev_addr[0], 4); | 194 | memcpy(&reg, &efx->net_dev->dev_addr[0], 4); |
195 | efx_writeo(efx, &reg, FR_AB_XM_ADR_LO); | 195 | efx_writeo(efx, &reg, FR_AB_XM_ADR_LO); |
196 | memcpy(&reg, &efx->net_dev->dev_addr[4], 2); | 196 | memcpy(&reg, &efx->net_dev->dev_addr[4], 2); |
197 | efx_writeo(efx, &reg, FR_AB_XM_ADR_HI); | 197 | efx_writeo(efx, &reg, FR_AB_XM_ADR_HI); |
198 | } | 198 | } |
199 | 199 | ||
200 | static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) | 200 | static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) |
201 | { | 201 | { |
202 | efx_oword_t reg; | 202 | efx_oword_t reg; |
203 | bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS); | 203 | bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS); |
204 | bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI); | 204 | bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI); |
205 | bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII); | 205 | bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII); |
206 | 206 | ||
207 | /* XGXS block is flaky and will need to be reset if moving | 207 | /* XGXS block is flaky and will need to be reset if moving |
208 | * into or out of XGMII, XGXS or XAUI loopbacks. */ | 208 | * into or out of XGMII, XGXS or XAUI loopbacks. */ |
209 | if (EFX_WORKAROUND_5147(efx)) { | 209 | if (EFX_WORKAROUND_5147(efx)) { |
210 | bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; | 210 | bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; |
211 | bool reset_xgxs; | 211 | bool reset_xgxs; |
212 | 212 | ||
213 | efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); | 213 | efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); |
214 | old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN); | 214 | old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN); |
215 | old_xgmii_loopback = | 215 | old_xgmii_loopback = |
216 | EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN); | 216 | EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN); |
217 | 217 | ||
218 | efx_reado(efx, &reg, FR_AB_XX_SD_CTL); | 218 | efx_reado(efx, &reg, FR_AB_XX_SD_CTL); |
219 | old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA); | 219 | old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA); |
220 | 220 | ||
221 | /* The PHY driver may have turned XAUI off */ | 221 | /* The PHY driver may have turned XAUI off */ |
222 | reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || | 222 | reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || |
223 | (xaui_loopback != old_xaui_loopback) || | 223 | (xaui_loopback != old_xaui_loopback) || |
224 | (xgmii_loopback != old_xgmii_loopback)); | 224 | (xgmii_loopback != old_xgmii_loopback)); |
225 | 225 | ||
226 | if (reset_xgxs) | 226 | if (reset_xgxs) |
227 | falcon_reset_xaui(efx); | 227 | falcon_reset_xaui(efx); |
228 | } | 228 | } |
229 | 229 | ||
230 | efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); | 230 | efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); |
231 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG, | 231 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG, |
232 | (xgxs_loopback || xaui_loopback) ? | 232 | (xgxs_loopback || xaui_loopback) ? |
233 | FFE_AB_XX_FORCE_SIG_ALL_LANES : 0); | 233 | FFE_AB_XX_FORCE_SIG_ALL_LANES : 0); |
234 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback); | 234 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback); |
235 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback); | 235 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback); |
236 | efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); | 236 | efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); |
237 | 237 | ||
238 | efx_reado(efx, &reg, FR_AB_XX_SD_CTL); | 238 | efx_reado(efx, &reg, FR_AB_XX_SD_CTL); |
239 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback); | 239 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback); |
240 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback); | 240 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback); |
241 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback); | 241 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback); |
242 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback); | 242 | EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback); |
243 | efx_writeo(efx, &reg, FR_AB_XX_SD_CTL); | 243 | efx_writeo(efx, &reg, FR_AB_XX_SD_CTL); |
244 | } | 244 | } |
245 | 245 | ||
246 | 246 | ||
247 | /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ | 247 | /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ |
248 | static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) | 248 | static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) |
249 | { | 249 | { |
250 | bool mac_up = falcon_xmac_link_ok(efx); | 250 | bool mac_up = falcon_xmac_link_ok(efx); |
251 | 251 | ||
252 | if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || | 252 | if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || |
253 | efx_phy_mode_disabled(efx->phy_mode)) | 253 | efx_phy_mode_disabled(efx->phy_mode)) |
254 | /* XAUI link is expected to be down */ | 254 | /* XAUI link is expected to be down */ |
255 | return mac_up; | 255 | return mac_up; |
256 | 256 | ||
257 | falcon_stop_nic_stats(efx); | 257 | falcon_stop_nic_stats(efx); |
258 | 258 | ||
259 | while (!mac_up && tries) { | 259 | while (!mac_up && tries) { |
260 | netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n"); | 260 | netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n"); |
261 | falcon_reset_xaui(efx); | 261 | falcon_reset_xaui(efx); |
262 | udelay(200); | 262 | udelay(200); |
263 | 263 | ||
264 | mac_up = falcon_xmac_link_ok(efx); | 264 | mac_up = falcon_xmac_link_ok(efx); |
265 | --tries; | 265 | --tries; |
266 | } | 266 | } |
267 | 267 | ||
268 | falcon_start_nic_stats(efx); | 268 | falcon_start_nic_stats(efx); |
269 | 269 | ||
270 | return mac_up; | 270 | return mac_up; |
271 | } | 271 | } |
272 | 272 | ||
273 | static bool falcon_xmac_check_fault(struct efx_nic *efx) | 273 | static bool falcon_xmac_check_fault(struct efx_nic *efx) |
274 | { | 274 | { |
275 | return !falcon_xmac_link_ok_retry(efx, 5); | 275 | return !falcon_xmac_link_ok_retry(efx, 5); |
276 | } | 276 | } |
277 | 277 | ||
278 | static int falcon_reconfigure_xmac(struct efx_nic *efx) | 278 | static int falcon_reconfigure_xmac(struct efx_nic *efx) |
279 | { | 279 | { |
280 | falcon_reconfigure_xgxs_core(efx); | 280 | falcon_reconfigure_xgxs_core(efx); |
281 | falcon_reconfigure_xmac_core(efx); | 281 | falcon_reconfigure_xmac_core(efx); |
282 | 282 | ||
283 | falcon_reconfigure_mac_wrapper(efx); | 283 | falcon_reconfigure_mac_wrapper(efx); |
284 | 284 | ||
285 | efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); | 285 | efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); |
286 | falcon_ack_status_intr(efx); | 286 | falcon_ack_status_intr(efx); |
287 | 287 | ||
288 | return 0; | 288 | return 0; |
289 | } | 289 | } |
290 | 290 | ||
291 | static void falcon_update_stats_xmac(struct efx_nic *efx) | 291 | static void falcon_update_stats_xmac(struct efx_nic *efx) |
292 | { | 292 | { |
293 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 293 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
294 | 294 | ||
295 | /* Update MAC stats from DMAed values */ | 295 | /* Update MAC stats from DMAed values */ |
296 | FALCON_STAT(efx, XgRxOctets, rx_bytes); | 296 | FALCON_STAT(efx, XgRxOctets, rx_bytes); |
297 | FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes); | 297 | FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes); |
298 | FALCON_STAT(efx, XgRxPkts, rx_packets); | 298 | FALCON_STAT(efx, XgRxPkts, rx_packets); |
299 | FALCON_STAT(efx, XgRxPktsOK, rx_good); | 299 | FALCON_STAT(efx, XgRxPktsOK, rx_good); |
300 | FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast); | 300 | FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast); |
301 | FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast); | 301 | FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast); |
302 | FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast); | 302 | FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast); |
303 | FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64); | 303 | FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64); |
304 | FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo); | 304 | FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo); |
305 | FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo); | 305 | FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo); |
306 | FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64); | 306 | FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64); |
307 | FALCON_STAT(efx, XgRxDropEvents, rx_overflow); | 307 | FALCON_STAT(efx, XgRxDropEvents, rx_overflow); |
308 | FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad); | 308 | FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad); |
309 | FALCON_STAT(efx, XgRxAlignError, rx_align_error); | 309 | FALCON_STAT(efx, XgRxAlignError, rx_align_error); |
310 | FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error); | 310 | FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error); |
311 | FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error); | 311 | FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error); |
312 | FALCON_STAT(efx, XgRxControlPkts, rx_control); | 312 | FALCON_STAT(efx, XgRxControlPkts, rx_control); |
313 | FALCON_STAT(efx, XgRxPausePkts, rx_pause); | 313 | FALCON_STAT(efx, XgRxPausePkts, rx_pause); |
314 | FALCON_STAT(efx, XgRxPkts64Octets, rx_64); | 314 | FALCON_STAT(efx, XgRxPkts64Octets, rx_64); |
315 | FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127); | 315 | FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127); |
316 | FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255); | 316 | FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255); |
317 | FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511); | 317 | FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511); |
318 | FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023); | 318 | FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023); |
319 | FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx); | 319 | FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx); |
320 | FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo); | 320 | FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo); |
321 | FALCON_STAT(efx, XgRxLengthError, rx_length_error); | 321 | FALCON_STAT(efx, XgRxLengthError, rx_length_error); |
322 | FALCON_STAT(efx, XgTxPkts, tx_packets); | 322 | FALCON_STAT(efx, XgTxPkts, tx_packets); |
323 | FALCON_STAT(efx, XgTxOctets, tx_bytes); | 323 | FALCON_STAT(efx, XgTxOctets, tx_bytes); |
324 | FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast); | 324 | FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast); |
325 | FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast); | 325 | FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast); |
326 | FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast); | 326 | FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast); |
327 | FALCON_STAT(efx, XgTxControlPkts, tx_control); | 327 | FALCON_STAT(efx, XgTxControlPkts, tx_control); |
328 | FALCON_STAT(efx, XgTxPausePkts, tx_pause); | 328 | FALCON_STAT(efx, XgTxPausePkts, tx_pause); |
329 | FALCON_STAT(efx, XgTxPkts64Octets, tx_64); | 329 | FALCON_STAT(efx, XgTxPkts64Octets, tx_64); |
330 | FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127); | 330 | FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127); |
331 | FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255); | 331 | FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255); |
332 | FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511); | 332 | FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511); |
333 | FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023); | 333 | FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023); |
334 | FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx); | 334 | FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx); |
335 | FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo); | 335 | FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo); |
336 | FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64); | 336 | FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64); |
337 | FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo); | 337 | FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo); |
338 | FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp); | 338 | FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp); |
339 | FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error); | 339 | FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error); |
340 | FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); | 340 | FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); |
341 | 341 | ||
342 | /* Update derived statistics */ | 342 | /* Update derived statistics */ |
343 | mac_stats->tx_good_bytes = | 343 | mac_stats->tx_good_bytes = |
344 | (mac_stats->tx_bytes - mac_stats->tx_bad_bytes - | 344 | (mac_stats->tx_bytes - mac_stats->tx_bad_bytes - |
345 | mac_stats->tx_control * 64); | 345 | mac_stats->tx_control * 64); |
346 | mac_stats->rx_bad_bytes = | 346 | mac_stats->rx_bad_bytes = |
347 | (mac_stats->rx_bytes - mac_stats->rx_good_bytes - | 347 | (mac_stats->rx_bytes - mac_stats->rx_good_bytes - |
348 | mac_stats->rx_control * 64); | 348 | mac_stats->rx_control * 64); |
349 | } | 349 | } |
350 | 350 | ||
351 | void falcon_poll_xmac(struct efx_nic *efx) | 351 | void falcon_poll_xmac(struct efx_nic *efx) |
352 | { | 352 | { |
353 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || | 353 | if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || |
354 | !efx->xmac_poll_required) | 354 | !efx->xmac_poll_required) |
355 | return; | 355 | return; |
356 | 356 | ||
357 | efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); | 357 | efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); |
358 | falcon_ack_status_intr(efx); | 358 | falcon_ack_status_intr(efx); |
359 | } | 359 | } |
360 | 360 | ||
361 | struct efx_mac_operations falcon_xmac_operations = { | 361 | struct efx_mac_operations falcon_xmac_operations = { |
362 | .reconfigure = falcon_reconfigure_xmac, | 362 | .reconfigure = falcon_reconfigure_xmac, |
363 | .update_stats = falcon_update_stats_xmac, | 363 | .update_stats = falcon_update_stats_xmac, |
364 | .check_fault = falcon_xmac_check_fault, | 364 | .check_fault = falcon_xmac_check_fault, |
365 | }; | 365 | }; |
366 | 366 |
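
With falcon_reconfigure_xmac_core() now static, the operations table above is the only public entry point left into the XMAC code. A minimal sketch of how a generic caller might drive it (the efx->mac_op pointer and the error handling here are illustrative assumptions, not part of this patch):

static int example_mac_reconfigure(struct efx_nic *efx)
{
	int rc;

	/* Dispatch through the ops table rather than calling the
	 * now-static falcon_reconfigure_xmac_core() directly */
	rc = efx->mac_op->reconfigure(efx);
	if (rc)
		return rc;

	/* check_fault() returns true if the XAUI link failed to come up */
	return efx->mac_op->check_fault(efx) ? -EIO : 0;
}
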
drivers/net/sfc/mac.h
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2009 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef EFX_MAC_H | 11 | #ifndef EFX_MAC_H |
12 | #define EFX_MAC_H | 12 | #define EFX_MAC_H |
13 | 13 | ||
14 | #include "net_driver.h" | 14 | #include "net_driver.h" |
15 | 15 | ||
16 | extern struct efx_mac_operations falcon_xmac_operations; | 16 | extern struct efx_mac_operations falcon_xmac_operations; |
17 | extern struct efx_mac_operations efx_mcdi_mac_operations; | 17 | extern struct efx_mac_operations efx_mcdi_mac_operations; |
18 | extern void falcon_reconfigure_xmac_core(struct efx_nic *efx); | ||
19 | extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, | 18 | extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr, |
20 | u32 dma_len, int enable, int clear); | 19 | u32 dma_len, int enable, int clear); |
21 | 20 | ||
22 | #endif | 21 | #endif |
23 | 22 |
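
The prototype removal here is the necessary counterpart to the static qualifier added in falcon_xmac.c: a function with internal linkage must not be advertised in a shared header, and dropping the declaration lets the compiler flag any out-of-file caller immediately. A stand-alone sketch of the pattern, with hypothetical names:

/* scale_mtu() has internal linkage, so it needs no header entry and
 * the compiler can warn if it becomes unused, or inline it freely */
static unsigned int scale_mtu(unsigned int mtu)
{
	return mtu + 18;		/* hypothetical framing overhead */
}

/* only this wrapper is declared extern in the corresponding header */
unsigned int example_max_frame_len(unsigned int mtu)
{
	return scale_mtu(mtu);
}
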
drivers/net/sfc/mcdi.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2008-2009 Solarflare Communications Inc. | 3 | * Copyright 2008-2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
7 | * by the Free Software Foundation, incorporated herein by reference. | 7 | * by the Free Software Foundation, incorporated herein by reference. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include "net_driver.h" | 11 | #include "net_driver.h" |
12 | #include "nic.h" | 12 | #include "nic.h" |
13 | #include "io.h" | 13 | #include "io.h" |
14 | #include "regs.h" | 14 | #include "regs.h" |
15 | #include "mcdi_pcol.h" | 15 | #include "mcdi_pcol.h" |
16 | #include "phy.h" | 16 | #include "phy.h" |
17 | 17 | ||
18 | /************************************************************************** | 18 | /************************************************************************** |
19 | * | 19 | * |
20 | * Management-Controller-to-Driver Interface | 20 | * Management-Controller-to-Driver Interface |
21 | * | 21 | * |
22 | ************************************************************************** | 22 | ************************************************************************** |
23 | */ | 23 | */ |
24 | 24 | ||
25 | /* Software-defined structure to the shared-memory */ | 25 | /* Software-defined structure to the shared-memory */ |
26 | #define CMD_NOTIFY_PORT0 0 | 26 | #define CMD_NOTIFY_PORT0 0 |
27 | #define CMD_NOTIFY_PORT1 4 | 27 | #define CMD_NOTIFY_PORT1 4 |
28 | #define CMD_PDU_PORT0 0x008 | 28 | #define CMD_PDU_PORT0 0x008 |
29 | #define CMD_PDU_PORT1 0x108 | 29 | #define CMD_PDU_PORT1 0x108 |
30 | #define REBOOT_FLAG_PORT0 0x3f8 | 30 | #define REBOOT_FLAG_PORT0 0x3f8 |
31 | #define REBOOT_FLAG_PORT1 0x3fc | 31 | #define REBOOT_FLAG_PORT1 0x3fc |
32 | 32 | ||
33 | #define MCDI_RPC_TIMEOUT 10 /* seconds */ | 33 | #define MCDI_RPC_TIMEOUT 10 /* seconds */ |
34 | 34 | ||
35 | #define MCDI_PDU(efx) \ | 35 | #define MCDI_PDU(efx) \ |
36 | (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) | 36 | (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) |
37 | #define MCDI_DOORBELL(efx) \ | 37 | #define MCDI_DOORBELL(efx) \ |
38 | (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) | 38 | (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) |
39 | #define MCDI_REBOOT_FLAG(efx) \ | 39 | #define MCDI_REBOOT_FLAG(efx) \ |
40 | (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) | 40 | (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) |
41 | 41 | ||
42 | #define SEQ_MASK \ | 42 | #define SEQ_MASK \ |
43 | EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) | 43 | EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) |
44 | 44 | ||
45 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | 45 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) |
46 | { | 46 | { |
47 | struct siena_nic_data *nic_data; | 47 | struct siena_nic_data *nic_data; |
48 | EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | 48 | EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); |
49 | nic_data = efx->nic_data; | 49 | nic_data = efx->nic_data; |
50 | return &nic_data->mcdi; | 50 | return &nic_data->mcdi; |
51 | } | 51 | } |
52 | 52 | ||
53 | void efx_mcdi_init(struct efx_nic *efx) | 53 | void efx_mcdi_init(struct efx_nic *efx) |
54 | { | 54 | { |
55 | struct efx_mcdi_iface *mcdi; | 55 | struct efx_mcdi_iface *mcdi; |
56 | 56 | ||
57 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 57 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
58 | return; | 58 | return; |
59 | 59 | ||
60 | mcdi = efx_mcdi(efx); | 60 | mcdi = efx_mcdi(efx); |
61 | init_waitqueue_head(&mcdi->wq); | 61 | init_waitqueue_head(&mcdi->wq); |
62 | spin_lock_init(&mcdi->iface_lock); | 62 | spin_lock_init(&mcdi->iface_lock); |
63 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | 63 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); |
64 | mcdi->mode = MCDI_MODE_POLL; | 64 | mcdi->mode = MCDI_MODE_POLL; |
65 | 65 | ||
66 | (void) efx_mcdi_poll_reboot(efx); | 66 | (void) efx_mcdi_poll_reboot(efx); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | 69 | static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, |
70 | const u8 *inbuf, size_t inlen) | 70 | const u8 *inbuf, size_t inlen) |
71 | { | 71 | { |
72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); | 74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); |
75 | unsigned int i; | 75 | unsigned int i; |
76 | efx_dword_t hdr; | 76 | efx_dword_t hdr; |
77 | u32 xflags, seqno; | 77 | u32 xflags, seqno; |
78 | 78 | ||
79 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | 79 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); |
80 | BUG_ON(inlen & 3 || inlen >= 0x100); | 80 | BUG_ON(inlen & 3 || inlen >= 0x100); |
81 | 81 | ||
82 | seqno = mcdi->seqno & SEQ_MASK; | 82 | seqno = mcdi->seqno & SEQ_MASK; |
83 | xflags = 0; | 83 | xflags = 0; |
84 | if (mcdi->mode == MCDI_MODE_EVENTS) | 84 | if (mcdi->mode == MCDI_MODE_EVENTS) |
85 | xflags |= MCDI_HEADER_XFLAGS_EVREQ; | 85 | xflags |= MCDI_HEADER_XFLAGS_EVREQ; |
86 | 86 | ||
87 | EFX_POPULATE_DWORD_6(hdr, | 87 | EFX_POPULATE_DWORD_6(hdr, |
88 | MCDI_HEADER_RESPONSE, 0, | 88 | MCDI_HEADER_RESPONSE, 0, |
89 | MCDI_HEADER_RESYNC, 1, | 89 | MCDI_HEADER_RESYNC, 1, |
90 | MCDI_HEADER_CODE, cmd, | 90 | MCDI_HEADER_CODE, cmd, |
91 | MCDI_HEADER_DATALEN, inlen, | 91 | MCDI_HEADER_DATALEN, inlen, |
92 | MCDI_HEADER_SEQ, seqno, | 92 | MCDI_HEADER_SEQ, seqno, |
93 | MCDI_HEADER_XFLAGS, xflags); | 93 | MCDI_HEADER_XFLAGS, xflags); |
94 | 94 | ||
95 | efx_writed(efx, &hdr, pdu); | 95 | efx_writed(efx, &hdr, pdu); |
96 | 96 | ||
97 | for (i = 0; i < inlen; i += 4) | 97 | for (i = 0; i < inlen; i += 4) |
98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); | 98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); |
99 | 99 | ||
100 | /* Ensure the payload is written out before the header */ | 100 | /* Ensure the payload is written out before the header */ |
101 | wmb(); | 101 | wmb(); |
102 | 102 | ||
103 | /* ring the doorbell with a distinctive value */ | 103 | /* ring the doorbell with a distinctive value */ |
104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); | 104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); |
105 | } | 105 | } |
106 | 106 | ||
107 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | 107 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) |
108 | { | 108 | { |
109 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 109 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
110 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 110 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
111 | int i; | 111 | int i; |
112 | 112 | ||
113 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | 113 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); |
114 | BUG_ON(outlen & 3 || outlen >= 0x100); | 114 | BUG_ON(outlen & 3 || outlen >= 0x100); |
115 | 115 | ||
116 | for (i = 0; i < outlen; i += 4) | 116 | for (i = 0; i < outlen; i += 4) |
117 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); | 117 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); |
118 | } | 118 | } |
119 | 119 | ||
120 | static int efx_mcdi_poll(struct efx_nic *efx) | 120 | static int efx_mcdi_poll(struct efx_nic *efx) |
121 | { | 121 | { |
122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
123 | unsigned int time, finish; | 123 | unsigned int time, finish; |
124 | unsigned int respseq, respcmd, error; | 124 | unsigned int respseq, respcmd, error; |
125 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); | 125 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
126 | unsigned int rc, spins; | 126 | unsigned int rc, spins; |
127 | efx_dword_t reg; | 127 | efx_dword_t reg; |
128 | 128 | ||
129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ | 129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ |
130 | rc = -efx_mcdi_poll_reboot(efx); | 130 | rc = -efx_mcdi_poll_reboot(efx); |
131 | if (rc) | 131 | if (rc) |
132 | goto out; | 132 | goto out; |
133 | 133 | ||
134 | /* Poll for completion. Poll quickly (once per us) for the 1st jiffy, | 134 | /* Poll for completion. Poll quickly (once per us) for the 1st jiffy, |
135 | * because MCDI responses are generally fast. After that, back off | 135 | * because MCDI responses are generally fast. After that, back off |
136 | * and poll once per jiffy (approximately) | 136 | * and poll once per jiffy (approximately) |
137 | */ | 137 | */ |
138 | spins = TICK_USEC; | 138 | spins = TICK_USEC; |
139 | finish = get_seconds() + MCDI_RPC_TIMEOUT; | 139 | finish = get_seconds() + MCDI_RPC_TIMEOUT; |
140 | 140 | ||
141 | while (1) { | 141 | while (1) { |
142 | if (spins != 0) { | 142 | if (spins != 0) { |
143 | --spins; | 143 | --spins; |
144 | udelay(1); | 144 | udelay(1); |
145 | } else { | 145 | } else { |
146 | schedule_timeout_uninterruptible(1); | 146 | schedule_timeout_uninterruptible(1); |
147 | } | 147 | } |
148 | 148 | ||
149 | time = get_seconds(); | 149 | time = get_seconds(); |
150 | 150 | ||
151 | rmb(); | 151 | rmb(); |
152 | efx_readd(efx, &reg, pdu); | 152 | efx_readd(efx, &reg, pdu); |
153 | 153 | ||
154 | /* All 1's indicates that shared memory is in reset (and is | 154 | /* All 1's indicates that shared memory is in reset (and is |
155 | * not a valid header). Wait for it to come out of reset before | 155 | * not a valid header). Wait for it to come out of reset before |
156 | * completing the command */ | 156 | * completing the command */ |
157 | if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && | 157 | if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && |
158 | EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) | 158 | EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) |
159 | break; | 159 | break; |
160 | 160 | ||
161 | if (time >= finish) | 161 | if (time >= finish) |
162 | return -ETIMEDOUT; | 162 | return -ETIMEDOUT; |
163 | } | 163 | } |
164 | 164 | ||
165 | mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); | 165 | mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); |
166 | respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); | 166 | respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); |
167 | respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); | 167 | respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); |
168 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); | 168 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); |
169 | 169 | ||
170 | if (error && mcdi->resplen == 0) { | 170 | if (error && mcdi->resplen == 0) { |
171 | netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); | 171 | netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); |
172 | rc = EIO; | 172 | rc = EIO; |
173 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { | 173 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { |
174 | netif_err(efx, hw, efx->net_dev, | 174 | netif_err(efx, hw, efx->net_dev, |
175 | "MC response mismatch tx seq 0x%x rx seq 0x%x\n", | 175 | "MC response mismatch tx seq 0x%x rx seq 0x%x\n", |
176 | respseq, mcdi->seqno); | 176 | respseq, mcdi->seqno); |
177 | rc = EIO; | 177 | rc = EIO; |
178 | } else if (error) { | 178 | } else if (error) { |
179 | efx_readd(efx, &reg, pdu + 4); | 179 | efx_readd(efx, &reg, pdu + 4); |
180 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | 180 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { |
181 | #define TRANSLATE_ERROR(name) \ | 181 | #define TRANSLATE_ERROR(name) \ |
182 | case MC_CMD_ERR_ ## name: \ | 182 | case MC_CMD_ERR_ ## name: \ |
183 | rc = name; \ | 183 | rc = name; \ |
184 | break | 184 | break |
185 | TRANSLATE_ERROR(ENOENT); | 185 | TRANSLATE_ERROR(ENOENT); |
186 | TRANSLATE_ERROR(EINTR); | 186 | TRANSLATE_ERROR(EINTR); |
187 | TRANSLATE_ERROR(EACCES); | 187 | TRANSLATE_ERROR(EACCES); |
188 | TRANSLATE_ERROR(EBUSY); | 188 | TRANSLATE_ERROR(EBUSY); |
189 | TRANSLATE_ERROR(EINVAL); | 189 | TRANSLATE_ERROR(EINVAL); |
190 | TRANSLATE_ERROR(EDEADLK); | 190 | TRANSLATE_ERROR(EDEADLK); |
191 | TRANSLATE_ERROR(ENOSYS); | 191 | TRANSLATE_ERROR(ENOSYS); |
192 | TRANSLATE_ERROR(ETIME); | 192 | TRANSLATE_ERROR(ETIME); |
193 | #undef TRANSLATE_ERROR | 193 | #undef TRANSLATE_ERROR |
194 | default: | 194 | default: |
195 | rc = EIO; | 195 | rc = EIO; |
196 | break; | 196 | break; |
197 | } | 197 | } |
198 | } else | 198 | } else |
199 | rc = 0; | 199 | rc = 0; |
200 | 200 | ||
201 | out: | 201 | out: |
202 | mcdi->resprc = rc; | 202 | mcdi->resprc = rc; |
203 | if (rc) | 203 | if (rc) |
204 | mcdi->resplen = 0; | 204 | mcdi->resplen = 0; |
205 | 205 | ||
206 | /* Return rc=0 like wait_event_timeout() */ | 206 | /* Return rc=0 like wait_event_timeout() */ |
207 | return 0; | 207 | return 0; |
208 | } | 208 | } |
209 | 209 | ||
210 | /* Test and clear MC-rebooted flag for this port/function */ | 210 | /* Test and clear MC-rebooted flag for this port/function */ |
211 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | 211 | int efx_mcdi_poll_reboot(struct efx_nic *efx) |
212 | { | 212 | { |
213 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); | 213 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); |
214 | efx_dword_t reg; | 214 | efx_dword_t reg; |
215 | uint32_t value; | 215 | uint32_t value; |
216 | 216 | ||
217 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 217 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
218 | return false; | 218 | return false; |
219 | 219 | ||
220 | efx_readd(efx, &reg, addr); | 220 | efx_readd(efx, &reg, addr); |
221 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | 221 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); |
222 | 222 | ||
223 | if (value == 0) | 223 | if (value == 0) |
224 | return 0; | 224 | return 0; |
225 | 225 | ||
226 | EFX_ZERO_DWORD(reg); | 226 | EFX_ZERO_DWORD(reg); |
227 | efx_writed(efx, &reg, addr); | 227 | efx_writed(efx, &reg, addr); |
228 | 228 | ||
229 | if (value == MC_STATUS_DWORD_ASSERT) | 229 | if (value == MC_STATUS_DWORD_ASSERT) |
230 | return -EINTR; | 230 | return -EINTR; |
231 | else | 231 | else |
232 | return -EIO; | 232 | return -EIO; |
233 | } | 233 | } |
234 | 234 | ||
235 | static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) | 235 | static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) |
236 | { | 236 | { |
237 | /* Wait until the interface becomes QUIESCENT and we win the race | 237 | /* Wait until the interface becomes QUIESCENT and we win the race |
238 | * to mark it RUNNING. */ | 238 | * to mark it RUNNING. */ |
239 | wait_event(mcdi->wq, | 239 | wait_event(mcdi->wq, |
240 | atomic_cmpxchg(&mcdi->state, | 240 | atomic_cmpxchg(&mcdi->state, |
241 | MCDI_STATE_QUIESCENT, | 241 | MCDI_STATE_QUIESCENT, |
242 | MCDI_STATE_RUNNING) | 242 | MCDI_STATE_RUNNING) |
243 | == MCDI_STATE_QUIESCENT); | 243 | == MCDI_STATE_QUIESCENT); |
244 | } | 244 | } |
245 | 245 | ||
246 | static int efx_mcdi_await_completion(struct efx_nic *efx) | 246 | static int efx_mcdi_await_completion(struct efx_nic *efx) |
247 | { | 247 | { |
248 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 248 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
249 | 249 | ||
250 | if (wait_event_timeout( | 250 | if (wait_event_timeout( |
251 | mcdi->wq, | 251 | mcdi->wq, |
252 | atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, | 252 | atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, |
253 | msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) | 253 | msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) |
254 | return -ETIMEDOUT; | 254 | return -ETIMEDOUT; |
255 | 255 | ||
256 | /* Check if efx_mcdi_set_mode() switched us back to polled completions, | 256 | /* Check if efx_mcdi_set_mode() switched us back to polled completions, |
257 | * in which case poll for completions directly. If efx_mcdi_ev_cpl() | 257 | * in which case poll for completions directly. If efx_mcdi_ev_cpl() |
258 | * completed the request first, then we'll just end up completing the | 258 | * completed the request first, then we'll just end up completing the |
259 | * request again, which is safe. | 259 | * request again, which is safe. |
260 | * | 260 | * |
261 | * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which | 261 | * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which |
262 | * wait_event_timeout() implicitly provides. | 262 | * wait_event_timeout() implicitly provides. |
263 | */ | 263 | */ |
264 | if (mcdi->mode == MCDI_MODE_POLL) | 264 | if (mcdi->mode == MCDI_MODE_POLL) |
265 | return efx_mcdi_poll(efx); | 265 | return efx_mcdi_poll(efx); |
266 | 266 | ||
267 | return 0; | 267 | return 0; |
268 | } | 268 | } |
269 | 269 | ||
270 | static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) | 270 | static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) |
271 | { | 271 | { |
272 | /* If the interface is RUNNING, then move to COMPLETED and wake any | 272 | /* If the interface is RUNNING, then move to COMPLETED and wake any |
273 | * waiters. If the interface isn't in RUNNING then we've received a | 273 | * waiters. If the interface isn't in RUNNING then we've received a |
274 | * duplicate completion after we've already transitioned back to | 274 | * duplicate completion after we've already transitioned back to |
275 | * QUIESCENT. [A subsequent invocation would increment seqno, so would | 275 | * QUIESCENT. [A subsequent invocation would increment seqno, so would |
276 | * have failed the seqno check]. | 276 | * have failed the seqno check]. |
277 | */ | 277 | */ |
278 | if (atomic_cmpxchg(&mcdi->state, | 278 | if (atomic_cmpxchg(&mcdi->state, |
279 | MCDI_STATE_RUNNING, | 279 | MCDI_STATE_RUNNING, |
280 | MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { | 280 | MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { |
281 | wake_up(&mcdi->wq); | 281 | wake_up(&mcdi->wq); |
282 | return true; | 282 | return true; |
283 | } | 283 | } |
284 | 284 | ||
285 | return false; | 285 | return false; |
286 | } | 286 | } |
287 | 287 | ||
288 | static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) | 288 | static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) |
289 | { | 289 | { |
290 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | 290 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); |
291 | wake_up(&mcdi->wq); | 291 | wake_up(&mcdi->wq); |
292 | } | 292 | } |
293 | 293 | ||
294 | static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, | 294 | static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, |
295 | unsigned int datalen, unsigned int errno) | 295 | unsigned int datalen, unsigned int errno) |
296 | { | 296 | { |
297 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 297 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
298 | bool wake = false; | 298 | bool wake = false; |
299 | 299 | ||
300 | spin_lock(&mcdi->iface_lock); | 300 | spin_lock(&mcdi->iface_lock); |
301 | 301 | ||
302 | if ((seqno ^ mcdi->seqno) & SEQ_MASK) { | 302 | if ((seqno ^ mcdi->seqno) & SEQ_MASK) { |
303 | if (mcdi->credits) | 303 | if (mcdi->credits) |
304 | /* The request has been cancelled */ | 304 | /* The request has been cancelled */ |
305 | --mcdi->credits; | 305 | --mcdi->credits; |
306 | else | 306 | else |
307 | netif_err(efx, hw, efx->net_dev, | 307 | netif_err(efx, hw, efx->net_dev, |
308 | "MC response mismatch tx seq 0x%x rx " | 308 | "MC response mismatch tx seq 0x%x rx " |
309 | "seq 0x%x\n", seqno, mcdi->seqno); | 309 | "seq 0x%x\n", seqno, mcdi->seqno); |
310 | } else { | 310 | } else { |
311 | mcdi->resprc = errno; | 311 | mcdi->resprc = errno; |
312 | mcdi->resplen = datalen; | 312 | mcdi->resplen = datalen; |
313 | 313 | ||
314 | wake = true; | 314 | wake = true; |
315 | } | 315 | } |
316 | 316 | ||
317 | spin_unlock(&mcdi->iface_lock); | 317 | spin_unlock(&mcdi->iface_lock); |
318 | 318 | ||
319 | if (wake) | 319 | if (wake) |
320 | efx_mcdi_complete(mcdi); | 320 | efx_mcdi_complete(mcdi); |
321 | } | 321 | } |
322 | 322 | ||
323 | /* Issue the given command by writing the data into the shared memory PDU, | 323 | /* Issue the given command by writing the data into the shared memory PDU, |
324 | * ring the doorbell and wait for completion. Copyout the result. */ | 324 | * ring the doorbell and wait for completion. Copyout the result. */ |
325 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, | 325 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, |
326 | const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, | 326 | const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, |
327 | size_t *outlen_actual) | 327 | size_t *outlen_actual) |
328 | { | 328 | { |
329 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 329 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
330 | int rc; | 330 | int rc; |
331 | BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | 331 | BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); |
332 | 332 | ||
333 | efx_mcdi_acquire(mcdi); | 333 | efx_mcdi_acquire(mcdi); |
334 | 334 | ||
335 | /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ | 335 | /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ |
336 | spin_lock_bh(&mcdi->iface_lock); | 336 | spin_lock_bh(&mcdi->iface_lock); |
337 | ++mcdi->seqno; | 337 | ++mcdi->seqno; |
338 | spin_unlock_bh(&mcdi->iface_lock); | 338 | spin_unlock_bh(&mcdi->iface_lock); |
339 | 339 | ||
340 | efx_mcdi_copyin(efx, cmd, inbuf, inlen); | 340 | efx_mcdi_copyin(efx, cmd, inbuf, inlen); |
341 | 341 | ||
342 | if (mcdi->mode == MCDI_MODE_POLL) | 342 | if (mcdi->mode == MCDI_MODE_POLL) |
343 | rc = efx_mcdi_poll(efx); | 343 | rc = efx_mcdi_poll(efx); |
344 | else | 344 | else |
345 | rc = efx_mcdi_await_completion(efx); | 345 | rc = efx_mcdi_await_completion(efx); |
346 | 346 | ||
347 | if (rc != 0) { | 347 | if (rc != 0) { |
348 | /* Close the race with efx_mcdi_ev_cpl() executing just too late | 348 | /* Close the race with efx_mcdi_ev_cpl() executing just too late |
349 | * and completing a request we've just cancelled, by ensuring | 349 | * and completing a request we've just cancelled, by ensuring |
350 | * that the seqno check therein fails. | 350 | * that the seqno check therein fails. |
351 | */ | 351 | */ |
352 | spin_lock_bh(&mcdi->iface_lock); | 352 | spin_lock_bh(&mcdi->iface_lock); |
353 | ++mcdi->seqno; | 353 | ++mcdi->seqno; |
354 | ++mcdi->credits; | 354 | ++mcdi->credits; |
355 | spin_unlock_bh(&mcdi->iface_lock); | 355 | spin_unlock_bh(&mcdi->iface_lock); |
356 | 356 | ||
357 | netif_err(efx, hw, efx->net_dev, | 357 | netif_err(efx, hw, efx->net_dev, |
358 | "MC command 0x%x inlen %d mode %d timed out\n", | 358 | "MC command 0x%x inlen %d mode %d timed out\n", |
359 | cmd, (int)inlen, mcdi->mode); | 359 | cmd, (int)inlen, mcdi->mode); |
360 | } else { | 360 | } else { |
361 | size_t resplen; | 361 | size_t resplen; |
362 | 362 | ||
363 | /* At the very least we need a memory barrier here to ensure | 363 | /* At the very least we need a memory barrier here to ensure |
364 | * we pick up changes from efx_mcdi_ev_cpl(). Protect against | 364 | * we pick up changes from efx_mcdi_ev_cpl(). Protect against |
365 | * a spurious efx_mcdi_ev_cpl() running concurrently by | 365 | * a spurious efx_mcdi_ev_cpl() running concurrently by |
366 | * acquiring the iface_lock. */ | 366 | * acquiring the iface_lock. */ |
367 | spin_lock_bh(&mcdi->iface_lock); | 367 | spin_lock_bh(&mcdi->iface_lock); |
368 | rc = -mcdi->resprc; | 368 | rc = -mcdi->resprc; |
369 | resplen = mcdi->resplen; | 369 | resplen = mcdi->resplen; |
370 | spin_unlock_bh(&mcdi->iface_lock); | 370 | spin_unlock_bh(&mcdi->iface_lock); |
371 | 371 | ||
372 | if (rc == 0) { | 372 | if (rc == 0) { |
373 | efx_mcdi_copyout(efx, outbuf, | 373 | efx_mcdi_copyout(efx, outbuf, |
374 | min(outlen, mcdi->resplen + 3) & ~0x3); | 374 | min(outlen, mcdi->resplen + 3) & ~0x3); |
375 | if (outlen_actual != NULL) | 375 | if (outlen_actual != NULL) |
376 | *outlen_actual = resplen; | 376 | *outlen_actual = resplen; |
377 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) | 377 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) |
378 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ | 378 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ |
379 | else if (rc == -EIO || rc == -EINTR) { | 379 | else if (rc == -EIO || rc == -EINTR) { |
380 | netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", | 380 | netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", |
381 | -rc); | 381 | -rc); |
382 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | 382 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); |
383 | } else | 383 | } else |
384 | netif_err(efx, hw, efx->net_dev, | 384 | netif_err(efx, hw, efx->net_dev, |
385 | "MC command 0x%x inlen %d failed rc=%d\n", | 385 | "MC command 0x%x inlen %d failed rc=%d\n", |
386 | cmd, (int)inlen, -rc); | 386 | cmd, (int)inlen, -rc); |
387 | } | 387 | } |
388 | 388 | ||
389 | efx_mcdi_release(mcdi); | 389 | efx_mcdi_release(mcdi); |
390 | return rc; | 390 | return rc; |
391 | } | 391 | } |
392 | 392 | ||
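
For reference, a hedged sketch of how a caller would use efx_mcdi_rpc(): the command code is a hypothetical placeholder and the 4-byte response layout is an assumption; only the function signature comes from the code above.

static int example_mcdi_read_word(struct efx_nic *efx, u32 *value)
{
	u8 outbuf[4];
	size_t outlen;
	int rc;

	/* No input payload; the response is copied into outbuf and its
	 * actual length is reported through outlen */
	rc = efx_mcdi_rpc(efx, EXAMPLE_CMD_READ_WORD /* hypothetical */,
			  NULL, 0, outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < sizeof(outbuf))
		return -EIO;

	*value = le32_to_cpu(*(__le32 *)outbuf);
	return 0;
}
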
393 | void efx_mcdi_mode_poll(struct efx_nic *efx) | 393 | void efx_mcdi_mode_poll(struct efx_nic *efx) |
394 | { | 394 | { |
395 | struct efx_mcdi_iface *mcdi; | 395 | struct efx_mcdi_iface *mcdi; |
396 | 396 | ||
397 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 397 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
398 | return; | 398 | return; |
399 | 399 | ||
400 | mcdi = efx_mcdi(efx); | 400 | mcdi = efx_mcdi(efx); |
401 | if (mcdi->mode == MCDI_MODE_POLL) | 401 | if (mcdi->mode == MCDI_MODE_POLL) |
402 | return; | 402 | return; |
403 | 403 | ||
404 | /* We can switch from event completion to polled completion, because | 404 | /* We can switch from event completion to polled completion, because |
405 | * mcdi requests are always completed in shared memory. We do this by | 405 | * mcdi requests are always completed in shared memory. We do this by |
406 | * switching the mode to POLL and then completing the request. | 406 | * switching the mode to POLL and then completing the request. |
407 | * efx_mcdi_await_completion() will then call efx_mcdi_poll(). | 407 | * efx_mcdi_await_completion() will then call efx_mcdi_poll(). |
408 | * | 408 | * |
409 | * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), | 409 | * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), |
410 | * which efx_mcdi_complete() provides for us. | 410 | * which efx_mcdi_complete() provides for us. |
411 | */ | 411 | */ |
412 | mcdi->mode = MCDI_MODE_POLL; | 412 | mcdi->mode = MCDI_MODE_POLL; |
413 | 413 | ||
414 | efx_mcdi_complete(mcdi); | 414 | efx_mcdi_complete(mcdi); |
415 | } | 415 | } |
416 | 416 | ||
417 | void efx_mcdi_mode_event(struct efx_nic *efx) | 417 | void efx_mcdi_mode_event(struct efx_nic *efx) |
418 | { | 418 | { |
419 | struct efx_mcdi_iface *mcdi; | 419 | struct efx_mcdi_iface *mcdi; |
420 | 420 | ||
421 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 421 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
422 | return; | 422 | return; |
423 | 423 | ||
424 | mcdi = efx_mcdi(efx); | 424 | mcdi = efx_mcdi(efx); |
425 | 425 | ||
426 | if (mcdi->mode == MCDI_MODE_EVENTS) | 426 | if (mcdi->mode == MCDI_MODE_EVENTS) |
427 | return; | 427 | return; |
428 | 428 | ||
429 | /* We can't switch from polled to event completion in the middle of a | 429 | /* We can't switch from polled to event completion in the middle of a |
430 | * request, because the completion method is specified in the request. | 430 | * request, because the completion method is specified in the request. |
431 | * So acquire the interface to serialise the requestors. We don't need | 431 | * So acquire the interface to serialise the requestors. We don't need |
432 | * to acquire the iface_lock to change the mode here, but we do need a | 432 | * to acquire the iface_lock to change the mode here, but we do need a |
433 | * write memory barrier to ensure that efx_mcdi_rpc() sees it, which | 433 | * write memory barrier to ensure that efx_mcdi_rpc() sees it, which |
434 | * efx_mcdi_acquire() provides. | 434 | * efx_mcdi_acquire() provides. |
435 | */ | 435 | */ |
436 | efx_mcdi_acquire(mcdi); | 436 | efx_mcdi_acquire(mcdi); |
437 | mcdi->mode = MCDI_MODE_EVENTS; | 437 | mcdi->mode = MCDI_MODE_EVENTS; |
438 | efx_mcdi_release(mcdi); | 438 | efx_mcdi_release(mcdi); |
439 | } | 439 | } |
440 | 440 | ||
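
These two helpers are intended to bracket windows where event delivery is unavailable, such as across a reset that tears down the event queues. A hedged sketch of such a caller (the reset helper is hypothetical):

static int example_reset_with_polled_mcdi(struct efx_nic *efx)
{
	int rc;

	/* Event queues are about to be torn down, so fall back to polled
	 * completions; this also completes any request still waiting on
	 * a CMDDONE event */
	efx_mcdi_mode_poll(efx);

	rc = example_do_reset(efx);	/* hypothetical reset helper */

	/* Event queues are live again; resume event-based completion */
	efx_mcdi_mode_event(efx);
	return rc;
}
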
441 | static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | 441 | static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) |
442 | { | 442 | { |
443 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 443 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
444 | 444 | ||
445 | /* If there is an outstanding MCDI request, it has been terminated | 445 | /* If there is an outstanding MCDI request, it has been terminated |
446 | * either by a BADASSERT or REBOOT event. If the mcdi interface is | 446 | * either by a BADASSERT or REBOOT event. If the mcdi interface is |
447 | * in polled mode, then do nothing because the MC reboot handler will | 447 | * in polled mode, then do nothing because the MC reboot handler will |
448 | * set the header correctly. However, if the mcdi interface is waiting | 448 | * set the header correctly. However, if the mcdi interface is waiting |
449 | * for a CMDDONE event it won't receive it [and since all MCDI events | 449 | * for a CMDDONE event it won't receive it [and since all MCDI events |
450 | * are sent to the same queue, we can't be racing with | 450 | * are sent to the same queue, we can't be racing with |
451 | * efx_mcdi_ev_cpl()] | 451 | * efx_mcdi_ev_cpl()] |
452 | * | 452 | * |
453 | * There's a race here with efx_mcdi_rpc(), because we might receive | 453 | * There's a race here with efx_mcdi_rpc(), because we might receive |
454 | * a REBOOT event *before* the request has been copied out. In polled | 454 | * a REBOOT event *before* the request has been copied out. In polled |
455 | * mode (during startup) this is irrelevant, because efx_mcdi_complete() | 455 | * mode (during startup) this is irrelevant, because efx_mcdi_complete() |
456 | * is ignored. In event mode, this condition is just an edge-case of | 456 | * is ignored. In event mode, this condition is just an edge-case of |
457 | * receiving a REBOOT event after posting the MCDI request. Did the mc | 457 | * receiving a REBOOT event after posting the MCDI request. Did the mc |
458 | * reboot before or after the copyout? The best we can always do is | 458 | * reboot before or after the copyout? The best we can always do is |
459 | * just return failure. | 459 | * just return failure. |
460 | */ | 460 | */ |
461 | spin_lock(&mcdi->iface_lock); | 461 | spin_lock(&mcdi->iface_lock); |
462 | if (efx_mcdi_complete(mcdi)) { | 462 | if (efx_mcdi_complete(mcdi)) { |
463 | if (mcdi->mode == MCDI_MODE_EVENTS) { | 463 | if (mcdi->mode == MCDI_MODE_EVENTS) { |
464 | mcdi->resprc = rc; | 464 | mcdi->resprc = rc; |
465 | mcdi->resplen = 0; | 465 | mcdi->resplen = 0; |
466 | } | 466 | } |
467 | } else | 467 | } else |
468 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | 468 | /* Nobody was waiting for an MCDI request, so trigger a reset */ |
469 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | 469 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); |
470 | 470 | ||
471 | spin_unlock(&mcdi->iface_lock); | 471 | spin_unlock(&mcdi->iface_lock); |
472 | } | 472 | } |
473 | 473 | ||
474 | static unsigned int efx_mcdi_event_link_speed[] = { | 474 | static unsigned int efx_mcdi_event_link_speed[] = { |
475 | [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, | 475 | [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, |
476 | [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, | 476 | [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, |
477 | [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, | 477 | [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, |
478 | }; | 478 | }; |
479 | 479 | ||
480 | 480 | ||
481 | static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) | 481 | static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) |
482 | { | 482 | { |
483 | u32 flags, fcntl, speed, lpa; | 483 | u32 flags, fcntl, speed, lpa; |
484 | 484 | ||
485 | speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); | 485 | speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); |
486 | EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); | 486 | EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); |
487 | speed = efx_mcdi_event_link_speed[speed]; | 487 | speed = efx_mcdi_event_link_speed[speed]; |
488 | 488 | ||
489 | flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); | 489 | flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); |
490 | fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); | 490 | fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); |
491 | lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); | 491 | lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); |
492 | 492 | ||
493 | /* efx->link_state is only modified by efx_mcdi_phy_get_link(), | 493 | /* efx->link_state is only modified by efx_mcdi_phy_get_link(), |
494 | * which is only run after flushing the event queues. Therefore, it | 494 | * which is only run after flushing the event queues. Therefore, it |
495 | * is safe to modify the link state outside of the mac_lock here. | 495 | * is safe to modify the link state outside of the mac_lock here. |
496 | */ | 496 | */ |
497 | efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); | 497 | efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); |
498 | 498 | ||
499 | efx_mcdi_phy_check_fcntl(efx, lpa); | 499 | efx_mcdi_phy_check_fcntl(efx, lpa); |
500 | 500 | ||
501 | efx_link_status_changed(efx); | 501 | efx_link_status_changed(efx); |
502 | } | 502 | } |
503 | 503 | ||
504 | static const char *sensor_names[] = { | 504 | static const char *sensor_names[] = { |
505 | [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor", | 505 | [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor", |
506 | [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor", | 506 | [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor", |
507 | [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling", | 507 | [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling", |
508 | [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor", | 508 | [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor", |
509 | [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling", | 509 | [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling", |
510 | [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor", | 510 | [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor", |
511 | [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling", | 511 | [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling", |
512 | [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor", | 512 | [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor", |
513 | [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor", | 513 | [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor", |
514 | [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor", | 514 | [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor", |
515 | [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor", | 515 | [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor", |
516 | [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor", | 516 | [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor", |
517 | [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor" | 517 | [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor" |
518 | }; | 518 | }; |
519 | 519 | ||
520 | static const char *sensor_status_names[] = { | 520 | static const char *sensor_status_names[] = { |
521 | [MC_CMD_SENSOR_STATE_OK] = "OK", | 521 | [MC_CMD_SENSOR_STATE_OK] = "OK", |
522 | [MC_CMD_SENSOR_STATE_WARNING] = "Warning", | 522 | [MC_CMD_SENSOR_STATE_WARNING] = "Warning", |
523 | [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", | 523 | [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", |
524 | [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", | 524 | [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", |
525 | }; | 525 | }; |
526 | 526 | ||
527 | static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) | 527 | static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) |
528 | { | 528 | { |
529 | unsigned int monitor, state, value; | 529 | unsigned int monitor, state, value; |
530 | const char *name, *state_txt; | 530 | const char *name, *state_txt; |
531 | monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); | 531 | monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); |
532 | state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); | 532 | state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); |
533 | value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); | 533 | value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); |
534 | /* Deal gracefully with the board having more sensors than we | 534 | /* Deal gracefully with the board having more sensors than we |
535 | * know about, but do not expect new sensor states. */ | 535 | * know about, but do not expect new sensor states. */ |
536 | name = (monitor >= ARRAY_SIZE(sensor_names)) | 536 | name = (monitor >= ARRAY_SIZE(sensor_names)) |
537 | ? "No sensor name available" : | 537 | ? "No sensor name available" : |
538 | sensor_names[monitor]; | 538 | sensor_names[monitor]; |
539 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); | 539 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); |
540 | state_txt = sensor_status_names[state]; | 540 | state_txt = sensor_status_names[state]; |
541 | 541 | ||
542 | netif_err(efx, hw, efx->net_dev, | 542 | netif_err(efx, hw, efx->net_dev, |
543 | "Sensor %d (%s) reports condition '%s' for raw value %d\n", | 543 | "Sensor %d (%s) reports condition '%s' for raw value %d\n", |
544 | monitor, name, state_txt, value); | 544 | monitor, name, state_txt, value); |
545 | } | 545 | } |
546 | 546 | ||
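The lookup in efx_mcdi_sensor_event() is deliberately asymmetric: an out-of-range sensor number is tolerated, since newer firmware may report sensors this table predates, while an out-of-range state is treated as a driver bug. A minimal standalone sketch of the same defensive table lookup (the table contents here are illustrative, not the driver's):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const names[] = { "temp", "cooling", "1.0V supply" };

    /* Tolerate indices beyond the table: the firmware may be newer
     * than this code and know about more sensors than we do. */
    static const char *lookup_name(unsigned int idx)
    {
            return (idx >= ARRAY_SIZE(names)) ? "unknown sensor" : names[idx];
    }

    int main(void)
    {
            printf("%s\n", lookup_name(1));  /* prints "cooling" */
            printf("%s\n", lookup_name(42)); /* prints "unknown sensor" */
            return 0;
    }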
547 | /* Called from falcon_process_eventq for MCDI events */ | 547 | /* Called from falcon_process_eventq for MCDI events */ |
548 | void efx_mcdi_process_event(struct efx_channel *channel, | 548 | void efx_mcdi_process_event(struct efx_channel *channel, |
549 | efx_qword_t *event) | 549 | efx_qword_t *event) |
550 | { | 550 | { |
551 | struct efx_nic *efx = channel->efx; | 551 | struct efx_nic *efx = channel->efx; |
552 | int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); | 552 | int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); |
553 | u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); | 553 | u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); |
554 | 554 | ||
555 | switch (code) { | 555 | switch (code) { |
556 | case MCDI_EVENT_CODE_BADSSERT: | 556 | case MCDI_EVENT_CODE_BADSSERT: |
557 | netif_err(efx, hw, efx->net_dev, | 557 | netif_err(efx, hw, efx->net_dev, |
558 | "MC watchdog or assertion failure at 0x%x\n", data); | 558 | "MC watchdog or assertion failure at 0x%x\n", data); |
559 | efx_mcdi_ev_death(efx, EINTR); | 559 | efx_mcdi_ev_death(efx, EINTR); |
560 | break; | 560 | break; |
561 | 561 | ||
562 | case MCDI_EVENT_CODE_PMNOTICE: | 562 | case MCDI_EVENT_CODE_PMNOTICE: |
563 | netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); | 563 | netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); |
564 | break; | 564 | break; |
565 | 565 | ||
566 | case MCDI_EVENT_CODE_CMDDONE: | 566 | case MCDI_EVENT_CODE_CMDDONE: |
567 | efx_mcdi_ev_cpl(efx, | 567 | efx_mcdi_ev_cpl(efx, |
568 | MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), | 568 | MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), |
569 | MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), | 569 | MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), |
570 | MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); | 570 | MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); |
571 | break; | 571 | break; |
572 | 572 | ||
573 | case MCDI_EVENT_CODE_LINKCHANGE: | 573 | case MCDI_EVENT_CODE_LINKCHANGE: |
574 | efx_mcdi_process_link_change(efx, event); | 574 | efx_mcdi_process_link_change(efx, event); |
575 | break; | 575 | break; |
576 | case MCDI_EVENT_CODE_SENSOREVT: | 576 | case MCDI_EVENT_CODE_SENSOREVT: |
577 | efx_mcdi_sensor_event(efx, event); | 577 | efx_mcdi_sensor_event(efx, event); |
578 | break; | 578 | break; |
579 | case MCDI_EVENT_CODE_SCHEDERR: | 579 | case MCDI_EVENT_CODE_SCHEDERR: |
580 | netif_info(efx, hw, efx->net_dev, | 580 | netif_info(efx, hw, efx->net_dev, |
581 | "MC Scheduler error address=0x%x\n", data); | 581 | "MC Scheduler error address=0x%x\n", data); |
582 | break; | 582 | break; |
583 | case MCDI_EVENT_CODE_REBOOT: | 583 | case MCDI_EVENT_CODE_REBOOT: |
584 | netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); | 584 | netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); |
585 | efx_mcdi_ev_death(efx, EIO); | 585 | efx_mcdi_ev_death(efx, EIO); |
586 | break; | 586 | break; |
587 | case MCDI_EVENT_CODE_MAC_STATS_DMA: | 587 | case MCDI_EVENT_CODE_MAC_STATS_DMA: |
588 | /* MAC stats are gathered lazily. We can ignore this. */ | 588 | /* MAC stats are gathered lazily. We can ignore this. */ |
589 | break; | 589 | break; |
590 | 590 | ||
591 | default: | 591 | default: |
592 | netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", | 592 | netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", |
593 | code); | 593 | code); |
594 | } | 594 | } |
595 | } | 595 | } |
596 | 596 | ||
597 | /************************************************************************** | 597 | /************************************************************************** |
598 | * | 598 | * |
599 | * Specific request functions | 599 | * Specific request functions |
600 | * | 600 | * |
601 | ************************************************************************** | 601 | ************************************************************************** |
602 | */ | 602 | */ |
603 | 603 | ||
604 | int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) | 604 | int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build) |
605 | { | 605 | { |
606 | u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; | 606 | u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; |
607 | size_t outlength; | 607 | size_t outlength; |
608 | const __le16 *ver_words; | 608 | const __le16 *ver_words; |
609 | int rc; | 609 | int rc; |
610 | 610 | ||
611 | BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); | 611 | BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); |
612 | 612 | ||
613 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, | 613 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, |
614 | outbuf, sizeof(outbuf), &outlength); | 614 | outbuf, sizeof(outbuf), &outlength); |
615 | if (rc) | 615 | if (rc) |
616 | goto fail; | 616 | goto fail; |
617 | 617 | ||
618 | if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) { | 618 | if (outlength == MC_CMD_GET_VERSION_V0_OUT_LEN) { |
619 | *version = 0; | 619 | *version = 0; |
620 | *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); | 620 | *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); |
621 | return 0; | 621 | return 0; |
622 | } | 622 | } |
623 | 623 | ||
624 | if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { | 624 | if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { |
625 | rc = -EIO; | 625 | rc = -EIO; |
626 | goto fail; | 626 | goto fail; |
627 | } | 627 | } |
628 | 628 | ||
629 | ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); | 629 | ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); |
630 | *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | | 630 | *version = (((u64)le16_to_cpu(ver_words[0]) << 48) | |
631 | ((u64)le16_to_cpu(ver_words[1]) << 32) | | 631 | ((u64)le16_to_cpu(ver_words[1]) << 32) | |
632 | ((u64)le16_to_cpu(ver_words[2]) << 16) | | 632 | ((u64)le16_to_cpu(ver_words[2]) << 16) | |
633 | le16_to_cpu(ver_words[3])); | 633 | le16_to_cpu(ver_words[3])); |
634 | *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); | 634 | *build = MCDI_DWORD(outbuf, GET_VERSION_OUT_FIRMWARE); |
635 | 635 | ||
636 | return 0; | 636 | return 0; |
637 | 637 | ||
638 | fail: | 638 | fail: |
639 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 639 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
640 | return rc; | 640 | return rc; |
641 | } | 641 | } |
642 | 642 | ||
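efx_mcdi_fwver() packs four 16-bit version fields into one u64, most significant field first. A hypothetical caller could unpack it for display as below (a sketch only; print_fw_version() is not a driver function):

    #include <stdio.h>
    #include <stdint.h>

    /* Unpack the u64 built by efx_mcdi_fwver() into dotted form. */
    static void print_fw_version(uint64_t version, uint32_t build)
    {
            printf("firmware %u.%u.%u.%u build %u\n",
                   (unsigned int)((version >> 48) & 0xffff),
                   (unsigned int)((version >> 32) & 0xffff),
                   (unsigned int)((version >> 16) & 0xffff),
                   (unsigned int)(version & 0xffff),
                   (unsigned int)build);
    }

    int main(void)
    {
            /* Example input corresponding to version 4.2.1.0 */
            uint64_t v = (4ULL << 48) | (2ULL << 32) | (1ULL << 16);
            print_fw_version(v, 1234);
            return 0;
    }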
643 | int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | 643 | int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, |
644 | bool *was_attached) | 644 | bool *was_attached) |
645 | { | 645 | { |
646 | u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; | 646 | u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; |
647 | u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; | 647 | u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; |
648 | size_t outlen; | 648 | size_t outlen; |
649 | int rc; | 649 | int rc; |
650 | 650 | ||
651 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, | 651 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, |
652 | driver_operating ? 1 : 0); | 652 | driver_operating ? 1 : 0); |
653 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); | 653 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); |
654 | 654 | ||
655 | rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), | 655 | rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), |
656 | outbuf, sizeof(outbuf), &outlen); | 656 | outbuf, sizeof(outbuf), &outlen); |
657 | if (rc) | 657 | if (rc) |
658 | goto fail; | 658 | goto fail; |
659 | if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { | 659 | if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { |
660 | rc = -EIO; | 660 | rc = -EIO; |
661 | goto fail; | 661 | goto fail; |
662 | } | 662 | } |
663 | 663 | ||
664 | if (was_attached != NULL) | 664 | if (was_attached != NULL) |
665 | *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); | 665 | *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); |
666 | return 0; | 666 | return 0; |
667 | 667 | ||
668 | fail: | 668 | fail: |
669 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 669 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
670 | return rc; | 670 | return rc; |
671 | } | 671 | } |
672 | 672 | ||
673 | int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, | 673 | int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, |
674 | u16 *fw_subtype_list) | 674 | u16 *fw_subtype_list) |
675 | { | 675 | { |
676 | uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; | 676 | uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; |
677 | size_t outlen; | 677 | size_t outlen; |
678 | int port_num = efx_port_num(efx); | 678 | int port_num = efx_port_num(efx); |
679 | int offset; | 679 | int offset; |
680 | int rc; | 680 | int rc; |
681 | 681 | ||
682 | BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); | 682 | BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); |
683 | 683 | ||
684 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, | 684 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, |
685 | outbuf, sizeof(outbuf), &outlen); | 685 | outbuf, sizeof(outbuf), &outlen); |
686 | if (rc) | 686 | if (rc) |
687 | goto fail; | 687 | goto fail; |
688 | 688 | ||
689 | if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { | 689 | if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { |
690 | rc = -EIO; | 690 | rc = -EIO; |
691 | goto fail; | 691 | goto fail; |
692 | } | 692 | } |
693 | 693 | ||
694 | offset = (port_num) | 694 | offset = (port_num) |
695 | ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST | 695 | ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST |
696 | : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; | 696 | : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; |
697 | if (mac_address) | 697 | if (mac_address) |
698 | memcpy(mac_address, outbuf + offset, ETH_ALEN); | 698 | memcpy(mac_address, outbuf + offset, ETH_ALEN); |
699 | if (fw_subtype_list) | 699 | if (fw_subtype_list) |
700 | memcpy(fw_subtype_list, | 700 | memcpy(fw_subtype_list, |
701 | outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, | 701 | outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, |
702 | MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); | 702 | MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); |
703 | 703 | ||
704 | return 0; | 704 | return 0; |
705 | 705 | ||
706 | fail: | 706 | fail: |
707 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", | 707 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", |
708 | __func__, rc, (int)outlen); | 708 | __func__, rc, (int)outlen); |
709 | 709 | ||
710 | return rc; | 710 | return rc; |
711 | } | 711 | } |
712 | 712 | ||
713 | int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) | 713 | int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) |
714 | { | 714 | { |
715 | u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; | 715 | u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; |
716 | u32 dest = 0; | 716 | u32 dest = 0; |
717 | int rc; | 717 | int rc; |
718 | 718 | ||
719 | if (uart) | 719 | if (uart) |
720 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; | 720 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; |
721 | if (evq) | 721 | if (evq) |
722 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; | 722 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; |
723 | 723 | ||
724 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); | 724 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); |
725 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); | 725 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); |
726 | 726 | ||
727 | BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); | 727 | BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); |
728 | 728 | ||
729 | rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), | 729 | rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), |
730 | NULL, 0, NULL); | 730 | NULL, 0, NULL); |
731 | if (rc) | 731 | if (rc) |
732 | goto fail; | 732 | goto fail; |
733 | 733 | ||
734 | return 0; | 734 | return 0; |
735 | 735 | ||
736 | fail: | 736 | fail: |
737 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 737 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
738 | return rc; | 738 | return rc; |
739 | } | 739 | } |
740 | 740 | ||
741 | int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) | 741 | int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) |
742 | { | 742 | { |
743 | u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; | 743 | u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; |
744 | size_t outlen; | 744 | size_t outlen; |
745 | int rc; | 745 | int rc; |
746 | 746 | ||
747 | BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); | 747 | BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); |
748 | 748 | ||
749 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, | 749 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, |
750 | outbuf, sizeof(outbuf), &outlen); | 750 | outbuf, sizeof(outbuf), &outlen); |
751 | if (rc) | 751 | if (rc) |
752 | goto fail; | 752 | goto fail; |
753 | if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { | 753 | if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { |
754 | rc = -EIO; | 754 | rc = -EIO; |
755 | goto fail; | 755 | goto fail; |
756 | } | 756 | } |
757 | 757 | ||
758 | *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); | 758 | *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); |
759 | return 0; | 759 | return 0; |
760 | 760 | ||
761 | fail: | 761 | fail: |
762 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | 762 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
763 | __func__, rc); | 763 | __func__, rc); |
764 | return rc; | 764 | return rc; |
765 | } | 765 | } |
766 | 766 | ||
767 | int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | 767 | int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, |
768 | size_t *size_out, size_t *erase_size_out, | 768 | size_t *size_out, size_t *erase_size_out, |
769 | bool *protected_out) | 769 | bool *protected_out) |
770 | { | 770 | { |
771 | u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; | 771 | u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; |
772 | u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; | 772 | u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; |
773 | size_t outlen; | 773 | size_t outlen; |
774 | int rc; | 774 | int rc; |
775 | 775 | ||
776 | MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); | 776 | MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); |
777 | 777 | ||
778 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), | 778 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), |
779 | outbuf, sizeof(outbuf), &outlen); | 779 | outbuf, sizeof(outbuf), &outlen); |
780 | if (rc) | 780 | if (rc) |
781 | goto fail; | 781 | goto fail; |
782 | if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { | 782 | if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { |
783 | rc = -EIO; | 783 | rc = -EIO; |
784 | goto fail; | 784 | goto fail; |
785 | } | 785 | } |
786 | 786 | ||
787 | *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); | 787 | *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); |
788 | *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); | 788 | *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); |
789 | *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & | 789 | *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & |
790 | (1 << MC_CMD_NVRAM_PROTECTED_LBN)); | 790 | (1 << MC_CMD_NVRAM_PROTECTED_LBN)); |
791 | return 0; | 791 | return 0; |
792 | 792 | ||
793 | fail: | 793 | fail: |
794 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 794 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
795 | return rc; | 795 | return rc; |
796 | } | 796 | } |
797 | 797 | ||
798 | int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) | 798 | int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) |
799 | { | 799 | { |
800 | u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; | 800 | u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; |
801 | int rc; | 801 | int rc; |
802 | 802 | ||
803 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); | 803 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); |
804 | 804 | ||
805 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); | 805 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); |
806 | 806 | ||
807 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), | 807 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), |
808 | NULL, 0, NULL); | 808 | NULL, 0, NULL); |
809 | if (rc) | 809 | if (rc) |
810 | goto fail; | 810 | goto fail; |
811 | 811 | ||
812 | return 0; | 812 | return 0; |
813 | 813 | ||
814 | fail: | 814 | fail: |
815 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 815 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
816 | return rc; | 816 | return rc; |
817 | } | 817 | } |
818 | 818 | ||
819 | int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | 819 | int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, |
820 | loff_t offset, u8 *buffer, size_t length) | 820 | loff_t offset, u8 *buffer, size_t length) |
821 | { | 821 | { |
822 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; | 822 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; |
823 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | 823 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; |
824 | size_t outlen; | 824 | size_t outlen; |
825 | int rc; | 825 | int rc; |
826 | 826 | ||
827 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); | 827 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); |
828 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); | 828 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); |
829 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); | 829 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); |
830 | 830 | ||
831 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), | 831 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), |
832 | outbuf, sizeof(outbuf), &outlen); | 832 | outbuf, sizeof(outbuf), &outlen); |
833 | if (rc) | 833 | if (rc) |
834 | goto fail; | 834 | goto fail; |
835 | 835 | ||
836 | memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); | 836 | memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); |
837 | return 0; | 837 | return 0; |
838 | 838 | ||
839 | fail: | 839 | fail: |
840 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 840 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
841 | return rc; | 841 | return rc; |
842 | } | 842 | } |
843 | 843 | ||
844 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | 844 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, |
845 | loff_t offset, const u8 *buffer, size_t length) | 845 | loff_t offset, const u8 *buffer, size_t length) |
846 | { | 846 | { |
847 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | 847 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; |
848 | int rc; | 848 | int rc; |
849 | 849 | ||
850 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); | 850 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); |
851 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); | 851 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); |
852 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); | 852 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); |
853 | memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); | 853 | memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); |
854 | 854 | ||
855 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); | 855 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); |
856 | 856 | ||
857 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, | 857 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, |
858 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), | 858 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), |
859 | NULL, 0, NULL); | 859 | NULL, 0, NULL); |
860 | if (rc) | 860 | if (rc) |
861 | goto fail; | 861 | goto fail; |
862 | 862 | ||
863 | return 0; | 863 | return 0; |
864 | 864 | ||
865 | fail: | 865 | fail: |
866 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 866 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
867 | return rc; | 867 | return rc; |
868 | } | 868 | } |
869 | 869 | ||
870 | int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | 870 | int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, |
871 | loff_t offset, size_t length) | 871 | loff_t offset, size_t length) |
872 | { | 872 | { |
873 | u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; | 873 | u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; |
874 | int rc; | 874 | int rc; |
875 | 875 | ||
876 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); | 876 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); |
877 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); | 877 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); |
878 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); | 878 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); |
879 | 879 | ||
880 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); | 880 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); |
881 | 881 | ||
882 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), | 882 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), |
883 | NULL, 0, NULL); | 883 | NULL, 0, NULL); |
884 | if (rc) | 884 | if (rc) |
885 | goto fail; | 885 | goto fail; |
886 | 886 | ||
887 | return 0; | 887 | return 0; |
888 | 888 | ||
889 | fail: | 889 | fail: |
890 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 890 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
891 | return rc; | 891 | return rc; |
892 | } | 892 | } |
893 | 893 | ||
894 | int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) | 894 | int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) |
895 | { | 895 | { |
896 | u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; | 896 | u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; |
897 | int rc; | 897 | int rc; |
898 | 898 | ||
899 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); | 899 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); |
900 | 900 | ||
901 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); | 901 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); |
902 | 902 | ||
903 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), | 903 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), |
904 | NULL, 0, NULL); | 904 | NULL, 0, NULL); |
905 | if (rc) | 905 | if (rc) |
906 | goto fail; | 906 | goto fail; |
907 | 907 | ||
908 | return 0; | 908 | return 0; |
909 | 909 | ||
910 | fail: | 910 | fail: |
911 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 911 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
912 | return rc; | 912 | return rc; |
913 | } | 913 | } |
914 | 914 | ||
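Together, the functions above form the NVRAM update sequence: update_start, erase, write in pieces of at most EFX_MCDI_NVRAM_LEN_MAX bytes, then update_finish. The chunking loop a caller would need looks roughly like this standalone sketch (write_chunk() simulates efx_mcdi_nvram_write(); update_partition() is a hypothetical helper, not part of the driver):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define CHUNK_MAX 128  /* mirrors EFX_MCDI_NVRAM_LEN_MAX */

    /* Stand-in for efx_mcdi_nvram_write(): here it just reports the chunk. */
    static int write_chunk(unsigned int type, long offset,
                           const unsigned char *buf, size_t len)
    {
            printf("type %u: write %zu bytes at offset %ld\n", type, len, offset);
            return 0;
    }

    /* The loop shape a caller of the MCDI NVRAM API would use between
     * update_start/erase and update_finish. */
    static int update_partition(unsigned int type, const unsigned char *image,
                                size_t size)
    {
            size_t done;
            int rc;

            for (done = 0; done < size; done += CHUNK_MAX) {
                    size_t len = size - done < CHUNK_MAX ? size - done : CHUNK_MAX;

                    rc = write_chunk(type, (long)done, image + done, len);
                    if (rc)
                            return rc;
            }
            return 0;
    }

    int main(void)
    {
            unsigned char image[300];

            memset(image, 0xff, sizeof(image));
            return update_partition(0, image, sizeof(image));
    }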
915 | static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) | 915 | static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) |
916 | { | 916 | { |
917 | u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; | 917 | u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; |
918 | u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; | 918 | u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; |
919 | int rc; | 919 | int rc; |
920 | 920 | ||
921 | MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); | 921 | MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); |
922 | 922 | ||
923 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), | 923 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), |
924 | outbuf, sizeof(outbuf), NULL); | 924 | outbuf, sizeof(outbuf), NULL); |
925 | if (rc) | 925 | if (rc) |
926 | return rc; | 926 | return rc; |
927 | 927 | ||
928 | switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { | 928 | switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { |
929 | case MC_CMD_NVRAM_TEST_PASS: | 929 | case MC_CMD_NVRAM_TEST_PASS: |
930 | case MC_CMD_NVRAM_TEST_NOTSUPP: | 930 | case MC_CMD_NVRAM_TEST_NOTSUPP: |
931 | return 0; | 931 | return 0; |
932 | default: | 932 | default: |
933 | return -EIO; | 933 | return -EIO; |
934 | } | 934 | } |
935 | } | 935 | } |
936 | 936 | ||
937 | int efx_mcdi_nvram_test_all(struct efx_nic *efx) | 937 | int efx_mcdi_nvram_test_all(struct efx_nic *efx) |
938 | { | 938 | { |
939 | u32 nvram_types; | 939 | u32 nvram_types; |
940 | unsigned int type; | 940 | unsigned int type; |
941 | int rc; | 941 | int rc; |
942 | 942 | ||
943 | rc = efx_mcdi_nvram_types(efx, &nvram_types); | 943 | rc = efx_mcdi_nvram_types(efx, &nvram_types); |
944 | if (rc) | 944 | if (rc) |
945 | goto fail1; | 945 | goto fail1; |
946 | 946 | ||
947 | type = 0; | 947 | type = 0; |
948 | while (nvram_types != 0) { | 948 | while (nvram_types != 0) { |
949 | if (nvram_types & 1) { | 949 | if (nvram_types & 1) { |
950 | rc = efx_mcdi_nvram_test(efx, type); | 950 | rc = efx_mcdi_nvram_test(efx, type); |
951 | if (rc) | 951 | if (rc) |
952 | goto fail2; | 952 | goto fail2; |
953 | } | 953 | } |
954 | type++; | 954 | type++; |
955 | nvram_types >>= 1; | 955 | nvram_types >>= 1; |
956 | } | 956 | } |
957 | 957 | ||
958 | return 0; | 958 | return 0; |
959 | 959 | ||
960 | fail2: | 960 | fail2: |
961 | netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", | 961 | netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", |
962 | __func__, type); | 962 | __func__, type); |
963 | fail1: | 963 | fail1: |
964 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 964 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
965 | return rc; | 965 | return rc; |
966 | } | 966 | } |
967 | 967 | ||
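The loop above tests each bit of nvram_types by shifting the mask down one position per iteration. An equivalent pattern that jumps straight to the set bits, sketched here with a GCC/Clang builtin rather than the kernel's bit helpers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int nvram_types = 0x15;  /* example: types 0, 2 and 4 */

            while (nvram_types) {
                    unsigned int type = (unsigned int)__builtin_ctz(nvram_types);

                    printf("testing NVRAM type %u\n", type);
                    nvram_types &= nvram_types - 1;  /* clear lowest set bit */
            }
            return 0;
    }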
968 | static int efx_mcdi_read_assertion(struct efx_nic *efx) | 968 | static int efx_mcdi_read_assertion(struct efx_nic *efx) |
969 | { | 969 | { |
970 | u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; | 970 | u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; |
971 | u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; | 971 | u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; |
972 | unsigned int flags, index, ofst; | 972 | unsigned int flags, index, ofst; |
973 | const char *reason; | 973 | const char *reason; |
974 | size_t outlen; | 974 | size_t outlen; |
975 | int retry; | 975 | int retry; |
976 | int rc; | 976 | int rc; |
977 | 977 | ||
978 | /* Attempt to read any stored assertion state before we reboot | 978 | /* Attempt to read any stored assertion state before we reboot |
979 | * the mcfw out of the assertion handler. Retry twice, once | 979 | * the mcfw out of the assertion handler. Retry twice, once |
980 | * because a boot-time assertion might cause this command to fail | 980 | * because a boot-time assertion might cause this command to fail |
981 | * with EINTR. And once again because GET_ASSERTS can race with | 981 | * with EINTR. And once again because GET_ASSERTS can race with |
982 | * MC_CMD_REBOOT running on the other port. */ | 982 | * MC_CMD_REBOOT running on the other port. */ |
983 | retry = 2; | 983 | retry = 2; |
984 | do { | 984 | do { |
985 | MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); | 985 | MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); |
986 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, | 986 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, |
987 | inbuf, MC_CMD_GET_ASSERTS_IN_LEN, | 987 | inbuf, MC_CMD_GET_ASSERTS_IN_LEN, |
988 | outbuf, sizeof(outbuf), &outlen); | 988 | outbuf, sizeof(outbuf), &outlen); |
989 | } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); | 989 | } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); |
990 | 990 | ||
991 | if (rc) | 991 | if (rc) |
992 | return rc; | 992 | return rc; |
993 | if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) | 993 | if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) |
994 | return -EIO; | 994 | return -EIO; |
995 | 995 | ||
996 | /* Print out any recorded assertion state */ | 996 | /* Print out any recorded assertion state */ |
997 | flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); | 997 | flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); |
998 | if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) | 998 | if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) |
999 | return 0; | 999 | return 0; |
1000 | 1000 | ||
1001 | reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) | 1001 | reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) |
1002 | ? "system-level assertion" | 1002 | ? "system-level assertion" |
1003 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) | 1003 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) |
1004 | ? "thread-level assertion" | 1004 | ? "thread-level assertion" |
1005 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) | 1005 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) |
1006 | ? "watchdog reset" | 1006 | ? "watchdog reset" |
1007 | : "unknown assertion"; | 1007 | : "unknown assertion"; |
1008 | netif_err(efx, hw, efx->net_dev, | 1008 | netif_err(efx, hw, efx->net_dev, |
1009 | "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, | 1009 | "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, |
1010 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), | 1010 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), |
1011 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); | 1011 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); |
1012 | 1012 | ||
1013 | /* Print out the registers */ | 1013 | /* Print out the registers */ |
1014 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; | 1014 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; |
1015 | for (index = 1; index < 32; index++) { | 1015 | for (index = 1; index < 32; index++) { |
1016 | netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, | 1016 | netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, |
1017 | MCDI_DWORD2(outbuf, ofst)); | 1017 | MCDI_DWORD2(outbuf, ofst)); |
1018 | ofst += sizeof(efx_dword_t); | 1018 | ofst += sizeof(efx_dword_t); |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | return 0; | 1021 | return 0; |
1022 | } | 1022 | } |
1023 | 1023 | ||
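The do/while in efx_mcdi_read_assertion() is a common shape for MCDI commands that can race with a reboot: retry a bounded number of times, and only on the specific errno values the race can produce. The same loop in standalone form (do_command() simulates a command that fails with EINTR twice before succeeding):

    #include <errno.h>
    #include <stdio.h>

    static int attempts;

    /* Simulated command: fails with EINTR twice, then succeeds. */
    static int do_command(void)
    {
            return (++attempts <= 2) ? -EINTR : 0;
    }

    int main(void)
    {
            int retry = 2;
            int rc;

            do {
                    rc = do_command();
            } while ((rc == -EINTR || rc == -EIO) && retry-- > 0);

            printf("rc=%d after %d attempts\n", rc, attempts);
            return 0;
    }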
1024 | static void efx_mcdi_exit_assertion(struct efx_nic *efx) | 1024 | static void efx_mcdi_exit_assertion(struct efx_nic *efx) |
1025 | { | 1025 | { |
1026 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | 1026 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; |
1027 | 1027 | ||
1028 | /* Atomically reboot the mcfw out of the assertion handler */ | 1028 | /* Atomically reboot the mcfw out of the assertion handler */ |
1029 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | 1029 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); |
1030 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, | 1030 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, |
1031 | MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); | 1031 | MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); |
1032 | efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, | 1032 | efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, |
1033 | NULL, 0, NULL); | 1033 | NULL, 0, NULL); |
1034 | } | 1034 | } |
1035 | 1035 | ||
1036 | int efx_mcdi_handle_assertion(struct efx_nic *efx) | 1036 | int efx_mcdi_handle_assertion(struct efx_nic *efx) |
1037 | { | 1037 | { |
1038 | int rc; | 1038 | int rc; |
1039 | 1039 | ||
1040 | rc = efx_mcdi_read_assertion(efx); | 1040 | rc = efx_mcdi_read_assertion(efx); |
1041 | if (rc) | 1041 | if (rc) |
1042 | return rc; | 1042 | return rc; |
1043 | 1043 | ||
1044 | efx_mcdi_exit_assertion(efx); | 1044 | efx_mcdi_exit_assertion(efx); |
1045 | 1045 | ||
1046 | return 0; | 1046 | return 0; |
1047 | } | 1047 | } |
1048 | 1048 | ||
1049 | void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | 1049 | void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) |
1050 | { | 1050 | { |
1051 | u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; | 1051 | u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; |
1052 | int rc; | 1052 | int rc; |
1053 | 1053 | ||
1054 | BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); | 1054 | BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); |
1055 | BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); | 1055 | BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); |
1056 | BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); | 1056 | BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); |
1057 | 1057 | ||
1058 | BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); | 1058 | BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); |
1059 | 1059 | ||
1060 | MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); | 1060 | MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); |
1061 | 1061 | ||
1062 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), | 1062 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), |
1063 | NULL, 0, NULL); | 1063 | NULL, 0, NULL); |
1064 | if (rc) | 1064 | if (rc) |
1065 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | 1065 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
1066 | __func__, rc); | 1066 | __func__, rc); |
1067 | } | 1067 | } |
1068 | 1068 | ||
1069 | int efx_mcdi_reset_port(struct efx_nic *efx) | 1069 | int efx_mcdi_reset_port(struct efx_nic *efx) |
1070 | { | 1070 | { |
1071 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); | 1071 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); |
1072 | if (rc) | 1072 | if (rc) |
1073 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | 1073 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
1074 | __func__, rc); | 1074 | __func__, rc); |
1075 | return rc; | 1075 | return rc; |
1076 | } | 1076 | } |
1077 | 1077 | ||
1078 | int efx_mcdi_reset_mc(struct efx_nic *efx) | 1078 | int efx_mcdi_reset_mc(struct efx_nic *efx) |
1079 | { | 1079 | { |
1080 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | 1080 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; |
1081 | int rc; | 1081 | int rc; |
1082 | 1082 | ||
1083 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | 1083 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); |
1084 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); | 1084 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); |
1085 | rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), | 1085 | rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), |
1086 | NULL, 0, NULL); | 1086 | NULL, 0, NULL); |
1087 | /* Inverted logic: -EIO means the MC rebooted as requested */ | 1087 | /* Inverted logic: -EIO means the MC rebooted as requested */ |
1088 | if (rc == -EIO) | 1088 | if (rc == -EIO) |
1089 | return 0; | 1089 | return 0; |
1090 | if (rc == 0) | 1090 | if (rc == 0) |
1091 | rc = -EIO; | 1091 | rc = -EIO; |
1092 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1092 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1093 | return rc; | 1093 | return rc; |
1094 | } | 1094 | } |
1095 | 1095 | ||
1096 | int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | 1096 | static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, |
1097 | const u8 *mac, int *id_out) | 1097 | const u8 *mac, int *id_out) |
1098 | { | 1098 | { |
1099 | u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; | 1099 | u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; |
1100 | u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; | 1100 | u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; |
1101 | size_t outlen; | 1101 | size_t outlen; |
1102 | int rc; | 1102 | int rc; |
1103 | 1103 | ||
1104 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); | 1104 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); |
1105 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, | 1105 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, |
1106 | MC_CMD_FILTER_MODE_SIMPLE); | 1106 | MC_CMD_FILTER_MODE_SIMPLE); |
1107 | memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); | 1107 | memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); |
1108 | 1108 | ||
1109 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), | 1109 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), |
1110 | outbuf, sizeof(outbuf), &outlen); | 1110 | outbuf, sizeof(outbuf), &outlen); |
1111 | if (rc) | 1111 | if (rc) |
1112 | goto fail; | 1112 | goto fail; |
1113 | 1113 | ||
1114 | if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { | 1114 | if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { |
1115 | rc = -EIO; | 1115 | rc = -EIO; |
1116 | goto fail; | 1116 | goto fail; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); | 1119 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); |
1120 | 1120 | ||
1121 | return 0; | 1121 | return 0; |
1122 | 1122 | ||
1123 | fail: | 1123 | fail: |
1124 | *id_out = -1; | 1124 | *id_out = -1; |
1125 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1125 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1126 | return rc; | 1126 | return rc; |
1127 | 1127 | ||
1128 | } | 1128 | } |
1129 | 1129 | ||
1130 | 1130 | ||
1131 | int | 1131 | int |
1132 | efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) | 1132 | efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) |
1133 | { | 1133 | { |
1134 | return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); | 1134 | return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); |
1135 | } | 1135 | } |
1136 | 1136 | ||
1137 | 1137 | ||
1138 | int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) | 1138 | int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) |
1139 | { | 1139 | { |
1140 | u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; | 1140 | u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; |
1141 | size_t outlen; | 1141 | size_t outlen; |
1142 | int rc; | 1142 | int rc; |
1143 | 1143 | ||
1144 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, | 1144 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, |
1145 | outbuf, sizeof(outbuf), &outlen); | 1145 | outbuf, sizeof(outbuf), &outlen); |
1146 | if (rc) | 1146 | if (rc) |
1147 | goto fail; | 1147 | goto fail; |
1148 | 1148 | ||
1149 | if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { | 1149 | if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { |
1150 | rc = -EIO; | 1150 | rc = -EIO; |
1151 | goto fail; | 1151 | goto fail; |
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); | 1154 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); |
1155 | 1155 | ||
1156 | return 0; | 1156 | return 0; |
1157 | 1157 | ||
1158 | fail: | 1158 | fail: |
1159 | *id_out = -1; | 1159 | *id_out = -1; |
1160 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1160 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1161 | return rc; | 1161 | return rc; |
1162 | } | 1162 | } |
1163 | 1163 | ||
1164 | 1164 | ||
1165 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) | 1165 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) |
1166 | { | 1166 | { |
1167 | u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; | 1167 | u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; |
1168 | int rc; | 1168 | int rc; |
1169 | 1169 | ||
1170 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); | 1170 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); |
1171 | 1171 | ||
1172 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), | 1172 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), |
1173 | NULL, 0, NULL); | 1173 | NULL, 0, NULL); |
1174 | if (rc) | 1174 | if (rc) |
1175 | goto fail; | 1175 | goto fail; |
1176 | 1176 | ||
1177 | return 0; | 1177 | return 0; |
1178 | 1178 | ||
1179 | fail: | 1179 | fail: |
1180 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1180 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1181 | return rc; | 1181 | return rc; |
1182 | } | 1182 | } |
1183 | 1183 | ||
1184 | 1184 | ||
1185 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) | 1185 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) |
1186 | { | 1186 | { |
1187 | int rc; | 1187 | int rc; |
1188 | 1188 | ||
1189 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); | 1189 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); |
1190 | if (rc) | 1190 | if (rc) |
1191 | goto fail; | 1191 | goto fail; |
1192 | 1192 | ||
1193 | return 0; | 1193 | return 0; |
1194 | 1194 | ||
1195 | fail: | 1195 | fail: |
1196 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1196 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1197 | return rc; | 1197 | return rc; |
1198 | } | 1198 | } |
1199 | 1199 | ||
1200 | 1200 |
drivers/net/sfc/mcdi.h
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2008-2009 Solarflare Communications Inc. | 3 | * Copyright 2008-2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
7 | * by the Free Software Foundation, incorporated herein by reference. | 7 | * by the Free Software Foundation, incorporated herein by reference. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef EFX_MCDI_H | 10 | #ifndef EFX_MCDI_H |
11 | #define EFX_MCDI_H | 11 | #define EFX_MCDI_H |
12 | 12 | ||
13 | /** | 13 | /** |
14 | * enum efx_mcdi_state | 14 | * enum efx_mcdi_state |
15 | * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the | 15 | * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the |
16 | * mcdi_lock then they are able to move to MCDI_STATE_RUNNING | 16 | * mcdi_lock then they are able to move to MCDI_STATE_RUNNING |
17 | * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that | 17 | * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that |
18 | * moved into this state is allowed to move out of it. | 18 | * moved into this state is allowed to move out of it. |
19 | * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread | 19 | * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread |
20 | * has not yet consumed the result. For all other threads, equivalent to | 20 | * has not yet consumed the result. For all other threads, equivalent to |
21 | * MCDI_STATE_RUNNING. | 21 | * MCDI_STATE_RUNNING. |
22 | */ | 22 | */ |
23 | enum efx_mcdi_state { | 23 | enum efx_mcdi_state { |
24 | MCDI_STATE_QUIESCENT, | 24 | MCDI_STATE_QUIESCENT, |
25 | MCDI_STATE_RUNNING, | 25 | MCDI_STATE_RUNNING, |
26 | MCDI_STATE_COMPLETED, | 26 | MCDI_STATE_COMPLETED, |
27 | }; | 27 | }; |
28 | 28 | ||
29 | enum efx_mcdi_mode { | 29 | enum efx_mcdi_mode { |
30 | MCDI_MODE_POLL, | 30 | MCDI_MODE_POLL, |
31 | MCDI_MODE_EVENTS, | 31 | MCDI_MODE_EVENTS, |
32 | }; | 32 | }; |
33 | 33 | ||
34 | /** | 34 | /** |
35 | * struct efx_mcdi_iface | 35 | * struct efx_mcdi_iface |
36 | * @state: Interface state. Waited for by mcdi_wq. | 36 | * @state: Interface state. Waited for by mcdi_wq. |
37 | * @wq: Wait queue for threads waiting for state != STATE_RUNNING | 37 | * @wq: Wait queue for threads waiting for state != STATE_RUNNING |
38 | * @iface_lock: Protects @credits, @seqno, @resprc, @resplen | 38 | * @iface_lock: Protects @credits, @seqno, @resprc, @resplen |
39 | * @mode: Poll for mcdi completion, or wait for an mcdi_event. | 39 | * @mode: Poll for mcdi completion, or wait for an mcdi_event. |
40 | * Serialised by @iface_lock | 40 | * Serialised by @iface_lock |
41 | * @seqno: The next sequence number to use for mcdi requests. | 41 | * @seqno: The next sequence number to use for mcdi requests. |
42 | * Serialised by @iface_lock | 42 | * Serialised by @iface_lock |
43 | * @credits: Number of spurious MCDI completion events allowed before we | 43 | * @credits: Number of spurious MCDI completion events allowed before we |
44 | * trigger a fatal error. Protected by @iface_lock | 44 | * trigger a fatal error. Protected by @iface_lock |
45 | * @resprc: Returned MCDI completion | 45 | * @resprc: Returned MCDI completion |
46 | * @resplen: Returned payload length | 46 | * @resplen: Returned payload length |
47 | */ | 47 | */ |
48 | struct efx_mcdi_iface { | 48 | struct efx_mcdi_iface { |
49 | atomic_t state; | 49 | atomic_t state; |
50 | wait_queue_head_t wq; | 50 | wait_queue_head_t wq; |
51 | spinlock_t iface_lock; | 51 | spinlock_t iface_lock; |
52 | enum efx_mcdi_mode mode; | 52 | enum efx_mcdi_mode mode; |
53 | unsigned int credits; | 53 | unsigned int credits; |
54 | unsigned int seqno; | 54 | unsigned int seqno; |
55 | unsigned int resprc; | 55 | unsigned int resprc; |
56 | size_t resplen; | 56 | size_t resplen; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | extern void efx_mcdi_init(struct efx_nic *efx); | 59 | extern void efx_mcdi_init(struct efx_nic *efx); |
60 | 60 | ||
61 | extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, | 61 | extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf, |
62 | size_t inlen, u8 *outbuf, size_t outlen, | 62 | size_t inlen, u8 *outbuf, size_t outlen, |
63 | size_t *outlen_actual); | 63 | size_t *outlen_actual); |
64 | 64 | ||
65 | extern int efx_mcdi_poll_reboot(struct efx_nic *efx); | 65 | extern int efx_mcdi_poll_reboot(struct efx_nic *efx); |
66 | extern void efx_mcdi_mode_poll(struct efx_nic *efx); | 66 | extern void efx_mcdi_mode_poll(struct efx_nic *efx); |
67 | extern void efx_mcdi_mode_event(struct efx_nic *efx); | 67 | extern void efx_mcdi_mode_event(struct efx_nic *efx); |
68 | 68 | ||
69 | extern void efx_mcdi_process_event(struct efx_channel *channel, | 69 | extern void efx_mcdi_process_event(struct efx_channel *channel, |
70 | efx_qword_t *event); | 70 | efx_qword_t *event); |
71 | 71 | ||
72 | #define MCDI_PTR2(_buf, _ofst) \ | 72 | #define MCDI_PTR2(_buf, _ofst) \ |
73 | (((u8 *)_buf) + _ofst) | 73 | (((u8 *)_buf) + _ofst) |
74 | #define MCDI_SET_DWORD2(_buf, _ofst, _value) \ | 74 | #define MCDI_SET_DWORD2(_buf, _ofst, _value) \ |
75 | EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ | 75 | EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ |
76 | EFX_DWORD_0, _value) | 76 | EFX_DWORD_0, _value) |
77 | #define MCDI_DWORD2(_buf, _ofst) \ | 77 | #define MCDI_DWORD2(_buf, _ofst) \ |
78 | EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ | 78 | EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \ |
79 | EFX_DWORD_0) | 79 | EFX_DWORD_0) |
80 | #define MCDI_QWORD2(_buf, _ofst) \ | 80 | #define MCDI_QWORD2(_buf, _ofst) \ |
81 | EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \ | 81 | EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \ |
82 | EFX_QWORD_0) | 82 | EFX_QWORD_0) |
83 | 83 | ||
84 | #define MCDI_PTR(_buf, _ofst) \ | 84 | #define MCDI_PTR(_buf, _ofst) \ |
85 | MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST) | 85 | MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST) |
86 | #define MCDI_SET_DWORD(_buf, _ofst, _value) \ | 86 | #define MCDI_SET_DWORD(_buf, _ofst, _value) \ |
87 | MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value) | 87 | MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value) |
88 | #define MCDI_DWORD(_buf, _ofst) \ | 88 | #define MCDI_DWORD(_buf, _ofst) \ |
89 | MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) | 89 | MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) |
90 | #define MCDI_QWORD(_buf, _ofst) \ | 90 | #define MCDI_QWORD(_buf, _ofst) \ |
91 | MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) | 91 | MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST) |
92 | 92 | ||
93 | #define MCDI_EVENT_FIELD(_ev, _field) \ | 93 | #define MCDI_EVENT_FIELD(_ev, _field) \ |
94 | EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) | 94 | EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field) |
95 | 95 | ||
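The two-level macros above paste MC_CMD_ and _OFST onto the field name to resolve a byte offset at compile time, then access the buffer as dwords. A stripped-down illustration of the same token-pasting trick (the DEMO field offsets are invented for this example, and plain memcpy stands in for the driver's little-endian efx_dword_t accessors, so the stored bytes are host-endian here):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Invented protocol offsets, standing in for the MC_CMD_*_OFST constants. */
    #define MC_CMD_DEMO_TYPE_OFST   0
    #define MC_CMD_DEMO_LENGTH_OFST 4

    #define MCDI_PTR2(_buf, _ofst) (((uint8_t *)(_buf)) + (_ofst))
    #define MCDI_SET_DWORD2(_buf, _ofst, _value) \
            do { \
                    uint32_t _v = (_value); \
                    memcpy(MCDI_PTR2(_buf, _ofst), &_v, 4); \
            } while (0)
    /* Token pasting turns the field name into its offset constant. */
    #define MCDI_SET_DWORD(_buf, _ofst, _value) \
            MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)

    int main(void)
    {
            uint8_t inbuf[8];

            MCDI_SET_DWORD(inbuf, DEMO_TYPE, 7);
            MCDI_SET_DWORD(inbuf, DEMO_LENGTH, 128);
            printf("first byte of TYPE field: %u\n", inbuf[0]); /* 7 on LE */
            return 0;
    }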
96 | extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build); | 96 | extern int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build); |
97 | extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | 97 | extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, |
98 | bool *was_attached_out); | 98 | bool *was_attached_out); |
99 | extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, | 99 | extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, |
100 | u16 *fw_subtype_list); | 100 | u16 *fw_subtype_list); |
101 | extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, | 101 | extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, |
102 | u32 dest_evq); | 102 | u32 dest_evq); |
103 | extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); | 103 | extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out); |
104 | extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | 104 | extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, |
105 | size_t *size_out, size_t *erase_size_out, | 105 | size_t *size_out, size_t *erase_size_out, |
106 | bool *protected_out); | 106 | bool *protected_out); |
107 | extern int efx_mcdi_nvram_update_start(struct efx_nic *efx, | 107 | extern int efx_mcdi_nvram_update_start(struct efx_nic *efx, |
108 | unsigned int type); | 108 | unsigned int type); |
109 | extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | 109 | extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, |
110 | loff_t offset, u8 *buffer, size_t length); | 110 | loff_t offset, u8 *buffer, size_t length); |
111 | extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | 111 | extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, |
112 | loff_t offset, const u8 *buffer, | 112 | loff_t offset, const u8 *buffer, |
113 | size_t length); | 113 | size_t length); |
114 | #define EFX_MCDI_NVRAM_LEN_MAX 128 | 114 | #define EFX_MCDI_NVRAM_LEN_MAX 128 |
115 | extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | 115 | extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, |
116 | loff_t offset, size_t length); | 116 | loff_t offset, size_t length); |
117 | extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, | 117 | extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx, |
118 | unsigned int type); | 118 | unsigned int type); |
119 | extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); | 119 | extern int efx_mcdi_nvram_test_all(struct efx_nic *efx); |
120 | extern int efx_mcdi_handle_assertion(struct efx_nic *efx); | 120 | extern int efx_mcdi_handle_assertion(struct efx_nic *efx); |
121 | extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); | 121 | extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode); |
122 | extern int efx_mcdi_reset_port(struct efx_nic *efx); | 122 | extern int efx_mcdi_reset_port(struct efx_nic *efx); |
123 | extern int efx_mcdi_reset_mc(struct efx_nic *efx); | 123 | extern int efx_mcdi_reset_mc(struct efx_nic *efx); |
124 | extern int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | ||
125 | const u8 *mac, int *id_out); | ||
126 | extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, | 124 | extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, |
127 | const u8 *mac, int *id_out); | 125 | const u8 *mac, int *id_out); |
128 | extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); | 126 | extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out); |
129 | extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); | 127 | extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id); |
130 | extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); | 128 | extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx); |
131 | 129 | ||
132 | #endif /* EFX_MCDI_H */ | 130 | #endif /* EFX_MCDI_H */ |
133 | 131 |
drivers/net/sfc/mcdi_phy.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2009 Solarflare Communications Inc. | 3 | * Copyright 2009 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
7 | * by the Free Software Foundation, incorporated herein by reference. | 7 | * by the Free Software Foundation, incorporated herein by reference. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * Driver for PHY related operations via MCDI. | 11 | * Driver for PHY related operations via MCDI. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/slab.h> | 14 | #include <linux/slab.h> |
15 | #include "efx.h" | 15 | #include "efx.h" |
16 | #include "phy.h" | 16 | #include "phy.h" |
17 | #include "mcdi.h" | 17 | #include "mcdi.h" |
18 | #include "mcdi_pcol.h" | 18 | #include "mcdi_pcol.h" |
19 | #include "mdio_10g.h" | 19 | #include "mdio_10g.h" |
20 | #include "nic.h" | 20 | #include "nic.h" |
21 | #include "selftest.h" | 21 | #include "selftest.h" |
22 | 22 | ||
23 | struct efx_mcdi_phy_data { | 23 | struct efx_mcdi_phy_data { |
24 | u32 flags; | 24 | u32 flags; |
25 | u32 type; | 25 | u32 type; |
26 | u32 supported_cap; | 26 | u32 supported_cap; |
27 | u32 channel; | 27 | u32 channel; |
28 | u32 port; | 28 | u32 port; |
29 | u32 stats_mask; | 29 | u32 stats_mask; |
30 | u8 name[20]; | 30 | u8 name[20]; |
31 | u32 media; | 31 | u32 media; |
32 | u32 mmd_mask; | 32 | u32 mmd_mask; |
33 | u8 revision[20]; | 33 | u8 revision[20]; |
34 | u32 forced_cap; | 34 | u32 forced_cap; |
35 | }; | 35 | }; |
36 | 36 | ||
37 | static int | 37 | static int |
38 | efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) | 38 | efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg) |
39 | { | 39 | { |
40 | u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; | 40 | u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN]; |
41 | size_t outlen; | 41 | size_t outlen; |
42 | int rc; | 42 | int rc; |
43 | 43 | ||
44 | BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); | 44 | BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_IN_LEN != 0); |
45 | BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); | 45 | BUILD_BUG_ON(MC_CMD_GET_PHY_CFG_OUT_NAME_LEN != sizeof(cfg->name)); |
46 | 46 | ||
47 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, | 47 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_CFG, NULL, 0, |
48 | outbuf, sizeof(outbuf), &outlen); | 48 | outbuf, sizeof(outbuf), &outlen); |
49 | if (rc) | 49 | if (rc) |
50 | goto fail; | 50 | goto fail; |
51 | 51 | ||
52 | if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { | 52 | if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { |
53 | rc = -EIO; | 53 | rc = -EIO; |
54 | goto fail; | 54 | goto fail; |
55 | } | 55 | } |
56 | 56 | ||
57 | cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); | 57 | cfg->flags = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_FLAGS); |
58 | cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); | 58 | cfg->type = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_TYPE); |
59 | cfg->supported_cap = | 59 | cfg->supported_cap = |
60 | MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); | 60 | MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_SUPPORTED_CAP); |
61 | cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); | 61 | cfg->channel = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_CHANNEL); |
62 | cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); | 62 | cfg->port = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_PRT); |
63 | cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); | 63 | cfg->stats_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_STATS_MASK); |
64 | memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), | 64 | memcpy(cfg->name, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_NAME), |
65 | sizeof(cfg->name)); | 65 | sizeof(cfg->name)); |
66 | cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); | 66 | cfg->media = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MEDIA_TYPE); |
67 | cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); | 67 | cfg->mmd_mask = MCDI_DWORD(outbuf, GET_PHY_CFG_OUT_MMD_MASK); |
68 | memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), | 68 | memcpy(cfg->revision, MCDI_PTR(outbuf, GET_PHY_CFG_OUT_REVISION), |
69 | sizeof(cfg->revision)); | 69 | sizeof(cfg->revision)); |
70 | 70 | ||
71 | return 0; | 71 | return 0; |
72 | 72 | ||
73 | fail: | 73 | fail: |
74 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 74 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
75 | return rc; | 75 | return rc; |
76 | } | 76 | } |
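Worth noting for readers unfamiliar with MCDI: responses are flat byte buffers of 32-bit little-endian fields at fixed offsets, and the MCDI_DWORD()/MCDI_PTR() accessors above simply resolve a field name to its offset. A rough standalone sketch of the underlying access, assuming only that layout (the function name, test values and explicit byte-assembly are mine, not the driver's):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Read a little-endian 32-bit field at byte offset 'ofst' of an
 * MCDI-style response buffer, portably on any host endianness. */
static uint32_t mcdi_dword_at(const uint8_t *buf, size_t ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

int main(void)
{
        const uint8_t resp[8] = { 0, 0, 0, 0, 0x78, 0x56, 0x34, 0x12 };

        /* The dword starting at offset 4 decodes as 0x12345678. */
        assert(mcdi_dword_at(resp, 4) == 0x12345678);
        return 0;
}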
77 | 77 | ||
78 | static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, | 78 | static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities, |
79 | u32 flags, u32 loopback_mode, | 79 | u32 flags, u32 loopback_mode, |
80 | u32 loopback_speed) | 80 | u32 loopback_speed) |
81 | { | 81 | { |
82 | u8 inbuf[MC_CMD_SET_LINK_IN_LEN]; | 82 | u8 inbuf[MC_CMD_SET_LINK_IN_LEN]; |
83 | int rc; | 83 | int rc; |
84 | 84 | ||
85 | BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); | 85 | BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0); |
86 | 86 | ||
87 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); | 87 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_CAP, capabilities); |
88 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); | 88 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_FLAGS, flags); |
89 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); | 89 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_MODE, loopback_mode); |
90 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); | 90 | MCDI_SET_DWORD(inbuf, SET_LINK_IN_LOOPBACK_SPEED, loopback_speed); |
91 | 91 | ||
92 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), | 92 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_LINK, inbuf, sizeof(inbuf), |
93 | NULL, 0, NULL); | 93 | NULL, 0, NULL); |
94 | if (rc) | 94 | if (rc) |
95 | goto fail; | 95 | goto fail; |
96 | 96 | ||
97 | return 0; | 97 | return 0; |
98 | 98 | ||
99 | fail: | 99 | fail: |
100 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 100 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
101 | return rc; | 101 | return rc; |
102 | } | 102 | } |
103 | 103 | ||
104 | static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) | 104 | static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes) |
105 | { | 105 | { |
106 | u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN]; | 106 | u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN]; |
107 | size_t outlen; | 107 | size_t outlen; |
108 | int rc; | 108 | int rc; |
109 | 109 | ||
110 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, | 110 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LOOPBACK_MODES, NULL, 0, |
111 | outbuf, sizeof(outbuf), &outlen); | 111 | outbuf, sizeof(outbuf), &outlen); |
112 | if (rc) | 112 | if (rc) |
113 | goto fail; | 113 | goto fail; |
114 | 114 | ||
115 | if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { | 115 | if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { |
116 | rc = -EIO; | 116 | rc = -EIO; |
117 | goto fail; | 117 | goto fail; |
118 | } | 118 | } |
119 | 119 | ||
120 | *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED); | 120 | *loopback_modes = MCDI_QWORD(outbuf, GET_LOOPBACK_MODES_SUGGESTED); |
121 | 121 | ||
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | fail: | 124 | fail: |
125 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 125 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
126 | return rc; | 126 | return rc; |
127 | } | 127 | } |
128 | 128 | ||
129 | int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, | 129 | int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus, |
130 | unsigned int prtad, unsigned int devad, u16 addr, | 130 | unsigned int prtad, unsigned int devad, u16 addr, |
131 | u16 *value_out, u32 *status_out) | 131 | u16 *value_out, u32 *status_out) |
132 | { | 132 | { |
133 | u8 inbuf[MC_CMD_MDIO_READ_IN_LEN]; | 133 | u8 inbuf[MC_CMD_MDIO_READ_IN_LEN]; |
134 | u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN]; | 134 | u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN]; |
135 | size_t outlen; | 135 | size_t outlen; |
136 | int rc; | 136 | int rc; |
137 | 137 | ||
138 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus); | 138 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus); |
139 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); | 139 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad); |
140 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); | 140 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad); |
141 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); | 141 | MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr); |
142 | 142 | ||
143 | rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), | 143 | rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf), |
144 | outbuf, sizeof(outbuf), &outlen); | 144 | outbuf, sizeof(outbuf), &outlen); |
145 | if (rc) | 145 | if (rc) |
146 | goto fail; | 146 | goto fail; |
147 | 147 | ||
148 | *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); | 148 | *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE); |
149 | *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS); | 149 | *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS); |
150 | return 0; | 150 | return 0; |
151 | 151 | ||
152 | fail: | 152 | fail: |
153 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 153 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
154 | return rc; | 154 | return rc; |
155 | } | 155 | } |
156 | 156 | ||
157 | int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, | 157 | int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus, |
158 | unsigned int prtad, unsigned int devad, u16 addr, | 158 | unsigned int prtad, unsigned int devad, u16 addr, |
159 | u16 value, u32 *status_out) | 159 | u16 value, u32 *status_out) |
160 | { | 160 | { |
161 | u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN]; | 161 | u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN]; |
162 | u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN]; | 162 | u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN]; |
163 | size_t outlen; | 163 | size_t outlen; |
164 | int rc; | 164 | int rc; |
165 | 165 | ||
166 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus); | 166 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus); |
167 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); | 167 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad); |
168 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); | 168 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad); |
169 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); | 169 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr); |
170 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); | 170 | MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value); |
171 | 171 | ||
172 | rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), | 172 | rc = efx_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf), |
173 | outbuf, sizeof(outbuf), &outlen); | 173 | outbuf, sizeof(outbuf), &outlen); |
174 | if (rc) | 174 | if (rc) |
175 | goto fail; | 175 | goto fail; |
176 | 176 | ||
177 | *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS); | 177 | *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS); |
178 | return 0; | 178 | return 0; |
179 | 179 | ||
180 | fail: | 180 | fail: |
181 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 181 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
182 | return rc; | 182 | return rc; |
183 | } | 183 | } |
184 | 184 | ||
185 | static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) | 185 | static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) |
186 | { | 186 | { |
187 | u32 result = 0; | 187 | u32 result = 0; |
188 | 188 | ||
189 | switch (media) { | 189 | switch (media) { |
190 | case MC_CMD_MEDIA_KX4: | 190 | case MC_CMD_MEDIA_KX4: |
191 | result |= SUPPORTED_Backplane; | 191 | result |= SUPPORTED_Backplane; |
192 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | 192 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) |
193 | result |= SUPPORTED_1000baseKX_Full; | 193 | result |= SUPPORTED_1000baseKX_Full; |
194 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | 194 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) |
195 | result |= SUPPORTED_10000baseKX4_Full; | 195 | result |= SUPPORTED_10000baseKX4_Full; |
196 | break; | 196 | break; |
197 | 197 | ||
198 | case MC_CMD_MEDIA_XFP: | 198 | case MC_CMD_MEDIA_XFP: |
199 | case MC_CMD_MEDIA_SFP_PLUS: | 199 | case MC_CMD_MEDIA_SFP_PLUS: |
200 | result |= SUPPORTED_FIBRE; | 200 | result |= SUPPORTED_FIBRE; |
201 | break; | 201 | break; |
202 | 202 | ||
203 | case MC_CMD_MEDIA_BASE_T: | 203 | case MC_CMD_MEDIA_BASE_T: |
204 | result |= SUPPORTED_TP; | 204 | result |= SUPPORTED_TP; |
205 | if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) | 205 | if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN)) |
206 | result |= SUPPORTED_10baseT_Half; | 206 | result |= SUPPORTED_10baseT_Half; |
207 | if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) | 207 | if (cap & (1 << MC_CMD_PHY_CAP_10FDX_LBN)) |
208 | result |= SUPPORTED_10baseT_Full; | 208 | result |= SUPPORTED_10baseT_Full; |
209 | if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) | 209 | if (cap & (1 << MC_CMD_PHY_CAP_100HDX_LBN)) |
210 | result |= SUPPORTED_100baseT_Half; | 210 | result |= SUPPORTED_100baseT_Half; |
211 | if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) | 211 | if (cap & (1 << MC_CMD_PHY_CAP_100FDX_LBN)) |
212 | result |= SUPPORTED_100baseT_Full; | 212 | result |= SUPPORTED_100baseT_Full; |
213 | if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) | 213 | if (cap & (1 << MC_CMD_PHY_CAP_1000HDX_LBN)) |
214 | result |= SUPPORTED_1000baseT_Half; | 214 | result |= SUPPORTED_1000baseT_Half; |
215 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | 215 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) |
216 | result |= SUPPORTED_1000baseT_Full; | 216 | result |= SUPPORTED_1000baseT_Full; |
217 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | 217 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) |
218 | result |= SUPPORTED_10000baseT_Full; | 218 | result |= SUPPORTED_10000baseT_Full; |
219 | break; | 219 | break; |
220 | } | 220 | } |
221 | 221 | ||
222 | if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) | 222 | if (cap & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) |
223 | result |= SUPPORTED_Pause; | 223 | result |= SUPPORTED_Pause; |
224 | if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) | 224 | if (cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) |
225 | result |= SUPPORTED_Asym_Pause; | 225 | result |= SUPPORTED_Asym_Pause; |
226 | if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | 226 | if (cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) |
227 | result |= SUPPORTED_Autoneg; | 227 | result |= SUPPORTED_Autoneg; |
228 | 228 | ||
229 | return result; | 229 | return result; |
230 | } | 230 | } |
231 | 231 | ||
232 | static u32 ethtool_to_mcdi_cap(u32 cap) | 232 | static u32 ethtool_to_mcdi_cap(u32 cap) |
233 | { | 233 | { |
234 | u32 result = 0; | 234 | u32 result = 0; |
235 | 235 | ||
236 | if (cap & SUPPORTED_10baseT_Half) | 236 | if (cap & SUPPORTED_10baseT_Half) |
237 | result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); | 237 | result |= (1 << MC_CMD_PHY_CAP_10HDX_LBN); |
238 | if (cap & SUPPORTED_10baseT_Full) | 238 | if (cap & SUPPORTED_10baseT_Full) |
239 | result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); | 239 | result |= (1 << MC_CMD_PHY_CAP_10FDX_LBN); |
240 | if (cap & SUPPORTED_100baseT_Half) | 240 | if (cap & SUPPORTED_100baseT_Half) |
241 | result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); | 241 | result |= (1 << MC_CMD_PHY_CAP_100HDX_LBN); |
242 | if (cap & SUPPORTED_100baseT_Full) | 242 | if (cap & SUPPORTED_100baseT_Full) |
243 | result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); | 243 | result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN); |
244 | if (cap & SUPPORTED_1000baseT_Half) | 244 | if (cap & SUPPORTED_1000baseT_Half) |
245 | result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); | 245 | result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN); |
246 | if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) | 246 | if (cap & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseKX_Full)) |
247 | result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); | 247 | result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN); |
248 | if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) | 248 | if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full)) |
249 | result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); | 249 | result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN); |
250 | if (cap & SUPPORTED_Pause) | 250 | if (cap & SUPPORTED_Pause) |
251 | result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); | 251 | result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN); |
252 | if (cap & SUPPORTED_Asym_Pause) | 252 | if (cap & SUPPORTED_Asym_Pause) |
253 | result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); | 253 | result |= (1 << MC_CMD_PHY_CAP_ASYM_LBN); |
254 | if (cap & SUPPORTED_Autoneg) | 254 | if (cap & SUPPORTED_Autoneg) |
255 | result |= (1 << MC_CMD_PHY_CAP_AN_LBN); | 255 | result |= (1 << MC_CMD_PHY_CAP_AN_LBN); |
256 | 256 | ||
257 | return result; | 257 | return result; |
258 | } | 258 | } |
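The two helpers above are plain bitmask remaps: each MCDI capability is named by its bit position (the _LBN suffix, "lowest bit number"), so each direction is a chain of test-and-set steps. A minimal self-contained sketch of the same pattern, with invented bit assignments standing in for the real mcdi_pcol.h constants:

#include <assert.h>
#include <stdint.h>

/* Invented bit layouts for illustration only. */
#define SRC_FOO         (1u << 0)
#define SRC_BAR         (1u << 3)
#define DST_FOO_LBN     5
#define DST_BAR_LBN     9

static uint32_t remap_caps(uint32_t src)
{
        uint32_t dst = 0;

        if (src & SRC_FOO)
                dst |= 1u << DST_FOO_LBN;
        if (src & SRC_BAR)
                dst |= 1u << DST_BAR_LBN;
        return dst;
}

int main(void)
{
        assert(remap_caps(SRC_FOO | SRC_BAR) ==
               ((1u << DST_FOO_LBN) | (1u << DST_BAR_LBN)));
        return 0;
}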
259 | 259 | ||
260 | static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) | 260 | static u32 efx_get_mcdi_phy_flags(struct efx_nic *efx) |
261 | { | 261 | { |
262 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | 262 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
263 | enum efx_phy_mode mode, supported; | 263 | enum efx_phy_mode mode, supported; |
264 | u32 flags; | 264 | u32 flags; |
265 | 265 | ||
266 | /* TODO: Advertise the capabilities supported by this PHY */ | 266 | /* TODO: Advertise the capabilities supported by this PHY */ |
267 | supported = 0; | 267 | supported = 0; |
268 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN)) | 268 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_TXDIS_LBN)) |
269 | supported |= PHY_MODE_TX_DISABLED; | 269 | supported |= PHY_MODE_TX_DISABLED; |
270 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN)) | 270 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_LOWPOWER_LBN)) |
271 | supported |= PHY_MODE_LOW_POWER; | 271 | supported |= PHY_MODE_LOW_POWER; |
272 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN)) | 272 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_POWEROFF_LBN)) |
273 | supported |= PHY_MODE_OFF; | 273 | supported |= PHY_MODE_OFF; |
274 | 274 | ||
275 | mode = efx->phy_mode & supported; | 275 | mode = efx->phy_mode & supported; |
276 | 276 | ||
277 | flags = 0; | 277 | flags = 0; |
278 | if (mode & PHY_MODE_TX_DISABLED) | 278 | if (mode & PHY_MODE_TX_DISABLED) |
279 | flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN); | 279 | flags |= (1 << MC_CMD_SET_LINK_TXDIS_LBN); |
280 | if (mode & PHY_MODE_LOW_POWER) | 280 | if (mode & PHY_MODE_LOW_POWER) |
281 | flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN); | 281 | flags |= (1 << MC_CMD_SET_LINK_LOWPOWER_LBN); |
282 | if (mode & PHY_MODE_OFF) | 282 | if (mode & PHY_MODE_OFF) |
283 | flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN); | 283 | flags |= (1 << MC_CMD_SET_LINK_POWEROFF_LBN); |
284 | 284 | ||
285 | return flags; | 285 | return flags; |
286 | } | 286 | } |
287 | 287 | ||
288 | static u32 mcdi_to_ethtool_media(u32 media) | 288 | static u32 mcdi_to_ethtool_media(u32 media) |
289 | { | 289 | { |
290 | switch (media) { | 290 | switch (media) { |
291 | case MC_CMD_MEDIA_XAUI: | 291 | case MC_CMD_MEDIA_XAUI: |
292 | case MC_CMD_MEDIA_CX4: | 292 | case MC_CMD_MEDIA_CX4: |
293 | case MC_CMD_MEDIA_KX4: | 293 | case MC_CMD_MEDIA_KX4: |
294 | return PORT_OTHER; | 294 | return PORT_OTHER; |
295 | 295 | ||
296 | case MC_CMD_MEDIA_XFP: | 296 | case MC_CMD_MEDIA_XFP: |
297 | case MC_CMD_MEDIA_SFP_PLUS: | 297 | case MC_CMD_MEDIA_SFP_PLUS: |
298 | return PORT_FIBRE; | 298 | return PORT_FIBRE; |
299 | 299 | ||
300 | case MC_CMD_MEDIA_BASE_T: | 300 | case MC_CMD_MEDIA_BASE_T: |
301 | return PORT_TP; | 301 | return PORT_TP; |
302 | 302 | ||
303 | default: | 303 | default: |
304 | return PORT_OTHER; | 304 | return PORT_OTHER; |
305 | } | 305 | } |
306 | } | 306 | } |
307 | 307 | ||
308 | static int efx_mcdi_phy_probe(struct efx_nic *efx) | 308 | static int efx_mcdi_phy_probe(struct efx_nic *efx) |
309 | { | 309 | { |
310 | struct efx_mcdi_phy_data *phy_data; | 310 | struct efx_mcdi_phy_data *phy_data; |
311 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | 311 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; |
312 | u32 caps; | 312 | u32 caps; |
313 | int rc; | 313 | int rc; |
314 | 314 | ||
315 | /* Initialise and populate phy_data */ | 315 | /* Initialise and populate phy_data */ |
316 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | 316 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); |
317 | if (phy_data == NULL) | 317 | if (phy_data == NULL) |
318 | return -ENOMEM; | 318 | return -ENOMEM; |
319 | 319 | ||
320 | rc = efx_mcdi_get_phy_cfg(efx, phy_data); | 320 | rc = efx_mcdi_get_phy_cfg(efx, phy_data); |
321 | if (rc != 0) | 321 | if (rc != 0) |
322 | goto fail; | 322 | goto fail; |
323 | 323 | ||
324 | /* Read initial link advertisement */ | 324 | /* Read initial link advertisement */ |
325 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | 325 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); |
326 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | 326 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, |
327 | outbuf, sizeof(outbuf), NULL); | 327 | outbuf, sizeof(outbuf), NULL); |
328 | if (rc) | 328 | if (rc) |
329 | goto fail; | 329 | goto fail; |
330 | 330 | ||
331 | /* Fill out nic state */ | 331 | /* Fill out nic state */ |
332 | efx->phy_data = phy_data; | 332 | efx->phy_data = phy_data; |
333 | efx->phy_type = phy_data->type; | 333 | efx->phy_type = phy_data->type; |
334 | 334 | ||
335 | efx->mdio_bus = phy_data->channel; | 335 | efx->mdio_bus = phy_data->channel; |
336 | efx->mdio.prtad = phy_data->port; | 336 | efx->mdio.prtad = phy_data->port; |
337 | efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); | 337 | efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); |
338 | efx->mdio.mode_support = 0; | 338 | efx->mdio.mode_support = 0; |
339 | if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) | 339 | if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) |
340 | efx->mdio.mode_support |= MDIO_SUPPORTS_C22; | 340 | efx->mdio.mode_support |= MDIO_SUPPORTS_C22; |
341 | if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) | 341 | if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) |
342 | efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | 342 | efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; |
343 | 343 | ||
344 | caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); | 344 | caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); |
345 | if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) | 345 | if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) |
346 | efx->link_advertising = | 346 | efx->link_advertising = |
347 | mcdi_to_ethtool_cap(phy_data->media, caps); | 347 | mcdi_to_ethtool_cap(phy_data->media, caps); |
348 | else | 348 | else |
349 | phy_data->forced_cap = caps; | 349 | phy_data->forced_cap = caps; |
350 | 350 | ||
351 | /* Assert that we can map efx -> mcdi loopback modes */ | 351 | /* Assert that we can map efx -> mcdi loopback modes */ |
352 | BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); | 352 | BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); |
353 | BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); | 353 | BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA); |
354 | BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); | 354 | BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC); |
355 | BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); | 355 | BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII); |
356 | BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); | 356 | BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS); |
357 | BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); | 357 | BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI); |
358 | BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); | 358 | BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII); |
359 | BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); | 359 | BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII); |
360 | BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); | 360 | BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR); |
361 | BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); | 361 | BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI); |
362 | BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); | 362 | BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR); |
363 | BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); | 363 | BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR); |
364 | BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); | 364 | BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR); |
365 | BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); | 365 | BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR); |
366 | BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); | 366 | BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY); |
367 | BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); | 367 | BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS); |
368 | BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); | 368 | BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS); |
369 | BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); | 369 | BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD); |
370 | BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); | 370 | BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT); |
371 | BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); | 371 | BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS); |
372 | BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); | 372 | BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS); |
373 | BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); | 373 | BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR); |
374 | BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); | 374 | BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR); |
375 | BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); | 375 | BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS); |
376 | BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); | 376 | BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS); |
377 | BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); | 377 | BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR); |
378 | BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); | 378 | BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS); |
379 | 379 | ||
380 | rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); | 380 | rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes); |
381 | if (rc != 0) | 381 | if (rc != 0) |
382 | goto fail; | 382 | goto fail; |
383 | /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, | 383 | /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, |
384 | * but by convention we don't include it */ | 384 | * but by convention we don't include it */ |
385 | efx->loopback_modes &= ~(1 << LOOPBACK_NONE); | 385 | efx->loopback_modes &= ~(1 << LOOPBACK_NONE); |
386 | 386 | ||
387 | /* Set the initial link mode */ | 387 | /* Set the initial link mode */ |
388 | efx_mcdi_phy_decode_link( | 388 | efx_mcdi_phy_decode_link( |
389 | efx, &efx->link_state, | 389 | efx, &efx->link_state, |
390 | MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), | 390 | MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), |
391 | MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), | 391 | MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), |
392 | MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); | 392 | MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); |
393 | 393 | ||
394 | /* Default to Autonegotiated flow control if the PHY supports it */ | 394 | /* Default to Autonegotiated flow control if the PHY supports it */ |
395 | efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; | 395 | efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; |
396 | if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | 396 | if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) |
397 | efx->wanted_fc |= EFX_FC_AUTO; | 397 | efx->wanted_fc |= EFX_FC_AUTO; |
398 | efx_link_set_wanted_fc(efx, efx->wanted_fc); | 398 | efx_link_set_wanted_fc(efx, efx->wanted_fc); |
399 | 399 | ||
400 | return 0; | 400 | return 0; |
401 | 401 | ||
402 | fail: | 402 | fail: |
403 | kfree(phy_data); | 403 | kfree(phy_data); |
404 | return rc; | 404 | return rc; |
405 | } | 405 | } |
406 | 406 | ||
407 | int efx_mcdi_phy_reconfigure(struct efx_nic *efx) | 407 | int efx_mcdi_phy_reconfigure(struct efx_nic *efx) |
408 | { | 408 | { |
409 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | 409 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
410 | u32 caps = (efx->link_advertising ? | 410 | u32 caps = (efx->link_advertising ? |
411 | ethtool_to_mcdi_cap(efx->link_advertising) : | 411 | ethtool_to_mcdi_cap(efx->link_advertising) : |
412 | phy_cfg->forced_cap); | 412 | phy_cfg->forced_cap); |
413 | 413 | ||
414 | return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), | 414 | return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), |
415 | efx->loopback_mode, 0); | 415 | efx->loopback_mode, 0); |
416 | } | 416 | } |
417 | 417 | ||
418 | void efx_mcdi_phy_decode_link(struct efx_nic *efx, | 418 | void efx_mcdi_phy_decode_link(struct efx_nic *efx, |
419 | struct efx_link_state *link_state, | 419 | struct efx_link_state *link_state, |
420 | u32 speed, u32 flags, u32 fcntl) | 420 | u32 speed, u32 flags, u32 fcntl) |
421 | { | 421 | { |
422 | switch (fcntl) { | 422 | switch (fcntl) { |
423 | case MC_CMD_FCNTL_AUTO: | 423 | case MC_CMD_FCNTL_AUTO: |
424 | WARN_ON(1); /* This is not a link mode */ | 424 | WARN_ON(1); /* This is not a link mode */ |
425 | link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; | 425 | link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX; |
426 | break; | 426 | break; |
427 | case MC_CMD_FCNTL_BIDIR: | 427 | case MC_CMD_FCNTL_BIDIR: |
428 | link_state->fc = EFX_FC_TX | EFX_FC_RX; | 428 | link_state->fc = EFX_FC_TX | EFX_FC_RX; |
429 | break; | 429 | break; |
430 | case MC_CMD_FCNTL_RESPOND: | 430 | case MC_CMD_FCNTL_RESPOND: |
431 | link_state->fc = EFX_FC_RX; | 431 | link_state->fc = EFX_FC_RX; |
432 | break; | 432 | break; |
433 | default: | 433 | default: |
434 | WARN_ON(1); | 434 | WARN_ON(1); |
435 | case MC_CMD_FCNTL_OFF: | 435 | case MC_CMD_FCNTL_OFF: |
436 | link_state->fc = 0; | 436 | link_state->fc = 0; |
437 | break; | 437 | break; |
438 | } | 438 | } |
439 | 439 | ||
440 | link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN)); | 440 | link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_LINK_UP_LBN)); |
441 | link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN)); | 441 | link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_FULL_DUPLEX_LBN)); |
442 | link_state->speed = speed; | 442 | link_state->speed = speed; |
443 | } | 443 | } |
444 | 444 | ||
445 | /* Verify that the forced flow control settings (!EFX_FC_AUTO) are | 445 | /* Verify that the forced flow control settings (!EFX_FC_AUTO) are |
446 | * supported by the link partner. Warn the user if this isn't the case | 446 | * supported by the link partner. Warn the user if this isn't the case |
447 | */ | 447 | */ |
448 | void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) | 448 | void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa) |
449 | { | 449 | { |
450 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | 450 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
451 | u32 rmtadv; | 451 | u32 rmtadv; |
452 | 452 | ||
453 | /* The link partner capabilities are only relevant if the | 453 | /* The link partner capabilities are only relevant if the |
454 | * link supports flow control autonegotiation */ | 454 | * link supports flow control autonegotiation */ |
455 | if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) | 455 | if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN)) |
456 | return; | 456 | return; |
457 | 457 | ||
458 | /* If flow control autoneg is supported and enabled, then fine */ | 458 | /* If flow control autoneg is supported and enabled, then fine */ |
459 | if (efx->wanted_fc & EFX_FC_AUTO) | 459 | if (efx->wanted_fc & EFX_FC_AUTO) |
460 | return; | 460 | return; |
461 | 461 | ||
462 | rmtadv = 0; | 462 | rmtadv = 0; |
463 | if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) | 463 | if (lpa & (1 << MC_CMD_PHY_CAP_PAUSE_LBN)) |
464 | rmtadv |= ADVERTISED_Pause; | 464 | rmtadv |= ADVERTISED_Pause; |
465 | if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) | 465 | if (lpa & (1 << MC_CMD_PHY_CAP_ASYM_LBN)) |
466 | rmtadv |= ADVERTISED_Asym_Pause; | 466 | rmtadv |= ADVERTISED_Asym_Pause; |
467 | 467 | ||
468 | if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) | 468 | if ((efx->wanted_fc & EFX_FC_TX) && rmtadv == ADVERTISED_Asym_Pause) |
469 | netif_err(efx, link, efx->net_dev, | 469 | netif_err(efx, link, efx->net_dev, |
470 | "warning: link partner doesn't support pause frames"); | 470 | "warning: link partner doesn't support pause frames"); |
471 | } | 471 | } |
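The single case warned about deserves spelling out: a partner advertising Asym_Pause without Pause can generate pause frames but will not act on ours, so a locally forced TX-pause setting buys nothing. A compact standalone restatement of the predicate (names and bit encodings invented for the sketch):

#include <assert.h>
#include <stdbool.h>

#define ADV_PAUSE       (1u << 0)   /* invented encodings for the sketch */
#define ADV_ASYM_PAUSE  (1u << 1)

/* True when we force TX pause but the partner advertises asymmetric
 * pause only, i.e. it will ignore the pause frames we send. */
static bool tx_pause_is_futile(bool forced_tx_pause, unsigned int rmtadv)
{
        return forced_tx_pause && rmtadv == ADV_ASYM_PAUSE;
}

int main(void)
{
        assert(tx_pause_is_futile(true, ADV_ASYM_PAUSE));
        assert(!tx_pause_is_futile(true, ADV_PAUSE | ADV_ASYM_PAUSE));
        return 0;
}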
472 | 472 | ||
473 | static bool efx_mcdi_phy_poll(struct efx_nic *efx) | 473 | static bool efx_mcdi_phy_poll(struct efx_nic *efx) |
474 | { | 474 | { |
475 | struct efx_link_state old_state = efx->link_state; | 475 | struct efx_link_state old_state = efx->link_state; |
476 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | 476 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; |
477 | int rc; | 477 | int rc; |
478 | 478 | ||
479 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | 479 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); |
480 | 480 | ||
481 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | 481 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); |
482 | 482 | ||
483 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | 483 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, |
484 | outbuf, sizeof(outbuf), NULL); | 484 | outbuf, sizeof(outbuf), NULL); |
485 | if (rc) { | 485 | if (rc) { |
486 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | 486 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
487 | __func__, rc); | 487 | __func__, rc); |
488 | efx->link_state.up = false; | 488 | efx->link_state.up = false; |
489 | } else { | 489 | } else { |
490 | efx_mcdi_phy_decode_link( | 490 | efx_mcdi_phy_decode_link( |
491 | efx, &efx->link_state, | 491 | efx, &efx->link_state, |
492 | MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), | 492 | MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED), |
493 | MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), | 493 | MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS), |
494 | MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); | 494 | MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL)); |
495 | } | 495 | } |
496 | 496 | ||
497 | return !efx_link_state_equal(&efx->link_state, &old_state); | 497 | return !efx_link_state_equal(&efx->link_state, &old_state); |
498 | } | 498 | } |
499 | 499 | ||
500 | static void efx_mcdi_phy_remove(struct efx_nic *efx) | 500 | static void efx_mcdi_phy_remove(struct efx_nic *efx) |
501 | { | 501 | { |
502 | struct efx_mcdi_phy_data *phy_data = efx->phy_data; | 502 | struct efx_mcdi_phy_data *phy_data = efx->phy_data; |
503 | 503 | ||
504 | efx->phy_data = NULL; | 504 | efx->phy_data = NULL; |
505 | kfree(phy_data); | 505 | kfree(phy_data); |
506 | } | 506 | } |
507 | 507 | ||
508 | static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 508 | static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) |
509 | { | 509 | { |
510 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | 510 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
511 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; | 511 | u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; |
512 | int rc; | 512 | int rc; |
513 | 513 | ||
514 | ecmd->supported = | 514 | ecmd->supported = |
515 | mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); | 515 | mcdi_to_ethtool_cap(phy_cfg->media, phy_cfg->supported_cap); |
516 | ecmd->advertising = efx->link_advertising; | 516 | ecmd->advertising = efx->link_advertising; |
517 | ecmd->speed = efx->link_state.speed; | 517 | ecmd->speed = efx->link_state.speed; |
518 | ecmd->duplex = efx->link_state.fd; | 518 | ecmd->duplex = efx->link_state.fd; |
519 | ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); | 519 | ecmd->port = mcdi_to_ethtool_media(phy_cfg->media); |
520 | ecmd->phy_address = phy_cfg->port; | 520 | ecmd->phy_address = phy_cfg->port; |
521 | ecmd->transceiver = XCVR_INTERNAL; | 521 | ecmd->transceiver = XCVR_INTERNAL; |
522 | ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); | 522 | ecmd->autoneg = !!(efx->link_advertising & ADVERTISED_Autoneg); |
523 | ecmd->mdio_support = (efx->mdio.mode_support & | 523 | ecmd->mdio_support = (efx->mdio.mode_support & |
524 | (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); | 524 | (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22)); |
525 | 525 | ||
526 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); | 526 | BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); |
527 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, | 527 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, |
528 | outbuf, sizeof(outbuf), NULL); | 528 | outbuf, sizeof(outbuf), NULL); |
529 | if (rc) { | 529 | if (rc) { |
530 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | 530 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
531 | __func__, rc); | 531 | __func__, rc); |
532 | return; | 532 | return; |
533 | } | 533 | } |
534 | ecmd->lp_advertising = | 534 | ecmd->lp_advertising = |
535 | mcdi_to_ethtool_cap(phy_cfg->media, | 535 | mcdi_to_ethtool_cap(phy_cfg->media, |
536 | MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); | 536 | MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP)); |
537 | } | 537 | } |
538 | 538 | ||
539 | static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | 539 | static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) |
540 | { | 540 | { |
541 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | 541 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
542 | u32 caps; | 542 | u32 caps; |
543 | int rc; | 543 | int rc; |
544 | 544 | ||
545 | if (ecmd->autoneg) { | 545 | if (ecmd->autoneg) { |
546 | caps = (ethtool_to_mcdi_cap(ecmd->advertising) | | 546 | caps = (ethtool_to_mcdi_cap(ecmd->advertising) | |
547 | 1 << MC_CMD_PHY_CAP_AN_LBN); | 547 | 1 << MC_CMD_PHY_CAP_AN_LBN); |
548 | } else if (ecmd->duplex) { | 548 | } else if (ecmd->duplex) { |
549 | switch (ecmd->speed) { | 549 | switch (ecmd->speed) { |
550 | case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; | 550 | case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break; |
551 | case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; | 551 | case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break; |
552 | case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; | 552 | case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break; |
553 | case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; | 553 | case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break; |
554 | default: return -EINVAL; | 554 | default: return -EINVAL; |
555 | } | 555 | } |
556 | } else { | 556 | } else { |
557 | switch (ecmd->speed) { | 557 | switch (ecmd->speed) { |
558 | case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; | 558 | case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break; |
559 | case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; | 559 | case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break; |
560 | case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; | 560 | case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break; |
561 | default: return -EINVAL; | 561 | default: return -EINVAL; |
562 | } | 562 | } |
563 | } | 563 | } |
564 | 564 | ||
565 | rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), | 565 | rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx), |
566 | efx->loopback_mode, 0); | 566 | efx->loopback_mode, 0); |
567 | if (rc) | 567 | if (rc) |
568 | return rc; | 568 | return rc; |
569 | 569 | ||
570 | if (ecmd->autoneg) { | 570 | if (ecmd->autoneg) { |
571 | efx_link_set_advertising( | 571 | efx_link_set_advertising( |
572 | efx, ecmd->advertising | ADVERTISED_Autoneg); | 572 | efx, ecmd->advertising | ADVERTISED_Autoneg); |
573 | phy_cfg->forced_cap = 0; | 573 | phy_cfg->forced_cap = 0; |
574 | } else { | 574 | } else { |
575 | efx_link_set_advertising(efx, 0); | 575 | efx_link_set_advertising(efx, 0); |
576 | phy_cfg->forced_cap = caps; | 576 | phy_cfg->forced_cap = caps; |
577 | } | 577 | } |
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static int efx_mcdi_phy_test_alive(struct efx_nic *efx) | 581 | static int efx_mcdi_phy_test_alive(struct efx_nic *efx) |
582 | { | 582 | { |
583 | u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN]; | 583 | u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN]; |
584 | size_t outlen; | 584 | size_t outlen; |
585 | int rc; | 585 | int rc; |
586 | 586 | ||
587 | BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); | 587 | BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0); |
588 | 588 | ||
589 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, | 589 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0, |
590 | outbuf, sizeof(outbuf), &outlen); | 590 | outbuf, sizeof(outbuf), &outlen); |
591 | if (rc) | 591 | if (rc) |
592 | return rc; | 592 | return rc; |
593 | 593 | ||
594 | if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) | 594 | if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) |
595 | return -EIO; | 595 | return -EIO; |
596 | if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK) | 596 | if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK) |
597 | return -EINVAL; | 597 | return -EINVAL; |
598 | 598 | ||
599 | return 0; | 599 | return 0; |
600 | } | 600 | } |
601 | 601 | ||
602 | static const char *const mcdi_sft9001_cable_diag_names[] = { | 602 | static const char *const mcdi_sft9001_cable_diag_names[] = { |
603 | "cable.pairA.length", | 603 | "cable.pairA.length", |
604 | "cable.pairB.length", | 604 | "cable.pairB.length", |
605 | "cable.pairC.length", | 605 | "cable.pairC.length", |
606 | "cable.pairD.length", | 606 | "cable.pairD.length", |
607 | "cable.pairA.status", | 607 | "cable.pairA.status", |
608 | "cable.pairB.status", | 608 | "cable.pairB.status", |
609 | "cable.pairC.status", | 609 | "cable.pairC.status", |
610 | "cable.pairD.status", | 610 | "cable.pairD.status", |
611 | }; | 611 | }; |
612 | 612 | ||
613 | static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, | 613 | static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode, |
614 | int *results) | 614 | int *results) |
615 | { | 615 | { |
616 | unsigned int retry, i, count = 0; | 616 | unsigned int retry, i, count = 0; |
617 | size_t outlen; | 617 | size_t outlen; |
618 | u32 status; | 618 | u32 status; |
619 | u8 *buf, *ptr; | 619 | u8 *buf, *ptr; |
620 | int rc; | 620 | int rc; |
621 | 621 | ||
622 | buf = kzalloc(0x100, GFP_KERNEL); | 622 | buf = kzalloc(0x100, GFP_KERNEL); |
623 | if (buf == NULL) | 623 | if (buf == NULL) |
624 | return -ENOMEM; | 624 | return -ENOMEM; |
625 | 625 | ||
626 | BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); | 626 | BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0); |
627 | MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode); | 627 | MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode); |
628 | rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN, | 628 | rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN, |
629 | NULL, 0, NULL); | 629 | NULL, 0, NULL); |
630 | if (rc) | 630 | if (rc) |
631 | goto out; | 631 | goto out; |
632 | 632 | ||
633 | /* Wait up to 10s for BIST to finish */ | 633 | /* Wait up to 10s for BIST to finish */ |
634 | for (retry = 0; retry < 100; ++retry) { | 634 | for (retry = 0; retry < 100; ++retry) { |
635 | BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); | 635 | BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0); |
636 | rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, | 636 | rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0, |
637 | buf, 0x100, &outlen); | 637 | buf, 0x100, &outlen); |
638 | if (rc) | 638 | if (rc) |
639 | goto out; | 639 | goto out; |
640 | 640 | ||
641 | status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT); | 641 | status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT); |
642 | if (status != MC_CMD_POLL_BIST_RUNNING) | 642 | if (status != MC_CMD_POLL_BIST_RUNNING) |
643 | goto finished; | 643 | goto finished; |
644 | 644 | ||
645 | msleep(100); | 645 | msleep(100); |
646 | } | 646 | } |
647 | 647 | ||
648 | rc = -ETIMEDOUT; | 648 | rc = -ETIMEDOUT; |
649 | goto out; | 649 | goto out; |
650 | 650 | ||
651 | finished: | 651 | finished: |
652 | results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1; | 652 | results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1; |
653 | 653 | ||
654 | /* SFT9001-specific cable diagnostics output */ | 654 | /* SFT9001-specific cable diagnostics output */ |
655 | if (efx->phy_type == PHY_TYPE_SFT9001B && | 655 | if (efx->phy_type == PHY_TYPE_SFT9001B && |
656 | (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || | 656 | (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT || |
657 | bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { | 657 | bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) { |
658 | ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); | 658 | ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A); |
659 | if (status == MC_CMD_POLL_BIST_PASSED && | 659 | if (status == MC_CMD_POLL_BIST_PASSED && |
660 | outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { | 660 | outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) { |
661 | for (i = 0; i < 8; i++) { | 661 | for (i = 0; i < 8; i++) { |
662 | results[count + i] = | 662 | results[count + i] = |
663 | EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i], | 663 | EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i], |
664 | EFX_DWORD_0); | 664 | EFX_DWORD_0); |
665 | } | 665 | } |
666 | } | 666 | } |
667 | count += 8; | 667 | count += 8; |
668 | } | 668 | } |
669 | rc = count; | 669 | rc = count; |
670 | 670 | ||
671 | out: | 671 | out: |
672 | kfree(buf); | 672 | kfree(buf); |
673 | 673 | ||
674 | return rc; | 674 | return rc; |
675 | } | 675 | } |
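The shape of efx_mcdi_bist() is a start-then-poll loop: 100 polls at 100ms intervals gives the 10s budget mentioned in the comment, after which it fails with -ETIMEDOUT. The loop skeleton in isolation (poll_done() and sleep_ms() are stand-ins for the MC_CMD_POLL_BIST call and msleep(), not driver functions):

#include <assert.h>
#include <errno.h>
#include <stdbool.h>

static unsigned int polls_left = 3;              /* pretend hardware */
static bool poll_done(void) { return --polls_left == 0; }
static void sleep_ms(unsigned int ms) { (void)ms; }

static int wait_for_bist(void)
{
        unsigned int retry;

        for (retry = 0; retry < 100; ++retry) {  /* 100 x 100ms = 10s */
                if (poll_done())
                        return 0;
                sleep_ms(100);
        }
        return -ETIMEDOUT;
}

int main(void)
{
        assert(wait_for_bist() == 0);            /* finishes on poll 3 */
        return 0;
}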
676 | 676 | ||
677 | static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, | 677 | static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, |
678 | unsigned flags) | 678 | unsigned flags) |
679 | { | 679 | { |
680 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | 680 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
681 | u32 mode; | 681 | u32 mode; |
682 | int rc; | 682 | int rc; |
683 | 683 | ||
684 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { | 684 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { |
685 | rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); | 685 | rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results); |
686 | if (rc < 0) | 686 | if (rc < 0) |
687 | return rc; | 687 | return rc; |
688 | 688 | ||
689 | results += rc; | 689 | results += rc; |
690 | } | 690 | } |
691 | 691 | ||
692 | /* If we support both LONG and SHORT, choose between them based on | 692 | /* If we support both LONG and SHORT, choose between them based on |
693 | * the offline flag. Otherwise, run the one we support */ | 693 | * the offline flag. Otherwise, run the one we support */ |
694 | mode = 0; | 694 | mode = 0; |
695 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) { | 695 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) { |
696 | if ((flags & ETH_TEST_FL_OFFLINE) && | 696 | if ((flags & ETH_TEST_FL_OFFLINE) && |
697 | (phy_cfg->flags & | 697 | (phy_cfg->flags & |
698 | (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) | 698 | (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) |
699 | mode = MC_CMD_PHY_BIST_CABLE_LONG; | 699 | mode = MC_CMD_PHY_BIST_CABLE_LONG; |
700 | else | 700 | else |
701 | mode = MC_CMD_PHY_BIST_CABLE_SHORT; | 701 | mode = MC_CMD_PHY_BIST_CABLE_SHORT; |
702 | } else if (phy_cfg->flags & | 702 | } else if (phy_cfg->flags & |
703 | (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)) | 703 | (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)) |
704 | mode = MC_CMD_PHY_BIST_CABLE_LONG; | 704 | mode = MC_CMD_PHY_BIST_CABLE_LONG; |
705 | 705 | ||
706 | if (mode != 0) { | 706 | if (mode != 0) { |
707 | rc = efx_mcdi_bist(efx, mode, results); | 707 | rc = efx_mcdi_bist(efx, mode, results); |
708 | if (rc < 0) | 708 | if (rc < 0) |
709 | return rc; | 709 | return rc; |
710 | results += rc; | 710 | results += rc; |
711 | } | 711 | } |
712 | 712 | ||
713 | return 0; | 713 | return 0; |
714 | } | 714 | } |
715 | 715 | ||
716 | const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index) | 716 | static const char *efx_mcdi_phy_test_name(struct efx_nic *efx, |
| | 717 | unsigned int index) |
717 | { | 718 | { |
718 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; | 719 | struct efx_mcdi_phy_data *phy_cfg = efx->phy_data; |
719 | 720 | ||
720 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { | 721 | if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) { |
721 | if (index == 0) | 722 | if (index == 0) |
722 | return "bist"; | 723 | return "bist"; |
723 | --index; | 724 | --index; |
724 | } | 725 | } |
725 | 726 | ||
726 | if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) | | 727 | if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) | |
727 | (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) { | 728 | (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) { |
728 | if (index == 0) | 729 | if (index == 0) |
729 | return "cable"; | 730 | return "cable"; |
730 | --index; | 731 | --index; |
731 | 732 | ||
732 | if (efx->phy_type == PHY_TYPE_SFT9001B) { | 733 | if (efx->phy_type == PHY_TYPE_SFT9001B) { |
733 | if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) | 734 | if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names)) |
734 | return mcdi_sft9001_cable_diag_names[index]; | 735 | return mcdi_sft9001_cable_diag_names[index]; |
735 | index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); | 736 | index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names); |
736 | } | 737 | } |
737 | } | 738 | } |
738 | 739 | ||
739 | return NULL; | 740 | return NULL; |
740 | } | 741 | } |
741 | 742 | ||
742 | struct efx_phy_operations efx_mcdi_phy_ops = { | 743 | struct efx_phy_operations efx_mcdi_phy_ops = { |
743 | .probe = efx_mcdi_phy_probe, | 744 | .probe = efx_mcdi_phy_probe, |
744 | .init = efx_port_dummy_op_int, | 745 | .init = efx_port_dummy_op_int, |
745 | .reconfigure = efx_mcdi_phy_reconfigure, | 746 | .reconfigure = efx_mcdi_phy_reconfigure, |
746 | .poll = efx_mcdi_phy_poll, | 747 | .poll = efx_mcdi_phy_poll, |
747 | .fini = efx_port_dummy_op_void, | 748 | .fini = efx_port_dummy_op_void, |
748 | .remove = efx_mcdi_phy_remove, | 749 | .remove = efx_mcdi_phy_remove, |
749 | .get_settings = efx_mcdi_phy_get_settings, | 750 | .get_settings = efx_mcdi_phy_get_settings, |
750 | .set_settings = efx_mcdi_phy_set_settings, | 751 | .set_settings = efx_mcdi_phy_set_settings, |
751 | .test_alive = efx_mcdi_phy_test_alive, | 752 | .test_alive = efx_mcdi_phy_test_alive, |
752 | .run_tests = efx_mcdi_phy_run_tests, | 753 | .run_tests = efx_mcdi_phy_run_tests, |
753 | .test_name = efx_mcdi_phy_test_name, | 754 | .test_name = efx_mcdi_phy_test_name, |
754 | }; | 755 | }; |
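efx_mcdi_phy_ops is the standard kernel ops-table idiom: a structure of function pointers bound at probe time, with the efx_port_dummy_op_* helpers filling slots that need no work on MCDI-managed PHYs. A minimal standalone sketch of the pattern (all names and types invented for illustration):

#include <stdio.h>

struct phy_ops {
        int  (*init)(void);
        void (*fini)(void);
};

static int dummy_init(void) { return 0; }       /* nothing to do */
static void dummy_fini(void) { }

static const struct phy_ops example_ops = {
        .init = dummy_init,
        .fini = dummy_fini,
};

int main(void)
{
        if (example_ops.init() == 0)
                printf("init ok\n");
        example_ops.fini();
        return 0;
}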
755 | 756 |
drivers/net/sfc/net_driver.h
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2005-2009 Solarflare Communications Inc. | 4 | * Copyright 2005-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | /* Common definitions for all Efx net driver code */ | 11 | /* Common definitions for all Efx net driver code */ |
12 | 12 | ||
13 | #ifndef EFX_NET_DRIVER_H | 13 | #ifndef EFX_NET_DRIVER_H |
14 | #define EFX_NET_DRIVER_H | 14 | #define EFX_NET_DRIVER_H |
15 | 15 | ||
16 | #if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG) | 16 | #if defined(EFX_ENABLE_DEBUG) && !defined(DEBUG) |
17 | #define DEBUG | 17 | #define DEBUG |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #include <linux/version.h> | 20 | #include <linux/version.h> |
21 | #include <linux/netdevice.h> | 21 | #include <linux/netdevice.h> |
22 | #include <linux/etherdevice.h> | 22 | #include <linux/etherdevice.h> |
23 | #include <linux/ethtool.h> | 23 | #include <linux/ethtool.h> |
24 | #include <linux/if_vlan.h> | 24 | #include <linux/if_vlan.h> |
25 | #include <linux/timer.h> | 25 | #include <linux/timer.h> |
26 | #include <linux/mdio.h> | 26 | #include <linux/mdio.h> |
27 | #include <linux/list.h> | 27 | #include <linux/list.h> |
28 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
30 | #include <linux/highmem.h> | 30 | #include <linux/highmem.h> |
31 | #include <linux/workqueue.h> | 31 | #include <linux/workqueue.h> |
32 | #include <linux/vmalloc.h> | 32 | #include <linux/vmalloc.h> |
33 | #include <linux/i2c.h> | 33 | #include <linux/i2c.h> |
34 | 34 | ||
35 | #include "enum.h" | 35 | #include "enum.h" |
36 | #include "bitfield.h" | 36 | #include "bitfield.h" |
37 | 37 | ||
38 | /************************************************************************** | 38 | /************************************************************************** |
39 | * | 39 | * |
40 | * Build definitions | 40 | * Build definitions |
41 | * | 41 | * |
42 | **************************************************************************/ | 42 | **************************************************************************/ |
43 | 43 | ||
44 | #define EFX_DRIVER_VERSION "3.0" | 44 | #define EFX_DRIVER_VERSION "3.0" |
45 | 45 | ||
46 | #ifdef EFX_ENABLE_DEBUG | 46 | #ifdef EFX_ENABLE_DEBUG |
47 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | 47 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) |
48 | #define EFX_WARN_ON_PARANOID(x) WARN_ON(x) | 48 | #define EFX_WARN_ON_PARANOID(x) WARN_ON(x) |
49 | #else | 49 | #else |
50 | #define EFX_BUG_ON_PARANOID(x) do {} while (0) | 50 | #define EFX_BUG_ON_PARANOID(x) do {} while (0) |
51 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) | 51 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) |
52 | #endif | 52 | #endif |
53 | 53 | ||
54 | /************************************************************************** | 54 | /************************************************************************** |
55 | * | 55 | * |
56 | * Efx data structures | 56 | * Efx data structures |
57 | * | 57 | * |
58 | **************************************************************************/ | 58 | **************************************************************************/ |
59 | 59 | ||
60 | #define EFX_MAX_CHANNELS 32 | 60 | #define EFX_MAX_CHANNELS 32 |
61 | #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS | 61 | #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS |
62 | 62 | ||
63 | /* Checksum generation is a per-queue option in hardware, so each | 63 | /* Checksum generation is a per-queue option in hardware, so each |
64 | * queue visible to the networking core is backed by two hardware TX | 64 | * queue visible to the networking core is backed by two hardware TX |
65 | * queues. */ | 65 | * queues. */ |
66 | #define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS | 66 | #define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS |
67 | #define EFX_TXQ_TYPE_OFFLOAD 1 | 67 | #define EFX_TXQ_TYPE_OFFLOAD 1 |
68 | #define EFX_TXQ_TYPES 2 | 68 | #define EFX_TXQ_TYPES 2 |
69 | #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) | 69 | #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES) |
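With the values above, that comes to EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES = 2 * 32 = 64 hardware TX queues. A one-line compile-time check of the arithmetic, using a plain C idiom rather than the driver's BUILD_BUG_ON:

/* Negative array size forces a compile error if the product is wrong. */
typedef char efx_txq_count_check[(2 * 32 == 64) ? 1 : -1];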
70 | 70 | ||
71 | /** | 71 | /** |
72 | * struct efx_special_buffer - An Efx special buffer | 72 | * struct efx_special_buffer - An Efx special buffer |
73 | * @addr: CPU base address of the buffer | 73 | * @addr: CPU base address of the buffer |
74 | * @dma_addr: DMA base address of the buffer | 74 | * @dma_addr: DMA base address of the buffer |
75 | * @len: Buffer length, in bytes | 75 | * @len: Buffer length, in bytes |
76 | * @index: Buffer index within controller's buffer table | 76 | * @index: Buffer index within controller's buffer table |
77 | * @entries: Number of buffer table entries | 77 | * @entries: Number of buffer table entries |
78 | * | 78 | * |
79 | * Special buffers are used for the event queues and the TX and RX | 79 | * Special buffers are used for the event queues and the TX and RX |
80 | * descriptor queues for each channel. They are *not* used for the | 80 | * descriptor queues for each channel. They are *not* used for the |
81 | * actual transmit and receive buffers. | 81 | * actual transmit and receive buffers. |
82 | */ | 82 | */ |
83 | struct efx_special_buffer { | 83 | struct efx_special_buffer { |
84 | void *addr; | 84 | void *addr; |
85 | dma_addr_t dma_addr; | 85 | dma_addr_t dma_addr; |
86 | unsigned int len; | 86 | unsigned int len; |
87 | int index; | 87 | int index; |
88 | int entries; | 88 | int entries; |
89 | }; | 89 | }; |
90 | 90 | ||
91 | enum efx_flush_state { | 91 | enum efx_flush_state { |
92 | FLUSH_NONE, | 92 | FLUSH_NONE, |
93 | FLUSH_PENDING, | 93 | FLUSH_PENDING, |
94 | FLUSH_FAILED, | 94 | FLUSH_FAILED, |
95 | FLUSH_DONE, | 95 | FLUSH_DONE, |
96 | }; | 96 | }; |
97 | 97 | ||
98 | /** | 98 | /** |
99 | * struct efx_tx_buffer - An Efx TX buffer | 99 | * struct efx_tx_buffer - An Efx TX buffer |
100 | * @skb: The associated socket buffer. | 100 | * @skb: The associated socket buffer. |
101 | * Set only on the final fragment of a packet; %NULL for all other | 101 | * Set only on the final fragment of a packet; %NULL for all other |
102 | * fragments. When this fragment completes, then we can free this | 102 | * fragments. When this fragment completes, then we can free this |
103 | * skb. | 103 | * skb. |
104 | * @tsoh: The associated TSO header structure, or %NULL if this | 104 | * @tsoh: The associated TSO header structure, or %NULL if this |
105 | * buffer is not a TSO header. | 105 | * buffer is not a TSO header. |
106 | * @dma_addr: DMA address of the fragment. | 106 | * @dma_addr: DMA address of the fragment. |
107 | * @len: Length of this fragment. | 107 | * @len: Length of this fragment. |
108 | * This field is zero when the queue slot is empty. | 108 | * This field is zero when the queue slot is empty. |
109 | * @continuation: True if this fragment is not the end of a packet. | 109 | * @continuation: True if this fragment is not the end of a packet. |
110 | * @unmap_single: True if pci_unmap_single should be used. | 110 | * @unmap_single: True if pci_unmap_single should be used. |
111 | * @unmap_len: Length of this fragment to unmap | 111 | * @unmap_len: Length of this fragment to unmap |
112 | */ | 112 | */ |
113 | struct efx_tx_buffer { | 113 | struct efx_tx_buffer { |
114 | const struct sk_buff *skb; | 114 | const struct sk_buff *skb; |
115 | struct efx_tso_header *tsoh; | 115 | struct efx_tso_header *tsoh; |
116 | dma_addr_t dma_addr; | 116 | dma_addr_t dma_addr; |
117 | unsigned short len; | 117 | unsigned short len; |
118 | bool continuation; | 118 | bool continuation; |
119 | bool unmap_single; | 119 | bool unmap_single; |
120 | unsigned short unmap_len; | 120 | unsigned short unmap_len; |
121 | }; | 121 | }; |
122 | 122 | ||
123 | /** | 123 | /** |
124 | * struct efx_tx_queue - An Efx TX queue | 124 | * struct efx_tx_queue - An Efx TX queue |
125 | * | 125 | * |
126 | * This is a ring buffer of TX fragments. | 126 | * This is a ring buffer of TX fragments. |
127 | * Since the TX completion path always executes on the same | 127 | * Since the TX completion path always executes on the same |
128 | * CPU and the xmit path can operate on different CPUs, | 128 | * CPU and the xmit path can operate on different CPUs, |
129 | * performance is increased by ensuring that the completion | 129 | * performance is increased by ensuring that the completion |
130 | * path and the xmit path operate on different cache lines. | 130 | * path and the xmit path operate on different cache lines. |
131 | * This is particularly important if the xmit path is always | 131 | * This is particularly important if the xmit path is always |
132 | * executing on one CPU which is different from the completion | 132 | * executing on one CPU which is different from the completion |
133 | * path. There is also a cache line for members which are | 133 | * path. There is also a cache line for members which are |
134 | * read but not written on the fast path. | 134 | * read but not written on the fast path. |
135 | * | 135 | * |
136 | * @efx: The associated Efx NIC | 136 | * @efx: The associated Efx NIC |
137 | * @queue: DMA queue number | 137 | * @queue: DMA queue number |
138 | * @channel: The associated channel | 138 | * @channel: The associated channel |
139 | * @buffer: The software buffer ring | 139 | * @buffer: The software buffer ring |
140 | * @txd: The hardware descriptor ring | 140 | * @txd: The hardware descriptor ring |
141 | * @ptr_mask: The size of the ring minus 1. | 141 | * @ptr_mask: The size of the ring minus 1. |
142 | * @flushed: Used when handling queue flushing | 142 | * @flushed: Used when handling queue flushing |
143 | * @read_count: Current read pointer. | 143 | * @read_count: Current read pointer. |
144 | * This is the number of buffers that have been removed from both rings. | 144 | * This is the number of buffers that have been removed from both rings. |
145 | * @stopped: Stopped count. | 145 | * @stopped: Stopped count. |
146 | * Set if this TX queue is currently stopping its port. | 146 | * Set if this TX queue is currently stopping its port. |
147 | * @insert_count: Current insert pointer | 147 | * @insert_count: Current insert pointer |
148 | * This is the number of buffers that have been added to the | 148 | * This is the number of buffers that have been added to the |
149 | * software ring. | 149 | * software ring. |
150 | * @write_count: Current write pointer | 150 | * @write_count: Current write pointer |
151 | * This is the number of buffers that have been added to the | 151 | * This is the number of buffers that have been added to the |
152 | * hardware ring. | 152 | * hardware ring. |
153 | * @old_read_count: The value of read_count when last checked. | 153 | * @old_read_count: The value of read_count when last checked. |
154 | * This is here for performance reasons. The xmit path will | 154 | * This is here for performance reasons. The xmit path will |
155 | * only get the up-to-date value of read_count if this | 155 | * only get the up-to-date value of read_count if this |
156 | * variable indicates that the queue is full. This is to | 156 | * variable indicates that the queue is full. This is to |
157 | * avoid cache-line ping-pong between the xmit path and the | 157 | * avoid cache-line ping-pong between the xmit path and the |
158 | * completion path. | 158 | * completion path. |
159 | * @tso_headers_free: A list of TSO headers allocated for this TX queue | 159 | * @tso_headers_free: A list of TSO headers allocated for this TX queue |
160 | * that are not in use, and so available for new TSO sends. The list | 160 | * that are not in use, and so available for new TSO sends. The list |
161 | * is protected by the TX queue lock. | 161 | * is protected by the TX queue lock. |
162 | * @tso_bursts: Number of times TSO xmit invoked by kernel | 162 | * @tso_bursts: Number of times TSO xmit invoked by kernel |
163 | * @tso_long_headers: Number of packets with headers too long for standard | 163 | * @tso_long_headers: Number of packets with headers too long for standard |
164 | * blocks | 164 | * blocks |
165 | * @tso_packets: Number of packets via the TSO xmit path | 165 | * @tso_packets: Number of packets via the TSO xmit path |
166 | */ | 166 | */ |
167 | struct efx_tx_queue { | 167 | struct efx_tx_queue { |
168 | /* Members which don't change on the fast path */ | 168 | /* Members which don't change on the fast path */ |
169 | struct efx_nic *efx ____cacheline_aligned_in_smp; | 169 | struct efx_nic *efx ____cacheline_aligned_in_smp; |
170 | unsigned queue; | 170 | unsigned queue; |
171 | struct efx_channel *channel; | 171 | struct efx_channel *channel; |
172 | struct efx_nic *nic; | 172 | struct efx_nic *nic; |
173 | struct efx_tx_buffer *buffer; | 173 | struct efx_tx_buffer *buffer; |
174 | struct efx_special_buffer txd; | 174 | struct efx_special_buffer txd; |
175 | unsigned int ptr_mask; | 175 | unsigned int ptr_mask; |
176 | enum efx_flush_state flushed; | 176 | enum efx_flush_state flushed; |
177 | 177 | ||
178 | /* Members used mainly on the completion path */ | 178 | /* Members used mainly on the completion path */ |
179 | unsigned int read_count ____cacheline_aligned_in_smp; | 179 | unsigned int read_count ____cacheline_aligned_in_smp; |
180 | int stopped; | 180 | int stopped; |
181 | 181 | ||
182 | /* Members used only on the xmit path */ | 182 | /* Members used only on the xmit path */ |
183 | unsigned int insert_count ____cacheline_aligned_in_smp; | 183 | unsigned int insert_count ____cacheline_aligned_in_smp; |
184 | unsigned int write_count; | 184 | unsigned int write_count; |
185 | unsigned int old_read_count; | 185 | unsigned int old_read_count; |
186 | struct efx_tso_header *tso_headers_free; | 186 | struct efx_tso_header *tso_headers_free; |
187 | unsigned int tso_bursts; | 187 | unsigned int tso_bursts; |
188 | unsigned int tso_long_headers; | 188 | unsigned int tso_long_headers; |
189 | unsigned int tso_packets; | 189 | unsigned int tso_packets; |
190 | }; | 190 | }; |
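The @insert_count/@write_count/@read_count counters above are free-running; only their low bits select a ring slot, which is what @ptr_mask is for. A minimal sketch of the arithmetic, including the @old_read_count caching trick (hypothetical helpers; the real logic lives in the TX paths in tx.c):

        /* Free-running counters wrap naturally; masking picks the slot. */
        static inline struct efx_tx_buffer *
        efx_tx_slot(struct efx_tx_queue *tx_queue, unsigned int count)
        {
                return &tx_queue->buffer[count & tx_queue->ptr_mask];
        }

        /* Fill level as seen by the xmit path.  read_count (written by
         * the completion CPU) is only re-read when the cached copy says
         * the queue may be full, avoiding cache-line ping-pong. */
        static inline unsigned int efx_tx_fill_level(struct efx_tx_queue *tx_queue)
        {
                unsigned int fill = tx_queue->insert_count - tx_queue->old_read_count;

                if (fill >= tx_queue->ptr_mask) {
                        tx_queue->old_read_count = tx_queue->read_count;
                        fill = tx_queue->insert_count - tx_queue->old_read_count;
                }
                return fill;
        }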
191 | 191 | ||
192 | /** | 192 | /** |
193 | * struct efx_rx_buffer - An Efx RX data buffer | 193 | * struct efx_rx_buffer - An Efx RX data buffer |
194 | * @dma_addr: DMA base address of the buffer | 194 | * @dma_addr: DMA base address of the buffer |
195 | * @skb: The associated socket buffer, if any. | 195 | * @skb: The associated socket buffer, if any. |
196 | * If both this and page are %NULL, the buffer slot is currently free. | 196 | * If both this and page are %NULL, the buffer slot is currently free. |
197 | * @page: The associated page buffer, if any. | 197 | * @page: The associated page buffer, if any. |
198 | * If both this and skb are %NULL, the buffer slot is currently free. | 198 | * If both this and skb are %NULL, the buffer slot is currently free. |
199 | * @data: Pointer to ethernet header | 199 | * @data: Pointer to ethernet header |
200 | * @len: Buffer length, in bytes. | 200 | * @len: Buffer length, in bytes. |
201 | */ | 201 | */ |
202 | struct efx_rx_buffer { | 202 | struct efx_rx_buffer { |
203 | dma_addr_t dma_addr; | 203 | dma_addr_t dma_addr; |
204 | struct sk_buff *skb; | 204 | struct sk_buff *skb; |
205 | struct page *page; | 205 | struct page *page; |
206 | char *data; | 206 | char *data; |
207 | unsigned int len; | 207 | unsigned int len; |
208 | }; | 208 | }; |
209 | 209 | ||
210 | /** | 210 | /** |
211 | * struct efx_rx_page_state - Page-based rx buffer state | 211 | * struct efx_rx_page_state - Page-based rx buffer state |
212 | * | 212 | * |
213 | * Inserted at the start of every page allocated for receive buffers. | 213 | * Inserted at the start of every page allocated for receive buffers. |
214 | * Used to facilitate sharing dma mappings between recycled rx buffers | 214 | * Used to facilitate sharing dma mappings between recycled rx buffers |
215 | * and those passed up to the kernel. | 215 | * and those passed up to the kernel. |
216 | * | 216 | * |
217 | * @refcnt: Number of struct efx_rx_buffer instances referencing this page. | 217 | * @refcnt: Number of struct efx_rx_buffer instances referencing this page. |
218 | * When refcnt falls to zero, the page is unmapped for dma | 218 | * When refcnt falls to zero, the page is unmapped for dma |
219 | * @dma_addr: The dma address of this page. | 219 | * @dma_addr: The dma address of this page. |
220 | */ | 220 | */ |
221 | struct efx_rx_page_state { | 221 | struct efx_rx_page_state { |
222 | unsigned refcnt; | 222 | unsigned refcnt; |
223 | dma_addr_t dma_addr; | 223 | dma_addr_t dma_addr; |
224 | 224 | ||
225 | unsigned int __pad[0] ____cacheline_aligned; | 225 | unsigned int __pad[0] ____cacheline_aligned; |
226 | }; | 226 | }; |
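A hedged sketch of how @refcnt gates the DMA unmap when recycled buffers sharing a page are released (hypothetical helper; the real logic is in rx.c). Because the state sits at the start of the page, it is reachable from the page pointer alone:

        /* Drop one buffer's reference on a shared RX page; unmap the
         * page's DMA mapping only when the last reference goes away. */
        static void efx_rx_page_unref(struct efx_nic *efx, struct page *page)
        {
                struct efx_rx_page_state *state = page_address(page);

                if (--state->refcnt == 0)
                        pci_unmap_page(efx->pci_dev, state->dma_addr,
                                       PAGE_SIZE << efx->rx_buffer_order,
                                       PCI_DMA_FROMDEVICE);
        }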
227 | 227 | ||
228 | /** | 228 | /** |
229 | * struct efx_rx_queue - An Efx RX queue | 229 | * struct efx_rx_queue - An Efx RX queue |
230 | * @efx: The associated Efx NIC | 230 | * @efx: The associated Efx NIC |
231 | * @buffer: The software buffer ring | 231 | * @buffer: The software buffer ring |
232 | * @rxd: The hardware descriptor ring | 232 | * @rxd: The hardware descriptor ring |
233 | * @ptr_mask: The size of the ring minus 1. | 233 | * @ptr_mask: The size of the ring minus 1. |
234 | * @added_count: Number of buffers added to the receive queue. | 234 | * @added_count: Number of buffers added to the receive queue. |
235 | * @notified_count: Number of buffers given to NIC (<= @added_count). | 235 | * @notified_count: Number of buffers given to NIC (<= @added_count). |
236 | * @removed_count: Number of buffers removed from the receive queue. | 236 | * @removed_count: Number of buffers removed from the receive queue. |
237 | * @max_fill: RX descriptor maximum fill level (<= ring size) | 237 | * @max_fill: RX descriptor maximum fill level (<= ring size) |
238 | * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill | 238 | * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill |
239 | * (<= @max_fill) | 239 | * (<= @max_fill) |
240 | * @fast_fill_limit: The level to which a fast fill will fill | 240 | * @fast_fill_limit: The level to which a fast fill will fill |
241 | * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill) | 241 | * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill) |
242 | * @min_fill: RX descriptor minimum non-zero fill level. | 242 | * @min_fill: RX descriptor minimum non-zero fill level. |
243 | * This records the minimum fill level observed when a ring | 243 | * This records the minimum fill level observed when a ring |
244 | * refill was triggered. | 244 | * refill was triggered. |
245 | * @alloc_page_count: RX allocation strategy counter. | 245 | * @alloc_page_count: RX allocation strategy counter. |
246 | * @alloc_skb_count: RX allocation strategy counter. | 246 | * @alloc_skb_count: RX allocation strategy counter. |
247 | * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). | 247 | * @slow_fill: Timer used to defer efx_nic_generate_fill_event(). |
248 | * @flushed: Used when handling queue flushing | 248 | * @flushed: Used when handling queue flushing |
249 | */ | 249 | */ |
250 | struct efx_rx_queue { | 250 | struct efx_rx_queue { |
251 | struct efx_nic *efx; | 251 | struct efx_nic *efx; |
252 | struct efx_rx_buffer *buffer; | 252 | struct efx_rx_buffer *buffer; |
253 | struct efx_special_buffer rxd; | 253 | struct efx_special_buffer rxd; |
254 | unsigned int ptr_mask; | 254 | unsigned int ptr_mask; |
255 | 255 | ||
256 | int added_count; | 256 | int added_count; |
257 | int notified_count; | 257 | int notified_count; |
258 | int removed_count; | 258 | int removed_count; |
259 | unsigned int max_fill; | 259 | unsigned int max_fill; |
260 | unsigned int fast_fill_trigger; | 260 | unsigned int fast_fill_trigger; |
261 | unsigned int fast_fill_limit; | 261 | unsigned int fast_fill_limit; |
262 | unsigned int min_fill; | 262 | unsigned int min_fill; |
263 | unsigned int min_overfill; | 263 | unsigned int min_overfill; |
264 | unsigned int alloc_page_count; | 264 | unsigned int alloc_page_count; |
265 | unsigned int alloc_skb_count; | 265 | unsigned int alloc_skb_count; |
266 | struct timer_list slow_fill; | 266 | struct timer_list slow_fill; |
267 | unsigned int slow_fill_count; | 267 | unsigned int slow_fill_count; |
268 | 268 | ||
269 | enum efx_flush_state flushed; | 269 | enum efx_flush_state flushed; |
270 | }; | 270 | }; |
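As on the TX side, the RX counters are free-running, so the fill level is a plain difference. A sketch of the fast-fill decision implied by the fields above (hypothetical helper; efx_fast_push_rx_descriptors() in rx.c is the real version):

        /* Top the ring up to fast_fill_limit once the fill level has
         * fallen to fast_fill_trigger. */
        static void efx_rx_maybe_fill(struct efx_rx_queue *rx_queue)
        {
                unsigned int fill_level =
                        rx_queue->added_count - rx_queue->removed_count;

                if (fill_level > rx_queue->fast_fill_trigger)
                        return;                 /* still enough buffers posted */

                while (fill_level < rx_queue->fast_fill_limit) {
                        /* ... allocate an skb- or page-based buffer ... */
                        ++rx_queue->added_count;
                        ++fill_level;
                }
                /* finally tell the NIC about the new added_count */
        }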
271 | 271 | ||
272 | /** | 272 | /** |
273 | * struct efx_buffer - An Efx general-purpose buffer | 273 | * struct efx_buffer - An Efx general-purpose buffer |
274 | * @addr: host base address of the buffer | 274 | * @addr: host base address of the buffer |
275 | * @dma_addr: DMA base address of the buffer | 275 | * @dma_addr: DMA base address of the buffer |
276 | * @len: Buffer length, in bytes | 276 | * @len: Buffer length, in bytes |
277 | * | 277 | * |
278 | * The NIC uses these buffers for its interrupt status registers and | 278 | * The NIC uses these buffers for its interrupt status registers and |
279 | * MAC stats dumps. | 279 | * MAC stats dumps. |
280 | */ | 280 | */ |
281 | struct efx_buffer { | 281 | struct efx_buffer { |
282 | void *addr; | 282 | void *addr; |
283 | dma_addr_t dma_addr; | 283 | dma_addr_t dma_addr; |
284 | unsigned int len; | 284 | unsigned int len; |
285 | }; | 285 | }; |
286 | 286 | ||
287 | 287 | ||
288 | enum efx_rx_alloc_method { | 288 | enum efx_rx_alloc_method { |
289 | RX_ALLOC_METHOD_AUTO = 0, | 289 | RX_ALLOC_METHOD_AUTO = 0, |
290 | RX_ALLOC_METHOD_SKB = 1, | 290 | RX_ALLOC_METHOD_SKB = 1, |
291 | RX_ALLOC_METHOD_PAGE = 2, | 291 | RX_ALLOC_METHOD_PAGE = 2, |
292 | }; | 292 | }; |
293 | 293 | ||
294 | /** | 294 | /** |
295 | * struct efx_channel - An Efx channel | 295 | * struct efx_channel - An Efx channel |
296 | * | 296 | * |
297 | * A channel comprises an event queue, at least one TX queue, at least | 297 | * A channel comprises an event queue, at least one TX queue, at least |
298 | * one RX queue, and an associated tasklet for processing the event | 298 | * one RX queue, and an associated tasklet for processing the event |
299 | * queue. | 299 | * queue. |
300 | * | 300 | * |
301 | * @efx: Associated Efx NIC | 301 | * @efx: Associated Efx NIC |
302 | * @channel: Channel instance number | 302 | * @channel: Channel instance number |
303 | * @enabled: Channel enabled indicator | 303 | * @enabled: Channel enabled indicator |
304 | * @irq: IRQ number (MSI and MSI-X only) | 304 | * @irq: IRQ number (MSI and MSI-X only) |
305 | * @irq_moderation: IRQ moderation value (in hardware ticks) | 305 | * @irq_moderation: IRQ moderation value (in hardware ticks) |
306 | * @napi_dev: Net device used with NAPI | 306 | * @napi_dev: Net device used with NAPI |
307 | * @napi_str: NAPI control structure | 307 | * @napi_str: NAPI control structure |
308 | * @reset_work: Scheduled reset work thread | 308 | * @reset_work: Scheduled reset work thread |
309 | * @work_pending: Is work pending via NAPI? | 309 | * @work_pending: Is work pending via NAPI? |
310 | * @eventq: Event queue buffer | 310 | * @eventq: Event queue buffer |
311 | * @eventq_mask: Event queue pointer mask | 311 | * @eventq_mask: Event queue pointer mask |
312 | * @eventq_read_ptr: Event queue read pointer | 312 | * @eventq_read_ptr: Event queue read pointer |
313 | * @last_eventq_read_ptr: Last event queue read pointer value. | 313 | * @last_eventq_read_ptr: Last event queue read pointer value. |
314 | * @magic_count: Event queue test event count | 314 | * @magic_count: Event queue test event count |
315 | * @irq_count: Number of IRQs since last adaptive moderation decision | 315 | * @irq_count: Number of IRQs since last adaptive moderation decision |
316 | * @irq_mod_score: IRQ moderation score | 316 | * @irq_mod_score: IRQ moderation score |
317 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors | 317 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors |
318 | * and diagnostic counters | 318 | * and diagnostic counters |
319 | * @rx_alloc_push_pages: RX allocation method currently in use for pushing | 319 | * @rx_alloc_push_pages: RX allocation method currently in use for pushing |
320 | * descriptors | 320 | * descriptors |
321 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors | 321 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors |
322 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors | 322 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors |
323 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors | 323 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors |
324 | * @n_rx_mcast_mismatch: Count of unmatched multicast frames | 324 | * @n_rx_mcast_mismatch: Count of unmatched multicast frames |
325 | * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors | 325 | * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors |
326 | * @n_rx_overlength: Count of RX_OVERLENGTH errors | 326 | * @n_rx_overlength: Count of RX_OVERLENGTH errors |
327 | * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun | 327 | * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun |
328 | * @rx_queue: RX queue for this channel | 328 | * @rx_queue: RX queue for this channel |
329 | * @tx_stop_count: Core TX queue stop count | 329 | * @tx_stop_count: Core TX queue stop count |
330 | * @tx_stop_lock: Core TX queue stop lock | 330 | * @tx_stop_lock: Core TX queue stop lock |
331 | * @tx_queue: TX queues for this channel | 331 | * @tx_queue: TX queues for this channel |
332 | */ | 332 | */ |
333 | struct efx_channel { | 333 | struct efx_channel { |
334 | struct efx_nic *efx; | 334 | struct efx_nic *efx; |
335 | int channel; | 335 | int channel; |
336 | bool enabled; | 336 | bool enabled; |
337 | int irq; | 337 | int irq; |
338 | unsigned int irq_moderation; | 338 | unsigned int irq_moderation; |
339 | struct net_device *napi_dev; | 339 | struct net_device *napi_dev; |
340 | struct napi_struct napi_str; | 340 | struct napi_struct napi_str; |
341 | bool work_pending; | 341 | bool work_pending; |
342 | struct efx_special_buffer eventq; | 342 | struct efx_special_buffer eventq; |
343 | unsigned int eventq_mask; | 343 | unsigned int eventq_mask; |
344 | unsigned int eventq_read_ptr; | 344 | unsigned int eventq_read_ptr; |
345 | unsigned int last_eventq_read_ptr; | 345 | unsigned int last_eventq_read_ptr; |
346 | unsigned int magic_count; | 346 | unsigned int magic_count; |
347 | 347 | ||
348 | unsigned int irq_count; | 348 | unsigned int irq_count; |
349 | unsigned int irq_mod_score; | 349 | unsigned int irq_mod_score; |
350 | 350 | ||
351 | int rx_alloc_level; | 351 | int rx_alloc_level; |
352 | int rx_alloc_push_pages; | 352 | int rx_alloc_push_pages; |
353 | 353 | ||
354 | unsigned n_rx_tobe_disc; | 354 | unsigned n_rx_tobe_disc; |
355 | unsigned n_rx_ip_hdr_chksum_err; | 355 | unsigned n_rx_ip_hdr_chksum_err; |
356 | unsigned n_rx_tcp_udp_chksum_err; | 356 | unsigned n_rx_tcp_udp_chksum_err; |
357 | unsigned n_rx_mcast_mismatch; | 357 | unsigned n_rx_mcast_mismatch; |
358 | unsigned n_rx_frm_trunc; | 358 | unsigned n_rx_frm_trunc; |
359 | unsigned n_rx_overlength; | 359 | unsigned n_rx_overlength; |
360 | unsigned n_skbuff_leaks; | 360 | unsigned n_skbuff_leaks; |
361 | 361 | ||
362 | /* Used to pipeline received packets in order to optimise memory | 362 | /* Used to pipeline received packets in order to optimise memory |
363 | * access with prefetches. | 363 | * access with prefetches. |
364 | */ | 364 | */ |
365 | struct efx_rx_buffer *rx_pkt; | 365 | struct efx_rx_buffer *rx_pkt; |
366 | bool rx_pkt_csummed; | 366 | bool rx_pkt_csummed; |
367 | 367 | ||
368 | struct efx_rx_queue rx_queue; | 368 | struct efx_rx_queue rx_queue; |
369 | 369 | ||
370 | atomic_t tx_stop_count; | 370 | atomic_t tx_stop_count; |
371 | spinlock_t tx_stop_lock; | 371 | spinlock_t tx_stop_lock; |
372 | 372 | ||
373 | struct efx_tx_queue tx_queue[2]; | 373 | struct efx_tx_queue tx_queue[2]; |
374 | }; | 374 | }; |
375 | 375 | ||
376 | enum efx_led_mode { | 376 | enum efx_led_mode { |
377 | EFX_LED_OFF = 0, | 377 | EFX_LED_OFF = 0, |
378 | EFX_LED_ON = 1, | 378 | EFX_LED_ON = 1, |
379 | EFX_LED_DEFAULT = 2 | 379 | EFX_LED_DEFAULT = 2 |
380 | }; | 380 | }; |
381 | 381 | ||
382 | #define STRING_TABLE_LOOKUP(val, member) \ | 382 | #define STRING_TABLE_LOOKUP(val, member) \ |
383 | ((val) < member ## _max) ? member ## _names[val] : "(invalid)" | 383 | ((val) < member ## _max) ? member ## _names[val] : "(invalid)" |
384 | 384 | ||
385 | extern const char *efx_loopback_mode_names[]; | 385 | extern const char *efx_loopback_mode_names[]; |
386 | extern const unsigned int efx_loopback_mode_max; | 386 | extern const unsigned int efx_loopback_mode_max; |
387 | #define LOOPBACK_MODE(efx) \ | 387 | #define LOOPBACK_MODE(efx) \ |
388 | STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) | 388 | STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode) |
389 | 389 | ||
390 | extern const char *efx_interrupt_mode_names[]; | ||
391 | extern const unsigned int efx_interrupt_mode_max; | ||
392 | #define INT_MODE(efx) \ | ||
393 | STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) | ||
394 | |||
395 | extern const char *efx_reset_type_names[]; | 390 | extern const char *efx_reset_type_names[]; |
396 | extern const unsigned int efx_reset_type_max; | 391 | extern const unsigned int efx_reset_type_max; |
397 | #define RESET_TYPE(type) \ | 392 | #define RESET_TYPE(type) \ |
398 | STRING_TABLE_LOOKUP(type, efx_reset_type) | 393 | STRING_TABLE_LOOKUP(type, efx_reset_type) |
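STRING_TABLE_LOOKUP() pairs a <name>_names[] array with its <name>_max bound, so each wrapper is safe for out-of-range values. Note that the macro body is an unparenthesised ternary, so it is best used as a complete expression such as a printf-style argument. A usage sketch (hypothetical message):

        netif_info(efx, drv, efx->net_dev,
                   "resetting (%s)\n", RESET_TYPE(method));
        /* expands to:
         *   ((method) < efx_reset_type_max)
         *           ? efx_reset_type_names[method] : "(invalid)"
         */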
399 | 394 | ||
400 | enum efx_int_mode { | 395 | enum efx_int_mode { |
401 | /* Be careful if altering: must stay consistent with the macro below */ | 396 | /* Be careful if altering: must stay consistent with the macro below */ |
402 | EFX_INT_MODE_MSIX = 0, | 397 | EFX_INT_MODE_MSIX = 0, |
403 | EFX_INT_MODE_MSI = 1, | 398 | EFX_INT_MODE_MSI = 1, |
404 | EFX_INT_MODE_LEGACY = 2, | 399 | EFX_INT_MODE_LEGACY = 2, |
405 | EFX_INT_MODE_MAX /* Insert any new items before this */ | 400 | EFX_INT_MODE_MAX /* Insert any new items before this */ |
406 | }; | 401 | }; |
407 | #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) | 402 | #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) |
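EFX_INT_MODE_USE_MSI() leans on the enum ordering above (hence the comment inside the enum): MSIX is 0 and MSI is 1, so <= EFX_INT_MODE_MSI is true exactly for the two message-signalled modes. A compile-time guard, placed in any function, could pin that assumption down (hypothetical, not in this commit):

        BUILD_BUG_ON(EFX_INT_MODE_MSIX != 0);
        BUILD_BUG_ON(EFX_INT_MODE_MSI >= EFX_INT_MODE_LEGACY);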
408 | 403 | ||
409 | enum nic_state { | 404 | enum nic_state { |
410 | STATE_INIT = 0, | 405 | STATE_INIT = 0, |
411 | STATE_RUNNING = 1, | 406 | STATE_RUNNING = 1, |
412 | STATE_FINI = 2, | 407 | STATE_FINI = 2, |
413 | STATE_DISABLED = 3, | 408 | STATE_DISABLED = 3, |
414 | STATE_MAX, | 409 | STATE_MAX, |
415 | }; | 410 | }; |
416 | 411 | ||
417 | /* | 412 | /* |
418 | * Alignment of page-allocated RX buffers | 413 | * Alignment of page-allocated RX buffers |
419 | * | 414 | * |
420 | * Controls the number of bytes inserted at the start of an RX buffer. | 415 | * Controls the number of bytes inserted at the start of an RX buffer. |
421 | * This is the equivalent of NET_IP_ALIGN [which controls the alignment | 416 | * This is the equivalent of NET_IP_ALIGN [which controls the alignment |
422 | * of the skb->head for hardware DMA]. | 417 | * of the skb->head for hardware DMA]. |
423 | */ | 418 | */ |
424 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS | 419 | #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS |
425 | #define EFX_PAGE_IP_ALIGN 0 | 420 | #define EFX_PAGE_IP_ALIGN 0 |
426 | #else | 421 | #else |
427 | #define EFX_PAGE_IP_ALIGN NET_IP_ALIGN | 422 | #define EFX_PAGE_IP_ALIGN NET_IP_ALIGN |
428 | #endif | 423 | #endif |
429 | 424 | ||
430 | /* | 425 | /* |
431 | * Alignment of the skb->head which wraps a page-allocated RX buffer | 426 | * Alignment of the skb->head which wraps a page-allocated RX buffer |
432 | * | 427 | * |
433 | * The skb allocated to wrap an rx_buffer can have this alignment. Since | 428 | * The skb allocated to wrap an rx_buffer can have this alignment. Since |
434 | * the data is memcpy'd from the rx_buf, it does not need to be equal to | 429 | * the data is memcpy'd from the rx_buf, it does not need to be equal to |
435 | * EFX_PAGE_IP_ALIGN. | 430 | * EFX_PAGE_IP_ALIGN. |
436 | */ | 431 | */ |
437 | #define EFX_PAGE_SKB_ALIGN 2 | 432 | #define EFX_PAGE_SKB_ALIGN 2 |
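A sketch of the distinction (hypothetical fragment, cf. the copy path in rx.c): the page-based buffer itself is laid out with EFX_PAGE_IP_ALIGN for DMA, while the skb that wraps a copy of it only needs EFX_PAGE_SKB_ALIGN:

        skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
        memcpy(skb->data, rx_buf->data, rx_buf->len);
        skb_put(skb, rx_buf->len);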
438 | 433 | ||
439 | /* Forward declaration */ | 434 | /* Forward declaration */ |
440 | struct efx_nic; | 435 | struct efx_nic; |
441 | 436 | ||
442 | /* Pseudo bit-mask flow control field */ | 437 | /* Pseudo bit-mask flow control field */ |
443 | enum efx_fc_type { | 438 | enum efx_fc_type { |
444 | EFX_FC_RX = FLOW_CTRL_RX, | 439 | EFX_FC_RX = FLOW_CTRL_RX, |
445 | EFX_FC_TX = FLOW_CTRL_TX, | 440 | EFX_FC_TX = FLOW_CTRL_TX, |
446 | EFX_FC_AUTO = 4, | 441 | EFX_FC_AUTO = 4, |
447 | }; | 442 | }; |
448 | 443 | ||
449 | /** | 444 | /** |
450 | * struct efx_link_state - Current state of the link | 445 | * struct efx_link_state - Current state of the link |
451 | * @up: Link is up | 446 | * @up: Link is up |
452 | * @fd: Link is full-duplex | 447 | * @fd: Link is full-duplex |
453 | * @fc: Actual flow control flags | 448 | * @fc: Actual flow control flags |
454 | * @speed: Link speed (Mbps) | 449 | * @speed: Link speed (Mbps) |
455 | */ | 450 | */ |
456 | struct efx_link_state { | 451 | struct efx_link_state { |
457 | bool up; | 452 | bool up; |
458 | bool fd; | 453 | bool fd; |
459 | enum efx_fc_type fc; | 454 | enum efx_fc_type fc; |
460 | unsigned int speed; | 455 | unsigned int speed; |
461 | }; | 456 | }; |
462 | 457 | ||
463 | static inline bool efx_link_state_equal(const struct efx_link_state *left, | 458 | static inline bool efx_link_state_equal(const struct efx_link_state *left, |
464 | const struct efx_link_state *right) | 459 | const struct efx_link_state *right) |
465 | { | 460 | { |
466 | return left->up == right->up && left->fd == right->fd && | 461 | return left->up == right->up && left->fd == right->fd && |
467 | left->fc == right->fc && left->speed == right->speed; | 462 | left->fc == right->fc && left->speed == right->speed; |
468 | } | 463 | } |
469 | 464 | ||
470 | /** | 465 | /** |
471 | * struct efx_mac_operations - Efx MAC operations table | 466 | * struct efx_mac_operations - Efx MAC operations table |
472 | * @reconfigure: Reconfigure MAC. Serialised by the mac_lock | 467 | * @reconfigure: Reconfigure MAC. Serialised by the mac_lock |
473 | * @update_stats: Update statistics | 468 | * @update_stats: Update statistics |
474 | * @check_fault: Check fault state. True if fault present. | 469 | * @check_fault: Check fault state. True if fault present. |
475 | */ | 470 | */ |
476 | struct efx_mac_operations { | 471 | struct efx_mac_operations { |
477 | int (*reconfigure) (struct efx_nic *efx); | 472 | int (*reconfigure) (struct efx_nic *efx); |
478 | void (*update_stats) (struct efx_nic *efx); | 473 | void (*update_stats) (struct efx_nic *efx); |
479 | bool (*check_fault)(struct efx_nic *efx); | 474 | bool (*check_fault)(struct efx_nic *efx); |
480 | }; | 475 | }; |
481 | 476 | ||
482 | /** | 477 | /** |
483 | * struct efx_phy_operations - Efx PHY operations table | 478 | * struct efx_phy_operations - Efx PHY operations table |
484 | * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, | 479 | * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds, |
485 | * efx->loopback_modes. | 480 | * efx->loopback_modes. |
486 | * @init: Initialise PHY | 481 | * @init: Initialise PHY |
487 | * @fini: Shut down PHY | 482 | * @fini: Shut down PHY |
488 | * @reconfigure: Reconfigure PHY (e.g. for new link parameters) | 483 | * @reconfigure: Reconfigure PHY (e.g. for new link parameters) |
489 | * @poll: Update @link_state and report whether it changed. | 484 | * @poll: Update @link_state and report whether it changed. |
490 | * Serialised by the mac_lock. | 485 | * Serialised by the mac_lock. |
491 | * @get_settings: Get ethtool settings. Serialised by the mac_lock. | 486 | * @get_settings: Get ethtool settings. Serialised by the mac_lock. |
492 | * @set_settings: Set ethtool settings. Serialised by the mac_lock. | 487 | * @set_settings: Set ethtool settings. Serialised by the mac_lock. |
493 | * @set_npage_adv: Set abilities advertised in (Extended) Next Page | 488 | * @set_npage_adv: Set abilities advertised in (Extended) Next Page |
494 | * (only needed where AN bit is set in mmds) | 489 | * (only needed where AN bit is set in mmds) |
495 | * @test_alive: Test that PHY is 'alive' (online) | 490 | * @test_alive: Test that PHY is 'alive' (online) |
496 | * @test_name: Get the name of a PHY-specific test/result | 491 | * @test_name: Get the name of a PHY-specific test/result |
497 | * @run_tests: Run tests and record results as appropriate (offline). | 492 | * @run_tests: Run tests and record results as appropriate (offline). |
498 | * Flags are the ethtool tests flags. | 493 | * Flags are the ethtool tests flags. |
499 | */ | 494 | */ |
500 | struct efx_phy_operations { | 495 | struct efx_phy_operations { |
501 | int (*probe) (struct efx_nic *efx); | 496 | int (*probe) (struct efx_nic *efx); |
502 | int (*init) (struct efx_nic *efx); | 497 | int (*init) (struct efx_nic *efx); |
503 | void (*fini) (struct efx_nic *efx); | 498 | void (*fini) (struct efx_nic *efx); |
504 | void (*remove) (struct efx_nic *efx); | 499 | void (*remove) (struct efx_nic *efx); |
505 | int (*reconfigure) (struct efx_nic *efx); | 500 | int (*reconfigure) (struct efx_nic *efx); |
506 | bool (*poll) (struct efx_nic *efx); | 501 | bool (*poll) (struct efx_nic *efx); |
507 | void (*get_settings) (struct efx_nic *efx, | 502 | void (*get_settings) (struct efx_nic *efx, |
508 | struct ethtool_cmd *ecmd); | 503 | struct ethtool_cmd *ecmd); |
509 | int (*set_settings) (struct efx_nic *efx, | 504 | int (*set_settings) (struct efx_nic *efx, |
510 | struct ethtool_cmd *ecmd); | 505 | struct ethtool_cmd *ecmd); |
511 | void (*set_npage_adv) (struct efx_nic *efx, u32); | 506 | void (*set_npage_adv) (struct efx_nic *efx, u32); |
512 | int (*test_alive) (struct efx_nic *efx); | 507 | int (*test_alive) (struct efx_nic *efx); |
513 | const char *(*test_name) (struct efx_nic *efx, unsigned int index); | 508 | const char *(*test_name) (struct efx_nic *efx, unsigned int index); |
514 | int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); | 509 | int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags); |
515 | }; | 510 | }; |
516 | 511 | ||
517 | /** | 512 | /** |
518 | * enum efx_phy_mode - PHY operating mode flags | 513 | * enum efx_phy_mode - PHY operating mode flags |
519 | * @PHY_MODE_NORMAL: on and should pass traffic | 514 | * @PHY_MODE_NORMAL: on and should pass traffic |
520 | * @PHY_MODE_TX_DISABLED: on with TX disabled | 515 | * @PHY_MODE_TX_DISABLED: on with TX disabled |
521 | * @PHY_MODE_LOW_POWER: set to low power through MDIO | 516 | * @PHY_MODE_LOW_POWER: set to low power through MDIO |
522 | * @PHY_MODE_OFF: switched off through external control | 517 | * @PHY_MODE_OFF: switched off through external control |
523 | * @PHY_MODE_SPECIAL: on but will not pass traffic | 518 | * @PHY_MODE_SPECIAL: on but will not pass traffic |
524 | */ | 519 | */ |
525 | enum efx_phy_mode { | 520 | enum efx_phy_mode { |
526 | PHY_MODE_NORMAL = 0, | 521 | PHY_MODE_NORMAL = 0, |
527 | PHY_MODE_TX_DISABLED = 1, | 522 | PHY_MODE_TX_DISABLED = 1, |
528 | PHY_MODE_LOW_POWER = 2, | 523 | PHY_MODE_LOW_POWER = 2, |
529 | PHY_MODE_OFF = 4, | 524 | PHY_MODE_OFF = 4, |
530 | PHY_MODE_SPECIAL = 8, | 525 | PHY_MODE_SPECIAL = 8, |
531 | }; | 526 | }; |
532 | 527 | ||
533 | static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) | 528 | static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode) |
534 | { | 529 | { |
535 | return !!(mode & ~PHY_MODE_TX_DISABLED); | 530 | return !!(mode & ~PHY_MODE_TX_DISABLED); |
536 | } | 531 | } |
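efx_phy_mode_disabled() treats any flag other than TX-disable as 'disabled'. With the enum values above:

        efx_phy_mode_disabled(PHY_MODE_NORMAL);       /* 0 & ~1 -> false */
        efx_phy_mode_disabled(PHY_MODE_TX_DISABLED);  /* 1 & ~1 -> false */
        efx_phy_mode_disabled(PHY_MODE_LOW_POWER);    /* 2 & ~1 -> true  */
        efx_phy_mode_disabled(PHY_MODE_OFF |
                              PHY_MODE_TX_DISABLED);  /* 5 & ~1 -> true  */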
537 | 532 | ||
538 | /* | 533 | /* |
539 | * Efx extended statistics | 534 | * Efx extended statistics |
540 | * | 535 | * |
541 | * Not all statistics are provided by all supported MACs. The purpose | 536 | * Not all statistics are provided by all supported MACs. The purpose |
542 | * of this structure is to contain the raw statistics provided by each | 537 | * of this structure is to contain the raw statistics provided by each |
543 | * MAC. | 538 | * MAC. |
544 | */ | 539 | */ |
545 | struct efx_mac_stats { | 540 | struct efx_mac_stats { |
546 | u64 tx_bytes; | 541 | u64 tx_bytes; |
547 | u64 tx_good_bytes; | 542 | u64 tx_good_bytes; |
548 | u64 tx_bad_bytes; | 543 | u64 tx_bad_bytes; |
549 | unsigned long tx_packets; | 544 | unsigned long tx_packets; |
550 | unsigned long tx_bad; | 545 | unsigned long tx_bad; |
551 | unsigned long tx_pause; | 546 | unsigned long tx_pause; |
552 | unsigned long tx_control; | 547 | unsigned long tx_control; |
553 | unsigned long tx_unicast; | 548 | unsigned long tx_unicast; |
554 | unsigned long tx_multicast; | 549 | unsigned long tx_multicast; |
555 | unsigned long tx_broadcast; | 550 | unsigned long tx_broadcast; |
556 | unsigned long tx_lt64; | 551 | unsigned long tx_lt64; |
557 | unsigned long tx_64; | 552 | unsigned long tx_64; |
558 | unsigned long tx_65_to_127; | 553 | unsigned long tx_65_to_127; |
559 | unsigned long tx_128_to_255; | 554 | unsigned long tx_128_to_255; |
560 | unsigned long tx_256_to_511; | 555 | unsigned long tx_256_to_511; |
561 | unsigned long tx_512_to_1023; | 556 | unsigned long tx_512_to_1023; |
562 | unsigned long tx_1024_to_15xx; | 557 | unsigned long tx_1024_to_15xx; |
563 | unsigned long tx_15xx_to_jumbo; | 558 | unsigned long tx_15xx_to_jumbo; |
564 | unsigned long tx_gtjumbo; | 559 | unsigned long tx_gtjumbo; |
565 | unsigned long tx_collision; | 560 | unsigned long tx_collision; |
566 | unsigned long tx_single_collision; | 561 | unsigned long tx_single_collision; |
567 | unsigned long tx_multiple_collision; | 562 | unsigned long tx_multiple_collision; |
568 | unsigned long tx_excessive_collision; | 563 | unsigned long tx_excessive_collision; |
569 | unsigned long tx_deferred; | 564 | unsigned long tx_deferred; |
570 | unsigned long tx_late_collision; | 565 | unsigned long tx_late_collision; |
571 | unsigned long tx_excessive_deferred; | 566 | unsigned long tx_excessive_deferred; |
572 | unsigned long tx_non_tcpudp; | 567 | unsigned long tx_non_tcpudp; |
573 | unsigned long tx_mac_src_error; | 568 | unsigned long tx_mac_src_error; |
574 | unsigned long tx_ip_src_error; | 569 | unsigned long tx_ip_src_error; |
575 | u64 rx_bytes; | 570 | u64 rx_bytes; |
576 | u64 rx_good_bytes; | 571 | u64 rx_good_bytes; |
577 | u64 rx_bad_bytes; | 572 | u64 rx_bad_bytes; |
578 | unsigned long rx_packets; | 573 | unsigned long rx_packets; |
579 | unsigned long rx_good; | 574 | unsigned long rx_good; |
580 | unsigned long rx_bad; | 575 | unsigned long rx_bad; |
581 | unsigned long rx_pause; | 576 | unsigned long rx_pause; |
582 | unsigned long rx_control; | 577 | unsigned long rx_control; |
583 | unsigned long rx_unicast; | 578 | unsigned long rx_unicast; |
584 | unsigned long rx_multicast; | 579 | unsigned long rx_multicast; |
585 | unsigned long rx_broadcast; | 580 | unsigned long rx_broadcast; |
586 | unsigned long rx_lt64; | 581 | unsigned long rx_lt64; |
587 | unsigned long rx_64; | 582 | unsigned long rx_64; |
588 | unsigned long rx_65_to_127; | 583 | unsigned long rx_65_to_127; |
589 | unsigned long rx_128_to_255; | 584 | unsigned long rx_128_to_255; |
590 | unsigned long rx_256_to_511; | 585 | unsigned long rx_256_to_511; |
591 | unsigned long rx_512_to_1023; | 586 | unsigned long rx_512_to_1023; |
592 | unsigned long rx_1024_to_15xx; | 587 | unsigned long rx_1024_to_15xx; |
593 | unsigned long rx_15xx_to_jumbo; | 588 | unsigned long rx_15xx_to_jumbo; |
594 | unsigned long rx_gtjumbo; | 589 | unsigned long rx_gtjumbo; |
595 | unsigned long rx_bad_lt64; | 590 | unsigned long rx_bad_lt64; |
596 | unsigned long rx_bad_64_to_15xx; | 591 | unsigned long rx_bad_64_to_15xx; |
597 | unsigned long rx_bad_15xx_to_jumbo; | 592 | unsigned long rx_bad_15xx_to_jumbo; |
598 | unsigned long rx_bad_gtjumbo; | 593 | unsigned long rx_bad_gtjumbo; |
599 | unsigned long rx_overflow; | 594 | unsigned long rx_overflow; |
600 | unsigned long rx_missed; | 595 | unsigned long rx_missed; |
601 | unsigned long rx_false_carrier; | 596 | unsigned long rx_false_carrier; |
602 | unsigned long rx_symbol_error; | 597 | unsigned long rx_symbol_error; |
603 | unsigned long rx_align_error; | 598 | unsigned long rx_align_error; |
604 | unsigned long rx_length_error; | 599 | unsigned long rx_length_error; |
605 | unsigned long rx_internal_error; | 600 | unsigned long rx_internal_error; |
606 | unsigned long rx_good_lt64; | 601 | unsigned long rx_good_lt64; |
607 | }; | 602 | }; |
608 | 603 | ||
609 | /* Number of bits used in a multicast filter hash address */ | 604 | /* Number of bits used in a multicast filter hash address */ |
610 | #define EFX_MCAST_HASH_BITS 8 | 605 | #define EFX_MCAST_HASH_BITS 8 |
611 | 606 | ||
612 | /* Number of (single-bit) entries in a multicast filter hash */ | 607 | /* Number of (single-bit) entries in a multicast filter hash */ |
613 | #define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) | 608 | #define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) |
614 | 609 | ||
615 | /* An Efx multicast filter hash */ | 610 | /* An Efx multicast filter hash */ |
616 | union efx_multicast_hash { | 611 | union efx_multicast_hash { |
617 | u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; | 612 | u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; |
618 | efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; | 613 | efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; |
619 | }; | 614 | }; |
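Each multicast address selects one of the 256 single-bit entries via the low EFX_MCAST_HASH_BITS of its Ethernet CRC. A sketch of populating the table (assuming the ether_crc_le()/set_bit_le() helpers used elsewhere in this driver; ha stands in for a multicast list entry):

        union efx_multicast_hash *mc_hash = &efx->multicast_hash;
        u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
        unsigned int bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);

        set_bit_le(bit, mc_hash->byte);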
620 | 615 | ||
621 | struct efx_filter_state; | 616 | struct efx_filter_state; |
622 | 617 | ||
623 | /** | 618 | /** |
624 | * struct efx_nic - an Efx NIC | 619 | * struct efx_nic - an Efx NIC |
625 | * @name: Device name (net device name or bus id before net device registered) | 620 | * @name: Device name (net device name or bus id before net device registered) |
626 | * @pci_dev: The PCI device | 621 | * @pci_dev: The PCI device |
627 | * @type: Controller type attributes | 622 | * @type: Controller type attributes |
628 | * @legacy_irq: IRQ number | 623 | * @legacy_irq: IRQ number |
629 | * @workqueue: Workqueue for port reconfigures and the HW monitor. | 624 | * @workqueue: Workqueue for port reconfigures and the HW monitor. |
630 | * Work items do not hold and must not acquire RTNL. | 625 | * Work items do not hold and must not acquire RTNL. |
631 | * @workqueue_name: Name of workqueue | 626 | * @workqueue_name: Name of workqueue |
632 | * @reset_work: Scheduled reset workitem | 627 | * @reset_work: Scheduled reset workitem |
633 | * @monitor_work: Hardware monitor workitem | 628 | * @monitor_work: Hardware monitor workitem |
634 | * @membase_phys: Memory BAR value as physical address | 629 | * @membase_phys: Memory BAR value as physical address |
635 | * @membase: Memory BAR value | 630 | * @membase: Memory BAR value |
636 | * @biu_lock: BIU (bus interface unit) lock | 631 | * @biu_lock: BIU (bus interface unit) lock |
637 | * @interrupt_mode: Interrupt mode | 632 | * @interrupt_mode: Interrupt mode |
638 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues | 633 | * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues |
639 | * @irq_rx_moderation: IRQ moderation time for RX event queues | 634 | * @irq_rx_moderation: IRQ moderation time for RX event queues |
640 | * @msg_enable: Log message enable flags | 635 | * @msg_enable: Log message enable flags |
641 | * @state: Device state flag. Serialised by the rtnl_lock. | 636 | * @state: Device state flag. Serialised by the rtnl_lock. |
642 | * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) | 637 | * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) |
643 | * @tx_queue: TX DMA queues | 638 | * @tx_queue: TX DMA queues |
644 | * @rx_queue: RX DMA queues | 639 | * @rx_queue: RX DMA queues |
645 | * @channel: Channels | 640 | * @channel: Channels |
646 | * @channel_name: Names for channels and their IRQs | 641 | * @channel_name: Names for channels and their IRQs |
647 | * @rxq_entries: Size of receive queues requested by user. | 642 | * @rxq_entries: Size of receive queues requested by user. |
648 | * @txq_entries: Size of transmit queues requested by user. | 643 | * @txq_entries: Size of transmit queues requested by user. |
649 | * @next_buffer_table: First available buffer table id | 644 | * @next_buffer_table: First available buffer table id |
650 | * @n_channels: Number of channels in use | 645 | * @n_channels: Number of channels in use |
651 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) | 646 | * @n_rx_channels: Number of channels used for RX (= number of RX queues) |
652 | * @n_tx_channels: Number of channels used for TX | 647 | * @n_tx_channels: Number of channels used for TX |
653 | * @rx_buffer_len: RX buffer length | 648 | * @rx_buffer_len: RX buffer length |
654 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer | 649 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer |
655 | * @rx_indir_table: Indirection table for RSS | 650 | * @rx_indir_table: Indirection table for RSS |
656 | * @int_error_count: Number of internal errors seen recently | 651 | * @int_error_count: Number of internal errors seen recently |
657 | * @int_error_expire: Time at which error count will be expired | 652 | * @int_error_expire: Time at which error count will be expired |
658 | * @irq_status: Interrupt status buffer | 653 | * @irq_status: Interrupt status buffer |
659 | * @last_irq_cpu: Last CPU to handle interrupt. | 654 | * @last_irq_cpu: Last CPU to handle interrupt. |
660 | * This register is written with the SMP processor ID whenever an | 655 | * This register is written with the SMP processor ID whenever an |
661 | * interrupt is handled. It is used by efx_nic_test_interrupt() | 656 | * interrupt is handled. It is used by efx_nic_test_interrupt() |
662 | * to verify that an interrupt has occurred. | 657 | * to verify that an interrupt has occurred. |
663 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 | 658 | * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 |
664 | * @fatal_irq_level: IRQ level (bit number) used for serious errors | 659 | * @fatal_irq_level: IRQ level (bit number) used for serious errors |
665 | * @spi_flash: SPI flash device | 660 | * @spi_flash: SPI flash device |
666 | * This field will be %NULL if no flash device is present (or for Siena). | 661 | * This field will be %NULL if no flash device is present (or for Siena). |
667 | * @spi_eeprom: SPI EEPROM device | 662 | * @spi_eeprom: SPI EEPROM device |
668 | * This field will be %NULL if no EEPROM device is present (or for Siena). | 663 | * This field will be %NULL if no EEPROM device is present (or for Siena). |
669 | * @spi_lock: SPI bus lock | 664 | * @spi_lock: SPI bus lock |
670 | * @mtd_list: List of MTDs attached to the NIC | 665 | * @mtd_list: List of MTDs attached to the NIC |
671 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | 666 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count |
672 | * @nic_data: Hardware-dependent state | 667 | * @nic_data: Hardware-dependent state |
673 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, | 668 | * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, |
674 | * @port_inhibited, efx_monitor() and efx_reconfigure_port() | 669 | * @port_inhibited, efx_monitor() and efx_reconfigure_port() |
675 | * @port_enabled: Port enabled indicator. | 670 | * @port_enabled: Port enabled indicator. |
676 | * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and | 671 | * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and |
677 | * efx_mac_work() with kernel interfaces. Safe to read under any | 672 | * efx_mac_work() with kernel interfaces. Safe to read under any |
678 | * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must | 673 | * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must |
679 | * be held to modify it. | 674 | * be held to modify it. |
680 | * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock | 675 | * @port_inhibited: If set, the netif_carrier is always off. Hold the mac_lock |
681 | * @port_initialized: Port initialized? | 676 | * @port_initialized: Port initialized? |
682 | * @net_dev: Operating system network device. Consider holding the rtnl lock | 677 | * @net_dev: Operating system network device. Consider holding the rtnl lock |
683 | * @rx_checksum_enabled: RX checksumming enabled | 678 | * @rx_checksum_enabled: RX checksumming enabled |
684 | * @mac_stats: MAC statistics. These include all statistics the MACs | 679 | * @mac_stats: MAC statistics. These include all statistics the MACs |
685 | * can provide. Generic code converts these into a standard | 680 | * can provide. Generic code converts these into a standard |
686 | * &struct net_device_stats. | 681 | * &struct net_device_stats. |
687 | * @stats_buffer: DMA buffer for statistics | 682 | * @stats_buffer: DMA buffer for statistics |
688 | * @stats_lock: Statistics update lock. Serialises statistics fetches | 683 | * @stats_lock: Statistics update lock. Serialises statistics fetches |
689 | * @mac_op: MAC interface | 684 | * @mac_op: MAC interface |
690 | * @mac_address: Permanent MAC address | 685 | * @mac_address: Permanent MAC address |
691 | * @phy_type: PHY type | 686 | * @phy_type: PHY type |
692 | * @mdio_lock: MDIO lock | 687 | * @mdio_lock: MDIO lock |
693 | * @phy_op: PHY interface | 688 | * @phy_op: PHY interface |
694 | * @phy_data: PHY private data (including PHY-specific stats) | 689 | * @phy_data: PHY private data (including PHY-specific stats) |
695 | * @mdio: PHY MDIO interface | 690 | * @mdio: PHY MDIO interface |
696 | * @mdio_bus: PHY MDIO bus ID (only used by Siena) | 691 | * @mdio_bus: PHY MDIO bus ID (only used by Siena) |
697 | * @phy_mode: PHY operating mode. Serialised by @mac_lock. | 692 | * @phy_mode: PHY operating mode. Serialised by @mac_lock. |
698 | * @xmac_poll_required: XMAC link state needs polling | 693 | * @xmac_poll_required: XMAC link state needs polling |
699 | * @link_advertising: Autonegotiation advertising flags | 694 | * @link_advertising: Autonegotiation advertising flags |
700 | * @link_state: Current state of the link | 695 | * @link_state: Current state of the link |
701 | * @n_link_state_changes: Number of times the link has changed state | 696 | * @n_link_state_changes: Number of times the link has changed state |
702 | * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. | 697 | * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. |
703 | * @multicast_hash: Multicast hash table | 698 | * @multicast_hash: Multicast hash table |
704 | * @wanted_fc: Wanted flow control flags | 699 | * @wanted_fc: Wanted flow control flags |
705 | * @mac_work: Work item for changing MAC promiscuity and multicast hash | 700 | * @mac_work: Work item for changing MAC promiscuity and multicast hash |
706 | * @loopback_mode: Loopback status | 701 | * @loopback_mode: Loopback status |
707 | * @loopback_modes: Supported loopback mode bitmask | 702 | * @loopback_modes: Supported loopback mode bitmask |
708 | * @loopback_selftest: Offline self-test private state | 703 | * @loopback_selftest: Offline self-test private state |
709 | * | 704 | * |
710 | * This is stored in the private area of the &struct net_device. | 705 | * This is stored in the private area of the &struct net_device. |
711 | */ | 706 | */ |
712 | struct efx_nic { | 707 | struct efx_nic { |
713 | char name[IFNAMSIZ]; | 708 | char name[IFNAMSIZ]; |
714 | struct pci_dev *pci_dev; | 709 | struct pci_dev *pci_dev; |
715 | const struct efx_nic_type *type; | 710 | const struct efx_nic_type *type; |
716 | int legacy_irq; | 711 | int legacy_irq; |
717 | struct workqueue_struct *workqueue; | 712 | struct workqueue_struct *workqueue; |
718 | char workqueue_name[16]; | 713 | char workqueue_name[16]; |
719 | struct work_struct reset_work; | 714 | struct work_struct reset_work; |
720 | struct delayed_work monitor_work; | 715 | struct delayed_work monitor_work; |
721 | resource_size_t membase_phys; | 716 | resource_size_t membase_phys; |
722 | void __iomem *membase; | 717 | void __iomem *membase; |
723 | spinlock_t biu_lock; | 718 | spinlock_t biu_lock; |
724 | enum efx_int_mode interrupt_mode; | 719 | enum efx_int_mode interrupt_mode; |
725 | bool irq_rx_adaptive; | 720 | bool irq_rx_adaptive; |
726 | unsigned int irq_rx_moderation; | 721 | unsigned int irq_rx_moderation; |
727 | u32 msg_enable; | 722 | u32 msg_enable; |
728 | 723 | ||
729 | enum nic_state state; | 724 | enum nic_state state; |
730 | enum reset_type reset_pending; | 725 | enum reset_type reset_pending; |
731 | 726 | ||
732 | struct efx_channel *channel[EFX_MAX_CHANNELS]; | 727 | struct efx_channel *channel[EFX_MAX_CHANNELS]; |
733 | char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; | 728 | char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; |
734 | 729 | ||
735 | unsigned rxq_entries; | 730 | unsigned rxq_entries; |
736 | unsigned txq_entries; | 731 | unsigned txq_entries; |
737 | unsigned next_buffer_table; | 732 | unsigned next_buffer_table; |
738 | unsigned n_channels; | 733 | unsigned n_channels; |
739 | unsigned n_rx_channels; | 734 | unsigned n_rx_channels; |
740 | unsigned n_tx_channels; | 735 | unsigned n_tx_channels; |
741 | unsigned int rx_buffer_len; | 736 | unsigned int rx_buffer_len; |
742 | unsigned int rx_buffer_order; | 737 | unsigned int rx_buffer_order; |
743 | u8 rx_hash_key[40]; | 738 | u8 rx_hash_key[40]; |
744 | u32 rx_indir_table[128]; | 739 | u32 rx_indir_table[128]; |
745 | 740 | ||
746 | unsigned int_error_count; | 741 | unsigned int_error_count; |
747 | unsigned long int_error_expire; | 742 | unsigned long int_error_expire; |
748 | 743 | ||
749 | struct efx_buffer irq_status; | 744 | struct efx_buffer irq_status; |
750 | volatile signed int last_irq_cpu; | 745 | volatile signed int last_irq_cpu; |
751 | unsigned irq_zero_count; | 746 | unsigned irq_zero_count; |
752 | unsigned fatal_irq_level; | 747 | unsigned fatal_irq_level; |
753 | 748 | ||
754 | struct efx_spi_device *spi_flash; | 749 | struct efx_spi_device *spi_flash; |
755 | struct efx_spi_device *spi_eeprom; | 750 | struct efx_spi_device *spi_eeprom; |
756 | struct mutex spi_lock; | 751 | struct mutex spi_lock; |
757 | #ifdef CONFIG_SFC_MTD | 752 | #ifdef CONFIG_SFC_MTD |
758 | struct list_head mtd_list; | 753 | struct list_head mtd_list; |
759 | #endif | 754 | #endif |
760 | 755 | ||
761 | unsigned n_rx_nodesc_drop_cnt; | 756 | unsigned n_rx_nodesc_drop_cnt; |
762 | 757 | ||
763 | void *nic_data; | 758 | void *nic_data; |
764 | 759 | ||
765 | struct mutex mac_lock; | 760 | struct mutex mac_lock; |
766 | struct work_struct mac_work; | 761 | struct work_struct mac_work; |
767 | bool port_enabled; | 762 | bool port_enabled; |
768 | bool port_inhibited; | 763 | bool port_inhibited; |
769 | 764 | ||
770 | bool port_initialized; | 765 | bool port_initialized; |
771 | struct net_device *net_dev; | 766 | struct net_device *net_dev; |
772 | bool rx_checksum_enabled; | 767 | bool rx_checksum_enabled; |
773 | 768 | ||
774 | struct efx_mac_stats mac_stats; | 769 | struct efx_mac_stats mac_stats; |
775 | struct efx_buffer stats_buffer; | 770 | struct efx_buffer stats_buffer; |
776 | spinlock_t stats_lock; | 771 | spinlock_t stats_lock; |
777 | 772 | ||
778 | struct efx_mac_operations *mac_op; | 773 | struct efx_mac_operations *mac_op; |
779 | unsigned char mac_address[ETH_ALEN]; | 774 | unsigned char mac_address[ETH_ALEN]; |
780 | 775 | ||
781 | unsigned int phy_type; | 776 | unsigned int phy_type; |
782 | struct mutex mdio_lock; | 777 | struct mutex mdio_lock; |
783 | struct efx_phy_operations *phy_op; | 778 | struct efx_phy_operations *phy_op; |
784 | void *phy_data; | 779 | void *phy_data; |
785 | struct mdio_if_info mdio; | 780 | struct mdio_if_info mdio; |
786 | unsigned int mdio_bus; | 781 | unsigned int mdio_bus; |
787 | enum efx_phy_mode phy_mode; | 782 | enum efx_phy_mode phy_mode; |
788 | 783 | ||
789 | bool xmac_poll_required; | 784 | bool xmac_poll_required; |
790 | u32 link_advertising; | 785 | u32 link_advertising; |
791 | struct efx_link_state link_state; | 786 | struct efx_link_state link_state; |
792 | unsigned int n_link_state_changes; | 787 | unsigned int n_link_state_changes; |
793 | 788 | ||
794 | bool promiscuous; | 789 | bool promiscuous; |
795 | union efx_multicast_hash multicast_hash; | 790 | union efx_multicast_hash multicast_hash; |
796 | enum efx_fc_type wanted_fc; | 791 | enum efx_fc_type wanted_fc; |
797 | 792 | ||
798 | atomic_t rx_reset; | 793 | atomic_t rx_reset; |
799 | enum efx_loopback_mode loopback_mode; | 794 | enum efx_loopback_mode loopback_mode; |
800 | u64 loopback_modes; | 795 | u64 loopback_modes; |
801 | 796 | ||
802 | void *loopback_selftest; | 797 | void *loopback_selftest; |
803 | 798 | ||
804 | struct efx_filter_state *filter_state; | 799 | struct efx_filter_state *filter_state; |
805 | }; | 800 | }; |
806 | 801 | ||
807 | static inline int efx_dev_registered(struct efx_nic *efx) | 802 | static inline int efx_dev_registered(struct efx_nic *efx) |
808 | { | 803 | { |
809 | return efx->net_dev->reg_state == NETREG_REGISTERED; | 804 | return efx->net_dev->reg_state == NETREG_REGISTERED; |
810 | } | 805 | } |
811 | 806 | ||
812 | /* Net device name, for inclusion in log messages if it has been registered. | 807 | /* Net device name, for inclusion in log messages if it has been registered. |
813 | * Use efx->name not efx->net_dev->name so that races with (un)registration | 808 | * Use efx->name not efx->net_dev->name so that races with (un)registration |
814 | * are harmless. | 809 | * are harmless. |
815 | */ | 810 | */ |
816 | static inline const char *efx_dev_name(struct efx_nic *efx) | 811 | static inline const char *efx_dev_name(struct efx_nic *efx) |
817 | { | 812 | { |
818 | return efx_dev_registered(efx) ? efx->name : ""; | 813 | return efx_dev_registered(efx) ? efx->name : ""; |
819 | } | 814 | } |
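A sketch of the kind of use efx_dev_name() exists for (hypothetical message; the channel/IRQ naming code is the real consumer):

        /* Safe even if the net device is (un)registering concurrently. */
        snprintf(efx->channel_name[0], sizeof(efx->channel_name[0]),
                 "%s-0", efx_dev_name(efx));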
820 | 815 | ||
821 | static inline unsigned int efx_port_num(struct efx_nic *efx) | 816 | static inline unsigned int efx_port_num(struct efx_nic *efx) |
822 | { | 817 | { |
823 | return efx->net_dev->dev_id; | 818 | return efx->net_dev->dev_id; |
824 | } | 819 | } |
825 | 820 | ||
826 | /** | 821 | /** |
827 | * struct efx_nic_type - Efx device type definition | 822 | * struct efx_nic_type - Efx device type definition |
828 | * @probe: Probe the controller | 823 | * @probe: Probe the controller |
829 | * @remove: Free resources allocated by probe() | 824 | * @remove: Free resources allocated by probe() |
830 | * @init: Initialise the controller | 825 | * @init: Initialise the controller |
831 | * @fini: Shut down the controller | 826 | * @fini: Shut down the controller |
832 | * @monitor: Periodic function for polling link state and hardware monitor | 827 | * @monitor: Periodic function for polling link state and hardware monitor |
833 | * @reset: Reset the controller hardware and possibly the PHY. This will | 828 | * @reset: Reset the controller hardware and possibly the PHY. This will |
834 | * be called while the controller is uninitialised. | 829 | * be called while the controller is uninitialised. |
835 | * @probe_port: Probe the MAC and PHY | 830 | * @probe_port: Probe the MAC and PHY |
836 | * @remove_port: Free resources allocated by probe_port() | 831 | * @remove_port: Free resources allocated by probe_port() |
837 | * @prepare_flush: Prepare the hardware for flushing the DMA queues | 832 | * @prepare_flush: Prepare the hardware for flushing the DMA queues |
838 | * @update_stats: Update statistics not provided by event handling | 833 | * @update_stats: Update statistics not provided by event handling |
839 | * @start_stats: Start the regular fetching of statistics | 834 | * @start_stats: Start the regular fetching of statistics |
840 | * @stop_stats: Stop the regular fetching of statistics | 835 | * @stop_stats: Stop the regular fetching of statistics |
841 | * @set_id_led: Set state of identifying LED or revert to automatic function | 836 | * @set_id_led: Set state of identifying LED or revert to automatic function |
842 | * @push_irq_moderation: Apply interrupt moderation value | 837 | * @push_irq_moderation: Apply interrupt moderation value |
843 | * @push_multicast_hash: Apply multicast hash table | 838 | * @push_multicast_hash: Apply multicast hash table |
844 | * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY | 839 | * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY |
845 | * @get_wol: Get WoL configuration from driver state | 840 | * @get_wol: Get WoL configuration from driver state |
846 | * @set_wol: Push WoL configuration to the NIC | 841 | * @set_wol: Push WoL configuration to the NIC |
847 | * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) | 842 | * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) |
848 | * @test_registers: Test read/write functionality of control registers | 843 | * @test_registers: Test read/write functionality of control registers |
849 | * @test_nvram: Test validity of NVRAM contents | 844 | * @test_nvram: Test validity of NVRAM contents |
850 | * @default_mac_ops: efx_mac_operations to set at startup | 845 | * @default_mac_ops: efx_mac_operations to set at startup |
851 | * @revision: Hardware architecture revision | 846 | * @revision: Hardware architecture revision |
852 | * @mem_map_size: Memory BAR mapped size | 847 | * @mem_map_size: Memory BAR mapped size |
853 | * @txd_ptr_tbl_base: TX descriptor ring base address | 848 | * @txd_ptr_tbl_base: TX descriptor ring base address |
854 | * @rxd_ptr_tbl_base: RX descriptor ring base address | 849 | * @rxd_ptr_tbl_base: RX descriptor ring base address |
855 | * @buf_tbl_base: Buffer table base address | 850 | * @buf_tbl_base: Buffer table base address |
856 | * @evq_ptr_tbl_base: Event queue pointer table base address | 851 | * @evq_ptr_tbl_base: Event queue pointer table base address |
857 | * @evq_rptr_tbl_base: Event queue read-pointer table base address | 852 | * @evq_rptr_tbl_base: Event queue read-pointer table base address |
858 | * @max_dma_mask: Maximum possible DMA mask | 853 | * @max_dma_mask: Maximum possible DMA mask |
859 | * @rx_buffer_hash_size: Size of hash at start of RX buffer | 854 | * @rx_buffer_hash_size: Size of hash at start of RX buffer |
860 | * @rx_buffer_padding: Size of padding at end of RX buffer | 855 | * @rx_buffer_padding: Size of padding at end of RX buffer |
861 | * @max_interrupt_mode: Highest capability interrupt mode supported | 856 | * @max_interrupt_mode: Highest capability interrupt mode supported |
862 | * from &enum efx_int_mode. | 857 | * from &enum efx_int_mode. |
863 | * @phys_addr_channels: Number of channels with physically addressed | 858 | * @phys_addr_channels: Number of channels with physically addressed |
864 | * descriptors | 859 | * descriptors |
865 | * @tx_dc_base: Base address in SRAM of TX queue descriptor caches | 860 | * @tx_dc_base: Base address in SRAM of TX queue descriptor caches |
866 | * @rx_dc_base: Base address in SRAM of RX queue descriptor caches | 861 | * @rx_dc_base: Base address in SRAM of RX queue descriptor caches |
867 | * @offload_features: net_device feature flags for protocol offload | 862 | * @offload_features: net_device feature flags for protocol offload |
868 | * features implemented in hardware | 863 | * features implemented in hardware |
869 | * @reset_world_flags: Flags for additional components covered by | 864 | * @reset_world_flags: Flags for additional components covered by |
870 | * reset method RESET_TYPE_WORLD | 865 | * reset method RESET_TYPE_WORLD |
871 | */ | 866 | */ |
872 | struct efx_nic_type { | 867 | struct efx_nic_type { |
873 | int (*probe)(struct efx_nic *efx); | 868 | int (*probe)(struct efx_nic *efx); |
874 | void (*remove)(struct efx_nic *efx); | 869 | void (*remove)(struct efx_nic *efx); |
875 | int (*init)(struct efx_nic *efx); | 870 | int (*init)(struct efx_nic *efx); |
876 | void (*fini)(struct efx_nic *efx); | 871 | void (*fini)(struct efx_nic *efx); |
877 | void (*monitor)(struct efx_nic *efx); | 872 | void (*monitor)(struct efx_nic *efx); |
878 | int (*reset)(struct efx_nic *efx, enum reset_type method); | 873 | int (*reset)(struct efx_nic *efx, enum reset_type method); |
879 | int (*probe_port)(struct efx_nic *efx); | 874 | int (*probe_port)(struct efx_nic *efx); |
880 | void (*remove_port)(struct efx_nic *efx); | 875 | void (*remove_port)(struct efx_nic *efx); |
881 | void (*prepare_flush)(struct efx_nic *efx); | 876 | void (*prepare_flush)(struct efx_nic *efx); |
882 | void (*update_stats)(struct efx_nic *efx); | 877 | void (*update_stats)(struct efx_nic *efx); |
883 | void (*start_stats)(struct efx_nic *efx); | 878 | void (*start_stats)(struct efx_nic *efx); |
884 | void (*stop_stats)(struct efx_nic *efx); | 879 | void (*stop_stats)(struct efx_nic *efx); |
885 | void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); | 880 | void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); |
886 | void (*push_irq_moderation)(struct efx_channel *channel); | 881 | void (*push_irq_moderation)(struct efx_channel *channel); |
887 | void (*push_multicast_hash)(struct efx_nic *efx); | 882 | void (*push_multicast_hash)(struct efx_nic *efx); |
888 | int (*reconfigure_port)(struct efx_nic *efx); | 883 | int (*reconfigure_port)(struct efx_nic *efx); |
889 | void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); | 884 | void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); |
890 | int (*set_wol)(struct efx_nic *efx, u32 type); | 885 | int (*set_wol)(struct efx_nic *efx, u32 type); |
891 | void (*resume_wol)(struct efx_nic *efx); | 886 | void (*resume_wol)(struct efx_nic *efx); |
892 | int (*test_registers)(struct efx_nic *efx); | 887 | int (*test_registers)(struct efx_nic *efx); |
893 | int (*test_nvram)(struct efx_nic *efx); | 888 | int (*test_nvram)(struct efx_nic *efx); |
894 | struct efx_mac_operations *default_mac_ops; | 889 | struct efx_mac_operations *default_mac_ops; |
895 | 890 | ||
896 | int revision; | 891 | int revision; |
897 | unsigned int mem_map_size; | 892 | unsigned int mem_map_size; |
898 | unsigned int txd_ptr_tbl_base; | 893 | unsigned int txd_ptr_tbl_base; |
899 | unsigned int rxd_ptr_tbl_base; | 894 | unsigned int rxd_ptr_tbl_base; |
900 | unsigned int buf_tbl_base; | 895 | unsigned int buf_tbl_base; |
901 | unsigned int evq_ptr_tbl_base; | 896 | unsigned int evq_ptr_tbl_base; |
902 | unsigned int evq_rptr_tbl_base; | 897 | unsigned int evq_rptr_tbl_base; |
903 | u64 max_dma_mask; | 898 | u64 max_dma_mask; |
904 | unsigned int rx_buffer_hash_size; | 899 | unsigned int rx_buffer_hash_size; |
905 | unsigned int rx_buffer_padding; | 900 | unsigned int rx_buffer_padding; |
906 | unsigned int max_interrupt_mode; | 901 | unsigned int max_interrupt_mode; |
907 | unsigned int phys_addr_channels; | 902 | unsigned int phys_addr_channels; |
908 | unsigned int tx_dc_base; | 903 | unsigned int tx_dc_base; |
909 | unsigned int rx_dc_base; | 904 | unsigned int rx_dc_base; |
910 | unsigned long offload_features; | 905 | unsigned long offload_features; |
911 | u32 reset_world_flags; | 906 | u32 reset_world_flags; |
912 | }; | 907 | }; |
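
For orientation, here is a minimal sketch of how a NIC variant might fill in this vtable. Everything below is illustrative: the example_* names and field values are made up, though real instances of this structure (e.g. falcon_a1_nic_type, falcon_b0_nic_type, siena_a0_nic_type) are defined elsewhere in the driver.

```c
/* Hypothetical, cut-down efx_nic_type instance; the example_* names
 * and field values are illustrative, not taken from the driver.
 * Unlisted function pointers are implicitly NULL. */
static int example_probe(struct efx_nic *efx)
{
	return 0;	/* claim the controller, allocate driver state */
}

static void example_remove(struct efx_nic *efx)
{
	/* free whatever example_probe() allocated */
}

static struct efx_nic_type example_nic_type = {
	.probe		= example_probe,
	.remove		= example_remove,
	.revision	= EFX_REV_FALCON_B0,
	.max_dma_mask	= DMA_BIT_MASK(46),
};
```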
913 | 908 | ||
914 | /************************************************************************** | 909 | /************************************************************************** |
915 | * | 910 | * |
916 | * Prototypes and inline functions | 911 | * Prototypes and inline functions |
917 | * | 912 | * |
918 | *************************************************************************/ | 913 | *************************************************************************/ |
919 | 914 | ||
920 | static inline struct efx_channel * | 915 | static inline struct efx_channel * |
921 | efx_get_channel(struct efx_nic *efx, unsigned index) | 916 | efx_get_channel(struct efx_nic *efx, unsigned index) |
922 | { | 917 | { |
923 | EFX_BUG_ON_PARANOID(index >= efx->n_channels); | 918 | EFX_BUG_ON_PARANOID(index >= efx->n_channels); |
924 | return efx->channel[index]; | 919 | return efx->channel[index]; |
925 | } | 920 | } |
926 | 921 | ||
927 | /* Iterate over all used channels */ | 922 | /* Iterate over all used channels */ |
928 | #define efx_for_each_channel(_channel, _efx) \ | 923 | #define efx_for_each_channel(_channel, _efx) \ |
929 | for (_channel = (_efx)->channel[0]; \ | 924 | for (_channel = (_efx)->channel[0]; \ |
930 | _channel; \ | 925 | _channel; \ |
931 | _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ | 926 | _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ |
932 | (_efx)->channel[_channel->channel + 1] : NULL) | 927 | (_efx)->channel[_channel->channel + 1] : NULL) |
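
Illustrative use of the iterator above; a sketch only, assuming a valid efx pointer (the example_* function is hypothetical):

```c
/* Sketch: log every used channel via efx_for_each_channel(). */
static void example_log_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		netif_dbg(efx, drv, efx->net_dev,
			  "channel %d in use\n", channel->channel);
}
```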
933 | 928 | ||
934 | extern struct efx_tx_queue * | 929 | extern struct efx_tx_queue * |
935 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type); | 930 | efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type); |
936 | 931 | ||
937 | static inline struct efx_tx_queue * | 932 | static inline struct efx_tx_queue * |
938 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) | 933 | efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) |
939 | { | 934 | { |
940 | struct efx_tx_queue *tx_queue = channel->tx_queue; | 935 | struct efx_tx_queue *tx_queue = channel->tx_queue; |
941 | EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES); | 936 | EFX_BUG_ON_PARANOID(type >= EFX_TXQ_TYPES); |
942 | return tx_queue->channel ? tx_queue + type : NULL; | 937 | return tx_queue->channel ? tx_queue + type : NULL; |
943 | } | 938 | } |
944 | 939 | ||
945 | /* Iterate over all TX queues belonging to a channel */ | 940 | /* Iterate over all TX queues belonging to a channel */ |
946 | #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ | 941 | #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ |
947 | for (_tx_queue = efx_channel_get_tx_queue(_channel, 0); \ | 942 | for (_tx_queue = efx_channel_get_tx_queue(_channel, 0); \ |
948 | _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ | 943 | _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ |
949 | _tx_queue++) | 944 | _tx_queue++) |
950 | 945 | ||
951 | static inline struct efx_rx_queue * | 946 | static inline struct efx_rx_queue * |
952 | efx_get_rx_queue(struct efx_nic *efx, unsigned index) | 947 | efx_get_rx_queue(struct efx_nic *efx, unsigned index) |
953 | { | 948 | { |
954 | EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels); | 949 | EFX_BUG_ON_PARANOID(index >= efx->n_rx_channels); |
955 | return &efx->channel[index]->rx_queue; | 950 | return &efx->channel[index]->rx_queue; |
956 | } | 951 | } |
957 | 952 | ||
958 | static inline struct efx_rx_queue * | 953 | static inline struct efx_rx_queue * |
959 | efx_channel_get_rx_queue(struct efx_channel *channel) | 954 | efx_channel_get_rx_queue(struct efx_channel *channel) |
960 | { | 955 | { |
961 | return channel->channel < channel->efx->n_rx_channels ? | 956 | return channel->channel < channel->efx->n_rx_channels ? |
962 | &channel->rx_queue : NULL; | 957 | &channel->rx_queue : NULL; |
963 | } | 958 | } |
964 | 959 | ||
965 | /* Iterate over all RX queues belonging to a channel */ | 960 | /* Iterate over all RX queues belonging to a channel */ |
966 | #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ | 961 | #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ |
967 | for (_rx_queue = efx_channel_get_rx_queue(_channel); \ | 962 | for (_rx_queue = efx_channel_get_rx_queue(_channel); \ |
968 | _rx_queue; \ | 963 | _rx_queue; \ |
969 | _rx_queue = NULL) | 964 | _rx_queue = NULL) |
970 | 965 | ||
971 | static inline struct efx_channel * | 966 | static inline struct efx_channel * |
972 | efx_rx_queue_channel(struct efx_rx_queue *rx_queue) | 967 | efx_rx_queue_channel(struct efx_rx_queue *rx_queue) |
973 | { | 968 | { |
974 | return container_of(rx_queue, struct efx_channel, rx_queue); | 969 | return container_of(rx_queue, struct efx_channel, rx_queue); |
975 | } | 970 | } |
976 | 971 | ||
977 | static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) | 972 | static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) |
978 | { | 973 | { |
979 | return efx_rx_queue_channel(rx_queue)->channel; | 974 | return efx_rx_queue_channel(rx_queue)->channel; |
980 | } | 975 | } |
981 | 976 | ||
982 | /* Returns a pointer to the specified receive buffer in the RX | 977 | /* Returns a pointer to the specified receive buffer in the RX |
983 | * descriptor queue. | 978 | * descriptor queue. |
984 | */ | 979 | */ |
985 | static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, | 980 | static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, |
986 | unsigned int index) | 981 | unsigned int index) |
987 | { | 982 | { |
988 | return &rx_queue->buffer[index]; | 983 | return &rx_queue->buffer[index]; |
989 | } | 984 | } |
990 | 985 | ||
991 | /* Set bit in a little-endian bitfield */ | 986 | /* Set bit in a little-endian bitfield */ |
992 | static inline void set_bit_le(unsigned nr, unsigned char *addr) | 987 | static inline void set_bit_le(unsigned nr, unsigned char *addr) |
993 | { | 988 | { |
994 | addr[nr / 8] |= (1 << (nr % 8)); | 989 | addr[nr / 8] |= (1 << (nr % 8)); |
995 | } | 990 | } |
996 | 991 | ||
997 | /* Clear bit in a little-endian bitfield */ | 992 | /* Clear bit in a little-endian bitfield */ |
998 | static inline void clear_bit_le(unsigned nr, unsigned char *addr) | 993 | static inline void clear_bit_le(unsigned nr, unsigned char *addr) |
999 | { | 994 | { |
1000 | addr[nr / 8] &= ~(1 << (nr % 8)); | 995 | addr[nr / 8] &= ~(1 << (nr % 8)); |
1001 | } | 996 | } |
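
A quick illustration of the byte/bit arithmetic in these helpers (a sketch, not driver code); the mapping is defined by byte order in memory, so it is independent of host endianness:

```c
static void example_bit_le_usage(void)
{
	unsigned char bitmap[2] = { 0, 0 };

	/* Bit 10 lives in byte 10 / 8 == 1, at bit 10 % 8 == 2 */
	set_bit_le(10, bitmap);		/* bitmap[1] == 0x04 */
	clear_bit_le(10, bitmap);	/* bitmap[1] == 0x00 again */
}
```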
1002 | 997 | ||
1003 | 998 | ||
1004 | /** | 999 | /** |
1005 | * EFX_MAX_FRAME_LEN - calculate maximum frame length | 1000 | * EFX_MAX_FRAME_LEN - calculate maximum frame length |
1006 | * | 1001 | * |
1007 | * This calculates the maximum frame length that will be used for a | 1002 | * This calculates the maximum frame length that will be used for a |
1008 | * given MTU. The frame length will be equal to the MTU plus a | 1003 | * given MTU. The frame length will be equal to the MTU plus a |
1009 | * constant amount of header space and padding. This is the quantity | 1004 | * constant amount of header space and padding. This is the quantity |
1010 | * that the net driver will program into the MAC as the maximum frame | 1005 | * that the net driver will program into the MAC as the maximum frame |
1011 | * length. | 1006 | * length. |
1012 | * | 1007 | * |
1013 | * The 10G MAC requires 8-byte alignment on the frame | 1008 | * The 10G MAC requires 8-byte alignment on the frame |
1014 | * length, so we round up to the nearest 8. | 1009 | * length, so we round up to the nearest 8. |
1015 | * | 1010 | * |
1016 | * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an | 1011 | * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an |
1017 | * XGMII cycle). If the frame length reaches the maximum value in the | 1012 | * XGMII cycle). If the frame length reaches the maximum value in the |
1018 | * same cycle, the XMAC can miss the IPG altogether. We work around | 1013 | * same cycle, the XMAC can miss the IPG altogether. We work around |
1019 | * this by adding a further 16 bytes. | 1014 | * this by adding a further 16 bytes. |
1020 | */ | 1015 | */ |
1021 | #define EFX_MAX_FRAME_LEN(mtu) \ | 1016 | #define EFX_MAX_FRAME_LEN(mtu) \ |
1022 | ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) | 1017 | ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) |
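
A worked example for the standard Ethernet MTU:

```c
/* EFX_MAX_FRAME_LEN(1500):
 *   1500 + ETH_HLEN (14) + VLAN_HLEN (4) + FCS (4) = 1522
 *   rounded up to a multiple of 8                 -> 1528
 *   plus the 16-byte IPG workaround               -> 1544
 */
```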
1023 | 1018 | ||
1024 | 1019 | ||
1025 | #endif /* EFX_NET_DRIVER_H */ | 1020 | #endif /* EFX_NET_DRIVER_H */ |
1026 | 1021 |
drivers/net/sfc/nic.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2009 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/seq_file.h> | 15 | #include <linux/seq_file.h> |
16 | #include "net_driver.h" | 16 | #include "net_driver.h" |
17 | #include "bitfield.h" | 17 | #include "bitfield.h" |
18 | #include "efx.h" | 18 | #include "efx.h" |
19 | #include "nic.h" | 19 | #include "nic.h" |
20 | #include "regs.h" | 20 | #include "regs.h" |
21 | #include "io.h" | 21 | #include "io.h" |
22 | #include "workarounds.h" | 22 | #include "workarounds.h" |
23 | 23 | ||
24 | /************************************************************************** | 24 | /************************************************************************** |
25 | * | 25 | * |
26 | * Configurable values | 26 | * Configurable values |
27 | * | 27 | * |
28 | ************************************************************************** | 28 | ************************************************************************** |
29 | */ | 29 | */ |
30 | 30 | ||
31 | /* This is set to 16 for a good reason. In summary, if larger than | 31 | /* This is set to 16 for a good reason. In summary, if larger than |
32 | * 16, the descriptor cache holds more than a default socket | 32 | * 16, the descriptor cache holds more than a default socket |
33 | * buffer's worth of packets (for UDP we can only have at most one | 33 | * buffer's worth of packets (for UDP we can only have at most one |
34 | * socket buffer's worth outstanding). This, combined with the fact | 34 | * socket buffer's worth outstanding). This, combined with the fact |
35 | * that we only get one TX event per descriptor cache, means the NIC | 35 | * that we only get one TX event per descriptor cache, means the NIC |
36 | * goes idle. | 36 | * goes idle. |
37 | */ | 37 | */ |
38 | #define TX_DC_ENTRIES 16 | 38 | #define TX_DC_ENTRIES 16 |
39 | #define TX_DC_ENTRIES_ORDER 1 | 39 | #define TX_DC_ENTRIES_ORDER 1 |
40 | 40 | ||
41 | #define RX_DC_ENTRIES 64 | 41 | #define RX_DC_ENTRIES 64 |
42 | #define RX_DC_ENTRIES_ORDER 3 | 42 | #define RX_DC_ENTRIES_ORDER 3 |
43 | 43 | ||
44 | /* RX FIFO XOFF watermark | 44 | /* RX FIFO XOFF watermark |
45 | * | 45 | * |
46 | * When the amount of the RX FIFO used increases past this | 46 | * When the amount of the RX FIFO used increases past this |
47 | * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A). | 47 | * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A). |
48 | * This also has an effect on RX/TX arbitration. | 48 | * This also has an effect on RX/TX arbitration. |
49 | */ | 49 | */ |
50 | int efx_nic_rx_xoff_thresh = -1; | 50 | int efx_nic_rx_xoff_thresh = -1; |
51 | module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644); | 51 | module_param_named(rx_xoff_thresh_bytes, efx_nic_rx_xoff_thresh, int, 0644); |
52 | MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold"); | 52 | MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold"); |
53 | 53 | ||
54 | /* RX FIFO XON watermark | 54 | /* RX FIFO XON watermark |
55 | * | 55 | * |
56 | * When the amount of the RX FIFO used decreases below this | 56 | * When the amount of the RX FIFO used decreases below this |
57 | * watermark, send XON. Only used if TX flow control is enabled (ethtool -A). | 57 | * watermark, send XON. Only used if TX flow control is enabled (ethtool -A). |
58 | * This also has an effect on RX/TX arbitration. | 58 | * This also has an effect on RX/TX arbitration. |
59 | */ | 59 | */ |
60 | int efx_nic_rx_xon_thresh = -1; | 60 | int efx_nic_rx_xon_thresh = -1; |
61 | module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644); | 61 | module_param_named(rx_xon_thresh_bytes, efx_nic_rx_xon_thresh, int, 0644); |
62 | MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | 62 | MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); |
63 | 63 | ||
64 | /* If EFX_MAX_INT_ERRORS internal errors occur within | 64 | /* If EFX_MAX_INT_ERRORS internal errors occur within |
65 | * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and | 65 | * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and |
66 | * disable it. | 66 | * disable it. |
67 | */ | 67 | */ |
68 | #define EFX_INT_ERROR_EXPIRE 3600 | 68 | #define EFX_INT_ERROR_EXPIRE 3600 |
69 | #define EFX_MAX_INT_ERRORS 5 | 69 | #define EFX_MAX_INT_ERRORS 5 |
70 | 70 | ||
71 | /* We poll for events every EFX_FLUSH_INTERVAL ms, and check up to EFX_FLUSH_POLL_COUNT times | 71 | /* We poll for events every EFX_FLUSH_INTERVAL ms, and check up to EFX_FLUSH_POLL_COUNT times |
72 | */ | 72 | */ |
73 | #define EFX_FLUSH_INTERVAL 10 | 73 | #define EFX_FLUSH_INTERVAL 10 |
74 | #define EFX_FLUSH_POLL_COUNT 100 | 74 | #define EFX_FLUSH_POLL_COUNT 100 |
75 | 75 | ||
76 | /* Size and alignment of special buffers (4KB) */ | 76 | /* Size and alignment of special buffers (4KB) */ |
77 | #define EFX_BUF_SIZE 4096 | 77 | #define EFX_BUF_SIZE 4096 |
78 | 78 | ||
79 | /* Depth of RX flush request fifo */ | 79 | /* Depth of RX flush request fifo */ |
80 | #define EFX_RX_FLUSH_COUNT 4 | 80 | #define EFX_RX_FLUSH_COUNT 4 |
81 | 81 | ||
82 | /* Generated event code for efx_generate_test_event() */ | 82 | /* Generated event code for efx_generate_test_event() */ |
83 | #define EFX_CHANNEL_MAGIC_TEST(_channel) \ | 83 | #define EFX_CHANNEL_MAGIC_TEST(_channel) \ |
84 | (0x00010100 + (_channel)->channel) | 84 | (0x00010100 + (_channel)->channel) |
85 | 85 | ||
86 | /* Generated event code for efx_generate_fill_event() */ | 86 | /* Generated event code for efx_generate_fill_event() */ |
87 | #define EFX_CHANNEL_MAGIC_FILL(_channel) \ | 87 | #define EFX_CHANNEL_MAGIC_FILL(_channel) \ |
88 | (0x00010200 + (_channel)->channel) | 88 | (0x00010200 + (_channel)->channel) |
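
For example, for channel 3 the two macros yield distinct codes, which is how the event handler tells a test event from a fill event:

```c
/* e.g. for channel 3:
 *   EFX_CHANNEL_MAGIC_TEST -> 0x00010103
 *   EFX_CHANNEL_MAGIC_FILL -> 0x00010203
 */
```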
89 | 89 | ||
90 | /************************************************************************** | 90 | /************************************************************************** |
91 | * | 91 | * |
92 | * Solarstorm hardware access | 92 | * Solarstorm hardware access |
93 | * | 93 | * |
94 | **************************************************************************/ | 94 | **************************************************************************/ |
95 | 95 | ||
96 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, | 96 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, |
97 | unsigned int index) | 97 | unsigned int index) |
98 | { | 98 | { |
99 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, | 99 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, |
100 | value, index); | 100 | value, index); |
101 | } | 101 | } |
102 | 102 | ||
103 | /* Read the current event from the event queue */ | 103 | /* Read the current event from the event queue */ |
104 | static inline efx_qword_t *efx_event(struct efx_channel *channel, | 104 | static inline efx_qword_t *efx_event(struct efx_channel *channel, |
105 | unsigned int index) | 105 | unsigned int index) |
106 | { | 106 | { |
107 | return ((efx_qword_t *) (channel->eventq.addr)) + index; | 107 | return ((efx_qword_t *) (channel->eventq.addr)) + index; |
108 | } | 108 | } |
109 | 109 | ||
110 | /* See if an event is present | 110 | /* See if an event is present |
111 | * | 111 | * |
112 | * We check both the high and low dword of the event for all ones. We | 112 | * We check both the high and low dword of the event for all ones. We |
113 | * wrote all ones when we cleared the event, and no valid event can | 113 | * wrote all ones when we cleared the event, and no valid event can |
114 | * have all ones in either its high or low dwords. This approach is | 114 | * have all ones in either its high or low dwords. This approach is |
115 | * robust against reordering. | 115 | * robust against reordering. |
116 | * | 116 | * |
117 | * Note that using a single 64-bit comparison is incorrect; even | 117 | * Note that using a single 64-bit comparison is incorrect; even |
118 | * though the CPU read will be atomic, the DMA write may not be. | 118 | * though the CPU read will be atomic, the DMA write may not be. |
119 | */ | 119 | */ |
120 | static inline int efx_event_present(efx_qword_t *event) | 120 | static inline int efx_event_present(efx_qword_t *event) |
121 | { | 121 | { |
122 | return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | | 122 | return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | |
123 | EFX_DWORD_IS_ALL_ONES(event->dword[1])); | 123 | EFX_DWORD_IS_ALL_ONES(event->dword[1])); |
124 | } | 124 | } |
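
A sketch of the invariant this relies on: a queue entry that was cleared (memset to 0xff at allocation, see efx_alloc_special_buffer() below) always reads back as "no event present". The example_* function is hypothetical:

```c
static void example_event_present_check(void)
{
	efx_qword_t ev;

	memset(&ev, 0xff, sizeof(ev));		/* as after queue init */
	WARN_ON(efx_event_present(&ev));	/* all ones -> not present */
}
```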
125 | 125 | ||
126 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | 126 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, |
127 | const efx_oword_t *mask) | 127 | const efx_oword_t *mask) |
128 | { | 128 | { |
129 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | 129 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || |
130 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | 130 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); |
131 | } | 131 | } |
132 | 132 | ||
133 | int efx_nic_test_registers(struct efx_nic *efx, | 133 | int efx_nic_test_registers(struct efx_nic *efx, |
134 | const struct efx_nic_register_test *regs, | 134 | const struct efx_nic_register_test *regs, |
135 | size_t n_regs) | 135 | size_t n_regs) |
136 | { | 136 | { |
137 | unsigned address = 0, i, j; | 137 | unsigned address = 0, i, j; |
138 | efx_oword_t mask, imask, original, reg, buf; | 138 | efx_oword_t mask, imask, original, reg, buf; |
139 | 139 | ||
140 | /* Falcon should be in loopback to isolate the XMAC from the PHY */ | 140 | /* Falcon should be in loopback to isolate the XMAC from the PHY */ |
141 | WARN_ON(!LOOPBACK_INTERNAL(efx)); | 141 | WARN_ON(!LOOPBACK_INTERNAL(efx)); |
142 | 142 | ||
143 | for (i = 0; i < n_regs; ++i) { | 143 | for (i = 0; i < n_regs; ++i) { |
144 | address = regs[i].address; | 144 | address = regs[i].address; |
145 | mask = imask = regs[i].mask; | 145 | mask = imask = regs[i].mask; |
146 | EFX_INVERT_OWORD(imask); | 146 | EFX_INVERT_OWORD(imask); |
147 | 147 | ||
148 | efx_reado(efx, &original, address); | 148 | efx_reado(efx, &original, address); |
149 | 149 | ||
150 | /* bit sweep on and off */ | 150 | /* bit sweep on and off */ |
151 | for (j = 0; j < 128; j++) { | 151 | for (j = 0; j < 128; j++) { |
152 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) | 152 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) |
153 | continue; | 153 | continue; |
154 | 154 | ||
155 | /* Test this testable bit can be set in isolation */ | 155 | /* Test this testable bit can be set in isolation */ |
156 | EFX_AND_OWORD(reg, original, mask); | 156 | EFX_AND_OWORD(reg, original, mask); |
157 | EFX_SET_OWORD32(reg, j, j, 1); | 157 | EFX_SET_OWORD32(reg, j, j, 1); |
158 | 158 | ||
159 | efx_writeo(efx, ®, address); | 159 | efx_writeo(efx, ®, address); |
160 | efx_reado(efx, &buf, address); | 160 | efx_reado(efx, &buf, address); |
161 | 161 | ||
162 | if (efx_masked_compare_oword(®, &buf, &mask)) | 162 | if (efx_masked_compare_oword(®, &buf, &mask)) |
163 | goto fail; | 163 | goto fail; |
164 | 164 | ||
165 | /* Test this testable bit can be cleared in isolation */ | 165 | /* Test this testable bit can be cleared in isolation */ |
166 | EFX_OR_OWORD(reg, original, mask); | 166 | EFX_OR_OWORD(reg, original, mask); |
167 | EFX_SET_OWORD32(reg, j, j, 0); | 167 | EFX_SET_OWORD32(reg, j, j, 0); |
168 | 168 | ||
169 | efx_writeo(efx, ®, address); | 169 | efx_writeo(efx, ®, address); |
170 | efx_reado(efx, &buf, address); | 170 | efx_reado(efx, &buf, address); |
171 | 171 | ||
172 | if (efx_masked_compare_oword(®, &buf, &mask)) | 172 | if (efx_masked_compare_oword(®, &buf, &mask)) |
173 | goto fail; | 173 | goto fail; |
174 | } | 174 | } |
175 | 175 | ||
176 | efx_writeo(efx, &original, address); | 176 | efx_writeo(efx, &original, address); |
177 | } | 177 | } |
178 | 178 | ||
179 | return 0; | 179 | return 0; |
180 | 180 | ||
181 | fail: | 181 | fail: |
182 | netif_err(efx, hw, efx->net_dev, | 182 | netif_err(efx, hw, efx->net_dev, |
183 | "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | 183 | "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT |
184 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | 184 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), |
185 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | 185 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); |
186 | return -EIO; | 186 | return -EIO; |
187 | } | 187 | } |
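
A sketch of how a caller might drive this, assuming struct efx_nic_register_test pairs a register address with a mask of testable bits; the table below is hypothetical, not a real Falcon/Siena test vector:

```c
static const struct efx_nic_register_test example_register_tests[] = {
	/* Hypothetical addresses/masks for illustration only */
	{ FR_AZ_RX_CFG,
	  EFX_OWORD32(0xfffffffe, 0x00017fff, 0x00000000, 0x00000000) },
	{ FR_AZ_TX_CFG,
	  EFX_OWORD32(0x7fff0037, 0x00000000, 0x00000000, 0x00000000) },
};

static int example_test_registers(struct efx_nic *efx)
{
	return efx_nic_test_registers(efx, example_register_tests,
				      ARRAY_SIZE(example_register_tests));
}
```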
188 | 188 | ||
189 | /************************************************************************** | 189 | /************************************************************************** |
190 | * | 190 | * |
191 | * Special buffer handling | 191 | * Special buffer handling |
192 | * Special buffers are used for event queues and the TX and RX | 192 | * Special buffers are used for event queues and the TX and RX |
193 | * descriptor rings. | 193 | * descriptor rings. |
194 | * | 194 | * |
195 | *************************************************************************/ | 195 | *************************************************************************/ |
196 | 196 | ||
197 | /* | 197 | /* |
198 | * Initialise a special buffer | 198 | * Initialise a special buffer |
199 | * | 199 | * |
200 | * This will define a buffer (previously allocated via | 200 | * This will define a buffer (previously allocated via |
201 | * efx_alloc_special_buffer()) in the buffer table, allowing | 201 | * efx_alloc_special_buffer()) in the buffer table, allowing |
202 | * it to be used for event queues, descriptor rings etc. | 202 | * it to be used for event queues, descriptor rings etc. |
203 | */ | 203 | */ |
204 | static void | 204 | static void |
205 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | 205 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) |
206 | { | 206 | { |
207 | efx_qword_t buf_desc; | 207 | efx_qword_t buf_desc; |
208 | int index; | 208 | int index; |
209 | dma_addr_t dma_addr; | 209 | dma_addr_t dma_addr; |
210 | int i; | 210 | int i; |
211 | 211 | ||
212 | EFX_BUG_ON_PARANOID(!buffer->addr); | 212 | EFX_BUG_ON_PARANOID(!buffer->addr); |
213 | 213 | ||
214 | /* Write buffer descriptors to NIC */ | 214 | /* Write buffer descriptors to NIC */ |
215 | for (i = 0; i < buffer->entries; i++) { | 215 | for (i = 0; i < buffer->entries; i++) { |
216 | index = buffer->index + i; | 216 | index = buffer->index + i; |
217 | dma_addr = buffer->dma_addr + (i * 4096); | 217 | dma_addr = buffer->dma_addr + (i * 4096); |
218 | netif_dbg(efx, probe, efx->net_dev, | 218 | netif_dbg(efx, probe, efx->net_dev, |
219 | "mapping special buffer %d at %llx\n", | 219 | "mapping special buffer %d at %llx\n", |
220 | index, (unsigned long long)dma_addr); | 220 | index, (unsigned long long)dma_addr); |
221 | EFX_POPULATE_QWORD_3(buf_desc, | 221 | EFX_POPULATE_QWORD_3(buf_desc, |
222 | FRF_AZ_BUF_ADR_REGION, 0, | 222 | FRF_AZ_BUF_ADR_REGION, 0, |
223 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, | 223 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, |
224 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); | 224 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); |
225 | efx_write_buf_tbl(efx, &buf_desc, index); | 225 | efx_write_buf_tbl(efx, &buf_desc, index); |
226 | } | 226 | } |
227 | } | 227 | } |
228 | 228 | ||
229 | /* Unmaps a buffer and clears the buffer table entries */ | 229 | /* Unmaps a buffer and clears the buffer table entries */ |
230 | static void | 230 | static void |
231 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | 231 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) |
232 | { | 232 | { |
233 | efx_oword_t buf_tbl_upd; | 233 | efx_oword_t buf_tbl_upd; |
234 | unsigned int start = buffer->index; | 234 | unsigned int start = buffer->index; |
235 | unsigned int end = (buffer->index + buffer->entries - 1); | 235 | unsigned int end = (buffer->index + buffer->entries - 1); |
236 | 236 | ||
237 | if (!buffer->entries) | 237 | if (!buffer->entries) |
238 | return; | 238 | return; |
239 | 239 | ||
240 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", | 240 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", |
241 | buffer->index, buffer->index + buffer->entries - 1); | 241 | buffer->index, buffer->index + buffer->entries - 1); |
242 | 242 | ||
243 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | 243 | EFX_POPULATE_OWORD_4(buf_tbl_upd, |
244 | FRF_AZ_BUF_UPD_CMD, 0, | 244 | FRF_AZ_BUF_UPD_CMD, 0, |
245 | FRF_AZ_BUF_CLR_CMD, 1, | 245 | FRF_AZ_BUF_CLR_CMD, 1, |
246 | FRF_AZ_BUF_CLR_END_ID, end, | 246 | FRF_AZ_BUF_CLR_END_ID, end, |
247 | FRF_AZ_BUF_CLR_START_ID, start); | 247 | FRF_AZ_BUF_CLR_START_ID, start); |
248 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); | 248 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); |
249 | } | 249 | } |
250 | 250 | ||
251 | /* | 251 | /* |
252 | * Allocate a new special buffer | 252 | * Allocate a new special buffer |
253 | * | 253 | * |
254 | * This allocates memory for a new buffer, clears it and allocates a | 254 | * This allocates memory for a new buffer, clears it and allocates a |
255 | * new buffer ID range. It does not write into the buffer table. | 255 | * new buffer ID range. It does not write into the buffer table. |
256 | * | 256 | * |
257 | * This call will allocate 4KB buffers, since 8KB buffers can't be | 257 | * This call will allocate 4KB buffers, since 8KB buffers can't be |
258 | * used for event queues and descriptor rings. | 258 | * used for event queues and descriptor rings. |
259 | */ | 259 | */ |
260 | static int efx_alloc_special_buffer(struct efx_nic *efx, | 260 | static int efx_alloc_special_buffer(struct efx_nic *efx, |
261 | struct efx_special_buffer *buffer, | 261 | struct efx_special_buffer *buffer, |
262 | unsigned int len) | 262 | unsigned int len) |
263 | { | 263 | { |
264 | len = ALIGN(len, EFX_BUF_SIZE); | 264 | len = ALIGN(len, EFX_BUF_SIZE); |
265 | 265 | ||
266 | buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, | 266 | buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, |
267 | &buffer->dma_addr, GFP_KERNEL); | 267 | &buffer->dma_addr, GFP_KERNEL); |
268 | if (!buffer->addr) | 268 | if (!buffer->addr) |
269 | return -ENOMEM; | 269 | return -ENOMEM; |
270 | buffer->len = len; | 270 | buffer->len = len; |
271 | buffer->entries = len / EFX_BUF_SIZE; | 271 | buffer->entries = len / EFX_BUF_SIZE; |
272 | BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); | 272 | BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); |
273 | 273 | ||
274 | /* All zeros is a potentially valid event so memset to 0xff */ | 274 | /* All zeros is a potentially valid event so memset to 0xff */ |
275 | memset(buffer->addr, 0xff, len); | 275 | memset(buffer->addr, 0xff, len); |
276 | 276 | ||
277 | /* Select new buffer ID */ | 277 | /* Select new buffer ID */ |
278 | buffer->index = efx->next_buffer_table; | 278 | buffer->index = efx->next_buffer_table; |
279 | efx->next_buffer_table += buffer->entries; | 279 | efx->next_buffer_table += buffer->entries; |
280 | 280 | ||
281 | netif_dbg(efx, probe, efx->net_dev, | 281 | netif_dbg(efx, probe, efx->net_dev, |
282 | "allocating special buffers %d-%d at %llx+%x " | 282 | "allocating special buffers %d-%d at %llx+%x " |
283 | "(virt %p phys %llx)\n", buffer->index, | 283 | "(virt %p phys %llx)\n", buffer->index, |
284 | buffer->index + buffer->entries - 1, | 284 | buffer->index + buffer->entries - 1, |
285 | (u64)buffer->dma_addr, len, | 285 | (u64)buffer->dma_addr, len, |
286 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | 286 | buffer->addr, (u64)virt_to_phys(buffer->addr)); |
287 | 287 | ||
288 | return 0; | 288 | return 0; |
289 | } | 289 | } |
290 | 290 | ||
291 | static void | 291 | static void |
292 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | 292 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) |
293 | { | 293 | { |
294 | if (!buffer->addr) | 294 | if (!buffer->addr) |
295 | return; | 295 | return; |
296 | 296 | ||
297 | netif_dbg(efx, hw, efx->net_dev, | 297 | netif_dbg(efx, hw, efx->net_dev, |
298 | "deallocating special buffers %d-%d at %llx+%x " | 298 | "deallocating special buffers %d-%d at %llx+%x " |
299 | "(virt %p phys %llx)\n", buffer->index, | 299 | "(virt %p phys %llx)\n", buffer->index, |
300 | buffer->index + buffer->entries - 1, | 300 | buffer->index + buffer->entries - 1, |
301 | (u64)buffer->dma_addr, buffer->len, | 301 | (u64)buffer->dma_addr, buffer->len, |
302 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | 302 | buffer->addr, (u64)virt_to_phys(buffer->addr)); |
303 | 303 | ||
304 | dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, | 304 | dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, |
305 | buffer->dma_addr); | 305 | buffer->dma_addr); |
306 | buffer->addr = NULL; | 306 | buffer->addr = NULL; |
307 | buffer->entries = 0; | 307 | buffer->entries = 0; |
308 | } | 308 | } |
309 | 309 | ||
310 | /************************************************************************** | 310 | /************************************************************************** |
311 | * | 311 | * |
312 | * Generic buffer handling | 312 | * Generic buffer handling |
313 | * These buffers are used for interrupt status and MAC stats | 313 | * These buffers are used for interrupt status and MAC stats |
314 | * | 314 | * |
315 | **************************************************************************/ | 315 | **************************************************************************/ |
316 | 316 | ||
317 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | 317 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, |
318 | unsigned int len) | 318 | unsigned int len) |
319 | { | 319 | { |
320 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | 320 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, |
321 | &buffer->dma_addr); | 321 | &buffer->dma_addr); |
322 | if (!buffer->addr) | 322 | if (!buffer->addr) |
323 | return -ENOMEM; | 323 | return -ENOMEM; |
324 | buffer->len = len; | 324 | buffer->len = len; |
325 | memset(buffer->addr, 0, len); | 325 | memset(buffer->addr, 0, len); |
326 | return 0; | 326 | return 0; |
327 | } | 327 | } |
328 | 328 | ||
329 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) | 329 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) |
330 | { | 330 | { |
331 | if (buffer->addr) { | 331 | if (buffer->addr) { |
332 | pci_free_consistent(efx->pci_dev, buffer->len, | 332 | pci_free_consistent(efx->pci_dev, buffer->len, |
333 | buffer->addr, buffer->dma_addr); | 333 | buffer->addr, buffer->dma_addr); |
334 | buffer->addr = NULL; | 334 | buffer->addr = NULL; |
335 | } | 335 | } |
336 | } | 336 | } |
337 | 337 | ||
338 | /************************************************************************** | 338 | /************************************************************************** |
339 | * | 339 | * |
340 | * TX path | 340 | * TX path |
341 | * | 341 | * |
342 | **************************************************************************/ | 342 | **************************************************************************/ |
343 | 343 | ||
344 | /* Returns a pointer to the specified transmit descriptor in the TX | 344 | /* Returns a pointer to the specified transmit descriptor in the TX |
345 | * descriptor queue belonging to the specified channel. | 345 | * descriptor queue belonging to the specified channel. |
346 | */ | 346 | */ |
347 | static inline efx_qword_t * | 347 | static inline efx_qword_t * |
348 | efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) | 348 | efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) |
349 | { | 349 | { |
350 | return ((efx_qword_t *) (tx_queue->txd.addr)) + index; | 350 | return ((efx_qword_t *) (tx_queue->txd.addr)) + index; |
351 | } | 351 | } |
352 | 352 | ||
353 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | 353 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ |
354 | static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) | 354 | static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) |
355 | { | 355 | { |
356 | unsigned write_ptr; | 356 | unsigned write_ptr; |
357 | efx_dword_t reg; | 357 | efx_dword_t reg; |
358 | 358 | ||
359 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | 359 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
360 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); | 360 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); |
361 | efx_writed_page(tx_queue->efx, ®, | 361 | efx_writed_page(tx_queue->efx, ®, |
362 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | 362 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); |
363 | } | 363 | } |
364 | 364 | ||
365 | 365 | ||
366 | /* For each entry inserted into the software descriptor ring, create a | 366 | /* For each entry inserted into the software descriptor ring, create a |
367 | * descriptor in the hardware TX descriptor ring (in host memory), and | 367 | * descriptor in the hardware TX descriptor ring (in host memory), and |
368 | * write a doorbell. | 368 | * write a doorbell. |
369 | */ | 369 | */ |
370 | void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | 370 | void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) |
371 | { | 371 | { |
372 | 372 | ||
373 | struct efx_tx_buffer *buffer; | 373 | struct efx_tx_buffer *buffer; |
374 | efx_qword_t *txd; | 374 | efx_qword_t *txd; |
375 | unsigned write_ptr; | 375 | unsigned write_ptr; |
376 | 376 | ||
377 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | 377 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); |
378 | 378 | ||
379 | do { | 379 | do { |
380 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | 380 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
381 | buffer = &tx_queue->buffer[write_ptr]; | 381 | buffer = &tx_queue->buffer[write_ptr]; |
382 | txd = efx_tx_desc(tx_queue, write_ptr); | 382 | txd = efx_tx_desc(tx_queue, write_ptr); |
383 | ++tx_queue->write_count; | 383 | ++tx_queue->write_count; |
384 | 384 | ||
385 | /* Create TX descriptor ring entry */ | 385 | /* Create TX descriptor ring entry */ |
386 | EFX_POPULATE_QWORD_4(*txd, | 386 | EFX_POPULATE_QWORD_4(*txd, |
387 | FSF_AZ_TX_KER_CONT, buffer->continuation, | 387 | FSF_AZ_TX_KER_CONT, buffer->continuation, |
388 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, | 388 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, |
389 | FSF_AZ_TX_KER_BUF_REGION, 0, | 389 | FSF_AZ_TX_KER_BUF_REGION, 0, |
390 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); | 390 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); |
391 | } while (tx_queue->write_count != tx_queue->insert_count); | 391 | } while (tx_queue->write_count != tx_queue->insert_count); |
392 | 392 | ||
393 | wmb(); /* Ensure descriptors are written before they are fetched */ | 393 | wmb(); /* Ensure descriptors are written before they are fetched */ |
394 | efx_notify_tx_desc(tx_queue); | 394 | efx_notify_tx_desc(tx_queue); |
395 | } | 395 | } |
396 | 396 | ||
397 | /* Allocate hardware resources for a TX queue */ | 397 | /* Allocate hardware resources for a TX queue */ |
398 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) | 398 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) |
399 | { | 399 | { |
400 | struct efx_nic *efx = tx_queue->efx; | 400 | struct efx_nic *efx = tx_queue->efx; |
401 | unsigned entries; | 401 | unsigned entries; |
402 | 402 | ||
403 | entries = tx_queue->ptr_mask + 1; | 403 | entries = tx_queue->ptr_mask + 1; |
404 | return efx_alloc_special_buffer(efx, &tx_queue->txd, | 404 | return efx_alloc_special_buffer(efx, &tx_queue->txd, |
405 | entries * sizeof(efx_qword_t)); | 405 | entries * sizeof(efx_qword_t)); |
406 | } | 406 | } |
407 | 407 | ||
408 | void efx_nic_init_tx(struct efx_tx_queue *tx_queue) | 408 | void efx_nic_init_tx(struct efx_tx_queue *tx_queue) |
409 | { | 409 | { |
410 | efx_oword_t tx_desc_ptr; | 410 | efx_oword_t tx_desc_ptr; |
411 | struct efx_nic *efx = tx_queue->efx; | 411 | struct efx_nic *efx = tx_queue->efx; |
412 | 412 | ||
413 | tx_queue->flushed = FLUSH_NONE; | 413 | tx_queue->flushed = FLUSH_NONE; |
414 | 414 | ||
415 | /* Pin TX descriptor ring */ | 415 | /* Pin TX descriptor ring */ |
416 | efx_init_special_buffer(efx, &tx_queue->txd); | 416 | efx_init_special_buffer(efx, &tx_queue->txd); |
417 | 417 | ||
418 | /* Push TX descriptor ring to card */ | 418 | /* Push TX descriptor ring to card */ |
419 | EFX_POPULATE_OWORD_10(tx_desc_ptr, | 419 | EFX_POPULATE_OWORD_10(tx_desc_ptr, |
420 | FRF_AZ_TX_DESCQ_EN, 1, | 420 | FRF_AZ_TX_DESCQ_EN, 1, |
421 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, | 421 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, |
422 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, | 422 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, |
423 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | 423 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, |
424 | FRF_AZ_TX_DESCQ_EVQ_ID, | 424 | FRF_AZ_TX_DESCQ_EVQ_ID, |
425 | tx_queue->channel->channel, | 425 | tx_queue->channel->channel, |
426 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, | 426 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, |
427 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, | 427 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, |
428 | FRF_AZ_TX_DESCQ_SIZE, | 428 | FRF_AZ_TX_DESCQ_SIZE, |
429 | __ffs(tx_queue->txd.entries), | 429 | __ffs(tx_queue->txd.entries), |
430 | FRF_AZ_TX_DESCQ_TYPE, 0, | 430 | FRF_AZ_TX_DESCQ_TYPE, 0, |
431 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); | 431 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); |
432 | 432 | ||
433 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | 433 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { |
434 | int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | 434 | int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; |
435 | EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); | 435 | EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); |
436 | EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, | 436 | EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, |
437 | !csum); | 437 | !csum); |
438 | } | 438 | } |
439 | 439 | ||
440 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | 440 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, |
441 | tx_queue->queue); | 441 | tx_queue->queue); |
442 | 442 | ||
443 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { | 443 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { |
444 | efx_oword_t reg; | 444 | efx_oword_t reg; |
445 | 445 | ||
446 | /* Only 128 bits in this register */ | 446 | /* Only 128 bits in this register */ |
447 | BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); | 447 | BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); |
448 | 448 | ||
449 | efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); | 449 | efx_reado(efx, ®, FR_AA_TX_CHKSM_CFG); |
450 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) | 450 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) |
451 | clear_bit_le(tx_queue->queue, (void *)®); | 451 | clear_bit_le(tx_queue->queue, (void *)®); |
452 | else | 452 | else |
453 | set_bit_le(tx_queue->queue, (void *)®); | 453 | set_bit_le(tx_queue->queue, (void *)®); |
454 | efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); | 454 | efx_writeo(efx, ®, FR_AA_TX_CHKSM_CFG); |
455 | } | 455 | } |
456 | } | 456 | } |
457 | 457 | ||
458 | static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) | 458 | static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) |
459 | { | 459 | { |
460 | struct efx_nic *efx = tx_queue->efx; | 460 | struct efx_nic *efx = tx_queue->efx; |
461 | efx_oword_t tx_flush_descq; | 461 | efx_oword_t tx_flush_descq; |
462 | 462 | ||
463 | tx_queue->flushed = FLUSH_PENDING; | 463 | tx_queue->flushed = FLUSH_PENDING; |
464 | 464 | ||
465 | /* Post a flush command */ | 465 | /* Post a flush command */ |
466 | EFX_POPULATE_OWORD_2(tx_flush_descq, | 466 | EFX_POPULATE_OWORD_2(tx_flush_descq, |
467 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, | 467 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, |
468 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); | 468 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); |
469 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); | 469 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); |
470 | } | 470 | } |
471 | 471 | ||
472 | void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) | 472 | void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) |
473 | { | 473 | { |
474 | struct efx_nic *efx = tx_queue->efx; | 474 | struct efx_nic *efx = tx_queue->efx; |
475 | efx_oword_t tx_desc_ptr; | 475 | efx_oword_t tx_desc_ptr; |
476 | 476 | ||
477 | /* The queue should have been flushed */ | 477 | /* The queue should have been flushed */ |
478 | WARN_ON(tx_queue->flushed != FLUSH_DONE); | 478 | WARN_ON(tx_queue->flushed != FLUSH_DONE); |
479 | 479 | ||
480 | /* Remove TX descriptor ring from card */ | 480 | /* Remove TX descriptor ring from card */ |
481 | EFX_ZERO_OWORD(tx_desc_ptr); | 481 | EFX_ZERO_OWORD(tx_desc_ptr); |
482 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | 482 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, |
483 | tx_queue->queue); | 483 | tx_queue->queue); |
484 | 484 | ||
485 | /* Unpin TX descriptor ring */ | 485 | /* Unpin TX descriptor ring */ |
486 | efx_fini_special_buffer(efx, &tx_queue->txd); | 486 | efx_fini_special_buffer(efx, &tx_queue->txd); |
487 | } | 487 | } |
488 | 488 | ||
489 | /* Free buffers backing TX queue */ | 489 | /* Free buffers backing TX queue */ |
490 | void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) | 490 | void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) |
491 | { | 491 | { |
492 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); | 492 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); |
493 | } | 493 | } |
494 | 494 | ||
495 | /************************************************************************** | 495 | /************************************************************************** |
496 | * | 496 | * |
497 | * RX path | 497 | * RX path |
498 | * | 498 | * |
499 | **************************************************************************/ | 499 | **************************************************************************/ |
500 | 500 | ||
501 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ | 501 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ |
502 | static inline efx_qword_t * | 502 | static inline efx_qword_t * |
503 | efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | 503 | efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) |
504 | { | 504 | { |
505 | return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; | 505 | return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; |
506 | } | 506 | } |
507 | 507 | ||
508 | /* This creates an entry in the RX descriptor queue */ | 508 | /* This creates an entry in the RX descriptor queue */ |
509 | static inline void | 509 | static inline void |
510 | efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) | 510 | efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) |
511 | { | 511 | { |
512 | struct efx_rx_buffer *rx_buf; | 512 | struct efx_rx_buffer *rx_buf; |
513 | efx_qword_t *rxd; | 513 | efx_qword_t *rxd; |
514 | 514 | ||
515 | rxd = efx_rx_desc(rx_queue, index); | 515 | rxd = efx_rx_desc(rx_queue, index); |
516 | rx_buf = efx_rx_buffer(rx_queue, index); | 516 | rx_buf = efx_rx_buffer(rx_queue, index); |
517 | EFX_POPULATE_QWORD_3(*rxd, | 517 | EFX_POPULATE_QWORD_3(*rxd, |
518 | FSF_AZ_RX_KER_BUF_SIZE, | 518 | FSF_AZ_RX_KER_BUF_SIZE, |
519 | rx_buf->len - | 519 | rx_buf->len - |
520 | rx_queue->efx->type->rx_buffer_padding, | 520 | rx_queue->efx->type->rx_buffer_padding, |
521 | FSF_AZ_RX_KER_BUF_REGION, 0, | 521 | FSF_AZ_RX_KER_BUF_REGION, 0, |
522 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | 522 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); |
523 | } | 523 | } |
524 | 524 | ||
525 | /* This writes to the RX_DESC_WPTR register for the specified receive | 525 | /* This writes to the RX_DESC_WPTR register for the specified receive |
526 | * descriptor ring. | 526 | * descriptor ring. |
527 | */ | 527 | */ |
528 | void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) | 528 | void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) |
529 | { | 529 | { |
530 | struct efx_nic *efx = rx_queue->efx; | 530 | struct efx_nic *efx = rx_queue->efx; |
531 | efx_dword_t reg; | 531 | efx_dword_t reg; |
532 | unsigned write_ptr; | 532 | unsigned write_ptr; |
533 | 533 | ||
534 | while (rx_queue->notified_count != rx_queue->added_count) { | 534 | while (rx_queue->notified_count != rx_queue->added_count) { |
535 | efx_build_rx_desc( | 535 | efx_build_rx_desc( |
536 | rx_queue, | 536 | rx_queue, |
537 | rx_queue->notified_count & rx_queue->ptr_mask); | 537 | rx_queue->notified_count & rx_queue->ptr_mask); |
538 | ++rx_queue->notified_count; | 538 | ++rx_queue->notified_count; |
539 | } | 539 | } |
540 | 540 | ||
541 | wmb(); | 541 | wmb(); |
542 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; | 542 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; |
543 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); | 543 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); |
544 | efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, | 544 | efx_writed_page(efx, ®, FR_AZ_RX_DESC_UPD_DWORD_P0, |
545 | efx_rx_queue_index(rx_queue)); | 545 | efx_rx_queue_index(rx_queue)); |
546 | } | 546 | } |
547 | 547 | ||
548 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) | 548 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) |
549 | { | 549 | { |
550 | struct efx_nic *efx = rx_queue->efx; | 550 | struct efx_nic *efx = rx_queue->efx; |
551 | unsigned entries; | 551 | unsigned entries; |
552 | 552 | ||
553 | entries = rx_queue->ptr_mask + 1; | 553 | entries = rx_queue->ptr_mask + 1; |
554 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, | 554 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, |
555 | entries * sizeof(efx_qword_t)); | 555 | entries * sizeof(efx_qword_t)); |
556 | } | 556 | } |
557 | 557 | ||
558 | void efx_nic_init_rx(struct efx_rx_queue *rx_queue) | 558 | void efx_nic_init_rx(struct efx_rx_queue *rx_queue) |
559 | { | 559 | { |
560 | efx_oword_t rx_desc_ptr; | 560 | efx_oword_t rx_desc_ptr; |
561 | struct efx_nic *efx = rx_queue->efx; | 561 | struct efx_nic *efx = rx_queue->efx; |
562 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; | 562 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; |
563 | bool iscsi_digest_en = is_b0; | 563 | bool iscsi_digest_en = is_b0; |
564 | 564 | ||
565 | netif_dbg(efx, hw, efx->net_dev, | 565 | netif_dbg(efx, hw, efx->net_dev, |
566 | "RX queue %d ring in special buffers %d-%d\n", | 566 | "RX queue %d ring in special buffers %d-%d\n", |
567 | efx_rx_queue_index(rx_queue), rx_queue->rxd.index, | 567 | efx_rx_queue_index(rx_queue), rx_queue->rxd.index, |
568 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | 568 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); |
569 | 569 | ||
570 | rx_queue->flushed = FLUSH_NONE; | 570 | rx_queue->flushed = FLUSH_NONE; |
571 | 571 | ||
572 | /* Pin RX descriptor ring */ | 572 | /* Pin RX descriptor ring */ |
573 | efx_init_special_buffer(efx, &rx_queue->rxd); | 573 | efx_init_special_buffer(efx, &rx_queue->rxd); |
574 | 574 | ||
575 | /* Push RX descriptor ring to card */ | 575 | /* Push RX descriptor ring to card */ |
576 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | 576 | EFX_POPULATE_OWORD_10(rx_desc_ptr, |
577 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, | 577 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, |
578 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, | 578 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, |
579 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | 579 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, |
580 | FRF_AZ_RX_DESCQ_EVQ_ID, | 580 | FRF_AZ_RX_DESCQ_EVQ_ID, |
581 | efx_rx_queue_channel(rx_queue)->channel, | 581 | efx_rx_queue_channel(rx_queue)->channel, |
582 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, | 582 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, |
583 | FRF_AZ_RX_DESCQ_LABEL, | 583 | FRF_AZ_RX_DESCQ_LABEL, |
584 | efx_rx_queue_index(rx_queue), | 584 | efx_rx_queue_index(rx_queue), |
585 | FRF_AZ_RX_DESCQ_SIZE, | 585 | FRF_AZ_RX_DESCQ_SIZE, |
586 | __ffs(rx_queue->rxd.entries), | 586 | __ffs(rx_queue->rxd.entries), |
587 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , | 587 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , |
588 | /* For >=B0 this is scatter so disable */ | 588 | /* For >=B0 this is scatter so disable */ |
589 | FRF_AZ_RX_DESCQ_JUMBO, !is_b0, | 589 | FRF_AZ_RX_DESCQ_JUMBO, !is_b0, |
590 | FRF_AZ_RX_DESCQ_EN, 1); | 590 | FRF_AZ_RX_DESCQ_EN, 1); |
591 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | 591 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, |
592 | efx_rx_queue_index(rx_queue)); | 592 | efx_rx_queue_index(rx_queue)); |
593 | } | 593 | } |
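[Editor's note] The FRF_AZ_RX_DESCQ_SIZE field above takes the log2 of the ring size rather than the size itself, which is why the driver writes __ffs(rx_queue->rxd.entries): ring sizes are powers of two, and for a power of two the index of the lowest set bit equals its log2. A minimal userspace sketch of that identity (illustrative names, not driver code):

```c
/* Userspace sketch, not driver code: for a power-of-two ring size, the
 * index of the lowest set bit equals log2(size), which is the value the
 * hardware queue-size field expects. */
#include <assert.h>
#include <stdio.h>

static unsigned ffs_log2(unsigned v)
{
	unsigned bit = 0;

	assert(v && (v & (v - 1)) == 0);	/* must be a power of two */
	while (!(v & 1u)) {
		v >>= 1;
		++bit;
	}
	return bit;				/* == log2(original v) */
}

int main(void)
{
	/* A 512-entry ring encodes as 9, a 4096-entry ring as 12. */
	printf("%u %u\n", ffs_log2(512), ffs_log2(4096));
	return 0;
}
```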
594 | 594 | ||
595 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) | 595 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) |
596 | { | 596 | { |
597 | struct efx_nic *efx = rx_queue->efx; | 597 | struct efx_nic *efx = rx_queue->efx; |
598 | efx_oword_t rx_flush_descq; | 598 | efx_oword_t rx_flush_descq; |
599 | 599 | ||
600 | rx_queue->flushed = FLUSH_PENDING; | 600 | rx_queue->flushed = FLUSH_PENDING; |
601 | 601 | ||
602 | /* Post a flush command */ | 602 | /* Post a flush command */ |
603 | EFX_POPULATE_OWORD_2(rx_flush_descq, | 603 | EFX_POPULATE_OWORD_2(rx_flush_descq, |
604 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, | 604 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, |
605 | FRF_AZ_RX_FLUSH_DESCQ, | 605 | FRF_AZ_RX_FLUSH_DESCQ, |
606 | efx_rx_queue_index(rx_queue)); | 606 | efx_rx_queue_index(rx_queue)); |
607 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); | 607 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); |
608 | } | 608 | } |
609 | 609 | ||
610 | void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) | 610 | void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) |
611 | { | 611 | { |
612 | efx_oword_t rx_desc_ptr; | 612 | efx_oword_t rx_desc_ptr; |
613 | struct efx_nic *efx = rx_queue->efx; | 613 | struct efx_nic *efx = rx_queue->efx; |
614 | 614 | ||
615 | /* The queue should already have been flushed */ | 615 | /* The queue should already have been flushed */ |
616 | WARN_ON(rx_queue->flushed != FLUSH_DONE); | 616 | WARN_ON(rx_queue->flushed != FLUSH_DONE); |
617 | 617 | ||
618 | /* Remove RX descriptor ring from card */ | 618 | /* Remove RX descriptor ring from card */ |
619 | EFX_ZERO_OWORD(rx_desc_ptr); | 619 | EFX_ZERO_OWORD(rx_desc_ptr); |
620 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | 620 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, |
621 | efx_rx_queue_index(rx_queue)); | 621 | efx_rx_queue_index(rx_queue)); |
622 | 622 | ||
623 | /* Unpin RX descriptor ring */ | 623 | /* Unpin RX descriptor ring */ |
624 | efx_fini_special_buffer(efx, &rx_queue->rxd); | 624 | efx_fini_special_buffer(efx, &rx_queue->rxd); |
625 | } | 625 | } |
626 | 626 | ||
627 | /* Free buffers backing RX queue */ | 627 | /* Free buffers backing RX queue */ |
628 | void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) | 628 | void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) |
629 | { | 629 | { |
630 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | 630 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); |
631 | } | 631 | } |
632 | 632 | ||
633 | /************************************************************************** | 633 | /************************************************************************** |
634 | * | 634 | * |
635 | * Event queue processing | 635 | * Event queue processing |
636 | * Event queues are processed by per-channel tasklets. | 636 | * Event queues are processed by per-channel tasklets. |
637 | * | 637 | * |
638 | **************************************************************************/ | 638 | **************************************************************************/ |
639 | 639 | ||
640 | /* Update a channel's event queue's read pointer (RPTR) register | 640 | /* Update a channel's event queue's read pointer (RPTR) register |
641 | * | 641 | * |
642 | * This writes the EVQ_RPTR_REG register for the specified channel's | 642 | * This writes the EVQ_RPTR_REG register for the specified channel's |
643 | * event queue. | 643 | * event queue. |
644 | */ | 644 | */ |
645 | void efx_nic_eventq_read_ack(struct efx_channel *channel) | 645 | void efx_nic_eventq_read_ack(struct efx_channel *channel) |
646 | { | 646 | { |
647 | efx_dword_t reg; | 647 | efx_dword_t reg; |
648 | struct efx_nic *efx = channel->efx; | 648 | struct efx_nic *efx = channel->efx; |
649 | 649 | ||
650 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); | 650 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr); |
651 | efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, | 651 | efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, |
652 | channel->channel); | 652 | channel->channel); |
653 | } | 653 | } |
654 | 654 | ||
655 | /* Use HW to insert a SW defined event */ | 655 | /* Use HW to insert a SW defined event */ |
656 | void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) | 656 | static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) |
657 | { | 657 | { |
658 | efx_oword_t drv_ev_reg; | 658 | efx_oword_t drv_ev_reg; |
659 | 659 | ||
660 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || | 660 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || |
661 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); | 661 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); |
662 | drv_ev_reg.u32[0] = event->u32[0]; | 662 | drv_ev_reg.u32[0] = event->u32[0]; |
663 | drv_ev_reg.u32[1] = event->u32[1]; | 663 | drv_ev_reg.u32[1] = event->u32[1]; |
664 | drv_ev_reg.u32[2] = 0; | 664 | drv_ev_reg.u32[2] = 0; |
665 | drv_ev_reg.u32[3] = 0; | 665 | drv_ev_reg.u32[3] = 0; |
666 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); | 666 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); |
667 | efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); | 667 | efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); |
668 | } | 668 | } |
669 | 669 | ||
670 | /* Handle a transmit completion event | 670 | /* Handle a transmit completion event |
671 | * | 671 | * |
672 | * The NIC batches TX completion events; the message we receive is of | 672 | * The NIC batches TX completion events; the message we receive is of |
673 | * the form "complete all TX events up to this index". | 673 | * the form "complete all TX events up to this index". |
674 | */ | 674 | */ |
675 | static int | 675 | static int |
676 | efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | 676 | efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) |
677 | { | 677 | { |
678 | unsigned int tx_ev_desc_ptr; | 678 | unsigned int tx_ev_desc_ptr; |
679 | unsigned int tx_ev_q_label; | 679 | unsigned int tx_ev_q_label; |
680 | struct efx_tx_queue *tx_queue; | 680 | struct efx_tx_queue *tx_queue; |
681 | struct efx_nic *efx = channel->efx; | 681 | struct efx_nic *efx = channel->efx; |
682 | int tx_packets = 0; | 682 | int tx_packets = 0; |
683 | 683 | ||
684 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | 684 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
685 | /* Transmit completion */ | 685 | /* Transmit completion */ |
686 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); | 686 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); |
687 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | 687 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); |
688 | tx_queue = efx_channel_get_tx_queue( | 688 | tx_queue = efx_channel_get_tx_queue( |
689 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | 689 | channel, tx_ev_q_label % EFX_TXQ_TYPES); |
690 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & | 690 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & |
691 | tx_queue->ptr_mask); | 691 | tx_queue->ptr_mask); |
692 | channel->irq_mod_score += tx_packets; | 692 | channel->irq_mod_score += tx_packets; |
693 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | 693 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); |
694 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { | 694 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { |
695 | /* Rewrite the FIFO write pointer */ | 695 | /* Rewrite the FIFO write pointer */ |
696 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | 696 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); |
697 | tx_queue = efx_channel_get_tx_queue( | 697 | tx_queue = efx_channel_get_tx_queue( |
698 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | 698 | channel, tx_ev_q_label % EFX_TXQ_TYPES); |
699 | 699 | ||
700 | if (efx_dev_registered(efx)) | 700 | if (efx_dev_registered(efx)) |
701 | netif_tx_lock(efx->net_dev); | 701 | netif_tx_lock(efx->net_dev); |
702 | efx_notify_tx_desc(tx_queue); | 702 | efx_notify_tx_desc(tx_queue); |
703 | if (efx_dev_registered(efx)) | 703 | if (efx_dev_registered(efx)) |
704 | netif_tx_unlock(efx->net_dev); | 704 | netif_tx_unlock(efx->net_dev); |
705 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && | 705 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && |
706 | EFX_WORKAROUND_10727(efx)) { | 706 | EFX_WORKAROUND_10727(efx)) { |
707 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | 707 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); |
708 | } else { | 708 | } else { |
709 | netif_err(efx, tx_err, efx->net_dev, | 709 | netif_err(efx, tx_err, efx->net_dev, |
710 | "channel %d unexpected TX event " | 710 | "channel %d unexpected TX event " |
711 | EFX_QWORD_FMT"\n", channel->channel, | 711 | EFX_QWORD_FMT"\n", channel->channel, |
712 | EFX_QWORD_VAL(*event)); | 712 | EFX_QWORD_VAL(*event)); |
713 | } | 713 | } |
714 | 714 | ||
715 | return tx_packets; | 715 | return tx_packets; |
716 | } | 716 | } |
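[Editor's note] The masked subtraction above is what makes batched completions work: indices are kept modulo the power-of-two ring size, so the count of newly completed descriptors is correct even when the descriptor pointer has wrapped past zero. A small standalone sketch of the arithmetic, with illustrative numbers:

```c
/* Sketch of the batched-completion arithmetic, assuming a power-of-two
 * ring (mask = size - 1). Not the driver's API, just the idea. */
#include <stdio.h>

int main(void)
{
	unsigned mask = 1024 - 1;	/* 1024-entry TX ring */
	unsigned read_count = 1020;	/* driver's next expected index */
	unsigned desc_ptr = 4;		/* "complete up to" from the event */
	unsigned completed = (desc_ptr - read_count) & mask;

	/* Entries 1020..1023 and 0..3 have completed across the wrap. */
	printf("completed %u descriptors\n", completed);	/* prints 8 */
	return 0;
}
```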
717 | 717 | ||
718 | /* Detect errors included in the rx_evt_pkt_ok bit. */ | 718 | /* Detect errors included in the rx_evt_pkt_ok bit. */ |
719 | static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | 719 | static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, |
720 | const efx_qword_t *event, | 720 | const efx_qword_t *event, |
721 | bool *rx_ev_pkt_ok, | 721 | bool *rx_ev_pkt_ok, |
722 | bool *discard) | 722 | bool *discard) |
723 | { | 723 | { |
724 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | 724 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
725 | struct efx_nic *efx = rx_queue->efx; | 725 | struct efx_nic *efx = rx_queue->efx; |
726 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | 726 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; |
727 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | 727 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; |
728 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | 728 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; |
729 | bool rx_ev_other_err, rx_ev_pause_frm; | 729 | bool rx_ev_other_err, rx_ev_pause_frm; |
730 | bool rx_ev_hdr_type, rx_ev_mcast_pkt; | 730 | bool rx_ev_hdr_type, rx_ev_mcast_pkt; |
731 | unsigned rx_ev_pkt_type; | 731 | unsigned rx_ev_pkt_type; |
732 | 732 | ||
733 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | 733 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); |
734 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | 734 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); |
735 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); | 735 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); |
736 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); | 736 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); |
737 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | 737 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, |
738 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); | 738 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); |
739 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | 739 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, |
740 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); | 740 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); |
741 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | 741 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, |
742 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); | 742 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); |
743 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); | 743 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); |
744 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); | 744 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); |
745 | rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? | 745 | rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? |
746 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); | 746 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); |
747 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); | 747 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); |
748 | 748 | ||
749 | /* Every error apart from tobe_disc and pause_frm */ | 749 | /* Every error apart from tobe_disc and pause_frm */ |
750 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | 750 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | |
751 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | 751 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | |
752 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | 752 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); |
753 | 753 | ||
754 | /* Count errors that are not in MAC stats. Ignore expected | 754 | /* Count errors that are not in MAC stats. Ignore expected |
755 | * checksum errors during self-test. */ | 755 | * checksum errors during self-test. */ |
756 | if (rx_ev_frm_trunc) | 756 | if (rx_ev_frm_trunc) |
757 | ++channel->n_rx_frm_trunc; | 757 | ++channel->n_rx_frm_trunc; |
758 | else if (rx_ev_tobe_disc) | 758 | else if (rx_ev_tobe_disc) |
759 | ++channel->n_rx_tobe_disc; | 759 | ++channel->n_rx_tobe_disc; |
760 | else if (!efx->loopback_selftest) { | 760 | else if (!efx->loopback_selftest) { |
761 | if (rx_ev_ip_hdr_chksum_err) | 761 | if (rx_ev_ip_hdr_chksum_err) |
762 | ++channel->n_rx_ip_hdr_chksum_err; | 762 | ++channel->n_rx_ip_hdr_chksum_err; |
763 | else if (rx_ev_tcp_udp_chksum_err) | 763 | else if (rx_ev_tcp_udp_chksum_err) |
764 | ++channel->n_rx_tcp_udp_chksum_err; | 764 | ++channel->n_rx_tcp_udp_chksum_err; |
765 | } | 765 | } |
766 | 766 | ||
767 | /* The frame must be discarded if any of these are true. */ | 767 | /* The frame must be discarded if any of these are true. */ |
768 | *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | | 768 | *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | |
769 | rx_ev_tobe_disc | rx_ev_pause_frm); | 769 | rx_ev_tobe_disc | rx_ev_pause_frm); |
770 | 770 | ||
771 | /* TOBE_DISC is expected on unicast mismatches; don't print out an | 771 | /* TOBE_DISC is expected on unicast mismatches; don't print out an |
772 | * error message. FRM_TRUNC indicates RXDP dropped the packet due | 772 | * error message. FRM_TRUNC indicates RXDP dropped the packet due |
773 | * to a FIFO overflow. | 773 | * to a FIFO overflow. |
774 | */ | 774 | */ |
775 | #ifdef EFX_ENABLE_DEBUG | 775 | #ifdef EFX_ENABLE_DEBUG |
776 | if (rx_ev_other_err && net_ratelimit()) { | 776 | if (rx_ev_other_err && net_ratelimit()) { |
777 | netif_dbg(efx, rx_err, efx->net_dev, | 777 | netif_dbg(efx, rx_err, efx->net_dev, |
778 | " RX queue %d unexpected RX event " | 778 | " RX queue %d unexpected RX event " |
779 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", | 779 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", |
780 | efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), | 780 | efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), |
781 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | 781 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", |
782 | rx_ev_ip_hdr_chksum_err ? | 782 | rx_ev_ip_hdr_chksum_err ? |
783 | " [IP_HDR_CHKSUM_ERR]" : "", | 783 | " [IP_HDR_CHKSUM_ERR]" : "", |
784 | rx_ev_tcp_udp_chksum_err ? | 784 | rx_ev_tcp_udp_chksum_err ? |
785 | " [TCP_UDP_CHKSUM_ERR]" : "", | 785 | " [TCP_UDP_CHKSUM_ERR]" : "", |
786 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | 786 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", |
787 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | 787 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", |
788 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | 788 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", |
789 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | 789 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", |
790 | rx_ev_pause_frm ? " [PAUSE]" : ""); | 790 | rx_ev_pause_frm ? " [PAUSE]" : ""); |
791 | } | 791 | } |
792 | #endif | 792 | #endif |
793 | } | 793 | } |
794 | 794 | ||
795 | /* Handle receive events that are not in-order. */ | 795 | /* Handle receive events that are not in-order. */ |
796 | static void | 796 | static void |
797 | efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | 797 | efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) |
798 | { | 798 | { |
799 | struct efx_nic *efx = rx_queue->efx; | 799 | struct efx_nic *efx = rx_queue->efx; |
800 | unsigned expected, dropped; | 800 | unsigned expected, dropped; |
801 | 801 | ||
802 | expected = rx_queue->removed_count & rx_queue->ptr_mask; | 802 | expected = rx_queue->removed_count & rx_queue->ptr_mask; |
803 | dropped = (index - expected) & rx_queue->ptr_mask; | 803 | dropped = (index - expected) & rx_queue->ptr_mask; |
804 | netif_info(efx, rx_err, efx->net_dev, | 804 | netif_info(efx, rx_err, efx->net_dev, |
805 | "dropped %d events (index=%d expected=%d)\n", | 805 | "dropped %d events (index=%d expected=%d)\n", |
806 | dropped, index, expected); | 806 | dropped, index, expected); |
807 | 807 | ||
808 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | 808 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? |
809 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | 809 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); |
810 | } | 810 | } |
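[Editor's note] As a worked example of the drop computation above (illustrative numbers, not from the source): with ptr_mask = 4095, expected = 4090 and index = 10, dropped = (10 - 4090) & 4095 = 16, so the masked subtraction counts the missed events correctly even though the ring wrapped between the two indices.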
811 | 811 | ||
812 | /* Handle a packet received event | 812 | /* Handle a packet received event |
813 | * | 813 | * |
814 | * The NIC gives a "discard" flag if it's a unicast packet with the | 814 | * The NIC gives a "discard" flag if it's a unicast packet with the |
815 | * wrong destination address | 815 | * wrong destination address |
816 | * Also "is multicast" and "matches multicast filter" flags can be used to | 816 | * Also "is multicast" and "matches multicast filter" flags can be used to |
817 | * discard non-matching multicast packets. | 817 | * discard non-matching multicast packets. |
818 | */ | 818 | */ |
819 | static void | 819 | static void |
820 | efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | 820 | efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) |
821 | { | 821 | { |
822 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; | 822 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; |
823 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; | 823 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; |
824 | unsigned expected_ptr; | 824 | unsigned expected_ptr; |
825 | bool rx_ev_pkt_ok, discard = false, checksummed; | 825 | bool rx_ev_pkt_ok, discard = false, checksummed; |
826 | struct efx_rx_queue *rx_queue; | 826 | struct efx_rx_queue *rx_queue; |
827 | struct efx_nic *efx = channel->efx; | 827 | struct efx_nic *efx = channel->efx; |
828 | 828 | ||
829 | /* Basic packet information */ | 829 | /* Basic packet information */ |
830 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); | 830 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); |
831 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); | 831 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); |
832 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | 832 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); |
833 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); | 833 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); |
834 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); | 834 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); |
835 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != | 835 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != |
836 | channel->channel); | 836 | channel->channel); |
837 | 837 | ||
838 | rx_queue = efx_channel_get_rx_queue(channel); | 838 | rx_queue = efx_channel_get_rx_queue(channel); |
839 | 839 | ||
840 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); | 840 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); |
841 | expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; | 841 | expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; |
842 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) | 842 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) |
843 | efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); | 843 | efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); |
844 | 844 | ||
845 | if (likely(rx_ev_pkt_ok)) { | 845 | if (likely(rx_ev_pkt_ok)) { |
846 | /* If packet is marked as OK and packet type is TCP/IP or | 846 | /* If packet is marked as OK and packet type is TCP/IP or |
847 | * UDP/IP, then we can rely on the hardware checksum. | 847 | * UDP/IP, then we can rely on the hardware checksum. |
848 | */ | 848 | */ |
849 | checksummed = | 849 | checksummed = |
850 | likely(efx->rx_checksum_enabled) && | 850 | likely(efx->rx_checksum_enabled) && |
851 | (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || | 851 | (rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || |
852 | rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP); | 852 | rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP); |
853 | } else { | 853 | } else { |
854 | efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); | 854 | efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); |
855 | checksummed = false; | 855 | checksummed = false; |
856 | } | 856 | } |
857 | 857 | ||
858 | /* Detect multicast packets that didn't match the filter */ | 858 | /* Detect multicast packets that didn't match the filter */ |
859 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | 859 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); |
860 | if (rx_ev_mcast_pkt) { | 860 | if (rx_ev_mcast_pkt) { |
861 | unsigned int rx_ev_mcast_hash_match = | 861 | unsigned int rx_ev_mcast_hash_match = |
862 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); | 862 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); |
863 | 863 | ||
864 | if (unlikely(!rx_ev_mcast_hash_match)) { | 864 | if (unlikely(!rx_ev_mcast_hash_match)) { |
865 | ++channel->n_rx_mcast_mismatch; | 865 | ++channel->n_rx_mcast_mismatch; |
866 | discard = true; | 866 | discard = true; |
867 | } | 867 | } |
868 | } | 868 | } |
869 | 869 | ||
870 | channel->irq_mod_score += 2; | 870 | channel->irq_mod_score += 2; |
871 | 871 | ||
872 | /* Handle received packet */ | 872 | /* Handle received packet */ |
873 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, | 873 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, |
874 | checksummed, discard); | 874 | checksummed, discard); |
875 | } | 875 | } |
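[Editor's note] The checksummed decision above trusts the hardware only when receive checksum offload is enabled and the NIC parsed the packet as TCP/IP or UDP/IP; every other packet falls back to software checksumming. A standalone sketch of that decision table (hypothetical names, not the driver's types):

```c
/* Illustrative decision table (userspace sketch, hypothetical names):
 * the hardware checksum result is trusted only for recognised TCP/IP
 * or UDP/IP packets and only while offload is enabled. */
#include <stdbool.h>
#include <stdio.h>

enum hdr_type { HDR_OTHER, HDR_TCP_IP, HDR_UDP_IP };

static bool rx_checksummed(bool offload_enabled, enum hdr_type t)
{
	return offload_enabled && (t == HDR_TCP_IP || t == HDR_UDP_IP);
}

int main(void)
{
	printf("%d %d %d\n",
	       rx_checksummed(true, HDR_TCP_IP),	/* 1: trust hardware */
	       rx_checksummed(true, HDR_OTHER),		/* 0: software path */
	       rx_checksummed(false, HDR_UDP_IP));	/* 0: offload off */
	return 0;
}
```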
876 | 876 | ||
877 | static void | 877 | static void |
878 | efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) | 878 | efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) |
879 | { | 879 | { |
880 | struct efx_nic *efx = channel->efx; | 880 | struct efx_nic *efx = channel->efx; |
881 | unsigned code; | 881 | unsigned code; |
882 | 882 | ||
883 | code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); | 883 | code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); |
884 | if (code == EFX_CHANNEL_MAGIC_TEST(channel)) | 884 | if (code == EFX_CHANNEL_MAGIC_TEST(channel)) |
885 | ++channel->magic_count; | 885 | ++channel->magic_count; |
886 | else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) | 886 | else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) |
887 | /* The queue must be empty, so we won't receive any rx | 887 | /* The queue must be empty, so we won't receive any rx |
888 | * events, so efx_process_channel() won't refill the | 888 | * events, so efx_process_channel() won't refill the |
889 | * queue. Refill it here */ | 889 | * queue. Refill it here */ |
890 | efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); | 890 | efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); |
891 | else | 891 | else |
892 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " | 892 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " |
893 | "generated event "EFX_QWORD_FMT"\n", | 893 | "generated event "EFX_QWORD_FMT"\n", |
894 | channel->channel, EFX_QWORD_VAL(*event)); | 894 | channel->channel, EFX_QWORD_VAL(*event)); |
895 | } | 895 | } |
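[Editor's note] EFX_CHANNEL_MAGIC_TEST() and EFX_CHANNEL_MAGIC_FILL() expand to per-channel magic values, so each channel recognises only its own software-generated events; their exact encoding is not shown in this hunk. A sketch of the dispatch idea, assuming for illustration only a (type << 8 | channel) layout:

```c
/* Sketch only: the (type << 8 | channel) layout below is an assumption
 * for illustration, not the driver's actual magic encoding. */
#include <stdio.h>

#define MAGIC_TEST(ch)	((0x1u << 8) | (ch))
#define MAGIC_FILL(ch)	((0x2u << 8) | (ch))

static const char *classify(unsigned channel, unsigned code)
{
	if (code == MAGIC_TEST(channel))
		return "test event";
	if (code == MAGIC_FILL(channel))
		return "fill event";
	return "unknown generated event";
}

int main(void)
{
	printf("%s\n", classify(3, MAGIC_FILL(3)));	/* fill event */
	printf("%s\n", classify(3, MAGIC_TEST(7)));	/* wrong channel */
	return 0;
}
```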
896 | 896 | ||
897 | /* Global events are basically PHY events */ | 897 | /* Global events are basically PHY events */ |
898 | static void | 898 | static void |
899 | efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) | 899 | efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event) |
900 | { | 900 | { |
901 | struct efx_nic *efx = channel->efx; | 901 | struct efx_nic *efx = channel->efx; |
902 | bool handled = false; | 902 | bool handled = false; |
903 | 903 | ||
904 | if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || | 904 | if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || |
905 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || | 905 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || |
906 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) { | 906 | EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) { |
907 | /* Ignored */ | 907 | /* Ignored */ |
908 | handled = true; | 908 | handled = true; |
909 | } | 909 | } |
910 | 910 | ||
911 | if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) && | 911 | if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) && |
912 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { | 912 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { |
913 | efx->xmac_poll_required = true; | 913 | efx->xmac_poll_required = true; |
914 | handled = true; | 914 | handled = true; |
915 | } | 915 | } |
916 | 916 | ||
917 | if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? | 917 | if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? |
918 | EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : | 918 | EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : |
919 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { | 919 | EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { |
920 | netif_err(efx, rx_err, efx->net_dev, | 920 | netif_err(efx, rx_err, efx->net_dev, |
921 | "channel %d seen global RX_RESET event. Resetting.\n", | 921 | "channel %d seen global RX_RESET event. Resetting.\n", |
922 | channel->channel); | 922 | channel->channel); |
923 | 923 | ||
924 | atomic_inc(&efx->rx_reset); | 924 | atomic_inc(&efx->rx_reset); |
925 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? | 925 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? |
926 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | 926 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); |
927 | handled = true; | 927 | handled = true; |
928 | } | 928 | } |
929 | 929 | ||
930 | if (!handled) | 930 | if (!handled) |
931 | netif_err(efx, hw, efx->net_dev, | 931 | netif_err(efx, hw, efx->net_dev, |
932 | "channel %d unknown global event " | 932 | "channel %d unknown global event " |
933 | EFX_QWORD_FMT "\n", channel->channel, | 933 | EFX_QWORD_FMT "\n", channel->channel, |
934 | EFX_QWORD_VAL(*event)); | 934 | EFX_QWORD_VAL(*event)); |
935 | } | 935 | } |
936 | 936 | ||
937 | static void | 937 | static void |
938 | efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | 938 | efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) |
939 | { | 939 | { |
940 | struct efx_nic *efx = channel->efx; | 940 | struct efx_nic *efx = channel->efx; |
941 | unsigned int ev_sub_code; | 941 | unsigned int ev_sub_code; |
942 | unsigned int ev_sub_data; | 942 | unsigned int ev_sub_data; |
943 | 943 | ||
944 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); | 944 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); |
945 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | 945 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); |
946 | 946 | ||
947 | switch (ev_sub_code) { | 947 | switch (ev_sub_code) { |
948 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: | 948 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: |
949 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", | 949 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", |
950 | channel->channel, ev_sub_data); | 950 | channel->channel, ev_sub_data); |
951 | break; | 951 | break; |
952 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: | 952 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: |
953 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", | 953 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", |
954 | channel->channel, ev_sub_data); | 954 | channel->channel, ev_sub_data); |
955 | break; | 955 | break; |
956 | case FSE_AZ_EVQ_INIT_DONE_EV: | 956 | case FSE_AZ_EVQ_INIT_DONE_EV: |
957 | netif_dbg(efx, hw, efx->net_dev, | 957 | netif_dbg(efx, hw, efx->net_dev, |
958 | "channel %d EVQ %d initialised\n", | 958 | "channel %d EVQ %d initialised\n", |
959 | channel->channel, ev_sub_data); | 959 | channel->channel, ev_sub_data); |
960 | break; | 960 | break; |
961 | case FSE_AZ_SRM_UPD_DONE_EV: | 961 | case FSE_AZ_SRM_UPD_DONE_EV: |
962 | netif_vdbg(efx, hw, efx->net_dev, | 962 | netif_vdbg(efx, hw, efx->net_dev, |
963 | "channel %d SRAM update done\n", channel->channel); | 963 | "channel %d SRAM update done\n", channel->channel); |
964 | break; | 964 | break; |
965 | case FSE_AZ_WAKE_UP_EV: | 965 | case FSE_AZ_WAKE_UP_EV: |
966 | netif_vdbg(efx, hw, efx->net_dev, | 966 | netif_vdbg(efx, hw, efx->net_dev, |
967 | "channel %d RXQ %d wakeup event\n", | 967 | "channel %d RXQ %d wakeup event\n", |
968 | channel->channel, ev_sub_data); | 968 | channel->channel, ev_sub_data); |
969 | break; | 969 | break; |
970 | case FSE_AZ_TIMER_EV: | 970 | case FSE_AZ_TIMER_EV: |
971 | netif_vdbg(efx, hw, efx->net_dev, | 971 | netif_vdbg(efx, hw, efx->net_dev, |
972 | "channel %d RX queue %d timer expired\n", | 972 | "channel %d RX queue %d timer expired\n", |
973 | channel->channel, ev_sub_data); | 973 | channel->channel, ev_sub_data); |
974 | break; | 974 | break; |
975 | case FSE_AA_RX_RECOVER_EV: | 975 | case FSE_AA_RX_RECOVER_EV: |
976 | netif_err(efx, rx_err, efx->net_dev, | 976 | netif_err(efx, rx_err, efx->net_dev, |
977 | "channel %d seen DRIVER RX_RESET event. " | 977 | "channel %d seen DRIVER RX_RESET event. " |
978 | "Resetting.\n", channel->channel); | 978 | "Resetting.\n", channel->channel); |
979 | atomic_inc(&efx->rx_reset); | 979 | atomic_inc(&efx->rx_reset); |
980 | efx_schedule_reset(efx, | 980 | efx_schedule_reset(efx, |
981 | EFX_WORKAROUND_6555(efx) ? | 981 | EFX_WORKAROUND_6555(efx) ? |
982 | RESET_TYPE_RX_RECOVERY : | 982 | RESET_TYPE_RX_RECOVERY : |
983 | RESET_TYPE_DISABLE); | 983 | RESET_TYPE_DISABLE); |
984 | break; | 984 | break; |
985 | case FSE_BZ_RX_DSC_ERROR_EV: | 985 | case FSE_BZ_RX_DSC_ERROR_EV: |
986 | netif_err(efx, rx_err, efx->net_dev, | 986 | netif_err(efx, rx_err, efx->net_dev, |
987 | "RX DMA Q %d reports descriptor fetch error." | 987 | "RX DMA Q %d reports descriptor fetch error." |
988 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | 988 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); |
989 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | 989 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); |
990 | break; | 990 | break; |
991 | case FSE_BZ_TX_DSC_ERROR_EV: | 991 | case FSE_BZ_TX_DSC_ERROR_EV: |
992 | netif_err(efx, tx_err, efx->net_dev, | 992 | netif_err(efx, tx_err, efx->net_dev, |
993 | "TX DMA Q %d reports descriptor fetch error." | 993 | "TX DMA Q %d reports descriptor fetch error." |
994 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | 994 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); |
995 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | 995 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); |
996 | break; | 996 | break; |
997 | default: | 997 | default: |
998 | netif_vdbg(efx, hw, efx->net_dev, | 998 | netif_vdbg(efx, hw, efx->net_dev, |
999 | "channel %d unknown driver event code %d " | 999 | "channel %d unknown driver event code %d " |
1000 | "data %04x\n", channel->channel, ev_sub_code, | 1000 | "data %04x\n", channel->channel, ev_sub_code, |
1001 | ev_sub_data); | 1001 | ev_sub_data); |
1002 | break; | 1002 | break; |
1003 | } | 1003 | } |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | int efx_nic_process_eventq(struct efx_channel *channel, int budget) | 1006 | int efx_nic_process_eventq(struct efx_channel *channel, int budget) |
1007 | { | 1007 | { |
1008 | struct efx_nic *efx = channel->efx; | 1008 | struct efx_nic *efx = channel->efx; |
1009 | unsigned int read_ptr; | 1009 | unsigned int read_ptr; |
1010 | efx_qword_t event, *p_event; | 1010 | efx_qword_t event, *p_event; |
1011 | int ev_code; | 1011 | int ev_code; |
1012 | int tx_packets = 0; | 1012 | int tx_packets = 0; |
1013 | int spent = 0; | 1013 | int spent = 0; |
1014 | 1014 | ||
1015 | read_ptr = channel->eventq_read_ptr; | 1015 | read_ptr = channel->eventq_read_ptr; |
1016 | 1016 | ||
1017 | for (;;) { | 1017 | for (;;) { |
1018 | p_event = efx_event(channel, read_ptr); | 1018 | p_event = efx_event(channel, read_ptr); |
1019 | event = *p_event; | 1019 | event = *p_event; |
1020 | 1020 | ||
1021 | if (!efx_event_present(&event)) | 1021 | if (!efx_event_present(&event)) |
1022 | /* End of events */ | 1022 | /* End of events */ |
1023 | break; | 1023 | break; |
1024 | 1024 | ||
1025 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, | 1025 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, |
1026 | "channel %d event is "EFX_QWORD_FMT"\n", | 1026 | "channel %d event is "EFX_QWORD_FMT"\n", |
1027 | channel->channel, EFX_QWORD_VAL(event)); | 1027 | channel->channel, EFX_QWORD_VAL(event)); |
1028 | 1028 | ||
1029 | /* Clear this event by marking it all ones */ | 1029 | /* Clear this event by marking it all ones */ |
1030 | EFX_SET_QWORD(*p_event); | 1030 | EFX_SET_QWORD(*p_event); |
1031 | 1031 | ||
1032 | /* Increment read pointer */ | 1032 | /* Increment read pointer */ |
1033 | read_ptr = (read_ptr + 1) & channel->eventq_mask; | 1033 | read_ptr = (read_ptr + 1) & channel->eventq_mask; |
1034 | 1034 | ||
1035 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); | 1035 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); |
1036 | 1036 | ||
1037 | switch (ev_code) { | 1037 | switch (ev_code) { |
1038 | case FSE_AZ_EV_CODE_RX_EV: | 1038 | case FSE_AZ_EV_CODE_RX_EV: |
1039 | efx_handle_rx_event(channel, &event); | 1039 | efx_handle_rx_event(channel, &event); |
1040 | if (++spent == budget) | 1040 | if (++spent == budget) |
1041 | goto out; | 1041 | goto out; |
1042 | break; | 1042 | break; |
1043 | case FSE_AZ_EV_CODE_TX_EV: | 1043 | case FSE_AZ_EV_CODE_TX_EV: |
1044 | tx_packets += efx_handle_tx_event(channel, &event); | 1044 | tx_packets += efx_handle_tx_event(channel, &event); |
1045 | if (tx_packets > efx->txq_entries) { | 1045 | if (tx_packets > efx->txq_entries) { |
1046 | spent = budget; | 1046 | spent = budget; |
1047 | goto out; | 1047 | goto out; |
1048 | } | 1048 | } |
1049 | break; | 1049 | break; |
1050 | case FSE_AZ_EV_CODE_DRV_GEN_EV: | 1050 | case FSE_AZ_EV_CODE_DRV_GEN_EV: |
1051 | efx_handle_generated_event(channel, &event); | 1051 | efx_handle_generated_event(channel, &event); |
1052 | break; | 1052 | break; |
1053 | case FSE_AZ_EV_CODE_GLOBAL_EV: | 1053 | case FSE_AZ_EV_CODE_GLOBAL_EV: |
1054 | efx_handle_global_event(channel, &event); | 1054 | efx_handle_global_event(channel, &event); |
1055 | break; | 1055 | break; |
1056 | case FSE_AZ_EV_CODE_DRIVER_EV: | 1056 | case FSE_AZ_EV_CODE_DRIVER_EV: |
1057 | efx_handle_driver_event(channel, &event); | 1057 | efx_handle_driver_event(channel, &event); |
1058 | break; | 1058 | break; |
1059 | case FSE_CZ_EV_CODE_MCDI_EV: | 1059 | case FSE_CZ_EV_CODE_MCDI_EV: |
1060 | efx_mcdi_process_event(channel, &event); | 1060 | efx_mcdi_process_event(channel, &event); |
1061 | break; | 1061 | break; |
1062 | default: | 1062 | default: |
1063 | netif_err(channel->efx, hw, channel->efx->net_dev, | 1063 | netif_err(channel->efx, hw, channel->efx->net_dev, |
1064 | "channel %d unknown event type %d (data " | 1064 | "channel %d unknown event type %d (data " |
1065 | EFX_QWORD_FMT ")\n", channel->channel, | 1065 | EFX_QWORD_FMT ")\n", channel->channel, |
1066 | ev_code, EFX_QWORD_VAL(event)); | 1066 | ev_code, EFX_QWORD_VAL(event)); |
1067 | } | 1067 | } |
1068 | } | 1068 | } |
1069 | 1069 | ||
1070 | out: | 1070 | out: |
1071 | channel->eventq_read_ptr = read_ptr; | 1071 | channel->eventq_read_ptr = read_ptr; |
1072 | return spent; | 1072 | return spent; |
1073 | } | 1073 | } |
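[Editor's note] The loop above relies on a convention visible in efx_nic_init_eventq() below: slots are filled with all ones (memset 0xff), a bit pattern no valid event carries, so "present" is simply "not all ones"; each consumed event is written back to all ones before the masked read pointer advances, and processing stops at the budget. A userspace sketch of that polling convention:

```c
/* Userspace sketch of the polling convention, assuming a 64-bit event
 * word where all-ones means "empty slot" (as the memset(0xff) implies). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8			/* power of two */
#define RING_MASK (RING_SIZE - 1)
#define EMPTY     UINT64_MAX

int main(void)
{
	uint64_t ring[RING_SIZE];
	unsigned read_ptr = 0, spent = 0, budget = 4;

	memset(ring, 0xff, sizeof(ring));	/* all slots empty */
	ring[0] = 0x1111; ring[1] = 0x2222;	/* two pending events */

	while (spent < budget && ring[read_ptr] != EMPTY) {
		printf("event %#llx at slot %u\n",
		       (unsigned long long)ring[read_ptr], read_ptr);
		ring[read_ptr] = EMPTY;		/* mark consumed */
		read_ptr = (read_ptr + 1) & RING_MASK;
		++spent;
	}
	/* A real driver would now ack read_ptr back to the hardware. */
	return 0;
}
```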
1074 | 1074 | ||
1075 | 1075 | ||
1076 | /* Allocate buffer table entries for event queue */ | 1076 | /* Allocate buffer table entries for event queue */ |
1077 | int efx_nic_probe_eventq(struct efx_channel *channel) | 1077 | int efx_nic_probe_eventq(struct efx_channel *channel) |
1078 | { | 1078 | { |
1079 | struct efx_nic *efx = channel->efx; | 1079 | struct efx_nic *efx = channel->efx; |
1080 | unsigned entries; | 1080 | unsigned entries; |
1081 | 1081 | ||
1082 | entries = channel->eventq_mask + 1; | 1082 | entries = channel->eventq_mask + 1; |
1083 | return efx_alloc_special_buffer(efx, &channel->eventq, | 1083 | return efx_alloc_special_buffer(efx, &channel->eventq, |
1084 | entries * sizeof(efx_qword_t)); | 1084 | entries * sizeof(efx_qword_t)); |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | void efx_nic_init_eventq(struct efx_channel *channel) | 1087 | void efx_nic_init_eventq(struct efx_channel *channel) |
1088 | { | 1088 | { |
1089 | efx_oword_t reg; | 1089 | efx_oword_t reg; |
1090 | struct efx_nic *efx = channel->efx; | 1090 | struct efx_nic *efx = channel->efx; |
1091 | 1091 | ||
1092 | netif_dbg(efx, hw, efx->net_dev, | 1092 | netif_dbg(efx, hw, efx->net_dev, |
1093 | "channel %d event queue in special buffers %d-%d\n", | 1093 | "channel %d event queue in special buffers %d-%d\n", |
1094 | channel->channel, channel->eventq.index, | 1094 | channel->channel, channel->eventq.index, |
1095 | channel->eventq.index + channel->eventq.entries - 1); | 1095 | channel->eventq.index + channel->eventq.entries - 1); |
1096 | 1096 | ||
1097 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | 1097 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { |
1098 | EFX_POPULATE_OWORD_3(reg, | 1098 | EFX_POPULATE_OWORD_3(reg, |
1099 | FRF_CZ_TIMER_Q_EN, 1, | 1099 | FRF_CZ_TIMER_Q_EN, 1, |
1100 | FRF_CZ_HOST_NOTIFY_MODE, 0, | 1100 | FRF_CZ_HOST_NOTIFY_MODE, 0, |
1101 | FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); | 1101 | FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); |
1102 | efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); | 1102 | efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); |
1103 | } | 1103 | } |
1104 | 1104 | ||
1105 | /* Pin event queue buffer */ | 1105 | /* Pin event queue buffer */ |
1106 | efx_init_special_buffer(efx, &channel->eventq); | 1106 | efx_init_special_buffer(efx, &channel->eventq); |
1107 | 1107 | ||
1108 | /* Fill event queue with all ones (i.e. empty events) */ | 1108 | /* Fill event queue with all ones (i.e. empty events) */ |
1109 | memset(channel->eventq.addr, 0xff, channel->eventq.len); | 1109 | memset(channel->eventq.addr, 0xff, channel->eventq.len); |
1110 | 1110 | ||
1111 | /* Push event queue to card */ | 1111 | /* Push event queue to card */ |
1112 | EFX_POPULATE_OWORD_3(reg, | 1112 | EFX_POPULATE_OWORD_3(reg, |
1113 | FRF_AZ_EVQ_EN, 1, | 1113 | FRF_AZ_EVQ_EN, 1, |
1114 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), | 1114 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), |
1115 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); | 1115 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); |
1116 | efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, | 1116 | efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, |
1117 | channel->channel); | 1117 | channel->channel); |
1118 | 1118 | ||
1119 | efx->type->push_irq_moderation(channel); | 1119 | efx->type->push_irq_moderation(channel); |
1120 | } | 1120 | } |
1121 | 1121 | ||
1122 | void efx_nic_fini_eventq(struct efx_channel *channel) | 1122 | void efx_nic_fini_eventq(struct efx_channel *channel) |
1123 | { | 1123 | { |
1124 | efx_oword_t reg; | 1124 | efx_oword_t reg; |
1125 | struct efx_nic *efx = channel->efx; | 1125 | struct efx_nic *efx = channel->efx; |
1126 | 1126 | ||
1127 | /* Remove event queue from card */ | 1127 | /* Remove event queue from card */ |
1128 | EFX_ZERO_OWORD(reg); | 1128 | EFX_ZERO_OWORD(reg); |
1129 | efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, | 1129 | efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base, |
1130 | channel->channel); | 1130 | channel->channel); |
1131 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | 1131 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) |
1132 | efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); | 1132 | efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel); |
1133 | 1133 | ||
1134 | /* Unpin event queue */ | 1134 | /* Unpin event queue */ |
1135 | efx_fini_special_buffer(efx, &channel->eventq); | 1135 | efx_fini_special_buffer(efx, &channel->eventq); |
1136 | } | 1136 | } |
1137 | 1137 | ||
1138 | /* Free buffers backing event queue */ | 1138 | /* Free buffers backing event queue */ |
1139 | void efx_nic_remove_eventq(struct efx_channel *channel) | 1139 | void efx_nic_remove_eventq(struct efx_channel *channel) |
1140 | { | 1140 | { |
1141 | efx_free_special_buffer(channel->efx, &channel->eventq); | 1141 | efx_free_special_buffer(channel->efx, &channel->eventq); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | 1144 | ||
1145 | void efx_nic_generate_test_event(struct efx_channel *channel) | 1145 | void efx_nic_generate_test_event(struct efx_channel *channel) |
1146 | { | 1146 | { |
1147 | unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); | 1147 | unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); |
1148 | efx_qword_t test_event; | 1148 | efx_qword_t test_event; |
1149 | 1149 | ||
1150 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | 1150 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, |
1151 | FSE_AZ_EV_CODE_DRV_GEN_EV, | 1151 | FSE_AZ_EV_CODE_DRV_GEN_EV, |
1152 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | 1152 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); |
1153 | efx_generate_event(channel, &test_event); | 1153 | efx_generate_event(channel, &test_event); |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | void efx_nic_generate_fill_event(struct efx_channel *channel) | 1156 | void efx_nic_generate_fill_event(struct efx_channel *channel) |
1157 | { | 1157 | { |
1158 | unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); | 1158 | unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); |
1159 | efx_qword_t test_event; | 1159 | efx_qword_t test_event; |
1160 | 1160 | ||
1161 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | 1161 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, |
1162 | FSE_AZ_EV_CODE_DRV_GEN_EV, | 1162 | FSE_AZ_EV_CODE_DRV_GEN_EV, |
1163 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | 1163 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); |
1164 | efx_generate_event(channel, &test_event); | 1164 | efx_generate_event(channel, &test_event); |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | /************************************************************************** | 1167 | /************************************************************************** |
1168 | * | 1168 | * |
1169 | * Flush handling | 1169 | * Flush handling |
1170 | * | 1170 | * |
1171 | **************************************************************************/ | 1171 | **************************************************************************/ |
1172 | 1172 | ||
1173 | 1173 | ||
1174 | static void efx_poll_flush_events(struct efx_nic *efx) | 1174 | static void efx_poll_flush_events(struct efx_nic *efx) |
1175 | { | 1175 | { |
1176 | struct efx_channel *channel = efx_get_channel(efx, 0); | 1176 | struct efx_channel *channel = efx_get_channel(efx, 0); |
1177 | struct efx_tx_queue *tx_queue; | 1177 | struct efx_tx_queue *tx_queue; |
1178 | struct efx_rx_queue *rx_queue; | 1178 | struct efx_rx_queue *rx_queue; |
1179 | unsigned int read_ptr = channel->eventq_read_ptr; | 1179 | unsigned int read_ptr = channel->eventq_read_ptr; |
1180 | unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask; | 1180 | unsigned int end_ptr = (read_ptr - 1) & channel->eventq_mask; |
1181 | 1181 | ||
1182 | do { | 1182 | do { |
1183 | efx_qword_t *event = efx_event(channel, read_ptr); | 1183 | efx_qword_t *event = efx_event(channel, read_ptr); |
1184 | int ev_code, ev_sub_code, ev_queue; | 1184 | int ev_code, ev_sub_code, ev_queue; |
1185 | bool ev_failed; | 1185 | bool ev_failed; |
1186 | 1186 | ||
1187 | if (!efx_event_present(event)) | 1187 | if (!efx_event_present(event)) |
1188 | break; | 1188 | break; |
1189 | 1189 | ||
1190 | ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE); | 1190 | ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE); |
1191 | ev_sub_code = EFX_QWORD_FIELD(*event, | 1191 | ev_sub_code = EFX_QWORD_FIELD(*event, |
1192 | FSF_AZ_DRIVER_EV_SUBCODE); | 1192 | FSF_AZ_DRIVER_EV_SUBCODE); |
1193 | if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && | 1193 | if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && |
1194 | ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { | 1194 | ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { |
1195 | ev_queue = EFX_QWORD_FIELD(*event, | 1195 | ev_queue = EFX_QWORD_FIELD(*event, |
1196 | FSF_AZ_DRIVER_EV_SUBDATA); | 1196 | FSF_AZ_DRIVER_EV_SUBDATA); |
1197 | if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) { | 1197 | if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) { |
1198 | tx_queue = efx_get_tx_queue( | 1198 | tx_queue = efx_get_tx_queue( |
1199 | efx, ev_queue / EFX_TXQ_TYPES, | 1199 | efx, ev_queue / EFX_TXQ_TYPES, |
1200 | ev_queue % EFX_TXQ_TYPES); | 1200 | ev_queue % EFX_TXQ_TYPES); |
1201 | tx_queue->flushed = FLUSH_DONE; | 1201 | tx_queue->flushed = FLUSH_DONE; |
1202 | } | 1202 | } |
1203 | } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && | 1203 | } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && |
1204 | ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) { | 1204 | ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) { |
1205 | ev_queue = EFX_QWORD_FIELD( | 1205 | ev_queue = EFX_QWORD_FIELD( |
1206 | *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); | 1206 | *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); |
1207 | ev_failed = EFX_QWORD_FIELD( | 1207 | ev_failed = EFX_QWORD_FIELD( |
1208 | *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); | 1208 | *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); |
1209 | if (ev_queue < efx->n_rx_channels) { | 1209 | if (ev_queue < efx->n_rx_channels) { |
1210 | rx_queue = efx_get_rx_queue(efx, ev_queue); | 1210 | rx_queue = efx_get_rx_queue(efx, ev_queue); |
1211 | rx_queue->flushed = | 1211 | rx_queue->flushed = |
1212 | ev_failed ? FLUSH_FAILED : FLUSH_DONE; | 1212 | ev_failed ? FLUSH_FAILED : FLUSH_DONE; |
1213 | } | 1213 | } |
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | /* We're about to destroy the queue anyway, so | 1216 | /* We're about to destroy the queue anyway, so |
1217 | * it's ok to throw away every non-flush event */ | 1217 | * it's ok to throw away every non-flush event */ |
1218 | EFX_SET_QWORD(*event); | 1218 | EFX_SET_QWORD(*event); |
1219 | 1219 | ||
1220 | read_ptr = (read_ptr + 1) & channel->eventq_mask; | 1220 | read_ptr = (read_ptr + 1) & channel->eventq_mask; |
1221 | } while (read_ptr != end_ptr); | 1221 | } while (read_ptr != end_ptr); |
1222 | 1222 | ||
1223 | channel->eventq_read_ptr = read_ptr; | 1223 | channel->eventq_read_ptr = read_ptr; |
1224 | } | 1224 | } |
1225 | 1225 | ||
1226 | /* Handle tx and rx flushes at the same time, since they run in | 1226 | /* Handle tx and rx flushes at the same time, since they run in |
1227 | * parallel in the hardware and there's no reason for us to | 1227 | * parallel in the hardware and there's no reason for us to |
1228 | * serialise them */ | 1228 | * serialise them */ |
1229 | int efx_nic_flush_queues(struct efx_nic *efx) | 1229 | int efx_nic_flush_queues(struct efx_nic *efx) |
1230 | { | 1230 | { |
1231 | struct efx_channel *channel; | 1231 | struct efx_channel *channel; |
1232 | struct efx_rx_queue *rx_queue; | 1232 | struct efx_rx_queue *rx_queue; |
1233 | struct efx_tx_queue *tx_queue; | 1233 | struct efx_tx_queue *tx_queue; |
1234 | int i, tx_pending, rx_pending; | 1234 | int i, tx_pending, rx_pending; |
1235 | 1235 | ||
1236 | /* If necessary prepare the hardware for flushing */ | 1236 | /* If necessary prepare the hardware for flushing */ |
1237 | efx->type->prepare_flush(efx); | 1237 | efx->type->prepare_flush(efx); |
1238 | 1238 | ||
1239 | /* Flush all tx queues in parallel */ | 1239 | /* Flush all tx queues in parallel */ |
1240 | efx_for_each_channel(channel, efx) { | 1240 | efx_for_each_channel(channel, efx) { |
1241 | efx_for_each_channel_tx_queue(tx_queue, channel) | 1241 | efx_for_each_channel_tx_queue(tx_queue, channel) |
1242 | efx_flush_tx_queue(tx_queue); | 1242 | efx_flush_tx_queue(tx_queue); |
1243 | } | 1243 | } |
1244 | 1244 | ||
1245 | /* The hardware supports four concurrent rx flushes, each of which may | 1245 | /* The hardware supports four concurrent rx flushes, each of which may |
1246 | * need to be retried if there is an outstanding descriptor fetch */ | 1246 | * need to be retried if there is an outstanding descriptor fetch */ |
1247 | for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { | 1247 | for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { |
1248 | rx_pending = tx_pending = 0; | 1248 | rx_pending = tx_pending = 0; |
1249 | efx_for_each_channel(channel, efx) { | 1249 | efx_for_each_channel(channel, efx) { |
1250 | efx_for_each_channel_rx_queue(rx_queue, channel) { | 1250 | efx_for_each_channel_rx_queue(rx_queue, channel) { |
1251 | if (rx_queue->flushed == FLUSH_PENDING) | 1251 | if (rx_queue->flushed == FLUSH_PENDING) |
1252 | ++rx_pending; | 1252 | ++rx_pending; |
1253 | } | 1253 | } |
1254 | } | 1254 | } |
1255 | efx_for_each_channel(channel, efx) { | 1255 | efx_for_each_channel(channel, efx) { |
1256 | efx_for_each_channel_rx_queue(rx_queue, channel) { | 1256 | efx_for_each_channel_rx_queue(rx_queue, channel) { |
1257 | if (rx_pending == EFX_RX_FLUSH_COUNT) | 1257 | if (rx_pending == EFX_RX_FLUSH_COUNT) |
1258 | break; | 1258 | break; |
1259 | if (rx_queue->flushed == FLUSH_FAILED || | 1259 | if (rx_queue->flushed == FLUSH_FAILED || |
1260 | rx_queue->flushed == FLUSH_NONE) { | 1260 | rx_queue->flushed == FLUSH_NONE) { |
1261 | efx_flush_rx_queue(rx_queue); | 1261 | efx_flush_rx_queue(rx_queue); |
1262 | ++rx_pending; | 1262 | ++rx_pending; |
1263 | } | 1263 | } |
1264 | } | 1264 | } |
1265 | efx_for_each_channel_tx_queue(tx_queue, channel) { | 1265 | efx_for_each_channel_tx_queue(tx_queue, channel) { |
1266 | if (tx_queue->flushed != FLUSH_DONE) | 1266 | if (tx_queue->flushed != FLUSH_DONE) |
1267 | ++tx_pending; | 1267 | ++tx_pending; |
1268 | } | 1268 | } |
1269 | } | 1269 | } |
1270 | 1270 | ||
1271 | if (rx_pending == 0 && tx_pending == 0) | 1271 | if (rx_pending == 0 && tx_pending == 0) |
1272 | return 0; | 1272 | return 0; |
1273 | 1273 | ||
1274 | msleep(EFX_FLUSH_INTERVAL); | 1274 | msleep(EFX_FLUSH_INTERVAL); |
1275 | efx_poll_flush_events(efx); | 1275 | efx_poll_flush_events(efx); |
1276 | } | 1276 | } |
1277 | 1277 | ||
1278 | /* Mark the queues as all flushed. We're going to return failure | 1278 | /* Mark the queues as all flushed. We're going to return failure |
1279 | * leading to a reset, or fake up success anyway */ | 1279 | * leading to a reset, or fake up success anyway */ |
1280 | efx_for_each_channel(channel, efx) { | 1280 | efx_for_each_channel(channel, efx) { |
1281 | efx_for_each_channel_tx_queue(tx_queue, channel) { | 1281 | efx_for_each_channel_tx_queue(tx_queue, channel) { |
1282 | if (tx_queue->flushed != FLUSH_DONE) | 1282 | if (tx_queue->flushed != FLUSH_DONE) |
1283 | netif_err(efx, hw, efx->net_dev, | 1283 | netif_err(efx, hw, efx->net_dev, |
1284 | "tx queue %d flush command timed out\n", | 1284 | "tx queue %d flush command timed out\n", |
1285 | tx_queue->queue); | 1285 | tx_queue->queue); |
1286 | tx_queue->flushed = FLUSH_DONE; | 1286 | tx_queue->flushed = FLUSH_DONE; |
1287 | } | 1287 | } |
1288 | efx_for_each_channel_rx_queue(rx_queue, channel) { | 1288 | efx_for_each_channel_rx_queue(rx_queue, channel) { |
1289 | if (rx_queue->flushed != FLUSH_DONE) | 1289 | if (rx_queue->flushed != FLUSH_DONE) |
1290 | netif_err(efx, hw, efx->net_dev, | 1290 | netif_err(efx, hw, efx->net_dev, |
1291 | "rx queue %d flush command timed out\n", | 1291 | "rx queue %d flush command timed out\n", |
1292 | efx_rx_queue_index(rx_queue)); | 1292 | efx_rx_queue_index(rx_queue)); |
1293 | rx_queue->flushed = FLUSH_DONE; | 1293 | rx_queue->flushed = FLUSH_DONE; |
1294 | } | 1294 | } |
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | return -ETIMEDOUT; | 1297 | return -ETIMEDOUT; |
1298 | } | 1298 | } |
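[Editor's note] Each queue carries a small flush state machine (FLUSH_NONE, FLUSH_PENDING, FLUSH_FAILED, FLUSH_DONE), and the function above polls it a bounded number of times before giving up with -ETIMEDOUT. A standalone sketch of the pattern (illustrative constants, not the driver's):

```c
/* Pattern sketch, not driver code: bounded polling over per-queue flush
 * states, reissuing failed flushes and timing out if any stay pending. */
#include <errno.h>
#include <stdio.h>

enum flush_state { FLUSH_NONE, FLUSH_PENDING, FLUSH_FAILED, FLUSH_DONE };
#define POLL_COUNT 5

static int flush_all(enum flush_state *q, int nq)
{
	int i, j, pending;

	for (i = 0; i < POLL_COUNT; ++i) {
		pending = 0;
		for (j = 0; j < nq; ++j) {
			if (q[j] == FLUSH_NONE || q[j] == FLUSH_FAILED)
				q[j] = FLUSH_PENDING;	/* (re)issue flush */
			if (q[j] != FLUSH_DONE)
				++pending;
		}
		if (!pending)
			return 0;
		q[0] = FLUSH_DONE;	/* stand-in for event processing */
	}
	return -ETIMEDOUT;
}

int main(void)
{
	enum flush_state q[2] = { FLUSH_NONE, FLUSH_DONE };

	printf("%d\n", flush_all(q, 2));	/* 0: all flushed in time */
	return 0;
}
```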
1299 | 1299 | ||
1300 | /************************************************************************** | 1300 | /************************************************************************** |
1301 | * | 1301 | * |
1302 | * Hardware interrupts | 1302 | * Hardware interrupts |
1303 | * The hardware interrupt handler does very little work; all the event | 1303 | * The hardware interrupt handler does very little work; all the event |
1304 | * queue processing is carried out by per-channel tasklets. | 1304 | * queue processing is carried out by per-channel tasklets. |
1305 | * | 1305 | * |
1306 | **************************************************************************/ | 1306 | **************************************************************************/ |
1307 | 1307 | ||
1308 | /* Enable/disable/generate interrupts */ | 1308 | /* Enable/disable/generate interrupts */ |
1309 | static inline void efx_nic_interrupts(struct efx_nic *efx, | 1309 | static inline void efx_nic_interrupts(struct efx_nic *efx, |
1310 | bool enabled, bool force) | 1310 | bool enabled, bool force) |
1311 | { | 1311 | { |
1312 | efx_oword_t int_en_reg_ker; | 1312 | efx_oword_t int_en_reg_ker; |
1313 | 1313 | ||
1314 | EFX_POPULATE_OWORD_3(int_en_reg_ker, | 1314 | EFX_POPULATE_OWORD_3(int_en_reg_ker, |
1315 | FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level, | 1315 | FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level, |
1316 | FRF_AZ_KER_INT_KER, force, | 1316 | FRF_AZ_KER_INT_KER, force, |
1317 | FRF_AZ_DRV_INT_EN_KER, enabled); | 1317 | FRF_AZ_DRV_INT_EN_KER, enabled); |
1318 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); | 1318 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); |
1319 | } | 1319 | } |
1320 | 1320 | ||
1321 | void efx_nic_enable_interrupts(struct efx_nic *efx) | 1321 | void efx_nic_enable_interrupts(struct efx_nic *efx) |
1322 | { | 1322 | { |
1323 | struct efx_channel *channel; | 1323 | struct efx_channel *channel; |
1324 | 1324 | ||
1325 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); | 1325 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); |
1326 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ | 1326 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ |
1327 | 1327 | ||
1328 | /* Enable interrupts */ | 1328 | /* Enable interrupts */ |
1329 | efx_nic_interrupts(efx, true, false); | 1329 | efx_nic_interrupts(efx, true, false); |
1330 | 1330 | ||
1331 | /* Force processing of all the channels to get the EVQ RPTRs up to | 1331 | /* Force processing of all the channels to get the EVQ RPTRs up to |
1332 | date */ | 1332 | date */ |
1333 | efx_for_each_channel(channel, efx) | 1333 | efx_for_each_channel(channel, efx) |
1334 | efx_schedule_channel(channel); | 1334 | efx_schedule_channel(channel); |
1335 | } | 1335 | } |
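[Editor's note] The wmb() above guarantees the zeroed interrupt status block is visible before the enable write can reach the device. A userspace sketch of the same ordering idea, using a C11 release fence purely as a stand-in (real kernel code uses wmb(), not atomics):

```c
/* Ordering sketch: the status clear must not be reordered after the
 * enable store. A C11 release fence stands in for wmb() here. */
#include <stdatomic.h>
#include <stdint.h>

static uint64_t irq_status;		/* shared with the "hardware" */
static atomic_int irq_enabled;

static void enable_interrupts(void)
{
	irq_status = 0;				/* clear vector first */
	atomic_thread_fence(memory_order_release);	/* ~ wmb() */
	atomic_store_explicit(&irq_enabled, 1, memory_order_relaxed);
}

int main(void)
{
	enable_interrupts();
	return 0;
}
```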
1336 | 1336 | ||
1337 | void efx_nic_disable_interrupts(struct efx_nic *efx) | 1337 | void efx_nic_disable_interrupts(struct efx_nic *efx) |
1338 | { | 1338 | { |
1339 | /* Disable interrupts */ | 1339 | /* Disable interrupts */ |
1340 | efx_nic_interrupts(efx, false, false); | 1340 | efx_nic_interrupts(efx, false, false); |
1341 | } | 1341 | } |
1342 | 1342 | ||
1343 | /* Generate a test interrupt | 1343 | /* Generate a test interrupt |
1344 | * Interrupt must already have been enabled, otherwise nasty things | 1344 | * Interrupt must already have been enabled, otherwise nasty things |
1345 | * may happen. | 1345 | * may happen. |
1346 | */ | 1346 | */ |
1347 | void efx_nic_generate_interrupt(struct efx_nic *efx) | 1347 | void efx_nic_generate_interrupt(struct efx_nic *efx) |
1348 | { | 1348 | { |
1349 | efx_nic_interrupts(efx, true, true); | 1349 | efx_nic_interrupts(efx, true, true); |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | /* Process a fatal interrupt | 1352 | /* Process a fatal interrupt |
1353 | * Disable bus mastering ASAP and schedule a reset | 1353 | * Disable bus mastering ASAP and schedule a reset |
1354 | */ | 1354 | */ |
1355 | irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) | 1355 | irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) |
1356 | { | 1356 | { |
1357 | struct falcon_nic_data *nic_data = efx->nic_data; | 1357 | struct falcon_nic_data *nic_data = efx->nic_data; |
1358 | efx_oword_t *int_ker = efx->irq_status.addr; | 1358 | efx_oword_t *int_ker = efx->irq_status.addr; |
1359 | efx_oword_t fatal_intr; | 1359 | efx_oword_t fatal_intr; |
1360 | int error, mem_perr; | 1360 | int error, mem_perr; |
1361 | 1361 | ||
1362 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); | 1362 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); |
1363 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); | 1363 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); |
1364 | 1364 | ||
1365 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " | 1365 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " |
1366 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | 1366 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), |
1367 | EFX_OWORD_VAL(fatal_intr), | 1367 | EFX_OWORD_VAL(fatal_intr), |
1368 | error ? "disabling bus mastering" : "no recognised error"); | 1368 | error ? "disabling bus mastering" : "no recognised error"); |
1369 | 1369 | ||
1370 | /* If this is a memory parity error, dump which blocks are offending */ | 1370 | /* If this is a memory parity error, dump which blocks are offending */ |
1371 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || | 1371 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || |
1372 | EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); | 1372 | EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); |
1373 | if (mem_perr) { | 1373 | if (mem_perr) { |
1374 | efx_oword_t reg; | 1374 | efx_oword_t reg; |
1375 | efx_reado(efx, ®, FR_AZ_MEM_STAT); | 1375 | efx_reado(efx, ®, FR_AZ_MEM_STAT); |
1376 | netif_err(efx, hw, efx->net_dev, | 1376 | netif_err(efx, hw, efx->net_dev, |
1377 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", | 1377 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", |
1378 | EFX_OWORD_VAL(reg)); | 1378 | EFX_OWORD_VAL(reg)); |
1379 | } | 1379 | } |
1380 | 1380 | ||
1381 | /* Disable both devices */ | 1381 | /* Disable both devices */ |
1382 | pci_clear_master(efx->pci_dev); | 1382 | pci_clear_master(efx->pci_dev); |
1383 | if (efx_nic_is_dual_func(efx)) | 1383 | if (efx_nic_is_dual_func(efx)) |
1384 | pci_clear_master(nic_data->pci_dev2); | 1384 | pci_clear_master(nic_data->pci_dev2); |
1385 | efx_nic_disable_interrupts(efx); | 1385 | efx_nic_disable_interrupts(efx); |
1386 | 1386 | ||
1387 | /* Count errors and reset or disable the NIC accordingly */ | 1387 | /* Count errors and reset or disable the NIC accordingly */ |
1388 | if (efx->int_error_count == 0 || | 1388 | if (efx->int_error_count == 0 || |
1389 | time_after(jiffies, efx->int_error_expire)) { | 1389 | time_after(jiffies, efx->int_error_expire)) { |
1390 | efx->int_error_count = 0; | 1390 | efx->int_error_count = 0; |
1391 | efx->int_error_expire = | 1391 | efx->int_error_expire = |
1392 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; | 1392 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; |
1393 | } | 1393 | } |
1394 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { | 1394 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { |
1395 | netif_err(efx, hw, efx->net_dev, | 1395 | netif_err(efx, hw, efx->net_dev, |
1396 | "SYSTEM ERROR - reset scheduled\n"); | 1396 | "SYSTEM ERROR - reset scheduled\n"); |
1397 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | 1397 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); |
1398 | } else { | 1398 | } else { |
1399 | netif_err(efx, hw, efx->net_dev, | 1399 | netif_err(efx, hw, efx->net_dev, |
1400 | "SYSTEM ERROR - max number of errors seen." | 1400 | "SYSTEM ERROR - max number of errors seen." |
1401 | "NIC will be disabled\n"); | 1401 | "NIC will be disabled\n"); |
1402 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | 1402 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
1403 | } | 1403 | } |
1404 | 1404 | ||
1405 | return IRQ_HANDLED; | 1405 | return IRQ_HANDLED; |
1406 | } | 1406 | } |
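The count/expiry pair above is a simple sliding-window rate limit: a reset is scheduled for each fatal interrupt until EFX_MAX_INT_ERRORS of them land inside one EFX_INT_ERROR_EXPIRE-second window, at which point the NIC is disabled instead. Restated as a hedged standalone helper (illustrative only, not part of the driver; it relies on the efx fields and constants used above):

/* Returns true once the error budget for the current window is spent. */
static bool demo_error_budget_spent(struct efx_nic *efx)
{
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		/* First error, or the previous window lapsed: start over */
		efx->int_error_count = 0;
		efx->int_error_expire = jiffies + EFX_INT_ERROR_EXPIRE * HZ;
	}
	return ++efx->int_error_count >= EFX_MAX_INT_ERRORS;
}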
1407 | 1407 | ||
1408 | /* Handle a legacy interrupt | 1408 | /* Handle a legacy interrupt |
1409 | * Acknowledges the interrupt and schedules event queue processing. | 1409 | * Acknowledges the interrupt and schedules event queue processing. |
1410 | */ | 1410 | */ |
1411 | static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) | 1411 | static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) |
1412 | { | 1412 | { |
1413 | struct efx_nic *efx = dev_id; | 1413 | struct efx_nic *efx = dev_id; |
1414 | efx_oword_t *int_ker = efx->irq_status.addr; | 1414 | efx_oword_t *int_ker = efx->irq_status.addr; |
1415 | irqreturn_t result = IRQ_NONE; | 1415 | irqreturn_t result = IRQ_NONE; |
1416 | struct efx_channel *channel; | 1416 | struct efx_channel *channel; |
1417 | efx_dword_t reg; | 1417 | efx_dword_t reg; |
1418 | u32 queues; | 1418 | u32 queues; |
1419 | int syserr; | 1419 | int syserr; |
1420 | 1420 | ||
1421 | /* Read the ISR which also ACKs the interrupts */ | 1421 | /* Read the ISR which also ACKs the interrupts */ |
1422 | efx_readd(efx, ®, FR_BZ_INT_ISR0); | 1422 | efx_readd(efx, ®, FR_BZ_INT_ISR0); |
1423 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | 1423 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); |
1424 | 1424 | ||
1425 | /* Check to see if we have a serious error condition */ | 1425 | /* Check to see if we have a serious error condition */ |
1426 | if (queues & (1U << efx->fatal_irq_level)) { | 1426 | if (queues & (1U << efx->fatal_irq_level)) { |
1427 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | 1427 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1428 | if (unlikely(syserr)) | 1428 | if (unlikely(syserr)) |
1429 | return efx_nic_fatal_interrupt(efx); | 1429 | return efx_nic_fatal_interrupt(efx); |
1430 | } | 1430 | } |
1431 | 1431 | ||
1432 | if (queues != 0) { | 1432 | if (queues != 0) { |
1433 | if (EFX_WORKAROUND_15783(efx)) | 1433 | if (EFX_WORKAROUND_15783(efx)) |
1434 | efx->irq_zero_count = 0; | 1434 | efx->irq_zero_count = 0; |
1435 | 1435 | ||
1436 | /* Schedule processing of any interrupting queues */ | 1436 | /* Schedule processing of any interrupting queues */ |
1437 | efx_for_each_channel(channel, efx) { | 1437 | efx_for_each_channel(channel, efx) { |
1438 | if (queues & 1) | 1438 | if (queues & 1) |
1439 | efx_schedule_channel(channel); | 1439 | efx_schedule_channel(channel); |
1440 | queues >>= 1; | 1440 | queues >>= 1; |
1441 | } | 1441 | } |
1442 | result = IRQ_HANDLED; | 1442 | result = IRQ_HANDLED; |
1443 | 1443 | ||
1444 | } else if (EFX_WORKAROUND_15783(efx)) { | 1444 | } else if (EFX_WORKAROUND_15783(efx)) { |
1445 | efx_qword_t *event; | 1445 | efx_qword_t *event; |
1446 | 1446 | ||
1447 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 | 1447 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 |
1448 | * because this might be a shared interrupt. */ | 1448 | * because this might be a shared interrupt. */ |
1449 | if (efx->irq_zero_count++ == 0) | 1449 | if (efx->irq_zero_count++ == 0) |
1450 | result = IRQ_HANDLED; | 1450 | result = IRQ_HANDLED; |
1451 | 1451 | ||
1452 | /* Ensure we schedule or rearm all event queues */ | 1452 | /* Ensure we schedule or rearm all event queues */ |
1453 | efx_for_each_channel(channel, efx) { | 1453 | efx_for_each_channel(channel, efx) { |
1454 | event = efx_event(channel, channel->eventq_read_ptr); | 1454 | event = efx_event(channel, channel->eventq_read_ptr); |
1455 | if (efx_event_present(event)) | 1455 | if (efx_event_present(event)) |
1456 | efx_schedule_channel(channel); | 1456 | efx_schedule_channel(channel); |
1457 | else | 1457 | else |
1458 | efx_nic_eventq_read_ack(channel); | 1458 | efx_nic_eventq_read_ack(channel); |
1459 | } | 1459 | } |
1460 | } | 1460 | } |
1461 | 1461 | ||
1462 | if (result == IRQ_HANDLED) { | 1462 | if (result == IRQ_HANDLED) { |
1463 | efx->last_irq_cpu = raw_smp_processor_id(); | 1463 | efx->last_irq_cpu = raw_smp_processor_id(); |
1464 | netif_vdbg(efx, intr, efx->net_dev, | 1464 | netif_vdbg(efx, intr, efx->net_dev, |
1465 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | 1465 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", |
1466 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | 1466 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); |
1467 | } | 1467 | } |
1468 | 1468 | ||
1469 | return result; | 1469 | return result; |
1470 | } | 1470 | } |
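The per-bit walk over the ISR value is easiest to see with a concrete mask. A self-contained userspace sketch of the same loop (names are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Same walk as the loop above: bit N set means "schedule channel N". */
static void demo_schedule_from_mask(uint32_t queues)
{
	unsigned int ch;

	for (ch = 0; queues != 0; queues >>= 1, ch++)
		if (queues & 1)
			printf("schedule channel %u\n", ch);
}

/* demo_schedule_from_mask(0x5) prints channels 0 and 2 only. */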
1471 | 1471 | ||
1472 | /* Handle an MSI interrupt | 1472 | /* Handle an MSI interrupt |
1473 | * | 1473 | * |
1474 | * Handle an MSI hardware interrupt. This routine schedules event | 1474 | * Handle an MSI hardware interrupt. This routine schedules event |
1475 | * queue processing. No interrupt acknowledgement cycle is necessary. | 1475 | * queue processing. No interrupt acknowledgement cycle is necessary. |
1476 | * Also, we never need to check that the interrupt is for us, since | 1476 | * Also, we never need to check that the interrupt is for us, since |
1477 | * MSI interrupts cannot be shared. | 1477 | * MSI interrupts cannot be shared. |
1478 | */ | 1478 | */ |
1479 | static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) | 1479 | static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) |
1480 | { | 1480 | { |
1481 | struct efx_channel *channel = *(struct efx_channel **)dev_id; | 1481 | struct efx_channel *channel = *(struct efx_channel **)dev_id; |
1482 | struct efx_nic *efx = channel->efx; | 1482 | struct efx_nic *efx = channel->efx; |
1483 | efx_oword_t *int_ker = efx->irq_status.addr; | 1483 | efx_oword_t *int_ker = efx->irq_status.addr; |
1484 | int syserr; | 1484 | int syserr; |
1485 | 1485 | ||
1486 | efx->last_irq_cpu = raw_smp_processor_id(); | 1486 | efx->last_irq_cpu = raw_smp_processor_id(); |
1487 | netif_vdbg(efx, intr, efx->net_dev, | 1487 | netif_vdbg(efx, intr, efx->net_dev, |
1488 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | 1488 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", |
1489 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 1489 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1490 | 1490 | ||
1491 | /* Check to see if we have a serious error condition */ | 1491 | /* Check to see if we have a serious error condition */ |
1492 | if (channel->channel == efx->fatal_irq_level) { | 1492 | if (channel->channel == efx->fatal_irq_level) { |
1493 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | 1493 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1494 | if (unlikely(syserr)) | 1494 | if (unlikely(syserr)) |
1495 | return efx_nic_fatal_interrupt(efx); | 1495 | return efx_nic_fatal_interrupt(efx); |
1496 | } | 1496 | } |
1497 | 1497 | ||
1498 | /* Schedule processing of the channel */ | 1498 | /* Schedule processing of the channel */ |
1499 | efx_schedule_channel(channel); | 1499 | efx_schedule_channel(channel); |
1500 | 1500 | ||
1501 | return IRQ_HANDLED; | 1501 | return IRQ_HANDLED; |
1502 | } | 1502 | } |
1503 | 1503 | ||
1504 | 1504 | ||
1505 | /* Set up the RSS indirection table. | 1505 | /* Set up the RSS indirection table. |
1506 | * This maps the packet's hash value to an RX queue | 1506 | * This maps the packet's hash value to an RX queue |
1507 | */ | 1507 | */ |
1508 | void efx_nic_push_rx_indir_table(struct efx_nic *efx) | 1508 | void efx_nic_push_rx_indir_table(struct efx_nic *efx) |
1509 | { | 1509 | { |
1510 | size_t i = 0; | 1510 | size_t i = 0; |
1511 | efx_dword_t dword; | 1511 | efx_dword_t dword; |
1512 | 1512 | ||
1513 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | 1513 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) |
1514 | return; | 1514 | return; |
1515 | 1515 | ||
1516 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | 1516 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != |
1517 | FR_BZ_RX_INDIRECTION_TBL_ROWS); | 1517 | FR_BZ_RX_INDIRECTION_TBL_ROWS); |
1518 | 1518 | ||
1519 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { | 1519 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { |
1520 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, | 1520 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, |
1521 | efx->rx_indir_table[i]); | 1521 | efx->rx_indir_table[i]); |
1522 | efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); | 1522 | efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); |
1523 | } | 1523 | } |
1524 | } | 1524 | } |
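Each row of the indirection table holds an RX queue number, and the hardware indexes the table with bits of the packet hash. A common way to fill such a table is a round-robin spread over the active queues; a hedged sketch using kernel-style types (the driver's actual default fill lives elsewhere, and demo_fill_indir_table is a made-up name):

#include <linux/types.h>

/* Spread table rows evenly over n_rx_queues, so hash values map
 * round-robin onto RX queues. */
static void demo_fill_indir_table(u32 *table, size_t rows,
				  unsigned int n_rx_queues)
{
	size_t i;

	for (i = 0; i < rows; i++)
		table[i] = i % n_rx_queues;
}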
1525 | 1525 | ||
1526 | /* Hook interrupt handler(s) | 1526 | /* Hook interrupt handler(s) |
1527 | * Try MSI and then legacy interrupts. | 1527 | * Try MSI and then legacy interrupts. |
1528 | */ | 1528 | */ |
1529 | int efx_nic_init_interrupt(struct efx_nic *efx) | 1529 | int efx_nic_init_interrupt(struct efx_nic *efx) |
1530 | { | 1530 | { |
1531 | struct efx_channel *channel; | 1531 | struct efx_channel *channel; |
1532 | int rc; | 1532 | int rc; |
1533 | 1533 | ||
1534 | if (!EFX_INT_MODE_USE_MSI(efx)) { | 1534 | if (!EFX_INT_MODE_USE_MSI(efx)) { |
1535 | irq_handler_t handler; | 1535 | irq_handler_t handler; |
1536 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 1536 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
1537 | handler = efx_legacy_interrupt; | 1537 | handler = efx_legacy_interrupt; |
1538 | else | 1538 | else |
1539 | handler = falcon_legacy_interrupt_a1; | 1539 | handler = falcon_legacy_interrupt_a1; |
1540 | 1540 | ||
1541 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, | 1541 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, |
1542 | efx->name, efx); | 1542 | efx->name, efx); |
1543 | if (rc) { | 1543 | if (rc) { |
1544 | netif_err(efx, drv, efx->net_dev, | 1544 | netif_err(efx, drv, efx->net_dev, |
1545 | "failed to hook legacy IRQ %d\n", | 1545 | "failed to hook legacy IRQ %d\n", |
1546 | efx->pci_dev->irq); | 1546 | efx->pci_dev->irq); |
1547 | goto fail1; | 1547 | goto fail1; |
1548 | } | 1548 | } |
1549 | return 0; | 1549 | return 0; |
1550 | } | 1550 | } |
1551 | 1551 | ||
1552 | /* Hook MSI or MSI-X interrupt */ | 1552 | /* Hook MSI or MSI-X interrupt */ |
1553 | efx_for_each_channel(channel, efx) { | 1553 | efx_for_each_channel(channel, efx) { |
1554 | rc = request_irq(channel->irq, efx_msi_interrupt, | 1554 | rc = request_irq(channel->irq, efx_msi_interrupt, |
1555 | IRQF_PROBE_SHARED, /* Not shared */ | 1555 | IRQF_PROBE_SHARED, /* Not shared */ |
1556 | efx->channel_name[channel->channel], | 1556 | efx->channel_name[channel->channel], |
1557 | &efx->channel[channel->channel]); | 1557 | &efx->channel[channel->channel]); |
1558 | if (rc) { | 1558 | if (rc) { |
1559 | netif_err(efx, drv, efx->net_dev, | 1559 | netif_err(efx, drv, efx->net_dev, |
1560 | "failed to hook IRQ %d\n", channel->irq); | 1560 | "failed to hook IRQ %d\n", channel->irq); |
1561 | goto fail2; | 1561 | goto fail2; |
1562 | } | 1562 | } |
1563 | } | 1563 | } |
1564 | 1564 | ||
1565 | return 0; | 1565 | return 0; |
1566 | 1566 | ||
1567 | fail2: | 1567 | fail2: |
1568 | efx_for_each_channel(channel, efx) | 1568 | efx_for_each_channel(channel, efx) |
1569 | free_irq(channel->irq, &efx->channel[channel->channel]); | 1569 | free_irq(channel->irq, &efx->channel[channel->channel]); |
1570 | fail1: | 1570 | fail1: |
1571 | return rc; | 1571 | return rc; |
1572 | } | 1572 | } |
1573 | 1573 | ||
1574 | void efx_nic_fini_interrupt(struct efx_nic *efx) | 1574 | void efx_nic_fini_interrupt(struct efx_nic *efx) |
1575 | { | 1575 | { |
1576 | struct efx_channel *channel; | 1576 | struct efx_channel *channel; |
1577 | efx_oword_t reg; | 1577 | efx_oword_t reg; |
1578 | 1578 | ||
1579 | /* Disable MSI/MSI-X interrupts */ | 1579 | /* Disable MSI/MSI-X interrupts */ |
1580 | efx_for_each_channel(channel, efx) { | 1580 | efx_for_each_channel(channel, efx) { |
1581 | if (channel->irq) | 1581 | if (channel->irq) |
1582 | free_irq(channel->irq, &efx->channel[channel->channel]); | 1582 | free_irq(channel->irq, &efx->channel[channel->channel]); |
1583 | } | 1583 | } |
1584 | 1584 | ||
1585 | /* ACK legacy interrupt */ | 1585 | /* ACK legacy interrupt */ |
1586 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 1586 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
1587 | efx_reado(efx, ®, FR_BZ_INT_ISR0); | 1587 | efx_reado(efx, ®, FR_BZ_INT_ISR0); |
1588 | else | 1588 | else |
1589 | falcon_irq_ack_a1(efx); | 1589 | falcon_irq_ack_a1(efx); |
1590 | 1590 | ||
1591 | /* Disable legacy interrupt */ | 1591 | /* Disable legacy interrupt */ |
1592 | if (efx->legacy_irq) | 1592 | if (efx->legacy_irq) |
1593 | free_irq(efx->legacy_irq, efx); | 1593 | free_irq(efx->legacy_irq, efx); |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | u32 efx_nic_fpga_ver(struct efx_nic *efx) | 1596 | u32 efx_nic_fpga_ver(struct efx_nic *efx) |
1597 | { | 1597 | { |
1598 | efx_oword_t altera_build; | 1598 | efx_oword_t altera_build; |
1599 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); | 1599 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); |
1600 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); | 1600 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); |
1601 | } | 1601 | } |
1602 | 1602 | ||
1603 | void efx_nic_init_common(struct efx_nic *efx) | 1603 | void efx_nic_init_common(struct efx_nic *efx) |
1604 | { | 1604 | { |
1605 | efx_oword_t temp; | 1605 | efx_oword_t temp; |
1606 | 1606 | ||
1607 | /* Set positions of descriptor caches in SRAM. */ | 1607 | /* Set positions of descriptor caches in SRAM. */ |
1608 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, | 1608 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, |
1609 | efx->type->tx_dc_base / 8); | 1609 | efx->type->tx_dc_base / 8); |
1610 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); | 1610 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); |
1611 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, | 1611 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, |
1612 | efx->type->rx_dc_base / 8); | 1612 | efx->type->rx_dc_base / 8); |
1613 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); | 1613 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); |
1614 | 1614 | ||
1615 | /* Set TX descriptor cache size. */ | 1615 | /* Set TX descriptor cache size. */ |
1616 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); | 1616 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); |
1617 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | 1617 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); |
1618 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); | 1618 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); |
1619 | 1619 | ||
1620 | /* Set RX descriptor cache size. Set the low watermark to size-8, as | 1620 | /* Set RX descriptor cache size. Set the low watermark to size-8, as |
1621 | * this allows the most efficient prefetching. | 1621 | * this allows the most efficient prefetching. |
1622 | */ | 1622 | */ |
1623 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); | 1623 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); |
1624 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | 1624 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); |
1625 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); | 1625 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); |
1626 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | 1626 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); |
1627 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); | 1627 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); |
1628 | 1628 | ||
1629 | /* Program INT_KER address */ | 1629 | /* Program INT_KER address */ |
1630 | EFX_POPULATE_OWORD_2(temp, | 1630 | EFX_POPULATE_OWORD_2(temp, |
1631 | FRF_AZ_NORM_INT_VEC_DIS_KER, | 1631 | FRF_AZ_NORM_INT_VEC_DIS_KER, |
1632 | EFX_INT_MODE_USE_MSI(efx), | 1632 | EFX_INT_MODE_USE_MSI(efx), |
1633 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); | 1633 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); |
1634 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); | 1634 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); |
1635 | 1635 | ||
1636 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) | 1636 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) |
1637 | /* Use an interrupt level unused by event queues */ | 1637 | /* Use an interrupt level unused by event queues */ |
1638 | efx->fatal_irq_level = 0x1f; | 1638 | efx->fatal_irq_level = 0x1f; |
1639 | else | 1639 | else |
1640 | /* Use a valid MSI-X vector */ | 1640 | /* Use a valid MSI-X vector */ |
1641 | efx->fatal_irq_level = 0; | 1641 | efx->fatal_irq_level = 0; |
1642 | 1642 | ||
1643 | /* Enable all the genuinely fatal interrupts. (They are still | 1643 | /* Enable all the genuinely fatal interrupts. (They are still |
1644 | * masked by the overall interrupt mask, controlled by | 1644 | * masked by the overall interrupt mask, controlled by |
1645 | * falcon_interrupts()). | 1645 | * falcon_interrupts()). |
1646 | * | 1646 | * |
1647 | * Note: All other fatal interrupts are enabled | 1647 | * Note: All other fatal interrupts are enabled |
1648 | */ | 1648 | */ |
1649 | EFX_POPULATE_OWORD_3(temp, | 1649 | EFX_POPULATE_OWORD_3(temp, |
1650 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, | 1650 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, |
1651 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, | 1651 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, |
1652 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); | 1652 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); |
1653 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | 1653 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) |
1654 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); | 1654 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); |
1655 | EFX_INVERT_OWORD(temp); | 1655 | EFX_INVERT_OWORD(temp); |
1656 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); | 1656 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); |
1657 | 1657 | ||
1658 | efx_nic_push_rx_indir_table(efx); | 1658 | efx_nic_push_rx_indir_table(efx); |
1659 | 1659 | ||
1660 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | 1660 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be |
1661 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | 1661 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. |
1662 | */ | 1662 | */ |
1663 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | 1663 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); |
1664 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); | 1664 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); |
1665 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); | 1665 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); |
1666 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); | 1666 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); |
1667 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0); | 1667 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0); |
1668 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); | 1668 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); |
1669 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | 1669 | /* Enable SW_EV to inherit in char driver - assume harmless here */ |
1670 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | 1670 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); |
1671 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | 1671 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
1672 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); | 1672 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); |
1673 | /* Disable hardware watchdog which can misfire */ | 1673 | /* Disable hardware watchdog which can misfire */ |
1674 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); | 1674 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); |
1675 | /* Squash TX of packets of 16 bytes or less */ | 1675 | /* Squash TX of packets of 16 bytes or less */ |
1676 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 1676 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
1677 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | 1677 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); |
1678 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | 1678 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); |
1679 | } | 1679 | } |
1680 | 1680 | ||
1681 | /* Register dump */ | 1681 | /* Register dump */ |
1682 | 1682 | ||
1683 | #define REGISTER_REVISION_A 1 | 1683 | #define REGISTER_REVISION_A 1 |
1684 | #define REGISTER_REVISION_B 2 | 1684 | #define REGISTER_REVISION_B 2 |
1685 | #define REGISTER_REVISION_C 3 | 1685 | #define REGISTER_REVISION_C 3 |
1686 | #define REGISTER_REVISION_Z 3 /* latest revision */ | 1686 | #define REGISTER_REVISION_Z 3 /* latest revision */ |
1687 | 1687 | ||
1688 | struct efx_nic_reg { | 1688 | struct efx_nic_reg { |
1689 | u32 offset:24; | 1689 | u32 offset:24; |
1690 | u32 min_revision:2, max_revision:2; | 1690 | u32 min_revision:2, max_revision:2; |
1691 | }; | 1691 | }; |
1692 | 1692 | ||
1693 | #define REGISTER(name, min_rev, max_rev) { \ | 1693 | #define REGISTER(name, min_rev, max_rev) { \ |
1694 | FR_ ## min_rev ## max_rev ## _ ## name, \ | 1694 | FR_ ## min_rev ## max_rev ## _ ## name, \ |
1695 | REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ | 1695 | REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ |
1696 | } | 1696 | } |
1697 | #define REGISTER_AA(name) REGISTER(name, A, A) | 1697 | #define REGISTER_AA(name) REGISTER(name, A, A) |
1698 | #define REGISTER_AB(name) REGISTER(name, A, B) | 1698 | #define REGISTER_AB(name) REGISTER(name, A, B) |
1699 | #define REGISTER_AZ(name) REGISTER(name, A, Z) | 1699 | #define REGISTER_AZ(name) REGISTER(name, A, Z) |
1700 | #define REGISTER_BB(name) REGISTER(name, B, B) | 1700 | #define REGISTER_BB(name) REGISTER(name, B, B) |
1701 | #define REGISTER_BZ(name) REGISTER(name, B, Z) | 1701 | #define REGISTER_BZ(name) REGISTER(name, B, Z) |
1702 | #define REGISTER_CZ(name) REGISTER(name, C, Z) | 1702 | #define REGISTER_CZ(name) REGISTER(name, C, Z) |
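Expanding one entry by hand shows how the token pasting works:

/* REGISTER_AB(NIC_STAT), i.e. REGISTER(NIC_STAT, A, B), expands to: */
{ FR_AB_NIC_STAT, REGISTER_REVISION_A, REGISTER_REVISION_B }
/* i.e. { FR_AB_NIC_STAT, 1, 2 }: present on revisions A through B. */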
1703 | 1703 | ||
1704 | static const struct efx_nic_reg efx_nic_regs[] = { | 1704 | static const struct efx_nic_reg efx_nic_regs[] = { |
1705 | REGISTER_AZ(ADR_REGION), | 1705 | REGISTER_AZ(ADR_REGION), |
1706 | REGISTER_AZ(INT_EN_KER), | 1706 | REGISTER_AZ(INT_EN_KER), |
1707 | REGISTER_BZ(INT_EN_CHAR), | 1707 | REGISTER_BZ(INT_EN_CHAR), |
1708 | REGISTER_AZ(INT_ADR_KER), | 1708 | REGISTER_AZ(INT_ADR_KER), |
1709 | REGISTER_BZ(INT_ADR_CHAR), | 1709 | REGISTER_BZ(INT_ADR_CHAR), |
1710 | /* INT_ACK_KER is WO */ | 1710 | /* INT_ACK_KER is WO */ |
1711 | /* INT_ISR0 is RC */ | 1711 | /* INT_ISR0 is RC */ |
1712 | REGISTER_AZ(HW_INIT), | 1712 | REGISTER_AZ(HW_INIT), |
1713 | REGISTER_CZ(USR_EV_CFG), | 1713 | REGISTER_CZ(USR_EV_CFG), |
1714 | REGISTER_AB(EE_SPI_HCMD), | 1714 | REGISTER_AB(EE_SPI_HCMD), |
1715 | REGISTER_AB(EE_SPI_HADR), | 1715 | REGISTER_AB(EE_SPI_HADR), |
1716 | REGISTER_AB(EE_SPI_HDATA), | 1716 | REGISTER_AB(EE_SPI_HDATA), |
1717 | REGISTER_AB(EE_BASE_PAGE), | 1717 | REGISTER_AB(EE_BASE_PAGE), |
1718 | REGISTER_AB(EE_VPD_CFG0), | 1718 | REGISTER_AB(EE_VPD_CFG0), |
1719 | /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ | 1719 | /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ |
1720 | /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ | 1720 | /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ |
1721 | /* PCIE_CORE_INDIRECT is indirect */ | 1721 | /* PCIE_CORE_INDIRECT is indirect */ |
1722 | REGISTER_AB(NIC_STAT), | 1722 | REGISTER_AB(NIC_STAT), |
1723 | REGISTER_AB(GPIO_CTL), | 1723 | REGISTER_AB(GPIO_CTL), |
1724 | REGISTER_AB(GLB_CTL), | 1724 | REGISTER_AB(GLB_CTL), |
1725 | /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ | 1725 | /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ |
1726 | REGISTER_BZ(DP_CTRL), | 1726 | REGISTER_BZ(DP_CTRL), |
1727 | REGISTER_AZ(MEM_STAT), | 1727 | REGISTER_AZ(MEM_STAT), |
1728 | REGISTER_AZ(CS_DEBUG), | 1728 | REGISTER_AZ(CS_DEBUG), |
1729 | REGISTER_AZ(ALTERA_BUILD), | 1729 | REGISTER_AZ(ALTERA_BUILD), |
1730 | REGISTER_AZ(CSR_SPARE), | 1730 | REGISTER_AZ(CSR_SPARE), |
1731 | REGISTER_AB(PCIE_SD_CTL0123), | 1731 | REGISTER_AB(PCIE_SD_CTL0123), |
1732 | REGISTER_AB(PCIE_SD_CTL45), | 1732 | REGISTER_AB(PCIE_SD_CTL45), |
1733 | REGISTER_AB(PCIE_PCS_CTL_STAT), | 1733 | REGISTER_AB(PCIE_PCS_CTL_STAT), |
1734 | /* DEBUG_DATA_OUT is not used */ | 1734 | /* DEBUG_DATA_OUT is not used */ |
1735 | /* DRV_EV is WO */ | 1735 | /* DRV_EV is WO */ |
1736 | REGISTER_AZ(EVQ_CTL), | 1736 | REGISTER_AZ(EVQ_CTL), |
1737 | REGISTER_AZ(EVQ_CNT1), | 1737 | REGISTER_AZ(EVQ_CNT1), |
1738 | REGISTER_AZ(EVQ_CNT2), | 1738 | REGISTER_AZ(EVQ_CNT2), |
1739 | REGISTER_AZ(BUF_TBL_CFG), | 1739 | REGISTER_AZ(BUF_TBL_CFG), |
1740 | REGISTER_AZ(SRM_RX_DC_CFG), | 1740 | REGISTER_AZ(SRM_RX_DC_CFG), |
1741 | REGISTER_AZ(SRM_TX_DC_CFG), | 1741 | REGISTER_AZ(SRM_TX_DC_CFG), |
1742 | REGISTER_AZ(SRM_CFG), | 1742 | REGISTER_AZ(SRM_CFG), |
1743 | /* BUF_TBL_UPD is WO */ | 1743 | /* BUF_TBL_UPD is WO */ |
1744 | REGISTER_AZ(SRM_UPD_EVQ), | 1744 | REGISTER_AZ(SRM_UPD_EVQ), |
1745 | REGISTER_AZ(SRAM_PARITY), | 1745 | REGISTER_AZ(SRAM_PARITY), |
1746 | REGISTER_AZ(RX_CFG), | 1746 | REGISTER_AZ(RX_CFG), |
1747 | REGISTER_BZ(RX_FILTER_CTL), | 1747 | REGISTER_BZ(RX_FILTER_CTL), |
1748 | /* RX_FLUSH_DESCQ is WO */ | 1748 | /* RX_FLUSH_DESCQ is WO */ |
1749 | REGISTER_AZ(RX_DC_CFG), | 1749 | REGISTER_AZ(RX_DC_CFG), |
1750 | REGISTER_AZ(RX_DC_PF_WM), | 1750 | REGISTER_AZ(RX_DC_PF_WM), |
1751 | REGISTER_BZ(RX_RSS_TKEY), | 1751 | REGISTER_BZ(RX_RSS_TKEY), |
1752 | /* RX_NODESC_DROP is RC */ | 1752 | /* RX_NODESC_DROP is RC */ |
1753 | REGISTER_AA(RX_SELF_RST), | 1753 | REGISTER_AA(RX_SELF_RST), |
1754 | /* RX_DEBUG, RX_PUSH_DROP are not used */ | 1754 | /* RX_DEBUG, RX_PUSH_DROP are not used */ |
1755 | REGISTER_CZ(RX_RSS_IPV6_REG1), | 1755 | REGISTER_CZ(RX_RSS_IPV6_REG1), |
1756 | REGISTER_CZ(RX_RSS_IPV6_REG2), | 1756 | REGISTER_CZ(RX_RSS_IPV6_REG2), |
1757 | REGISTER_CZ(RX_RSS_IPV6_REG3), | 1757 | REGISTER_CZ(RX_RSS_IPV6_REG3), |
1758 | /* TX_FLUSH_DESCQ is WO */ | 1758 | /* TX_FLUSH_DESCQ is WO */ |
1759 | REGISTER_AZ(TX_DC_CFG), | 1759 | REGISTER_AZ(TX_DC_CFG), |
1760 | REGISTER_AA(TX_CHKSM_CFG), | 1760 | REGISTER_AA(TX_CHKSM_CFG), |
1761 | REGISTER_AZ(TX_CFG), | 1761 | REGISTER_AZ(TX_CFG), |
1762 | /* TX_PUSH_DROP is not used */ | 1762 | /* TX_PUSH_DROP is not used */ |
1763 | REGISTER_AZ(TX_RESERVED), | 1763 | REGISTER_AZ(TX_RESERVED), |
1764 | REGISTER_BZ(TX_PACE), | 1764 | REGISTER_BZ(TX_PACE), |
1765 | /* TX_PACE_DROP_QID is RC */ | 1765 | /* TX_PACE_DROP_QID is RC */ |
1766 | REGISTER_BB(TX_VLAN), | 1766 | REGISTER_BB(TX_VLAN), |
1767 | REGISTER_BZ(TX_IPFIL_PORTEN), | 1767 | REGISTER_BZ(TX_IPFIL_PORTEN), |
1768 | REGISTER_AB(MD_TXD), | 1768 | REGISTER_AB(MD_TXD), |
1769 | REGISTER_AB(MD_RXD), | 1769 | REGISTER_AB(MD_RXD), |
1770 | REGISTER_AB(MD_CS), | 1770 | REGISTER_AB(MD_CS), |
1771 | REGISTER_AB(MD_PHY_ADR), | 1771 | REGISTER_AB(MD_PHY_ADR), |
1772 | REGISTER_AB(MD_ID), | 1772 | REGISTER_AB(MD_ID), |
1773 | /* MD_STAT is RC */ | 1773 | /* MD_STAT is RC */ |
1774 | REGISTER_AB(MAC_STAT_DMA), | 1774 | REGISTER_AB(MAC_STAT_DMA), |
1775 | REGISTER_AB(MAC_CTRL), | 1775 | REGISTER_AB(MAC_CTRL), |
1776 | REGISTER_BB(GEN_MODE), | 1776 | REGISTER_BB(GEN_MODE), |
1777 | REGISTER_AB(MAC_MC_HASH_REG0), | 1777 | REGISTER_AB(MAC_MC_HASH_REG0), |
1778 | REGISTER_AB(MAC_MC_HASH_REG1), | 1778 | REGISTER_AB(MAC_MC_HASH_REG1), |
1779 | REGISTER_AB(GM_CFG1), | 1779 | REGISTER_AB(GM_CFG1), |
1780 | REGISTER_AB(GM_CFG2), | 1780 | REGISTER_AB(GM_CFG2), |
1781 | /* GM_IPG and GM_HD are not used */ | 1781 | /* GM_IPG and GM_HD are not used */ |
1782 | REGISTER_AB(GM_MAX_FLEN), | 1782 | REGISTER_AB(GM_MAX_FLEN), |
1783 | /* GM_TEST is not used */ | 1783 | /* GM_TEST is not used */ |
1784 | REGISTER_AB(GM_ADR1), | 1784 | REGISTER_AB(GM_ADR1), |
1785 | REGISTER_AB(GM_ADR2), | 1785 | REGISTER_AB(GM_ADR2), |
1786 | REGISTER_AB(GMF_CFG0), | 1786 | REGISTER_AB(GMF_CFG0), |
1787 | REGISTER_AB(GMF_CFG1), | 1787 | REGISTER_AB(GMF_CFG1), |
1788 | REGISTER_AB(GMF_CFG2), | 1788 | REGISTER_AB(GMF_CFG2), |
1789 | REGISTER_AB(GMF_CFG3), | 1789 | REGISTER_AB(GMF_CFG3), |
1790 | REGISTER_AB(GMF_CFG4), | 1790 | REGISTER_AB(GMF_CFG4), |
1791 | REGISTER_AB(GMF_CFG5), | 1791 | REGISTER_AB(GMF_CFG5), |
1792 | REGISTER_BB(TX_SRC_MAC_CTL), | 1792 | REGISTER_BB(TX_SRC_MAC_CTL), |
1793 | REGISTER_AB(XM_ADR_LO), | 1793 | REGISTER_AB(XM_ADR_LO), |
1794 | REGISTER_AB(XM_ADR_HI), | 1794 | REGISTER_AB(XM_ADR_HI), |
1795 | REGISTER_AB(XM_GLB_CFG), | 1795 | REGISTER_AB(XM_GLB_CFG), |
1796 | REGISTER_AB(XM_TX_CFG), | 1796 | REGISTER_AB(XM_TX_CFG), |
1797 | REGISTER_AB(XM_RX_CFG), | 1797 | REGISTER_AB(XM_RX_CFG), |
1798 | REGISTER_AB(XM_MGT_INT_MASK), | 1798 | REGISTER_AB(XM_MGT_INT_MASK), |
1799 | REGISTER_AB(XM_FC), | 1799 | REGISTER_AB(XM_FC), |
1800 | REGISTER_AB(XM_PAUSE_TIME), | 1800 | REGISTER_AB(XM_PAUSE_TIME), |
1801 | REGISTER_AB(XM_TX_PARAM), | 1801 | REGISTER_AB(XM_TX_PARAM), |
1802 | REGISTER_AB(XM_RX_PARAM), | 1802 | REGISTER_AB(XM_RX_PARAM), |
1803 | /* XM_MGT_INT_MSK (note no 'A') is RC */ | 1803 | /* XM_MGT_INT_MSK (note no 'A') is RC */ |
1804 | REGISTER_AB(XX_PWR_RST), | 1804 | REGISTER_AB(XX_PWR_RST), |
1805 | REGISTER_AB(XX_SD_CTL), | 1805 | REGISTER_AB(XX_SD_CTL), |
1806 | REGISTER_AB(XX_TXDRV_CTL), | 1806 | REGISTER_AB(XX_TXDRV_CTL), |
1807 | /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ | 1807 | /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ |
1808 | /* XX_CORE_STAT is partly RC */ | 1808 | /* XX_CORE_STAT is partly RC */ |
1809 | }; | 1809 | }; |
1810 | 1810 | ||
1811 | struct efx_nic_reg_table { | 1811 | struct efx_nic_reg_table { |
1812 | u32 offset:24; | 1812 | u32 offset:24; |
1813 | u32 min_revision:2, max_revision:2; | 1813 | u32 min_revision:2, max_revision:2; |
1814 | u32 step:6, rows:21; | 1814 | u32 step:6, rows:21; |
1815 | }; | 1815 | }; |
1816 | 1816 | ||
1817 | #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ | 1817 | #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ |
1818 | offset, \ | 1818 | offset, \ |
1819 | REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ | 1819 | REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ |
1820 | step, rows \ | 1820 | step, rows \ |
1821 | } | 1821 | } |
1822 | #define REGISTER_TABLE(name, min_rev, max_rev) \ | 1822 | #define REGISTER_TABLE(name, min_rev, max_rev) \ |
1823 | REGISTER_TABLE_DIMENSIONS( \ | 1823 | REGISTER_TABLE_DIMENSIONS( \ |
1824 | name, FR_ ## min_rev ## max_rev ## _ ## name, \ | 1824 | name, FR_ ## min_rev ## max_rev ## _ ## name, \ |
1825 | min_rev, max_rev, \ | 1825 | min_rev, max_rev, \ |
1826 | FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ | 1826 | FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ |
1827 | FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) | 1827 | FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) |
1828 | #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) | 1828 | #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) |
1829 | #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) | 1829 | #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) |
1830 | #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) | 1830 | #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) |
1831 | #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) | 1831 | #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) |
1832 | #define REGISTER_TABLE_BB_CZ(name) \ | 1832 | #define REGISTER_TABLE_BB_CZ(name) \ |
1833 | REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ | 1833 | REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ |
1834 | FR_BZ_ ## name ## _STEP, \ | 1834 | FR_BZ_ ## name ## _STEP, \ |
1835 | FR_BB_ ## name ## _ROWS), \ | 1835 | FR_BB_ ## name ## _ROWS), \ |
1836 | REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ | 1836 | REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ |
1837 | FR_BZ_ ## name ## _STEP, \ | 1837 | FR_BZ_ ## name ## _STEP, \ |
1838 | FR_CZ_ ## name ## _ROWS) | 1838 | FR_CZ_ ## name ## _ROWS) |
1839 | #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) | 1839 | #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) |
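The same expansion exercise for a table entry. Note also that REGISTER_TABLE_BB_CZ deliberately emits two initializers, because the row count differs between the B and C revisions while the offset and step do not:

/* REGISTER_TABLE_AA(EVQ_PTR_TBL_KER) expands to: */
{ FR_AA_EVQ_PTR_TBL_KER, REGISTER_REVISION_A, REGISTER_REVISION_A,
  FR_AA_EVQ_PTR_TBL_KER_STEP, FR_AA_EVQ_PTR_TBL_KER_ROWS }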
1840 | 1840 | ||
1841 | static const struct efx_nic_reg_table efx_nic_reg_tables[] = { | 1841 | static const struct efx_nic_reg_table efx_nic_reg_tables[] = { |
1842 | /* DRIVER is not used */ | 1842 | /* DRIVER is not used */ |
1843 | /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ | 1843 | /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ |
1844 | REGISTER_TABLE_BB(TX_IPFIL_TBL), | 1844 | REGISTER_TABLE_BB(TX_IPFIL_TBL), |
1845 | REGISTER_TABLE_BB(TX_SRC_MAC_TBL), | 1845 | REGISTER_TABLE_BB(TX_SRC_MAC_TBL), |
1846 | REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), | 1846 | REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), |
1847 | REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), | 1847 | REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), |
1848 | REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), | 1848 | REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), |
1849 | REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), | 1849 | REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), |
1850 | REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), | 1850 | REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), |
1851 | REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), | 1851 | REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), |
1852 | /* We can't reasonably read all of the buffer table (up to 8MB!). | 1852 | /* We can't reasonably read all of the buffer table (up to 8MB!). |
1853 | * However, this driver will only use a few entries. Reading | 1853 | * However, this driver will only use a few entries. Reading |
1854 | * 1K entries allows for some expansion of queue count and | 1854 | * 1K entries allows for some expansion of queue count and |
1855 | * size before we need to change the version. */ | 1855 | * size before we need to change the version. */ |
1856 | REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, | 1856 | REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, |
1857 | A, A, 8, 1024), | 1857 | A, A, 8, 1024), |
1858 | REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, | 1858 | REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, |
1859 | B, Z, 8, 1024), | 1859 | B, Z, 8, 1024), |
1860 | REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), | 1860 | REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), |
1861 | REGISTER_TABLE_BB_CZ(TIMER_TBL), | 1861 | REGISTER_TABLE_BB_CZ(TIMER_TBL), |
1862 | REGISTER_TABLE_BB_CZ(TX_PACE_TBL), | 1862 | REGISTER_TABLE_BB_CZ(TX_PACE_TBL), |
1863 | REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), | 1863 | REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), |
1864 | /* TX_FILTER_TBL0 is huge and not used by this driver */ | 1864 | /* TX_FILTER_TBL0 is huge and not used by this driver */ |
1865 | REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), | 1865 | REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), |
1866 | REGISTER_TABLE_CZ(MC_TREG_SMEM), | 1866 | REGISTER_TABLE_CZ(MC_TREG_SMEM), |
1867 | /* MSIX_PBA_TABLE is not mapped */ | 1867 | /* MSIX_PBA_TABLE is not mapped */ |
1868 | /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */ | 1868 | /* SRM_DBG is not mapped (and is redundant with BUF_FULL_TBL) */ |
1869 | REGISTER_TABLE_BZ(RX_FILTER_TBL0), | 1869 | REGISTER_TABLE_BZ(RX_FILTER_TBL0), |
1870 | }; | 1870 | }; |
1871 | 1871 | ||
1872 | size_t efx_nic_get_regs_len(struct efx_nic *efx) | 1872 | size_t efx_nic_get_regs_len(struct efx_nic *efx) |
1873 | { | 1873 | { |
1874 | const struct efx_nic_reg *reg; | 1874 | const struct efx_nic_reg *reg; |
1875 | const struct efx_nic_reg_table *table; | 1875 | const struct efx_nic_reg_table *table; |
1876 | size_t len = 0; | 1876 | size_t len = 0; |
1877 | 1877 | ||
1878 | for (reg = efx_nic_regs; | 1878 | for (reg = efx_nic_regs; |
1879 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); | 1879 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); |
1880 | reg++) | 1880 | reg++) |
1881 | if (efx->type->revision >= reg->min_revision && | 1881 | if (efx->type->revision >= reg->min_revision && |
1882 | efx->type->revision <= reg->max_revision) | 1882 | efx->type->revision <= reg->max_revision) |
1883 | len += sizeof(efx_oword_t); | 1883 | len += sizeof(efx_oword_t); |
1884 | 1884 | ||
1885 | for (table = efx_nic_reg_tables; | 1885 | for (table = efx_nic_reg_tables; |
1886 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); | 1886 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); |
1887 | table++) | 1887 | table++) |
1888 | if (efx->type->revision >= table->min_revision && | 1888 | if (efx->type->revision >= table->min_revision && |
1889 | efx->type->revision <= table->max_revision) | 1889 | efx->type->revision <= table->max_revision) |
1890 | len += table->rows * min_t(size_t, table->step, 16); | 1890 | len += table->rows * min_t(size_t, table->step, 16); |
1891 | 1891 | ||
1892 | return len; | 1892 | return len; |
1893 | } | 1893 | } |
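To make the arithmetic concrete: each qualifying single register contributes sizeof(efx_oword_t), which is 16 bytes, and each qualifying table contributes rows * min(step, 16). A worked example using the BUF_FULL_TBL dimensions given above:

/* BUF_FULL_TBL: step 8, 1024 rows => 1024 * min(8, 16) = 8192 bytes.
 * A step-32 (interleaved) table contributes 16 bytes per row, since
 * only one 128-bit register is captured per row. */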
1894 | 1894 | ||
1895 | void efx_nic_get_regs(struct efx_nic *efx, void *buf) | 1895 | void efx_nic_get_regs(struct efx_nic *efx, void *buf) |
1896 | { | 1896 | { |
1897 | const struct efx_nic_reg *reg; | 1897 | const struct efx_nic_reg *reg; |
1898 | const struct efx_nic_reg_table *table; | 1898 | const struct efx_nic_reg_table *table; |
1899 | 1899 | ||
1900 | for (reg = efx_nic_regs; | 1900 | for (reg = efx_nic_regs; |
1901 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); | 1901 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); |
1902 | reg++) { | 1902 | reg++) { |
1903 | if (efx->type->revision >= reg->min_revision && | 1903 | if (efx->type->revision >= reg->min_revision && |
1904 | efx->type->revision <= reg->max_revision) { | 1904 | efx->type->revision <= reg->max_revision) { |
1905 | efx_reado(efx, (efx_oword_t *)buf, reg->offset); | 1905 | efx_reado(efx, (efx_oword_t *)buf, reg->offset); |
1906 | buf += sizeof(efx_oword_t); | 1906 | buf += sizeof(efx_oword_t); |
1907 | } | 1907 | } |
1908 | } | 1908 | } |
1909 | 1909 | ||
1910 | for (table = efx_nic_reg_tables; | 1910 | for (table = efx_nic_reg_tables; |
1911 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); | 1911 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); |
1912 | table++) { | 1912 | table++) { |
1913 | size_t size, i; | 1913 | size_t size, i; |
1914 | 1914 | ||
1915 | if (!(efx->type->revision >= table->min_revision && | 1915 | if (!(efx->type->revision >= table->min_revision && |
1916 | efx->type->revision <= table->max_revision)) | 1916 | efx->type->revision <= table->max_revision)) |
1917 | continue; | 1917 | continue; |
1918 | 1918 | ||
1919 | size = min_t(size_t, table->step, 16); | 1919 | size = min_t(size_t, table->step, 16); |
1920 | 1920 | ||
1921 | for (i = 0; i < table->rows; i++) { | 1921 | for (i = 0; i < table->rows; i++) { |
1922 | switch (table->step) { | 1922 | switch (table->step) { |
1923 | case 4: /* 32-bit register or SRAM */ | 1923 | case 4: /* 32-bit register or SRAM */ |
1924 | efx_readd_table(efx, buf, table->offset, i); | 1924 | efx_readd_table(efx, buf, table->offset, i); |
1925 | break; | 1925 | break; |
1926 | case 8: /* 64-bit SRAM */ | 1926 | case 8: /* 64-bit SRAM */ |
1927 | efx_sram_readq(efx, | 1927 | efx_sram_readq(efx, |
1928 | efx->membase + table->offset, | 1928 | efx->membase + table->offset, |
1929 | buf, i); | 1929 | buf, i); |
1930 | break; | 1930 | break; |
1931 | case 16: /* 128-bit register */ | 1931 | case 16: /* 128-bit register */ |
1932 | efx_reado_table(efx, buf, table->offset, i); | 1932 | efx_reado_table(efx, buf, table->offset, i); |
1933 | break; | 1933 | break; |
1934 | case 32: /* 128-bit register, interleaved */ | 1934 | case 32: /* 128-bit register, interleaved */ |
1935 | efx_reado_table(efx, buf, table->offset, 2 * i); | 1935 | efx_reado_table(efx, buf, table->offset, 2 * i); |
1936 | break; | 1936 | break; |
1937 | default: | 1937 | default: |
1938 | WARN_ON(1); | 1938 | WARN_ON(1); |
1939 | return; | 1939 | return; |
1940 | } | 1940 | } |
1941 | buf += size; | 1941 | buf += size; |
1942 | } | 1942 | } |
1943 | } | 1943 | } |
1944 | } | 1944 | } |
1945 | 1945 |
drivers/net/sfc/selftest.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2009 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/netdevice.h> | 11 | #include <linux/netdevice.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/ethtool.h> | 16 | #include <linux/ethtool.h> |
17 | #include <linux/ip.h> | 17 | #include <linux/ip.h> |
18 | #include <linux/in.h> | 18 | #include <linux/in.h> |
19 | #include <linux/udp.h> | 19 | #include <linux/udp.h> |
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | #include <asm/io.h> | 22 | #include <asm/io.h> |
23 | #include "net_driver.h" | 23 | #include "net_driver.h" |
24 | #include "efx.h" | 24 | #include "efx.h" |
25 | #include "nic.h" | 25 | #include "nic.h" |
26 | #include "selftest.h" | 26 | #include "selftest.h" |
27 | #include "workarounds.h" | 27 | #include "workarounds.h" |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Loopback test packet structure | 30 | * Loopback test packet structure |
31 | * | 31 | * |
32 | * The self-test should stress every RSS vector, and unfortunately | 32 | * The self-test should stress every RSS vector, and unfortunately |
33 | * Falcon only performs RSS on TCP/UDP packets. | 33 | * Falcon only performs RSS on TCP/UDP packets. |
34 | */ | 34 | */ |
35 | struct efx_loopback_payload { | 35 | struct efx_loopback_payload { |
36 | struct ethhdr header; | 36 | struct ethhdr header; |
37 | struct iphdr ip; | 37 | struct iphdr ip; |
38 | struct udphdr udp; | 38 | struct udphdr udp; |
39 | __be16 iteration; | 39 | __be16 iteration; |
40 | const char msg[64]; | 40 | const char msg[64]; |
41 | } __packed; | 41 | } __packed; |
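Because the structure is __packed there is no padding, so its on-wire size is just the sum of its members; checking that arithmetic (this note is illustrative, not from the source):

/* sizeof(struct efx_loopback_payload)
 *	= 14 (ethhdr) + 20 (iphdr) + 8 (udphdr) + 2 (iteration) + 64 (msg)
 *	= 108 bytes */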
42 | 42 | ||
43 | /* Loopback test source MAC address */ | 43 | /* Loopback test source MAC address */ |
44 | static const unsigned char payload_source[ETH_ALEN] = { | 44 | static const unsigned char payload_source[ETH_ALEN] = { |
45 | 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, | 45 | 0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b, |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static const char payload_msg[] = | 48 | static const char payload_msg[] = |
49 | "Hello world! This is an Efx loopback test in progress!"; | 49 | "Hello world! This is an Efx loopback test in progress!"; |
50 | 50 | ||
51 | /* Interrupt mode names */ | ||
52 | static const unsigned int efx_interrupt_mode_max = EFX_INT_MODE_MAX; | ||
53 | static const char *efx_interrupt_mode_names[] = { | ||
54 | [EFX_INT_MODE_MSIX] = "MSI-X", | ||
55 | [EFX_INT_MODE_MSI] = "MSI", | ||
56 | [EFX_INT_MODE_LEGACY] = "legacy", | ||
57 | }; | ||
58 | #define INT_MODE(efx) \ | ||
59 | STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode) | ||
60 | |||
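STRING_TABLE_LOOKUP() is defined elsewhere in the driver; a plausible minimal form, shown only so the INT_MODE() macro above is readable, is the following (an assumption, not quoted from the source):

/* Hypothetical sketch of the lookup helper: */
#define DEMO_STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max ? member ## _names[val] : "(invalid)")

/* With the tables above, INT_MODE(efx) then yields "MSI-X", "MSI" or
 * "legacy", guarded against an out-of-range interrupt mode. */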
51 | /** | 61 | /** |
52 | * struct efx_loopback_state - persistent state during a loopback selftest | 62 | * struct efx_loopback_state - persistent state during a loopback selftest |
53 | * @flush: Drop all packets in efx_loopback_rx_packet | 63 | * @flush: Drop all packets in efx_loopback_rx_packet |
54 | * @packet_count: Number of packets being used in this test | 64 | * @packet_count: Number of packets being used in this test |
55 | * @skbs: An array of skbs transmitted | 65 | * @skbs: An array of skbs transmitted |
56 | * @offload_csum: Checksums are being offloaded | 66 | * @offload_csum: Checksums are being offloaded |
57 | * @rx_good: RX good packet count | 67 | * @rx_good: RX good packet count |
58 | * @rx_bad: RX bad packet count | 68 | * @rx_bad: RX bad packet count |
59 | * @payload: Payload used in tests | 69 | * @payload: Payload used in tests |
60 | */ | 70 | */ |
61 | struct efx_loopback_state { | 71 | struct efx_loopback_state { |
62 | bool flush; | 72 | bool flush; |
63 | int packet_count; | 73 | int packet_count; |
64 | struct sk_buff **skbs; | 74 | struct sk_buff **skbs; |
65 | bool offload_csum; | 75 | bool offload_csum; |
66 | atomic_t rx_good; | 76 | atomic_t rx_good; |
67 | atomic_t rx_bad; | 77 | atomic_t rx_bad; |
68 | struct efx_loopback_payload payload; | 78 | struct efx_loopback_payload payload; |
69 | }; | 79 | }; |
70 | 80 | ||
71 | /************************************************************************** | 81 | /************************************************************************** |
72 | * | 82 | * |
73 | * MII, NVRAM and register tests | 83 | * MII, NVRAM and register tests |
74 | * | 84 | * |
75 | **************************************************************************/ | 85 | **************************************************************************/ |
76 | 86 | ||
77 | static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) | 87 | static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests) |
78 | { | 88 | { |
79 | int rc = 0; | 89 | int rc = 0; |
80 | 90 | ||
81 | if (efx->phy_op->test_alive) { | 91 | if (efx->phy_op->test_alive) { |
82 | rc = efx->phy_op->test_alive(efx); | 92 | rc = efx->phy_op->test_alive(efx); |
83 | tests->phy_alive = rc ? -1 : 1; | 93 | tests->phy_alive = rc ? -1 : 1; |
84 | } | 94 | } |
85 | 95 | ||
86 | return rc; | 96 | return rc; |
87 | } | 97 | } |
88 | 98 | ||
89 | static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) | 99 | static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests) |
90 | { | 100 | { |
91 | int rc = 0; | 101 | int rc = 0; |
92 | 102 | ||
93 | if (efx->type->test_nvram) { | 103 | if (efx->type->test_nvram) { |
94 | rc = efx->type->test_nvram(efx); | 104 | rc = efx->type->test_nvram(efx); |
95 | tests->nvram = rc ? -1 : 1; | 105 | tests->nvram = rc ? -1 : 1; |
96 | } | 106 | } |
97 | 107 | ||
98 | return rc; | 108 | return rc; |
99 | } | 109 | } |
100 | 110 | ||
101 | static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) | 111 | static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) |
102 | { | 112 | { |
103 | int rc = 0; | 113 | int rc = 0; |
104 | 114 | ||
105 | /* Test register access */ | 115 | /* Test register access */ |
106 | if (efx->type->test_registers) { | 116 | if (efx->type->test_registers) { |
107 | rc = efx->type->test_registers(efx); | 117 | rc = efx->type->test_registers(efx); |
108 | tests->registers = rc ? -1 : 1; | 118 | tests->registers = rc ? -1 : 1; |
109 | } | 119 | } |
110 | 120 | ||
111 | return rc; | 121 | return rc; |
112 | } | 122 | } |
113 | 123 | ||
114 | /************************************************************************** | 124 | /************************************************************************** |
115 | * | 125 | * |
116 | * Interrupt and event queue testing | 126 | * Interrupt and event queue testing |
117 | * | 127 | * |
118 | **************************************************************************/ | 128 | **************************************************************************/ |
119 | 129 | ||
120 | /* Test generation and receipt of interrupts */ | 130 | /* Test generation and receipt of interrupts */ |
121 | static int efx_test_interrupts(struct efx_nic *efx, | 131 | static int efx_test_interrupts(struct efx_nic *efx, |
122 | struct efx_self_tests *tests) | 132 | struct efx_self_tests *tests) |
123 | { | 133 | { |
124 | struct efx_channel *channel; | 134 | struct efx_channel *channel; |
125 | 135 | ||
126 | netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); | 136 | netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n"); |
127 | tests->interrupt = -1; | 137 | tests->interrupt = -1; |
128 | 138 | ||
129 | /* Reset interrupt flag */ | 139 | /* Reset interrupt flag */ |
130 | efx->last_irq_cpu = -1; | 140 | efx->last_irq_cpu = -1; |
131 | smp_wmb(); | 141 | smp_wmb(); |
132 | 142 | ||
133 | /* ACK each interrupting event queue. Receiving an interrupt due to | 143 | /* ACK each interrupting event queue. Receiving an interrupt due to |
134 | * traffic before a test event is raised is considered a pass */ | 144 | * traffic before a test event is raised is considered a pass */ |
135 | efx_for_each_channel(channel, efx) { | 145 | efx_for_each_channel(channel, efx) { |
136 | if (channel->work_pending) | 146 | if (channel->work_pending) |
137 | efx_process_channel_now(channel); | 147 | efx_process_channel_now(channel); |
138 | if (efx->last_irq_cpu >= 0) | 148 | if (efx->last_irq_cpu >= 0) |
139 | goto success; | 149 | goto success; |
140 | } | 150 | } |
141 | 151 | ||
142 | efx_nic_generate_interrupt(efx); | 152 | efx_nic_generate_interrupt(efx); |
143 | 153 | ||
144 | /* Wait for arrival of test interrupt. */ | 154 | /* Wait for arrival of test interrupt. */ |
145 | netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); | 155 | netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n"); |
146 | schedule_timeout_uninterruptible(HZ / 10); | 156 | schedule_timeout_uninterruptible(HZ / 10); |
147 | if (efx->last_irq_cpu >= 0) | 157 | if (efx->last_irq_cpu >= 0) |
148 | goto success; | 158 | goto success; |
149 | 159 | ||
150 | netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); | 160 | netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n"); |
151 | return -ETIMEDOUT; | 161 | return -ETIMEDOUT; |
152 | 162 | ||
153 | success: | 163 | success: |
154 | netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", | 164 | netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n", |
155 | INT_MODE(efx), | 165 | INT_MODE(efx), |
156 | efx->last_irq_cpu); | 166 | efx->last_irq_cpu); |
157 | tests->interrupt = 1; | 167 | tests->interrupt = 1; |
158 | return 0; | 168 | return 0; |
159 | } | 169 | } |
160 | 170 | ||
161 | /* Test generation and receipt of interrupting events */ | 171 | /* Test generation and receipt of interrupting events */ |
162 | static int efx_test_eventq_irq(struct efx_channel *channel, | 172 | static int efx_test_eventq_irq(struct efx_channel *channel, |
163 | struct efx_self_tests *tests) | 173 | struct efx_self_tests *tests) |
164 | { | 174 | { |
165 | struct efx_nic *efx = channel->efx; | 175 | struct efx_nic *efx = channel->efx; |
166 | unsigned int magic_count, count; | 176 | unsigned int magic_count, count; |
167 | 177 | ||
168 | tests->eventq_dma[channel->channel] = -1; | 178 | tests->eventq_dma[channel->channel] = -1; |
169 | tests->eventq_int[channel->channel] = -1; | 179 | tests->eventq_int[channel->channel] = -1; |
170 | tests->eventq_poll[channel->channel] = -1; | 180 | tests->eventq_poll[channel->channel] = -1; |
171 | 181 | ||
172 | magic_count = channel->magic_count; | 182 | magic_count = channel->magic_count; |
173 | channel->efx->last_irq_cpu = -1; | 183 | channel->efx->last_irq_cpu = -1; |
174 | smp_wmb(); | 184 | smp_wmb(); |
175 | 185 | ||
176 | efx_nic_generate_test_event(channel); | 186 | efx_nic_generate_test_event(channel); |
177 | 187 | ||
178 | /* Wait for arrival of interrupt */ | 188 | /* Wait for arrival of interrupt */ |
179 | count = 0; | 189 | count = 0; |
180 | do { | 190 | do { |
181 | schedule_timeout_uninterruptible(HZ / 100); | 191 | schedule_timeout_uninterruptible(HZ / 100); |
182 | 192 | ||
183 | if (channel->work_pending) | 193 | if (channel->work_pending) |
184 | efx_process_channel_now(channel); | 194 | efx_process_channel_now(channel); |
185 | 195 | ||
186 | if (channel->magic_count != magic_count) | 196 | if (channel->magic_count != magic_count) |
187 | goto eventq_ok; | 197 | goto eventq_ok; |
188 | } while (++count < 2); | 198 | } while (++count < 2); |
189 | 199 | ||
190 | netif_err(efx, drv, efx->net_dev, | 200 | netif_err(efx, drv, efx->net_dev, |
191 | "channel %d timed out waiting for event queue\n", | 201 | "channel %d timed out waiting for event queue\n", |
192 | channel->channel); | 202 | channel->channel); |
193 | 203 | ||
194 | /* See if interrupt arrived */ | 204 | /* See if interrupt arrived */ |
195 | if (channel->efx->last_irq_cpu >= 0) { | 205 | if (channel->efx->last_irq_cpu >= 0) { |
196 | netif_err(efx, drv, efx->net_dev, | 206 | netif_err(efx, drv, efx->net_dev, |
197 | "channel %d saw interrupt on CPU%d " | 207 | "channel %d saw interrupt on CPU%d " |
198 | "during event queue test\n", channel->channel, | 208 | "during event queue test\n", channel->channel, |
199 | raw_smp_processor_id()); | 209 | raw_smp_processor_id()); |
200 | tests->eventq_int[channel->channel] = 1; | 210 | tests->eventq_int[channel->channel] = 1; |
201 | } | 211 | } |
202 | 212 | ||
203 | /* Check to see if event was received even if interrupt wasn't */ | 213 | /* Check to see if event was received even if interrupt wasn't */ |
204 | efx_process_channel_now(channel); | 214 | efx_process_channel_now(channel); |
205 | if (channel->magic_count != magic_count) { | 215 | if (channel->magic_count != magic_count) { |
206 | netif_err(efx, drv, efx->net_dev, | 216 | netif_err(efx, drv, efx->net_dev, |
207 | "channel %d event was generated, but " | 217 | "channel %d event was generated, but " |
208 | "failed to trigger an interrupt\n", channel->channel); | 218 | "failed to trigger an interrupt\n", channel->channel); |
209 | tests->eventq_dma[channel->channel] = 1; | 219 | tests->eventq_dma[channel->channel] = 1; |
210 | } | 220 | } |
211 | 221 | ||
212 | return -ETIMEDOUT; | 222 | return -ETIMEDOUT; |
213 | eventq_ok: | 223 | eventq_ok: |
214 | netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n", | 224 | netif_dbg(efx, drv, efx->net_dev, "channel %d event queue passed\n", |
215 | channel->channel); | 225 | channel->channel); |
216 | tests->eventq_dma[channel->channel] = 1; | 226 | tests->eventq_dma[channel->channel] = 1; |
217 | tests->eventq_int[channel->channel] = 1; | 227 | tests->eventq_int[channel->channel] = 1; |
218 | tests->eventq_poll[channel->channel] = 1; | 228 | tests->eventq_poll[channel->channel] = 1; |
219 | return 0; | 229 | return 0; |
220 | } | 230 | } |
221 | 231 | ||
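efx_test_eventq_irq() separates three outcomes: everything worked (magic_count advanced), the interrupt fired but the event never surfaced, or the event was delivered by DMA without raising an interrupt. A self-contained toy model of that triage, with a simulated channel in place of real hardware (the helpers below are illustrative, not the driver's efx_nic_* functions):

#include <stdio.h>

struct chan {
        unsigned int magic_count;  /* bumped when a test event is consumed */
        int irq_seen;              /* set by the (simulated) IRQ handler */
        int event_pending;         /* simulated hardware state */
};

static void trigger_test_event(struct chan *ch) { ch->event_pending = 1; }

static void poll_events(struct chan *ch)
{
        if (ch->event_pending) {
                ch->event_pending = 0;
                ch->magic_count++;
                ch->irq_seen = 1;  /* in this happy-path sim the IRQ fires too */
        }
}

static int test_eventq(struct chan *ch)
{
        unsigned int before = ch->magic_count;
        int tries;

        ch->irq_seen = 0;
        trigger_test_event(ch);

        for (tries = 0; tries < 2; tries++) {
                poll_events(ch);
                if (ch->magic_count != before)
                        return 0;  /* event queue passed */
        }

        /* Timed out: report which half of the path failed. */
        if (ch->irq_seen)
                printf("saw interrupt, but no event\n");
        else if (ch->magic_count != before)
                printf("event arrived, but no interrupt\n");
        return -1;
}

int main(void)
{
        struct chan ch = { 0, 0, 0 };
        return test_eventq(&ch) ? 1 : 0;  /* passes: exit status 0 */
}
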
222 | static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, | 232 | static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests, |
223 | unsigned flags) | 233 | unsigned flags) |
224 | { | 234 | { |
225 | int rc; | 235 | int rc; |
226 | 236 | ||
227 | if (!efx->phy_op->run_tests) | 237 | if (!efx->phy_op->run_tests) |
228 | return 0; | 238 | return 0; |
229 | 239 | ||
230 | mutex_lock(&efx->mac_lock); | 240 | mutex_lock(&efx->mac_lock); |
231 | rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); | 241 | rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags); |
232 | mutex_unlock(&efx->mac_lock); | 242 | mutex_unlock(&efx->mac_lock); |
233 | return rc; | 243 | return rc; |
234 | } | 244 | } |
235 | 245 | ||
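Note that run_tests is an optional member of the PHY operations table: a PHY driver that implements no self-tests simply leaves it NULL, and the test counts as a pass. A tiny sketch of that optional-callback convention (hypothetical types, not the driver's struct efx_phy_operations):

#include <stdio.h>

struct phy_ops {
        int (*run_tests)(void);  /* optional: may be NULL */
};

static int do_phy_test(const struct phy_ops *ops)
{
        if (!ops->run_tests)
                return 0;        /* nothing to test counts as success */
        return ops->run_tests();
}

int main(void)
{
        struct phy_ops none = { 0 };
        printf("rc = %d\n", do_phy_test(&none));  /* prints rc = 0 */
        return 0;
}
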
236 | /************************************************************************** | 246 | /************************************************************************** |
237 | * | 247 | * |
238 | * Loopback testing | 248 | * Loopback testing |
239 | * NB Only one loopback test can be executing concurrently. | 249 | * NB Only one loopback test can be executing concurrently. |
240 | * | 250 | * |
241 | **************************************************************************/ | 251 | **************************************************************************/ |
242 | 252 | ||
243 | /* Loopback test RX callback | 253 | /* Loopback test RX callback |
244 | * This is called for each received packet during loopback testing. | 254 | * This is called for each received packet during loopback testing. |
245 | */ | 255 | */ |
246 | void efx_loopback_rx_packet(struct efx_nic *efx, | 256 | void efx_loopback_rx_packet(struct efx_nic *efx, |
247 | const char *buf_ptr, int pkt_len) | 257 | const char *buf_ptr, int pkt_len) |
248 | { | 258 | { |
249 | struct efx_loopback_state *state = efx->loopback_selftest; | 259 | struct efx_loopback_state *state = efx->loopback_selftest; |
250 | struct efx_loopback_payload *received; | 260 | struct efx_loopback_payload *received; |
251 | struct efx_loopback_payload *payload; | 261 | struct efx_loopback_payload *payload; |
252 | 262 | ||
253 | BUG_ON(!buf_ptr); | 263 | BUG_ON(!buf_ptr); |
254 | 264 | ||
255 | /* If we are just flushing, then drop the packet */ | 265 | /* If we are just flushing, then drop the packet */ |
256 | if ((state == NULL) || state->flush) | 266 | if ((state == NULL) || state->flush) |
257 | return; | 267 | return; |
258 | 268 | ||
259 | payload = &state->payload; | 269 | payload = &state->payload; |
260 | 270 | ||
261 | received = (struct efx_loopback_payload *) buf_ptr; | 271 | received = (struct efx_loopback_payload *) buf_ptr; |
262 | received->ip.saddr = payload->ip.saddr; | 272 | received->ip.saddr = payload->ip.saddr; |
263 | if (state->offload_csum) | 273 | if (state->offload_csum) |
264 | received->ip.check = payload->ip.check; | 274 | received->ip.check = payload->ip.check; |
265 | 275 | ||
266 | /* Check that header exists */ | 276 | /* Check that header exists */ |
267 | if (pkt_len < sizeof(received->header)) { | 277 | if (pkt_len < sizeof(received->header)) { |
268 | netif_err(efx, drv, efx->net_dev, | 278 | netif_err(efx, drv, efx->net_dev, |
269 | "saw runt RX packet (length %d) in %s loopback " | 279 | "saw runt RX packet (length %d) in %s loopback " |
270 | "test\n", pkt_len, LOOPBACK_MODE(efx)); | 280 | "test\n", pkt_len, LOOPBACK_MODE(efx)); |
271 | goto err; | 281 | goto err; |
272 | } | 282 | } |
273 | 283 | ||
274 | /* Check that the ethernet header exists */ | 284 | /* Check that the ethernet header exists */ |
275 | if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { | 285 | if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) { |
276 | netif_err(efx, drv, efx->net_dev, | 286 | netif_err(efx, drv, efx->net_dev, |
277 | "saw non-loopback RX packet in %s loopback test\n", | 287 | "saw non-loopback RX packet in %s loopback test\n", |
278 | LOOPBACK_MODE(efx)); | 288 | LOOPBACK_MODE(efx)); |
279 | goto err; | 289 | goto err; |
280 | } | 290 | } |
281 | 291 | ||
282 | /* Check packet length */ | 292 | /* Check packet length */ |
283 | if (pkt_len != sizeof(*payload)) { | 293 | if (pkt_len != sizeof(*payload)) { |
284 | netif_err(efx, drv, efx->net_dev, | 294 | netif_err(efx, drv, efx->net_dev, |
285 | "saw incorrect RX packet length %d (wanted %d) in " | 295 | "saw incorrect RX packet length %d (wanted %d) in " |
286 | "%s loopback test\n", pkt_len, (int)sizeof(*payload), | 296 | "%s loopback test\n", pkt_len, (int)sizeof(*payload), |
287 | LOOPBACK_MODE(efx)); | 297 | LOOPBACK_MODE(efx)); |
288 | goto err; | 298 | goto err; |
289 | } | 299 | } |
290 | 300 | ||
291 | /* Check that IP header matches */ | 301 | /* Check that IP header matches */ |
292 | if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { | 302 | if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) { |
293 | netif_err(efx, drv, efx->net_dev, | 303 | netif_err(efx, drv, efx->net_dev, |
294 | "saw corrupted IP header in %s loopback test\n", | 304 | "saw corrupted IP header in %s loopback test\n", |
295 | LOOPBACK_MODE(efx)); | 305 | LOOPBACK_MODE(efx)); |
296 | goto err; | 306 | goto err; |
297 | } | 307 | } |
298 | 308 | ||
299 | /* Check that msg and padding match */ | 309 | /* Check that msg and padding match */ |
300 | if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { | 310 | if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) { |
301 | netif_err(efx, drv, efx->net_dev, | 311 | netif_err(efx, drv, efx->net_dev, |
302 | "saw corrupted RX packet in %s loopback test\n", | 312 | "saw corrupted RX packet in %s loopback test\n", |
303 | LOOPBACK_MODE(efx)); | 313 | LOOPBACK_MODE(efx)); |
304 | goto err; | 314 | goto err; |
305 | } | 315 | } |
306 | 316 | ||
307 | /* Check that iteration matches */ | 317 | /* Check that iteration matches */ |
308 | if (received->iteration != payload->iteration) { | 318 | if (received->iteration != payload->iteration) { |
309 | netif_err(efx, drv, efx->net_dev, | 319 | netif_err(efx, drv, efx->net_dev, |
310 | "saw RX packet from iteration %d (wanted %d) in " | 320 | "saw RX packet from iteration %d (wanted %d) in " |
311 | "%s loopback test\n", ntohs(received->iteration), | 321 | "%s loopback test\n", ntohs(received->iteration), |
312 | ntohs(payload->iteration), LOOPBACK_MODE(efx)); | 322 | ntohs(payload->iteration), LOOPBACK_MODE(efx)); |
313 | goto err; | 323 | goto err; |
314 | } | 324 | } |
315 | 325 | ||
316 | /* Increase correct RX count */ | 326 | /* Increase correct RX count */ |
317 | netif_vdbg(efx, drv, efx->net_dev, | 327 | netif_vdbg(efx, drv, efx->net_dev, |
318 | "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); | 328 | "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx)); |
319 | 329 | ||
320 | atomic_inc(&state->rx_good); | 330 | atomic_inc(&state->rx_good); |
321 | return; | 331 | return; |
322 | 332 | ||
323 | err: | 333 | err: |
324 | #ifdef EFX_ENABLE_DEBUG | 334 | #ifdef EFX_ENABLE_DEBUG |
325 | if (atomic_read(&state->rx_bad) == 0) { | 335 | if (atomic_read(&state->rx_bad) == 0) { |
326 | netif_err(efx, drv, efx->net_dev, "received packet:\n"); | 336 | netif_err(efx, drv, efx->net_dev, "received packet:\n"); |
327 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, | 337 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, |
328 | buf_ptr, pkt_len, 0); | 338 | buf_ptr, pkt_len, 0); |
329 | netif_err(efx, drv, efx->net_dev, "expected packet:\n"); | 339 | netif_err(efx, drv, efx->net_dev, "expected packet:\n"); |
330 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, | 340 | print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1, |
331 | &state->payload, sizeof(state->payload), 0); | 341 | &state->payload, sizeof(state->payload), 0); |
332 | } | 342 | } |
333 | #endif | 343 | #endif |
334 | atomic_inc(&state->rx_bad); | 344 | atomic_inc(&state->rx_bad); |
335 | } | 345 | } |
336 | 346 | ||
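The RX callback validates the packet in stages, cheapest first: length, Ethernet header, total size, IP header, payload, and finally the iteration number (which catches stragglers from a previous burst). A compact user-space sketch of the same ladder over plain buffers; the payload layout here is hypothetical, not the driver's struct efx_loopback_payload:

#include <stdio.h>
#include <string.h>

struct payload {
        char header[14];          /* stand-in for the Ethernet header */
        char ip[20];              /* stand-in for the IP header */
        char msg[32];             /* message body and padding */
        unsigned short iteration; /* which test burst sent this */
};

/* Return 0 if rx matches, -1 (with a reason) otherwise. */
static int check_rx(const char *buf, size_t len, const struct payload *want)
{
        const struct payload *got = (const struct payload *)buf;

        if (len < sizeof(got->header))
                return printf("runt packet\n"), -1;
        if (memcmp(got->header, want->header, sizeof(want->header)))
                return printf("non-loopback packet\n"), -1;
        if (len != sizeof(*want))
                return printf("wrong length\n"), -1;
        if (memcmp(got->ip, want->ip, sizeof(want->ip)))
                return printf("corrupted IP header\n"), -1;
        if (memcmp(got->msg, want->msg, sizeof(want->msg)))
                return printf("corrupted payload\n"), -1;
        if (got->iteration != want->iteration)
                return printf("stale iteration\n"), -1;
        return 0;
}

int main(void)
{
        struct payload p = { "eth-hdr", "ip-hdr", "message", 1 };
        return check_rx((const char *)&p, sizeof(p), &p);  /* passes */
}
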
337 | /* Initialise an efx_selftest_state for a new iteration */ | 347 | /* Initialise an efx_selftest_state for a new iteration */ |
338 | static void efx_iterate_state(struct efx_nic *efx) | 348 | static void efx_iterate_state(struct efx_nic *efx) |
339 | { | 349 | { |
340 | struct efx_loopback_state *state = efx->loopback_selftest; | 350 | struct efx_loopback_state *state = efx->loopback_selftest; |
341 | struct net_device *net_dev = efx->net_dev; | 351 | struct net_device *net_dev = efx->net_dev; |
342 | struct efx_loopback_payload *payload = &state->payload; | 352 | struct efx_loopback_payload *payload = &state->payload; |
343 | 353 | ||
344 | /* Initialise the layer II header */ | 354 | /* Initialise the layer II header */ |
345 | memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); | 355 | memcpy(&payload->header.h_dest, net_dev->dev_addr, ETH_ALEN); |
346 | memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); | 356 | memcpy(&payload->header.h_source, &payload_source, ETH_ALEN); |
347 | payload->header.h_proto = htons(ETH_P_IP); | 357 | payload->header.h_proto = htons(ETH_P_IP); |
348 | 358 | ||
349 | /* saddr set later and used as incrementing count */ | 359 | /* saddr set later and used as incrementing count */ |
350 | payload->ip.daddr = htonl(INADDR_LOOPBACK); | 360 | payload->ip.daddr = htonl(INADDR_LOOPBACK); |
351 | payload->ip.ihl = 5; | 361 | payload->ip.ihl = 5; |
352 | payload->ip.check = htons(0xdead); | 362 | payload->ip.check = htons(0xdead); |
353 | payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); | 363 | payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr)); |
354 | payload->ip.version = IPVERSION; | 364 | payload->ip.version = IPVERSION; |
355 | payload->ip.protocol = IPPROTO_UDP; | 365 | payload->ip.protocol = IPPROTO_UDP; |
356 | 366 | ||
357 | /* Initialise udp header */ | 367 | /* Initialise udp header */ |
358 | payload->udp.source = 0; | 368 | payload->udp.source = 0; |
359 | payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - | 369 | payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) - |
360 | sizeof(struct iphdr)); | 370 | sizeof(struct iphdr)); |
361 | payload->udp.check = 0; /* checksum ignored */ | 371 | payload->udp.check = 0; /* checksum ignored */ |
362 | 372 | ||
363 | /* Fill out payload */ | 373 | /* Fill out payload */ |
364 | payload->iteration = htons(ntohs(payload->iteration) + 1); | 374 | payload->iteration = htons(ntohs(payload->iteration) + 1); |
365 | memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); | 375 | memcpy(&payload->msg, payload_msg, sizeof(payload_msg)); |
366 | 376 | ||
367 | /* Fill out remaining state members */ | 377 | /* Fill out remaining state members */ |
368 | atomic_set(&state->rx_good, 0); | 378 | atomic_set(&state->rx_good, 0); |
369 | atomic_set(&state->rx_bad, 0); | 379 | atomic_set(&state->rx_bad, 0); |
370 | smp_wmb(); | 380 | smp_wmb(); |
371 | } | 381 | } |
372 | 382 | ||
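One subtlety in efx_iterate_state(): the iteration counter lives in the packet in network byte order, so it is incremented by round-tripping through ntohs()/htons() rather than with ++. In isolation:

#include <arpa/inet.h>
#include <stdio.h>

int main(void)
{
        unsigned short iter_be = htons(41);          /* counter kept big-endian */

        /* Convert to host order, add one, convert back -- correct on any CPU. */
        iter_be = htons(ntohs(iter_be) + 1);

        printf("iteration = %u\n", ntohs(iter_be));  /* prints iteration = 42 */
        return 0;
}
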
373 | static int efx_begin_loopback(struct efx_tx_queue *tx_queue) | 383 | static int efx_begin_loopback(struct efx_tx_queue *tx_queue) |
374 | { | 384 | { |
375 | struct efx_nic *efx = tx_queue->efx; | 385 | struct efx_nic *efx = tx_queue->efx; |
376 | struct efx_loopback_state *state = efx->loopback_selftest; | 386 | struct efx_loopback_state *state = efx->loopback_selftest; |
377 | struct efx_loopback_payload *payload; | 387 | struct efx_loopback_payload *payload; |
378 | struct sk_buff *skb; | 388 | struct sk_buff *skb; |
379 | int i; | 389 | int i; |
380 | netdev_tx_t rc; | 390 | netdev_tx_t rc; |
381 | 391 | ||
382 | /* Transmit N copies of buffer */ | 392 | /* Transmit N copies of buffer */ |
383 | for (i = 0; i < state->packet_count; i++) { | 393 | for (i = 0; i < state->packet_count; i++) { |
384 | /* Allocate an skb, holding an extra reference for | 394 | /* Allocate an skb, holding an extra reference for |
385 | * transmit completion counting */ | 395 | * transmit completion counting */ |
386 | skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); | 396 | skb = alloc_skb(sizeof(state->payload), GFP_KERNEL); |
387 | if (!skb) | 397 | if (!skb) |
388 | return -ENOMEM; | 398 | return -ENOMEM; |
389 | state->skbs[i] = skb; | 399 | state->skbs[i] = skb; |
390 | skb_get(skb); | 400 | skb_get(skb); |
391 | 401 | ||
392 | /* Copy the payload in, incrementing the source address to | 402 | /* Copy the payload in, incrementing the source address to |
393 | * exercise the rss vectors */ | 403 | * exercise the rss vectors */ |
394 | payload = ((struct efx_loopback_payload *) | 404 | payload = ((struct efx_loopback_payload *) |
395 | skb_put(skb, sizeof(state->payload))); | 405 | skb_put(skb, sizeof(state->payload))); |
396 | memcpy(payload, &state->payload, sizeof(state->payload)); | 406 | memcpy(payload, &state->payload, sizeof(state->payload)); |
397 | payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); | 407 | payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2)); |
398 | 408 | ||
399 | /* Ensure everything we've written is visible to the | 409 | /* Ensure everything we've written is visible to the |
400 | * interrupt handler. */ | 410 | * interrupt handler. */ |
401 | smp_wmb(); | 411 | smp_wmb(); |
402 | 412 | ||
403 | if (efx_dev_registered(efx)) | 413 | if (efx_dev_registered(efx)) |
404 | netif_tx_lock_bh(efx->net_dev); | 414 | netif_tx_lock_bh(efx->net_dev); |
405 | rc = efx_enqueue_skb(tx_queue, skb); | 415 | rc = efx_enqueue_skb(tx_queue, skb); |
406 | if (efx_dev_registered(efx)) | 416 | if (efx_dev_registered(efx)) |
407 | netif_tx_unlock_bh(efx->net_dev); | 417 | netif_tx_unlock_bh(efx->net_dev); |
408 | 418 | ||
409 | if (rc != NETDEV_TX_OK) { | 419 | if (rc != NETDEV_TX_OK) { |
410 | netif_err(efx, drv, efx->net_dev, | 420 | netif_err(efx, drv, efx->net_dev, |
411 | "TX queue %d could not transmit packet %d of " | 421 | "TX queue %d could not transmit packet %d of " |
412 | "%d in %s loopback test\n", tx_queue->queue, | 422 | "%d in %s loopback test\n", tx_queue->queue, |
413 | i + 1, state->packet_count, | 423 | i + 1, state->packet_count, |
414 | LOOPBACK_MODE(efx)); | 424 | LOOPBACK_MODE(efx)); |
415 | 425 | ||
416 | /* Defer cleaning up the other skbs for the caller */ | 426 | /* Defer cleaning up the other skbs for the caller */ |
417 | kfree_skb(skb); | 427 | kfree_skb(skb); |
418 | return -EPIPE; | 428 | return -EPIPE; |
419 | } | 429 | } |
420 | } | 430 | } |
421 | 431 | ||
422 | return 0; | 432 | return 0; |
423 | } | 433 | } |
424 | 434 | ||
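The extra skb_get() in efx_begin_loopback() is the heart of the completion-counting scheme: the TX path drops one reference when a buffer completes, so any skb whose count is back to one (tested later via skb_shared() in efx_end_loopback()) must have been transmitted. A simplified reference-count analogue with a hypothetical buffer type, not the kernel's sk_buff API:

#include <stdio.h>
#include <stdlib.h>

struct buf {
        int refcnt;
};

static struct buf *buf_alloc(void)
{
        struct buf *b = malloc(sizeof(*b));
        if (b)
                b->refcnt = 1;
        return b;
}

static void buf_get(struct buf *b) { b->refcnt++; }

static void buf_put(struct buf *b)
{
        if (--b->refcnt == 0)
                free(b);
}

int main(void)
{
        struct buf *b = buf_alloc();

        if (!b)
                return 1;
        buf_get(b);  /* extra reference held by the test, like skb_get() */
        /* ... hand b to the "TX path" ... */
        buf_put(b);  /* TX completion drops its reference */

        /* refcnt back to 1: only the test holds it, so TX has completed */
        printf("tx %s\n", b->refcnt == 1 ? "done" : "pending");
        buf_put(b);  /* release the test's reference; frees the buffer */
        return 0;
}
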
425 | static int efx_poll_loopback(struct efx_nic *efx) | 435 | static int efx_poll_loopback(struct efx_nic *efx) |
426 | { | 436 | { |
427 | struct efx_loopback_state *state = efx->loopback_selftest; | 437 | struct efx_loopback_state *state = efx->loopback_selftest; |
428 | struct efx_channel *channel; | 438 | struct efx_channel *channel; |
429 | 439 | ||
430 | /* NAPI polling is not enabled, so process channels | 440 | /* NAPI polling is not enabled, so process channels |
431 | * synchronously */ | 441 | * synchronously */ |
432 | efx_for_each_channel(channel, efx) { | 442 | efx_for_each_channel(channel, efx) { |
433 | if (channel->work_pending) | 443 | if (channel->work_pending) |
434 | efx_process_channel_now(channel); | 444 | efx_process_channel_now(channel); |
435 | } | 445 | } |
436 | return atomic_read(&state->rx_good) == state->packet_count; | 446 | return atomic_read(&state->rx_good) == state->packet_count; |
437 | } | 447 | } |
438 | 448 | ||
439 | static int efx_end_loopback(struct efx_tx_queue *tx_queue, | 449 | static int efx_end_loopback(struct efx_tx_queue *tx_queue, |
440 | struct efx_loopback_self_tests *lb_tests) | 450 | struct efx_loopback_self_tests *lb_tests) |
441 | { | 451 | { |
442 | struct efx_nic *efx = tx_queue->efx; | 452 | struct efx_nic *efx = tx_queue->efx; |
443 | struct efx_loopback_state *state = efx->loopback_selftest; | 453 | struct efx_loopback_state *state = efx->loopback_selftest; |
444 | struct sk_buff *skb; | 454 | struct sk_buff *skb; |
445 | int tx_done = 0, rx_good, rx_bad; | 455 | int tx_done = 0, rx_good, rx_bad; |
446 | int i, rc = 0; | 456 | int i, rc = 0; |
447 | 457 | ||
448 | if (efx_dev_registered(efx)) | 458 | if (efx_dev_registered(efx)) |
449 | netif_tx_lock_bh(efx->net_dev); | 459 | netif_tx_lock_bh(efx->net_dev); |
450 | 460 | ||
451 | /* Count the number of tx completions, and decrement the refcnt. Any | 461 | /* Count the number of tx completions, and decrement the refcnt. Any |
452 | * skbs not already completed will be freed when the queue is flushed */ | 462 | * skbs not already completed will be freed when the queue is flushed */ |
453 | for (i = 0; i < state->packet_count; i++) { | 463 | for (i = 0; i < state->packet_count; i++) { |
454 | skb = state->skbs[i]; | 464 | skb = state->skbs[i]; |
455 | if (skb && !skb_shared(skb)) | 465 | if (skb && !skb_shared(skb)) |
456 | ++tx_done; | 466 | ++tx_done; |
457 | dev_kfree_skb_any(skb); | 467 | dev_kfree_skb_any(skb); |
458 | } | 468 | } |
459 | 469 | ||
460 | if (efx_dev_registered(efx)) | 470 | if (efx_dev_registered(efx)) |
461 | netif_tx_unlock_bh(efx->net_dev); | 471 | netif_tx_unlock_bh(efx->net_dev); |
462 | 472 | ||
463 | /* Check TX completion and received packet counts */ | 473 | /* Check TX completion and received packet counts */ |
464 | rx_good = atomic_read(&state->rx_good); | 474 | rx_good = atomic_read(&state->rx_good); |
465 | rx_bad = atomic_read(&state->rx_bad); | 475 | rx_bad = atomic_read(&state->rx_bad); |
466 | if (tx_done != state->packet_count) { | 476 | if (tx_done != state->packet_count) { |
467 | /* Don't free the skbs; they will be picked up on TX | 477 | /* Don't free the skbs; they will be picked up on TX |
468 | * overflow or channel teardown. | 478 | * overflow or channel teardown. |
469 | */ | 479 | */ |
470 | netif_err(efx, drv, efx->net_dev, | 480 | netif_err(efx, drv, efx->net_dev, |
471 | "TX queue %d saw only %d out of an expected %d " | 481 | "TX queue %d saw only %d out of an expected %d " |
472 | "TX completion events in %s loopback test\n", | 482 | "TX completion events in %s loopback test\n", |
473 | tx_queue->queue, tx_done, state->packet_count, | 483 | tx_queue->queue, tx_done, state->packet_count, |
474 | LOOPBACK_MODE(efx)); | 484 | LOOPBACK_MODE(efx)); |
475 | rc = -ETIMEDOUT; | 485 | rc = -ETIMEDOUT; |
476 | /* Allow to fall through so we see the RX errors as well */ | 486 | /* Allow to fall through so we see the RX errors as well */ |
477 | } | 487 | } |
478 | 488 | ||
479 | /* We may always be up to a flush away from our desired packet total */ | 489 | /* We may always be up to a flush away from our desired packet total */ |
480 | if (rx_good != state->packet_count) { | 490 | if (rx_good != state->packet_count) { |
481 | netif_dbg(efx, drv, efx->net_dev, | 491 | netif_dbg(efx, drv, efx->net_dev, |
482 | "TX queue %d saw only %d out of an expected %d " | 492 | "TX queue %d saw only %d out of an expected %d " |
483 | "received packets in %s loopback test\n", | 493 | "received packets in %s loopback test\n", |
484 | tx_queue->queue, rx_good, state->packet_count, | 494 | tx_queue->queue, rx_good, state->packet_count, |
485 | LOOPBACK_MODE(efx)); | 495 | LOOPBACK_MODE(efx)); |
486 | rc = -ETIMEDOUT; | 496 | rc = -ETIMEDOUT; |
487 | /* Fall through */ | 497 | /* Fall through */ |
488 | } | 498 | } |
489 | 499 | ||
490 | /* Update loopback test structure */ | 500 | /* Update loopback test structure */ |
491 | lb_tests->tx_sent[tx_queue->queue] += state->packet_count; | 501 | lb_tests->tx_sent[tx_queue->queue] += state->packet_count; |
492 | lb_tests->tx_done[tx_queue->queue] += tx_done; | 502 | lb_tests->tx_done[tx_queue->queue] += tx_done; |
493 | lb_tests->rx_good += rx_good; | 503 | lb_tests->rx_good += rx_good; |
494 | lb_tests->rx_bad += rx_bad; | 504 | lb_tests->rx_bad += rx_bad; |
495 | 505 | ||
496 | return rc; | 506 | return rc; |
497 | } | 507 | } |
498 | 508 | ||
499 | static int | 509 | static int |
500 | efx_test_loopback(struct efx_tx_queue *tx_queue, | 510 | efx_test_loopback(struct efx_tx_queue *tx_queue, |
501 | struct efx_loopback_self_tests *lb_tests) | 511 | struct efx_loopback_self_tests *lb_tests) |
502 | { | 512 | { |
503 | struct efx_nic *efx = tx_queue->efx; | 513 | struct efx_nic *efx = tx_queue->efx; |
504 | struct efx_loopback_state *state = efx->loopback_selftest; | 514 | struct efx_loopback_state *state = efx->loopback_selftest; |
505 | int i, begin_rc, end_rc; | 515 | int i, begin_rc, end_rc; |
506 | 516 | ||
507 | for (i = 0; i < 3; i++) { | 517 | for (i = 0; i < 3; i++) { |
508 | /* Determine how many packets to send */ | 518 | /* Determine how many packets to send */ |
509 | state->packet_count = efx->txq_entries / 3; | 519 | state->packet_count = efx->txq_entries / 3; |
510 | state->packet_count = min(1 << (i << 2), state->packet_count); | 520 | state->packet_count = min(1 << (i << 2), state->packet_count); |
511 | state->skbs = kzalloc(sizeof(state->skbs[0]) * | 521 | state->skbs = kzalloc(sizeof(state->skbs[0]) * |
512 | state->packet_count, GFP_KERNEL); | 522 | state->packet_count, GFP_KERNEL); |
513 | if (!state->skbs) | 523 | if (!state->skbs) |
514 | return -ENOMEM; | 524 | return -ENOMEM; |
515 | state->flush = false; | 525 | state->flush = false; |
516 | 526 | ||
517 | netif_dbg(efx, drv, efx->net_dev, | 527 | netif_dbg(efx, drv, efx->net_dev, |
518 | "TX queue %d testing %s loopback with %d packets\n", | 528 | "TX queue %d testing %s loopback with %d packets\n", |
519 | tx_queue->queue, LOOPBACK_MODE(efx), | 529 | tx_queue->queue, LOOPBACK_MODE(efx), |
520 | state->packet_count); | 530 | state->packet_count); |
521 | 531 | ||
522 | efx_iterate_state(efx); | 532 | efx_iterate_state(efx); |
523 | begin_rc = efx_begin_loopback(tx_queue); | 533 | begin_rc = efx_begin_loopback(tx_queue); |
524 | 534 | ||
525 | /* This will normally complete very quickly, but be | 535 | /* This will normally complete very quickly, but be |
526 | * prepared to wait up to 100 ms. */ | 536 | * prepared to wait up to 100 ms. */ |
527 | msleep(1); | 537 | msleep(1); |
528 | if (!efx_poll_loopback(efx)) { | 538 | if (!efx_poll_loopback(efx)) { |
529 | msleep(100); | 539 | msleep(100); |
530 | efx_poll_loopback(efx); | 540 | efx_poll_loopback(efx); |
531 | } | 541 | } |
532 | 542 | ||
533 | end_rc = efx_end_loopback(tx_queue, lb_tests); | 543 | end_rc = efx_end_loopback(tx_queue, lb_tests); |
534 | kfree(state->skbs); | 544 | kfree(state->skbs); |
535 | 545 | ||
536 | if (begin_rc || end_rc) { | 546 | if (begin_rc || end_rc) { |
537 | /* Wait a while to ensure there are no packets | 547 | /* Wait a while to ensure there are no packets |
538 | * floating around after a failure. */ | 548 | * floating around after a failure. */ |
539 | schedule_timeout_uninterruptible(HZ / 10); | 549 | schedule_timeout_uninterruptible(HZ / 10); |
540 | return begin_rc ? begin_rc : end_rc; | 550 | return begin_rc ? begin_rc : end_rc; |
541 | } | 551 | } |
542 | } | 552 | } |
543 | 553 | ||
544 | netif_dbg(efx, drv, efx->net_dev, | 554 | netif_dbg(efx, drv, efx->net_dev, |
545 | "TX queue %d passed %s loopback test with a burst length " | 555 | "TX queue %d passed %s loopback test with a burst length " |
546 | "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), | 556 | "of %d packets\n", tx_queue->queue, LOOPBACK_MODE(efx), |
547 | state->packet_count); | 557 | state->packet_count); |
548 | 558 | ||
549 | return 0; | 559 | return 0; |
550 | } | 560 | } |
551 | 561 | ||
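The burst sizing in efx_test_loopback() is worth unpacking: 1 << (i << 2) yields bursts of 1, 16 and 256 packets across the three iterations, capped at a third of the TX ring so the queue cannot overflow. A quick check of the arithmetic:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        int txq_entries = 512;  /* example ring size; the real value varies */
        int i;

        for (i = 0; i < 3; i++)
                printf("iteration %d: %d packets\n",
                       i, MIN(1 << (i << 2), txq_entries / 3));
        /* prints 1, 16, 170 -- the 256-packet burst is capped at 512 / 3 */
        return 0;
}
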
552 | /* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but | 562 | /* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but |
553 | * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it | 563 | * any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it |
554 | * to delay and retry. Therefore, it's safer to just poll directly. Wait | 564 | * to delay and retry. Therefore, it's safer to just poll directly. Wait |
555 | * for link up and any faults to dissipate. */ | 565 | * for link up and any faults to dissipate. */ |
556 | static int efx_wait_for_link(struct efx_nic *efx) | 566 | static int efx_wait_for_link(struct efx_nic *efx) |
557 | { | 567 | { |
558 | struct efx_link_state *link_state = &efx->link_state; | 568 | struct efx_link_state *link_state = &efx->link_state; |
559 | int count, link_up_count = 0; | 569 | int count, link_up_count = 0; |
560 | bool link_up; | 570 | bool link_up; |
561 | 571 | ||
562 | for (count = 0; count < 40; count++) { | 572 | for (count = 0; count < 40; count++) { |
563 | schedule_timeout_uninterruptible(HZ / 10); | 573 | schedule_timeout_uninterruptible(HZ / 10); |
564 | 574 | ||
565 | if (efx->type->monitor != NULL) { | 575 | if (efx->type->monitor != NULL) { |
566 | mutex_lock(&efx->mac_lock); | 576 | mutex_lock(&efx->mac_lock); |
567 | efx->type->monitor(efx); | 577 | efx->type->monitor(efx); |
568 | mutex_unlock(&efx->mac_lock); | 578 | mutex_unlock(&efx->mac_lock); |
569 | } else { | 579 | } else { |
570 | struct efx_channel *channel = efx_get_channel(efx, 0); | 580 | struct efx_channel *channel = efx_get_channel(efx, 0); |
571 | if (channel->work_pending) | 581 | if (channel->work_pending) |
572 | efx_process_channel_now(channel); | 582 | efx_process_channel_now(channel); |
573 | } | 583 | } |
574 | 584 | ||
575 | mutex_lock(&efx->mac_lock); | 585 | mutex_lock(&efx->mac_lock); |
576 | link_up = link_state->up; | 586 | link_up = link_state->up; |
577 | if (link_up) | 587 | if (link_up) |
578 | link_up = !efx->mac_op->check_fault(efx); | 588 | link_up = !efx->mac_op->check_fault(efx); |
579 | mutex_unlock(&efx->mac_lock); | 589 | mutex_unlock(&efx->mac_lock); |
580 | 590 | ||
581 | if (link_up) { | 591 | if (link_up) { |
582 | if (++link_up_count == 2) | 592 | if (++link_up_count == 2) |
583 | return 0; | 593 | return 0; |
584 | } else { | 594 | } else { |
585 | link_up_count = 0; | 595 | link_up_count = 0; |
586 | } | 596 | } |
587 | } | 597 | } |
588 | 598 | ||
589 | return -ETIMEDOUT; | 599 | return -ETIMEDOUT; |
590 | } | 600 | } |
591 | 601 | ||
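efx_wait_for_link() deliberately requires two consecutive "up" readings (link_up_count == 2) before returning success, so a transient blip right after reconfiguration cannot end the wait early; 40 polls at HZ/10 gives a roughly four-second budget. A stripped-down version of that debounce loop with a simulated link:

#include <stdio.h>

/* Simulated link: down for the first three polls, then up. */
static int poll_link(void)
{
        static int calls;
        return ++calls > 3;
}

int main(void)
{
        int count, up_count = 0;

        for (count = 0; count < 40; count++) {
                if (poll_link()) {
                        if (++up_count == 2) {  /* stable for two polls */
                                printf("link up after %d polls\n", count + 1);
                                return 0;
                        }
                } else {
                        up_count = 0;           /* any blip resets the count */
                }
        }
        printf("timed out\n");
        return 1;
}
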
592 | static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, | 602 | static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests, |
593 | unsigned int loopback_modes) | 603 | unsigned int loopback_modes) |
594 | { | 604 | { |
595 | enum efx_loopback_mode mode; | 605 | enum efx_loopback_mode mode; |
596 | struct efx_loopback_state *state; | 606 | struct efx_loopback_state *state; |
597 | struct efx_channel *channel = efx_get_channel(efx, 0); | 607 | struct efx_channel *channel = efx_get_channel(efx, 0); |
598 | struct efx_tx_queue *tx_queue; | 608 | struct efx_tx_queue *tx_queue; |
599 | int rc = 0; | 609 | int rc = 0; |
600 | 610 | ||
601 | /* Set the port loopback_selftest member. From this point on | 611 | /* Set the port loopback_selftest member. From this point on |
602 | * all received packets will be dropped. Mark the state as | 612 | * all received packets will be dropped. Mark the state as |
603 | * "flushing" so all inflight packets are dropped */ | 613 | * "flushing" so all inflight packets are dropped */ |
604 | state = kzalloc(sizeof(*state), GFP_KERNEL); | 614 | state = kzalloc(sizeof(*state), GFP_KERNEL); |
605 | if (state == NULL) | 615 | if (state == NULL) |
606 | return -ENOMEM; | 616 | return -ENOMEM; |
607 | BUG_ON(efx->loopback_selftest); | 617 | BUG_ON(efx->loopback_selftest); |
608 | state->flush = true; | 618 | state->flush = true; |
609 | efx->loopback_selftest = state; | 619 | efx->loopback_selftest = state; |
610 | 620 | ||
611 | /* Test all supported loopback modes */ | 621 | /* Test all supported loopback modes */ |
612 | for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { | 622 | for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) { |
613 | if (!(loopback_modes & (1 << mode))) | 623 | if (!(loopback_modes & (1 << mode))) |
614 | continue; | 624 | continue; |
615 | 625 | ||
616 | /* Move the port into the specified loopback mode. */ | 626 | /* Move the port into the specified loopback mode. */ |
617 | state->flush = true; | 627 | state->flush = true; |
618 | mutex_lock(&efx->mac_lock); | 628 | mutex_lock(&efx->mac_lock); |
619 | efx->loopback_mode = mode; | 629 | efx->loopback_mode = mode; |
620 | rc = __efx_reconfigure_port(efx); | 630 | rc = __efx_reconfigure_port(efx); |
621 | mutex_unlock(&efx->mac_lock); | 631 | mutex_unlock(&efx->mac_lock); |
622 | if (rc) { | 632 | if (rc) { |
623 | netif_err(efx, drv, efx->net_dev, | 633 | netif_err(efx, drv, efx->net_dev, |
624 | "unable to move into %s loopback\n", | 634 | "unable to move into %s loopback\n", |
625 | LOOPBACK_MODE(efx)); | 635 | LOOPBACK_MODE(efx)); |
626 | goto out; | 636 | goto out; |
627 | } | 637 | } |
628 | 638 | ||
629 | rc = efx_wait_for_link(efx); | 639 | rc = efx_wait_for_link(efx); |
630 | if (rc) { | 640 | if (rc) { |
631 | netif_err(efx, drv, efx->net_dev, | 641 | netif_err(efx, drv, efx->net_dev, |
632 | "loopback %s never came up\n", | 642 | "loopback %s never came up\n", |
633 | LOOPBACK_MODE(efx)); | 643 | LOOPBACK_MODE(efx)); |
634 | goto out; | 644 | goto out; |
635 | } | 645 | } |
636 | 646 | ||
637 | /* Test both types of TX queue */ | 647 | /* Test both types of TX queue */ |
638 | efx_for_each_channel_tx_queue(tx_queue, channel) { | 648 | efx_for_each_channel_tx_queue(tx_queue, channel) { |
639 | state->offload_csum = (tx_queue->queue & | 649 | state->offload_csum = (tx_queue->queue & |
640 | EFX_TXQ_TYPE_OFFLOAD); | 650 | EFX_TXQ_TYPE_OFFLOAD); |
641 | rc = efx_test_loopback(tx_queue, | 651 | rc = efx_test_loopback(tx_queue, |
642 | &tests->loopback[mode]); | 652 | &tests->loopback[mode]); |
643 | if (rc) | 653 | if (rc) |
644 | goto out; | 654 | goto out; |
645 | } | 655 | } |
646 | } | 656 | } |
647 | 657 | ||
648 | out: | 658 | out: |
649 | /* Remove the flush. The caller will remove the loopback setting */ | 659 | /* Remove the flush. The caller will remove the loopback setting */ |
650 | state->flush = true; | 660 | state->flush = true; |
651 | efx->loopback_selftest = NULL; | 661 | efx->loopback_selftest = NULL; |
652 | wmb(); | 662 | wmb(); |
653 | kfree(state); | 663 | kfree(state); |
654 | 664 | ||
655 | return rc; | 665 | return rc; |
656 | } | 666 | } |
657 | 667 | ||
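efx_test_loopbacks() walks loopback_modes as a plain bitmask, skipping any mode whose bit is clear. A tiny illustration of iterating such a mask; the mode names here are hypothetical stand-ins for the driver's enum efx_loopback_mode:

#include <stdio.h>

enum mode { MODE_NONE, MODE_XGMII, MODE_XAUI, MODE_PHY, MODE_MAX };

static const char *const names[] = { "NONE", "XGMII", "XAUI", "PHY" };

int main(void)
{
        unsigned int supported = (1 << MODE_XGMII) | (1 << MODE_PHY);
        enum mode m;

        for (m = MODE_NONE; m < MODE_MAX; m++) {
                if (!(supported & (1 << m)))
                        continue;  /* bit clear: mode not supported */
                printf("testing %s loopback\n", names[m]);
        }
        return 0;
}
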
658 | /************************************************************************** | 668 | /************************************************************************** |
659 | * | 669 | * |
660 | * Entry point | 670 | * Entry point |
661 | * | 671 | * |
662 | *************************************************************************/ | 672 | *************************************************************************/ |
663 | 673 | ||
664 | int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, | 674 | int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests, |
665 | unsigned flags) | 675 | unsigned flags) |
666 | { | 676 | { |
667 | enum efx_loopback_mode loopback_mode = efx->loopback_mode; | 677 | enum efx_loopback_mode loopback_mode = efx->loopback_mode; |
668 | int phy_mode = efx->phy_mode; | 678 | int phy_mode = efx->phy_mode; |
669 | enum reset_type reset_method = RESET_TYPE_INVISIBLE; | 679 | enum reset_type reset_method = RESET_TYPE_INVISIBLE; |
670 | struct efx_channel *channel; | 680 | struct efx_channel *channel; |
671 | int rc_test = 0, rc_reset = 0, rc; | 681 | int rc_test = 0, rc_reset = 0, rc; |
672 | 682 | ||
673 | /* Online (i.e. non-disruptive) testing | 683 | /* Online (i.e. non-disruptive) testing |
674 | * This checks interrupt generation, event delivery and PHY presence. */ | 684 | * This checks interrupt generation, event delivery and PHY presence. */ |
675 | 685 | ||
676 | rc = efx_test_phy_alive(efx, tests); | 686 | rc = efx_test_phy_alive(efx, tests); |
677 | if (rc && !rc_test) | 687 | if (rc && !rc_test) |
678 | rc_test = rc; | 688 | rc_test = rc; |
679 | 689 | ||
680 | rc = efx_test_nvram(efx, tests); | 690 | rc = efx_test_nvram(efx, tests); |
681 | if (rc && !rc_test) | 691 | if (rc && !rc_test) |
682 | rc_test = rc; | 692 | rc_test = rc; |
683 | 693 | ||
684 | rc = efx_test_interrupts(efx, tests); | 694 | rc = efx_test_interrupts(efx, tests); |
685 | if (rc && !rc_test) | 695 | if (rc && !rc_test) |
686 | rc_test = rc; | 696 | rc_test = rc; |
687 | 697 | ||
688 | efx_for_each_channel(channel, efx) { | 698 | efx_for_each_channel(channel, efx) { |
689 | rc = efx_test_eventq_irq(channel, tests); | 699 | rc = efx_test_eventq_irq(channel, tests); |
690 | if (rc && !rc_test) | 700 | if (rc && !rc_test) |
691 | rc_test = rc; | 701 | rc_test = rc; |
692 | } | 702 | } |
693 | 703 | ||
694 | if (rc_test) | 704 | if (rc_test) |
695 | return rc_test; | 705 | return rc_test; |
696 | 706 | ||
697 | if (!(flags & ETH_TEST_FL_OFFLINE)) | 707 | if (!(flags & ETH_TEST_FL_OFFLINE)) |
698 | return efx_test_phy(efx, tests, flags); | 708 | return efx_test_phy(efx, tests, flags); |
699 | 709 | ||
700 | /* Offline (i.e. disruptive) testing | 710 | /* Offline (i.e. disruptive) testing |
701 | * This checks MAC and PHY loopback on the specified port. */ | 711 | * This checks MAC and PHY loopback on the specified port. */ |
702 | 712 | ||
703 | /* force the carrier state off so the kernel doesn't transmit during | 713 | /* force the carrier state off so the kernel doesn't transmit during |
704 | * the loopback test, and the watchdog timeout doesn't fire. Also put | 714 | * the loopback test, and the watchdog timeout doesn't fire. Also put |
705 | * falcon into loopback for the register test. | 715 | * falcon into loopback for the register test. |
706 | */ | 716 | */ |
707 | mutex_lock(&efx->mac_lock); | 717 | mutex_lock(&efx->mac_lock); |
708 | efx->port_inhibited = true; | 718 | efx->port_inhibited = true; |
709 | if (efx->loopback_modes) { | 719 | if (efx->loopback_modes) { |
710 | /* We need the 312 clock from the PHY to test the XMAC | 720 | /* We need the 312 clock from the PHY to test the XMAC |
711 | * registers, so move into XGMII loopback if available */ | 721 | * registers, so move into XGMII loopback if available */ |
712 | if (efx->loopback_modes & (1 << LOOPBACK_XGMII)) | 722 | if (efx->loopback_modes & (1 << LOOPBACK_XGMII)) |
713 | efx->loopback_mode = LOOPBACK_XGMII; | 723 | efx->loopback_mode = LOOPBACK_XGMII; |
714 | else | 724 | else |
715 | efx->loopback_mode = __ffs(efx->loopback_modes); | 725 | efx->loopback_mode = __ffs(efx->loopback_modes); |
716 | } | 726 | } |
717 | 727 | ||
718 | __efx_reconfigure_port(efx); | 728 | __efx_reconfigure_port(efx); |
719 | mutex_unlock(&efx->mac_lock); | 729 | mutex_unlock(&efx->mac_lock); |
720 | 730 | ||
721 | /* free up all consumers of SRAM (including all the queues) */ | 731 | /* free up all consumers of SRAM (including all the queues) */ |
722 | efx_reset_down(efx, reset_method); | 732 | efx_reset_down(efx, reset_method); |
723 | 733 | ||
724 | rc = efx_test_chip(efx, tests); | 734 | rc = efx_test_chip(efx, tests); |
725 | if (rc && !rc_test) | 735 | if (rc && !rc_test) |
726 | rc_test = rc; | 736 | rc_test = rc; |
727 | 737 | ||
728 | /* reset the chip to recover from the register test */ | 738 | /* reset the chip to recover from the register test */ |
729 | rc_reset = efx->type->reset(efx, reset_method); | 739 | rc_reset = efx->type->reset(efx, reset_method); |
730 | 740 | ||
731 | /* Ensure that the phy is powered and out of loopback | 741 | /* Ensure that the phy is powered and out of loopback |
732 | * for the bist and loopback tests */ | 742 | * for the bist and loopback tests */ |
733 | efx->phy_mode &= ~PHY_MODE_LOW_POWER; | 743 | efx->phy_mode &= ~PHY_MODE_LOW_POWER; |
734 | efx->loopback_mode = LOOPBACK_NONE; | 744 | efx->loopback_mode = LOOPBACK_NONE; |
735 | 745 | ||
736 | rc = efx_reset_up(efx, reset_method, rc_reset == 0); | 746 | rc = efx_reset_up(efx, reset_method, rc_reset == 0); |
737 | if (rc && !rc_reset) | 747 | if (rc && !rc_reset) |
738 | rc_reset = rc; | 748 | rc_reset = rc; |
739 | 749 | ||
740 | if (rc_reset) { | 750 | if (rc_reset) { |
741 | netif_err(efx, drv, efx->net_dev, | 751 | netif_err(efx, drv, efx->net_dev, |
742 | "Unable to recover from chip test\n"); | 752 | "Unable to recover from chip test\n"); |
743 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | 753 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
744 | return rc_reset; | 754 | return rc_reset; |
745 | } | 755 | } |
746 | 756 | ||
747 | rc = efx_test_phy(efx, tests, flags); | 757 | rc = efx_test_phy(efx, tests, flags); |
748 | if (rc && !rc_test) | 758 | if (rc && !rc_test) |
749 | rc_test = rc; | 759 | rc_test = rc; |
750 | 760 | ||
751 | rc = efx_test_loopbacks(efx, tests, efx->loopback_modes); | 761 | rc = efx_test_loopbacks(efx, tests, efx->loopback_modes); |
752 | if (rc && !rc_test) | 762 | if (rc && !rc_test) |
753 | rc_test = rc; | 763 | rc_test = rc; |
754 | 764 | ||
755 | /* restore the PHY to the previous state */ | 765 | /* restore the PHY to the previous state */ |
756 | mutex_lock(&efx->mac_lock); | 766 | mutex_lock(&efx->mac_lock); |
757 | efx->phy_mode = phy_mode; | 767 | efx->phy_mode = phy_mode; |
758 | efx->port_inhibited = false; | 768 | efx->port_inhibited = false; |
759 | efx->loopback_mode = loopback_mode; | 769 | efx->loopback_mode = loopback_mode; |
760 | __efx_reconfigure_port(efx); | 770 | __efx_reconfigure_port(efx); |
761 | mutex_unlock(&efx->mac_lock); | 771 | mutex_unlock(&efx->mac_lock); |
762 | 772 | ||
763 | return rc_test; | 773 | return rc_test; |
764 | } | 774 | } |
765 | 775 | ||
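The entry point's recurring if (rc && !rc_test) rc_test = rc; idiom runs every test regardless of earlier failures but reports only the first non-zero return code. In isolation:

#include <stdio.h>

/* Record rc into *first only if it is the first failure seen. */
static void record(int rc, int *first)
{
        if (rc && !*first)
                *first = rc;
}

int main(void)
{
        int rc_test = 0;

        record(0, &rc_test);     /* pass: nothing recorded */
        record(-5, &rc_test);    /* first failure: recorded */
        record(-110, &rc_test);  /* later failure: test still ran, not recorded */

        printf("rc_test = %d\n", rc_test);  /* prints rc_test = -5 */
        return 0;
}
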
766 | 776 |
drivers/net/sfc/siena.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2009 Solarflare Communications Inc. | 4 | * Copyright 2006-2009 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/random.h> | 16 | #include <linux/random.h> |
17 | #include "net_driver.h" | 17 | #include "net_driver.h" |
18 | #include "bitfield.h" | 18 | #include "bitfield.h" |
19 | #include "efx.h" | 19 | #include "efx.h" |
20 | #include "nic.h" | 20 | #include "nic.h" |
21 | #include "mac.h" | 21 | #include "mac.h" |
22 | #include "spi.h" | 22 | #include "spi.h" |
23 | #include "regs.h" | 23 | #include "regs.h" |
24 | #include "io.h" | 24 | #include "io.h" |
25 | #include "phy.h" | 25 | #include "phy.h" |
26 | #include "workarounds.h" | 26 | #include "workarounds.h" |
27 | #include "mcdi.h" | 27 | #include "mcdi.h" |
28 | #include "mcdi_pcol.h" | 28 | #include "mcdi_pcol.h" |
29 | 29 | ||
30 | /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ | 30 | /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ |
31 | 31 | ||
32 | static void siena_init_wol(struct efx_nic *efx); | 32 | static void siena_init_wol(struct efx_nic *efx); |
33 | 33 | ||
34 | 34 | ||
35 | static void siena_push_irq_moderation(struct efx_channel *channel) | 35 | static void siena_push_irq_moderation(struct efx_channel *channel) |
36 | { | 36 | { |
37 | efx_dword_t timer_cmd; | 37 | efx_dword_t timer_cmd; |
38 | 38 | ||
39 | if (channel->irq_moderation) | 39 | if (channel->irq_moderation) |
40 | EFX_POPULATE_DWORD_2(timer_cmd, | 40 | EFX_POPULATE_DWORD_2(timer_cmd, |
41 | FRF_CZ_TC_TIMER_MODE, | 41 | FRF_CZ_TC_TIMER_MODE, |
42 | FFE_CZ_TIMER_MODE_INT_HLDOFF, | 42 | FFE_CZ_TIMER_MODE_INT_HLDOFF, |
43 | FRF_CZ_TC_TIMER_VAL, | 43 | FRF_CZ_TC_TIMER_VAL, |
44 | channel->irq_moderation - 1); | 44 | channel->irq_moderation - 1); |
45 | else | 45 | else |
46 | EFX_POPULATE_DWORD_2(timer_cmd, | 46 | EFX_POPULATE_DWORD_2(timer_cmd, |
47 | FRF_CZ_TC_TIMER_MODE, | 47 | FRF_CZ_TC_TIMER_MODE, |
48 | FFE_CZ_TIMER_MODE_DIS, | 48 | FFE_CZ_TIMER_MODE_DIS, |
49 | FRF_CZ_TC_TIMER_VAL, 0); | 49 | FRF_CZ_TC_TIMER_VAL, 0); |
50 | efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, | 50 | efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, |
51 | channel->channel); | 51 | channel->channel); |
52 | } | 52 | } |
53 | 53 | ||
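siena_push_irq_moderation() shows a common hardware-timer encoding: the value field carries irq_moderation - 1 and a separate mode field distinguishes hold-off from disabled, since a plain zero value could not express "off". A sketch of encoding such a command word; the bit layout below is invented for illustration, not the FRF_CZ_* register layout:

#include <stdio.h>

/* Hypothetical layout: mode in bit 16, 16-bit timer value in bits 0..15. */
#define MODE_DIS     0u
#define MODE_HLDOFF  1u

static unsigned int encode_timer(unsigned int ticks)
{
        if (!ticks)
                return MODE_DIS << 16;  /* moderation disabled */
        return (MODE_HLDOFF << 16) | ((ticks - 1) & 0xffff);  /* N stored as N-1 */
}

int main(void)
{
        unsigned int cmd = encode_timer(20);

        printf("mode=%u val=%u\n", cmd >> 16, cmd & 0xffff);  /* mode=1 val=19 */
        return 0;
}
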
54 | static void siena_push_multicast_hash(struct efx_nic *efx) | 54 | static void siena_push_multicast_hash(struct efx_nic *efx) |
55 | { | 55 | { |
56 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | 56 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); |
57 | 57 | ||
58 | efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, | 58 | efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, |
59 | efx->multicast_hash.byte, sizeof(efx->multicast_hash), | 59 | efx->multicast_hash.byte, sizeof(efx->multicast_hash), |
60 | NULL, 0, NULL); | 60 | NULL, 0, NULL); |
61 | } | 61 | } |
62 | 62 | ||
63 | static int siena_mdio_write(struct net_device *net_dev, | 63 | static int siena_mdio_write(struct net_device *net_dev, |
64 | int prtad, int devad, u16 addr, u16 value) | 64 | int prtad, int devad, u16 addr, u16 value) |
65 | { | 65 | { |
66 | struct efx_nic *efx = netdev_priv(net_dev); | 66 | struct efx_nic *efx = netdev_priv(net_dev); |
67 | uint32_t status; | 67 | uint32_t status; |
68 | int rc; | 68 | int rc; |
69 | 69 | ||
70 | rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, | 70 | rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, |
71 | addr, value, &status); | 71 | addr, value, &status); |
72 | if (rc) | 72 | if (rc) |
73 | return rc; | 73 | return rc; |
74 | if (status != MC_CMD_MDIO_STATUS_GOOD) | 74 | if (status != MC_CMD_MDIO_STATUS_GOOD) |
75 | return -EIO; | 75 | return -EIO; |
76 | 76 | ||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
79 | 79 | ||
80 | static int siena_mdio_read(struct net_device *net_dev, | 80 | static int siena_mdio_read(struct net_device *net_dev, |
81 | int prtad, int devad, u16 addr) | 81 | int prtad, int devad, u16 addr) |
82 | { | 82 | { |
83 | struct efx_nic *efx = netdev_priv(net_dev); | 83 | struct efx_nic *efx = netdev_priv(net_dev); |
84 | uint16_t value; | 84 | uint16_t value; |
85 | uint32_t status; | 85 | uint32_t status; |
86 | int rc; | 86 | int rc; |
87 | 87 | ||
88 | rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, | 88 | rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, |
89 | addr, &value, &status); | 89 | addr, &value, &status); |
90 | if (rc) | 90 | if (rc) |
91 | return rc; | 91 | return rc; |
92 | if (status != MC_CMD_MDIO_STATUS_GOOD) | 92 | if (status != MC_CMD_MDIO_STATUS_GOOD) |
93 | return -EIO; | 93 | return -EIO; |
94 | 94 | ||
95 | return (int)value; | 95 | return (int)value; |
96 | } | 96 | } |
97 | 97 | ||
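siena_mdio_read() folds two results into one int: a negative value is an errno, anything else is the 16-bit register value, which works because the value can never exceed 0xffff. A small demonstration of consuming such an API (the reader below is hypothetical):

#include <stdio.h>

/* Hypothetical reader: returns the 16-bit value, or a negative errno. */
static int mdio_read(unsigned int addr)
{
        if (addr > 0xffff)
                return -5;    /* -EIO */
        return 0xabcd;        /* register value always fits in 0..0xffff */
}

int main(void)
{
        int rc = mdio_read(0x10);

        if (rc < 0)
                printf("read failed: %d\n", rc);
        else
                printf("value = 0x%04x\n", rc);  /* negative never collides */
        return 0;
}
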
98 | /* This call is responsible for hooking in the MAC and PHY operations */ | 98 | /* This call is responsible for hooking in the MAC and PHY operations */ |
99 | static int siena_probe_port(struct efx_nic *efx) | 99 | static int siena_probe_port(struct efx_nic *efx) |
100 | { | 100 | { |
101 | int rc; | 101 | int rc; |
102 | 102 | ||
103 | /* Hook in PHY operations table */ | 103 | /* Hook in PHY operations table */ |
104 | efx->phy_op = &efx_mcdi_phy_ops; | 104 | efx->phy_op = &efx_mcdi_phy_ops; |
105 | 105 | ||
106 | /* Set up MDIO structure for PHY */ | 106 | /* Set up MDIO structure for PHY */ |
107 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | 107 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; |
108 | efx->mdio.mdio_read = siena_mdio_read; | 108 | efx->mdio.mdio_read = siena_mdio_read; |
109 | efx->mdio.mdio_write = siena_mdio_write; | 109 | efx->mdio.mdio_write = siena_mdio_write; |
110 | 110 | ||
111 | /* Fill out MDIO structure, loopback modes, and initial link state */ | 111 | /* Fill out MDIO structure, loopback modes, and initial link state */ |
112 | rc = efx->phy_op->probe(efx); | 112 | rc = efx->phy_op->probe(efx); |
113 | if (rc != 0) | 113 | if (rc != 0) |
114 | return rc; | 114 | return rc; |
115 | 115 | ||
116 | /* Allocate buffer for stats */ | 116 | /* Allocate buffer for stats */ |
117 | rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, | 117 | rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, |
118 | MC_CMD_MAC_NSTATS * sizeof(u64)); | 118 | MC_CMD_MAC_NSTATS * sizeof(u64)); |
119 | if (rc) | 119 | if (rc) |
120 | return rc; | 120 | return rc; |
121 | netif_dbg(efx, probe, efx->net_dev, | 121 | netif_dbg(efx, probe, efx->net_dev, |
122 | "stats buffer at %llx (virt %p phys %llx)\n", | 122 | "stats buffer at %llx (virt %p phys %llx)\n", |
123 | (u64)efx->stats_buffer.dma_addr, | 123 | (u64)efx->stats_buffer.dma_addr, |
124 | efx->stats_buffer.addr, | 124 | efx->stats_buffer.addr, |
125 | (u64)virt_to_phys(efx->stats_buffer.addr)); | 125 | (u64)virt_to_phys(efx->stats_buffer.addr)); |
126 | 126 | ||
127 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); | 127 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); |
128 | 128 | ||
129 | return 0; | 129 | return 0; |
130 | } | 130 | } |
131 | 131 | ||
132 | void siena_remove_port(struct efx_nic *efx) | 132 | static void siena_remove_port(struct efx_nic *efx) |
133 | { | 133 | { |
134 | efx->phy_op->remove(efx); | 134 | efx->phy_op->remove(efx); |
135 | efx_nic_free_buffer(efx, &efx->stats_buffer); | 135 | efx_nic_free_buffer(efx, &efx->stats_buffer); |
136 | } | 136 | } |
137 | 137 | ||
138 | static const struct efx_nic_register_test siena_register_tests[] = { | 138 | static const struct efx_nic_register_test siena_register_tests[] = { |
139 | { FR_AZ_ADR_REGION, | 139 | { FR_AZ_ADR_REGION, |
140 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, | 140 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, |
141 | { FR_CZ_USR_EV_CFG, | 141 | { FR_CZ_USR_EV_CFG, |
142 | EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, | 142 | EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, |
143 | { FR_AZ_RX_CFG, | 143 | { FR_AZ_RX_CFG, |
144 | EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, | 144 | EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, |
145 | { FR_AZ_TX_CFG, | 145 | { FR_AZ_TX_CFG, |
146 | EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, | 146 | EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, |
147 | { FR_AZ_TX_RESERVED, | 147 | { FR_AZ_TX_RESERVED, |
148 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, | 148 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, |
149 | { FR_AZ_SRM_TX_DC_CFG, | 149 | { FR_AZ_SRM_TX_DC_CFG, |
150 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, | 150 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, |
151 | { FR_AZ_RX_DC_CFG, | 151 | { FR_AZ_RX_DC_CFG, |
152 | EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, | 152 | EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, |
153 | { FR_AZ_RX_DC_PF_WM, | 153 | { FR_AZ_RX_DC_PF_WM, |
154 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, | 154 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, |
155 | { FR_BZ_DP_CTRL, | 155 | { FR_BZ_DP_CTRL, |
156 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, | 156 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, |
157 | { FR_BZ_RX_RSS_TKEY, | 157 | { FR_BZ_RX_RSS_TKEY, |
158 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | 158 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, |
159 | { FR_CZ_RX_RSS_IPV6_REG1, | 159 | { FR_CZ_RX_RSS_IPV6_REG1, |
160 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | 160 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, |
161 | { FR_CZ_RX_RSS_IPV6_REG2, | 161 | { FR_CZ_RX_RSS_IPV6_REG2, |
162 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | 162 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, |
163 | { FR_CZ_RX_RSS_IPV6_REG3, | 163 | { FR_CZ_RX_RSS_IPV6_REG3, |
164 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, | 164 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, |
165 | }; | 165 | }; |
166 | 166 | ||
167 | static int siena_test_registers(struct efx_nic *efx) | 167 | static int siena_test_registers(struct efx_nic *efx) |
168 | { | 168 | { |
169 | return efx_nic_test_registers(efx, siena_register_tests, | 169 | return efx_nic_test_registers(efx, siena_register_tests, |
170 | ARRAY_SIZE(siena_register_tests)); | 170 | ARRAY_SIZE(siena_register_tests)); |
171 | } | 171 | } |
172 | 172 | ||
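Each siena_register_tests[] entry pairs a register with a mask of the bits that may safely be flipped; the generic efx_nic_test_registers() then exercises patterns under that mask and verifies the read-back (the table lives here, the mechanics in nic.c). A toy version of masked write-and-verify against an in-memory "register"; all helpers are illustrative, not the driver's efx_reado()/efx_writeo():

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;  /* one in-memory "register" */

static void reg_write(uint32_t v) { fake_reg = v; }
static uint32_t reg_read(void)    { return fake_reg; }

/* Write a pattern restricted to writable bits, then verify the read-back. */
static int test_register(uint32_t mask, uint32_t pattern)
{
        uint32_t original = reg_read();
        uint32_t wrote = (original & ~mask) | (pattern & mask);
        int ok;

        reg_write(wrote);
        ok = ((reg_read() ^ wrote) & mask) == 0;  /* masked bits must stick */
        reg_write(original);                      /* restore the register */
        return ok ? 0 : -1;
}

int main(void)
{
        /* Same mask as the FR_AZ_ADR_REGION entry's low word, as an example. */
        printf("%s\n", test_register(0x0003FFFF, 0xAAAAAAAAu) ? "FAIL" : "PASS");
        return 0;
}
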
173 | /************************************************************************** | 173 | /************************************************************************** |
174 | * | 174 | * |
175 | * Device reset | 175 | * Device reset |
176 | * | 176 | * |
177 | ************************************************************************** | 177 | ************************************************************************** |
178 | */ | 178 | */ |
179 | 179 | ||
180 | static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) | 180 | static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) |
181 | { | 181 | { |
182 | int rc; | 182 | int rc; |
183 | 183 | ||
184 | /* Recover from a failed assertion pre-reset */ | 184 | /* Recover from a failed assertion pre-reset */ |
185 | rc = efx_mcdi_handle_assertion(efx); | 185 | rc = efx_mcdi_handle_assertion(efx); |
186 | if (rc) | 186 | if (rc) |
187 | return rc; | 187 | return rc; |
188 | 188 | ||
189 | if (method == RESET_TYPE_WORLD) | 189 | if (method == RESET_TYPE_WORLD) |
190 | return efx_mcdi_reset_mc(efx); | 190 | return efx_mcdi_reset_mc(efx); |
191 | else | 191 | else |
192 | return efx_mcdi_reset_port(efx); | 192 | return efx_mcdi_reset_port(efx); |
193 | } | 193 | } |
194 | 194 | ||
195 | static int siena_probe_nvconfig(struct efx_nic *efx) | 195 | static int siena_probe_nvconfig(struct efx_nic *efx) |
196 | { | 196 | { |
197 | int rc; | 197 | int rc; |
198 | 198 | ||
199 | rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL); | 199 | rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL); |
200 | if (rc) | 200 | if (rc) |
201 | return rc; | 201 | return rc; |
202 | 202 | ||
203 | return 0; | 203 | return 0; |
204 | } | 204 | } |
205 | 205 | ||
206 | static int siena_probe_nic(struct efx_nic *efx) | 206 | static int siena_probe_nic(struct efx_nic *efx) |
207 | { | 207 | { |
208 | struct siena_nic_data *nic_data; | 208 | struct siena_nic_data *nic_data; |
209 | bool already_attached = 0; | 209 | bool already_attached = 0; |
210 | efx_oword_t reg; | 210 | efx_oword_t reg; |
211 | int rc; | 211 | int rc; |
212 | 212 | ||
213 | /* Allocate storage for hardware specific data */ | 213 | /* Allocate storage for hardware specific data */ |
214 | nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); | 214 | nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); |
215 | if (!nic_data) | 215 | if (!nic_data) |
216 | return -ENOMEM; | 216 | return -ENOMEM; |
217 | efx->nic_data = nic_data; | 217 | efx->nic_data = nic_data; |
218 | 218 | ||
219 | if (efx_nic_fpga_ver(efx) != 0) { | 219 | if (efx_nic_fpga_ver(efx) != 0) { |
220 | netif_err(efx, probe, efx->net_dev, | 220 | netif_err(efx, probe, efx->net_dev, |
221 | "Siena FPGA not supported\n"); | 221 | "Siena FPGA not supported\n"); |
222 | rc = -ENODEV; | 222 | rc = -ENODEV; |
223 | goto fail1; | 223 | goto fail1; |
224 | } | 224 | } |
225 | 225 | ||
226 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); | 226 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); |
227 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; | 227 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; |
228 | 228 | ||
229 | efx_mcdi_init(efx); | 229 | efx_mcdi_init(efx); |
230 | 230 | ||
231 | /* Recover from a failed assertion before probing */ | 231 | /* Recover from a failed assertion before probing */ |
232 | rc = efx_mcdi_handle_assertion(efx); | 232 | rc = efx_mcdi_handle_assertion(efx); |
233 | if (rc) | 233 | if (rc) |
234 | goto fail1; | 234 | goto fail1; |
235 | 235 | ||
236 | rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build); | 236 | rc = efx_mcdi_fwver(efx, &nic_data->fw_version, &nic_data->fw_build); |
237 | if (rc) { | 237 | if (rc) { |
238 | netif_err(efx, probe, efx->net_dev, | 238 | netif_err(efx, probe, efx->net_dev, |
239 | "Failed to read MCPU firmware version - rc %d\n", rc); | 239 | "Failed to read MCPU firmware version - rc %d\n", rc); |
240 | goto fail1; /* MCPU absent? */ | 240 | goto fail1; /* MCPU absent? */ |
241 | } | 241 | } |
242 | 242 | ||
243 | /* Let the BMC know that the driver is now in charge of link and | 243 | /* Let the BMC know that the driver is now in charge of link and |
244 | * filter settings. We must do this before we reset the NIC */ | 244 | * filter settings. We must do this before we reset the NIC */ |
245 | rc = efx_mcdi_drv_attach(efx, true, &already_attached); | 245 | rc = efx_mcdi_drv_attach(efx, true, &already_attached); |
246 | if (rc) { | 246 | if (rc) { |
247 | netif_err(efx, probe, efx->net_dev, | 247 | netif_err(efx, probe, efx->net_dev, |
248 | "Unable to register driver with MCPU\n"); | 248 | "Unable to register driver with MCPU\n"); |
249 | goto fail2; | 249 | goto fail2; |
250 | } | 250 | } |
251 | if (already_attached) | 251 | if (already_attached) |
252 | /* Not a fatal error */ | 252 | /* Not a fatal error */ |
253 | netif_err(efx, probe, efx->net_dev, | 253 | netif_err(efx, probe, efx->net_dev, |
254 | "Host already registered with MCPU\n"); | 254 | "Host already registered with MCPU\n"); |
255 | 255 | ||
256 | /* Now we can reset the NIC */ | 256 | /* Now we can reset the NIC */ |
257 | rc = siena_reset_hw(efx, RESET_TYPE_ALL); | 257 | rc = siena_reset_hw(efx, RESET_TYPE_ALL); |
258 | if (rc) { | 258 | if (rc) { |
259 | netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); | 259 | netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); |
260 | goto fail3; | 260 | goto fail3; |
261 | } | 261 | } |
262 | 262 | ||
263 | siena_init_wol(efx); | 263 | siena_init_wol(efx); |
264 | 264 | ||
265 | /* Allocate memory for INT_KER */ | 265 | /* Allocate memory for INT_KER */ |
266 | rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); | 266 | rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); |
267 | if (rc) | 267 | if (rc) |
268 | goto fail4; | 268 | goto fail4; |
269 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | 269 | BUG_ON(efx->irq_status.dma_addr & 0x0f); |
270 | 270 | ||
271 | netif_dbg(efx, probe, efx->net_dev, | 271 | netif_dbg(efx, probe, efx->net_dev, |
272 | "INT_KER at %llx (virt %p phys %llx)\n", | 272 | "INT_KER at %llx (virt %p phys %llx)\n", |
273 | (unsigned long long)efx->irq_status.dma_addr, | 273 | (unsigned long long)efx->irq_status.dma_addr, |
274 | efx->irq_status.addr, | 274 | efx->irq_status.addr, |
275 | (unsigned long long)virt_to_phys(efx->irq_status.addr)); | 275 | (unsigned long long)virt_to_phys(efx->irq_status.addr)); |
276 | 276 | ||
277 | /* Read in the non-volatile configuration */ | 277 | /* Read in the non-volatile configuration */ |
278 | rc = siena_probe_nvconfig(efx); | 278 | rc = siena_probe_nvconfig(efx); |
279 | if (rc == -EINVAL) { | 279 | if (rc == -EINVAL) { |
280 | netif_err(efx, probe, efx->net_dev, | 280 | netif_err(efx, probe, efx->net_dev, |
281 | "NVRAM is invalid therefore using defaults\n"); | 281 | "NVRAM is invalid therefore using defaults\n"); |
282 | efx->phy_type = PHY_TYPE_NONE; | 282 | efx->phy_type = PHY_TYPE_NONE; |
283 | efx->mdio.prtad = MDIO_PRTAD_NONE; | 283 | efx->mdio.prtad = MDIO_PRTAD_NONE; |
284 | } else if (rc) { | 284 | } else if (rc) { |
285 | goto fail5; | 285 | goto fail5; |
286 | } | 286 | } |
287 | 287 | ||
288 | return 0; | 288 | return 0; |
289 | 289 | ||
290 | fail5: | 290 | fail5: |
291 | efx_nic_free_buffer(efx, &efx->irq_status); | 291 | efx_nic_free_buffer(efx, &efx->irq_status); |
292 | fail4: | 292 | fail4: |
293 | fail3: | 293 | fail3: |
294 | efx_mcdi_drv_attach(efx, false, NULL); | 294 | efx_mcdi_drv_attach(efx, false, NULL); |
295 | fail2: | 295 | fail2: |
296 | fail1: | 296 | fail1: |
297 | kfree(efx->nic_data); | 297 | kfree(efx->nic_data); |
298 | return rc; | 298 | return rc; |
299 | } | 299 | } |
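The error path above is the kernel's layered goto-unwind idiom: each numbered label releases only what was acquired before the failing step, and labels with no teardown of their own (fail4, fail2) simply fall through to the next one. A minimal standalone sketch of the same shape, with hypothetical attach()/detach() helpers standing in for the MCDI calls:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int attach(void)  { return 0; }          /* like efx_mcdi_drv_attach(efx, true, ...)   */
static void detach(void) { puts("detach"); }    /* like efx_mcdi_drv_attach(efx, false, NULL) */

static int probe_sketch(void)
{
        void *nic_data;
        int rc;

        nic_data = calloc(1, 64);               /* like kzalloc() of nic_data */
        if (!nic_data)
                return -ENOMEM;

        rc = attach();
        if (rc)
                goto fail1;

        rc = -EIO;                              /* pretend a later step fails */
        if (rc)
                goto fail2;

        return 0;

fail2:
        detach();                               /* undo attach()  */
fail1:
        free(nic_data);                         /* undo calloc()  */
        return rc;
}

int main(void)
{
        printf("probe_sketch() = %d\n", probe_sketch());
        return 0;
}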
300 | 300 | ||
301 | /* This call performs hardware-specific global initialisation, such as | 301 | /* This call performs hardware-specific global initialisation, such as |
302 | * defining the descriptor cache sizes and number of RSS channels. | 302 | * defining the descriptor cache sizes and number of RSS channels. |
303 | * It does not set up any buffers, descriptor rings or event queues. | 303 | * It does not set up any buffers, descriptor rings or event queues. |
304 | */ | 304 | */ |
305 | static int siena_init_nic(struct efx_nic *efx) | 305 | static int siena_init_nic(struct efx_nic *efx) |
306 | { | 306 | { |
307 | efx_oword_t temp; | 307 | efx_oword_t temp; |
308 | int rc; | 308 | int rc; |
309 | 309 | ||
310 | /* Recover from a failed assertion post-reset */ | 310 | /* Recover from a failed assertion post-reset */ |
311 | rc = efx_mcdi_handle_assertion(efx); | 311 | rc = efx_mcdi_handle_assertion(efx); |
312 | if (rc) | 312 | if (rc) |
313 | return rc; | 313 | return rc; |
314 | 314 | ||
315 | /* Squash TX of packets of 16 bytes or less */ | 315 | /* Squash TX of packets of 16 bytes or less */ |
316 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | 316 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); |
317 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | 317 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); |
318 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | 318 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); |
319 | 319 | ||
320 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 | 320 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 |
321 | * descriptors (which is bad). | 321 | * descriptors (which is bad). |
322 | */ | 322 | */ |
323 | efx_reado(efx, &temp, FR_AZ_TX_CFG); | 323 | efx_reado(efx, &temp, FR_AZ_TX_CFG); |
324 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); | 324 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); |
325 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); | 325 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); |
326 | efx_writeo(efx, &temp, FR_AZ_TX_CFG); | 326 | efx_writeo(efx, &temp, FR_AZ_TX_CFG); |
327 | 327 | ||
328 | efx_reado(efx, &temp, FR_AZ_RX_CFG); | 328 | efx_reado(efx, &temp, FR_AZ_RX_CFG); |
329 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); | 329 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); |
330 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); | 330 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); |
331 | /* Enable hash insertion. This is broken for the 'Falcon' hash | 331 | /* Enable hash insertion. This is broken for the 'Falcon' hash |
332 | * if IPv6 hashing is also enabled, so also select Toeplitz | 332 | * if IPv6 hashing is also enabled, so also select Toeplitz |
333 | * TCP/IPv4 and IPv4 hashes. */ | 333 | * TCP/IPv4 and IPv4 hashes. */ |
334 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); | 334 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); |
335 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); | 335 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); |
336 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); | 336 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); |
337 | efx_writeo(efx, &temp, FR_AZ_RX_CFG); | 337 | efx_writeo(efx, &temp, FR_AZ_RX_CFG); |
338 | 338 | ||
339 | /* Set hash key for IPv4 */ | 339 | /* Set hash key for IPv4 */ |
340 | memcpy(&temp, efx->rx_hash_key, sizeof(temp)); | 340 | memcpy(&temp, efx->rx_hash_key, sizeof(temp)); |
341 | efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); | 341 | efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); |
342 | 342 | ||
343 | /* Enable IPv6 RSS */ | 343 | /* Enable IPv6 RSS */ |
344 | BUILD_BUG_ON(sizeof(efx->rx_hash_key) < | 344 | BUILD_BUG_ON(sizeof(efx->rx_hash_key) < |
345 | 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || | 345 | 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || |
346 | FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); | 346 | FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); |
347 | memcpy(&temp, efx->rx_hash_key, sizeof(temp)); | 347 | memcpy(&temp, efx->rx_hash_key, sizeof(temp)); |
348 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); | 348 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); |
349 | memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); | 349 | memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); |
350 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); | 350 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); |
351 | EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, | 351 | EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, |
352 | FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); | 352 | FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); |
353 | memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), | 353 | memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), |
354 | FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); | 354 | FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); |
355 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); | 355 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); |
356 | 356 | ||
357 | if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) | 357 | if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) |
358 | /* No MCDI operation has been defined to set thresholds */ | 358 | /* No MCDI operation has been defined to set thresholds */ |
359 | netif_err(efx, hw, efx->net_dev, | 359 | netif_err(efx, hw, efx->net_dev, |
360 | "ignoring RX flow control thresholds\n"); | 360 | "ignoring RX flow control thresholds\n"); |
361 | 361 | ||
362 | /* Enable event logging */ | 362 | /* Enable event logging */ |
363 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); | 363 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); |
364 | if (rc) | 364 | if (rc) |
365 | return rc; | 365 | return rc; |
366 | 366 | ||
367 | /* Set destination of both TX and RX Flush events */ | 367 | /* Set destination of both TX and RX Flush events */ |
368 | EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); | 368 | EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); |
369 | efx_writeo(efx, &temp, FR_BZ_DP_CTRL); | 369 | efx_writeo(efx, &temp, FR_BZ_DP_CTRL); |
370 | 370 | ||
371 | EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); | 371 | EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); |
372 | efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); | 372 | efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); |
373 | 373 | ||
374 | efx_nic_init_common(efx); | 374 | efx_nic_init_common(efx); |
375 | return 0; | 375 | return 0; |
376 | } | 376 | } |
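Each register update in siena_init_nic() follows the same read-modify-write shape: efx_reado() fetches the whole 128-bit register, EFX_SET_OWORD_FIELD() rewrites one named bitfield, and efx_writeo() pushes the word back, leaving every other field untouched. A sketch of that pattern on a plain 32-bit word, with invented FIELD_LBN/FIELD_WIDTH constants mirroring the FRF_* definitions:

#include <stdint.h>
#include <stdio.h>

#define FIELD_LBN   4                                   /* lowest bit number, like FRF_*_LBN */
#define FIELD_WIDTH 2                                   /* field width, like FRF_*_WIDTH     */
#define FIELD_MASK  (((1u << FIELD_WIDTH) - 1) << FIELD_LBN)

static uint32_t fake_reg = 0xffffffffu;                 /* stands in for the MMIO register */

static void set_field(uint32_t *reg, uint32_t val)
{
        uint32_t v = *reg;                              /* read   (efx_reado)           */
        v &= ~FIELD_MASK;                               /* clear the one field          */
        v |= (val << FIELD_LBN) & FIELD_MASK;           /* modify (EFX_SET_OWORD_FIELD) */
        *reg = v;                                       /* write  (efx_writeo)          */
}

int main(void)
{
        set_field(&fake_reg, 1);
        printf("reg = %#010x\n", (unsigned)fake_reg);   /* 0xffffffdf: only bits 5:4 changed */
        return 0;
}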
377 | 377 | ||
378 | static void siena_remove_nic(struct efx_nic *efx) | 378 | static void siena_remove_nic(struct efx_nic *efx) |
379 | { | 379 | { |
380 | efx_nic_free_buffer(efx, &efx->irq_status); | 380 | efx_nic_free_buffer(efx, &efx->irq_status); |
381 | 381 | ||
382 | siena_reset_hw(efx, RESET_TYPE_ALL); | 382 | siena_reset_hw(efx, RESET_TYPE_ALL); |
383 | 383 | ||
384 | /* Relinquish the device back to the BMC */ | 384 | /* Relinquish the device back to the BMC */ |
385 | if (efx_nic_has_mc(efx)) | 385 | if (efx_nic_has_mc(efx)) |
386 | efx_mcdi_drv_attach(efx, false, NULL); | 386 | efx_mcdi_drv_attach(efx, false, NULL); |
387 | 387 | ||
388 | /* Tear down the private nic state */ | 388 | /* Tear down the private nic state */ |
389 | kfree(efx->nic_data); | 389 | kfree(efx->nic_data); |
390 | efx->nic_data = NULL; | 390 | efx->nic_data = NULL; |
391 | } | 391 | } |
392 | 392 | ||
393 | #define STATS_GENERATION_INVALID ((u64)(-1)) | 393 | #define STATS_GENERATION_INVALID ((u64)(-1)) |
394 | 394 | ||
395 | static int siena_try_update_nic_stats(struct efx_nic *efx) | 395 | static int siena_try_update_nic_stats(struct efx_nic *efx) |
396 | { | 396 | { |
397 | u64 *dma_stats; | 397 | u64 *dma_stats; |
398 | struct efx_mac_stats *mac_stats; | 398 | struct efx_mac_stats *mac_stats; |
399 | u64 generation_start; | 399 | u64 generation_start; |
400 | u64 generation_end; | 400 | u64 generation_end; |
401 | 401 | ||
402 | mac_stats = &efx->mac_stats; | 402 | mac_stats = &efx->mac_stats; |
403 | dma_stats = (u64 *)efx->stats_buffer.addr; | 403 | dma_stats = (u64 *)efx->stats_buffer.addr; |
404 | 404 | ||
405 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; | 405 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
406 | if (generation_end == STATS_GENERATION_INVALID) | 406 | if (generation_end == STATS_GENERATION_INVALID) |
407 | return 0; | 407 | return 0; |
408 | rmb(); | 408 | rmb(); |
409 | 409 | ||
410 | #define MAC_STAT(M, D) \ | 410 | #define MAC_STAT(M, D) \ |
411 | mac_stats->M = dma_stats[MC_CMD_MAC_ ## D] | 411 | mac_stats->M = dma_stats[MC_CMD_MAC_ ## D] |
412 | 412 | ||
413 | MAC_STAT(tx_bytes, TX_BYTES); | 413 | MAC_STAT(tx_bytes, TX_BYTES); |
414 | MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); | 414 | MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); |
415 | mac_stats->tx_good_bytes = (mac_stats->tx_bytes - | 415 | mac_stats->tx_good_bytes = (mac_stats->tx_bytes - |
416 | mac_stats->tx_bad_bytes); | 416 | mac_stats->tx_bad_bytes); |
417 | MAC_STAT(tx_packets, TX_PKTS); | 417 | MAC_STAT(tx_packets, TX_PKTS); |
418 | MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); | 418 | MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); |
419 | MAC_STAT(tx_pause, TX_PAUSE_PKTS); | 419 | MAC_STAT(tx_pause, TX_PAUSE_PKTS); |
420 | MAC_STAT(tx_control, TX_CONTROL_PKTS); | 420 | MAC_STAT(tx_control, TX_CONTROL_PKTS); |
421 | MAC_STAT(tx_unicast, TX_UNICAST_PKTS); | 421 | MAC_STAT(tx_unicast, TX_UNICAST_PKTS); |
422 | MAC_STAT(tx_multicast, TX_MULTICAST_PKTS); | 422 | MAC_STAT(tx_multicast, TX_MULTICAST_PKTS); |
423 | MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS); | 423 | MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS); |
424 | MAC_STAT(tx_lt64, TX_LT64_PKTS); | 424 | MAC_STAT(tx_lt64, TX_LT64_PKTS); |
425 | MAC_STAT(tx_64, TX_64_PKTS); | 425 | MAC_STAT(tx_64, TX_64_PKTS); |
426 | MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS); | 426 | MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS); |
427 | MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS); | 427 | MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS); |
428 | MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS); | 428 | MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS); |
429 | MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS); | 429 | MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS); |
430 | MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS); | 430 | MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS); |
431 | MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS); | 431 | MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS); |
432 | MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS); | 432 | MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS); |
433 | mac_stats->tx_collision = 0; | 433 | mac_stats->tx_collision = 0; |
434 | MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS); | 434 | MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS); |
435 | MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS); | 435 | MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS); |
436 | MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS); | 436 | MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS); |
437 | MAC_STAT(tx_deferred, TX_DEFERRED_PKTS); | 437 | MAC_STAT(tx_deferred, TX_DEFERRED_PKTS); |
438 | MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS); | 438 | MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS); |
439 | mac_stats->tx_collision = (mac_stats->tx_single_collision + | 439 | mac_stats->tx_collision = (mac_stats->tx_single_collision + |
440 | mac_stats->tx_multiple_collision + | 440 | mac_stats->tx_multiple_collision + |
441 | mac_stats->tx_excessive_collision + | 441 | mac_stats->tx_excessive_collision + |
442 | mac_stats->tx_late_collision); | 442 | mac_stats->tx_late_collision); |
443 | MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS); | 443 | MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS); |
444 | MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS); | 444 | MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS); |
445 | MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS); | 445 | MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS); |
446 | MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); | 446 | MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); |
447 | MAC_STAT(rx_bytes, RX_BYTES); | 447 | MAC_STAT(rx_bytes, RX_BYTES); |
448 | MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); | 448 | MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); |
449 | mac_stats->rx_good_bytes = (mac_stats->rx_bytes - | 449 | mac_stats->rx_good_bytes = (mac_stats->rx_bytes - |
450 | mac_stats->rx_bad_bytes); | 450 | mac_stats->rx_bad_bytes); |
451 | MAC_STAT(rx_packets, RX_PKTS); | 451 | MAC_STAT(rx_packets, RX_PKTS); |
452 | MAC_STAT(rx_good, RX_GOOD_PKTS); | 452 | MAC_STAT(rx_good, RX_GOOD_PKTS); |
453 | MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); | 453 | MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); |
454 | MAC_STAT(rx_pause, RX_PAUSE_PKTS); | 454 | MAC_STAT(rx_pause, RX_PAUSE_PKTS); |
455 | MAC_STAT(rx_control, RX_CONTROL_PKTS); | 455 | MAC_STAT(rx_control, RX_CONTROL_PKTS); |
456 | MAC_STAT(rx_unicast, RX_UNICAST_PKTS); | 456 | MAC_STAT(rx_unicast, RX_UNICAST_PKTS); |
457 | MAC_STAT(rx_multicast, RX_MULTICAST_PKTS); | 457 | MAC_STAT(rx_multicast, RX_MULTICAST_PKTS); |
458 | MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS); | 458 | MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS); |
459 | MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS); | 459 | MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS); |
460 | MAC_STAT(rx_64, RX_64_PKTS); | 460 | MAC_STAT(rx_64, RX_64_PKTS); |
461 | MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS); | 461 | MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS); |
462 | MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS); | 462 | MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS); |
463 | MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS); | 463 | MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS); |
464 | MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS); | 464 | MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS); |
465 | MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS); | 465 | MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS); |
466 | MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS); | 466 | MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS); |
467 | MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS); | 467 | MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS); |
468 | mac_stats->rx_bad_lt64 = 0; | 468 | mac_stats->rx_bad_lt64 = 0; |
469 | mac_stats->rx_bad_64_to_15xx = 0; | 469 | mac_stats->rx_bad_64_to_15xx = 0; |
470 | mac_stats->rx_bad_15xx_to_jumbo = 0; | 470 | mac_stats->rx_bad_15xx_to_jumbo = 0; |
471 | MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS); | 471 | MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS); |
472 | MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS); | 472 | MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS); |
473 | mac_stats->rx_missed = 0; | 473 | mac_stats->rx_missed = 0; |
474 | MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS); | 474 | MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS); |
475 | MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS); | 475 | MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS); |
476 | MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS); | 476 | MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS); |
477 | MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS); | 477 | MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS); |
478 | MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS); | 478 | MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS); |
479 | mac_stats->rx_good_lt64 = 0; | 479 | mac_stats->rx_good_lt64 = 0; |
480 | 480 | ||
481 | efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]; | 481 | efx->n_rx_nodesc_drop_cnt = dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]; |
482 | 482 | ||
483 | #undef MAC_STAT | 483 | #undef MAC_STAT |
484 | 484 | ||
485 | rmb(); | 485 | rmb(); |
486 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; | 486 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
487 | if (generation_end != generation_start) | 487 | if (generation_end != generation_start) |
488 | return -EAGAIN; | 488 | return -EAGAIN; |
489 | 489 | ||
490 | return 0; | 490 | return 0; |
491 | } | 491 | } |
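The MAC_STAT() macro above relies on '##' token pasting: one short field/constant pair expands into both the struct member and the DMA-buffer index, which keeps the fifty-odd copy lines readable. A compile-and-run sketch with invented index names (the driver's version instead captures mac_stats and dma_stats from the enclosing scope):

#include <stdint.h>
#include <stdio.h>

#define IDX_TX_BYTES 0                          /* invented; like MC_CMD_MAC_TX_BYTES */
#define IDX_RX_BYTES 1

struct mac_stats { uint64_t tx_bytes, rx_bytes; };

/* '##' glues the D argument onto IDX_, so one macro names both sides */
#define MAC_STAT(s, dma, M, D) ((s).M = (dma)[IDX_ ## D])

int main(void)
{
        uint64_t dma[2] = { 100, 200 };         /* stands in for the DMA'd stats block */
        struct mac_stats s;

        MAC_STAT(s, dma, tx_bytes, TX_BYTES);   /* -> s.tx_bytes = dma[IDX_TX_BYTES] */
        MAC_STAT(s, dma, rx_bytes, RX_BYTES);
        printf("%llu %llu\n", (unsigned long long)s.tx_bytes,
               (unsigned long long)s.rx_bytes);
        return 0;
}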
492 | 492 | ||
493 | static void siena_update_nic_stats(struct efx_nic *efx) | 493 | static void siena_update_nic_stats(struct efx_nic *efx) |
494 | { | 494 | { |
495 | int retry; | 495 | int retry; |
496 | 496 | ||
497 | /* If we're unlucky enough to read statistics during the DMA, wait | 497 | /* If we're unlucky enough to read statistics during the DMA, wait |
498 | * up to 10ms for it to finish (typically takes <500us) */ | 498 | * up to 10ms for it to finish (typically takes <500us) */ |
499 | for (retry = 0; retry < 100; ++retry) { | 499 | for (retry = 0; retry < 100; ++retry) { |
500 | if (siena_try_update_nic_stats(efx) == 0) | 500 | if (siena_try_update_nic_stats(efx) == 0) |
501 | return; | 501 | return; |
502 | udelay(100); | 502 | udelay(100); |
503 | } | 503 | } |
504 | 504 | ||
505 | /* Use the old values instead */ | 505 | /* Use the old values instead */ |
506 | } | 506 | } |
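siena_try_update_nic_stats() and this retry loop implement a lock-free, seqlock-style snapshot of the firmware-written DMA buffer: the generation word read before the copy must match the one read after it (with read barriers in between), otherwise the caller backs off and retries, keeping the previous values if the DMA never settles. A userspace sketch using C11 fences in place of rmb(), with an invented buffer layout:

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

struct stats_buf {
        uint64_t generation_end;        /* written last by the producer  */
        uint64_t counters[8];
        uint64_t generation_start;      /* written first by the producer */
};

/* Returns 0 on a consistent copy, -1 if the writer raced us (the
 * driver returns -EAGAIN and the caller retries after udelay(100)). */
static int try_snapshot(const struct stats_buf *src, uint64_t *dst)
{
        uint64_t end = src->generation_end;

        atomic_thread_fence(memory_order_acquire);      /* like rmb() */
        memcpy(dst, src->counters, sizeof(src->counters));
        atomic_thread_fence(memory_order_acquire);      /* like rmb() */

        return (src->generation_start == end) ? 0 : -1;
}

int main(void)
{
        struct stats_buf b = { .generation_end = 7, .generation_start = 7 };
        uint64_t copy[8];
        return try_snapshot(&b, copy);  /* 0: generations matched */
}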
507 | 507 | ||
508 | static void siena_start_nic_stats(struct efx_nic *efx) | 508 | static void siena_start_nic_stats(struct efx_nic *efx) |
509 | { | 509 | { |
510 | u64 *dma_stats = (u64 *)efx->stats_buffer.addr; | 510 | u64 *dma_stats = (u64 *)efx->stats_buffer.addr; |
511 | 511 | ||
512 | dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; | 512 | dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; |
513 | 513 | ||
514 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, | 514 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, |
515 | MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); | 515 | MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); |
516 | } | 516 | } |
517 | 517 | ||
518 | static void siena_stop_nic_stats(struct efx_nic *efx) | 518 | static void siena_stop_nic_stats(struct efx_nic *efx) |
519 | { | 519 | { |
520 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); | 520 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); |
521 | } | 521 | } |
522 | 522 | ||
523 | void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len) | 523 | void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len) |
524 | { | 524 | { |
525 | struct siena_nic_data *nic_data = efx->nic_data; | 525 | struct siena_nic_data *nic_data = efx->nic_data; |
526 | snprintf(buf, len, "%u.%u.%u.%u", | 526 | snprintf(buf, len, "%u.%u.%u.%u", |
527 | (unsigned int)(nic_data->fw_version >> 48), | 527 | (unsigned int)(nic_data->fw_version >> 48), |
528 | (unsigned int)(nic_data->fw_version >> 32 & 0xffff), | 528 | (unsigned int)(nic_data->fw_version >> 32 & 0xffff), |
529 | (unsigned int)(nic_data->fw_version >> 16 & 0xffff), | 529 | (unsigned int)(nic_data->fw_version >> 16 & 0xffff), |
530 | (unsigned int)(nic_data->fw_version & 0xffff)); | 530 | (unsigned int)(nic_data->fw_version & 0xffff)); |
531 | } | 531 | } |
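The firmware version is a single u64 carrying four 16-bit fields, major in the top bits, which siena_print_fwver() unpacks with shifts and masks. A sketch with a hypothetical pack_ver() inverse for illustration:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical inverse of the decode above: four 16-bit fields packed
 * into one 64-bit word. */
static uint64_t pack_ver(unsigned a, unsigned b, unsigned c, unsigned d)
{
        return ((uint64_t)a << 48) | ((uint64_t)b << 32) |
               ((uint64_t)c << 16) | (uint64_t)d;
}

int main(void)
{
        uint64_t v = pack_ver(3, 0, 5, 20);
        printf("%u.%u.%u.%u\n",                 /* same decode as siena_print_fwver() */
               (unsigned)(v >> 48), (unsigned)(v >> 32 & 0xffff),
               (unsigned)(v >> 16 & 0xffff), (unsigned)(v & 0xffff));
        return 0;                               /* prints 3.0.5.20 */
}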
532 | 532 | ||
533 | /************************************************************************** | 533 | /************************************************************************** |
534 | * | 534 | * |
535 | * Wake on LAN | 535 | * Wake on LAN |
536 | * | 536 | * |
537 | ************************************************************************** | 537 | ************************************************************************** |
538 | */ | 538 | */ |
539 | 539 | ||
540 | static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | 540 | static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) |
541 | { | 541 | { |
542 | struct siena_nic_data *nic_data = efx->nic_data; | 542 | struct siena_nic_data *nic_data = efx->nic_data; |
543 | 543 | ||
544 | wol->supported = WAKE_MAGIC; | 544 | wol->supported = WAKE_MAGIC; |
545 | if (nic_data->wol_filter_id != -1) | 545 | if (nic_data->wol_filter_id != -1) |
546 | wol->wolopts = WAKE_MAGIC; | 546 | wol->wolopts = WAKE_MAGIC; |
547 | else | 547 | else |
548 | wol->wolopts = 0; | 548 | wol->wolopts = 0; |
549 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | 549 | memset(&wol->sopass, 0, sizeof(wol->sopass)); |
550 | } | 550 | } |
551 | 551 | ||
552 | 552 | ||
553 | static int siena_set_wol(struct efx_nic *efx, u32 type) | 553 | static int siena_set_wol(struct efx_nic *efx, u32 type) |
554 | { | 554 | { |
555 | struct siena_nic_data *nic_data = efx->nic_data; | 555 | struct siena_nic_data *nic_data = efx->nic_data; |
556 | int rc; | 556 | int rc; |
557 | 557 | ||
558 | if (type & ~WAKE_MAGIC) | 558 | if (type & ~WAKE_MAGIC) |
559 | return -EINVAL; | 559 | return -EINVAL; |
560 | 560 | ||
561 | if (type & WAKE_MAGIC) { | 561 | if (type & WAKE_MAGIC) { |
562 | if (nic_data->wol_filter_id != -1) | 562 | if (nic_data->wol_filter_id != -1) |
563 | efx_mcdi_wol_filter_remove(efx, | 563 | efx_mcdi_wol_filter_remove(efx, |
564 | nic_data->wol_filter_id); | 564 | nic_data->wol_filter_id); |
565 | rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address, | 565 | rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address, |
566 | &nic_data->wol_filter_id); | 566 | &nic_data->wol_filter_id); |
567 | if (rc) | 567 | if (rc) |
568 | goto fail; | 568 | goto fail; |
569 | 569 | ||
570 | pci_wake_from_d3(efx->pci_dev, true); | 570 | pci_wake_from_d3(efx->pci_dev, true); |
571 | } else { | 571 | } else { |
572 | rc = efx_mcdi_wol_filter_reset(efx); | 572 | rc = efx_mcdi_wol_filter_reset(efx); |
573 | nic_data->wol_filter_id = -1; | 573 | nic_data->wol_filter_id = -1; |
574 | pci_wake_from_d3(efx->pci_dev, false); | 574 | pci_wake_from_d3(efx->pci_dev, false); |
575 | if (rc) | 575 | if (rc) |
576 | goto fail; | 576 | goto fail; |
577 | } | 577 | } |
578 | 578 | ||
579 | return 0; | 579 | return 0; |
580 | fail: | 580 | fail: |
581 | netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", | 581 | netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", |
582 | __func__, type, rc); | 582 | __func__, type, rc); |
583 | return rc; | 583 | return rc; |
584 | } | 584 | } |
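siena_set_wol() opens with the standard reject-unknown-bits check: mask off everything the hardware supports and fail with -EINVAL if any flag remains. A sketch (WAKE_* values as in linux/ethtool.h):

#include <errno.h>
#include <stdio.h>

#define WAKE_PHY   0x01                 /* values as in linux/ethtool.h */
#define WAKE_MAGIC 0x20

static int check_wol(unsigned int type)
{
        if (type & ~WAKE_MAGIC)         /* anything beyond the supported set? */
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("%d %d\n", check_wol(WAKE_MAGIC), check_wol(WAKE_PHY));
        return 0;                       /* prints "0 -22" */
}

From userspace this typically maps onto ethtool: "ethtool -s ethX wol g" requests magic-packet wake, "ethtool -s ethX wol d" disables it.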
585 | 585 | ||
586 | 586 | ||
587 | static void siena_init_wol(struct efx_nic *efx) | 587 | static void siena_init_wol(struct efx_nic *efx) |
588 | { | 588 | { |
589 | struct siena_nic_data *nic_data = efx->nic_data; | 589 | struct siena_nic_data *nic_data = efx->nic_data; |
590 | int rc; | 590 | int rc; |
591 | 591 | ||
592 | rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); | 592 | rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); |
593 | 593 | ||
594 | if (rc != 0) { | 594 | if (rc != 0) { |
595 | /* If it failed, attempt to get into a synchronised | 595 | /* If it failed, attempt to get into a synchronised |
596 | * state with MC by resetting any set WoL filters */ | 596 | * state with MC by resetting any set WoL filters */ |
597 | efx_mcdi_wol_filter_reset(efx); | 597 | efx_mcdi_wol_filter_reset(efx); |
598 | nic_data->wol_filter_id = -1; | 598 | nic_data->wol_filter_id = -1; |
599 | } else if (nic_data->wol_filter_id != -1) { | 599 | } else if (nic_data->wol_filter_id != -1) { |
600 | pci_wake_from_d3(efx->pci_dev, true); | 600 | pci_wake_from_d3(efx->pci_dev, true); |
601 | } | 601 | } |
602 | } | 602 | } |
603 | 603 | ||
604 | 604 | ||
605 | /************************************************************************** | 605 | /************************************************************************** |
606 | * | 606 | * |
607 | * Revision-dependent attributes used by efx.c and nic.c | 607 | * Revision-dependent attributes used by efx.c and nic.c |
608 | * | 608 | * |
609 | ************************************************************************** | 609 | ************************************************************************** |
610 | */ | 610 | */ |
611 | 611 | ||
612 | struct efx_nic_type siena_a0_nic_type = { | 612 | struct efx_nic_type siena_a0_nic_type = { |
613 | .probe = siena_probe_nic, | 613 | .probe = siena_probe_nic, |
614 | .remove = siena_remove_nic, | 614 | .remove = siena_remove_nic, |
615 | .init = siena_init_nic, | 615 | .init = siena_init_nic, |
616 | .fini = efx_port_dummy_op_void, | 616 | .fini = efx_port_dummy_op_void, |
617 | .monitor = NULL, | 617 | .monitor = NULL, |
618 | .reset = siena_reset_hw, | 618 | .reset = siena_reset_hw, |
619 | .probe_port = siena_probe_port, | 619 | .probe_port = siena_probe_port, |
620 | .remove_port = siena_remove_port, | 620 | .remove_port = siena_remove_port, |
621 | .prepare_flush = efx_port_dummy_op_void, | 621 | .prepare_flush = efx_port_dummy_op_void, |
622 | .update_stats = siena_update_nic_stats, | 622 | .update_stats = siena_update_nic_stats, |
623 | .start_stats = siena_start_nic_stats, | 623 | .start_stats = siena_start_nic_stats, |
624 | .stop_stats = siena_stop_nic_stats, | 624 | .stop_stats = siena_stop_nic_stats, |
625 | .set_id_led = efx_mcdi_set_id_led, | 625 | .set_id_led = efx_mcdi_set_id_led, |
626 | .push_irq_moderation = siena_push_irq_moderation, | 626 | .push_irq_moderation = siena_push_irq_moderation, |
627 | .push_multicast_hash = siena_push_multicast_hash, | 627 | .push_multicast_hash = siena_push_multicast_hash, |
628 | .reconfigure_port = efx_mcdi_phy_reconfigure, | 628 | .reconfigure_port = efx_mcdi_phy_reconfigure, |
629 | .get_wol = siena_get_wol, | 629 | .get_wol = siena_get_wol, |
630 | .set_wol = siena_set_wol, | 630 | .set_wol = siena_set_wol, |
631 | .resume_wol = siena_init_wol, | 631 | .resume_wol = siena_init_wol, |
632 | .test_registers = siena_test_registers, | 632 | .test_registers = siena_test_registers, |
633 | .test_nvram = efx_mcdi_nvram_test_all, | 633 | .test_nvram = efx_mcdi_nvram_test_all, |
634 | .default_mac_ops = &efx_mcdi_mac_operations, | 634 | .default_mac_ops = &efx_mcdi_mac_operations, |
635 | 635 | ||
636 | .revision = EFX_REV_SIENA_A0, | 636 | .revision = EFX_REV_SIENA_A0, |
637 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + | 637 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + |
638 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), | 638 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), |
639 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | 639 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
640 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, | 640 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, |
641 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, | 641 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, |
642 | .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, | 642 | .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, |
643 | .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, | 643 | .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, |
644 | .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), | 644 | .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), |
645 | .rx_buffer_hash_size = 0x10, | 645 | .rx_buffer_hash_size = 0x10, |
646 | .rx_buffer_padding = 0, | 646 | .rx_buffer_padding = 0, |
647 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | 647 | .max_interrupt_mode = EFX_INT_MODE_MSIX, |
648 | .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy | 648 | .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy |
649 | * interrupt handler only supports 32 | 649 | * interrupt handler only supports 32 |
650 | * channels */ | 650 | * channels */ |
651 | .tx_dc_base = 0x88000, | 651 | .tx_dc_base = 0x88000, |
652 | .rx_dc_base = 0x68000, | 652 | .rx_dc_base = 0x68000, |
653 | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 653 | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
654 | NETIF_F_RXHASH | NETIF_F_NTUPLE), | 654 | NETIF_F_RXHASH | NETIF_F_NTUPLE), |
655 | .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, | 655 | .reset_world_flags = ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT, |
656 | }; | 656 | }; |
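siena_a0_nic_type is an instance of the driver's ops-table pattern: the core efx code calls hardware-specific behaviour only through these function pointers and reads per-revision constants out of the same struct, which is how Falcon and Siena variants share one driver. A minimal sketch with invented names:

#include <stdio.h>

struct nic;                                     /* opaque to the "core" */

struct nic_type {
        int  (*probe)(struct nic *);
        void (*update_stats)(struct nic *);
        unsigned int max_channels;              /* per-revision constant */
};

static int  demo_probe(struct nic *n)        { (void)n; return 0; }
static void demo_update_stats(struct nic *n) { (void)n; puts("stats"); }

static const struct nic_type demo_nic_type = {
        .probe        = demo_probe,
        .update_stats = demo_update_stats,
        .max_channels = 32,
};

int main(void)
{
        struct nic *n = 0;                      /* core never looks inside */
        if (demo_nic_type.probe(n) == 0)
                demo_nic_type.update_stats(n);
        return 0;
}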
657 | 657 |