Commit 86c432ca5d6da90a26ac8d3e680f2268b502d9c5
Committed by David S. Miller
1 parent 883cb07583
Exists in master and in 6 other branches
Revert "sfc: Use write-combining to reduce TX latency" and follow-ups
This reverts commits 65f0b417dee94f779ce9b77102b7d73c93723b39, d88d6b05fee3cc78e5b0273eb58c31201dcc6b76, fcfa060468a4edcf776f0c1211d826d5de1668c1, 747df2258b1b9a2e25929ef496262c339c380009 and 867955f5682f7157fdafe8670804b9f8ea077bc7.

Depending on the processor model, write-combining may result in reordering that the NIC will not tolerate. This typically results in a DMA error event and reset by the driver, logged as:

sfc 0000:0e:00.0: eth2: TX DMA Q reports TX_EV_PKT_ERR.
sfc 0000:0e:00.0: eth2: resetting (ALL)

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 7 changed files with 27 additions and 88 deletions
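For background, the failure mode described in the commit message comes down to how the driver's MMIO writes are mapped. The sketch below is illustrative only and is not taken from the sfc driver: it assumes, as the titles of the reverted commits suggest, that the original change mapped part of the NIC BAR with a write-combining mapping, and bar_phys/bar_len are hypothetical placeholders.

#include <linux/io.h>

/* Illustrative only: contrast an uncached mapping with a write-combining
 * one.  bar_phys and bar_len are hypothetical, not sfc driver symbols. */

static void __iomem *map_bar_uncached(resource_size_t bar_phys,
				      unsigned long bar_len)
{
	/* Uncached mapping: each MMIO write is posted to the device
	 * individually and in program order. */
	return ioremap_nocache(bar_phys, bar_len);
}

static void __iomem *map_bar_write_combined(resource_size_t bar_phys,
					    unsigned long bar_len)
{
	/* Write-combining mapping: the CPU may buffer, merge and, on some
	 * processor models, reorder these writes before they reach the
	 * device.  A wmb() flushes the write-combining buffer, but the NIC
	 * can still observe words written between barriers in an order it
	 * does not tolerate, which is the reordering this revert avoids. */
	return ioremap_wc(bar_phys, bar_len);
}

Returning to the uncached mapping gives up the TX latency saving the reverted commits were after, but keeps the writes ordered as the NIC expects.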
drivers/net/sfc/efx.c
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/cpu_rmap.h>
#include "net_driver.h"
#include "efx.h"
#include "nic.h"

#include "mcdi.h"
#include "workarounds.h"

/**************************************************************************
 *
 * Type name strings
 *
 **************************************************************************
 */

/* Loopback mode names (see LOOPBACK_MODE()) */
const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *efx_loopback_mode_names[] = {
	[LOOPBACK_NONE] = "NONE",
	[LOOPBACK_DATA] = "DATAPATH",
	[LOOPBACK_GMAC] = "GMAC",
	[LOOPBACK_XGMII] = "XGMII",
	[LOOPBACK_XGXS] = "XGXS",
	[LOOPBACK_XAUI] = "XAUI",
	[LOOPBACK_GMII] = "GMII",
	[LOOPBACK_SGMII] = "SGMII",
	[LOOPBACK_XGBR] = "XGBR",
	[LOOPBACK_XFI] = "XFI",
	[LOOPBACK_XAUI_FAR] = "XAUI_FAR",
	[LOOPBACK_GMII_FAR] = "GMII_FAR",
	[LOOPBACK_SGMII_FAR] = "SGMII_FAR",
	[LOOPBACK_XFI_FAR] = "XFI_FAR",
	[LOOPBACK_GPHY] = "GPHY",
	[LOOPBACK_PHYXS] = "PHYXS",
	[LOOPBACK_PCS] = "PCS",
	[LOOPBACK_PMAPMD] = "PMA/PMD",
	[LOOPBACK_XPORT] = "XPORT",
	[LOOPBACK_XGMII_WS] = "XGMII_WS",
	[LOOPBACK_XAUI_WS] = "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR] = "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS] = "GMII_WS",
	[LOOPBACK_XFI_WS] = "XFI_WS",
	[LOOPBACK_XFI_WS_FAR] = "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS] = "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE] = "INVISIBLE",
	[RESET_TYPE_ALL] = "ALL",
	[RESET_TYPE_WORLD] = "WORLD",
	[RESET_TYPE_DISABLE] = "DISABLE",
	[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR] = "INT_ERROR",
	[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
	[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
	[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
	[RESET_TYPE_TX_SKIP] = "TX_SKIP",
	[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};

#define EFX_MAX_MTU (9 * 1024)

/* Reset workqueue. If any NIC has a hardware failure then a reset will be
 * queued onto this work queue. This is not a per-nic work queue, because
 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
 */
static struct workqueue_struct *reset_workqueue;

/**************************************************************************
 *
 * Configurable values
 *
 *************************************************************************/

/*
 * Use separate channels for TX and RX events
 *
 * Set this to 1 to use separate channels for TX and RX. It allows us
 * to control interrupt affinity separately for TX and RX.
 *
 * This is only used in MSI-X interrupt mode
 */
static unsigned int separate_tx_channels;
module_param(separate_tx_channels, uint, 0444);
MODULE_PARM_DESC(separate_tx_channels,
		 "Use separate channels for TX and RX");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

/* This is the time (in jiffies) between invocations of the hardware
 * monitor. On Falcon-based NICs, this will:
 * - Check the on-board hardware monitor;
 * - Poll the link state and reconfigure the hardware as necessary.
 */
static unsigned int efx_monitor_interval = 1 * HZ;

/* This controls whether or not the driver will initialise devices
 * with invalid MAC addresses stored in the EEPROM or flash. If true,
 * such devices will be initialised with a random locally-generated
 * MAC address. This allows for loading the sfc_mtd driver to
 * reprogram the flash, even if the flash contents (including the MAC
 * address) have previously been erased.
 */
static unsigned int allow_bad_hwaddr;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * The default for RX should strike a balance between increasing the
 * round-trip latency and reducing overhead.
 */
static unsigned int rx_irq_mod_usec = 60;

/* Initial interrupt moderation settings. They can be modified after
 * module load with ethtool.
 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full. A queue is
 * restarted when it drops below half full. The time this takes (assuming
 * worst case 3 descriptors per packet and 1024 descriptors) is
 * 512 / 3 * 1.2 = 205 usec.
 */
static unsigned int tx_irq_mod_usec = 150;

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
static unsigned int interrupt_mode;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each package (level II cache)
 */
static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static int phy_flash_cfg;
module_param(phy_flash_cfg, int, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 10000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 20000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");

/**************************************************************************
 *
 * Utility functions and prototypes
 *
 *************************************************************************/

static void efx_remove_channels(struct efx_nic *efx);
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi(struct efx_nic *efx);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_RUNNING) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

/**************************************************************************
 *
 * Event queue processing
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_nic *efx = channel->efx;
	int spent;

	if (unlikely(efx->reset_pending || !channel->enabled))
		return 0;

	spent = efx_nic_process_eventq(channel, budget);
	if (spent == 0)
		return 0;

	/* Deliver last RX packet. */
	if (channel->rx_pkt) {
		__efx_rx_packet(channel, channel->rx_pkt,
				channel->rx_pkt_csummed);
		channel->rx_pkt = NULL;
	}

	efx_rx_strategy(channel);

	efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel));

	return spent;
}

/* Mark channel as finished processing
 *
 * Note that since we will not receive further interrupts for this
 * channel before we finish processing and call the eventq_read_ack()
 * method, there is no need to use the interrupt hold-off timers.
 */
static inline void efx_channel_processed(struct efx_channel *channel)
{
	/* The interrupt handler for this channel may set work_pending
	 * as soon as we acknowledge the events we've seen. Make sure
	 * it's cleared before then. */
	channel->work_pending = false;
	smp_wmb();

	efx_nic_eventq_read_ack(channel);
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (channel->channel < efx->n_rx_channels &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			if (unlikely(channel->irq_mod_score <
				     irq_adapt_low_thresh)) {
				if (channel->irq_moderation > 1) {
					channel->irq_moderation -= 1;
					efx->type->push_irq_moderation(channel);
				}
			} else if (unlikely(channel->irq_mod_score >
					    irq_adapt_high_thresh)) {
				if (channel->irq_moderation <
				    efx->irq_rx_moderation) {
					channel->irq_moderation += 1;
					efx->type->push_irq_moderation(channel);
				}
			}
			channel->irq_count = 0;
			channel->irq_mod_score = 0;
		}

		efx_filter_rfs_expire(channel);

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_channel_processed() will have no effect if
		 * interrupts have already been disabled.
		 */
		napi_complete(napi);
		efx_channel_processed(channel);
	}

	return spent;
}

/* Process the eventq of the specified channel immediately on this CPU
 *
 * Disable hardware generated interrupts, wait for any existing
 * processing to finish, then directly poll (and ack ) the eventq.
 * Finally reenable NAPI and interrupts.
 *
 * This is for use only during a loopback self-test. It must not
 * deliver any packets up the stack as this can result in deadlock.
 */
void efx_process_channel_now(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	BUG_ON(channel->channel >= efx->n_channels);
	BUG_ON(!channel->enabled);
	BUG_ON(!efx->loopback_selftest);

	/* Disable interrupts and wait for ISRs to complete */
	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);
		efx->legacy_irq_enabled = false;
	}
	if (channel->irq)
		synchronize_irq(channel->irq);

	/* Wait for any NAPI processing to complete */
	napi_disable(&channel->napi_str);

	/* Poll the channel */
	efx_process_channel(channel, channel->eventq_mask + 1);

	/* Ack the eventq. This may cause an interrupt to be generated
	 * when they are reenabled */
	efx_channel_processed(channel);

	napi_enable(&channel->napi_str);
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions. */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
static void efx_init_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	channel->eventq_read_ptr = 0;

	efx_nic_init_eventq(channel);
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

/* Allocate and initialise a channel structure, optionally copying
 * parameters (but not resources) from an old channel structure. */
static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	if (old_channel) {
		channel = kmalloc(sizeof(*channel), GFP_KERNEL);
		if (!channel)
			return NULL;

		*channel = *old_channel;

		channel->napi_dev = NULL;
		memset(&channel->eventq, 0, sizeof(channel->eventq));

		rx_queue = &channel->rx_queue;
		rx_queue->buffer = NULL;
		memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));

		for (j = 0; j < EFX_TXQ_TYPES; j++) {
			tx_queue = &channel->tx_queue[j];
			if (tx_queue->channel)
				tx_queue->channel = channel;
			tx_queue->buffer = NULL;
			memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
		}
	} else {
		channel = kzalloc(sizeof(*channel), GFP_KERNEL);
		if (!channel)
			return NULL;

		channel->efx = efx;
		channel->channel = i;

		for (j = 0; j < EFX_TXQ_TYPES; j++) {
			tx_queue = &channel->tx_queue[j];
			tx_queue->efx = efx;
			tx_queue->queue = i * EFX_TXQ_TYPES + j;
			tx_queue->channel = channel;
		}
	}

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
		    (unsigned long)rx_queue);

	return channel;
}

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail1;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail2;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail3;
	}

	channel->n_rx_frm_trunc = 0;

	return 0;

 fail3:
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
 fail2:
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
 fail1:
	return rc;
}


static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;
	const char *type = "";
	int number;

	efx_for_each_channel(channel, efx) {
		number = channel->channel;
		if (efx->n_channels > efx->n_rx_channels) {
			if (channel->channel < efx->n_rx_channels) {
				type = "-rx";
			} else {
				type = "-tx";
				number -= efx->n_rx_channels;
			}
		}
		snprintf(efx->channel_name[channel->channel],
			 sizeof(efx->channel_name[0]),
			 "%s%s-%d", efx->name, type, number);
	}
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	efx_for_each_channel(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

/* Channels are shutdown and reinitialised whilst the NIC is running
 * to propagate configuration changes (mtu, checksum offload), or
 * to clear hardware error conditions
 */
static void efx_init_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	/* Calculate the rx buffer allocation parameters required to
	 * support the current MTU, including padding for header
	 * alignment and overruns.
	 */
	efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
			      EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			      efx->type->rx_buffer_hash_size +
			      efx->type->rx_buffer_padding);
	efx->rx_buffer_order = get_order(efx->rx_buffer_len +
					 sizeof(struct efx_rx_page_state));

	/* Initialise the channels */
	efx_for_each_channel(channel, efx) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "init chan %d\n", channel->channel);

		efx_init_eventq(channel);

		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_init_tx_queue(tx_queue);

		/* The rx buffer allocation strategy is MTU dependent */
		efx_rx_strategy(channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_init_rx_queue(rx_queue);

		WARN_ON(channel->rx_pkt != NULL);
		efx_rx_strategy(channel);
	}
}

/* This enables event queue processing and packet transmission.
 *
 * Note that this function is not allowed to fail, since that would
 * introduce too much complexity into the suspend/resume path.
 */
static void efx_start_channel(struct efx_channel *channel)
{
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "starting chan %d\n", channel->channel);

	/* The interrupt handler for this channel may set work_pending
	 * as soon as we enable it. Make sure it's cleared before
	 * then. Similarly, make sure it sees the enabled flag set. */
	channel->work_pending = false;
	channel->enabled = true;
	smp_wmb();

	/* Fill the queues before enabling NAPI */
	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_fast_push_rx_descriptors(rx_queue);

	napi_enable(&channel->napi_str);
}

/* This disables event queue processing and packet transmission.
 * This function does not guarantee that all queue processing
 * (e.g. RX refill) is complete.
 */
static void efx_stop_channel(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	netif_dbg(channel->efx, ifdown, channel->efx->net_dev,
		  "stop chan %d\n", channel->channel);

	channel->enabled = false;
	napi_disable(&channel->napi_str);
}

static void efx_fini_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	rc = efx_nic_flush_queues(efx);
	if (rc && EFX_WORKAROUND_7803(efx)) {
		/* Schedule a reset to recover from the flush failure. The
		 * descriptor caches reference memory we're about to free,
		 * but falcon_reconfigure_mac_wrapper() won't reconnect
		 * the MACs because of the pending reset. */
		netif_err(efx, drv, efx->net_dev,
			  "Resetting to recover from flush failure\n");
		efx_schedule_reset(efx, RESET_TYPE_ALL);
	} else if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		netif_dbg(channel->efx, drv, channel->efx->net_dev,
			  "shut down chan %d\n", channel->channel);

		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
		efx_fini_eventq(channel);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i;
	int rc;

	efx_stop_all(efx);
	efx_fini_channels(efx);

	/* Clone channels */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx_alloc_channel(efx, i, efx->channel[i]);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto rollback;

	efx_init_napi(efx);

	/* Destroy old channels */
	for (i = 0; i < efx->n_channels; i++) {
		efx_fini_napi_channel(other_channel[i]);
		efx_remove_channel(other_channel[i]);
	}
out:
	/* Free unused channel structures */
	for (i = 0; i < efx->n_channels; i++)
		kfree(other_channel[i]);

	efx_init_channels(efx);
	efx_start_all(efx);
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

/**************************************************************************
 *
 * Port handling
 *
 **************************************************************************/

/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, and also maintains the
 * link status's stop on the port's TX queue.
 */
void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
	 * that no events are triggered between unregister_netdev() and the
	 * driver unloading. A more general condition is that NETDEV_CHANGE
	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	/* Status message for kernel log */
	if (link_state->up) {
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)%s\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu,
			   (efx->promiscuous ? " [PROMISC]" : ""));
	} else {
		netif_info(efx, link, efx->net_dev, "link down\n");
	}

}

void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
{
	efx->link_advertising = advertising;
	if (advertising) {
		if (advertising & ADVERTISED_Pause)
			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
		else
			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
		if (advertising & ADVERTISED_Asym_Pause)
			efx->wanted_fc ^= EFX_FC_TX;
	}
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising |= (ADVERTISED_Pause |
						  ADVERTISED_Asym_Pause);
		else
			efx->link_advertising &= ~(ADVERTISED_Pause |
843 | efx->link_advertising &= ~(ADVERTISED_Pause | | 843 | efx->link_advertising &= ~(ADVERTISED_Pause | |
844 | ADVERTISED_Asym_Pause); | 844 | ADVERTISED_Asym_Pause); |
845 | if (wanted_fc & EFX_FC_TX) | 845 | if (wanted_fc & EFX_FC_TX) |
846 | efx->link_advertising ^= ADVERTISED_Asym_Pause; | 846 | efx->link_advertising ^= ADVERTISED_Asym_Pause; |
847 | } | 847 | } |
848 | } | 848 | } |
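
Editor's note: efx_link_set_advertising() and efx_link_set_wanted_fc() keep the ethtool pause-advertising bits and the EFX_FC_TX/EFX_FC_RX flow-control flags consistent with each other. A minimal standalone sketch of the advertising-to-flow-control direction of that mapping; the constant values below are illustrative stand-ins, not the kernel definitions:

#include <stdio.h>

#define ADV_PAUSE       0x1   /* stands in for ADVERTISED_Pause */
#define ADV_ASYM_PAUSE  0x2   /* stands in for ADVERTISED_Asym_Pause */
#define FC_TX           0x1   /* stands in for EFX_FC_TX */
#define FC_RX           0x2   /* stands in for EFX_FC_RX */

/* Mirrors the logic of efx_link_set_advertising() for a fresh value. */
static unsigned int fc_from_advertising(unsigned int advertising)
{
	unsigned int fc = 0;

	if (advertising & ADV_PAUSE)
		fc |= FC_TX | FC_RX;
	if (advertising & ADV_ASYM_PAUSE)
		fc ^= FC_TX;
	return fc;
}

int main(void)
{
	unsigned int adv;

	for (adv = 0; adv < 4; adv++)
		printf("Pause=%u Asym=%u -> TX=%u RX=%u\n",
		       !!(adv & ADV_PAUSE), !!(adv & ADV_ASYM_PAUSE),
		       !!(fc_from_advertising(adv) & FC_TX),
		       !!(fc_from_advertising(adv) & FC_RX));
	return 0;
}

The table it prints shows the usual pause resolution: Pause alone enables both directions, Pause plus Asym_Pause leaves only RX, and Asym_Pause alone leaves only TX.
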
849 | 849 | ||
850 | static void efx_fini_port(struct efx_nic *efx); | 850 | static void efx_fini_port(struct efx_nic *efx); |
851 | 851 | ||
852 | /* Push loopback/power/transmit disable settings to the PHY, and reconfigure | 852 | /* Push loopback/power/transmit disable settings to the PHY, and reconfigure |
853 | * the MAC appropriately. All other PHY configuration changes are pushed | 853 | * the MAC appropriately. All other PHY configuration changes are pushed |
854 | * through phy_op->set_settings(), and pushed asynchronously to the MAC | 854 | * through phy_op->set_settings(), and pushed asynchronously to the MAC |
855 | * through efx_monitor(). | 855 | * through efx_monitor(). |
856 | * | 856 | * |
857 | * Callers must hold the mac_lock | 857 | * Callers must hold the mac_lock |
858 | */ | 858 | */ |
859 | int __efx_reconfigure_port(struct efx_nic *efx) | 859 | int __efx_reconfigure_port(struct efx_nic *efx) |
860 | { | 860 | { |
861 | enum efx_phy_mode phy_mode; | 861 | enum efx_phy_mode phy_mode; |
862 | int rc; | 862 | int rc; |
863 | 863 | ||
864 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | 864 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); |
865 | 865 | ||
866 | /* Serialise the promiscuous flag with efx_set_multicast_list. */ | 866 | /* Serialise the promiscuous flag with efx_set_multicast_list. */ |
867 | if (efx_dev_registered(efx)) { | 867 | if (efx_dev_registered(efx)) { |
868 | netif_addr_lock_bh(efx->net_dev); | 868 | netif_addr_lock_bh(efx->net_dev); |
869 | netif_addr_unlock_bh(efx->net_dev); | 869 | netif_addr_unlock_bh(efx->net_dev); |
870 | } | 870 | } |
871 | 871 | ||
872 | /* Disable PHY transmit in mac level loopbacks */ | 872 | /* Disable PHY transmit in mac level loopbacks */ |
873 | phy_mode = efx->phy_mode; | 873 | phy_mode = efx->phy_mode; |
874 | if (LOOPBACK_INTERNAL(efx)) | 874 | if (LOOPBACK_INTERNAL(efx)) |
875 | efx->phy_mode |= PHY_MODE_TX_DISABLED; | 875 | efx->phy_mode |= PHY_MODE_TX_DISABLED; |
876 | else | 876 | else |
877 | efx->phy_mode &= ~PHY_MODE_TX_DISABLED; | 877 | efx->phy_mode &= ~PHY_MODE_TX_DISABLED; |
878 | 878 | ||
879 | rc = efx->type->reconfigure_port(efx); | 879 | rc = efx->type->reconfigure_port(efx); |
880 | 880 | ||
881 | if (rc) | 881 | if (rc) |
882 | efx->phy_mode = phy_mode; | 882 | efx->phy_mode = phy_mode; |
883 | 883 | ||
884 | return rc; | 884 | return rc; |
885 | } | 885 | } |
886 | 886 | ||
887 | /* Reinitialise the MAC to pick up new PHY settings, even if the port is | 887 | /* Reinitialise the MAC to pick up new PHY settings, even if the port is |
888 | * disabled. */ | 888 | * disabled. */ |
889 | int efx_reconfigure_port(struct efx_nic *efx) | 889 | int efx_reconfigure_port(struct efx_nic *efx) |
890 | { | 890 | { |
891 | int rc; | 891 | int rc; |
892 | 892 | ||
893 | EFX_ASSERT_RESET_SERIALISED(efx); | 893 | EFX_ASSERT_RESET_SERIALISED(efx); |
894 | 894 | ||
895 | mutex_lock(&efx->mac_lock); | 895 | mutex_lock(&efx->mac_lock); |
896 | rc = __efx_reconfigure_port(efx); | 896 | rc = __efx_reconfigure_port(efx); |
897 | mutex_unlock(&efx->mac_lock); | 897 | mutex_unlock(&efx->mac_lock); |
898 | 898 | ||
899 | return rc; | 899 | return rc; |
900 | } | 900 | } |
901 | 901 | ||
902 | /* Asynchronous work item for changing MAC promiscuity and multicast | 902 | /* Asynchronous work item for changing MAC promiscuity and multicast |
903 | * hash. Avoid a drain/rx_ingress enable by reconfiguring the current | 903 | * hash. Avoid a drain/rx_ingress enable by reconfiguring the current |
904 | * MAC directly. */ | 904 | * MAC directly. */ |
905 | static void efx_mac_work(struct work_struct *data) | 905 | static void efx_mac_work(struct work_struct *data) |
906 | { | 906 | { |
907 | struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); | 907 | struct efx_nic *efx = container_of(data, struct efx_nic, mac_work); |
908 | 908 | ||
909 | mutex_lock(&efx->mac_lock); | 909 | mutex_lock(&efx->mac_lock); |
910 | if (efx->port_enabled) { | 910 | if (efx->port_enabled) { |
911 | efx->type->push_multicast_hash(efx); | 911 | efx->type->push_multicast_hash(efx); |
912 | efx->mac_op->reconfigure(efx); | 912 | efx->mac_op->reconfigure(efx); |
913 | } | 913 | } |
914 | mutex_unlock(&efx->mac_lock); | 914 | mutex_unlock(&efx->mac_lock); |
915 | } | 915 | } |
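
Editor's note: efx_mac_work() recovers its efx_nic from the embedded work_struct with container_of(). A standalone sketch of that pointer arithmetic, using a user-space re-implementation of the macro and made-up structure names (the kernel provides the real macro):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct nic {
	int port_enabled;
	struct work mac_work;   /* embedded member, like efx->mac_work */
};

static void mac_work_fn(struct work *data)
{
	/* Step back from the member to the enclosing structure. */
	struct nic *nic = container_of(data, struct nic, mac_work);

	printf("port_enabled=%d\n", nic->port_enabled);
}

int main(void)
{
	struct nic nic = { .port_enabled = 1 };

	mac_work_fn(&nic.mac_work);   /* the handler sees only the member */
	return 0;
}
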
916 | 916 | ||
917 | static int efx_probe_port(struct efx_nic *efx) | 917 | static int efx_probe_port(struct efx_nic *efx) |
918 | { | 918 | { |
919 | unsigned char *perm_addr; | 919 | unsigned char *perm_addr; |
920 | int rc; | 920 | int rc; |
921 | 921 | ||
922 | netif_dbg(efx, probe, efx->net_dev, "create port\n"); | 922 | netif_dbg(efx, probe, efx->net_dev, "create port\n"); |
923 | 923 | ||
924 | if (phy_flash_cfg) | 924 | if (phy_flash_cfg) |
925 | efx->phy_mode = PHY_MODE_SPECIAL; | 925 | efx->phy_mode = PHY_MODE_SPECIAL; |
926 | 926 | ||
927 | /* Connect up MAC/PHY operations table */ | 927 | /* Connect up MAC/PHY operations table */ |
928 | rc = efx->type->probe_port(efx); | 928 | rc = efx->type->probe_port(efx); |
929 | if (rc) | 929 | if (rc) |
930 | return rc; | 930 | return rc; |
931 | 931 | ||
932 | /* Sanity check MAC address */ | 932 | /* Sanity check MAC address */ |
933 | perm_addr = efx->net_dev->perm_addr; | 933 | perm_addr = efx->net_dev->perm_addr; |
934 | if (is_valid_ether_addr(perm_addr)) { | 934 | if (is_valid_ether_addr(perm_addr)) { |
935 | memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN); | 935 | memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN); |
936 | } else { | 936 | } else { |
937 | netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", | 937 | netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", |
938 | perm_addr); | 938 | perm_addr); |
939 | if (!allow_bad_hwaddr) { | 939 | if (!allow_bad_hwaddr) { |
940 | rc = -EINVAL; | 940 | rc = -EINVAL; |
941 | goto err; | 941 | goto err; |
942 | } | 942 | } |
943 | random_ether_addr(efx->net_dev->dev_addr); | 943 | random_ether_addr(efx->net_dev->dev_addr); |
944 | netif_info(efx, probe, efx->net_dev, | 944 | netif_info(efx, probe, efx->net_dev, |
945 | "using locally-generated MAC %pM\n", | 945 | "using locally-generated MAC %pM\n", |
946 | efx->net_dev->dev_addr); | 946 | efx->net_dev->dev_addr); |
947 | } | 947 | } |
948 | 948 | ||
949 | return 0; | 949 | return 0; |
950 | 950 | ||
951 | err: | 951 | err: |
952 | efx->type->remove_port(efx); | 952 | efx->type->remove_port(efx); |
953 | return rc; | 953 | return rc; |
954 | } | 954 | } |
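
Editor's note: the probe path above falls back to a random, locally generated address when the permanent one fails the sanity check. A standalone sketch of the check itself; the helper below mirrors what is_valid_ether_addr() tests (not multicast, not all-zero), and the sample addresses are made up:

#include <stdio.h>
#include <string.h>

/* Valid means: bit 0 of the first octet clear (not multicast/broadcast)
 * and not the all-zero address. */
static int valid_ether_addr(const unsigned char *a)
{
	static const unsigned char zero[6];

	return !(a[0] & 1) && memcmp(a, zero, 6) != 0;
}

int main(void)
{
	unsigned char good[6]  = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	unsigned char bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("unicast: %d, broadcast: %d\n",
	       valid_ether_addr(good), valid_ether_addr(bcast));
	return 0;
}
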
955 | 955 | ||
956 | static int efx_init_port(struct efx_nic *efx) | 956 | static int efx_init_port(struct efx_nic *efx) |
957 | { | 957 | { |
958 | int rc; | 958 | int rc; |
959 | 959 | ||
960 | netif_dbg(efx, drv, efx->net_dev, "init port\n"); | 960 | netif_dbg(efx, drv, efx->net_dev, "init port\n"); |
961 | 961 | ||
962 | mutex_lock(&efx->mac_lock); | 962 | mutex_lock(&efx->mac_lock); |
963 | 963 | ||
964 | rc = efx->phy_op->init(efx); | 964 | rc = efx->phy_op->init(efx); |
965 | if (rc) | 965 | if (rc) |
966 | goto fail1; | 966 | goto fail1; |
967 | 967 | ||
968 | efx->port_initialized = true; | 968 | efx->port_initialized = true; |
969 | 969 | ||
970 | /* Reconfigure the MAC before creating dma queues (required for | 970 | /* Reconfigure the MAC before creating dma queues (required for |
971 | * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ | 971 | * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */ |
972 | efx->mac_op->reconfigure(efx); | 972 | efx->mac_op->reconfigure(efx); |
973 | 973 | ||
974 | /* Ensure the PHY advertises the correct flow control settings */ | 974 | /* Ensure the PHY advertises the correct flow control settings */ |
975 | rc = efx->phy_op->reconfigure(efx); | 975 | rc = efx->phy_op->reconfigure(efx); |
976 | if (rc) | 976 | if (rc) |
977 | goto fail2; | 977 | goto fail2; |
978 | 978 | ||
979 | mutex_unlock(&efx->mac_lock); | 979 | mutex_unlock(&efx->mac_lock); |
980 | return 0; | 980 | return 0; |
981 | 981 | ||
982 | fail2: | 982 | fail2: |
983 | efx->phy_op->fini(efx); | 983 | efx->phy_op->fini(efx); |
984 | fail1: | 984 | fail1: |
985 | mutex_unlock(&efx->mac_lock); | 985 | mutex_unlock(&efx->mac_lock); |
986 | return rc; | 986 | return rc; |
987 | } | 987 | } |
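
Editor's note: efx_init_port() uses the usual goto-unwind style, where each failure label undoes only the steps that had already succeeded, in reverse order. A tiny standalone sketch of the idiom with stub steps:

#include <stdio.h>

static int step_a(void)  { printf("init a\n"); return 0; }
static int step_b(void)  { printf("init b\n"); return -1; }  /* pretend b fails */
static void undo_a(void) { printf("undo a\n"); }

static int init(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto fail1;

	rc = step_b();
	if (rc)
		goto fail2;

	return 0;

fail2:
	undo_a();     /* undo only what already succeeded */
fail1:
	return rc;
}

int main(void)
{
	printf("rc=%d\n", init());
	return 0;
}
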
988 | 988 | ||
989 | static void efx_start_port(struct efx_nic *efx) | 989 | static void efx_start_port(struct efx_nic *efx) |
990 | { | 990 | { |
991 | netif_dbg(efx, ifup, efx->net_dev, "start port\n"); | 991 | netif_dbg(efx, ifup, efx->net_dev, "start port\n"); |
992 | BUG_ON(efx->port_enabled); | 992 | BUG_ON(efx->port_enabled); |
993 | 993 | ||
994 | mutex_lock(&efx->mac_lock); | 994 | mutex_lock(&efx->mac_lock); |
995 | efx->port_enabled = true; | 995 | efx->port_enabled = true; |
996 | 996 | ||
997 | /* efx_mac_work() might have been scheduled after efx_stop_port(), | 997 | /* efx_mac_work() might have been scheduled after efx_stop_port(), |
998 | * and then cancelled by efx_flush_all() */ | 998 | * and then cancelled by efx_flush_all() */ |
999 | efx->type->push_multicast_hash(efx); | 999 | efx->type->push_multicast_hash(efx); |
1000 | efx->mac_op->reconfigure(efx); | 1000 | efx->mac_op->reconfigure(efx); |
1001 | 1001 | ||
1002 | mutex_unlock(&efx->mac_lock); | 1002 | mutex_unlock(&efx->mac_lock); |
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | /* Prevent efx_mac_work() and efx_monitor() from working */ | 1005 | /* Prevent efx_mac_work() and efx_monitor() from working */ |
1006 | static void efx_stop_port(struct efx_nic *efx) | 1006 | static void efx_stop_port(struct efx_nic *efx) |
1007 | { | 1007 | { |
1008 | netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); | 1008 | netif_dbg(efx, ifdown, efx->net_dev, "stop port\n"); |
1009 | 1009 | ||
1010 | mutex_lock(&efx->mac_lock); | 1010 | mutex_lock(&efx->mac_lock); |
1011 | efx->port_enabled = false; | 1011 | efx->port_enabled = false; |
1012 | mutex_unlock(&efx->mac_lock); | 1012 | mutex_unlock(&efx->mac_lock); |
1013 | 1013 | ||
1014 | /* Serialise against efx_set_multicast_list() */ | 1014 | /* Serialise against efx_set_multicast_list() */ |
1015 | if (efx_dev_registered(efx)) { | 1015 | if (efx_dev_registered(efx)) { |
1016 | netif_addr_lock_bh(efx->net_dev); | 1016 | netif_addr_lock_bh(efx->net_dev); |
1017 | netif_addr_unlock_bh(efx->net_dev); | 1017 | netif_addr_unlock_bh(efx->net_dev); |
1018 | } | 1018 | } |
1019 | } | 1019 | } |
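
Editor's note: efx_stop_port(), like __efx_reconfigure_port() above, bounces the net device's address lock (takes it and immediately drops it) after publishing the state change, so that any critical section that started under the old state has finished before the caller continues. A standalone POSIX-threads sketch of the idiom (compile with -lpthread; names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t addr_lock = PTHREAD_MUTEX_INITIALIZER;
static int port_enabled = 1;

static void stop_port(void)
{
	port_enabled = 0;              /* publish the new state first */

	/* Lock-bounce: once we get the lock, any section that was already
	 * inside it (and may have seen port_enabled == 1) has finished. */
	pthread_mutex_lock(&addr_lock);
	pthread_mutex_unlock(&addr_lock);
}

int main(void)
{
	stop_port();
	printf("port stopped, port_enabled=%d\n", port_enabled);
	return 0;
}
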
1020 | 1020 | ||
1021 | static void efx_fini_port(struct efx_nic *efx) | 1021 | static void efx_fini_port(struct efx_nic *efx) |
1022 | { | 1022 | { |
1023 | netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); | 1023 | netif_dbg(efx, drv, efx->net_dev, "shut down port\n"); |
1024 | 1024 | ||
1025 | if (!efx->port_initialized) | 1025 | if (!efx->port_initialized) |
1026 | return; | 1026 | return; |
1027 | 1027 | ||
1028 | efx->phy_op->fini(efx); | 1028 | efx->phy_op->fini(efx); |
1029 | efx->port_initialized = false; | 1029 | efx->port_initialized = false; |
1030 | 1030 | ||
1031 | efx->link_state.up = false; | 1031 | efx->link_state.up = false; |
1032 | efx_link_status_changed(efx); | 1032 | efx_link_status_changed(efx); |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | static void efx_remove_port(struct efx_nic *efx) | 1035 | static void efx_remove_port(struct efx_nic *efx) |
1036 | { | 1036 | { |
1037 | netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); | 1037 | netif_dbg(efx, drv, efx->net_dev, "destroying port\n"); |
1038 | 1038 | ||
1039 | efx->type->remove_port(efx); | 1039 | efx->type->remove_port(efx); |
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | /************************************************************************** | 1042 | /************************************************************************** |
1043 | * | 1043 | * |
1044 | * NIC handling | 1044 | * NIC handling |
1045 | * | 1045 | * |
1046 | **************************************************************************/ | 1046 | **************************************************************************/ |
1047 | 1047 | ||
1048 | /* This configures the PCI device to enable I/O and DMA. */ | 1048 | /* This configures the PCI device to enable I/O and DMA. */ |
1049 | static int efx_init_io(struct efx_nic *efx) | 1049 | static int efx_init_io(struct efx_nic *efx) |
1050 | { | 1050 | { |
1051 | struct pci_dev *pci_dev = efx->pci_dev; | 1051 | struct pci_dev *pci_dev = efx->pci_dev; |
1052 | dma_addr_t dma_mask = efx->type->max_dma_mask; | 1052 | dma_addr_t dma_mask = efx->type->max_dma_mask; |
1053 | bool use_wc; | ||
1054 | int rc; | 1053 | int rc; |
1055 | 1054 | ||
1056 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); | 1055 | netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n"); |
1057 | 1056 | ||
1058 | rc = pci_enable_device(pci_dev); | 1057 | rc = pci_enable_device(pci_dev); |
1059 | if (rc) { | 1058 | if (rc) { |
1060 | netif_err(efx, probe, efx->net_dev, | 1059 | netif_err(efx, probe, efx->net_dev, |
1061 | "failed to enable PCI device\n"); | 1060 | "failed to enable PCI device\n"); |
1062 | goto fail1; | 1061 | goto fail1; |
1063 | } | 1062 | } |
1064 | 1063 | ||
1065 | pci_set_master(pci_dev); | 1064 | pci_set_master(pci_dev); |
1066 | 1065 | ||
1067 | /* Set the PCI DMA mask. Try all possibilities from our | 1066 | /* Set the PCI DMA mask. Try all possibilities from our |
1068 | * genuine mask down to 32 bits, because some architectures | 1067 | * genuine mask down to 32 bits, because some architectures |
1069 | * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit | 1068 | * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit |
1070 | * masks even though they reject 46 bit masks. | 1069 | * masks even though they reject 46 bit masks. |
1071 | */ | 1070 | */ |
1072 | while (dma_mask > 0x7fffffffUL) { | 1071 | while (dma_mask > 0x7fffffffUL) { |
1073 | if (pci_dma_supported(pci_dev, dma_mask) && | 1072 | if (pci_dma_supported(pci_dev, dma_mask) && |
1074 | ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) | 1073 | ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) |
1075 | break; | 1074 | break; |
1076 | dma_mask >>= 1; | 1075 | dma_mask >>= 1; |
1077 | } | 1076 | } |
1078 | if (rc) { | 1077 | if (rc) { |
1079 | netif_err(efx, probe, efx->net_dev, | 1078 | netif_err(efx, probe, efx->net_dev, |
1080 | "could not find a suitable DMA mask\n"); | 1079 | "could not find a suitable DMA mask\n"); |
1081 | goto fail2; | 1080 | goto fail2; |
1082 | } | 1081 | } |
1083 | netif_dbg(efx, probe, efx->net_dev, | 1082 | netif_dbg(efx, probe, efx->net_dev, |
1084 | "using DMA mask %llx\n", (unsigned long long) dma_mask); | 1083 | "using DMA mask %llx\n", (unsigned long long) dma_mask); |
1085 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); | 1084 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); |
1086 | if (rc) { | 1085 | if (rc) { |
1087 | /* pci_set_consistent_dma_mask() is not *allowed* to | 1086 | /* pci_set_consistent_dma_mask() is not *allowed* to |
1088 | * fail with a mask that pci_set_dma_mask() accepted, | 1087 | * fail with a mask that pci_set_dma_mask() accepted, |
1089 | * but just in case... | 1088 | * but just in case... |
1090 | */ | 1089 | */ |
1091 | netif_err(efx, probe, efx->net_dev, | 1090 | netif_err(efx, probe, efx->net_dev, |
1092 | "failed to set consistent DMA mask\n"); | 1091 | "failed to set consistent DMA mask\n"); |
1093 | goto fail2; | 1092 | goto fail2; |
1094 | } | 1093 | } |
1095 | 1094 | ||
1096 | efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); | 1095 | efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR); |
1097 | rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); | 1096 | rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc"); |
1098 | if (rc) { | 1097 | if (rc) { |
1099 | netif_err(efx, probe, efx->net_dev, | 1098 | netif_err(efx, probe, efx->net_dev, |
1100 | "request for memory BAR failed\n"); | 1099 | "request for memory BAR failed\n"); |
1101 | rc = -EIO; | 1100 | rc = -EIO; |
1102 | goto fail3; | 1101 | goto fail3; |
1103 | } | 1102 | } |
1104 | 1103 | efx->membase = ioremap_nocache(efx->membase_phys, | |
1105 | /* bug22643: If SR-IOV is enabled then tx push over a write combined | 1104 | efx->type->mem_map_size); |
1106 | * mapping is unsafe. We need to disable write combining in this case. | ||
1107 | * MSI is unsupported when SR-IOV is enabled, and the firmware will | ||
1108 | * have removed the MSI capability. So write combining is safe if | ||
1109 | * there is an MSI capability. | ||
1110 | */ | ||
1111 | use_wc = (!EFX_WORKAROUND_22643(efx) || | ||
1112 | pci_find_capability(pci_dev, PCI_CAP_ID_MSI)); | ||
1113 | if (use_wc) | ||
1114 | efx->membase = ioremap_wc(efx->membase_phys, | ||
1115 | efx->type->mem_map_size); | ||
1116 | else | ||
1117 | efx->membase = ioremap_nocache(efx->membase_phys, | ||
1118 | efx->type->mem_map_size); | ||
1119 | if (!efx->membase) { | 1105 | if (!efx->membase) { |
1120 | netif_err(efx, probe, efx->net_dev, | 1106 | netif_err(efx, probe, efx->net_dev, |
1121 | "could not map memory BAR at %llx+%x\n", | 1107 | "could not map memory BAR at %llx+%x\n", |
1122 | (unsigned long long)efx->membase_phys, | 1108 | (unsigned long long)efx->membase_phys, |
1123 | efx->type->mem_map_size); | 1109 | efx->type->mem_map_size); |
1124 | rc = -ENOMEM; | 1110 | rc = -ENOMEM; |
1125 | goto fail4; | 1111 | goto fail4; |
1126 | } | 1112 | } |
1127 | netif_dbg(efx, probe, efx->net_dev, | 1113 | netif_dbg(efx, probe, efx->net_dev, |
1128 | "memory BAR at %llx+%x (virtual %p)\n", | 1114 | "memory BAR at %llx+%x (virtual %p)\n", |
1129 | (unsigned long long)efx->membase_phys, | 1115 | (unsigned long long)efx->membase_phys, |
1130 | efx->type->mem_map_size, efx->membase); | 1116 | efx->type->mem_map_size, efx->membase); |
1131 | 1117 | ||
1132 | return 0; | 1118 | return 0; |
1133 | 1119 | ||
1134 | fail4: | 1120 | fail4: |
1135 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); | 1121 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); |
1136 | fail3: | 1122 | fail3: |
1137 | efx->membase_phys = 0; | 1123 | efx->membase_phys = 0; |
1138 | fail2: | 1124 | fail2: |
1139 | pci_disable_device(efx->pci_dev); | 1125 | pci_disable_device(efx->pci_dev); |
1140 | fail1: | 1126 | fail1: |
1141 | return rc; | 1127 | return rc; |
1142 | } | 1128 | } |
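
Editor's note: the DMA-mask loop in efx_init_io() walks down from the NIC's widest mask, halving it until the platform accepts one or the mask would drop below 32 bits. A standalone sketch of that narrowing loop; accepts_mask() is a stand-in that merges the pci_dma_supported()/pci_set_dma_mask() pair, and the 40-bit platform limit is made up for illustration:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Pretend the platform accepts at most 40-bit DMA masks. */
static bool accepts_mask(uint64_t mask)
{
	return mask <= ((1ULL << 40) - 1);
}

int main(void)
{
	uint64_t dma_mask = (1ULL << 46) - 1;   /* a 46-bit capable device */
	int rc = -1;                            /* assume failure until a mask is accepted */

	while (dma_mask > 0x7fffffffUL) {
		if (accepts_mask(dma_mask)) {
			rc = 0;
			break;
		}
		dma_mask >>= 1;
	}

	printf("rc=%d mask=%#llx\n", rc, (unsigned long long)dma_mask);
	return 0;
}

Starting from a 46-bit mask, this accepts the first mask at or below the platform limit (here the 40-bit mask) rather than jumping straight to 32 bits.
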
1143 | 1129 | ||
1144 | static void efx_fini_io(struct efx_nic *efx) | 1130 | static void efx_fini_io(struct efx_nic *efx) |
1145 | { | 1131 | { |
1146 | netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); | 1132 | netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n"); |
1147 | 1133 | ||
1148 | if (efx->membase) { | 1134 | if (efx->membase) { |
1149 | iounmap(efx->membase); | 1135 | iounmap(efx->membase); |
1150 | efx->membase = NULL; | 1136 | efx->membase = NULL; |
1151 | } | 1137 | } |
1152 | 1138 | ||
1153 | if (efx->membase_phys) { | 1139 | if (efx->membase_phys) { |
1154 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); | 1140 | pci_release_region(efx->pci_dev, EFX_MEM_BAR); |
1155 | efx->membase_phys = 0; | 1141 | efx->membase_phys = 0; |
1156 | } | 1142 | } |
1157 | 1143 | ||
1158 | pci_disable_device(efx->pci_dev); | 1144 | pci_disable_device(efx->pci_dev); |
1159 | } | 1145 | } |
1160 | 1146 | ||
1161 | /* Get number of channels wanted. Each channel will have its own IRQ, | 1147 | /* Get number of channels wanted. Each channel will have its own IRQ, |
1162 | * 1 RX queue and/or 2 TX queues. */ | 1148 | * 1 RX queue and/or 2 TX queues. */ |
1163 | static int efx_wanted_channels(void) | 1149 | static int efx_wanted_channels(void) |
1164 | { | 1150 | { |
1165 | cpumask_var_t core_mask; | 1151 | cpumask_var_t core_mask; |
1166 | int count; | 1152 | int count; |
1167 | int cpu; | 1153 | int cpu; |
1168 | 1154 | ||
1169 | if (rss_cpus) | 1155 | if (rss_cpus) |
1170 | return rss_cpus; | 1156 | return rss_cpus; |
1171 | 1157 | ||
1172 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { | 1158 | if (unlikely(!zalloc_cpumask_var(&core_mask, GFP_KERNEL))) { |
1173 | printk(KERN_WARNING | 1159 | printk(KERN_WARNING |
1174 | "sfc: RSS disabled due to allocation failure\n"); | 1160 | "sfc: RSS disabled due to allocation failure\n"); |
1175 | return 1; | 1161 | return 1; |
1176 | } | 1162 | } |
1177 | 1163 | ||
1178 | count = 0; | 1164 | count = 0; |
1179 | for_each_online_cpu(cpu) { | 1165 | for_each_online_cpu(cpu) { |
1180 | if (!cpumask_test_cpu(cpu, core_mask)) { | 1166 | if (!cpumask_test_cpu(cpu, core_mask)) { |
1181 | ++count; | 1167 | ++count; |
1182 | cpumask_or(core_mask, core_mask, | 1168 | cpumask_or(core_mask, core_mask, |
1183 | topology_core_cpumask(cpu)); | 1169 | topology_core_cpumask(cpu)); |
1184 | } | 1170 | } |
1185 | } | 1171 | } |
1186 | 1172 | ||
1187 | free_cpumask_var(core_mask); | 1173 | free_cpumask_var(core_mask); |
1188 | return count; | 1174 | return count; |
1189 | } | 1175 | } |
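
Editor's note: efx_wanted_channels() counts one channel per physical core by accumulating each core's sibling mask as it scans the online CPUs. A standalone sketch with the topology faked as four hyper-threaded core pairs; the driver gets the real sibling set from topology_core_cpumask():

#include <stdio.h>

#define NCPU 8

int main(void)
{
	/* CPUs 0..7, threaded in pairs: core id = cpu / 2 */
	unsigned int seen = 0;   /* bit per CPU already covered by a core */
	int count = 0;
	int cpu;

	for (cpu = 0; cpu < NCPU; cpu++) {
		if (!(seen & (1u << cpu))) {
			count++;
			/* mark both siblings of this core as covered */
			seen |= 3u << ((cpu / 2) * 2);
		}
	}

	printf("%d CPUs -> %d channels\n", NCPU, count);  /* prints 8 -> 4 */
	return 0;
}
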
1190 | 1176 | ||
1191 | static int | 1177 | static int |
1192 | efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries) | 1178 | efx_init_rx_cpu_rmap(struct efx_nic *efx, struct msix_entry *xentries) |
1193 | { | 1179 | { |
1194 | #ifdef CONFIG_RFS_ACCEL | 1180 | #ifdef CONFIG_RFS_ACCEL |
1195 | int i, rc; | 1181 | int i, rc; |
1196 | 1182 | ||
1197 | efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels); | 1183 | efx->net_dev->rx_cpu_rmap = alloc_irq_cpu_rmap(efx->n_rx_channels); |
1198 | if (!efx->net_dev->rx_cpu_rmap) | 1184 | if (!efx->net_dev->rx_cpu_rmap) |
1199 | return -ENOMEM; | 1185 | return -ENOMEM; |
1200 | for (i = 0; i < efx->n_rx_channels; i++) { | 1186 | for (i = 0; i < efx->n_rx_channels; i++) { |
1201 | rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, | 1187 | rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap, |
1202 | xentries[i].vector); | 1188 | xentries[i].vector); |
1203 | if (rc) { | 1189 | if (rc) { |
1204 | free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); | 1190 | free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); |
1205 | efx->net_dev->rx_cpu_rmap = NULL; | 1191 | efx->net_dev->rx_cpu_rmap = NULL; |
1206 | return rc; | 1192 | return rc; |
1207 | } | 1193 | } |
1208 | } | 1194 | } |
1209 | #endif | 1195 | #endif |
1210 | return 0; | 1196 | return 0; |
1211 | } | 1197 | } |
1212 | 1198 | ||
1213 | /* Probe the number and type of interrupts we are able to obtain, and | 1199 | /* Probe the number and type of interrupts we are able to obtain, and |
1214 | * the resulting numbers of channels and RX queues. | 1200 | * the resulting numbers of channels and RX queues. |
1215 | */ | 1201 | */ |
1216 | static int efx_probe_interrupts(struct efx_nic *efx) | 1202 | static int efx_probe_interrupts(struct efx_nic *efx) |
1217 | { | 1203 | { |
1218 | int max_channels = | 1204 | int max_channels = |
1219 | min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); | 1205 | min_t(int, efx->type->phys_addr_channels, EFX_MAX_CHANNELS); |
1220 | int rc, i; | 1206 | int rc, i; |
1221 | 1207 | ||
1222 | if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { | 1208 | if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { |
1223 | struct msix_entry xentries[EFX_MAX_CHANNELS]; | 1209 | struct msix_entry xentries[EFX_MAX_CHANNELS]; |
1224 | int n_channels; | 1210 | int n_channels; |
1225 | 1211 | ||
1226 | n_channels = efx_wanted_channels(); | 1212 | n_channels = efx_wanted_channels(); |
1227 | if (separate_tx_channels) | 1213 | if (separate_tx_channels) |
1228 | n_channels *= 2; | 1214 | n_channels *= 2; |
1229 | n_channels = min(n_channels, max_channels); | 1215 | n_channels = min(n_channels, max_channels); |
1230 | 1216 | ||
1231 | for (i = 0; i < n_channels; i++) | 1217 | for (i = 0; i < n_channels; i++) |
1232 | xentries[i].entry = i; | 1218 | xentries[i].entry = i; |
1233 | rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); | 1219 | rc = pci_enable_msix(efx->pci_dev, xentries, n_channels); |
1234 | if (rc > 0) { | 1220 | if (rc > 0) { |
1235 | netif_err(efx, drv, efx->net_dev, | 1221 | netif_err(efx, drv, efx->net_dev, |
1236 | "WARNING: Insufficient MSI-X vectors" | 1222 | "WARNING: Insufficient MSI-X vectors" |
1237 | " available (%d < %d).\n", rc, n_channels); | 1223 | " available (%d < %d).\n", rc, n_channels); |
1238 | netif_err(efx, drv, efx->net_dev, | 1224 | netif_err(efx, drv, efx->net_dev, |
1239 | "WARNING: Performance may be reduced.\n"); | 1225 | "WARNING: Performance may be reduced.\n"); |
1240 | EFX_BUG_ON_PARANOID(rc >= n_channels); | 1226 | EFX_BUG_ON_PARANOID(rc >= n_channels); |
1241 | n_channels = rc; | 1227 | n_channels = rc; |
1242 | rc = pci_enable_msix(efx->pci_dev, xentries, | 1228 | rc = pci_enable_msix(efx->pci_dev, xentries, |
1243 | n_channels); | 1229 | n_channels); |
1244 | } | 1230 | } |
1245 | 1231 | ||
1246 | if (rc == 0) { | 1232 | if (rc == 0) { |
1247 | efx->n_channels = n_channels; | 1233 | efx->n_channels = n_channels; |
1248 | if (separate_tx_channels) { | 1234 | if (separate_tx_channels) { |
1249 | efx->n_tx_channels = | 1235 | efx->n_tx_channels = |
1250 | max(efx->n_channels / 2, 1U); | 1236 | max(efx->n_channels / 2, 1U); |
1251 | efx->n_rx_channels = | 1237 | efx->n_rx_channels = |
1252 | max(efx->n_channels - | 1238 | max(efx->n_channels - |
1253 | efx->n_tx_channels, 1U); | 1239 | efx->n_tx_channels, 1U); |
1254 | } else { | 1240 | } else { |
1255 | efx->n_tx_channels = efx->n_channels; | 1241 | efx->n_tx_channels = efx->n_channels; |
1256 | efx->n_rx_channels = efx->n_channels; | 1242 | efx->n_rx_channels = efx->n_channels; |
1257 | } | 1243 | } |
1258 | rc = efx_init_rx_cpu_rmap(efx, xentries); | 1244 | rc = efx_init_rx_cpu_rmap(efx, xentries); |
1259 | if (rc) { | 1245 | if (rc) { |
1260 | pci_disable_msix(efx->pci_dev); | 1246 | pci_disable_msix(efx->pci_dev); |
1261 | return rc; | 1247 | return rc; |
1262 | } | 1248 | } |
1263 | for (i = 0; i < n_channels; i++) | 1249 | for (i = 0; i < n_channels; i++) |
1264 | efx_get_channel(efx, i)->irq = | 1250 | efx_get_channel(efx, i)->irq = |
1265 | xentries[i].vector; | 1251 | xentries[i].vector; |
1266 | } else { | 1252 | } else { |
1267 | /* Fall back to single channel MSI */ | 1253 | /* Fall back to single channel MSI */ |
1268 | efx->interrupt_mode = EFX_INT_MODE_MSI; | 1254 | efx->interrupt_mode = EFX_INT_MODE_MSI; |
1269 | netif_err(efx, drv, efx->net_dev, | 1255 | netif_err(efx, drv, efx->net_dev, |
1270 | "could not enable MSI-X\n"); | 1256 | "could not enable MSI-X\n"); |
1271 | } | 1257 | } |
1272 | } | 1258 | } |
1273 | 1259 | ||
1274 | /* Try single interrupt MSI */ | 1260 | /* Try single interrupt MSI */ |
1275 | if (efx->interrupt_mode == EFX_INT_MODE_MSI) { | 1261 | if (efx->interrupt_mode == EFX_INT_MODE_MSI) { |
1276 | efx->n_channels = 1; | 1262 | efx->n_channels = 1; |
1277 | efx->n_rx_channels = 1; | 1263 | efx->n_rx_channels = 1; |
1278 | efx->n_tx_channels = 1; | 1264 | efx->n_tx_channels = 1; |
1279 | rc = pci_enable_msi(efx->pci_dev); | 1265 | rc = pci_enable_msi(efx->pci_dev); |
1280 | if (rc == 0) { | 1266 | if (rc == 0) { |
1281 | efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; | 1267 | efx_get_channel(efx, 0)->irq = efx->pci_dev->irq; |
1282 | } else { | 1268 | } else { |
1283 | netif_err(efx, drv, efx->net_dev, | 1269 | netif_err(efx, drv, efx->net_dev, |
1284 | "could not enable MSI\n"); | 1270 | "could not enable MSI\n"); |
1285 | efx->interrupt_mode = EFX_INT_MODE_LEGACY; | 1271 | efx->interrupt_mode = EFX_INT_MODE_LEGACY; |
1286 | } | 1272 | } |
1287 | } | 1273 | } |
1288 | 1274 | ||
1289 | /* Assume legacy interrupts */ | 1275 | /* Assume legacy interrupts */ |
1290 | if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { | 1276 | if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { |
1291 | efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); | 1277 | efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); |
1292 | efx->n_rx_channels = 1; | 1278 | efx->n_rx_channels = 1; |
1293 | efx->n_tx_channels = 1; | 1279 | efx->n_tx_channels = 1; |
1294 | efx->legacy_irq = efx->pci_dev->irq; | 1280 | efx->legacy_irq = efx->pci_dev->irq; |
1295 | } | 1281 | } |
1296 | 1282 | ||
1297 | return 0; | 1283 | return 0; |
1298 | } | 1284 | } |
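
Editor's note: when separate_tx_channels is set and MSI-X succeeds, the channel count above is split so TX channels get roughly half and RX channels the remainder, with neither side dropping below one. A standalone sketch of just that arithmetic:

#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned int n_channels;

	for (n_channels = 1; n_channels <= 6; n_channels++) {
		unsigned int n_tx = max_u(n_channels / 2, 1U);
		unsigned int n_rx = max_u(n_channels - n_tx, 1U);

		printf("n_channels=%u -> tx=%u rx=%u\n", n_channels, n_tx, n_rx);
	}
	return 0;
}

Note the degenerate case: with a single channel both counts are clamped to one, so the one channel serves both directions.
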
1299 | 1285 | ||
1300 | static void efx_remove_interrupts(struct efx_nic *efx) | 1286 | static void efx_remove_interrupts(struct efx_nic *efx) |
1301 | { | 1287 | { |
1302 | struct efx_channel *channel; | 1288 | struct efx_channel *channel; |
1303 | 1289 | ||
1304 | /* Remove MSI/MSI-X interrupts */ | 1290 | /* Remove MSI/MSI-X interrupts */ |
1305 | efx_for_each_channel(channel, efx) | 1291 | efx_for_each_channel(channel, efx) |
1306 | channel->irq = 0; | 1292 | channel->irq = 0; |
1307 | pci_disable_msi(efx->pci_dev); | 1293 | pci_disable_msi(efx->pci_dev); |
1308 | pci_disable_msix(efx->pci_dev); | 1294 | pci_disable_msix(efx->pci_dev); |
1309 | 1295 | ||
1310 | /* Remove legacy interrupt */ | 1296 | /* Remove legacy interrupt */ |
1311 | efx->legacy_irq = 0; | 1297 | efx->legacy_irq = 0; |
1312 | } | 1298 | } |
1313 | 1299 | ||
1314 | static void efx_set_channels(struct efx_nic *efx) | 1300 | static void efx_set_channels(struct efx_nic *efx) |
1315 | { | 1301 | { |
1316 | struct efx_channel *channel; | 1302 | struct efx_channel *channel; |
1317 | struct efx_tx_queue *tx_queue; | 1303 | struct efx_tx_queue *tx_queue; |
1318 | 1304 | ||
1319 | efx->tx_channel_offset = | 1305 | efx->tx_channel_offset = |
1320 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; | 1306 | separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0; |
1321 | 1307 | ||
1322 | /* We need to adjust the TX queue numbers if we have separate | 1308 | /* We need to adjust the TX queue numbers if we have separate |
1323 | * RX-only and TX-only channels. | 1309 | * RX-only and TX-only channels. |
1324 | */ | 1310 | */ |
1325 | efx_for_each_channel(channel, efx) { | 1311 | efx_for_each_channel(channel, efx) { |
1326 | efx_for_each_channel_tx_queue(tx_queue, channel) | 1312 | efx_for_each_channel_tx_queue(tx_queue, channel) |
1327 | tx_queue->queue -= (efx->tx_channel_offset * | 1313 | tx_queue->queue -= (efx->tx_channel_offset * |
1328 | EFX_TXQ_TYPES); | 1314 | EFX_TXQ_TYPES); |
1329 | } | 1315 | } |
1330 | } | 1316 | } |
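
Editor's note: with separate RX-only and TX-only channels, the TX channels sit at the top of the channel array, so efx_set_channels() shifts their TX queue numbers back down to start at zero. A standalone sketch of the renumbering; EFX_TXQ_TYPES is taken as 2 here purely to keep the output short:

#include <stdio.h>

#define TXQ_TYPES 2   /* illustrative stand-in for EFX_TXQ_TYPES */

int main(void)
{
	unsigned int n_channels = 4, n_tx_channels = 2;
	unsigned int tx_channel_offset = n_channels - n_tx_channels;  /* 2 */
	unsigned int ch, type;

	/* Only the top channels (2 and 3) carry TX queues in this layout. */
	for (ch = tx_channel_offset; ch < n_channels; ch++)
		for (type = 0; type < TXQ_TYPES; type++) {
			unsigned int queue = ch * TXQ_TYPES + type;

			printf("channel %u queue %u -> %u\n", ch, queue,
			       queue - tx_channel_offset * TXQ_TYPES);
		}
	return 0;
}
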
1331 | 1317 | ||
1332 | static int efx_probe_nic(struct efx_nic *efx) | 1318 | static int efx_probe_nic(struct efx_nic *efx) |
1333 | { | 1319 | { |
1334 | size_t i; | 1320 | size_t i; |
1335 | int rc; | 1321 | int rc; |
1336 | 1322 | ||
1337 | netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); | 1323 | netif_dbg(efx, probe, efx->net_dev, "creating NIC\n"); |
1338 | 1324 | ||
1339 | /* Carry out hardware-type specific initialisation */ | 1325 | /* Carry out hardware-type specific initialisation */ |
1340 | rc = efx->type->probe(efx); | 1326 | rc = efx->type->probe(efx); |
1341 | if (rc) | 1327 | if (rc) |
1342 | return rc; | 1328 | return rc; |
1343 | 1329 | ||
1344 | /* Determine the number of channels and queues by trying to hook | 1330 | /* Determine the number of channels and queues by trying to hook |
1345 | * in MSI-X interrupts. */ | 1331 | * in MSI-X interrupts. */ |
1346 | rc = efx_probe_interrupts(efx); | 1332 | rc = efx_probe_interrupts(efx); |
1347 | if (rc) | 1333 | if (rc) |
1348 | goto fail; | 1334 | goto fail; |
1349 | 1335 | ||
1350 | if (efx->n_channels > 1) | 1336 | if (efx->n_channels > 1) |
1351 | get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); | 1337 | get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key)); |
1352 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) | 1338 | for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++) |
1353 | efx->rx_indir_table[i] = i % efx->n_rx_channels; | 1339 | efx->rx_indir_table[i] = i % efx->n_rx_channels; |
1354 | 1340 | ||
1355 | efx_set_channels(efx); | 1341 | efx_set_channels(efx); |
1356 | netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); | 1342 | netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); |
1357 | netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); | 1343 | netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels); |
1358 | 1344 | ||
1359 | /* Initialise the interrupt moderation settings */ | 1345 | /* Initialise the interrupt moderation settings */ |
1360 | efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); | 1346 | efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); |
1361 | 1347 | ||
1362 | return 0; | 1348 | return 0; |
1363 | 1349 | ||
1364 | fail: | 1350 | fail: |
1365 | efx->type->remove(efx); | 1351 | efx->type->remove(efx); |
1366 | return rc; | 1352 | return rc; |
1367 | } | 1353 | } |
1368 | 1354 | ||
1369 | static void efx_remove_nic(struct efx_nic *efx) | 1355 | static void efx_remove_nic(struct efx_nic *efx) |
1370 | { | 1356 | { |
1371 | netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); | 1357 | netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n"); |
1372 | 1358 | ||
1373 | efx_remove_interrupts(efx); | 1359 | efx_remove_interrupts(efx); |
1374 | efx->type->remove(efx); | 1360 | efx->type->remove(efx); |
1375 | } | 1361 | } |
1376 | 1362 | ||
1377 | /************************************************************************** | 1363 | /************************************************************************** |
1378 | * | 1364 | * |
1379 | * NIC startup/shutdown | 1365 | * NIC startup/shutdown |
1380 | * | 1366 | * |
1381 | *************************************************************************/ | 1367 | *************************************************************************/ |
1382 | 1368 | ||
1383 | static int efx_probe_all(struct efx_nic *efx) | 1369 | static int efx_probe_all(struct efx_nic *efx) |
1384 | { | 1370 | { |
1385 | int rc; | 1371 | int rc; |
1386 | 1372 | ||
1387 | rc = efx_probe_nic(efx); | 1373 | rc = efx_probe_nic(efx); |
1388 | if (rc) { | 1374 | if (rc) { |
1389 | netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); | 1375 | netif_err(efx, probe, efx->net_dev, "failed to create NIC\n"); |
1390 | goto fail1; | 1376 | goto fail1; |
1391 | } | 1377 | } |
1392 | 1378 | ||
1393 | rc = efx_probe_port(efx); | 1379 | rc = efx_probe_port(efx); |
1394 | if (rc) { | 1380 | if (rc) { |
1395 | netif_err(efx, probe, efx->net_dev, "failed to create port\n"); | 1381 | netif_err(efx, probe, efx->net_dev, "failed to create port\n"); |
1396 | goto fail2; | 1382 | goto fail2; |
1397 | } | 1383 | } |
1398 | 1384 | ||
1399 | efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; | 1385 | efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; |
1400 | rc = efx_probe_channels(efx); | 1386 | rc = efx_probe_channels(efx); |
1401 | if (rc) | 1387 | if (rc) |
1402 | goto fail3; | 1388 | goto fail3; |
1403 | 1389 | ||
1404 | rc = efx_probe_filters(efx); | 1390 | rc = efx_probe_filters(efx); |
1405 | if (rc) { | 1391 | if (rc) { |
1406 | netif_err(efx, probe, efx->net_dev, | 1392 | netif_err(efx, probe, efx->net_dev, |
1407 | "failed to create filter tables\n"); | 1393 | "failed to create filter tables\n"); |
1408 | goto fail4; | 1394 | goto fail4; |
1409 | } | 1395 | } |
1410 | 1396 | ||
1411 | return 0; | 1397 | return 0; |
1412 | 1398 | ||
1413 | fail4: | 1399 | fail4: |
1414 | efx_remove_channels(efx); | 1400 | efx_remove_channels(efx); |
1415 | fail3: | 1401 | fail3: |
1416 | efx_remove_port(efx); | 1402 | efx_remove_port(efx); |
1417 | fail2: | 1403 | fail2: |
1418 | efx_remove_nic(efx); | 1404 | efx_remove_nic(efx); |
1419 | fail1: | 1405 | fail1: |
1420 | return rc; | 1406 | return rc; |
1421 | } | 1407 | } |
1422 | 1408 | ||
1423 | /* Called after previous invocation(s) of efx_stop_all, restarts the | 1409 | /* Called after previous invocation(s) of efx_stop_all, restarts the |
1424 | * port, kernel transmit queue, NAPI processing and hardware interrupts, | 1410 | * port, kernel transmit queue, NAPI processing and hardware interrupts, |
1425 | * and ensures that the port is scheduled to be reconfigured. | 1411 | * and ensures that the port is scheduled to be reconfigured. |
1426 | * This function is safe to call multiple times when the NIC is in any | 1412 | * This function is safe to call multiple times when the NIC is in any |
1427 | * state. */ | 1413 | * state. */ |
1428 | static void efx_start_all(struct efx_nic *efx) | 1414 | static void efx_start_all(struct efx_nic *efx) |
1429 | { | 1415 | { |
1430 | struct efx_channel *channel; | 1416 | struct efx_channel *channel; |
1431 | 1417 | ||
1432 | EFX_ASSERT_RESET_SERIALISED(efx); | 1418 | EFX_ASSERT_RESET_SERIALISED(efx); |
1433 | 1419 | ||
1434 | /* Check that it is appropriate to restart the interface. All | 1420 | /* Check that it is appropriate to restart the interface. All |
1435 | * of these flags are safe to read under just the rtnl lock */ | 1421 | * of these flags are safe to read under just the rtnl lock */ |
1436 | if (efx->port_enabled) | 1422 | if (efx->port_enabled) |
1437 | return; | 1423 | return; |
1438 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) | 1424 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) |
1439 | return; | 1425 | return; |
1440 | if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) | 1426 | if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) |
1441 | return; | 1427 | return; |
1442 | 1428 | ||
1443 | /* Mark the port as enabled so port reconfigurations can start, then | 1429 | /* Mark the port as enabled so port reconfigurations can start, then |
1444 | * restart the transmit interface early so the watchdog timer stops */ | 1430 | * restart the transmit interface early so the watchdog timer stops */ |
1445 | efx_start_port(efx); | 1431 | efx_start_port(efx); |
1446 | 1432 | ||
1447 | if (efx_dev_registered(efx) && netif_device_present(efx->net_dev)) | 1433 | if (efx_dev_registered(efx) && netif_device_present(efx->net_dev)) |
1448 | netif_tx_wake_all_queues(efx->net_dev); | 1434 | netif_tx_wake_all_queues(efx->net_dev); |
1449 | 1435 | ||
1450 | efx_for_each_channel(channel, efx) | 1436 | efx_for_each_channel(channel, efx) |
1451 | efx_start_channel(channel); | 1437 | efx_start_channel(channel); |
1452 | 1438 | ||
1453 | if (efx->legacy_irq) | 1439 | if (efx->legacy_irq) |
1454 | efx->legacy_irq_enabled = true; | 1440 | efx->legacy_irq_enabled = true; |
1455 | efx_nic_enable_interrupts(efx); | 1441 | efx_nic_enable_interrupts(efx); |
1456 | 1442 | ||
1457 | /* Switch to event based MCDI completions after enabling interrupts. | 1443 | /* Switch to event based MCDI completions after enabling interrupts. |
1458 | * If a reset has been scheduled, then we need to stay in polled mode. | 1444 | * If a reset has been scheduled, then we need to stay in polled mode. |
1459 | * Rather than serialising efx_mcdi_mode_event() [which sleeps] and | 1445 | * Rather than serialising efx_mcdi_mode_event() [which sleeps] and |
1460 | * reset_pending [modified from an atomic context], we instead guarantee | 1446 | * reset_pending [modified from an atomic context], we instead guarantee |
1461 | * that efx_mcdi_mode_poll() isn't reverted erroneously */ | 1447 | * that efx_mcdi_mode_poll() isn't reverted erroneously */ |
1462 | efx_mcdi_mode_event(efx); | 1448 | efx_mcdi_mode_event(efx); |
1463 | if (efx->reset_pending) | 1449 | if (efx->reset_pending) |
1464 | efx_mcdi_mode_poll(efx); | 1450 | efx_mcdi_mode_poll(efx); |
1465 | 1451 | ||
1466 | /* Start the hardware monitor if there is one. Otherwise (we're link | 1452 | /* Start the hardware monitor if there is one. Otherwise (we're link |
1467 | * event driven), we have to poll the PHY because after an event queue | 1453 | * event driven), we have to poll the PHY because after an event queue |
1468 | * flush, we could have missed a link state change */ | 1454 | * flush, we could have missed a link state change */ |
1469 | if (efx->type->monitor != NULL) { | 1455 | if (efx->type->monitor != NULL) { |
1470 | queue_delayed_work(efx->workqueue, &efx->monitor_work, | 1456 | queue_delayed_work(efx->workqueue, &efx->monitor_work, |
1471 | efx_monitor_interval); | 1457 | efx_monitor_interval); |
1472 | } else { | 1458 | } else { |
1473 | mutex_lock(&efx->mac_lock); | 1459 | mutex_lock(&efx->mac_lock); |
1474 | if (efx->phy_op->poll(efx)) | 1460 | if (efx->phy_op->poll(efx)) |
1475 | efx_link_status_changed(efx); | 1461 | efx_link_status_changed(efx); |
1476 | mutex_unlock(&efx->mac_lock); | 1462 | mutex_unlock(&efx->mac_lock); |
1477 | } | 1463 | } |
1478 | 1464 | ||
1479 | efx->type->start_stats(efx); | 1465 | efx->type->start_stats(efx); |
1480 | } | 1466 | } |
1481 | 1467 | ||
1482 | /* Flush all delayed work. Should only be called when no more delayed work | 1468 | /* Flush all delayed work. Should only be called when no more delayed work |
1483 | * will be scheduled. This doesn't flush pending online resets (efx_reset), | 1469 | * will be scheduled. This doesn't flush pending online resets (efx_reset), |
1484 | * since we're holding the rtnl_lock at this point. */ | 1470 | * since we're holding the rtnl_lock at this point. */ |
1485 | static void efx_flush_all(struct efx_nic *efx) | 1471 | static void efx_flush_all(struct efx_nic *efx) |
1486 | { | 1472 | { |
1487 | /* Make sure the hardware monitor is stopped */ | 1473 | /* Make sure the hardware monitor is stopped */ |
1488 | cancel_delayed_work_sync(&efx->monitor_work); | 1474 | cancel_delayed_work_sync(&efx->monitor_work); |
1489 | /* Stop scheduled port reconfigurations */ | 1475 | /* Stop scheduled port reconfigurations */ |
1490 | cancel_work_sync(&efx->mac_work); | 1476 | cancel_work_sync(&efx->mac_work); |
1491 | } | 1477 | } |
1492 | 1478 | ||
1493 | /* Quiesce hardware and software without bringing the link down. | 1479 | /* Quiesce hardware and software without bringing the link down. |
1494 | * Safe to call multiple times, when the nic and interface are in any | 1480 | * Safe to call multiple times, when the nic and interface are in any |
1495 | * state. The caller is guaranteed to subsequently be in a position | 1481 | * state. The caller is guaranteed to subsequently be in a position |
1496 | * to modify any hardware and software state they see fit without | 1482 | * to modify any hardware and software state they see fit without |
1497 | * taking locks. */ | 1483 | * taking locks. */ |
1498 | static void efx_stop_all(struct efx_nic *efx) | 1484 | static void efx_stop_all(struct efx_nic *efx) |
1499 | { | 1485 | { |
1500 | struct efx_channel *channel; | 1486 | struct efx_channel *channel; |
1501 | 1487 | ||
1502 | EFX_ASSERT_RESET_SERIALISED(efx); | 1488 | EFX_ASSERT_RESET_SERIALISED(efx); |
1503 | 1489 | ||
1504 | /* port_enabled can be read safely under the rtnl lock */ | 1490 | /* port_enabled can be read safely under the rtnl lock */ |
1505 | if (!efx->port_enabled) | 1491 | if (!efx->port_enabled) |
1506 | return; | 1492 | return; |
1507 | 1493 | ||
1508 | efx->type->stop_stats(efx); | 1494 | efx->type->stop_stats(efx); |
1509 | 1495 | ||
1510 | /* Switch to MCDI polling on Siena before disabling interrupts */ | 1496 | /* Switch to MCDI polling on Siena before disabling interrupts */ |
1511 | efx_mcdi_mode_poll(efx); | 1497 | efx_mcdi_mode_poll(efx); |
1512 | 1498 | ||
1513 | /* Disable interrupts and wait for ISR to complete */ | 1499 | /* Disable interrupts and wait for ISR to complete */ |
1514 | efx_nic_disable_interrupts(efx); | 1500 | efx_nic_disable_interrupts(efx); |
1515 | if (efx->legacy_irq) { | 1501 | if (efx->legacy_irq) { |
1516 | synchronize_irq(efx->legacy_irq); | 1502 | synchronize_irq(efx->legacy_irq); |
1517 | efx->legacy_irq_enabled = false; | 1503 | efx->legacy_irq_enabled = false; |
1518 | } | 1504 | } |
1519 | efx_for_each_channel(channel, efx) { | 1505 | efx_for_each_channel(channel, efx) { |
1520 | if (channel->irq) | 1506 | if (channel->irq) |
1521 | synchronize_irq(channel->irq); | 1507 | synchronize_irq(channel->irq); |
1522 | } | 1508 | } |
1523 | 1509 | ||
1524 | /* Stop all NAPI processing and synchronous rx refills */ | 1510 | /* Stop all NAPI processing and synchronous rx refills */ |
1525 | efx_for_each_channel(channel, efx) | 1511 | efx_for_each_channel(channel, efx) |
1526 | efx_stop_channel(channel); | 1512 | efx_stop_channel(channel); |
1527 | 1513 | ||
1528 | /* Stop all asynchronous port reconfigurations. Since all | 1514 | /* Stop all asynchronous port reconfigurations. Since all |
1529 | * event processing has already been stopped, there is no | 1515 | * event processing has already been stopped, there is no |
1530 | * window to lose phy events */ | 1516 | * window to lose phy events */ |
1531 | efx_stop_port(efx); | 1517 | efx_stop_port(efx); |
1532 | 1518 | ||
1533 | /* Flush efx_mac_work(), refill_workqueue, monitor_work */ | 1519 | /* Flush efx_mac_work(), refill_workqueue, monitor_work */ |
1534 | efx_flush_all(efx); | 1520 | efx_flush_all(efx); |
1535 | 1521 | ||
1536 | /* Stop the kernel transmit interface late, so the watchdog | 1522 | /* Stop the kernel transmit interface late, so the watchdog |
1537 | * timer isn't ticking over the flush */ | 1523 | * timer isn't ticking over the flush */ |
1538 | if (efx_dev_registered(efx)) { | 1524 | if (efx_dev_registered(efx)) { |
1539 | netif_tx_stop_all_queues(efx->net_dev); | 1525 | netif_tx_stop_all_queues(efx->net_dev); |
1540 | netif_tx_lock_bh(efx->net_dev); | 1526 | netif_tx_lock_bh(efx->net_dev); |
1541 | netif_tx_unlock_bh(efx->net_dev); | 1527 | netif_tx_unlock_bh(efx->net_dev); |
1542 | } | 1528 | } |
1543 | } | 1529 | } |
1544 | 1530 | ||
1545 | static void efx_remove_all(struct efx_nic *efx) | 1531 | static void efx_remove_all(struct efx_nic *efx) |
1546 | { | 1532 | { |
1547 | efx_remove_filters(efx); | 1533 | efx_remove_filters(efx); |
1548 | efx_remove_channels(efx); | 1534 | efx_remove_channels(efx); |
1549 | efx_remove_port(efx); | 1535 | efx_remove_port(efx); |
1550 | efx_remove_nic(efx); | 1536 | efx_remove_nic(efx); |
1551 | } | 1537 | } |
1552 | 1538 | ||
1553 | /************************************************************************** | 1539 | /************************************************************************** |
1554 | * | 1540 | * |
1555 | * Interrupt moderation | 1541 | * Interrupt moderation |
1556 | * | 1542 | * |
1557 | **************************************************************************/ | 1543 | **************************************************************************/ |
1558 | 1544 | ||
1559 | static unsigned irq_mod_ticks(int usecs, int resolution) | 1545 | static unsigned irq_mod_ticks(int usecs, int resolution) |
1560 | { | 1546 | { |
1561 | if (usecs <= 0) | 1547 | if (usecs <= 0) |
1562 | return 0; /* cannot receive interrupts ahead of time :-) */ | 1548 | return 0; /* cannot receive interrupts ahead of time :-) */ |
1563 | if (usecs < resolution) | 1549 | if (usecs < resolution) |
1564 | return 1; /* never round down to 0 */ | 1550 | return 1; /* never round down to 0 */ |
1565 | return usecs / resolution; | 1551 | return usecs / resolution; |
1566 | } | 1552 | } |
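
Editor's note: irq_mod_ticks() converts a microsecond setting into hardware moderation ticks, clamping small non-zero values up to one tick so moderation is never silently disabled by rounding. A standalone sketch reusing the same arithmetic, with the resolution assumed to be 5 microseconds for illustration:

#include <stdio.h>

static unsigned irq_mod_ticks(int usecs, int resolution)
{
	if (usecs <= 0)
		return 0;       /* no moderation requested */
	if (usecs < resolution)
		return 1;       /* never round down to 0 */
	return usecs / resolution;
}

int main(void)
{
	int samples[] = { 0, 3, 5, 20, 37 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%d us -> %u ticks\n", samples[i],
		       irq_mod_ticks(samples[i], 5));
	return 0;
}

With a 5 microsecond resolution this prints 0, 1, 1, 4 and 7 ticks respectively; the 3 microsecond request is the case the middle clamp exists for.
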
1567 | 1553 | ||
1568 | /* Set interrupt moderation parameters */ | 1554 | /* Set interrupt moderation parameters */ |
1569 | void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, | 1555 | void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, |
1570 | bool rx_adaptive) | 1556 | bool rx_adaptive) |
1571 | { | 1557 | { |
1572 | struct efx_channel *channel; | 1558 | struct efx_channel *channel; |
1573 | unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); | 1559 | unsigned tx_ticks = irq_mod_ticks(tx_usecs, EFX_IRQ_MOD_RESOLUTION); |
1574 | unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); | 1560 | unsigned rx_ticks = irq_mod_ticks(rx_usecs, EFX_IRQ_MOD_RESOLUTION); |
1575 | 1561 | ||
1576 | EFX_ASSERT_RESET_SERIALISED(efx); | 1562 | EFX_ASSERT_RESET_SERIALISED(efx); |
1577 | 1563 | ||
1578 | efx->irq_rx_adaptive = rx_adaptive; | 1564 | efx->irq_rx_adaptive = rx_adaptive; |
1579 | efx->irq_rx_moderation = rx_ticks; | 1565 | efx->irq_rx_moderation = rx_ticks; |
1580 | efx_for_each_channel(channel, efx) { | 1566 | efx_for_each_channel(channel, efx) { |
1581 | if (efx_channel_has_rx_queue(channel)) | 1567 | if (efx_channel_has_rx_queue(channel)) |
1582 | channel->irq_moderation = rx_ticks; | 1568 | channel->irq_moderation = rx_ticks; |
1583 | else if (efx_channel_has_tx_queues(channel)) | 1569 | else if (efx_channel_has_tx_queues(channel)) |
1584 | channel->irq_moderation = tx_ticks; | 1570 | channel->irq_moderation = tx_ticks; |
1585 | } | 1571 | } |
1586 | } | 1572 | } |
1587 | 1573 | ||
1588 | /************************************************************************** | 1574 | /************************************************************************** |
1589 | * | 1575 | * |
1590 | * Hardware monitor | 1576 | * Hardware monitor |
1591 | * | 1577 | * |
1592 | **************************************************************************/ | 1578 | **************************************************************************/ |
1593 | 1579 | ||
1594 | /* Run periodically off the general workqueue */ | 1580 | /* Run periodically off the general workqueue */ |
1595 | static void efx_monitor(struct work_struct *data) | 1581 | static void efx_monitor(struct work_struct *data) |
1596 | { | 1582 | { |
1597 | struct efx_nic *efx = container_of(data, struct efx_nic, | 1583 | struct efx_nic *efx = container_of(data, struct efx_nic, |
1598 | monitor_work.work); | 1584 | monitor_work.work); |
1599 | 1585 | ||
1600 | netif_vdbg(efx, timer, efx->net_dev, | 1586 | netif_vdbg(efx, timer, efx->net_dev, |
1601 | "hardware monitor executing on CPU %d\n", | 1587 | "hardware monitor executing on CPU %d\n", |
1602 | raw_smp_processor_id()); | 1588 | raw_smp_processor_id()); |
1603 | BUG_ON(efx->type->monitor == NULL); | 1589 | BUG_ON(efx->type->monitor == NULL); |
1604 | 1590 | ||
1605 | /* If the mac_lock is already held then it is likely a port | 1591 | /* If the mac_lock is already held then it is likely a port |
1606 | * reconfiguration is already in place, which will likely do | 1592 | * reconfiguration is already in place, which will likely do |
1607 | * most of the work of monitor() anyway. */ | 1593 | * most of the work of monitor() anyway. */ |
1608 | if (mutex_trylock(&efx->mac_lock)) { | 1594 | if (mutex_trylock(&efx->mac_lock)) { |
1609 | if (efx->port_enabled) | 1595 | if (efx->port_enabled) |
1610 | efx->type->monitor(efx); | 1596 | efx->type->monitor(efx); |
1611 | mutex_unlock(&efx->mac_lock); | 1597 | mutex_unlock(&efx->mac_lock); |
1612 | } | 1598 | } |
1613 | 1599 | ||
1614 | queue_delayed_work(efx->workqueue, &efx->monitor_work, | 1600 | queue_delayed_work(efx->workqueue, &efx->monitor_work, |
1615 | efx_monitor_interval); | 1601 | efx_monitor_interval); |
1616 | } | 1602 | } |
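
Editor's note: efx_monitor() uses mutex_trylock() so the periodic monitor simply skips a round when a port reconfiguration already holds mac_lock, since the lock holder will do most of the same work. A standalone POSIX-threads sketch of that pattern (compile with -lpthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;

static void monitor_once(void)
{
	if (pthread_mutex_trylock(&mac_lock) == 0) {
		printf("monitor ran\n");
		pthread_mutex_unlock(&mac_lock);
	} else {
		printf("lock busy, skipping this round\n");
	}
}

int main(void)
{
	monitor_once();                  /* uncontended: runs */

	pthread_mutex_lock(&mac_lock);
	monitor_once();                  /* contended: skipped */
	pthread_mutex_unlock(&mac_lock);
	return 0;
}
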
1617 | 1603 | ||
1618 | /************************************************************************** | 1604 | /************************************************************************** |
1619 | * | 1605 | * |
1620 | * ioctls | 1606 | * ioctls |
1621 | * | 1607 | * |
1622 | *************************************************************************/ | 1608 | *************************************************************************/ |
1623 | 1609 | ||
1624 | /* Net device ioctl | 1610 | /* Net device ioctl |
1625 | * Context: process, rtnl_lock() held. | 1611 | * Context: process, rtnl_lock() held. |
1626 | */ | 1612 | */ |
1627 | static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) | 1613 | static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) |
1628 | { | 1614 | { |
1629 | struct efx_nic *efx = netdev_priv(net_dev); | 1615 | struct efx_nic *efx = netdev_priv(net_dev); |
1630 | struct mii_ioctl_data *data = if_mii(ifr); | 1616 | struct mii_ioctl_data *data = if_mii(ifr); |
1631 | 1617 | ||
1632 | EFX_ASSERT_RESET_SERIALISED(efx); | 1618 | EFX_ASSERT_RESET_SERIALISED(efx); |
1633 | 1619 | ||
1634 | /* Convert phy_id from older PRTAD/DEVAD format */ | 1620 | /* Convert phy_id from older PRTAD/DEVAD format */ |
1635 | if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && | 1621 | if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) && |
1636 | (data->phy_id & 0xfc00) == 0x0400) | 1622 | (data->phy_id & 0xfc00) == 0x0400) |
1637 | data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; | 1623 | data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400; |
1638 | 1624 | ||
1639 | return mdio_mii_ioctl(&efx->mdio, data, cmd); | 1625 | return mdio_mii_ioctl(&efx->mdio, data, cmd); |
1640 | } | 1626 | } |
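
Editor's note: the ioctl handler rewrites old-style phy_id values (bit 10 set, bits 11 to 15 clear) into the MDIO_PHY_ID_C45 form with a single XOR that clears bit 10 and sets bit 15 while preserving the low address bits. A standalone sketch with a made-up sample value; 0x8000 matches the kernel's MDIO_PHY_ID_C45:

#include <stdio.h>

#define PHY_ID_C45 0x8000u   /* stand-in for MDIO_PHY_ID_C45 */

int main(void)
{
	unsigned int phy_id = 0x04a1;   /* old-style ID: bit 10 set */

	/* Only IDs whose top bits are exactly 0000 01 get converted. */
	if ((phy_id & 0xfc00) == 0x0400)
		phy_id ^= PHY_ID_C45 | 0x0400;

	printf("converted phy_id = %#06x\n", phy_id);   /* prints 0x80a1 */
	return 0;
}
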
1641 | 1627 | ||
1642 | /************************************************************************** | 1628 | /************************************************************************** |
1643 | * | 1629 | * |
1644 | * NAPI interface | 1630 | * NAPI interface |
1645 | * | 1631 | * |
1646 | **************************************************************************/ | 1632 | **************************************************************************/ |
1647 | 1633 | ||
1648 | static void efx_init_napi(struct efx_nic *efx) | 1634 | static void efx_init_napi(struct efx_nic *efx) |
1649 | { | 1635 | { |
1650 | struct efx_channel *channel; | 1636 | struct efx_channel *channel; |
1651 | 1637 | ||
1652 | efx_for_each_channel(channel, efx) { | 1638 | efx_for_each_channel(channel, efx) { |
1653 | channel->napi_dev = efx->net_dev; | 1639 | channel->napi_dev = efx->net_dev; |
1654 | netif_napi_add(channel->napi_dev, &channel->napi_str, | 1640 | netif_napi_add(channel->napi_dev, &channel->napi_str, |
1655 | efx_poll, napi_weight); | 1641 | efx_poll, napi_weight); |
1656 | } | 1642 | } |
1657 | } | 1643 | } |
1658 | 1644 | ||
1659 | static void efx_fini_napi_channel(struct efx_channel *channel) | 1645 | static void efx_fini_napi_channel(struct efx_channel *channel) |
1660 | { | 1646 | { |
1661 | if (channel->napi_dev) | 1647 | if (channel->napi_dev) |
1662 | netif_napi_del(&channel->napi_str); | 1648 | netif_napi_del(&channel->napi_str); |
1663 | channel->napi_dev = NULL; | 1649 | channel->napi_dev = NULL; |
1664 | } | 1650 | } |
1665 | 1651 | ||
1666 | static void efx_fini_napi(struct efx_nic *efx) | 1652 | static void efx_fini_napi(struct efx_nic *efx) |
1667 | { | 1653 | { |
1668 | struct efx_channel *channel; | 1654 | struct efx_channel *channel; |
1669 | 1655 | ||
1670 | efx_for_each_channel(channel, efx) | 1656 | efx_for_each_channel(channel, efx) |
1671 | efx_fini_napi_channel(channel); | 1657 | efx_fini_napi_channel(channel); |
1672 | } | 1658 | } |
1673 | 1659 | ||
1674 | /************************************************************************** | 1660 | /************************************************************************** |
1675 | * | 1661 | * |
1676 | * Kernel netpoll interface | 1662 | * Kernel netpoll interface |
1677 | * | 1663 | * |
1678 | *************************************************************************/ | 1664 | *************************************************************************/ |
1679 | 1665 | ||
1680 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1666 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1681 | 1667 | ||
1682 | /* Although in the common case interrupts will be disabled, this is not | 1668 | /* Although in the common case interrupts will be disabled, this is not |
1683 | * guaranteed. However, all our work happens inside the NAPI callback, | 1669 | * guaranteed. However, all our work happens inside the NAPI callback, |
1684 | * so no locking is required. | 1670 | * so no locking is required. |
1685 | */ | 1671 | */ |
1686 | static void efx_netpoll(struct net_device *net_dev) | 1672 | static void efx_netpoll(struct net_device *net_dev) |
1687 | { | 1673 | { |
1688 | struct efx_nic *efx = netdev_priv(net_dev); | 1674 | struct efx_nic *efx = netdev_priv(net_dev); |
1689 | struct efx_channel *channel; | 1675 | struct efx_channel *channel; |
1690 | 1676 | ||
1691 | efx_for_each_channel(channel, efx) | 1677 | efx_for_each_channel(channel, efx) |
1692 | efx_schedule_channel(channel); | 1678 | efx_schedule_channel(channel); |
1693 | } | 1679 | } |
1694 | 1680 | ||
1695 | #endif | 1681 | #endif |
1696 | 1682 | ||
1697 | /************************************************************************** | 1683 | /************************************************************************** |
1698 | * | 1684 | * |
1699 | * Kernel net device interface | 1685 | * Kernel net device interface |
1700 | * | 1686 | * |
1701 | *************************************************************************/ | 1687 | *************************************************************************/ |
1702 | 1688 | ||
1703 | /* Context: process, rtnl_lock() held. */ | 1689 | /* Context: process, rtnl_lock() held. */ |
1704 | static int efx_net_open(struct net_device *net_dev) | 1690 | static int efx_net_open(struct net_device *net_dev) |
1705 | { | 1691 | { |
1706 | struct efx_nic *efx = netdev_priv(net_dev); | 1692 | struct efx_nic *efx = netdev_priv(net_dev); |
1707 | EFX_ASSERT_RESET_SERIALISED(efx); | 1693 | EFX_ASSERT_RESET_SERIALISED(efx); |
1708 | 1694 | ||
1709 | netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", | 1695 | netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n", |
1710 | raw_smp_processor_id()); | 1696 | raw_smp_processor_id()); |
1711 | 1697 | ||
1712 | if (efx->state == STATE_DISABLED) | 1698 | if (efx->state == STATE_DISABLED) |
1713 | return -EIO; | 1699 | return -EIO; |
1714 | if (efx->phy_mode & PHY_MODE_SPECIAL) | 1700 | if (efx->phy_mode & PHY_MODE_SPECIAL) |
1715 | return -EBUSY; | 1701 | return -EBUSY; |
1716 | if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) | 1702 | if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL)) |
1717 | return -EIO; | 1703 | return -EIO; |
1718 | 1704 | ||
1719 | /* Notify the kernel of the link state polled during driver load, | 1705 | /* Notify the kernel of the link state polled during driver load, |
1720 | * before the monitor starts running */ | 1706 | * before the monitor starts running */ |
1721 | efx_link_status_changed(efx); | 1707 | efx_link_status_changed(efx); |
1722 | 1708 | ||
1723 | efx_start_all(efx); | 1709 | efx_start_all(efx); |
1724 | return 0; | 1710 | return 0; |
1725 | } | 1711 | } |
1726 | 1712 | ||
1727 | /* Context: process, rtnl_lock() held. | 1713 | /* Context: process, rtnl_lock() held. |
1728 | * Note that the kernel will ignore our return code; this method | 1714 | * Note that the kernel will ignore our return code; this method |
1729 | * should really be a void. | 1715 | * should really be a void. |
1730 | */ | 1716 | */ |
1731 | static int efx_net_stop(struct net_device *net_dev) | 1717 | static int efx_net_stop(struct net_device *net_dev) |
1732 | { | 1718 | { |
1733 | struct efx_nic *efx = netdev_priv(net_dev); | 1719 | struct efx_nic *efx = netdev_priv(net_dev); |
1734 | 1720 | ||
1735 | netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", | 1721 | netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n", |
1736 | raw_smp_processor_id()); | 1722 | raw_smp_processor_id()); |
1737 | 1723 | ||
1738 | if (efx->state != STATE_DISABLED) { | 1724 | if (efx->state != STATE_DISABLED) { |
1739 | /* Stop the device and flush all the channels */ | 1725 | /* Stop the device and flush all the channels */ |
1740 | efx_stop_all(efx); | 1726 | efx_stop_all(efx); |
1741 | efx_fini_channels(efx); | 1727 | efx_fini_channels(efx); |
1742 | efx_init_channels(efx); | 1728 | efx_init_channels(efx); |
1743 | } | 1729 | } |
1744 | 1730 | ||
1745 | return 0; | 1731 | return 0; |
1746 | } | 1732 | } |
1747 | 1733 | ||
1748 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ | 1734 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ |
1749 | static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) | 1735 | static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) |
1750 | { | 1736 | { |
1751 | struct efx_nic *efx = netdev_priv(net_dev); | 1737 | struct efx_nic *efx = netdev_priv(net_dev); |
1752 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 1738 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
1753 | 1739 | ||
1754 | spin_lock_bh(&efx->stats_lock); | 1740 | spin_lock_bh(&efx->stats_lock); |
1755 | efx->type->update_stats(efx); | 1741 | efx->type->update_stats(efx); |
1756 | spin_unlock_bh(&efx->stats_lock); | 1742 | spin_unlock_bh(&efx->stats_lock); |
1757 | 1743 | ||
1758 | stats->rx_packets = mac_stats->rx_packets; | 1744 | stats->rx_packets = mac_stats->rx_packets; |
1759 | stats->tx_packets = mac_stats->tx_packets; | 1745 | stats->tx_packets = mac_stats->tx_packets; |
1760 | stats->rx_bytes = mac_stats->rx_bytes; | 1746 | stats->rx_bytes = mac_stats->rx_bytes; |
1761 | stats->tx_bytes = mac_stats->tx_bytes; | 1747 | stats->tx_bytes = mac_stats->tx_bytes; |
1762 | stats->rx_dropped = efx->n_rx_nodesc_drop_cnt; | 1748 | stats->rx_dropped = efx->n_rx_nodesc_drop_cnt; |
1763 | stats->multicast = mac_stats->rx_multicast; | 1749 | stats->multicast = mac_stats->rx_multicast; |
1764 | stats->collisions = mac_stats->tx_collision; | 1750 | stats->collisions = mac_stats->tx_collision; |
1765 | stats->rx_length_errors = (mac_stats->rx_gtjumbo + | 1751 | stats->rx_length_errors = (mac_stats->rx_gtjumbo + |
1766 | mac_stats->rx_length_error); | 1752 | mac_stats->rx_length_error); |
1767 | stats->rx_crc_errors = mac_stats->rx_bad; | 1753 | stats->rx_crc_errors = mac_stats->rx_bad; |
1768 | stats->rx_frame_errors = mac_stats->rx_align_error; | 1754 | stats->rx_frame_errors = mac_stats->rx_align_error; |
1769 | stats->rx_fifo_errors = mac_stats->rx_overflow; | 1755 | stats->rx_fifo_errors = mac_stats->rx_overflow; |
1770 | stats->rx_missed_errors = mac_stats->rx_missed; | 1756 | stats->rx_missed_errors = mac_stats->rx_missed; |
1771 | stats->tx_window_errors = mac_stats->tx_late_collision; | 1757 | stats->tx_window_errors = mac_stats->tx_late_collision; |
1772 | 1758 | ||
1773 | stats->rx_errors = (stats->rx_length_errors + | 1759 | stats->rx_errors = (stats->rx_length_errors + |
1774 | stats->rx_crc_errors + | 1760 | stats->rx_crc_errors + |
1775 | stats->rx_frame_errors + | 1761 | stats->rx_frame_errors + |
1776 | mac_stats->rx_symbol_error); | 1762 | mac_stats->rx_symbol_error); |
1777 | stats->tx_errors = (stats->tx_window_errors + | 1763 | stats->tx_errors = (stats->tx_window_errors + |
1778 | mac_stats->tx_bad); | 1764 | mac_stats->tx_bad); |
1779 | 1765 | ||
1780 | return stats; | 1766 | return stats; |
1781 | } | 1767 | } |
1782 | 1768 | ||
1783 | /* Context: netif_tx_lock held, BHs disabled. */ | 1769 | /* Context: netif_tx_lock held, BHs disabled. */ |
1784 | static void efx_watchdog(struct net_device *net_dev) | 1770 | static void efx_watchdog(struct net_device *net_dev) |
1785 | { | 1771 | { |
1786 | struct efx_nic *efx = netdev_priv(net_dev); | 1772 | struct efx_nic *efx = netdev_priv(net_dev); |
1787 | 1773 | ||
1788 | netif_err(efx, tx_err, efx->net_dev, | 1774 | netif_err(efx, tx_err, efx->net_dev, |
1789 | "TX stuck with port_enabled=%d: resetting channels\n", | 1775 | "TX stuck with port_enabled=%d: resetting channels\n", |
1790 | efx->port_enabled); | 1776 | efx->port_enabled); |
1791 | 1777 | ||
1792 | efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); | 1778 | efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); |
1793 | } | 1779 | } |
1794 | 1780 | ||
1795 | 1781 | ||
1796 | /* Context: process, rtnl_lock() held. */ | 1782 | /* Context: process, rtnl_lock() held. */ |
1797 | static int efx_change_mtu(struct net_device *net_dev, int new_mtu) | 1783 | static int efx_change_mtu(struct net_device *net_dev, int new_mtu) |
1798 | { | 1784 | { |
1799 | struct efx_nic *efx = netdev_priv(net_dev); | 1785 | struct efx_nic *efx = netdev_priv(net_dev); |
1800 | int rc = 0; | 1786 | int rc = 0; |
1801 | 1787 | ||
1802 | EFX_ASSERT_RESET_SERIALISED(efx); | 1788 | EFX_ASSERT_RESET_SERIALISED(efx); |
1803 | 1789 | ||
1804 | if (new_mtu > EFX_MAX_MTU) | 1790 | if (new_mtu > EFX_MAX_MTU) |
1805 | return -EINVAL; | 1791 | return -EINVAL; |
1806 | 1792 | ||
1807 | efx_stop_all(efx); | 1793 | efx_stop_all(efx); |
1808 | 1794 | ||
1809 | netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); | 1795 | netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu); |
1810 | 1796 | ||
1811 | efx_fini_channels(efx); | 1797 | efx_fini_channels(efx); |
1812 | 1798 | ||
1813 | mutex_lock(&efx->mac_lock); | 1799 | mutex_lock(&efx->mac_lock); |
1814 | /* Reconfigure the MAC before enabling the dma queues so that | 1800 | /* Reconfigure the MAC before enabling the dma queues so that |
1815 | * the RX buffers don't overflow */ | 1801 | * the RX buffers don't overflow */ |
1816 | net_dev->mtu = new_mtu; | 1802 | net_dev->mtu = new_mtu; |
1817 | efx->mac_op->reconfigure(efx); | 1803 | efx->mac_op->reconfigure(efx); |
1818 | mutex_unlock(&efx->mac_lock); | 1804 | mutex_unlock(&efx->mac_lock); |
1819 | 1805 | ||
1820 | efx_init_channels(efx); | 1806 | efx_init_channels(efx); |
1821 | 1807 | ||
1822 | efx_start_all(efx); | 1808 | efx_start_all(efx); |
1823 | return rc; | 1809 | return rc; |
1824 | } | 1810 | } |
1825 | 1811 | ||
1826 | static int efx_set_mac_address(struct net_device *net_dev, void *data) | 1812 | static int efx_set_mac_address(struct net_device *net_dev, void *data) |
1827 | { | 1813 | { |
1828 | struct efx_nic *efx = netdev_priv(net_dev); | 1814 | struct efx_nic *efx = netdev_priv(net_dev); |
1829 | struct sockaddr *addr = data; | 1815 | struct sockaddr *addr = data; |
1830 | char *new_addr = addr->sa_data; | 1816 | char *new_addr = addr->sa_data; |
1831 | 1817 | ||
1832 | EFX_ASSERT_RESET_SERIALISED(efx); | 1818 | EFX_ASSERT_RESET_SERIALISED(efx); |
1833 | 1819 | ||
1834 | if (!is_valid_ether_addr(new_addr)) { | 1820 | if (!is_valid_ether_addr(new_addr)) { |
1835 | netif_err(efx, drv, efx->net_dev, | 1821 | netif_err(efx, drv, efx->net_dev, |
1836 | "invalid ethernet MAC address requested: %pM\n", | 1822 | "invalid ethernet MAC address requested: %pM\n", |
1837 | new_addr); | 1823 | new_addr); |
1838 | return -EINVAL; | 1824 | return -EINVAL; |
1839 | } | 1825 | } |
1840 | 1826 | ||
1841 | memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); | 1827 | memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); |
1842 | 1828 | ||
1843 | /* Reconfigure the MAC */ | 1829 | /* Reconfigure the MAC */ |
1844 | mutex_lock(&efx->mac_lock); | 1830 | mutex_lock(&efx->mac_lock); |
1845 | efx->mac_op->reconfigure(efx); | 1831 | efx->mac_op->reconfigure(efx); |
1846 | mutex_unlock(&efx->mac_lock); | 1832 | mutex_unlock(&efx->mac_lock); |
1847 | 1833 | ||
1848 | return 0; | 1834 | return 0; |
1849 | } | 1835 | } |
1850 | 1836 | ||
1851 | /* Context: netif_addr_lock held, BHs disabled. */ | 1837 | /* Context: netif_addr_lock held, BHs disabled. */ |
1852 | static void efx_set_multicast_list(struct net_device *net_dev) | 1838 | static void efx_set_multicast_list(struct net_device *net_dev) |
1853 | { | 1839 | { |
1854 | struct efx_nic *efx = netdev_priv(net_dev); | 1840 | struct efx_nic *efx = netdev_priv(net_dev); |
1855 | struct netdev_hw_addr *ha; | 1841 | struct netdev_hw_addr *ha; |
1856 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; | 1842 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; |
1857 | u32 crc; | 1843 | u32 crc; |
1858 | int bit; | 1844 | int bit; |
1859 | 1845 | ||
1860 | efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); | 1846 | efx->promiscuous = !!(net_dev->flags & IFF_PROMISC); |
1861 | 1847 | ||
1862 | /* Build multicast hash table */ | 1848 | /* Build multicast hash table */ |
1863 | if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { | 1849 | if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) { |
1864 | memset(mc_hash, 0xff, sizeof(*mc_hash)); | 1850 | memset(mc_hash, 0xff, sizeof(*mc_hash)); |
1865 | } else { | 1851 | } else { |
1866 | memset(mc_hash, 0x00, sizeof(*mc_hash)); | 1852 | memset(mc_hash, 0x00, sizeof(*mc_hash)); |
1867 | netdev_for_each_mc_addr(ha, net_dev) { | 1853 | netdev_for_each_mc_addr(ha, net_dev) { |
1868 | crc = ether_crc_le(ETH_ALEN, ha->addr); | 1854 | crc = ether_crc_le(ETH_ALEN, ha->addr); |
1869 | bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); | 1855 | bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); |
1870 | set_bit_le(bit, mc_hash->byte); | 1856 | set_bit_le(bit, mc_hash->byte); |
1871 | } | 1857 | } |
1872 | 1858 | ||
1873 | /* Broadcast packets go through the multicast hash filter. | 1859 | /* Broadcast packets go through the multicast hash filter. |
1874 | * ether_crc_le() of the broadcast address is 0xbe2612ff | 1860 | * ether_crc_le() of the broadcast address is 0xbe2612ff |
1875 | * so we always add bit 0xff to the mask. | 1861 | * so we always add bit 0xff to the mask. |
1876 | */ | 1862 | */ |
1877 | set_bit_le(0xff, mc_hash->byte); | 1863 | set_bit_le(0xff, mc_hash->byte); |
1878 | } | 1864 | } |
1879 | 1865 | ||
1880 | if (efx->port_enabled) | 1866 | if (efx->port_enabled) |
1881 | queue_work(efx->workqueue, &efx->mac_work); | 1867 | queue_work(efx->workqueue, &efx->mac_work); |
1882 | /* Otherwise efx_start_port() will do this */ | 1868 | /* Otherwise efx_start_port() will do this */ |
1883 | } | 1869 | } |
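
The hash-bit maths in efx_set_multicast_list() above can be checked in isolation. A minimal sketch, assuming ether_crc_le() is the usual LSB-first CRC-32 (initial value ~0, reflected polynomial 0xedb88320, no final inversion) and that EFX_MCAST_HASH_ENTRIES is 256, which is what the fixed 0xff broadcast bit implies:

#include <stdint.h>
#include <stdio.h>

/* LSB-first Ethernet CRC-32 in the style of the kernel's ether_crc_le(). */
static uint32_t ether_crc_le(int len, const uint8_t *data)
{
	uint32_t crc = ~0u;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint32_t crc = ether_crc_le(6, bcast);

	/* bit = crc & (EFX_MCAST_HASH_ENTRIES - 1), with 256 entries assumed */
	printf("crc = 0x%08x, hash bit = 0x%02x\n", crc, crc & 0xff);
	return 0;
}

Under those assumptions the program prints the 0xbe2612ff mentioned in the comment, and masking with 255 lands on bit 0xff.
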
1884 | 1870 | ||
1885 | static int efx_set_features(struct net_device *net_dev, u32 data) | 1871 | static int efx_set_features(struct net_device *net_dev, u32 data) |
1886 | { | 1872 | { |
1887 | struct efx_nic *efx = netdev_priv(net_dev); | 1873 | struct efx_nic *efx = netdev_priv(net_dev); |
1888 | 1874 | ||
1889 | /* If disabling RX n-tuple filtering, clear existing filters */ | 1875 | /* If disabling RX n-tuple filtering, clear existing filters */ |
1890 | if (net_dev->features & ~data & NETIF_F_NTUPLE) | 1876 | if (net_dev->features & ~data & NETIF_F_NTUPLE) |
1891 | efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); | 1877 | efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL); |
1892 | 1878 | ||
1893 | return 0; | 1879 | return 0; |
1894 | } | 1880 | } |
1895 | 1881 | ||
1896 | static const struct net_device_ops efx_netdev_ops = { | 1882 | static const struct net_device_ops efx_netdev_ops = { |
1897 | .ndo_open = efx_net_open, | 1883 | .ndo_open = efx_net_open, |
1898 | .ndo_stop = efx_net_stop, | 1884 | .ndo_stop = efx_net_stop, |
1899 | .ndo_get_stats64 = efx_net_stats, | 1885 | .ndo_get_stats64 = efx_net_stats, |
1900 | .ndo_tx_timeout = efx_watchdog, | 1886 | .ndo_tx_timeout = efx_watchdog, |
1901 | .ndo_start_xmit = efx_hard_start_xmit, | 1887 | .ndo_start_xmit = efx_hard_start_xmit, |
1902 | .ndo_validate_addr = eth_validate_addr, | 1888 | .ndo_validate_addr = eth_validate_addr, |
1903 | .ndo_do_ioctl = efx_ioctl, | 1889 | .ndo_do_ioctl = efx_ioctl, |
1904 | .ndo_change_mtu = efx_change_mtu, | 1890 | .ndo_change_mtu = efx_change_mtu, |
1905 | .ndo_set_mac_address = efx_set_mac_address, | 1891 | .ndo_set_mac_address = efx_set_mac_address, |
1906 | .ndo_set_multicast_list = efx_set_multicast_list, | 1892 | .ndo_set_multicast_list = efx_set_multicast_list, |
1907 | .ndo_set_features = efx_set_features, | 1893 | .ndo_set_features = efx_set_features, |
1908 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1894 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1909 | .ndo_poll_controller = efx_netpoll, | 1895 | .ndo_poll_controller = efx_netpoll, |
1910 | #endif | 1896 | #endif |
1911 | .ndo_setup_tc = efx_setup_tc, | 1897 | .ndo_setup_tc = efx_setup_tc, |
1912 | #ifdef CONFIG_RFS_ACCEL | 1898 | #ifdef CONFIG_RFS_ACCEL |
1913 | .ndo_rx_flow_steer = efx_filter_rfs, | 1899 | .ndo_rx_flow_steer = efx_filter_rfs, |
1914 | #endif | 1900 | #endif |
1915 | }; | 1901 | }; |
1916 | 1902 | ||
1917 | static void efx_update_name(struct efx_nic *efx) | 1903 | static void efx_update_name(struct efx_nic *efx) |
1918 | { | 1904 | { |
1919 | strcpy(efx->name, efx->net_dev->name); | 1905 | strcpy(efx->name, efx->net_dev->name); |
1920 | efx_mtd_rename(efx); | 1906 | efx_mtd_rename(efx); |
1921 | efx_set_channel_names(efx); | 1907 | efx_set_channel_names(efx); |
1922 | } | 1908 | } |
1923 | 1909 | ||
1924 | static int efx_netdev_event(struct notifier_block *this, | 1910 | static int efx_netdev_event(struct notifier_block *this, |
1925 | unsigned long event, void *ptr) | 1911 | unsigned long event, void *ptr) |
1926 | { | 1912 | { |
1927 | struct net_device *net_dev = ptr; | 1913 | struct net_device *net_dev = ptr; |
1928 | 1914 | ||
1929 | if (net_dev->netdev_ops == &efx_netdev_ops && | 1915 | if (net_dev->netdev_ops == &efx_netdev_ops && |
1930 | event == NETDEV_CHANGENAME) | 1916 | event == NETDEV_CHANGENAME) |
1931 | efx_update_name(netdev_priv(net_dev)); | 1917 | efx_update_name(netdev_priv(net_dev)); |
1932 | 1918 | ||
1933 | return NOTIFY_DONE; | 1919 | return NOTIFY_DONE; |
1934 | } | 1920 | } |
1935 | 1921 | ||
1936 | static struct notifier_block efx_netdev_notifier = { | 1922 | static struct notifier_block efx_netdev_notifier = { |
1937 | .notifier_call = efx_netdev_event, | 1923 | .notifier_call = efx_netdev_event, |
1938 | }; | 1924 | }; |
1939 | 1925 | ||
1940 | static ssize_t | 1926 | static ssize_t |
1941 | show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) | 1927 | show_phy_type(struct device *dev, struct device_attribute *attr, char *buf) |
1942 | { | 1928 | { |
1943 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | 1929 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); |
1944 | return sprintf(buf, "%d\n", efx->phy_type); | 1930 | return sprintf(buf, "%d\n", efx->phy_type); |
1945 | } | 1931 | } |
1946 | static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); | 1932 | static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL); |
1947 | 1933 | ||
1948 | static int efx_register_netdev(struct efx_nic *efx) | 1934 | static int efx_register_netdev(struct efx_nic *efx) |
1949 | { | 1935 | { |
1950 | struct net_device *net_dev = efx->net_dev; | 1936 | struct net_device *net_dev = efx->net_dev; |
1951 | struct efx_channel *channel; | 1937 | struct efx_channel *channel; |
1952 | int rc; | 1938 | int rc; |
1953 | 1939 | ||
1954 | net_dev->watchdog_timeo = 5 * HZ; | 1940 | net_dev->watchdog_timeo = 5 * HZ; |
1955 | net_dev->irq = efx->pci_dev->irq; | 1941 | net_dev->irq = efx->pci_dev->irq; |
1956 | net_dev->netdev_ops = &efx_netdev_ops; | 1942 | net_dev->netdev_ops = &efx_netdev_ops; |
1957 | SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); | 1943 | SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); |
1958 | 1944 | ||
1959 | /* Clear MAC statistics */ | 1945 | /* Clear MAC statistics */ |
1960 | efx->mac_op->update_stats(efx); | 1946 | efx->mac_op->update_stats(efx); |
1961 | memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); | 1947 | memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); |
1962 | 1948 | ||
1963 | rtnl_lock(); | 1949 | rtnl_lock(); |
1964 | 1950 | ||
1965 | rc = dev_alloc_name(net_dev, net_dev->name); | 1951 | rc = dev_alloc_name(net_dev, net_dev->name); |
1966 | if (rc < 0) | 1952 | if (rc < 0) |
1967 | goto fail_locked; | 1953 | goto fail_locked; |
1968 | efx_update_name(efx); | 1954 | efx_update_name(efx); |
1969 | 1955 | ||
1970 | rc = register_netdevice(net_dev); | 1956 | rc = register_netdevice(net_dev); |
1971 | if (rc) | 1957 | if (rc) |
1972 | goto fail_locked; | 1958 | goto fail_locked; |
1973 | 1959 | ||
1974 | efx_for_each_channel(channel, efx) { | 1960 | efx_for_each_channel(channel, efx) { |
1975 | struct efx_tx_queue *tx_queue; | 1961 | struct efx_tx_queue *tx_queue; |
1976 | efx_for_each_channel_tx_queue(tx_queue, channel) | 1962 | efx_for_each_channel_tx_queue(tx_queue, channel) |
1977 | efx_init_tx_queue_core_txq(tx_queue); | 1963 | efx_init_tx_queue_core_txq(tx_queue); |
1978 | } | 1964 | } |
1979 | 1965 | ||
1980 | /* Always start with carrier off; PHY events will detect the link */ | 1966 | /* Always start with carrier off; PHY events will detect the link */ |
1981 | netif_carrier_off(efx->net_dev); | 1967 | netif_carrier_off(efx->net_dev); |
1982 | 1968 | ||
1983 | rtnl_unlock(); | 1969 | rtnl_unlock(); |
1984 | 1970 | ||
1985 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); | 1971 | rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type); |
1986 | if (rc) { | 1972 | if (rc) { |
1987 | netif_err(efx, drv, efx->net_dev, | 1973 | netif_err(efx, drv, efx->net_dev, |
1988 | "failed to init net dev attributes\n"); | 1974 | "failed to init net dev attributes\n"); |
1989 | goto fail_registered; | 1975 | goto fail_registered; |
1990 | } | 1976 | } |
1991 | 1977 | ||
1992 | return 0; | 1978 | return 0; |
1993 | 1979 | ||
1994 | fail_locked: | 1980 | fail_locked: |
1995 | rtnl_unlock(); | 1981 | rtnl_unlock(); |
1996 | netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); | 1982 | netif_err(efx, drv, efx->net_dev, "could not register net dev\n"); |
1997 | return rc; | 1983 | return rc; |
1998 | 1984 | ||
1999 | fail_registered: | 1985 | fail_registered: |
2000 | unregister_netdev(net_dev); | 1986 | unregister_netdev(net_dev); |
2001 | return rc; | 1987 | return rc; |
2002 | } | 1988 | } |
2003 | 1989 | ||
2004 | static void efx_unregister_netdev(struct efx_nic *efx) | 1990 | static void efx_unregister_netdev(struct efx_nic *efx) |
2005 | { | 1991 | { |
2006 | struct efx_channel *channel; | 1992 | struct efx_channel *channel; |
2007 | struct efx_tx_queue *tx_queue; | 1993 | struct efx_tx_queue *tx_queue; |
2008 | 1994 | ||
2009 | if (!efx->net_dev) | 1995 | if (!efx->net_dev) |
2010 | return; | 1996 | return; |
2011 | 1997 | ||
2012 | BUG_ON(netdev_priv(efx->net_dev) != efx); | 1998 | BUG_ON(netdev_priv(efx->net_dev) != efx); |
2013 | 1999 | ||
2014 | /* Free up any skbs still remaining. This has to happen before | 2000 | /* Free up any skbs still remaining. This has to happen before |
2015 | * we try to unregister the netdev as running their destructors | 2001 | * we try to unregister the netdev as running their destructors |
2016 | * may be needed to get the device ref. count to 0. */ | 2002 | * may be needed to get the device ref. count to 0. */ |
2017 | efx_for_each_channel(channel, efx) { | 2003 | efx_for_each_channel(channel, efx) { |
2018 | efx_for_each_channel_tx_queue(tx_queue, channel) | 2004 | efx_for_each_channel_tx_queue(tx_queue, channel) |
2019 | efx_release_tx_buffers(tx_queue); | 2005 | efx_release_tx_buffers(tx_queue); |
2020 | } | 2006 | } |
2021 | 2007 | ||
2022 | if (efx_dev_registered(efx)) { | 2008 | if (efx_dev_registered(efx)) { |
2023 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); | 2009 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); |
2024 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); | 2010 | device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type); |
2025 | unregister_netdev(efx->net_dev); | 2011 | unregister_netdev(efx->net_dev); |
2026 | } | 2012 | } |
2027 | } | 2013 | } |
2028 | 2014 | ||
2029 | /************************************************************************** | 2015 | /************************************************************************** |
2030 | * | 2016 | * |
2031 | * Device reset and suspend | 2017 | * Device reset and suspend |
2032 | * | 2018 | * |
2033 | **************************************************************************/ | 2019 | **************************************************************************/ |
2034 | 2020 | ||
2035 | /* Tears down the entire software state and most of the hardware state | 2021 | /* Tears down the entire software state and most of the hardware state |
2036 | * before reset. */ | 2022 | * before reset. */ |
2037 | void efx_reset_down(struct efx_nic *efx, enum reset_type method) | 2023 | void efx_reset_down(struct efx_nic *efx, enum reset_type method) |
2038 | { | 2024 | { |
2039 | EFX_ASSERT_RESET_SERIALISED(efx); | 2025 | EFX_ASSERT_RESET_SERIALISED(efx); |
2040 | 2026 | ||
2041 | efx_stop_all(efx); | 2027 | efx_stop_all(efx); |
2042 | mutex_lock(&efx->mac_lock); | 2028 | mutex_lock(&efx->mac_lock); |
2043 | 2029 | ||
2044 | efx_fini_channels(efx); | 2030 | efx_fini_channels(efx); |
2045 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) | 2031 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) |
2046 | efx->phy_op->fini(efx); | 2032 | efx->phy_op->fini(efx); |
2047 | efx->type->fini(efx); | 2033 | efx->type->fini(efx); |
2048 | } | 2034 | } |
2049 | 2035 | ||
2050 | /* This function will always ensure that the locks acquired in | 2036 | /* This function will always ensure that the locks acquired in |
2051 | * efx_reset_down() are released. A failure return code indicates | 2037 | * efx_reset_down() are released. A failure return code indicates |
2052 | * that we were unable to reinitialise the hardware, and the | 2038 | * that we were unable to reinitialise the hardware, and the |
2053 | * driver should be disabled. If ok is false, then the rx and tx | 2039 | * driver should be disabled. If ok is false, then the rx and tx |
2054 | * engines are not restarted, pending a RESET_DISABLE. */ | 2040 | * engines are not restarted, pending a RESET_DISABLE. */ |
2055 | int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) | 2041 | int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) |
2056 | { | 2042 | { |
2057 | int rc; | 2043 | int rc; |
2058 | 2044 | ||
2059 | EFX_ASSERT_RESET_SERIALISED(efx); | 2045 | EFX_ASSERT_RESET_SERIALISED(efx); |
2060 | 2046 | ||
2061 | rc = efx->type->init(efx); | 2047 | rc = efx->type->init(efx); |
2062 | if (rc) { | 2048 | if (rc) { |
2063 | netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); | 2049 | netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n"); |
2064 | goto fail; | 2050 | goto fail; |
2065 | } | 2051 | } |
2066 | 2052 | ||
2067 | if (!ok) | 2053 | if (!ok) |
2068 | goto fail; | 2054 | goto fail; |
2069 | 2055 | ||
2070 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { | 2056 | if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { |
2071 | rc = efx->phy_op->init(efx); | 2057 | rc = efx->phy_op->init(efx); |
2072 | if (rc) | 2058 | if (rc) |
2073 | goto fail; | 2059 | goto fail; |
2074 | if (efx->phy_op->reconfigure(efx)) | 2060 | if (efx->phy_op->reconfigure(efx)) |
2075 | netif_err(efx, drv, efx->net_dev, | 2061 | netif_err(efx, drv, efx->net_dev, |
2076 | "could not restore PHY settings\n"); | 2062 | "could not restore PHY settings\n"); |
2077 | } | 2063 | } |
2078 | 2064 | ||
2079 | efx->mac_op->reconfigure(efx); | 2065 | efx->mac_op->reconfigure(efx); |
2080 | 2066 | ||
2081 | efx_init_channels(efx); | 2067 | efx_init_channels(efx); |
2082 | efx_restore_filters(efx); | 2068 | efx_restore_filters(efx); |
2083 | 2069 | ||
2084 | mutex_unlock(&efx->mac_lock); | 2070 | mutex_unlock(&efx->mac_lock); |
2085 | 2071 | ||
2086 | efx_start_all(efx); | 2072 | efx_start_all(efx); |
2087 | 2073 | ||
2088 | return 0; | 2074 | return 0; |
2089 | 2075 | ||
2090 | fail: | 2076 | fail: |
2091 | efx->port_initialized = false; | 2077 | efx->port_initialized = false; |
2092 | 2078 | ||
2093 | mutex_unlock(&efx->mac_lock); | 2079 | mutex_unlock(&efx->mac_lock); |
2094 | 2080 | ||
2095 | return rc; | 2081 | return rc; |
2096 | } | 2082 | } |
2097 | 2083 | ||
2098 | /* Reset the NIC using the specified method. Note that the reset may | 2084 | /* Reset the NIC using the specified method. Note that the reset may |
2099 | * fail, in which case the card will be left in an unusable state. | 2085 | * fail, in which case the card will be left in an unusable state. |
2100 | * | 2086 | * |
2101 | * Caller must hold the rtnl_lock. | 2087 | * Caller must hold the rtnl_lock. |
2102 | */ | 2088 | */ |
2103 | int efx_reset(struct efx_nic *efx, enum reset_type method) | 2089 | int efx_reset(struct efx_nic *efx, enum reset_type method) |
2104 | { | 2090 | { |
2105 | int rc, rc2; | 2091 | int rc, rc2; |
2106 | bool disabled; | 2092 | bool disabled; |
2107 | 2093 | ||
2108 | netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", | 2094 | netif_info(efx, drv, efx->net_dev, "resetting (%s)\n", |
2109 | RESET_TYPE(method)); | 2095 | RESET_TYPE(method)); |
2110 | 2096 | ||
2111 | netif_device_detach(efx->net_dev); | 2097 | netif_device_detach(efx->net_dev); |
2112 | efx_reset_down(efx, method); | 2098 | efx_reset_down(efx, method); |
2113 | 2099 | ||
2114 | rc = efx->type->reset(efx, method); | 2100 | rc = efx->type->reset(efx, method); |
2115 | if (rc) { | 2101 | if (rc) { |
2116 | netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); | 2102 | netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n"); |
2117 | goto out; | 2103 | goto out; |
2118 | } | 2104 | } |
2119 | 2105 | ||
2120 | /* Clear flags for the scopes we covered. We assume the NIC and | 2106 | /* Clear flags for the scopes we covered. We assume the NIC and |
2121 | * driver are now quiescent so that there is no race here. | 2107 | * driver are now quiescent so that there is no race here. |
2122 | */ | 2108 | */ |
2123 | efx->reset_pending &= -(1 << (method + 1)); | 2109 | efx->reset_pending &= -(1 << (method + 1)); |
2124 | 2110 | ||
2125 | /* Reinitialise bus-mastering, which may have been turned off before | 2111 | /* Reinitialise bus-mastering, which may have been turned off before |
2126 | * the reset was scheduled. This is still appropriate, even in the | 2112 | * the reset was scheduled. This is still appropriate, even in the |
2127 | * RESET_TYPE_DISABLE since this driver generally assumes the hardware | 2113 | * RESET_TYPE_DISABLE since this driver generally assumes the hardware |
2128 | * can respond to requests. */ | 2114 | * can respond to requests. */ |
2129 | pci_set_master(efx->pci_dev); | 2115 | pci_set_master(efx->pci_dev); |
2130 | 2116 | ||
2131 | out: | 2117 | out: |
2132 | /* Leave device stopped if necessary */ | 2118 | /* Leave device stopped if necessary */ |
2133 | disabled = rc || method == RESET_TYPE_DISABLE; | 2119 | disabled = rc || method == RESET_TYPE_DISABLE; |
2134 | rc2 = efx_reset_up(efx, method, !disabled); | 2120 | rc2 = efx_reset_up(efx, method, !disabled); |
2135 | if (rc2) { | 2121 | if (rc2) { |
2136 | disabled = true; | 2122 | disabled = true; |
2137 | if (!rc) | 2123 | if (!rc) |
2138 | rc = rc2; | 2124 | rc = rc2; |
2139 | } | 2125 | } |
2140 | 2126 | ||
2141 | if (disabled) { | 2127 | if (disabled) { |
2142 | dev_close(efx->net_dev); | 2128 | dev_close(efx->net_dev); |
2143 | netif_err(efx, drv, efx->net_dev, "has been disabled\n"); | 2129 | netif_err(efx, drv, efx->net_dev, "has been disabled\n"); |
2144 | efx->state = STATE_DISABLED; | 2130 | efx->state = STATE_DISABLED; |
2145 | } else { | 2131 | } else { |
2146 | netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); | 2132 | netif_dbg(efx, drv, efx->net_dev, "reset complete\n"); |
2147 | netif_device_attach(efx->net_dev); | 2133 | netif_device_attach(efx->net_dev); |
2148 | } | 2134 | } |
2149 | return rc; | 2135 | return rc; |
2150 | } | 2136 | } |
2151 | 2137 | ||
2152 | /* The worker thread exists so that code that cannot sleep can | 2138 | /* The worker thread exists so that code that cannot sleep can |
2153 | * schedule a reset for later. | 2139 | * schedule a reset for later. |
2154 | */ | 2140 | */ |
2155 | static void efx_reset_work(struct work_struct *data) | 2141 | static void efx_reset_work(struct work_struct *data) |
2156 | { | 2142 | { |
2157 | struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); | 2143 | struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); |
2158 | unsigned long pending = ACCESS_ONCE(efx->reset_pending); | 2144 | unsigned long pending = ACCESS_ONCE(efx->reset_pending); |
2159 | 2145 | ||
2160 | if (!pending) | 2146 | if (!pending) |
2161 | return; | 2147 | return; |
2162 | 2148 | ||
2163 | /* If we're not RUNNING then don't reset. Leave the reset_pending | 2149 | /* If we're not RUNNING then don't reset. Leave the reset_pending |
2164 | * flags set so that efx_pci_probe_main will be retried */ | 2150 | * flags set so that efx_pci_probe_main will be retried */ |
2165 | if (efx->state != STATE_RUNNING) { | 2151 | if (efx->state != STATE_RUNNING) { |
2166 | netif_info(efx, drv, efx->net_dev, | 2152 | netif_info(efx, drv, efx->net_dev, |
2167 | "scheduled reset quenched. NIC not RUNNING\n"); | 2153 | "scheduled reset quenched. NIC not RUNNING\n"); |
2168 | return; | 2154 | return; |
2169 | } | 2155 | } |
2170 | 2156 | ||
2171 | rtnl_lock(); | 2157 | rtnl_lock(); |
2172 | (void)efx_reset(efx, fls(pending) - 1); | 2158 | (void)efx_reset(efx, fls(pending) - 1); |
2173 | rtnl_unlock(); | 2159 | rtnl_unlock(); |
2174 | } | 2160 | } |
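
reset_pending is a bitmask indexed by reset type, with wider scopes at higher bit positions. efx_reset_work() picks the widest pending scope with fls(pending) - 1, and efx_reset() then drops that bit and every bit below it with the -(1 << (method + 1)) mask, on the grounds that a bigger reset subsumes the smaller ones. A standalone sketch of those two bit tricks (fls_ul() stands in for the kernel's fls(); the mask values are made up for illustration):

#include <stdio.h>

/* Same semantics as the kernel's fls(): position of the highest set bit,
 * counting from 1, or 0 when no bit is set. */
static int fls_ul(unsigned long x)
{
	int pos = 0;

	while (x) {
		pos++;
		x >>= 1;
	}
	return pos;
}

int main(void)
{
	/* Hypothetical pending mask: reset types 0, 2 and 3 are queued. */
	unsigned long pending = (1UL << 0) | (1UL << 2) | (1UL << 3);

	int method = fls_ul(pending) - 1;	/* widest scope: type 3 */

	/* After that reset completes, clear its bit and every smaller scope. */
	pending &= -(1UL << (method + 1));

	printf("method = %d, remaining pending = 0x%lx\n", method, pending);
	return 0;
}
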
2175 | 2161 | ||
2176 | void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | 2162 | void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) |
2177 | { | 2163 | { |
2178 | enum reset_type method; | 2164 | enum reset_type method; |
2179 | 2165 | ||
2180 | switch (type) { | 2166 | switch (type) { |
2181 | case RESET_TYPE_INVISIBLE: | 2167 | case RESET_TYPE_INVISIBLE: |
2182 | case RESET_TYPE_ALL: | 2168 | case RESET_TYPE_ALL: |
2183 | case RESET_TYPE_WORLD: | 2169 | case RESET_TYPE_WORLD: |
2184 | case RESET_TYPE_DISABLE: | 2170 | case RESET_TYPE_DISABLE: |
2185 | method = type; | 2171 | method = type; |
2186 | netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", | 2172 | netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", |
2187 | RESET_TYPE(method)); | 2173 | RESET_TYPE(method)); |
2188 | break; | 2174 | break; |
2189 | default: | 2175 | default: |
2190 | method = efx->type->map_reset_reason(type); | 2176 | method = efx->type->map_reset_reason(type); |
2191 | netif_dbg(efx, drv, efx->net_dev, | 2177 | netif_dbg(efx, drv, efx->net_dev, |
2192 | "scheduling %s reset for %s\n", | 2178 | "scheduling %s reset for %s\n", |
2193 | RESET_TYPE(method), RESET_TYPE(type)); | 2179 | RESET_TYPE(method), RESET_TYPE(type)); |
2194 | break; | 2180 | break; |
2195 | } | 2181 | } |
2196 | 2182 | ||
2197 | set_bit(method, &efx->reset_pending); | 2183 | set_bit(method, &efx->reset_pending); |
2198 | 2184 | ||
2199 | /* efx_process_channel() will no longer read events once a | 2185 | /* efx_process_channel() will no longer read events once a |
2200 | * reset is scheduled. So switch back to poll'd MCDI completions. */ | 2186 | * reset is scheduled. So switch back to poll'd MCDI completions. */ |
2201 | efx_mcdi_mode_poll(efx); | 2187 | efx_mcdi_mode_poll(efx); |
2202 | 2188 | ||
2203 | queue_work(reset_workqueue, &efx->reset_work); | 2189 | queue_work(reset_workqueue, &efx->reset_work); |
2204 | } | 2190 | } |
2205 | 2191 | ||
2206 | /************************************************************************** | 2192 | /************************************************************************** |
2207 | * | 2193 | * |
2208 | * List of NICs we support | 2194 | * List of NICs we support |
2209 | * | 2195 | * |
2210 | **************************************************************************/ | 2196 | **************************************************************************/ |
2211 | 2197 | ||
2212 | /* PCI device ID table */ | 2198 | /* PCI device ID table */ |
2213 | static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { | 2199 | static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = { |
2214 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), | 2200 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), |
2215 | .driver_data = (unsigned long) &falcon_a1_nic_type}, | 2201 | .driver_data = (unsigned long) &falcon_a1_nic_type}, |
2216 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), | 2202 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), |
2217 | .driver_data = (unsigned long) &falcon_b0_nic_type}, | 2203 | .driver_data = (unsigned long) &falcon_b0_nic_type}, |
2218 | {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), | 2204 | {PCI_DEVICE(EFX_VENDID_SFC, BETHPAGE_A_P_DEVID), |
2219 | .driver_data = (unsigned long) &siena_a0_nic_type}, | 2205 | .driver_data = (unsigned long) &siena_a0_nic_type}, |
2220 | {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), | 2206 | {PCI_DEVICE(EFX_VENDID_SFC, SIENA_A_P_DEVID), |
2221 | .driver_data = (unsigned long) &siena_a0_nic_type}, | 2207 | .driver_data = (unsigned long) &siena_a0_nic_type}, |
2222 | {0} /* end of list */ | 2208 | {0} /* end of list */ |
2223 | }; | 2209 | }; |
2224 | 2210 | ||
2225 | /************************************************************************** | 2211 | /************************************************************************** |
2226 | * | 2212 | * |
2227 | * Dummy PHY/MAC operations | 2213 | * Dummy PHY/MAC operations |
2228 | * | 2214 | * |
2229 | * Can be used for some unimplemented operations | 2215 | * Can be used for some unimplemented operations |
2230 | * Needed so all function pointers are valid and do not have to be tested | 2216 | * Needed so all function pointers are valid and do not have to be tested |
2231 | * before use | 2217 | * before use |
2232 | * | 2218 | * |
2233 | **************************************************************************/ | 2219 | **************************************************************************/ |
2234 | int efx_port_dummy_op_int(struct efx_nic *efx) | 2220 | int efx_port_dummy_op_int(struct efx_nic *efx) |
2235 | { | 2221 | { |
2236 | return 0; | 2222 | return 0; |
2237 | } | 2223 | } |
2238 | void efx_port_dummy_op_void(struct efx_nic *efx) {} | 2224 | void efx_port_dummy_op_void(struct efx_nic *efx) {} |
2239 | 2225 | ||
2240 | static bool efx_port_dummy_op_poll(struct efx_nic *efx) | 2226 | static bool efx_port_dummy_op_poll(struct efx_nic *efx) |
2241 | { | 2227 | { |
2242 | return false; | 2228 | return false; |
2243 | } | 2229 | } |
2244 | 2230 | ||
2245 | static const struct efx_phy_operations efx_dummy_phy_operations = { | 2231 | static const struct efx_phy_operations efx_dummy_phy_operations = { |
2246 | .init = efx_port_dummy_op_int, | 2232 | .init = efx_port_dummy_op_int, |
2247 | .reconfigure = efx_port_dummy_op_int, | 2233 | .reconfigure = efx_port_dummy_op_int, |
2248 | .poll = efx_port_dummy_op_poll, | 2234 | .poll = efx_port_dummy_op_poll, |
2249 | .fini = efx_port_dummy_op_void, | 2235 | .fini = efx_port_dummy_op_void, |
2250 | }; | 2236 | }; |
2251 | 2237 | ||
2252 | /************************************************************************** | 2238 | /************************************************************************** |
2253 | * | 2239 | * |
2254 | * Data housekeeping | 2240 | * Data housekeeping |
2255 | * | 2241 | * |
2256 | **************************************************************************/ | 2242 | **************************************************************************/ |
2257 | 2243 | ||
2258 | /* This zeroes out and then fills in the invariants in a struct | 2244 | /* This zeroes out and then fills in the invariants in a struct |
2259 | * efx_nic (including all sub-structures). | 2245 | * efx_nic (including all sub-structures). |
2260 | */ | 2246 | */ |
2261 | static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, | 2247 | static int efx_init_struct(struct efx_nic *efx, const struct efx_nic_type *type, |
2262 | struct pci_dev *pci_dev, struct net_device *net_dev) | 2248 | struct pci_dev *pci_dev, struct net_device *net_dev) |
2263 | { | 2249 | { |
2264 | int i; | 2250 | int i; |
2265 | 2251 | ||
2266 | /* Initialise common structures */ | 2252 | /* Initialise common structures */ |
2267 | memset(efx, 0, sizeof(*efx)); | 2253 | memset(efx, 0, sizeof(*efx)); |
2268 | spin_lock_init(&efx->biu_lock); | 2254 | spin_lock_init(&efx->biu_lock); |
2269 | #ifdef CONFIG_SFC_MTD | 2255 | #ifdef CONFIG_SFC_MTD |
2270 | INIT_LIST_HEAD(&efx->mtd_list); | 2256 | INIT_LIST_HEAD(&efx->mtd_list); |
2271 | #endif | 2257 | #endif |
2272 | INIT_WORK(&efx->reset_work, efx_reset_work); | 2258 | INIT_WORK(&efx->reset_work, efx_reset_work); |
2273 | INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); | 2259 | INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); |
2274 | efx->pci_dev = pci_dev; | 2260 | efx->pci_dev = pci_dev; |
2275 | efx->msg_enable = debug; | 2261 | efx->msg_enable = debug; |
2276 | efx->state = STATE_INIT; | 2262 | efx->state = STATE_INIT; |
2277 | strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); | 2263 | strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); |
2278 | 2264 | ||
2279 | efx->net_dev = net_dev; | 2265 | efx->net_dev = net_dev; |
2280 | spin_lock_init(&efx->stats_lock); | 2266 | spin_lock_init(&efx->stats_lock); |
2281 | mutex_init(&efx->mac_lock); | 2267 | mutex_init(&efx->mac_lock); |
2282 | efx->mac_op = type->default_mac_ops; | 2268 | efx->mac_op = type->default_mac_ops; |
2283 | efx->phy_op = &efx_dummy_phy_operations; | 2269 | efx->phy_op = &efx_dummy_phy_operations; |
2284 | efx->mdio.dev = net_dev; | 2270 | efx->mdio.dev = net_dev; |
2285 | INIT_WORK(&efx->mac_work, efx_mac_work); | 2271 | INIT_WORK(&efx->mac_work, efx_mac_work); |
2286 | 2272 | ||
2287 | for (i = 0; i < EFX_MAX_CHANNELS; i++) { | 2273 | for (i = 0; i < EFX_MAX_CHANNELS; i++) { |
2288 | efx->channel[i] = efx_alloc_channel(efx, i, NULL); | 2274 | efx->channel[i] = efx_alloc_channel(efx, i, NULL); |
2289 | if (!efx->channel[i]) | 2275 | if (!efx->channel[i]) |
2290 | goto fail; | 2276 | goto fail; |
2291 | } | 2277 | } |
2292 | 2278 | ||
2293 | efx->type = type; | 2279 | efx->type = type; |
2294 | 2280 | ||
2295 | EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); | 2281 | EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); |
2296 | 2282 | ||
2297 | /* Higher numbered interrupt modes are less capable! */ | 2283 | /* Higher numbered interrupt modes are less capable! */ |
2298 | efx->interrupt_mode = max(efx->type->max_interrupt_mode, | 2284 | efx->interrupt_mode = max(efx->type->max_interrupt_mode, |
2299 | interrupt_mode); | 2285 | interrupt_mode); |
2300 | 2286 | ||
2301 | /* Would be good to use the net_dev name, but we're too early */ | 2287 | /* Would be good to use the net_dev name, but we're too early */ |
2302 | snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", | 2288 | snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s", |
2303 | pci_name(pci_dev)); | 2289 | pci_name(pci_dev)); |
2304 | efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); | 2290 | efx->workqueue = create_singlethread_workqueue(efx->workqueue_name); |
2305 | if (!efx->workqueue) | 2291 | if (!efx->workqueue) |
2306 | goto fail; | 2292 | goto fail; |
2307 | 2293 | ||
2308 | return 0; | 2294 | return 0; |
2309 | 2295 | ||
2310 | fail: | 2296 | fail: |
2311 | efx_fini_struct(efx); | 2297 | efx_fini_struct(efx); |
2312 | return -ENOMEM; | 2298 | return -ENOMEM; |
2313 | } | 2299 | } |
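
The interrupt-mode clamp in efx_init_struct() looks backwards at first glance: max() is used to pick the *less* capable option because, as the comment says, higher-numbered modes are less capable. A tiny sketch with a hypothetical mode ordering that mirrors that convention:

#include <stdio.h>

/* Hypothetical ordering: higher number = less capable, as in the driver. */
enum int_mode { MODE_MSIX = 0, MODE_MSI = 1, MODE_LEGACY = 2 };

static int max_int(int a, int b)
{
	return a > b ? a : b;
}

int main(void)
{
	int nic_max_mode = MODE_MSI;	/* best the hardware supports */
	int requested = MODE_MSIX;	/* what the module parameter asked for */

	/* max() means the request can only downgrade, never exceed the NIC. */
	int chosen = max_int(nic_max_mode, requested);

	printf("chosen interrupt mode = %d\n", chosen);	/* 1 (MSI) */
	return 0;
}
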
2314 | 2300 | ||
2315 | static void efx_fini_struct(struct efx_nic *efx) | 2301 | static void efx_fini_struct(struct efx_nic *efx) |
2316 | { | 2302 | { |
2317 | int i; | 2303 | int i; |
2318 | 2304 | ||
2319 | for (i = 0; i < EFX_MAX_CHANNELS; i++) | 2305 | for (i = 0; i < EFX_MAX_CHANNELS; i++) |
2320 | kfree(efx->channel[i]); | 2306 | kfree(efx->channel[i]); |
2321 | 2307 | ||
2322 | if (efx->workqueue) { | 2308 | if (efx->workqueue) { |
2323 | destroy_workqueue(efx->workqueue); | 2309 | destroy_workqueue(efx->workqueue); |
2324 | efx->workqueue = NULL; | 2310 | efx->workqueue = NULL; |
2325 | } | 2311 | } |
2326 | } | 2312 | } |
2327 | 2313 | ||
2328 | /************************************************************************** | 2314 | /************************************************************************** |
2329 | * | 2315 | * |
2330 | * PCI interface | 2316 | * PCI interface |
2331 | * | 2317 | * |
2332 | **************************************************************************/ | 2318 | **************************************************************************/ |
2333 | 2319 | ||
2334 | /* Main body of final NIC shutdown code | 2320 | /* Main body of final NIC shutdown code |
2335 | * This is called only at module unload (or hotplug removal). | 2321 | * This is called only at module unload (or hotplug removal). |
2336 | */ | 2322 | */ |
2337 | static void efx_pci_remove_main(struct efx_nic *efx) | 2323 | static void efx_pci_remove_main(struct efx_nic *efx) |
2338 | { | 2324 | { |
2339 | #ifdef CONFIG_RFS_ACCEL | 2325 | #ifdef CONFIG_RFS_ACCEL |
2340 | free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); | 2326 | free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap); |
2341 | efx->net_dev->rx_cpu_rmap = NULL; | 2327 | efx->net_dev->rx_cpu_rmap = NULL; |
2342 | #endif | 2328 | #endif |
2343 | efx_nic_fini_interrupt(efx); | 2329 | efx_nic_fini_interrupt(efx); |
2344 | efx_fini_channels(efx); | 2330 | efx_fini_channels(efx); |
2345 | efx_fini_port(efx); | 2331 | efx_fini_port(efx); |
2346 | efx->type->fini(efx); | 2332 | efx->type->fini(efx); |
2347 | efx_fini_napi(efx); | 2333 | efx_fini_napi(efx); |
2348 | efx_remove_all(efx); | 2334 | efx_remove_all(efx); |
2349 | } | 2335 | } |
2350 | 2336 | ||
2351 | /* Final NIC shutdown | 2337 | /* Final NIC shutdown |
2352 | * This is called only at module unload (or hotplug removal). | 2338 | * This is called only at module unload (or hotplug removal). |
2353 | */ | 2339 | */ |
2354 | static void efx_pci_remove(struct pci_dev *pci_dev) | 2340 | static void efx_pci_remove(struct pci_dev *pci_dev) |
2355 | { | 2341 | { |
2356 | struct efx_nic *efx; | 2342 | struct efx_nic *efx; |
2357 | 2343 | ||
2358 | efx = pci_get_drvdata(pci_dev); | 2344 | efx = pci_get_drvdata(pci_dev); |
2359 | if (!efx) | 2345 | if (!efx) |
2360 | return; | 2346 | return; |
2361 | 2347 | ||
2362 | /* Mark the NIC as fini, then stop the interface */ | 2348 | /* Mark the NIC as fini, then stop the interface */ |
2363 | rtnl_lock(); | 2349 | rtnl_lock(); |
2364 | efx->state = STATE_FINI; | 2350 | efx->state = STATE_FINI; |
2365 | dev_close(efx->net_dev); | 2351 | dev_close(efx->net_dev); |
2366 | 2352 | ||
2367 | /* Allow any queued efx_resets() to complete */ | 2353 | /* Allow any queued efx_resets() to complete */ |
2368 | rtnl_unlock(); | 2354 | rtnl_unlock(); |
2369 | 2355 | ||
2370 | efx_unregister_netdev(efx); | 2356 | efx_unregister_netdev(efx); |
2371 | 2357 | ||
2372 | efx_mtd_remove(efx); | 2358 | efx_mtd_remove(efx); |
2373 | 2359 | ||
2374 | /* Wait for any scheduled resets to complete. No more will be | 2360 | /* Wait for any scheduled resets to complete. No more will be |
2375 | * scheduled from this point because efx_stop_all() has been | 2361 | * scheduled from this point because efx_stop_all() has been |
2376 | * called, we are no longer registered with driverlink, and | 2362 | * called, we are no longer registered with driverlink, and |
2377 | * the net_devices have been removed. */ | 2363 | * the net_devices have been removed. */ |
2378 | cancel_work_sync(&efx->reset_work); | 2364 | cancel_work_sync(&efx->reset_work); |
2379 | 2365 | ||
2380 | efx_pci_remove_main(efx); | 2366 | efx_pci_remove_main(efx); |
2381 | 2367 | ||
2382 | efx_fini_io(efx); | 2368 | efx_fini_io(efx); |
2383 | netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); | 2369 | netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n"); |
2384 | 2370 | ||
2385 | pci_set_drvdata(pci_dev, NULL); | 2371 | pci_set_drvdata(pci_dev, NULL); |
2386 | efx_fini_struct(efx); | 2372 | efx_fini_struct(efx); |
2387 | free_netdev(efx->net_dev); | 2373 | free_netdev(efx->net_dev); |
2388 | }; | 2374 | }; |
2389 | 2375 | ||
2390 | /* Main body of NIC initialisation | 2376 | /* Main body of NIC initialisation |
2391 | * This is called at module load (or hotplug insertion, theoretically). | 2377 | * This is called at module load (or hotplug insertion, theoretically). |
2392 | */ | 2378 | */ |
2393 | static int efx_pci_probe_main(struct efx_nic *efx) | 2379 | static int efx_pci_probe_main(struct efx_nic *efx) |
2394 | { | 2380 | { |
2395 | int rc; | 2381 | int rc; |
2396 | 2382 | ||
2397 | /* Do start-of-day initialisation */ | 2383 | /* Do start-of-day initialisation */ |
2398 | rc = efx_probe_all(efx); | 2384 | rc = efx_probe_all(efx); |
2399 | if (rc) | 2385 | if (rc) |
2400 | goto fail1; | 2386 | goto fail1; |
2401 | 2387 | ||
2402 | efx_init_napi(efx); | 2388 | efx_init_napi(efx); |
2403 | 2389 | ||
2404 | rc = efx->type->init(efx); | 2390 | rc = efx->type->init(efx); |
2405 | if (rc) { | 2391 | if (rc) { |
2406 | netif_err(efx, probe, efx->net_dev, | 2392 | netif_err(efx, probe, efx->net_dev, |
2407 | "failed to initialise NIC\n"); | 2393 | "failed to initialise NIC\n"); |
2408 | goto fail3; | 2394 | goto fail3; |
2409 | } | 2395 | } |
2410 | 2396 | ||
2411 | rc = efx_init_port(efx); | 2397 | rc = efx_init_port(efx); |
2412 | if (rc) { | 2398 | if (rc) { |
2413 | netif_err(efx, probe, efx->net_dev, | 2399 | netif_err(efx, probe, efx->net_dev, |
2414 | "failed to initialise port\n"); | 2400 | "failed to initialise port\n"); |
2415 | goto fail4; | 2401 | goto fail4; |
2416 | } | 2402 | } |
2417 | 2403 | ||
2418 | efx_init_channels(efx); | 2404 | efx_init_channels(efx); |
2419 | 2405 | ||
2420 | rc = efx_nic_init_interrupt(efx); | 2406 | rc = efx_nic_init_interrupt(efx); |
2421 | if (rc) | 2407 | if (rc) |
2422 | goto fail5; | 2408 | goto fail5; |
2423 | 2409 | ||
2424 | return 0; | 2410 | return 0; |
2425 | 2411 | ||
2426 | fail5: | 2412 | fail5: |
2427 | efx_fini_channels(efx); | 2413 | efx_fini_channels(efx); |
2428 | efx_fini_port(efx); | 2414 | efx_fini_port(efx); |
2429 | fail4: | 2415 | fail4: |
2430 | efx->type->fini(efx); | 2416 | efx->type->fini(efx); |
2431 | fail3: | 2417 | fail3: |
2432 | efx_fini_napi(efx); | 2418 | efx_fini_napi(efx); |
2433 | efx_remove_all(efx); | 2419 | efx_remove_all(efx); |
2434 | fail1: | 2420 | fail1: |
2435 | return rc; | 2421 | return rc; |
2436 | } | 2422 | } |
2437 | 2423 | ||
2438 | /* NIC initialisation | 2424 | /* NIC initialisation |
2439 | * | 2425 | * |
2440 | * This is called at module load (or hotplug insertion, | 2426 | * This is called at module load (or hotplug insertion, |
2441 | * theoretically). It sets up PCI mappings, tests and resets the NIC, | 2427 | * theoretically). It sets up PCI mappings, tests and resets the NIC, |
2442 | * sets up and registers the network devices with the kernel and hooks | 2428 | * sets up and registers the network devices with the kernel and hooks |
2443 | * the interrupt service routine. It does not prepare the device for | 2429 | * the interrupt service routine. It does not prepare the device for |
2444 | * transmission; this is left to the first time one of the network | 2430 | * transmission; this is left to the first time one of the network |
2445 | * interfaces is brought up (i.e. efx_net_open). | 2431 | * interfaces is brought up (i.e. efx_net_open). |
2446 | */ | 2432 | */ |
2447 | static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | 2433 | static int __devinit efx_pci_probe(struct pci_dev *pci_dev, |
2448 | const struct pci_device_id *entry) | 2434 | const struct pci_device_id *entry) |
2449 | { | 2435 | { |
2450 | const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data; | 2436 | const struct efx_nic_type *type = (const struct efx_nic_type *) entry->driver_data; |
2451 | struct net_device *net_dev; | 2437 | struct net_device *net_dev; |
2452 | struct efx_nic *efx; | 2438 | struct efx_nic *efx; |
2453 | int i, rc; | 2439 | int i, rc; |
2454 | 2440 | ||
2455 | /* Allocate and initialise a struct net_device and struct efx_nic */ | 2441 | /* Allocate and initialise a struct net_device and struct efx_nic */ |
2456 | net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, | 2442 | net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES, |
2457 | EFX_MAX_RX_QUEUES); | 2443 | EFX_MAX_RX_QUEUES); |
2458 | if (!net_dev) | 2444 | if (!net_dev) |
2459 | return -ENOMEM; | 2445 | return -ENOMEM; |
2460 | net_dev->features |= (type->offload_features | NETIF_F_SG | | 2446 | net_dev->features |= (type->offload_features | NETIF_F_SG | |
2461 | NETIF_F_HIGHDMA | NETIF_F_TSO | | 2447 | NETIF_F_HIGHDMA | NETIF_F_TSO | |
2462 | NETIF_F_RXCSUM); | 2448 | NETIF_F_RXCSUM); |
2463 | if (type->offload_features & NETIF_F_V6_CSUM) | 2449 | if (type->offload_features & NETIF_F_V6_CSUM) |
2464 | net_dev->features |= NETIF_F_TSO6; | 2450 | net_dev->features |= NETIF_F_TSO6; |
2465 | /* Mask for features that also apply to VLAN devices */ | 2451 | /* Mask for features that also apply to VLAN devices */ |
2466 | net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | | 2452 | net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG | |
2467 | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | | 2453 | NETIF_F_HIGHDMA | NETIF_F_ALL_TSO | |
2468 | NETIF_F_RXCSUM); | 2454 | NETIF_F_RXCSUM); |
2469 | /* All offloads can be toggled */ | 2455 | /* All offloads can be toggled */ |
2470 | net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; | 2456 | net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA; |
2471 | efx = netdev_priv(net_dev); | 2457 | efx = netdev_priv(net_dev); |
2472 | pci_set_drvdata(pci_dev, efx); | 2458 | pci_set_drvdata(pci_dev, efx); |
2473 | SET_NETDEV_DEV(net_dev, &pci_dev->dev); | 2459 | SET_NETDEV_DEV(net_dev, &pci_dev->dev); |
2474 | rc = efx_init_struct(efx, type, pci_dev, net_dev); | 2460 | rc = efx_init_struct(efx, type, pci_dev, net_dev); |
2475 | if (rc) | 2461 | if (rc) |
2476 | goto fail1; | 2462 | goto fail1; |
2477 | 2463 | ||
2478 | netif_info(efx, probe, efx->net_dev, | 2464 | netif_info(efx, probe, efx->net_dev, |
2479 | "Solarflare NIC detected\n"); | 2465 | "Solarflare NIC detected\n"); |
2480 | 2466 | ||
2481 | /* Set up basic I/O (BAR mappings etc) */ | 2467 | /* Set up basic I/O (BAR mappings etc) */ |
2482 | rc = efx_init_io(efx); | 2468 | rc = efx_init_io(efx); |
2483 | if (rc) | 2469 | if (rc) |
2484 | goto fail2; | 2470 | goto fail2; |
2485 | 2471 | ||
2486 | /* No serialisation is required with the reset path because | 2472 | /* No serialisation is required with the reset path because |
2487 | * we're in STATE_INIT. */ | 2473 | * we're in STATE_INIT. */ |
2488 | for (i = 0; i < 5; i++) { | 2474 | for (i = 0; i < 5; i++) { |
2489 | rc = efx_pci_probe_main(efx); | 2475 | rc = efx_pci_probe_main(efx); |
2490 | 2476 | ||
2491 | /* Serialise against efx_reset(). No more resets will be | 2477 | /* Serialise against efx_reset(). No more resets will be |
2492 | * scheduled since efx_stop_all() has been called, and we | 2478 | * scheduled since efx_stop_all() has been called, and we |
2493 | * have not and never have been registered with either | 2479 | * have not and never have been registered with either |
2494 | * the rtnetlink or driverlink layers. */ | 2480 | * the rtnetlink or driverlink layers. */ |
2495 | cancel_work_sync(&efx->reset_work); | 2481 | cancel_work_sync(&efx->reset_work); |
2496 | 2482 | ||
2497 | if (rc == 0) { | 2483 | if (rc == 0) { |
2498 | if (efx->reset_pending) { | 2484 | if (efx->reset_pending) { |
2499 | /* If there was a scheduled reset during | 2485 | /* If there was a scheduled reset during |
2500 | * probe, the NIC is probably hosed anyway */ | 2486 | * probe, the NIC is probably hosed anyway */ |
2501 | efx_pci_remove_main(efx); | 2487 | efx_pci_remove_main(efx); |
2502 | rc = -EIO; | 2488 | rc = -EIO; |
2503 | } else { | 2489 | } else { |
2504 | break; | 2490 | break; |
2505 | } | 2491 | } |
2506 | } | 2492 | } |
2507 | 2493 | ||
2508 | /* Retry if a recoverable reset event has been scheduled */ | 2494 | /* Retry if a recoverable reset event has been scheduled */ |
2509 | if (efx->reset_pending & | 2495 | if (efx->reset_pending & |
2510 | ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) || | 2496 | ~(1 << RESET_TYPE_INVISIBLE | 1 << RESET_TYPE_ALL) || |
2511 | !efx->reset_pending) | 2497 | !efx->reset_pending) |
2512 | goto fail3; | 2498 | goto fail3; |
2513 | 2499 | ||
2514 | efx->reset_pending = 0; | 2500 | efx->reset_pending = 0; |
2515 | } | 2501 | } |
2516 | 2502 | ||
2517 | if (rc) { | 2503 | if (rc) { |
2518 | netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n"); | 2504 | netif_err(efx, probe, efx->net_dev, "Could not reset NIC\n"); |
2519 | goto fail4; | 2505 | goto fail4; |
2520 | } | 2506 | } |
2521 | 2507 | ||
2522 | /* Switch to the running state before we expose the device to the OS, | 2508 | /* Switch to the running state before we expose the device to the OS, |
2523 | * so that dev_open()|efx_start_all() will actually start the device */ | 2509 | * so that dev_open()|efx_start_all() will actually start the device */ |
2524 | efx->state = STATE_RUNNING; | 2510 | efx->state = STATE_RUNNING; |
2525 | 2511 | ||
2526 | rc = efx_register_netdev(efx); | 2512 | rc = efx_register_netdev(efx); |
2527 | if (rc) | 2513 | if (rc) |
2528 | goto fail5; | 2514 | goto fail5; |
2529 | 2515 | ||
2530 | netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); | 2516 | netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n"); |
2531 | 2517 | ||
2532 | rtnl_lock(); | 2518 | rtnl_lock(); |
2533 | efx_mtd_probe(efx); /* allowed to fail */ | 2519 | efx_mtd_probe(efx); /* allowed to fail */ |
2534 | rtnl_unlock(); | 2520 | rtnl_unlock(); |
2535 | return 0; | 2521 | return 0; |
2536 | 2522 | ||
2537 | fail5: | 2523 | fail5: |
2538 | efx_pci_remove_main(efx); | 2524 | efx_pci_remove_main(efx); |
2539 | fail4: | 2525 | fail4: |
2540 | fail3: | 2526 | fail3: |
2541 | efx_fini_io(efx); | 2527 | efx_fini_io(efx); |
2542 | fail2: | 2528 | fail2: |
2543 | efx_fini_struct(efx); | 2529 | efx_fini_struct(efx); |
2544 | fail1: | 2530 | fail1: |
2545 | WARN_ON(rc > 0); | 2531 | WARN_ON(rc > 0); |
2546 | netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); | 2532 | netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); |
2547 | free_netdev(net_dev); | 2533 | free_netdev(net_dev); |
2548 | return rc; | 2534 | return rc; |
2549 | } | 2535 | } |
2550 | 2536 | ||
2551 | static int efx_pm_freeze(struct device *dev) | 2537 | static int efx_pm_freeze(struct device *dev) |
2552 | { | 2538 | { |
2553 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | 2539 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); |
2554 | 2540 | ||
2555 | efx->state = STATE_FINI; | 2541 | efx->state = STATE_FINI; |
2556 | 2542 | ||
2557 | netif_device_detach(efx->net_dev); | 2543 | netif_device_detach(efx->net_dev); |
2558 | 2544 | ||
2559 | efx_stop_all(efx); | 2545 | efx_stop_all(efx); |
2560 | efx_fini_channels(efx); | 2546 | efx_fini_channels(efx); |
2561 | 2547 | ||
2562 | return 0; | 2548 | return 0; |
2563 | } | 2549 | } |
2564 | 2550 | ||
2565 | static int efx_pm_thaw(struct device *dev) | 2551 | static int efx_pm_thaw(struct device *dev) |
2566 | { | 2552 | { |
2567 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); | 2553 | struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); |
2568 | 2554 | ||
2569 | efx->state = STATE_INIT; | 2555 | efx->state = STATE_INIT; |
2570 | 2556 | ||
2571 | efx_init_channels(efx); | 2557 | efx_init_channels(efx); |
2572 | 2558 | ||
2573 | mutex_lock(&efx->mac_lock); | 2559 | mutex_lock(&efx->mac_lock); |
2574 | efx->phy_op->reconfigure(efx); | 2560 | efx->phy_op->reconfigure(efx); |
2575 | mutex_unlock(&efx->mac_lock); | 2561 | mutex_unlock(&efx->mac_lock); |
2576 | 2562 | ||
2577 | efx_start_all(efx); | 2563 | efx_start_all(efx); |
2578 | 2564 | ||
2579 | netif_device_attach(efx->net_dev); | 2565 | netif_device_attach(efx->net_dev); |
2580 | 2566 | ||
2581 | efx->state = STATE_RUNNING; | 2567 | efx->state = STATE_RUNNING; |
2582 | 2568 | ||
2583 | efx->type->resume_wol(efx); | 2569 | efx->type->resume_wol(efx); |
2584 | 2570 | ||
2585 | /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ | 2571 | /* Reschedule any quenched resets scheduled during efx_pm_freeze() */ |
2586 | queue_work(reset_workqueue, &efx->reset_work); | 2572 | queue_work(reset_workqueue, &efx->reset_work); |
2587 | 2573 | ||
2588 | return 0; | 2574 | return 0; |
2589 | } | 2575 | } |
2590 | 2576 | ||
2591 | static int efx_pm_poweroff(struct device *dev) | 2577 | static int efx_pm_poweroff(struct device *dev) |
2592 | { | 2578 | { |
2593 | struct pci_dev *pci_dev = to_pci_dev(dev); | 2579 | struct pci_dev *pci_dev = to_pci_dev(dev); |
2594 | struct efx_nic *efx = pci_get_drvdata(pci_dev); | 2580 | struct efx_nic *efx = pci_get_drvdata(pci_dev); |
2595 | 2581 | ||
2596 | efx->type->fini(efx); | 2582 | efx->type->fini(efx); |
2597 | 2583 | ||
2598 | efx->reset_pending = 0; | 2584 | efx->reset_pending = 0; |
2599 | 2585 | ||
2600 | pci_save_state(pci_dev); | 2586 | pci_save_state(pci_dev); |
2601 | return pci_set_power_state(pci_dev, PCI_D3hot); | 2587 | return pci_set_power_state(pci_dev, PCI_D3hot); |
2602 | } | 2588 | } |
2603 | 2589 | ||
2604 | /* Used for both resume and restore */ | 2590 | /* Used for both resume and restore */ |
2605 | static int efx_pm_resume(struct device *dev) | 2591 | static int efx_pm_resume(struct device *dev) |
2606 | { | 2592 | { |
2607 | struct pci_dev *pci_dev = to_pci_dev(dev); | 2593 | struct pci_dev *pci_dev = to_pci_dev(dev); |
2608 | struct efx_nic *efx = pci_get_drvdata(pci_dev); | 2594 | struct efx_nic *efx = pci_get_drvdata(pci_dev); |
2609 | int rc; | 2595 | int rc; |
2610 | 2596 | ||
2611 | rc = pci_set_power_state(pci_dev, PCI_D0); | 2597 | rc = pci_set_power_state(pci_dev, PCI_D0); |
2612 | if (rc) | 2598 | if (rc) |
2613 | return rc; | 2599 | return rc; |
2614 | pci_restore_state(pci_dev); | 2600 | pci_restore_state(pci_dev); |
2615 | rc = pci_enable_device(pci_dev); | 2601 | rc = pci_enable_device(pci_dev); |
2616 | if (rc) | 2602 | if (rc) |
2617 | return rc; | 2603 | return rc; |
2618 | pci_set_master(efx->pci_dev); | 2604 | pci_set_master(efx->pci_dev); |
2619 | rc = efx->type->reset(efx, RESET_TYPE_ALL); | 2605 | rc = efx->type->reset(efx, RESET_TYPE_ALL); |
2620 | if (rc) | 2606 | if (rc) |
2621 | return rc; | 2607 | return rc; |
2622 | rc = efx->type->init(efx); | 2608 | rc = efx->type->init(efx); |
2623 | if (rc) | 2609 | if (rc) |
2624 | return rc; | 2610 | return rc; |
2625 | efx_pm_thaw(dev); | 2611 | efx_pm_thaw(dev); |
2626 | return 0; | 2612 | return 0; |
2627 | } | 2613 | } |
2628 | 2614 | ||
2629 | static int efx_pm_suspend(struct device *dev) | 2615 | static int efx_pm_suspend(struct device *dev) |
2630 | { | 2616 | { |
2631 | int rc; | 2617 | int rc; |
2632 | 2618 | ||
2633 | efx_pm_freeze(dev); | 2619 | efx_pm_freeze(dev); |
2634 | rc = efx_pm_poweroff(dev); | 2620 | rc = efx_pm_poweroff(dev); |
2635 | if (rc) | 2621 | if (rc) |
2636 | efx_pm_resume(dev); | 2622 | efx_pm_resume(dev); |
2637 | return rc; | 2623 | return rc; |
2638 | } | 2624 | } |
2639 | 2625 | ||
2640 | static struct dev_pm_ops efx_pm_ops = { | 2626 | static struct dev_pm_ops efx_pm_ops = { |
2641 | .suspend = efx_pm_suspend, | 2627 | .suspend = efx_pm_suspend, |
2642 | .resume = efx_pm_resume, | 2628 | .resume = efx_pm_resume, |
2643 | .freeze = efx_pm_freeze, | 2629 | .freeze = efx_pm_freeze, |
2644 | .thaw = efx_pm_thaw, | 2630 | .thaw = efx_pm_thaw, |
2645 | .poweroff = efx_pm_poweroff, | 2631 | .poweroff = efx_pm_poweroff, |
2646 | .restore = efx_pm_resume, | 2632 | .restore = efx_pm_resume, |
2647 | }; | 2633 | }; |
2648 | 2634 | ||
2649 | static struct pci_driver efx_pci_driver = { | 2635 | static struct pci_driver efx_pci_driver = { |
2650 | .name = KBUILD_MODNAME, | 2636 | .name = KBUILD_MODNAME, |
2651 | .id_table = efx_pci_table, | 2637 | .id_table = efx_pci_table, |
2652 | .probe = efx_pci_probe, | 2638 | .probe = efx_pci_probe, |
2653 | .remove = efx_pci_remove, | 2639 | .remove = efx_pci_remove, |
2654 | .driver.pm = &efx_pm_ops, | 2640 | .driver.pm = &efx_pm_ops, |
2655 | }; | 2641 | }; |
2656 | 2642 | ||
2657 | /************************************************************************** | 2643 | /************************************************************************** |
2658 | * | 2644 | * |
2659 | * Kernel module interface | 2645 | * Kernel module interface |
2660 | * | 2646 | * |
2661 | *************************************************************************/ | 2647 | *************************************************************************/ |
2662 | 2648 | ||
2663 | module_param(interrupt_mode, uint, 0444); | 2649 | module_param(interrupt_mode, uint, 0444); |
2664 | MODULE_PARM_DESC(interrupt_mode, | 2650 | MODULE_PARM_DESC(interrupt_mode, |
2665 | "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); | 2651 | "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); |
2666 | 2652 | ||
2667 | static int __init efx_init_module(void) | 2653 | static int __init efx_init_module(void) |
2668 | { | 2654 | { |
2669 | int rc; | 2655 | int rc; |
2670 | 2656 | ||
2671 | printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); | 2657 | printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); |
2672 | 2658 | ||
2673 | rc = register_netdevice_notifier(&efx_netdev_notifier); | 2659 | rc = register_netdevice_notifier(&efx_netdev_notifier); |
2674 | if (rc) | 2660 | if (rc) |
2675 | goto err_notifier; | 2661 | goto err_notifier; |
2676 | 2662 | ||
2677 | reset_workqueue = create_singlethread_workqueue("sfc_reset"); | 2663 | reset_workqueue = create_singlethread_workqueue("sfc_reset"); |
2678 | if (!reset_workqueue) { | 2664 | if (!reset_workqueue) { |
2679 | rc = -ENOMEM; | 2665 | rc = -ENOMEM; |
2680 | goto err_reset; | 2666 | goto err_reset; |
2681 | } | 2667 | } |
2682 | 2668 | ||
2683 | rc = pci_register_driver(&efx_pci_driver); | 2669 | rc = pci_register_driver(&efx_pci_driver); |
2684 | if (rc < 0) | 2670 | if (rc < 0) |
2685 | goto err_pci; | 2671 | goto err_pci; |
2686 | 2672 | ||
2687 | return 0; | 2673 | return 0; |
2688 | 2674 | ||
2689 | err_pci: | 2675 | err_pci: |
2690 | destroy_workqueue(reset_workqueue); | 2676 | destroy_workqueue(reset_workqueue); |
2691 | err_reset: | 2677 | err_reset: |
2692 | unregister_netdevice_notifier(&efx_netdev_notifier); | 2678 | unregister_netdevice_notifier(&efx_netdev_notifier); |
2693 | err_notifier: | 2679 | err_notifier: |
2694 | return rc; | 2680 | return rc; |
2695 | } | 2681 | } |
2696 | 2682 | ||
2697 | static void __exit efx_exit_module(void) | 2683 | static void __exit efx_exit_module(void) |
2698 | { | 2684 | { |
2699 | printk(KERN_INFO "Solarflare NET driver unloading\n"); | 2685 | printk(KERN_INFO "Solarflare NET driver unloading\n"); |
2700 | 2686 | ||
2701 | pci_unregister_driver(&efx_pci_driver); | 2687 | pci_unregister_driver(&efx_pci_driver); |
2702 | destroy_workqueue(reset_workqueue); | 2688 | destroy_workqueue(reset_workqueue); |
2703 | unregister_netdevice_notifier(&efx_netdev_notifier); | 2689 | unregister_netdevice_notifier(&efx_netdev_notifier); |
2704 | 2690 | ||
2705 | } | 2691 | } |
2706 | 2692 | ||
2707 | module_init(efx_init_module); | 2693 | module_init(efx_init_module); |
2708 | module_exit(efx_exit_module); | 2694 | module_exit(efx_exit_module); |
2709 | 2695 | ||
2710 | MODULE_AUTHOR("Solarflare Communications and " | 2696 | MODULE_AUTHOR("Solarflare Communications and " |
2711 | "Michael Brown <mbrown@fensystems.co.uk>"); | 2697 | "Michael Brown <mbrown@fensystems.co.uk>"); |
2712 | MODULE_DESCRIPTION("Solarflare Communications network driver"); | 2698 | MODULE_DESCRIPTION("Solarflare Communications network driver"); |
2713 | MODULE_LICENSE("GPL"); | 2699 | MODULE_LICENSE("GPL"); |
2714 | MODULE_DEVICE_TABLE(pci, efx_pci_table); | 2700 | MODULE_DEVICE_TABLE(pci, efx_pci_table); |
2715 | 2701 |
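(Aside, not part of the patch: the retry condition in efx_pci_probe's probe loop above is easy to misread. A minimal restatement in C, assuming the driver's existing RESET_TYPE_INVISIBLE and RESET_TYPE_ALL enum values: the loop retries only when a reset is pending and every pending bit is one of those two recoverable types.)

static bool efx_probe_should_retry(unsigned long reset_pending)
{
	unsigned long recoverable = (1UL << RESET_TYPE_INVISIBLE) |
				    (1UL << RESET_TYPE_ALL);

	/* Retry only if something is pending and no bit outside the
	 * recoverable set is pending; otherwise the probe gives up. */
	return reset_pending && !(reset_pending & ~recoverable);
}
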
drivers/net/sfc/io.h
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2010 Solarflare Communications Inc. | 4 | * Copyright 2006-2010 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef EFX_IO_H | 11 | #ifndef EFX_IO_H |
12 | #define EFX_IO_H | 12 | #define EFX_IO_H |
13 | 13 | ||
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | 16 | ||
17 | /************************************************************************** | 17 | /************************************************************************** |
18 | * | 18 | * |
19 | * NIC register I/O | 19 | * NIC register I/O |
20 | * | 20 | * |
21 | ************************************************************************** | 21 | ************************************************************************** |
22 | * | 22 | * |
23 | * Notes on locking strategy: | 23 | * Notes on locking strategy: |
24 | * | 24 | * |
25 | * Most CSRs are 128-bit (oword) and therefore cannot be read or | 25 | * Most CSRs are 128-bit (oword) and therefore cannot be read or |
26 | * written atomically. Access from the host is buffered by the Bus | 26 | * written atomically. Access from the host is buffered by the Bus |
27 | * Interface Unit (BIU). Whenever the host reads from the lowest | 27 | * Interface Unit (BIU). Whenever the host reads from the lowest |
28 | * address of such a register, or from the address of a different such | 28 | * address of such a register, or from the address of a different such |
29 | * register, the BIU latches the register's value. Subsequent reads | 29 | * register, the BIU latches the register's value. Subsequent reads |
30 | * from higher addresses of the same register will read the latched | 30 | * from higher addresses of the same register will read the latched |
31 | * value. Whenever the host writes part of such a register, the BIU | 31 | * value. Whenever the host writes part of such a register, the BIU |
32 | * collects the written value and does not write to the underlying | 32 | * collects the written value and does not write to the underlying |
33 | * register until all 4 dwords have been written. A similar buffering | 33 | * register until all 4 dwords have been written. A similar buffering |
34 | * scheme applies to host access to the NIC's 64-bit SRAM. | 34 | * scheme applies to host access to the NIC's 64-bit SRAM. |
35 | * | 35 | * |
36 | * Access to different CSRs and 64-bit SRAM words must be serialised, | 36 | * Access to different CSRs and 64-bit SRAM words must be serialised, |
37 | * since interleaved access can result in lost writes or lost | 37 | * since interleaved access can result in lost writes or lost |
38 | * information from read-to-clear fields. We use efx_nic::biu_lock | 38 | * information from read-to-clear fields. We use efx_nic::biu_lock |
39 | * for this. (We could use separate locks for read and write, but | 39 | * for this. (We could use separate locks for read and write, but |
40 | * this is not normally a performance bottleneck.) | 40 | * this is not normally a performance bottleneck.) |
41 | * | 41 | * |
42 | * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are | 42 | * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are |
43 | * 128-bit but are special-cased in the BIU to avoid the need for | 43 | * 128-bit but are special-cased in the BIU to avoid the need for |
44 | * locking in the host: | 44 | * locking in the host: |
45 | * | 45 | * |
46 | * - They are write-only. | 46 | * - They are write-only. |
47 | * - The semantics of writing to these registers are such that | 47 | * - The semantics of writing to these registers are such that |
48 | * replacing the low 96 bits with zero does not affect functionality. | 48 | * replacing the low 96 bits with zero does not affect functionality. |
49 | * - If the host writes to the last dword address of such a register | 49 | * - If the host writes to the last dword address of such a register |
50 | * (i.e. the high 32 bits) the underlying register will always be | 50 | * (i.e. the high 32 bits) the underlying register will always be |
51 | * written. If the collector and the current write together do not | 51 | * written. If the collector does not hold values for the low 96 |
52 | * provide values for all 128 bits of the register, the low 96 bits | 52 | * bits of the register, they will be written as zero. Writing to |
53 | * will be written as zero. | 53 | * the last qword does not have this effect and must not be done. |
54 | * - If the host writes to the address of any other part of such a | 54 | * - If the host writes to the address of any other part of such a |
55 | * register while the collector already holds values for some other | 55 | * register while the collector already holds values for some other |
56 | * register, the write is discarded and the collector maintains its | 56 | * register, the write is discarded and the collector maintains its |
57 | * current state. | 57 | * current state. |
58 | */ | 58 | */ |
59 | 59 | ||
60 | #if BITS_PER_LONG == 64 | 60 | #if BITS_PER_LONG == 64 |
61 | #define EFX_USE_QWORD_IO 1 | 61 | #define EFX_USE_QWORD_IO 1 |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #ifdef EFX_USE_QWORD_IO | 64 | #ifdef EFX_USE_QWORD_IO |
65 | static inline void _efx_writeq(struct efx_nic *efx, __le64 value, | 65 | static inline void _efx_writeq(struct efx_nic *efx, __le64 value, |
66 | unsigned int reg) | 66 | unsigned int reg) |
67 | { | 67 | { |
68 | __raw_writeq((__force u64)value, efx->membase + reg); | 68 | __raw_writeq((__force u64)value, efx->membase + reg); |
69 | } | 69 | } |
70 | static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg) | 70 | static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg) |
71 | { | 71 | { |
72 | return (__force __le64)__raw_readq(efx->membase + reg); | 72 | return (__force __le64)__raw_readq(efx->membase + reg); |
73 | } | 73 | } |
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | static inline void _efx_writed(struct efx_nic *efx, __le32 value, | 76 | static inline void _efx_writed(struct efx_nic *efx, __le32 value, |
77 | unsigned int reg) | 77 | unsigned int reg) |
78 | { | 78 | { |
79 | __raw_writel((__force u32)value, efx->membase + reg); | 79 | __raw_writel((__force u32)value, efx->membase + reg); |
80 | } | 80 | } |
81 | static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg) | 81 | static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg) |
82 | { | 82 | { |
83 | return (__force __le32)__raw_readl(efx->membase + reg); | 83 | return (__force __le32)__raw_readl(efx->membase + reg); |
84 | } | 84 | } |
85 | 85 | ||
86 | /* Write a normal 128-bit CSR, locking as appropriate. */ | 86 | /* Write a normal 128-bit CSR, locking as appropriate. */ |
87 | static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, | 87 | static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, |
88 | unsigned int reg) | 88 | unsigned int reg) |
89 | { | 89 | { |
90 | unsigned long flags __attribute__ ((unused)); | 90 | unsigned long flags __attribute__ ((unused)); |
91 | 91 | ||
92 | netif_vdbg(efx, hw, efx->net_dev, | 92 | netif_vdbg(efx, hw, efx->net_dev, |
93 | "writing register %x with " EFX_OWORD_FMT "\n", reg, | 93 | "writing register %x with " EFX_OWORD_FMT "\n", reg, |
94 | EFX_OWORD_VAL(*value)); | 94 | EFX_OWORD_VAL(*value)); |
95 | 95 | ||
96 | spin_lock_irqsave(&efx->biu_lock, flags); | 96 | spin_lock_irqsave(&efx->biu_lock, flags); |
97 | #ifdef EFX_USE_QWORD_IO | 97 | #ifdef EFX_USE_QWORD_IO |
98 | _efx_writeq(efx, value->u64[0], reg + 0); | 98 | _efx_writeq(efx, value->u64[0], reg + 0); |
99 | _efx_writeq(efx, value->u64[1], reg + 8); | 99 | _efx_writeq(efx, value->u64[1], reg + 8); |
100 | #else | 100 | #else |
101 | _efx_writed(efx, value->u32[0], reg + 0); | 101 | _efx_writed(efx, value->u32[0], reg + 0); |
102 | _efx_writed(efx, value->u32[1], reg + 4); | 102 | _efx_writed(efx, value->u32[1], reg + 4); |
103 | _efx_writed(efx, value->u32[2], reg + 8); | 103 | _efx_writed(efx, value->u32[2], reg + 8); |
104 | _efx_writed(efx, value->u32[3], reg + 12); | 104 | _efx_writed(efx, value->u32[3], reg + 12); |
105 | #endif | 105 | #endif |
106 | wmb(); | ||
107 | mmiowb(); | 106 | mmiowb(); |
108 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 107 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
109 | } | 108 | } |
110 | 109 | ||
111 | /* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */ | 110 | /* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */ |
112 | static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, | 111 | static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, |
113 | efx_qword_t *value, unsigned int index) | 112 | efx_qword_t *value, unsigned int index) |
114 | { | 113 | { |
115 | unsigned int addr = index * sizeof(*value); | 114 | unsigned int addr = index * sizeof(*value); |
116 | unsigned long flags __attribute__ ((unused)); | 115 | unsigned long flags __attribute__ ((unused)); |
117 | 116 | ||
118 | netif_vdbg(efx, hw, efx->net_dev, | 117 | netif_vdbg(efx, hw, efx->net_dev, |
119 | "writing SRAM address %x with " EFX_QWORD_FMT "\n", | 118 | "writing SRAM address %x with " EFX_QWORD_FMT "\n", |
120 | addr, EFX_QWORD_VAL(*value)); | 119 | addr, EFX_QWORD_VAL(*value)); |
121 | 120 | ||
122 | spin_lock_irqsave(&efx->biu_lock, flags); | 121 | spin_lock_irqsave(&efx->biu_lock, flags); |
123 | #ifdef EFX_USE_QWORD_IO | 122 | #ifdef EFX_USE_QWORD_IO |
124 | __raw_writeq((__force u64)value->u64[0], membase + addr); | 123 | __raw_writeq((__force u64)value->u64[0], membase + addr); |
125 | #else | 124 | #else |
126 | __raw_writel((__force u32)value->u32[0], membase + addr); | 125 | __raw_writel((__force u32)value->u32[0], membase + addr); |
127 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); | 126 | __raw_writel((__force u32)value->u32[1], membase + addr + 4); |
128 | #endif | 127 | #endif |
129 | wmb(); | ||
130 | mmiowb(); | 128 | mmiowb(); |
131 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 129 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
132 | } | 130 | } |
133 | 131 | ||
134 | /* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ | 132 | /* Write a 32-bit CSR or the last dword of a special 128-bit CSR */ |
135 | static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, | 133 | static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, |
136 | unsigned int reg) | 134 | unsigned int reg) |
137 | { | 135 | { |
138 | netif_vdbg(efx, hw, efx->net_dev, | 136 | netif_vdbg(efx, hw, efx->net_dev, |
139 | "writing register %x with "EFX_DWORD_FMT"\n", | 137 | "writing register %x with "EFX_DWORD_FMT"\n", |
140 | reg, EFX_DWORD_VAL(*value)); | 138 | reg, EFX_DWORD_VAL(*value)); |
141 | 139 | ||
142 | /* No lock required */ | 140 | /* No lock required */ |
143 | _efx_writed(efx, value->u32[0], reg); | 141 | _efx_writed(efx, value->u32[0], reg); |
144 | wmb(); | ||
145 | } | 142 | } |
146 | 143 | ||
147 | /* Read a 128-bit CSR, locking as appropriate. */ | 144 | /* Read a 128-bit CSR, locking as appropriate. */ |
148 | static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, | 145 | static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, |
149 | unsigned int reg) | 146 | unsigned int reg) |
150 | { | 147 | { |
151 | unsigned long flags __attribute__ ((unused)); | 148 | unsigned long flags __attribute__ ((unused)); |
152 | 149 | ||
153 | spin_lock_irqsave(&efx->biu_lock, flags); | 150 | spin_lock_irqsave(&efx->biu_lock, flags); |
154 | value->u32[0] = _efx_readd(efx, reg + 0); | 151 | value->u32[0] = _efx_readd(efx, reg + 0); |
155 | rmb(); | ||
156 | value->u32[1] = _efx_readd(efx, reg + 4); | 152 | value->u32[1] = _efx_readd(efx, reg + 4); |
157 | value->u32[2] = _efx_readd(efx, reg + 8); | 153 | value->u32[2] = _efx_readd(efx, reg + 8); |
158 | value->u32[3] = _efx_readd(efx, reg + 12); | 154 | value->u32[3] = _efx_readd(efx, reg + 12); |
159 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 155 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
160 | 156 | ||
161 | netif_vdbg(efx, hw, efx->net_dev, | 157 | netif_vdbg(efx, hw, efx->net_dev, |
162 | "read from register %x, got " EFX_OWORD_FMT "\n", reg, | 158 | "read from register %x, got " EFX_OWORD_FMT "\n", reg, |
163 | EFX_OWORD_VAL(*value)); | 159 | EFX_OWORD_VAL(*value)); |
164 | } | 160 | } |
165 | 161 | ||
166 | /* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */ | 162 | /* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */ |
167 | static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, | 163 | static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, |
168 | efx_qword_t *value, unsigned int index) | 164 | efx_qword_t *value, unsigned int index) |
169 | { | 165 | { |
170 | unsigned int addr = index * sizeof(*value); | 166 | unsigned int addr = index * sizeof(*value); |
171 | unsigned long flags __attribute__ ((unused)); | 167 | unsigned long flags __attribute__ ((unused)); |
172 | 168 | ||
173 | spin_lock_irqsave(&efx->biu_lock, flags); | 169 | spin_lock_irqsave(&efx->biu_lock, flags); |
174 | #ifdef EFX_USE_QWORD_IO | 170 | #ifdef EFX_USE_QWORD_IO |
175 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); | 171 | value->u64[0] = (__force __le64)__raw_readq(membase + addr); |
176 | #else | 172 | #else |
177 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); | 173 | value->u32[0] = (__force __le32)__raw_readl(membase + addr); |
178 | rmb(); | ||
179 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); | 174 | value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); |
180 | #endif | 175 | #endif |
181 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 176 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
182 | 177 | ||
183 | netif_vdbg(efx, hw, efx->net_dev, | 178 | netif_vdbg(efx, hw, efx->net_dev, |
184 | "read from SRAM address %x, got "EFX_QWORD_FMT"\n", | 179 | "read from SRAM address %x, got "EFX_QWORD_FMT"\n", |
185 | addr, EFX_QWORD_VAL(*value)); | 180 | addr, EFX_QWORD_VAL(*value)); |
186 | } | 181 | } |
187 | 182 | ||
188 | /* Read a 32-bit CSR or SRAM */ | 183 | /* Read a 32-bit CSR or SRAM */ |
189 | static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, | 184 | static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, |
190 | unsigned int reg) | 185 | unsigned int reg) |
191 | { | 186 | { |
192 | value->u32[0] = _efx_readd(efx, reg); | 187 | value->u32[0] = _efx_readd(efx, reg); |
193 | netif_vdbg(efx, hw, efx->net_dev, | 188 | netif_vdbg(efx, hw, efx->net_dev, |
194 | "read from register %x, got "EFX_DWORD_FMT"\n", | 189 | "read from register %x, got "EFX_DWORD_FMT"\n", |
195 | reg, EFX_DWORD_VAL(*value)); | 190 | reg, EFX_DWORD_VAL(*value)); |
196 | } | 191 | } |
197 | 192 | ||
198 | /* Write a 128-bit CSR forming part of a table */ | 193 | /* Write a 128-bit CSR forming part of a table */ |
199 | static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, | 194 | static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, |
200 | unsigned int reg, unsigned int index) | 195 | unsigned int reg, unsigned int index) |
201 | { | 196 | { |
202 | efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); | 197 | efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); |
203 | } | 198 | } |
204 | 199 | ||
205 | /* Read a 128-bit CSR forming part of a table */ | 200 | /* Read a 128-bit CSR forming part of a table */ |
206 | static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, | 201 | static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, |
207 | unsigned int reg, unsigned int index) | 202 | unsigned int reg, unsigned int index) |
208 | { | 203 | { |
209 | efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); | 204 | efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); |
210 | } | 205 | } |
211 | 206 | ||
212 | /* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */ | 207 | /* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */ |
213 | static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, | 208 | static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, |
214 | unsigned int reg, unsigned int index) | 209 | unsigned int reg, unsigned int index) |
215 | { | 210 | { |
216 | efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); | 211 | efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); |
217 | } | 212 | } |
218 | 213 | ||
219 | /* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */ | 214 | /* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */ |
220 | static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, | 215 | static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, |
221 | unsigned int reg, unsigned int index) | 216 | unsigned int reg, unsigned int index) |
222 | { | 217 | { |
223 | efx_readd(efx, value, reg + index * sizeof(efx_dword_t)); | 218 | efx_readd(efx, value, reg + index * sizeof(efx_dword_t)); |
224 | } | 219 | } |
225 | 220 | ||
226 | /* Page-mapped register block size */ | 221 | /* Page-mapped register block size */ |
227 | #define EFX_PAGE_BLOCK_SIZE 0x2000 | 222 | #define EFX_PAGE_BLOCK_SIZE 0x2000 |
228 | 223 | ||
229 | /* Calculate offset to page-mapped register block */ | 224 | /* Calculate offset to page-mapped register block */ |
230 | #define EFX_PAGED_REG(page, reg) \ | 225 | #define EFX_PAGED_REG(page, reg) \ |
231 | ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) | 226 | ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) |
232 | 227 | ||
233 | /* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ | 228 | /* Write the whole of RX_DESC_UPD or TX_DESC_UPD */ |
234 | static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, | 229 | static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, |
235 | unsigned int reg, unsigned int page) | 230 | unsigned int reg, unsigned int page) |
236 | { | 231 | { |
237 | reg = EFX_PAGED_REG(page, reg); | 232 | reg = EFX_PAGED_REG(page, reg); |
238 | 233 | ||
239 | netif_vdbg(efx, hw, efx->net_dev, | 234 | netif_vdbg(efx, hw, efx->net_dev, |
240 | "writing register %x with " EFX_OWORD_FMT "\n", reg, | 235 | "writing register %x with " EFX_OWORD_FMT "\n", reg, |
241 | EFX_OWORD_VAL(*value)); | 236 | EFX_OWORD_VAL(*value)); |
242 | 237 | ||
243 | #ifdef EFX_USE_QWORD_IO | 238 | #ifdef EFX_USE_QWORD_IO |
244 | _efx_writeq(efx, value->u64[0], reg + 0); | 239 | _efx_writeq(efx, value->u64[0], reg + 0); |
245 | _efx_writeq(efx, value->u64[1], reg + 8); | ||
246 | #else | 240 | #else |
247 | _efx_writed(efx, value->u32[0], reg + 0); | 241 | _efx_writed(efx, value->u32[0], reg + 0); |
248 | _efx_writed(efx, value->u32[1], reg + 4); | 242 | _efx_writed(efx, value->u32[1], reg + 4); |
243 | #endif | ||
249 | _efx_writed(efx, value->u32[2], reg + 8); | 244 | _efx_writed(efx, value->u32[2], reg + 8); |
250 | _efx_writed(efx, value->u32[3], reg + 12); | 245 | _efx_writed(efx, value->u32[3], reg + 12); |
251 | #endif | ||
252 | wmb(); | ||
253 | } | 246 | } |
254 | #define efx_writeo_page(efx, value, reg, page) \ | 247 | #define efx_writeo_page(efx, value, reg, page) \ |
255 | _efx_writeo_page(efx, value, \ | 248 | _efx_writeo_page(efx, value, \ |
256 | reg + \ | 249 | reg + \ |
257 | BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ | 250 | BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \ |
258 | page) | 251 | page) |
259 | 252 | ||
260 | /* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of | 253 | /* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of |
261 | * RX_DESC_UPD or TX_DESC_UPD) | 254 | * RX_DESC_UPD or TX_DESC_UPD) |
262 | */ | 255 | */ |
263 | static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value, | 256 | static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value, |
264 | unsigned int reg, unsigned int page) | 257 | unsigned int reg, unsigned int page) |
265 | { | 258 | { |
266 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); | 259 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); |
267 | } | 260 | } |
268 | #define efx_writed_page(efx, value, reg, page) \ | 261 | #define efx_writed_page(efx, value, reg, page) \ |
269 | _efx_writed_page(efx, value, \ | 262 | _efx_writed_page(efx, value, \ |
270 | reg + \ | 263 | reg + \ |
271 | BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \ | 264 | BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \ |
272 | && (reg) != 0xa1c), \ | 265 | && (reg) != 0xa1c), \ |
273 | page) | 266 | page) |
274 | 267 | ||
275 | /* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug | 268 | /* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug |
276 | * in the BIU means that writes to TIMER_COMMAND[0] invalidate the | 269 | * in the BIU means that writes to TIMER_COMMAND[0] invalidate the |
277 | * collector register. | 270 | * collector register. |
278 | */ | 271 | */ |
279 | static inline void _efx_writed_page_locked(struct efx_nic *efx, | 272 | static inline void _efx_writed_page_locked(struct efx_nic *efx, |
280 | efx_dword_t *value, | 273 | efx_dword_t *value, |
281 | unsigned int reg, | 274 | unsigned int reg, |
282 | unsigned int page) | 275 | unsigned int page) |
283 | { | 276 | { |
284 | unsigned long flags __attribute__ ((unused)); | 277 | unsigned long flags __attribute__ ((unused)); |
285 | 278 | ||
286 | if (page == 0) { | 279 | if (page == 0) { |
287 | spin_lock_irqsave(&efx->biu_lock, flags); | 280 | spin_lock_irqsave(&efx->biu_lock, flags); |
288 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); | 281 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); |
289 | spin_unlock_irqrestore(&efx->biu_lock, flags); | 282 | spin_unlock_irqrestore(&efx->biu_lock, flags); |
290 | } else { | 283 | } else { |
291 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); | 284 | efx_writed(efx, value, EFX_PAGED_REG(page, reg)); |
292 | } | 285 | } |
293 | } | 286 | } |
294 | #define efx_writed_page_locked(efx, value, reg, page) \ | 287 | #define efx_writed_page_locked(efx, value, reg, page) \ |
295 | _efx_writed_page_locked(efx, value, \ | 288 | _efx_writed_page_locked(efx, value, \ |
296 | reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \ | 289 | reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \ |
297 | page) | 290 | page) |
298 | 291 | ||
299 | #endif /* EFX_IO_H */ | 292 | #endif /* EFX_IO_H */ |
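(Aside, not part of the patch: the wmb() calls restored in io.h above follow the usual MMIO ordering pattern — finish the component writes, then issue a write barrier so the device observes them before any later write it treats as a trigger. A short illustrative sketch of that pattern follows; the function and register names are hypothetical, not the driver's API.)

#include <linux/types.h>
#include <linux/io.h>

static void push_and_ring(void __iomem *payload, void __iomem *doorbell,
			  const __le32 *data, unsigned int dwords)
{
	unsigned int i;

	/* Component writes first ... */
	for (i = 0; i < dwords; i++)
		__raw_writel((__force u32)data[i], payload + 4 * i);

	/* ... then a write barrier so they are ordered ahead of the
	 * doorbell write that tells the device the data is ready. */
	wmb();
	__raw_writel(1, doorbell);
}
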
drivers/net/sfc/mcdi.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2008-2011 Solarflare Communications Inc. | 3 | * Copyright 2008-2011 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
7 | * by the Free Software Foundation, incorporated herein by reference. | 7 | * by the Free Software Foundation, incorporated herein by reference. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
11 | #include "net_driver.h" | 11 | #include "net_driver.h" |
12 | #include "nic.h" | 12 | #include "nic.h" |
13 | #include "io.h" | 13 | #include "io.h" |
14 | #include "regs.h" | 14 | #include "regs.h" |
15 | #include "mcdi_pcol.h" | 15 | #include "mcdi_pcol.h" |
16 | #include "phy.h" | 16 | #include "phy.h" |
17 | 17 | ||
18 | /************************************************************************** | 18 | /************************************************************************** |
19 | * | 19 | * |
20 | * Management-Controller-to-Driver Interface | 20 | * Management-Controller-to-Driver Interface |
21 | * | 21 | * |
22 | ************************************************************************** | 22 | ************************************************************************** |
23 | */ | 23 | */ |
24 | 24 | ||
25 | /* Software-defined structure to the shared-memory */ | 25 | /* Software-defined structure to the shared-memory */ |
26 | #define CMD_NOTIFY_PORT0 0 | 26 | #define CMD_NOTIFY_PORT0 0 |
27 | #define CMD_NOTIFY_PORT1 4 | 27 | #define CMD_NOTIFY_PORT1 4 |
28 | #define CMD_PDU_PORT0 0x008 | 28 | #define CMD_PDU_PORT0 0x008 |
29 | #define CMD_PDU_PORT1 0x108 | 29 | #define CMD_PDU_PORT1 0x108 |
30 | #define REBOOT_FLAG_PORT0 0x3f8 | 30 | #define REBOOT_FLAG_PORT0 0x3f8 |
31 | #define REBOOT_FLAG_PORT1 0x3fc | 31 | #define REBOOT_FLAG_PORT1 0x3fc |
32 | 32 | ||
33 | #define MCDI_RPC_TIMEOUT 10 /*seconds */ | 33 | #define MCDI_RPC_TIMEOUT 10 /*seconds */ |
34 | 34 | ||
35 | #define MCDI_PDU(efx) \ | 35 | #define MCDI_PDU(efx) \ |
36 | (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) | 36 | (efx_port_num(efx) ? CMD_PDU_PORT1 : CMD_PDU_PORT0) |
37 | #define MCDI_DOORBELL(efx) \ | 37 | #define MCDI_DOORBELL(efx) \ |
38 | (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) | 38 | (efx_port_num(efx) ? CMD_NOTIFY_PORT1 : CMD_NOTIFY_PORT0) |
39 | #define MCDI_REBOOT_FLAG(efx) \ | 39 | #define MCDI_REBOOT_FLAG(efx) \ |
40 | (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) | 40 | (efx_port_num(efx) ? REBOOT_FLAG_PORT1 : REBOOT_FLAG_PORT0) |
41 | 41 | ||
42 | #define SEQ_MASK \ | 42 | #define SEQ_MASK \ |
43 | EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) | 43 | EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ)) |
44 | 44 | ||
45 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) | 45 | static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx) |
46 | { | 46 | { |
47 | struct siena_nic_data *nic_data; | 47 | struct siena_nic_data *nic_data; |
48 | EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | 48 | EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0); |
49 | nic_data = efx->nic_data; | 49 | nic_data = efx->nic_data; |
50 | return &nic_data->mcdi; | 50 | return &nic_data->mcdi; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline void | ||
54 | efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg) | ||
55 | { | ||
56 | struct siena_nic_data *nic_data = efx->nic_data; | ||
57 | value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg); | ||
58 | } | ||
59 | |||
60 | static inline void | ||
61 | efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg) | ||
62 | { | ||
63 | struct siena_nic_data *nic_data = efx->nic_data; | ||
64 | __raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg); | ||
65 | } | ||
66 | |||
67 | void efx_mcdi_init(struct efx_nic *efx) | 53 | void efx_mcdi_init(struct efx_nic *efx) |
68 | { | 54 | { |
69 | struct efx_mcdi_iface *mcdi; | 55 | struct efx_mcdi_iface *mcdi; |
70 | 56 | ||
71 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 57 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
72 | return; | 58 | return; |
73 | 59 | ||
74 | mcdi = efx_mcdi(efx); | 60 | mcdi = efx_mcdi(efx); |
75 | init_waitqueue_head(&mcdi->wq); | 61 | init_waitqueue_head(&mcdi->wq); |
76 | spin_lock_init(&mcdi->iface_lock); | 62 | spin_lock_init(&mcdi->iface_lock); |
77 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | 63 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); |
78 | mcdi->mode = MCDI_MODE_POLL; | 64 | mcdi->mode = MCDI_MODE_POLL; |
79 | 65 | ||
80 | (void) efx_mcdi_poll_reboot(efx); | 66 | (void) efx_mcdi_poll_reboot(efx); |
81 | } | 67 | } |
82 | 68 | ||
83 | static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, | 69 | static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd, |
84 | const u8 *inbuf, size_t inlen) | 70 | const u8 *inbuf, size_t inlen) |
85 | { | 71 | { |
86 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 72 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
87 | unsigned pdu = MCDI_PDU(efx); | 73 | unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
88 | unsigned doorbell = MCDI_DOORBELL(efx); | 74 | unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx); |
89 | unsigned int i; | 75 | unsigned int i; |
90 | efx_dword_t hdr; | 76 | efx_dword_t hdr; |
91 | u32 xflags, seqno; | 77 | u32 xflags, seqno; |
92 | 78 | ||
93 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | 79 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); |
94 | BUG_ON(inlen & 3 || inlen >= 0x100); | 80 | BUG_ON(inlen & 3 || inlen >= 0x100); |
95 | 81 | ||
96 | seqno = mcdi->seqno & SEQ_MASK; | 82 | seqno = mcdi->seqno & SEQ_MASK; |
97 | xflags = 0; | 83 | xflags = 0; |
98 | if (mcdi->mode == MCDI_MODE_EVENTS) | 84 | if (mcdi->mode == MCDI_MODE_EVENTS) |
99 | xflags |= MCDI_HEADER_XFLAGS_EVREQ; | 85 | xflags |= MCDI_HEADER_XFLAGS_EVREQ; |
100 | 86 | ||
101 | EFX_POPULATE_DWORD_6(hdr, | 87 | EFX_POPULATE_DWORD_6(hdr, |
102 | MCDI_HEADER_RESPONSE, 0, | 88 | MCDI_HEADER_RESPONSE, 0, |
103 | MCDI_HEADER_RESYNC, 1, | 89 | MCDI_HEADER_RESYNC, 1, |
104 | MCDI_HEADER_CODE, cmd, | 90 | MCDI_HEADER_CODE, cmd, |
105 | MCDI_HEADER_DATALEN, inlen, | 91 | MCDI_HEADER_DATALEN, inlen, |
106 | MCDI_HEADER_SEQ, seqno, | 92 | MCDI_HEADER_SEQ, seqno, |
107 | MCDI_HEADER_XFLAGS, xflags); | 93 | MCDI_HEADER_XFLAGS, xflags); |
108 | 94 | ||
109 | efx_mcdi_writed(efx, &hdr, pdu); | 95 | efx_writed(efx, &hdr, pdu); |
110 | 96 | ||
111 | for (i = 0; i < inlen; i += 4) | 97 | for (i = 0; i < inlen; i += 4) |
112 | efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i), | 98 | _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i); |
113 | pdu + 4 + i); | ||
114 | 99 | ||
100 | /* Ensure the payload is written out before the header */ | ||
101 | wmb(); | ||
102 | |||
115 | /* ring the doorbell with a distinctive value */ | 103 | /* ring the doorbell with a distinctive value */ |
116 | EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc); | 104 | _efx_writed(efx, (__force __le32) 0x45789abc, doorbell); |
117 | efx_mcdi_writed(efx, &hdr, doorbell); | ||
118 | } | 105 | } |
119 | 106 | ||
120 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) | 107 | static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen) |
121 | { | 108 | { |
122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 109 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
123 | unsigned int pdu = MCDI_PDU(efx); | 110 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
124 | int i; | 111 | int i; |
125 | 112 | ||
126 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); | 113 | BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT); |
127 | BUG_ON(outlen & 3 || outlen >= 0x100); | 114 | BUG_ON(outlen & 3 || outlen >= 0x100); |
128 | 115 | ||
129 | for (i = 0; i < outlen; i += 4) | 116 | for (i = 0; i < outlen; i += 4) |
130 | efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i); | 117 | *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i); |
131 | } | 118 | } |
132 | 119 | ||
133 | static int efx_mcdi_poll(struct efx_nic *efx) | 120 | static int efx_mcdi_poll(struct efx_nic *efx) |
134 | { | 121 | { |
135 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 122 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
136 | unsigned int time, finish; | 123 | unsigned int time, finish; |
137 | unsigned int respseq, respcmd, error; | 124 | unsigned int respseq, respcmd, error; |
138 | unsigned int pdu = MCDI_PDU(efx); | 125 | unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx); |
139 | unsigned int rc, spins; | 126 | unsigned int rc, spins; |
140 | efx_dword_t reg; | 127 | efx_dword_t reg; |
141 | 128 | ||
142 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ | 129 | /* Check for a reboot atomically with respect to efx_mcdi_copyout() */ |
143 | rc = -efx_mcdi_poll_reboot(efx); | 130 | rc = -efx_mcdi_poll_reboot(efx); |
144 | if (rc) | 131 | if (rc) |
145 | goto out; | 132 | goto out; |
146 | 133 | ||
147 | /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, | 134 | /* Poll for completion. Poll quickly (once a us) for the 1st jiffy, |
148 | * because generally mcdi responses are fast. After that, back off | 135 | * because generally mcdi responses are fast. After that, back off |
149 | * and poll once a jiffy (approximately) | 136 | * and poll once a jiffy (approximately) |
150 | */ | 137 | */ |
151 | spins = TICK_USEC; | 138 | spins = TICK_USEC; |
152 | finish = get_seconds() + MCDI_RPC_TIMEOUT; | 139 | finish = get_seconds() + MCDI_RPC_TIMEOUT; |
153 | 140 | ||
154 | while (1) { | 141 | while (1) { |
155 | if (spins != 0) { | 142 | if (spins != 0) { |
156 | --spins; | 143 | --spins; |
157 | udelay(1); | 144 | udelay(1); |
158 | } else { | 145 | } else { |
159 | schedule_timeout_uninterruptible(1); | 146 | schedule_timeout_uninterruptible(1); |
160 | } | 147 | } |
161 | 148 | ||
162 | time = get_seconds(); | 149 | time = get_seconds(); |
163 | 150 | ||
164 | efx_mcdi_readd(efx, ®, pdu); | 151 | rmb(); |
152 | efx_readd(efx, ®, pdu); | ||
165 | 153 | ||
166 | /* All 1's indicates that shared memory is in reset (and is | 154 | /* All 1's indicates that shared memory is in reset (and is |
167 | * not a valid header). Wait for it to come out of reset before | 155 | * not a valid header). Wait for it to come out of reset before |
168 | * completing the command */ | 156 | * completing the command */ |
169 | if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && | 157 | if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff && |
170 | EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) | 158 | EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE)) |
171 | break; | 159 | break; |
172 | 160 | ||
173 | if (time >= finish) | 161 | if (time >= finish) |
174 | return -ETIMEDOUT; | 162 | return -ETIMEDOUT; |
175 | } | 163 | } |
176 | 164 | ||
177 | mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); | 165 | mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN); |
178 | respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); | 166 | respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ); |
179 | respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); | 167 | respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE); |
180 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); | 168 | error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR); |
181 | 169 | ||
182 | if (error && mcdi->resplen == 0) { | 170 | if (error && mcdi->resplen == 0) { |
183 | netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); | 171 | netif_err(efx, hw, efx->net_dev, "MC rebooted\n"); |
184 | rc = EIO; | 172 | rc = EIO; |
185 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { | 173 | } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) { |
186 | netif_err(efx, hw, efx->net_dev, | 174 | netif_err(efx, hw, efx->net_dev, |
187 | "MC response mismatch tx seq 0x%x rx seq 0x%x\n", | 175 | "MC response mismatch tx seq 0x%x rx seq 0x%x\n", |
188 | respseq, mcdi->seqno); | 176 | respseq, mcdi->seqno); |
189 | rc = EIO; | 177 | rc = EIO; |
190 | } else if (error) { | 178 | } else if (error) { |
191 | efx_mcdi_readd(efx, ®, pdu + 4); | 179 | efx_readd(efx, ®, pdu + 4); |
192 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { | 180 | switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) { |
193 | #define TRANSLATE_ERROR(name) \ | 181 | #define TRANSLATE_ERROR(name) \ |
194 | case MC_CMD_ERR_ ## name: \ | 182 | case MC_CMD_ERR_ ## name: \ |
195 | rc = name; \ | 183 | rc = name; \ |
196 | break | 184 | break |
197 | TRANSLATE_ERROR(ENOENT); | 185 | TRANSLATE_ERROR(ENOENT); |
198 | TRANSLATE_ERROR(EINTR); | 186 | TRANSLATE_ERROR(EINTR); |
199 | TRANSLATE_ERROR(EACCES); | 187 | TRANSLATE_ERROR(EACCES); |
200 | TRANSLATE_ERROR(EBUSY); | 188 | TRANSLATE_ERROR(EBUSY); |
201 | TRANSLATE_ERROR(EINVAL); | 189 | TRANSLATE_ERROR(EINVAL); |
202 | TRANSLATE_ERROR(EDEADLK); | 190 | TRANSLATE_ERROR(EDEADLK); |
203 | TRANSLATE_ERROR(ENOSYS); | 191 | TRANSLATE_ERROR(ENOSYS); |
204 | TRANSLATE_ERROR(ETIME); | 192 | TRANSLATE_ERROR(ETIME); |
205 | #undef TRANSLATE_ERROR | 193 | #undef TRANSLATE_ERROR |
206 | default: | 194 | default: |
207 | rc = EIO; | 195 | rc = EIO; |
208 | break; | 196 | break; |
209 | } | 197 | } |
210 | } else | 198 | } else |
211 | rc = 0; | 199 | rc = 0; |
212 | 200 | ||
213 | out: | 201 | out: |
214 | mcdi->resprc = rc; | 202 | mcdi->resprc = rc; |
215 | if (rc) | 203 | if (rc) |
216 | mcdi->resplen = 0; | 204 | mcdi->resplen = 0; |
217 | 205 | ||
218 | /* Return rc=0 like wait_event_timeout() */ | 206 | /* Return rc=0 like wait_event_timeout() */ |
219 | return 0; | 207 | return 0; |
220 | } | 208 | } |
221 | 209 | ||
222 | /* Test and clear MC-rebooted flag for this port/function */ | 210 | /* Test and clear MC-rebooted flag for this port/function */ |
223 | int efx_mcdi_poll_reboot(struct efx_nic *efx) | 211 | int efx_mcdi_poll_reboot(struct efx_nic *efx) |
224 | { | 212 | { |
225 | unsigned int addr = MCDI_REBOOT_FLAG(efx); | 213 | unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx); |
226 | efx_dword_t reg; | 214 | efx_dword_t reg; |
227 | uint32_t value; | 215 | uint32_t value; |
228 | 216 | ||
229 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 217 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
230 | return false; | 218 | return false; |
231 | 219 | ||
232 | efx_mcdi_readd(efx, ®, addr); | 220 | efx_readd(efx, ®, addr); |
233 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); | 221 | value = EFX_DWORD_FIELD(reg, EFX_DWORD_0); |
234 | 222 | ||
235 | if (value == 0) | 223 | if (value == 0) |
236 | return 0; | 224 | return 0; |
237 | 225 | ||
238 | EFX_ZERO_DWORD(reg); | 226 | EFX_ZERO_DWORD(reg); |
239 | efx_mcdi_writed(efx, ®, addr); | 227 | efx_writed(efx, ®, addr); |
240 | 228 | ||
241 | if (value == MC_STATUS_DWORD_ASSERT) | 229 | if (value == MC_STATUS_DWORD_ASSERT) |
242 | return -EINTR; | 230 | return -EINTR; |
243 | else | 231 | else |
244 | return -EIO; | 232 | return -EIO; |
245 | } | 233 | } |
246 | 234 | ||
247 | static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) | 235 | static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi) |
248 | { | 236 | { |
249 | /* Wait until the interface becomes QUIESCENT and we win the race | 237 | /* Wait until the interface becomes QUIESCENT and we win the race |
250 | * to mark it RUNNING. */ | 238 | * to mark it RUNNING. */ |
251 | wait_event(mcdi->wq, | 239 | wait_event(mcdi->wq, |
252 | atomic_cmpxchg(&mcdi->state, | 240 | atomic_cmpxchg(&mcdi->state, |
253 | MCDI_STATE_QUIESCENT, | 241 | MCDI_STATE_QUIESCENT, |
254 | MCDI_STATE_RUNNING) | 242 | MCDI_STATE_RUNNING) |
255 | == MCDI_STATE_QUIESCENT); | 243 | == MCDI_STATE_QUIESCENT); |
256 | } | 244 | } |
257 | 245 | ||
258 | static int efx_mcdi_await_completion(struct efx_nic *efx) | 246 | static int efx_mcdi_await_completion(struct efx_nic *efx) |
259 | { | 247 | { |
260 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 248 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
261 | 249 | ||
262 | if (wait_event_timeout( | 250 | if (wait_event_timeout( |
263 | mcdi->wq, | 251 | mcdi->wq, |
264 | atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, | 252 | atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED, |
265 | msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) | 253 | msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0) |
266 | return -ETIMEDOUT; | 254 | return -ETIMEDOUT; |
267 | 255 | ||
268 | /* Check if efx_mcdi_set_mode() switched us back to polled completions. | 256 | /* Check if efx_mcdi_set_mode() switched us back to polled completions. |
269 | * In which case, poll for completions directly. If efx_mcdi_ev_cpl() | 257 | * In which case, poll for completions directly. If efx_mcdi_ev_cpl() |
270 | * completed the request first, then we'll just end up completing the | 258 | * completed the request first, then we'll just end up completing the |
271 | * request again, which is safe. | 259 | * request again, which is safe. |
272 | * | 260 | * |
273 | * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which | 261 | * We need an smp_rmb() to synchronise with efx_mcdi_mode_poll(), which |
274 | * wait_event_timeout() implicitly provides. | 262 | * wait_event_timeout() implicitly provides. |
275 | */ | 263 | */ |
276 | if (mcdi->mode == MCDI_MODE_POLL) | 264 | if (mcdi->mode == MCDI_MODE_POLL) |
277 | return efx_mcdi_poll(efx); | 265 | return efx_mcdi_poll(efx); |
278 | 266 | ||
279 | return 0; | 267 | return 0; |
280 | } | 268 | } |
281 | 269 | ||
282 | static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) | 270 | static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi) |
283 | { | 271 | { |
284 | /* If the interface is RUNNING, then move to COMPLETED and wake any | 272 | /* If the interface is RUNNING, then move to COMPLETED and wake any |
285 | * waiters. If the interface isn't in RUNNING then we've received a | 273 | * waiters. If the interface isn't in RUNNING then we've received a |
286 | * duplicate completion after we've already transitioned back to | 274 | * duplicate completion after we've already transitioned back to |
287 | * QUIESCENT. [A subsequent invocation would increment seqno, so would | 275 | * QUIESCENT. [A subsequent invocation would increment seqno, so would |
288 | * have failed the seqno check]. | 276 | * have failed the seqno check]. |
289 | */ | 277 | */ |
290 | if (atomic_cmpxchg(&mcdi->state, | 278 | if (atomic_cmpxchg(&mcdi->state, |
291 | MCDI_STATE_RUNNING, | 279 | MCDI_STATE_RUNNING, |
292 | MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { | 280 | MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) { |
293 | wake_up(&mcdi->wq); | 281 | wake_up(&mcdi->wq); |
294 | return true; | 282 | return true; |
295 | } | 283 | } |
296 | 284 | ||
297 | return false; | 285 | return false; |
298 | } | 286 | } |
299 | 287 | ||
300 | static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) | 288 | static void efx_mcdi_release(struct efx_mcdi_iface *mcdi) |
301 | { | 289 | { |
302 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); | 290 | atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT); |
303 | wake_up(&mcdi->wq); | 291 | wake_up(&mcdi->wq); |
304 | } | 292 | } |
305 | 293 | ||
306 | static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, | 294 | static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno, |
307 | unsigned int datalen, unsigned int errno) | 295 | unsigned int datalen, unsigned int errno) |
308 | { | 296 | { |
309 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 297 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
310 | bool wake = false; | 298 | bool wake = false; |
311 | 299 | ||
312 | spin_lock(&mcdi->iface_lock); | 300 | spin_lock(&mcdi->iface_lock); |
313 | 301 | ||
314 | if ((seqno ^ mcdi->seqno) & SEQ_MASK) { | 302 | if ((seqno ^ mcdi->seqno) & SEQ_MASK) { |
315 | if (mcdi->credits) | 303 | if (mcdi->credits) |
316 | /* The request has been cancelled */ | 304 | /* The request has been cancelled */ |
317 | --mcdi->credits; | 305 | --mcdi->credits; |
318 | else | 306 | else |
319 | netif_err(efx, hw, efx->net_dev, | 307 | netif_err(efx, hw, efx->net_dev, |
320 | "MC response mismatch tx seq 0x%x rx " | 308 | "MC response mismatch tx seq 0x%x rx " |
321 | "seq 0x%x\n", seqno, mcdi->seqno); | 309 | "seq 0x%x\n", seqno, mcdi->seqno); |
322 | } else { | 310 | } else { |
323 | mcdi->resprc = errno; | 311 | mcdi->resprc = errno; |
324 | mcdi->resplen = datalen; | 312 | mcdi->resplen = datalen; |
325 | 313 | ||
326 | wake = true; | 314 | wake = true; |
327 | } | 315 | } |
328 | 316 | ||
329 | spin_unlock(&mcdi->iface_lock); | 317 | spin_unlock(&mcdi->iface_lock); |
330 | 318 | ||
331 | if (wake) | 319 | if (wake) |
332 | efx_mcdi_complete(mcdi); | 320 | efx_mcdi_complete(mcdi); |
333 | } | 321 | } |
334 | 322 | ||
335 | /* Issue the given command by writing the data into the shared memory PDU, | 323 | /* Issue the given command by writing the data into the shared memory PDU, |
336 | * ring the doorbell and wait for completion. Copyout the result. */ | 324 | * ring the doorbell and wait for completion. Copyout the result. */ |
337 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, | 325 | int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, |
338 | const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, | 326 | const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen, |
339 | size_t *outlen_actual) | 327 | size_t *outlen_actual) |
340 | { | 328 | { |
341 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 329 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
342 | int rc; | 330 | int rc; |
343 | BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); | 331 | BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0); |
344 | 332 | ||
345 | efx_mcdi_acquire(mcdi); | 333 | efx_mcdi_acquire(mcdi); |
346 | 334 | ||
347 | /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ | 335 | /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */ |
348 | spin_lock_bh(&mcdi->iface_lock); | 336 | spin_lock_bh(&mcdi->iface_lock); |
349 | ++mcdi->seqno; | 337 | ++mcdi->seqno; |
350 | spin_unlock_bh(&mcdi->iface_lock); | 338 | spin_unlock_bh(&mcdi->iface_lock); |
351 | 339 | ||
352 | efx_mcdi_copyin(efx, cmd, inbuf, inlen); | 340 | efx_mcdi_copyin(efx, cmd, inbuf, inlen); |
353 | 341 | ||
354 | if (mcdi->mode == MCDI_MODE_POLL) | 342 | if (mcdi->mode == MCDI_MODE_POLL) |
355 | rc = efx_mcdi_poll(efx); | 343 | rc = efx_mcdi_poll(efx); |
356 | else | 344 | else |
357 | rc = efx_mcdi_await_completion(efx); | 345 | rc = efx_mcdi_await_completion(efx); |
358 | 346 | ||
359 | if (rc != 0) { | 347 | if (rc != 0) { |
360 | /* Close the race with efx_mcdi_ev_cpl() executing just too late | 348 | /* Close the race with efx_mcdi_ev_cpl() executing just too late |
361 | * and completing a request we've just cancelled, by ensuring | 349 | * and completing a request we've just cancelled, by ensuring |
362 | * that the seqno check therein fails. | 350 | * that the seqno check therein fails. |
363 | */ | 351 | */ |
364 | spin_lock_bh(&mcdi->iface_lock); | 352 | spin_lock_bh(&mcdi->iface_lock); |
365 | ++mcdi->seqno; | 353 | ++mcdi->seqno; |
366 | ++mcdi->credits; | 354 | ++mcdi->credits; |
367 | spin_unlock_bh(&mcdi->iface_lock); | 355 | spin_unlock_bh(&mcdi->iface_lock); |
368 | 356 | ||
369 | netif_err(efx, hw, efx->net_dev, | 357 | netif_err(efx, hw, efx->net_dev, |
370 | "MC command 0x%x inlen %d mode %d timed out\n", | 358 | "MC command 0x%x inlen %d mode %d timed out\n", |
371 | cmd, (int)inlen, mcdi->mode); | 359 | cmd, (int)inlen, mcdi->mode); |
372 | } else { | 360 | } else { |
373 | size_t resplen; | 361 | size_t resplen; |
374 | 362 | ||
375 | /* At the very least we need a memory barrier here to ensure | 363 | /* At the very least we need a memory barrier here to ensure |
376 | * we pick up changes from efx_mcdi_ev_cpl(). Protect against | 364 | * we pick up changes from efx_mcdi_ev_cpl(). Protect against |
377 | * a spurious efx_mcdi_ev_cpl() running concurrently by | 365 | * a spurious efx_mcdi_ev_cpl() running concurrently by |
378 | * acquiring the iface_lock. */ | 366 | * acquiring the iface_lock. */ |
379 | spin_lock_bh(&mcdi->iface_lock); | 367 | spin_lock_bh(&mcdi->iface_lock); |
380 | rc = -mcdi->resprc; | 368 | rc = -mcdi->resprc; |
381 | resplen = mcdi->resplen; | 369 | resplen = mcdi->resplen; |
382 | spin_unlock_bh(&mcdi->iface_lock); | 370 | spin_unlock_bh(&mcdi->iface_lock); |
383 | 371 | ||
384 | if (rc == 0) { | 372 | if (rc == 0) { |
385 | efx_mcdi_copyout(efx, outbuf, | 373 | efx_mcdi_copyout(efx, outbuf, |
386 | min(outlen, mcdi->resplen + 3) & ~0x3); | 374 | min(outlen, mcdi->resplen + 3) & ~0x3); |
387 | if (outlen_actual != NULL) | 375 | if (outlen_actual != NULL) |
388 | *outlen_actual = resplen; | 376 | *outlen_actual = resplen; |
389 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) | 377 | } else if (cmd == MC_CMD_REBOOT && rc == -EIO) |
390 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ | 378 | ; /* Don't reset if MC_CMD_REBOOT returns EIO */ |
391 | else if (rc == -EIO || rc == -EINTR) { | 379 | else if (rc == -EIO || rc == -EINTR) { |
392 | netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", | 380 | netif_err(efx, hw, efx->net_dev, "MC fatal error %d\n", |
393 | -rc); | 381 | -rc); |
394 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | 382 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); |
395 | } else | 383 | } else |
396 | netif_dbg(efx, hw, efx->net_dev, | 384 | netif_dbg(efx, hw, efx->net_dev, |
397 | "MC command 0x%x inlen %d failed rc=%d\n", | 385 | "MC command 0x%x inlen %d failed rc=%d\n", |
398 | cmd, (int)inlen, -rc); | 386 | cmd, (int)inlen, -rc); |
399 | } | 387 | } |
400 | 388 | ||
401 | efx_mcdi_release(mcdi); | 389 | efx_mcdi_release(mcdi); |
402 | return rc; | 390 | return rc; |
403 | } | 391 | } |
404 | 392 | ||
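The timeout branch of efx_mcdi_rpc() above leans on the sequence check in efx_mcdi_ev_cpl(): after the driver bumps mcdi->seqno and mcdi->credits, a late CMDDONE event still carries the old sequence number, fails the XOR-and-mask comparison, and is consumed as a cancelled request rather than completing anything. A stand-alone illustration of that comparison (the SEQ_MASK value below is assumed for the example, not taken from the driver):

    #include <stdio.h>

    #define SEQ_MASK 0xf    /* assumed field width; the driver defines its own */

    int main(void)
    {
            unsigned int event_seqno = 7;   /* seqno the request was sent with */
            unsigned int driver_seqno = 8;  /* bumped after the timeout */

            if ((event_seqno ^ driver_seqno) & SEQ_MASK)
                    printf("mismatch: late completion treated as cancelled\n");
            else
                    printf("match: completion accepted\n");
            return 0;
    }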
405 | void efx_mcdi_mode_poll(struct efx_nic *efx) | 393 | void efx_mcdi_mode_poll(struct efx_nic *efx) |
406 | { | 394 | { |
407 | struct efx_mcdi_iface *mcdi; | 395 | struct efx_mcdi_iface *mcdi; |
408 | 396 | ||
409 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 397 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
410 | return; | 398 | return; |
411 | 399 | ||
412 | mcdi = efx_mcdi(efx); | 400 | mcdi = efx_mcdi(efx); |
413 | if (mcdi->mode == MCDI_MODE_POLL) | 401 | if (mcdi->mode == MCDI_MODE_POLL) |
414 | return; | 402 | return; |
415 | 403 | ||
416 | /* We can switch from event completion to polled completion, because | 404 | /* We can switch from event completion to polled completion, because |
417 | * mcdi requests are always completed in shared memory. We do this by | 405 | * mcdi requests are always completed in shared memory. We do this by |
418 | * switching the mode to POLL'd then completing the request. | 406 | * switching the mode to POLL'd then completing the request. |
419 | * efx_mcdi_await_completion() will then call efx_mcdi_poll(). | 407 | * efx_mcdi_await_completion() will then call efx_mcdi_poll(). |
420 | * | 408 | * |
421 | * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), | 409 | * We need an smp_wmb() to synchronise with efx_mcdi_await_completion(), |
422 | * which efx_mcdi_complete() provides for us. | 410 | * which efx_mcdi_complete() provides for us. |
423 | */ | 411 | */ |
424 | mcdi->mode = MCDI_MODE_POLL; | 412 | mcdi->mode = MCDI_MODE_POLL; |
425 | 413 | ||
426 | efx_mcdi_complete(mcdi); | 414 | efx_mcdi_complete(mcdi); |
427 | } | 415 | } |
428 | 416 | ||
429 | void efx_mcdi_mode_event(struct efx_nic *efx) | 417 | void efx_mcdi_mode_event(struct efx_nic *efx) |
430 | { | 418 | { |
431 | struct efx_mcdi_iface *mcdi; | 419 | struct efx_mcdi_iface *mcdi; |
432 | 420 | ||
433 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) | 421 | if (efx_nic_rev(efx) < EFX_REV_SIENA_A0) |
434 | return; | 422 | return; |
435 | 423 | ||
436 | mcdi = efx_mcdi(efx); | 424 | mcdi = efx_mcdi(efx); |
437 | 425 | ||
438 | if (mcdi->mode == MCDI_MODE_EVENTS) | 426 | if (mcdi->mode == MCDI_MODE_EVENTS) |
439 | return; | 427 | return; |
440 | 428 | ||
441 | /* We can't switch from polled to event completion in the middle of a | 429 | /* We can't switch from polled to event completion in the middle of a |
442 | * request, because the completion method is specified in the request. | 430 | * request, because the completion method is specified in the request. |
443 | * So acquire the interface to serialise the requestors. We don't need | 431 | * So acquire the interface to serialise the requestors. We don't need |
444 | * to acquire the iface_lock to change the mode here, but we do need a | 432 | * to acquire the iface_lock to change the mode here, but we do need a |
445 | * write memory barrier to ensure that efx_mcdi_rpc() sees it, which | 433 | * write memory barrier to ensure that efx_mcdi_rpc() sees it, which |
446 | * efx_mcdi_acquire() provides. | 434 | * efx_mcdi_acquire() provides. |
447 | */ | 435 | */ |
448 | efx_mcdi_acquire(mcdi); | 436 | efx_mcdi_acquire(mcdi); |
449 | mcdi->mode = MCDI_MODE_EVENTS; | 437 | mcdi->mode = MCDI_MODE_EVENTS; |
450 | efx_mcdi_release(mcdi); | 438 | efx_mcdi_release(mcdi); |
451 | } | 439 | } |
452 | 440 | ||
453 | static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) | 441 | static void efx_mcdi_ev_death(struct efx_nic *efx, int rc) |
454 | { | 442 | { |
455 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); | 443 | struct efx_mcdi_iface *mcdi = efx_mcdi(efx); |
456 | 444 | ||
457 | /* If there is an outstanding MCDI request, it has been terminated | 445 | /* If there is an outstanding MCDI request, it has been terminated |
458 | * either by a BADASSERT or REBOOT event. If the mcdi interface is | 446 | * either by a BADASSERT or REBOOT event. If the mcdi interface is |
459 | * in polled mode, then do nothing because the MC reboot handler will | 447 | * in polled mode, then do nothing because the MC reboot handler will |
460 | * set the header correctly. However, if the mcdi interface is waiting | 448 | * set the header correctly. However, if the mcdi interface is waiting |
461 | * for a CMDDONE event it won't receive it [and since all MCDI events | 449 | * for a CMDDONE event it won't receive it [and since all MCDI events |
462 | * are sent to the same queue, we can't be racing with | 450 | * are sent to the same queue, we can't be racing with |
463 | * efx_mcdi_ev_cpl()] | 451 | * efx_mcdi_ev_cpl()] |
464 | * | 452 | * |
465 | * There's a race here with efx_mcdi_rpc(), because we might receive | 453 | * There's a race here with efx_mcdi_rpc(), because we might receive |
466 | * a REBOOT event *before* the request has been copied out. In polled | 454 | * a REBOOT event *before* the request has been copied out. In polled |
467 | * mode (during startup) this is irrelevant, because efx_mcdi_complete() | 455 | * mode (during startup) this is irrelevant, because efx_mcdi_complete() |
468 | * is ignored. In event mode, this condition is just an edge-case of | 456 | * is ignored. In event mode, this condition is just an edge-case of |
469 | * receiving a REBOOT event after posting the MCDI request. Did the mc | 457 | * receiving a REBOOT event after posting the MCDI request. Did the mc |
470 | * reboot before or after the copyout? The best we can always do is | 458 | * reboot before or after the copyout? The best we can always do is |
471 | * just return failure. | 459 | * just return failure. |
472 | */ | 460 | */ |
473 | spin_lock(&mcdi->iface_lock); | 461 | spin_lock(&mcdi->iface_lock); |
474 | if (efx_mcdi_complete(mcdi)) { | 462 | if (efx_mcdi_complete(mcdi)) { |
475 | if (mcdi->mode == MCDI_MODE_EVENTS) { | 463 | if (mcdi->mode == MCDI_MODE_EVENTS) { |
476 | mcdi->resprc = rc; | 464 | mcdi->resprc = rc; |
477 | mcdi->resplen = 0; | 465 | mcdi->resplen = 0; |
478 | ++mcdi->credits; | 466 | ++mcdi->credits; |
479 | } | 467 | } |
480 | } else | 468 | } else |
481 | /* Nobody was waiting for an MCDI request, so trigger a reset */ | 469 | /* Nobody was waiting for an MCDI request, so trigger a reset */ |
482 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); | 470 | efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); |
483 | 471 | ||
484 | spin_unlock(&mcdi->iface_lock); | 472 | spin_unlock(&mcdi->iface_lock); |
485 | } | 473 | } |
486 | 474 | ||
487 | static unsigned int efx_mcdi_event_link_speed[] = { | 475 | static unsigned int efx_mcdi_event_link_speed[] = { |
488 | [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, | 476 | [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100, |
489 | [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, | 477 | [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000, |
490 | [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, | 478 | [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000, |
491 | }; | 479 | }; |
492 | 480 | ||
493 | 481 | ||
494 | static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) | 482 | static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev) |
495 | { | 483 | { |
496 | u32 flags, fcntl, speed, lpa; | 484 | u32 flags, fcntl, speed, lpa; |
497 | 485 | ||
498 | speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); | 486 | speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED); |
499 | EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); | 487 | EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed)); |
500 | speed = efx_mcdi_event_link_speed[speed]; | 488 | speed = efx_mcdi_event_link_speed[speed]; |
501 | 489 | ||
502 | flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); | 490 | flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS); |
503 | fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); | 491 | fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL); |
504 | lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); | 492 | lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP); |
505 | 493 | ||
506 | /* efx->link_state is only modified by efx_mcdi_phy_get_link(), | 494 | /* efx->link_state is only modified by efx_mcdi_phy_get_link(), |
507 | * which is only run after flushing the event queues. Therefore, it | 495 | * which is only run after flushing the event queues. Therefore, it |
508 | * is safe to modify the link state outside of the mac_lock here. | 496 | * is safe to modify the link state outside of the mac_lock here. |
509 | */ | 497 | */ |
510 | efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); | 498 | efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl); |
511 | 499 | ||
512 | efx_mcdi_phy_check_fcntl(efx, lpa); | 500 | efx_mcdi_phy_check_fcntl(efx, lpa); |
513 | 501 | ||
514 | efx_link_status_changed(efx); | 502 | efx_link_status_changed(efx); |
515 | } | 503 | } |
516 | 504 | ||
517 | static const char *sensor_names[] = { | 505 | static const char *sensor_names[] = { |
518 | [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor", | 506 | [MC_CMD_SENSOR_CONTROLLER_TEMP] = "Controller temp. sensor", |
519 | [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor", | 507 | [MC_CMD_SENSOR_PHY_COMMON_TEMP] = "PHY shared temp. sensor", |
520 | [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling", | 508 | [MC_CMD_SENSOR_CONTROLLER_COOLING] = "Controller cooling", |
521 | [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor", | 509 | [MC_CMD_SENSOR_PHY0_TEMP] = "PHY 0 temp. sensor", |
522 | [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling", | 510 | [MC_CMD_SENSOR_PHY0_COOLING] = "PHY 0 cooling", |
523 | [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor", | 511 | [MC_CMD_SENSOR_PHY1_TEMP] = "PHY 1 temp. sensor", |
524 | [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling", | 512 | [MC_CMD_SENSOR_PHY1_COOLING] = "PHY 1 cooling", |
525 | [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor", | 513 | [MC_CMD_SENSOR_IN_1V0] = "1.0V supply sensor", |
526 | [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor", | 514 | [MC_CMD_SENSOR_IN_1V2] = "1.2V supply sensor", |
527 | [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor", | 515 | [MC_CMD_SENSOR_IN_1V8] = "1.8V supply sensor", |
528 | [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor", | 516 | [MC_CMD_SENSOR_IN_2V5] = "2.5V supply sensor", |
529 | [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor", | 517 | [MC_CMD_SENSOR_IN_3V3] = "3.3V supply sensor", |
530 | [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor" | 518 | [MC_CMD_SENSOR_IN_12V0] = "12V supply sensor" |
531 | }; | 519 | }; |
532 | 520 | ||
533 | static const char *sensor_status_names[] = { | 521 | static const char *sensor_status_names[] = { |
534 | [MC_CMD_SENSOR_STATE_OK] = "OK", | 522 | [MC_CMD_SENSOR_STATE_OK] = "OK", |
535 | [MC_CMD_SENSOR_STATE_WARNING] = "Warning", | 523 | [MC_CMD_SENSOR_STATE_WARNING] = "Warning", |
536 | [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", | 524 | [MC_CMD_SENSOR_STATE_FATAL] = "Fatal", |
537 | [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", | 525 | [MC_CMD_SENSOR_STATE_BROKEN] = "Device failure", |
538 | }; | 526 | }; |
539 | 527 | ||
540 | static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) | 528 | static void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev) |
541 | { | 529 | { |
542 | unsigned int monitor, state, value; | 530 | unsigned int monitor, state, value; |
543 | const char *name, *state_txt; | 531 | const char *name, *state_txt; |
544 | monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); | 532 | monitor = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR); |
545 | state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); | 533 | state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE); |
546 | value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); | 534 | value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE); |
547 | /* Deal gracefully with the board having more drivers than we | 535 | /* Deal gracefully with the board having more drivers than we |
548 | * know about, but do not expect new sensor states. */ | 536 | * know about, but do not expect new sensor states. */ |
549 | name = (monitor >= ARRAY_SIZE(sensor_names)) | 537 | name = (monitor >= ARRAY_SIZE(sensor_names)) |
550 | ? "No sensor name available" : | 538 | ? "No sensor name available" : |
551 | sensor_names[monitor]; | 539 | sensor_names[monitor]; |
552 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); | 540 | EFX_BUG_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names)); |
553 | state_txt = sensor_status_names[state]; | 541 | state_txt = sensor_status_names[state]; |
554 | 542 | ||
555 | netif_err(efx, hw, efx->net_dev, | 543 | netif_err(efx, hw, efx->net_dev, |
556 | "Sensor %d (%s) reports condition '%s' for raw value %d\n", | 544 | "Sensor %d (%s) reports condition '%s' for raw value %d\n", |
557 | monitor, name, state_txt, value); | 545 | monitor, name, state_txt, value); |
558 | } | 546 | } |
559 | 547 | ||
560 | /* Called from falcon_process_eventq for MCDI events */ | 548 | /* Called from falcon_process_eventq for MCDI events */ |
561 | void efx_mcdi_process_event(struct efx_channel *channel, | 549 | void efx_mcdi_process_event(struct efx_channel *channel, |
562 | efx_qword_t *event) | 550 | efx_qword_t *event) |
563 | { | 551 | { |
564 | struct efx_nic *efx = channel->efx; | 552 | struct efx_nic *efx = channel->efx; |
565 | int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); | 553 | int code = EFX_QWORD_FIELD(*event, MCDI_EVENT_CODE); |
566 | u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); | 554 | u32 data = EFX_QWORD_FIELD(*event, MCDI_EVENT_DATA); |
567 | 555 | ||
568 | switch (code) { | 556 | switch (code) { |
569 | case MCDI_EVENT_CODE_BADSSERT: | 557 | case MCDI_EVENT_CODE_BADSSERT: |
570 | netif_err(efx, hw, efx->net_dev, | 558 | netif_err(efx, hw, efx->net_dev, |
571 | "MC watchdog or assertion failure at 0x%x\n", data); | 559 | "MC watchdog or assertion failure at 0x%x\n", data); |
572 | efx_mcdi_ev_death(efx, EINTR); | 560 | efx_mcdi_ev_death(efx, EINTR); |
573 | break; | 561 | break; |
574 | 562 | ||
575 | case MCDI_EVENT_CODE_PMNOTICE: | 563 | case MCDI_EVENT_CODE_PMNOTICE: |
576 | netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); | 564 | netif_info(efx, wol, efx->net_dev, "MCDI PM event.\n"); |
577 | break; | 565 | break; |
578 | 566 | ||
579 | case MCDI_EVENT_CODE_CMDDONE: | 567 | case MCDI_EVENT_CODE_CMDDONE: |
580 | efx_mcdi_ev_cpl(efx, | 568 | efx_mcdi_ev_cpl(efx, |
581 | MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), | 569 | MCDI_EVENT_FIELD(*event, CMDDONE_SEQ), |
582 | MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), | 570 | MCDI_EVENT_FIELD(*event, CMDDONE_DATALEN), |
583 | MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); | 571 | MCDI_EVENT_FIELD(*event, CMDDONE_ERRNO)); |
584 | break; | 572 | break; |
585 | 573 | ||
586 | case MCDI_EVENT_CODE_LINKCHANGE: | 574 | case MCDI_EVENT_CODE_LINKCHANGE: |
587 | efx_mcdi_process_link_change(efx, event); | 575 | efx_mcdi_process_link_change(efx, event); |
588 | break; | 576 | break; |
589 | case MCDI_EVENT_CODE_SENSOREVT: | 577 | case MCDI_EVENT_CODE_SENSOREVT: |
590 | efx_mcdi_sensor_event(efx, event); | 578 | efx_mcdi_sensor_event(efx, event); |
591 | break; | 579 | break; |
592 | case MCDI_EVENT_CODE_SCHEDERR: | 580 | case MCDI_EVENT_CODE_SCHEDERR: |
593 | netif_info(efx, hw, efx->net_dev, | 581 | netif_info(efx, hw, efx->net_dev, |
594 | "MC Scheduler error address=0x%x\n", data); | 582 | "MC Scheduler error address=0x%x\n", data); |
595 | break; | 583 | break; |
596 | case MCDI_EVENT_CODE_REBOOT: | 584 | case MCDI_EVENT_CODE_REBOOT: |
597 | netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); | 585 | netif_info(efx, hw, efx->net_dev, "MC Reboot\n"); |
598 | efx_mcdi_ev_death(efx, EIO); | 586 | efx_mcdi_ev_death(efx, EIO); |
599 | break; | 587 | break; |
600 | case MCDI_EVENT_CODE_MAC_STATS_DMA: | 588 | case MCDI_EVENT_CODE_MAC_STATS_DMA: |
601 | /* MAC stats are gathered lazily. We can ignore this. */ | 589 | /* MAC stats are gathered lazily. We can ignore this. */ |
602 | break; | 590 | break; |
603 | 591 | ||
604 | default: | 592 | default: |
605 | netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", | 593 | netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n", |
606 | code); | 594 | code); |
607 | } | 595 | } |
608 | } | 596 | } |
609 | 597 | ||
610 | /************************************************************************** | 598 | /************************************************************************** |
611 | * | 599 | * |
612 | * Specific request functions | 600 | * Specific request functions |
613 | * | 601 | * |
614 | ************************************************************************** | 602 | ************************************************************************** |
615 | */ | 603 | */ |
616 | 604 | ||
617 | void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) | 605 | void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len) |
618 | { | 606 | { |
619 | u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; | 607 | u8 outbuf[ALIGN(MC_CMD_GET_VERSION_V1_OUT_LEN, 4)]; |
620 | size_t outlength; | 608 | size_t outlength; |
621 | const __le16 *ver_words; | 609 | const __le16 *ver_words; |
622 | int rc; | 610 | int rc; |
623 | 611 | ||
624 | BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); | 612 | BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0); |
625 | 613 | ||
626 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, | 614 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0, |
627 | outbuf, sizeof(outbuf), &outlength); | 615 | outbuf, sizeof(outbuf), &outlength); |
628 | if (rc) | 616 | if (rc) |
629 | goto fail; | 617 | goto fail; |
630 | 618 | ||
631 | if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { | 619 | if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { |
632 | rc = -EIO; | 620 | rc = -EIO; |
633 | goto fail; | 621 | goto fail; |
634 | } | 622 | } |
635 | 623 | ||
636 | ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); | 624 | ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION); |
637 | snprintf(buf, len, "%u.%u.%u.%u", | 625 | snprintf(buf, len, "%u.%u.%u.%u", |
638 | le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), | 626 | le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]), |
639 | le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); | 627 | le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3])); |
640 | return; | 628 | return; |
641 | 629 | ||
642 | fail: | 630 | fail: |
643 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 631 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
644 | buf[0] = 0; | 632 | buf[0] = 0; |
645 | } | 633 | } |
646 | 634 | ||
647 | int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, | 635 | int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating, |
648 | bool *was_attached) | 636 | bool *was_attached) |
649 | { | 637 | { |
650 | u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; | 638 | u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN]; |
651 | u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; | 639 | u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN]; |
652 | size_t outlen; | 640 | size_t outlen; |
653 | int rc; | 641 | int rc; |
654 | 642 | ||
655 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, | 643 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE, |
656 | driver_operating ? 1 : 0); | 644 | driver_operating ? 1 : 0); |
657 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); | 645 | MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1); |
658 | 646 | ||
659 | rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), | 647 | rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf), |
660 | outbuf, sizeof(outbuf), &outlen); | 648 | outbuf, sizeof(outbuf), &outlen); |
661 | if (rc) | 649 | if (rc) |
662 | goto fail; | 650 | goto fail; |
663 | if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { | 651 | if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) { |
664 | rc = -EIO; | 652 | rc = -EIO; |
665 | goto fail; | 653 | goto fail; |
666 | } | 654 | } |
667 | 655 | ||
668 | if (was_attached != NULL) | 656 | if (was_attached != NULL) |
669 | *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); | 657 | *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); |
670 | return 0; | 658 | return 0; |
671 | 659 | ||
672 | fail: | 660 | fail: |
673 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 661 | netif_err(efx, probe, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
674 | return rc; | 662 | return rc; |
675 | } | 663 | } |
676 | 664 | ||
677 | int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, | 665 | int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address, |
678 | u16 *fw_subtype_list) | 666 | u16 *fw_subtype_list) |
679 | { | 667 | { |
680 | uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; | 668 | uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LEN]; |
681 | size_t outlen; | 669 | size_t outlen; |
682 | int port_num = efx_port_num(efx); | 670 | int port_num = efx_port_num(efx); |
683 | int offset; | 671 | int offset; |
684 | int rc; | 672 | int rc; |
685 | 673 | ||
686 | BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); | 674 | BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0); |
687 | 675 | ||
688 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, | 676 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_BOARD_CFG, NULL, 0, |
689 | outbuf, sizeof(outbuf), &outlen); | 677 | outbuf, sizeof(outbuf), &outlen); |
690 | if (rc) | 678 | if (rc) |
691 | goto fail; | 679 | goto fail; |
692 | 680 | ||
693 | if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { | 681 | if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { |
694 | rc = -EIO; | 682 | rc = -EIO; |
695 | goto fail; | 683 | goto fail; |
696 | } | 684 | } |
697 | 685 | ||
698 | offset = (port_num) | 686 | offset = (port_num) |
699 | ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST | 687 | ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST |
700 | : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; | 688 | : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST; |
701 | if (mac_address) | 689 | if (mac_address) |
702 | memcpy(mac_address, outbuf + offset, ETH_ALEN); | 690 | memcpy(mac_address, outbuf + offset, ETH_ALEN); |
703 | if (fw_subtype_list) | 691 | if (fw_subtype_list) |
704 | memcpy(fw_subtype_list, | 692 | memcpy(fw_subtype_list, |
705 | outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, | 693 | outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST, |
706 | MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); | 694 | MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN); |
707 | 695 | ||
708 | return 0; | 696 | return 0; |
709 | 697 | ||
710 | fail: | 698 | fail: |
711 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", | 699 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d len=%d\n", |
712 | __func__, rc, (int)outlen); | 700 | __func__, rc, (int)outlen); |
713 | 701 | ||
714 | return rc; | 702 | return rc; |
715 | } | 703 | } |
716 | 704 | ||
717 | int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) | 705 | int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq) |
718 | { | 706 | { |
719 | u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; | 707 | u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN]; |
720 | u32 dest = 0; | 708 | u32 dest = 0; |
721 | int rc; | 709 | int rc; |
722 | 710 | ||
723 | if (uart) | 711 | if (uart) |
724 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; | 712 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_UART; |
725 | if (evq) | 713 | if (evq) |
726 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; | 714 | dest |= MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ; |
727 | 715 | ||
728 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); | 716 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST, dest); |
729 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); | 717 | MCDI_SET_DWORD(inbuf, LOG_CTRL_IN_LOG_DEST_EVQ, dest_evq); |
730 | 718 | ||
731 | BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); | 719 | BUILD_BUG_ON(MC_CMD_LOG_CTRL_OUT_LEN != 0); |
732 | 720 | ||
733 | rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), | 721 | rc = efx_mcdi_rpc(efx, MC_CMD_LOG_CTRL, inbuf, sizeof(inbuf), |
734 | NULL, 0, NULL); | 722 | NULL, 0, NULL); |
735 | if (rc) | 723 | if (rc) |
736 | goto fail; | 724 | goto fail; |
737 | 725 | ||
738 | return 0; | 726 | return 0; |
739 | 727 | ||
740 | fail: | 728 | fail: |
741 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 729 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
742 | return rc; | 730 | return rc; |
743 | } | 731 | } |
744 | 732 | ||
745 | int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) | 733 | int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out) |
746 | { | 734 | { |
747 | u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; | 735 | u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN]; |
748 | size_t outlen; | 736 | size_t outlen; |
749 | int rc; | 737 | int rc; |
750 | 738 | ||
751 | BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); | 739 | BUILD_BUG_ON(MC_CMD_NVRAM_TYPES_IN_LEN != 0); |
752 | 740 | ||
753 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, | 741 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TYPES, NULL, 0, |
754 | outbuf, sizeof(outbuf), &outlen); | 742 | outbuf, sizeof(outbuf), &outlen); |
755 | if (rc) | 743 | if (rc) |
756 | goto fail; | 744 | goto fail; |
757 | if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { | 745 | if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) { |
758 | rc = -EIO; | 746 | rc = -EIO; |
759 | goto fail; | 747 | goto fail; |
760 | } | 748 | } |
761 | 749 | ||
762 | *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); | 750 | *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); |
763 | return 0; | 751 | return 0; |
764 | 752 | ||
765 | fail: | 753 | fail: |
766 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | 754 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
767 | __func__, rc); | 755 | __func__, rc); |
768 | return rc; | 756 | return rc; |
769 | } | 757 | } |
770 | 758 | ||
771 | int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, | 759 | int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type, |
772 | size_t *size_out, size_t *erase_size_out, | 760 | size_t *size_out, size_t *erase_size_out, |
773 | bool *protected_out) | 761 | bool *protected_out) |
774 | { | 762 | { |
775 | u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; | 763 | u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN]; |
776 | u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; | 764 | u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN]; |
777 | size_t outlen; | 765 | size_t outlen; |
778 | int rc; | 766 | int rc; |
779 | 767 | ||
780 | MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); | 768 | MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type); |
781 | 769 | ||
782 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), | 770 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf), |
783 | outbuf, sizeof(outbuf), &outlen); | 771 | outbuf, sizeof(outbuf), &outlen); |
784 | if (rc) | 772 | if (rc) |
785 | goto fail; | 773 | goto fail; |
786 | if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { | 774 | if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) { |
787 | rc = -EIO; | 775 | rc = -EIO; |
788 | goto fail; | 776 | goto fail; |
789 | } | 777 | } |
790 | 778 | ||
791 | *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); | 779 | *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); |
792 | *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); | 780 | *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); |
793 | *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & | 781 | *protected_out = !!(MCDI_DWORD(outbuf, NVRAM_INFO_OUT_FLAGS) & |
794 | (1 << MC_CMD_NVRAM_PROTECTED_LBN)); | 782 | (1 << MC_CMD_NVRAM_PROTECTED_LBN)); |
795 | return 0; | 783 | return 0; |
796 | 784 | ||
797 | fail: | 785 | fail: |
798 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 786 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
799 | return rc; | 787 | return rc; |
800 | } | 788 | } |
801 | 789 | ||
802 | int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) | 790 | int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type) |
803 | { | 791 | { |
804 | u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; | 792 | u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN]; |
805 | int rc; | 793 | int rc; |
806 | 794 | ||
807 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); | 795 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type); |
808 | 796 | ||
809 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); | 797 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0); |
810 | 798 | ||
811 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), | 799 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf), |
812 | NULL, 0, NULL); | 800 | NULL, 0, NULL); |
813 | if (rc) | 801 | if (rc) |
814 | goto fail; | 802 | goto fail; |
815 | 803 | ||
816 | return 0; | 804 | return 0; |
817 | 805 | ||
818 | fail: | 806 | fail: |
819 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 807 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
820 | return rc; | 808 | return rc; |
821 | } | 809 | } |
822 | 810 | ||
823 | int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, | 811 | int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type, |
824 | loff_t offset, u8 *buffer, size_t length) | 812 | loff_t offset, u8 *buffer, size_t length) |
825 | { | 813 | { |
826 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; | 814 | u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN]; |
827 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | 815 | u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; |
828 | size_t outlen; | 816 | size_t outlen; |
829 | int rc; | 817 | int rc; |
830 | 818 | ||
831 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); | 819 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type); |
832 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); | 820 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset); |
833 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); | 821 | MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length); |
834 | 822 | ||
835 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), | 823 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf), |
836 | outbuf, sizeof(outbuf), &outlen); | 824 | outbuf, sizeof(outbuf), &outlen); |
837 | if (rc) | 825 | if (rc) |
838 | goto fail; | 826 | goto fail; |
839 | 827 | ||
840 | memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); | 828 | memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length); |
841 | return 0; | 829 | return 0; |
842 | 830 | ||
843 | fail: | 831 | fail: |
844 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 832 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
845 | return rc; | 833 | return rc; |
846 | } | 834 | } |
847 | 835 | ||
848 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, | 836 | int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type, |
849 | loff_t offset, const u8 *buffer, size_t length) | 837 | loff_t offset, const u8 *buffer, size_t length) |
850 | { | 838 | { |
851 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; | 839 | u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)]; |
852 | int rc; | 840 | int rc; |
853 | 841 | ||
854 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); | 842 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type); |
855 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); | 843 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset); |
856 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); | 844 | MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length); |
857 | memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); | 845 | memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length); |
858 | 846 | ||
859 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); | 847 | BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0); |
860 | 848 | ||
861 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, | 849 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf, |
862 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), | 850 | ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4), |
863 | NULL, 0, NULL); | 851 | NULL, 0, NULL); |
864 | if (rc) | 852 | if (rc) |
865 | goto fail; | 853 | goto fail; |
866 | 854 | ||
867 | return 0; | 855 | return 0; |
868 | 856 | ||
869 | fail: | 857 | fail: |
870 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 858 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
871 | return rc; | 859 | return rc; |
872 | } | 860 | } |
873 | 861 | ||
874 | int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, | 862 | int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type, |
875 | loff_t offset, size_t length) | 863 | loff_t offset, size_t length) |
876 | { | 864 | { |
877 | u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; | 865 | u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN]; |
878 | int rc; | 866 | int rc; |
879 | 867 | ||
880 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); | 868 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type); |
881 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); | 869 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset); |
882 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); | 870 | MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length); |
883 | 871 | ||
884 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); | 872 | BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0); |
885 | 873 | ||
886 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), | 874 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf), |
887 | NULL, 0, NULL); | 875 | NULL, 0, NULL); |
888 | if (rc) | 876 | if (rc) |
889 | goto fail; | 877 | goto fail; |
890 | 878 | ||
891 | return 0; | 879 | return 0; |
892 | 880 | ||
893 | fail: | 881 | fail: |
894 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 882 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
895 | return rc; | 883 | return rc; |
896 | } | 884 | } |
897 | 885 | ||
898 | int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) | 886 | int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type) |
899 | { | 887 | { |
900 | u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; | 888 | u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN]; |
901 | int rc; | 889 | int rc; |
902 | 890 | ||
903 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); | 891 | MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type); |
904 | 892 | ||
905 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); | 893 | BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0); |
906 | 894 | ||
907 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), | 895 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf), |
908 | NULL, 0, NULL); | 896 | NULL, 0, NULL); |
909 | if (rc) | 897 | if (rc) |
910 | goto fail; | 898 | goto fail; |
911 | 899 | ||
912 | return 0; | 900 | return 0; |
913 | 901 | ||
914 | fail: | 902 | fail: |
915 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 903 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
916 | return rc; | 904 | return rc; |
917 | } | 905 | } |
918 | 906 | ||
919 | static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) | 907 | static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type) |
920 | { | 908 | { |
921 | u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; | 909 | u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN]; |
922 | u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; | 910 | u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN]; |
923 | int rc; | 911 | int rc; |
924 | 912 | ||
925 | MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); | 913 | MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type); |
926 | 914 | ||
927 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), | 915 | rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf), |
928 | outbuf, sizeof(outbuf), NULL); | 916 | outbuf, sizeof(outbuf), NULL); |
929 | if (rc) | 917 | if (rc) |
930 | return rc; | 918 | return rc; |
931 | 919 | ||
932 | switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { | 920 | switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) { |
933 | case MC_CMD_NVRAM_TEST_PASS: | 921 | case MC_CMD_NVRAM_TEST_PASS: |
934 | case MC_CMD_NVRAM_TEST_NOTSUPP: | 922 | case MC_CMD_NVRAM_TEST_NOTSUPP: |
935 | return 0; | 923 | return 0; |
936 | default: | 924 | default: |
937 | return -EIO; | 925 | return -EIO; |
938 | } | 926 | } |
939 | } | 927 | } |
940 | 928 | ||
941 | int efx_mcdi_nvram_test_all(struct efx_nic *efx) | 929 | int efx_mcdi_nvram_test_all(struct efx_nic *efx) |
942 | { | 930 | { |
943 | u32 nvram_types; | 931 | u32 nvram_types; |
944 | unsigned int type; | 932 | unsigned int type; |
945 | int rc; | 933 | int rc; |
946 | 934 | ||
947 | rc = efx_mcdi_nvram_types(efx, &nvram_types); | 935 | rc = efx_mcdi_nvram_types(efx, &nvram_types); |
948 | if (rc) | 936 | if (rc) |
949 | goto fail1; | 937 | goto fail1; |
950 | 938 | ||
951 | type = 0; | 939 | type = 0; |
952 | while (nvram_types != 0) { | 940 | while (nvram_types != 0) { |
953 | if (nvram_types & 1) { | 941 | if (nvram_types & 1) { |
954 | rc = efx_mcdi_nvram_test(efx, type); | 942 | rc = efx_mcdi_nvram_test(efx, type); |
955 | if (rc) | 943 | if (rc) |
956 | goto fail2; | 944 | goto fail2; |
957 | } | 945 | } |
958 | type++; | 946 | type++; |
959 | nvram_types >>= 1; | 947 | nvram_types >>= 1; |
960 | } | 948 | } |
961 | 949 | ||
962 | return 0; | 950 | return 0; |
963 | 951 | ||
964 | fail2: | 952 | fail2: |
965 | netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", | 953 | netif_err(efx, hw, efx->net_dev, "%s: failed type=%u\n", |
966 | __func__, type); | 954 | __func__, type); |
967 | fail1: | 955 | fail1: |
968 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 956 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
969 | return rc; | 957 | return rc; |
970 | } | 958 | } |
971 | 959 | ||
972 | static int efx_mcdi_read_assertion(struct efx_nic *efx) | 960 | static int efx_mcdi_read_assertion(struct efx_nic *efx) |
973 | { | 961 | { |
974 | u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; | 962 | u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN]; |
975 | u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; | 963 | u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN]; |
976 | unsigned int flags, index, ofst; | 964 | unsigned int flags, index, ofst; |
977 | const char *reason; | 965 | const char *reason; |
978 | size_t outlen; | 966 | size_t outlen; |
979 | int retry; | 967 | int retry; |
980 | int rc; | 968 | int rc; |
981 | 969 | ||
982 | /* Attempt to read any stored assertion state before we reboot | 970 | /* Attempt to read any stored assertion state before we reboot |
983 | * the mcfw out of the assertion handler. Retry twice, once | 971 | * the mcfw out of the assertion handler. Retry twice, once |
984 | * because a boot-time assertion might cause this command to fail | 972 | * because a boot-time assertion might cause this command to fail |
985 | * with EINTR. And once again because GET_ASSERTS can race with | 973 | * with EINTR. And once again because GET_ASSERTS can race with |
986 | * MC_CMD_REBOOT running on the other port. */ | 974 | * MC_CMD_REBOOT running on the other port. */ |
987 | retry = 2; | 975 | retry = 2; |
988 | do { | 976 | do { |
989 | MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); | 977 | MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1); |
990 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, | 978 | rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS, |
991 | inbuf, MC_CMD_GET_ASSERTS_IN_LEN, | 979 | inbuf, MC_CMD_GET_ASSERTS_IN_LEN, |
992 | outbuf, sizeof(outbuf), &outlen); | 980 | outbuf, sizeof(outbuf), &outlen); |
993 | } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); | 981 | } while ((rc == -EINTR || rc == -EIO) && retry-- > 0); |
994 | 982 | ||
995 | if (rc) | 983 | if (rc) |
996 | return rc; | 984 | return rc; |
997 | if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) | 985 | if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) |
998 | return -EIO; | 986 | return -EIO; |
999 | 987 | ||
1000 | /* Print out any recorded assertion state */ | 988 | /* Print out any recorded assertion state */ |
1001 | flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); | 989 | flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); |
1002 | if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) | 990 | if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) |
1003 | return 0; | 991 | return 0; |
1004 | 992 | ||
1005 | reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) | 993 | reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) |
1006 | ? "system-level assertion" | 994 | ? "system-level assertion" |
1007 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) | 995 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) |
1008 | ? "thread-level assertion" | 996 | ? "thread-level assertion" |
1009 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) | 997 | : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) |
1010 | ? "watchdog reset" | 998 | ? "watchdog reset" |
1011 | : "unknown assertion"; | 999 | : "unknown assertion"; |
1012 | netif_err(efx, hw, efx->net_dev, | 1000 | netif_err(efx, hw, efx->net_dev, |
1013 | "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, | 1001 | "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason, |
1014 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), | 1002 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS), |
1015 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); | 1003 | MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS)); |
1016 | 1004 | ||
1017 | /* Print out the registers */ | 1005 | /* Print out the registers */ |
1018 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; | 1006 | ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; |
1019 | for (index = 1; index < 32; index++) { | 1007 | for (index = 1; index < 32; index++) { |
1020 | netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, | 1008 | netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index, |
1021 | MCDI_DWORD2(outbuf, ofst)); | 1009 | MCDI_DWORD2(outbuf, ofst)); |
1022 | ofst += sizeof(efx_dword_t); | 1010 | ofst += sizeof(efx_dword_t); |
1023 | } | 1011 | } |
1024 | 1012 | ||
1025 | return 0; | 1013 | return 0; |
1026 | } | 1014 | } |
1027 | 1015 | ||
1028 | static void efx_mcdi_exit_assertion(struct efx_nic *efx) | 1016 | static void efx_mcdi_exit_assertion(struct efx_nic *efx) |
1029 | { | 1017 | { |
1030 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | 1018 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; |
1031 | 1019 | ||
1032 | /* Atomically reboot the mcfw out of the assertion handler */ | 1020 | /* Atomically reboot the mcfw out of the assertion handler */ |
1033 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | 1021 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); |
1034 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, | 1022 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, |
1035 | MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); | 1023 | MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION); |
1036 | efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, | 1024 | efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN, |
1037 | NULL, 0, NULL); | 1025 | NULL, 0, NULL); |
1038 | } | 1026 | } |
1039 | 1027 | ||
1040 | int efx_mcdi_handle_assertion(struct efx_nic *efx) | 1028 | int efx_mcdi_handle_assertion(struct efx_nic *efx) |
1041 | { | 1029 | { |
1042 | int rc; | 1030 | int rc; |
1043 | 1031 | ||
1044 | rc = efx_mcdi_read_assertion(efx); | 1032 | rc = efx_mcdi_read_assertion(efx); |
1045 | if (rc) | 1033 | if (rc) |
1046 | return rc; | 1034 | return rc; |
1047 | 1035 | ||
1048 | efx_mcdi_exit_assertion(efx); | 1036 | efx_mcdi_exit_assertion(efx); |
1049 | 1037 | ||
1050 | return 0; | 1038 | return 0; |
1051 | } | 1039 | } |
1052 | 1040 | ||
1053 | void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) | 1041 | void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) |
1054 | { | 1042 | { |
1055 | u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; | 1043 | u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN]; |
1056 | int rc; | 1044 | int rc; |
1057 | 1045 | ||
1058 | BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); | 1046 | BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF); |
1059 | BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); | 1047 | BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON); |
1060 | BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); | 1048 | BUILD_BUG_ON(EFX_LED_DEFAULT != MC_CMD_LED_DEFAULT); |
1061 | 1049 | ||
1062 | BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); | 1050 | BUILD_BUG_ON(MC_CMD_SET_ID_LED_OUT_LEN != 0); |
1063 | 1051 | ||
1064 | MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); | 1052 | MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode); |
1065 | 1053 | ||
1066 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), | 1054 | rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), |
1067 | NULL, 0, NULL); | 1055 | NULL, 0, NULL); |
1068 | if (rc) | 1056 | if (rc) |
1069 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | 1057 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
1070 | __func__, rc); | 1058 | __func__, rc); |
1071 | } | 1059 | } |
1072 | 1060 | ||
1073 | int efx_mcdi_reset_port(struct efx_nic *efx) | 1061 | int efx_mcdi_reset_port(struct efx_nic *efx) |
1074 | { | 1062 | { |
1075 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); | 1063 | int rc = efx_mcdi_rpc(efx, MC_CMD_PORT_RESET, NULL, 0, NULL, 0, NULL); |
1076 | if (rc) | 1064 | if (rc) |
1077 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", | 1065 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", |
1078 | __func__, rc); | 1066 | __func__, rc); |
1079 | return rc; | 1067 | return rc; |
1080 | } | 1068 | } |
1081 | 1069 | ||
1082 | int efx_mcdi_reset_mc(struct efx_nic *efx) | 1070 | int efx_mcdi_reset_mc(struct efx_nic *efx) |
1083 | { | 1071 | { |
1084 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; | 1072 | u8 inbuf[MC_CMD_REBOOT_IN_LEN]; |
1085 | int rc; | 1073 | int rc; |
1086 | 1074 | ||
1087 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); | 1075 | BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0); |
1088 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); | 1076 | MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS, 0); |
1089 | rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), | 1077 | rc = efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, sizeof(inbuf), |
1090 | NULL, 0, NULL); | 1078 | NULL, 0, NULL); |
1091 | /* White is black, and up is down */ | 1079 | /* White is black, and up is down */ |
1092 | if (rc == -EIO) | 1080 | if (rc == -EIO) |
1093 | return 0; | 1081 | return 0; |
1094 | if (rc == 0) | 1082 | if (rc == 0) |
1095 | rc = -EIO; | 1083 | rc = -EIO; |
1096 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1084 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1097 | return rc; | 1085 | return rc; |
1098 | } | 1086 | } |
1099 | 1087 | ||
1100 | static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, | 1088 | static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type, |
1101 | const u8 *mac, int *id_out) | 1089 | const u8 *mac, int *id_out) |
1102 | { | 1090 | { |
1103 | u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; | 1091 | u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN]; |
1104 | u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; | 1092 | u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN]; |
1105 | size_t outlen; | 1093 | size_t outlen; |
1106 | int rc; | 1094 | int rc; |
1107 | 1095 | ||
1108 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); | 1096 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_WOL_TYPE, type); |
1109 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, | 1097 | MCDI_SET_DWORD(inbuf, WOL_FILTER_SET_IN_FILTER_MODE, |
1110 | MC_CMD_FILTER_MODE_SIMPLE); | 1098 | MC_CMD_FILTER_MODE_SIMPLE); |
1111 | memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); | 1099 | memcpy(MCDI_PTR(inbuf, WOL_FILTER_SET_IN_MAGIC_MAC), mac, ETH_ALEN); |
1112 | 1100 | ||
1113 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), | 1101 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_SET, inbuf, sizeof(inbuf), |
1114 | outbuf, sizeof(outbuf), &outlen); | 1102 | outbuf, sizeof(outbuf), &outlen); |
1115 | if (rc) | 1103 | if (rc) |
1116 | goto fail; | 1104 | goto fail; |
1117 | 1105 | ||
1118 | if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { | 1106 | if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { |
1119 | rc = -EIO; | 1107 | rc = -EIO; |
1120 | goto fail; | 1108 | goto fail; |
1121 | } | 1109 | } |
1122 | 1110 | ||
1123 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); | 1111 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_SET_OUT_FILTER_ID); |
1124 | 1112 | ||
1125 | return 0; | 1113 | return 0; |
1126 | 1114 | ||
1127 | fail: | 1115 | fail: |
1128 | *id_out = -1; | 1116 | *id_out = -1; |
1129 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1117 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1130 | return rc; | 1118 | return rc; |
1131 | 1119 | ||
1132 | } | 1120 | } |
1133 | 1121 | ||
1134 | 1122 | ||
1135 | int | 1123 | int |
1136 | efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) | 1124 | efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out) |
1137 | { | 1125 | { |
1138 | return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); | 1126 | return efx_mcdi_wol_filter_set(efx, MC_CMD_WOL_TYPE_MAGIC, mac, id_out); |
1139 | } | 1127 | } |
1140 | 1128 | ||
1141 | 1129 | ||
1142 | int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) | 1130 | int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out) |
1143 | { | 1131 | { |
1144 | u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; | 1132 | u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN]; |
1145 | size_t outlen; | 1133 | size_t outlen; |
1146 | int rc; | 1134 | int rc; |
1147 | 1135 | ||
1148 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, | 1136 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_GET, NULL, 0, |
1149 | outbuf, sizeof(outbuf), &outlen); | 1137 | outbuf, sizeof(outbuf), &outlen); |
1150 | if (rc) | 1138 | if (rc) |
1151 | goto fail; | 1139 | goto fail; |
1152 | 1140 | ||
1153 | if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { | 1141 | if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { |
1154 | rc = -EIO; | 1142 | rc = -EIO; |
1155 | goto fail; | 1143 | goto fail; |
1156 | } | 1144 | } |
1157 | 1145 | ||
1158 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); | 1146 | *id_out = (int)MCDI_DWORD(outbuf, WOL_FILTER_GET_OUT_FILTER_ID); |
1159 | 1147 | ||
1160 | return 0; | 1148 | return 0; |
1161 | 1149 | ||
1162 | fail: | 1150 | fail: |
1163 | *id_out = -1; | 1151 | *id_out = -1; |
1164 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1152 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1165 | return rc; | 1153 | return rc; |
1166 | } | 1154 | } |
1167 | 1155 | ||
1168 | 1156 | ||
1169 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) | 1157 | int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id) |
1170 | { | 1158 | { |
1171 | u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; | 1159 | u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN]; |
1172 | int rc; | 1160 | int rc; |
1173 | 1161 | ||
1174 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); | 1162 | MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id); |
1175 | 1163 | ||
1176 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), | 1164 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_REMOVE, inbuf, sizeof(inbuf), |
1177 | NULL, 0, NULL); | 1165 | NULL, 0, NULL); |
1178 | if (rc) | 1166 | if (rc) |
1179 | goto fail; | 1167 | goto fail; |
1180 | 1168 | ||
1181 | return 0; | 1169 | return 0; |
1182 | 1170 | ||
1183 | fail: | 1171 | fail: |
1184 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1172 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
1185 | return rc; | 1173 | return rc; |
1186 | } | 1174 | } |
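For context, a hedged, non-authoritative sketch of how these MCDI WoL helpers are typically chained together (the example_* wrappers and the simplified stand-in declarations are hypothetical, not part of this commit): install a magic-packet filter, keep the returned ID, and remove it on teardown.

	/* Simplified stand-in declarations so the sketch is self-contained;
	 * the real prototypes live in the driver headers. */
	struct efx_nic;
	typedef unsigned char u8;
	int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out);
	int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);

	/* Hypothetical helper: enable magic-packet wake-up for one MAC address.
	 * On success *wol_id holds the filter ID needed to remove it later;
	 * on failure the MCDI helpers already set *wol_id to -1 and log the error. */
	static int example_enable_magic_wol(struct efx_nic *efx, const u8 *mac, int *wol_id)
	{
		return efx_mcdi_wol_filter_set_magic(efx, mac, wol_id);
	}

	/* Hypothetical helper: tear the filter down again if one was installed. */
	static void example_disable_magic_wol(struct efx_nic *efx, int wol_id)
	{
		if (wol_id >= 0)
			efx_mcdi_wol_filter_remove(efx, wol_id);
	}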
1187 | 1175 | ||
1188 | 1176 | ||
1189 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) | 1177 | int efx_mcdi_wol_filter_reset(struct efx_nic *efx) |
1190 | { | 1178 | { |
1191 | int rc; | 1179 | int rc; |
1192 | 1180 | ||
1193 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); | 1181 | rc = efx_mcdi_rpc(efx, MC_CMD_WOL_FILTER_RESET, NULL, 0, NULL, 0, NULL); |
1194 | if (rc) | 1182 | if (rc) |
1195 | goto fail; | 1183 | goto fail; |
1196 | 1184 | ||
1197 | return 0; | 1185 | return 0; |
1198 | 1186 | ||
1199 | fail: | 1187 | fail: |
1200 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); | 1188 | netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); |
drivers/net/sfc/nic.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2011 Solarflare Communications Inc. | 4 | * Copyright 2006-2011 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/seq_file.h> | 16 | #include <linux/seq_file.h> |
17 | #include "net_driver.h" | 17 | #include "net_driver.h" |
18 | #include "bitfield.h" | 18 | #include "bitfield.h" |
19 | #include "efx.h" | 19 | #include "efx.h" |
20 | #include "nic.h" | 20 | #include "nic.h" |
21 | #include "regs.h" | 21 | #include "regs.h" |
22 | #include "io.h" | 22 | #include "io.h" |
23 | #include "workarounds.h" | 23 | #include "workarounds.h" |
24 | 24 | ||
25 | /************************************************************************** | 25 | /************************************************************************** |
26 | * | 26 | * |
27 | * Configurable values | 27 | * Configurable values |
28 | * | 28 | * |
29 | ************************************************************************** | 29 | ************************************************************************** |
30 | */ | 30 | */ |
31 | 31 | ||
32 | /* This is set to 16 for a good reason. In summary, if larger than | 32 | /* This is set to 16 for a good reason. In summary, if larger than |
33 | * 16, the descriptor cache holds more than a default socket | 33 | * 16, the descriptor cache holds more than a default socket |
34 | * buffer's worth of packets (for UDP we can only have at most one | 34 | * buffer's worth of packets (for UDP we can only have at most one |
35 | * socket buffer's worth outstanding). This combined with the fact | 35 | * socket buffer's worth outstanding). This combined with the fact |
36 | * that we only get 1 TX event per descriptor cache means the NIC | 36 | * that we only get 1 TX event per descriptor cache means the NIC |
37 | * goes idle. | 37 | * goes idle. |
38 | */ | 38 | */ |
39 | #define TX_DC_ENTRIES 16 | 39 | #define TX_DC_ENTRIES 16 |
40 | #define TX_DC_ENTRIES_ORDER 1 | 40 | #define TX_DC_ENTRIES_ORDER 1 |
41 | 41 | ||
42 | #define RX_DC_ENTRIES 64 | 42 | #define RX_DC_ENTRIES 64 |
43 | #define RX_DC_ENTRIES_ORDER 3 | 43 | #define RX_DC_ENTRIES_ORDER 3 |
44 | 44 | ||
45 | /* If EFX_MAX_INT_ERRORS internal errors occur within | 45 | /* If EFX_MAX_INT_ERRORS internal errors occur within |
46 | * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and | 46 | * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and |
47 | * disable it. | 47 | * disable it. |
48 | */ | 48 | */ |
49 | #define EFX_INT_ERROR_EXPIRE 3600 | 49 | #define EFX_INT_ERROR_EXPIRE 3600 |
50 | #define EFX_MAX_INT_ERRORS 5 | 50 | #define EFX_MAX_INT_ERRORS 5 |
51 | 51 | ||
52 | /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times | 52 | /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times |
53 | */ | 53 | */ |
54 | #define EFX_FLUSH_INTERVAL 10 | 54 | #define EFX_FLUSH_INTERVAL 10 |
55 | #define EFX_FLUSH_POLL_COUNT 100 | 55 | #define EFX_FLUSH_POLL_COUNT 100 |
56 | 56 | ||
57 | /* Size and alignment of special buffers (4KB) */ | 57 | /* Size and alignment of special buffers (4KB) */ |
58 | #define EFX_BUF_SIZE 4096 | 58 | #define EFX_BUF_SIZE 4096 |
59 | 59 | ||
60 | /* Depth of RX flush request fifo */ | 60 | /* Depth of RX flush request fifo */ |
61 | #define EFX_RX_FLUSH_COUNT 4 | 61 | #define EFX_RX_FLUSH_COUNT 4 |
62 | 62 | ||
63 | /* Generated event code for efx_generate_test_event() */ | 63 | /* Generated event code for efx_generate_test_event() */ |
64 | #define EFX_CHANNEL_MAGIC_TEST(_channel) \ | 64 | #define EFX_CHANNEL_MAGIC_TEST(_channel) \ |
65 | (0x00010100 + (_channel)->channel) | 65 | (0x00010100 + (_channel)->channel) |
66 | 66 | ||
67 | /* Generated event code for efx_generate_fill_event() */ | 67 | /* Generated event code for efx_generate_fill_event() */ |
68 | #define EFX_CHANNEL_MAGIC_FILL(_channel) \ | 68 | #define EFX_CHANNEL_MAGIC_FILL(_channel) \ |
69 | (0x00010200 + (_channel)->channel) | 69 | (0x00010200 + (_channel)->channel) |
70 | 70 | ||
71 | /************************************************************************** | 71 | /************************************************************************** |
72 | * | 72 | * |
73 | * Solarstorm hardware access | 73 | * Solarstorm hardware access |
74 | * | 74 | * |
75 | **************************************************************************/ | 75 | **************************************************************************/ |
76 | 76 | ||
77 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, | 77 | static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value, |
78 | unsigned int index) | 78 | unsigned int index) |
79 | { | 79 | { |
80 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, | 80 | efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base, |
81 | value, index); | 81 | value, index); |
82 | } | 82 | } |
83 | 83 | ||
84 | /* Read the current event from the event queue */ | 84 | /* Read the current event from the event queue */ |
85 | static inline efx_qword_t *efx_event(struct efx_channel *channel, | 85 | static inline efx_qword_t *efx_event(struct efx_channel *channel, |
86 | unsigned int index) | 86 | unsigned int index) |
87 | { | 87 | { |
88 | return ((efx_qword_t *) (channel->eventq.addr)) + | 88 | return ((efx_qword_t *) (channel->eventq.addr)) + |
89 | (index & channel->eventq_mask); | 89 | (index & channel->eventq_mask); |
90 | } | 90 | } |
91 | 91 | ||
92 | /* See if an event is present | 92 | /* See if an event is present |
93 | * | 93 | * |
94 | * We check both the high and low dword of the event for all ones. We | 94 | * We check both the high and low dword of the event for all ones. We |
95 | * wrote all ones when we cleared the event, and no valid event can | 95 | * wrote all ones when we cleared the event, and no valid event can |
96 | * have all ones in either its high or low dwords. This approach is | 96 | * have all ones in either its high or low dwords. This approach is |
97 | * robust against reordering. | 97 | * robust against reordering. |
98 | * | 98 | * |
99 | * Note that using a single 64-bit comparison is incorrect; even | 99 | * Note that using a single 64-bit comparison is incorrect; even |
100 | * though the CPU read will be atomic, the DMA write may not be. | 100 | * though the CPU read will be atomic, the DMA write may not be. |
101 | */ | 101 | */ |
102 | static inline int efx_event_present(efx_qword_t *event) | 102 | static inline int efx_event_present(efx_qword_t *event) |
103 | { | 103 | { |
104 | return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | | 104 | return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | |
105 | EFX_DWORD_IS_ALL_ONES(event->dword[1])); | 105 | EFX_DWORD_IS_ALL_ONES(event->dword[1])); |
106 | } | 106 | } |
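A minimal standalone sketch (illustrative only, not driver code) of the all-ones sentinel check described in the comment above: a cleared entry is written as all ones, and each 32-bit half is tested separately because the NIC's 64-bit DMA write may not land atomically.

	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for efx_qword_t: one event = two 32-bit dwords. */
	struct qword { uint32_t dword[2]; };

	static int event_present(const struct qword *ev)
	{
		/* Valid events never contain an all-ones dword, so an event is
		 * only "present" once both halves have been overwritten by DMA. */
		return !(ev->dword[0] == 0xffffffff || ev->dword[1] == 0xffffffff);
	}

	int main(void)
	{
		struct qword cleared = { { 0xffffffff, 0xffffffff } };	/* as after memset(0xff) */
		struct qword half    = { { 0x00010200, 0xffffffff } };	/* DMA wrote only one half */
		struct qword full    = { { 0x00010200, 0x00000003 } };	/* complete event */

		printf("%d %d %d\n", event_present(&cleared),
		       event_present(&half), event_present(&full));	/* prints: 0 0 1 */
		return 0;
	}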
107 | 107 | ||
108 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, | 108 | static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b, |
109 | const efx_oword_t *mask) | 109 | const efx_oword_t *mask) |
110 | { | 110 | { |
111 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || | 111 | return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) || |
112 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); | 112 | ((a->u64[1] ^ b->u64[1]) & mask->u64[1]); |
113 | } | 113 | } |
114 | 114 | ||
115 | int efx_nic_test_registers(struct efx_nic *efx, | 115 | int efx_nic_test_registers(struct efx_nic *efx, |
116 | const struct efx_nic_register_test *regs, | 116 | const struct efx_nic_register_test *regs, |
117 | size_t n_regs) | 117 | size_t n_regs) |
118 | { | 118 | { |
119 | unsigned address = 0, i, j; | 119 | unsigned address = 0, i, j; |
120 | efx_oword_t mask, imask, original, reg, buf; | 120 | efx_oword_t mask, imask, original, reg, buf; |
121 | 121 | ||
122 | /* Falcon should be in loopback to isolate the XMAC from the PHY */ | 122 | /* Falcon should be in loopback to isolate the XMAC from the PHY */ |
123 | WARN_ON(!LOOPBACK_INTERNAL(efx)); | 123 | WARN_ON(!LOOPBACK_INTERNAL(efx)); |
124 | 124 | ||
125 | for (i = 0; i < n_regs; ++i) { | 125 | for (i = 0; i < n_regs; ++i) { |
126 | address = regs[i].address; | 126 | address = regs[i].address; |
127 | mask = imask = regs[i].mask; | 127 | mask = imask = regs[i].mask; |
128 | EFX_INVERT_OWORD(imask); | 128 | EFX_INVERT_OWORD(imask); |
129 | 129 | ||
130 | efx_reado(efx, &original, address); | 130 | efx_reado(efx, &original, address); |
131 | 131 | ||
132 | /* bit sweep on and off */ | 132 | /* bit sweep on and off */ |
133 | for (j = 0; j < 128; j++) { | 133 | for (j = 0; j < 128; j++) { |
134 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) | 134 | if (!EFX_EXTRACT_OWORD32(mask, j, j)) |
135 | continue; | 135 | continue; |
136 | 136 | ||
137 | /* Test this testable bit can be set in isolation */ | 137 | /* Test this testable bit can be set in isolation */ |
138 | EFX_AND_OWORD(reg, original, mask); | 138 | EFX_AND_OWORD(reg, original, mask); |
139 | EFX_SET_OWORD32(reg, j, j, 1); | 139 | EFX_SET_OWORD32(reg, j, j, 1); |
140 | 140 | ||
141 | efx_writeo(efx, &reg, address); | 141 | efx_writeo(efx, &reg, address); |
142 | efx_reado(efx, &buf, address); | 142 | efx_reado(efx, &buf, address); |
143 | 143 | ||
144 | if (efx_masked_compare_oword(&reg, &buf, &mask)) | 144 | if (efx_masked_compare_oword(&reg, &buf, &mask)) |
145 | goto fail; | 145 | goto fail; |
146 | 146 | ||
147 | /* Test this testable bit can be cleared in isolation */ | 147 | /* Test this testable bit can be cleared in isolation */ |
148 | EFX_OR_OWORD(reg, original, mask); | 148 | EFX_OR_OWORD(reg, original, mask); |
149 | EFX_SET_OWORD32(reg, j, j, 0); | 149 | EFX_SET_OWORD32(reg, j, j, 0); |
150 | 150 | ||
151 | efx_writeo(efx, &reg, address); | 151 | efx_writeo(efx, &reg, address); |
152 | efx_reado(efx, &buf, address); | 152 | efx_reado(efx, &buf, address); |
153 | 153 | ||
154 | if (efx_masked_compare_oword(&reg, &buf, &mask)) | 154 | if (efx_masked_compare_oword(&reg, &buf, &mask)) |
155 | goto fail; | 155 | goto fail; |
156 | } | 156 | } |
157 | 157 | ||
158 | efx_writeo(efx, &original, address); | 158 | efx_writeo(efx, &original, address); |
159 | } | 159 | } |
160 | 160 | ||
161 | return 0; | 161 | return 0; |
162 | 162 | ||
163 | fail: | 163 | fail: |
164 | netif_err(efx, hw, efx->net_dev, | 164 | netif_err(efx, hw, efx->net_dev, |
165 | "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT | 165 | "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT |
166 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), | 166 | " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg), |
167 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); | 167 | EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask)); |
168 | return -EIO; | 168 | return -EIO; |
169 | } | 169 | } |
170 | 170 | ||
171 | /************************************************************************** | 171 | /************************************************************************** |
172 | * | 172 | * |
173 | * Special buffer handling | 173 | * Special buffer handling |
174 | * Special buffers are used for event queues and the TX and RX | 174 | * Special buffers are used for event queues and the TX and RX |
175 | * descriptor rings. | 175 | * descriptor rings. |
176 | * | 176 | * |
177 | *************************************************************************/ | 177 | *************************************************************************/ |
178 | 178 | ||
179 | /* | 179 | /* |
180 | * Initialise a special buffer | 180 | * Initialise a special buffer |
181 | * | 181 | * |
182 | * This will define a buffer (previously allocated via | 182 | * This will define a buffer (previously allocated via |
183 | * efx_alloc_special_buffer()) in the buffer table, allowing | 183 | * efx_alloc_special_buffer()) in the buffer table, allowing |
184 | * it to be used for event queues, descriptor rings etc. | 184 | * it to be used for event queues, descriptor rings etc. |
185 | */ | 185 | */ |
186 | static void | 186 | static void |
187 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | 187 | efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) |
188 | { | 188 | { |
189 | efx_qword_t buf_desc; | 189 | efx_qword_t buf_desc; |
190 | int index; | 190 | int index; |
191 | dma_addr_t dma_addr; | 191 | dma_addr_t dma_addr; |
192 | int i; | 192 | int i; |
193 | 193 | ||
194 | EFX_BUG_ON_PARANOID(!buffer->addr); | 194 | EFX_BUG_ON_PARANOID(!buffer->addr); |
195 | 195 | ||
196 | /* Write buffer descriptors to NIC */ | 196 | /* Write buffer descriptors to NIC */ |
197 | for (i = 0; i < buffer->entries; i++) { | 197 | for (i = 0; i < buffer->entries; i++) { |
198 | index = buffer->index + i; | 198 | index = buffer->index + i; |
199 | dma_addr = buffer->dma_addr + (i * 4096); | 199 | dma_addr = buffer->dma_addr + (i * 4096); |
200 | netif_dbg(efx, probe, efx->net_dev, | 200 | netif_dbg(efx, probe, efx->net_dev, |
201 | "mapping special buffer %d at %llx\n", | 201 | "mapping special buffer %d at %llx\n", |
202 | index, (unsigned long long)dma_addr); | 202 | index, (unsigned long long)dma_addr); |
203 | EFX_POPULATE_QWORD_3(buf_desc, | 203 | EFX_POPULATE_QWORD_3(buf_desc, |
204 | FRF_AZ_BUF_ADR_REGION, 0, | 204 | FRF_AZ_BUF_ADR_REGION, 0, |
205 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, | 205 | FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12, |
206 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); | 206 | FRF_AZ_BUF_OWNER_ID_FBUF, 0); |
207 | efx_write_buf_tbl(efx, &buf_desc, index); | 207 | efx_write_buf_tbl(efx, &buf_desc, index); |
208 | } | 208 | } |
209 | } | 209 | } |
210 | 210 | ||
211 | /* Unmaps a buffer and clears the buffer table entries */ | 211 | /* Unmaps a buffer and clears the buffer table entries */ |
212 | static void | 212 | static void |
213 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | 213 | efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) |
214 | { | 214 | { |
215 | efx_oword_t buf_tbl_upd; | 215 | efx_oword_t buf_tbl_upd; |
216 | unsigned int start = buffer->index; | 216 | unsigned int start = buffer->index; |
217 | unsigned int end = (buffer->index + buffer->entries - 1); | 217 | unsigned int end = (buffer->index + buffer->entries - 1); |
218 | 218 | ||
219 | if (!buffer->entries) | 219 | if (!buffer->entries) |
220 | return; | 220 | return; |
221 | 221 | ||
222 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", | 222 | netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n", |
223 | buffer->index, buffer->index + buffer->entries - 1); | 223 | buffer->index, buffer->index + buffer->entries - 1); |
224 | 224 | ||
225 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | 225 | EFX_POPULATE_OWORD_4(buf_tbl_upd, |
226 | FRF_AZ_BUF_UPD_CMD, 0, | 226 | FRF_AZ_BUF_UPD_CMD, 0, |
227 | FRF_AZ_BUF_CLR_CMD, 1, | 227 | FRF_AZ_BUF_CLR_CMD, 1, |
228 | FRF_AZ_BUF_CLR_END_ID, end, | 228 | FRF_AZ_BUF_CLR_END_ID, end, |
229 | FRF_AZ_BUF_CLR_START_ID, start); | 229 | FRF_AZ_BUF_CLR_START_ID, start); |
230 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); | 230 | efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD); |
231 | } | 231 | } |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * Allocate a new special buffer | 234 | * Allocate a new special buffer |
235 | * | 235 | * |
236 | * This allocates memory for a new buffer, clears it and allocates a | 236 | * This allocates memory for a new buffer, clears it and allocates a |
237 | * new buffer ID range. It does not write into the buffer table. | 237 | * new buffer ID range. It does not write into the buffer table. |
238 | * | 238 | * |
239 | * This call will allocate 4KB buffers, since 8KB buffers can't be | 239 | * This call will allocate 4KB buffers, since 8KB buffers can't be |
240 | * used for event queues and descriptor rings. | 240 | * used for event queues and descriptor rings. |
241 | */ | 241 | */ |
242 | static int efx_alloc_special_buffer(struct efx_nic *efx, | 242 | static int efx_alloc_special_buffer(struct efx_nic *efx, |
243 | struct efx_special_buffer *buffer, | 243 | struct efx_special_buffer *buffer, |
244 | unsigned int len) | 244 | unsigned int len) |
245 | { | 245 | { |
246 | len = ALIGN(len, EFX_BUF_SIZE); | 246 | len = ALIGN(len, EFX_BUF_SIZE); |
247 | 247 | ||
248 | buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, | 248 | buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len, |
249 | &buffer->dma_addr, GFP_KERNEL); | 249 | &buffer->dma_addr, GFP_KERNEL); |
250 | if (!buffer->addr) | 250 | if (!buffer->addr) |
251 | return -ENOMEM; | 251 | return -ENOMEM; |
252 | buffer->len = len; | 252 | buffer->len = len; |
253 | buffer->entries = len / EFX_BUF_SIZE; | 253 | buffer->entries = len / EFX_BUF_SIZE; |
254 | BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); | 254 | BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1)); |
255 | 255 | ||
256 | /* All zeros is a potentially valid event so memset to 0xff */ | 256 | /* All zeros is a potentially valid event so memset to 0xff */ |
257 | memset(buffer->addr, 0xff, len); | 257 | memset(buffer->addr, 0xff, len); |
258 | 258 | ||
259 | /* Select new buffer ID */ | 259 | /* Select new buffer ID */ |
260 | buffer->index = efx->next_buffer_table; | 260 | buffer->index = efx->next_buffer_table; |
261 | efx->next_buffer_table += buffer->entries; | 261 | efx->next_buffer_table += buffer->entries; |
262 | 262 | ||
263 | netif_dbg(efx, probe, efx->net_dev, | 263 | netif_dbg(efx, probe, efx->net_dev, |
264 | "allocating special buffers %d-%d at %llx+%x " | 264 | "allocating special buffers %d-%d at %llx+%x " |
265 | "(virt %p phys %llx)\n", buffer->index, | 265 | "(virt %p phys %llx)\n", buffer->index, |
266 | buffer->index + buffer->entries - 1, | 266 | buffer->index + buffer->entries - 1, |
267 | (u64)buffer->dma_addr, len, | 267 | (u64)buffer->dma_addr, len, |
268 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | 268 | buffer->addr, (u64)virt_to_phys(buffer->addr)); |
269 | 269 | ||
270 | return 0; | 270 | return 0; |
271 | } | 271 | } |
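A small worked example (hypothetical sizes and addresses, not taken from this commit) of the arithmetic efx_alloc_special_buffer() and efx_init_special_buffer() perform: the requested length is rounded up to 4KB, each 4KB page consumes one buffer-table entry, and each entry's FBUF field is that page's DMA address shifted right by 12.

	#include <stdint.h>
	#include <stdio.h>

	#define EFX_BUF_SIZE 4096u

	int main(void)
	{
		unsigned int len = 1024 * 8;			/* e.g. a 1024-entry ring of 8-byte descriptors */
		unsigned int aligned = (len + EFX_BUF_SIZE - 1) & ~(EFX_BUF_SIZE - 1);
		unsigned int entries = aligned / EFX_BUF_SIZE;	/* buffer-table entries consumed: 2 */
		uint64_t dma_addr = 0x3f259000ull;		/* hypothetical, 4KB-aligned DMA address */
		unsigned int i;

		for (i = 0; i < entries; i++)
			printf("entry %u: FBUF = 0x%llx\n", i,
			       (unsigned long long)((dma_addr + i * EFX_BUF_SIZE) >> 12));
		return 0;	/* prints FBUF = 0x3f259 and 0x3f25a */
	}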
272 | 272 | ||
273 | static void | 273 | static void |
274 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) | 274 | efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer) |
275 | { | 275 | { |
276 | if (!buffer->addr) | 276 | if (!buffer->addr) |
277 | return; | 277 | return; |
278 | 278 | ||
279 | netif_dbg(efx, hw, efx->net_dev, | 279 | netif_dbg(efx, hw, efx->net_dev, |
280 | "deallocating special buffers %d-%d at %llx+%x " | 280 | "deallocating special buffers %d-%d at %llx+%x " |
281 | "(virt %p phys %llx)\n", buffer->index, | 281 | "(virt %p phys %llx)\n", buffer->index, |
282 | buffer->index + buffer->entries - 1, | 282 | buffer->index + buffer->entries - 1, |
283 | (u64)buffer->dma_addr, buffer->len, | 283 | (u64)buffer->dma_addr, buffer->len, |
284 | buffer->addr, (u64)virt_to_phys(buffer->addr)); | 284 | buffer->addr, (u64)virt_to_phys(buffer->addr)); |
285 | 285 | ||
286 | dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, | 286 | dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr, |
287 | buffer->dma_addr); | 287 | buffer->dma_addr); |
288 | buffer->addr = NULL; | 288 | buffer->addr = NULL; |
289 | buffer->entries = 0; | 289 | buffer->entries = 0; |
290 | } | 290 | } |
291 | 291 | ||
292 | /************************************************************************** | 292 | /************************************************************************** |
293 | * | 293 | * |
294 | * Generic buffer handling | 294 | * Generic buffer handling |
295 | * These buffers are used for interrupt status and MAC stats | 295 | * These buffers are used for interrupt status and MAC stats |
296 | * | 296 | * |
297 | **************************************************************************/ | 297 | **************************************************************************/ |
298 | 298 | ||
299 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | 299 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, |
300 | unsigned int len) | 300 | unsigned int len) |
301 | { | 301 | { |
302 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | 302 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, |
303 | &buffer->dma_addr); | 303 | &buffer->dma_addr); |
304 | if (!buffer->addr) | 304 | if (!buffer->addr) |
305 | return -ENOMEM; | 305 | return -ENOMEM; |
306 | buffer->len = len; | 306 | buffer->len = len; |
307 | memset(buffer->addr, 0, len); | 307 | memset(buffer->addr, 0, len); |
308 | return 0; | 308 | return 0; |
309 | } | 309 | } |
310 | 310 | ||
311 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) | 311 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) |
312 | { | 312 | { |
313 | if (buffer->addr) { | 313 | if (buffer->addr) { |
314 | pci_free_consistent(efx->pci_dev, buffer->len, | 314 | pci_free_consistent(efx->pci_dev, buffer->len, |
315 | buffer->addr, buffer->dma_addr); | 315 | buffer->addr, buffer->dma_addr); |
316 | buffer->addr = NULL; | 316 | buffer->addr = NULL; |
317 | } | 317 | } |
318 | } | 318 | } |
319 | 319 | ||
320 | /************************************************************************** | 320 | /************************************************************************** |
321 | * | 321 | * |
322 | * TX path | 322 | * TX path |
323 | * | 323 | * |
324 | **************************************************************************/ | 324 | **************************************************************************/ |
325 | 325 | ||
326 | /* Returns a pointer to the specified transmit descriptor in the TX | 326 | /* Returns a pointer to the specified transmit descriptor in the TX |
327 | * descriptor queue belonging to the specified channel. | 327 | * descriptor queue belonging to the specified channel. |
328 | */ | 328 | */ |
329 | static inline efx_qword_t * | 329 | static inline efx_qword_t * |
330 | efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) | 330 | efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) |
331 | { | 331 | { |
332 | return ((efx_qword_t *) (tx_queue->txd.addr)) + index; | 332 | return ((efx_qword_t *) (tx_queue->txd.addr)) + index; |
333 | } | 333 | } |
334 | 334 | ||
335 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | 335 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ |
336 | static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) | 336 | static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue) |
337 | { | 337 | { |
338 | unsigned write_ptr; | 338 | unsigned write_ptr; |
339 | efx_dword_t reg; | 339 | efx_dword_t reg; |
340 | 340 | ||
341 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | 341 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
342 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); | 342 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr); |
343 | efx_writed_page(tx_queue->efx, &reg, | 343 | efx_writed_page(tx_queue->efx, &reg, |
344 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); | 344 | FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); |
345 | } | 345 | } |
346 | 346 | ||
347 | /* Write pointer and first descriptor for TX descriptor ring */ | 347 | /* Write pointer and first descriptor for TX descriptor ring */ |
348 | static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, | 348 | static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue, |
349 | const efx_qword_t *txd) | 349 | const efx_qword_t *txd) |
350 | { | 350 | { |
351 | unsigned write_ptr; | 351 | unsigned write_ptr; |
352 | efx_oword_t reg; | 352 | efx_oword_t reg; |
353 | 353 | ||
354 | BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); | 354 | BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0); |
355 | BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); | 355 | BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0); |
356 | 356 | ||
357 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | 357 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
358 | EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, | 358 | EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true, |
359 | FRF_AZ_TX_DESC_WPTR, write_ptr); | 359 | FRF_AZ_TX_DESC_WPTR, write_ptr); |
360 | reg.qword[0] = *txd; | 360 | reg.qword[0] = *txd; |
361 | efx_writeo_page(tx_queue->efx, &reg, | 361 | efx_writeo_page(tx_queue->efx, &reg, |
362 | FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); | 362 | FR_BZ_TX_DESC_UPD_P0, tx_queue->queue); |
363 | } | 363 | } |
364 | 364 | ||
365 | static inline bool | 365 | static inline bool |
366 | efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) | 366 | efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count) |
367 | { | 367 | { |
368 | unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); | 368 | unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); |
369 | 369 | ||
370 | if (empty_read_count == 0) | 370 | if (empty_read_count == 0) |
371 | return false; | 371 | return false; |
372 | 372 | ||
373 | tx_queue->empty_read_count = 0; | 373 | tx_queue->empty_read_count = 0; |
374 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; | 374 | return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0; |
375 | } | 375 | } |
376 | 376 | ||
377 | /* For each entry inserted into the software descriptor ring, create a | 377 | /* For each entry inserted into the software descriptor ring, create a |
378 | * descriptor in the hardware TX descriptor ring (in host memory), and | 378 | * descriptor in the hardware TX descriptor ring (in host memory), and |
379 | * write a doorbell. | 379 | * write a doorbell. |
380 | */ | 380 | */ |
381 | void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) | 381 | void efx_nic_push_buffers(struct efx_tx_queue *tx_queue) |
382 | { | 382 | { |
383 | 383 | ||
384 | struct efx_tx_buffer *buffer; | 384 | struct efx_tx_buffer *buffer; |
385 | efx_qword_t *txd; | 385 | efx_qword_t *txd; |
386 | unsigned write_ptr; | 386 | unsigned write_ptr; |
387 | unsigned old_write_count = tx_queue->write_count; | 387 | unsigned old_write_count = tx_queue->write_count; |
388 | 388 | ||
389 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | 389 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); |
390 | 390 | ||
391 | do { | 391 | do { |
392 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; | 392 | write_ptr = tx_queue->write_count & tx_queue->ptr_mask; |
393 | buffer = &tx_queue->buffer[write_ptr]; | 393 | buffer = &tx_queue->buffer[write_ptr]; |
394 | txd = efx_tx_desc(tx_queue, write_ptr); | 394 | txd = efx_tx_desc(tx_queue, write_ptr); |
395 | ++tx_queue->write_count; | 395 | ++tx_queue->write_count; |
396 | 396 | ||
397 | /* Create TX descriptor ring entry */ | 397 | /* Create TX descriptor ring entry */ |
398 | EFX_POPULATE_QWORD_4(*txd, | 398 | EFX_POPULATE_QWORD_4(*txd, |
399 | FSF_AZ_TX_KER_CONT, buffer->continuation, | 399 | FSF_AZ_TX_KER_CONT, buffer->continuation, |
400 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, | 400 | FSF_AZ_TX_KER_BYTE_COUNT, buffer->len, |
401 | FSF_AZ_TX_KER_BUF_REGION, 0, | 401 | FSF_AZ_TX_KER_BUF_REGION, 0, |
402 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); | 402 | FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr); |
403 | } while (tx_queue->write_count != tx_queue->insert_count); | 403 | } while (tx_queue->write_count != tx_queue->insert_count); |
404 | 404 | ||
405 | wmb(); /* Ensure descriptors are written before they are fetched */ | 405 | wmb(); /* Ensure descriptors are written before they are fetched */ |
406 | 406 | ||
407 | if (efx_may_push_tx_desc(tx_queue, old_write_count)) { | 407 | if (efx_may_push_tx_desc(tx_queue, old_write_count)) { |
408 | txd = efx_tx_desc(tx_queue, | 408 | txd = efx_tx_desc(tx_queue, |
409 | old_write_count & tx_queue->ptr_mask); | 409 | old_write_count & tx_queue->ptr_mask); |
410 | efx_push_tx_desc(tx_queue, txd); | 410 | efx_push_tx_desc(tx_queue, txd); |
411 | ++tx_queue->pushes; | 411 | ++tx_queue->pushes; |
412 | } else { | 412 | } else { |
413 | efx_notify_tx_desc(tx_queue); | 413 | efx_notify_tx_desc(tx_queue); |
414 | } | 414 | } |
415 | } | 415 | } |
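A hedged standalone model (not driver code; EMPTY_COUNT_VALID here is an arbitrary illustrative flag value) of the decision made above: the completion path records the write count at which it last saw the queue drain, and efx_may_push_tx_desc() pushes the first new descriptor along with the doorbell only if no writes have happened since; otherwise only the write pointer is notified.

	#include <stdbool.h>
	#include <stdio.h>

	#define EMPTY_COUNT_VALID 0x80000000u	/* top bit marks a recorded value */

	/* Mirrors the masked comparison in efx_may_push_tx_desc(). */
	static bool may_push(unsigned int empty_read_count, unsigned int old_write_count)
	{
		if (empty_read_count == 0)	/* nothing recorded: queue not known to be empty */
			return false;
		return ((empty_read_count ^ old_write_count) & ~EMPTY_COUNT_VALID) == 0;
	}

	int main(void)
	{
		/* Completion path saw the queue drain at write count 42. */
		unsigned int recorded = 42u | EMPTY_COUNT_VALID;

		printf("%d\n", may_push(recorded, 42));	/* 1: queue was idle, push first descriptor */
		printf("%d\n", may_push(recorded, 45));	/* 0: more writes since, just ring the doorbell */
		printf("%d\n", may_push(0, 42));	/* 0: nothing recorded */
		return 0;
	}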
416 | 416 | ||
417 | /* Allocate hardware resources for a TX queue */ | 417 | /* Allocate hardware resources for a TX queue */ |
418 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) | 418 | int efx_nic_probe_tx(struct efx_tx_queue *tx_queue) |
419 | { | 419 | { |
420 | struct efx_nic *efx = tx_queue->efx; | 420 | struct efx_nic *efx = tx_queue->efx; |
421 | unsigned entries; | 421 | unsigned entries; |
422 | 422 | ||
423 | entries = tx_queue->ptr_mask + 1; | 423 | entries = tx_queue->ptr_mask + 1; |
424 | return efx_alloc_special_buffer(efx, &tx_queue->txd, | 424 | return efx_alloc_special_buffer(efx, &tx_queue->txd, |
425 | entries * sizeof(efx_qword_t)); | 425 | entries * sizeof(efx_qword_t)); |
426 | } | 426 | } |
427 | 427 | ||
428 | void efx_nic_init_tx(struct efx_tx_queue *tx_queue) | 428 | void efx_nic_init_tx(struct efx_tx_queue *tx_queue) |
429 | { | 429 | { |
430 | struct efx_nic *efx = tx_queue->efx; | 430 | struct efx_nic *efx = tx_queue->efx; |
431 | efx_oword_t reg; | 431 | efx_oword_t reg; |
432 | 432 | ||
433 | tx_queue->flushed = FLUSH_NONE; | 433 | tx_queue->flushed = FLUSH_NONE; |
434 | 434 | ||
435 | /* Pin TX descriptor ring */ | 435 | /* Pin TX descriptor ring */ |
436 | efx_init_special_buffer(efx, &tx_queue->txd); | 436 | efx_init_special_buffer(efx, &tx_queue->txd); |
437 | 437 | ||
438 | /* Push TX descriptor ring to card */ | 438 | /* Push TX descriptor ring to card */ |
439 | EFX_POPULATE_OWORD_10(reg, | 439 | EFX_POPULATE_OWORD_10(reg, |
440 | FRF_AZ_TX_DESCQ_EN, 1, | 440 | FRF_AZ_TX_DESCQ_EN, 1, |
441 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, | 441 | FRF_AZ_TX_ISCSI_DDIG_EN, 0, |
442 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, | 442 | FRF_AZ_TX_ISCSI_HDIG_EN, 0, |
443 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | 443 | FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, |
444 | FRF_AZ_TX_DESCQ_EVQ_ID, | 444 | FRF_AZ_TX_DESCQ_EVQ_ID, |
445 | tx_queue->channel->channel, | 445 | tx_queue->channel->channel, |
446 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, | 446 | FRF_AZ_TX_DESCQ_OWNER_ID, 0, |
447 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, | 447 | FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue, |
448 | FRF_AZ_TX_DESCQ_SIZE, | 448 | FRF_AZ_TX_DESCQ_SIZE, |
449 | __ffs(tx_queue->txd.entries), | 449 | __ffs(tx_queue->txd.entries), |
450 | FRF_AZ_TX_DESCQ_TYPE, 0, | 450 | FRF_AZ_TX_DESCQ_TYPE, 0, |
451 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); | 451 | FRF_BZ_TX_NON_IP_DROP_DIS, 1); |
452 | 452 | ||
453 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | 453 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { |
454 | int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; | 454 | int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD; |
455 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); | 455 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum); |
456 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, | 456 | EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS, |
457 | !csum); | 457 | !csum); |
458 | } | 458 | } |
459 | 459 | ||
460 | efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base, | 460 | efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base, |
461 | tx_queue->queue); | 461 | tx_queue->queue); |
462 | 462 | ||
463 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { | 463 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { |
464 | /* Only 128 bits in this register */ | 464 | /* Only 128 bits in this register */ |
465 | BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); | 465 | BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128); |
466 | 466 | ||
467 | efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); | 467 | efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); |
468 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) | 468 | if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) |
469 | clear_bit_le(tx_queue->queue, (void *)&reg); | 469 | clear_bit_le(tx_queue->queue, (void *)&reg); |
470 | else | 470 | else |
471 | set_bit_le(tx_queue->queue, (void *)&reg); | 471 | set_bit_le(tx_queue->queue, (void *)&reg); |
472 | efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); | 472 | efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG); |
473 | } | 473 | } |
474 | 474 | ||
475 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | 475 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { |
476 | EFX_POPULATE_OWORD_1(reg, | 476 | EFX_POPULATE_OWORD_1(reg, |
477 | FRF_BZ_TX_PACE, | 477 | FRF_BZ_TX_PACE, |
478 | (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? | 478 | (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? |
479 | FFE_BZ_TX_PACE_OFF : | 479 | FFE_BZ_TX_PACE_OFF : |
480 | FFE_BZ_TX_PACE_RESERVED); | 480 | FFE_BZ_TX_PACE_RESERVED); |
481 | efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, | 481 | efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, |
482 | tx_queue->queue); | 482 | tx_queue->queue); |
483 | } | 483 | } |
484 | } | 484 | } |
485 | 485 | ||
486 | static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) | 486 | static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue) |
487 | { | 487 | { |
488 | struct efx_nic *efx = tx_queue->efx; | 488 | struct efx_nic *efx = tx_queue->efx; |
489 | efx_oword_t tx_flush_descq; | 489 | efx_oword_t tx_flush_descq; |
490 | 490 | ||
491 | tx_queue->flushed = FLUSH_PENDING; | 491 | tx_queue->flushed = FLUSH_PENDING; |
492 | 492 | ||
493 | /* Post a flush command */ | 493 | /* Post a flush command */ |
494 | EFX_POPULATE_OWORD_2(tx_flush_descq, | 494 | EFX_POPULATE_OWORD_2(tx_flush_descq, |
495 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, | 495 | FRF_AZ_TX_FLUSH_DESCQ_CMD, 1, |
496 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); | 496 | FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue); |
497 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); | 497 | efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ); |
498 | } | 498 | } |
499 | 499 | ||
500 | void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) | 500 | void efx_nic_fini_tx(struct efx_tx_queue *tx_queue) |
501 | { | 501 | { |
502 | struct efx_nic *efx = tx_queue->efx; | 502 | struct efx_nic *efx = tx_queue->efx; |
503 | efx_oword_t tx_desc_ptr; | 503 | efx_oword_t tx_desc_ptr; |
504 | 504 | ||
505 | /* The queue should have been flushed */ | 505 | /* The queue should have been flushed */ |
506 | WARN_ON(tx_queue->flushed != FLUSH_DONE); | 506 | WARN_ON(tx_queue->flushed != FLUSH_DONE); |
507 | 507 | ||
508 | /* Remove TX descriptor ring from card */ | 508 | /* Remove TX descriptor ring from card */ |
509 | EFX_ZERO_OWORD(tx_desc_ptr); | 509 | EFX_ZERO_OWORD(tx_desc_ptr); |
510 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | 510 | efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, |
511 | tx_queue->queue); | 511 | tx_queue->queue); |
512 | 512 | ||
513 | /* Unpin TX descriptor ring */ | 513 | /* Unpin TX descriptor ring */ |
514 | efx_fini_special_buffer(efx, &tx_queue->txd); | 514 | efx_fini_special_buffer(efx, &tx_queue->txd); |
515 | } | 515 | } |
516 | 516 | ||
517 | /* Free buffers backing TX queue */ | 517 | /* Free buffers backing TX queue */ |
518 | void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) | 518 | void efx_nic_remove_tx(struct efx_tx_queue *tx_queue) |
519 | { | 519 | { |
520 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); | 520 | efx_free_special_buffer(tx_queue->efx, &tx_queue->txd); |
521 | } | 521 | } |
522 | 522 | ||
523 | /************************************************************************** | 523 | /************************************************************************** |
524 | * | 524 | * |
525 | * RX path | 525 | * RX path |
526 | * | 526 | * |
527 | **************************************************************************/ | 527 | **************************************************************************/ |
528 | 528 | ||
529 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ | 529 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ |
530 | static inline efx_qword_t * | 530 | static inline efx_qword_t * |
531 | efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) | 531 | efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index) |
532 | { | 532 | { |
533 | return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; | 533 | return ((efx_qword_t *) (rx_queue->rxd.addr)) + index; |
534 | } | 534 | } |
535 | 535 | ||
536 | /* This creates an entry in the RX descriptor queue */ | 536 | /* This creates an entry in the RX descriptor queue */ |
537 | static inline void | 537 | static inline void |
538 | efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) | 538 | efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index) |
539 | { | 539 | { |
540 | struct efx_rx_buffer *rx_buf; | 540 | struct efx_rx_buffer *rx_buf; |
541 | efx_qword_t *rxd; | 541 | efx_qword_t *rxd; |
542 | 542 | ||
543 | rxd = efx_rx_desc(rx_queue, index); | 543 | rxd = efx_rx_desc(rx_queue, index); |
544 | rx_buf = efx_rx_buffer(rx_queue, index); | 544 | rx_buf = efx_rx_buffer(rx_queue, index); |
545 | EFX_POPULATE_QWORD_3(*rxd, | 545 | EFX_POPULATE_QWORD_3(*rxd, |
546 | FSF_AZ_RX_KER_BUF_SIZE, | 546 | FSF_AZ_RX_KER_BUF_SIZE, |
547 | rx_buf->len - | 547 | rx_buf->len - |
548 | rx_queue->efx->type->rx_buffer_padding, | 548 | rx_queue->efx->type->rx_buffer_padding, |
549 | FSF_AZ_RX_KER_BUF_REGION, 0, | 549 | FSF_AZ_RX_KER_BUF_REGION, 0, |
550 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); | 550 | FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr); |
551 | } | 551 | } |
552 | 552 | ||
553 | /* This writes to the RX_DESC_WPTR register for the specified receive | 553 | /* This writes to the RX_DESC_WPTR register for the specified receive |
554 | * descriptor ring. | 554 | * descriptor ring. |
555 | */ | 555 | */ |
556 | void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) | 556 | void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue) |
557 | { | 557 | { |
558 | struct efx_nic *efx = rx_queue->efx; | 558 | struct efx_nic *efx = rx_queue->efx; |
559 | efx_dword_t reg; | 559 | efx_dword_t reg; |
560 | unsigned write_ptr; | 560 | unsigned write_ptr; |
561 | 561 | ||
562 | while (rx_queue->notified_count != rx_queue->added_count) { | 562 | while (rx_queue->notified_count != rx_queue->added_count) { |
563 | efx_build_rx_desc( | 563 | efx_build_rx_desc( |
564 | rx_queue, | 564 | rx_queue, |
565 | rx_queue->notified_count & rx_queue->ptr_mask); | 565 | rx_queue->notified_count & rx_queue->ptr_mask); |
566 | ++rx_queue->notified_count; | 566 | ++rx_queue->notified_count; |
567 | } | 567 | } |
568 | 568 | ||
569 | wmb(); | 569 | wmb(); |
570 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; | 570 | write_ptr = rx_queue->added_count & rx_queue->ptr_mask; |
571 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); | 571 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr); |
572 | efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, | 572 | efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0, |
573 | efx_rx_queue_index(rx_queue)); | 573 | efx_rx_queue_index(rx_queue)); |
574 | } | 574 | } |
575 | 575 | ||
576 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) | 576 | int efx_nic_probe_rx(struct efx_rx_queue *rx_queue) |
577 | { | 577 | { |
578 | struct efx_nic *efx = rx_queue->efx; | 578 | struct efx_nic *efx = rx_queue->efx; |
579 | unsigned entries; | 579 | unsigned entries; |
580 | 580 | ||
581 | entries = rx_queue->ptr_mask + 1; | 581 | entries = rx_queue->ptr_mask + 1; |
582 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, | 582 | return efx_alloc_special_buffer(efx, &rx_queue->rxd, |
583 | entries * sizeof(efx_qword_t)); | 583 | entries * sizeof(efx_qword_t)); |
584 | } | 584 | } |
585 | 585 | ||
586 | void efx_nic_init_rx(struct efx_rx_queue *rx_queue) | 586 | void efx_nic_init_rx(struct efx_rx_queue *rx_queue) |
587 | { | 587 | { |
588 | efx_oword_t rx_desc_ptr; | 588 | efx_oword_t rx_desc_ptr; |
589 | struct efx_nic *efx = rx_queue->efx; | 589 | struct efx_nic *efx = rx_queue->efx; |
590 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; | 590 | bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; |
591 | bool iscsi_digest_en = is_b0; | 591 | bool iscsi_digest_en = is_b0; |
592 | 592 | ||
593 | netif_dbg(efx, hw, efx->net_dev, | 593 | netif_dbg(efx, hw, efx->net_dev, |
594 | "RX queue %d ring in special buffers %d-%d\n", | 594 | "RX queue %d ring in special buffers %d-%d\n", |
595 | efx_rx_queue_index(rx_queue), rx_queue->rxd.index, | 595 | efx_rx_queue_index(rx_queue), rx_queue->rxd.index, |
596 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | 596 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); |
597 | 597 | ||
598 | rx_queue->flushed = FLUSH_NONE; | 598 | rx_queue->flushed = FLUSH_NONE; |
599 | 599 | ||
600 | /* Pin RX descriptor ring */ | 600 | /* Pin RX descriptor ring */ |
601 | efx_init_special_buffer(efx, &rx_queue->rxd); | 601 | efx_init_special_buffer(efx, &rx_queue->rxd); |
602 | 602 | ||
603 | /* Push RX descriptor ring to card */ | 603 | /* Push RX descriptor ring to card */ |
604 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | 604 | EFX_POPULATE_OWORD_10(rx_desc_ptr, |
605 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, | 605 | FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en, |
606 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, | 606 | FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en, |
607 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | 607 | FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, |
608 | FRF_AZ_RX_DESCQ_EVQ_ID, | 608 | FRF_AZ_RX_DESCQ_EVQ_ID, |
609 | efx_rx_queue_channel(rx_queue)->channel, | 609 | efx_rx_queue_channel(rx_queue)->channel, |
610 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, | 610 | FRF_AZ_RX_DESCQ_OWNER_ID, 0, |
611 | FRF_AZ_RX_DESCQ_LABEL, | 611 | FRF_AZ_RX_DESCQ_LABEL, |
612 | efx_rx_queue_index(rx_queue), | 612 | efx_rx_queue_index(rx_queue), |
613 | FRF_AZ_RX_DESCQ_SIZE, | 613 | FRF_AZ_RX_DESCQ_SIZE, |
614 | __ffs(rx_queue->rxd.entries), | 614 | __ffs(rx_queue->rxd.entries), |
615 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , | 615 | FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , |
616 | /* For >=B0 this is scatter so disable */ | 616 | /* For >=B0 this is scatter so disable */ |
617 | FRF_AZ_RX_DESCQ_JUMBO, !is_b0, | 617 | FRF_AZ_RX_DESCQ_JUMBO, !is_b0, |
618 | FRF_AZ_RX_DESCQ_EN, 1); | 618 | FRF_AZ_RX_DESCQ_EN, 1); |
619 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | 619 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, |
620 | efx_rx_queue_index(rx_queue)); | 620 | efx_rx_queue_index(rx_queue)); |
621 | } | 621 | } |
622 | 622 | ||
623 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) | 623 | static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue) |
624 | { | 624 | { |
625 | struct efx_nic *efx = rx_queue->efx; | 625 | struct efx_nic *efx = rx_queue->efx; |
626 | efx_oword_t rx_flush_descq; | 626 | efx_oword_t rx_flush_descq; |
627 | 627 | ||
628 | rx_queue->flushed = FLUSH_PENDING; | 628 | rx_queue->flushed = FLUSH_PENDING; |
629 | 629 | ||
630 | /* Post a flush command */ | 630 | /* Post a flush command */ |
631 | EFX_POPULATE_OWORD_2(rx_flush_descq, | 631 | EFX_POPULATE_OWORD_2(rx_flush_descq, |
632 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, | 632 | FRF_AZ_RX_FLUSH_DESCQ_CMD, 1, |
633 | FRF_AZ_RX_FLUSH_DESCQ, | 633 | FRF_AZ_RX_FLUSH_DESCQ, |
634 | efx_rx_queue_index(rx_queue)); | 634 | efx_rx_queue_index(rx_queue)); |
635 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); | 635 | efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ); |
636 | } | 636 | } |
637 | 637 | ||
638 | void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) | 638 | void efx_nic_fini_rx(struct efx_rx_queue *rx_queue) |
639 | { | 639 | { |
640 | efx_oword_t rx_desc_ptr; | 640 | efx_oword_t rx_desc_ptr; |
641 | struct efx_nic *efx = rx_queue->efx; | 641 | struct efx_nic *efx = rx_queue->efx; |
642 | 642 | ||
643 | /* The queue should already have been flushed */ | 643 | /* The queue should already have been flushed */ |
644 | WARN_ON(rx_queue->flushed != FLUSH_DONE); | 644 | WARN_ON(rx_queue->flushed != FLUSH_DONE); |
645 | 645 | ||
646 | /* Remove RX descriptor ring from card */ | 646 | /* Remove RX descriptor ring from card */ |
647 | EFX_ZERO_OWORD(rx_desc_ptr); | 647 | EFX_ZERO_OWORD(rx_desc_ptr); |
648 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | 648 | efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, |
649 | efx_rx_queue_index(rx_queue)); | 649 | efx_rx_queue_index(rx_queue)); |
650 | 650 | ||
651 | /* Unpin RX descriptor ring */ | 651 | /* Unpin RX descriptor ring */ |
652 | efx_fini_special_buffer(efx, &rx_queue->rxd); | 652 | efx_fini_special_buffer(efx, &rx_queue->rxd); |
653 | } | 653 | } |
654 | 654 | ||
655 | /* Free buffers backing RX queue */ | 655 | /* Free buffers backing RX queue */ |
656 | void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) | 656 | void efx_nic_remove_rx(struct efx_rx_queue *rx_queue) |
657 | { | 657 | { |
658 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | 658 | efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd); |
659 | } | 659 | } |
660 | 660 | ||
661 | /************************************************************************** | 661 | /************************************************************************** |
662 | * | 662 | * |
663 | * Event queue processing | 663 | * Event queue processing |
664 | * Event queues are processed by per-channel tasklets. | 664 | * Event queues are processed by per-channel tasklets. |
665 | * | 665 | * |
666 | **************************************************************************/ | 666 | **************************************************************************/ |
667 | 667 | ||
668 | /* Update a channel's event queue's read pointer (RPTR) register | 668 | /* Update a channel's event queue's read pointer (RPTR) register |
669 | * | 669 | * |
670 | * This writes the EVQ_RPTR_REG register for the specified channel's | 670 | * This writes the EVQ_RPTR_REG register for the specified channel's |
671 | * event queue. | 671 | * event queue. |
672 | */ | 672 | */ |
673 | void efx_nic_eventq_read_ack(struct efx_channel *channel) | 673 | void efx_nic_eventq_read_ack(struct efx_channel *channel) |
674 | { | 674 | { |
675 | efx_dword_t reg; | 675 | efx_dword_t reg; |
676 | struct efx_nic *efx = channel->efx; | 676 | struct efx_nic *efx = channel->efx; |
677 | 677 | ||
678 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, | 678 | EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, |
679 | channel->eventq_read_ptr & channel->eventq_mask); | 679 | channel->eventq_read_ptr & channel->eventq_mask); |
680 | efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, | 680 | efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base, |
681 | channel->channel); | 681 | channel->channel); |
682 | } | 682 | } |
683 | 683 | ||
684 | /* Use HW to insert a SW defined event */ | 684 | /* Use HW to insert a SW defined event */ |
685 | static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) | 685 | static void efx_generate_event(struct efx_channel *channel, efx_qword_t *event) |
686 | { | 686 | { |
687 | efx_oword_t drv_ev_reg; | 687 | efx_oword_t drv_ev_reg; |
688 | 688 | ||
689 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || | 689 | BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 || |
690 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); | 690 | FRF_AZ_DRV_EV_DATA_WIDTH != 64); |
691 | drv_ev_reg.u32[0] = event->u32[0]; | 691 | drv_ev_reg.u32[0] = event->u32[0]; |
692 | drv_ev_reg.u32[1] = event->u32[1]; | 692 | drv_ev_reg.u32[1] = event->u32[1]; |
693 | drv_ev_reg.u32[2] = 0; | 693 | drv_ev_reg.u32[2] = 0; |
694 | drv_ev_reg.u32[3] = 0; | 694 | drv_ev_reg.u32[3] = 0; |
695 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); | 695 | EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel); |
696 | efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); | 696 | efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV); |
697 | } | 697 | } |
698 | 698 | ||
699 | /* Handle a transmit completion event | 699 | /* Handle a transmit completion event |
700 | * | 700 | * |
701 | * The NIC batches TX completion events; the message we receive is of | 701 | * The NIC batches TX completion events; the message we receive is of |
702 | * the form "complete all TX events up to this index". | 702 | * the form "complete all TX events up to this index". |
703 | */ | 703 | */ |
704 | static int | 704 | static int |
705 | efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) | 705 | efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) |
706 | { | 706 | { |
707 | unsigned int tx_ev_desc_ptr; | 707 | unsigned int tx_ev_desc_ptr; |
708 | unsigned int tx_ev_q_label; | 708 | unsigned int tx_ev_q_label; |
709 | struct efx_tx_queue *tx_queue; | 709 | struct efx_tx_queue *tx_queue; |
710 | struct efx_nic *efx = channel->efx; | 710 | struct efx_nic *efx = channel->efx; |
711 | int tx_packets = 0; | 711 | int tx_packets = 0; |
712 | 712 | ||
713 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { | 713 | if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { |
714 | /* Transmit completion */ | 714 | /* Transmit completion */ |
715 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); | 715 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); |
716 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | 716 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); |
717 | tx_queue = efx_channel_get_tx_queue( | 717 | tx_queue = efx_channel_get_tx_queue( |
718 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | 718 | channel, tx_ev_q_label % EFX_TXQ_TYPES); |
719 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & | 719 | tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) & |
720 | tx_queue->ptr_mask); | 720 | tx_queue->ptr_mask); |
721 | channel->irq_mod_score += tx_packets; | 721 | channel->irq_mod_score += tx_packets; |
722 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | 722 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); |
723 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { | 723 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { |
724 | /* Rewrite the FIFO write pointer */ | 724 | /* Rewrite the FIFO write pointer */ |
725 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); | 725 | tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); |
726 | tx_queue = efx_channel_get_tx_queue( | 726 | tx_queue = efx_channel_get_tx_queue( |
727 | channel, tx_ev_q_label % EFX_TXQ_TYPES); | 727 | channel, tx_ev_q_label % EFX_TXQ_TYPES); |
728 | 728 | ||
729 | if (efx_dev_registered(efx)) | 729 | if (efx_dev_registered(efx)) |
730 | netif_tx_lock(efx->net_dev); | 730 | netif_tx_lock(efx->net_dev); |
731 | efx_notify_tx_desc(tx_queue); | 731 | efx_notify_tx_desc(tx_queue); |
732 | if (efx_dev_registered(efx)) | 732 | if (efx_dev_registered(efx)) |
733 | netif_tx_unlock(efx->net_dev); | 733 | netif_tx_unlock(efx->net_dev); |
734 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && | 734 | } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) && |
735 | EFX_WORKAROUND_10727(efx)) { | 735 | EFX_WORKAROUND_10727(efx)) { |
736 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | 736 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); |
737 | } else { | 737 | } else { |
738 | netif_err(efx, tx_err, efx->net_dev, | 738 | netif_err(efx, tx_err, efx->net_dev, |
739 | "channel %d unexpected TX event " | 739 | "channel %d unexpected TX event " |
740 | EFX_QWORD_FMT"\n", channel->channel, | 740 | EFX_QWORD_FMT"\n", channel->channel, |
741 | EFX_QWORD_VAL(*event)); | 741 | EFX_QWORD_VAL(*event)); |
742 | } | 742 | } |
743 | 743 | ||
744 | return tx_packets; | 744 | return tx_packets; |
745 | } | 745 | } |
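A worked standalone example (hypothetical values) of the batched-completion arithmetic used above: the event carries only the descriptor index the NIC has reached, so the driver takes the masked difference from its own read count, which also handles wrap past the end of the ring.

	#include <stdio.h>

	int main(void)
	{
		unsigned int ptr_mask = 511;		/* 512-entry TX ring */
		unsigned int read_count = 508;		/* next descriptor the driver expects */
		unsigned int tx_ev_desc_ptr = 3;	/* index reported by the completion event */

		/* Masked subtraction, as in efx_handle_tx_event(): wraps past the ring end. */
		unsigned int tx_packets = (tx_ev_desc_ptr - read_count) & ptr_mask;

		printf("tx_packets = %u\n", tx_packets);	/* 7: the count wrapped through index 0 */
		return 0;
	}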
746 | 746 | ||
747 | /* Detect errors included in the rx_evt_pkt_ok bit. */ | 747 | /* Detect errors included in the rx_evt_pkt_ok bit. */ |
748 | static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | 748 | static void efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, |
749 | const efx_qword_t *event, | 749 | const efx_qword_t *event, |
750 | bool *rx_ev_pkt_ok, | 750 | bool *rx_ev_pkt_ok, |
751 | bool *discard) | 751 | bool *discard) |
752 | { | 752 | { |
753 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); | 753 | struct efx_channel *channel = efx_rx_queue_channel(rx_queue); |
754 | struct efx_nic *efx = rx_queue->efx; | 754 | struct efx_nic *efx = rx_queue->efx; |
755 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | 755 | bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; |
756 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | 756 | bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; |
757 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | 757 | bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; |
758 | bool rx_ev_other_err, rx_ev_pause_frm; | 758 | bool rx_ev_other_err, rx_ev_pause_frm; |
759 | bool rx_ev_hdr_type, rx_ev_mcast_pkt; | 759 | bool rx_ev_hdr_type, rx_ev_mcast_pkt; |
760 | unsigned rx_ev_pkt_type; | 760 | unsigned rx_ev_pkt_type; |
761 | 761 | ||
762 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | 762 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); |
763 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | 763 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); |
764 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); | 764 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC); |
765 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); | 765 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE); |
766 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | 766 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, |
767 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); | 767 | FSF_AZ_RX_EV_BUF_OWNER_ID_ERR); |
768 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | 768 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, |
769 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); | 769 | FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR); |
770 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | 770 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, |
771 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); | 771 | FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR); |
772 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); | 772 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR); |
773 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); | 773 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC); |
774 | rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? | 774 | rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ? |
775 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); | 775 | 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB)); |
776 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); | 776 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR); |
777 | 777 | ||
778 | /* Every error apart from tobe_disc and pause_frm */ | 778 | /* Every error apart from tobe_disc and pause_frm */ |
779 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | 779 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | |
780 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | 780 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | |
781 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | 781 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); |
782 | 782 | ||
783 | /* Count errors that are not in MAC stats. Ignore expected | 783 | /* Count errors that are not in MAC stats. Ignore expected |
784 | * checksum errors during self-test. */ | 784 | * checksum errors during self-test. */ |
785 | if (rx_ev_frm_trunc) | 785 | if (rx_ev_frm_trunc) |
786 | ++channel->n_rx_frm_trunc; | 786 | ++channel->n_rx_frm_trunc; |
787 | else if (rx_ev_tobe_disc) | 787 | else if (rx_ev_tobe_disc) |
788 | ++channel->n_rx_tobe_disc; | 788 | ++channel->n_rx_tobe_disc; |
789 | else if (!efx->loopback_selftest) { | 789 | else if (!efx->loopback_selftest) { |
790 | if (rx_ev_ip_hdr_chksum_err) | 790 | if (rx_ev_ip_hdr_chksum_err) |
791 | ++channel->n_rx_ip_hdr_chksum_err; | 791 | ++channel->n_rx_ip_hdr_chksum_err; |
792 | else if (rx_ev_tcp_udp_chksum_err) | 792 | else if (rx_ev_tcp_udp_chksum_err) |
793 | ++channel->n_rx_tcp_udp_chksum_err; | 793 | ++channel->n_rx_tcp_udp_chksum_err; |
794 | } | 794 | } |
795 | 795 | ||
796 | /* The frame must be discarded if any of these are true. */ | 796 | /* The frame must be discarded if any of these are true. */ |
797 | *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | | 797 | *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | |
798 | rx_ev_tobe_disc | rx_ev_pause_frm); | 798 | rx_ev_tobe_disc | rx_ev_pause_frm); |
799 | 799 | ||
800 | /* TOBE_DISC is expected on unicast mismatches; don't print out an | 800 | /* TOBE_DISC is expected on unicast mismatches; don't print out an |
801 | * error message. FRM_TRUNC indicates RXDP dropped the packet due | 801 | * error message. FRM_TRUNC indicates RXDP dropped the packet due |
802 | * to a FIFO overflow. | 802 | * to a FIFO overflow. |
803 | */ | 803 | */ |
804 | #ifdef EFX_ENABLE_DEBUG | 804 | #ifdef EFX_ENABLE_DEBUG |
805 | if (rx_ev_other_err && net_ratelimit()) { | 805 | if (rx_ev_other_err && net_ratelimit()) { |
806 | netif_dbg(efx, rx_err, efx->net_dev, | 806 | netif_dbg(efx, rx_err, efx->net_dev, |
807 | " RX queue %d unexpected RX event " | 807 | " RX queue %d unexpected RX event " |
808 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", | 808 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n", |
809 | efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), | 809 | efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event), |
810 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | 810 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", |
811 | rx_ev_ip_hdr_chksum_err ? | 811 | rx_ev_ip_hdr_chksum_err ? |
812 | " [IP_HDR_CHKSUM_ERR]" : "", | 812 | " [IP_HDR_CHKSUM_ERR]" : "", |
813 | rx_ev_tcp_udp_chksum_err ? | 813 | rx_ev_tcp_udp_chksum_err ? |
814 | " [TCP_UDP_CHKSUM_ERR]" : "", | 814 | " [TCP_UDP_CHKSUM_ERR]" : "", |
815 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | 815 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", |
816 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | 816 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", |
817 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | 817 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", |
818 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | 818 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", |
819 | rx_ev_pause_frm ? " [PAUSE]" : ""); | 819 | rx_ev_pause_frm ? " [PAUSE]" : ""); |
820 | } | 820 | } |
821 | #endif | 821 | #endif |
822 | } | 822 | } |
823 | 823 | ||
824 | /* Handle receive events that are not in-order. */ | 824 | /* Handle receive events that are not in-order. */ |
825 | static void | 825 | static void |
826 | efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) | 826 | efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) |
827 | { | 827 | { |
828 | struct efx_nic *efx = rx_queue->efx; | 828 | struct efx_nic *efx = rx_queue->efx; |
829 | unsigned expected, dropped; | 829 | unsigned expected, dropped; |
830 | 830 | ||
831 | expected = rx_queue->removed_count & rx_queue->ptr_mask; | 831 | expected = rx_queue->removed_count & rx_queue->ptr_mask; |
832 | dropped = (index - expected) & rx_queue->ptr_mask; | 832 | dropped = (index - expected) & rx_queue->ptr_mask; |
833 | netif_info(efx, rx_err, efx->net_dev, | 833 | netif_info(efx, rx_err, efx->net_dev, |
834 | "dropped %d events (index=%d expected=%d)\n", | 834 | "dropped %d events (index=%d expected=%d)\n", |
835 | dropped, index, expected); | 835 | dropped, index, expected); |
836 | 836 | ||
837 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | 837 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? |
838 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | 838 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); |
839 | } | 839 | } |
840 | 840 | ||
841 | /* Handle a packet received event | 841 | /* Handle a packet received event |
842 | * | 842 | * |
843 | * The NIC gives a "discard" flag if it's a unicast packet with the | 843 | * The NIC gives a "discard" flag if it's a unicast packet with the |
844 | * wrong destination address. | 844 | * wrong destination address. |
845 | * Also "is multicast" and "matches multicast filter" flags can be used to | 845 | * Also "is multicast" and "matches multicast filter" flags can be used to |
846 | * discard non-matching multicast packets. | 846 | * discard non-matching multicast packets. |
847 | */ | 847 | */ |
848 | static void | 848 | static void |
849 | efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) | 849 | efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) |
850 | { | 850 | { |
851 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; | 851 | unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; |
852 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; | 852 | unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; |
853 | unsigned expected_ptr; | 853 | unsigned expected_ptr; |
854 | bool rx_ev_pkt_ok, discard = false, checksummed; | 854 | bool rx_ev_pkt_ok, discard = false, checksummed; |
855 | struct efx_rx_queue *rx_queue; | 855 | struct efx_rx_queue *rx_queue; |
856 | 856 | ||
857 | /* Basic packet information */ | 857 | /* Basic packet information */ |
858 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); | 858 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); |
859 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); | 859 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK); |
860 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); | 860 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE); |
861 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); | 861 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT)); |
862 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); | 862 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1); |
863 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != | 863 | WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != |
864 | channel->channel); | 864 | channel->channel); |
865 | 865 | ||
866 | rx_queue = efx_channel_get_rx_queue(channel); | 866 | rx_queue = efx_channel_get_rx_queue(channel); |
867 | 867 | ||
868 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); | 868 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); |
869 | expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; | 869 | expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; |
870 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) | 870 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) |
871 | efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); | 871 | efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); |
872 | 872 | ||
873 | if (likely(rx_ev_pkt_ok)) { | 873 | if (likely(rx_ev_pkt_ok)) { |
874 | /* If packet is marked as OK and packet type is TCP/IP or | 874 | /* If packet is marked as OK and packet type is TCP/IP or |
875 | * UDP/IP, then we can rely on the hardware checksum. | 875 | * UDP/IP, then we can rely on the hardware checksum. |
876 | */ | 876 | */ |
877 | checksummed = | 877 | checksummed = |
878 | rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || | 878 | rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP || |
879 | rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP; | 879 | rx_ev_hdr_type == FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP; |
880 | } else { | 880 | } else { |
881 | efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); | 881 | efx_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, &discard); |
882 | checksummed = false; | 882 | checksummed = false; |
883 | } | 883 | } |
884 | 884 | ||
885 | /* Detect multicast packets that didn't match the filter */ | 885 | /* Detect multicast packets that didn't match the filter */ |
886 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); | 886 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT); |
887 | if (rx_ev_mcast_pkt) { | 887 | if (rx_ev_mcast_pkt) { |
888 | unsigned int rx_ev_mcast_hash_match = | 888 | unsigned int rx_ev_mcast_hash_match = |
889 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); | 889 | EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH); |
890 | 890 | ||
891 | if (unlikely(!rx_ev_mcast_hash_match)) { | 891 | if (unlikely(!rx_ev_mcast_hash_match)) { |
892 | ++channel->n_rx_mcast_mismatch; | 892 | ++channel->n_rx_mcast_mismatch; |
893 | discard = true; | 893 | discard = true; |
894 | } | 894 | } |
895 | } | 895 | } |
896 | 896 | ||
897 | channel->irq_mod_score += 2; | 897 | channel->irq_mod_score += 2; |
898 | 898 | ||
899 | /* Handle received packet */ | 899 | /* Handle received packet */ |
900 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, | 900 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, |
901 | checksummed, discard); | 901 | checksummed, discard); |
902 | } | 902 | } |
903 | 903 | ||
904 | static void | 904 | static void |
905 | efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) | 905 | efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event) |
906 | { | 906 | { |
907 | struct efx_nic *efx = channel->efx; | 907 | struct efx_nic *efx = channel->efx; |
908 | unsigned code; | 908 | unsigned code; |
909 | 909 | ||
910 | code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); | 910 | code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC); |
911 | if (code == EFX_CHANNEL_MAGIC_TEST(channel)) | 911 | if (code == EFX_CHANNEL_MAGIC_TEST(channel)) |
912 | ; /* ignore */ | 912 | ; /* ignore */ |
913 | else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) | 913 | else if (code == EFX_CHANNEL_MAGIC_FILL(channel)) |
914 | /* The queue must be empty, so we won't receive any rx | 914 | /* The queue must be empty, so we won't receive any rx |
915 | * events, so efx_process_channel() won't refill the | 915 | * events, so efx_process_channel() won't refill the |
916 | * queue. Refill it here */ | 916 | * queue. Refill it here */ |
917 | efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); | 917 | efx_fast_push_rx_descriptors(efx_channel_get_rx_queue(channel)); |
918 | else | 918 | else |
919 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " | 919 | netif_dbg(efx, hw, efx->net_dev, "channel %d received " |
920 | "generated event "EFX_QWORD_FMT"\n", | 920 | "generated event "EFX_QWORD_FMT"\n", |
921 | channel->channel, EFX_QWORD_VAL(*event)); | 921 | channel->channel, EFX_QWORD_VAL(*event)); |
922 | } | 922 | } |
923 | 923 | ||
924 | static void | 924 | static void |
925 | efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) | 925 | efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) |
926 | { | 926 | { |
927 | struct efx_nic *efx = channel->efx; | 927 | struct efx_nic *efx = channel->efx; |
928 | unsigned int ev_sub_code; | 928 | unsigned int ev_sub_code; |
929 | unsigned int ev_sub_data; | 929 | unsigned int ev_sub_data; |
930 | 930 | ||
931 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); | 931 | ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE); |
932 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); | 932 | ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA); |
933 | 933 | ||
934 | switch (ev_sub_code) { | 934 | switch (ev_sub_code) { |
935 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: | 935 | case FSE_AZ_TX_DESCQ_FLS_DONE_EV: |
936 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", | 936 | netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n", |
937 | channel->channel, ev_sub_data); | 937 | channel->channel, ev_sub_data); |
938 | break; | 938 | break; |
939 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: | 939 | case FSE_AZ_RX_DESCQ_FLS_DONE_EV: |
940 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", | 940 | netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n", |
941 | channel->channel, ev_sub_data); | 941 | channel->channel, ev_sub_data); |
942 | break; | 942 | break; |
943 | case FSE_AZ_EVQ_INIT_DONE_EV: | 943 | case FSE_AZ_EVQ_INIT_DONE_EV: |
944 | netif_dbg(efx, hw, efx->net_dev, | 944 | netif_dbg(efx, hw, efx->net_dev, |
945 | "channel %d EVQ %d initialised\n", | 945 | "channel %d EVQ %d initialised\n", |
946 | channel->channel, ev_sub_data); | 946 | channel->channel, ev_sub_data); |
947 | break; | 947 | break; |
948 | case FSE_AZ_SRM_UPD_DONE_EV: | 948 | case FSE_AZ_SRM_UPD_DONE_EV: |
949 | netif_vdbg(efx, hw, efx->net_dev, | 949 | netif_vdbg(efx, hw, efx->net_dev, |
950 | "channel %d SRAM update done\n", channel->channel); | 950 | "channel %d SRAM update done\n", channel->channel); |
951 | break; | 951 | break; |
952 | case FSE_AZ_WAKE_UP_EV: | 952 | case FSE_AZ_WAKE_UP_EV: |
953 | netif_vdbg(efx, hw, efx->net_dev, | 953 | netif_vdbg(efx, hw, efx->net_dev, |
954 | "channel %d RXQ %d wakeup event\n", | 954 | "channel %d RXQ %d wakeup event\n", |
955 | channel->channel, ev_sub_data); | 955 | channel->channel, ev_sub_data); |
956 | break; | 956 | break; |
957 | case FSE_AZ_TIMER_EV: | 957 | case FSE_AZ_TIMER_EV: |
958 | netif_vdbg(efx, hw, efx->net_dev, | 958 | netif_vdbg(efx, hw, efx->net_dev, |
959 | "channel %d RX queue %d timer expired\n", | 959 | "channel %d RX queue %d timer expired\n", |
960 | channel->channel, ev_sub_data); | 960 | channel->channel, ev_sub_data); |
961 | break; | 961 | break; |
962 | case FSE_AA_RX_RECOVER_EV: | 962 | case FSE_AA_RX_RECOVER_EV: |
963 | netif_err(efx, rx_err, efx->net_dev, | 963 | netif_err(efx, rx_err, efx->net_dev, |
964 | "channel %d seen DRIVER RX_RESET event. " | 964 | "channel %d seen DRIVER RX_RESET event. " |
965 | "Resetting.\n", channel->channel); | 965 | "Resetting.\n", channel->channel); |
966 | atomic_inc(&efx->rx_reset); | 966 | atomic_inc(&efx->rx_reset); |
967 | efx_schedule_reset(efx, | 967 | efx_schedule_reset(efx, |
968 | EFX_WORKAROUND_6555(efx) ? | 968 | EFX_WORKAROUND_6555(efx) ? |
969 | RESET_TYPE_RX_RECOVERY : | 969 | RESET_TYPE_RX_RECOVERY : |
970 | RESET_TYPE_DISABLE); | 970 | RESET_TYPE_DISABLE); |
971 | break; | 971 | break; |
972 | case FSE_BZ_RX_DSC_ERROR_EV: | 972 | case FSE_BZ_RX_DSC_ERROR_EV: |
973 | netif_err(efx, rx_err, efx->net_dev, | 973 | netif_err(efx, rx_err, efx->net_dev, |
974 | "RX DMA Q %d reports descriptor fetch error." | 974 | "RX DMA Q %d reports descriptor fetch error." |
975 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | 975 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); |
976 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | 976 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); |
977 | break; | 977 | break; |
978 | case FSE_BZ_TX_DSC_ERROR_EV: | 978 | case FSE_BZ_TX_DSC_ERROR_EV: |
979 | netif_err(efx, tx_err, efx->net_dev, | 979 | netif_err(efx, tx_err, efx->net_dev, |
980 | "TX DMA Q %d reports descriptor fetch error." | 980 | "TX DMA Q %d reports descriptor fetch error." |
981 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | 981 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); |
982 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | 982 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); |
983 | break; | 983 | break; |
984 | default: | 984 | default: |
985 | netif_vdbg(efx, hw, efx->net_dev, | 985 | netif_vdbg(efx, hw, efx->net_dev, |
986 | "channel %d unknown driver event code %d " | 986 | "channel %d unknown driver event code %d " |
987 | "data %04x\n", channel->channel, ev_sub_code, | 987 | "data %04x\n", channel->channel, ev_sub_code, |
988 | ev_sub_data); | 988 | ev_sub_data); |
989 | break; | 989 | break; |
990 | } | 990 | } |
991 | } | 991 | } |
992 | 992 | ||
993 | int efx_nic_process_eventq(struct efx_channel *channel, int budget) | 993 | int efx_nic_process_eventq(struct efx_channel *channel, int budget) |
994 | { | 994 | { |
995 | struct efx_nic *efx = channel->efx; | 995 | struct efx_nic *efx = channel->efx; |
996 | unsigned int read_ptr; | 996 | unsigned int read_ptr; |
997 | efx_qword_t event, *p_event; | 997 | efx_qword_t event, *p_event; |
998 | int ev_code; | 998 | int ev_code; |
999 | int tx_packets = 0; | 999 | int tx_packets = 0; |
1000 | int spent = 0; | 1000 | int spent = 0; |
1001 | 1001 | ||
1002 | read_ptr = channel->eventq_read_ptr; | 1002 | read_ptr = channel->eventq_read_ptr; |
1003 | 1003 | ||
1004 | for (;;) { | 1004 | for (;;) { |
1005 | p_event = efx_event(channel, read_ptr); | 1005 | p_event = efx_event(channel, read_ptr); |
1006 | event = *p_event; | 1006 | event = *p_event; |
1007 | 1007 | ||
1008 | if (!efx_event_present(&event)) | 1008 | if (!efx_event_present(&event)) |
1009 | /* End of events */ | 1009 | /* End of events */ |
1010 | break; | 1010 | break; |
1011 | 1011 | ||
1012 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, | 1012 | netif_vdbg(channel->efx, intr, channel->efx->net_dev, |
1013 | "channel %d event is "EFX_QWORD_FMT"\n", | 1013 | "channel %d event is "EFX_QWORD_FMT"\n", |
1014 | channel->channel, EFX_QWORD_VAL(event)); | 1014 | channel->channel, EFX_QWORD_VAL(event)); |
1015 | 1015 | ||
1016 | /* Clear this event by marking it all ones */ | 1016 | /* Clear this event by marking it all ones */ |
1017 | EFX_SET_QWORD(*p_event); | 1017 | EFX_SET_QWORD(*p_event); |
1018 | 1018 | ||
1019 | ++read_ptr; | 1019 | ++read_ptr; |
1020 | 1020 | ||
1021 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); | 1021 | ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); |
1022 | 1022 | ||
1023 | switch (ev_code) { | 1023 | switch (ev_code) { |
1024 | case FSE_AZ_EV_CODE_RX_EV: | 1024 | case FSE_AZ_EV_CODE_RX_EV: |
1025 | efx_handle_rx_event(channel, &event); | 1025 | efx_handle_rx_event(channel, &event); |
1026 | if (++spent == budget) | 1026 | if (++spent == budget) |
1027 | goto out; | 1027 | goto out; |
1028 | break; | 1028 | break; |
1029 | case FSE_AZ_EV_CODE_TX_EV: | 1029 | case FSE_AZ_EV_CODE_TX_EV: |
1030 | tx_packets += efx_handle_tx_event(channel, &event); | 1030 | tx_packets += efx_handle_tx_event(channel, &event); |
1031 | if (tx_packets > efx->txq_entries) { | 1031 | if (tx_packets > efx->txq_entries) { |
1032 | spent = budget; | 1032 | spent = budget; |
1033 | goto out; | 1033 | goto out; |
1034 | } | 1034 | } |
1035 | break; | 1035 | break; |
1036 | case FSE_AZ_EV_CODE_DRV_GEN_EV: | 1036 | case FSE_AZ_EV_CODE_DRV_GEN_EV: |
1037 | efx_handle_generated_event(channel, &event); | 1037 | efx_handle_generated_event(channel, &event); |
1038 | break; | 1038 | break; |
1039 | case FSE_AZ_EV_CODE_DRIVER_EV: | 1039 | case FSE_AZ_EV_CODE_DRIVER_EV: |
1040 | efx_handle_driver_event(channel, &event); | 1040 | efx_handle_driver_event(channel, &event); |
1041 | break; | 1041 | break; |
1042 | case FSE_CZ_EV_CODE_MCDI_EV: | 1042 | case FSE_CZ_EV_CODE_MCDI_EV: |
1043 | efx_mcdi_process_event(channel, &event); | 1043 | efx_mcdi_process_event(channel, &event); |
1044 | break; | 1044 | break; |
1045 | case FSE_AZ_EV_CODE_GLOBAL_EV: | 1045 | case FSE_AZ_EV_CODE_GLOBAL_EV: |
1046 | if (efx->type->handle_global_event && | 1046 | if (efx->type->handle_global_event && |
1047 | efx->type->handle_global_event(channel, &event)) | 1047 | efx->type->handle_global_event(channel, &event)) |
1048 | break; | 1048 | break; |
1049 | /* else fall through */ | 1049 | /* else fall through */ |
1050 | default: | 1050 | default: |
1051 | netif_err(channel->efx, hw, channel->efx->net_dev, | 1051 | netif_err(channel->efx, hw, channel->efx->net_dev, |
1052 | "channel %d unknown event type %d (data " | 1052 | "channel %d unknown event type %d (data " |
1053 | EFX_QWORD_FMT ")\n", channel->channel, | 1053 | EFX_QWORD_FMT ")\n", channel->channel, |
1054 | ev_code, EFX_QWORD_VAL(event)); | 1054 | ev_code, EFX_QWORD_VAL(event)); |
1055 | } | 1055 | } |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | out: | 1058 | out: |
1059 | channel->eventq_read_ptr = read_ptr; | 1059 | channel->eventq_read_ptr = read_ptr; |
1060 | return spent; | 1060 | return spent; |
1061 | } | 1061 | } |
1062 | 1062 | ||
1063 | /* Check whether an event is present in the eventq at the current | 1063 | /* Check whether an event is present in the eventq at the current |
1064 | * read pointer. Only useful for self-test. | 1064 | * read pointer. Only useful for self-test. |
1065 | */ | 1065 | */ |
1066 | bool efx_nic_event_present(struct efx_channel *channel) | 1066 | bool efx_nic_event_present(struct efx_channel *channel) |
1067 | { | 1067 | { |
1068 | return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); | 1068 | return efx_event_present(efx_event(channel, channel->eventq_read_ptr)); |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | /* Allocate buffer table entries for event queue */ | 1071 | /* Allocate buffer table entries for event queue */ |
1072 | int efx_nic_probe_eventq(struct efx_channel *channel) | 1072 | int efx_nic_probe_eventq(struct efx_channel *channel) |
1073 | { | 1073 | { |
1074 | struct efx_nic *efx = channel->efx; | 1074 | struct efx_nic *efx = channel->efx; |
1075 | unsigned entries; | 1075 | unsigned entries; |
1076 | 1076 | ||
1077 | entries = channel->eventq_mask + 1; | 1077 | entries = channel->eventq_mask + 1; |
1078 | return efx_alloc_special_buffer(efx, &channel->eventq, | 1078 | return efx_alloc_special_buffer(efx, &channel->eventq, |
1079 | entries * sizeof(efx_qword_t)); | 1079 | entries * sizeof(efx_qword_t)); |
1080 | } | 1080 | } |
1081 | 1081 | ||
1082 | void efx_nic_init_eventq(struct efx_channel *channel) | 1082 | void efx_nic_init_eventq(struct efx_channel *channel) |
1083 | { | 1083 | { |
1084 | efx_oword_t reg; | 1084 | efx_oword_t reg; |
1085 | struct efx_nic *efx = channel->efx; | 1085 | struct efx_nic *efx = channel->efx; |
1086 | 1086 | ||
1087 | netif_dbg(efx, hw, efx->net_dev, | 1087 | netif_dbg(efx, hw, efx->net_dev, |
1088 | "channel %d event queue in special buffers %d-%d\n", | 1088 | "channel %d event queue in special buffers %d-%d\n", |
1089 | channel->channel, channel->eventq.index, | 1089 | channel->channel, channel->eventq.index, |
1090 | channel->eventq.index + channel->eventq.entries - 1); | 1090 | channel->eventq.index + channel->eventq.entries - 1); |
1091 | 1091 | ||
1092 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { | 1092 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { |
1093 | EFX_POPULATE_OWORD_3(reg, | 1093 | EFX_POPULATE_OWORD_3(reg, |
1094 | FRF_CZ_TIMER_Q_EN, 1, | 1094 | FRF_CZ_TIMER_Q_EN, 1, |
1095 | FRF_CZ_HOST_NOTIFY_MODE, 0, | 1095 | FRF_CZ_HOST_NOTIFY_MODE, 0, |
1096 | FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); | 1096 | FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); |
1097 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | 1097 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); |
1098 | } | 1098 | } |
1099 | 1099 | ||
1100 | /* Pin event queue buffer */ | 1100 | /* Pin event queue buffer */ |
1101 | efx_init_special_buffer(efx, &channel->eventq); | 1101 | efx_init_special_buffer(efx, &channel->eventq); |
1102 | 1102 | ||
1103 | /* Fill event queue with all ones (i.e. empty events) */ | 1103 | /* Fill event queue with all ones (i.e. empty events) */ |
1104 | memset(channel->eventq.addr, 0xff, channel->eventq.len); | 1104 | memset(channel->eventq.addr, 0xff, channel->eventq.len); |
1105 | 1105 | ||
1106 | /* Push event queue to card */ | 1106 | /* Push event queue to card */ |
1107 | EFX_POPULATE_OWORD_3(reg, | 1107 | EFX_POPULATE_OWORD_3(reg, |
1108 | FRF_AZ_EVQ_EN, 1, | 1108 | FRF_AZ_EVQ_EN, 1, |
1109 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), | 1109 | FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries), |
1110 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); | 1110 | FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index); |
1111 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | 1111 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, |
1112 | channel->channel); | 1112 | channel->channel); |
1113 | 1113 | ||
1114 | efx->type->push_irq_moderation(channel); | 1114 | efx->type->push_irq_moderation(channel); |
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | void efx_nic_fini_eventq(struct efx_channel *channel) | 1117 | void efx_nic_fini_eventq(struct efx_channel *channel) |
1118 | { | 1118 | { |
1119 | efx_oword_t reg; | 1119 | efx_oword_t reg; |
1120 | struct efx_nic *efx = channel->efx; | 1120 | struct efx_nic *efx = channel->efx; |
1121 | 1121 | ||
1122 | /* Remove event queue from card */ | 1122 | /* Remove event queue from card */ |
1123 | EFX_ZERO_OWORD(reg); | 1123 | EFX_ZERO_OWORD(reg); |
1124 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, | 1124 | efx_writeo_table(efx, ®, efx->type->evq_ptr_tbl_base, |
1125 | channel->channel); | 1125 | channel->channel); |
1126 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | 1126 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) |
1127 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); | 1127 | efx_writeo_table(efx, ®, FR_BZ_TIMER_TBL, channel->channel); |
1128 | 1128 | ||
1129 | /* Unpin event queue */ | 1129 | /* Unpin event queue */ |
1130 | efx_fini_special_buffer(efx, &channel->eventq); | 1130 | efx_fini_special_buffer(efx, &channel->eventq); |
1131 | } | 1131 | } |
1132 | 1132 | ||
1133 | /* Free buffers backing event queue */ | 1133 | /* Free buffers backing event queue */ |
1134 | void efx_nic_remove_eventq(struct efx_channel *channel) | 1134 | void efx_nic_remove_eventq(struct efx_channel *channel) |
1135 | { | 1135 | { |
1136 | efx_free_special_buffer(channel->efx, &channel->eventq); | 1136 | efx_free_special_buffer(channel->efx, &channel->eventq); |
1137 | } | 1137 | } |
1138 | 1138 | ||
1139 | 1139 | ||
1140 | void efx_nic_generate_test_event(struct efx_channel *channel) | 1140 | void efx_nic_generate_test_event(struct efx_channel *channel) |
1141 | { | 1141 | { |
1142 | unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); | 1142 | unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel); |
1143 | efx_qword_t test_event; | 1143 | efx_qword_t test_event; |
1144 | 1144 | ||
1145 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | 1145 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, |
1146 | FSE_AZ_EV_CODE_DRV_GEN_EV, | 1146 | FSE_AZ_EV_CODE_DRV_GEN_EV, |
1147 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | 1147 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); |
1148 | efx_generate_event(channel, &test_event); | 1148 | efx_generate_event(channel, &test_event); |
1149 | } | 1149 | } |
1150 | 1150 | ||
1151 | void efx_nic_generate_fill_event(struct efx_channel *channel) | 1151 | void efx_nic_generate_fill_event(struct efx_channel *channel) |
1152 | { | 1152 | { |
1153 | unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); | 1153 | unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel); |
1154 | efx_qword_t test_event; | 1154 | efx_qword_t test_event; |
1155 | 1155 | ||
1156 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, | 1156 | EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE, |
1157 | FSE_AZ_EV_CODE_DRV_GEN_EV, | 1157 | FSE_AZ_EV_CODE_DRV_GEN_EV, |
1158 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); | 1158 | FSF_AZ_DRV_GEN_EV_MAGIC, magic); |
1159 | efx_generate_event(channel, &test_event); | 1159 | efx_generate_event(channel, &test_event); |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | /************************************************************************** | 1162 | /************************************************************************** |
1163 | * | 1163 | * |
1164 | * Flush handling | 1164 | * Flush handling |
1165 | * | 1165 | * |
1166 | **************************************************************************/ | 1166 | **************************************************************************/ |
1167 | 1167 | ||
1168 | 1168 | ||
1169 | static void efx_poll_flush_events(struct efx_nic *efx) | 1169 | static void efx_poll_flush_events(struct efx_nic *efx) |
1170 | { | 1170 | { |
1171 | struct efx_channel *channel = efx_get_channel(efx, 0); | 1171 | struct efx_channel *channel = efx_get_channel(efx, 0); |
1172 | struct efx_tx_queue *tx_queue; | 1172 | struct efx_tx_queue *tx_queue; |
1173 | struct efx_rx_queue *rx_queue; | 1173 | struct efx_rx_queue *rx_queue; |
1174 | unsigned int read_ptr = channel->eventq_read_ptr; | 1174 | unsigned int read_ptr = channel->eventq_read_ptr; |
1175 | unsigned int end_ptr = read_ptr + channel->eventq_mask - 1; | 1175 | unsigned int end_ptr = read_ptr + channel->eventq_mask - 1; |
1176 | 1176 | ||
1177 | do { | 1177 | do { |
1178 | efx_qword_t *event = efx_event(channel, read_ptr); | 1178 | efx_qword_t *event = efx_event(channel, read_ptr); |
1179 | int ev_code, ev_sub_code, ev_queue; | 1179 | int ev_code, ev_sub_code, ev_queue; |
1180 | bool ev_failed; | 1180 | bool ev_failed; |
1181 | 1181 | ||
1182 | if (!efx_event_present(event)) | 1182 | if (!efx_event_present(event)) |
1183 | break; | 1183 | break; |
1184 | 1184 | ||
1185 | ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE); | 1185 | ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE); |
1186 | ev_sub_code = EFX_QWORD_FIELD(*event, | 1186 | ev_sub_code = EFX_QWORD_FIELD(*event, |
1187 | FSF_AZ_DRIVER_EV_SUBCODE); | 1187 | FSF_AZ_DRIVER_EV_SUBCODE); |
1188 | if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && | 1188 | if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && |
1189 | ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { | 1189 | ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { |
1190 | ev_queue = EFX_QWORD_FIELD(*event, | 1190 | ev_queue = EFX_QWORD_FIELD(*event, |
1191 | FSF_AZ_DRIVER_EV_SUBDATA); | 1191 | FSF_AZ_DRIVER_EV_SUBDATA); |
1192 | if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) { | 1192 | if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) { |
1193 | tx_queue = efx_get_tx_queue( | 1193 | tx_queue = efx_get_tx_queue( |
1194 | efx, ev_queue / EFX_TXQ_TYPES, | 1194 | efx, ev_queue / EFX_TXQ_TYPES, |
1195 | ev_queue % EFX_TXQ_TYPES); | 1195 | ev_queue % EFX_TXQ_TYPES); |
1196 | tx_queue->flushed = FLUSH_DONE; | 1196 | tx_queue->flushed = FLUSH_DONE; |
1197 | } | 1197 | } |
1198 | } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && | 1198 | } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV && |
1199 | ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) { | 1199 | ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) { |
1200 | ev_queue = EFX_QWORD_FIELD( | 1200 | ev_queue = EFX_QWORD_FIELD( |
1201 | *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); | 1201 | *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); |
1202 | ev_failed = EFX_QWORD_FIELD( | 1202 | ev_failed = EFX_QWORD_FIELD( |
1203 | *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); | 1203 | *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); |
1204 | if (ev_queue < efx->n_rx_channels) { | 1204 | if (ev_queue < efx->n_rx_channels) { |
1205 | rx_queue = efx_get_rx_queue(efx, ev_queue); | 1205 | rx_queue = efx_get_rx_queue(efx, ev_queue); |
1206 | rx_queue->flushed = | 1206 | rx_queue->flushed = |
1207 | ev_failed ? FLUSH_FAILED : FLUSH_DONE; | 1207 | ev_failed ? FLUSH_FAILED : FLUSH_DONE; |
1208 | } | 1208 | } |
1209 | } | 1209 | } |
1210 | 1210 | ||
1211 | /* We're about to destroy the queue anyway, so | 1211 | /* We're about to destroy the queue anyway, so |
1212 | * it's ok to throw away every non-flush event */ | 1212 | * it's ok to throw away every non-flush event */ |
1213 | EFX_SET_QWORD(*event); | 1213 | EFX_SET_QWORD(*event); |
1214 | 1214 | ||
1215 | ++read_ptr; | 1215 | ++read_ptr; |
1216 | } while (read_ptr != end_ptr); | 1216 | } while (read_ptr != end_ptr); |
1217 | 1217 | ||
1218 | channel->eventq_read_ptr = read_ptr; | 1218 | channel->eventq_read_ptr = read_ptr; |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | /* Handle tx and rx flushes at the same time, since they run in | 1221 | /* Handle tx and rx flushes at the same time, since they run in |
1222 | * parallel in the hardware and there's no reason for us to | 1222 | * parallel in the hardware and there's no reason for us to |
1223 | * serialise them */ | 1223 | * serialise them */ |
1224 | int efx_nic_flush_queues(struct efx_nic *efx) | 1224 | int efx_nic_flush_queues(struct efx_nic *efx) |
1225 | { | 1225 | { |
1226 | struct efx_channel *channel; | 1226 | struct efx_channel *channel; |
1227 | struct efx_rx_queue *rx_queue; | 1227 | struct efx_rx_queue *rx_queue; |
1228 | struct efx_tx_queue *tx_queue; | 1228 | struct efx_tx_queue *tx_queue; |
1229 | int i, tx_pending, rx_pending; | 1229 | int i, tx_pending, rx_pending; |
1230 | 1230 | ||
1231 | /* If necessary prepare the hardware for flushing */ | 1231 | /* If necessary prepare the hardware for flushing */ |
1232 | efx->type->prepare_flush(efx); | 1232 | efx->type->prepare_flush(efx); |
1233 | 1233 | ||
1234 | /* Flush all tx queues in parallel */ | 1234 | /* Flush all tx queues in parallel */ |
1235 | efx_for_each_channel(channel, efx) { | 1235 | efx_for_each_channel(channel, efx) { |
1236 | efx_for_each_possible_channel_tx_queue(tx_queue, channel) { | 1236 | efx_for_each_possible_channel_tx_queue(tx_queue, channel) { |
1237 | if (tx_queue->initialised) | 1237 | if (tx_queue->initialised) |
1238 | efx_flush_tx_queue(tx_queue); | 1238 | efx_flush_tx_queue(tx_queue); |
1239 | } | 1239 | } |
1240 | } | 1240 | } |
1241 | 1241 | ||
1242 | /* The hardware supports four concurrent rx flushes, each of which may | 1242 | /* The hardware supports four concurrent rx flushes, each of which may |
1243 | * need to be retried if there is an outstanding descriptor fetch */ | 1243 | * need to be retried if there is an outstanding descriptor fetch */ |
1244 | for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { | 1244 | for (i = 0; i < EFX_FLUSH_POLL_COUNT; ++i) { |
1245 | rx_pending = tx_pending = 0; | 1245 | rx_pending = tx_pending = 0; |
1246 | efx_for_each_channel(channel, efx) { | 1246 | efx_for_each_channel(channel, efx) { |
1247 | efx_for_each_channel_rx_queue(rx_queue, channel) { | 1247 | efx_for_each_channel_rx_queue(rx_queue, channel) { |
1248 | if (rx_queue->flushed == FLUSH_PENDING) | 1248 | if (rx_queue->flushed == FLUSH_PENDING) |
1249 | ++rx_pending; | 1249 | ++rx_pending; |
1250 | } | 1250 | } |
1251 | } | 1251 | } |
1252 | efx_for_each_channel(channel, efx) { | 1252 | efx_for_each_channel(channel, efx) { |
1253 | efx_for_each_channel_rx_queue(rx_queue, channel) { | 1253 | efx_for_each_channel_rx_queue(rx_queue, channel) { |
1254 | if (rx_pending == EFX_RX_FLUSH_COUNT) | 1254 | if (rx_pending == EFX_RX_FLUSH_COUNT) |
1255 | break; | 1255 | break; |
1256 | if (rx_queue->flushed == FLUSH_FAILED || | 1256 | if (rx_queue->flushed == FLUSH_FAILED || |
1257 | rx_queue->flushed == FLUSH_NONE) { | 1257 | rx_queue->flushed == FLUSH_NONE) { |
1258 | efx_flush_rx_queue(rx_queue); | 1258 | efx_flush_rx_queue(rx_queue); |
1259 | ++rx_pending; | 1259 | ++rx_pending; |
1260 | } | 1260 | } |
1261 | } | 1261 | } |
1262 | efx_for_each_possible_channel_tx_queue(tx_queue, channel) { | 1262 | efx_for_each_possible_channel_tx_queue(tx_queue, channel) { |
1263 | if (tx_queue->initialised && | 1263 | if (tx_queue->initialised && |
1264 | tx_queue->flushed != FLUSH_DONE) | 1264 | tx_queue->flushed != FLUSH_DONE) |
1265 | ++tx_pending; | 1265 | ++tx_pending; |
1266 | } | 1266 | } |
1267 | } | 1267 | } |
1268 | 1268 | ||
1269 | if (rx_pending == 0 && tx_pending == 0) | 1269 | if (rx_pending == 0 && tx_pending == 0) |
1270 | return 0; | 1270 | return 0; |
1271 | 1271 | ||
1272 | msleep(EFX_FLUSH_INTERVAL); | 1272 | msleep(EFX_FLUSH_INTERVAL); |
1273 | efx_poll_flush_events(efx); | 1273 | efx_poll_flush_events(efx); |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | /* Mark the queues as all flushed. We're going to return failure | 1276 | /* Mark the queues as all flushed. We're going to return failure |
1277 | * leading to a reset, or fake up success anyway */ | 1277 | * leading to a reset, or fake up success anyway */ |
1278 | efx_for_each_channel(channel, efx) { | 1278 | efx_for_each_channel(channel, efx) { |
1279 | efx_for_each_possible_channel_tx_queue(tx_queue, channel) { | 1279 | efx_for_each_possible_channel_tx_queue(tx_queue, channel) { |
1280 | if (tx_queue->initialised && | 1280 | if (tx_queue->initialised && |
1281 | tx_queue->flushed != FLUSH_DONE) | 1281 | tx_queue->flushed != FLUSH_DONE) |
1282 | netif_err(efx, hw, efx->net_dev, | 1282 | netif_err(efx, hw, efx->net_dev, |
1283 | "tx queue %d flush command timed out\n", | 1283 | "tx queue %d flush command timed out\n", |
1284 | tx_queue->queue); | 1284 | tx_queue->queue); |
1285 | tx_queue->flushed = FLUSH_DONE; | 1285 | tx_queue->flushed = FLUSH_DONE; |
1286 | } | 1286 | } |
1287 | efx_for_each_channel_rx_queue(rx_queue, channel) { | 1287 | efx_for_each_channel_rx_queue(rx_queue, channel) { |
1288 | if (rx_queue->flushed != FLUSH_DONE) | 1288 | if (rx_queue->flushed != FLUSH_DONE) |
1289 | netif_err(efx, hw, efx->net_dev, | 1289 | netif_err(efx, hw, efx->net_dev, |
1290 | "rx queue %d flush command timed out\n", | 1290 | "rx queue %d flush command timed out\n", |
1291 | efx_rx_queue_index(rx_queue)); | 1291 | efx_rx_queue_index(rx_queue)); |
1292 | rx_queue->flushed = FLUSH_DONE; | 1292 | rx_queue->flushed = FLUSH_DONE; |
1293 | } | 1293 | } |
1294 | } | 1294 | } |
1295 | 1295 | ||
1296 | return -ETIMEDOUT; | 1296 | return -ETIMEDOUT; |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | /************************************************************************** | 1299 | /************************************************************************** |
1300 | * | 1300 | * |
1301 | * Hardware interrupts | 1301 | * Hardware interrupts |
1302 | * The hardware interrupt handler does very little work; all the event | 1302 | * The hardware interrupt handler does very little work; all the event |
1303 | * queue processing is carried out by per-channel tasklets. | 1303 | * queue processing is carried out by per-channel tasklets. |
1304 | * | 1304 | * |
1305 | **************************************************************************/ | 1305 | **************************************************************************/ |
1306 | 1306 | ||
1307 | /* Enable/disable/generate interrupts */ | 1307 | /* Enable/disable/generate interrupts */ |
1308 | static inline void efx_nic_interrupts(struct efx_nic *efx, | 1308 | static inline void efx_nic_interrupts(struct efx_nic *efx, |
1309 | bool enabled, bool force) | 1309 | bool enabled, bool force) |
1310 | { | 1310 | { |
1311 | efx_oword_t int_en_reg_ker; | 1311 | efx_oword_t int_en_reg_ker; |
1312 | 1312 | ||
1313 | EFX_POPULATE_OWORD_3(int_en_reg_ker, | 1313 | EFX_POPULATE_OWORD_3(int_en_reg_ker, |
1314 | FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level, | 1314 | FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level, |
1315 | FRF_AZ_KER_INT_KER, force, | 1315 | FRF_AZ_KER_INT_KER, force, |
1316 | FRF_AZ_DRV_INT_EN_KER, enabled); | 1316 | FRF_AZ_DRV_INT_EN_KER, enabled); |
1317 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); | 1317 | efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); |
1318 | } | 1318 | } |
1319 | 1319 | ||
1320 | void efx_nic_enable_interrupts(struct efx_nic *efx) | 1320 | void efx_nic_enable_interrupts(struct efx_nic *efx) |
1321 | { | 1321 | { |
1322 | struct efx_channel *channel; | 1322 | struct efx_channel *channel; |
1323 | 1323 | ||
1324 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); | 1324 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); |
1325 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ | 1325 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ |
1326 | 1326 | ||
1327 | /* Enable interrupts */ | 1327 | /* Enable interrupts */ |
1328 | efx_nic_interrupts(efx, true, false); | 1328 | efx_nic_interrupts(efx, true, false); |
1329 | 1329 | ||
1330 | /* Force processing of all the channels to get the EVQ RPTRs up to | 1330 | /* Force processing of all the channels to get the EVQ RPTRs up to |
1331 | date */ | 1331 | date */ |
1332 | efx_for_each_channel(channel, efx) | 1332 | efx_for_each_channel(channel, efx) |
1333 | efx_schedule_channel(channel); | 1333 | efx_schedule_channel(channel); |
1334 | } | 1334 | } |
1335 | 1335 | ||
1336 | void efx_nic_disable_interrupts(struct efx_nic *efx) | 1336 | void efx_nic_disable_interrupts(struct efx_nic *efx) |
1337 | { | 1337 | { |
1338 | /* Disable interrupts */ | 1338 | /* Disable interrupts */ |
1339 | efx_nic_interrupts(efx, false, false); | 1339 | efx_nic_interrupts(efx, false, false); |
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | /* Generate a test interrupt | 1342 | /* Generate a test interrupt |
1343 | * Interrupt must already have been enabled, otherwise nasty things | 1343 | * Interrupt must already have been enabled, otherwise nasty things |
1344 | * may happen. | 1344 | * may happen. |
1345 | */ | 1345 | */ |
1346 | void efx_nic_generate_interrupt(struct efx_nic *efx) | 1346 | void efx_nic_generate_interrupt(struct efx_nic *efx) |
1347 | { | 1347 | { |
1348 | efx_nic_interrupts(efx, true, true); | 1348 | efx_nic_interrupts(efx, true, true); |
1349 | } | 1349 | } |
1350 | 1350 | ||
1351 | /* Process a fatal interrupt | 1351 | /* Process a fatal interrupt |
1352 | * Disable bus mastering ASAP and schedule a reset | 1352 | * Disable bus mastering ASAP and schedule a reset |
1353 | */ | 1353 | */ |
1354 | irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) | 1354 | irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx) |
1355 | { | 1355 | { |
1356 | struct falcon_nic_data *nic_data = efx->nic_data; | 1356 | struct falcon_nic_data *nic_data = efx->nic_data; |
1357 | efx_oword_t *int_ker = efx->irq_status.addr; | 1357 | efx_oword_t *int_ker = efx->irq_status.addr; |
1358 | efx_oword_t fatal_intr; | 1358 | efx_oword_t fatal_intr; |
1359 | int error, mem_perr; | 1359 | int error, mem_perr; |
1360 | 1360 | ||
1361 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); | 1361 | efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER); |
1362 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); | 1362 | error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR); |
1363 | 1363 | ||
1364 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " | 1364 | netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status " |
1365 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | 1365 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), |
1366 | EFX_OWORD_VAL(fatal_intr), | 1366 | EFX_OWORD_VAL(fatal_intr), |
1367 | error ? "disabling bus mastering" : "no recognised error"); | 1367 | error ? "disabling bus mastering" : "no recognised error"); |
1368 | 1368 | ||
1369 | /* If this is a memory parity error, dump which blocks are offending */ | 1369 | /* If this is a memory parity error, dump which blocks are offending */ |
1370 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || | 1370 | mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) || |
1371 | EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); | 1371 | EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER)); |
1372 | if (mem_perr) { | 1372 | if (mem_perr) { |
1373 | efx_oword_t reg; | 1373 | efx_oword_t reg; |
1374 | efx_reado(efx, ®, FR_AZ_MEM_STAT); | 1374 | efx_reado(efx, ®, FR_AZ_MEM_STAT); |
1375 | netif_err(efx, hw, efx->net_dev, | 1375 | netif_err(efx, hw, efx->net_dev, |
1376 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", | 1376 | "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n", |
1377 | EFX_OWORD_VAL(reg)); | 1377 | EFX_OWORD_VAL(reg)); |
1378 | } | 1378 | } |
1379 | 1379 | ||
1380 | /* Disable both devices */ | 1380 | /* Disable both devices */ |
1381 | pci_clear_master(efx->pci_dev); | 1381 | pci_clear_master(efx->pci_dev); |
1382 | if (efx_nic_is_dual_func(efx)) | 1382 | if (efx_nic_is_dual_func(efx)) |
1383 | pci_clear_master(nic_data->pci_dev2); | 1383 | pci_clear_master(nic_data->pci_dev2); |
1384 | efx_nic_disable_interrupts(efx); | 1384 | efx_nic_disable_interrupts(efx); |
1385 | 1385 | ||
1386 | /* Count errors and reset or disable the NIC accordingly */ | 1386 | /* Count errors and reset or disable the NIC accordingly */ |
1387 | if (efx->int_error_count == 0 || | 1387 | if (efx->int_error_count == 0 || |
1388 | time_after(jiffies, efx->int_error_expire)) { | 1388 | time_after(jiffies, efx->int_error_expire)) { |
1389 | efx->int_error_count = 0; | 1389 | efx->int_error_count = 0; |
1390 | efx->int_error_expire = | 1390 | efx->int_error_expire = |
1391 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; | 1391 | jiffies + EFX_INT_ERROR_EXPIRE * HZ; |
1392 | } | 1392 | } |
1393 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { | 1393 | if (++efx->int_error_count < EFX_MAX_INT_ERRORS) { |
1394 | netif_err(efx, hw, efx->net_dev, | 1394 | netif_err(efx, hw, efx->net_dev, |
1395 | "SYSTEM ERROR - reset scheduled\n"); | 1395 | "SYSTEM ERROR - reset scheduled\n"); |
1396 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | 1396 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); |
1397 | } else { | 1397 | } else { |
1398 | netif_err(efx, hw, efx->net_dev, | 1398 | netif_err(efx, hw, efx->net_dev, |
1399 | "SYSTEM ERROR - max number of errors seen." | 1399 | "SYSTEM ERROR - max number of errors seen." |
1400 | "NIC will be disabled\n"); | 1400 | "NIC will be disabled\n"); |
1401 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | 1401 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); |
1402 | } | 1402 | } |
1403 | 1403 | ||
1404 | return IRQ_HANDLED; | 1404 | return IRQ_HANDLED; |
1405 | } | 1405 | } |
1406 | 1406 | ||
1407 | /* Handle a legacy interrupt | 1407 | /* Handle a legacy interrupt |
1408 | * Acknowledges the interrupt and schedules event queue processing. | 1408 | * Acknowledges the interrupt and schedules event queue processing. |
1409 | */ | 1409 | */ |
1410 | static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) | 1410 | static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id) |
1411 | { | 1411 | { |
1412 | struct efx_nic *efx = dev_id; | 1412 | struct efx_nic *efx = dev_id; |
1413 | efx_oword_t *int_ker = efx->irq_status.addr; | 1413 | efx_oword_t *int_ker = efx->irq_status.addr; |
1414 | irqreturn_t result = IRQ_NONE; | 1414 | irqreturn_t result = IRQ_NONE; |
1415 | struct efx_channel *channel; | 1415 | struct efx_channel *channel; |
1416 | efx_dword_t reg; | 1416 | efx_dword_t reg; |
1417 | u32 queues; | 1417 | u32 queues; |
1418 | int syserr; | 1418 | int syserr; |
1419 | 1419 | ||
1420 | /* Could this be ours? If interrupts are disabled then the | 1420 | /* Could this be ours? If interrupts are disabled then the |
1421 | * channel state may not be valid. | 1421 | * channel state may not be valid. |
1422 | */ | 1422 | */ |
1423 | if (!efx->legacy_irq_enabled) | 1423 | if (!efx->legacy_irq_enabled) |
1424 | return result; | 1424 | return result; |
1425 | 1425 | ||
1426 | /* Read the ISR which also ACKs the interrupts */ | 1426 | /* Read the ISR which also ACKs the interrupts */ |
1427 | efx_readd(efx, ®, FR_BZ_INT_ISR0); | 1427 | efx_readd(efx, ®, FR_BZ_INT_ISR0); |
1428 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | 1428 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); |
1429 | 1429 | ||
1430 | /* Check to see if we have a serious error condition */ | 1430 | /* Check to see if we have a serious error condition */ |
1431 | if (queues & (1U << efx->fatal_irq_level)) { | 1431 | if (queues & (1U << efx->fatal_irq_level)) { |
1432 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | 1432 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1433 | if (unlikely(syserr)) | 1433 | if (unlikely(syserr)) |
1434 | return efx_nic_fatal_interrupt(efx); | 1434 | return efx_nic_fatal_interrupt(efx); |
1435 | } | 1435 | } |
1436 | 1436 | ||
1437 | if (queues != 0) { | 1437 | if (queues != 0) { |
1438 | if (EFX_WORKAROUND_15783(efx)) | 1438 | if (EFX_WORKAROUND_15783(efx)) |
1439 | efx->irq_zero_count = 0; | 1439 | efx->irq_zero_count = 0; |
1440 | 1440 | ||
1441 | /* Schedule processing of any interrupting queues */ | 1441 | /* Schedule processing of any interrupting queues */ |
1442 | efx_for_each_channel(channel, efx) { | 1442 | efx_for_each_channel(channel, efx) { |
1443 | if (queues & 1) | 1443 | if (queues & 1) |
1444 | efx_schedule_channel(channel); | 1444 | efx_schedule_channel(channel); |
1445 | queues >>= 1; | 1445 | queues >>= 1; |
1446 | } | 1446 | } |
1447 | result = IRQ_HANDLED; | 1447 | result = IRQ_HANDLED; |
1448 | 1448 | ||
1449 | } else if (EFX_WORKAROUND_15783(efx)) { | 1449 | } else if (EFX_WORKAROUND_15783(efx)) { |
1450 | efx_qword_t *event; | 1450 | efx_qword_t *event; |
1451 | 1451 | ||
1452 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 | 1452 | /* We can't return IRQ_HANDLED more than once on seeing ISR=0 |
1453 | * because this might be a shared interrupt. */ | 1453 | * because this might be a shared interrupt. */ |
1454 | if (efx->irq_zero_count++ == 0) | 1454 | if (efx->irq_zero_count++ == 0) |
1455 | result = IRQ_HANDLED; | 1455 | result = IRQ_HANDLED; |
1456 | 1456 | ||
1457 | /* Ensure we schedule or rearm all event queues */ | 1457 | /* Ensure we schedule or rearm all event queues */ |
1458 | efx_for_each_channel(channel, efx) { | 1458 | efx_for_each_channel(channel, efx) { |
1459 | event = efx_event(channel, channel->eventq_read_ptr); | 1459 | event = efx_event(channel, channel->eventq_read_ptr); |
1460 | if (efx_event_present(event)) | 1460 | if (efx_event_present(event)) |
1461 | efx_schedule_channel(channel); | 1461 | efx_schedule_channel(channel); |
1462 | else | 1462 | else |
1463 | efx_nic_eventq_read_ack(channel); | 1463 | efx_nic_eventq_read_ack(channel); |
1464 | } | 1464 | } |
1465 | } | 1465 | } |
1466 | 1466 | ||
1467 | if (result == IRQ_HANDLED) { | 1467 | if (result == IRQ_HANDLED) { |
1468 | efx->last_irq_cpu = raw_smp_processor_id(); | 1468 | efx->last_irq_cpu = raw_smp_processor_id(); |
1469 | netif_vdbg(efx, intr, efx->net_dev, | 1469 | netif_vdbg(efx, intr, efx->net_dev, |
1470 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | 1470 | "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", |
1471 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | 1471 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); |
1472 | } | 1472 | } |
1473 | 1473 | ||
1474 | return result; | 1474 | return result; |
1475 | } | 1475 | } |
1476 | 1476 | ||
1477 | /* Handle an MSI interrupt | 1477 | /* Handle an MSI interrupt |
1478 | * | 1478 | * |
1479 | * Handle an MSI hardware interrupt. This routine schedules event | 1479 | * Handle an MSI hardware interrupt. This routine schedules event |
1480 | * queue processing. No interrupt acknowledgement cycle is necessary. | 1480 | * queue processing. No interrupt acknowledgement cycle is necessary. |
1481 | * Also, we never need to check that the interrupt is for us, since | 1481 | * Also, we never need to check that the interrupt is for us, since |
1482 | * MSI interrupts cannot be shared. | 1482 | * MSI interrupts cannot be shared. |
1483 | */ | 1483 | */ |
1484 | static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) | 1484 | static irqreturn_t efx_msi_interrupt(int irq, void *dev_id) |
1485 | { | 1485 | { |
1486 | struct efx_channel *channel = *(struct efx_channel **)dev_id; | 1486 | struct efx_channel *channel = *(struct efx_channel **)dev_id; |
1487 | struct efx_nic *efx = channel->efx; | 1487 | struct efx_nic *efx = channel->efx; |
1488 | efx_oword_t *int_ker = efx->irq_status.addr; | 1488 | efx_oword_t *int_ker = efx->irq_status.addr; |
1489 | int syserr; | 1489 | int syserr; |
1490 | 1490 | ||
1491 | efx->last_irq_cpu = raw_smp_processor_id(); | 1491 | efx->last_irq_cpu = raw_smp_processor_id(); |
1492 | netif_vdbg(efx, intr, efx->net_dev, | 1492 | netif_vdbg(efx, intr, efx->net_dev, |
1493 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | 1493 | "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", |
1494 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | 1494 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); |
1495 | 1495 | ||
1496 | /* Check to see if we have a serious error condition */ | 1496 | /* Check to see if we have a serious error condition */ |
1497 | if (channel->channel == efx->fatal_irq_level) { | 1497 | if (channel->channel == efx->fatal_irq_level) { |
1498 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); | 1498 | syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); |
1499 | if (unlikely(syserr)) | 1499 | if (unlikely(syserr)) |
1500 | return efx_nic_fatal_interrupt(efx); | 1500 | return efx_nic_fatal_interrupt(efx); |
1501 | } | 1501 | } |
1502 | 1502 | ||
1503 | /* Schedule processing of the channel */ | 1503 | /* Schedule processing of the channel */ |
1504 | efx_schedule_channel(channel); | 1504 | efx_schedule_channel(channel); |
1505 | 1505 | ||
1506 | return IRQ_HANDLED; | 1506 | return IRQ_HANDLED; |
1507 | } | 1507 | } |
1508 | 1508 | ||
1509 | 1509 | ||
1510 | /* Setup RSS indirection table. | 1510 | /* Setup RSS indirection table. |
1511 | * This maps from the hash value of the packet to RXQ | 1511 | * This maps from the hash value of the packet to RXQ |
1512 | */ | 1512 | */ |
1513 | void efx_nic_push_rx_indir_table(struct efx_nic *efx) | 1513 | void efx_nic_push_rx_indir_table(struct efx_nic *efx) |
1514 | { | 1514 | { |
1515 | size_t i = 0; | 1515 | size_t i = 0; |
1516 | efx_dword_t dword; | 1516 | efx_dword_t dword; |
1517 | 1517 | ||
1518 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) | 1518 | if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) |
1519 | return; | 1519 | return; |
1520 | 1520 | ||
1521 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != | 1521 | BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) != |
1522 | FR_BZ_RX_INDIRECTION_TBL_ROWS); | 1522 | FR_BZ_RX_INDIRECTION_TBL_ROWS); |
1523 | 1523 | ||
1524 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { | 1524 | for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) { |
1525 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, | 1525 | EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, |
1526 | efx->rx_indir_table[i]); | 1526 | efx->rx_indir_table[i]); |
1527 | efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); | 1527 | efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i); |
1528 | } | 1528 | } |
1529 | } | 1529 | } |
1530 | 1530 | ||
1531 | /* Hook interrupt handler(s) | 1531 | /* Hook interrupt handler(s) |
1532 | * Try MSI and then legacy interrupts. | 1532 | * Try MSI and then legacy interrupts. |
1533 | */ | 1533 | */ |
1534 | int efx_nic_init_interrupt(struct efx_nic *efx) | 1534 | int efx_nic_init_interrupt(struct efx_nic *efx) |
1535 | { | 1535 | { |
1536 | struct efx_channel *channel; | 1536 | struct efx_channel *channel; |
1537 | int rc; | 1537 | int rc; |
1538 | 1538 | ||
1539 | if (!EFX_INT_MODE_USE_MSI(efx)) { | 1539 | if (!EFX_INT_MODE_USE_MSI(efx)) { |
1540 | irq_handler_t handler; | 1540 | irq_handler_t handler; |
1541 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 1541 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
1542 | handler = efx_legacy_interrupt; | 1542 | handler = efx_legacy_interrupt; |
1543 | else | 1543 | else |
1544 | handler = falcon_legacy_interrupt_a1; | 1544 | handler = falcon_legacy_interrupt_a1; |
1545 | 1545 | ||
1546 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, | 1546 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, |
1547 | efx->name, efx); | 1547 | efx->name, efx); |
1548 | if (rc) { | 1548 | if (rc) { |
1549 | netif_err(efx, drv, efx->net_dev, | 1549 | netif_err(efx, drv, efx->net_dev, |
1550 | "failed to hook legacy IRQ %d\n", | 1550 | "failed to hook legacy IRQ %d\n", |
1551 | efx->pci_dev->irq); | 1551 | efx->pci_dev->irq); |
1552 | goto fail1; | 1552 | goto fail1; |
1553 | } | 1553 | } |
1554 | return 0; | 1554 | return 0; |
1555 | } | 1555 | } |
1556 | 1556 | ||
1557 | /* Hook MSI or MSI-X interrupt */ | 1557 | /* Hook MSI or MSI-X interrupt */ |
1558 | efx_for_each_channel(channel, efx) { | 1558 | efx_for_each_channel(channel, efx) { |
1559 | rc = request_irq(channel->irq, efx_msi_interrupt, | 1559 | rc = request_irq(channel->irq, efx_msi_interrupt, |
1560 | IRQF_PROBE_SHARED, /* Not shared */ | 1560 | IRQF_PROBE_SHARED, /* Not shared */ |
1561 | efx->channel_name[channel->channel], | 1561 | efx->channel_name[channel->channel], |
1562 | &efx->channel[channel->channel]); | 1562 | &efx->channel[channel->channel]); |
1563 | if (rc) { | 1563 | if (rc) { |
1564 | netif_err(efx, drv, efx->net_dev, | 1564 | netif_err(efx, drv, efx->net_dev, |
1565 | "failed to hook IRQ %d\n", channel->irq); | 1565 | "failed to hook IRQ %d\n", channel->irq); |
1566 | goto fail2; | 1566 | goto fail2; |
1567 | } | 1567 | } |
1568 | } | 1568 | } |
1569 | 1569 | ||
1570 | return 0; | 1570 | return 0; |
1571 | 1571 | ||
1572 | fail2: | 1572 | fail2: |
1573 | efx_for_each_channel(channel, efx) | 1573 | efx_for_each_channel(channel, efx) |
1574 | free_irq(channel->irq, &efx->channel[channel->channel]); | 1574 | free_irq(channel->irq, &efx->channel[channel->channel]); |
1575 | fail1: | 1575 | fail1: |
1576 | return rc; | 1576 | return rc; |
1577 | } | 1577 | } |
1578 | 1578 | ||
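The error path above follows the usual goto-unwind idiom: undo whatever was hooked before the failure, then return the error. A standalone sketch of that pattern, with invented stand-ins for request_irq()/free_irq() (not the driver's exact unwind, which frees every channel's IRQ):

/* Illustrative only: goto-based unwind when hooking one resource per
 * channel.  The resources and names are invented for the example. */
#include <stdio.h>
#include <stdlib.h>

struct channel { void *irq_cookie; };

static int hook_irq(struct channel *ch)
{
        ch->irq_cookie = malloc(16);    /* stands in for request_irq() */
        return ch->irq_cookie ? 0 : -1;
}

static void unhook_irq(struct channel *ch)
{
        free(ch->irq_cookie);           /* stands in for free_irq() */
        ch->irq_cookie = NULL;
}

static int init_interrupts(struct channel *channels, int n)
{
        int i, rc = 0;

        for (i = 0; i < n; i++) {
                rc = hook_irq(&channels[i]);
                if (rc)
                        goto fail;      /* unwind everything hooked so far */
        }
        return 0;

fail:
        while (--i >= 0)
                unhook_irq(&channels[i]);
        return rc;
}

int main(void)
{
        struct channel ch[4] = { { 0 } };
        int rc = init_interrupts(ch, 4);
        int i;

        printf("init: %d\n", rc);
        if (rc == 0)
                for (i = 0; i < 4; i++)
                        unhook_irq(&ch[i]);     /* mirrors fini_interrupt() */
        return 0;
}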
1579 | void efx_nic_fini_interrupt(struct efx_nic *efx) | 1579 | void efx_nic_fini_interrupt(struct efx_nic *efx) |
1580 | { | 1580 | { |
1581 | struct efx_channel *channel; | 1581 | struct efx_channel *channel; |
1582 | efx_oword_t reg; | 1582 | efx_oword_t reg; |
1583 | 1583 | ||
1584 | /* Disable MSI/MSI-X interrupts */ | 1584 | /* Disable MSI/MSI-X interrupts */ |
1585 | efx_for_each_channel(channel, efx) { | 1585 | efx_for_each_channel(channel, efx) { |
1586 | if (channel->irq) | 1586 | if (channel->irq) |
1587 | free_irq(channel->irq, &efx->channel[channel->channel]); | 1587 | free_irq(channel->irq, &efx->channel[channel->channel]); |
1588 | } | 1588 | } |
1589 | 1589 | ||
1590 | /* ACK legacy interrupt */ | 1590 | /* ACK legacy interrupt */ |
1591 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 1591 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
1592 | efx_reado(efx, ®, FR_BZ_INT_ISR0); | 1592 | efx_reado(efx, ®, FR_BZ_INT_ISR0); |
1593 | else | 1593 | else |
1594 | falcon_irq_ack_a1(efx); | 1594 | falcon_irq_ack_a1(efx); |
1595 | 1595 | ||
1596 | /* Disable legacy interrupt */ | 1596 | /* Disable legacy interrupt */ |
1597 | if (efx->legacy_irq) | 1597 | if (efx->legacy_irq) |
1598 | free_irq(efx->legacy_irq, efx); | 1598 | free_irq(efx->legacy_irq, efx); |
1599 | } | 1599 | } |
1600 | 1600 | ||
1601 | u32 efx_nic_fpga_ver(struct efx_nic *efx) | 1601 | u32 efx_nic_fpga_ver(struct efx_nic *efx) |
1602 | { | 1602 | { |
1603 | efx_oword_t altera_build; | 1603 | efx_oword_t altera_build; |
1604 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); | 1604 | efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD); |
1605 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); | 1605 | return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER); |
1606 | } | 1606 | } |
1607 | 1607 | ||
1608 | void efx_nic_init_common(struct efx_nic *efx) | 1608 | void efx_nic_init_common(struct efx_nic *efx) |
1609 | { | 1609 | { |
1610 | efx_oword_t temp; | 1610 | efx_oword_t temp; |
1611 | 1611 | ||
1612 | /* Set positions of descriptor caches in SRAM. */ | 1612 | /* Set positions of descriptor caches in SRAM. */ |
1613 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, | 1613 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, |
1614 | efx->type->tx_dc_base / 8); | 1614 | efx->type->tx_dc_base / 8); |
1615 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); | 1615 | efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG); |
1616 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, | 1616 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, |
1617 | efx->type->rx_dc_base / 8); | 1617 | efx->type->rx_dc_base / 8); |
1618 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); | 1618 | efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG); |
1619 | 1619 | ||
1620 | /* Set TX descriptor cache size. */ | 1620 | /* Set TX descriptor cache size. */ |
1621 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); | 1621 | BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER)); |
1622 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | 1622 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER); |
1623 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); | 1623 | efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG); |
1624 | 1624 | ||
1625 | /* Set RX descriptor cache size. Set low watermark to size-8, as | 1625 | /* Set RX descriptor cache size. Set low watermark to size-8, as |
1626 | * this allows most efficient prefetching. | 1626 | * this allows most efficient prefetching. |
1627 | */ | 1627 | */ |
1628 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); | 1628 | BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER)); |
1629 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | 1629 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER); |
1630 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); | 1630 | efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG); |
1631 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | 1631 | EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8); |
1632 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); | 1632 | efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM); |
1633 | 1633 | ||
1634 | /* Program INT_KER address */ | 1634 | /* Program INT_KER address */ |
1635 | EFX_POPULATE_OWORD_2(temp, | 1635 | EFX_POPULATE_OWORD_2(temp, |
1636 | FRF_AZ_NORM_INT_VEC_DIS_KER, | 1636 | FRF_AZ_NORM_INT_VEC_DIS_KER, |
1637 | EFX_INT_MODE_USE_MSI(efx), | 1637 | EFX_INT_MODE_USE_MSI(efx), |
1638 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); | 1638 | FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); |
1639 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); | 1639 | efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); |
1640 | 1640 | ||
1641 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) | 1641 | if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx)) |
1642 | /* Use an interrupt level unused by event queues */ | 1642 | /* Use an interrupt level unused by event queues */ |
1643 | efx->fatal_irq_level = 0x1f; | 1643 | efx->fatal_irq_level = 0x1f; |
1644 | else | 1644 | else |
1645 | /* Use a valid MSI-X vector */ | 1645 | /* Use a valid MSI-X vector */ |
1646 | efx->fatal_irq_level = 0; | 1646 | efx->fatal_irq_level = 0; |
1647 | 1647 | ||
1648 | /* Enable all the genuinely fatal interrupts. (They are still | 1648 | /* Enable all the genuinely fatal interrupts. (They are still |
1649 | * masked by the overall interrupt mask, controlled by | 1649 | * masked by the overall interrupt mask, controlled by |
1650 | * falcon_interrupts()). | 1650 | * falcon_interrupts()). |
1651 | * | 1651 | * |
1652 | * Note: All other fatal interrupts are enabled | 1652 | * Note: All other fatal interrupts are enabled |
1653 | */ | 1653 | */ |
1654 | EFX_POPULATE_OWORD_3(temp, | 1654 | EFX_POPULATE_OWORD_3(temp, |
1655 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, | 1655 | FRF_AZ_ILL_ADR_INT_KER_EN, 1, |
1656 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, | 1656 | FRF_AZ_RBUF_OWN_INT_KER_EN, 1, |
1657 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); | 1657 | FRF_AZ_TBUF_OWN_INT_KER_EN, 1); |
1658 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) | 1658 | if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) |
1659 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); | 1659 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1); |
1660 | EFX_INVERT_OWORD(temp); | 1660 | EFX_INVERT_OWORD(temp); |
1661 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); | 1661 | efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); |
1662 | 1662 | ||
1663 | efx_nic_push_rx_indir_table(efx); | 1663 | efx_nic_push_rx_indir_table(efx); |
1664 | 1664 | ||
1665 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | 1665 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be |
1666 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | 1666 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. |
1667 | */ | 1667 | */ |
1668 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | 1668 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); |
1669 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); | 1669 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); |
1670 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); | 1670 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); |
1671 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); | 1671 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); |
1672 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); | 1672 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1); |
1673 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); | 1673 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); |
1674 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | 1674 | /* Enable SW_EV to inherit in char driver - assume harmless here */ |
1675 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); | 1675 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); |
1676 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | 1676 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
1677 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); | 1677 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); |
1678 | /* Disable hardware watchdog which can misfire */ | 1678 | /* Disable hardware watchdog which can misfire */ |
1679 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); | 1679 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); |
1680 | /* Squash TX of packets of 16 bytes or less */ | 1680 | /* Squash TX of packets of 16 bytes or less */ |
1681 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) | 1681 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) |
1682 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | 1682 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); |
1683 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | 1683 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); |
1684 | 1684 | ||
1685 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { | 1685 | if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { |
1686 | EFX_POPULATE_OWORD_4(temp, | 1686 | EFX_POPULATE_OWORD_4(temp, |
1687 | /* Default values */ | 1687 | /* Default values */ |
1688 | FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, | 1688 | FRF_BZ_TX_PACE_SB_NOT_AF, 0x15, |
1689 | FRF_BZ_TX_PACE_SB_AF, 0xb, | 1689 | FRF_BZ_TX_PACE_SB_AF, 0xb, |
1690 | FRF_BZ_TX_PACE_FB_BASE, 0, | 1690 | FRF_BZ_TX_PACE_FB_BASE, 0, |
1691 | /* Allow large pace values in the | 1691 | /* Allow large pace values in the |
1692 | * fast bin. */ | 1692 | * fast bin. */ |
1693 | FRF_BZ_TX_PACE_BIN_TH, | 1693 | FRF_BZ_TX_PACE_BIN_TH, |
1694 | FFE_BZ_TX_PACE_RESERVED); | 1694 | FFE_BZ_TX_PACE_RESERVED); |
1695 | efx_writeo(efx, &temp, FR_BZ_TX_PACE); | 1695 | efx_writeo(efx, &temp, FR_BZ_TX_PACE); |
1696 | } | 1696 | } |
1697 | } | 1697 | } |
1698 | 1698 | ||
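Much of the initialisation above consists of assembling several named bitfields into one register value before a single write. A hedged sketch of that field-packing idea (the field positions and widths below are invented for the example, not the real TX_PACE layout):

/* Illustrative sketch, not the driver's EFX_POPULATE_* macros: pack
 * named fields into one register word.  Field layout is made up. */
#include <stdint.h>
#include <stdio.h>

#define FIELD(val, lbn, width) \
        (((uint64_t)(val) & ((1ULL << (width)) - 1)) << (lbn))

int main(void)
{
        /* Example layout: SB_NOT_AF at bit 0 (width 6), SB_AF at bit 6
         * (width 6), BIN_TH at bit 12 (width 5). */
        uint64_t reg = FIELD(0x15, 0, 6) |
                       FIELD(0xb, 6, 6) |
                       FIELD(0x11, 12, 5);

        printf("packed register value: %#llx\n",
               (unsigned long long)reg);
        return 0;
}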
1699 | /* Register dump */ | 1699 | /* Register dump */ |
1700 | 1700 | ||
1701 | #define REGISTER_REVISION_A 1 | 1701 | #define REGISTER_REVISION_A 1 |
1702 | #define REGISTER_REVISION_B 2 | 1702 | #define REGISTER_REVISION_B 2 |
1703 | #define REGISTER_REVISION_C 3 | 1703 | #define REGISTER_REVISION_C 3 |
1704 | #define REGISTER_REVISION_Z 3 /* latest revision */ | 1704 | #define REGISTER_REVISION_Z 3 /* latest revision */ |
1705 | 1705 | ||
1706 | struct efx_nic_reg { | 1706 | struct efx_nic_reg { |
1707 | u32 offset:24; | 1707 | u32 offset:24; |
1708 | u32 min_revision:2, max_revision:2; | 1708 | u32 min_revision:2, max_revision:2; |
1709 | }; | 1709 | }; |
1710 | 1710 | ||
1711 | #define REGISTER(name, min_rev, max_rev) { \ | 1711 | #define REGISTER(name, min_rev, max_rev) { \ |
1712 | FR_ ## min_rev ## max_rev ## _ ## name, \ | 1712 | FR_ ## min_rev ## max_rev ## _ ## name, \ |
1713 | REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ | 1713 | REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev \ |
1714 | } | 1714 | } |
1715 | #define REGISTER_AA(name) REGISTER(name, A, A) | 1715 | #define REGISTER_AA(name) REGISTER(name, A, A) |
1716 | #define REGISTER_AB(name) REGISTER(name, A, B) | 1716 | #define REGISTER_AB(name) REGISTER(name, A, B) |
1717 | #define REGISTER_AZ(name) REGISTER(name, A, Z) | 1717 | #define REGISTER_AZ(name) REGISTER(name, A, Z) |
1718 | #define REGISTER_BB(name) REGISTER(name, B, B) | 1718 | #define REGISTER_BB(name) REGISTER(name, B, B) |
1719 | #define REGISTER_BZ(name) REGISTER(name, B, Z) | 1719 | #define REGISTER_BZ(name) REGISTER(name, B, Z) |
1720 | #define REGISTER_CZ(name) REGISTER(name, C, Z) | 1720 | #define REGISTER_CZ(name) REGISTER(name, C, Z) |
1721 | 1721 | ||
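The REGISTER_* macros rely on token pasting to derive both the offset symbol and the revision bounds from a single name. A self-contained sketch of the expansion (the register name and its offset are invented for the example):

/* Standalone sketch of the token-pasting pattern used above. */
#include <stdio.h>

#define REGISTER_REVISION_A 1
#define REGISTER_REVISION_Z 3

struct reg_desc {
        unsigned int offset;
        unsigned int min_revision, max_revision;
};

#define FR_AZ_EXAMPLE_REG 0x0010        /* invented offset */

#define REGISTER(name, min_rev, max_rev) {                              \
        FR_ ## min_rev ## max_rev ## _ ## name,                         \
        REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev    \
}
#define REGISTER_AZ(name) REGISTER(name, A, Z)

static const struct reg_desc regs[] = {
        REGISTER_AZ(EXAMPLE_REG),       /* -> { FR_AZ_EXAMPLE_REG, 1, 3 } */
};

int main(void)
{
        printf("offset %#x, revisions %u..%u\n",
               regs[0].offset, regs[0].min_revision, regs[0].max_revision);
        return 0;
}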
1722 | static const struct efx_nic_reg efx_nic_regs[] = { | 1722 | static const struct efx_nic_reg efx_nic_regs[] = { |
1723 | REGISTER_AZ(ADR_REGION), | 1723 | REGISTER_AZ(ADR_REGION), |
1724 | REGISTER_AZ(INT_EN_KER), | 1724 | REGISTER_AZ(INT_EN_KER), |
1725 | REGISTER_BZ(INT_EN_CHAR), | 1725 | REGISTER_BZ(INT_EN_CHAR), |
1726 | REGISTER_AZ(INT_ADR_KER), | 1726 | REGISTER_AZ(INT_ADR_KER), |
1727 | REGISTER_BZ(INT_ADR_CHAR), | 1727 | REGISTER_BZ(INT_ADR_CHAR), |
1728 | /* INT_ACK_KER is WO */ | 1728 | /* INT_ACK_KER is WO */ |
1729 | /* INT_ISR0 is RC */ | 1729 | /* INT_ISR0 is RC */ |
1730 | REGISTER_AZ(HW_INIT), | 1730 | REGISTER_AZ(HW_INIT), |
1731 | REGISTER_CZ(USR_EV_CFG), | 1731 | REGISTER_CZ(USR_EV_CFG), |
1732 | REGISTER_AB(EE_SPI_HCMD), | 1732 | REGISTER_AB(EE_SPI_HCMD), |
1733 | REGISTER_AB(EE_SPI_HADR), | 1733 | REGISTER_AB(EE_SPI_HADR), |
1734 | REGISTER_AB(EE_SPI_HDATA), | 1734 | REGISTER_AB(EE_SPI_HDATA), |
1735 | REGISTER_AB(EE_BASE_PAGE), | 1735 | REGISTER_AB(EE_BASE_PAGE), |
1736 | REGISTER_AB(EE_VPD_CFG0), | 1736 | REGISTER_AB(EE_VPD_CFG0), |
1737 | /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ | 1737 | /* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */ |
1738 | /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ | 1738 | /* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */ |
1739 | /* PCIE_CORE_INDIRECT is indirect */ | 1739 | /* PCIE_CORE_INDIRECT is indirect */ |
1740 | REGISTER_AB(NIC_STAT), | 1740 | REGISTER_AB(NIC_STAT), |
1741 | REGISTER_AB(GPIO_CTL), | 1741 | REGISTER_AB(GPIO_CTL), |
1742 | REGISTER_AB(GLB_CTL), | 1742 | REGISTER_AB(GLB_CTL), |
1743 | /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ | 1743 | /* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */ |
1744 | REGISTER_BZ(DP_CTRL), | 1744 | REGISTER_BZ(DP_CTRL), |
1745 | REGISTER_AZ(MEM_STAT), | 1745 | REGISTER_AZ(MEM_STAT), |
1746 | REGISTER_AZ(CS_DEBUG), | 1746 | REGISTER_AZ(CS_DEBUG), |
1747 | REGISTER_AZ(ALTERA_BUILD), | 1747 | REGISTER_AZ(ALTERA_BUILD), |
1748 | REGISTER_AZ(CSR_SPARE), | 1748 | REGISTER_AZ(CSR_SPARE), |
1749 | REGISTER_AB(PCIE_SD_CTL0123), | 1749 | REGISTER_AB(PCIE_SD_CTL0123), |
1750 | REGISTER_AB(PCIE_SD_CTL45), | 1750 | REGISTER_AB(PCIE_SD_CTL45), |
1751 | REGISTER_AB(PCIE_PCS_CTL_STAT), | 1751 | REGISTER_AB(PCIE_PCS_CTL_STAT), |
1752 | /* DEBUG_DATA_OUT is not used */ | 1752 | /* DEBUG_DATA_OUT is not used */ |
1753 | /* DRV_EV is WO */ | 1753 | /* DRV_EV is WO */ |
1754 | REGISTER_AZ(EVQ_CTL), | 1754 | REGISTER_AZ(EVQ_CTL), |
1755 | REGISTER_AZ(EVQ_CNT1), | 1755 | REGISTER_AZ(EVQ_CNT1), |
1756 | REGISTER_AZ(EVQ_CNT2), | 1756 | REGISTER_AZ(EVQ_CNT2), |
1757 | REGISTER_AZ(BUF_TBL_CFG), | 1757 | REGISTER_AZ(BUF_TBL_CFG), |
1758 | REGISTER_AZ(SRM_RX_DC_CFG), | 1758 | REGISTER_AZ(SRM_RX_DC_CFG), |
1759 | REGISTER_AZ(SRM_TX_DC_CFG), | 1759 | REGISTER_AZ(SRM_TX_DC_CFG), |
1760 | REGISTER_AZ(SRM_CFG), | 1760 | REGISTER_AZ(SRM_CFG), |
1761 | /* BUF_TBL_UPD is WO */ | 1761 | /* BUF_TBL_UPD is WO */ |
1762 | REGISTER_AZ(SRM_UPD_EVQ), | 1762 | REGISTER_AZ(SRM_UPD_EVQ), |
1763 | REGISTER_AZ(SRAM_PARITY), | 1763 | REGISTER_AZ(SRAM_PARITY), |
1764 | REGISTER_AZ(RX_CFG), | 1764 | REGISTER_AZ(RX_CFG), |
1765 | REGISTER_BZ(RX_FILTER_CTL), | 1765 | REGISTER_BZ(RX_FILTER_CTL), |
1766 | /* RX_FLUSH_DESCQ is WO */ | 1766 | /* RX_FLUSH_DESCQ is WO */ |
1767 | REGISTER_AZ(RX_DC_CFG), | 1767 | REGISTER_AZ(RX_DC_CFG), |
1768 | REGISTER_AZ(RX_DC_PF_WM), | 1768 | REGISTER_AZ(RX_DC_PF_WM), |
1769 | REGISTER_BZ(RX_RSS_TKEY), | 1769 | REGISTER_BZ(RX_RSS_TKEY), |
1770 | /* RX_NODESC_DROP is RC */ | 1770 | /* RX_NODESC_DROP is RC */ |
1771 | REGISTER_AA(RX_SELF_RST), | 1771 | REGISTER_AA(RX_SELF_RST), |
1772 | /* RX_DEBUG, RX_PUSH_DROP are not used */ | 1772 | /* RX_DEBUG, RX_PUSH_DROP are not used */ |
1773 | REGISTER_CZ(RX_RSS_IPV6_REG1), | 1773 | REGISTER_CZ(RX_RSS_IPV6_REG1), |
1774 | REGISTER_CZ(RX_RSS_IPV6_REG2), | 1774 | REGISTER_CZ(RX_RSS_IPV6_REG2), |
1775 | REGISTER_CZ(RX_RSS_IPV6_REG3), | 1775 | REGISTER_CZ(RX_RSS_IPV6_REG3), |
1776 | /* TX_FLUSH_DESCQ is WO */ | 1776 | /* TX_FLUSH_DESCQ is WO */ |
1777 | REGISTER_AZ(TX_DC_CFG), | 1777 | REGISTER_AZ(TX_DC_CFG), |
1778 | REGISTER_AA(TX_CHKSM_CFG), | 1778 | REGISTER_AA(TX_CHKSM_CFG), |
1779 | REGISTER_AZ(TX_CFG), | 1779 | REGISTER_AZ(TX_CFG), |
1780 | /* TX_PUSH_DROP is not used */ | 1780 | /* TX_PUSH_DROP is not used */ |
1781 | REGISTER_AZ(TX_RESERVED), | 1781 | REGISTER_AZ(TX_RESERVED), |
1782 | REGISTER_BZ(TX_PACE), | 1782 | REGISTER_BZ(TX_PACE), |
1783 | /* TX_PACE_DROP_QID is RC */ | 1783 | /* TX_PACE_DROP_QID is RC */ |
1784 | REGISTER_BB(TX_VLAN), | 1784 | REGISTER_BB(TX_VLAN), |
1785 | REGISTER_BZ(TX_IPFIL_PORTEN), | 1785 | REGISTER_BZ(TX_IPFIL_PORTEN), |
1786 | REGISTER_AB(MD_TXD), | 1786 | REGISTER_AB(MD_TXD), |
1787 | REGISTER_AB(MD_RXD), | 1787 | REGISTER_AB(MD_RXD), |
1788 | REGISTER_AB(MD_CS), | 1788 | REGISTER_AB(MD_CS), |
1789 | REGISTER_AB(MD_PHY_ADR), | 1789 | REGISTER_AB(MD_PHY_ADR), |
1790 | REGISTER_AB(MD_ID), | 1790 | REGISTER_AB(MD_ID), |
1791 | /* MD_STAT is RC */ | 1791 | /* MD_STAT is RC */ |
1792 | REGISTER_AB(MAC_STAT_DMA), | 1792 | REGISTER_AB(MAC_STAT_DMA), |
1793 | REGISTER_AB(MAC_CTRL), | 1793 | REGISTER_AB(MAC_CTRL), |
1794 | REGISTER_BB(GEN_MODE), | 1794 | REGISTER_BB(GEN_MODE), |
1795 | REGISTER_AB(MAC_MC_HASH_REG0), | 1795 | REGISTER_AB(MAC_MC_HASH_REG0), |
1796 | REGISTER_AB(MAC_MC_HASH_REG1), | 1796 | REGISTER_AB(MAC_MC_HASH_REG1), |
1797 | REGISTER_AB(GM_CFG1), | 1797 | REGISTER_AB(GM_CFG1), |
1798 | REGISTER_AB(GM_CFG2), | 1798 | REGISTER_AB(GM_CFG2), |
1799 | /* GM_IPG and GM_HD are not used */ | 1799 | /* GM_IPG and GM_HD are not used */ |
1800 | REGISTER_AB(GM_MAX_FLEN), | 1800 | REGISTER_AB(GM_MAX_FLEN), |
1801 | /* GM_TEST is not used */ | 1801 | /* GM_TEST is not used */ |
1802 | REGISTER_AB(GM_ADR1), | 1802 | REGISTER_AB(GM_ADR1), |
1803 | REGISTER_AB(GM_ADR2), | 1803 | REGISTER_AB(GM_ADR2), |
1804 | REGISTER_AB(GMF_CFG0), | 1804 | REGISTER_AB(GMF_CFG0), |
1805 | REGISTER_AB(GMF_CFG1), | 1805 | REGISTER_AB(GMF_CFG1), |
1806 | REGISTER_AB(GMF_CFG2), | 1806 | REGISTER_AB(GMF_CFG2), |
1807 | REGISTER_AB(GMF_CFG3), | 1807 | REGISTER_AB(GMF_CFG3), |
1808 | REGISTER_AB(GMF_CFG4), | 1808 | REGISTER_AB(GMF_CFG4), |
1809 | REGISTER_AB(GMF_CFG5), | 1809 | REGISTER_AB(GMF_CFG5), |
1810 | REGISTER_BB(TX_SRC_MAC_CTL), | 1810 | REGISTER_BB(TX_SRC_MAC_CTL), |
1811 | REGISTER_AB(XM_ADR_LO), | 1811 | REGISTER_AB(XM_ADR_LO), |
1812 | REGISTER_AB(XM_ADR_HI), | 1812 | REGISTER_AB(XM_ADR_HI), |
1813 | REGISTER_AB(XM_GLB_CFG), | 1813 | REGISTER_AB(XM_GLB_CFG), |
1814 | REGISTER_AB(XM_TX_CFG), | 1814 | REGISTER_AB(XM_TX_CFG), |
1815 | REGISTER_AB(XM_RX_CFG), | 1815 | REGISTER_AB(XM_RX_CFG), |
1816 | REGISTER_AB(XM_MGT_INT_MASK), | 1816 | REGISTER_AB(XM_MGT_INT_MASK), |
1817 | REGISTER_AB(XM_FC), | 1817 | REGISTER_AB(XM_FC), |
1818 | REGISTER_AB(XM_PAUSE_TIME), | 1818 | REGISTER_AB(XM_PAUSE_TIME), |
1819 | REGISTER_AB(XM_TX_PARAM), | 1819 | REGISTER_AB(XM_TX_PARAM), |
1820 | REGISTER_AB(XM_RX_PARAM), | 1820 | REGISTER_AB(XM_RX_PARAM), |
1821 | /* XM_MGT_INT_MSK (note no 'A') is RC */ | 1821 | /* XM_MGT_INT_MSK (note no 'A') is RC */ |
1822 | REGISTER_AB(XX_PWR_RST), | 1822 | REGISTER_AB(XX_PWR_RST), |
1823 | REGISTER_AB(XX_SD_CTL), | 1823 | REGISTER_AB(XX_SD_CTL), |
1824 | REGISTER_AB(XX_TXDRV_CTL), | 1824 | REGISTER_AB(XX_TXDRV_CTL), |
1825 | /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ | 1825 | /* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */ |
1826 | /* XX_CORE_STAT is partly RC */ | 1826 | /* XX_CORE_STAT is partly RC */ |
1827 | }; | 1827 | }; |
1828 | 1828 | ||
1829 | struct efx_nic_reg_table { | 1829 | struct efx_nic_reg_table { |
1830 | u32 offset:24; | 1830 | u32 offset:24; |
1831 | u32 min_revision:2, max_revision:2; | 1831 | u32 min_revision:2, max_revision:2; |
1832 | u32 step:6, rows:21; | 1832 | u32 step:6, rows:21; |
1833 | }; | 1833 | }; |
1834 | 1834 | ||
1835 | #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ | 1835 | #define REGISTER_TABLE_DIMENSIONS(_, offset, min_rev, max_rev, step, rows) { \ |
1836 | offset, \ | 1836 | offset, \ |
1837 | REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ | 1837 | REGISTER_REVISION_ ## min_rev, REGISTER_REVISION_ ## max_rev, \ |
1838 | step, rows \ | 1838 | step, rows \ |
1839 | } | 1839 | } |
1840 | #define REGISTER_TABLE(name, min_rev, max_rev) \ | 1840 | #define REGISTER_TABLE(name, min_rev, max_rev) \ |
1841 | REGISTER_TABLE_DIMENSIONS( \ | 1841 | REGISTER_TABLE_DIMENSIONS( \ |
1842 | name, FR_ ## min_rev ## max_rev ## _ ## name, \ | 1842 | name, FR_ ## min_rev ## max_rev ## _ ## name, \ |
1843 | min_rev, max_rev, \ | 1843 | min_rev, max_rev, \ |
1844 | FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ | 1844 | FR_ ## min_rev ## max_rev ## _ ## name ## _STEP, \ |
1845 | FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) | 1845 | FR_ ## min_rev ## max_rev ## _ ## name ## _ROWS) |
1846 | #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) | 1846 | #define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, A, A) |
1847 | #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) | 1847 | #define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, A, Z) |
1848 | #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) | 1848 | #define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, B, B) |
1849 | #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) | 1849 | #define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, B, Z) |
1850 | #define REGISTER_TABLE_BB_CZ(name) \ | 1850 | #define REGISTER_TABLE_BB_CZ(name) \ |
1851 | REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ | 1851 | REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, B, B, \ |
1852 | FR_BZ_ ## name ## _STEP, \ | 1852 | FR_BZ_ ## name ## _STEP, \ |
1853 | FR_BB_ ## name ## _ROWS), \ | 1853 | FR_BB_ ## name ## _ROWS), \ |
1854 | REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ | 1854 | REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, C, Z, \ |
1855 | FR_BZ_ ## name ## _STEP, \ | 1855 | FR_BZ_ ## name ## _STEP, \ |
1856 | FR_CZ_ ## name ## _ROWS) | 1856 | FR_CZ_ ## name ## _ROWS) |
1857 | #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) | 1857 | #define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, C, Z) |
1858 | 1858 | ||
1859 | static const struct efx_nic_reg_table efx_nic_reg_tables[] = { | 1859 | static const struct efx_nic_reg_table efx_nic_reg_tables[] = { |
1860 | /* DRIVER is not used */ | 1860 | /* DRIVER is not used */ |
1861 | /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ | 1861 | /* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */ |
1862 | REGISTER_TABLE_BB(TX_IPFIL_TBL), | 1862 | REGISTER_TABLE_BB(TX_IPFIL_TBL), |
1863 | REGISTER_TABLE_BB(TX_SRC_MAC_TBL), | 1863 | REGISTER_TABLE_BB(TX_SRC_MAC_TBL), |
1864 | REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), | 1864 | REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER), |
1865 | REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), | 1865 | REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL), |
1866 | REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), | 1866 | REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER), |
1867 | REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), | 1867 | REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL), |
1868 | REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), | 1868 | REGISTER_TABLE_AA(EVQ_PTR_TBL_KER), |
1869 | REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), | 1869 | REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL), |
1870 | /* We can't reasonably read all of the buffer table (up to 8MB!). | 1870 | /* We can't reasonably read all of the buffer table (up to 8MB!). |
1871 | * However this driver will only use a few entries. Reading | 1871 | * However this driver will only use a few entries. Reading |
1872 | * 1K entries allows for some expansion of queue count and | 1872 | * 1K entries allows for some expansion of queue count and |
1873 | * size before we need to change the version. */ | 1873 | * size before we need to change the version. */ |
1874 | REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, | 1874 | REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER, |
1875 | A, A, 8, 1024), | 1875 | A, A, 8, 1024), |
1876 | REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, | 1876 | REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL, |
1877 | B, Z, 8, 1024), | 1877 | B, Z, 8, 1024), |
1878 | REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), | 1878 | REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0), |
1879 | REGISTER_TABLE_BB_CZ(TIMER_TBL), | 1879 | REGISTER_TABLE_BB_CZ(TIMER_TBL), |
1880 | REGISTER_TABLE_BB_CZ(TX_PACE_TBL), | 1880 | REGISTER_TABLE_BB_CZ(TX_PACE_TBL), |
1881 | REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), | 1881 | REGISTER_TABLE_BZ(RX_INDIRECTION_TBL), |
1882 | /* TX_FILTER_TBL0 is huge and not used by this driver */ | 1882 | /* TX_FILTER_TBL0 is huge and not used by this driver */ |
1883 | REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), | 1883 | REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0), |
1884 | REGISTER_TABLE_CZ(MC_TREG_SMEM), | 1884 | REGISTER_TABLE_CZ(MC_TREG_SMEM), |
1885 | /* MSIX_PBA_TABLE is not mapped */ | 1885 | /* MSIX_PBA_TABLE is not mapped */ |
1886 | /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ | 1886 | /* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */ |
1887 | REGISTER_TABLE_BZ(RX_FILTER_TBL0), | 1887 | REGISTER_TABLE_BZ(RX_FILTER_TBL0), |
1888 | }; | 1888 | }; |
1889 | 1889 | ||
1890 | size_t efx_nic_get_regs_len(struct efx_nic *efx) | 1890 | size_t efx_nic_get_regs_len(struct efx_nic *efx) |
1891 | { | 1891 | { |
1892 | const struct efx_nic_reg *reg; | 1892 | const struct efx_nic_reg *reg; |
1893 | const struct efx_nic_reg_table *table; | 1893 | const struct efx_nic_reg_table *table; |
1894 | size_t len = 0; | 1894 | size_t len = 0; |
1895 | 1895 | ||
1896 | for (reg = efx_nic_regs; | 1896 | for (reg = efx_nic_regs; |
1897 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); | 1897 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); |
1898 | reg++) | 1898 | reg++) |
1899 | if (efx->type->revision >= reg->min_revision && | 1899 | if (efx->type->revision >= reg->min_revision && |
1900 | efx->type->revision <= reg->max_revision) | 1900 | efx->type->revision <= reg->max_revision) |
1901 | len += sizeof(efx_oword_t); | 1901 | len += sizeof(efx_oword_t); |
1902 | 1902 | ||
1903 | for (table = efx_nic_reg_tables; | 1903 | for (table = efx_nic_reg_tables; |
1904 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); | 1904 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); |
1905 | table++) | 1905 | table++) |
1906 | if (efx->type->revision >= table->min_revision && | 1906 | if (efx->type->revision >= table->min_revision && |
1907 | efx->type->revision <= table->max_revision) | 1907 | efx->type->revision <= table->max_revision) |
1908 | len += table->rows * min_t(size_t, table->step, 16); | 1908 | len += table->rows * min_t(size_t, table->step, 16); |
1909 | 1909 | ||
1910 | return len; | 1910 | return len; |
1911 | } | 1911 | } |
1912 | 1912 | ||
1913 | void efx_nic_get_regs(struct efx_nic *efx, void *buf) | 1913 | void efx_nic_get_regs(struct efx_nic *efx, void *buf) |
1914 | { | 1914 | { |
1915 | const struct efx_nic_reg *reg; | 1915 | const struct efx_nic_reg *reg; |
1916 | const struct efx_nic_reg_table *table; | 1916 | const struct efx_nic_reg_table *table; |
1917 | 1917 | ||
1918 | for (reg = efx_nic_regs; | 1918 | for (reg = efx_nic_regs; |
1919 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); | 1919 | reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs); |
1920 | reg++) { | 1920 | reg++) { |
1921 | if (efx->type->revision >= reg->min_revision && | 1921 | if (efx->type->revision >= reg->min_revision && |
1922 | efx->type->revision <= reg->max_revision) { | 1922 | efx->type->revision <= reg->max_revision) { |
1923 | efx_reado(efx, (efx_oword_t *)buf, reg->offset); | 1923 | efx_reado(efx, (efx_oword_t *)buf, reg->offset); |
1924 | buf += sizeof(efx_oword_t); | 1924 | buf += sizeof(efx_oword_t); |
1925 | } | 1925 | } |
1926 | } | 1926 | } |
1927 | 1927 | ||
1928 | for (table = efx_nic_reg_tables; | 1928 | for (table = efx_nic_reg_tables; |
1929 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); | 1929 | table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables); |
1930 | table++) { | 1930 | table++) { |
1931 | size_t size, i; | 1931 | size_t size, i; |
1932 | 1932 | ||
1933 | if (!(efx->type->revision >= table->min_revision && | 1933 | if (!(efx->type->revision >= table->min_revision && |
1934 | efx->type->revision <= table->max_revision)) | 1934 | efx->type->revision <= table->max_revision)) |
1935 | continue; | 1935 | continue; |
1936 | 1936 | ||
1937 | size = min_t(size_t, table->step, 16); | 1937 | size = min_t(size_t, table->step, 16); |
1938 | 1938 | ||
1939 | if (table->offset >= efx->type->mem_map_size) { | ||
1940 | /* No longer mapped; return dummy data */ | ||
1941 | memcpy(buf, "\xde\xc0\xad\xde", 4); | ||
1942 | buf += table->rows * size; | ||
1943 | continue; | ||
1944 | } | ||
1945 | |||
1946 | for (i = 0; i < table->rows; i++) { | 1939 | for (i = 0; i < table->rows; i++) { |
1947 | switch (table->step) { | 1940 | switch (table->step) { |
1948 | case 4: /* 32-bit register or SRAM */ | 1941 | case 4: /* 32-bit register or SRAM */ |
1949 | efx_readd_table(efx, buf, table->offset, i); | 1942 | efx_readd_table(efx, buf, table->offset, i); |
1950 | break; | 1943 | break; |
1951 | case 8: /* 64-bit SRAM */ | 1944 | case 8: /* 64-bit SRAM */ |
1952 | efx_sram_readq(efx, | 1945 | efx_sram_readq(efx, |
1953 | efx->membase + table->offset, | 1946 | efx->membase + table->offset, |
1954 | buf, i); | 1947 | buf, i); |
1955 | break; | 1948 | break; |
1956 | case 16: /* 128-bit register */ | 1949 | case 16: /* 128-bit register */ |
1957 | efx_reado_table(efx, buf, table->offset, i); | 1950 | efx_reado_table(efx, buf, table->offset, i); |
1958 | break; | 1951 | break; |
1959 | case 32: /* 128-bit register, interleaved */ | 1952 | case 32: /* 128-bit register, interleaved */ |
1960 | efx_reado_table(efx, buf, table->offset, 2 * i); | 1953 | efx_reado_table(efx, buf, table->offset, 2 * i); |
1961 | break; | 1954 | break; |
1962 | default: | 1955 | default: |
1963 | WARN_ON(1); | 1956 | WARN_ON(1); |
1964 | return; | 1957 | return; |
1965 | } | 1958 | } |
1966 | buf += size; | 1959 | buf += size; |
1967 | } | 1960 | } |
1968 | } | 1961 | } |
1969 | } | 1962 | } |
1970 | 1963 |
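For reference, the dump logic in efx_nic_get_regs_len() and efx_nic_get_regs() amounts to walking each applicable table and keeping at most 16 bytes (one efx_oword_t) per row. A standalone sketch of that arithmetic (buffer contents and strides are assumptions for illustration):

/* Illustrative sketch, not driver code: walk a register table whose rows
 * are spaced 'step' bytes apart, keeping at most 16 bytes per row. */
#include <stdio.h>
#include <string.h>

static size_t dump_table(const unsigned char *regs, size_t step, size_t rows,
                         unsigned char *buf)
{
        size_t size = step < 16 ? step : 16;    /* min_t(size_t, step, 16) */
        size_t i;

        for (i = 0; i < rows; i++)
                memcpy(buf + i * size,          /* packed output */
                       regs + i * step,         /* strided input */
                       size);
        return rows * size;                     /* matches get_regs_len() */
}

int main(void)
{
        unsigned char fake_regs[8 * 32], buf[8 * 16];

        memset(fake_regs, 0x5a, sizeof(fake_regs));
        printf("dumped %zu bytes\n", dump_table(fake_regs, 32, 8, buf));
        return 0;
}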
drivers/net/sfc/nic.h
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2011 Solarflare Communications Inc. | 4 | * Copyright 2006-2011 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #ifndef EFX_NIC_H | 11 | #ifndef EFX_NIC_H |
12 | #define EFX_NIC_H | 12 | #define EFX_NIC_H |
13 | 13 | ||
14 | #include <linux/i2c-algo-bit.h> | 14 | #include <linux/i2c-algo-bit.h> |
15 | #include "net_driver.h" | 15 | #include "net_driver.h" |
16 | #include "efx.h" | 16 | #include "efx.h" |
17 | #include "mcdi.h" | 17 | #include "mcdi.h" |
18 | #include "spi.h" | 18 | #include "spi.h" |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * Falcon hardware control | 21 | * Falcon hardware control |
22 | */ | 22 | */ |
23 | 23 | ||
24 | enum { | 24 | enum { |
25 | EFX_REV_FALCON_A0 = 0, | 25 | EFX_REV_FALCON_A0 = 0, |
26 | EFX_REV_FALCON_A1 = 1, | 26 | EFX_REV_FALCON_A1 = 1, |
27 | EFX_REV_FALCON_B0 = 2, | 27 | EFX_REV_FALCON_B0 = 2, |
28 | EFX_REV_SIENA_A0 = 3, | 28 | EFX_REV_SIENA_A0 = 3, |
29 | }; | 29 | }; |
30 | 30 | ||
31 | static inline int efx_nic_rev(struct efx_nic *efx) | 31 | static inline int efx_nic_rev(struct efx_nic *efx) |
32 | { | 32 | { |
33 | return efx->type->revision; | 33 | return efx->type->revision; |
34 | } | 34 | } |
35 | 35 | ||
36 | extern u32 efx_nic_fpga_ver(struct efx_nic *efx); | 36 | extern u32 efx_nic_fpga_ver(struct efx_nic *efx); |
37 | 37 | ||
38 | static inline bool efx_nic_has_mc(struct efx_nic *efx) | 38 | static inline bool efx_nic_has_mc(struct efx_nic *efx) |
39 | { | 39 | { |
40 | return efx_nic_rev(efx) >= EFX_REV_SIENA_A0; | 40 | return efx_nic_rev(efx) >= EFX_REV_SIENA_A0; |
41 | } | 41 | } |
42 | /* NIC has two interlinked PCI functions for the same port. */ | 42 | /* NIC has two interlinked PCI functions for the same port. */ |
43 | static inline bool efx_nic_is_dual_func(struct efx_nic *efx) | 43 | static inline bool efx_nic_is_dual_func(struct efx_nic *efx) |
44 | { | 44 | { |
45 | return efx_nic_rev(efx) < EFX_REV_FALCON_B0; | 45 | return efx_nic_rev(efx) < EFX_REV_FALCON_B0; |
46 | } | 46 | } |
47 | 47 | ||
48 | enum { | 48 | enum { |
49 | PHY_TYPE_NONE = 0, | 49 | PHY_TYPE_NONE = 0, |
50 | PHY_TYPE_TXC43128 = 1, | 50 | PHY_TYPE_TXC43128 = 1, |
51 | PHY_TYPE_88E1111 = 2, | 51 | PHY_TYPE_88E1111 = 2, |
52 | PHY_TYPE_SFX7101 = 3, | 52 | PHY_TYPE_SFX7101 = 3, |
53 | PHY_TYPE_QT2022C2 = 4, | 53 | PHY_TYPE_QT2022C2 = 4, |
54 | PHY_TYPE_PM8358 = 6, | 54 | PHY_TYPE_PM8358 = 6, |
55 | PHY_TYPE_SFT9001A = 8, | 55 | PHY_TYPE_SFT9001A = 8, |
56 | PHY_TYPE_QT2025C = 9, | 56 | PHY_TYPE_QT2025C = 9, |
57 | PHY_TYPE_SFT9001B = 10, | 57 | PHY_TYPE_SFT9001B = 10, |
58 | }; | 58 | }; |
59 | 59 | ||
60 | #define FALCON_XMAC_LOOPBACKS \ | 60 | #define FALCON_XMAC_LOOPBACKS \ |
61 | ((1 << LOOPBACK_XGMII) | \ | 61 | ((1 << LOOPBACK_XGMII) | \ |
62 | (1 << LOOPBACK_XGXS) | \ | 62 | (1 << LOOPBACK_XGXS) | \ |
63 | (1 << LOOPBACK_XAUI)) | 63 | (1 << LOOPBACK_XAUI)) |
64 | 64 | ||
65 | #define FALCON_GMAC_LOOPBACKS \ | 65 | #define FALCON_GMAC_LOOPBACKS \ |
66 | (1 << LOOPBACK_GMAC) | 66 | (1 << LOOPBACK_GMAC) |
67 | 67 | ||
68 | /** | 68 | /** |
69 | * struct falcon_board_type - board operations and type information | 69 | * struct falcon_board_type - board operations and type information |
70 | * @id: Board type id, as found in NVRAM | 70 | * @id: Board type id, as found in NVRAM |
71 | * @ref_model: Model number of Solarflare reference design | 71 | * @ref_model: Model number of Solarflare reference design |
72 | * @gen_type: Generic board type description | 72 | * @gen_type: Generic board type description |
73 | * @init: Allocate resources and initialise peripheral hardware | 73 | * @init: Allocate resources and initialise peripheral hardware |
74 | * @init_phy: Do board-specific PHY initialisation | 74 | * @init_phy: Do board-specific PHY initialisation |
75 | * @fini: Shut down hardware and free resources | 75 | * @fini: Shut down hardware and free resources |
76 | * @set_id_led: Set state of identifying LED or revert to automatic function | 76 | * @set_id_led: Set state of identifying LED or revert to automatic function |
77 | * @monitor: Board-specific health check function | 77 | * @monitor: Board-specific health check function |
78 | */ | 78 | */ |
79 | struct falcon_board_type { | 79 | struct falcon_board_type { |
80 | u8 id; | 80 | u8 id; |
81 | const char *ref_model; | 81 | const char *ref_model; |
82 | const char *gen_type; | 82 | const char *gen_type; |
83 | int (*init) (struct efx_nic *nic); | 83 | int (*init) (struct efx_nic *nic); |
84 | void (*init_phy) (struct efx_nic *efx); | 84 | void (*init_phy) (struct efx_nic *efx); |
85 | void (*fini) (struct efx_nic *nic); | 85 | void (*fini) (struct efx_nic *nic); |
86 | void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode); | 86 | void (*set_id_led) (struct efx_nic *efx, enum efx_led_mode mode); |
87 | int (*monitor) (struct efx_nic *nic); | 87 | int (*monitor) (struct efx_nic *nic); |
88 | }; | 88 | }; |
89 | 89 | ||
90 | /** | 90 | /** |
91 | * struct falcon_board - board information | 91 | * struct falcon_board - board information |
92 | * @type: Type of board | 92 | * @type: Type of board |
93 | * @major: Major rev. ('A', 'B' ...) | 93 | * @major: Major rev. ('A', 'B' ...) |
94 | * @minor: Minor rev. (0, 1, ...) | 94 | * @minor: Minor rev. (0, 1, ...) |
95 | * @i2c_adap: I2C adapter for on-board peripherals | 95 | * @i2c_adap: I2C adapter for on-board peripherals |
96 | * @i2c_data: Data for bit-banging algorithm | 96 | * @i2c_data: Data for bit-banging algorithm |
97 | * @hwmon_client: I2C client for hardware monitor | 97 | * @hwmon_client: I2C client for hardware monitor |
98 | * @ioexp_client: I2C client for power/port control | 98 | * @ioexp_client: I2C client for power/port control |
99 | */ | 99 | */ |
100 | struct falcon_board { | 100 | struct falcon_board { |
101 | const struct falcon_board_type *type; | 101 | const struct falcon_board_type *type; |
102 | int major; | 102 | int major; |
103 | int minor; | 103 | int minor; |
104 | struct i2c_adapter i2c_adap; | 104 | struct i2c_adapter i2c_adap; |
105 | struct i2c_algo_bit_data i2c_data; | 105 | struct i2c_algo_bit_data i2c_data; |
106 | struct i2c_client *hwmon_client, *ioexp_client; | 106 | struct i2c_client *hwmon_client, *ioexp_client; |
107 | }; | 107 | }; |
108 | 108 | ||
109 | /** | 109 | /** |
110 | * struct falcon_nic_data - Falcon NIC state | 110 | * struct falcon_nic_data - Falcon NIC state |
111 | * @pci_dev2: Secondary function of Falcon A | 111 | * @pci_dev2: Secondary function of Falcon A |
112 | * @board: Board state and functions | 112 | * @board: Board state and functions |
113 | * @stats_disable_count: Nest count for disabling statistics fetches | 113 | * @stats_disable_count: Nest count for disabling statistics fetches |
114 | * @stats_pending: Is there a pending DMA of MAC statistics. | 114 | * @stats_pending: Is there a pending DMA of MAC statistics. |
115 | * @stats_timer: A timer for regularly fetching MAC statistics. | 115 | * @stats_timer: A timer for regularly fetching MAC statistics. |
116 | * @stats_dma_done: Pointer to the flag which indicates DMA completion. | 116 | * @stats_dma_done: Pointer to the flag which indicates DMA completion. |
117 | * @spi_flash: SPI flash device | 117 | * @spi_flash: SPI flash device |
118 | * @spi_eeprom: SPI EEPROM device | 118 | * @spi_eeprom: SPI EEPROM device |
119 | * @spi_lock: SPI bus lock | 119 | * @spi_lock: SPI bus lock |
120 | * @mdio_lock: MDIO bus lock | 120 | * @mdio_lock: MDIO bus lock |
121 | * @xmac_poll_required: XMAC link state needs polling | 121 | * @xmac_poll_required: XMAC link state needs polling |
122 | */ | 122 | */ |
123 | struct falcon_nic_data { | 123 | struct falcon_nic_data { |
124 | struct pci_dev *pci_dev2; | 124 | struct pci_dev *pci_dev2; |
125 | struct falcon_board board; | 125 | struct falcon_board board; |
126 | unsigned int stats_disable_count; | 126 | unsigned int stats_disable_count; |
127 | bool stats_pending; | 127 | bool stats_pending; |
128 | struct timer_list stats_timer; | 128 | struct timer_list stats_timer; |
129 | u32 *stats_dma_done; | 129 | u32 *stats_dma_done; |
130 | struct efx_spi_device spi_flash; | 130 | struct efx_spi_device spi_flash; |
131 | struct efx_spi_device spi_eeprom; | 131 | struct efx_spi_device spi_eeprom; |
132 | struct mutex spi_lock; | 132 | struct mutex spi_lock; |
133 | struct mutex mdio_lock; | 133 | struct mutex mdio_lock; |
134 | bool xmac_poll_required; | 134 | bool xmac_poll_required; |
135 | }; | 135 | }; |
136 | 136 | ||
137 | static inline struct falcon_board *falcon_board(struct efx_nic *efx) | 137 | static inline struct falcon_board *falcon_board(struct efx_nic *efx) |
138 | { | 138 | { |
139 | struct falcon_nic_data *data = efx->nic_data; | 139 | struct falcon_nic_data *data = efx->nic_data; |
140 | return &data->board; | 140 | return &data->board; |
141 | } | 141 | } |
142 | 142 | ||
143 | /** | 143 | /** |
144 | * struct siena_nic_data - Siena NIC state | 144 | * struct siena_nic_data - Siena NIC state |
145 | * @mcdi: Management-Controller-to-Driver Interface | 145 | * @mcdi: Management-Controller-to-Driver Interface |
146 | * @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable. | ||
147 | * @wol_filter_id: Wake-on-LAN packet filter id | 146 | * @wol_filter_id: Wake-on-LAN packet filter id |
148 | */ | 147 | */ |
149 | struct siena_nic_data { | 148 | struct siena_nic_data { |
150 | struct efx_mcdi_iface mcdi; | 149 | struct efx_mcdi_iface mcdi; |
151 | void __iomem *mcdi_smem; | ||
152 | int wol_filter_id; | 150 | int wol_filter_id; |
153 | }; | 151 | }; |
154 | 152 | ||
155 | extern const struct efx_nic_type falcon_a1_nic_type; | 153 | extern const struct efx_nic_type falcon_a1_nic_type; |
156 | extern const struct efx_nic_type falcon_b0_nic_type; | 154 | extern const struct efx_nic_type falcon_b0_nic_type; |
157 | extern const struct efx_nic_type siena_a0_nic_type; | 155 | extern const struct efx_nic_type siena_a0_nic_type; |
158 | 156 | ||
159 | /************************************************************************** | 157 | /************************************************************************** |
160 | * | 158 | * |
161 | * Externs | 159 | * Externs |
162 | * | 160 | * |
163 | ************************************************************************** | 161 | ************************************************************************** |
164 | */ | 162 | */ |
165 | 163 | ||
166 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); | 164 | extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info); |
167 | 165 | ||
168 | /* TX data path */ | 166 | /* TX data path */ |
169 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); | 167 | extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); |
170 | extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); | 168 | extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue); |
171 | extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue); | 169 | extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue); |
172 | extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); | 170 | extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue); |
173 | extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); | 171 | extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue); |
174 | 172 | ||
175 | /* RX data path */ | 173 | /* RX data path */ |
176 | extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); | 174 | extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue); |
177 | extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); | 175 | extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue); |
178 | extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); | 176 | extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue); |
179 | extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); | 177 | extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue); |
180 | extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); | 178 | extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue); |
181 | 179 | ||
182 | /* Event data path */ | 180 | /* Event data path */ |
183 | extern int efx_nic_probe_eventq(struct efx_channel *channel); | 181 | extern int efx_nic_probe_eventq(struct efx_channel *channel); |
184 | extern void efx_nic_init_eventq(struct efx_channel *channel); | 182 | extern void efx_nic_init_eventq(struct efx_channel *channel); |
185 | extern void efx_nic_fini_eventq(struct efx_channel *channel); | 183 | extern void efx_nic_fini_eventq(struct efx_channel *channel); |
186 | extern void efx_nic_remove_eventq(struct efx_channel *channel); | 184 | extern void efx_nic_remove_eventq(struct efx_channel *channel); |
187 | extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); | 185 | extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota); |
188 | extern void efx_nic_eventq_read_ack(struct efx_channel *channel); | 186 | extern void efx_nic_eventq_read_ack(struct efx_channel *channel); |
189 | extern bool efx_nic_event_present(struct efx_channel *channel); | 187 | extern bool efx_nic_event_present(struct efx_channel *channel); |
190 | 188 | ||
191 | /* MAC/PHY */ | 189 | /* MAC/PHY */ |
192 | extern void falcon_drain_tx_fifo(struct efx_nic *efx); | 190 | extern void falcon_drain_tx_fifo(struct efx_nic *efx); |
193 | extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); | 191 | extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); |
194 | 192 | ||
195 | /* Interrupts and test events */ | 193 | /* Interrupts and test events */ |
196 | extern int efx_nic_init_interrupt(struct efx_nic *efx); | 194 | extern int efx_nic_init_interrupt(struct efx_nic *efx); |
197 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); | 195 | extern void efx_nic_enable_interrupts(struct efx_nic *efx); |
198 | extern void efx_nic_generate_test_event(struct efx_channel *channel); | 196 | extern void efx_nic_generate_test_event(struct efx_channel *channel); |
199 | extern void efx_nic_generate_fill_event(struct efx_channel *channel); | 197 | extern void efx_nic_generate_fill_event(struct efx_channel *channel); |
200 | extern void efx_nic_generate_interrupt(struct efx_nic *efx); | 198 | extern void efx_nic_generate_interrupt(struct efx_nic *efx); |
201 | extern void efx_nic_disable_interrupts(struct efx_nic *efx); | 199 | extern void efx_nic_disable_interrupts(struct efx_nic *efx); |
202 | extern void efx_nic_fini_interrupt(struct efx_nic *efx); | 200 | extern void efx_nic_fini_interrupt(struct efx_nic *efx); |
203 | extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); | 201 | extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx); |
204 | extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); | 202 | extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id); |
205 | extern void falcon_irq_ack_a1(struct efx_nic *efx); | 203 | extern void falcon_irq_ack_a1(struct efx_nic *efx); |
206 | 204 | ||
207 | #define EFX_IRQ_MOD_RESOLUTION 5 | 205 | #define EFX_IRQ_MOD_RESOLUTION 5 |
208 | 206 | ||
209 | /* Global Resources */ | 207 | /* Global Resources */ |
210 | extern int efx_nic_flush_queues(struct efx_nic *efx); | 208 | extern int efx_nic_flush_queues(struct efx_nic *efx); |
211 | extern void falcon_start_nic_stats(struct efx_nic *efx); | 209 | extern void falcon_start_nic_stats(struct efx_nic *efx); |
212 | extern void falcon_stop_nic_stats(struct efx_nic *efx); | 210 | extern void falcon_stop_nic_stats(struct efx_nic *efx); |
213 | extern void falcon_setup_xaui(struct efx_nic *efx); | 211 | extern void falcon_setup_xaui(struct efx_nic *efx); |
214 | extern int falcon_reset_xaui(struct efx_nic *efx); | 212 | extern int falcon_reset_xaui(struct efx_nic *efx); |
215 | extern void efx_nic_init_common(struct efx_nic *efx); | 213 | extern void efx_nic_init_common(struct efx_nic *efx); |
216 | extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); | 214 | extern void efx_nic_push_rx_indir_table(struct efx_nic *efx); |
217 | 215 | ||
218 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, | 216 | int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer, |
219 | unsigned int len); | 217 | unsigned int len); |
220 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); | 218 | void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer); |
221 | 219 | ||
222 | /* Tests */ | 220 | /* Tests */ |
223 | struct efx_nic_register_test { | 221 | struct efx_nic_register_test { |
224 | unsigned address; | 222 | unsigned address; |
225 | efx_oword_t mask; | 223 | efx_oword_t mask; |
226 | }; | 224 | }; |
227 | extern int efx_nic_test_registers(struct efx_nic *efx, | 225 | extern int efx_nic_test_registers(struct efx_nic *efx, |
228 | const struct efx_nic_register_test *regs, | 226 | const struct efx_nic_register_test *regs, |
229 | size_t n_regs); | 227 | size_t n_regs); |
230 | 228 | ||
231 | extern size_t efx_nic_get_regs_len(struct efx_nic *efx); | 229 | extern size_t efx_nic_get_regs_len(struct efx_nic *efx); |
232 | extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); | 230 | extern void efx_nic_get_regs(struct efx_nic *efx, void *buf); |
233 | 231 | ||
234 | /************************************************************************** | 232 | /************************************************************************** |
235 | * | 233 | * |
236 | * Falcon MAC stats | 234 | * Falcon MAC stats |
237 | * | 235 | * |
238 | ************************************************************************** | 236 | ************************************************************************** |
239 | */ | 237 | */ |
240 | 238 | ||
241 | #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) | 239 | #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) |
242 | #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) | 240 | #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) |
243 | 241 | ||
244 | /* Retrieve statistic from statistics block */ | 242 | /* Retrieve statistic from statistics block */ |
245 | #define FALCON_STAT(efx, falcon_stat, efx_stat) do { \ | 243 | #define FALCON_STAT(efx, falcon_stat, efx_stat) do { \ |
246 | if (FALCON_STAT_WIDTH(falcon_stat) == 16) \ | 244 | if (FALCON_STAT_WIDTH(falcon_stat) == 16) \ |
247 | (efx)->mac_stats.efx_stat += le16_to_cpu( \ | 245 | (efx)->mac_stats.efx_stat += le16_to_cpu( \ |
248 | *((__force __le16 *) \ | 246 | *((__force __le16 *) \ |
249 | (efx->stats_buffer.addr + \ | 247 | (efx->stats_buffer.addr + \ |
250 | FALCON_STAT_OFFSET(falcon_stat)))); \ | 248 | FALCON_STAT_OFFSET(falcon_stat)))); \ |
251 | else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \ | 249 | else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \ |
252 | (efx)->mac_stats.efx_stat += le32_to_cpu( \ | 250 | (efx)->mac_stats.efx_stat += le32_to_cpu( \ |
253 | *((__force __le32 *) \ | 251 | *((__force __le32 *) \ |
254 | (efx->stats_buffer.addr + \ | 252 | (efx->stats_buffer.addr + \ |
255 | FALCON_STAT_OFFSET(falcon_stat)))); \ | 253 | FALCON_STAT_OFFSET(falcon_stat)))); \ |
256 | else \ | 254 | else \ |
257 | (efx)->mac_stats.efx_stat += le64_to_cpu( \ | 255 | (efx)->mac_stats.efx_stat += le64_to_cpu( \ |
258 | *((__force __le64 *) \ | 256 | *((__force __le64 *) \ |
259 | (efx->stats_buffer.addr + \ | 257 | (efx->stats_buffer.addr + \ |
260 | FALCON_STAT_OFFSET(falcon_stat)))); \ | 258 | FALCON_STAT_OFFSET(falcon_stat)))); \ |
261 | } while (0) | 259 | } while (0) |
262 | 260 | ||
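FALCON_STAT() accumulates a little-endian counter of the named width from the DMA'd statistics block. A hedged standalone sketch of the same accumulation (the 0x40 offset and 16-bit width are invented for the example, not a real statistic's layout):

/* Illustrative only: read a little-endian counter of a given bit width
 * from a statistics block and add it to a running total. */
#include <stdint.h>
#include <stdio.h>

static uint64_t read_le_stat(const uint8_t *stats, size_t offset,
                             unsigned int width_bits)
{
        uint64_t v = 0;
        unsigned int i;

        /* Assemble the value byte by byte; works on any host endianness */
        for (i = 0; i < width_bits / 8; i++)
                v |= (uint64_t)stats[offset + i] << (8 * i);
        return v;
}

int main(void)
{
        uint8_t stats[0x100] = { 0 };
        uint64_t total_tx_pkts = 0;

        stats[0x40] = 0x10;             /* pretend the counter is 0x110 */
        stats[0x41] = 0x01;
        total_tx_pkts += read_le_stat(stats, 0x40, 16);
        printf("tx packets so far: %llu\n",
               (unsigned long long)total_tx_pkts);
        return 0;
}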
263 | #define FALCON_MAC_STATS_SIZE 0x100 | 261 | #define FALCON_MAC_STATS_SIZE 0x100 |
264 | 262 | ||
265 | #define MAC_DATA_LBN 0 | 263 | #define MAC_DATA_LBN 0 |
266 | #define MAC_DATA_WIDTH 32 | 264 | #define MAC_DATA_WIDTH 32 |
267 | 265 | ||
268 | extern void efx_nic_generate_event(struct efx_channel *channel, | 266 | extern void efx_nic_generate_event(struct efx_channel *channel, |
269 | efx_qword_t *event); | 267 | efx_qword_t *event); |
270 | 268 | ||
271 | extern void falcon_poll_xmac(struct efx_nic *efx); | 269 | extern void falcon_poll_xmac(struct efx_nic *efx); |
272 | 270 | ||
273 | #endif /* EFX_NIC_H */ | 271 | #endif /* EFX_NIC_H */ |
274 | 272 |
drivers/net/sfc/siena.c
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2005-2006 Fen Systems Ltd. | 3 | * Copyright 2005-2006 Fen Systems Ltd. |
4 | * Copyright 2006-2010 Solarflare Communications Inc. | 4 | * Copyright 2006-2010 Solarflare Communications Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License version 2 as published | 7 | * under the terms of the GNU General Public License version 2 as published |
8 | * by the Free Software Foundation, incorporated herein by reference. | 8 | * by the Free Software Foundation, incorporated herein by reference. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/pci.h> | 13 | #include <linux/pci.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/random.h> | 16 | #include <linux/random.h> |
17 | #include "net_driver.h" | 17 | #include "net_driver.h" |
18 | #include "bitfield.h" | 18 | #include "bitfield.h" |
19 | #include "efx.h" | 19 | #include "efx.h" |
20 | #include "nic.h" | 20 | #include "nic.h" |
21 | #include "mac.h" | 21 | #include "mac.h" |
22 | #include "spi.h" | 22 | #include "spi.h" |
23 | #include "regs.h" | 23 | #include "regs.h" |
24 | #include "io.h" | 24 | #include "io.h" |
25 | #include "phy.h" | 25 | #include "phy.h" |
26 | #include "workarounds.h" | 26 | #include "workarounds.h" |
27 | #include "mcdi.h" | 27 | #include "mcdi.h" |
28 | #include "mcdi_pcol.h" | 28 | #include "mcdi_pcol.h" |
29 | 29 | ||
30 | /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ | 30 | /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ |
31 | 31 | ||
32 | static void siena_init_wol(struct efx_nic *efx); | 32 | static void siena_init_wol(struct efx_nic *efx); |
33 | 33 | ||
34 | 34 | ||
35 | static void siena_push_irq_moderation(struct efx_channel *channel) | 35 | static void siena_push_irq_moderation(struct efx_channel *channel) |
36 | { | 36 | { |
37 | efx_dword_t timer_cmd; | 37 | efx_dword_t timer_cmd; |
38 | 38 | ||
39 | if (channel->irq_moderation) | 39 | if (channel->irq_moderation) |
40 | EFX_POPULATE_DWORD_2(timer_cmd, | 40 | EFX_POPULATE_DWORD_2(timer_cmd, |
41 | FRF_CZ_TC_TIMER_MODE, | 41 | FRF_CZ_TC_TIMER_MODE, |
42 | FFE_CZ_TIMER_MODE_INT_HLDOFF, | 42 | FFE_CZ_TIMER_MODE_INT_HLDOFF, |
43 | FRF_CZ_TC_TIMER_VAL, | 43 | FRF_CZ_TC_TIMER_VAL, |
44 | channel->irq_moderation - 1); | 44 | channel->irq_moderation - 1); |
45 | else | 45 | else |
46 | EFX_POPULATE_DWORD_2(timer_cmd, | 46 | EFX_POPULATE_DWORD_2(timer_cmd, |
47 | FRF_CZ_TC_TIMER_MODE, | 47 | FRF_CZ_TC_TIMER_MODE, |
48 | FFE_CZ_TIMER_MODE_DIS, | 48 | FFE_CZ_TIMER_MODE_DIS, |
49 | FRF_CZ_TC_TIMER_VAL, 0); | 49 | FRF_CZ_TC_TIMER_VAL, 0); |
50 | efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, | 50 | efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, |
51 | channel->channel); | 51 | channel->channel); |
52 | } | 52 | } |
53 | 53 | ||
54 | static void siena_push_multicast_hash(struct efx_nic *efx) | 54 | static void siena_push_multicast_hash(struct efx_nic *efx) |
55 | { | 55 | { |
56 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | 56 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); |
57 | 57 | ||
58 | efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, | 58 | efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH, |
59 | efx->multicast_hash.byte, sizeof(efx->multicast_hash), | 59 | efx->multicast_hash.byte, sizeof(efx->multicast_hash), |
60 | NULL, 0, NULL); | 60 | NULL, 0, NULL); |
61 | } | 61 | } |
62 | 62 | ||
63 | static int siena_mdio_write(struct net_device *net_dev, | 63 | static int siena_mdio_write(struct net_device *net_dev, |
64 | int prtad, int devad, u16 addr, u16 value) | 64 | int prtad, int devad, u16 addr, u16 value) |
65 | { | 65 | { |
66 | struct efx_nic *efx = netdev_priv(net_dev); | 66 | struct efx_nic *efx = netdev_priv(net_dev); |
67 | uint32_t status; | 67 | uint32_t status; |
68 | int rc; | 68 | int rc; |
69 | 69 | ||
70 | rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, | 70 | rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, |
71 | addr, value, &status); | 71 | addr, value, &status); |
72 | if (rc) | 72 | if (rc) |
73 | return rc; | 73 | return rc; |
74 | if (status != MC_CMD_MDIO_STATUS_GOOD) | 74 | if (status != MC_CMD_MDIO_STATUS_GOOD) |
75 | return -EIO; | 75 | return -EIO; |
76 | 76 | ||
77 | return 0; | 77 | return 0; |
78 | } | 78 | } |
79 | 79 | ||
80 | static int siena_mdio_read(struct net_device *net_dev, | 80 | static int siena_mdio_read(struct net_device *net_dev, |
81 | int prtad, int devad, u16 addr) | 81 | int prtad, int devad, u16 addr) |
82 | { | 82 | { |
83 | struct efx_nic *efx = netdev_priv(net_dev); | 83 | struct efx_nic *efx = netdev_priv(net_dev); |
84 | uint16_t value; | 84 | uint16_t value; |
85 | uint32_t status; | 85 | uint32_t status; |
86 | int rc; | 86 | int rc; |
87 | 87 | ||
88 | rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, | 88 | rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, |
89 | addr, &value, &status); | 89 | addr, &value, &status); |
90 | if (rc) | 90 | if (rc) |
91 | return rc; | 91 | return rc; |
92 | if (status != MC_CMD_MDIO_STATUS_GOOD) | 92 | if (status != MC_CMD_MDIO_STATUS_GOOD) |
93 | return -EIO; | 93 | return -EIO; |
94 | 94 | ||
95 | return (int)value; | 95 | return (int)value; |
96 | } | 96 | } |
97 | 97 | ||
98 | /* This call is responsible for hooking in the MAC and PHY operations */ | 98 | /* This call is responsible for hooking in the MAC and PHY operations */ |
99 | static int siena_probe_port(struct efx_nic *efx) | 99 | static int siena_probe_port(struct efx_nic *efx) |
100 | { | 100 | { |
101 | int rc; | 101 | int rc; |
102 | 102 | ||
103 | /* Hook in PHY operations table */ | 103 | /* Hook in PHY operations table */ |
104 | efx->phy_op = &efx_mcdi_phy_ops; | 104 | efx->phy_op = &efx_mcdi_phy_ops; |
105 | 105 | ||
106 | /* Set up MDIO structure for PHY */ | 106 | /* Set up MDIO structure for PHY */ |
107 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; | 107 | efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; |
108 | efx->mdio.mdio_read = siena_mdio_read; | 108 | efx->mdio.mdio_read = siena_mdio_read; |
109 | efx->mdio.mdio_write = siena_mdio_write; | 109 | efx->mdio.mdio_write = siena_mdio_write; |
110 | 110 | ||
111 | /* Fill out MDIO structure, loopback modes, and initial link state */ | 111 | /* Fill out MDIO structure, loopback modes, and initial link state */ |
112 | rc = efx->phy_op->probe(efx); | 112 | rc = efx->phy_op->probe(efx); |
113 | if (rc != 0) | 113 | if (rc != 0) |
114 | return rc; | 114 | return rc; |
115 | 115 | ||
116 | /* Allocate buffer for stats */ | 116 | /* Allocate buffer for stats */ |
117 | rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, | 117 | rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, |
118 | MC_CMD_MAC_NSTATS * sizeof(u64)); | 118 | MC_CMD_MAC_NSTATS * sizeof(u64)); |
119 | if (rc) | 119 | if (rc) |
120 | return rc; | 120 | return rc; |
121 | netif_dbg(efx, probe, efx->net_dev, | 121 | netif_dbg(efx, probe, efx->net_dev, |
122 | "stats buffer at %llx (virt %p phys %llx)\n", | 122 | "stats buffer at %llx (virt %p phys %llx)\n", |
123 | (u64)efx->stats_buffer.dma_addr, | 123 | (u64)efx->stats_buffer.dma_addr, |
124 | efx->stats_buffer.addr, | 124 | efx->stats_buffer.addr, |
125 | (u64)virt_to_phys(efx->stats_buffer.addr)); | 125 | (u64)virt_to_phys(efx->stats_buffer.addr)); |
126 | 126 | ||
127 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); | 127 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); |
128 | 128 | ||
129 | return 0; | 129 | return 0; |
130 | } | 130 | } |
131 | 131 | ||
132 | static void siena_remove_port(struct efx_nic *efx) | 132 | static void siena_remove_port(struct efx_nic *efx) |
133 | { | 133 | { |
134 | efx->phy_op->remove(efx); | 134 | efx->phy_op->remove(efx); |
135 | efx_nic_free_buffer(efx, &efx->stats_buffer); | 135 | efx_nic_free_buffer(efx, &efx->stats_buffer); |
136 | } | 136 | } |
137 | 137 | ||
138 | static const struct efx_nic_register_test siena_register_tests[] = { | 138 | static const struct efx_nic_register_test siena_register_tests[] = { |
139 | { FR_AZ_ADR_REGION, | 139 | { FR_AZ_ADR_REGION, |
140 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, | 140 | EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, |
141 | { FR_CZ_USR_EV_CFG, | 141 | { FR_CZ_USR_EV_CFG, |
142 | EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, | 142 | EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, |
143 | { FR_AZ_RX_CFG, | 143 | { FR_AZ_RX_CFG, |
144 | EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, | 144 | EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, |
145 | { FR_AZ_TX_CFG, | 145 | { FR_AZ_TX_CFG, |
146 | EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, | 146 | EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, |
147 | { FR_AZ_TX_RESERVED, | 147 | { FR_AZ_TX_RESERVED, |
148 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, | 148 | EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, |
149 | { FR_AZ_SRM_TX_DC_CFG, | 149 | { FR_AZ_SRM_TX_DC_CFG, |
150 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, | 150 | EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, |
151 | { FR_AZ_RX_DC_CFG, | 151 | { FR_AZ_RX_DC_CFG, |
152 | EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, | 152 | EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, |
153 | { FR_AZ_RX_DC_PF_WM, | 153 | { FR_AZ_RX_DC_PF_WM, |
154 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, | 154 | EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, |
155 | { FR_BZ_DP_CTRL, | 155 | { FR_BZ_DP_CTRL, |
156 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, | 156 | EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, |
157 | { FR_BZ_RX_RSS_TKEY, | 157 | { FR_BZ_RX_RSS_TKEY, |
158 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | 158 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, |
159 | { FR_CZ_RX_RSS_IPV6_REG1, | 159 | { FR_CZ_RX_RSS_IPV6_REG1, |
160 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | 160 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, |
161 | { FR_CZ_RX_RSS_IPV6_REG2, | 161 | { FR_CZ_RX_RSS_IPV6_REG2, |
162 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, | 162 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, |
163 | { FR_CZ_RX_RSS_IPV6_REG3, | 163 | { FR_CZ_RX_RSS_IPV6_REG3, |
164 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, | 164 | EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, |
165 | }; | 165 | }; |
166 | 166 | ||
167 | static int siena_test_registers(struct efx_nic *efx) | 167 | static int siena_test_registers(struct efx_nic *efx) |
168 | { | 168 | { |
169 | return efx_nic_test_registers(efx, siena_register_tests, | 169 | return efx_nic_test_registers(efx, siena_register_tests, |
170 | ARRAY_SIZE(siena_register_tests)); | 170 | ARRAY_SIZE(siena_register_tests)); |
171 | } | 171 | } |
172 | 172 | ||
173 | /************************************************************************** | 173 | /************************************************************************** |
174 | * | 174 | * |
175 | * Device reset | 175 | * Device reset |
176 | * | 176 | * |
177 | ************************************************************************** | 177 | ************************************************************************** |
178 | */ | 178 | */ |
179 | 179 | ||
180 | static enum reset_type siena_map_reset_reason(enum reset_type reason) | 180 | static enum reset_type siena_map_reset_reason(enum reset_type reason) |
181 | { | 181 | { |
182 | return RESET_TYPE_ALL; | 182 | return RESET_TYPE_ALL; |
183 | } | 183 | } |
184 | 184 | ||
185 | static int siena_map_reset_flags(u32 *flags) | 185 | static int siena_map_reset_flags(u32 *flags) |
186 | { | 186 | { |
187 | enum { | 187 | enum { |
188 | SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER | | 188 | SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER | |
189 | ETH_RESET_OFFLOAD | ETH_RESET_MAC | | 189 | ETH_RESET_OFFLOAD | ETH_RESET_MAC | |
190 | ETH_RESET_PHY), | 190 | ETH_RESET_PHY), |
191 | SIENA_RESET_MC = (SIENA_RESET_PORT | | 191 | SIENA_RESET_MC = (SIENA_RESET_PORT | |
192 | ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT), | 192 | ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT), |
193 | }; | 193 | }; |
194 | 194 | ||
195 | if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) { | 195 | if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) { |
196 | *flags &= ~SIENA_RESET_MC; | 196 | *flags &= ~SIENA_RESET_MC; |
197 | return RESET_TYPE_WORLD; | 197 | return RESET_TYPE_WORLD; |
198 | } | 198 | } |
199 | 199 | ||
200 | if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) { | 200 | if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) { |
201 | *flags &= ~SIENA_RESET_PORT; | 201 | *flags &= ~SIENA_RESET_PORT; |
202 | return RESET_TYPE_ALL; | 202 | return RESET_TYPE_ALL; |
203 | } | 203 | } |
204 | 204 | ||
205 | /* no invisible reset implemented */ | 205 | /* no invisible reset implemented */ |
206 | 206 | ||
207 | return -EINVAL; | 207 | return -EINVAL; |
208 | } | 208 | } |
209 | 209 | ||
210 | static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) | 210 | static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) |
211 | { | 211 | { |
212 | int rc; | 212 | int rc; |
213 | 213 | ||
214 | /* Recover from a failed assertion pre-reset */ | 214 | /* Recover from a failed assertion pre-reset */ |
215 | rc = efx_mcdi_handle_assertion(efx); | 215 | rc = efx_mcdi_handle_assertion(efx); |
216 | if (rc) | 216 | if (rc) |
217 | return rc; | 217 | return rc; |
218 | 218 | ||
219 | if (method == RESET_TYPE_WORLD) | 219 | if (method == RESET_TYPE_WORLD) |
220 | return efx_mcdi_reset_mc(efx); | 220 | return efx_mcdi_reset_mc(efx); |
221 | else | 221 | else |
222 | return efx_mcdi_reset_port(efx); | 222 | return efx_mcdi_reset_port(efx); |
223 | } | 223 | } |
224 | 224 | ||
225 | static int siena_probe_nvconfig(struct efx_nic *efx) | 225 | static int siena_probe_nvconfig(struct efx_nic *efx) |
226 | { | 226 | { |
227 | return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL); | 227 | return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL); |
228 | } | 228 | } |
229 | 229 | ||
230 | static int siena_probe_nic(struct efx_nic *efx) | 230 | static int siena_probe_nic(struct efx_nic *efx) |
231 | { | 231 | { |
232 | struct siena_nic_data *nic_data; | 232 | struct siena_nic_data *nic_data; |
233 | bool already_attached = 0; | 233 | bool already_attached = 0; |
234 | efx_oword_t reg; | 234 | efx_oword_t reg; |
235 | int rc; | 235 | int rc; |
236 | 236 | ||
237 | /* Allocate storage for hardware specific data */ | 237 | /* Allocate storage for hardware specific data */ |
238 | nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); | 238 | nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); |
239 | if (!nic_data) | 239 | if (!nic_data) |
240 | return -ENOMEM; | 240 | return -ENOMEM; |
241 | efx->nic_data = nic_data; | 241 | efx->nic_data = nic_data; |
242 | 242 | ||
243 | if (efx_nic_fpga_ver(efx) != 0) { | 243 | if (efx_nic_fpga_ver(efx) != 0) { |
244 | netif_err(efx, probe, efx->net_dev, | 244 | netif_err(efx, probe, efx->net_dev, |
245 | "Siena FPGA not supported\n"); | 245 | "Siena FPGA not supported\n"); |
246 | rc = -ENODEV; | 246 | rc = -ENODEV; |
247 | goto fail1; | 247 | goto fail1; |
248 | } | 248 | } |
249 | 249 | ||
250 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); | 250 | efx_reado(efx, ®, FR_AZ_CS_DEBUG); |
251 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; | 251 | efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; |
252 | 252 | ||
253 | /* Initialise MCDI */ | ||
254 | nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys + | ||
255 | FR_CZ_MC_TREG_SMEM, | ||
256 | FR_CZ_MC_TREG_SMEM_STEP * | ||
257 | FR_CZ_MC_TREG_SMEM_ROWS); | ||
258 | if (!nic_data->mcdi_smem) { | ||
259 | netif_err(efx, probe, efx->net_dev, | ||
260 | "could not map MCDI at %llx+%x\n", | ||
261 | (unsigned long long)efx->membase_phys + | ||
262 | FR_CZ_MC_TREG_SMEM, | ||
263 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS); | ||
264 | rc = -ENOMEM; | ||
265 | goto fail1; | ||
266 | } | ||
267 | efx_mcdi_init(efx); | 253 | efx_mcdi_init(efx); |
268 | 254 | ||
269 | /* Recover from a failed assertion before probing */ | 255 | /* Recover from a failed assertion before probing */ |
270 | rc = efx_mcdi_handle_assertion(efx); | 256 | rc = efx_mcdi_handle_assertion(efx); |
271 | if (rc) | 257 | if (rc) |
272 | goto fail2; | 258 | goto fail1; |
273 | 259 | ||
274 | /* Let the BMC know that the driver is now in charge of link and | 260 | /* Let the BMC know that the driver is now in charge of link and |
275 | * filter settings. We must do this before we reset the NIC */ | 261 | * filter settings. We must do this before we reset the NIC */ |
276 | rc = efx_mcdi_drv_attach(efx, true, &already_attached); | 262 | rc = efx_mcdi_drv_attach(efx, true, &already_attached); |
277 | if (rc) { | 263 | if (rc) { |
278 | netif_err(efx, probe, efx->net_dev, | 264 | netif_err(efx, probe, efx->net_dev, |
279 | "Unable to register driver with MCPU\n"); | 265 | "Unable to register driver with MCPU\n"); |
280 | goto fail2; | 266 | goto fail2; |
281 | } | 267 | } |
282 | if (already_attached) | 268 | if (already_attached) |
283 | /* Not a fatal error */ | 269 | /* Not a fatal error */ |
284 | netif_err(efx, probe, efx->net_dev, | 270 | netif_err(efx, probe, efx->net_dev, |
285 | "Host already registered with MCPU\n"); | 271 | "Host already registered with MCPU\n"); |
286 | 272 | ||
287 | /* Now we can reset the NIC */ | 273 | /* Now we can reset the NIC */ |
288 | rc = siena_reset_hw(efx, RESET_TYPE_ALL); | 274 | rc = siena_reset_hw(efx, RESET_TYPE_ALL); |
289 | if (rc) { | 275 | if (rc) { |
290 | netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); | 276 | netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); |
291 | goto fail3; | 277 | goto fail3; |
292 | } | 278 | } |
293 | 279 | ||
294 | siena_init_wol(efx); | 280 | siena_init_wol(efx); |
295 | 281 | ||
296 | /* Allocate memory for INT_KER */ | 282 | /* Allocate memory for INT_KER */ |
297 | rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); | 283 | rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); |
298 | if (rc) | 284 | if (rc) |
299 | goto fail4; | 285 | goto fail4; |
300 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | 286 | BUG_ON(efx->irq_status.dma_addr & 0x0f); |
301 | 287 | ||
302 | netif_dbg(efx, probe, efx->net_dev, | 288 | netif_dbg(efx, probe, efx->net_dev, |
303 | "INT_KER at %llx (virt %p phys %llx)\n", | 289 | "INT_KER at %llx (virt %p phys %llx)\n", |
304 | (unsigned long long)efx->irq_status.dma_addr, | 290 | (unsigned long long)efx->irq_status.dma_addr, |
305 | efx->irq_status.addr, | 291 | efx->irq_status.addr, |
306 | (unsigned long long)virt_to_phys(efx->irq_status.addr)); | 292 | (unsigned long long)virt_to_phys(efx->irq_status.addr)); |
307 | 293 | ||
308 | /* Read in the non-volatile configuration */ | 294 | /* Read in the non-volatile configuration */ |
309 | rc = siena_probe_nvconfig(efx); | 295 | rc = siena_probe_nvconfig(efx); |
310 | if (rc == -EINVAL) { | 296 | if (rc == -EINVAL) { |
311 | netif_err(efx, probe, efx->net_dev, | 297 | netif_err(efx, probe, efx->net_dev, |
312 | "NVRAM is invalid therefore using defaults\n"); | 298 | "NVRAM is invalid therefore using defaults\n"); |
313 | efx->phy_type = PHY_TYPE_NONE; | 299 | efx->phy_type = PHY_TYPE_NONE; |
314 | efx->mdio.prtad = MDIO_PRTAD_NONE; | 300 | efx->mdio.prtad = MDIO_PRTAD_NONE; |
315 | } else if (rc) { | 301 | } else if (rc) { |
316 | goto fail5; | 302 | goto fail5; |
317 | } | 303 | } |
318 | 304 | ||
319 | return 0; | 305 | return 0; |
320 | 306 | ||
321 | fail5: | 307 | fail5: |
322 | efx_nic_free_buffer(efx, &efx->irq_status); | 308 | efx_nic_free_buffer(efx, &efx->irq_status); |
323 | fail4: | 309 | fail4: |
324 | fail3: | 310 | fail3: |
325 | efx_mcdi_drv_attach(efx, false, NULL); | 311 | efx_mcdi_drv_attach(efx, false, NULL); |
326 | fail2: | 312 | fail2: |
327 | iounmap(nic_data->mcdi_smem); | ||
328 | fail1: | 313 | fail1: |
329 | kfree(efx->nic_data); | 314 | kfree(efx->nic_data); |
330 | return rc; | 315 | return rc; |
331 | } | 316 | } |
332 | 317 | ||
333 | /* This call performs hardware-specific global initialisation, such as | 318 | /* This call performs hardware-specific global initialisation, such as |
334 | * defining the descriptor cache sizes and number of RSS channels. | 319 | * defining the descriptor cache sizes and number of RSS channels. |
335 | * It does not set up any buffers, descriptor rings or event queues. | 320 | * It does not set up any buffers, descriptor rings or event queues. |
336 | */ | 321 | */ |
337 | static int siena_init_nic(struct efx_nic *efx) | 322 | static int siena_init_nic(struct efx_nic *efx) |
338 | { | 323 | { |
339 | efx_oword_t temp; | 324 | efx_oword_t temp; |
340 | int rc; | 325 | int rc; |
341 | 326 | ||
342 | /* Recover from a failed assertion post-reset */ | 327 | /* Recover from a failed assertion post-reset */ |
343 | rc = efx_mcdi_handle_assertion(efx); | 328 | rc = efx_mcdi_handle_assertion(efx); |
344 | if (rc) | 329 | if (rc) |
345 | return rc; | 330 | return rc; |
346 | 331 | ||
347 | /* Squash TX of packets of 16 bytes or less */ | 332 | /* Squash TX of packets of 16 bytes or less */ |
348 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); | 333 | efx_reado(efx, &temp, FR_AZ_TX_RESERVED); |
349 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); | 334 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); |
350 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); | 335 | efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); |
351 | 336 | ||
352 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 | 337 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 |
353 | * descriptors (which is bad). | 338 | * descriptors (which is bad). |
354 | */ | 339 | */ |
355 | efx_reado(efx, &temp, FR_AZ_TX_CFG); | 340 | efx_reado(efx, &temp, FR_AZ_TX_CFG); |
356 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); | 341 | EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); |
357 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); | 342 | EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); |
358 | efx_writeo(efx, &temp, FR_AZ_TX_CFG); | 343 | efx_writeo(efx, &temp, FR_AZ_TX_CFG); |
359 | 344 | ||
360 | efx_reado(efx, &temp, FR_AZ_RX_CFG); | 345 | efx_reado(efx, &temp, FR_AZ_RX_CFG); |
361 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); | 346 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); |
362 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); | 347 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); |
363 | /* Enable hash insertion. This is broken for the 'Falcon' hash | 348 | /* Enable hash insertion. This is broken for the 'Falcon' hash |
364 | * if IPv6 hashing is also enabled, so also select Toeplitz | 349 | * if IPv6 hashing is also enabled, so also select Toeplitz |
365 | * TCP/IPv4 and IPv4 hashes. */ | 350 | * TCP/IPv4 and IPv4 hashes. */ |
366 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); | 351 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); |
367 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); | 352 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); |
368 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); | 353 | EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); |
369 | efx_writeo(efx, &temp, FR_AZ_RX_CFG); | 354 | efx_writeo(efx, &temp, FR_AZ_RX_CFG); |
370 | 355 | ||
371 | /* Set hash key for IPv4 */ | 356 | /* Set hash key for IPv4 */ |
372 | memcpy(&temp, efx->rx_hash_key, sizeof(temp)); | 357 | memcpy(&temp, efx->rx_hash_key, sizeof(temp)); |
373 | efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); | 358 | efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); |
374 | 359 | ||
375 | /* Enable IPv6 RSS */ | 360 | /* Enable IPv6 RSS */ |
376 | BUILD_BUG_ON(sizeof(efx->rx_hash_key) < | 361 | BUILD_BUG_ON(sizeof(efx->rx_hash_key) < |
377 | 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || | 362 | 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 || |
378 | FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); | 363 | FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0); |
379 | memcpy(&temp, efx->rx_hash_key, sizeof(temp)); | 364 | memcpy(&temp, efx->rx_hash_key, sizeof(temp)); |
380 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); | 365 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1); |
381 | memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); | 366 | memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp)); |
382 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); | 367 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2); |
383 | EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, | 368 | EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1, |
384 | FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); | 369 | FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1); |
385 | memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), | 370 | memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp), |
386 | FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); | 371 | FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8); |
387 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); | 372 | efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3); |
388 | 373 | ||
389 | /* Enable event logging */ | 374 | /* Enable event logging */ |
390 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); | 375 | rc = efx_mcdi_log_ctrl(efx, true, false, 0); |
391 | if (rc) | 376 | if (rc) |
392 | return rc; | 377 | return rc; |
393 | 378 | ||
394 | /* Set destination of both TX and RX Flush events */ | 379 | /* Set destination of both TX and RX Flush events */ |
395 | EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); | 380 | EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); |
396 | efx_writeo(efx, &temp, FR_BZ_DP_CTRL); | 381 | efx_writeo(efx, &temp, FR_BZ_DP_CTRL); |
397 | 382 | ||
398 | EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); | 383 | EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1); |
399 | efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); | 384 | efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG); |
400 | 385 | ||
401 | efx_nic_init_common(efx); | 386 | efx_nic_init_common(efx); |
402 | return 0; | 387 | return 0; |
403 | } | 388 | } |
404 | 389 | ||
405 | static void siena_remove_nic(struct efx_nic *efx) | 390 | static void siena_remove_nic(struct efx_nic *efx) |
406 | { | 391 | { |
407 | struct siena_nic_data *nic_data = efx->nic_data; | ||
408 | |||
409 | efx_nic_free_buffer(efx, &efx->irq_status); | 392 | efx_nic_free_buffer(efx, &efx->irq_status); |
410 | 393 | ||
411 | siena_reset_hw(efx, RESET_TYPE_ALL); | 394 | siena_reset_hw(efx, RESET_TYPE_ALL); |
412 | 395 | ||
413 | /* Relinquish the device back to the BMC */ | 396 | /* Relinquish the device back to the BMC */ |
414 | if (efx_nic_has_mc(efx)) | 397 | if (efx_nic_has_mc(efx)) |
415 | efx_mcdi_drv_attach(efx, false, NULL); | 398 | efx_mcdi_drv_attach(efx, false, NULL); |
416 | 399 | ||
417 | /* Tear down the private nic state */ | 400 | /* Tear down the private nic state */ |
418 | iounmap(nic_data->mcdi_smem); | 401 | kfree(efx->nic_data); |
419 | kfree(nic_data); | ||
420 | efx->nic_data = NULL; | 402 | efx->nic_data = NULL; |
421 | } | 403 | } |
422 | 404 | ||
423 | #define STATS_GENERATION_INVALID ((__force __le64)(-1)) | 405 | #define STATS_GENERATION_INVALID ((__force __le64)(-1)) |
424 | 406 | ||
425 | static int siena_try_update_nic_stats(struct efx_nic *efx) | 407 | static int siena_try_update_nic_stats(struct efx_nic *efx) |
426 | { | 408 | { |
427 | __le64 *dma_stats; | 409 | __le64 *dma_stats; |
428 | struct efx_mac_stats *mac_stats; | 410 | struct efx_mac_stats *mac_stats; |
429 | __le64 generation_start, generation_end; | 411 | __le64 generation_start, generation_end; |
430 | 412 | ||
431 | mac_stats = &efx->mac_stats; | 413 | mac_stats = &efx->mac_stats; |
432 | dma_stats = efx->stats_buffer.addr; | 414 | dma_stats = efx->stats_buffer.addr; |
433 | 415 | ||
434 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; | 416 | generation_end = dma_stats[MC_CMD_MAC_GENERATION_END]; |
435 | if (generation_end == STATS_GENERATION_INVALID) | 417 | if (generation_end == STATS_GENERATION_INVALID) |
436 | return 0; | 418 | return 0; |
437 | rmb(); | 419 | rmb(); |
438 | 420 | ||
439 | #define MAC_STAT(M, D) \ | 421 | #define MAC_STAT(M, D) \ |
440 | mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D]) | 422 | mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D]) |
441 | 423 | ||
442 | MAC_STAT(tx_bytes, TX_BYTES); | 424 | MAC_STAT(tx_bytes, TX_BYTES); |
443 | MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); | 425 | MAC_STAT(tx_bad_bytes, TX_BAD_BYTES); |
444 | mac_stats->tx_good_bytes = (mac_stats->tx_bytes - | 426 | mac_stats->tx_good_bytes = (mac_stats->tx_bytes - |
445 | mac_stats->tx_bad_bytes); | 427 | mac_stats->tx_bad_bytes); |
446 | MAC_STAT(tx_packets, TX_PKTS); | 428 | MAC_STAT(tx_packets, TX_PKTS); |
447 | MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); | 429 | MAC_STAT(tx_bad, TX_BAD_FCS_PKTS); |
448 | MAC_STAT(tx_pause, TX_PAUSE_PKTS); | 430 | MAC_STAT(tx_pause, TX_PAUSE_PKTS); |
449 | MAC_STAT(tx_control, TX_CONTROL_PKTS); | 431 | MAC_STAT(tx_control, TX_CONTROL_PKTS); |
450 | MAC_STAT(tx_unicast, TX_UNICAST_PKTS); | 432 | MAC_STAT(tx_unicast, TX_UNICAST_PKTS); |
451 | MAC_STAT(tx_multicast, TX_MULTICAST_PKTS); | 433 | MAC_STAT(tx_multicast, TX_MULTICAST_PKTS); |
452 | MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS); | 434 | MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS); |
453 | MAC_STAT(tx_lt64, TX_LT64_PKTS); | 435 | MAC_STAT(tx_lt64, TX_LT64_PKTS); |
454 | MAC_STAT(tx_64, TX_64_PKTS); | 436 | MAC_STAT(tx_64, TX_64_PKTS); |
455 | MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS); | 437 | MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS); |
456 | MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS); | 438 | MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS); |
457 | MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS); | 439 | MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS); |
458 | MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS); | 440 | MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS); |
459 | MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS); | 441 | MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS); |
460 | MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS); | 442 | MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS); |
461 | MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS); | 443 | MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS); |
462 | mac_stats->tx_collision = 0; | 444 | mac_stats->tx_collision = 0; |
463 | MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS); | 445 | MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS); |
464 | MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS); | 446 | MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS); |
465 | MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS); | 447 | MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS); |
466 | MAC_STAT(tx_deferred, TX_DEFERRED_PKTS); | 448 | MAC_STAT(tx_deferred, TX_DEFERRED_PKTS); |
467 | MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS); | 449 | MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS); |
468 | mac_stats->tx_collision = (mac_stats->tx_single_collision + | 450 | mac_stats->tx_collision = (mac_stats->tx_single_collision + |
469 | mac_stats->tx_multiple_collision + | 451 | mac_stats->tx_multiple_collision + |
470 | mac_stats->tx_excessive_collision + | 452 | mac_stats->tx_excessive_collision + |
471 | mac_stats->tx_late_collision); | 453 | mac_stats->tx_late_collision); |
472 | MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS); | 454 | MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS); |
473 | MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS); | 455 | MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS); |
474 | MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS); | 456 | MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS); |
475 | MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); | 457 | MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS); |
476 | MAC_STAT(rx_bytes, RX_BYTES); | 458 | MAC_STAT(rx_bytes, RX_BYTES); |
477 | MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); | 459 | MAC_STAT(rx_bad_bytes, RX_BAD_BYTES); |
478 | mac_stats->rx_good_bytes = (mac_stats->rx_bytes - | 460 | mac_stats->rx_good_bytes = (mac_stats->rx_bytes - |
479 | mac_stats->rx_bad_bytes); | 461 | mac_stats->rx_bad_bytes); |
480 | MAC_STAT(rx_packets, RX_PKTS); | 462 | MAC_STAT(rx_packets, RX_PKTS); |
481 | MAC_STAT(rx_good, RX_GOOD_PKTS); | 463 | MAC_STAT(rx_good, RX_GOOD_PKTS); |
482 | MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); | 464 | MAC_STAT(rx_bad, RX_BAD_FCS_PKTS); |
483 | MAC_STAT(rx_pause, RX_PAUSE_PKTS); | 465 | MAC_STAT(rx_pause, RX_PAUSE_PKTS); |
484 | MAC_STAT(rx_control, RX_CONTROL_PKTS); | 466 | MAC_STAT(rx_control, RX_CONTROL_PKTS); |
485 | MAC_STAT(rx_unicast, RX_UNICAST_PKTS); | 467 | MAC_STAT(rx_unicast, RX_UNICAST_PKTS); |
486 | MAC_STAT(rx_multicast, RX_MULTICAST_PKTS); | 468 | MAC_STAT(rx_multicast, RX_MULTICAST_PKTS); |
487 | MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS); | 469 | MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS); |
488 | MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS); | 470 | MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS); |
489 | MAC_STAT(rx_64, RX_64_PKTS); | 471 | MAC_STAT(rx_64, RX_64_PKTS); |
490 | MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS); | 472 | MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS); |
491 | MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS); | 473 | MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS); |
492 | MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS); | 474 | MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS); |
493 | MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS); | 475 | MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS); |
494 | MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS); | 476 | MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS); |
495 | MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS); | 477 | MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS); |
496 | MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS); | 478 | MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS); |
497 | mac_stats->rx_bad_lt64 = 0; | 479 | mac_stats->rx_bad_lt64 = 0; |
498 | mac_stats->rx_bad_64_to_15xx = 0; | 480 | mac_stats->rx_bad_64_to_15xx = 0; |
499 | mac_stats->rx_bad_15xx_to_jumbo = 0; | 481 | mac_stats->rx_bad_15xx_to_jumbo = 0; |
500 | MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS); | 482 | MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS); |
501 | MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS); | 483 | MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS); |
502 | mac_stats->rx_missed = 0; | 484 | mac_stats->rx_missed = 0; |
503 | MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS); | 485 | MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS); |
504 | MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS); | 486 | MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS); |
505 | MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS); | 487 | MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS); |
506 | MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS); | 488 | MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS); |
507 | MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS); | 489 | MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS); |
508 | mac_stats->rx_good_lt64 = 0; | 490 | mac_stats->rx_good_lt64 = 0; |
509 | 491 | ||
510 | efx->n_rx_nodesc_drop_cnt = | 492 | efx->n_rx_nodesc_drop_cnt = |
511 | le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]); | 493 | le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]); |
512 | 494 | ||
513 | #undef MAC_STAT | 495 | #undef MAC_STAT |
514 | 496 | ||
515 | rmb(); | 497 | rmb(); |
516 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; | 498 | generation_start = dma_stats[MC_CMD_MAC_GENERATION_START]; |
517 | if (generation_end != generation_start) | 499 | if (generation_end != generation_start) |
518 | return -EAGAIN; | 500 | return -EAGAIN; |
519 | 501 | ||
520 | return 0; | 502 | return 0; |
521 | } | 503 | } |
522 | 504 | ||
523 | static void siena_update_nic_stats(struct efx_nic *efx) | 505 | static void siena_update_nic_stats(struct efx_nic *efx) |
524 | { | 506 | { |
525 | int retry; | 507 | int retry; |
526 | 508 | ||
527 | /* If we're unlucky enough to read statistics during the DMA, wait | 509 | /* If we're unlucky enough to read statistics during the DMA, wait |
528 | * up to 10ms for it to finish (typically takes <500us) */ | 510 | * up to 10ms for it to finish (typically takes <500us) */ |
529 | for (retry = 0; retry < 100; ++retry) { | 511 | for (retry = 0; retry < 100; ++retry) { |
530 | if (siena_try_update_nic_stats(efx) == 0) | 512 | if (siena_try_update_nic_stats(efx) == 0) |
531 | return; | 513 | return; |
532 | udelay(100); | 514 | udelay(100); |
533 | } | 515 | } |
534 | 516 | ||
535 | /* Use the old values instead */ | 517 | /* Use the old values instead */ |
536 | } | 518 | } |
537 | 519 | ||
538 | static void siena_start_nic_stats(struct efx_nic *efx) | 520 | static void siena_start_nic_stats(struct efx_nic *efx) |
539 | { | 521 | { |
540 | __le64 *dma_stats = efx->stats_buffer.addr; | 522 | __le64 *dma_stats = efx->stats_buffer.addr; |
541 | 523 | ||
542 | dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; | 524 | dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID; |
543 | 525 | ||
544 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, | 526 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, |
545 | MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); | 527 | MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0); |
546 | } | 528 | } |
547 | 529 | ||
548 | static void siena_stop_nic_stats(struct efx_nic *efx) | 530 | static void siena_stop_nic_stats(struct efx_nic *efx) |
549 | { | 531 | { |
550 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); | 532 | efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0); |
551 | } | 533 | } |
552 | 534 | ||
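The statistics path above is a generation-count handshake with the management controller: the firmware DMAs the whole MAC stats block into stats_buffer, the driver reads the GENERATION_END marker, copies the counters, then checks that GENERATION_START still matches, retrying for up to ~10 ms (100 x 100 us) before falling back to the previous values. Below is a minimal, self-contained sketch of that pattern; the struct layout, identifier names and the __sync_synchronize() stand-in for rmb() are illustrative assumptions, not the driver's actual definitions.

/* Minimal sketch of the generation-count snapshot used by
 * siena_try_update_nic_stats()/siena_update_nic_stats() above.
 * All names, the struct layout and the barrier stand-in are
 * illustrative assumptions, not the driver's definitions. */
#include <stdint.h>

#define N_COUNTERS   64
#define GEN_INVALID  ((uint64_t)-1)

struct stats_dma {                    /* written by the firmware via DMA */
	uint64_t generation_start;    /* first word of the block */
	uint64_t counters[N_COUNTERS];
	uint64_t generation_end;      /* last word of the block */
};

/* Stand-in for the kernel's rmb(); real code needs a proper read barrier. */
static inline void read_barrier(void) { __sync_synchronize(); }

/* 0 = consistent snapshot copied (or no stats yet), -1 = raced, retry. */
static int try_snapshot(const volatile struct stats_dma *dma,
			uint64_t out[N_COUNTERS])
{
	uint64_t end = dma->generation_end;

	if (end == GEN_INVALID)       /* firmware has not filled it yet */
		return 0;
	read_barrier();               /* order marker read before the copy */

	for (int i = 0; i < N_COUNTERS; i++)
		out[i] = dma->counters[i];

	read_barrier();               /* order the copy before the re-check */
	return (dma->generation_start == end) ? 0 : -1;
}

/* Mirrors the ~10 ms retry loop in siena_update_nic_stats(): if the DMA
 * never settles, the previously published values are simply kept. */
static void snapshot_with_retry(const volatile struct stats_dma *dma,
				uint64_t out[N_COUNTERS])
{
	for (int retry = 0; retry < 100; retry++) {
		if (try_snapshot(dma, out) == 0)
			return;
		/* a ~100 us delay per attempt would go here */
	}
}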
553 | /************************************************************************** | 535 | /************************************************************************** |
554 | * | 536 | * |
555 | * Wake on LAN | 537 | * Wake on LAN |
556 | * | 538 | * |
557 | ************************************************************************** | 539 | ************************************************************************** |
558 | */ | 540 | */ |
559 | 541 | ||
560 | static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) | 542 | static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) |
561 | { | 543 | { |
562 | struct siena_nic_data *nic_data = efx->nic_data; | 544 | struct siena_nic_data *nic_data = efx->nic_data; |
563 | 545 | ||
564 | wol->supported = WAKE_MAGIC; | 546 | wol->supported = WAKE_MAGIC; |
565 | if (nic_data->wol_filter_id != -1) | 547 | if (nic_data->wol_filter_id != -1) |
566 | wol->wolopts = WAKE_MAGIC; | 548 | wol->wolopts = WAKE_MAGIC; |
567 | else | 549 | else |
568 | wol->wolopts = 0; | 550 | wol->wolopts = 0; |
569 | memset(&wol->sopass, 0, sizeof(wol->sopass)); | 551 | memset(&wol->sopass, 0, sizeof(wol->sopass)); |
570 | } | 552 | } |
571 | 553 | ||
572 | 554 | ||
573 | static int siena_set_wol(struct efx_nic *efx, u32 type) | 555 | static int siena_set_wol(struct efx_nic *efx, u32 type) |
574 | { | 556 | { |
575 | struct siena_nic_data *nic_data = efx->nic_data; | 557 | struct siena_nic_data *nic_data = efx->nic_data; |
576 | int rc; | 558 | int rc; |
577 | 559 | ||
578 | if (type & ~WAKE_MAGIC) | 560 | if (type & ~WAKE_MAGIC) |
579 | return -EINVAL; | 561 | return -EINVAL; |
580 | 562 | ||
581 | if (type & WAKE_MAGIC) { | 563 | if (type & WAKE_MAGIC) { |
582 | if (nic_data->wol_filter_id != -1) | 564 | if (nic_data->wol_filter_id != -1) |
583 | efx_mcdi_wol_filter_remove(efx, | 565 | efx_mcdi_wol_filter_remove(efx, |
584 | nic_data->wol_filter_id); | 566 | nic_data->wol_filter_id); |
585 | rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, | 567 | rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr, |
586 | &nic_data->wol_filter_id); | 568 | &nic_data->wol_filter_id); |
587 | if (rc) | 569 | if (rc) |
588 | goto fail; | 570 | goto fail; |
589 | 571 | ||
590 | pci_wake_from_d3(efx->pci_dev, true); | 572 | pci_wake_from_d3(efx->pci_dev, true); |
591 | } else { | 573 | } else { |
592 | rc = efx_mcdi_wol_filter_reset(efx); | 574 | rc = efx_mcdi_wol_filter_reset(efx); |
593 | nic_data->wol_filter_id = -1; | 575 | nic_data->wol_filter_id = -1; |
594 | pci_wake_from_d3(efx->pci_dev, false); | 576 | pci_wake_from_d3(efx->pci_dev, false); |
595 | if (rc) | 577 | if (rc) |
596 | goto fail; | 578 | goto fail; |
597 | } | 579 | } |
598 | 580 | ||
599 | return 0; | 581 | return 0; |
600 | fail: | 582 | fail: |
601 | netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", | 583 | netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n", |
602 | __func__, type, rc); | 584 | __func__, type, rc); |
603 | return rc; | 585 | return rc; |
604 | } | 586 | } |
605 | 587 | ||
606 | 588 | ||
607 | static void siena_init_wol(struct efx_nic *efx) | 589 | static void siena_init_wol(struct efx_nic *efx) |
608 | { | 590 | { |
609 | struct siena_nic_data *nic_data = efx->nic_data; | 591 | struct siena_nic_data *nic_data = efx->nic_data; |
610 | int rc; | 592 | int rc; |
611 | 593 | ||
612 | rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); | 594 | rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id); |
613 | 595 | ||
614 | if (rc != 0) { | 596 | if (rc != 0) { |
615 | /* If it failed, attempt to get into a synchronised | 597 | /* If it failed, attempt to get into a synchronised |
616 | * state with MC by resetting any set WoL filters */ | 598 | * state with MC by resetting any set WoL filters */ |
617 | efx_mcdi_wol_filter_reset(efx); | 599 | efx_mcdi_wol_filter_reset(efx); |
618 | nic_data->wol_filter_id = -1; | 600 | nic_data->wol_filter_id = -1; |
619 | } else if (nic_data->wol_filter_id != -1) { | 601 | } else if (nic_data->wol_filter_id != -1) { |
620 | pci_wake_from_d3(efx->pci_dev, true); | 602 | pci_wake_from_d3(efx->pci_dev, true); |
621 | } | 603 | } |
622 | } | 604 | } |
623 | 605 | ||
624 | 606 | ||
625 | /************************************************************************** | 607 | /************************************************************************** |
626 | * | 608 | * |
627 | * Revision-dependent attributes used by efx.c and nic.c | 609 | * Revision-dependent attributes used by efx.c and nic.c |
628 | * | 610 | * |
629 | ************************************************************************** | 611 | ************************************************************************** |
630 | */ | 612 | */ |
631 | 613 | ||
632 | const struct efx_nic_type siena_a0_nic_type = { | 614 | const struct efx_nic_type siena_a0_nic_type = { |
633 | .probe = siena_probe_nic, | 615 | .probe = siena_probe_nic, |
634 | .remove = siena_remove_nic, | 616 | .remove = siena_remove_nic, |
635 | .init = siena_init_nic, | 617 | .init = siena_init_nic, |
636 | .fini = efx_port_dummy_op_void, | 618 | .fini = efx_port_dummy_op_void, |
637 | .monitor = NULL, | 619 | .monitor = NULL, |
638 | .map_reset_reason = siena_map_reset_reason, | 620 | .map_reset_reason = siena_map_reset_reason, |
639 | .map_reset_flags = siena_map_reset_flags, | 621 | .map_reset_flags = siena_map_reset_flags, |
640 | .reset = siena_reset_hw, | 622 | .reset = siena_reset_hw, |
641 | .probe_port = siena_probe_port, | 623 | .probe_port = siena_probe_port, |
642 | .remove_port = siena_remove_port, | 624 | .remove_port = siena_remove_port, |
643 | .prepare_flush = efx_port_dummy_op_void, | 625 | .prepare_flush = efx_port_dummy_op_void, |
644 | .update_stats = siena_update_nic_stats, | 626 | .update_stats = siena_update_nic_stats, |
645 | .start_stats = siena_start_nic_stats, | 627 | .start_stats = siena_start_nic_stats, |
646 | .stop_stats = siena_stop_nic_stats, | 628 | .stop_stats = siena_stop_nic_stats, |
647 | .set_id_led = efx_mcdi_set_id_led, | 629 | .set_id_led = efx_mcdi_set_id_led, |
648 | .push_irq_moderation = siena_push_irq_moderation, | 630 | .push_irq_moderation = siena_push_irq_moderation, |
649 | .push_multicast_hash = siena_push_multicast_hash, | 631 | .push_multicast_hash = siena_push_multicast_hash, |
650 | .reconfigure_port = efx_mcdi_phy_reconfigure, | 632 | .reconfigure_port = efx_mcdi_phy_reconfigure, |
651 | .get_wol = siena_get_wol, | 633 | .get_wol = siena_get_wol, |
652 | .set_wol = siena_set_wol, | 634 | .set_wol = siena_set_wol, |
653 | .resume_wol = siena_init_wol, | 635 | .resume_wol = siena_init_wol, |
654 | .test_registers = siena_test_registers, | 636 | .test_registers = siena_test_registers, |
655 | .test_nvram = efx_mcdi_nvram_test_all, | 637 | .test_nvram = efx_mcdi_nvram_test_all, |
656 | .default_mac_ops = &efx_mcdi_mac_operations, | 638 | .default_mac_ops = &efx_mcdi_mac_operations, |
657 | 639 | ||
658 | .revision = EFX_REV_SIENA_A0, | 640 | .revision = EFX_REV_SIENA_A0, |
659 | .mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */ | 641 | .mem_map_size = (FR_CZ_MC_TREG_SMEM + |
642 | FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS), | ||
660 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, | 643 | .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, |
661 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, | 644 | .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, |
662 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, | 645 | .buf_tbl_base = FR_BZ_BUF_FULL_TBL, |
663 | .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, | 646 | .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, |
664 | .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, | 647 | .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, |
665 | .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), | 648 | .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), |
666 | .rx_buffer_hash_size = 0x10, | 649 | .rx_buffer_hash_size = 0x10, |
667 | .rx_buffer_padding = 0, | 650 | .rx_buffer_padding = 0, |
668 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | 651 | .max_interrupt_mode = EFX_INT_MODE_MSIX, |
669 | .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy | 652 | .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy |
670 | * interrupt handler only supports 32 | 653 | * interrupt handler only supports 32 |
671 | * channels */ | 654 | * channels */ |
672 | .tx_dc_base = 0x88000, | 655 | .tx_dc_base = 0x88000, |
673 | .rx_dc_base = 0x68000, | 656 | .rx_dc_base = 0x68000, |
674 | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | | 657 | .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | |
675 | NETIF_F_RXHASH | NETIF_F_NTUPLE), | 658 | NETIF_F_RXHASH | NETIF_F_NTUPLE), |
676 | }; | 659 | }; |
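siena_a0_nic_type above is the revision-specific ops table the generic efx core dispatches through; within this revert only .mem_map_size changes, growing again to cover the MC_TREG_SMEM window now that the separate ioremap of that window has been removed from siena_probe_nic(). A stripped-down sketch of the ops-table idea follows; every type, name and size in it is a hypothetical stand-in rather than the driver's real structure.

/* Sketch of a per-revision ops table consumed by a generic core, in the
 * spirit of efx_nic_type / siena_a0_nic_type above.  Every identifier
 * here is a hypothetical stand-in. */
#include <stdio.h>

struct nic;

struct nic_type {
	unsigned int mem_map_size;      /* how much of the BAR the core maps */
	int  (*probe)(struct nic *n);
	void (*remove)(struct nic *n);
};

struct nic {
	const struct nic_type *type;
};

static int demo_probe(struct nic *n)   { (void)n; puts("probe"); return 0; }
static void demo_remove(struct nic *n) { (void)n; puts("remove"); }

static const struct nic_type demo_type = {
	.mem_map_size = 0x100000,       /* made-up: must cover every register
					 * window the driver touches directly */
	.probe  = demo_probe,
	.remove = demo_remove,
};

int main(void)
{
	struct nic n = { .type = &demo_type };

	/* The core never branches on the revision; it maps .mem_map_size
	 * bytes and calls through the table. */
	printf("mapping 0x%x bytes of BAR\n", n.type->mem_map_size);
	n.type->probe(&n);
	n.type->remove(&n);
	return 0;
}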
drivers/net/sfc/workarounds.h
1 | /**************************************************************************** | 1 | /**************************************************************************** |
2 | * Driver for Solarflare Solarstorm network controllers and boards | 2 | * Driver for Solarflare Solarstorm network controllers and boards |
3 | * Copyright 2006-2010 Solarflare Communications Inc. | 3 | * Copyright 2006-2010 Solarflare Communications Inc. |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify it | 5 | * This program is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 as published | 6 | * under the terms of the GNU General Public License version 2 as published |
7 | * by the Free Software Foundation, incorporated herein by reference. | 7 | * by the Free Software Foundation, incorporated herein by reference. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #ifndef EFX_WORKAROUNDS_H | 10 | #ifndef EFX_WORKAROUNDS_H |
11 | #define EFX_WORKAROUNDS_H | 11 | #define EFX_WORKAROUNDS_H |
12 | 12 | ||
13 | /* | 13 | /* |
14 | * Hardware workarounds. | 14 | * Hardware workarounds. |
15 | * Bug numbers are from Solarflare's Bugzilla. | 15 | * Bug numbers are from Solarflare's Bugzilla. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 | 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 |
19 | #define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) | 19 | #define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) |
20 | #define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) | 20 | #define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0) |
21 | #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) | 21 | #define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0) |
22 | #define EFX_WORKAROUND_10G(efx) 1 | 22 | #define EFX_WORKAROUND_10G(efx) 1 |
23 | 23 | ||
24 | /* XAUI resets if link not detected */ | 24 | /* XAUI resets if link not detected */ |
25 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS | 25 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS |
26 | /* RX PCIe double split performance issue */ | 26 | /* RX PCIe double split performance issue */ |
27 | #define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS | 27 | #define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS |
28 | /* Bit-bashed I2C reads cause performance drop */ | 28 | /* Bit-bashed I2C reads cause performance drop */ |
29 | #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G | 29 | #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G |
30 | /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor | 30 | /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor |
31 | * or a PCIe error (bug 11028) */ | 31 | * or a PCIe error (bug 11028) */ |
32 | #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS | 32 | #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS |
33 | /* Transmit flow control may get disabled */ | 33 | /* Transmit flow control may get disabled */ |
34 | #define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB | 34 | #define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB |
35 | /* Truncated IPv4 packets can confuse the TX packet parser */ | 35 | /* Truncated IPv4 packets can confuse the TX packet parser */ |
36 | #define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB | 36 | #define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB |
37 | /* Legacy ISR read can return zero once */ | 37 | /* Legacy ISR read can return zero once */ |
38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS | 38 | #define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS |
39 | /* Legacy interrupt storm when interrupt fifo fills */ | 39 | /* Legacy interrupt storm when interrupt fifo fills */ |
40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA | 40 | #define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA |
41 | /* Write combining and sriov=enabled are incompatible */ | ||
42 | #define EFX_WORKAROUND_22643 EFX_WORKAROUND_SIENA | ||
43 | 41 | ||
44 | /* Spurious parity errors in TSORT buffers */ | 42 | /* Spurious parity errors in TSORT buffers */ |
45 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A | 43 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A |
46 | /* Unaligned read request >512 bytes after aligning may break TSORT */ | 44 | /* Unaligned read request >512 bytes after aligning may break TSORT */ |
47 | #define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A | 45 | #define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A |
48 | /* iSCSI parsing errors */ | 46 | /* iSCSI parsing errors */ |
49 | #define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A | 47 | #define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A |
50 | /* RX events go missing */ | 48 | /* RX events go missing */ |
51 | #define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A | 49 | #define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A |
52 | /* RX_RESET on A1 */ | 50 | /* RX_RESET on A1 */ |
53 | #define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A | 51 | #define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A |
54 | /* Increase filter depth to avoid RX_RESET */ | 52 | /* Increase filter depth to avoid RX_RESET */ |
55 | #define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A | 53 | #define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A |
56 | /* Flushes may never complete */ | 54 | /* Flushes may never complete */ |
57 | #define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB | 55 | #define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_AB |
58 | /* Leak overlength packets rather than free */ | 56 | /* Leak overlength packets rather than free */ |
59 | #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A | 57 | #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A |
60 | 58 | ||
61 | #endif /* EFX_WORKAROUNDS_H */ | 59 | #endif /* EFX_WORKAROUNDS_H */ |
62 | 60 |
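workarounds.h above reduces each hardware bug to a macro that maps a Bugzilla number onto a per-NIC predicate, which is why the write-combining workaround (22643) can be retired simply by deleting its define. A minimal sketch of that usage pattern follows; all identifiers and the call site are hypothetical, not taken from the driver.

/* Sketch of the workarounds.h pattern above: a bug number becomes a macro
 * that evaluates to a predicate on the NIC revision, so workaround code is
 * compiled everywhere but only runs on affected silicon.  Every identifier
 * here is a hypothetical stand-in. */
#include <stdbool.h>
#include <stdio.h>

enum nic_rev { REV_FALCON_A1, REV_FALCON_B0, REV_SIENA_A0 };
struct nic { enum nic_rev rev; };

static enum nic_rev nic_rev(const struct nic *n) { return n->rev; }

#define WORKAROUND_ALWAYS(n)   true
#define WORKAROUND_SIENA(n)    (nic_rev(n) == REV_SIENA_A0)
/* Legacy interrupt storm when the interrupt FIFO fills (cf. 17213 above). */
#define WORKAROUND_17213       WORKAROUND_SIENA

static void service_legacy_interrupt(const struct nic *n)
{
	if (WORKAROUND_17213(n))
		puts("Siena-only mitigation path");
	else
		puts("normal path");
}

int main(void)
{
	const struct nic siena  = { REV_SIENA_A0 };
	const struct nic falcon = { REV_FALCON_B0 };

	service_legacy_interrupt(&siena);
	service_legacy_interrupt(&falcon);
	return 0;
}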