Commit 6602041b8343fcdb6433cc72ca6cb6c2e189da6d

Authored by Ben Hutchings
Committed by David S. Miller
1 parent 503b47eafc

sfc: Store port number in private data, not net_device::dev_id

We should not use net_device::dev_id to indicate the port number, as
this affects the way the local part of IPv6 addresses is normally
generated.

This field was intended for use where multiple devices may share a
single assigned MAC address and need to have different IPv6 addresses.
Siena's two ports each have their own MAC addresses.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
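For context: when the kernel builds a modified-EUI-64 interface identifier from an Ethernet MAC address, a non-zero net_device::dev_id replaces the usual 0xFF/0xFE filler bytes and suppresses the universal/local bit flip. Below is a simplified sketch of that logic, modelled on net/ipv6/addrconf.c (not the exact kernel source):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sketch: derive the 8-byte IPv6 interface ID from a 6-byte MAC.
 * A non-zero dev_id is meant for devices that share one MAC; for
 * Siena, whose two ports have distinct MACs, setting dev_id changed
 * the interface ID for no good reason -- hence this commit. */
static void eui48_to_eui64(uint8_t eui[8], const uint8_t mac[6],
			   uint16_t dev_id)
{
	memcpy(eui, mac, 3);
	memcpy(eui + 5, mac + 3, 3);
	if (dev_id) {
		/* dev_id replaces the 0xFF/0xFE filler bytes */
		eui[3] = (dev_id >> 8) & 0xFF;
		eui[4] = dev_id & 0xFF;
	} else {
		eui[3] = 0xFF;
		eui[4] = 0xFE;
		eui[0] ^= 2;	/* invert the universal/local bit */
	}
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
	uint8_t eui[8];
	int i;

	eui48_to_eui64(eui, mac, 0);	/* dev_id = 0: normal EUI-64 */
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i < 7 ? ":" : "\n");
	return 0;
}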

Showing 2 changed files with 3 additions and 2 deletions

drivers/net/ethernet/sfc/net_driver.h
/****************************************************************************
 * Driver for Solarflare Solarstorm network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2011 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

/* Common definitions for all Efx net driver code */

#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>

#include "enum.h"
#include "bitfield.h"

/**************************************************************************
 *
 * Build definitions
 *
 **************************************************************************/

#define EFX_DRIVER_VERSION	"3.2"

#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif

/**************************************************************************
 *
 * Efx data structures
 *
 **************************************************************************/

#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
#define EFX_EXTRA_CHANNEL_IOV	0
#define EFX_EXTRA_CHANNEL_PTP	1
#define EFX_MAX_EXTRA_CHANNELS	2U

/* Checksum generation is a per-queue option in hardware, so each
 * queue visible to the networking core is backed by two hardware TX
 * queues. */
#define EFX_MAX_TX_TC		2
#define EFX_MAX_CORE_TX_QUEUES	(EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD	1	/* flag */
#define EFX_TXQ_TYPE_HIGHPRI	2	/* flag */
#define EFX_TXQ_TYPES		4
#define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
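Since the two type defines above are single-bit flags, their bitwise OR enumerates all EFX_TXQ_TYPES combinations, and the type value can be used directly as an index into a channel's tx_queue[] array. A standalone sketch of one plausible global numbering (illustrative only; the helper name is made up):

#include <assert.h>
#include <stdio.h>

#define DEMO_TXQ_TYPE_OFFLOAD	1	/* checksum offload enabled */
#define DEMO_TXQ_TYPE_HIGHPRI	2	/* high-priority traffic class */
#define DEMO_TXQ_TYPES		4	/* all flag combinations */

/* A channel owns one TX queue per flag combination, so the flags
 * themselves select the queue within the channel. */
static unsigned int demo_tx_queue_index(unsigned int channel,
					unsigned int type)
{
	assert(type < DEMO_TXQ_TYPES);
	return channel * DEMO_TXQ_TYPES + type;
}

int main(void)
{
	/* Channel 3, checksummed + high-priority queue: prints 15 */
	printf("%u\n", demo_tx_queue_index(3, DEMO_TXQ_TYPE_OFFLOAD |
					      DEMO_TXQ_TYPE_HIGHPRI));
	return 0;
}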
/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)

/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page,
 * and should be a multiple of the cache line size.
 */
#define EFX_RX_USR_BUF_SIZE	(2048 - 256)

/* If possible, we should ensure cache line alignment at start and end
 * of every buffer.  Otherwise, we just need to ensure 4-byte
 * alignment of the network header.
 */
#if NET_IP_ALIGN == 0
#define EFX_RX_BUF_ALIGNMENT	L1_CACHE_BYTES
#else
#define EFX_RX_BUF_ALIGNMENT	4
#endif

/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;

struct efx_self_tests;
/**
 * struct efx_special_buffer - An Efx special buffer
 * @addr: CPU base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 * @index: Buffer index within controller's buffer table
 * @entries: Number of buffer table entries
 *
 * Special buffers are used for the event queues and the TX and RX
 * descriptor queues for each channel.  They are *not* used for the
 * actual transmit and receive buffers.
 */
struct efx_special_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
	unsigned int index;
	unsigned int entries;
};

/**
 * struct efx_tx_buffer - buffer state for a TX descriptor
 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
 *	freed when descriptor completes
 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
 *	freed when descriptor completes.
 * @dma_addr: DMA address of the fragment.
 * @flags: Flags for allocation and DMA mapping type
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
 * @unmap_len: Length of this fragment to unmap
 */
struct efx_tx_buffer {
	union {
		const struct sk_buff *skb;
		void *heap_buf;
	};
	dma_addr_t dma_addr;
	unsigned short flags;
	unsigned short len;
	unsigned short unmap_len;
};
#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
#define EFX_TX_BUF_HEAP		4	/* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */

/**
 * struct efx_tx_queue - An Efx TX queue
 *
 * This is a ring buffer of TX fragments.
 * Since the TX completion path always executes on the same
 * CPU and the xmit path can operate on different CPUs,
 * performance is increased by ensuring that the completion
 * path and the xmit path operate on different cache lines.
 * This is particularly important if the xmit path is always
 * executing on one CPU which is different from the completion
 * path.  There is also a cache line for members which are
 * read but not written on the fast path.
 *
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @channel: The associated channel
 * @core_txq: The networking core TX queue structure
 * @buffer: The software buffer ring
 * @tsoh_page: Array of pages of TSO header buffers
 * @txd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @initialised: Has hardware queue been initialised?
 * @read_count: Current read pointer.
 *	This is the number of buffers that have been removed from both rings.
 * @old_write_count: The value of @write_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of @write_count if this
 *	variable indicates that the queue is empty.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @insert_count: Current insert pointer
 *	This is the number of buffers that have been added to the
 *	software ring.
 * @write_count: Current write pointer
 *	This is the number of buffers that have been added to the
 *	hardware ring.
 * @old_read_count: The value of read_count when last checked.
 *	This is here for performance reasons.  The xmit path will
 *	only get the up-to-date value of read_count if this
 *	variable indicates that the queue is full.  This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 * @pushes: Number of times the TX push feature has been used
 * @empty_read_count: If the completion path has seen the queue as empty
 *	and the transmission path has not yet checked this, the value of
 *	@read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	unsigned queue;
	struct efx_channel *channel;
	struct netdev_queue *core_txq;
	struct efx_tx_buffer *buffer;
	struct efx_buffer *tsoh_page;
	struct efx_special_buffer txd;
	unsigned int ptr_mask;
	bool initialised;

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;
	unsigned int old_write_count;

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
	unsigned int pushes;

	/* Members shared between paths and sometimes updated */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
#define EFX_EMPTY_COUNT_VALID 0x80000000
	atomic_t flush_outstanding;
};
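The counters above are free-running and only masked when a ring slot is actually addressed, which keeps fill-level arithmetic immune to wrap-around; @empty_read_count plays a similar trick, using the top bit (%EFX_EMPTY_COUNT_VALID) as a validity flag. A minimal standalone sketch of the idiom (illustrative, not the driver's code):

#include <stdio.h>

#define DEMO_PTR_MASK 1023U	/* ring of 1024 entries, size minus 1 */

/* Counters increment without bound; unsigned subtraction gives the
 * correct fill level even after either counter wraps past UINT_MAX. */
static unsigned int fill_level(unsigned int insert_count,
			       unsigned int read_count)
{
	return insert_count - read_count;
}

/* A counter is masked only when used to address a ring slot. */
static unsigned int ring_index(unsigned int count)
{
	return count & DEMO_PTR_MASK;
}

int main(void)
{
	unsigned int read = 4294967290U;	/* about to wrap */
	unsigned int insert = read + 10;	/* wraps past zero */

	/* Prints "fill=10 slot=4" despite the wrap-around. */
	printf("fill=%u slot=%u\n", fill_level(insert, read),
	       ring_index(insert));
	return 0;
}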

/**
 * struct efx_rx_buffer - An Efx RX data buffer
 * @dma_addr: DMA base address of the buffer
 * @page: The associated page buffer.
 *	Will be %NULL if the buffer slot is currently free.
 * @page_offset: If pending: offset in @page of DMA base address.
 *	If completed: offset in @page of Ethernet header.
 * @len: If pending: length for DMA descriptor.
 *	If completed: received length, excluding hash prefix.
 * @flags: Flags for buffer and packet state.  These are only set on the
 *	first buffer of a scattered packet.
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	struct page *page;
	u16 page_offset;
	u16 len;
	u16 flags;
};
#define EFX_RX_BUF_LAST_IN_PAGE	0x0001
#define EFX_RX_PKT_CSUMMED	0x0002
#define EFX_RX_PKT_DISCARD	0x0004

/**
 * struct efx_rx_page_state - Page-based rx buffer state
 *
 * Inserted at the start of every page allocated for receive buffers.
 * Used to facilitate sharing dma mappings between recycled rx buffers
 * and those passed up to the kernel.
 *
 * @refcnt: Number of struct efx_rx_buffer's referencing this page.
 *	When refcnt falls to zero, the page is unmapped for dma
 * @dma_addr: The dma address of this page.
 */
struct efx_rx_page_state {
	unsigned refcnt;
	dma_addr_t dma_addr;

	unsigned int __pad[0] ____cacheline_aligned;
};

/**
 * struct efx_rx_queue - An Efx RX queue
 * @efx: The associated Efx NIC
 * @core_index: Index of network core RX queue.  Will be >= 0 iff this
 *	is associated with a real RX queue.
 * @buffer: The software buffer ring
 * @rxd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @enabled: Receive queue enabled indicator.
 * @flush_pending: Set when a RX flush is pending.  Has the same lifetime as
 *	@rxq_flush_pending.
 * @added_count: Number of buffers added to the receive queue.
 * @notified_count: Number of buffers given to NIC (<= @added_count).
 * @removed_count: Number of buffers removed from the receive queue.
 * @scatter_n: Number of buffers used by current packet
 * @page_ring: The ring to store DMA mapped pages for reuse.
 * @page_add: Counter to calculate the write pointer for the recycle ring.
 * @page_remove: Counter to calculate the read pointer for the recycle ring.
 * @page_recycle_count: The number of pages that have been recycled.
 * @page_recycle_failed: The number of pages that couldn't be recycled because
 *	the kernel still held a reference to them.
 * @page_recycle_full: The number of pages that were released because the
 *	recycle ring was full.
 * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.
 * @recycle_count: RX buffer recycle counter.
 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
 */
struct efx_rx_queue {
	struct efx_nic *efx;
	int core_index;
	struct efx_rx_buffer *buffer;
	struct efx_special_buffer rxd;
	unsigned int ptr_mask;
	bool enabled;
	bool flush_pending;

	unsigned int added_count;
	unsigned int notified_count;
	unsigned int removed_count;
	unsigned int scatter_n;
	struct page **page_ring;
	unsigned int page_add;
	unsigned int page_remove;
	unsigned int page_recycle_count;
	unsigned int page_recycle_failed;
	unsigned int page_recycle_full;
	unsigned int page_ptr_mask;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int recycle_count;
	struct timer_list slow_fill;
	unsigned int slow_fill_count;
};

/**
 * struct efx_buffer - An Efx general-purpose buffer
 * @addr: host base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 *
 * The NIC uses these buffers for its interrupt status registers and
 * MAC stats dumps.
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};


enum efx_rx_alloc_method {
	RX_ALLOC_METHOD_AUTO = 0,
	RX_ALLOC_METHOD_SKB = 1,
	RX_ALLOC_METHOD_PAGE = 2,
};

/**
 * struct efx_channel - An Efx channel
 *
 * A channel comprises an event queue, at least one TX queue, at least
 * one RX queue, and an associated tasklet for processing the event
 * queue.
 *
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @type: Channel type definition
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation: IRQ moderation value (in hardware ticks)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
 * @work_pending: Is work pending via NAPI?
 * @eventq: Event queue buffer
 * @eventq_mask: Event queue pointer mask
 * @eventq_read_ptr: Event queue read pointer
 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
 *	lack of descriptors
 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
 *	__efx_rx_packet(), or zero if there is none
 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
 *	by __efx_rx_packet(), if @rx_pkt_n_frags != 0
 * @rx_queue: RX queue for this channel
 * @tx_queue: TX queues for this channel
 */
struct efx_channel {
	struct efx_nic *efx;
	int channel;
	const struct efx_channel_type *type;
	bool enabled;
	int irq;
	unsigned int irq_moderation;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
	bool work_pending;
	struct efx_special_buffer eventq;
	unsigned int eventq_mask;
	unsigned int eventq_read_ptr;
	int event_test_cpu;

	unsigned int irq_count;
	unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;
#endif

	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;
	unsigned int n_rx_nodesc_trunc;

	unsigned int rx_pkt_n_frags;
	unsigned int rx_pkt_index;

	struct efx_rx_queue rx_queue;
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
};

/**
 * struct efx_channel_type - distinguishes traffic and extra channels
 * @handle_no_channel: Handle failure to allocate an extra channel
 * @pre_probe: Set up extra state prior to initialisation
 * @post_remove: Tear down extra state after finalisation, if allocated.
 *	May be called on channels that have not been probed.
 * @get_name: Generate the channel's name (used for its IRQ handler)
 * @copy: Copy the channel state prior to reallocation.  May be %NULL if
 *	reallocation is not supported.
 * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
 * @keep_eventq: Flag for whether event queue should be kept initialised
 *	while the device is stopped
 */
struct efx_channel_type {
	void (*handle_no_channel)(struct efx_nic *);
	int (*pre_probe)(struct efx_channel *);
	void (*post_remove)(struct efx_channel *);
	void (*get_name)(struct efx_channel *, char *buf, size_t len);
	struct efx_channel *(*copy)(const struct efx_channel *);
	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
	bool keep_eventq;
};

enum efx_led_mode {
	EFX_LED_OFF = 0,
	EFX_LED_ON = 1,
	EFX_LED_DEFAULT = 2
};

#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max) ? member ## _names[val] : "(invalid)"
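The lookup macro guards against out-of-range values; note that its expansion is not parenthesised, so callers should not embed it in a larger expression. A standalone demonstration with a hypothetical string table (the macro is restated here so the snippet compiles on its own):

#include <stdio.h>

#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max) ? member ## _names[val] : "(invalid)"

/* Hypothetical table in the style of efx_reset_type_names[]. */
static const char *const demo_names[] = { "off", "on", "default" };
static const unsigned int demo_max = 3;

int main(void)
{
	printf("%s\n", STRING_TABLE_LOOKUP(1, demo));	/* "on" */
	printf("%s\n", STRING_TABLE_LOOKUP(7, demo));	/* "(invalid)" */
	return 0;
}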

extern const char *const efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

extern const char *const efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)

enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX	/* Insert any new items before this */
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

enum nic_state {
	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
	STATE_READY = 1,	/* hardware ready and netdev registered */
	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
	STATE_RECOVERY = 3,	/* device recovering from PCI error */
};

/*
 * Alignment of the skb->head which wraps a page-allocated RX buffer
 *
 * The skb allocated to wrap an rx_buffer can have this alignment.  Since
 * the data is memcpy'd from the rx_buf, it does not need to be equal to
 * NET_IP_ALIGN.
 */
#define EFX_PAGE_SKB_ALIGN 2

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field */
#define EFX_FC_RX	FLOW_CTRL_RX
#define EFX_FC_TX	FLOW_CTRL_TX
#define EFX_FC_AUTO	4

/**
 * struct efx_link_state - Current state of the link
 * @up: Link is up
 * @fd: Link is full-duplex
 * @fc: Actual flow control flags
 * @speed: Link speed (Mbps)
 */
struct efx_link_state {
	bool up;
	bool fd;
	u8 fc;
	unsigned int speed;
};

static inline bool efx_link_state_equal(const struct efx_link_state *left,
					const struct efx_link_state *right)
{
	return left->up == right->up && left->fd == right->fd &&
		left->fc == right->fc && left->speed == right->speed;
}

/**
 * struct efx_phy_operations - Efx PHY operations table
 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
 *	efx->loopback_modes.
 * @init: Initialise PHY
 * @fini: Shut down PHY
 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
 * @poll: Update @link_state and report whether it changed.
 *	Serialised by the mac_lock.
 * @get_settings: Get ethtool settings.  Serialised by the mac_lock.
 * @set_settings: Set ethtool settings.  Serialised by the mac_lock.
 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @test_alive: Test that PHY is 'alive' (online)
 * @test_name: Get the name of a PHY-specific test/result
 * @run_tests: Run tests and record results as appropriate (offline).
 *	Flags are the ethtool tests flags.
 */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*remove) (struct efx_nic *efx);
	int (*reconfigure) (struct efx_nic *efx);
	bool (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int (*get_module_eeprom) (struct efx_nic *efx,
				  struct ethtool_eeprom *ee,
				  u8 *data);
	int (*get_module_info) (struct efx_nic *efx,
				struct ethtool_modinfo *modinfo);
};

/**
 * enum efx_phy_mode - PHY operating mode flags
 * @PHY_MODE_NORMAL: on and should pass traffic
 * @PHY_MODE_TX_DISABLED: on with TX disabled
 * @PHY_MODE_LOW_POWER: set to low power through MDIO
 * @PHY_MODE_OFF: switched off through external control
 * @PHY_MODE_SPECIAL: on but will not pass traffic
 */
enum efx_phy_mode {
	PHY_MODE_NORMAL		= 0,
	PHY_MODE_TX_DISABLED	= 1,
	PHY_MODE_LOW_POWER	= 2,
	PHY_MODE_OFF		= 4,
	PHY_MODE_SPECIAL	= 8,
};

static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
{
	return !!(mode & ~PHY_MODE_TX_DISABLED);
}

/*
 * Efx extended statistics
 *
 * Not all statistics are provided by all supported MACs.  The purpose
 * of this structure is to contain the raw statistics provided by each
 * MAC.
 */
struct efx_mac_stats {
	u64 tx_bytes;
	u64 tx_good_bytes;
	u64 tx_bad_bytes;
	u64 tx_packets;
	u64 tx_bad;
	u64 tx_pause;
	u64 tx_control;
	u64 tx_unicast;
	u64 tx_multicast;
	u64 tx_broadcast;
	u64 tx_lt64;
	u64 tx_64;
	u64 tx_65_to_127;
	u64 tx_128_to_255;
	u64 tx_256_to_511;
	u64 tx_512_to_1023;
	u64 tx_1024_to_15xx;
	u64 tx_15xx_to_jumbo;
	u64 tx_gtjumbo;
	u64 tx_collision;
	u64 tx_single_collision;
	u64 tx_multiple_collision;
	u64 tx_excessive_collision;
	u64 tx_deferred;
	u64 tx_late_collision;
	u64 tx_excessive_deferred;
	u64 tx_non_tcpudp;
	u64 tx_mac_src_error;
	u64 tx_ip_src_error;
	u64 rx_bytes;
	u64 rx_good_bytes;
	u64 rx_bad_bytes;
	u64 rx_packets;
	u64 rx_good;
	u64 rx_bad;
	u64 rx_pause;
	u64 rx_control;
	u64 rx_unicast;
	u64 rx_multicast;
	u64 rx_broadcast;
	u64 rx_lt64;
	u64 rx_64;
	u64 rx_65_to_127;
	u64 rx_128_to_255;
	u64 rx_256_to_511;
	u64 rx_512_to_1023;
	u64 rx_1024_to_15xx;
	u64 rx_15xx_to_jumbo;
	u64 rx_gtjumbo;
	u64 rx_bad_lt64;
	u64 rx_bad_64_to_15xx;
	u64 rx_bad_15xx_to_jumbo;
	u64 rx_bad_gtjumbo;
	u64 rx_overflow;
	u64 rx_missed;
	u64 rx_false_carrier;
	u64 rx_symbol_error;
	u64 rx_align_error;
	u64 rx_length_error;
	u64 rx_internal_error;
	u64 rx_good_lt64;
};

/* Number of bits used in a multicast filter hash address */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in a multicast filter hash */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* An Efx multicast filter hash */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
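Each multicast address selects one of the 256 single-bit entries in the hash above. A standalone sketch of how a filter bit would be set from a hash value (illustrative; in the driver the index is derived from a CRC of the multicast MAC address):

#include <stdint.h>
#include <stdio.h>

#define DEMO_MCAST_HASH_BITS	8
#define DEMO_MCAST_HASH_ENTRIES	(1 << DEMO_MCAST_HASH_BITS)

/* Byte-addressed view of the 256-entry single-bit hash table. */
struct demo_multicast_hash {
	uint8_t byte[DEMO_MCAST_HASH_ENTRIES / 8];
};

/* Set the filter bit selected by the low 8 bits of a hash value. */
static void demo_hash_set(struct demo_multicast_hash *hash, uint32_t crc)
{
	unsigned int bit = crc & (DEMO_MCAST_HASH_ENTRIES - 1);

	hash->byte[bit / 8] |= 1 << (bit % 8);
}

int main(void)
{
	struct demo_multicast_hash hash = { { 0 } };

	demo_hash_set(&hash, 0x1234abcd);	/* bit 0xcd: byte 25, bit 5 */
	printf("byte[25]=0x%02x\n", hash.byte[25]);	/* prints 0x20 */
	return 0;
}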
663 663
664 struct efx_filter_state; 664 struct efx_filter_state;
665 struct efx_vf; 665 struct efx_vf;
666 struct vfdi_status; 666 struct vfdi_status;
667 667
668 /** 668 /**
669 * struct efx_nic - an Efx NIC 669 * struct efx_nic - an Efx NIC
670 * @name: Device name (net device name or bus id before net device registered) 670 * @name: Device name (net device name or bus id before net device registered)
671 * @pci_dev: The PCI device 671 * @pci_dev: The PCI device
672 * @type: Controller type attributes 672 * @type: Controller type attributes
673 * @legacy_irq: IRQ number 673 * @legacy_irq: IRQ number
674 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)? 674 * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
675 * @workqueue: Workqueue for port reconfigures and the HW monitor. 675 * @workqueue: Workqueue for port reconfigures and the HW monitor.
676 * Work items do not hold and must not acquire RTNL. 676 * Work items do not hold and must not acquire RTNL.
677 * @workqueue_name: Name of workqueue 677 * @workqueue_name: Name of workqueue
678 * @reset_work: Scheduled reset workitem 678 * @reset_work: Scheduled reset workitem
679 * @membase_phys: Memory BAR value as physical address 679 * @membase_phys: Memory BAR value as physical address
680 * @membase: Memory BAR value 680 * @membase: Memory BAR value
681 * @interrupt_mode: Interrupt mode 681 * @interrupt_mode: Interrupt mode
682 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds 682 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
683 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 683 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
684 * @irq_rx_moderation: IRQ moderation time for RX event queues 684 * @irq_rx_moderation: IRQ moderation time for RX event queues
685 * @msg_enable: Log message enable flags 685 * @msg_enable: Log message enable flags
686 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock. 686 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
687 * @reset_pending: Bitmask for pending resets 687 * @reset_pending: Bitmask for pending resets
688 * @tx_queue: TX DMA queues 688 * @tx_queue: TX DMA queues
689 * @rx_queue: RX DMA queues 689 * @rx_queue: RX DMA queues
690 * @channel: Channels 690 * @channel: Channels
691 * @channel_name: Names for channels and their IRQs 691 * @channel_name: Names for channels and their IRQs
692 * @extra_channel_types: Types of extra (non-traffic) channels that 692 * @extra_channel_types: Types of extra (non-traffic) channels that
693 * should be allocated for this NIC 693 * should be allocated for this NIC
694 * @rxq_entries: Size of receive queues requested by user. 694 * @rxq_entries: Size of receive queues requested by user.
695 * @txq_entries: Size of transmit queues requested by user. 695 * @txq_entries: Size of transmit queues requested by user.
696 * @txq_stop_thresh: TX queue fill level at or above which we stop it. 696 * @txq_stop_thresh: TX queue fill level at or above which we stop it.
697 * @txq_wake_thresh: TX queue fill level at or below which we wake it. 697 * @txq_wake_thresh: TX queue fill level at or below which we wake it.
698 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches 698 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
699 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches 699 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
700 * @sram_lim_qw: Qword address limit of SRAM 700 * @sram_lim_qw: Qword address limit of SRAM
701 * @next_buffer_table: First available buffer table id 701 * @next_buffer_table: First available buffer table id
702 * @n_channels: Number of channels in use 702 * @n_channels: Number of channels in use
703 * @n_rx_channels: Number of channels used for RX (= number of RX queues) 703 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
704 * @n_tx_channels: Number of channels used for TX 704 * @n_tx_channels: Number of channels used for TX
705 * @rx_dma_len: Current maximum RX DMA length 705 * @rx_dma_len: Current maximum RX DMA length
706 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 706 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
707 * @rx_buffer_truesize: Amortised allocation size of an RX buffer, 707 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
708 * for use in sk_buff::truesize 708 * for use in sk_buff::truesize
709 * @rx_hash_key: Toeplitz hash key for RSS 709 * @rx_hash_key: Toeplitz hash key for RSS
710 * @rx_indir_table: Indirection table for RSS 710 * @rx_indir_table: Indirection table for RSS
711 * @rx_scatter: Scatter mode enabled for receives 711 * @rx_scatter: Scatter mode enabled for receives
712 * @int_error_count: Number of internal errors seen recently 712 * @int_error_count: Number of internal errors seen recently
713 * @int_error_expire: Time at which error count will be expired 713 * @int_error_expire: Time at which error count will be expired
714 * @irq_status: Interrupt status buffer 714 * @irq_status: Interrupt status buffer
715 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 715 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
716 * @irq_level: IRQ level/index for IRQs not triggered by an event queue 716 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
717 * @selftest_work: Work item for asynchronous self-test 717 * @selftest_work: Work item for asynchronous self-test
718 * @mtd_list: List of MTDs attached to the NIC 718 * @mtd_list: List of MTDs attached to the NIC
719 * @nic_data: Hardware dependent state 719 * @nic_data: Hardware dependent state
720 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 720 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
721 * efx_monitor() and efx_reconfigure_port() 721 * efx_monitor() and efx_reconfigure_port()
722 * @port_enabled: Port enabled indicator. 722 * @port_enabled: Port enabled indicator.
723 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and 723 * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
724 * efx_mac_work() with kernel interfaces. Safe to read under any 724 * efx_mac_work() with kernel interfaces. Safe to read under any
725 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must 725 * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
726 * be held to modify it. 726 * be held to modify it.
727 * @port_initialized: Port initialized? 727 * @port_initialized: Port initialized?
728 * @net_dev: Operating system network device. Consider holding the rtnl lock 728 * @net_dev: Operating system network device. Consider holding the rtnl lock
729 * @stats_buffer: DMA buffer for statistics 729 * @stats_buffer: DMA buffer for statistics
730 * @phy_type: PHY type 730 * @phy_type: PHY type
731 * @phy_op: PHY interface 731 * @phy_op: PHY interface
732 * @phy_data: PHY private data (including PHY-specific stats) 732 * @phy_data: PHY private data (including PHY-specific stats)
733 * @mdio: PHY MDIO interface 733 * @mdio: PHY MDIO interface
734 * @mdio_bus: PHY MDIO bus ID (only used by Siena) 734 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
735 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 735 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
736 * @link_advertising: Autonegotiation advertising flags 736 * @link_advertising: Autonegotiation advertising flags
737 * @link_state: Current state of the link 737 * @link_state: Current state of the link
738 * @n_link_state_changes: Number of times the link has changed state 738 * @n_link_state_changes: Number of times the link has changed state
739 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. 739 * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
740 * @multicast_hash: Multicast hash table 740 * @multicast_hash: Multicast hash table
741 * @wanted_fc: Wanted flow control flags 741 * @wanted_fc: Wanted flow control flags
742 * @fc_disable: When non-zero flow control is disabled. Typically used to 742 * @fc_disable: When non-zero flow control is disabled. Typically used to
743 * ensure that network back pressure doesn't delay dma queue flushes. 743 * ensure that network back pressure doesn't delay dma queue flushes.
744 * Serialised by the rtnl lock. 744 * Serialised by the rtnl lock.
745 * @mac_work: Work item for changing MAC promiscuity and multicast hash 745 * @mac_work: Work item for changing MAC promiscuity and multicast hash
746 * @loopback_mode: Loopback status 746 * @loopback_mode: Loopback status
747 * @loopback_modes: Supported loopback mode bitmask 747 * @loopback_modes: Supported loopback mode bitmask
748 * @loopback_selftest: Offline self-test private state 748 * @loopback_selftest: Offline self-test private state
749 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained. 749 * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
750 * @rxq_flush_pending: Number of receive queues that need to be flushed. 750 * @rxq_flush_pending: Number of receive queues that need to be flushed.
751 * Decremented when efx_flush_rx_queue() is called. 751 * Decremented when efx_flush_rx_queue() is called.
752 * @rxq_flush_outstanding: Number of RX flushes started but not yet 752 * @rxq_flush_outstanding: Number of RX flushes started but not yet
753 * completed (either success or failure). Not used when MCDI is used to 753 * completed (either success or failure). Not used when MCDI is used to
754 * flush receive queues. 754 * flush receive queues.
755 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions. 755 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
756 * @vf: Array of &struct efx_vf objects. 756 * @vf: Array of &struct efx_vf objects.
757 * @vf_count: Number of VFs intended to be enabled. 757 * @vf_count: Number of VFs intended to be enabled.
758 * @vf_init_count: Number of VFs that have been fully initialised. 758 * @vf_init_count: Number of VFs that have been fully initialised.
759 * @vi_scale: log2 number of vnics per VF. 759 * @vi_scale: log2 number of vnics per VF.
760 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues. 760 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
761 * @vfdi_status: Common VFDI status page to be DMAed to VF address space. 761 * @vfdi_status: Common VFDI status page to be DMAed to VF address space.
762 * @local_addr_list: List of local addresses. Protected by @local_lock. 762 * @local_addr_list: List of local addresses. Protected by @local_lock.
763 * @local_page_list: List of DMA addressable pages used to broadcast 763 * @local_page_list: List of DMA addressable pages used to broadcast
764 * @local_addr_list. Protected by @local_lock. 764 * @local_addr_list. Protected by @local_lock.
765 * @local_lock: Mutex protecting @local_addr_list and @local_page_list. 765 * @local_lock: Mutex protecting @local_addr_list and @local_page_list.
766 * @peer_work: Work item to broadcast peer addresses to VMs. 766 * @peer_work: Work item to broadcast peer addresses to VMs.
767 * @ptp_data: PTP state data 767 * @ptp_data: PTP state data
768 * @monitor_work: Hardware monitor workitem 768 * @monitor_work: Hardware monitor workitem
769 * @biu_lock: BIU (bus interface unit) lock 769 * @biu_lock: BIU (bus interface unit) lock
770 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This 770 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
771 * field is used by efx_test_interrupts() to verify that an 771 * field is used by efx_test_interrupts() to verify that an
772 * interrupt has occurred. 772 * interrupt has occurred.
773 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count 773 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
774 * @mac_stats: MAC statistics. These include all statistics the MACs 774 * @mac_stats: MAC statistics. These include all statistics the MACs
775 * can provide. Generic code converts these into a standard 775 * can provide. Generic code converts these into a standard
776 * &struct net_device_stats. 776 * &struct net_device_stats.
777 * @stats_lock: Statistics update lock. Serialises statistics fetches 777 * @stats_lock: Statistics update lock. Serialises statistics fetches
778 * and access to @mac_stats. 778 * and access to @mac_stats.
779 * 779 *
780 * This is stored in the private area of the &struct net_device. 780 * This is stored in the private area of the &struct net_device.
781 */ 781 */
782 struct efx_nic { 782 struct efx_nic {
783 /* The following fields should be written very rarely */ 783 /* The following fields should be written very rarely */
784 784
785 char name[IFNAMSIZ]; 785 char name[IFNAMSIZ];
786 struct pci_dev *pci_dev; 786 struct pci_dev *pci_dev;
787 unsigned int port_num;
787 const struct efx_nic_type *type; 788 const struct efx_nic_type *type;
788 int legacy_irq; 789 int legacy_irq;
789 bool legacy_irq_enabled; 790 bool legacy_irq_enabled;
790 struct workqueue_struct *workqueue; 791 struct workqueue_struct *workqueue;
791 char workqueue_name[16]; 792 char workqueue_name[16];
792 struct work_struct reset_work; 793 struct work_struct reset_work;
793 resource_size_t membase_phys; 794 resource_size_t membase_phys;
794 void __iomem *membase; 795 void __iomem *membase;
795 796
796 enum efx_int_mode interrupt_mode; 797 enum efx_int_mode interrupt_mode;
797 unsigned int timer_quantum_ns; 798 unsigned int timer_quantum_ns;
798 bool irq_rx_adaptive; 799 bool irq_rx_adaptive;
799 unsigned int irq_rx_moderation; 800 unsigned int irq_rx_moderation;
800 u32 msg_enable; 801 u32 msg_enable;
801 802
802 enum nic_state state; 803 enum nic_state state;
803 unsigned long reset_pending; 804 unsigned long reset_pending;
804 805
805 struct efx_channel *channel[EFX_MAX_CHANNELS]; 806 struct efx_channel *channel[EFX_MAX_CHANNELS];
806 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6]; 807 char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
807 const struct efx_channel_type * 808 const struct efx_channel_type *
808 extra_channel_type[EFX_MAX_EXTRA_CHANNELS]; 809 extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
809 810
810 unsigned rxq_entries; 811 unsigned rxq_entries;
811 unsigned txq_entries; 812 unsigned txq_entries;
812 unsigned int txq_stop_thresh; 813 unsigned int txq_stop_thresh;
813 unsigned int txq_wake_thresh; 814 unsigned int txq_wake_thresh;
814 815
815 unsigned tx_dc_base; 816 unsigned tx_dc_base;
816 unsigned rx_dc_base; 817 unsigned rx_dc_base;
817 unsigned sram_lim_qw; 818 unsigned sram_lim_qw;
818 unsigned next_buffer_table; 819 unsigned next_buffer_table;
819 unsigned n_channels; 820 unsigned n_channels;
820 unsigned n_rx_channels; 821 unsigned n_rx_channels;
821 unsigned rss_spread; 822 unsigned rss_spread;
822 unsigned tx_channel_offset; 823 unsigned tx_channel_offset;
823 unsigned n_tx_channels; 824 unsigned n_tx_channels;
824 unsigned int rx_dma_len; 825 unsigned int rx_dma_len;
825 unsigned int rx_buffer_order; 826 unsigned int rx_buffer_order;
826 unsigned int rx_buffer_truesize; 827 unsigned int rx_buffer_truesize;
827 unsigned int rx_page_buf_step; 828 unsigned int rx_page_buf_step;
828 unsigned int rx_bufs_per_page; 829 unsigned int rx_bufs_per_page;
829 unsigned int rx_pages_per_batch; 830 unsigned int rx_pages_per_batch;
830 u8 rx_hash_key[40]; 831 u8 rx_hash_key[40];
831 u32 rx_indir_table[128]; 832 u32 rx_indir_table[128];
832 bool rx_scatter; 833 bool rx_scatter;
833 834
834 unsigned int_error_count; 835 unsigned int_error_count;
835 unsigned long int_error_expire; 836 unsigned long int_error_expire;
836 837
837 struct efx_buffer irq_status; 838 struct efx_buffer irq_status;
838 unsigned irq_zero_count; 839 unsigned irq_zero_count;
839 unsigned irq_level; 840 unsigned irq_level;
840 struct delayed_work selftest_work; 841 struct delayed_work selftest_work;
841 842
842 #ifdef CONFIG_SFC_MTD 843 #ifdef CONFIG_SFC_MTD
843 struct list_head mtd_list; 844 struct list_head mtd_list;
844 #endif 845 #endif
845 846
846 void *nic_data; 847 void *nic_data;
847 848
848 struct mutex mac_lock; 849 struct mutex mac_lock;
849 struct work_struct mac_work; 850 struct work_struct mac_work;
850 bool port_enabled; 851 bool port_enabled;
851 852
852 bool port_initialized; 853 bool port_initialized;
853 struct net_device *net_dev; 854 struct net_device *net_dev;
854 855
855 struct efx_buffer stats_buffer; 856 struct efx_buffer stats_buffer;
856 857
857 unsigned int phy_type; 858 unsigned int phy_type;
858 const struct efx_phy_operations *phy_op; 859 const struct efx_phy_operations *phy_op;
859 void *phy_data; 860 void *phy_data;
860 struct mdio_if_info mdio; 861 struct mdio_if_info mdio;
861 unsigned int mdio_bus; 862 unsigned int mdio_bus;
862 enum efx_phy_mode phy_mode; 863 enum efx_phy_mode phy_mode;
863 864
864 u32 link_advertising; 865 u32 link_advertising;
865 struct efx_link_state link_state; 866 struct efx_link_state link_state;
866 unsigned int n_link_state_changes; 867 unsigned int n_link_state_changes;
867 868
868 bool promiscuous; 869 bool promiscuous;
869 union efx_multicast_hash multicast_hash; 870 union efx_multicast_hash multicast_hash;
870 u8 wanted_fc; 871 u8 wanted_fc;
871 unsigned fc_disable; 872 unsigned fc_disable;
872 873
873 atomic_t rx_reset; 874 atomic_t rx_reset;
874 enum efx_loopback_mode loopback_mode; 875 enum efx_loopback_mode loopback_mode;
875 u64 loopback_modes; 876 u64 loopback_modes;
876 877
877 void *loopback_selftest; 878 void *loopback_selftest;
878 879
879 struct efx_filter_state *filter_state; 880 struct efx_filter_state *filter_state;
880 881
881 atomic_t drain_pending; 882 atomic_t drain_pending;
882 atomic_t rxq_flush_pending; 883 atomic_t rxq_flush_pending;
883 atomic_t rxq_flush_outstanding; 884 atomic_t rxq_flush_outstanding;
884 wait_queue_head_t flush_wq; 885 wait_queue_head_t flush_wq;
885 886
886 #ifdef CONFIG_SFC_SRIOV 887 #ifdef CONFIG_SFC_SRIOV
887 struct efx_channel *vfdi_channel; 888 struct efx_channel *vfdi_channel;
888 struct efx_vf *vf; 889 struct efx_vf *vf;
889 unsigned vf_count; 890 unsigned vf_count;
890 unsigned vf_init_count; 891 unsigned vf_init_count;
891 unsigned vi_scale; 892 unsigned vi_scale;
892 unsigned vf_buftbl_base; 893 unsigned vf_buftbl_base;
893 struct efx_buffer vfdi_status; 894 struct efx_buffer vfdi_status;
894 struct list_head local_addr_list; 895 struct list_head local_addr_list;
895 struct list_head local_page_list; 896 struct list_head local_page_list;
896 struct mutex local_lock; 897 struct mutex local_lock;
897 struct work_struct peer_work; 898 struct work_struct peer_work;
898 #endif 899 #endif
899 900
900 struct efx_ptp_data *ptp_data; 901 struct efx_ptp_data *ptp_data;
901 902
902 /* The following fields may be written more often */ 903 /* The following fields may be written more often */
903 904
904 struct delayed_work monitor_work ____cacheline_aligned_in_smp; 905 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
905 spinlock_t biu_lock; 906 spinlock_t biu_lock;
906 int last_irq_cpu; 907 int last_irq_cpu;
907 unsigned n_rx_nodesc_drop_cnt; 908 unsigned n_rx_nodesc_drop_cnt;
908 struct efx_mac_stats mac_stats; 909 struct efx_mac_stats mac_stats;
909 spinlock_t stats_lock; 910 spinlock_t stats_lock;
910 }; 911 };
911 912
912 static inline int efx_dev_registered(struct efx_nic *efx) 913 static inline int efx_dev_registered(struct efx_nic *efx)
913 { 914 {
914 return efx->net_dev->reg_state == NETREG_REGISTERED; 915 return efx->net_dev->reg_state == NETREG_REGISTERED;
915 } 916 }
916 917
917 static inline unsigned int efx_port_num(struct efx_nic *efx) 918 static inline unsigned int efx_port_num(struct efx_nic *efx)
918 { 919 {
919 return efx->net_dev->dev_id; 920 return efx->port_num;
920 } 921 }
921 922
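The payoff of leaving net_device::dev_id at zero shows up in the IPv6 stack's EUI-64 derivation, where a non-zero dev_id displaces the standard filler bytes of the interface identifier built from the MAC address. A hypothetical illustration of that derivation (a sketch of the addrconf behaviour, not the exact kernel code):

	/* Sketch: expand a 48-bit MAC into an EUI-64 interface ID.
	 * A non-zero dev_id is presumed to replace the 0xFF/0xFE
	 * filler bytes so that two netdevs sharing one MAC still get
	 * distinct IDs; a port with its own MAC, as on Siena, would
	 * instead see its link-local address change unexpectedly.
	 */
	static void mac_to_eui64(u8 eui[8], const u8 mac[6], u16 dev_id)
	{
		memcpy(eui, mac, 3);
		if (dev_id) {
			eui[3] = dev_id >> 8;
			eui[4] = dev_id & 0xff;
		} else {
			eui[3] = 0xFF;	/* standard EUI-48 to EUI-64 filler */
			eui[4] = 0xFE;
		}
		memcpy(eui + 5, mac + 3, 3);
		/* the real addrconf code also inverts the universal/local bit */
	}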
922 /** 923 /**
923 * struct efx_nic_type - Efx device type definition 924 * struct efx_nic_type - Efx device type definition
924 * @probe: Probe the controller 925 * @probe: Probe the controller
925 * @remove: Free resources allocated by probe() 926 * @remove: Free resources allocated by probe()
926 * @init: Initialise the controller 927 * @init: Initialise the controller
927 * @dimension_resources: Dimension controller resources (buffer table, 928 * @dimension_resources: Dimension controller resources (buffer table,
928 * and VIs once the available interrupt resources are clear) 929 * and VIs once the available interrupt resources are clear)
929 * @fini: Shut down the controller 930 * @fini: Shut down the controller
930 * @monitor: Periodic function for polling link state and hardware monitor 931 * @monitor: Periodic function for polling link state and hardware monitor
931 * @map_reset_reason: Map ethtool reset reason to a reset method 932 * @map_reset_reason: Map ethtool reset reason to a reset method
932 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible 933 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
933 * @reset: Reset the controller hardware and possibly the PHY. This will 934 * @reset: Reset the controller hardware and possibly the PHY. This will
934 * be called while the controller is uninitialised. 935 * be called while the controller is uninitialised.
935 * @probe_port: Probe the MAC and PHY 936 * @probe_port: Probe the MAC and PHY
936 * @remove_port: Free resources allocated by probe_port() 937 * @remove_port: Free resources allocated by probe_port()
937 * @handle_global_event: Handle a "global" event (may be %NULL) 938 * @handle_global_event: Handle a "global" event (may be %NULL)
938 * @prepare_flush: Prepare the hardware for flushing the DMA queues 939 * @prepare_flush: Prepare the hardware for flushing the DMA queues
939 * @finish_flush: Clean up after flushing the DMA queues 940 * @finish_flush: Clean up after flushing the DMA queues
940 * @update_stats: Update statistics not provided by event handling 941 * @update_stats: Update statistics not provided by event handling
941 * @start_stats: Start the regular fetching of statistics 942 * @start_stats: Start the regular fetching of statistics
942 * @stop_stats: Stop the regular fetching of statistics 943 * @stop_stats: Stop the regular fetching of statistics
943 * @set_id_led: Set state of identifying LED or revert to automatic function 944 * @set_id_led: Set state of identifying LED or revert to automatic function
944 * @push_irq_moderation: Apply interrupt moderation value 945 * @push_irq_moderation: Apply interrupt moderation value
945 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY 946 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
946 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings 947 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
947 * to the hardware. Serialised by the mac_lock. 948 * to the hardware. Serialised by the mac_lock.
948 * @check_mac_fault: Check MAC fault state. True if fault present. 949 * @check_mac_fault: Check MAC fault state. True if fault present.
949 * @get_wol: Get WoL configuration from driver state 950 * @get_wol: Get WoL configuration from driver state
950 * @set_wol: Push WoL configuration to the NIC 951 * @set_wol: Push WoL configuration to the NIC
951 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume) 952 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
952 * @test_chip: Test registers. Should use efx_nic_test_registers(), and is 953 * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
953 * expected to reset the NIC. 954 * expected to reset the NIC.
954 * @test_nvram: Test validity of NVRAM contents 955 * @test_nvram: Test validity of NVRAM contents
955 * @revision: Hardware architecture revision 956 * @revision: Hardware architecture revision
956 * @mem_map_size: Memory BAR mapped size 957 * @mem_map_size: Memory BAR mapped size
957 * @txd_ptr_tbl_base: TX descriptor ring base address 958 * @txd_ptr_tbl_base: TX descriptor ring base address
958 * @rxd_ptr_tbl_base: RX descriptor ring base address 959 * @rxd_ptr_tbl_base: RX descriptor ring base address
959 * @buf_tbl_base: Buffer table base address 960 * @buf_tbl_base: Buffer table base address
960 * @evq_ptr_tbl_base: Event queue pointer table base address 961 * @evq_ptr_tbl_base: Event queue pointer table base address
961 * @evq_rptr_tbl_base: Event queue read-pointer table base address 962 * @evq_rptr_tbl_base: Event queue read-pointer table base address
962 * @max_dma_mask: Maximum possible DMA mask 963 * @max_dma_mask: Maximum possible DMA mask
963 * @rx_buffer_hash_size: Size of hash at start of RX packet 964 * @rx_buffer_hash_size: Size of hash at start of RX packet
964 * @rx_buffer_padding: Size of padding at end of RX packet 965 * @rx_buffer_padding: Size of padding at end of RX packet
965 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers 966 * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
966 * @max_interrupt_mode: Highest capability interrupt mode supported 967 * @max_interrupt_mode: Highest capability interrupt mode supported
967 * from &enum efx_int_mode. 968 * from &enum efx_int_mode.
968 * @phys_addr_channels: Number of channels with physically addressed 969 * @phys_addr_channels: Number of channels with physically addressed
969 * descriptors 970 * descriptors
970 * @timer_period_max: Maximum period of interrupt timer (in ticks) 971 * @timer_period_max: Maximum period of interrupt timer (in ticks)
971 * @offload_features: net_device feature flags for protocol offload 972 * @offload_features: net_device feature flags for protocol offload
972 * features implemented in hardware 973 * features implemented in hardware
973 */ 974 */
974 struct efx_nic_type { 975 struct efx_nic_type {
975 int (*probe)(struct efx_nic *efx); 976 int (*probe)(struct efx_nic *efx);
976 void (*remove)(struct efx_nic *efx); 977 void (*remove)(struct efx_nic *efx);
977 int (*init)(struct efx_nic *efx); 978 int (*init)(struct efx_nic *efx);
978 void (*dimension_resources)(struct efx_nic *efx); 979 void (*dimension_resources)(struct efx_nic *efx);
979 void (*fini)(struct efx_nic *efx); 980 void (*fini)(struct efx_nic *efx);
980 void (*monitor)(struct efx_nic *efx); 981 void (*monitor)(struct efx_nic *efx);
981 enum reset_type (*map_reset_reason)(enum reset_type reason); 982 enum reset_type (*map_reset_reason)(enum reset_type reason);
982 int (*map_reset_flags)(u32 *flags); 983 int (*map_reset_flags)(u32 *flags);
983 int (*reset)(struct efx_nic *efx, enum reset_type method); 984 int (*reset)(struct efx_nic *efx, enum reset_type method);
984 int (*probe_port)(struct efx_nic *efx); 985 int (*probe_port)(struct efx_nic *efx);
985 void (*remove_port)(struct efx_nic *efx); 986 void (*remove_port)(struct efx_nic *efx);
986 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *); 987 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
987 void (*prepare_flush)(struct efx_nic *efx); 988 void (*prepare_flush)(struct efx_nic *efx);
988 void (*finish_flush)(struct efx_nic *efx); 989 void (*finish_flush)(struct efx_nic *efx);
989 void (*update_stats)(struct efx_nic *efx); 990 void (*update_stats)(struct efx_nic *efx);
990 void (*start_stats)(struct efx_nic *efx); 991 void (*start_stats)(struct efx_nic *efx);
991 void (*stop_stats)(struct efx_nic *efx); 992 void (*stop_stats)(struct efx_nic *efx);
992 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode); 993 void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
993 void (*push_irq_moderation)(struct efx_channel *channel); 994 void (*push_irq_moderation)(struct efx_channel *channel);
994 int (*reconfigure_port)(struct efx_nic *efx); 995 int (*reconfigure_port)(struct efx_nic *efx);
995 int (*reconfigure_mac)(struct efx_nic *efx); 996 int (*reconfigure_mac)(struct efx_nic *efx);
996 bool (*check_mac_fault)(struct efx_nic *efx); 997 bool (*check_mac_fault)(struct efx_nic *efx);
997 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol); 998 void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
998 int (*set_wol)(struct efx_nic *efx, u32 type); 999 int (*set_wol)(struct efx_nic *efx, u32 type);
999 void (*resume_wol)(struct efx_nic *efx); 1000 void (*resume_wol)(struct efx_nic *efx);
1000 int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests); 1001 int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
1001 int (*test_nvram)(struct efx_nic *efx); 1002 int (*test_nvram)(struct efx_nic *efx);
1002 1003
1003 int revision; 1004 int revision;
1004 unsigned int mem_map_size; 1005 unsigned int mem_map_size;
1005 unsigned int txd_ptr_tbl_base; 1006 unsigned int txd_ptr_tbl_base;
1006 unsigned int rxd_ptr_tbl_base; 1007 unsigned int rxd_ptr_tbl_base;
1007 unsigned int buf_tbl_base; 1008 unsigned int buf_tbl_base;
1008 unsigned int evq_ptr_tbl_base; 1009 unsigned int evq_ptr_tbl_base;
1009 unsigned int evq_rptr_tbl_base; 1010 unsigned int evq_rptr_tbl_base;
1010 u64 max_dma_mask; 1011 u64 max_dma_mask;
1011 unsigned int rx_buffer_hash_size; 1012 unsigned int rx_buffer_hash_size;
1012 unsigned int rx_buffer_padding; 1013 unsigned int rx_buffer_padding;
1013 bool can_rx_scatter; 1014 bool can_rx_scatter;
1014 unsigned int max_interrupt_mode; 1015 unsigned int max_interrupt_mode;
1015 unsigned int phys_addr_channels; 1016 unsigned int phys_addr_channels;
1016 unsigned int timer_period_max; 1017 unsigned int timer_period_max;
1017 netdev_features_t offload_features; 1018 netdev_features_t offload_features;
1018 }; 1019 };
1019 1020
1020 /************************************************************************** 1021 /**************************************************************************
1021 * 1022 *
1022 * Prototypes and inline functions 1023 * Prototypes and inline functions
1023 * 1024 *
1024 *************************************************************************/ 1025 *************************************************************************/
1025 1026
1026 static inline struct efx_channel * 1027 static inline struct efx_channel *
1027 efx_get_channel(struct efx_nic *efx, unsigned index) 1028 efx_get_channel(struct efx_nic *efx, unsigned index)
1028 { 1029 {
1029 EFX_BUG_ON_PARANOID(index >= efx->n_channels); 1030 EFX_BUG_ON_PARANOID(index >= efx->n_channels);
1030 return efx->channel[index]; 1031 return efx->channel[index];
1031 } 1032 }
1032 1033
1033 /* Iterate over all used channels */ 1034 /* Iterate over all used channels */
1034 #define efx_for_each_channel(_channel, _efx) \ 1035 #define efx_for_each_channel(_channel, _efx) \
1035 for (_channel = (_efx)->channel[0]; \ 1036 for (_channel = (_efx)->channel[0]; \
1036 _channel; \ 1037 _channel; \
1037 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \ 1038 _channel = (_channel->channel + 1 < (_efx)->n_channels) ? \
1038 (_efx)->channel[_channel->channel + 1] : NULL) 1039 (_efx)->channel[_channel->channel + 1] : NULL)
1039 1040
1040 /* Iterate over all used channels in reverse */ 1041 /* Iterate over all used channels in reverse */
1041 #define efx_for_each_channel_rev(_channel, _efx) \ 1042 #define efx_for_each_channel_rev(_channel, _efx) \
1042 for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \ 1043 for (_channel = (_efx)->channel[(_efx)->n_channels - 1]; \
1043 _channel; \ 1044 _channel; \
1044 _channel = _channel->channel ? \ 1045 _channel = _channel->channel ? \
1045 (_efx)->channel[_channel->channel - 1] : NULL) 1046 (_efx)->channel[_channel->channel - 1] : NULL)
1046 1047
1047 static inline struct efx_tx_queue * 1048 static inline struct efx_tx_queue *
1048 efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type) 1049 efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
1049 { 1050 {
1050 EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels || 1051 EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
1051 type >= EFX_TXQ_TYPES); 1052 type >= EFX_TXQ_TYPES);
1052 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type]; 1053 return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
1053 } 1054 }
1054 1055
1055 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel) 1056 static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
1056 { 1057 {
1057 return channel->channel - channel->efx->tx_channel_offset < 1058 return channel->channel - channel->efx->tx_channel_offset <
1058 channel->efx->n_tx_channels; 1059 channel->efx->n_tx_channels;
1059 } 1060 }
1060 1061
1061 static inline struct efx_tx_queue * 1062 static inline struct efx_tx_queue *
1062 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type) 1063 efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
1063 { 1064 {
1064 EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) || 1065 EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
1065 type >= EFX_TXQ_TYPES); 1066 type >= EFX_TXQ_TYPES);
1066 return &channel->tx_queue[type]; 1067 return &channel->tx_queue[type];
1067 } 1068 }
1068 1069
1069 static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue) 1070 static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
1070 { 1071 {
1071 return !(tx_queue->efx->net_dev->num_tc < 2 && 1072 return !(tx_queue->efx->net_dev->num_tc < 2 &&
1072 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI); 1073 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
1073 } 1074 }
1074 1075
1075 /* Iterate over all TX queues belonging to a channel */ 1076 /* Iterate over all TX queues belonging to a channel */
1076 #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 1077 #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
1077 if (!efx_channel_has_tx_queues(_channel)) \ 1078 if (!efx_channel_has_tx_queues(_channel)) \
1078 ; \ 1079 ; \
1079 else \ 1080 else \
1080 for (_tx_queue = (_channel)->tx_queue; \ 1081 for (_tx_queue = (_channel)->tx_queue; \
1081 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \ 1082 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
1082 efx_tx_queue_used(_tx_queue); \ 1083 efx_tx_queue_used(_tx_queue); \
1083 _tx_queue++) 1084 _tx_queue++)
1084 1085
1085 /* Iterate over all possible TX queues belonging to a channel */ 1086 /* Iterate over all possible TX queues belonging to a channel */
1086 #define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \ 1087 #define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel) \
1087 if (!efx_channel_has_tx_queues(_channel)) \ 1088 if (!efx_channel_has_tx_queues(_channel)) \
1088 ; \ 1089 ; \
1089 else \ 1090 else \
1090 for (_tx_queue = (_channel)->tx_queue; \ 1091 for (_tx_queue = (_channel)->tx_queue; \
1091 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \ 1092 _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
1092 _tx_queue++) 1093 _tx_queue++)
1093 1094
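The "if (...) ; else for (...)" shape shared by these iterator macros is a deliberate idiom: the macro both skips channels without TX queues and still expands to a single if/else statement, so it nests safely under an unbraced if at the call site. A hypothetical caller showing why the empty-statement branch matters:

	/* Hypothetical: the macro's built-in else means this outer
	 * else still binds to "if (channel->enabled)" rather than
	 * being captured by the conditional inside the macro.
	 */
	if (channel->enabled)
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	else
		netif_dbg(efx, drv, efx->net_dev, "channel disabled\n");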
1094 static inline bool efx_channel_has_rx_queue(struct efx_channel *channel) 1095 static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
1095 { 1096 {
1096 return channel->rx_queue.core_index >= 0; 1097 return channel->rx_queue.core_index >= 0;
1097 } 1098 }
1098 1099
1099 static inline struct efx_rx_queue * 1100 static inline struct efx_rx_queue *
1100 efx_channel_get_rx_queue(struct efx_channel *channel) 1101 efx_channel_get_rx_queue(struct efx_channel *channel)
1101 { 1102 {
1102 EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel)); 1103 EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
1103 return &channel->rx_queue; 1104 return &channel->rx_queue;
1104 } 1105 }
1105 1106
1106 /* Iterate over all RX queues belonging to a channel */ 1107 /* Iterate over all RX queues belonging to a channel */
1107 #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 1108 #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
1108 if (!efx_channel_has_rx_queue(_channel)) \ 1109 if (!efx_channel_has_rx_queue(_channel)) \
1109 ; \ 1110 ; \
1110 else \ 1111 else \
1111 for (_rx_queue = &(_channel)->rx_queue; \ 1112 for (_rx_queue = &(_channel)->rx_queue; \
1112 _rx_queue; \ 1113 _rx_queue; \
1113 _rx_queue = NULL) 1114 _rx_queue = NULL)
1114 1115
1115 static inline struct efx_channel * 1116 static inline struct efx_channel *
1116 efx_rx_queue_channel(struct efx_rx_queue *rx_queue) 1117 efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
1117 { 1118 {
1118 return container_of(rx_queue, struct efx_channel, rx_queue); 1119 return container_of(rx_queue, struct efx_channel, rx_queue);
1119 } 1120 }
1120 1121
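efx_rx_queue_channel() can map a queue pointer back to its owning channel with plain container_of() arithmetic because each channel embeds exactly one RX queue by value, so no back-pointer needs to be stored in struct efx_rx_queue.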
1121 static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue) 1122 static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
1122 { 1123 {
1123 return efx_rx_queue_channel(rx_queue)->channel; 1124 return efx_rx_queue_channel(rx_queue)->channel;
1124 } 1125 }
1125 1126
1126 /* Returns a pointer to the specified receive buffer in the RX 1127 /* Returns a pointer to the specified receive buffer in the RX
1127 * descriptor queue. 1128 * descriptor queue.
1128 */ 1129 */
1129 static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, 1130 static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
1130 unsigned int index) 1131 unsigned int index)
1131 { 1132 {
1132 return &rx_queue->buffer[index]; 1133 return &rx_queue->buffer[index];
1133 } 1134 }
1134 1135
1135 1136
1136 /** 1137 /**
1137 * EFX_MAX_FRAME_LEN - calculate maximum frame length 1138 * EFX_MAX_FRAME_LEN - calculate maximum frame length
1138 * 1139 *
1139 * This calculates the maximum frame length that will be used for a 1140 * This calculates the maximum frame length that will be used for a
1140 * given MTU. The frame length will be equal to the MTU plus a 1141 * given MTU. The frame length will be equal to the MTU plus a
1141 * constant amount of header space and padding. This is the quantity 1142 * constant amount of header space and padding. This is the quantity
1142 * that the net driver will program into the MAC as the maximum frame 1143 * that the net driver will program into the MAC as the maximum frame
1143 * length. 1144 * length.
1144 * 1145 *
1145 * The 10G MAC requires 8-byte alignment on the frame 1146 * The 10G MAC requires 8-byte alignment on the frame
1146 * length, so we round up to the nearest 8. 1147 * length, so we round up to the nearest 8.
1147 * 1148 *
1148 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an 1149 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
1149 * XGMII cycle). If the frame length reaches the maximum value in the 1150 * XGMII cycle). If the frame length reaches the maximum value in the
1150 * same cycle, the XMAC can miss the IPG altogether. We work around 1151 * same cycle, the XMAC can miss the IPG altogether. We work around
1151 * this by adding a further 16 bytes. 1152 * this by adding a further 16 bytes.
1152 */ 1153 */
1153 #define EFX_MAX_FRAME_LEN(mtu) \ 1154 #define EFX_MAX_FRAME_LEN(mtu) \
1154 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16) 1155 ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */ + 7) & ~7) + 16)
1155 1156
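Worked through for a standard 1500-byte MTU: 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (FCS) = 1522 bytes; rounding up to the next multiple of 8 gives 1528; adding the 16-byte IPG safety margin yields a programmed maximum frame length of 1544.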
1156 static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb) 1157 static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
1157 { 1158 {
1158 return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP; 1159 return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
1159 } 1160 }
1160 static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb) 1161 static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
1161 { 1162 {
1162 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1163 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1163 } 1164 }
1164 1165
1165 #endif /* EFX_NET_DRIVER_H */ 1166 #endif /* EFX_NET_DRIVER_H */
1166 1167
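The two inline helpers at the end of the header bracket hardware transmit timestamping: the first tests whether the stack requested a hardware timestamp on an skb, the second records that one is now in flight. A paraphrased sketch of how the TX entry point is expected to use them, assuming the efx_ptp_is_ptp_tx() and efx_ptp_tx() helpers from the driver's PTP code:

	/* Paraphrased sketch: divert PTP event packets that want a
	 * hardware timestamp to the PTP channel instead of a normal
	 * TX queue.
	 */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
		efx_xmit_hwtstamp_pending(skb);
		return efx_ptp_tx(efx, skb);
	}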
drivers/net/ethernet/sfc/siena.c
1 /**************************************************************************** 1 /****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards 2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd. 3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2010 Solarflare Communications Inc. 4 * Copyright 2006-2010 Solarflare Communications Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference. 8 * by the Free Software Foundation, incorporated herein by reference.
9 */ 9 */
10 10
11 #include <linux/bitops.h> 11 #include <linux/bitops.h>
12 #include <linux/delay.h> 12 #include <linux/delay.h>
13 #include <linux/pci.h> 13 #include <linux/pci.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/slab.h> 15 #include <linux/slab.h>
16 #include <linux/random.h> 16 #include <linux/random.h>
17 #include "net_driver.h" 17 #include "net_driver.h"
18 #include "bitfield.h" 18 #include "bitfield.h"
19 #include "efx.h" 19 #include "efx.h"
20 #include "nic.h" 20 #include "nic.h"
21 #include "spi.h" 21 #include "spi.h"
22 #include "regs.h" 22 #include "regs.h"
23 #include "io.h" 23 #include "io.h"
24 #include "phy.h" 24 #include "phy.h"
25 #include "workarounds.h" 25 #include "workarounds.h"
26 #include "mcdi.h" 26 #include "mcdi.h"
27 #include "mcdi_pcol.h" 27 #include "mcdi_pcol.h"
28 #include "selftest.h" 28 #include "selftest.h"
29 29
30 /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */ 30 /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
31 31
32 static void siena_init_wol(struct efx_nic *efx); 32 static void siena_init_wol(struct efx_nic *efx);
33 static int siena_reset_hw(struct efx_nic *efx, enum reset_type method); 33 static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
34 34
35 35
36 static void siena_push_irq_moderation(struct efx_channel *channel) 36 static void siena_push_irq_moderation(struct efx_channel *channel)
37 { 37 {
38 efx_dword_t timer_cmd; 38 efx_dword_t timer_cmd;
39 39
40 if (channel->irq_moderation) 40 if (channel->irq_moderation)
41 EFX_POPULATE_DWORD_2(timer_cmd, 41 EFX_POPULATE_DWORD_2(timer_cmd,
42 FRF_CZ_TC_TIMER_MODE, 42 FRF_CZ_TC_TIMER_MODE,
43 FFE_CZ_TIMER_MODE_INT_HLDOFF, 43 FFE_CZ_TIMER_MODE_INT_HLDOFF,
44 FRF_CZ_TC_TIMER_VAL, 44 FRF_CZ_TC_TIMER_VAL,
45 channel->irq_moderation - 1); 45 channel->irq_moderation - 1);
46 else 46 else
47 EFX_POPULATE_DWORD_2(timer_cmd, 47 EFX_POPULATE_DWORD_2(timer_cmd,
48 FRF_CZ_TC_TIMER_MODE, 48 FRF_CZ_TC_TIMER_MODE,
49 FFE_CZ_TIMER_MODE_DIS, 49 FFE_CZ_TIMER_MODE_DIS,
50 FRF_CZ_TC_TIMER_VAL, 0); 50 FRF_CZ_TC_TIMER_VAL, 0);
51 efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, 51 efx_writed_page_locked(channel->efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
52 channel->channel); 52 channel->channel);
53 } 53 }
54 54
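A non-zero channel->irq_moderation of N is written to the timer as N - 1 because the hardware counter is zero-based in hold-off mode, while zero disables moderation outright. Assuming irq_moderation counts timer quanta (timer_quantum_ns each, set up in siena_probe_nvconfig() below), a value of 10 on a 6144 ns quantum would hold interrupts off for roughly 61 us.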
55 static int siena_mdio_write(struct net_device *net_dev, 55 static int siena_mdio_write(struct net_device *net_dev,
56 int prtad, int devad, u16 addr, u16 value) 56 int prtad, int devad, u16 addr, u16 value)
57 { 57 {
58 struct efx_nic *efx = netdev_priv(net_dev); 58 struct efx_nic *efx = netdev_priv(net_dev);
59 uint32_t status; 59 uint32_t status;
60 int rc; 60 int rc;
61 61
62 rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad, 62 rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
63 addr, value, &status); 63 addr, value, &status);
64 if (rc) 64 if (rc)
65 return rc; 65 return rc;
66 if (status != MC_CMD_MDIO_STATUS_GOOD) 66 if (status != MC_CMD_MDIO_STATUS_GOOD)
67 return -EIO; 67 return -EIO;
68 68
69 return 0; 69 return 0;
70 } 70 }
71 71
72 static int siena_mdio_read(struct net_device *net_dev, 72 static int siena_mdio_read(struct net_device *net_dev,
73 int prtad, int devad, u16 addr) 73 int prtad, int devad, u16 addr)
74 { 74 {
75 struct efx_nic *efx = netdev_priv(net_dev); 75 struct efx_nic *efx = netdev_priv(net_dev);
76 uint16_t value; 76 uint16_t value;
77 uint32_t status; 77 uint32_t status;
78 int rc; 78 int rc;
79 79
80 rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad, 80 rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
81 addr, &value, &status); 81 addr, &value, &status);
82 if (rc) 82 if (rc)
83 return rc; 83 return rc;
84 if (status != MC_CMD_MDIO_STATUS_GOOD) 84 if (status != MC_CMD_MDIO_STATUS_GOOD)
85 return -EIO; 85 return -EIO;
86 86
87 return (int)value; 87 return (int)value;
88 } 88 }
89 89
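Both accessors proxy clause-45 MDIO access through the MC: efx_mcdi_mdio_read()/write() issue an MCDI request and return a per-transaction status word, which collapses to -EIO on anything other than MC_CMD_MDIO_STATUS_GOOD. Once hooked into efx->mdio in siena_probe_port() below, the generic mdio45 helpers can drive the PHY through this path; a hypothetical call site (register constants from linux/mdio.h):

	/* Hypothetical: query the PHY's devices-in-package word
	 * through the MCDI-backed accessor.
	 */
	struct mdio_if_info *mdio = &efx->mdio;
	int devs = mdio->mdio_read(mdio->dev, mdio->prtad,
				   MDIO_MMD_PMAPMD, MDIO_DEVS1);
	if (devs >= 0)
		netif_dbg(efx, hw, efx->net_dev,
			  "MMDs present: %#x\n", devs);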
90 /* This call is responsible for hooking in the MAC and PHY operations */ 90 /* This call is responsible for hooking in the MAC and PHY operations */
91 static int siena_probe_port(struct efx_nic *efx) 91 static int siena_probe_port(struct efx_nic *efx)
92 { 92 {
93 int rc; 93 int rc;
94 94
95 /* Hook in PHY operations table */ 95 /* Hook in PHY operations table */
96 efx->phy_op = &efx_mcdi_phy_ops; 96 efx->phy_op = &efx_mcdi_phy_ops;
97 97
98 /* Set up MDIO structure for PHY */ 98 /* Set up MDIO structure for PHY */
99 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 99 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
100 efx->mdio.mdio_read = siena_mdio_read; 100 efx->mdio.mdio_read = siena_mdio_read;
101 efx->mdio.mdio_write = siena_mdio_write; 101 efx->mdio.mdio_write = siena_mdio_write;
102 102
103 /* Fill out MDIO structure, loopback modes, and initial link state */ 103 /* Fill out MDIO structure, loopback modes, and initial link state */
104 rc = efx->phy_op->probe(efx); 104 rc = efx->phy_op->probe(efx);
105 if (rc != 0) 105 if (rc != 0)
106 return rc; 106 return rc;
107 107
108 /* Allocate buffer for stats */ 108 /* Allocate buffer for stats */
109 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, 109 rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
110 MC_CMD_MAC_NSTATS * sizeof(u64)); 110 MC_CMD_MAC_NSTATS * sizeof(u64));
111 if (rc) 111 if (rc)
112 return rc; 112 return rc;
113 netif_dbg(efx, probe, efx->net_dev, 113 netif_dbg(efx, probe, efx->net_dev,
114 "stats buffer at %llx (virt %p phys %llx)\n", 114 "stats buffer at %llx (virt %p phys %llx)\n",
115 (u64)efx->stats_buffer.dma_addr, 115 (u64)efx->stats_buffer.dma_addr,
116 efx->stats_buffer.addr, 116 efx->stats_buffer.addr,
117 (u64)virt_to_phys(efx->stats_buffer.addr)); 117 (u64)virt_to_phys(efx->stats_buffer.addr));
118 118
119 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1); 119 efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
120 120
121 return 0; 121 return 0;
122 } 122 }
123 123
124 static void siena_remove_port(struct efx_nic *efx) 124 static void siena_remove_port(struct efx_nic *efx)
125 { 125 {
126 efx->phy_op->remove(efx); 126 efx->phy_op->remove(efx);
127 efx_nic_free_buffer(efx, &efx->stats_buffer); 127 efx_nic_free_buffer(efx, &efx->stats_buffer);
128 } 128 }
129 129
130 void siena_prepare_flush(struct efx_nic *efx) 130 void siena_prepare_flush(struct efx_nic *efx)
131 { 131 {
132 if (efx->fc_disable++ == 0) 132 if (efx->fc_disable++ == 0)
133 efx_mcdi_set_mac(efx); 133 efx_mcdi_set_mac(efx);
134 } 134 }
135 135
136 void siena_finish_flush(struct efx_nic *efx) 136 void siena_finish_flush(struct efx_nic *efx)
137 { 137 {
138 if (--efx->fc_disable == 0) 138 if (--efx->fc_disable == 0)
139 efx_mcdi_set_mac(efx); 139 efx_mcdi_set_mac(efx);
140 } 140 }
141 141
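siena_prepare_flush() and siena_finish_flush() form a nest-safe pair around @fc_disable: only the 0 to 1 and 1 to 0 transitions re-push the MAC configuration, and efx_mcdi_set_mac() requests flow control off while the counter is non-zero, so pause frames from the link partner cannot stall the DMA queue flushes that follow. Per the kernel-doc in net_driver.h above, the counter itself is serialised by the rtnl lock.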
142 static const struct efx_nic_register_test siena_register_tests[] = { 142 static const struct efx_nic_register_test siena_register_tests[] = {
143 { FR_AZ_ADR_REGION, 143 { FR_AZ_ADR_REGION,
144 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, 144 EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
145 { FR_CZ_USR_EV_CFG, 145 { FR_CZ_USR_EV_CFG,
146 EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) }, 146 EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
147 { FR_AZ_RX_CFG, 147 { FR_AZ_RX_CFG,
148 EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) }, 148 EFX_OWORD32(0xFFFFFFFE, 0xFFFFFFFF, 0x0003FFFF, 0x00000000) },
149 { FR_AZ_TX_CFG, 149 { FR_AZ_TX_CFG,
150 EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) }, 150 EFX_OWORD32(0x7FFF0037, 0xFFFF8000, 0xFFFFFFFF, 0x03FFFFFF) },
151 { FR_AZ_TX_RESERVED, 151 { FR_AZ_TX_RESERVED,
152 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, 152 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
153 { FR_AZ_SRM_TX_DC_CFG, 153 { FR_AZ_SRM_TX_DC_CFG,
154 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, 154 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
155 { FR_AZ_RX_DC_CFG, 155 { FR_AZ_RX_DC_CFG,
156 EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) }, 156 EFX_OWORD32(0x00000003, 0x00000000, 0x00000000, 0x00000000) },
157 { FR_AZ_RX_DC_PF_WM, 157 { FR_AZ_RX_DC_PF_WM,
158 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, 158 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
159 { FR_BZ_DP_CTRL, 159 { FR_BZ_DP_CTRL,
160 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, 160 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
161 { FR_BZ_RX_RSS_TKEY, 161 { FR_BZ_RX_RSS_TKEY,
162 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, 162 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
163 { FR_CZ_RX_RSS_IPV6_REG1, 163 { FR_CZ_RX_RSS_IPV6_REG1,
164 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, 164 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
165 { FR_CZ_RX_RSS_IPV6_REG2, 165 { FR_CZ_RX_RSS_IPV6_REG2,
166 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) }, 166 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF) },
167 { FR_CZ_RX_RSS_IPV6_REG3, 167 { FR_CZ_RX_RSS_IPV6_REG3,
168 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) }, 168 EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
169 }; 169 };
170 170
171 static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) 171 static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
172 { 172 {
173 enum reset_type reset_method = RESET_TYPE_ALL; 173 enum reset_type reset_method = RESET_TYPE_ALL;
174 int rc, rc2; 174 int rc, rc2;
175 175
176 efx_reset_down(efx, reset_method); 176 efx_reset_down(efx, reset_method);
177 177
178 /* Reset the chip immediately so that it is completely 178 /* Reset the chip immediately so that it is completely
179 * quiescent regardless of what any VF driver does. 179 * quiescent regardless of what any VF driver does.
180 */ 180 */
181 rc = siena_reset_hw(efx, reset_method); 181 rc = siena_reset_hw(efx, reset_method);
182 if (rc) 182 if (rc)
183 goto out; 183 goto out;
184 184
185 tests->registers = 185 tests->registers =
186 efx_nic_test_registers(efx, siena_register_tests, 186 efx_nic_test_registers(efx, siena_register_tests,
187 ARRAY_SIZE(siena_register_tests)) 187 ARRAY_SIZE(siena_register_tests))
188 ? -1 : 1; 188 ? -1 : 1;
189 189
190 rc = siena_reset_hw(efx, reset_method); 190 rc = siena_reset_hw(efx, reset_method);
191 out: 191 out:
192 rc2 = efx_reset_up(efx, reset_method, rc == 0); 192 rc2 = efx_reset_up(efx, reset_method, rc == 0);
193 return rc ? rc : rc2; 193 return rc ? rc : rc2;
194 } 194 }
195 195
196 /************************************************************************** 196 /**************************************************************************
197 * 197 *
198 * Device reset 198 * Device reset
199 * 199 *
200 ************************************************************************** 200 **************************************************************************
201 */ 201 */
202 202
203 static enum reset_type siena_map_reset_reason(enum reset_type reason) 203 static enum reset_type siena_map_reset_reason(enum reset_type reason)
204 { 204 {
205 return RESET_TYPE_RECOVER_OR_ALL; 205 return RESET_TYPE_RECOVER_OR_ALL;
206 } 206 }
207 207
208 static int siena_map_reset_flags(u32 *flags) 208 static int siena_map_reset_flags(u32 *flags)
209 { 209 {
210 enum { 210 enum {
211 SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER | 211 SIENA_RESET_PORT = (ETH_RESET_DMA | ETH_RESET_FILTER |
212 ETH_RESET_OFFLOAD | ETH_RESET_MAC | 212 ETH_RESET_OFFLOAD | ETH_RESET_MAC |
213 ETH_RESET_PHY), 213 ETH_RESET_PHY),
214 SIENA_RESET_MC = (SIENA_RESET_PORT | 214 SIENA_RESET_MC = (SIENA_RESET_PORT |
215 ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT), 215 ETH_RESET_MGMT << ETH_RESET_SHARED_SHIFT),
216 }; 216 };
217 217
218 if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) { 218 if ((*flags & SIENA_RESET_MC) == SIENA_RESET_MC) {
219 *flags &= ~SIENA_RESET_MC; 219 *flags &= ~SIENA_RESET_MC;
220 return RESET_TYPE_WORLD; 220 return RESET_TYPE_WORLD;
221 } 221 }
222 222
223 if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) { 223 if ((*flags & SIENA_RESET_PORT) == SIENA_RESET_PORT) {
224 *flags &= ~SIENA_RESET_PORT; 224 *flags &= ~SIENA_RESET_PORT;
225 return RESET_TYPE_ALL; 225 return RESET_TYPE_ALL;
226 } 226 }
227 227
228 /* no invisible reset implemented */ 228 /* no invisible reset implemented */
229 229
230 return -EINVAL; 230 return -EINVAL;
231 } 231 }
232 232
233 static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) 233 static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
234 { 234 {
235 int rc; 235 int rc;
236 236
237 /* Recover from a failed assertion pre-reset */ 237 /* Recover from a failed assertion pre-reset */
238 rc = efx_mcdi_handle_assertion(efx); 238 rc = efx_mcdi_handle_assertion(efx);
239 if (rc) 239 if (rc)
240 return rc; 240 return rc;
241 241
242 if (method == RESET_TYPE_WORLD) 242 if (method == RESET_TYPE_WORLD)
243 return efx_mcdi_reset_mc(efx); 243 return efx_mcdi_reset_mc(efx);
244 else 244 else
245 return efx_mcdi_reset_port(efx); 245 return efx_mcdi_reset_port(efx);
246 } 246 }
247 247
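A worked example of the flag mapping above: an ethtool request of ETH_RESET_ALL (every dedicated and shared component bit) is a superset of SIENA_RESET_MC, so siena_map_reset_flags() strips those bits and returns RESET_TYPE_WORLD, which siena_reset_hw() turns into an MC reboot via efx_mcdi_reset_mc(). A request covering only the port-local components maps to RESET_TYPE_ALL and a port reset instead.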
248 #ifdef CONFIG_EEH 248 #ifdef CONFIG_EEH
249 /* When a PCI device is isolated from the bus, a subsequent MMIO read is 249 /* When a PCI device is isolated from the bus, a subsequent MMIO read is
250 * required for the kernel EEH mechanisms to notice. As the Solarflare driver 250 * required for the kernel EEH mechanisms to notice. As the Solarflare driver
251 * was written to minimise MMIO reads (for latency), a periodic call to check 251 * was written to minimise MMIO reads (for latency), a periodic call to check
252 * the EEH status of the device is required so that device recovery can happen 252 * the EEH status of the device is required so that device recovery can happen
253 * in a timely fashion. 253 * in a timely fashion.
254 */ 254 */
255 static void siena_monitor(struct efx_nic *efx) 255 static void siena_monitor(struct efx_nic *efx)
256 { 256 {
257 struct eeh_dev *eehdev = 257 struct eeh_dev *eehdev =
258 of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev)); 258 of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
259 259
260 eeh_dev_check_failure(eehdev); 260 eeh_dev_check_failure(eehdev);
261 } 261 }
262 #endif 262 #endif
263 263
264 static int siena_probe_nvconfig(struct efx_nic *efx) 264 static int siena_probe_nvconfig(struct efx_nic *efx)
265 { 265 {
266 u32 caps = 0; 266 u32 caps = 0;
267 int rc; 267 int rc;
268 268
269 rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps); 269 rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);
270 270
271 efx->timer_quantum_ns = 271 efx->timer_quantum_ns =
272 (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ? 272 (caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
273 3072 : 6144; /* 768 cycles */ 273 3072 : 6144; /* 768 cycles */
274 return rc; 274 return rc;
275 } 275 }
276 276
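The two quanta are consistent with the "768 cycles" note: 3072 ns / 768 = 4 ns per cycle and 6144 ns / 768 = 8 ns per cycle, suggesting 250 MHz and 125 MHz event-timer clocks respectively; the clock rates are an inference from this arithmetic, not something the MCDI interface states.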
277 static void siena_dimension_resources(struct efx_nic *efx) 277 static void siena_dimension_resources(struct efx_nic *efx)
278 { 278 {
279 /* Each port has a small block of internal SRAM dedicated to 279 /* Each port has a small block of internal SRAM dedicated to
280 * the buffer table and descriptor caches. In theory we can 280 * the buffer table and descriptor caches. In theory we can
281 * map both blocks to one port, but we don't. 281 * map both blocks to one port, but we don't.
282 */ 282 */
283 efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2); 283 efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
284 } 284 }
285 285
286 static int siena_probe_nic(struct efx_nic *efx) 286 static int siena_probe_nic(struct efx_nic *efx)
287 { 287 {
288 struct siena_nic_data *nic_data; 288 struct siena_nic_data *nic_data;
289 bool already_attached = false; 289 bool already_attached = false;
290 efx_oword_t reg; 290 efx_oword_t reg;
291 int rc; 291 int rc;
292 292
293 /* Allocate storage for hardware specific data */ 293 /* Allocate storage for hardware specific data */
294 nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL); 294 nic_data = kzalloc(sizeof(struct siena_nic_data), GFP_KERNEL);
295 if (!nic_data) 295 if (!nic_data)
296 return -ENOMEM; 296 return -ENOMEM;
297 efx->nic_data = nic_data; 297 efx->nic_data = nic_data;
298 298
299 if (efx_nic_fpga_ver(efx) != 0) { 299 if (efx_nic_fpga_ver(efx) != 0) {
300 netif_err(efx, probe, efx->net_dev, 300 netif_err(efx, probe, efx->net_dev,
301 "Siena FPGA not supported\n"); 301 "Siena FPGA not supported\n");
302 rc = -ENODEV; 302 rc = -ENODEV;
303 goto fail1; 303 goto fail1;
304 } 304 }
305 305
306 efx_reado(efx, &reg, FR_AZ_CS_DEBUG); 306 efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
307 efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1; 307 efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
308 308
309 efx_mcdi_init(efx); 309 efx_mcdi_init(efx);
310 310
311 /* Recover from a failed assertion before probing */ 311 /* Recover from a failed assertion before probing */
312 rc = efx_mcdi_handle_assertion(efx); 312 rc = efx_mcdi_handle_assertion(efx);
313 if (rc) 313 if (rc)
314 goto fail1; 314 goto fail1;
315 315
316 /* Let the BMC know that the driver is now in charge of link and 316 /* Let the BMC know that the driver is now in charge of link and
317 * filter settings. We must do this before we reset the NIC */ 317 * filter settings. We must do this before we reset the NIC */
318 rc = efx_mcdi_drv_attach(efx, true, &already_attached); 318 rc = efx_mcdi_drv_attach(efx, true, &already_attached);
319 if (rc) { 319 if (rc) {
320 netif_err(efx, probe, efx->net_dev, 320 netif_err(efx, probe, efx->net_dev,
321 "Unable to register driver with MCPU\n"); 321 "Unable to register driver with MCPU\n");
322 goto fail2; 322 goto fail2;
323 } 323 }
324 if (already_attached) 324 if (already_attached)
325 /* Not a fatal error */ 325 /* Not a fatal error */
326 netif_err(efx, probe, efx->net_dev, 326 netif_err(efx, probe, efx->net_dev,
327 "Host already registered with MCPU\n"); 327 "Host already registered with MCPU\n");
328 328
329 /* Now we can reset the NIC */ 329 /* Now we can reset the NIC */
330 rc = siena_reset_hw(efx, RESET_TYPE_ALL); 330 rc = siena_reset_hw(efx, RESET_TYPE_ALL);
331 if (rc) { 331 if (rc) {
332 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); 332 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
333 goto fail3; 333 goto fail3;
334 } 334 }
335 335
336 siena_init_wol(efx); 336 siena_init_wol(efx);
337 337
338 /* Allocate memory for INT_KER */ 338 /* Allocate memory for INT_KER */
339 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); 339 rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
340 if (rc) 340 if (rc)
341 goto fail4; 341 goto fail4;
342 BUG_ON(efx->irq_status.dma_addr & 0x0f); 342 BUG_ON(efx->irq_status.dma_addr & 0x0f);
343 343
344 netif_dbg(efx, probe, efx->net_dev, 344 netif_dbg(efx, probe, efx->net_dev,
345 "INT_KER at %llx (virt %p phys %llx)\n", 345 "INT_KER at %llx (virt %p phys %llx)\n",
346 (unsigned long long)efx->irq_status.dma_addr, 346 (unsigned long long)efx->irq_status.dma_addr,
347 efx->irq_status.addr, 347 efx->irq_status.addr,
348 (unsigned long long)virt_to_phys(efx->irq_status.addr)); 348 (unsigned long long)virt_to_phys(efx->irq_status.addr));
349 349
350 /* Read in the non-volatile configuration */ 350 /* Read in the non-volatile configuration */
351 rc = siena_probe_nvconfig(efx); 351 rc = siena_probe_nvconfig(efx);
352 if (rc == -EINVAL) { 352 if (rc == -EINVAL) {
353 netif_err(efx, probe, efx->net_dev, 353 netif_err(efx, probe, efx->net_dev,
354 "NVRAM is invalid therefore using defaults\n"); 354 "NVRAM is invalid therefore using defaults\n");
355 efx->phy_type = PHY_TYPE_NONE; 355 efx->phy_type = PHY_TYPE_NONE;
356 efx->mdio.prtad = MDIO_PRTAD_NONE; 356 efx->mdio.prtad = MDIO_PRTAD_NONE;
357 } else if (rc) { 357 } else if (rc) {
358 goto fail5; 358 goto fail5;
359 } 359 }
360 360
361 rc = efx_mcdi_mon_probe(efx); 361 rc = efx_mcdi_mon_probe(efx);
362 if (rc) 362 if (rc)
363 goto fail5; 363 goto fail5;
364 364
365 efx_sriov_probe(efx); 365 efx_sriov_probe(efx);
366 efx_ptp_probe(efx); 366 efx_ptp_probe(efx);
367 367
368 return 0; 368 return 0;
369 369
370 fail5: 370 fail5:
371 efx_nic_free_buffer(efx, &efx->irq_status); 371 efx_nic_free_buffer(efx, &efx->irq_status);
372 fail4: 372 fail4:
373 fail3: 373 fail3:
374 efx_mcdi_drv_attach(efx, false, NULL); 374 efx_mcdi_drv_attach(efx, false, NULL);
375 fail2: 375 fail2:
376 fail1: 376 fail1:
377 kfree(efx->nic_data); 377 kfree(efx->nic_data);
378 return rc; 378 return rc;
379 } 379 }
380 380
381 /* This call performs hardware-specific global initialisation, such as 381 /* This call performs hardware-specific global initialisation, such as
382 * defining the descriptor cache sizes and number of RSS channels. 382 * defining the descriptor cache sizes and number of RSS channels.
383 * It does not set up any buffers, descriptor rings or event queues. 383 * It does not set up any buffers, descriptor rings or event queues.
384 */ 384 */
385 static int siena_init_nic(struct efx_nic *efx) 385 static int siena_init_nic(struct efx_nic *efx)
386 { 386 {
387 efx_oword_t temp; 387 efx_oword_t temp;
388 int rc; 388 int rc;
389 389
390 /* Recover from a failed assertion post-reset */ 390 /* Recover from a failed assertion post-reset */
391 rc = efx_mcdi_handle_assertion(efx); 391 rc = efx_mcdi_handle_assertion(efx);
392 if (rc) 392 if (rc)
393 return rc; 393 return rc;
394 394
395 /* Squash TX of packets of 16 bytes or less */ 395 /* Squash TX of packets of 16 bytes or less */
396 efx_reado(efx, &temp, FR_AZ_TX_RESERVED); 396 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
397 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1); 397 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
398 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED); 398 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
399 399
400 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 400 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
401 * descriptors (which is bad). 401 * descriptors (which is bad).
402 */ 402 */
403 efx_reado(efx, &temp, FR_AZ_TX_CFG); 403 efx_reado(efx, &temp, FR_AZ_TX_CFG);
404 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); 404 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
405 EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1); 405 EFX_SET_OWORD_FIELD(temp, FRF_CZ_TX_FILTER_EN_BIT, 1);
406 efx_writeo(efx, &temp, FR_AZ_TX_CFG); 406 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
407 407
408 efx_reado(efx, &temp, FR_AZ_RX_CFG); 408 efx_reado(efx, &temp, FR_AZ_RX_CFG);
409 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0); 409 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_DESC_PUSH_EN, 0);
410 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); 410 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
411 /* Enable hash insertion. This is broken for the 'Falcon' hash 411 /* Enable hash insertion. This is broken for the 'Falcon' hash
412 * if IPv6 hashing is also enabled, so also select Toeplitz 412 * if IPv6 hashing is also enabled, so also select Toeplitz
413 * TCP/IPv4 and IPv4 hashes. */ 413 * TCP/IPv4 and IPv4 hashes. */
414 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); 414 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
415 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); 415 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
416 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); 416 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
417 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE, 417 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
418 EFX_RX_USR_BUF_SIZE >> 5); 418 EFX_RX_USR_BUF_SIZE >> 5);
419 efx_writeo(efx, &temp, FR_AZ_RX_CFG); 419 efx_writeo(efx, &temp, FR_AZ_RX_CFG);

	/* Set hash key for IPv4 */
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);

	/* Enable IPv6 RSS */
	BUILD_BUG_ON(sizeof(efx->rx_hash_key) <
		     2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
		     FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
	memcpy(&temp, efx->rx_hash_key, sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
	memcpy(&temp, efx->rx_hash_key + sizeof(temp), sizeof(temp));
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
	EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
			     FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
	memcpy(&temp, efx->rx_hash_key + 2 * sizeof(temp),
	       FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
	efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		return rc;

	/* Set destination of both TX and RX Flush events */
	EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
	efx_writeo(efx, &temp, FR_BZ_DP_CTRL);

	EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
	efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);

	efx_nic_init_common(efx);
	return 0;
}

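/* For reference: FRF_BZ_RX_HASH_ALG = 1 above selects the Toeplitz hash
 * over the key just programmed. A minimal sketch of the algorithm follows,
 * assuming a key of at least len * 8 + 32 bits (the 40-byte rx_hash_key
 * satisfies this for the usual 4-tuple inputs); the function name and shape
 * are illustrative only and are not part of this driver.
 */
static inline u32 toeplitz_hash_sketch(const u8 *key, const u8 *in, size_t len)
{
	/* 32-bit window of the key, left-aligned at the current input bit */
	u32 window = ((u32)key[0] << 24) | ((u32)key[1] << 16) |
		     ((u32)key[2] << 8) | key[3];
	u32 hash = 0;
	size_t nextbit = 32;	/* index of the next key bit to shift in */
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		for (bit = 7; bit >= 0; bit--) {
			/* XOR in the key window for every set input bit */
			if (in[i] & (1u << bit))
				hash ^= window;
			/* slide the window one key bit to the right */
			window = (window << 1) |
				 ((key[nextbit / 8] >> (7 - nextbit % 8)) & 1);
			nextbit++;
		}
	}
	return hash;
}
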
static void siena_remove_nic(struct efx_nic *efx)
{
	efx_mcdi_mon_remove(efx);

	efx_nic_free_buffer(efx, &efx->irq_status);

	siena_reset_hw(efx, RESET_TYPE_ALL);

	/* Relinquish the device back to the BMC */
	efx_mcdi_drv_attach(efx, false, NULL);

	/* Tear down the private nic state */
	kfree(efx->nic_data);
	efx->nic_data = NULL;
}

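/* The MC DMAs each statistics snapshot bracketed by a pair of generation
 * counts; this sentinel in the GENERATION_END word marks a buffer into
 * which no snapshot has yet been published.
 */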
#define STATS_GENERATION_INVALID ((__force __le64)(-1))

static int siena_try_update_nic_stats(struct efx_nic *efx)
{
	__le64 *dma_stats;
	struct efx_mac_stats *mac_stats;
	__le64 generation_start, generation_end;

	mac_stats = &efx->mac_stats;
	dma_stats = efx->stats_buffer.addr;

	generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
	if (generation_end == STATS_GENERATION_INVALID)
		return 0;
	rmb();

#define MAC_STAT(M, D) \
	mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])

	MAC_STAT(tx_bytes, TX_BYTES);
	MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
	efx_update_diff_stat(&mac_stats->tx_good_bytes,
			     mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
	MAC_STAT(tx_packets, TX_PKTS);
	MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
	MAC_STAT(tx_pause, TX_PAUSE_PKTS);
	MAC_STAT(tx_control, TX_CONTROL_PKTS);
	MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
	MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
	MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
	MAC_STAT(tx_lt64, TX_LT64_PKTS);
	MAC_STAT(tx_64, TX_64_PKTS);
	MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
	MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
	MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
	MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
	MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
	MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
	MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
	mac_stats->tx_collision = 0;
	MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
	MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
	MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
	MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
	MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
	mac_stats->tx_collision = (mac_stats->tx_single_collision +
				   mac_stats->tx_multiple_collision +
				   mac_stats->tx_excessive_collision +
				   mac_stats->tx_late_collision);
	MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
	MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
	MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
	MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
	MAC_STAT(rx_bytes, RX_BYTES);
	MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
	efx_update_diff_stat(&mac_stats->rx_good_bytes,
			     mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
	MAC_STAT(rx_packets, RX_PKTS);
	MAC_STAT(rx_good, RX_GOOD_PKTS);
	MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
	MAC_STAT(rx_pause, RX_PAUSE_PKTS);
	MAC_STAT(rx_control, RX_CONTROL_PKTS);
	MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
	MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
	MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
	MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
	MAC_STAT(rx_64, RX_64_PKTS);
	MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
	MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
	MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
	MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
	MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
	MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
	MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
	mac_stats->rx_bad_lt64 = 0;
	mac_stats->rx_bad_64_to_15xx = 0;
	mac_stats->rx_bad_15xx_to_jumbo = 0;
	MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
	MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
	mac_stats->rx_missed = 0;
	MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
	MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
	MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
	MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
	MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
	mac_stats->rx_good_lt64 = 0;

	efx->n_rx_nodesc_drop_cnt =
		le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);

#undef MAC_STAT

	rmb();
	generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
	if (generation_end != generation_start)
		return -EAGAIN;

	return 0;
}
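
/* The read side above mirrors a seqlock: the MC firmware is expected to
 * write GENERATION_START, then the stats payload, then GENERATION_END,
 * while the driver reads in the opposite order with rmb() between steps.
 * A sketch of the consumer protocol (illustrative names only):
 *
 *	end = dma_stats[GENERATION_END];
 *	if (end == STATS_GENERATION_INVALID)
 *		return 0;		// nothing published; keep old values
 *	rmb();				// read end before the payload
 *	copy_stats();
 *	rmb();				// read the payload before start
 *	start = dma_stats[GENERATION_START];
 *	if (start != end)
 *		return -EAGAIN;		// torn snapshot; caller retries
 */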

static void siena_update_nic_stats(struct efx_nic *efx)
{
	int retry;

	/* If we're unlucky enough to read statistics during the DMA, wait
	 * up to 10ms for it to finish (typically takes <500us) */
	for (retry = 0; retry < 100; ++retry) {
		if (siena_try_update_nic_stats(efx) == 0)
			return;
		udelay(100);
	}

	/* Use the old values instead */
}

static void siena_start_nic_stats(struct efx_nic *efx)
{
	__le64 *dma_stats = efx->stats_buffer.addr;

	dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;

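	/* Invalidate the generation count before (re)enabling periodic DMA,
	 * so a concurrent siena_try_update_nic_stats() keeps the old values
	 * rather than trusting a stale snapshot.
	 */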
	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
			   MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
}

static void siena_stop_nic_stats(struct efx_nic *efx)
{
	efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
}

/**************************************************************************
 *
 * Wake on LAN
 *
 **************************************************************************
 */

static void siena_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
{
	struct siena_nic_data *nic_data = efx->nic_data;

	wol->supported = WAKE_MAGIC;
	if (nic_data->wol_filter_id != -1)
		wol->wolopts = WAKE_MAGIC;
	else
		wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}


static int siena_set_wol(struct efx_nic *efx, u32 type)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	int rc;

	if (type & ~WAKE_MAGIC)
		return -EINVAL;

	if (type & WAKE_MAGIC) {
		if (nic_data->wol_filter_id != -1)
			efx_mcdi_wol_filter_remove(efx,
						   nic_data->wol_filter_id);
		rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
						   &nic_data->wol_filter_id);
		if (rc)
			goto fail;

		pci_wake_from_d3(efx->pci_dev, true);
	} else {
		rc = efx_mcdi_wol_filter_reset(efx);
		nic_data->wol_filter_id = -1;
		pci_wake_from_d3(efx->pci_dev, false);
		if (rc)
			goto fail;
	}

	return 0;
fail:
	netif_err(efx, hw, efx->net_dev, "%s failed: type=%d rc=%d\n",
		  __func__, type, rc);
	return rc;
}
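
/* For reference: only magic-packet wake is supported, which corresponds to
 * the 'g' flag in ethtool. A typical invocation (hypothetical interface
 * name) would be:
 *
 *	ethtool -s eth0 wol g	# install a magic-packet WoL filter
 *	ethtool -s eth0 wol d	# reset filters and disable D3 wake
 */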


static void siena_init_wol(struct efx_nic *efx)
{
	struct siena_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);

	if (rc != 0) {
		/* If it failed, attempt to get into a synchronised
		 * state with MC by resetting any set WoL filters */
		efx_mcdi_wol_filter_reset(efx);
		nic_data->wol_filter_id = -1;
	} else if (nic_data->wol_filter_id != -1) {
		pci_wake_from_d3(efx->pci_dev, true);
	}
}


/**************************************************************************
 *
 * Revision-dependent attributes used by efx.c and nic.c
 *
 **************************************************************************
 */

const struct efx_nic_type siena_a0_nic_type = {
	.probe = siena_probe_nic,
	.remove = siena_remove_nic,
	.init = siena_init_nic,
	.dimension_resources = siena_dimension_resources,
	.fini = efx_port_dummy_op_void,
#ifdef CONFIG_EEH
	.monitor = siena_monitor,
#else
	.monitor = NULL,
#endif
	.map_reset_reason = siena_map_reset_reason,
	.map_reset_flags = siena_map_reset_flags,
	.reset = siena_reset_hw,
	.probe_port = siena_probe_port,
	.remove_port = siena_remove_port,
	.prepare_flush = siena_prepare_flush,
	.finish_flush = siena_finish_flush,
	.update_stats = siena_update_nic_stats,
	.start_stats = siena_start_nic_stats,
	.stop_stats = siena_stop_nic_stats,
	.set_id_led = efx_mcdi_set_id_led,
	.push_irq_moderation = siena_push_irq_moderation,
	.reconfigure_mac = efx_mcdi_mac_reconfigure,
	.check_mac_fault = efx_mcdi_mac_check_fault,
	.reconfigure_port = efx_mcdi_phy_reconfigure,
	.get_wol = siena_get_wol,
	.set_wol = siena_set_wol,
	.resume_wol = siena_init_wol,
	.test_chip = siena_test_chip,
	.test_nvram = efx_mcdi_nvram_test_all,

	.revision = EFX_REV_SIENA_A0,
	.mem_map_size = (FR_CZ_MC_TREG_SMEM +
			 FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
	.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
	.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
	.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
	.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
	.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
	.rx_buffer_hash_size = 0x10,
	.rx_buffer_padding = 0,
	.can_rx_scatter = true,
	.max_interrupt_mode = EFX_INT_MODE_MSIX,
	.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
				   * interrupt handler only supports 32
				   * channels */
	.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
	.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			     NETIF_F_RXHASH | NETIF_F_NTUPLE),
};

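/* The driver core invokes the Siena implementation through this ops table
 * rather than by direct call; a minimal sketch of the dispatch pattern as
 * seen from efx.c/nic.c (illustrative, not quoted from those files):
 *
 *	efx->type->start_stats(efx);	// -> siena_start_nic_stats()
 *	efx->type->update_stats(efx);	// -> siena_update_nic_stats()
 *	efx->type->stop_stats(efx);	// -> siena_stop_nic_stats()
 */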