Commit 1a0d6ae5795c376bae6d012fb25e8341e4c6d5f2

Authored by Danny Kukawka
Committed by David S. Miller
1 parent c8585bd89e

rename dev_hw_addr_random and remove redundant second parameter

Renamed dev_hw_addr_random() to eth_hw_addr_random() to reflect that
this function only assigns a random Ethernet (MAC) address. Removed
the second parameter (u8 *hwaddr); it is redundant, since the
net_device that is already passed in contains net_device->dev_addr,
which the function now sets directly.
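
As a rough sketch of what this means for a caller (the memcpy lines are
illustrative of a typical VF driver and are not taken from this diff):

    /* before: caller passes the buffer to fill, then copies it to dev_addr */
    dev_hw_addr_random(netdev, hw->mac.addr);
    memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

    /* after: the helper writes netdev->dev_addr itself */
    eth_hw_addr_random(netdev);
    memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);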

Adapted igbvf and ixgbevf to the changed function.

Small fix for ixgbevf_probe(): if ixgbevf_sw_init() fails (which
means the device got no dev_addr), handle the error and jump to
err_sw_init, as igbvf already does in a similar case.
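
A minimal sketch of the described probe-path handling (the local
variable and label names mirror the description; the surrounding
probe code is not shown here):

    err = ixgbevf_sw_init(adapter);
    if (err)
            goto err_sw_init;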

Signed-off-by: Danny Kukawka <danny.kukawka@bisect.de>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 3 changed files with 31 additions and 21 deletions

drivers/net/ethernet/intel/igbvf/netdev.c
/*******************************************************************************

  Intel(R) 82576 Virtual Function Linux driver
  Copyright(c) 2009 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/prefetch.h>

#include "igbvf.h"

#define DRV_VERSION "2.0.1-k"
char igbvf_driver_name[] = "igbvf";
const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
        "Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
        "Copyright (c) 2009 - 2012 Intel Corporation.";

static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
static void igbvf_set_interrupt_capability(struct igbvf_adapter *);
static void igbvf_reset_interrupt_capability(struct igbvf_adapter *);

static struct igbvf_info igbvf_vf_info = {
        .mac = e1000_vfadapt,
        .flags = 0,
        .pba = 10,
        .init_ops = e1000_init_function_pointers_vf,
};

static struct igbvf_info igbvf_i350_vf_info = {
        .mac = e1000_vfadapt_i350,
        .flags = 0,
        .pba = 10,
        .init_ops = e1000_init_function_pointers_vf,
};

static const struct igbvf_info *igbvf_info_tbl[] = {
        [board_vf] = &igbvf_vf_info,
        [board_i350_vf] = &igbvf_i350_vf_info,
};

/**
 * igbvf_desc_unused - calculate if we have unused descriptors
 **/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
        if (ring->next_to_clean > ring->next_to_use)
                return ring->next_to_clean - ring->next_to_use - 1;

        return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}

/**
 * igbvf_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @status: descriptor status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void igbvf_receive_skb(struct igbvf_adapter *adapter,
                              struct net_device *netdev,
                              struct sk_buff *skb,
                              u32 status, u16 vlan)
{
        if (status & E1000_RXD_STAT_VP) {
                u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
                if (test_bit(vid, adapter->active_vlans))
                        __vlan_hwaccel_put_tag(skb, vid);
        }
        netif_receive_skb(skb);
}

static inline void igbvf_rx_checksum_adv(struct igbvf_adapter *adapter,
                                         u32 status_err, struct sk_buff *skb)
{
        skb_checksum_none_assert(skb);

        /* Ignore Checksum bit is set or checksum is disabled through ethtool */
        if ((status_err & E1000_RXD_STAT_IXSM) ||
            (adapter->flags & IGBVF_FLAG_RX_CSUM_DISABLED))
                return;

        /* TCP/UDP checksum error bit is set */
        if (status_err &
            (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
                /* let the stack verify checksum errors */
                adapter->hw_csum_err++;
                return;
        }

        /* It must be a TCP or UDP packet with a valid checksum */
        if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        adapter->hw_csum_good++;
}

/**
 * igbvf_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: address of ring structure to repopulate
 * @cleaned_count: number of buffers to repopulate
 **/
static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
                                   int cleaned_count)
{
        struct igbvf_adapter *adapter = rx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_adv_rx_desc *rx_desc;
        struct igbvf_buffer *buffer_info;
        struct sk_buff *skb;
        unsigned int i;
        int bufsz;

        i = rx_ring->next_to_use;
        buffer_info = &rx_ring->buffer_info[i];

        if (adapter->rx_ps_hdr_size)
                bufsz = adapter->rx_ps_hdr_size;
        else
                bufsz = adapter->rx_buffer_len;

        while (cleaned_count--) {
                rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);

                if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) {
                        if (!buffer_info->page) {
                                buffer_info->page = alloc_page(GFP_ATOMIC);
                                if (!buffer_info->page) {
                                        adapter->alloc_rx_buff_failed++;
                                        goto no_buffers;
                                }
                                buffer_info->page_offset = 0;
                        } else {
                                buffer_info->page_offset ^= PAGE_SIZE / 2;
                        }
                        buffer_info->page_dma =
                                dma_map_page(&pdev->dev, buffer_info->page,
                                             buffer_info->page_offset,
                                             PAGE_SIZE / 2,
                                             DMA_FROM_DEVICE);
                }

                if (!buffer_info->skb) {
                        skb = netdev_alloc_skb_ip_align(netdev, bufsz);
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }

                        buffer_info->skb = skb;
                        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                                          bufsz,
                                                          DMA_FROM_DEVICE);
                }
                /* Refresh the desc even if buffer_addrs didn't change because
                 * each write-back erases this info. */
                if (adapter->rx_ps_hdr_size) {
                        rx_desc->read.pkt_addr =
                                cpu_to_le64(buffer_info->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
                } else {
                        rx_desc->read.pkt_addr =
                                cpu_to_le64(buffer_info->dma);
                        rx_desc->read.hdr_addr = 0;
                }

                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }

no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
                if (i == 0)
                        i = (rx_ring->count - 1);
                else
                        i--;

                /* Force memory writes to complete before letting h/w
                 * know there are new descriptors to fetch. (Only
                 * applicable for weak-ordered memory model archs,
                 * such as IA-64). */
                wmb();
                writel(i, adapter->hw.hw_addr + rx_ring->tail);
        }
}

/**
 * igbvf_clean_rx_irq - Send received data up the network stack; legacy
 * @adapter: board private structure
 *
 * the return value indicates whether actual cleaning was done, there
 * is no guarantee that everything was cleaned
 **/
static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
                               int *work_done, int work_to_do)
{
        struct igbvf_ring *rx_ring = adapter->rx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union e1000_adv_rx_desc *rx_desc, *next_rxd;
        struct igbvf_buffer *buffer_info, *next_buffer;
        struct sk_buff *skb;
        bool cleaned = false;
        int cleaned_count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int i;
        u32 length, hlen, staterr;

        i = rx_ring->next_to_clean;
        rx_desc = IGBVF_RX_DESC_ADV(*rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

        while (staterr & E1000_RXD_STAT_DD) {
                if (*work_done >= work_to_do)
                        break;
                (*work_done)++;
                rmb(); /* read descriptor and rx_buffer_info after status DD */

                buffer_info = &rx_ring->buffer_info[i];

                /* HW will not DMA in data larger than the given buffer, even
                 * if it parses the (NFS, of course) header to be larger. In
                 * that case, it fills the header buffer and spills the rest
                 * into the page.
                 */
                hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info) &
                        E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
                if (hlen > adapter->rx_ps_hdr_size)
                        hlen = adapter->rx_ps_hdr_size;

                length = le16_to_cpu(rx_desc->wb.upper.length);
                cleaned = true;
                cleaned_count++;

                skb = buffer_info->skb;
                prefetch(skb->data - NET_IP_ALIGN);
                buffer_info->skb = NULL;
                if (!adapter->rx_ps_hdr_size) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
                                         adapter->rx_buffer_len,
                                         DMA_FROM_DEVICE);
                        buffer_info->dma = 0;
                        skb_put(skb, length);
                        goto send_up;
                }

                if (!skb_shinfo(skb)->nr_frags) {
                        dma_unmap_single(&pdev->dev, buffer_info->dma,
                                         adapter->rx_ps_hdr_size,
                                         DMA_FROM_DEVICE);
                        skb_put(skb, hlen);
                }

                if (length) {
                        dma_unmap_page(&pdev->dev, buffer_info->page_dma,
                                       PAGE_SIZE / 2,
                                       DMA_FROM_DEVICE);
                        buffer_info->page_dma = 0;

                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           buffer_info->page,
                                           buffer_info->page_offset,
                                           length);

                        if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) ||
                            (page_count(buffer_info->page) != 1))
                                buffer_info->page = NULL;
                        else
                                get_page(buffer_info->page);

                        skb->len += length;
                        skb->data_len += length;
                        skb->truesize += PAGE_SIZE / 2;
                }
send_up:
                i++;
                if (i == rx_ring->count)
                        i = 0;
                next_rxd = IGBVF_RX_DESC_ADV(*rx_ring, i);
                prefetch(next_rxd);
                next_buffer = &rx_ring->buffer_info[i];

                if (!(staterr & E1000_RXD_STAT_EOP)) {
                        buffer_info->skb = next_buffer->skb;
                        buffer_info->dma = next_buffer->dma;
                        next_buffer->skb = skb;
                        next_buffer->dma = 0;
                        goto next_desc;
                }

                if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
                }

                total_bytes += skb->len;
                total_packets++;

                igbvf_rx_checksum_adv(adapter, staterr, skb);

                skb->protocol = eth_type_trans(skb, netdev);

                igbvf_receive_skb(adapter, netdev, skb, staterr,
                                  rx_desc->wb.upper.vlan);

next_desc:
                rx_desc->wb.upper.status_error = 0;

                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IGBVF_RX_BUFFER_WRITE) {
                        igbvf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }

                /* use prefetched values */
                rx_desc = next_rxd;
                buffer_info = next_buffer;

                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }

        rx_ring->next_to_clean = i;
        cleaned_count = igbvf_desc_unused(rx_ring);

        if (cleaned_count)
                igbvf_alloc_rx_buffers(rx_ring, cleaned_count);

        adapter->total_rx_packets += total_packets;
        adapter->total_rx_bytes += total_bytes;
        adapter->net_stats.rx_bytes += total_bytes;
        adapter->net_stats.rx_packets += total_packets;
        return cleaned;
}

static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
                            struct igbvf_buffer *buffer_info)
{
        if (buffer_info->dma) {
                if (buffer_info->mapped_as_page)
                        dma_unmap_page(&adapter->pdev->dev,
                                       buffer_info->dma,
                                       buffer_info->length,
                                       DMA_TO_DEVICE);
                else
                        dma_unmap_single(&adapter->pdev->dev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         DMA_TO_DEVICE);
                buffer_info->dma = 0;
        }
        if (buffer_info->skb) {
                dev_kfree_skb_any(buffer_info->skb);
                buffer_info->skb = NULL;
        }
        buffer_info->time_stamp = 0;
}

/**
 * igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *tx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        int size;

        size = sizeof(struct igbvf_buffer) * tx_ring->count;
        tx_ring->buffer_info = vzalloc(size);
        if (!tx_ring->buffer_info)
                goto err;

        /* round up to nearest 4K */
        tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);

        tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);

        if (!tx_ring->desc)
                goto err;

        tx_ring->adapter = adapter;
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        return 0;
err:
        vfree(tx_ring->buffer_info);
        dev_err(&adapter->pdev->dev,
                "Unable to allocate memory for the transmit descriptor ring\n");
        return -ENOMEM;
}

/**
 * igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
                             struct igbvf_ring *rx_ring)
{
        struct pci_dev *pdev = adapter->pdev;
        int size, desc_len;

        size = sizeof(struct igbvf_buffer) * rx_ring->count;
        rx_ring->buffer_info = vzalloc(size);
        if (!rx_ring->buffer_info)
                goto err;

        desc_len = sizeof(union e1000_adv_rx_desc);

        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * desc_len;
        rx_ring->size = ALIGN(rx_ring->size, 4096);

        rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);

        if (!rx_ring->desc)
                goto err;

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        rx_ring->adapter = adapter;

        return 0;

err:
        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;
        dev_err(&adapter->pdev->dev,
                "Unable to allocate memory for the receive descriptor ring\n");
        return -ENOMEM;
}

/**
 * igbvf_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igbvf_clean_tx_ring(struct igbvf_ring *tx_ring)
{
        struct igbvf_adapter *adapter = tx_ring->adapter;
        struct igbvf_buffer *buffer_info;
        unsigned long size;
        unsigned int i;

        if (!tx_ring->buffer_info)
                return;

        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                igbvf_put_txbuf(adapter, buffer_info);
        }

        size = sizeof(struct igbvf_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        writel(0, adapter->hw.hw_addr + tx_ring->head);
        writel(0, adapter->hw.hw_addr + tx_ring->tail);
}

/**
 * igbvf_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: ring to free resources from
 *
 * Free all transmit software resources
 **/
void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
{
        struct pci_dev *pdev = tx_ring->adapter->pdev;

        igbvf_clean_tx_ring(tx_ring);

        vfree(tx_ring->buffer_info);
        tx_ring->buffer_info = NULL;

        dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
                          tx_ring->dma);

        tx_ring->desc = NULL;
}

/**
 * igbvf_clean_rx_ring - Free Rx Buffers per Queue
 * @adapter: board private structure
 **/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
        struct igbvf_adapter *adapter = rx_ring->adapter;
        struct igbvf_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        if (!rx_ring->buffer_info)
                return;

        /* Free all the Rx ring sk_buffs */
        for (i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if (buffer_info->dma) {
                        if (adapter->rx_ps_hdr_size){
                                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_ps_hdr_size,
                                                 DMA_FROM_DEVICE);
                        } else {
                                dma_unmap_single(&pdev->dev, buffer_info->dma,
                                                 adapter->rx_buffer_len,
                                                 DMA_FROM_DEVICE);
                        }
                        buffer_info->dma = 0;
                }

                if (buffer_info->skb) {
                        dev_kfree_skb(buffer_info->skb);
                        buffer_info->skb = NULL;
                }

                if (buffer_info->page) {
                        if (buffer_info->page_dma)
                                dma_unmap_page(&pdev->dev,
                                               buffer_info->page_dma,
                                               PAGE_SIZE / 2,
                                               DMA_FROM_DEVICE);
                        put_page(buffer_info->page);
                        buffer_info->page = NULL;
                        buffer_info->page_dma = 0;
                        buffer_info->page_offset = 0;
                }
        }

        size = sizeof(struct igbvf_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */
        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        writel(0, adapter->hw.hw_addr + rx_ring->head);
        writel(0, adapter->hw.hw_addr + rx_ring->tail);
}

/**
 * igbvf_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/

void igbvf_free_rx_resources(struct igbvf_ring *rx_ring)
{
        struct pci_dev *pdev = rx_ring->adapter->pdev;

        igbvf_clean_rx_ring(rx_ring);

        vfree(rx_ring->buffer_info);
        rx_ring->buffer_info = NULL;

        dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
                          rx_ring->dma);
        rx_ring->desc = NULL;
}

/**
 * igbvf_update_itr - update the dynamic ITR value based on statistics
 * @adapter: pointer to adapter
 * @itr_setting: current adapter->itr
 * @packets: the number of packets during this measurement interval
 * @bytes: the number of bytes during this measurement interval
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt.  The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern.  Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 **/
static enum latency_range igbvf_update_itr(struct igbvf_adapter *adapter,
                                           enum latency_range itr_setting,
                                           int packets, int bytes)
{
        enum latency_range retval = itr_setting;

        if (packets == 0)
                goto update_itr_done;

        switch (itr_setting) {
        case lowest_latency:
                /* handle TSO and jumbo frames */
                if (bytes/packets > 8000)
                        retval = bulk_latency;
                else if ((packets < 5) && (bytes > 512))
                        retval = low_latency;
                break;
        case low_latency: /* 50 usec aka 20000 ints/s */
                if (bytes > 10000) {
                        /* this if handles the TSO accounting */
                        if (bytes/packets > 8000)
                                retval = bulk_latency;
                        else if ((packets < 10) || ((bytes/packets) > 1200))
                                retval = bulk_latency;
                        else if ((packets > 35))
                                retval = lowest_latency;
                } else if (bytes/packets > 2000) {
                        retval = bulk_latency;
                } else if (packets <= 2 && bytes < 512) {
                        retval = lowest_latency;
                }
                break;
        case bulk_latency: /* 250 usec aka 4000 ints/s */
                if (bytes > 25000) {
                        if (packets > 35)
                                retval = low_latency;
                } else if (bytes < 6000) {
                        retval = low_latency;
                }
                break;
        default:
                break;
        }

update_itr_done:
        return retval;
}

static int igbvf_range_to_itr(enum latency_range current_range)
{
        int new_itr;

        switch (current_range) {
        /* counts and packets in update_itr are dependent on these numbers */
        case lowest_latency:
                new_itr = IGBVF_70K_ITR;
                break;
        case low_latency:
                new_itr = IGBVF_20K_ITR;
                break;
        case bulk_latency:
                new_itr = IGBVF_4K_ITR;
                break;
        default:
                new_itr = IGBVF_START_ITR;
                break;
        }
        return new_itr;
}

static void igbvf_set_itr(struct igbvf_adapter *adapter)
{
        u32 new_itr;

        adapter->tx_ring->itr_range =
                        igbvf_update_itr(adapter,
                                         adapter->tx_ring->itr_val,
                                         adapter->total_tx_packets,
                                         adapter->total_tx_bytes);

        /* conservative mode (itr 3) eliminates the lowest_latency setting */
        if (adapter->requested_itr == 3 &&
            adapter->tx_ring->itr_range == lowest_latency)
                adapter->tx_ring->itr_range = low_latency;

        new_itr = igbvf_range_to_itr(adapter->tx_ring->itr_range);


        if (new_itr != adapter->tx_ring->itr_val) {
                u32 current_itr = adapter->tx_ring->itr_val;
                /*
                 * this attempts to bias the interrupt rate towards Bulk
                 * by adding intermediate steps when interrupt rate is
                 * increasing
                 */
                new_itr = new_itr > current_itr ?
                          min(current_itr + (new_itr >> 2), new_itr) :
                          new_itr;
                adapter->tx_ring->itr_val = new_itr;

                adapter->tx_ring->set_itr = 1;
        }

        adapter->rx_ring->itr_range =
                        igbvf_update_itr(adapter, adapter->rx_ring->itr_val,
                                         adapter->total_rx_packets,
                                         adapter->total_rx_bytes);
        if (adapter->requested_itr == 3 &&
            adapter->rx_ring->itr_range == lowest_latency)
                adapter->rx_ring->itr_range = low_latency;

        new_itr = igbvf_range_to_itr(adapter->rx_ring->itr_range);

        if (new_itr != adapter->rx_ring->itr_val) {
                u32 current_itr = adapter->rx_ring->itr_val;
                new_itr = new_itr > current_itr ?
                          min(current_itr + (new_itr >> 2), new_itr) :
                          new_itr;
                adapter->rx_ring->itr_val = new_itr;

                adapter->rx_ring->set_itr = 1;
        }
}

/**
 * igbvf_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 * returns true if ring is completely cleaned
 **/
static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
{
        struct igbvf_adapter *adapter = tx_ring->adapter;
        struct net_device *netdev = adapter->netdev;
        struct igbvf_buffer *buffer_info;
        struct sk_buff *skb;
        union e1000_adv_tx_desc *tx_desc, *eop_desc;
        unsigned int total_bytes = 0, total_packets = 0;
        unsigned int i, eop, count = 0;
        bool cleaned = false;

        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);

        while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
               (count < tx_ring->count)) {
                rmb(); /* read buffer_info after eop_desc status */
                for (cleaned = false; !cleaned; count++) {
                        tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];
                        cleaned = (i == eop);
                        skb = buffer_info->skb;

                        if (skb) {
                                unsigned int segs, bytecount;

                                /* gso_segs is currently only valid for tcp */
                                segs = skb_shinfo(skb)->gso_segs ?: 1;
                                /* multiply data chunks by size of headers */
                                bytecount = ((segs - 1) * skb_headlen(skb)) +
                                            skb->len;
                                total_packets += segs;
                                total_bytes += bytecount;
                        }

                        igbvf_put_txbuf(adapter, buffer_info);
                        tx_desc->wb.status = 0;

                        i++;
                        if (i == tx_ring->count)
                                i = 0;
                }
                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = IGBVF_TX_DESC_ADV(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

        if (unlikely(count &&
                     netif_carrier_ok(netdev) &&
                     igbvf_desc_unused(tx_ring) >= IGBVF_TX_QUEUE_WAKE)) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
                if (netif_queue_stopped(netdev) &&
                    !(test_bit(__IGBVF_DOWN, &adapter->state))) {
                        netif_wake_queue(netdev);
                        ++adapter->restart_queue;
                }
        }

        adapter->net_stats.tx_bytes += total_bytes;
        adapter->net_stats.tx_packets += total_packets;
        return count < tx_ring->count;
}

static irqreturn_t igbvf_msix_other(int irq, void *data)
{
        struct net_device *netdev = data;
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;

        adapter->int_counter1++;

        netif_carrier_off(netdev);
        hw->mac.get_link_status = 1;
        if (!test_bit(__IGBVF_DOWN, &adapter->state))
                mod_timer(&adapter->watchdog_timer, jiffies + 1);

        ew32(EIMS, adapter->eims_other);

        return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_tx(int irq, void *data)
{
        struct net_device *netdev = data;
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct igbvf_ring *tx_ring = adapter->tx_ring;

        if (tx_ring->set_itr) {
                writel(tx_ring->itr_val,
                       adapter->hw.hw_addr + tx_ring->itr_register);
                adapter->tx_ring->set_itr = 0;
        }

        adapter->total_tx_bytes = 0;
        adapter->total_tx_packets = 0;

        /* auto mask will automatically reenable the interrupt when we write
         * EICS */
        if (!igbvf_clean_tx_irq(tx_ring))
                /* Ring was not completely cleaned, so fire another interrupt */
                ew32(EICS, tx_ring->eims_value);
        else
                ew32(EIMS, tx_ring->eims_value);

        return IRQ_HANDLED;
}

static irqreturn_t igbvf_intr_msix_rx(int irq, void *data)
{
        struct net_device *netdev = data;
        struct igbvf_adapter *adapter = netdev_priv(netdev);

        adapter->int_counter0++;

        /* Write the ITR value calculated at the end of the
         * previous interrupt.
         */
        if (adapter->rx_ring->set_itr) {
                writel(adapter->rx_ring->itr_val,
                       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
                adapter->rx_ring->set_itr = 0;
        }

        if (napi_schedule_prep(&adapter->rx_ring->napi)) {
                adapter->total_rx_bytes = 0;
                adapter->total_rx_packets = 0;
                __napi_schedule(&adapter->rx_ring->napi);
        }

        return IRQ_HANDLED;
}

#define IGBVF_NO_QUEUE -1

static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
                                int tx_queue, int msix_vector)
{
        struct e1000_hw *hw = &adapter->hw;
        u32 ivar, index;

        /* 82576 uses a table-based method for assigning vectors.
           Each queue has a single entry in the table to which we write
           a vector number along with a "valid" bit.  Sadly, the layout
           of the table is somewhat counterintuitive. */
        if (rx_queue > IGBVF_NO_QUEUE) {
                index = (rx_queue >> 1);
                ivar = array_er32(IVAR0, index);
                if (rx_queue & 0x1) {
                        /* vector goes into third byte of register */
                        ivar = ivar & 0xFF00FFFF;
                        ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
                } else {
                        /* vector goes into low byte of register */
                        ivar = ivar & 0xFFFFFF00;
                        ivar |= msix_vector | E1000_IVAR_VALID;
                }
                adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
                array_ew32(IVAR0, index, ivar);
        }
        if (tx_queue > IGBVF_NO_QUEUE) {
                index = (tx_queue >> 1);
                ivar = array_er32(IVAR0, index);
                if (tx_queue & 0x1) {
                        /* vector goes into high byte of register */
                        ivar = ivar & 0x00FFFFFF;
                        ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
                } else {
                        /* vector goes into second byte of register */
                        ivar = ivar & 0xFFFF00FF;
                        ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
                }
                adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
                array_ew32(IVAR0, index, ivar);
        }
}

948 /** 948 /**
949 * igbvf_configure_msix - Configure MSI-X hardware 949 * igbvf_configure_msix - Configure MSI-X hardware
950 * 950 *
951 * igbvf_configure_msix sets up the hardware to properly 951 * igbvf_configure_msix sets up the hardware to properly
952 * generate MSI-X interrupts. 952 * generate MSI-X interrupts.
953 **/ 953 **/
954 static void igbvf_configure_msix(struct igbvf_adapter *adapter) 954 static void igbvf_configure_msix(struct igbvf_adapter *adapter)
955 { 955 {
956 u32 tmp; 956 u32 tmp;
957 struct e1000_hw *hw = &adapter->hw; 957 struct e1000_hw *hw = &adapter->hw;
958 struct igbvf_ring *tx_ring = adapter->tx_ring; 958 struct igbvf_ring *tx_ring = adapter->tx_ring;
959 struct igbvf_ring *rx_ring = adapter->rx_ring; 959 struct igbvf_ring *rx_ring = adapter->rx_ring;
960 int vector = 0; 960 int vector = 0;
961 961
962 adapter->eims_enable_mask = 0; 962 adapter->eims_enable_mask = 0;
963 963
964 igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++); 964 igbvf_assign_vector(adapter, IGBVF_NO_QUEUE, 0, vector++);
965 adapter->eims_enable_mask |= tx_ring->eims_value; 965 adapter->eims_enable_mask |= tx_ring->eims_value;
966 writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register); 966 writel(tx_ring->itr_val, hw->hw_addr + tx_ring->itr_register);
967 igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++); 967 igbvf_assign_vector(adapter, 0, IGBVF_NO_QUEUE, vector++);
968 adapter->eims_enable_mask |= rx_ring->eims_value; 968 adapter->eims_enable_mask |= rx_ring->eims_value;
969 writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register); 969 writel(rx_ring->itr_val, hw->hw_addr + rx_ring->itr_register);
970 970
971 /* set vector for other causes, i.e. link changes */ 971 /* set vector for other causes, i.e. link changes */
972 972
973 tmp = (vector++ | E1000_IVAR_VALID); 973 tmp = (vector++ | E1000_IVAR_VALID);
974 974
975 ew32(IVAR_MISC, tmp); 975 ew32(IVAR_MISC, tmp);
976 976
977 adapter->eims_enable_mask = (1 << (vector)) - 1; 977 adapter->eims_enable_mask = (1 << (vector)) - 1;
978 adapter->eims_other = 1 << (vector - 1); 978 adapter->eims_other = 1 << (vector - 1);
979 e1e_flush(); 979 e1e_flush();
980 } 980 }
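
With the fixed three-vector layout set up above (one Tx, one Rx, one for other causes), the two masks written at the end of igbvf_configure_msix() reduce to small constants. A quick standalone check of that arithmetic, assuming vector ends up at 3:

#include <assert.h>

int main(void)
{
	int vector = 3;	/* tx, rx and "other" vectors have been assigned */
	unsigned int enable_mask = (1u << vector) - 1;	/* 0x7: EIMS bits 0..2 */
	unsigned int other_mask  = 1u << (vector - 1);	/* 0x4: the misc/link vector */

	assert(enable_mask == 0x7);
	assert(other_mask == 0x4);
	return 0;
}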
981 981
982 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter) 982 static void igbvf_reset_interrupt_capability(struct igbvf_adapter *adapter)
983 { 983 {
984 if (adapter->msix_entries) { 984 if (adapter->msix_entries) {
985 pci_disable_msix(adapter->pdev); 985 pci_disable_msix(adapter->pdev);
986 kfree(adapter->msix_entries); 986 kfree(adapter->msix_entries);
987 adapter->msix_entries = NULL; 987 adapter->msix_entries = NULL;
988 } 988 }
989 } 989 }
990 990
991 /** 991 /**
992 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported 992 * igbvf_set_interrupt_capability - set MSI or MSI-X if supported
993 * 993 *
994 * Attempt to configure interrupts using the best available 994 * Attempt to configure interrupts using the best available
995 * capabilities of the hardware and kernel. 995 * capabilities of the hardware and kernel.
996 **/ 996 **/
997 static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter) 997 static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
998 { 998 {
999 int err = -ENOMEM; 999 int err = -ENOMEM;
1000 int i; 1000 int i;
1001 1001
1002 /* we allocate 3 vectors: one for Tx, one for Rx, one for PF messages */ 1002 /* we allocate 3 vectors: one for Tx, one for Rx, one for PF messages */
1003 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry), 1003 adapter->msix_entries = kcalloc(3, sizeof(struct msix_entry),
1004 GFP_KERNEL); 1004 GFP_KERNEL);
1005 if (adapter->msix_entries) { 1005 if (adapter->msix_entries) {
1006 for (i = 0; i < 3; i++) 1006 for (i = 0; i < 3; i++)
1007 adapter->msix_entries[i].entry = i; 1007 adapter->msix_entries[i].entry = i;
1008 1008
1009 err = pci_enable_msix(adapter->pdev, 1009 err = pci_enable_msix(adapter->pdev,
1010 adapter->msix_entries, 3); 1010 adapter->msix_entries, 3);
1011 } 1011 }
1012 1012
1013 if (err) { 1013 if (err) {
1014 /* MSI-X failed */ 1014 /* MSI-X failed */
1015 dev_err(&adapter->pdev->dev, 1015 dev_err(&adapter->pdev->dev,
1016 "Failed to initialize MSI-X interrupts.\n"); 1016 "Failed to initialize MSI-X interrupts.\n");
1017 igbvf_reset_interrupt_capability(adapter); 1017 igbvf_reset_interrupt_capability(adapter);
1018 } 1018 }
1019 } 1019 }
1020 1020
1021 /** 1021 /**
1022 * igbvf_request_msix - Initialize MSI-X interrupts 1022 * igbvf_request_msix - Initialize MSI-X interrupts
1023 * 1023 *
1024 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the 1024 * igbvf_request_msix allocates MSI-X vectors and requests interrupts from the
1025 * kernel. 1025 * kernel.
1026 **/ 1026 **/
1027 static int igbvf_request_msix(struct igbvf_adapter *adapter) 1027 static int igbvf_request_msix(struct igbvf_adapter *adapter)
1028 { 1028 {
1029 struct net_device *netdev = adapter->netdev; 1029 struct net_device *netdev = adapter->netdev;
1030 int err = 0, vector = 0; 1030 int err = 0, vector = 0;
1031 1031
1032 if (strlen(netdev->name) < (IFNAMSIZ - 5)) { 1032 if (strlen(netdev->name) < (IFNAMSIZ - 5)) {
1033 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name); 1033 sprintf(adapter->tx_ring->name, "%s-tx-0", netdev->name);
1034 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name); 1034 sprintf(adapter->rx_ring->name, "%s-rx-0", netdev->name);
1035 } else { 1035 } else {
1036 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); 1036 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
1037 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); 1037 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
1038 } 1038 }
1039 1039
1040 err = request_irq(adapter->msix_entries[vector].vector, 1040 err = request_irq(adapter->msix_entries[vector].vector,
1041 igbvf_intr_msix_tx, 0, adapter->tx_ring->name, 1041 igbvf_intr_msix_tx, 0, adapter->tx_ring->name,
1042 netdev); 1042 netdev);
1043 if (err) 1043 if (err)
1044 goto out; 1044 goto out;
1045 1045
1046 adapter->tx_ring->itr_register = E1000_EITR(vector); 1046 adapter->tx_ring->itr_register = E1000_EITR(vector);
1047 adapter->tx_ring->itr_val = adapter->current_itr; 1047 adapter->tx_ring->itr_val = adapter->current_itr;
1048 vector++; 1048 vector++;
1049 1049
1050 err = request_irq(adapter->msix_entries[vector].vector, 1050 err = request_irq(adapter->msix_entries[vector].vector,
1051 igbvf_intr_msix_rx, 0, adapter->rx_ring->name, 1051 igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
1052 netdev); 1052 netdev);
1053 if (err) 1053 if (err)
1054 goto out; 1054 goto out;
1055 1055
1056 adapter->rx_ring->itr_register = E1000_EITR(vector); 1056 adapter->rx_ring->itr_register = E1000_EITR(vector);
1057 adapter->rx_ring->itr_val = adapter->current_itr; 1057 adapter->rx_ring->itr_val = adapter->current_itr;
1058 vector++; 1058 vector++;
1059 1059
1060 err = request_irq(adapter->msix_entries[vector].vector, 1060 err = request_irq(adapter->msix_entries[vector].vector,
1061 igbvf_msix_other, 0, netdev->name, netdev); 1061 igbvf_msix_other, 0, netdev->name, netdev);
1062 if (err) 1062 if (err)
1063 goto out; 1063 goto out;
1064 1064
1065 igbvf_configure_msix(adapter); 1065 igbvf_configure_msix(adapter);
1066 return 0; 1066 return 0;
1067 out: 1067 out:
1068 return err; 1068 return err;
1069 } 1069 }
1070 1070
1071 /** 1071 /**
1072 * igbvf_alloc_queues - Allocate memory for all rings 1072 * igbvf_alloc_queues - Allocate memory for all rings
1073 * @adapter: board private structure to initialize 1073 * @adapter: board private structure to initialize
1074 **/ 1074 **/
1075 static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter) 1075 static int __devinit igbvf_alloc_queues(struct igbvf_adapter *adapter)
1076 { 1076 {
1077 struct net_device *netdev = adapter->netdev; 1077 struct net_device *netdev = adapter->netdev;
1078 1078
1079 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); 1079 adapter->tx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1080 if (!adapter->tx_ring) 1080 if (!adapter->tx_ring)
1081 return -ENOMEM; 1081 return -ENOMEM;
1082 1082
1083 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL); 1083 adapter->rx_ring = kzalloc(sizeof(struct igbvf_ring), GFP_KERNEL);
1084 if (!adapter->rx_ring) { 1084 if (!adapter->rx_ring) {
1085 kfree(adapter->tx_ring); 1085 kfree(adapter->tx_ring);
1086 return -ENOMEM; 1086 return -ENOMEM;
1087 } 1087 }
1088 1088
1089 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64); 1089 netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
1090 1090
1091 return 0; 1091 return 0;
1092 } 1092 }
1093 1093
1094 /** 1094 /**
1095 * igbvf_request_irq - initialize interrupts 1095 * igbvf_request_irq - initialize interrupts
1096 * 1096 *
1097 * Attempts to configure interrupts using the best available 1097 * Attempts to configure interrupts using the best available
1098 * capabilities of the hardware and kernel. 1098 * capabilities of the hardware and kernel.
1099 **/ 1099 **/
1100 static int igbvf_request_irq(struct igbvf_adapter *adapter) 1100 static int igbvf_request_irq(struct igbvf_adapter *adapter)
1101 { 1101 {
1102 int err = -1; 1102 int err = -1;
1103 1103
1104 /* igbvf supports msi-x only */ 1104 /* igbvf supports msi-x only */
1105 if (adapter->msix_entries) 1105 if (adapter->msix_entries)
1106 err = igbvf_request_msix(adapter); 1106 err = igbvf_request_msix(adapter);
1107 1107
1108 if (!err) 1108 if (!err)
1109 return err; 1109 return err;
1110 1110
1111 dev_err(&adapter->pdev->dev, 1111 dev_err(&adapter->pdev->dev,
1112 "Unable to allocate interrupt, Error: %d\n", err); 1112 "Unable to allocate interrupt, Error: %d\n", err);
1113 1113
1114 return err; 1114 return err;
1115 } 1115 }
1116 1116
1117 static void igbvf_free_irq(struct igbvf_adapter *adapter) 1117 static void igbvf_free_irq(struct igbvf_adapter *adapter)
1118 { 1118 {
1119 struct net_device *netdev = adapter->netdev; 1119 struct net_device *netdev = adapter->netdev;
1120 int vector; 1120 int vector;
1121 1121
1122 if (adapter->msix_entries) { 1122 if (adapter->msix_entries) {
1123 for (vector = 0; vector < 3; vector++) 1123 for (vector = 0; vector < 3; vector++)
1124 free_irq(adapter->msix_entries[vector].vector, netdev); 1124 free_irq(adapter->msix_entries[vector].vector, netdev);
1125 } 1125 }
1126 } 1126 }
1127 1127
1128 /** 1128 /**
1129 * igbvf_irq_disable - Mask off interrupt generation on the NIC 1129 * igbvf_irq_disable - Mask off interrupt generation on the NIC
1130 **/ 1130 **/
1131 static void igbvf_irq_disable(struct igbvf_adapter *adapter) 1131 static void igbvf_irq_disable(struct igbvf_adapter *adapter)
1132 { 1132 {
1133 struct e1000_hw *hw = &adapter->hw; 1133 struct e1000_hw *hw = &adapter->hw;
1134 1134
1135 ew32(EIMC, ~0); 1135 ew32(EIMC, ~0);
1136 1136
1137 if (adapter->msix_entries) 1137 if (adapter->msix_entries)
1138 ew32(EIAC, 0); 1138 ew32(EIAC, 0);
1139 } 1139 }
1140 1140
1141 /** 1141 /**
1142 * igbvf_irq_enable - Enable default interrupt generation settings 1142 * igbvf_irq_enable - Enable default interrupt generation settings
1143 **/ 1143 **/
1144 static void igbvf_irq_enable(struct igbvf_adapter *adapter) 1144 static void igbvf_irq_enable(struct igbvf_adapter *adapter)
1145 { 1145 {
1146 struct e1000_hw *hw = &adapter->hw; 1146 struct e1000_hw *hw = &adapter->hw;
1147 1147
1148 ew32(EIAC, adapter->eims_enable_mask); 1148 ew32(EIAC, adapter->eims_enable_mask);
1149 ew32(EIAM, adapter->eims_enable_mask); 1149 ew32(EIAM, adapter->eims_enable_mask);
1150 ew32(EIMS, adapter->eims_enable_mask); 1150 ew32(EIMS, adapter->eims_enable_mask);
1151 } 1151 }
1152 1152
1153 /** 1153 /**
1154 * igbvf_poll - NAPI Rx polling callback 1154 * igbvf_poll - NAPI Rx polling callback
1155 * @napi: struct associated with this polling callback 1155 * @napi: struct associated with this polling callback
1156 * @budget: number of packets the driver is allowed to process this poll 1156 * @budget: number of packets the driver is allowed to process this poll
1157 **/ 1157 **/
1158 static int igbvf_poll(struct napi_struct *napi, int budget) 1158 static int igbvf_poll(struct napi_struct *napi, int budget)
1159 { 1159 {
1160 struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi); 1160 struct igbvf_ring *rx_ring = container_of(napi, struct igbvf_ring, napi);
1161 struct igbvf_adapter *adapter = rx_ring->adapter; 1161 struct igbvf_adapter *adapter = rx_ring->adapter;
1162 struct e1000_hw *hw = &adapter->hw; 1162 struct e1000_hw *hw = &adapter->hw;
1163 int work_done = 0; 1163 int work_done = 0;
1164 1164
1165 igbvf_clean_rx_irq(adapter, &work_done, budget); 1165 igbvf_clean_rx_irq(adapter, &work_done, budget);
1166 1166
1167 /* If not enough Rx work done, exit the polling mode */ 1167 /* If not enough Rx work done, exit the polling mode */
1168 if (work_done < budget) { 1168 if (work_done < budget) {
1169 napi_complete(napi); 1169 napi_complete(napi);
1170 1170
1171 if (adapter->requested_itr & 3) 1171 if (adapter->requested_itr & 3)
1172 igbvf_set_itr(adapter); 1172 igbvf_set_itr(adapter);
1173 1173
1174 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1174 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1175 ew32(EIMS, adapter->rx_ring->eims_value); 1175 ew32(EIMS, adapter->rx_ring->eims_value);
1176 } 1176 }
1177 1177
1178 return work_done; 1178 return work_done;
1179 } 1179 }
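
igbvf_poll() follows the usual NAPI contract: process at most budget packets, and only complete the poll and re-arm the queue's interrupt when it finishes under budget. A compact standalone illustration of that control flow (clean_rx() and reenable_irq() are hypothetical stand-ins for igbvf_clean_rx_irq() and the EIMS write):

/* Hypothetical stand-ins for the real Rx cleanup and interrupt re-enable. */
static int clean_rx(int *work_done, int budget) { *work_done = budget / 2; return 0; }
static void reenable_irq(void) { }

/* Returning less than budget tells the NAPI core this poll is complete. */
static int poll_sketch(int budget)
{
	int work_done = 0;

	clean_rx(&work_done, budget);
	if (work_done < budget)		/* ring drained: leave polling mode */
		reenable_irq();		/* next packet will raise an interrupt */
	return work_done;
}

int main(void)
{
	return poll_sketch(64) < 64 ? 0 : 1;
}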
1180 1180
1181 /** 1181 /**
1182 * igbvf_set_rlpml - set receive large packet maximum length 1182 * igbvf_set_rlpml - set receive large packet maximum length
1183 * @adapter: board private structure 1183 * @adapter: board private structure
1184 * 1184 *
1185 * Configure the maximum size of packets that will be received 1185 * Configure the maximum size of packets that will be received
1186 */ 1186 */
1187 static void igbvf_set_rlpml(struct igbvf_adapter *adapter) 1187 static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
1188 { 1188 {
1189 int max_frame_size; 1189 int max_frame_size;
1190 struct e1000_hw *hw = &adapter->hw; 1190 struct e1000_hw *hw = &adapter->hw;
1191 1191
1192 max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE; 1192 max_frame_size = adapter->max_frame_size + VLAN_TAG_SIZE;
1193 e1000_rlpml_set_vf(hw, max_frame_size); 1193 e1000_rlpml_set_vf(hw, max_frame_size);
1194 } 1194 }
1195 1195
1196 static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1196 static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1197 { 1197 {
1198 struct igbvf_adapter *adapter = netdev_priv(netdev); 1198 struct igbvf_adapter *adapter = netdev_priv(netdev);
1199 struct e1000_hw *hw = &adapter->hw; 1199 struct e1000_hw *hw = &adapter->hw;
1200 1200
1201 if (hw->mac.ops.set_vfta(hw, vid, true)) { 1201 if (hw->mac.ops.set_vfta(hw, vid, true)) {
1202 dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid); 1202 dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
1203 return -EINVAL; 1203 return -EINVAL;
1204 } 1204 }
1205 set_bit(vid, adapter->active_vlans); 1205 set_bit(vid, adapter->active_vlans);
1206 return 0; 1206 return 0;
1207 } 1207 }
1208 1208
1209 static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1209 static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1210 { 1210 {
1211 struct igbvf_adapter *adapter = netdev_priv(netdev); 1211 struct igbvf_adapter *adapter = netdev_priv(netdev);
1212 struct e1000_hw *hw = &adapter->hw; 1212 struct e1000_hw *hw = &adapter->hw;
1213 1213
1214 if (hw->mac.ops.set_vfta(hw, vid, false)) { 1214 if (hw->mac.ops.set_vfta(hw, vid, false)) {
1215 dev_err(&adapter->pdev->dev, 1215 dev_err(&adapter->pdev->dev,
1216 "Failed to remove vlan id %d\n", vid); 1216 "Failed to remove vlan id %d\n", vid);
1217 return -EINVAL; 1217 return -EINVAL;
1218 } 1218 }
1219 clear_bit(vid, adapter->active_vlans); 1219 clear_bit(vid, adapter->active_vlans);
1220 return 0; 1220 return 0;
1221 } 1221 }
1222 1222
1223 static void igbvf_restore_vlan(struct igbvf_adapter *adapter) 1223 static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
1224 { 1224 {
1225 u16 vid; 1225 u16 vid;
1226 1226
1227 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1227 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1228 igbvf_vlan_rx_add_vid(adapter->netdev, vid); 1228 igbvf_vlan_rx_add_vid(adapter->netdev, vid);
1229 } 1229 }
1230 1230
1231 /** 1231 /**
1232 * igbvf_configure_tx - Configure Transmit Unit after Reset 1232 * igbvf_configure_tx - Configure Transmit Unit after Reset
1233 * @adapter: board private structure 1233 * @adapter: board private structure
1234 * 1234 *
1235 * Configure the Tx unit of the MAC after a reset. 1235 * Configure the Tx unit of the MAC after a reset.
1236 **/ 1236 **/
1237 static void igbvf_configure_tx(struct igbvf_adapter *adapter) 1237 static void igbvf_configure_tx(struct igbvf_adapter *adapter)
1238 { 1238 {
1239 struct e1000_hw *hw = &adapter->hw; 1239 struct e1000_hw *hw = &adapter->hw;
1240 struct igbvf_ring *tx_ring = adapter->tx_ring; 1240 struct igbvf_ring *tx_ring = adapter->tx_ring;
1241 u64 tdba; 1241 u64 tdba;
1242 u32 txdctl, dca_txctrl; 1242 u32 txdctl, dca_txctrl;
1243 1243
1244 /* disable transmits */ 1244 /* disable transmits */
1245 txdctl = er32(TXDCTL(0)); 1245 txdctl = er32(TXDCTL(0));
1246 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); 1246 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1247 e1e_flush(); 1247 e1e_flush();
1248 msleep(10); 1248 msleep(10);
1249 1249
1250 /* Setup the HW Tx Head and Tail descriptor pointers */ 1250 /* Setup the HW Tx Head and Tail descriptor pointers */
1251 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc)); 1251 ew32(TDLEN(0), tx_ring->count * sizeof(union e1000_adv_tx_desc));
1252 tdba = tx_ring->dma; 1252 tdba = tx_ring->dma;
1253 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); 1253 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
1254 ew32(TDBAH(0), (tdba >> 32)); 1254 ew32(TDBAH(0), (tdba >> 32));
1255 ew32(TDH(0), 0); 1255 ew32(TDH(0), 0);
1256 ew32(TDT(0), 0); 1256 ew32(TDT(0), 0);
1257 tx_ring->head = E1000_TDH(0); 1257 tx_ring->head = E1000_TDH(0);
1258 tx_ring->tail = E1000_TDT(0); 1258 tx_ring->tail = E1000_TDT(0);
1259 1259
1260 /* Turn off Relaxed Ordering on head write-backs. The writebacks 1260 /* Turn off Relaxed Ordering on head write-backs. The writebacks
1261 * MUST be delivered in order or it will completely screw up 1261 * MUST be delivered in order or it will completely screw up
1262 * our bookkeeping. 1262 * our bookkeeping.
1263 */ 1263 */
1264 dca_txctrl = er32(DCA_TXCTRL(0)); 1264 dca_txctrl = er32(DCA_TXCTRL(0));
1265 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN; 1265 dca_txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1266 ew32(DCA_TXCTRL(0), dca_txctrl); 1266 ew32(DCA_TXCTRL(0), dca_txctrl);
1267 1267
1268 /* enable transmits */ 1268 /* enable transmits */
1269 txdctl |= E1000_TXDCTL_QUEUE_ENABLE; 1269 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1270 ew32(TXDCTL(0), txdctl); 1270 ew32(TXDCTL(0), txdctl);
1271 1271
1272 /* Setup Transmit Descriptor Settings for eop descriptor */ 1272 /* Setup Transmit Descriptor Settings for eop descriptor */
1273 adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS; 1273 adapter->txd_cmd = E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_IFCS;
1274 1274
1275 /* enable Report Status bit */ 1275 /* enable Report Status bit */
1276 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS; 1276 adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
1277 } 1277 }
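
Programming the ring base above is just a split of the 64-bit DMA address into the TDBAL/TDBAH pair. A standalone check of that mask-and-shift (the address is an arbitrary example value):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t tdba = 0x00000001234ab000ULL;			/* example descriptor ring base */
	uint32_t tdbal = (uint32_t)(tdba & 0xFFFFFFFFULL);	/* low 32 bits  -> TDBAL */
	uint32_t tdbah = (uint32_t)(tdba >> 32);		/* high 32 bits -> TDBAH */

	assert(tdbal == 0x234ab000u);
	assert(tdbah == 0x1u);
	return 0;
}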
1278 1278
1279 /** 1279 /**
1280 * igbvf_setup_srrctl - configure the receive control registers 1280 * igbvf_setup_srrctl - configure the receive control registers
1281 * @adapter: Board private structure 1281 * @adapter: Board private structure
1282 **/ 1282 **/
1283 static void igbvf_setup_srrctl(struct igbvf_adapter *adapter) 1283 static void igbvf_setup_srrctl(struct igbvf_adapter *adapter)
1284 { 1284 {
1285 struct e1000_hw *hw = &adapter->hw; 1285 struct e1000_hw *hw = &adapter->hw;
1286 u32 srrctl = 0; 1286 u32 srrctl = 0;
1287 1287
1288 srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK | 1288 srrctl &= ~(E1000_SRRCTL_DESCTYPE_MASK |
1289 E1000_SRRCTL_BSIZEHDR_MASK | 1289 E1000_SRRCTL_BSIZEHDR_MASK |
1290 E1000_SRRCTL_BSIZEPKT_MASK); 1290 E1000_SRRCTL_BSIZEPKT_MASK);
1291 1291
1292 /* Enable queue drop to avoid head of line blocking */ 1292 /* Enable queue drop to avoid head of line blocking */
1293 srrctl |= E1000_SRRCTL_DROP_EN; 1293 srrctl |= E1000_SRRCTL_DROP_EN;
1294 1294
1295 /* Setup buffer sizes */ 1295 /* Setup buffer sizes */
1296 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >> 1296 srrctl |= ALIGN(adapter->rx_buffer_len, 1024) >>
1297 E1000_SRRCTL_BSIZEPKT_SHIFT; 1297 E1000_SRRCTL_BSIZEPKT_SHIFT;
1298 1298
1299 if (adapter->rx_buffer_len < 2048) { 1299 if (adapter->rx_buffer_len < 2048) {
1300 adapter->rx_ps_hdr_size = 0; 1300 adapter->rx_ps_hdr_size = 0;
1301 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 1301 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
1302 } else { 1302 } else {
1303 adapter->rx_ps_hdr_size = 128; 1303 adapter->rx_ps_hdr_size = 128;
1304 srrctl |= adapter->rx_ps_hdr_size << 1304 srrctl |= adapter->rx_ps_hdr_size <<
1305 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; 1305 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
1306 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 1306 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1307 } 1307 }
1308 1308
1309 ew32(SRRCTL(0), srrctl); 1309 ew32(SRRCTL(0), srrctl);
1310 } 1310 }
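
The packet buffer size field computed above is the Rx buffer length rounded up to the next 1 KB and expressed in 1 KB units. A standalone sketch of that arithmetic; the shift value of 10 is an assumption standing in for E1000_SRRCTL_BSIZEPKT_SHIFT, and 1522 matches the default rx_buffer_len set in igbvf_sw_init():

#include <assert.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define BSIZEPKT_SHIFT	10	/* assumed: packet buffer size in 1 KB units */

int main(void)
{
	unsigned int rx_buffer_len = 1522;	/* ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN */
	unsigned int bsize = ALIGN_UP(rx_buffer_len, 1024u) >> BSIZEPKT_SHIFT;

	assert(bsize == 2);	/* 1522 rounds up to 2048 bytes -> two 1 KB units */
	return 0;
}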
1311 1311
1312 /** 1312 /**
1313 * igbvf_configure_rx - Configure Receive Unit after Reset 1313 * igbvf_configure_rx - Configure Receive Unit after Reset
1314 * @adapter: board private structure 1314 * @adapter: board private structure
1315 * 1315 *
1316 * Configure the Rx unit of the MAC after a reset. 1316 * Configure the Rx unit of the MAC after a reset.
1317 **/ 1317 **/
1318 static void igbvf_configure_rx(struct igbvf_adapter *adapter) 1318 static void igbvf_configure_rx(struct igbvf_adapter *adapter)
1319 { 1319 {
1320 struct e1000_hw *hw = &adapter->hw; 1320 struct e1000_hw *hw = &adapter->hw;
1321 struct igbvf_ring *rx_ring = adapter->rx_ring; 1321 struct igbvf_ring *rx_ring = adapter->rx_ring;
1322 u64 rdba; 1322 u64 rdba;
1323 u32 rdlen, rxdctl; 1323 u32 rdlen, rxdctl;
1324 1324
1325 /* disable receives */ 1325 /* disable receives */
1326 rxdctl = er32(RXDCTL(0)); 1326 rxdctl = er32(RXDCTL(0));
1327 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); 1327 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1328 e1e_flush(); 1328 e1e_flush();
1329 msleep(10); 1329 msleep(10);
1330 1330
1331 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1331 rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
1332 1332
1333 /* 1333 /*
1334 * Setup the HW Rx Head and Tail Descriptor Pointers and 1334 * Setup the HW Rx Head and Tail Descriptor Pointers and
1335 * the Base and Length of the Rx Descriptor Ring 1335 * the Base and Length of the Rx Descriptor Ring
1336 */ 1336 */
1337 rdba = rx_ring->dma; 1337 rdba = rx_ring->dma;
1338 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); 1338 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
1339 ew32(RDBAH(0), (rdba >> 32)); 1339 ew32(RDBAH(0), (rdba >> 32));
1340 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc)); 1340 ew32(RDLEN(0), rx_ring->count * sizeof(union e1000_adv_rx_desc));
1341 rx_ring->head = E1000_RDH(0); 1341 rx_ring->head = E1000_RDH(0);
1342 rx_ring->tail = E1000_RDT(0); 1342 rx_ring->tail = E1000_RDT(0);
1343 ew32(RDH(0), 0); 1343 ew32(RDH(0), 0);
1344 ew32(RDT(0), 0); 1344 ew32(RDT(0), 0);
1345 1345
1346 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE; 1346 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
1347 rxdctl &= 0xFFF00000; 1347 rxdctl &= 0xFFF00000;
1348 rxdctl |= IGBVF_RX_PTHRESH; 1348 rxdctl |= IGBVF_RX_PTHRESH;
1349 rxdctl |= IGBVF_RX_HTHRESH << 8; 1349 rxdctl |= IGBVF_RX_HTHRESH << 8;
1350 rxdctl |= IGBVF_RX_WTHRESH << 16; 1350 rxdctl |= IGBVF_RX_WTHRESH << 16;
1351 1351
1352 igbvf_set_rlpml(adapter); 1352 igbvf_set_rlpml(adapter);
1353 1353
1354 /* enable receives */ 1354 /* enable receives */
1355 ew32(RXDCTL(0), rxdctl); 1355 ew32(RXDCTL(0), rxdctl);
1356 } 1356 }
1357 1357
1358 /** 1358 /**
1359 * igbvf_set_multi - Multicast and Promiscuous mode set 1359 * igbvf_set_multi - Multicast and Promiscuous mode set
1360 * @netdev: network interface device structure 1360 * @netdev: network interface device structure
1361 * 1361 *
1362 * The set_multi entry point is called whenever the multicast address 1362 * The set_multi entry point is called whenever the multicast address
1363 * list or the network interface flags are updated. This routine is 1363 * list or the network interface flags are updated. This routine is
1364 * responsible for configuring the hardware for proper multicast, 1364 * responsible for configuring the hardware for proper multicast,
1365 * promiscuous mode, and all-multi behavior. 1365 * promiscuous mode, and all-multi behavior.
1366 **/ 1366 **/
1367 static void igbvf_set_multi(struct net_device *netdev) 1367 static void igbvf_set_multi(struct net_device *netdev)
1368 { 1368 {
1369 struct igbvf_adapter *adapter = netdev_priv(netdev); 1369 struct igbvf_adapter *adapter = netdev_priv(netdev);
1370 struct e1000_hw *hw = &adapter->hw; 1370 struct e1000_hw *hw = &adapter->hw;
1371 struct netdev_hw_addr *ha; 1371 struct netdev_hw_addr *ha;
1372 u8 *mta_list = NULL; 1372 u8 *mta_list = NULL;
1373 int i; 1373 int i;
1374 1374
1375 if (!netdev_mc_empty(netdev)) { 1375 if (!netdev_mc_empty(netdev)) {
1376 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC); 1376 mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
1377 if (!mta_list) { 1377 if (!mta_list) {
1378 dev_err(&adapter->pdev->dev, 1378 dev_err(&adapter->pdev->dev,
1379 "failed to allocate multicast filter list\n"); 1379 "failed to allocate multicast filter list\n");
1380 return; 1380 return;
1381 } 1381 }
1382 } 1382 }
1383 1383
1384 /* prepare a packed array of only addresses. */ 1384 /* prepare a packed array of only addresses. */
1385 i = 0; 1385 i = 0;
1386 netdev_for_each_mc_addr(ha, netdev) 1386 netdev_for_each_mc_addr(ha, netdev)
1387 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 1387 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1388 1388
1389 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); 1389 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1390 kfree(mta_list); 1390 kfree(mta_list);
1391 } 1391 }
1392 1392
1393 /** 1393 /**
1394 * igbvf_configure - configure the hardware for Rx and Tx 1394 * igbvf_configure - configure the hardware for Rx and Tx
1395 * @adapter: private board structure 1395 * @adapter: private board structure
1396 **/ 1396 **/
1397 static void igbvf_configure(struct igbvf_adapter *adapter) 1397 static void igbvf_configure(struct igbvf_adapter *adapter)
1398 { 1398 {
1399 igbvf_set_multi(adapter->netdev); 1399 igbvf_set_multi(adapter->netdev);
1400 1400
1401 igbvf_restore_vlan(adapter); 1401 igbvf_restore_vlan(adapter);
1402 1402
1403 igbvf_configure_tx(adapter); 1403 igbvf_configure_tx(adapter);
1404 igbvf_setup_srrctl(adapter); 1404 igbvf_setup_srrctl(adapter);
1405 igbvf_configure_rx(adapter); 1405 igbvf_configure_rx(adapter);
1406 igbvf_alloc_rx_buffers(adapter->rx_ring, 1406 igbvf_alloc_rx_buffers(adapter->rx_ring,
1407 igbvf_desc_unused(adapter->rx_ring)); 1407 igbvf_desc_unused(adapter->rx_ring));
1408 } 1408 }
1409 1409
1410 /* igbvf_reset - bring the hardware into a known good state 1410 /* igbvf_reset - bring the hardware into a known good state
1411 * 1411 *
1412 * This function boots the hardware and enables some settings that 1412 * This function boots the hardware and enables some settings that
1413 * require a configuration cycle of the hardware - those cannot be 1413 * require a configuration cycle of the hardware - those cannot be
1414 * set/changed during runtime. After reset the device needs to be 1414 * set/changed during runtime. After reset the device needs to be
1415 * properly configured for Rx, Tx etc. 1415 * properly configured for Rx, Tx etc.
1416 */ 1416 */
1417 static void igbvf_reset(struct igbvf_adapter *adapter) 1417 static void igbvf_reset(struct igbvf_adapter *adapter)
1418 { 1418 {
1419 struct e1000_mac_info *mac = &adapter->hw.mac; 1419 struct e1000_mac_info *mac = &adapter->hw.mac;
1420 struct net_device *netdev = adapter->netdev; 1420 struct net_device *netdev = adapter->netdev;
1421 struct e1000_hw *hw = &adapter->hw; 1421 struct e1000_hw *hw = &adapter->hw;
1422 1422
1423 /* Allow time for pending master requests to run */ 1423 /* Allow time for pending master requests to run */
1424 if (mac->ops.reset_hw(hw)) 1424 if (mac->ops.reset_hw(hw))
1425 dev_err(&adapter->pdev->dev, "PF still resetting\n"); 1425 dev_err(&adapter->pdev->dev, "PF still resetting\n");
1426 1426
1427 mac->ops.init_hw(hw); 1427 mac->ops.init_hw(hw);
1428 1428
1429 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1429 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1430 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1430 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1431 netdev->addr_len); 1431 netdev->addr_len);
1432 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1432 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1433 netdev->addr_len); 1433 netdev->addr_len);
1434 } 1434 }
1435 1435
1436 adapter->last_reset = jiffies; 1436 adapter->last_reset = jiffies;
1437 } 1437 }
1438 1438
1439 int igbvf_up(struct igbvf_adapter *adapter) 1439 int igbvf_up(struct igbvf_adapter *adapter)
1440 { 1440 {
1441 struct e1000_hw *hw = &adapter->hw; 1441 struct e1000_hw *hw = &adapter->hw;
1442 1442
1443 /* hardware has been reset, we need to reload some things */ 1443 /* hardware has been reset, we need to reload some things */
1444 igbvf_configure(adapter); 1444 igbvf_configure(adapter);
1445 1445
1446 clear_bit(__IGBVF_DOWN, &adapter->state); 1446 clear_bit(__IGBVF_DOWN, &adapter->state);
1447 1447
1448 napi_enable(&adapter->rx_ring->napi); 1448 napi_enable(&adapter->rx_ring->napi);
1449 if (adapter->msix_entries) 1449 if (adapter->msix_entries)
1450 igbvf_configure_msix(adapter); 1450 igbvf_configure_msix(adapter);
1451 1451
1452 /* Clear any pending interrupts. */ 1452 /* Clear any pending interrupts. */
1453 er32(EICR); 1453 er32(EICR);
1454 igbvf_irq_enable(adapter); 1454 igbvf_irq_enable(adapter);
1455 1455
1456 /* start the watchdog */ 1456 /* start the watchdog */
1457 hw->mac.get_link_status = 1; 1457 hw->mac.get_link_status = 1;
1458 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1458 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1459 1459
1460 1460
1461 return 0; 1461 return 0;
1462 } 1462 }
1463 1463
1464 void igbvf_down(struct igbvf_adapter *adapter) 1464 void igbvf_down(struct igbvf_adapter *adapter)
1465 { 1465 {
1466 struct net_device *netdev = adapter->netdev; 1466 struct net_device *netdev = adapter->netdev;
1467 struct e1000_hw *hw = &adapter->hw; 1467 struct e1000_hw *hw = &adapter->hw;
1468 u32 rxdctl, txdctl; 1468 u32 rxdctl, txdctl;
1469 1469
1470 /* 1470 /*
1471 * signal that we're down so the interrupt handler does not 1471 * signal that we're down so the interrupt handler does not
1472 * reschedule our watchdog timer 1472 * reschedule our watchdog timer
1473 */ 1473 */
1474 set_bit(__IGBVF_DOWN, &adapter->state); 1474 set_bit(__IGBVF_DOWN, &adapter->state);
1475 1475
1476 /* disable receives in the hardware */ 1476 /* disable receives in the hardware */
1477 rxdctl = er32(RXDCTL(0)); 1477 rxdctl = er32(RXDCTL(0));
1478 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE); 1478 ew32(RXDCTL(0), rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
1479 1479
1480 netif_stop_queue(netdev); 1480 netif_stop_queue(netdev);
1481 1481
1482 /* disable transmits in the hardware */ 1482 /* disable transmits in the hardware */
1483 txdctl = er32(TXDCTL(0)); 1483 txdctl = er32(TXDCTL(0));
1484 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE); 1484 ew32(TXDCTL(0), txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
1485 1485
1486 /* flush both disables and wait for them to finish */ 1486 /* flush both disables and wait for them to finish */
1487 e1e_flush(); 1487 e1e_flush();
1488 msleep(10); 1488 msleep(10);
1489 1489
1490 napi_disable(&adapter->rx_ring->napi); 1490 napi_disable(&adapter->rx_ring->napi);
1491 1491
1492 igbvf_irq_disable(adapter); 1492 igbvf_irq_disable(adapter);
1493 1493
1494 del_timer_sync(&adapter->watchdog_timer); 1494 del_timer_sync(&adapter->watchdog_timer);
1495 1495
1496 netif_carrier_off(netdev); 1496 netif_carrier_off(netdev);
1497 1497
1498 /* record the stats before reset */ 1498 /* record the stats before reset */
1499 igbvf_update_stats(adapter); 1499 igbvf_update_stats(adapter);
1500 1500
1501 adapter->link_speed = 0; 1501 adapter->link_speed = 0;
1502 adapter->link_duplex = 0; 1502 adapter->link_duplex = 0;
1503 1503
1504 igbvf_reset(adapter); 1504 igbvf_reset(adapter);
1505 igbvf_clean_tx_ring(adapter->tx_ring); 1505 igbvf_clean_tx_ring(adapter->tx_ring);
1506 igbvf_clean_rx_ring(adapter->rx_ring); 1506 igbvf_clean_rx_ring(adapter->rx_ring);
1507 } 1507 }
1508 1508
1509 void igbvf_reinit_locked(struct igbvf_adapter *adapter) 1509 void igbvf_reinit_locked(struct igbvf_adapter *adapter)
1510 { 1510 {
1511 might_sleep(); 1511 might_sleep();
1512 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 1512 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
1513 msleep(1); 1513 msleep(1);
1514 igbvf_down(adapter); 1514 igbvf_down(adapter);
1515 igbvf_up(adapter); 1515 igbvf_up(adapter);
1516 clear_bit(__IGBVF_RESETTING, &adapter->state); 1516 clear_bit(__IGBVF_RESETTING, &adapter->state);
1517 } 1517 }
1518 1518
1519 /** 1519 /**
1520 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter) 1520 * igbvf_sw_init - Initialize general software structures (struct igbvf_adapter)
1521 * @adapter: board private structure to initialize 1521 * @adapter: board private structure to initialize
1522 * 1522 *
1523 * igbvf_sw_init initializes the Adapter private data structure. 1523 * igbvf_sw_init initializes the Adapter private data structure.
1524 * Fields are initialized based on PCI device information and 1524 * Fields are initialized based on PCI device information and
1525 * OS network device settings (MTU size). 1525 * OS network device settings (MTU size).
1526 **/ 1526 **/
1527 static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter) 1527 static int __devinit igbvf_sw_init(struct igbvf_adapter *adapter)
1528 { 1528 {
1529 struct net_device *netdev = adapter->netdev; 1529 struct net_device *netdev = adapter->netdev;
1530 s32 rc; 1530 s32 rc;
1531 1531
1532 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 1532 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
1533 adapter->rx_ps_hdr_size = 0; 1533 adapter->rx_ps_hdr_size = 0;
1534 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1534 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1535 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1535 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1536 1536
1537 adapter->tx_int_delay = 8; 1537 adapter->tx_int_delay = 8;
1538 adapter->tx_abs_int_delay = 32; 1538 adapter->tx_abs_int_delay = 32;
1539 adapter->rx_int_delay = 0; 1539 adapter->rx_int_delay = 0;
1540 adapter->rx_abs_int_delay = 8; 1540 adapter->rx_abs_int_delay = 8;
1541 adapter->requested_itr = 3; 1541 adapter->requested_itr = 3;
1542 adapter->current_itr = IGBVF_START_ITR; 1542 adapter->current_itr = IGBVF_START_ITR;
1543 1543
1544 /* Set various function pointers */ 1544 /* Set various function pointers */
1545 adapter->ei->init_ops(&adapter->hw); 1545 adapter->ei->init_ops(&adapter->hw);
1546 1546
1547 rc = adapter->hw.mac.ops.init_params(&adapter->hw); 1547 rc = adapter->hw.mac.ops.init_params(&adapter->hw);
1548 if (rc) 1548 if (rc)
1549 return rc; 1549 return rc;
1550 1550
1551 rc = adapter->hw.mbx.ops.init_params(&adapter->hw); 1551 rc = adapter->hw.mbx.ops.init_params(&adapter->hw);
1552 if (rc) 1552 if (rc)
1553 return rc; 1553 return rc;
1554 1554
1555 igbvf_set_interrupt_capability(adapter); 1555 igbvf_set_interrupt_capability(adapter);
1556 1556
1557 if (igbvf_alloc_queues(adapter)) 1557 if (igbvf_alloc_queues(adapter))
1558 return -ENOMEM; 1558 return -ENOMEM;
1559 1559
1560 spin_lock_init(&adapter->tx_queue_lock); 1560 spin_lock_init(&adapter->tx_queue_lock);
1561 1561
1562 /* Explicitly disable IRQ since the NIC can be in any state. */ 1562 /* Explicitly disable IRQ since the NIC can be in any state. */
1563 igbvf_irq_disable(adapter); 1563 igbvf_irq_disable(adapter);
1564 1564
1565 spin_lock_init(&adapter->stats_lock); 1565 spin_lock_init(&adapter->stats_lock);
1566 1566
1567 set_bit(__IGBVF_DOWN, &adapter->state); 1567 set_bit(__IGBVF_DOWN, &adapter->state);
1568 return 0; 1568 return 0;
1569 } 1569 }
1570 1570
1571 static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter) 1571 static void igbvf_initialize_last_counter_stats(struct igbvf_adapter *adapter)
1572 { 1572 {
1573 struct e1000_hw *hw = &adapter->hw; 1573 struct e1000_hw *hw = &adapter->hw;
1574 1574
1575 adapter->stats.last_gprc = er32(VFGPRC); 1575 adapter->stats.last_gprc = er32(VFGPRC);
1576 adapter->stats.last_gorc = er32(VFGORC); 1576 adapter->stats.last_gorc = er32(VFGORC);
1577 adapter->stats.last_gptc = er32(VFGPTC); 1577 adapter->stats.last_gptc = er32(VFGPTC);
1578 adapter->stats.last_gotc = er32(VFGOTC); 1578 adapter->stats.last_gotc = er32(VFGOTC);
1579 adapter->stats.last_mprc = er32(VFMPRC); 1579 adapter->stats.last_mprc = er32(VFMPRC);
1580 adapter->stats.last_gotlbc = er32(VFGOTLBC); 1580 adapter->stats.last_gotlbc = er32(VFGOTLBC);
1581 adapter->stats.last_gptlbc = er32(VFGPTLBC); 1581 adapter->stats.last_gptlbc = er32(VFGPTLBC);
1582 adapter->stats.last_gorlbc = er32(VFGORLBC); 1582 adapter->stats.last_gorlbc = er32(VFGORLBC);
1583 adapter->stats.last_gprlbc = er32(VFGPRLBC); 1583 adapter->stats.last_gprlbc = er32(VFGPRLBC);
1584 1584
1585 adapter->stats.base_gprc = er32(VFGPRC); 1585 adapter->stats.base_gprc = er32(VFGPRC);
1586 adapter->stats.base_gorc = er32(VFGORC); 1586 adapter->stats.base_gorc = er32(VFGORC);
1587 adapter->stats.base_gptc = er32(VFGPTC); 1587 adapter->stats.base_gptc = er32(VFGPTC);
1588 adapter->stats.base_gotc = er32(VFGOTC); 1588 adapter->stats.base_gotc = er32(VFGOTC);
1589 adapter->stats.base_mprc = er32(VFMPRC); 1589 adapter->stats.base_mprc = er32(VFMPRC);
1590 adapter->stats.base_gotlbc = er32(VFGOTLBC); 1590 adapter->stats.base_gotlbc = er32(VFGOTLBC);
1591 adapter->stats.base_gptlbc = er32(VFGPTLBC); 1591 adapter->stats.base_gptlbc = er32(VFGPTLBC);
1592 adapter->stats.base_gorlbc = er32(VFGORLBC); 1592 adapter->stats.base_gorlbc = er32(VFGORLBC);
1593 adapter->stats.base_gprlbc = er32(VFGPRLBC); 1593 adapter->stats.base_gprlbc = er32(VFGPRLBC);
1594 } 1594 }
1595 1595
1596 /** 1596 /**
1597 * igbvf_open - Called when a network interface is made active 1597 * igbvf_open - Called when a network interface is made active
1598 * @netdev: network interface device structure 1598 * @netdev: network interface device structure
1599 * 1599 *
1600 * Returns 0 on success, negative value on failure 1600 * Returns 0 on success, negative value on failure
1601 * 1601 *
1602 * The open entry point is called when a network interface is made 1602 * The open entry point is called when a network interface is made
1603 * active by the system (IFF_UP). At this point all resources needed 1603 * active by the system (IFF_UP). At this point all resources needed
1604 * for transmit and receive operations are allocated, the interrupt 1604 * for transmit and receive operations are allocated, the interrupt
1605 * handler is registered with the OS, the watchdog timer is started, 1605 * handler is registered with the OS, the watchdog timer is started,
1606 * and the stack is notified that the interface is ready. 1606 * and the stack is notified that the interface is ready.
1607 **/ 1607 **/
1608 static int igbvf_open(struct net_device *netdev) 1608 static int igbvf_open(struct net_device *netdev)
1609 { 1609 {
1610 struct igbvf_adapter *adapter = netdev_priv(netdev); 1610 struct igbvf_adapter *adapter = netdev_priv(netdev);
1611 struct e1000_hw *hw = &adapter->hw; 1611 struct e1000_hw *hw = &adapter->hw;
1612 int err; 1612 int err;
1613 1613
1614 /* disallow open during test */ 1614 /* disallow open during test */
1615 if (test_bit(__IGBVF_TESTING, &adapter->state)) 1615 if (test_bit(__IGBVF_TESTING, &adapter->state))
1616 return -EBUSY; 1616 return -EBUSY;
1617 1617
1618 /* allocate transmit descriptors */ 1618 /* allocate transmit descriptors */
1619 err = igbvf_setup_tx_resources(adapter, adapter->tx_ring); 1619 err = igbvf_setup_tx_resources(adapter, adapter->tx_ring);
1620 if (err) 1620 if (err)
1621 goto err_setup_tx; 1621 goto err_setup_tx;
1622 1622
1623 /* allocate receive descriptors */ 1623 /* allocate receive descriptors */
1624 err = igbvf_setup_rx_resources(adapter, adapter->rx_ring); 1624 err = igbvf_setup_rx_resources(adapter, adapter->rx_ring);
1625 if (err) 1625 if (err)
1626 goto err_setup_rx; 1626 goto err_setup_rx;
1627 1627
1628 /* 1628 /*
1629 * before we allocate an interrupt, we must be ready to handle it. 1629 * before we allocate an interrupt, we must be ready to handle it.
1630 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1630 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1631 * as soon as we call pci_request_irq, so we have to setup our 1631 * as soon as we call pci_request_irq, so we have to setup our
1632 * clean_rx handler before we do so. 1632 * clean_rx handler before we do so.
1633 */ 1633 */
1634 igbvf_configure(adapter); 1634 igbvf_configure(adapter);
1635 1635
1636 err = igbvf_request_irq(adapter); 1636 err = igbvf_request_irq(adapter);
1637 if (err) 1637 if (err)
1638 goto err_req_irq; 1638 goto err_req_irq;
1639 1639
1640 /* From here on the code is the same as igbvf_up() */ 1640 /* From here on the code is the same as igbvf_up() */
1641 clear_bit(__IGBVF_DOWN, &adapter->state); 1641 clear_bit(__IGBVF_DOWN, &adapter->state);
1642 1642
1643 napi_enable(&adapter->rx_ring->napi); 1643 napi_enable(&adapter->rx_ring->napi);
1644 1644
1645 /* clear any pending interrupts */ 1645 /* clear any pending interrupts */
1646 er32(EICR); 1646 er32(EICR);
1647 1647
1648 igbvf_irq_enable(adapter); 1648 igbvf_irq_enable(adapter);
1649 1649
1650 /* start the watchdog */ 1650 /* start the watchdog */
1651 hw->mac.get_link_status = 1; 1651 hw->mac.get_link_status = 1;
1652 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1652 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1653 1653
1654 return 0; 1654 return 0;
1655 1655
1656 err_req_irq: 1656 err_req_irq:
1657 igbvf_free_rx_resources(adapter->rx_ring); 1657 igbvf_free_rx_resources(adapter->rx_ring);
1658 err_setup_rx: 1658 err_setup_rx:
1659 igbvf_free_tx_resources(adapter->tx_ring); 1659 igbvf_free_tx_resources(adapter->tx_ring);
1660 err_setup_tx: 1660 err_setup_tx:
1661 igbvf_reset(adapter); 1661 igbvf_reset(adapter);
1662 1662
1663 return err; 1663 return err;
1664 } 1664 }
1665 1665
1666 /** 1666 /**
1667 * igbvf_close - Disables a network interface 1667 * igbvf_close - Disables a network interface
1668 * @netdev: network interface device structure 1668 * @netdev: network interface device structure
1669 * 1669 *
1670 * Returns 0, this is not allowed to fail 1670 * Returns 0, this is not allowed to fail
1671 * 1671 *
1672 * The close entry point is called when an interface is de-activated 1672 * The close entry point is called when an interface is de-activated
1673 * by the OS. The hardware is still under the driver's control, but 1673 * by the OS. The hardware is still under the driver's control, but
1674 * needs to be disabled. A global MAC reset is issued to stop the 1674 * needs to be disabled. A global MAC reset is issued to stop the
1675 * hardware, and all transmit and receive resources are freed. 1675 * hardware, and all transmit and receive resources are freed.
1676 **/ 1676 **/
1677 static int igbvf_close(struct net_device *netdev) 1677 static int igbvf_close(struct net_device *netdev)
1678 { 1678 {
1679 struct igbvf_adapter *adapter = netdev_priv(netdev); 1679 struct igbvf_adapter *adapter = netdev_priv(netdev);
1680 1680
1681 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); 1681 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
1682 igbvf_down(adapter); 1682 igbvf_down(adapter);
1683 1683
1684 igbvf_free_irq(adapter); 1684 igbvf_free_irq(adapter);
1685 1685
1686 igbvf_free_tx_resources(adapter->tx_ring); 1686 igbvf_free_tx_resources(adapter->tx_ring);
1687 igbvf_free_rx_resources(adapter->rx_ring); 1687 igbvf_free_rx_resources(adapter->rx_ring);
1688 1688
1689 return 0; 1689 return 0;
1690 } 1690 }
1691 /** 1691 /**
1692 * igbvf_set_mac - Change the Ethernet Address of the NIC 1692 * igbvf_set_mac - Change the Ethernet Address of the NIC
1693 * @netdev: network interface device structure 1693 * @netdev: network interface device structure
1694 * @p: pointer to an address structure 1694 * @p: pointer to an address structure
1695 * 1695 *
1696 * Returns 0 on success, negative on failure 1696 * Returns 0 on success, negative on failure
1697 **/ 1697 **/
1698 static int igbvf_set_mac(struct net_device *netdev, void *p) 1698 static int igbvf_set_mac(struct net_device *netdev, void *p)
1699 { 1699 {
1700 struct igbvf_adapter *adapter = netdev_priv(netdev); 1700 struct igbvf_adapter *adapter = netdev_priv(netdev);
1701 struct e1000_hw *hw = &adapter->hw; 1701 struct e1000_hw *hw = &adapter->hw;
1702 struct sockaddr *addr = p; 1702 struct sockaddr *addr = p;
1703 1703
1704 if (!is_valid_ether_addr(addr->sa_data)) 1704 if (!is_valid_ether_addr(addr->sa_data))
1705 return -EADDRNOTAVAIL; 1705 return -EADDRNOTAVAIL;
1706 1706
1707 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 1707 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
1708 1708
1709 hw->mac.ops.rar_set(hw, hw->mac.addr, 0); 1709 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
1710 1710
1711 if (memcmp(addr->sa_data, hw->mac.addr, 6)) 1711 if (memcmp(addr->sa_data, hw->mac.addr, 6))
1712 return -EADDRNOTAVAIL; 1712 return -EADDRNOTAVAIL;
1713 1713
1714 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 1714 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1715 1715
1716 return 0; 1716 return 0;
1717 } 1717 }
1718 1718
1719 #define UPDATE_VF_COUNTER(reg, name) \ 1719 #define UPDATE_VF_COUNTER(reg, name) \
1720 { \ 1720 { \
1721 u32 current_counter = er32(reg); \ 1721 u32 current_counter = er32(reg); \
1722 if (current_counter < adapter->stats.last_##name) \ 1722 if (current_counter < adapter->stats.last_##name) \
1723 adapter->stats.name += 0x100000000LL; \ 1723 adapter->stats.name += 0x100000000LL; \
1724 adapter->stats.last_##name = current_counter; \ 1724 adapter->stats.last_##name = current_counter; \
1725 adapter->stats.name &= 0xFFFFFFFF00000000LL; \ 1725 adapter->stats.name &= 0xFFFFFFFF00000000LL; \
1726 adapter->stats.name |= current_counter; \ 1726 adapter->stats.name |= current_counter; \
1727 } 1727 }
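
UPDATE_VF_COUNTER above widens the 32-bit hardware counters into 64-bit software totals by detecting rollover: the high word is bumped only when the fresh reading is smaller than the previous one. A minimal standalone version of the same logic:

#include <assert.h>
#include <stdint.h>

/* Fold a fresh 32-bit hardware reading into a running 64-bit counter. */
static void update_counter(uint64_t *total, uint32_t *last, uint32_t current)
{
	if (current < *last)			/* counter wrapped since last read */
		*total += 0x100000000ULL;
	*last = current;
	*total &= 0xFFFFFFFF00000000ULL;	/* keep the accumulated high word */
	*total |= current;			/* splice in the new low word */
}

int main(void)
{
	uint64_t total = 0;
	uint32_t last = 0;

	update_counter(&total, &last, 0xFFFFFFF0u);	/* close to the 32-bit limit */
	update_counter(&total, &last, 0x00000010u);	/* wrapped around */
	assert(total == 0x100000010ULL);
	return 0;
}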
1728 1728
1729 /** 1729 /**
1730 * igbvf_update_stats - Update the board statistics counters 1730 * igbvf_update_stats - Update the board statistics counters
1731 * @adapter: board private structure 1731 * @adapter: board private structure
1732 **/ 1732 **/
1733 void igbvf_update_stats(struct igbvf_adapter *adapter) 1733 void igbvf_update_stats(struct igbvf_adapter *adapter)
1734 { 1734 {
1735 struct e1000_hw *hw = &adapter->hw; 1735 struct e1000_hw *hw = &adapter->hw;
1736 struct pci_dev *pdev = adapter->pdev; 1736 struct pci_dev *pdev = adapter->pdev;
1737 1737
1738 /* 1738 /*
1739 * Prevent stats update while adapter is being reset, link is down 1739 * Prevent stats update while adapter is being reset, link is down
1740 * or if the pci connection is down. 1740 * or if the pci connection is down.
1741 */ 1741 */
1742 if (adapter->link_speed == 0) 1742 if (adapter->link_speed == 0)
1743 return; 1743 return;
1744 1744
1745 if (test_bit(__IGBVF_RESETTING, &adapter->state)) 1745 if (test_bit(__IGBVF_RESETTING, &adapter->state))
1746 return; 1746 return;
1747 1747
1748 if (pci_channel_offline(pdev)) 1748 if (pci_channel_offline(pdev))
1749 return; 1749 return;
1750 1750
1751 UPDATE_VF_COUNTER(VFGPRC, gprc); 1751 UPDATE_VF_COUNTER(VFGPRC, gprc);
1752 UPDATE_VF_COUNTER(VFGORC, gorc); 1752 UPDATE_VF_COUNTER(VFGORC, gorc);
1753 UPDATE_VF_COUNTER(VFGPTC, gptc); 1753 UPDATE_VF_COUNTER(VFGPTC, gptc);
1754 UPDATE_VF_COUNTER(VFGOTC, gotc); 1754 UPDATE_VF_COUNTER(VFGOTC, gotc);
1755 UPDATE_VF_COUNTER(VFMPRC, mprc); 1755 UPDATE_VF_COUNTER(VFMPRC, mprc);
1756 UPDATE_VF_COUNTER(VFGOTLBC, gotlbc); 1756 UPDATE_VF_COUNTER(VFGOTLBC, gotlbc);
1757 UPDATE_VF_COUNTER(VFGPTLBC, gptlbc); 1757 UPDATE_VF_COUNTER(VFGPTLBC, gptlbc);
1758 UPDATE_VF_COUNTER(VFGORLBC, gorlbc); 1758 UPDATE_VF_COUNTER(VFGORLBC, gorlbc);
1759 UPDATE_VF_COUNTER(VFGPRLBC, gprlbc); 1759 UPDATE_VF_COUNTER(VFGPRLBC, gprlbc);
1760 1760
1761 /* Fill out the OS statistics structure */ 1761 /* Fill out the OS statistics structure */
1762 adapter->net_stats.multicast = adapter->stats.mprc; 1762 adapter->net_stats.multicast = adapter->stats.mprc;
1763 } 1763 }
1764 1764
1765 static void igbvf_print_link_info(struct igbvf_adapter *adapter) 1765 static void igbvf_print_link_info(struct igbvf_adapter *adapter)
1766 { 1766 {
1767 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n", 1767 dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
1768 adapter->link_speed, 1768 adapter->link_speed,
1769 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half"); 1769 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
1770 } 1770 }
1771 1771
1772 static bool igbvf_has_link(struct igbvf_adapter *adapter) 1772 static bool igbvf_has_link(struct igbvf_adapter *adapter)
1773 { 1773 {
1774 struct e1000_hw *hw = &adapter->hw; 1774 struct e1000_hw *hw = &adapter->hw;
1775 s32 ret_val = E1000_SUCCESS; 1775 s32 ret_val = E1000_SUCCESS;
1776 bool link_active; 1776 bool link_active;
1777 1777
1778 /* If interface is down, stay link down */ 1778 /* If interface is down, stay link down */
1779 if (test_bit(__IGBVF_DOWN, &adapter->state)) 1779 if (test_bit(__IGBVF_DOWN, &adapter->state))
1780 return false; 1780 return false;
1781 1781
1782 ret_val = hw->mac.ops.check_for_link(hw); 1782 ret_val = hw->mac.ops.check_for_link(hw);
1783 link_active = !hw->mac.get_link_status; 1783 link_active = !hw->mac.get_link_status;
1784 1784
1785 /* if check for link returns error we will need to reset */ 1785 /* if check for link returns error we will need to reset */
1786 if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ))) 1786 if (ret_val && time_after(jiffies, adapter->last_reset + (10 * HZ)))
1787 schedule_work(&adapter->reset_task); 1787 schedule_work(&adapter->reset_task);
1788 1788
1789 return link_active; 1789 return link_active;
1790 } 1790 }
1791 1791
1792 /** 1792 /**
1793 * igbvf_watchdog - Timer Call-back 1793 * igbvf_watchdog - Timer Call-back
1794 * @data: pointer to adapter cast into an unsigned long 1794 * @data: pointer to adapter cast into an unsigned long
1795 **/ 1795 **/
1796 static void igbvf_watchdog(unsigned long data) 1796 static void igbvf_watchdog(unsigned long data)
1797 { 1797 {
1798 struct igbvf_adapter *adapter = (struct igbvf_adapter *) data; 1798 struct igbvf_adapter *adapter = (struct igbvf_adapter *) data;
1799 1799
1800 /* Do the rest outside of interrupt context */ 1800 /* Do the rest outside of interrupt context */
1801 schedule_work(&adapter->watchdog_task); 1801 schedule_work(&adapter->watchdog_task);
1802 } 1802 }
1803 1803
1804 static void igbvf_watchdog_task(struct work_struct *work) 1804 static void igbvf_watchdog_task(struct work_struct *work)
1805 { 1805 {
1806 struct igbvf_adapter *adapter = container_of(work, 1806 struct igbvf_adapter *adapter = container_of(work,
1807 struct igbvf_adapter, 1807 struct igbvf_adapter,
1808 watchdog_task); 1808 watchdog_task);
1809 struct net_device *netdev = adapter->netdev; 1809 struct net_device *netdev = adapter->netdev;
1810 struct e1000_mac_info *mac = &adapter->hw.mac; 1810 struct e1000_mac_info *mac = &adapter->hw.mac;
1811 struct igbvf_ring *tx_ring = adapter->tx_ring; 1811 struct igbvf_ring *tx_ring = adapter->tx_ring;
1812 struct e1000_hw *hw = &adapter->hw; 1812 struct e1000_hw *hw = &adapter->hw;
1813 u32 link; 1813 u32 link;
1814 int tx_pending = 0; 1814 int tx_pending = 0;
1815 1815
1816 link = igbvf_has_link(adapter); 1816 link = igbvf_has_link(adapter);
1817 1817
1818 if (link) { 1818 if (link) {
1819 if (!netif_carrier_ok(netdev)) { 1819 if (!netif_carrier_ok(netdev)) {
1820 mac->ops.get_link_up_info(&adapter->hw, 1820 mac->ops.get_link_up_info(&adapter->hw,
1821 &adapter->link_speed, 1821 &adapter->link_speed,
1822 &adapter->link_duplex); 1822 &adapter->link_duplex);
1823 igbvf_print_link_info(adapter); 1823 igbvf_print_link_info(adapter);
1824 1824
1825 netif_carrier_on(netdev); 1825 netif_carrier_on(netdev);
1826 netif_wake_queue(netdev); 1826 netif_wake_queue(netdev);
1827 } 1827 }
1828 } else { 1828 } else {
1829 if (netif_carrier_ok(netdev)) { 1829 if (netif_carrier_ok(netdev)) {
1830 adapter->link_speed = 0; 1830 adapter->link_speed = 0;
1831 adapter->link_duplex = 0; 1831 adapter->link_duplex = 0;
1832 dev_info(&adapter->pdev->dev, "Link is Down\n"); 1832 dev_info(&adapter->pdev->dev, "Link is Down\n");
1833 netif_carrier_off(netdev); 1833 netif_carrier_off(netdev);
1834 netif_stop_queue(netdev); 1834 netif_stop_queue(netdev);
1835 } 1835 }
1836 } 1836 }
1837 1837
1838 if (netif_carrier_ok(netdev)) { 1838 if (netif_carrier_ok(netdev)) {
1839 igbvf_update_stats(adapter); 1839 igbvf_update_stats(adapter);
1840 } else { 1840 } else {
1841 tx_pending = (igbvf_desc_unused(tx_ring) + 1 < 1841 tx_pending = (igbvf_desc_unused(tx_ring) + 1 <
1842 tx_ring->count); 1842 tx_ring->count);
1843 if (tx_pending) { 1843 if (tx_pending) {
1844 /* 1844 /*
1845 * We've lost link, so the controller stops DMA, 1845 * We've lost link, so the controller stops DMA,
1846 * but we've got queued Tx work that's never going 1846 * but we've got queued Tx work that's never going
1847 * to get done, so reset controller to flush Tx. 1847 * to get done, so reset controller to flush Tx.
1848 * (Do the reset outside of interrupt context). 1848 * (Do the reset outside of interrupt context).
1849 */ 1849 */
1850 adapter->tx_timeout_count++; 1850 adapter->tx_timeout_count++;
1851 schedule_work(&adapter->reset_task); 1851 schedule_work(&adapter->reset_task);
1852 } 1852 }
1853 } 1853 }
1854 1854
1855 /* Cause software interrupt to ensure Rx ring is cleaned */ 1855 /* Cause software interrupt to ensure Rx ring is cleaned */
1856 ew32(EICS, adapter->rx_ring->eims_value); 1856 ew32(EICS, adapter->rx_ring->eims_value);
1857 1857
1858 /* Reset the timer */ 1858 /* Reset the timer */
1859 if (!test_bit(__IGBVF_DOWN, &adapter->state)) 1859 if (!test_bit(__IGBVF_DOWN, &adapter->state))
1860 mod_timer(&adapter->watchdog_timer, 1860 mod_timer(&adapter->watchdog_timer,
1861 round_jiffies(jiffies + (2 * HZ))); 1861 round_jiffies(jiffies + (2 * HZ)));
1862 } 1862 }
1863 1863
1864 #define IGBVF_TX_FLAGS_CSUM 0x00000001 1864 #define IGBVF_TX_FLAGS_CSUM 0x00000001
1865 #define IGBVF_TX_FLAGS_VLAN 0x00000002 1865 #define IGBVF_TX_FLAGS_VLAN 0x00000002
1866 #define IGBVF_TX_FLAGS_TSO 0x00000004 1866 #define IGBVF_TX_FLAGS_TSO 0x00000004
1867 #define IGBVF_TX_FLAGS_IPV4 0x00000008 1867 #define IGBVF_TX_FLAGS_IPV4 0x00000008
1868 #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000 1868 #define IGBVF_TX_FLAGS_VLAN_MASK 0xffff0000
1869 #define IGBVF_TX_FLAGS_VLAN_SHIFT 16 1869 #define IGBVF_TX_FLAGS_VLAN_SHIFT 16
1870 1870
1871 static int igbvf_tso(struct igbvf_adapter *adapter, 1871 static int igbvf_tso(struct igbvf_adapter *adapter,
1872 struct igbvf_ring *tx_ring, 1872 struct igbvf_ring *tx_ring,
1873 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 1873 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
1874 { 1874 {
1875 struct e1000_adv_tx_context_desc *context_desc; 1875 struct e1000_adv_tx_context_desc *context_desc;
1876 unsigned int i; 1876 unsigned int i;
1877 int err; 1877 int err;
1878 struct igbvf_buffer *buffer_info; 1878 struct igbvf_buffer *buffer_info;
1879 u32 info = 0, tu_cmd = 0; 1879 u32 info = 0, tu_cmd = 0;
1880 u32 mss_l4len_idx, l4len; 1880 u32 mss_l4len_idx, l4len;
1881 *hdr_len = 0; 1881 *hdr_len = 0;
1882 1882
1883 if (skb_header_cloned(skb)) { 1883 if (skb_header_cloned(skb)) {
1884 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1884 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1885 if (err) { 1885 if (err) {
1886 dev_err(&adapter->pdev->dev, 1886 dev_err(&adapter->pdev->dev,
1887 "igbvf_tso returning an error\n"); 1887 "igbvf_tso returning an error\n");
1888 return err; 1888 return err;
1889 } 1889 }
1890 } 1890 }
1891 1891
1892 l4len = tcp_hdrlen(skb); 1892 l4len = tcp_hdrlen(skb);
1893 *hdr_len += l4len; 1893 *hdr_len += l4len;
1894 1894
1895 if (skb->protocol == htons(ETH_P_IP)) { 1895 if (skb->protocol == htons(ETH_P_IP)) {
1896 struct iphdr *iph = ip_hdr(skb); 1896 struct iphdr *iph = ip_hdr(skb);
1897 iph->tot_len = 0; 1897 iph->tot_len = 0;
1898 iph->check = 0; 1898 iph->check = 0;
1899 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 1899 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1900 iph->daddr, 0, 1900 iph->daddr, 0,
1901 IPPROTO_TCP, 1901 IPPROTO_TCP,
1902 0); 1902 0);
1903 } else if (skb_is_gso_v6(skb)) { 1903 } else if (skb_is_gso_v6(skb)) {
1904 ipv6_hdr(skb)->payload_len = 0; 1904 ipv6_hdr(skb)->payload_len = 0;
1905 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 1905 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1906 &ipv6_hdr(skb)->daddr, 1906 &ipv6_hdr(skb)->daddr,
1907 0, IPPROTO_TCP, 0); 1907 0, IPPROTO_TCP, 0);
1908 } 1908 }
1909 1909
1910 i = tx_ring->next_to_use; 1910 i = tx_ring->next_to_use;
1911 1911
1912 buffer_info = &tx_ring->buffer_info[i]; 1912 buffer_info = &tx_ring->buffer_info[i];
1913 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); 1913 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1914 /* VLAN MACLEN IPLEN */ 1914 /* VLAN MACLEN IPLEN */
1915 if (tx_flags & IGBVF_TX_FLAGS_VLAN) 1915 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
1916 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); 1916 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
1917 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 1917 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
1918 *hdr_len += skb_network_offset(skb); 1918 *hdr_len += skb_network_offset(skb);
1919 info |= (skb_transport_header(skb) - skb_network_header(skb)); 1919 info |= (skb_transport_header(skb) - skb_network_header(skb));
1920 *hdr_len += (skb_transport_header(skb) - skb_network_header(skb)); 1920 *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
1921 context_desc->vlan_macip_lens = cpu_to_le32(info); 1921 context_desc->vlan_macip_lens = cpu_to_le32(info);
1922 1922
1923 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 1923 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
1924 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 1924 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1925 1925
1926 if (skb->protocol == htons(ETH_P_IP)) 1926 if (skb->protocol == htons(ETH_P_IP))
1927 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1927 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1928 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1928 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1929 1929
1930 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 1930 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
1931 1931
1932 /* MSS L4LEN IDX */ 1932 /* MSS L4LEN IDX */
1933 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT); 1933 mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
1934 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 1934 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
1935 1935
1936 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 1936 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
1937 context_desc->seqnum_seed = 0; 1937 context_desc->seqnum_seed = 0;
1938 1938
1939 buffer_info->time_stamp = jiffies; 1939 buffer_info->time_stamp = jiffies;
1940 buffer_info->next_to_watch = i; 1940 buffer_info->next_to_watch = i;
1941 buffer_info->dma = 0; 1941 buffer_info->dma = 0;
1942 i++; 1942 i++;
1943 if (i == tx_ring->count) 1943 if (i == tx_ring->count)
1944 i = 0; 1944 i = 0;
1945 1945
1946 tx_ring->next_to_use = i; 1946 tx_ring->next_to_use = i;
1947 1947
1948 return true; 1948 return true;
1949 } 1949 }
1950 1950
1951 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter, 1951 static inline bool igbvf_tx_csum(struct igbvf_adapter *adapter,
1952 struct igbvf_ring *tx_ring, 1952 struct igbvf_ring *tx_ring,
1953 struct sk_buff *skb, u32 tx_flags) 1953 struct sk_buff *skb, u32 tx_flags)
1954 { 1954 {
1955 struct e1000_adv_tx_context_desc *context_desc; 1955 struct e1000_adv_tx_context_desc *context_desc;
1956 unsigned int i; 1956 unsigned int i;
1957 struct igbvf_buffer *buffer_info; 1957 struct igbvf_buffer *buffer_info;
1958 u32 info = 0, tu_cmd = 0; 1958 u32 info = 0, tu_cmd = 0;
1959 1959
1960 if ((skb->ip_summed == CHECKSUM_PARTIAL) || 1960 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
1961 (tx_flags & IGBVF_TX_FLAGS_VLAN)) { 1961 (tx_flags & IGBVF_TX_FLAGS_VLAN)) {
1962 i = tx_ring->next_to_use; 1962 i = tx_ring->next_to_use;
1963 buffer_info = &tx_ring->buffer_info[i]; 1963 buffer_info = &tx_ring->buffer_info[i];
1964 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i); 1964 context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
1965 1965
1966 if (tx_flags & IGBVF_TX_FLAGS_VLAN) 1966 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
1967 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK); 1967 info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
1968 1968
1969 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 1969 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
1970 if (skb->ip_summed == CHECKSUM_PARTIAL) 1970 if (skb->ip_summed == CHECKSUM_PARTIAL)
1971 info |= (skb_transport_header(skb) - 1971 info |= (skb_transport_header(skb) -
1972 skb_network_header(skb)); 1972 skb_network_header(skb));
1973 1973
1974 1974
1975 context_desc->vlan_macip_lens = cpu_to_le32(info); 1975 context_desc->vlan_macip_lens = cpu_to_le32(info);
1976 1976
1977 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT); 1977 tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
1978 1978
1979 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1979 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1980 switch (skb->protocol) { 1980 switch (skb->protocol) {
1981 case __constant_htons(ETH_P_IP): 1981 case __constant_htons(ETH_P_IP):
1982 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4; 1982 tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
1983 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 1983 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1984 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1984 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1985 break; 1985 break;
1986 case __constant_htons(ETH_P_IPV6): 1986 case __constant_htons(ETH_P_IPV6):
1987 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 1987 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1988 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP; 1988 tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
1989 break; 1989 break;
1990 default: 1990 default:
1991 break; 1991 break;
1992 } 1992 }
1993 } 1993 }
1994 1994
1995 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 1995 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
1996 context_desc->seqnum_seed = 0; 1996 context_desc->seqnum_seed = 0;
1997 context_desc->mss_l4len_idx = 0; 1997 context_desc->mss_l4len_idx = 0;
1998 1998
1999 buffer_info->time_stamp = jiffies; 1999 buffer_info->time_stamp = jiffies;
2000 buffer_info->next_to_watch = i; 2000 buffer_info->next_to_watch = i;
2001 buffer_info->dma = 0; 2001 buffer_info->dma = 0;
2002 i++; 2002 i++;
2003 if (i == tx_ring->count) 2003 if (i == tx_ring->count)
2004 i = 0; 2004 i = 0;
2005 tx_ring->next_to_use = i; 2005 tx_ring->next_to_use = i;
2006 2006
2007 return true; 2007 return true;
2008 } 2008 }
2009 2009
2010 return false; 2010 return false;
2011 } 2011 }
2012 2012
2013 static int igbvf_maybe_stop_tx(struct net_device *netdev, int size) 2013 static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
2014 { 2014 {
2015 struct igbvf_adapter *adapter = netdev_priv(netdev); 2015 struct igbvf_adapter *adapter = netdev_priv(netdev);
2016 2016
2017 /* if there are enough descriptors then we don't need to worry */ 2017 /* if there are enough descriptors then we don't need to worry */
2018 if (igbvf_desc_unused(adapter->tx_ring) >= size) 2018 if (igbvf_desc_unused(adapter->tx_ring) >= size)
2019 return 0; 2019 return 0;
2020 2020
2021 netif_stop_queue(netdev); 2021 netif_stop_queue(netdev);
2022 2022
2023 smp_mb(); 2023 smp_mb();
2024 2024
2025 /* We need to check again just in case room has been made available */ 2025 /* We need to check again just in case room has been made available */
2026 if (igbvf_desc_unused(adapter->tx_ring) < size) 2026 if (igbvf_desc_unused(adapter->tx_ring) < size)
2027 return -EBUSY; 2027 return -EBUSY;
2028 2028
2029 netif_wake_queue(netdev); 2029 netif_wake_queue(netdev);
2030 2030
2031 ++adapter->restart_queue; 2031 ++adapter->restart_queue;
2032 return 0; 2032 return 0;
2033 } 2033 }
2034 2034
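Aside: the smp_mb() in igbvf_maybe_stop_tx() above pairs with a mirror-image check in the Tx completion path, which lies outside this hunk. Purely as an illustration of that wake-side counterpart, here is a minimal sketch; the helper name, parameters, and threshold are invented for the example and are not taken from this driver.

#include <linux/netdevice.h>

/* Sketch of the wake-side half of the stop/wake handshake (illustrative only) */
static void example_tx_clean_wake(struct net_device *netdev,
                                  unsigned int descs_unused,
                                  unsigned int wake_threshold)
{
	if (descs_unused >= wake_threshold) {
		/*
		 * Make sure the updated ring state is visible before checking
		 * whether the transmit path stopped the queue; this pairs
		 * with the smp_mb() in the stop path above.
		 */
		smp_mb();
		if (netif_queue_stopped(netdev))
			netif_wake_queue(netdev);
	}
}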
2035 #define IGBVF_MAX_TXD_PWR 16 2035 #define IGBVF_MAX_TXD_PWR 16
2036 #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR) 2036 #define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
2037 2037
2038 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter, 2038 static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2039 struct igbvf_ring *tx_ring, 2039 struct igbvf_ring *tx_ring,
2040 struct sk_buff *skb, 2040 struct sk_buff *skb,
2041 unsigned int first) 2041 unsigned int first)
2042 { 2042 {
2043 struct igbvf_buffer *buffer_info; 2043 struct igbvf_buffer *buffer_info;
2044 struct pci_dev *pdev = adapter->pdev; 2044 struct pci_dev *pdev = adapter->pdev;
2045 unsigned int len = skb_headlen(skb); 2045 unsigned int len = skb_headlen(skb);
2046 unsigned int count = 0, i; 2046 unsigned int count = 0, i;
2047 unsigned int f; 2047 unsigned int f;
2048 2048
2049 i = tx_ring->next_to_use; 2049 i = tx_ring->next_to_use;
2050 2050
2051 buffer_info = &tx_ring->buffer_info[i]; 2051 buffer_info = &tx_ring->buffer_info[i];
2052 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); 2052 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2053 buffer_info->length = len; 2053 buffer_info->length = len;
2054 /* set time_stamp *before* dma to help avoid a possible race */ 2054 /* set time_stamp *before* dma to help avoid a possible race */
2055 buffer_info->time_stamp = jiffies; 2055 buffer_info->time_stamp = jiffies;
2056 buffer_info->next_to_watch = i; 2056 buffer_info->next_to_watch = i;
2057 buffer_info->mapped_as_page = false; 2057 buffer_info->mapped_as_page = false;
2058 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len, 2058 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
2059 DMA_TO_DEVICE); 2059 DMA_TO_DEVICE);
2060 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2060 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2061 goto dma_error; 2061 goto dma_error;
2062 2062
2063 2063
2064 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 2064 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
2065 const struct skb_frag_struct *frag; 2065 const struct skb_frag_struct *frag;
2066 2066
2067 count++; 2067 count++;
2068 i++; 2068 i++;
2069 if (i == tx_ring->count) 2069 if (i == tx_ring->count)
2070 i = 0; 2070 i = 0;
2071 2071
2072 frag = &skb_shinfo(skb)->frags[f]; 2072 frag = &skb_shinfo(skb)->frags[f];
2073 len = skb_frag_size(frag); 2073 len = skb_frag_size(frag);
2074 2074
2075 buffer_info = &tx_ring->buffer_info[i]; 2075 buffer_info = &tx_ring->buffer_info[i];
2076 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD); 2076 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
2077 buffer_info->length = len; 2077 buffer_info->length = len;
2078 buffer_info->time_stamp = jiffies; 2078 buffer_info->time_stamp = jiffies;
2079 buffer_info->next_to_watch = i; 2079 buffer_info->next_to_watch = i;
2080 buffer_info->mapped_as_page = true; 2080 buffer_info->mapped_as_page = true;
2081 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len, 2081 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 0, len,
2082 DMA_TO_DEVICE); 2082 DMA_TO_DEVICE);
2083 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 2083 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2084 goto dma_error; 2084 goto dma_error;
2085 } 2085 }
2086 2086
2087 tx_ring->buffer_info[i].skb = skb; 2087 tx_ring->buffer_info[i].skb = skb;
2088 tx_ring->buffer_info[first].next_to_watch = i; 2088 tx_ring->buffer_info[first].next_to_watch = i;
2089 2089
2090 return ++count; 2090 return ++count;
2091 2091
2092 dma_error: 2092 dma_error:
2093 dev_err(&pdev->dev, "TX DMA map failed\n"); 2093 dev_err(&pdev->dev, "TX DMA map failed\n");
2094 2094
2095 /* clear timestamp and dma mappings for failed buffer_info mapping */ 2095 /* clear timestamp and dma mappings for failed buffer_info mapping */
2096 buffer_info->dma = 0; 2096 buffer_info->dma = 0;
2097 buffer_info->time_stamp = 0; 2097 buffer_info->time_stamp = 0;
2098 buffer_info->length = 0; 2098 buffer_info->length = 0;
2099 buffer_info->next_to_watch = 0; 2099 buffer_info->next_to_watch = 0;
2100 buffer_info->mapped_as_page = false; 2100 buffer_info->mapped_as_page = false;
2101 if (count) 2101 if (count)
2102 count--; 2102 count--;
2103 2103
2104 /* clear timestamp and dma mappings for remaining portion of packet */ 2104 /* clear timestamp and dma mappings for remaining portion of packet */
2105 while (count--) { 2105 while (count--) {
2106 if (i == 0) 2106 if (i == 0)
2107 i += tx_ring->count; 2107 i += tx_ring->count;
2108 i--; 2108 i--;
2109 buffer_info = &tx_ring->buffer_info[i]; 2109 buffer_info = &tx_ring->buffer_info[i];
2110 igbvf_put_txbuf(adapter, buffer_info); 2110 igbvf_put_txbuf(adapter, buffer_info);
2111 } 2111 }
2112 2112
2113 return 0; 2113 return 0;
2114 } 2114 }
2115 2115
2116 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter, 2116 static inline void igbvf_tx_queue_adv(struct igbvf_adapter *adapter,
2117 struct igbvf_ring *tx_ring, 2117 struct igbvf_ring *tx_ring,
2118 int tx_flags, int count, u32 paylen, 2118 int tx_flags, int count, u32 paylen,
2119 u8 hdr_len) 2119 u8 hdr_len)
2120 { 2120 {
2121 union e1000_adv_tx_desc *tx_desc = NULL; 2121 union e1000_adv_tx_desc *tx_desc = NULL;
2122 struct igbvf_buffer *buffer_info; 2122 struct igbvf_buffer *buffer_info;
2123 u32 olinfo_status = 0, cmd_type_len; 2123 u32 olinfo_status = 0, cmd_type_len;
2124 unsigned int i; 2124 unsigned int i;
2125 2125
2126 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | 2126 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
2127 E1000_ADVTXD_DCMD_DEXT); 2127 E1000_ADVTXD_DCMD_DEXT);
2128 2128
2129 if (tx_flags & IGBVF_TX_FLAGS_VLAN) 2129 if (tx_flags & IGBVF_TX_FLAGS_VLAN)
2130 cmd_type_len |= E1000_ADVTXD_DCMD_VLE; 2130 cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
2131 2131
2132 if (tx_flags & IGBVF_TX_FLAGS_TSO) { 2132 if (tx_flags & IGBVF_TX_FLAGS_TSO) {
2133 cmd_type_len |= E1000_ADVTXD_DCMD_TSE; 2133 cmd_type_len |= E1000_ADVTXD_DCMD_TSE;
2134 2134
2135 /* insert tcp checksum */ 2135 /* insert tcp checksum */
2136 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 2136 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2137 2137
2138 /* insert ip checksum */ 2138 /* insert ip checksum */
2139 if (tx_flags & IGBVF_TX_FLAGS_IPV4) 2139 if (tx_flags & IGBVF_TX_FLAGS_IPV4)
2140 olinfo_status |= E1000_TXD_POPTS_IXSM << 8; 2140 olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
2141 2141
2142 } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) { 2142 } else if (tx_flags & IGBVF_TX_FLAGS_CSUM) {
2143 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 2143 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
2144 } 2144 }
2145 2145
2146 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); 2146 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
2147 2147
2148 i = tx_ring->next_to_use; 2148 i = tx_ring->next_to_use;
2149 while (count--) { 2149 while (count--) {
2150 buffer_info = &tx_ring->buffer_info[i]; 2150 buffer_info = &tx_ring->buffer_info[i];
2151 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i); 2151 tx_desc = IGBVF_TX_DESC_ADV(*tx_ring, i);
2152 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 2152 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
2153 tx_desc->read.cmd_type_len = 2153 tx_desc->read.cmd_type_len =
2154 cpu_to_le32(cmd_type_len | buffer_info->length); 2154 cpu_to_le32(cmd_type_len | buffer_info->length);
2155 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 2155 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
2156 i++; 2156 i++;
2157 if (i == tx_ring->count) 2157 if (i == tx_ring->count)
2158 i = 0; 2158 i = 0;
2159 } 2159 }
2160 2160
2161 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); 2161 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd);
2162 /* Force memory writes to complete before letting h/w 2162 /* Force memory writes to complete before letting h/w
2163 * know there are new descriptors to fetch. (Only 2163 * know there are new descriptors to fetch. (Only
2164 * applicable for weak-ordered memory model archs, 2164 * applicable for weak-ordered memory model archs,
2165 * such as IA-64). */ 2165 * such as IA-64). */
2166 wmb(); 2166 wmb();
2167 2167
2168 tx_ring->next_to_use = i; 2168 tx_ring->next_to_use = i;
2169 writel(i, adapter->hw.hw_addr + tx_ring->tail); 2169 writel(i, adapter->hw.hw_addr + tx_ring->tail);
2170 /* we need this if more than one processor can write to our tail 2170 /* we need this if more than one processor can write to our tail
2171 * at a time, it synchronizes IO on IA64/Altix systems */ 2171 * at a time, it synchronizes IO on IA64/Altix systems */
2172 mmiowb(); 2172 mmiowb();
2173 } 2173 }
2174 2174
2175 static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb, 2175 static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
2176 struct net_device *netdev, 2176 struct net_device *netdev,
2177 struct igbvf_ring *tx_ring) 2177 struct igbvf_ring *tx_ring)
2178 { 2178 {
2179 struct igbvf_adapter *adapter = netdev_priv(netdev); 2179 struct igbvf_adapter *adapter = netdev_priv(netdev);
2180 unsigned int first, tx_flags = 0; 2180 unsigned int first, tx_flags = 0;
2181 u8 hdr_len = 0; 2181 u8 hdr_len = 0;
2182 int count = 0; 2182 int count = 0;
2183 int tso = 0; 2183 int tso = 0;
2184 2184
2185 if (test_bit(__IGBVF_DOWN, &adapter->state)) { 2185 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2186 dev_kfree_skb_any(skb); 2186 dev_kfree_skb_any(skb);
2187 return NETDEV_TX_OK; 2187 return NETDEV_TX_OK;
2188 } 2188 }
2189 2189
2190 if (skb->len <= 0) { 2190 if (skb->len <= 0) {
2191 dev_kfree_skb_any(skb); 2191 dev_kfree_skb_any(skb);
2192 return NETDEV_TX_OK; 2192 return NETDEV_TX_OK;
2193 } 2193 }
2194 2194
2195 /* 2195 /*
2196 * need: count + 4 desc to keep the tail from touching the head: 2196 * need: count + 4 desc to keep the tail from touching the head:
2197 * + 2 desc gap to keep tail from touching head, 2197 * + 2 desc gap to keep tail from touching head,
2198 * + 1 desc for skb->data, 2198 * + 1 desc for skb->data,
2199 * + 1 desc for context descriptor, 2199 * + 1 desc for context descriptor,
2200 * otherwise try next time 2200 * otherwise try next time
2201 */ 2201 */
2202 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) { 2202 if (igbvf_maybe_stop_tx(netdev, skb_shinfo(skb)->nr_frags + 4)) {
2203 /* this is a hard error */ 2203 /* this is a hard error */
2204 return NETDEV_TX_BUSY; 2204 return NETDEV_TX_BUSY;
2205 } 2205 }
2206 2206
2207 if (vlan_tx_tag_present(skb)) { 2207 if (vlan_tx_tag_present(skb)) {
2208 tx_flags |= IGBVF_TX_FLAGS_VLAN; 2208 tx_flags |= IGBVF_TX_FLAGS_VLAN;
2209 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT); 2209 tx_flags |= (vlan_tx_tag_get(skb) << IGBVF_TX_FLAGS_VLAN_SHIFT);
2210 } 2210 }
2211 2211
2212 if (skb->protocol == htons(ETH_P_IP)) 2212 if (skb->protocol == htons(ETH_P_IP))
2213 tx_flags |= IGBVF_TX_FLAGS_IPV4; 2213 tx_flags |= IGBVF_TX_FLAGS_IPV4;
2214 2214
2215 first = tx_ring->next_to_use; 2215 first = tx_ring->next_to_use;
2216 2216
2217 tso = skb_is_gso(skb) ? 2217 tso = skb_is_gso(skb) ?
2218 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0; 2218 igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len) : 0;
2219 if (unlikely(tso < 0)) { 2219 if (unlikely(tso < 0)) {
2220 dev_kfree_skb_any(skb); 2220 dev_kfree_skb_any(skb);
2221 return NETDEV_TX_OK; 2221 return NETDEV_TX_OK;
2222 } 2222 }
2223 2223
2224 if (tso) 2224 if (tso)
2225 tx_flags |= IGBVF_TX_FLAGS_TSO; 2225 tx_flags |= IGBVF_TX_FLAGS_TSO;
2226 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) && 2226 else if (igbvf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
2227 (skb->ip_summed == CHECKSUM_PARTIAL)) 2227 (skb->ip_summed == CHECKSUM_PARTIAL))
2228 tx_flags |= IGBVF_TX_FLAGS_CSUM; 2228 tx_flags |= IGBVF_TX_FLAGS_CSUM;
2229 2229
2230 /* 2230 /*
2231 * count reflects descriptors mapped, if 0 then mapping error 2231 * count reflects descriptors mapped, if 0 then mapping error
2232 * has occurred and we need to rewind the descriptor queue 2232 * has occurred and we need to rewind the descriptor queue
2233 */ 2233 */
2234 count = igbvf_tx_map_adv(adapter, tx_ring, skb, first); 2234 count = igbvf_tx_map_adv(adapter, tx_ring, skb, first);
2235 2235
2236 if (count) { 2236 if (count) {
2237 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count, 2237 igbvf_tx_queue_adv(adapter, tx_ring, tx_flags, count,
2238 skb->len, hdr_len); 2238 skb->len, hdr_len);
2239 /* Make sure there is space in the ring for the next send. */ 2239 /* Make sure there is space in the ring for the next send. */
2240 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4); 2240 igbvf_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 4);
2241 } else { 2241 } else {
2242 dev_kfree_skb_any(skb); 2242 dev_kfree_skb_any(skb);
2243 tx_ring->buffer_info[first].time_stamp = 0; 2243 tx_ring->buffer_info[first].time_stamp = 0;
2244 tx_ring->next_to_use = first; 2244 tx_ring->next_to_use = first;
2245 } 2245 }
2246 2246
2247 return NETDEV_TX_OK; 2247 return NETDEV_TX_OK;
2248 } 2248 }
2249 2249
2250 static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb, 2250 static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
2251 struct net_device *netdev) 2251 struct net_device *netdev)
2252 { 2252 {
2253 struct igbvf_adapter *adapter = netdev_priv(netdev); 2253 struct igbvf_adapter *adapter = netdev_priv(netdev);
2254 struct igbvf_ring *tx_ring; 2254 struct igbvf_ring *tx_ring;
2255 2255
2256 if (test_bit(__IGBVF_DOWN, &adapter->state)) { 2256 if (test_bit(__IGBVF_DOWN, &adapter->state)) {
2257 dev_kfree_skb_any(skb); 2257 dev_kfree_skb_any(skb);
2258 return NETDEV_TX_OK; 2258 return NETDEV_TX_OK;
2259 } 2259 }
2260 2260
2261 tx_ring = &adapter->tx_ring[0]; 2261 tx_ring = &adapter->tx_ring[0];
2262 2262
2263 return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring); 2263 return igbvf_xmit_frame_ring_adv(skb, netdev, tx_ring);
2264 } 2264 }
2265 2265
2266 /** 2266 /**
2267 * igbvf_tx_timeout - Respond to a Tx Hang 2267 * igbvf_tx_timeout - Respond to a Tx Hang
2268 * @netdev: network interface device structure 2268 * @netdev: network interface device structure
2269 **/ 2269 **/
2270 static void igbvf_tx_timeout(struct net_device *netdev) 2270 static void igbvf_tx_timeout(struct net_device *netdev)
2271 { 2271 {
2272 struct igbvf_adapter *adapter = netdev_priv(netdev); 2272 struct igbvf_adapter *adapter = netdev_priv(netdev);
2273 2273
2274 /* Do the reset outside of interrupt context */ 2274 /* Do the reset outside of interrupt context */
2275 adapter->tx_timeout_count++; 2275 adapter->tx_timeout_count++;
2276 schedule_work(&adapter->reset_task); 2276 schedule_work(&adapter->reset_task);
2277 } 2277 }
2278 2278
2279 static void igbvf_reset_task(struct work_struct *work) 2279 static void igbvf_reset_task(struct work_struct *work)
2280 { 2280 {
2281 struct igbvf_adapter *adapter; 2281 struct igbvf_adapter *adapter;
2282 adapter = container_of(work, struct igbvf_adapter, reset_task); 2282 adapter = container_of(work, struct igbvf_adapter, reset_task);
2283 2283
2284 igbvf_reinit_locked(adapter); 2284 igbvf_reinit_locked(adapter);
2285 } 2285 }
2286 2286
2287 /** 2287 /**
2288 * igbvf_get_stats - Get System Network Statistics 2288 * igbvf_get_stats - Get System Network Statistics
2289 * @netdev: network interface device structure 2289 * @netdev: network interface device structure
2290 * 2290 *
2291 * Returns the address of the device statistics structure. 2291 * Returns the address of the device statistics structure.
2292 * The statistics are actually updated from the timer callback. 2292 * The statistics are actually updated from the timer callback.
2293 **/ 2293 **/
2294 static struct net_device_stats *igbvf_get_stats(struct net_device *netdev) 2294 static struct net_device_stats *igbvf_get_stats(struct net_device *netdev)
2295 { 2295 {
2296 struct igbvf_adapter *adapter = netdev_priv(netdev); 2296 struct igbvf_adapter *adapter = netdev_priv(netdev);
2297 2297
2298 /* only return the current stats */ 2298 /* only return the current stats */
2299 return &adapter->net_stats; 2299 return &adapter->net_stats;
2300 } 2300 }
2301 2301
2302 /** 2302 /**
2303 * igbvf_change_mtu - Change the Maximum Transfer Unit 2303 * igbvf_change_mtu - Change the Maximum Transfer Unit
2304 * @netdev: network interface device structure 2304 * @netdev: network interface device structure
2305 * @new_mtu: new value for maximum frame size 2305 * @new_mtu: new value for maximum frame size
2306 * 2306 *
2307 * Returns 0 on success, negative on failure 2307 * Returns 0 on success, negative on failure
2308 **/ 2308 **/
2309 static int igbvf_change_mtu(struct net_device *netdev, int new_mtu) 2309 static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
2310 { 2310 {
2311 struct igbvf_adapter *adapter = netdev_priv(netdev); 2311 struct igbvf_adapter *adapter = netdev_priv(netdev);
2312 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2312 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2313 2313
2314 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) { 2314 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2315 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n"); 2315 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
2316 return -EINVAL; 2316 return -EINVAL;
2317 } 2317 }
2318 2318
2319 #define MAX_STD_JUMBO_FRAME_SIZE 9234 2319 #define MAX_STD_JUMBO_FRAME_SIZE 9234
2320 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 2320 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
2321 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); 2321 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n");
2322 return -EINVAL; 2322 return -EINVAL;
2323 } 2323 }
2324 2324
2325 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state)) 2325 while (test_and_set_bit(__IGBVF_RESETTING, &adapter->state))
2326 msleep(1); 2326 msleep(1);
2327 /* igbvf_down has a dependency on max_frame_size */ 2327 /* igbvf_down has a dependency on max_frame_size */
2328 adapter->max_frame_size = max_frame; 2328 adapter->max_frame_size = max_frame;
2329 if (netif_running(netdev)) 2329 if (netif_running(netdev))
2330 igbvf_down(adapter); 2330 igbvf_down(adapter);
2331 2331
2332 /* 2332 /*
2333 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 2333 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
2334 * means we reserve 2 more, this pushes us to allocate from the next 2334 * means we reserve 2 more, this pushes us to allocate from the next
2335 * larger slab size. 2335 * larger slab size.
2336 * i.e. RXBUFFER_2048 --> size-4096 slab 2336 * i.e. RXBUFFER_2048 --> size-4096 slab
2337 * However with the new *_jumbo_rx* routines, jumbo receives will use 2337 * However with the new *_jumbo_rx* routines, jumbo receives will use
2338 * fragmented skbs 2338 * fragmented skbs
2339 */ 2339 */
2340 2340
2341 if (max_frame <= 1024) 2341 if (max_frame <= 1024)
2342 adapter->rx_buffer_len = 1024; 2342 adapter->rx_buffer_len = 1024;
2343 else if (max_frame <= 2048) 2343 else if (max_frame <= 2048)
2344 adapter->rx_buffer_len = 2048; 2344 adapter->rx_buffer_len = 2048;
2345 else 2345 else
2346 #if (PAGE_SIZE / 2) > 16384 2346 #if (PAGE_SIZE / 2) > 16384
2347 adapter->rx_buffer_len = 16384; 2347 adapter->rx_buffer_len = 16384;
2348 #else 2348 #else
2349 adapter->rx_buffer_len = PAGE_SIZE / 2; 2349 adapter->rx_buffer_len = PAGE_SIZE / 2;
2350 #endif 2350 #endif
2351 2351
2352 2352
2353 /* adjust allocation if LPE protects us, and we aren't using SBP */ 2353 /* adjust allocation if LPE protects us, and we aren't using SBP */
2354 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 2354 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
2355 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 2355 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
2356 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + 2356 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN +
2357 ETH_FCS_LEN; 2357 ETH_FCS_LEN;
2358 2358
2359 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", 2359 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n",
2360 netdev->mtu, new_mtu); 2360 netdev->mtu, new_mtu);
2361 netdev->mtu = new_mtu; 2361 netdev->mtu = new_mtu;
2362 2362
2363 if (netif_running(netdev)) 2363 if (netif_running(netdev))
2364 igbvf_up(adapter); 2364 igbvf_up(adapter);
2365 else 2365 else
2366 igbvf_reset(adapter); 2366 igbvf_reset(adapter);
2367 2367
2368 clear_bit(__IGBVF_RESETTING, &adapter->state); 2368 clear_bit(__IGBVF_RESETTING, &adapter->state);
2369 2369
2370 return 0; 2370 return 0;
2371 } 2371 }
2372 2372
2373 static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2373 static int igbvf_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2374 { 2374 {
2375 switch (cmd) { 2375 switch (cmd) {
2376 default: 2376 default:
2377 return -EOPNOTSUPP; 2377 return -EOPNOTSUPP;
2378 } 2378 }
2379 } 2379 }
2380 2380
2381 static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state) 2381 static int igbvf_suspend(struct pci_dev *pdev, pm_message_t state)
2382 { 2382 {
2383 struct net_device *netdev = pci_get_drvdata(pdev); 2383 struct net_device *netdev = pci_get_drvdata(pdev);
2384 struct igbvf_adapter *adapter = netdev_priv(netdev); 2384 struct igbvf_adapter *adapter = netdev_priv(netdev);
2385 #ifdef CONFIG_PM 2385 #ifdef CONFIG_PM
2386 int retval = 0; 2386 int retval = 0;
2387 #endif 2387 #endif
2388 2388
2389 netif_device_detach(netdev); 2389 netif_device_detach(netdev);
2390 2390
2391 if (netif_running(netdev)) { 2391 if (netif_running(netdev)) {
2392 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state)); 2392 WARN_ON(test_bit(__IGBVF_RESETTING, &adapter->state));
2393 igbvf_down(adapter); 2393 igbvf_down(adapter);
2394 igbvf_free_irq(adapter); 2394 igbvf_free_irq(adapter);
2395 } 2395 }
2396 2396
2397 #ifdef CONFIG_PM 2397 #ifdef CONFIG_PM
2398 retval = pci_save_state(pdev); 2398 retval = pci_save_state(pdev);
2399 if (retval) 2399 if (retval)
2400 return retval; 2400 return retval;
2401 #endif 2401 #endif
2402 2402
2403 pci_disable_device(pdev); 2403 pci_disable_device(pdev);
2404 2404
2405 return 0; 2405 return 0;
2406 } 2406 }
2407 2407
2408 #ifdef CONFIG_PM 2408 #ifdef CONFIG_PM
2409 static int igbvf_resume(struct pci_dev *pdev) 2409 static int igbvf_resume(struct pci_dev *pdev)
2410 { 2410 {
2411 struct net_device *netdev = pci_get_drvdata(pdev); 2411 struct net_device *netdev = pci_get_drvdata(pdev);
2412 struct igbvf_adapter *adapter = netdev_priv(netdev); 2412 struct igbvf_adapter *adapter = netdev_priv(netdev);
2413 u32 err; 2413 u32 err;
2414 2414
2415 pci_restore_state(pdev); 2415 pci_restore_state(pdev);
2416 err = pci_enable_device_mem(pdev); 2416 err = pci_enable_device_mem(pdev);
2417 if (err) { 2417 if (err) {
2418 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n"); 2418 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
2419 return err; 2419 return err;
2420 } 2420 }
2421 2421
2422 pci_set_master(pdev); 2422 pci_set_master(pdev);
2423 2423
2424 if (netif_running(netdev)) { 2424 if (netif_running(netdev)) {
2425 err = igbvf_request_irq(adapter); 2425 err = igbvf_request_irq(adapter);
2426 if (err) 2426 if (err)
2427 return err; 2427 return err;
2428 } 2428 }
2429 2429
2430 igbvf_reset(adapter); 2430 igbvf_reset(adapter);
2431 2431
2432 if (netif_running(netdev)) 2432 if (netif_running(netdev))
2433 igbvf_up(adapter); 2433 igbvf_up(adapter);
2434 2434
2435 netif_device_attach(netdev); 2435 netif_device_attach(netdev);
2436 2436
2437 return 0; 2437 return 0;
2438 } 2438 }
2439 #endif 2439 #endif
2440 2440
2441 static void igbvf_shutdown(struct pci_dev *pdev) 2441 static void igbvf_shutdown(struct pci_dev *pdev)
2442 { 2442 {
2443 igbvf_suspend(pdev, PMSG_SUSPEND); 2443 igbvf_suspend(pdev, PMSG_SUSPEND);
2444 } 2444 }
2445 2445
2446 #ifdef CONFIG_NET_POLL_CONTROLLER 2446 #ifdef CONFIG_NET_POLL_CONTROLLER
2447 /* 2447 /*
2448 * Polling 'interrupt' - used by things like netconsole to send skbs 2448 * Polling 'interrupt' - used by things like netconsole to send skbs
2449 * without having to re-enable interrupts. It's not called while 2449 * without having to re-enable interrupts. It's not called while
2450 * the interrupt routine is executing. 2450 * the interrupt routine is executing.
2451 */ 2451 */
2452 static void igbvf_netpoll(struct net_device *netdev) 2452 static void igbvf_netpoll(struct net_device *netdev)
2453 { 2453 {
2454 struct igbvf_adapter *adapter = netdev_priv(netdev); 2454 struct igbvf_adapter *adapter = netdev_priv(netdev);
2455 2455
2456 disable_irq(adapter->pdev->irq); 2456 disable_irq(adapter->pdev->irq);
2457 2457
2458 igbvf_clean_tx_irq(adapter->tx_ring); 2458 igbvf_clean_tx_irq(adapter->tx_ring);
2459 2459
2460 enable_irq(adapter->pdev->irq); 2460 enable_irq(adapter->pdev->irq);
2461 } 2461 }
2462 #endif 2462 #endif
2463 2463
2464 /** 2464 /**
2465 * igbvf_io_error_detected - called when PCI error is detected 2465 * igbvf_io_error_detected - called when PCI error is detected
2466 * @pdev: Pointer to PCI device 2466 * @pdev: Pointer to PCI device
2467 * @state: The current pci connection state 2467 * @state: The current pci connection state
2468 * 2468 *
2469 * This function is called after a PCI bus error affecting 2469 * This function is called after a PCI bus error affecting
2470 * this device has been detected. 2470 * this device has been detected.
2471 */ 2471 */
2472 static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev, 2472 static pci_ers_result_t igbvf_io_error_detected(struct pci_dev *pdev,
2473 pci_channel_state_t state) 2473 pci_channel_state_t state)
2474 { 2474 {
2475 struct net_device *netdev = pci_get_drvdata(pdev); 2475 struct net_device *netdev = pci_get_drvdata(pdev);
2476 struct igbvf_adapter *adapter = netdev_priv(netdev); 2476 struct igbvf_adapter *adapter = netdev_priv(netdev);
2477 2477
2478 netif_device_detach(netdev); 2478 netif_device_detach(netdev);
2479 2479
2480 if (state == pci_channel_io_perm_failure) 2480 if (state == pci_channel_io_perm_failure)
2481 return PCI_ERS_RESULT_DISCONNECT; 2481 return PCI_ERS_RESULT_DISCONNECT;
2482 2482
2483 if (netif_running(netdev)) 2483 if (netif_running(netdev))
2484 igbvf_down(adapter); 2484 igbvf_down(adapter);
2485 pci_disable_device(pdev); 2485 pci_disable_device(pdev);
2486 2486
2487 /* Request a slot reset. */ 2487 /* Request a slot reset. */
2488 return PCI_ERS_RESULT_NEED_RESET; 2488 return PCI_ERS_RESULT_NEED_RESET;
2489 } 2489 }
2490 2490
2491 /** 2491 /**
2492 * igbvf_io_slot_reset - called after the pci bus has been reset. 2492 * igbvf_io_slot_reset - called after the pci bus has been reset.
2493 * @pdev: Pointer to PCI device 2493 * @pdev: Pointer to PCI device
2494 * 2494 *
2495 * Restart the card from scratch, as if from a cold-boot. Implementation 2495 * Restart the card from scratch, as if from a cold-boot. Implementation
2496 * resembles the first-half of the igbvf_resume routine. 2496 * resembles the first-half of the igbvf_resume routine.
2497 */ 2497 */
2498 static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev) 2498 static pci_ers_result_t igbvf_io_slot_reset(struct pci_dev *pdev)
2499 { 2499 {
2500 struct net_device *netdev = pci_get_drvdata(pdev); 2500 struct net_device *netdev = pci_get_drvdata(pdev);
2501 struct igbvf_adapter *adapter = netdev_priv(netdev); 2501 struct igbvf_adapter *adapter = netdev_priv(netdev);
2502 2502
2503 if (pci_enable_device_mem(pdev)) { 2503 if (pci_enable_device_mem(pdev)) {
2504 dev_err(&pdev->dev, 2504 dev_err(&pdev->dev,
2505 "Cannot re-enable PCI device after reset.\n"); 2505 "Cannot re-enable PCI device after reset.\n");
2506 return PCI_ERS_RESULT_DISCONNECT; 2506 return PCI_ERS_RESULT_DISCONNECT;
2507 } 2507 }
2508 pci_set_master(pdev); 2508 pci_set_master(pdev);
2509 2509
2510 igbvf_reset(adapter); 2510 igbvf_reset(adapter);
2511 2511
2512 return PCI_ERS_RESULT_RECOVERED; 2512 return PCI_ERS_RESULT_RECOVERED;
2513 } 2513 }
2514 2514
2515 /** 2515 /**
2516 * igbvf_io_resume - called when traffic can start flowing again. 2516 * igbvf_io_resume - called when traffic can start flowing again.
2517 * @pdev: Pointer to PCI device 2517 * @pdev: Pointer to PCI device
2518 * 2518 *
2519 * This callback is called when the error recovery driver tells us that 2519 * This callback is called when the error recovery driver tells us that
2520 * it's OK to resume normal operation. Implementation resembles the 2520 * it's OK to resume normal operation. Implementation resembles the
2521 * second-half of the igbvf_resume routine. 2521 * second-half of the igbvf_resume routine.
2522 */ 2522 */
2523 static void igbvf_io_resume(struct pci_dev *pdev) 2523 static void igbvf_io_resume(struct pci_dev *pdev)
2524 { 2524 {
2525 struct net_device *netdev = pci_get_drvdata(pdev); 2525 struct net_device *netdev = pci_get_drvdata(pdev);
2526 struct igbvf_adapter *adapter = netdev_priv(netdev); 2526 struct igbvf_adapter *adapter = netdev_priv(netdev);
2527 2527
2528 if (netif_running(netdev)) { 2528 if (netif_running(netdev)) {
2529 if (igbvf_up(adapter)) { 2529 if (igbvf_up(adapter)) {
2530 dev_err(&pdev->dev, 2530 dev_err(&pdev->dev,
2531 "can't bring device back up after reset\n"); 2531 "can't bring device back up after reset\n");
2532 return; 2532 return;
2533 } 2533 }
2534 } 2534 }
2535 2535
2536 netif_device_attach(netdev); 2536 netif_device_attach(netdev);
2537 } 2537 }
2538 2538
2539 static void igbvf_print_device_info(struct igbvf_adapter *adapter) 2539 static void igbvf_print_device_info(struct igbvf_adapter *adapter)
2540 { 2540 {
2541 struct e1000_hw *hw = &adapter->hw; 2541 struct e1000_hw *hw = &adapter->hw;
2542 struct net_device *netdev = adapter->netdev; 2542 struct net_device *netdev = adapter->netdev;
2543 struct pci_dev *pdev = adapter->pdev; 2543 struct pci_dev *pdev = adapter->pdev;
2544 2544
2545 if (hw->mac.type == e1000_vfadapt_i350) 2545 if (hw->mac.type == e1000_vfadapt_i350)
2546 dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n"); 2546 dev_info(&pdev->dev, "Intel(R) I350 Virtual Function\n");
2547 else 2547 else
2548 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n"); 2548 dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
2549 dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr); 2549 dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
2550 } 2550 }
2551 2551
2552 static int igbvf_set_features(struct net_device *netdev, 2552 static int igbvf_set_features(struct net_device *netdev,
2553 netdev_features_t features) 2553 netdev_features_t features)
2554 { 2554 {
2555 struct igbvf_adapter *adapter = netdev_priv(netdev); 2555 struct igbvf_adapter *adapter = netdev_priv(netdev);
2556 2556
2557 if (features & NETIF_F_RXCSUM) 2557 if (features & NETIF_F_RXCSUM)
2558 adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED; 2558 adapter->flags &= ~IGBVF_FLAG_RX_CSUM_DISABLED;
2559 else 2559 else
2560 adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED; 2560 adapter->flags |= IGBVF_FLAG_RX_CSUM_DISABLED;
2561 2561
2562 return 0; 2562 return 0;
2563 } 2563 }
2564 2564
2565 static const struct net_device_ops igbvf_netdev_ops = { 2565 static const struct net_device_ops igbvf_netdev_ops = {
2566 .ndo_open = igbvf_open, 2566 .ndo_open = igbvf_open,
2567 .ndo_stop = igbvf_close, 2567 .ndo_stop = igbvf_close,
2568 .ndo_start_xmit = igbvf_xmit_frame, 2568 .ndo_start_xmit = igbvf_xmit_frame,
2569 .ndo_get_stats = igbvf_get_stats, 2569 .ndo_get_stats = igbvf_get_stats,
2570 .ndo_set_rx_mode = igbvf_set_multi, 2570 .ndo_set_rx_mode = igbvf_set_multi,
2571 .ndo_set_mac_address = igbvf_set_mac, 2571 .ndo_set_mac_address = igbvf_set_mac,
2572 .ndo_change_mtu = igbvf_change_mtu, 2572 .ndo_change_mtu = igbvf_change_mtu,
2573 .ndo_do_ioctl = igbvf_ioctl, 2573 .ndo_do_ioctl = igbvf_ioctl,
2574 .ndo_tx_timeout = igbvf_tx_timeout, 2574 .ndo_tx_timeout = igbvf_tx_timeout,
2575 .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, 2575 .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid,
2576 .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, 2576 .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid,
2577 #ifdef CONFIG_NET_POLL_CONTROLLER 2577 #ifdef CONFIG_NET_POLL_CONTROLLER
2578 .ndo_poll_controller = igbvf_netpoll, 2578 .ndo_poll_controller = igbvf_netpoll,
2579 #endif 2579 #endif
2580 .ndo_set_features = igbvf_set_features, 2580 .ndo_set_features = igbvf_set_features,
2581 }; 2581 };
2582 2582
2583 /** 2583 /**
2584 * igbvf_probe - Device Initialization Routine 2584 * igbvf_probe - Device Initialization Routine
2585 * @pdev: PCI device information struct 2585 * @pdev: PCI device information struct
2586 * @ent: entry in igbvf_pci_tbl 2586 * @ent: entry in igbvf_pci_tbl
2587 * 2587 *
2588 * Returns 0 on success, negative on failure 2588 * Returns 0 on success, negative on failure
2589 * 2589 *
2590 * igbvf_probe initializes an adapter identified by a pci_dev structure. 2590 * igbvf_probe initializes an adapter identified by a pci_dev structure.
2591 * The OS initialization, configuring of the adapter private structure, 2591 * The OS initialization, configuring of the adapter private structure,
2592 * and a hardware reset occur. 2592 * and a hardware reset occur.
2593 **/ 2593 **/
2594 static int __devinit igbvf_probe(struct pci_dev *pdev, 2594 static int __devinit igbvf_probe(struct pci_dev *pdev,
2595 const struct pci_device_id *ent) 2595 const struct pci_device_id *ent)
2596 { 2596 {
2597 struct net_device *netdev; 2597 struct net_device *netdev;
2598 struct igbvf_adapter *adapter; 2598 struct igbvf_adapter *adapter;
2599 struct e1000_hw *hw; 2599 struct e1000_hw *hw;
2600 const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data]; 2600 const struct igbvf_info *ei = igbvf_info_tbl[ent->driver_data];
2601 2601
2602 static int cards_found; 2602 static int cards_found;
2603 int err, pci_using_dac; 2603 int err, pci_using_dac;
2604 2604
2605 err = pci_enable_device_mem(pdev); 2605 err = pci_enable_device_mem(pdev);
2606 if (err) 2606 if (err)
2607 return err; 2607 return err;
2608 2608
2609 pci_using_dac = 0; 2609 pci_using_dac = 0;
2610 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 2610 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2611 if (!err) { 2611 if (!err) {
2612 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 2612 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2613 if (!err) 2613 if (!err)
2614 pci_using_dac = 1; 2614 pci_using_dac = 1;
2615 } else { 2615 } else {
2616 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2616 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2617 if (err) { 2617 if (err) {
2618 err = dma_set_coherent_mask(&pdev->dev, 2618 err = dma_set_coherent_mask(&pdev->dev,
2619 DMA_BIT_MASK(32)); 2619 DMA_BIT_MASK(32));
2620 if (err) { 2620 if (err) {
2621 dev_err(&pdev->dev, "No usable DMA " 2621 dev_err(&pdev->dev, "No usable DMA "
2622 "configuration, aborting\n"); 2622 "configuration, aborting\n");
2623 goto err_dma; 2623 goto err_dma;
2624 } 2624 }
2625 } 2625 }
2626 } 2626 }
2627 2627
2628 err = pci_request_regions(pdev, igbvf_driver_name); 2628 err = pci_request_regions(pdev, igbvf_driver_name);
2629 if (err) 2629 if (err)
2630 goto err_pci_reg; 2630 goto err_pci_reg;
2631 2631
2632 pci_set_master(pdev); 2632 pci_set_master(pdev);
2633 2633
2634 err = -ENOMEM; 2634 err = -ENOMEM;
2635 netdev = alloc_etherdev(sizeof(struct igbvf_adapter)); 2635 netdev = alloc_etherdev(sizeof(struct igbvf_adapter));
2636 if (!netdev) 2636 if (!netdev)
2637 goto err_alloc_etherdev; 2637 goto err_alloc_etherdev;
2638 2638
2639 SET_NETDEV_DEV(netdev, &pdev->dev); 2639 SET_NETDEV_DEV(netdev, &pdev->dev);
2640 2640
2641 pci_set_drvdata(pdev, netdev); 2641 pci_set_drvdata(pdev, netdev);
2642 adapter = netdev_priv(netdev); 2642 adapter = netdev_priv(netdev);
2643 hw = &adapter->hw; 2643 hw = &adapter->hw;
2644 adapter->netdev = netdev; 2644 adapter->netdev = netdev;
2645 adapter->pdev = pdev; 2645 adapter->pdev = pdev;
2646 adapter->ei = ei; 2646 adapter->ei = ei;
2647 adapter->pba = ei->pba; 2647 adapter->pba = ei->pba;
2648 adapter->flags = ei->flags; 2648 adapter->flags = ei->flags;
2649 adapter->hw.back = adapter; 2649 adapter->hw.back = adapter;
2650 adapter->hw.mac.type = ei->mac; 2650 adapter->hw.mac.type = ei->mac;
2651 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1; 2651 adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
2652 2652
2653 /* PCI config space info */ 2653 /* PCI config space info */
2654 2654
2655 hw->vendor_id = pdev->vendor; 2655 hw->vendor_id = pdev->vendor;
2656 hw->device_id = pdev->device; 2656 hw->device_id = pdev->device;
2657 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2657 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2658 hw->subsystem_device_id = pdev->subsystem_device; 2658 hw->subsystem_device_id = pdev->subsystem_device;
2659 hw->revision_id = pdev->revision; 2659 hw->revision_id = pdev->revision;
2660 2660
2661 err = -EIO; 2661 err = -EIO;
2662 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), 2662 adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0),
2663 pci_resource_len(pdev, 0)); 2663 pci_resource_len(pdev, 0));
2664 2664
2665 if (!adapter->hw.hw_addr) 2665 if (!adapter->hw.hw_addr)
2666 goto err_ioremap; 2666 goto err_ioremap;
2667 2667
2668 if (ei->get_variants) { 2668 if (ei->get_variants) {
2669 err = ei->get_variants(adapter); 2669 err = ei->get_variants(adapter);
2670 if (err) 2670 if (err)
2671 goto err_ioremap; 2671 goto err_ioremap;
2672 } 2672 }
2673 2673
2674 /* setup adapter struct */ 2674 /* setup adapter struct */
2675 err = igbvf_sw_init(adapter); 2675 err = igbvf_sw_init(adapter);
2676 if (err) 2676 if (err)
2677 goto err_sw_init; 2677 goto err_sw_init;
2678 2678
2679 /* construct the net_device struct */ 2679 /* construct the net_device struct */
2680 netdev->netdev_ops = &igbvf_netdev_ops; 2680 netdev->netdev_ops = &igbvf_netdev_ops;
2681 2681
2682 igbvf_set_ethtool_ops(netdev); 2682 igbvf_set_ethtool_ops(netdev);
2683 netdev->watchdog_timeo = 5 * HZ; 2683 netdev->watchdog_timeo = 5 * HZ;
2684 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 2684 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
2685 2685
2686 adapter->bd_number = cards_found++; 2686 adapter->bd_number = cards_found++;
2687 2687
2688 netdev->hw_features = NETIF_F_SG | 2688 netdev->hw_features = NETIF_F_SG |
2689 NETIF_F_IP_CSUM | 2689 NETIF_F_IP_CSUM |
2690 NETIF_F_IPV6_CSUM | 2690 NETIF_F_IPV6_CSUM |
2691 NETIF_F_TSO | 2691 NETIF_F_TSO |
2692 NETIF_F_TSO6 | 2692 NETIF_F_TSO6 |
2693 NETIF_F_RXCSUM; 2693 NETIF_F_RXCSUM;
2694 2694
2695 netdev->features = netdev->hw_features | 2695 netdev->features = netdev->hw_features |
2696 NETIF_F_HW_VLAN_TX | 2696 NETIF_F_HW_VLAN_TX |
2697 NETIF_F_HW_VLAN_RX | 2697 NETIF_F_HW_VLAN_RX |
2698 NETIF_F_HW_VLAN_FILTER; 2698 NETIF_F_HW_VLAN_FILTER;
2699 2699
2700 if (pci_using_dac) 2700 if (pci_using_dac)
2701 netdev->features |= NETIF_F_HIGHDMA; 2701 netdev->features |= NETIF_F_HIGHDMA;
2702 2702
2703 netdev->vlan_features |= NETIF_F_TSO; 2703 netdev->vlan_features |= NETIF_F_TSO;
2704 netdev->vlan_features |= NETIF_F_TSO6; 2704 netdev->vlan_features |= NETIF_F_TSO6;
2705 netdev->vlan_features |= NETIF_F_IP_CSUM; 2705 netdev->vlan_features |= NETIF_F_IP_CSUM;
2706 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 2706 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
2707 netdev->vlan_features |= NETIF_F_SG; 2707 netdev->vlan_features |= NETIF_F_SG;
2708 2708
2709 /* reset the controller to put the device in a known good state */ 2709 /* reset the controller to put the device in a known good state */
2710 err = hw->mac.ops.reset_hw(hw); 2710 err = hw->mac.ops.reset_hw(hw);
2711 if (err) { 2711 if (err) {
2712 dev_info(&pdev->dev, 2712 dev_info(&pdev->dev,
2713 "PF still in reset state, assigning new address." 2713 "PF still in reset state, assigning new address."
2714 " Is the PF interface up?\n"); 2714 " Is the PF interface up?\n");
2715 dev_hw_addr_random(adapter->netdev, hw->mac.addr); 2715 eth_hw_addr_random(netdev);
2716 memcpy(adapter->hw.mac.addr, netdev->dev_addr,
2717 netdev->addr_len);
2716 } else { 2718 } else {
2717 err = hw->mac.ops.read_mac_addr(hw); 2719 err = hw->mac.ops.read_mac_addr(hw);
2718 if (err) { 2720 if (err) {
2719 dev_err(&pdev->dev, "Error reading MAC address\n"); 2721 dev_err(&pdev->dev, "Error reading MAC address\n");
2720 goto err_hw_init; 2722 goto err_hw_init;
2721 } 2723 }
2724 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
2725 netdev->addr_len);
2722 } 2726 }
2723 2727
2724 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
2725 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
2726
2727 if (!is_valid_ether_addr(netdev->perm_addr)) { 2728 if (!is_valid_ether_addr(netdev->perm_addr)) {
2728 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", 2729 dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
2729 netdev->dev_addr); 2730 netdev->dev_addr);
2730 err = -EIO; 2731 err = -EIO;
2731 goto err_hw_init; 2732 goto err_hw_init;
2732 } 2733 }
2734
2735 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
2733 2736
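For readers unfamiliar with the helper called in the reset-failure branch above: the following is a rough approximation of what an eth_hw_addr_random()-style function does, assuming the usual convention of generating a locally administered unicast address and flagging it as randomly assigned. This is a sketch for orientation, not the kernel's exact implementation.

#include <linux/etherdevice.h>

/* Sketch only: approximate behaviour of a random-MAC helper. */
static inline void example_hw_addr_random(struct net_device *dev)
{
	/* random_ether_addr() fills dev_addr with random bytes, then
	 * clears the multicast bit and sets the locally-administered bit */
	random_ether_addr(dev->dev_addr);
	dev->addr_assign_type |= NET_ADDR_RANDOM;
}

Because such a helper writes straight into netdev->dev_addr, the added memcpy() in the hunk above then propagates the randomly generated address into hw->mac.addr.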
2734 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, 2737 setup_timer(&adapter->watchdog_timer, &igbvf_watchdog,
2735 (unsigned long) adapter); 2738 (unsigned long) adapter);
2736 2739
2737 INIT_WORK(&adapter->reset_task, igbvf_reset_task); 2740 INIT_WORK(&adapter->reset_task, igbvf_reset_task);
2738 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task); 2741 INIT_WORK(&adapter->watchdog_task, igbvf_watchdog_task);
2739 2742
2740 /* ring size defaults */ 2743 /* ring size defaults */
2741 adapter->rx_ring->count = 1024; 2744 adapter->rx_ring->count = 1024;
2742 adapter->tx_ring->count = 1024; 2745 adapter->tx_ring->count = 1024;
2743 2746
2744 /* reset the hardware with the new settings */ 2747 /* reset the hardware with the new settings */
2745 igbvf_reset(adapter); 2748 igbvf_reset(adapter);
2746 2749
2747 strcpy(netdev->name, "eth%d"); 2750 strcpy(netdev->name, "eth%d");
2748 err = register_netdev(netdev); 2751 err = register_netdev(netdev);
2749 if (err) 2752 if (err)
2750 goto err_hw_init; 2753 goto err_hw_init;
2751 2754
2752 /* tell the stack to leave us alone until igbvf_open() is called */ 2755 /* tell the stack to leave us alone until igbvf_open() is called */
2753 netif_carrier_off(netdev); 2756 netif_carrier_off(netdev);
2754 netif_stop_queue(netdev); 2757 netif_stop_queue(netdev);
2755 2758
2756 igbvf_print_device_info(adapter); 2759 igbvf_print_device_info(adapter);
2757 2760
2758 igbvf_initialize_last_counter_stats(adapter); 2761 igbvf_initialize_last_counter_stats(adapter);
2759 2762
2760 return 0; 2763 return 0;
2761 2764
2762 err_hw_init: 2765 err_hw_init:
2763 kfree(adapter->tx_ring); 2766 kfree(adapter->tx_ring);
2764 kfree(adapter->rx_ring); 2767 kfree(adapter->rx_ring);
2765 err_sw_init: 2768 err_sw_init:
2766 igbvf_reset_interrupt_capability(adapter); 2769 igbvf_reset_interrupt_capability(adapter);
2767 iounmap(adapter->hw.hw_addr); 2770 iounmap(adapter->hw.hw_addr);
2768 err_ioremap: 2771 err_ioremap:
2769 free_netdev(netdev); 2772 free_netdev(netdev);
2770 err_alloc_etherdev: 2773 err_alloc_etherdev:
2771 pci_release_regions(pdev); 2774 pci_release_regions(pdev);
2772 err_pci_reg: 2775 err_pci_reg:
2773 err_dma: 2776 err_dma:
2774 pci_disable_device(pdev); 2777 pci_disable_device(pdev);
2775 return err; 2778 return err;
2776 } 2779 }
2777 2780
2778 /** 2781 /**
2779 * igbvf_remove - Device Removal Routine 2782 * igbvf_remove - Device Removal Routine
2780 * @pdev: PCI device information struct 2783 * @pdev: PCI device information struct
2781 * 2784 *
2782 * igbvf_remove is called by the PCI subsystem to alert the driver 2785 * igbvf_remove is called by the PCI subsystem to alert the driver
2783 * that it should release a PCI device. This could be caused by a 2786 * that it should release a PCI device. This could be caused by a
2784 * Hot-Plug event, or because the driver is going to be removed from 2787 * Hot-Plug event, or because the driver is going to be removed from
2785 * memory. 2788 * memory.
2786 **/ 2789 **/
2787 static void __devexit igbvf_remove(struct pci_dev *pdev) 2790 static void __devexit igbvf_remove(struct pci_dev *pdev)
2788 { 2791 {
2789 struct net_device *netdev = pci_get_drvdata(pdev); 2792 struct net_device *netdev = pci_get_drvdata(pdev);
2790 struct igbvf_adapter *adapter = netdev_priv(netdev); 2793 struct igbvf_adapter *adapter = netdev_priv(netdev);
2791 struct e1000_hw *hw = &adapter->hw; 2794 struct e1000_hw *hw = &adapter->hw;
2792 2795
2793 /* 2796 /*
2794 * The watchdog timer may be rescheduled, so explicitly 2797 * The watchdog timer may be rescheduled, so explicitly
2795 * disable it from being rescheduled. 2798 * disable it from being rescheduled.
2796 */ 2799 */
2797 set_bit(__IGBVF_DOWN, &adapter->state); 2800 set_bit(__IGBVF_DOWN, &adapter->state);
2798 del_timer_sync(&adapter->watchdog_timer); 2801 del_timer_sync(&adapter->watchdog_timer);
2799 2802
2800 cancel_work_sync(&adapter->reset_task); 2803 cancel_work_sync(&adapter->reset_task);
2801 cancel_work_sync(&adapter->watchdog_task); 2804 cancel_work_sync(&adapter->watchdog_task);
2802 2805
2803 unregister_netdev(netdev); 2806 unregister_netdev(netdev);
2804 2807
2805 igbvf_reset_interrupt_capability(adapter); 2808 igbvf_reset_interrupt_capability(adapter);
2806 2809
2807 /* 2810 /*
2808 * it is important to delete the napi struct prior to freeing the 2811 * it is important to delete the napi struct prior to freeing the
2809 * rx ring so that you do not end up with null pointer refs 2812 * rx ring so that you do not end up with null pointer refs
2810 */ 2813 */
2811 netif_napi_del(&adapter->rx_ring->napi); 2814 netif_napi_del(&adapter->rx_ring->napi);
2812 kfree(adapter->tx_ring); 2815 kfree(adapter->tx_ring);
2813 kfree(adapter->rx_ring); 2816 kfree(adapter->rx_ring);
2814 2817
2815 iounmap(hw->hw_addr); 2818 iounmap(hw->hw_addr);
2816 if (hw->flash_address) 2819 if (hw->flash_address)
2817 iounmap(hw->flash_address); 2820 iounmap(hw->flash_address);
2818 pci_release_regions(pdev); 2821 pci_release_regions(pdev);
2819 2822
2820 free_netdev(netdev); 2823 free_netdev(netdev);
2821 2824
2822 pci_disable_device(pdev); 2825 pci_disable_device(pdev);
2823 } 2826 }
2824 2827
2825 /* PCI Error Recovery (ERS) */ 2828 /* PCI Error Recovery (ERS) */
2826 static struct pci_error_handlers igbvf_err_handler = { 2829 static struct pci_error_handlers igbvf_err_handler = {
2827 .error_detected = igbvf_io_error_detected, 2830 .error_detected = igbvf_io_error_detected,
2828 .slot_reset = igbvf_io_slot_reset, 2831 .slot_reset = igbvf_io_slot_reset,
2829 .resume = igbvf_io_resume, 2832 .resume = igbvf_io_resume,
2830 }; 2833 };
2831 2834
2832 static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = { 2835 static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
2833 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, 2836 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2834 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf }, 2837 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
2835 { } /* terminate list */ 2838 { } /* terminate list */
2836 }; 2839 };
2837 MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); 2840 MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
2838 2841
2839 /* PCI Device API Driver */ 2842 /* PCI Device API Driver */
2840 static struct pci_driver igbvf_driver = { 2843 static struct pci_driver igbvf_driver = {
2841 .name = igbvf_driver_name, 2844 .name = igbvf_driver_name,
2842 .id_table = igbvf_pci_tbl, 2845 .id_table = igbvf_pci_tbl,
2843 .probe = igbvf_probe, 2846 .probe = igbvf_probe,
2844 .remove = __devexit_p(igbvf_remove), 2847 .remove = __devexit_p(igbvf_remove),
2845 #ifdef CONFIG_PM 2848 #ifdef CONFIG_PM
2846 /* Power Management Hooks */ 2849 /* Power Management Hooks */
2847 .suspend = igbvf_suspend, 2850 .suspend = igbvf_suspend,
2848 .resume = igbvf_resume, 2851 .resume = igbvf_resume,
2849 #endif 2852 #endif
2850 .shutdown = igbvf_shutdown, 2853 .shutdown = igbvf_shutdown,
2851 .err_handler = &igbvf_err_handler 2854 .err_handler = &igbvf_err_handler
2852 }; 2855 };
2853 2856
2854 /** 2857 /**
2855 * igbvf_init_module - Driver Registration Routine 2858 * igbvf_init_module - Driver Registration Routine
2856 * 2859 *
2857 * igbvf_init_module is the first routine called when the driver is 2860 * igbvf_init_module is the first routine called when the driver is
2858 * loaded. All it does is register with the PCI subsystem. 2861 * loaded. All it does is register with the PCI subsystem.
2859 **/ 2862 **/
2860 static int __init igbvf_init_module(void) 2863 static int __init igbvf_init_module(void)
2861 { 2864 {
2862 int ret; 2865 int ret;
2863 pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version); 2866 pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
2864 pr_info("%s\n", igbvf_copyright); 2867 pr_info("%s\n", igbvf_copyright);
2865 2868
2866 ret = pci_register_driver(&igbvf_driver); 2869 ret = pci_register_driver(&igbvf_driver);
2867 2870
2868 return ret; 2871 return ret;
2869 } 2872 }
2870 module_init(igbvf_init_module); 2873 module_init(igbvf_init_module);
2871 2874
2872 /** 2875 /**
2873 * igbvf_exit_module - Driver Exit Cleanup Routine 2876 * igbvf_exit_module - Driver Exit Cleanup Routine
2874 * 2877 *
2875 * igbvf_exit_module is called just before the driver is removed 2878 * igbvf_exit_module is called just before the driver is removed
2876 * from memory. 2879 * from memory.
2877 **/ 2880 **/
2878 static void __exit igbvf_exit_module(void) 2881 static void __exit igbvf_exit_module(void)
2879 { 2882 {
2880 pci_unregister_driver(&igbvf_driver); 2883 pci_unregister_driver(&igbvf_driver);
2881 } 2884 }
2882 module_exit(igbvf_exit_module); 2885 module_exit(igbvf_exit_module);
2883 2886
2884 2887
2885 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 2888 MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
2886 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); 2889 MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
2887 MODULE_LICENSE("GPL"); 2890 MODULE_LICENSE("GPL");
2888 MODULE_VERSION(DRV_VERSION); 2891 MODULE_VERSION(DRV_VERSION);
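Editor's aside, not part of this commit: the init/exit pair above is the standard PCI driver registration boilerplate. A driver whose load path does nothing beyond pci_register_driver() could, where the kernel provides it, collapse both routines into the module_pci_driver() helper; igbvf keeps the explicit functions because it also prints the version and copyright banners at load time. A minimal sketch, assuming no extra work is needed at load or unload:

/* Hypothetical alternative, not driver code from this commit. */
module_pci_driver(igbvf_driver);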
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
1 /******************************************************************************* 1 /*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2012 Intel Corporation. 4 Copyright(c) 1999 - 2012 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation. 8 version 2, as published by the Free Software Foundation.
9 9
10 This program is distributed in the hope it will be useful, but WITHOUT 10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details. 13 more details.
14 14
15 You should have received a copy of the GNU General Public License along with 15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc., 16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 18
19 The full GNU General Public License is included in this distribution in 19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING". 20 the file called "COPYING".
21 21
22 Contact Information: 22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25 25
26 *******************************************************************************/ 26 *******************************************************************************/
27 27
28 28
29 /****************************************************************************** 29 /******************************************************************************
30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code 30 Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
31 ******************************************************************************/ 31 ******************************************************************************/
32 32
33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 33 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 34
35 #include <linux/types.h> 35 #include <linux/types.h>
36 #include <linux/bitops.h> 36 #include <linux/bitops.h>
37 #include <linux/module.h> 37 #include <linux/module.h>
38 #include <linux/pci.h> 38 #include <linux/pci.h>
39 #include <linux/netdevice.h> 39 #include <linux/netdevice.h>
40 #include <linux/vmalloc.h> 40 #include <linux/vmalloc.h>
41 #include <linux/string.h> 41 #include <linux/string.h>
42 #include <linux/in.h> 42 #include <linux/in.h>
43 #include <linux/ip.h> 43 #include <linux/ip.h>
44 #include <linux/tcp.h> 44 #include <linux/tcp.h>
45 #include <linux/ipv6.h> 45 #include <linux/ipv6.h>
46 #include <linux/slab.h> 46 #include <linux/slab.h>
47 #include <net/checksum.h> 47 #include <net/checksum.h>
48 #include <net/ip6_checksum.h> 48 #include <net/ip6_checksum.h>
49 #include <linux/ethtool.h> 49 #include <linux/ethtool.h>
50 #include <linux/if.h> 50 #include <linux/if.h>
51 #include <linux/if_vlan.h> 51 #include <linux/if_vlan.h>
52 #include <linux/prefetch.h> 52 #include <linux/prefetch.h>
53 53
54 #include "ixgbevf.h" 54 #include "ixgbevf.h"
55 55
56 const char ixgbevf_driver_name[] = "ixgbevf"; 56 const char ixgbevf_driver_name[] = "ixgbevf";
57 static const char ixgbevf_driver_string[] = 57 static const char ixgbevf_driver_string[] =
58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
59 59
60 #define DRV_VERSION "2.2.0-k" 60 #define DRV_VERSION "2.2.0-k"
61 const char ixgbevf_driver_version[] = DRV_VERSION; 61 const char ixgbevf_driver_version[] = DRV_VERSION;
62 static char ixgbevf_copyright[] = 62 static char ixgbevf_copyright[] =
63 "Copyright (c) 2009 - 2012 Intel Corporation."; 63 "Copyright (c) 2009 - 2012 Intel Corporation.";
64 64
65 static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 65 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
66 [board_82599_vf] = &ixgbevf_82599_vf_info, 66 [board_82599_vf] = &ixgbevf_82599_vf_info,
67 [board_X540_vf] = &ixgbevf_X540_vf_info, 67 [board_X540_vf] = &ixgbevf_X540_vf_info,
68 }; 68 };
69 69
70 /* ixgbevf_pci_tbl - PCI Device ID Table 70 /* ixgbevf_pci_tbl - PCI Device ID Table
71 * 71 *
72 * Wildcard entries (PCI_ANY_ID) should come last 72 * Wildcard entries (PCI_ANY_ID) should come last
73 * Last entry must be all 0s 73 * Last entry must be all 0s
74 * 74 *
75 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 75 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
76 * Class, Class Mask, private data (not used) } 76 * Class, Class Mask, private data (not used) }
77 */ 77 */
78 static struct pci_device_id ixgbevf_pci_tbl[] = { 78 static struct pci_device_id ixgbevf_pci_tbl[] = {
79 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), 79 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
80 board_82599_vf}, 80 board_82599_vf},
81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), 81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
82 board_X540_vf}, 82 board_X540_vf},
83 83
84 /* required last entry */ 84 /* required last entry */
85 {0, } 85 {0, }
86 }; 86 };
87 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); 87 MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
88 88
89 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 89 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
90 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver"); 90 MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
91 MODULE_LICENSE("GPL"); 91 MODULE_LICENSE("GPL");
92 MODULE_VERSION(DRV_VERSION); 92 MODULE_VERSION(DRV_VERSION);
93 93
94 #define DEFAULT_DEBUG_LEVEL_SHIFT 3 94 #define DEFAULT_DEBUG_LEVEL_SHIFT 3
95 95
96 /* forward decls */ 96 /* forward decls */
97 static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector); 97 static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
98 static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, 98 static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
99 u32 itr_reg); 99 u32 itr_reg);
100 100
101 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw, 101 static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
102 struct ixgbevf_ring *rx_ring, 102 struct ixgbevf_ring *rx_ring,
103 u32 val) 103 u32 val)
104 { 104 {
105 /* 105 /*
106 * Force memory writes to complete before letting h/w 106 * Force memory writes to complete before letting h/w
107 * know there are new descriptors to fetch. (Only 107 * know there are new descriptors to fetch. (Only
108 * applicable for weak-ordered memory model archs, 108 * applicable for weak-ordered memory model archs,
109 * such as IA-64). 109 * such as IA-64).
110 */ 110 */
111 wmb(); 111 wmb();
112 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val); 112 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
113 } 113 }
114 114
115 /* 115 /*
116 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors 116 * ixgbevf_set_ivar - set IVAR registers - maps interrupt causes to vectors
117 * @adapter: pointer to adapter struct 117 * @adapter: pointer to adapter struct
118 * @direction: 0 for Rx, 1 for Tx, -1 for other causes 118 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
119 * @queue: queue to map the corresponding interrupt to 119 * @queue: queue to map the corresponding interrupt to
120 * @msix_vector: the vector to map to the corresponding queue 120 * @msix_vector: the vector to map to the corresponding queue
121 * 121 *
122 */ 122 */
123 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, 123 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
124 u8 queue, u8 msix_vector) 124 u8 queue, u8 msix_vector)
125 { 125 {
126 u32 ivar, index; 126 u32 ivar, index;
127 struct ixgbe_hw *hw = &adapter->hw; 127 struct ixgbe_hw *hw = &adapter->hw;
128 if (direction == -1) { 128 if (direction == -1) {
129 /* other causes */ 129 /* other causes */
130 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 130 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
131 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC); 131 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
132 ivar &= ~0xFF; 132 ivar &= ~0xFF;
133 ivar |= msix_vector; 133 ivar |= msix_vector;
134 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); 134 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
135 } else { 135 } else {
136 /* tx or rx causes */ 136 /* tx or rx causes */
137 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 137 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
138 index = ((16 * (queue & 1)) + (8 * direction)); 138 index = ((16 * (queue & 1)) + (8 * direction));
139 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); 139 ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
140 ivar &= ~(0xFF << index); 140 ivar &= ~(0xFF << index);
141 ivar |= (msix_vector << index); 141 ivar |= (msix_vector << index);
142 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar); 142 IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
143 } 143 }
144 } 144 }
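The index arithmetic above packs four 8-bit vector entries into each 32-bit VTIVAR register: two queues per register, with one Rx and one Tx cause per queue. An illustrative helper (not driver code) makes the bit offset explicit:

/* Illustrative only: bit position of a queue's cause within VTIVAR(queue >> 1). */
static unsigned int ivar_bit_shift(unsigned int queue, unsigned int direction)
{
	return (16 * (queue & 1)) + (8 * direction);	/* 0, 8, 16 or 24 */
}
/* Example: queue 3, Tx cause (direction 1) lands in VTIVAR(1), bits 31:24,
 * since ivar_bit_shift(3, 1) == 16 + 8 == 24.
 */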
145 145
146 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter, 146 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
147 struct ixgbevf_tx_buffer 147 struct ixgbevf_tx_buffer
148 *tx_buffer_info) 148 *tx_buffer_info)
149 { 149 {
150 if (tx_buffer_info->dma) { 150 if (tx_buffer_info->dma) {
151 if (tx_buffer_info->mapped_as_page) 151 if (tx_buffer_info->mapped_as_page)
152 dma_unmap_page(&adapter->pdev->dev, 152 dma_unmap_page(&adapter->pdev->dev,
153 tx_buffer_info->dma, 153 tx_buffer_info->dma,
154 tx_buffer_info->length, 154 tx_buffer_info->length,
155 DMA_TO_DEVICE); 155 DMA_TO_DEVICE);
156 else 156 else
157 dma_unmap_single(&adapter->pdev->dev, 157 dma_unmap_single(&adapter->pdev->dev,
158 tx_buffer_info->dma, 158 tx_buffer_info->dma,
159 tx_buffer_info->length, 159 tx_buffer_info->length,
160 DMA_TO_DEVICE); 160 DMA_TO_DEVICE);
161 tx_buffer_info->dma = 0; 161 tx_buffer_info->dma = 0;
162 } 162 }
163 if (tx_buffer_info->skb) { 163 if (tx_buffer_info->skb) {
164 dev_kfree_skb_any(tx_buffer_info->skb); 164 dev_kfree_skb_any(tx_buffer_info->skb);
165 tx_buffer_info->skb = NULL; 165 tx_buffer_info->skb = NULL;
166 } 166 }
167 tx_buffer_info->time_stamp = 0; 167 tx_buffer_info->time_stamp = 0;
168 /* tx_buffer_info must be completely set up in the transmit path */ 168 /* tx_buffer_info must be completely set up in the transmit path */
169 } 169 }
170 170
171 #define IXGBE_MAX_TXD_PWR 14 171 #define IXGBE_MAX_TXD_PWR 14
172 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 172 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
173 173
174 /* Tx Descriptors needed, worst case */ 174 /* Tx Descriptors needed, worst case */
175 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ 175 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
176 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 176 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
177 #ifdef MAX_SKB_FRAGS 177 #ifdef MAX_SKB_FRAGS
178 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 178 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
179 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 179 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
180 #else 180 #else
181 #define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) 181 #define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
182 #endif 182 #endif
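The macros above amount to a ceiling division by the 16 KiB (1 << 14) per-descriptor data limit, plus reserved descriptors for the page fragments and one context descriptor. A standalone sketch of the arithmetic (illustrative only):

/* Illustrative only: TXD_USE_COUNT() is ceil(len / 16384). */
#define MAX_TXD_PWR		14
#define MAX_DATA_PER_TXD	(1u << MAX_TXD_PWR)
#define USE_COUNT(S)		(((S) >> MAX_TXD_PWR) + \
				 (((S) & (MAX_DATA_PER_TXD - 1)) ? 1 : 0))

/* USE_COUNT(16384) == 1 (exactly one full descriptor)
 * USE_COUNT(20000) == 2 (16384 bytes plus a 3616 byte tail)
 * With 4 KiB pages USE_COUNT(PAGE_SIZE) == 1, so DESC_NEEDED is
 * 1 (skb->data) + MAX_SKB_FRAGS (one per fragment) + 1 (context).
 */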
183 183
184 static void ixgbevf_tx_timeout(struct net_device *netdev); 184 static void ixgbevf_tx_timeout(struct net_device *netdev);
185 185
186 /** 186 /**
187 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes 187 * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
188 * @adapter: board private structure 188 * @adapter: board private structure
189 * @tx_ring: tx ring to clean 189 * @tx_ring: tx ring to clean
190 **/ 190 **/
191 static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter, 191 static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
192 struct ixgbevf_ring *tx_ring) 192 struct ixgbevf_ring *tx_ring)
193 { 193 {
194 struct net_device *netdev = adapter->netdev; 194 struct net_device *netdev = adapter->netdev;
195 struct ixgbe_hw *hw = &adapter->hw; 195 struct ixgbe_hw *hw = &adapter->hw;
196 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 196 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
197 struct ixgbevf_tx_buffer *tx_buffer_info; 197 struct ixgbevf_tx_buffer *tx_buffer_info;
198 unsigned int i, eop, count = 0; 198 unsigned int i, eop, count = 0;
199 unsigned int total_bytes = 0, total_packets = 0; 199 unsigned int total_bytes = 0, total_packets = 0;
200 200
201 i = tx_ring->next_to_clean; 201 i = tx_ring->next_to_clean;
202 eop = tx_ring->tx_buffer_info[i].next_to_watch; 202 eop = tx_ring->tx_buffer_info[i].next_to_watch;
203 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 203 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
204 204
205 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 205 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
206 (count < tx_ring->work_limit)) { 206 (count < tx_ring->work_limit)) {
207 bool cleaned = false; 207 bool cleaned = false;
208 rmb(); /* read buffer_info after eop_desc */ 208 rmb(); /* read buffer_info after eop_desc */
209 /* eop could change between read and DD-check */ 209 /* eop could change between read and DD-check */
210 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch)) 210 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch))
211 goto cont_loop; 211 goto cont_loop;
212 for ( ; !cleaned; count++) { 212 for ( ; !cleaned; count++) {
213 struct sk_buff *skb; 213 struct sk_buff *skb;
214 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 214 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
215 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 215 tx_buffer_info = &tx_ring->tx_buffer_info[i];
216 cleaned = (i == eop); 216 cleaned = (i == eop);
217 skb = tx_buffer_info->skb; 217 skb = tx_buffer_info->skb;
218 218
219 if (cleaned && skb) { 219 if (cleaned && skb) {
220 unsigned int segs, bytecount; 220 unsigned int segs, bytecount;
221 221
222 /* gso_segs is currently only valid for tcp */ 222 /* gso_segs is currently only valid for tcp */
223 segs = skb_shinfo(skb)->gso_segs ?: 1; 223 segs = skb_shinfo(skb)->gso_segs ?: 1;
224 /* multiply data chunks by size of headers */ 224 /* multiply data chunks by size of headers */
225 bytecount = ((segs - 1) * skb_headlen(skb)) + 225 bytecount = ((segs - 1) * skb_headlen(skb)) +
226 skb->len; 226 skb->len;
227 total_packets += segs; 227 total_packets += segs;
228 total_bytes += bytecount; 228 total_bytes += bytecount;
229 } 229 }
230 230
231 ixgbevf_unmap_and_free_tx_resource(adapter, 231 ixgbevf_unmap_and_free_tx_resource(adapter,
232 tx_buffer_info); 232 tx_buffer_info);
233 233
234 tx_desc->wb.status = 0; 234 tx_desc->wb.status = 0;
235 235
236 i++; 236 i++;
237 if (i == tx_ring->count) 237 if (i == tx_ring->count)
238 i = 0; 238 i = 0;
239 } 239 }
240 240
241 cont_loop: 241 cont_loop:
242 eop = tx_ring->tx_buffer_info[i].next_to_watch; 242 eop = tx_ring->tx_buffer_info[i].next_to_watch;
243 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 243 eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
244 } 244 }
245 245
246 tx_ring->next_to_clean = i; 246 tx_ring->next_to_clean = i;
247 247
248 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 248 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
249 if (unlikely(count && netif_carrier_ok(netdev) && 249 if (unlikely(count && netif_carrier_ok(netdev) &&
250 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 250 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
251 /* Make sure that anybody stopping the queue after this 251 /* Make sure that anybody stopping the queue after this
252 * sees the new next_to_clean. 252 * sees the new next_to_clean.
253 */ 253 */
254 smp_mb(); 254 smp_mb();
255 #ifdef HAVE_TX_MQ 255 #ifdef HAVE_TX_MQ
256 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 256 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
257 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 257 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
258 netif_wake_subqueue(netdev, tx_ring->queue_index); 258 netif_wake_subqueue(netdev, tx_ring->queue_index);
259 ++adapter->restart_queue; 259 ++adapter->restart_queue;
260 } 260 }
261 #else 261 #else
262 if (netif_queue_stopped(netdev) && 262 if (netif_queue_stopped(netdev) &&
263 !test_bit(__IXGBEVF_DOWN, &adapter->state)) { 263 !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
264 netif_wake_queue(netdev); 264 netif_wake_queue(netdev);
265 ++adapter->restart_queue; 265 ++adapter->restart_queue;
266 } 266 }
267 #endif 267 #endif
268 } 268 }
269 269
270 /* re-arm the interrupt */ 270 /* re-arm the interrupt */
271 if ((count >= tx_ring->work_limit) && 271 if ((count >= tx_ring->work_limit) &&
272 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) { 272 (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
273 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx); 273 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
274 } 274 }
275 275
276 u64_stats_update_begin(&tx_ring->syncp); 276 u64_stats_update_begin(&tx_ring->syncp);
277 tx_ring->total_bytes += total_bytes; 277 tx_ring->total_bytes += total_bytes;
278 tx_ring->total_packets += total_packets; 278 tx_ring->total_packets += total_packets;
279 u64_stats_update_end(&tx_ring->syncp); 279 u64_stats_update_end(&tx_ring->syncp);
280 280
281 return count < tx_ring->work_limit; 281 return count < tx_ring->work_limit;
282 } 282 }
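The byte count accumulated above estimates wire bytes for a TSO frame: the skb carries its headers only once, so each segment after the first contributes one extra copy of the header length. A worked sketch (illustrative only, not driver code):

/* Illustrative only: the wire-byte estimate used for Tx statistics. */
static unsigned int tso_wire_bytes(unsigned int segs, unsigned int headlen,
				   unsigned int skb_len)
{
	return ((segs - 1) * headlen) + skb_len;
}
/* 3 segments, 66 byte headers, 4466 byte skb -> 132 + 4466 == 4598 bytes */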
283 283
284 /** 284 /**
285 * ixgbevf_receive_skb - Send a completed packet up the stack 285 * ixgbevf_receive_skb - Send a completed packet up the stack
286 * @q_vector: structure containing interrupt and ring information 286 * @q_vector: structure containing interrupt and ring information
287 * @skb: packet to send up 287 * @skb: packet to send up
288 * @status: hardware indication of status of receive 288 * @status: hardware indication of status of receive
289 * @rx_ring: rx descriptor ring (for a specific queue) to setup 289 * @rx_ring: rx descriptor ring (for a specific queue) to setup
290 * @rx_desc: rx descriptor 290 * @rx_desc: rx descriptor
291 **/ 291 **/
292 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector, 292 static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
293 struct sk_buff *skb, u8 status, 293 struct sk_buff *skb, u8 status,
294 struct ixgbevf_ring *ring, 294 struct ixgbevf_ring *ring,
295 union ixgbe_adv_rx_desc *rx_desc) 295 union ixgbe_adv_rx_desc *rx_desc)
296 { 296 {
297 struct ixgbevf_adapter *adapter = q_vector->adapter; 297 struct ixgbevf_adapter *adapter = q_vector->adapter;
298 bool is_vlan = (status & IXGBE_RXD_STAT_VP); 298 bool is_vlan = (status & IXGBE_RXD_STAT_VP);
299 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan); 299 u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
300 300
301 if (is_vlan && test_bit(tag, adapter->active_vlans)) 301 if (is_vlan && test_bit(tag, adapter->active_vlans))
302 __vlan_hwaccel_put_tag(skb, tag); 302 __vlan_hwaccel_put_tag(skb, tag);
303 303
304 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) 304 if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
305 napi_gro_receive(&q_vector->napi, skb); 305 napi_gro_receive(&q_vector->napi, skb);
306 else 306 else
307 netif_rx(skb); 307 netif_rx(skb);
308 } 308 }
309 309
310 /** 310 /**
311 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum 311 * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
312 * @adapter: address of board private structure 312 * @adapter: address of board private structure
313 * @status_err: hardware indication of status of receive 313 * @status_err: hardware indication of status of receive
314 * @skb: skb currently being received and modified 314 * @skb: skb currently being received and modified
315 **/ 315 **/
316 static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter, 316 static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
317 u32 status_err, struct sk_buff *skb) 317 u32 status_err, struct sk_buff *skb)
318 { 318 {
319 skb_checksum_none_assert(skb); 319 skb_checksum_none_assert(skb);
320 320
321 /* Rx csum disabled */ 321 /* Rx csum disabled */
322 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED)) 322 if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
323 return; 323 return;
324 324
325 /* if IP and error */ 325 /* if IP and error */
326 if ((status_err & IXGBE_RXD_STAT_IPCS) && 326 if ((status_err & IXGBE_RXD_STAT_IPCS) &&
327 (status_err & IXGBE_RXDADV_ERR_IPE)) { 327 (status_err & IXGBE_RXDADV_ERR_IPE)) {
328 adapter->hw_csum_rx_error++; 328 adapter->hw_csum_rx_error++;
329 return; 329 return;
330 } 330 }
331 331
332 if (!(status_err & IXGBE_RXD_STAT_L4CS)) 332 if (!(status_err & IXGBE_RXD_STAT_L4CS))
333 return; 333 return;
334 334
335 if (status_err & IXGBE_RXDADV_ERR_TCPE) { 335 if (status_err & IXGBE_RXDADV_ERR_TCPE) {
336 adapter->hw_csum_rx_error++; 336 adapter->hw_csum_rx_error++;
337 return; 337 return;
338 } 338 }
339 339
340 /* It must be a TCP or UDP packet with a valid checksum */ 340 /* It must be a TCP or UDP packet with a valid checksum */
341 skb->ip_summed = CHECKSUM_UNNECESSARY; 341 skb->ip_summed = CHECKSUM_UNNECESSARY;
342 adapter->hw_csum_rx_good++; 342 adapter->hw_csum_rx_good++;
343 } 343 }
344 344
345 /** 345 /**
346 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split 346 * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
347 * @adapter: address of board private structure 347 * @adapter: address of board private structure
348 **/ 348 **/
349 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter, 349 static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
350 struct ixgbevf_ring *rx_ring, 350 struct ixgbevf_ring *rx_ring,
351 int cleaned_count) 351 int cleaned_count)
352 { 352 {
353 struct pci_dev *pdev = adapter->pdev; 353 struct pci_dev *pdev = adapter->pdev;
354 union ixgbe_adv_rx_desc *rx_desc; 354 union ixgbe_adv_rx_desc *rx_desc;
355 struct ixgbevf_rx_buffer *bi; 355 struct ixgbevf_rx_buffer *bi;
356 struct sk_buff *skb; 356 struct sk_buff *skb;
357 unsigned int i; 357 unsigned int i;
358 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN; 358 unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
359 359
360 i = rx_ring->next_to_use; 360 i = rx_ring->next_to_use;
361 bi = &rx_ring->rx_buffer_info[i]; 361 bi = &rx_ring->rx_buffer_info[i];
362 362
363 while (cleaned_count--) { 363 while (cleaned_count--) {
364 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 364 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
365 365
366 if (!bi->page_dma && 366 if (!bi->page_dma &&
367 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { 367 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
368 if (!bi->page) { 368 if (!bi->page) {
369 bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD); 369 bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
370 if (!bi->page) { 370 if (!bi->page) {
371 adapter->alloc_rx_page_failed++; 371 adapter->alloc_rx_page_failed++;
372 goto no_buffers; 372 goto no_buffers;
373 } 373 }
374 bi->page_offset = 0; 374 bi->page_offset = 0;
375 } else { 375 } else {
376 /* use a half page if we're re-using */ 376 /* use a half page if we're re-using */
377 bi->page_offset ^= (PAGE_SIZE / 2); 377 bi->page_offset ^= (PAGE_SIZE / 2);
378 } 378 }
379 379
380 bi->page_dma = dma_map_page(&pdev->dev, bi->page, 380 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
381 bi->page_offset, 381 bi->page_offset,
382 (PAGE_SIZE / 2), 382 (PAGE_SIZE / 2),
383 DMA_FROM_DEVICE); 383 DMA_FROM_DEVICE);
384 } 384 }
385 385
386 skb = bi->skb; 386 skb = bi->skb;
387 if (!skb) { 387 if (!skb) {
388 skb = netdev_alloc_skb(adapter->netdev, 388 skb = netdev_alloc_skb(adapter->netdev,
389 bufsz); 389 bufsz);
390 390
391 if (!skb) { 391 if (!skb) {
392 adapter->alloc_rx_buff_failed++; 392 adapter->alloc_rx_buff_failed++;
393 goto no_buffers; 393 goto no_buffers;
394 } 394 }
395 395
396 /* 396 /*
397 * Make buffer alignment 2 beyond a 16 byte boundary 397 * Make buffer alignment 2 beyond a 16 byte boundary
398 * this will result in a 16 byte aligned IP header after 398 * this will result in a 16 byte aligned IP header after
399 * the 14 byte MAC header is removed 399 * the 14 byte MAC header is removed
400 */ 400 */
401 skb_reserve(skb, NET_IP_ALIGN); 401 skb_reserve(skb, NET_IP_ALIGN);
402 402
403 bi->skb = skb; 403 bi->skb = skb;
404 } 404 }
405 if (!bi->dma) { 405 if (!bi->dma) {
406 bi->dma = dma_map_single(&pdev->dev, skb->data, 406 bi->dma = dma_map_single(&pdev->dev, skb->data,
407 rx_ring->rx_buf_len, 407 rx_ring->rx_buf_len,
408 DMA_FROM_DEVICE); 408 DMA_FROM_DEVICE);
409 } 409 }
410 /* Refresh the desc even if buffer_addrs didn't change because 410 /* Refresh the desc even if buffer_addrs didn't change because
411 * each write-back erases this info. */ 411 * each write-back erases this info. */
412 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 412 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
413 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); 413 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
414 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); 414 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
415 } else { 415 } else {
416 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); 416 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
417 } 417 }
418 418
419 i++; 419 i++;
420 if (i == rx_ring->count) 420 if (i == rx_ring->count)
421 i = 0; 421 i = 0;
422 bi = &rx_ring->rx_buffer_info[i]; 422 bi = &rx_ring->rx_buffer_info[i];
423 } 423 }
424 424
425 no_buffers: 425 no_buffers:
426 if (rx_ring->next_to_use != i) { 426 if (rx_ring->next_to_use != i) {
427 rx_ring->next_to_use = i; 427 rx_ring->next_to_use = i;
428 if (i-- == 0) 428 if (i-- == 0)
429 i = (rx_ring->count - 1); 429 i = (rx_ring->count - 1);
430 430
431 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i); 431 ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
432 } 432 }
433 } 433 }
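When packet split is enabled, a receive page is reused by flipping the offset by half a page, so each allocated page backs two buffers before the refcount check in the Rx clean path retires it. A sketch of the flip, assuming 4 KiB pages (illustrative only):

/* Illustrative only: Rx page reuse alternates between the two page halves. */
static unsigned int next_half_page_offset(unsigned int page_offset)
{
	return page_offset ^ (4096 / 2);	/* 0 -> 2048 -> 0 -> ... */
}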
434 434
435 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter, 435 static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
436 u64 qmask) 436 u64 qmask)
437 { 437 {
438 u32 mask; 438 u32 mask;
439 struct ixgbe_hw *hw = &adapter->hw; 439 struct ixgbe_hw *hw = &adapter->hw;
440 440
441 mask = (qmask & 0xFFFFFFFF); 441 mask = (qmask & 0xFFFFFFFF);
442 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 442 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
443 } 443 }
444 444
445 static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) 445 static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
446 { 446 {
447 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; 447 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
448 } 448 }
449 449
450 static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) 450 static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
451 { 451 {
452 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 452 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
453 } 453 }
454 454
455 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector, 455 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
456 struct ixgbevf_ring *rx_ring, 456 struct ixgbevf_ring *rx_ring,
457 int *work_done, int work_to_do) 457 int *work_done, int work_to_do)
458 { 458 {
459 struct ixgbevf_adapter *adapter = q_vector->adapter; 459 struct ixgbevf_adapter *adapter = q_vector->adapter;
460 struct pci_dev *pdev = adapter->pdev; 460 struct pci_dev *pdev = adapter->pdev;
461 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 461 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
462 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer; 462 struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
463 struct sk_buff *skb; 463 struct sk_buff *skb;
464 unsigned int i; 464 unsigned int i;
465 u32 len, staterr; 465 u32 len, staterr;
466 u16 hdr_info; 466 u16 hdr_info;
467 bool cleaned = false; 467 bool cleaned = false;
468 int cleaned_count = 0; 468 int cleaned_count = 0;
469 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 469 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
470 470
471 i = rx_ring->next_to_clean; 471 i = rx_ring->next_to_clean;
472 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i); 472 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
473 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 473 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
474 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 474 rx_buffer_info = &rx_ring->rx_buffer_info[i];
475 475
476 while (staterr & IXGBE_RXD_STAT_DD) { 476 while (staterr & IXGBE_RXD_STAT_DD) {
477 u32 upper_len = 0; 477 u32 upper_len = 0;
478 if (*work_done >= work_to_do) 478 if (*work_done >= work_to_do)
479 break; 479 break;
480 (*work_done)++; 480 (*work_done)++;
481 481
482 rmb(); /* read descriptor and rx_buffer_info after status DD */ 482 rmb(); /* read descriptor and rx_buffer_info after status DD */
483 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 483 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
484 hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc)); 484 hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
485 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 485 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
486 IXGBE_RXDADV_HDRBUFLEN_SHIFT; 486 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
487 if (hdr_info & IXGBE_RXDADV_SPH) 487 if (hdr_info & IXGBE_RXDADV_SPH)
488 adapter->rx_hdr_split++; 488 adapter->rx_hdr_split++;
489 if (len > IXGBEVF_RX_HDR_SIZE) 489 if (len > IXGBEVF_RX_HDR_SIZE)
490 len = IXGBEVF_RX_HDR_SIZE; 490 len = IXGBEVF_RX_HDR_SIZE;
491 upper_len = le16_to_cpu(rx_desc->wb.upper.length); 491 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
492 } else { 492 } else {
493 len = le16_to_cpu(rx_desc->wb.upper.length); 493 len = le16_to_cpu(rx_desc->wb.upper.length);
494 } 494 }
495 cleaned = true; 495 cleaned = true;
496 skb = rx_buffer_info->skb; 496 skb = rx_buffer_info->skb;
497 prefetch(skb->data - NET_IP_ALIGN); 497 prefetch(skb->data - NET_IP_ALIGN);
498 rx_buffer_info->skb = NULL; 498 rx_buffer_info->skb = NULL;
499 499
500 if (rx_buffer_info->dma) { 500 if (rx_buffer_info->dma) {
501 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 501 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
502 rx_ring->rx_buf_len, 502 rx_ring->rx_buf_len,
503 DMA_FROM_DEVICE); 503 DMA_FROM_DEVICE);
504 rx_buffer_info->dma = 0; 504 rx_buffer_info->dma = 0;
505 skb_put(skb, len); 505 skb_put(skb, len);
506 } 506 }
507 507
508 if (upper_len) { 508 if (upper_len) {
509 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 509 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
510 PAGE_SIZE / 2, DMA_FROM_DEVICE); 510 PAGE_SIZE / 2, DMA_FROM_DEVICE);
511 rx_buffer_info->page_dma = 0; 511 rx_buffer_info->page_dma = 0;
512 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 512 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
513 rx_buffer_info->page, 513 rx_buffer_info->page,
514 rx_buffer_info->page_offset, 514 rx_buffer_info->page_offset,
515 upper_len); 515 upper_len);
516 516
517 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || 517 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
518 (page_count(rx_buffer_info->page) != 1)) 518 (page_count(rx_buffer_info->page) != 1))
519 rx_buffer_info->page = NULL; 519 rx_buffer_info->page = NULL;
520 else 520 else
521 get_page(rx_buffer_info->page); 521 get_page(rx_buffer_info->page);
522 522
523 skb->len += upper_len; 523 skb->len += upper_len;
524 skb->data_len += upper_len; 524 skb->data_len += upper_len;
525 skb->truesize += upper_len; 525 skb->truesize += upper_len;
526 } 526 }
527 527
528 i++; 528 i++;
529 if (i == rx_ring->count) 529 if (i == rx_ring->count)
530 i = 0; 530 i = 0;
531 531
532 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i); 532 next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
533 prefetch(next_rxd); 533 prefetch(next_rxd);
534 cleaned_count++; 534 cleaned_count++;
535 535
536 next_buffer = &rx_ring->rx_buffer_info[i]; 536 next_buffer = &rx_ring->rx_buffer_info[i];
537 537
538 if (!(staterr & IXGBE_RXD_STAT_EOP)) { 538 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
539 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 539 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
540 rx_buffer_info->skb = next_buffer->skb; 540 rx_buffer_info->skb = next_buffer->skb;
541 rx_buffer_info->dma = next_buffer->dma; 541 rx_buffer_info->dma = next_buffer->dma;
542 next_buffer->skb = skb; 542 next_buffer->skb = skb;
543 next_buffer->dma = 0; 543 next_buffer->dma = 0;
544 } else { 544 } else {
545 skb->next = next_buffer->skb; 545 skb->next = next_buffer->skb;
546 skb->next->prev = skb; 546 skb->next->prev = skb;
547 } 547 }
548 adapter->non_eop_descs++; 548 adapter->non_eop_descs++;
549 goto next_desc; 549 goto next_desc;
550 } 550 }
551 551
552 /* ERR_MASK will only have valid bits if EOP set */ 552 /* ERR_MASK will only have valid bits if EOP set */
553 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) { 553 if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
554 dev_kfree_skb_irq(skb); 554 dev_kfree_skb_irq(skb);
555 goto next_desc; 555 goto next_desc;
556 } 556 }
557 557
558 ixgbevf_rx_checksum(adapter, staterr, skb); 558 ixgbevf_rx_checksum(adapter, staterr, skb);
559 559
560 /* probably a little skewed due to removing CRC */ 560 /* probably a little skewed due to removing CRC */
561 total_rx_bytes += skb->len; 561 total_rx_bytes += skb->len;
562 total_rx_packets++; 562 total_rx_packets++;
563 563
564 /* 564 /*
565 * Work around issue of some types of VM to VM loop back 565 * Work around issue of some types of VM to VM loop back
566 * packets not getting split correctly 566 * packets not getting split correctly
567 */ 567 */
568 if (staterr & IXGBE_RXD_STAT_LB) { 568 if (staterr & IXGBE_RXD_STAT_LB) {
569 u32 header_fixup_len = skb_headlen(skb); 569 u32 header_fixup_len = skb_headlen(skb);
570 if (header_fixup_len < 14) 570 if (header_fixup_len < 14)
571 skb_push(skb, header_fixup_len); 571 skb_push(skb, header_fixup_len);
572 } 572 }
573 skb->protocol = eth_type_trans(skb, adapter->netdev); 573 skb->protocol = eth_type_trans(skb, adapter->netdev);
574 574
575 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 575 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
576 576
577 next_desc: 577 next_desc:
578 rx_desc->wb.upper.status_error = 0; 578 rx_desc->wb.upper.status_error = 0;
579 579
580 /* return some buffers to hardware, one at a time is too slow */ 580 /* return some buffers to hardware, one at a time is too slow */
581 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) { 581 if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
582 ixgbevf_alloc_rx_buffers(adapter, rx_ring, 582 ixgbevf_alloc_rx_buffers(adapter, rx_ring,
583 cleaned_count); 583 cleaned_count);
584 cleaned_count = 0; 584 cleaned_count = 0;
585 } 585 }
586 586
587 /* use prefetched values */ 587 /* use prefetched values */
588 rx_desc = next_rxd; 588 rx_desc = next_rxd;
589 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 589 rx_buffer_info = &rx_ring->rx_buffer_info[i];
590 590
591 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 591 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
592 } 592 }
593 593
594 rx_ring->next_to_clean = i; 594 rx_ring->next_to_clean = i;
595 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 595 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
596 596
597 if (cleaned_count) 597 if (cleaned_count)
598 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 598 ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
599 599
600 u64_stats_update_begin(&rx_ring->syncp); 600 u64_stats_update_begin(&rx_ring->syncp);
601 rx_ring->total_packets += total_rx_packets; 601 rx_ring->total_packets += total_rx_packets;
602 rx_ring->total_bytes += total_rx_bytes; 602 rx_ring->total_bytes += total_rx_bytes;
603 u64_stats_update_end(&rx_ring->syncp); 603 u64_stats_update_end(&rx_ring->syncp);
604 604
605 return cleaned; 605 return cleaned;
606 } 606 }
607 607
608 /** 608 /**
609 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine 609 * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
610 * @napi: napi struct with our devices info in it 610 * @napi: napi struct with our devices info in it
611 * @budget: amount of work driver is allowed to do this pass, in packets 611 * @budget: amount of work driver is allowed to do this pass, in packets
612 * 612 *
613 * This function is optimized for cleaning one queue only on a single 613 * This function is optimized for cleaning one queue only on a single
614 * q_vector!!! 614 * q_vector!!!
615 **/ 615 **/
616 static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget) 616 static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
617 { 617 {
618 struct ixgbevf_q_vector *q_vector = 618 struct ixgbevf_q_vector *q_vector =
619 container_of(napi, struct ixgbevf_q_vector, napi); 619 container_of(napi, struct ixgbevf_q_vector, napi);
620 struct ixgbevf_adapter *adapter = q_vector->adapter; 620 struct ixgbevf_adapter *adapter = q_vector->adapter;
621 struct ixgbevf_ring *rx_ring = NULL; 621 struct ixgbevf_ring *rx_ring = NULL;
622 int work_done = 0; 622 int work_done = 0;
623 long r_idx; 623 long r_idx;
624 624
625 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 625 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
626 rx_ring = &(adapter->rx_ring[r_idx]); 626 rx_ring = &(adapter->rx_ring[r_idx]);
627 627
628 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 628 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
629 629
630 /* If all Rx work done, exit the polling mode */ 630 /* If all Rx work done, exit the polling mode */
631 if (work_done < budget) { 631 if (work_done < budget) {
632 napi_complete(napi); 632 napi_complete(napi);
633 if (adapter->itr_setting & 1) 633 if (adapter->itr_setting & 1)
634 ixgbevf_set_itr_msix(q_vector); 634 ixgbevf_set_itr_msix(q_vector);
635 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 635 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
636 ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx); 636 ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
637 } 637 }
638 638
639 return work_done; 639 return work_done;
640 } 640 }
641 641
642 /** 642 /**
643 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine 643 * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
644 * @napi: napi struct with our devices info in it 644 * @napi: napi struct with our devices info in it
645 * @budget: amount of work driver is allowed to do this pass, in packets 645 * @budget: amount of work driver is allowed to do this pass, in packets
646 * 646 *
647 * This function will clean more than one rx queue associated with a 647 * This function will clean more than one rx queue associated with a
648 * q_vector. 648 * q_vector.
649 **/ 649 **/
650 static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget) 650 static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
651 { 651 {
652 struct ixgbevf_q_vector *q_vector = 652 struct ixgbevf_q_vector *q_vector =
653 container_of(napi, struct ixgbevf_q_vector, napi); 653 container_of(napi, struct ixgbevf_q_vector, napi);
654 struct ixgbevf_adapter *adapter = q_vector->adapter; 654 struct ixgbevf_adapter *adapter = q_vector->adapter;
655 struct ixgbevf_ring *rx_ring = NULL; 655 struct ixgbevf_ring *rx_ring = NULL;
656 int work_done = 0, i; 656 int work_done = 0, i;
657 long r_idx; 657 long r_idx;
658 u64 enable_mask = 0; 658 u64 enable_mask = 0;
659 659
660 /* attempt to distribute budget to each queue fairly, but don't allow 660 /* attempt to distribute budget to each queue fairly, but don't allow
661 * the budget to go below 1 because we'll exit polling */ 661 * the budget to go below 1 because we'll exit polling */
662 budget /= (q_vector->rxr_count ?: 1); 662 budget /= (q_vector->rxr_count ?: 1);
663 budget = max(budget, 1); 663 budget = max(budget, 1);
664 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 664 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
665 for (i = 0; i < q_vector->rxr_count; i++) { 665 for (i = 0; i < q_vector->rxr_count; i++) {
666 rx_ring = &(adapter->rx_ring[r_idx]); 666 rx_ring = &(adapter->rx_ring[r_idx]);
667 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 667 ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
668 enable_mask |= rx_ring->v_idx; 668 enable_mask |= rx_ring->v_idx;
669 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 669 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
670 r_idx + 1); 670 r_idx + 1);
671 } 671 }
672 672
673 #ifndef HAVE_NETDEV_NAPI_LIST 673 #ifndef HAVE_NETDEV_NAPI_LIST
674 if (!netif_running(adapter->netdev)) 674 if (!netif_running(adapter->netdev))
675 work_done = 0; 675 work_done = 0;
676 676
677 #endif 677 #endif
678 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 678 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
679 rx_ring = &(adapter->rx_ring[r_idx]); 679 rx_ring = &(adapter->rx_ring[r_idx]);
680 680
681 /* If all Rx work done, exit the polling mode */ 681 /* If all Rx work done, exit the polling mode */
682 if (work_done < budget) { 682 if (work_done < budget) {
683 napi_complete(napi); 683 napi_complete(napi);
684 if (adapter->itr_setting & 1) 684 if (adapter->itr_setting & 1)
685 ixgbevf_set_itr_msix(q_vector); 685 ixgbevf_set_itr_msix(q_vector);
686 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 686 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
687 ixgbevf_irq_enable_queues(adapter, enable_mask); 687 ixgbevf_irq_enable_queues(adapter, enable_mask);
688 } 688 }
689 689
690 return work_done; 690 return work_done;
691 } 691 }
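The budget split above gives each ring on the vector an equal share of the NAPI budget, but never less than one packet, so polling always makes progress. A small sketch of that division (illustrative only):

/* Illustrative only: per-ring share of the NAPI budget. */
static int per_ring_budget(int budget, unsigned int rxr_count)
{
	budget /= (rxr_count ?: 1);	/* guard against an empty vector */
	return budget > 1 ? budget : 1;	/* never poll with a budget of 0 */
}
/* per_ring_budget(64, 3) == 21; per_ring_budget(64, 100) == 1 */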
692 692
693 693
694 /** 694 /**
695 * ixgbevf_configure_msix - Configure MSI-X hardware 695 * ixgbevf_configure_msix - Configure MSI-X hardware
696 * @adapter: board private structure 696 * @adapter: board private structure
697 * 697 *
698 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X 698 * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
699 * interrupts. 699 * interrupts.
700 **/ 700 **/
701 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) 701 static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
702 { 702 {
703 struct ixgbevf_q_vector *q_vector; 703 struct ixgbevf_q_vector *q_vector;
704 struct ixgbe_hw *hw = &adapter->hw; 704 struct ixgbe_hw *hw = &adapter->hw;
705 int i, j, q_vectors, v_idx, r_idx; 705 int i, j, q_vectors, v_idx, r_idx;
706 u32 mask; 706 u32 mask;
707 707
708 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 708 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
709 709
710 /* 710 /*
711 * Populate the IVAR table and set the ITR values to the 711 * Populate the IVAR table and set the ITR values to the
712 * corresponding register. 712 * corresponding register.
713 */ 713 */
714 for (v_idx = 0; v_idx < q_vectors; v_idx++) { 714 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
715 q_vector = adapter->q_vector[v_idx]; 715 q_vector = adapter->q_vector[v_idx];
716 /* XXX for_each_set_bit(...) */ 716 /* XXX for_each_set_bit(...) */
717 r_idx = find_first_bit(q_vector->rxr_idx, 717 r_idx = find_first_bit(q_vector->rxr_idx,
718 adapter->num_rx_queues); 718 adapter->num_rx_queues);
719 719
720 for (i = 0; i < q_vector->rxr_count; i++) { 720 for (i = 0; i < q_vector->rxr_count; i++) {
721 j = adapter->rx_ring[r_idx].reg_idx; 721 j = adapter->rx_ring[r_idx].reg_idx;
722 ixgbevf_set_ivar(adapter, 0, j, v_idx); 722 ixgbevf_set_ivar(adapter, 0, j, v_idx);
723 r_idx = find_next_bit(q_vector->rxr_idx, 723 r_idx = find_next_bit(q_vector->rxr_idx,
724 adapter->num_rx_queues, 724 adapter->num_rx_queues,
725 r_idx + 1); 725 r_idx + 1);
726 } 726 }
727 r_idx = find_first_bit(q_vector->txr_idx, 727 r_idx = find_first_bit(q_vector->txr_idx,
728 adapter->num_tx_queues); 728 adapter->num_tx_queues);
729 729
730 for (i = 0; i < q_vector->txr_count; i++) { 730 for (i = 0; i < q_vector->txr_count; i++) {
731 j = adapter->tx_ring[r_idx].reg_idx; 731 j = adapter->tx_ring[r_idx].reg_idx;
732 ixgbevf_set_ivar(adapter, 1, j, v_idx); 732 ixgbevf_set_ivar(adapter, 1, j, v_idx);
733 r_idx = find_next_bit(q_vector->txr_idx, 733 r_idx = find_next_bit(q_vector->txr_idx,
734 adapter->num_tx_queues, 734 adapter->num_tx_queues,
735 r_idx + 1); 735 r_idx + 1);
736 } 736 }
737 737
738 /* if this is a tx only vector halve the interrupt rate */ 738 /* if this is a tx only vector halve the interrupt rate */
739 if (q_vector->txr_count && !q_vector->rxr_count) 739 if (q_vector->txr_count && !q_vector->rxr_count)
740 q_vector->eitr = (adapter->eitr_param >> 1); 740 q_vector->eitr = (adapter->eitr_param >> 1);
741 else if (q_vector->rxr_count) 741 else if (q_vector->rxr_count)
742 /* rx only */ 742 /* rx only */
743 q_vector->eitr = adapter->eitr_param; 743 q_vector->eitr = adapter->eitr_param;
744 744
745 ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr); 745 ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
746 } 746 }
747 747
748 ixgbevf_set_ivar(adapter, -1, 1, v_idx); 748 ixgbevf_set_ivar(adapter, -1, 1, v_idx);
749 749
750 /* set up to autoclear timer, and the vectors */ 750 /* set up to autoclear timer, and the vectors */
751 mask = IXGBE_EIMS_ENABLE_MASK; 751 mask = IXGBE_EIMS_ENABLE_MASK;
752 mask &= ~IXGBE_EIMS_OTHER; 752 mask &= ~IXGBE_EIMS_OTHER;
753 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask); 753 IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
754 } 754 }
755 755
756 enum latency_range { 756 enum latency_range {
757 lowest_latency = 0, 757 lowest_latency = 0,
758 low_latency = 1, 758 low_latency = 1,
759 bulk_latency = 2, 759 bulk_latency = 2,
760 latency_invalid = 255 760 latency_invalid = 255
761 }; 761 };
762 762
763 /** 763 /**
764 * ixgbevf_update_itr - update the dynamic ITR value based on statistics 764 * ixgbevf_update_itr - update the dynamic ITR value based on statistics
765 * @adapter: pointer to adapter 765 * @adapter: pointer to adapter
766 * @eitr: eitr setting (ints per sec) to give last timeslice 766 * @eitr: eitr setting (ints per sec) to give last timeslice
767 * @itr_setting: current throttle rate in ints/second 767 * @itr_setting: current throttle rate in ints/second
768 * @packets: the number of packets during this measurement interval 768 * @packets: the number of packets during this measurement interval
769 * @bytes: the number of bytes during this measurement interval 769 * @bytes: the number of bytes during this measurement interval
770 * 770 *
771 * Stores a new ITR value based on packets and byte 771 * Stores a new ITR value based on packets and byte
772 * counts during the last interrupt. The advantage of per interrupt 772 * counts during the last interrupt. The advantage of per interrupt
773 * computation is faster updates and more accurate ITR for the current 773 * computation is faster updates and more accurate ITR for the current
774 * traffic pattern. Constants in this function were computed 774 * traffic pattern. Constants in this function were computed
775 * based on theoretical maximum wire speed and thresholds were set based 775 * based on theoretical maximum wire speed and thresholds were set based
776 * on testing data as well as attempting to minimize response time 776 * on testing data as well as attempting to minimize response time
777 * while increasing bulk throughput. 777 * while increasing bulk throughput.
778 **/ 778 **/
779 static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter, 779 static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
780 u32 eitr, u8 itr_setting, 780 u32 eitr, u8 itr_setting,
781 int packets, int bytes) 781 int packets, int bytes)
782 { 782 {
783 unsigned int retval = itr_setting; 783 unsigned int retval = itr_setting;
784 u32 timepassed_us; 784 u32 timepassed_us;
785 u64 bytes_perint; 785 u64 bytes_perint;
786 786
787 if (packets == 0) 787 if (packets == 0)
788 goto update_itr_done; 788 goto update_itr_done;
789 789
790 790
791 /* simple throttlerate management 791 /* simple throttlerate management
792 * 0-20MB/s lowest (100000 ints/s) 792 * 0-20MB/s lowest (100000 ints/s)
793 * 20-100MB/s low (20000 ints/s) 793 * 20-100MB/s low (20000 ints/s)
794 * 100-1249MB/s bulk (8000 ints/s) 794 * 100-1249MB/s bulk (8000 ints/s)
795 */ 795 */
796 /* what was last interrupt timeslice? */ 796 /* what was last interrupt timeslice? */
797 timepassed_us = 1000000/eitr; 797 timepassed_us = 1000000/eitr;
798 bytes_perint = bytes / timepassed_us; /* bytes/usec */ 798 bytes_perint = bytes / timepassed_us; /* bytes/usec */
799 799
800 switch (itr_setting) { 800 switch (itr_setting) {
801 case lowest_latency: 801 case lowest_latency:
802 if (bytes_perint > adapter->eitr_low) 802 if (bytes_perint > adapter->eitr_low)
803 retval = low_latency; 803 retval = low_latency;
804 break; 804 break;
805 case low_latency: 805 case low_latency:
806 if (bytes_perint > adapter->eitr_high) 806 if (bytes_perint > adapter->eitr_high)
807 retval = bulk_latency; 807 retval = bulk_latency;
808 else if (bytes_perint <= adapter->eitr_low) 808 else if (bytes_perint <= adapter->eitr_low)
809 retval = lowest_latency; 809 retval = lowest_latency;
810 break; 810 break;
811 case bulk_latency: 811 case bulk_latency:
812 if (bytes_perint <= adapter->eitr_high) 812 if (bytes_perint <= adapter->eitr_high)
813 retval = low_latency; 813 retval = low_latency;
814 break; 814 break;
815 } 815 }
816 816
817 update_itr_done: 817 update_itr_done:
818 return retval; 818 return retval;
819 } 819 }
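Because timepassed_us is the length of the last interrupt interval in microseconds, bytes_perint is effectively a throughput figure in bytes per microsecond, roughly MB/s, which is the unit the tier comment above is written in. A worked example (illustrative only):

/* Illustrative only: the throughput estimate feeding the ITR tiers. */
static unsigned long bytes_per_usec(unsigned long eitr_ints_per_sec,
				    unsigned long bytes)
{
	unsigned long timepassed_us = 1000000UL / eitr_ints_per_sec;

	return bytes / timepassed_us;
}
/* 5000 bytes in one 50 us slice (eitr == 20000 ints/s) -> 100 bytes/us,
 * i.e. about 100 MB/s, the low/bulk boundary in the comment above.
 */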
820 820
821 /** 821 /**
822 * ixgbevf_write_eitr - write VTEITR register in hardware specific way 822 * ixgbevf_write_eitr - write VTEITR register in hardware specific way
823 * @adapter: pointer to adapter struct 823 * @adapter: pointer to adapter struct
824 * @v_idx: vector index into q_vector array 824 * @v_idx: vector index into q_vector array
825 * @itr_reg: new value to be written in *register* format, not ints/s 825 * @itr_reg: new value to be written in *register* format, not ints/s
826 * 826 *
827 * This function is made to be called by ethtool and by the driver 827 * This function is made to be called by ethtool and by the driver
828 * when it needs to update VTEITR registers at runtime. Hardware 828 * when it needs to update VTEITR registers at runtime. Hardware
829 * specific quirks/differences are taken care of here. 829 * specific quirks/differences are taken care of here.
830 */ 830 */
831 static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx, 831 static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
832 u32 itr_reg) 832 u32 itr_reg)
833 { 833 {
834 struct ixgbe_hw *hw = &adapter->hw; 834 struct ixgbe_hw *hw = &adapter->hw;
835 835
836 itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg); 836 itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
837 837
838 /* 838 /*
839 * set the WDIS bit to not clear the timer bits and cause an 839 * set the WDIS bit to not clear the timer bits and cause an
840 * immediate assertion of the interrupt 840 * immediate assertion of the interrupt
841 */ 841 */
842 itr_reg |= IXGBE_EITR_CNT_WDIS; 842 itr_reg |= IXGBE_EITR_CNT_WDIS;
843 843
844 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg); 844 IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
845 } 845 }
846 846
847 static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector) 847 static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
848 { 848 {
849 struct ixgbevf_adapter *adapter = q_vector->adapter; 849 struct ixgbevf_adapter *adapter = q_vector->adapter;
850 u32 new_itr; 850 u32 new_itr;
851 u8 current_itr, ret_itr; 851 u8 current_itr, ret_itr;
852 int i, r_idx, v_idx = q_vector->v_idx; 852 int i, r_idx, v_idx = q_vector->v_idx;
853 struct ixgbevf_ring *rx_ring, *tx_ring; 853 struct ixgbevf_ring *rx_ring, *tx_ring;
854 854
855 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 855 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
856 for (i = 0; i < q_vector->txr_count; i++) { 856 for (i = 0; i < q_vector->txr_count; i++) {
857 tx_ring = &(adapter->tx_ring[r_idx]); 857 tx_ring = &(adapter->tx_ring[r_idx]);
858 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, 858 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
859 q_vector->tx_itr, 859 q_vector->tx_itr,
860 tx_ring->total_packets, 860 tx_ring->total_packets,
861 tx_ring->total_bytes); 861 tx_ring->total_bytes);
862 /* if the result for this queue would decrease interrupt 862 /* if the result for this queue would decrease interrupt
863 * rate for this vector then use that result */ 863 * rate for this vector then use that result */
864 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? 864 q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
865 q_vector->tx_itr - 1 : ret_itr); 865 q_vector->tx_itr - 1 : ret_itr);
866 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 866 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
867 r_idx + 1); 867 r_idx + 1);
868 } 868 }
869 869
870 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 870 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
871 for (i = 0; i < q_vector->rxr_count; i++) { 871 for (i = 0; i < q_vector->rxr_count; i++) {
872 rx_ring = &(adapter->rx_ring[r_idx]); 872 rx_ring = &(adapter->rx_ring[r_idx]);
873 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr, 873 ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
874 q_vector->rx_itr, 874 q_vector->rx_itr,
875 rx_ring->total_packets, 875 rx_ring->total_packets,
876 rx_ring->total_bytes); 876 rx_ring->total_bytes);
877 /* if the result for this queue would decrease interrupt 877 /* if the result for this queue would decrease interrupt
878 * rate for this vector then use that result */ 878 * rate for this vector then use that result */
879 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? 879 q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
880 q_vector->rx_itr - 1 : ret_itr); 880 q_vector->rx_itr - 1 : ret_itr);
881 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 881 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
882 r_idx + 1); 882 r_idx + 1);
883 } 883 }
884 884
885 current_itr = max(q_vector->rx_itr, q_vector->tx_itr); 885 current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
886 886
887 switch (current_itr) { 887 switch (current_itr) {
888 /* counts and packets in update_itr are dependent on these numbers */ 888 /* counts and packets in update_itr are dependent on these numbers */
889 case lowest_latency: 889 case lowest_latency:
890 new_itr = 100000; 890 new_itr = 100000;
891 break; 891 break;
892 case low_latency: 892 case low_latency:
893 new_itr = 20000; /* aka hwitr = ~200 */ 893 new_itr = 20000; /* aka hwitr = ~200 */
894 break; 894 break;
895 case bulk_latency: 895 case bulk_latency:
896 default: 896 default:
897 new_itr = 8000; 897 new_itr = 8000;
898 break; 898 break;
899 } 899 }
900 900
901 if (new_itr != q_vector->eitr) { 901 if (new_itr != q_vector->eitr) {
902 u32 itr_reg; 902 u32 itr_reg;
903 903
904 /* save the algorithm value here, not the smoothed one */ 904 /* save the algorithm value here, not the smoothed one */
905 q_vector->eitr = new_itr; 905 q_vector->eitr = new_itr;
906 /* do an exponential smoothing */ 906 /* do an exponential smoothing */
907 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 907 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
908 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 908 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
909 ixgbevf_write_eitr(adapter, v_idx, itr_reg); 909 ixgbevf_write_eitr(adapter, v_idx, itr_reg);
910 } 910 }
911 } 911 }
912 912
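Editor's note: ixgbevf_set_itr_msix() above folds the per-queue ITR estimates into a single latency class per vector and then maps that class onto a target interrupt rate before programming EITR. A minimal standalone sketch of that class-to-rate mapping, using hypothetical demo_* names rather than the driver's types (illustration only, not part of this patch):

        /* Hypothetical illustration; mirrors the switch on current_itr above. */
        enum demo_latency { DEMO_LOWEST_LATENCY, DEMO_LOW_LATENCY, DEMO_BULK_LATENCY };

        static unsigned int demo_itr_target(enum demo_latency class)
        {
                switch (class) {
                case DEMO_LOWEST_LATENCY:
                        return 100000;  /* favor latency: interrupt very often */
                case DEMO_LOW_LATENCY:
                        return 20000;   /* roughly "hwitr = ~200" per the comment above */
                case DEMO_BULK_LATENCY:
                default:
                        return 8000;    /* favor throughput: batch more work per IRQ */
                }
        }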
913 static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) 913 static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
914 { 914 {
915 struct net_device *netdev = data; 915 struct net_device *netdev = data;
916 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 916 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
917 struct ixgbe_hw *hw = &adapter->hw; 917 struct ixgbe_hw *hw = &adapter->hw;
918 u32 eicr; 918 u32 eicr;
919 u32 msg; 919 u32 msg;
920 bool got_ack = false; 920 bool got_ack = false;
921 921
922 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); 922 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
923 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); 923 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
924 924
925 if (!hw->mbx.ops.check_for_ack(hw)) 925 if (!hw->mbx.ops.check_for_ack(hw))
926 got_ack = true; 926 got_ack = true;
927 927
928 if (!hw->mbx.ops.check_for_msg(hw)) { 928 if (!hw->mbx.ops.check_for_msg(hw)) {
929 hw->mbx.ops.read(hw, &msg, 1); 929 hw->mbx.ops.read(hw, &msg, 1);
930 930
931 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 931 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
932 mod_timer(&adapter->watchdog_timer, 932 mod_timer(&adapter->watchdog_timer,
933 round_jiffies(jiffies + 1)); 933 round_jiffies(jiffies + 1));
934 934
935 if (msg & IXGBE_VT_MSGTYPE_NACK) 935 if (msg & IXGBE_VT_MSGTYPE_NACK)
936 pr_warn("Last Request of type %2.2x to PF Nacked\n", 936 pr_warn("Last Request of type %2.2x to PF Nacked\n",
937 msg & 0xFF); 937 msg & 0xFF);
938 /* 938 /*
939 * Restore the PFSTS bit in case someone is polling for a 939 * Restore the PFSTS bit in case someone is polling for a
940 * return message from the PF 940 * return message from the PF
941 */ 941 */
942 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS; 942 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
943 } 943 }
944 944
945 /* 945 /*
946 * checking for the ack clears the PFACK bit. Place 946 * checking for the ack clears the PFACK bit. Place
947 * it back in the v2p_mailbox cache so that anyone 947 * it back in the v2p_mailbox cache so that anyone
948 * polling for an ack will not miss it 948 * polling for an ack will not miss it
949 */ 949 */
950 if (got_ack) 950 if (got_ack)
951 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK; 951 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
952 952
953 return IRQ_HANDLED; 953 return IRQ_HANDLED;
954 } 954 }
955 955
956 static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data) 956 static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
957 { 957 {
958 struct ixgbevf_q_vector *q_vector = data; 958 struct ixgbevf_q_vector *q_vector = data;
959 struct ixgbevf_adapter *adapter = q_vector->adapter; 959 struct ixgbevf_adapter *adapter = q_vector->adapter;
960 struct ixgbevf_ring *tx_ring; 960 struct ixgbevf_ring *tx_ring;
961 int i, r_idx; 961 int i, r_idx;
962 962
963 if (!q_vector->txr_count) 963 if (!q_vector->txr_count)
964 return IRQ_HANDLED; 964 return IRQ_HANDLED;
965 965
966 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 966 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
967 for (i = 0; i < q_vector->txr_count; i++) { 967 for (i = 0; i < q_vector->txr_count; i++) {
968 tx_ring = &(adapter->tx_ring[r_idx]); 968 tx_ring = &(adapter->tx_ring[r_idx]);
969 tx_ring->total_bytes = 0; 969 tx_ring->total_bytes = 0;
970 tx_ring->total_packets = 0; 970 tx_ring->total_packets = 0;
971 ixgbevf_clean_tx_irq(adapter, tx_ring); 971 ixgbevf_clean_tx_irq(adapter, tx_ring);
972 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 972 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
973 r_idx + 1); 973 r_idx + 1);
974 } 974 }
975 975
976 if (adapter->itr_setting & 1) 976 if (adapter->itr_setting & 1)
977 ixgbevf_set_itr_msix(q_vector); 977 ixgbevf_set_itr_msix(q_vector);
978 978
979 return IRQ_HANDLED; 979 return IRQ_HANDLED;
980 } 980 }
981 981
982 /** 982 /**
983 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues) 983 * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
984 * @irq: unused 984 * @irq: unused
985 * @data: pointer to our q_vector struct for this interrupt vector 985 * @data: pointer to our q_vector struct for this interrupt vector
986 **/ 986 **/
987 static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data) 987 static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
988 { 988 {
989 struct ixgbevf_q_vector *q_vector = data; 989 struct ixgbevf_q_vector *q_vector = data;
990 struct ixgbevf_adapter *adapter = q_vector->adapter; 990 struct ixgbevf_adapter *adapter = q_vector->adapter;
991 struct ixgbe_hw *hw = &adapter->hw; 991 struct ixgbe_hw *hw = &adapter->hw;
992 struct ixgbevf_ring *rx_ring; 992 struct ixgbevf_ring *rx_ring;
993 int r_idx; 993 int r_idx;
994 int i; 994 int i;
995 995
996 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 996 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
997 for (i = 0; i < q_vector->rxr_count; i++) { 997 for (i = 0; i < q_vector->rxr_count; i++) {
998 rx_ring = &(adapter->rx_ring[r_idx]); 998 rx_ring = &(adapter->rx_ring[r_idx]);
999 rx_ring->total_bytes = 0; 999 rx_ring->total_bytes = 0;
1000 rx_ring->total_packets = 0; 1000 rx_ring->total_packets = 0;
1001 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1001 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1002 r_idx + 1); 1002 r_idx + 1);
1003 } 1003 }
1004 1004
1005 if (!q_vector->rxr_count) 1005 if (!q_vector->rxr_count)
1006 return IRQ_HANDLED; 1006 return IRQ_HANDLED;
1007 1007
1008 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1008 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1009 rx_ring = &(adapter->rx_ring[r_idx]); 1009 rx_ring = &(adapter->rx_ring[r_idx]);
1010 /* disable interrupts on this vector only */ 1010 /* disable interrupts on this vector only */
1011 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx); 1011 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
1012 napi_schedule(&q_vector->napi); 1012 napi_schedule(&q_vector->napi);
1013 1013
1014 1014
1015 return IRQ_HANDLED; 1015 return IRQ_HANDLED;
1016 } 1016 }
1017 1017
1018 static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data) 1018 static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
1019 { 1019 {
1020 ixgbevf_msix_clean_rx(irq, data); 1020 ixgbevf_msix_clean_rx(irq, data);
1021 ixgbevf_msix_clean_tx(irq, data); 1021 ixgbevf_msix_clean_tx(irq, data);
1022 1022
1023 return IRQ_HANDLED; 1023 return IRQ_HANDLED;
1024 } 1024 }
1025 1025
1026 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx, 1026 static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
1027 int r_idx) 1027 int r_idx)
1028 { 1028 {
1029 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 1029 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1030 1030
1031 set_bit(r_idx, q_vector->rxr_idx); 1031 set_bit(r_idx, q_vector->rxr_idx);
1032 q_vector->rxr_count++; 1032 q_vector->rxr_count++;
1033 a->rx_ring[r_idx].v_idx = 1 << v_idx; 1033 a->rx_ring[r_idx].v_idx = 1 << v_idx;
1034 } 1034 }
1035 1035
1036 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx, 1036 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
1037 int t_idx) 1037 int t_idx)
1038 { 1038 {
1039 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx]; 1039 struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
1040 1040
1041 set_bit(t_idx, q_vector->txr_idx); 1041 set_bit(t_idx, q_vector->txr_idx);
1042 q_vector->txr_count++; 1042 q_vector->txr_count++;
1043 a->tx_ring[t_idx].v_idx = 1 << v_idx; 1043 a->tx_ring[t_idx].v_idx = 1 << v_idx;
1044 } 1044 }
1045 1045
1046 /** 1046 /**
1047 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors 1047 * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
1048 * @adapter: board private structure to initialize 1048 * @adapter: board private structure to initialize
1049 * 1049 *
1050 * This function maps descriptor rings to the queue-specific vectors 1050 * This function maps descriptor rings to the queue-specific vectors
1051 * we were allotted through the MSI-X enabling code. Ideally, we'd have 1051 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1052 * one vector per ring/queue, but on a constrained vector budget, we 1052 * one vector per ring/queue, but on a constrained vector budget, we
1053 * group the rings as "efficiently" as possible. You would add new 1053 * group the rings as "efficiently" as possible. You would add new
1054 * mapping configurations in here. 1054 * mapping configurations in here.
1055 **/ 1055 **/
1056 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) 1056 static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
1057 { 1057 {
1058 int q_vectors; 1058 int q_vectors;
1059 int v_start = 0; 1059 int v_start = 0;
1060 int rxr_idx = 0, txr_idx = 0; 1060 int rxr_idx = 0, txr_idx = 0;
1061 int rxr_remaining = adapter->num_rx_queues; 1061 int rxr_remaining = adapter->num_rx_queues;
1062 int txr_remaining = adapter->num_tx_queues; 1062 int txr_remaining = adapter->num_tx_queues;
1063 int i, j; 1063 int i, j;
1064 int rqpv, tqpv; 1064 int rqpv, tqpv;
1065 int err = 0; 1065 int err = 0;
1066 1066
1067 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1067 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1068 1068
1069 /* 1069 /*
1070 * The ideal configuration... 1070 * The ideal configuration...
1071 * We have enough vectors to map one per queue. 1071 * We have enough vectors to map one per queue.
1072 */ 1072 */
1073 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 1073 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
1074 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 1074 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1075 map_vector_to_rxq(adapter, v_start, rxr_idx); 1075 map_vector_to_rxq(adapter, v_start, rxr_idx);
1076 1076
1077 for (; txr_idx < txr_remaining; v_start++, txr_idx++) 1077 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
1078 map_vector_to_txq(adapter, v_start, txr_idx); 1078 map_vector_to_txq(adapter, v_start, txr_idx);
1079 goto out; 1079 goto out;
1080 } 1080 }
1081 1081
1082 /* 1082 /*
1083 * If we don't have enough vectors for a 1-to-1 1083 * If we don't have enough vectors for a 1-to-1
1084 * mapping, we'll have to group them so there are 1084 * mapping, we'll have to group them so there are
1085 * multiple queues per vector. 1085 * multiple queues per vector.
1086 */ 1086 */
1087 /* Re-adjusting *qpv takes care of the remainder. */ 1087 /* Re-adjusting *qpv takes care of the remainder. */
1088 for (i = v_start; i < q_vectors; i++) { 1088 for (i = v_start; i < q_vectors; i++) {
1089 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i); 1089 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
1090 for (j = 0; j < rqpv; j++) { 1090 for (j = 0; j < rqpv; j++) {
1091 map_vector_to_rxq(adapter, i, rxr_idx); 1091 map_vector_to_rxq(adapter, i, rxr_idx);
1092 rxr_idx++; 1092 rxr_idx++;
1093 rxr_remaining--; 1093 rxr_remaining--;
1094 } 1094 }
1095 } 1095 }
1096 for (i = v_start; i < q_vectors; i++) { 1096 for (i = v_start; i < q_vectors; i++) {
1097 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i); 1097 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
1098 for (j = 0; j < tqpv; j++) { 1098 for (j = 0; j < tqpv; j++) {
1099 map_vector_to_txq(adapter, i, txr_idx); 1099 map_vector_to_txq(adapter, i, txr_idx);
1100 txr_idx++; 1100 txr_idx++;
1101 txr_remaining--; 1101 txr_remaining--;
1102 } 1102 }
1103 } 1103 }
1104 1104
1105 out: 1105 out:
1106 return err; 1106 return err;
1107 } 1107 }
1108 1108
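Editor's note: the doc comment above describes the constrained-budget case, where DIV_ROUND_UP() spreads the leftover rings across the first vectors instead of stacking them all on the last one. A small user-space sketch of the same distribution, with hypothetical demo_* names standing in for the adapter structures (not part of this patch):

        #include <stdio.h>

        #define DEMO_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

        /* Assign 'nrings' rings to 'nvectors' vectors the way the fallback loop
         * above does: each vector takes the ceiling of what is left divided by
         * the vectors still unassigned.
         */
        static void demo_map_rings(int nrings, int nvectors)
        {
                int remaining = nrings, ring = 0, v, j;

                for (v = 0; v < nvectors; v++) {
                        int per_vector = DEMO_DIV_ROUND_UP(remaining, nvectors - v);

                        for (j = 0; j < per_vector; j++, ring++, remaining--)
                                printf("ring %d -> vector %d\n", ring, v);
                }
        }

        int main(void)
        {
                demo_map_rings(5, 2);   /* rings 0-2 land on vector 0, rings 3-4 on vector 1 */
                return 0;
        }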
1109 /** 1109 /**
1110 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts 1110 * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
1111 * @adapter: board private structure 1111 * @adapter: board private structure
1112 * 1112 *
1113 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests 1113 * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
1114 * interrupts from the kernel. 1114 * interrupts from the kernel.
1115 **/ 1115 **/
1116 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) 1116 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
1117 { 1117 {
1118 struct net_device *netdev = adapter->netdev; 1118 struct net_device *netdev = adapter->netdev;
1119 irqreturn_t (*handler)(int, void *); 1119 irqreturn_t (*handler)(int, void *);
1120 int i, vector, q_vectors, err; 1120 int i, vector, q_vectors, err;
1121 int ri = 0, ti = 0; 1121 int ri = 0, ti = 0;
1122 1122
1123 /* Decrement for Other and TCP Timer vectors */ 1123 /* Decrement for Other and TCP Timer vectors */
1124 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1124 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1125 1125
1126 #define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \ 1126 #define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
1127 ? &ixgbevf_msix_clean_many : \ 1127 ? &ixgbevf_msix_clean_many : \
1128 (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \ 1128 (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
1129 (_v)->txr_count ? &ixgbevf_msix_clean_tx : \ 1129 (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
1130 NULL) 1130 NULL)
1131 for (vector = 0; vector < q_vectors; vector++) { 1131 for (vector = 0; vector < q_vectors; vector++) {
1132 handler = SET_HANDLER(adapter->q_vector[vector]); 1132 handler = SET_HANDLER(adapter->q_vector[vector]);
1133 1133
1134 if (handler == &ixgbevf_msix_clean_rx) { 1134 if (handler == &ixgbevf_msix_clean_rx) {
1135 sprintf(adapter->name[vector], "%s-%s-%d", 1135 sprintf(adapter->name[vector], "%s-%s-%d",
1136 netdev->name, "rx", ri++); 1136 netdev->name, "rx", ri++);
1137 } else if (handler == &ixgbevf_msix_clean_tx) { 1137 } else if (handler == &ixgbevf_msix_clean_tx) {
1138 sprintf(adapter->name[vector], "%s-%s-%d", 1138 sprintf(adapter->name[vector], "%s-%s-%d",
1139 netdev->name, "tx", ti++); 1139 netdev->name, "tx", ti++);
1140 } else if (handler == &ixgbevf_msix_clean_many) { 1140 } else if (handler == &ixgbevf_msix_clean_many) {
1141 sprintf(adapter->name[vector], "%s-%s-%d", 1141 sprintf(adapter->name[vector], "%s-%s-%d",
1142 netdev->name, "TxRx", vector); 1142 netdev->name, "TxRx", vector);
1143 } else { 1143 } else {
1144 /* skip this unused q_vector */ 1144 /* skip this unused q_vector */
1145 continue; 1145 continue;
1146 } 1146 }
1147 err = request_irq(adapter->msix_entries[vector].vector, 1147 err = request_irq(adapter->msix_entries[vector].vector,
1148 handler, 0, adapter->name[vector], 1148 handler, 0, adapter->name[vector],
1149 adapter->q_vector[vector]); 1149 adapter->q_vector[vector]);
1150 if (err) { 1150 if (err) {
1151 hw_dbg(&adapter->hw, 1151 hw_dbg(&adapter->hw,
1152 "request_irq failed for MSIX interrupt " 1152 "request_irq failed for MSIX interrupt "
1153 "Error: %d\n", err); 1153 "Error: %d\n", err);
1154 goto free_queue_irqs; 1154 goto free_queue_irqs;
1155 } 1155 }
1156 } 1156 }
1157 1157
1158 sprintf(adapter->name[vector], "%s:mbx", netdev->name); 1158 sprintf(adapter->name[vector], "%s:mbx", netdev->name);
1159 err = request_irq(adapter->msix_entries[vector].vector, 1159 err = request_irq(adapter->msix_entries[vector].vector,
1160 &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev); 1160 &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
1161 if (err) { 1161 if (err) {
1162 hw_dbg(&adapter->hw, 1162 hw_dbg(&adapter->hw,
1163 "request_irq for msix_mbx failed: %d\n", err); 1163 "request_irq for msix_mbx failed: %d\n", err);
1164 goto free_queue_irqs; 1164 goto free_queue_irqs;
1165 } 1165 }
1166 1166
1167 return 0; 1167 return 0;
1168 1168
1169 free_queue_irqs: 1169 free_queue_irqs:
1170 for (i = vector - 1; i >= 0; i--) 1170 for (i = vector - 1; i >= 0; i--)
1171 free_irq(adapter->msix_entries[--vector].vector, 1171 free_irq(adapter->msix_entries[--vector].vector,
1172 &(adapter->q_vector[i])); 1172 &(adapter->q_vector[i]));
1173 pci_disable_msix(adapter->pdev); 1173 pci_disable_msix(adapter->pdev);
1174 kfree(adapter->msix_entries); 1174 kfree(adapter->msix_entries);
1175 adapter->msix_entries = NULL; 1175 adapter->msix_entries = NULL;
1176 return err; 1176 return err;
1177 } 1177 }
1178 1178
1179 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) 1179 static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
1180 { 1180 {
1181 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1181 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1182 1182
1183 for (i = 0; i < q_vectors; i++) { 1183 for (i = 0; i < q_vectors; i++) {
1184 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; 1184 struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
1185 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES); 1185 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1186 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES); 1186 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1187 q_vector->rxr_count = 0; 1187 q_vector->rxr_count = 0;
1188 q_vector->txr_count = 0; 1188 q_vector->txr_count = 0;
1189 q_vector->eitr = adapter->eitr_param; 1189 q_vector->eitr = adapter->eitr_param;
1190 } 1190 }
1191 } 1191 }
1192 1192
1193 /** 1193 /**
1194 * ixgbevf_request_irq - initialize interrupts 1194 * ixgbevf_request_irq - initialize interrupts
1195 * @adapter: board private structure 1195 * @adapter: board private structure
1196 * 1196 *
1197 * Attempts to configure interrupts using the best available 1197 * Attempts to configure interrupts using the best available
1198 * capabilities of the hardware and kernel. 1198 * capabilities of the hardware and kernel.
1199 **/ 1199 **/
1200 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) 1200 static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
1201 { 1201 {
1202 int err = 0; 1202 int err = 0;
1203 1203
1204 err = ixgbevf_request_msix_irqs(adapter); 1204 err = ixgbevf_request_msix_irqs(adapter);
1205 1205
1206 if (err) 1206 if (err)
1207 hw_dbg(&adapter->hw, 1207 hw_dbg(&adapter->hw,
1208 "request_irq failed, Error %d\n", err); 1208 "request_irq failed, Error %d\n", err);
1209 1209
1210 return err; 1210 return err;
1211 } 1211 }
1212 1212
1213 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter) 1213 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
1214 { 1214 {
1215 struct net_device *netdev = adapter->netdev; 1215 struct net_device *netdev = adapter->netdev;
1216 int i, q_vectors; 1216 int i, q_vectors;
1217 1217
1218 q_vectors = adapter->num_msix_vectors; 1218 q_vectors = adapter->num_msix_vectors;
1219 1219
1220 i = q_vectors - 1; 1220 i = q_vectors - 1;
1221 1221
1222 free_irq(adapter->msix_entries[i].vector, netdev); 1222 free_irq(adapter->msix_entries[i].vector, netdev);
1223 i--; 1223 i--;
1224 1224
1225 for (; i >= 0; i--) { 1225 for (; i >= 0; i--) {
1226 free_irq(adapter->msix_entries[i].vector, 1226 free_irq(adapter->msix_entries[i].vector,
1227 adapter->q_vector[i]); 1227 adapter->q_vector[i]);
1228 } 1228 }
1229 1229
1230 ixgbevf_reset_q_vectors(adapter); 1230 ixgbevf_reset_q_vectors(adapter);
1231 } 1231 }
1232 1232
1233 /** 1233 /**
1234 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC 1234 * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
1235 * @adapter: board private structure 1235 * @adapter: board private structure
1236 **/ 1236 **/
1237 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter) 1237 static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
1238 { 1238 {
1239 int i; 1239 int i;
1240 struct ixgbe_hw *hw = &adapter->hw; 1240 struct ixgbe_hw *hw = &adapter->hw;
1241 1241
1242 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0); 1242 IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
1243 1243
1244 IXGBE_WRITE_FLUSH(hw); 1244 IXGBE_WRITE_FLUSH(hw);
1245 1245
1246 for (i = 0; i < adapter->num_msix_vectors; i++) 1246 for (i = 0; i < adapter->num_msix_vectors; i++)
1247 synchronize_irq(adapter->msix_entries[i].vector); 1247 synchronize_irq(adapter->msix_entries[i].vector);
1248 } 1248 }
1249 1249
1250 /** 1250 /**
1251 * ixgbevf_irq_enable - Enable default interrupt generation settings 1251 * ixgbevf_irq_enable - Enable default interrupt generation settings
1252 * @adapter: board private structure 1252 * @adapter: board private structure
1253 **/ 1253 **/
1254 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter, 1254 static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
1255 bool queues, bool flush) 1255 bool queues, bool flush)
1256 { 1256 {
1257 struct ixgbe_hw *hw = &adapter->hw; 1257 struct ixgbe_hw *hw = &adapter->hw;
1258 u32 mask; 1258 u32 mask;
1259 u64 qmask; 1259 u64 qmask;
1260 1260
1261 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); 1261 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
1262 qmask = ~0; 1262 qmask = ~0;
1263 1263
1264 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask); 1264 IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
1265 1265
1266 if (queues) 1266 if (queues)
1267 ixgbevf_irq_enable_queues(adapter, qmask); 1267 ixgbevf_irq_enable_queues(adapter, qmask);
1268 1268
1269 if (flush) 1269 if (flush)
1270 IXGBE_WRITE_FLUSH(hw); 1270 IXGBE_WRITE_FLUSH(hw);
1271 } 1271 }
1272 1272
1273 /** 1273 /**
1274 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset 1274 * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
1275 * @adapter: board private structure 1275 * @adapter: board private structure
1276 * 1276 *
1277 * Configure the Tx unit of the MAC after a reset. 1277 * Configure the Tx unit of the MAC after a reset.
1278 **/ 1278 **/
1279 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter) 1279 static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
1280 { 1280 {
1281 u64 tdba; 1281 u64 tdba;
1282 struct ixgbe_hw *hw = &adapter->hw; 1282 struct ixgbe_hw *hw = &adapter->hw;
1283 u32 i, j, tdlen, txctrl; 1283 u32 i, j, tdlen, txctrl;
1284 1284
1285 /* Setup the HW Tx Head and Tail descriptor pointers */ 1285 /* Setup the HW Tx Head and Tail descriptor pointers */
1286 for (i = 0; i < adapter->num_tx_queues; i++) { 1286 for (i = 0; i < adapter->num_tx_queues; i++) {
1287 struct ixgbevf_ring *ring = &adapter->tx_ring[i]; 1287 struct ixgbevf_ring *ring = &adapter->tx_ring[i];
1288 j = ring->reg_idx; 1288 j = ring->reg_idx;
1289 tdba = ring->dma; 1289 tdba = ring->dma;
1290 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc); 1290 tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
1291 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j), 1291 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
1292 (tdba & DMA_BIT_MASK(32))); 1292 (tdba & DMA_BIT_MASK(32)));
1293 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32)); 1293 IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
1294 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen); 1294 IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
1295 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0); 1295 IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
1296 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0); 1296 IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
1297 adapter->tx_ring[i].head = IXGBE_VFTDH(j); 1297 adapter->tx_ring[i].head = IXGBE_VFTDH(j);
1298 adapter->tx_ring[i].tail = IXGBE_VFTDT(j); 1298 adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
1299 /* Disable Tx Head Writeback RO bit, since this hoses 1299 /* Disable Tx Head Writeback RO bit, since this hoses
1300 * bookkeeping if things aren't delivered in order. 1300 * bookkeeping if things aren't delivered in order.
1301 */ 1301 */
1302 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j)); 1302 txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
1303 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN; 1303 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1304 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl); 1304 IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
1305 } 1305 }
1306 } 1306 }
1307 1307
1308 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 1308 #define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
1309 1309
1310 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index) 1310 static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
1311 { 1311 {
1312 struct ixgbevf_ring *rx_ring; 1312 struct ixgbevf_ring *rx_ring;
1313 struct ixgbe_hw *hw = &adapter->hw; 1313 struct ixgbe_hw *hw = &adapter->hw;
1314 u32 srrctl; 1314 u32 srrctl;
1315 1315
1316 rx_ring = &adapter->rx_ring[index]; 1316 rx_ring = &adapter->rx_ring[index];
1317 1317
1318 srrctl = IXGBE_SRRCTL_DROP_EN; 1318 srrctl = IXGBE_SRRCTL_DROP_EN;
1319 1319
1320 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1320 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1321 u16 bufsz = IXGBEVF_RXBUFFER_2048; 1321 u16 bufsz = IXGBEVF_RXBUFFER_2048;
1322 /* grow the amount we can receive on large page machines */ 1322 /* grow the amount we can receive on large page machines */
1323 if (bufsz < (PAGE_SIZE / 2)) 1323 if (bufsz < (PAGE_SIZE / 2))
1324 bufsz = (PAGE_SIZE / 2); 1324 bufsz = (PAGE_SIZE / 2);
1325 /* cap the bufsz at our largest descriptor size */ 1325 /* cap the bufsz at our largest descriptor size */
1326 bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz); 1326 bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
1327 1327
1328 srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1328 srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1329 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 1329 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
1330 srrctl |= ((IXGBEVF_RX_HDR_SIZE << 1330 srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
1331 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 1331 IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
1332 IXGBE_SRRCTL_BSIZEHDR_MASK); 1332 IXGBE_SRRCTL_BSIZEHDR_MASK);
1333 } else { 1333 } else {
1334 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1334 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
1335 1335
1336 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE) 1336 if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
1337 srrctl |= IXGBEVF_RXBUFFER_2048 >> 1337 srrctl |= IXGBEVF_RXBUFFER_2048 >>
1338 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1338 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1339 else 1339 else
1340 srrctl |= rx_ring->rx_buf_len >> 1340 srrctl |= rx_ring->rx_buf_len >>
1341 IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1341 IXGBE_SRRCTL_BSIZEPKT_SHIFT;
1342 } 1342 }
1343 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl); 1343 IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
1344 } 1344 }
1345 1345
1346 /** 1346 /**
1347 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset 1347 * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
1348 * @adapter: board private structure 1348 * @adapter: board private structure
1349 * 1349 *
1350 * Configure the Rx unit of the MAC after a reset. 1350 * Configure the Rx unit of the MAC after a reset.
1351 **/ 1351 **/
1352 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) 1352 static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
1353 { 1353 {
1354 u64 rdba; 1354 u64 rdba;
1355 struct ixgbe_hw *hw = &adapter->hw; 1355 struct ixgbe_hw *hw = &adapter->hw;
1356 struct net_device *netdev = adapter->netdev; 1356 struct net_device *netdev = adapter->netdev;
1357 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1357 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1358 int i, j; 1358 int i, j;
1359 u32 rdlen; 1359 u32 rdlen;
1360 int rx_buf_len; 1360 int rx_buf_len;
1361 1361
1362 /* Decide whether to use packet split mode or not */ 1362 /* Decide whether to use packet split mode or not */
1363 if (netdev->mtu > ETH_DATA_LEN) { 1363 if (netdev->mtu > ETH_DATA_LEN) {
1364 if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE) 1364 if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
1365 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 1365 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1366 else 1366 else
1367 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; 1367 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1368 } else { 1368 } else {
1369 if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE) 1369 if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
1370 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; 1370 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
1371 else 1371 else
1372 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 1372 adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
1373 } 1373 }
1374 1374
1375 /* Set the RX buffer length according to the mode */ 1375 /* Set the RX buffer length according to the mode */
1376 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 1376 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
1377 /* PSRTYPE must be initialized in 82599 */ 1377 /* PSRTYPE must be initialized in 82599 */
1378 u32 psrtype = IXGBE_PSRTYPE_TCPHDR | 1378 u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
1379 IXGBE_PSRTYPE_UDPHDR | 1379 IXGBE_PSRTYPE_UDPHDR |
1380 IXGBE_PSRTYPE_IPV4HDR | 1380 IXGBE_PSRTYPE_IPV4HDR |
1381 IXGBE_PSRTYPE_IPV6HDR | 1381 IXGBE_PSRTYPE_IPV6HDR |
1382 IXGBE_PSRTYPE_L2HDR; 1382 IXGBE_PSRTYPE_L2HDR;
1383 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype); 1383 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
1384 rx_buf_len = IXGBEVF_RX_HDR_SIZE; 1384 rx_buf_len = IXGBEVF_RX_HDR_SIZE;
1385 } else { 1385 } else {
1386 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0); 1386 IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
1387 if (netdev->mtu <= ETH_DATA_LEN) 1387 if (netdev->mtu <= ETH_DATA_LEN)
1388 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1388 rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1389 else 1389 else
1390 rx_buf_len = ALIGN(max_frame, 1024); 1390 rx_buf_len = ALIGN(max_frame, 1024);
1391 } 1391 }
1392 1392
1393 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc); 1393 rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
1394 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1394 /* Setup the HW Rx Head and Tail Descriptor Pointers and
1395 * the Base and Length of the Rx Descriptor Ring */ 1395 * the Base and Length of the Rx Descriptor Ring */
1396 for (i = 0; i < adapter->num_rx_queues; i++) { 1396 for (i = 0; i < adapter->num_rx_queues; i++) {
1397 rdba = adapter->rx_ring[i].dma; 1397 rdba = adapter->rx_ring[i].dma;
1398 j = adapter->rx_ring[i].reg_idx; 1398 j = adapter->rx_ring[i].reg_idx;
1399 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j), 1399 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
1400 (rdba & DMA_BIT_MASK(32))); 1400 (rdba & DMA_BIT_MASK(32)));
1401 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32)); 1401 IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
1402 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen); 1402 IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
1403 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0); 1403 IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
1404 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0); 1404 IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
1405 adapter->rx_ring[i].head = IXGBE_VFRDH(j); 1405 adapter->rx_ring[i].head = IXGBE_VFRDH(j);
1406 adapter->rx_ring[i].tail = IXGBE_VFRDT(j); 1406 adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
1407 adapter->rx_ring[i].rx_buf_len = rx_buf_len; 1407 adapter->rx_ring[i].rx_buf_len = rx_buf_len;
1408 1408
1409 ixgbevf_configure_srrctl(adapter, j); 1409 ixgbevf_configure_srrctl(adapter, j);
1410 } 1410 }
1411 } 1411 }
1412 1412
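Editor's note: ixgbevf_configure_rx() above sizes the receive buffers from the MTU: header-split mode only needs header-sized buffers, a standard MTU uses the VLAN-sized buffer, and jumbo frames round the maximum frame up to a 1 KB boundary with ALIGN(). A hypothetical sketch of that decision, with the kernel constants written out as literals in the comments (assumption-labelled, not part of this patch):

        /* Hypothetical sketch of the buffer-length choice above. */
        static int demo_rx_buf_len(int mtu, int hdr_size, int use_packet_split)
        {
                int max_frame = mtu + 14 + 4;   /* ETH_HLEN + ETH_FCS_LEN */

                if (use_packet_split)
                        return hdr_size;                /* IXGBEVF_RX_HDR_SIZE */
                if (mtu <= 1500)                        /* ETH_DATA_LEN */
                        return 1522;                    /* MAXIMUM_ETHERNET_VLAN_SIZE */
                return (max_frame + 1023) & ~1023;      /* ALIGN(max_frame, 1024) */
        }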
1413 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1413 static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1414 { 1414 {
1415 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1415 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1416 struct ixgbe_hw *hw = &adapter->hw; 1416 struct ixgbe_hw *hw = &adapter->hw;
1417 1417
1418 /* add VID to filter table */ 1418 /* add VID to filter table */
1419 if (hw->mac.ops.set_vfta) 1419 if (hw->mac.ops.set_vfta)
1420 hw->mac.ops.set_vfta(hw, vid, 0, true); 1420 hw->mac.ops.set_vfta(hw, vid, 0, true);
1421 set_bit(vid, adapter->active_vlans); 1421 set_bit(vid, adapter->active_vlans);
1422 1422
1423 return 0; 1423 return 0;
1424 } 1424 }
1425 1425
1426 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1426 static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1427 { 1427 {
1428 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1428 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1429 struct ixgbe_hw *hw = &adapter->hw; 1429 struct ixgbe_hw *hw = &adapter->hw;
1430 1430
1431 /* remove VID from filter table */ 1431 /* remove VID from filter table */
1432 if (hw->mac.ops.set_vfta) 1432 if (hw->mac.ops.set_vfta)
1433 hw->mac.ops.set_vfta(hw, vid, 0, false); 1433 hw->mac.ops.set_vfta(hw, vid, 0, false);
1434 clear_bit(vid, adapter->active_vlans); 1434 clear_bit(vid, adapter->active_vlans);
1435 1435
1436 return 0; 1436 return 0;
1437 } 1437 }
1438 1438
1439 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter) 1439 static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1440 { 1440 {
1441 u16 vid; 1441 u16 vid;
1442 1442
1443 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 1443 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1444 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid); 1444 ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
1445 } 1445 }
1446 1446
1447 static int ixgbevf_write_uc_addr_list(struct net_device *netdev) 1447 static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
1448 { 1448 {
1449 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1449 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1450 struct ixgbe_hw *hw = &adapter->hw; 1450 struct ixgbe_hw *hw = &adapter->hw;
1451 int count = 0; 1451 int count = 0;
1452 1452
1453 if ((netdev_uc_count(netdev)) > 10) { 1453 if ((netdev_uc_count(netdev)) > 10) {
1454 pr_err("Too many unicast filters - No Space\n"); 1454 pr_err("Too many unicast filters - No Space\n");
1455 return -ENOSPC; 1455 return -ENOSPC;
1456 } 1456 }
1457 1457
1458 if (!netdev_uc_empty(netdev)) { 1458 if (!netdev_uc_empty(netdev)) {
1459 struct netdev_hw_addr *ha; 1459 struct netdev_hw_addr *ha;
1460 netdev_for_each_uc_addr(ha, netdev) { 1460 netdev_for_each_uc_addr(ha, netdev) {
1461 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); 1461 hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
1462 udelay(200); 1462 udelay(200);
1463 } 1463 }
1464 } else { 1464 } else {
1465 /* 1465 /*
1466 * If the list is empty then send message to PF driver to 1466 * If the list is empty then send message to PF driver to
1467 * clear all macvlans on this VF. 1467 * clear all macvlans on this VF.
1468 */ 1468 */
1469 hw->mac.ops.set_uc_addr(hw, 0, NULL); 1469 hw->mac.ops.set_uc_addr(hw, 0, NULL);
1470 } 1470 }
1471 1471
1472 return count; 1472 return count;
1473 } 1473 }
1474 1474
1475 /** 1475 /**
1476 * ixgbevf_set_rx_mode - Multicast set 1476 * ixgbevf_set_rx_mode - Multicast set
1477 * @netdev: network interface device structure 1477 * @netdev: network interface device structure
1478 * 1478 *
1479 * The set_rx_method entry point is called whenever the multicast address 1479 * The set_rx_method entry point is called whenever the multicast address
1480 * list or the network interface flags are updated. This routine is 1480 * list or the network interface flags are updated. This routine is
1481 * responsible for configuring the hardware for proper multicast mode. 1481 * responsible for configuring the hardware for proper multicast mode.
1482 **/ 1482 **/
1483 static void ixgbevf_set_rx_mode(struct net_device *netdev) 1483 static void ixgbevf_set_rx_mode(struct net_device *netdev)
1484 { 1484 {
1485 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1485 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1486 struct ixgbe_hw *hw = &adapter->hw; 1486 struct ixgbe_hw *hw = &adapter->hw;
1487 1487
1488 /* reprogram multicast list */ 1488 /* reprogram multicast list */
1489 if (hw->mac.ops.update_mc_addr_list) 1489 if (hw->mac.ops.update_mc_addr_list)
1490 hw->mac.ops.update_mc_addr_list(hw, netdev); 1490 hw->mac.ops.update_mc_addr_list(hw, netdev);
1491 1491
1492 ixgbevf_write_uc_addr_list(netdev); 1492 ixgbevf_write_uc_addr_list(netdev);
1493 } 1493 }
1494 1494
1495 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1495 static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
1496 { 1496 {
1497 int q_idx; 1497 int q_idx;
1498 struct ixgbevf_q_vector *q_vector; 1498 struct ixgbevf_q_vector *q_vector;
1499 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1499 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1500 1500
1501 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1501 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1502 struct napi_struct *napi; 1502 struct napi_struct *napi;
1503 q_vector = adapter->q_vector[q_idx]; 1503 q_vector = adapter->q_vector[q_idx];
1504 if (!q_vector->rxr_count) 1504 if (!q_vector->rxr_count)
1505 continue; 1505 continue;
1506 napi = &q_vector->napi; 1506 napi = &q_vector->napi;
1507 if (q_vector->rxr_count > 1) 1507 if (q_vector->rxr_count > 1)
1508 napi->poll = &ixgbevf_clean_rxonly_many; 1508 napi->poll = &ixgbevf_clean_rxonly_many;
1509 1509
1510 napi_enable(napi); 1510 napi_enable(napi);
1511 } 1511 }
1512 } 1512 }
1513 1513
1514 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter) 1514 static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
1515 { 1515 {
1516 int q_idx; 1516 int q_idx;
1517 struct ixgbevf_q_vector *q_vector; 1517 struct ixgbevf_q_vector *q_vector;
1518 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1518 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1519 1519
1520 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1520 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1521 q_vector = adapter->q_vector[q_idx]; 1521 q_vector = adapter->q_vector[q_idx];
1522 if (!q_vector->rxr_count) 1522 if (!q_vector->rxr_count)
1523 continue; 1523 continue;
1524 napi_disable(&q_vector->napi); 1524 napi_disable(&q_vector->napi);
1525 } 1525 }
1526 } 1526 }
1527 1527
1528 static void ixgbevf_configure(struct ixgbevf_adapter *adapter) 1528 static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
1529 { 1529 {
1530 struct net_device *netdev = adapter->netdev; 1530 struct net_device *netdev = adapter->netdev;
1531 int i; 1531 int i;
1532 1532
1533 ixgbevf_set_rx_mode(netdev); 1533 ixgbevf_set_rx_mode(netdev);
1534 1534
1535 ixgbevf_restore_vlan(adapter); 1535 ixgbevf_restore_vlan(adapter);
1536 1536
1537 ixgbevf_configure_tx(adapter); 1537 ixgbevf_configure_tx(adapter);
1538 ixgbevf_configure_rx(adapter); 1538 ixgbevf_configure_rx(adapter);
1539 for (i = 0; i < adapter->num_rx_queues; i++) { 1539 for (i = 0; i < adapter->num_rx_queues; i++) {
1540 struct ixgbevf_ring *ring = &adapter->rx_ring[i]; 1540 struct ixgbevf_ring *ring = &adapter->rx_ring[i];
1541 ixgbevf_alloc_rx_buffers(adapter, ring, ring->count); 1541 ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
1542 ring->next_to_use = ring->count - 1; 1542 ring->next_to_use = ring->count - 1;
1543 writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail); 1543 writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
1544 } 1544 }
1545 } 1545 }
1546 1546
1547 #define IXGBE_MAX_RX_DESC_POLL 10 1547 #define IXGBE_MAX_RX_DESC_POLL 10
1548 static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, 1548 static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
1549 int rxr) 1549 int rxr)
1550 { 1550 {
1551 struct ixgbe_hw *hw = &adapter->hw; 1551 struct ixgbe_hw *hw = &adapter->hw;
1552 int j = adapter->rx_ring[rxr].reg_idx; 1552 int j = adapter->rx_ring[rxr].reg_idx;
1553 int k; 1553 int k;
1554 1554
1555 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) { 1555 for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
1556 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE) 1556 if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
1557 break; 1557 break;
1558 else 1558 else
1559 msleep(1); 1559 msleep(1);
1560 } 1560 }
1561 if (k >= IXGBE_MAX_RX_DESC_POLL) { 1561 if (k >= IXGBE_MAX_RX_DESC_POLL) {
1562 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d " 1562 hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
1563 "not set within the polling period\n", rxr); 1563 "not set within the polling period\n", rxr);
1564 } 1564 }
1565 1565
1566 ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr], 1566 ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
1567 (adapter->rx_ring[rxr].count - 1)); 1567 (adapter->rx_ring[rxr].count - 1));
1568 } 1568 }
1569 1569
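Editor's note: ixgbevf_rx_desc_queue_enable() above uses a bounded readback loop, sleeping about 1 ms between attempts and logging if RXDCTL.ENABLE never reads back as set. A generic sketch of that poll pattern with hypothetical names; the sleep is elided so the snippet stays self-contained:

        #include <stdbool.h>
        #include <stdint.h>

        /* Retry a register readback up to max_tries times, reporting whether
         * the requested bit eventually reads back as set.
         */
        static bool demo_poll_bit(uint32_t (*read_reg)(void), uint32_t bit, int max_tries)
        {
                int k;

                for (k = 0; k < max_tries; k++) {
                        if (read_reg() & bit)
                                return true;
                        /* the driver sleeps ~1 ms here before retrying */
                }
                return false;
        }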
1570 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter) 1570 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
1571 { 1571 {
1572 /* Only save pre-reset stats if there are some */ 1572 /* Only save pre-reset stats if there are some */
1573 if (adapter->stats.vfgprc || adapter->stats.vfgptc) { 1573 if (adapter->stats.vfgprc || adapter->stats.vfgptc) {
1574 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc - 1574 adapter->stats.saved_reset_vfgprc += adapter->stats.vfgprc -
1575 adapter->stats.base_vfgprc; 1575 adapter->stats.base_vfgprc;
1576 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc - 1576 adapter->stats.saved_reset_vfgptc += adapter->stats.vfgptc -
1577 adapter->stats.base_vfgptc; 1577 adapter->stats.base_vfgptc;
1578 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc - 1578 adapter->stats.saved_reset_vfgorc += adapter->stats.vfgorc -
1579 adapter->stats.base_vfgorc; 1579 adapter->stats.base_vfgorc;
1580 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc - 1580 adapter->stats.saved_reset_vfgotc += adapter->stats.vfgotc -
1581 adapter->stats.base_vfgotc; 1581 adapter->stats.base_vfgotc;
1582 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc - 1582 adapter->stats.saved_reset_vfmprc += adapter->stats.vfmprc -
1583 adapter->stats.base_vfmprc; 1583 adapter->stats.base_vfmprc;
1584 } 1584 }
1585 } 1585 }
1586 1586
1587 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter) 1587 static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1588 { 1588 {
1589 struct ixgbe_hw *hw = &adapter->hw; 1589 struct ixgbe_hw *hw = &adapter->hw;
1590 1590
1591 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC); 1591 adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
1592 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB); 1592 adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
1593 adapter->stats.last_vfgorc |= 1593 adapter->stats.last_vfgorc |=
1594 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32); 1594 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
1595 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC); 1595 adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
1596 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB); 1596 adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
1597 adapter->stats.last_vfgotc |= 1597 adapter->stats.last_vfgotc |=
1598 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32); 1598 (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
1599 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC); 1599 adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
1600 1600
1601 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc; 1601 adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
1602 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc; 1602 adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
1603 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc; 1603 adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
1604 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc; 1604 adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
1605 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1605 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1606 } 1606 }
1607 1607
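Editor's note: the statistics setup above rebuilds the 64-bit octet counters (VFGORC/VFGOTC) from two 32-bit register halves. A compact sketch of that composition, with a hypothetical reg_read callback standing in for IXGBE_READ_REG() (illustration only):

        #include <stdint.h>

        /* Combine a low/high register pair into one 64-bit counter value. */
        static uint64_t demo_read_counter64(uint32_t (*reg_read)(int reg),
                                            int lsb_reg, int msb_reg)
        {
                uint64_t val = reg_read(lsb_reg);

                val |= (uint64_t)reg_read(msb_reg) << 32;
                return val;
        }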
1608 static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1608 static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1609 { 1609 {
1610 struct net_device *netdev = adapter->netdev; 1610 struct net_device *netdev = adapter->netdev;
1611 struct ixgbe_hw *hw = &adapter->hw; 1611 struct ixgbe_hw *hw = &adapter->hw;
1612 int i, j = 0; 1612 int i, j = 0;
1613 int num_rx_rings = adapter->num_rx_queues; 1613 int num_rx_rings = adapter->num_rx_queues;
1614 u32 txdctl, rxdctl; 1614 u32 txdctl, rxdctl;
1615 1615
1616 for (i = 0; i < adapter->num_tx_queues; i++) { 1616 for (i = 0; i < adapter->num_tx_queues; i++) {
1617 j = adapter->tx_ring[i].reg_idx; 1617 j = adapter->tx_ring[i].reg_idx;
1618 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1618 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1619 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 1619 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
1620 txdctl |= (8 << 16); 1620 txdctl |= (8 << 16);
1621 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1621 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1622 } 1622 }
1623 1623
1624 for (i = 0; i < adapter->num_tx_queues; i++) { 1624 for (i = 0; i < adapter->num_tx_queues; i++) {
1625 j = adapter->tx_ring[i].reg_idx; 1625 j = adapter->tx_ring[i].reg_idx;
1626 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1626 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1627 txdctl |= IXGBE_TXDCTL_ENABLE; 1627 txdctl |= IXGBE_TXDCTL_ENABLE;
1628 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl); 1628 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
1629 } 1629 }
1630 1630
1631 for (i = 0; i < num_rx_rings; i++) { 1631 for (i = 0; i < num_rx_rings; i++) {
1632 j = adapter->rx_ring[i].reg_idx; 1632 j = adapter->rx_ring[i].reg_idx;
1633 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)); 1633 rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
1634 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME; 1634 rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
1635 if (hw->mac.type == ixgbe_mac_X540_vf) { 1635 if (hw->mac.type == ixgbe_mac_X540_vf) {
1636 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK; 1636 rxdctl &= ~IXGBE_RXDCTL_RLPMLMASK;
1637 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) | 1637 rxdctl |= ((netdev->mtu + ETH_HLEN + ETH_FCS_LEN) |
1638 IXGBE_RXDCTL_RLPML_EN); 1638 IXGBE_RXDCTL_RLPML_EN);
1639 } 1639 }
1640 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl); 1640 IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
1641 ixgbevf_rx_desc_queue_enable(adapter, i); 1641 ixgbevf_rx_desc_queue_enable(adapter, i);
1642 } 1642 }
1643 1643
1644 ixgbevf_configure_msix(adapter); 1644 ixgbevf_configure_msix(adapter);
1645 1645
1646 if (hw->mac.ops.set_rar) { 1646 if (hw->mac.ops.set_rar) {
1647 if (is_valid_ether_addr(hw->mac.addr)) 1647 if (is_valid_ether_addr(hw->mac.addr))
1648 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 1648 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
1649 else 1649 else
1650 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1650 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1651 } 1651 }
1652 1652
1653 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1653 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1654 ixgbevf_napi_enable_all(adapter); 1654 ixgbevf_napi_enable_all(adapter);
1655 1655
1656 /* enable transmits */ 1656 /* enable transmits */
1657 netif_tx_start_all_queues(netdev); 1657 netif_tx_start_all_queues(netdev);
1658 1658
1659 ixgbevf_save_reset_stats(adapter); 1659 ixgbevf_save_reset_stats(adapter);
1660 ixgbevf_init_last_counter_stats(adapter); 1660 ixgbevf_init_last_counter_stats(adapter);
1661 1661
1662 /* bring the link up in the watchdog, this could race with our first 1662 /* bring the link up in the watchdog, this could race with our first
1663 * link up interrupt but shouldn't be a problem */ 1663 * link up interrupt but shouldn't be a problem */
1664 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 1664 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1665 adapter->link_check_timeout = jiffies; 1665 adapter->link_check_timeout = jiffies;
1666 mod_timer(&adapter->watchdog_timer, jiffies); 1666 mod_timer(&adapter->watchdog_timer, jiffies);
1667 return 0; 1667 return 0;
1668 } 1668 }
1669 1669
1670 int ixgbevf_up(struct ixgbevf_adapter *adapter) 1670 int ixgbevf_up(struct ixgbevf_adapter *adapter)
1671 { 1671 {
1672 int err; 1672 int err;
1673 struct ixgbe_hw *hw = &adapter->hw; 1673 struct ixgbe_hw *hw = &adapter->hw;
1674 1674
1675 ixgbevf_configure(adapter); 1675 ixgbevf_configure(adapter);
1676 1676
1677 err = ixgbevf_up_complete(adapter); 1677 err = ixgbevf_up_complete(adapter);
1678 1678
1679 /* clear any pending interrupts, may auto mask */ 1679 /* clear any pending interrupts, may auto mask */
1680 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1680 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1681 1681
1682 ixgbevf_irq_enable(adapter, true, true); 1682 ixgbevf_irq_enable(adapter, true, true);
1683 1683
1684 return err; 1684 return err;
1685 } 1685 }
1686 1686
1687 /** 1687 /**
1688 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue 1688 * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
1689 * @adapter: board private structure 1689 * @adapter: board private structure
1690 * @rx_ring: ring to free buffers from 1690 * @rx_ring: ring to free buffers from
1691 **/ 1691 **/
1692 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter, 1692 static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1693 struct ixgbevf_ring *rx_ring) 1693 struct ixgbevf_ring *rx_ring)
1694 { 1694 {
1695 struct pci_dev *pdev = adapter->pdev; 1695 struct pci_dev *pdev = adapter->pdev;
1696 unsigned long size; 1696 unsigned long size;
1697 unsigned int i; 1697 unsigned int i;
1698 1698
1699 if (!rx_ring->rx_buffer_info) 1699 if (!rx_ring->rx_buffer_info)
1700 return; 1700 return;
1701 1701
1702 /* Free all the Rx ring sk_buffs */ 1702 /* Free all the Rx ring sk_buffs */
1703 for (i = 0; i < rx_ring->count; i++) { 1703 for (i = 0; i < rx_ring->count; i++) {
1704 struct ixgbevf_rx_buffer *rx_buffer_info; 1704 struct ixgbevf_rx_buffer *rx_buffer_info;
1705 1705
1706 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1706 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1707 if (rx_buffer_info->dma) { 1707 if (rx_buffer_info->dma) {
1708 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 1708 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1709 rx_ring->rx_buf_len, 1709 rx_ring->rx_buf_len,
1710 DMA_FROM_DEVICE); 1710 DMA_FROM_DEVICE);
1711 rx_buffer_info->dma = 0; 1711 rx_buffer_info->dma = 0;
1712 } 1712 }
1713 if (rx_buffer_info->skb) { 1713 if (rx_buffer_info->skb) {
1714 struct sk_buff *skb = rx_buffer_info->skb; 1714 struct sk_buff *skb = rx_buffer_info->skb;
1715 rx_buffer_info->skb = NULL; 1715 rx_buffer_info->skb = NULL;
1716 do { 1716 do {
1717 struct sk_buff *this = skb; 1717 struct sk_buff *this = skb;
1718 skb = skb->prev; 1718 skb = skb->prev;
1719 dev_kfree_skb(this); 1719 dev_kfree_skb(this);
1720 } while (skb); 1720 } while (skb);
1721 } 1721 }
1722 if (!rx_buffer_info->page) 1722 if (!rx_buffer_info->page)
1723 continue; 1723 continue;
1724 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 1724 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
1725 PAGE_SIZE / 2, DMA_FROM_DEVICE); 1725 PAGE_SIZE / 2, DMA_FROM_DEVICE);
1726 rx_buffer_info->page_dma = 0; 1726 rx_buffer_info->page_dma = 0;
1727 put_page(rx_buffer_info->page); 1727 put_page(rx_buffer_info->page);
1728 rx_buffer_info->page = NULL; 1728 rx_buffer_info->page = NULL;
1729 rx_buffer_info->page_offset = 0; 1729 rx_buffer_info->page_offset = 0;
1730 } 1730 }
1731 1731
1732 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 1732 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
1733 memset(rx_ring->rx_buffer_info, 0, size); 1733 memset(rx_ring->rx_buffer_info, 0, size);
1734 1734
1735 /* Zero out the descriptor ring */ 1735 /* Zero out the descriptor ring */
1736 memset(rx_ring->desc, 0, rx_ring->size); 1736 memset(rx_ring->desc, 0, rx_ring->size);
1737 1737
1738 rx_ring->next_to_clean = 0; 1738 rx_ring->next_to_clean = 0;
1739 rx_ring->next_to_use = 0; 1739 rx_ring->next_to_use = 0;
1740 1740
1741 if (rx_ring->head) 1741 if (rx_ring->head)
1742 writel(0, adapter->hw.hw_addr + rx_ring->head); 1742 writel(0, adapter->hw.hw_addr + rx_ring->head);
1743 if (rx_ring->tail) 1743 if (rx_ring->tail)
1744 writel(0, adapter->hw.hw_addr + rx_ring->tail); 1744 writel(0, adapter->hw.hw_addr + rx_ring->tail);
1745 } 1745 }
1746 1746
1747 /** 1747 /**
1748 * ixgbevf_clean_tx_ring - Free Tx Buffers 1748 * ixgbevf_clean_tx_ring - Free Tx Buffers
1749 * @adapter: board private structure 1749 * @adapter: board private structure
1750 * @tx_ring: ring to be cleaned 1750 * @tx_ring: ring to be cleaned
1751 **/ 1751 **/
1752 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter, 1752 static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
1753 struct ixgbevf_ring *tx_ring) 1753 struct ixgbevf_ring *tx_ring)
1754 { 1754 {
1755 struct ixgbevf_tx_buffer *tx_buffer_info; 1755 struct ixgbevf_tx_buffer *tx_buffer_info;
1756 unsigned long size; 1756 unsigned long size;
1757 unsigned int i; 1757 unsigned int i;
1758 1758
1759 if (!tx_ring->tx_buffer_info) 1759 if (!tx_ring->tx_buffer_info)
1760 return; 1760 return;
1761 1761
1762 /* Free all the Tx ring sk_buffs */ 1762 /* Free all the Tx ring sk_buffs */
1763 1763
1764 for (i = 0; i < tx_ring->count; i++) { 1764 for (i = 0; i < tx_ring->count; i++) {
1765 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 1765 tx_buffer_info = &tx_ring->tx_buffer_info[i];
1766 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); 1766 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
1767 } 1767 }
1768 1768
1769 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 1769 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
1770 memset(tx_ring->tx_buffer_info, 0, size); 1770 memset(tx_ring->tx_buffer_info, 0, size);
1771 1771
1772 memset(tx_ring->desc, 0, tx_ring->size); 1772 memset(tx_ring->desc, 0, tx_ring->size);
1773 1773
1774 tx_ring->next_to_use = 0; 1774 tx_ring->next_to_use = 0;
1775 tx_ring->next_to_clean = 0; 1775 tx_ring->next_to_clean = 0;
1776 1776
1777 if (tx_ring->head) 1777 if (tx_ring->head)
1778 writel(0, adapter->hw.hw_addr + tx_ring->head); 1778 writel(0, adapter->hw.hw_addr + tx_ring->head);
1779 if (tx_ring->tail) 1779 if (tx_ring->tail)
1780 writel(0, adapter->hw.hw_addr + tx_ring->tail); 1780 writel(0, adapter->hw.hw_addr + tx_ring->tail);
1781 } 1781 }
1782 1782
1783 /** 1783 /**
1784 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues 1784 * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
1785 * @adapter: board private structure 1785 * @adapter: board private structure
1786 **/ 1786 **/
1787 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter) 1787 static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
1788 { 1788 {
1789 int i; 1789 int i;
1790 1790
1791 for (i = 0; i < adapter->num_rx_queues; i++) 1791 for (i = 0; i < adapter->num_rx_queues; i++)
1792 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1792 ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1793 } 1793 }
1794 1794
1795 /** 1795 /**
1796 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues 1796 * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
1797 * @adapter: board private structure 1797 * @adapter: board private structure
1798 **/ 1798 **/
1799 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter) 1799 static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
1800 { 1800 {
1801 int i; 1801 int i;
1802 1802
1803 for (i = 0; i < adapter->num_tx_queues; i++) 1803 for (i = 0; i < adapter->num_tx_queues; i++)
1804 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1804 ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1805 } 1805 }
1806 1806
1807 void ixgbevf_down(struct ixgbevf_adapter *adapter) 1807 void ixgbevf_down(struct ixgbevf_adapter *adapter)
1808 { 1808 {
1809 struct net_device *netdev = adapter->netdev; 1809 struct net_device *netdev = adapter->netdev;
1810 struct ixgbe_hw *hw = &adapter->hw; 1810 struct ixgbe_hw *hw = &adapter->hw;
1811 u32 txdctl; 1811 u32 txdctl;
1812 int i, j; 1812 int i, j;
1813 1813
1814 /* signal that we are down to the interrupt handler */ 1814 /* signal that we are down to the interrupt handler */
1815 set_bit(__IXGBEVF_DOWN, &adapter->state); 1815 set_bit(__IXGBEVF_DOWN, &adapter->state);
1816 /* disable receives */ 1816 /* disable receives */
1817 1817
1818 netif_tx_disable(netdev); 1818 netif_tx_disable(netdev);
1819 1819
1820 msleep(10); 1820 msleep(10);
1821 1821
1822 netif_tx_stop_all_queues(netdev); 1822 netif_tx_stop_all_queues(netdev);
1823 1823
1824 ixgbevf_irq_disable(adapter); 1824 ixgbevf_irq_disable(adapter);
1825 1825
1826 ixgbevf_napi_disable_all(adapter); 1826 ixgbevf_napi_disable_all(adapter);
1827 1827
1828 del_timer_sync(&adapter->watchdog_timer); 1828 del_timer_sync(&adapter->watchdog_timer);
1829 /* can't call flush scheduled work here because it can deadlock 1829 /* can't call flush scheduled work here because it can deadlock
1830 * if linkwatch_event tries to acquire the rtnl_lock which we are 1830 * if linkwatch_event tries to acquire the rtnl_lock which we are
1831 * holding */ 1831 * holding */
1832 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK) 1832 while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
1833 msleep(1); 1833 msleep(1);
1834 1834
1835 /* disable transmits in the hardware now that interrupts are off */ 1835 /* disable transmits in the hardware now that interrupts are off */
1836 for (i = 0; i < adapter->num_tx_queues; i++) { 1836 for (i = 0; i < adapter->num_tx_queues; i++) {
1837 j = adapter->tx_ring[i].reg_idx; 1837 j = adapter->tx_ring[i].reg_idx;
1838 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j)); 1838 txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
1839 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), 1839 IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
1840 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 1840 (txdctl & ~IXGBE_TXDCTL_ENABLE));
1841 } 1841 }
1842 1842
1843 netif_carrier_off(netdev); 1843 netif_carrier_off(netdev);
1844 1844
1845 if (!pci_channel_offline(adapter->pdev)) 1845 if (!pci_channel_offline(adapter->pdev))
1846 ixgbevf_reset(adapter); 1846 ixgbevf_reset(adapter);
1847 1847
1848 ixgbevf_clean_all_tx_rings(adapter); 1848 ixgbevf_clean_all_tx_rings(adapter);
1849 ixgbevf_clean_all_rx_rings(adapter); 1849 ixgbevf_clean_all_rx_rings(adapter);
1850 } 1850 }
1851 1851
1852 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter) 1852 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
1853 { 1853 {
1854 struct ixgbe_hw *hw = &adapter->hw; 1854 struct ixgbe_hw *hw = &adapter->hw;
1855 1855
1856 WARN_ON(in_interrupt()); 1856 WARN_ON(in_interrupt());
1857 1857
1858 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state)) 1858 while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
1859 msleep(1); 1859 msleep(1);
1860 1860
1861 /* 1861 /*
1862 * Check if PF is up before re-init. If not then skip until 1862 * Check if PF is up before re-init. If not then skip until
1863 * later when the PF is up and ready to service requests from 1863 * later when the PF is up and ready to service requests from
1864 * the VF via mailbox. If the VF is up and running then the 1864 * the VF via mailbox. If the VF is up and running then the
1865 * watchdog task will continue to schedule reset tasks until 1865 * watchdog task will continue to schedule reset tasks until
1866 * the PF is up and running. 1866 * the PF is up and running.
1867 */ 1867 */
1868 if (!hw->mac.ops.reset_hw(hw)) { 1868 if (!hw->mac.ops.reset_hw(hw)) {
1869 ixgbevf_down(adapter); 1869 ixgbevf_down(adapter);
1870 ixgbevf_up(adapter); 1870 ixgbevf_up(adapter);
1871 } 1871 }
1872 1872
1873 clear_bit(__IXGBEVF_RESETTING, &adapter->state); 1873 clear_bit(__IXGBEVF_RESETTING, &adapter->state);
1874 } 1874 }
1875 1875
1876 void ixgbevf_reset(struct ixgbevf_adapter *adapter) 1876 void ixgbevf_reset(struct ixgbevf_adapter *adapter)
1877 { 1877 {
1878 struct ixgbe_hw *hw = &adapter->hw; 1878 struct ixgbe_hw *hw = &adapter->hw;
1879 struct net_device *netdev = adapter->netdev; 1879 struct net_device *netdev = adapter->netdev;
1880 1880
1881 if (hw->mac.ops.reset_hw(hw)) 1881 if (hw->mac.ops.reset_hw(hw))
1882 hw_dbg(hw, "PF still resetting\n"); 1882 hw_dbg(hw, "PF still resetting\n");
1883 else 1883 else
1884 hw->mac.ops.init_hw(hw); 1884 hw->mac.ops.init_hw(hw);
1885 1885
1886 if (is_valid_ether_addr(adapter->hw.mac.addr)) { 1886 if (is_valid_ether_addr(adapter->hw.mac.addr)) {
1887 memcpy(netdev->dev_addr, adapter->hw.mac.addr, 1887 memcpy(netdev->dev_addr, adapter->hw.mac.addr,
1888 netdev->addr_len); 1888 netdev->addr_len);
1889 memcpy(netdev->perm_addr, adapter->hw.mac.addr, 1889 memcpy(netdev->perm_addr, adapter->hw.mac.addr,
1890 netdev->addr_len); 1890 netdev->addr_len);
1891 } 1891 }
1892 } 1892 }
1893 1893
1894 static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter, 1894 static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
1895 int vectors) 1895 int vectors)
1896 { 1896 {
1897 int err, vector_threshold; 1897 int err, vector_threshold;
1898 1898
1899 /* We'll want at least 3 (vector_threshold): 1899 /* We'll want at least 3 (vector_threshold):
1900 * 1) TxQ[0] Cleanup 1900 * 1) TxQ[0] Cleanup
1901 * 2) RxQ[0] Cleanup 1901 * 2) RxQ[0] Cleanup
1902 * 3) Other (Link Status Change, etc.) 1902 * 3) Other (Link Status Change, etc.)
1903 */ 1903 */
1904 vector_threshold = MIN_MSIX_COUNT; 1904 vector_threshold = MIN_MSIX_COUNT;
1905 1905
1906 /* The more we get, the more we will assign to Tx/Rx Cleanup 1906 /* The more we get, the more we will assign to Tx/Rx Cleanup
1907 * for the separate queues...where Rx Cleanup >= Tx Cleanup. 1907 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
1908 * Right now, we simply care about how many we'll get; we'll 1908 * Right now, we simply care about how many we'll get; we'll
1909 * set them up later while requesting irq's. 1909 * set them up later while requesting irq's.
1910 */ 1910 */
1911 while (vectors >= vector_threshold) { 1911 while (vectors >= vector_threshold) {
1912 err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 1912 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1913 vectors); 1913 vectors);
1914 if (!err) /* Success in acquiring all requested vectors. */ 1914 if (!err) /* Success in acquiring all requested vectors. */
1915 break; 1915 break;
1916 else if (err < 0) 1916 else if (err < 0)
1917 vectors = 0; /* Nasty failure, quit now */ 1917 vectors = 0; /* Nasty failure, quit now */
1918 else /* err == number of vectors we should try again with */ 1918 else /* err == number of vectors we should try again with */
1919 vectors = err; 1919 vectors = err;
1920 } 1920 }
1921 1921
1922 if (vectors < vector_threshold) { 1922 if (vectors < vector_threshold) {
1923 /* Can't allocate enough MSI-X interrupts? Oh well. 1923 /* Can't allocate enough MSI-X interrupts? Oh well.
1924 * This just means we'll go with either a single MSI 1924 * This just means we'll go with either a single MSI
1925 * vector or fall back to legacy interrupts. 1925 * vector or fall back to legacy interrupts.
1926 */ 1926 */
1927 hw_dbg(&adapter->hw, 1927 hw_dbg(&adapter->hw,
1928 "Unable to allocate MSI-X interrupts\n"); 1928 "Unable to allocate MSI-X interrupts\n");
1929 kfree(adapter->msix_entries); 1929 kfree(adapter->msix_entries);
1930 adapter->msix_entries = NULL; 1930 adapter->msix_entries = NULL;
1931 } else { 1931 } else {
1932 /* 1932 /*
1933 * Adjust for only the vectors we'll use, which is minimum 1933 * Adjust for only the vectors we'll use, which is minimum
1934 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of 1934 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
1935 * vectors we were allocated. 1935 * vectors we were allocated.
1936 */ 1936 */
1937 adapter->num_msix_vectors = vectors; 1937 adapter->num_msix_vectors = vectors;
1938 } 1938 }
1939 } 1939 }
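
The loop above relies on pci_enable_msix() returning zero on success, a negative errno on hard failure, or a positive count of vectors the system could actually grant (the driver's own comment notes the retry semantics). A minimal sketch of that retry pattern as a standalone helper — the names try_enable_msix and min_vec are illustrative, not part of the driver:

static int try_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int nvec, int min_vec)
{
	while (nvec >= min_vec) {
		int err = pci_enable_msix(pdev, entries, nvec);

		if (!err)
			return nvec;	/* got every vector we asked for */
		if (err < 0)
			return err;	/* hard failure, give up */
		nvec = err;		/* retry with what the system offered */
	}
	return -ENOSPC;			/* below the driver's usable minimum */
}
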
1940 1940
1941 /* 1941 /*
1942 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent 1942 * ixgbevf_set_num_queues: Allocate queues for device, feature dependent
1943 * @adapter: board private structure to initialize 1943 * @adapter: board private structure to initialize
1944 * 1944 *
1945 * This is the top level queue allocation routine. The order here is very 1945 * This is the top level queue allocation routine. The order here is very
1946 * important, starting with the "most" number of features turned on at once, 1946 * important, starting with the "most" number of features turned on at once,
1947 * and ending with the smallest set of features. This way large combinations 1947 * and ending with the smallest set of features. This way large combinations
1948 * can be allocated if they're turned on, and smaller combinations are the 1948 * can be allocated if they're turned on, and smaller combinations are the
1949 * fallthrough conditions. 1949 * fallthrough conditions.
1950 * 1950 *
1951 **/ 1951 **/
1952 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter) 1952 static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
1953 { 1953 {
1954 /* Start with base case */ 1954 /* Start with base case */
1955 adapter->num_rx_queues = 1; 1955 adapter->num_rx_queues = 1;
1956 adapter->num_tx_queues = 1; 1956 adapter->num_tx_queues = 1;
1957 adapter->num_rx_pools = adapter->num_rx_queues; 1957 adapter->num_rx_pools = adapter->num_rx_queues;
1958 adapter->num_rx_queues_per_pool = 1; 1958 adapter->num_rx_queues_per_pool = 1;
1959 } 1959 }
1960 1960
1961 /** 1961 /**
1962 * ixgbevf_alloc_queues - Allocate memory for all rings 1962 * ixgbevf_alloc_queues - Allocate memory for all rings
1963 * @adapter: board private structure to initialize 1963 * @adapter: board private structure to initialize
1964 * 1964 *
1965 * We allocate one ring per queue at run-time since we don't know the 1965 * We allocate one ring per queue at run-time since we don't know the
1966 * number of queues at compile-time. The polling_netdev array is 1966 * number of queues at compile-time. The polling_netdev array is
1967 * intended for Multiqueue, but should work fine with a single queue. 1967 * intended for Multiqueue, but should work fine with a single queue.
1968 **/ 1968 **/
1969 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter) 1969 static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
1970 { 1970 {
1971 int i; 1971 int i;
1972 1972
1973 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 1973 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1974 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1974 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1975 if (!adapter->tx_ring) 1975 if (!adapter->tx_ring)
1976 goto err_tx_ring_allocation; 1976 goto err_tx_ring_allocation;
1977 1977
1978 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 1978 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1979 sizeof(struct ixgbevf_ring), GFP_KERNEL); 1979 sizeof(struct ixgbevf_ring), GFP_KERNEL);
1980 if (!adapter->rx_ring) 1980 if (!adapter->rx_ring)
1981 goto err_rx_ring_allocation; 1981 goto err_rx_ring_allocation;
1982 1982
1983 for (i = 0; i < adapter->num_tx_queues; i++) { 1983 for (i = 0; i < adapter->num_tx_queues; i++) {
1984 adapter->tx_ring[i].count = adapter->tx_ring_count; 1984 adapter->tx_ring[i].count = adapter->tx_ring_count;
1985 adapter->tx_ring[i].queue_index = i; 1985 adapter->tx_ring[i].queue_index = i;
1986 adapter->tx_ring[i].reg_idx = i; 1986 adapter->tx_ring[i].reg_idx = i;
1987 } 1987 }
1988 1988
1989 for (i = 0; i < adapter->num_rx_queues; i++) { 1989 for (i = 0; i < adapter->num_rx_queues; i++) {
1990 adapter->rx_ring[i].count = adapter->rx_ring_count; 1990 adapter->rx_ring[i].count = adapter->rx_ring_count;
1991 adapter->rx_ring[i].queue_index = i; 1991 adapter->rx_ring[i].queue_index = i;
1992 adapter->rx_ring[i].reg_idx = i; 1992 adapter->rx_ring[i].reg_idx = i;
1993 } 1993 }
1994 1994
1995 return 0; 1995 return 0;
1996 1996
1997 err_rx_ring_allocation: 1997 err_rx_ring_allocation:
1998 kfree(adapter->tx_ring); 1998 kfree(adapter->tx_ring);
1999 err_tx_ring_allocation: 1999 err_tx_ring_allocation:
2000 return -ENOMEM; 2000 return -ENOMEM;
2001 } 2001 }
2002 2002
2003 /** 2003 /**
2004 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported 2004 * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
2005 * @adapter: board private structure to initialize 2005 * @adapter: board private structure to initialize
2006 * 2006 *
2007 * Attempt to configure the interrupts using the best available 2007 * Attempt to configure the interrupts using the best available
2008 * capabilities of the hardware and the kernel. 2008 * capabilities of the hardware and the kernel.
2009 **/ 2009 **/
2010 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) 2010 static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
2011 { 2011 {
2012 int err = 0; 2012 int err = 0;
2013 int vector, v_budget; 2013 int vector, v_budget;
2014 2014
2015 /* 2015 /*
2016 * It's easy to be greedy for MSI-X vectors, but it really 2016 * It's easy to be greedy for MSI-X vectors, but it really
2017 * doesn't do us much good if we have a lot more vectors 2017 * doesn't do us much good if we have a lot more vectors
2018 * than CPUs. So let's be conservative and only ask for 2018 * than CPUs. So let's be conservative and only ask for
2019 * (roughly) twice the number of vectors as there are CPUs. 2019 * (roughly) twice the number of vectors as there are CPUs.
2020 */ 2020 */
2021 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 2021 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2022 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 2022 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2023 2023
2024 /* A failure in MSI-X entry allocation isn't fatal, but it does 2024 /* A failure in MSI-X entry allocation isn't fatal, but it does
2025 * mean we disable MSI-X capabilities of the adapter. */ 2025 * mean we disable MSI-X capabilities of the adapter. */
2026 adapter->msix_entries = kcalloc(v_budget, 2026 adapter->msix_entries = kcalloc(v_budget,
2027 sizeof(struct msix_entry), GFP_KERNEL); 2027 sizeof(struct msix_entry), GFP_KERNEL);
2028 if (!adapter->msix_entries) { 2028 if (!adapter->msix_entries) {
2029 err = -ENOMEM; 2029 err = -ENOMEM;
2030 goto out; 2030 goto out;
2031 } 2031 }
2032 2032
2033 for (vector = 0; vector < v_budget; vector++) 2033 for (vector = 0; vector < v_budget; vector++)
2034 adapter->msix_entries[vector].entry = vector; 2034 adapter->msix_entries[vector].entry = vector;
2035 2035
2036 ixgbevf_acquire_msix_vectors(adapter, v_budget); 2036 ixgbevf_acquire_msix_vectors(adapter, v_budget);
2037 2037
2038 out: 2038 out:
2039 return err; 2039 return err;
2040 } 2040 }
2041 2041
2042 /** 2042 /**
2043 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors 2043 * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
2044 * @adapter: board private structure to initialize 2044 * @adapter: board private structure to initialize
2045 * 2045 *
2046 * We allocate one q_vector per queue interrupt. If allocation fails we 2046 * We allocate one q_vector per queue interrupt. If allocation fails we
2047 * return -ENOMEM. 2047 * return -ENOMEM.
2048 **/ 2048 **/
2049 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter) 2049 static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
2050 { 2050 {
2051 int q_idx, num_q_vectors; 2051 int q_idx, num_q_vectors;
2052 struct ixgbevf_q_vector *q_vector; 2052 struct ixgbevf_q_vector *q_vector;
2053 int napi_vectors; 2053 int napi_vectors;
2054 int (*poll)(struct napi_struct *, int); 2054 int (*poll)(struct napi_struct *, int);
2055 2055
2056 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2056 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2057 napi_vectors = adapter->num_rx_queues; 2057 napi_vectors = adapter->num_rx_queues;
2058 poll = &ixgbevf_clean_rxonly; 2058 poll = &ixgbevf_clean_rxonly;
2059 2059
2060 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2060 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2061 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL); 2061 q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
2062 if (!q_vector) 2062 if (!q_vector)
2063 goto err_out; 2063 goto err_out;
2064 q_vector->adapter = adapter; 2064 q_vector->adapter = adapter;
2065 q_vector->v_idx = q_idx; 2065 q_vector->v_idx = q_idx;
2066 q_vector->eitr = adapter->eitr_param; 2066 q_vector->eitr = adapter->eitr_param;
2067 if (q_idx < napi_vectors) 2067 if (q_idx < napi_vectors)
2068 netif_napi_add(adapter->netdev, &q_vector->napi, 2068 netif_napi_add(adapter->netdev, &q_vector->napi,
2069 (*poll), 64); 2069 (*poll), 64);
2070 adapter->q_vector[q_idx] = q_vector; 2070 adapter->q_vector[q_idx] = q_vector;
2071 } 2071 }
2072 2072
2073 return 0; 2073 return 0;
2074 2074
2075 err_out: 2075 err_out:
2076 while (q_idx) { 2076 while (q_idx) {
2077 q_idx--; 2077 q_idx--;
2078 q_vector = adapter->q_vector[q_idx]; 2078 q_vector = adapter->q_vector[q_idx];
2079 netif_napi_del(&q_vector->napi); 2079 netif_napi_del(&q_vector->napi);
2080 kfree(q_vector); 2080 kfree(q_vector);
2081 adapter->q_vector[q_idx] = NULL; 2081 adapter->q_vector[q_idx] = NULL;
2082 } 2082 }
2083 return -ENOMEM; 2083 return -ENOMEM;
2084 } 2084 }
2085 2085
2086 /** 2086 /**
2087 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors 2087 * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
2088 * @adapter: board private structure to initialize 2088 * @adapter: board private structure to initialize
2089 * 2089 *
2090 * This function frees the memory allocated to the q_vectors. In addition if 2090 * This function frees the memory allocated to the q_vectors. In addition if
2091 * NAPI is enabled it will delete any references to the NAPI struct prior 2091 * NAPI is enabled it will delete any references to the NAPI struct prior
2092 * to freeing the q_vector. 2092 * to freeing the q_vector.
2093 **/ 2093 **/
2094 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter) 2094 static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
2095 { 2095 {
2096 int q_idx, num_q_vectors; 2096 int q_idx, num_q_vectors;
2097 int napi_vectors; 2097 int napi_vectors;
2098 2098
2099 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2099 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2100 napi_vectors = adapter->num_rx_queues; 2100 napi_vectors = adapter->num_rx_queues;
2101 2101
2102 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { 2102 for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
2103 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx]; 2103 struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
2104 2104
2105 adapter->q_vector[q_idx] = NULL; 2105 adapter->q_vector[q_idx] = NULL;
2106 if (q_idx < napi_vectors) 2106 if (q_idx < napi_vectors)
2107 netif_napi_del(&q_vector->napi); 2107 netif_napi_del(&q_vector->napi);
2108 kfree(q_vector); 2108 kfree(q_vector);
2109 } 2109 }
2110 } 2110 }
2111 2111
2112 /** 2112 /**
2113 * ixgbevf_reset_interrupt_capability - Reset MSIX setup 2113 * ixgbevf_reset_interrupt_capability - Reset MSIX setup
2114 * @adapter: board private structure 2114 * @adapter: board private structure
2115 * 2115 *
2116 **/ 2116 **/
2117 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter) 2117 static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2118 { 2118 {
2119 pci_disable_msix(adapter->pdev); 2119 pci_disable_msix(adapter->pdev);
2120 kfree(adapter->msix_entries); 2120 kfree(adapter->msix_entries);
2121 adapter->msix_entries = NULL; 2121 adapter->msix_entries = NULL;
2122 } 2122 }
2123 2123
2124 /** 2124 /**
2125 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init 2125 * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
2126 * @adapter: board private structure to initialize 2126 * @adapter: board private structure to initialize
2127 * 2127 *
2128 **/ 2128 **/
2129 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) 2129 static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
2130 { 2130 {
2131 int err; 2131 int err;
2132 2132
2133 /* Number of supported queues */ 2133 /* Number of supported queues */
2134 ixgbevf_set_num_queues(adapter); 2134 ixgbevf_set_num_queues(adapter);
2135 2135
2136 err = ixgbevf_set_interrupt_capability(adapter); 2136 err = ixgbevf_set_interrupt_capability(adapter);
2137 if (err) { 2137 if (err) {
2138 hw_dbg(&adapter->hw, 2138 hw_dbg(&adapter->hw,
2139 "Unable to setup interrupt capabilities\n"); 2139 "Unable to setup interrupt capabilities\n");
2140 goto err_set_interrupt; 2140 goto err_set_interrupt;
2141 } 2141 }
2142 2142
2143 err = ixgbevf_alloc_q_vectors(adapter); 2143 err = ixgbevf_alloc_q_vectors(adapter);
2144 if (err) { 2144 if (err) {
2145 hw_dbg(&adapter->hw, "Unable to allocate memory for queue " 2145 hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
2146 "vectors\n"); 2146 "vectors\n");
2147 goto err_alloc_q_vectors; 2147 goto err_alloc_q_vectors;
2148 } 2148 }
2149 2149
2150 err = ixgbevf_alloc_queues(adapter); 2150 err = ixgbevf_alloc_queues(adapter);
2151 if (err) { 2151 if (err) {
2152 pr_err("Unable to allocate memory for queues\n"); 2152 pr_err("Unable to allocate memory for queues\n");
2153 goto err_alloc_queues; 2153 goto err_alloc_queues;
2154 } 2154 }
2155 2155
2156 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " 2156 hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
2157 "Tx Queue count = %u\n", 2157 "Tx Queue count = %u\n",
2158 (adapter->num_rx_queues > 1) ? "Enabled" : 2158 (adapter->num_rx_queues > 1) ? "Enabled" :
2159 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2159 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2160 2160
2161 set_bit(__IXGBEVF_DOWN, &adapter->state); 2161 set_bit(__IXGBEVF_DOWN, &adapter->state);
2162 2162
2163 return 0; 2163 return 0;
2164 err_alloc_queues: 2164 err_alloc_queues:
2165 ixgbevf_free_q_vectors(adapter); 2165 ixgbevf_free_q_vectors(adapter);
2166 err_alloc_q_vectors: 2166 err_alloc_q_vectors:
2167 ixgbevf_reset_interrupt_capability(adapter); 2167 ixgbevf_reset_interrupt_capability(adapter);
2168 err_set_interrupt: 2168 err_set_interrupt:
2169 return err; 2169 return err;
2170 } 2170 }
2171 2171
2172 /** 2172 /**
2173 * ixgbevf_sw_init - Initialize general software structures 2173 * ixgbevf_sw_init - Initialize general software structures
2174 * (struct ixgbevf_adapter) 2174 * (struct ixgbevf_adapter)
2175 * @adapter: board private structure to initialize 2175 * @adapter: board private structure to initialize
2176 * 2176 *
2177 * ixgbevf_sw_init initializes the Adapter private data structure. 2177 * ixgbevf_sw_init initializes the Adapter private data structure.
2178 * Fields are initialized based on PCI device information and 2178 * Fields are initialized based on PCI device information and
2179 * OS network device settings (MTU size). 2179 * OS network device settings (MTU size).
2180 **/ 2180 **/
2181 static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter) 2181 static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2182 { 2182 {
2183 struct ixgbe_hw *hw = &adapter->hw; 2183 struct ixgbe_hw *hw = &adapter->hw;
2184 struct pci_dev *pdev = adapter->pdev; 2184 struct pci_dev *pdev = adapter->pdev;
2185 int err; 2185 int err;
2186 2186
2187 /* PCI config space info */ 2187 /* PCI config space info */
2188 2188
2189 hw->vendor_id = pdev->vendor; 2189 hw->vendor_id = pdev->vendor;
2190 hw->device_id = pdev->device; 2190 hw->device_id = pdev->device;
2191 hw->revision_id = pdev->revision; 2191 hw->revision_id = pdev->revision;
2192 hw->subsystem_vendor_id = pdev->subsystem_vendor; 2192 hw->subsystem_vendor_id = pdev->subsystem_vendor;
2193 hw->subsystem_device_id = pdev->subsystem_device; 2193 hw->subsystem_device_id = pdev->subsystem_device;
2194 2194
2195 hw->mbx.ops.init_params(hw); 2195 hw->mbx.ops.init_params(hw);
2196 hw->mac.max_tx_queues = MAX_TX_QUEUES; 2196 hw->mac.max_tx_queues = MAX_TX_QUEUES;
2197 hw->mac.max_rx_queues = MAX_RX_QUEUES; 2197 hw->mac.max_rx_queues = MAX_RX_QUEUES;
2198 err = hw->mac.ops.reset_hw(hw); 2198 err = hw->mac.ops.reset_hw(hw);
2199 if (err) { 2199 if (err) {
2200 dev_info(&pdev->dev, 2200 dev_info(&pdev->dev,
2201 "PF still in reset state, assigning new address\n"); 2201 "PF still in reset state, assigning new address\n");
2202 dev_hw_addr_random(adapter->netdev, hw->mac.addr); 2202 eth_hw_addr_random(adapter->netdev);
2203 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
2204 adapter->netdev->addr_len);
2203 } else { 2205 } else {
2204 err = hw->mac.ops.init_hw(hw); 2206 err = hw->mac.ops.init_hw(hw);
2205 if (err) { 2207 if (err) {
2206 pr_err("init_shared_code failed: %d\n", err); 2208 pr_err("init_shared_code failed: %d\n", err);
2207 goto out; 2209 goto out;
2208 } 2210 }
2211 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
2212 adapter->netdev->addr_len);
2209 } 2213 }
2210 2214
2211 /* Enable dynamic interrupt throttling rates */ 2215 /* Enable dynamic interrupt throttling rates */
2212 adapter->eitr_param = 20000; 2216 adapter->eitr_param = 20000;
2213 adapter->itr_setting = 1; 2217 adapter->itr_setting = 1;
2214 2218
2215 /* set defaults for eitr in MegaBytes */ 2219 /* set defaults for eitr in MegaBytes */
2216 adapter->eitr_low = 10; 2220 adapter->eitr_low = 10;
2217 adapter->eitr_high = 20; 2221 adapter->eitr_high = 20;
2218 2222
2219 /* set default ring sizes */ 2223 /* set default ring sizes */
2220 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD; 2224 adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
2221 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD; 2225 adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
2222 2226
2223 /* enable rx csum by default */ 2227 /* enable rx csum by default */
2224 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 2228 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
2225 2229
2226 set_bit(__IXGBEVF_DOWN, &adapter->state); 2230 set_bit(__IXGBEVF_DOWN, &adapter->state);
2231 return 0;
2227 2232
2228 out: 2233 out:
2229 return err; 2234 return err;
2230 } 2235 }
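
The hunk above is the functional core of this change: when reset_hw() reports that the PF is still resetting, the VF has no mailbox-supplied address, so eth_hw_addr_random() fills netdev->dev_addr and that value is mirrored back into hw->mac.addr; on the normal path the PF-provided hw->mac.addr is copied into dev_addr instead. A condensed sketch of that flow under the same assumptions (hypothetical helper name, simplified error handling, not a drop-in replacement for ixgbevf_sw_init):

static int vf_assign_mac(struct ixgbe_hw *hw, struct net_device *netdev)
{
	int err = hw->mac.ops.reset_hw(hw);

	if (err) {
		/* PF not ready: generate a random MAC and keep the hw
		 * structure consistent with what the stack will use. */
		eth_hw_addr_random(netdev);
		memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
		return 0;
	}

	err = hw->mac.ops.init_hw(hw);
	if (err)
		return err;

	/* Normal path: publish the PF-assigned address to the stack. */
	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
	return 0;
}
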
2231 2236
2232 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ 2237 #define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
2233 { \ 2238 { \
2234 u32 current_counter = IXGBE_READ_REG(hw, reg); \ 2239 u32 current_counter = IXGBE_READ_REG(hw, reg); \
2235 if (current_counter < last_counter) \ 2240 if (current_counter < last_counter) \
2236 counter += 0x100000000LL; \ 2241 counter += 0x100000000LL; \
2237 last_counter = current_counter; \ 2242 last_counter = current_counter; \
2238 counter &= 0xFFFFFFFF00000000LL; \ 2243 counter &= 0xFFFFFFFF00000000LL; \
2239 counter |= current_counter; \ 2244 counter |= current_counter; \
2240 } 2245 }
2241 2246
2242 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ 2247 #define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
2243 { \ 2248 { \
2244 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ 2249 u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
2245 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ 2250 u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
2246 u64 current_counter = (current_counter_msb << 32) | \ 2251 u64 current_counter = (current_counter_msb << 32) | \
2247 current_counter_lsb; \ 2252 current_counter_lsb; \
2248 if (current_counter < last_counter) \ 2253 if (current_counter < last_counter) \
2249 counter += 0x1000000000LL; \ 2254 counter += 0x1000000000LL; \
2250 last_counter = current_counter; \ 2255 last_counter = current_counter; \
2251 counter &= 0xFFFFFFF000000000LL; \ 2256 counter &= 0xFFFFFFF000000000LL; \
2252 counter |= current_counter; \ 2257 counter |= current_counter; \
2253 } 2258 }
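
Both macros extend a rolling hardware register into a monotonic 64-bit software counter: a new reading smaller than the previous one means the register wrapped, so one full wrap period (2^32 or 2^36) is credited before the low bits are spliced back in. The 32-bit case written as a plain function for illustration — a hypothetical helper, equivalent in behaviour to UPDATE_VF_COUNTER_32bit:

static void update_counter_32(u32 reading, u32 *last, u64 *total)
{
	if (reading < *last)			/* register wrapped since last read */
		*total += 0x100000000ULL;	/* credit one full 2^32 period */
	*last = reading;
	*total &= 0xFFFFFFFF00000000ULL;	/* keep the accumulated high half */
	*total |= reading;			/* splice in the current low bits */
}

For example, successive readings of 0xFFFFFFF0 and 0x00000010 advance the total by 0x20 instead of letting it run backwards.
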
2254 /** 2259 /**
2255 * ixgbevf_update_stats - Update the board statistics counters. 2260 * ixgbevf_update_stats - Update the board statistics counters.
2256 * @adapter: board private structure 2261 * @adapter: board private structure
2257 **/ 2262 **/
2258 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter) 2263 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
2259 { 2264 {
2260 struct ixgbe_hw *hw = &adapter->hw; 2265 struct ixgbe_hw *hw = &adapter->hw;
2261 2266
2262 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc, 2267 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
2263 adapter->stats.vfgprc); 2268 adapter->stats.vfgprc);
2264 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc, 2269 UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
2265 adapter->stats.vfgptc); 2270 adapter->stats.vfgptc);
2266 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB, 2271 UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
2267 adapter->stats.last_vfgorc, 2272 adapter->stats.last_vfgorc,
2268 adapter->stats.vfgorc); 2273 adapter->stats.vfgorc);
2269 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB, 2274 UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
2270 adapter->stats.last_vfgotc, 2275 adapter->stats.last_vfgotc,
2271 adapter->stats.vfgotc); 2276 adapter->stats.vfgotc);
2272 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc, 2277 UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
2273 adapter->stats.vfmprc); 2278 adapter->stats.vfmprc);
2274 } 2279 }
2275 2280
2276 /** 2281 /**
2277 * ixgbevf_watchdog - Timer Call-back 2282 * ixgbevf_watchdog - Timer Call-back
2278 * @data: pointer to adapter cast into an unsigned long 2283 * @data: pointer to adapter cast into an unsigned long
2279 **/ 2284 **/
2280 static void ixgbevf_watchdog(unsigned long data) 2285 static void ixgbevf_watchdog(unsigned long data)
2281 { 2286 {
2282 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data; 2287 struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
2283 struct ixgbe_hw *hw = &adapter->hw; 2288 struct ixgbe_hw *hw = &adapter->hw;
2284 u64 eics = 0; 2289 u64 eics = 0;
2285 int i; 2290 int i;
2286 2291
2287 /* 2292 /*
2288 * Do the watchdog outside of interrupt context due to the lovely 2293 * Do the watchdog outside of interrupt context due to the lovely
2289 * delays that some of the newer hardware requires 2294 * delays that some of the newer hardware requires
2290 */ 2295 */
2291 2296
2292 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 2297 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
2293 goto watchdog_short_circuit; 2298 goto watchdog_short_circuit;
2294 2299
2295 /* get one bit for every active tx/rx interrupt vector */ 2300 /* get one bit for every active tx/rx interrupt vector */
2296 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 2301 for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
2297 struct ixgbevf_q_vector *qv = adapter->q_vector[i]; 2302 struct ixgbevf_q_vector *qv = adapter->q_vector[i];
2298 if (qv->rxr_count || qv->txr_count) 2303 if (qv->rxr_count || qv->txr_count)
2299 eics |= (1 << i); 2304 eics |= (1 << i);
2300 } 2305 }
2301 2306
2302 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics); 2307 IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
2303 2308
2304 watchdog_short_circuit: 2309 watchdog_short_circuit:
2305 schedule_work(&adapter->watchdog_task); 2310 schedule_work(&adapter->watchdog_task);
2306 } 2311 }
2307 2312
2308 /** 2313 /**
2309 * ixgbevf_tx_timeout - Respond to a Tx Hang 2314 * ixgbevf_tx_timeout - Respond to a Tx Hang
2310 * @netdev: network interface device structure 2315 * @netdev: network interface device structure
2311 **/ 2316 **/
2312 static void ixgbevf_tx_timeout(struct net_device *netdev) 2317 static void ixgbevf_tx_timeout(struct net_device *netdev)
2313 { 2318 {
2314 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2319 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2315 2320
2316 /* Do the reset outside of interrupt context */ 2321 /* Do the reset outside of interrupt context */
2317 schedule_work(&adapter->reset_task); 2322 schedule_work(&adapter->reset_task);
2318 } 2323 }
2319 2324
2320 static void ixgbevf_reset_task(struct work_struct *work) 2325 static void ixgbevf_reset_task(struct work_struct *work)
2321 { 2326 {
2322 struct ixgbevf_adapter *adapter; 2327 struct ixgbevf_adapter *adapter;
2323 adapter = container_of(work, struct ixgbevf_adapter, reset_task); 2328 adapter = container_of(work, struct ixgbevf_adapter, reset_task);
2324 2329
2325 /* If we're already down or resetting, just bail */ 2330 /* If we're already down or resetting, just bail */
2326 if (test_bit(__IXGBEVF_DOWN, &adapter->state) || 2331 if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
2327 test_bit(__IXGBEVF_RESETTING, &adapter->state)) 2332 test_bit(__IXGBEVF_RESETTING, &adapter->state))
2328 return; 2333 return;
2329 2334
2330 adapter->tx_timeout_count++; 2335 adapter->tx_timeout_count++;
2331 2336
2332 ixgbevf_reinit_locked(adapter); 2337 ixgbevf_reinit_locked(adapter);
2333 } 2338 }
2334 2339
2335 /** 2340 /**
2336 * ixgbevf_watchdog_task - worker thread to bring link up 2341 * ixgbevf_watchdog_task - worker thread to bring link up
2337 * @work: pointer to work_struct containing our data 2342 * @work: pointer to work_struct containing our data
2338 **/ 2343 **/
2339 static void ixgbevf_watchdog_task(struct work_struct *work) 2344 static void ixgbevf_watchdog_task(struct work_struct *work)
2340 { 2345 {
2341 struct ixgbevf_adapter *adapter = container_of(work, 2346 struct ixgbevf_adapter *adapter = container_of(work,
2342 struct ixgbevf_adapter, 2347 struct ixgbevf_adapter,
2343 watchdog_task); 2348 watchdog_task);
2344 struct net_device *netdev = adapter->netdev; 2349 struct net_device *netdev = adapter->netdev;
2345 struct ixgbe_hw *hw = &adapter->hw; 2350 struct ixgbe_hw *hw = &adapter->hw;
2346 u32 link_speed = adapter->link_speed; 2351 u32 link_speed = adapter->link_speed;
2347 bool link_up = adapter->link_up; 2352 bool link_up = adapter->link_up;
2348 2353
2349 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 2354 adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
2350 2355
2351 /* 2356 /*
2352 * Always check the link on the watchdog because we have 2357 * Always check the link on the watchdog because we have
2353 * no LSC interrupt 2358 * no LSC interrupt
2354 */ 2359 */
2355 if (hw->mac.ops.check_link) { 2360 if (hw->mac.ops.check_link) {
2356 if ((hw->mac.ops.check_link(hw, &link_speed, 2361 if ((hw->mac.ops.check_link(hw, &link_speed,
2357 &link_up, false)) != 0) { 2362 &link_up, false)) != 0) {
2358 adapter->link_up = link_up; 2363 adapter->link_up = link_up;
2359 adapter->link_speed = link_speed; 2364 adapter->link_speed = link_speed;
2360 netif_carrier_off(netdev); 2365 netif_carrier_off(netdev);
2361 netif_tx_stop_all_queues(netdev); 2366 netif_tx_stop_all_queues(netdev);
2362 schedule_work(&adapter->reset_task); 2367 schedule_work(&adapter->reset_task);
2363 goto pf_has_reset; 2368 goto pf_has_reset;
2364 } 2369 }
2365 } else { 2370 } else {
2366 /* always assume link is up, if no check link 2371 /* always assume link is up, if no check link
2367 * function */ 2372 * function */
2368 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 2373 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
2369 link_up = true; 2374 link_up = true;
2370 } 2375 }
2371 adapter->link_up = link_up; 2376 adapter->link_up = link_up;
2372 adapter->link_speed = link_speed; 2377 adapter->link_speed = link_speed;
2373 2378
2374 if (link_up) { 2379 if (link_up) {
2375 if (!netif_carrier_ok(netdev)) { 2380 if (!netif_carrier_ok(netdev)) {
2376 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n", 2381 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2377 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2382 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2378 10 : 1); 2383 10 : 1);
2379 netif_carrier_on(netdev); 2384 netif_carrier_on(netdev);
2380 netif_tx_wake_all_queues(netdev); 2385 netif_tx_wake_all_queues(netdev);
2381 } 2386 }
2382 } else { 2387 } else {
2383 adapter->link_up = false; 2388 adapter->link_up = false;
2384 adapter->link_speed = 0; 2389 adapter->link_speed = 0;
2385 if (netif_carrier_ok(netdev)) { 2390 if (netif_carrier_ok(netdev)) {
2386 hw_dbg(&adapter->hw, "NIC Link is Down\n"); 2391 hw_dbg(&adapter->hw, "NIC Link is Down\n");
2387 netif_carrier_off(netdev); 2392 netif_carrier_off(netdev);
2388 netif_tx_stop_all_queues(netdev); 2393 netif_tx_stop_all_queues(netdev);
2389 } 2394 }
2390 } 2395 }
2391 2396
2392 ixgbevf_update_stats(adapter); 2397 ixgbevf_update_stats(adapter);
2393 2398
2394 pf_has_reset: 2399 pf_has_reset:
2395 /* Reset the timer */ 2400 /* Reset the timer */
2396 if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) 2401 if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
2397 mod_timer(&adapter->watchdog_timer, 2402 mod_timer(&adapter->watchdog_timer,
2398 round_jiffies(jiffies + (2 * HZ))); 2403 round_jiffies(jiffies + (2 * HZ)));
2399 2404
2400 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 2405 adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
2401 } 2406 }
2402 2407
2403 /** 2408 /**
2404 * ixgbevf_free_tx_resources - Free Tx Resources per Queue 2409 * ixgbevf_free_tx_resources - Free Tx Resources per Queue
2405 * @adapter: board private structure 2410 * @adapter: board private structure
2406 * @tx_ring: Tx descriptor ring for a specific queue 2411 * @tx_ring: Tx descriptor ring for a specific queue
2407 * 2412 *
2408 * Free all transmit software resources 2413 * Free all transmit software resources
2409 **/ 2414 **/
2410 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter, 2415 void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2411 struct ixgbevf_ring *tx_ring) 2416 struct ixgbevf_ring *tx_ring)
2412 { 2417 {
2413 struct pci_dev *pdev = adapter->pdev; 2418 struct pci_dev *pdev = adapter->pdev;
2414 2419
2415 ixgbevf_clean_tx_ring(adapter, tx_ring); 2420 ixgbevf_clean_tx_ring(adapter, tx_ring);
2416 2421
2417 vfree(tx_ring->tx_buffer_info); 2422 vfree(tx_ring->tx_buffer_info);
2418 tx_ring->tx_buffer_info = NULL; 2423 tx_ring->tx_buffer_info = NULL;
2419 2424
2420 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 2425 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2421 tx_ring->dma); 2426 tx_ring->dma);
2422 2427
2423 tx_ring->desc = NULL; 2428 tx_ring->desc = NULL;
2424 } 2429 }
2425 2430
2426 /** 2431 /**
2427 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues 2432 * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
2428 * @adapter: board private structure 2433 * @adapter: board private structure
2429 * 2434 *
2430 * Free all transmit software resources 2435 * Free all transmit software resources
2431 **/ 2436 **/
2432 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) 2437 static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
2433 { 2438 {
2434 int i; 2439 int i;
2435 2440
2436 for (i = 0; i < adapter->num_tx_queues; i++) 2441 for (i = 0; i < adapter->num_tx_queues; i++)
2437 if (adapter->tx_ring[i].desc) 2442 if (adapter->tx_ring[i].desc)
2438 ixgbevf_free_tx_resources(adapter, 2443 ixgbevf_free_tx_resources(adapter,
2439 &adapter->tx_ring[i]); 2444 &adapter->tx_ring[i]);
2440 2445
2441 } 2446 }
2442 2447
2443 /** 2448 /**
2444 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) 2449 * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
2445 * @adapter: board private structure 2450 * @adapter: board private structure
2446 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2451 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2447 * 2452 *
2448 * Return 0 on success, negative on failure 2453 * Return 0 on success, negative on failure
2449 **/ 2454 **/
2450 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter, 2455 int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2451 struct ixgbevf_ring *tx_ring) 2456 struct ixgbevf_ring *tx_ring)
2452 { 2457 {
2453 struct pci_dev *pdev = adapter->pdev; 2458 struct pci_dev *pdev = adapter->pdev;
2454 int size; 2459 int size;
2455 2460
2456 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2461 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2457 tx_ring->tx_buffer_info = vzalloc(size); 2462 tx_ring->tx_buffer_info = vzalloc(size);
2458 if (!tx_ring->tx_buffer_info) 2463 if (!tx_ring->tx_buffer_info)
2459 goto err; 2464 goto err;
2460 2465
2461 /* round up to nearest 4K */ 2466 /* round up to nearest 4K */
2462 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2467 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2463 tx_ring->size = ALIGN(tx_ring->size, 4096); 2468 tx_ring->size = ALIGN(tx_ring->size, 4096);
2464 2469
2465 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 2470 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2466 &tx_ring->dma, GFP_KERNEL); 2471 &tx_ring->dma, GFP_KERNEL);
2467 if (!tx_ring->desc) 2472 if (!tx_ring->desc)
2468 goto err; 2473 goto err;
2469 2474
2470 tx_ring->next_to_use = 0; 2475 tx_ring->next_to_use = 0;
2471 tx_ring->next_to_clean = 0; 2476 tx_ring->next_to_clean = 0;
2472 tx_ring->work_limit = tx_ring->count; 2477 tx_ring->work_limit = tx_ring->count;
2473 return 0; 2478 return 0;
2474 2479
2475 err: 2480 err:
2476 vfree(tx_ring->tx_buffer_info); 2481 vfree(tx_ring->tx_buffer_info);
2477 tx_ring->tx_buffer_info = NULL; 2482 tx_ring->tx_buffer_info = NULL;
2478 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " 2483 hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
2479 "descriptor ring\n"); 2484 "descriptor ring\n");
2480 return -ENOMEM; 2485 return -ENOMEM;
2481 } 2486 }
2482 2487
2483 /** 2488 /**
2484 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources 2489 * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
2485 * @adapter: board private structure 2490 * @adapter: board private structure
2486 * 2491 *
2487 * If this function returns with an error, then it's possible one or 2492 * If this function returns with an error, then it's possible one or
2488 * more of the rings is populated (while the rest are not). It is the 2493 * more of the rings is populated (while the rest are not). It is the
2489 * caller's duty to clean those orphaned rings. 2494 * caller's duty to clean those orphaned rings.
2490 * 2495 *
2491 * Return 0 on success, negative on failure 2496 * Return 0 on success, negative on failure
2492 **/ 2497 **/
2493 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) 2498 static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
2494 { 2499 {
2495 int i, err = 0; 2500 int i, err = 0;
2496 2501
2497 for (i = 0; i < adapter->num_tx_queues; i++) { 2502 for (i = 0; i < adapter->num_tx_queues; i++) {
2498 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2503 err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
2499 if (!err) 2504 if (!err)
2500 continue; 2505 continue;
2501 hw_dbg(&adapter->hw, 2506 hw_dbg(&adapter->hw,
2502 "Allocation for Tx Queue %u failed\n", i); 2507 "Allocation for Tx Queue %u failed\n", i);
2503 break; 2508 break;
2504 } 2509 }
2505 2510
2506 return err; 2511 return err;
2507 } 2512 }
2508 2513
2509 /** 2514 /**
2510 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) 2515 * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
2511 * @adapter: board private structure 2516 * @adapter: board private structure
2512 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2517 * @rx_ring: rx descriptor ring (for a specific queue) to setup
2513 * 2518 *
2514 * Returns 0 on success, negative on failure 2519 * Returns 0 on success, negative on failure
2515 **/ 2520 **/
2516 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter, 2521 int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2517 struct ixgbevf_ring *rx_ring) 2522 struct ixgbevf_ring *rx_ring)
2518 { 2523 {
2519 struct pci_dev *pdev = adapter->pdev; 2524 struct pci_dev *pdev = adapter->pdev;
2520 int size; 2525 int size;
2521 2526
2522 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2527 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2523 rx_ring->rx_buffer_info = vzalloc(size); 2528 rx_ring->rx_buffer_info = vzalloc(size);
2524 if (!rx_ring->rx_buffer_info) 2529 if (!rx_ring->rx_buffer_info)
2525 goto alloc_failed; 2530 goto alloc_failed;
2526 2531
2527 /* Round up to nearest 4K */ 2532 /* Round up to nearest 4K */
2528 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2533 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2529 rx_ring->size = ALIGN(rx_ring->size, 4096); 2534 rx_ring->size = ALIGN(rx_ring->size, 4096);
2530 2535
2531 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 2536 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2532 &rx_ring->dma, GFP_KERNEL); 2537 &rx_ring->dma, GFP_KERNEL);
2533 2538
2534 if (!rx_ring->desc) { 2539 if (!rx_ring->desc) {
2535 hw_dbg(&adapter->hw, 2540 hw_dbg(&adapter->hw,
2536 "Unable to allocate memory for " 2541 "Unable to allocate memory for "
2537 "the receive descriptor ring\n"); 2542 "the receive descriptor ring\n");
2538 vfree(rx_ring->rx_buffer_info); 2543 vfree(rx_ring->rx_buffer_info);
2539 rx_ring->rx_buffer_info = NULL; 2544 rx_ring->rx_buffer_info = NULL;
2540 goto alloc_failed; 2545 goto alloc_failed;
2541 } 2546 }
2542 2547
2543 rx_ring->next_to_clean = 0; 2548 rx_ring->next_to_clean = 0;
2544 rx_ring->next_to_use = 0; 2549 rx_ring->next_to_use = 0;
2545 2550
2546 return 0; 2551 return 0;
2547 alloc_failed: 2552 alloc_failed:
2548 return -ENOMEM; 2553 return -ENOMEM;
2549 } 2554 }
2550 2555
2551 /** 2556 /**
2552 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources 2557 * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
2553 * @adapter: board private structure 2558 * @adapter: board private structure
2554 * 2559 *
2555 * If this function returns with an error, then it's possible one or 2560 * If this function returns with an error, then it's possible one or
2556 * more of the rings is populated (while the rest are not). It is the 2561 * more of the rings is populated (while the rest are not). It is the
2557 * caller's duty to clean those orphaned rings. 2562 * caller's duty to clean those orphaned rings.
2558 * 2563 *
2559 * Return 0 on success, negative on failure 2564 * Return 0 on success, negative on failure
2560 **/ 2565 **/
2561 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) 2566 static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
2562 { 2567 {
2563 int i, err = 0; 2568 int i, err = 0;
2564 2569
2565 for (i = 0; i < adapter->num_rx_queues; i++) { 2570 for (i = 0; i < adapter->num_rx_queues; i++) {
2566 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2571 err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
2567 if (!err) 2572 if (!err)
2568 continue; 2573 continue;
2569 hw_dbg(&adapter->hw, 2574 hw_dbg(&adapter->hw,
2570 "Allocation for Rx Queue %u failed\n", i); 2575 "Allocation for Rx Queue %u failed\n", i);
2571 break; 2576 break;
2572 } 2577 }
2573 return err; 2578 return err;
2574 } 2579 }
2575 2580
2576 /** 2581 /**
2577 * ixgbevf_free_rx_resources - Free Rx Resources 2582 * ixgbevf_free_rx_resources - Free Rx Resources
2578 * @adapter: board private structure 2583 * @adapter: board private structure
2579 * @rx_ring: ring to clean the resources from 2584 * @rx_ring: ring to clean the resources from
2580 * 2585 *
2581 * Free all receive software resources 2586 * Free all receive software resources
2582 **/ 2587 **/
2583 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter, 2588 void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2584 struct ixgbevf_ring *rx_ring) 2589 struct ixgbevf_ring *rx_ring)
2585 { 2590 {
2586 struct pci_dev *pdev = adapter->pdev; 2591 struct pci_dev *pdev = adapter->pdev;
2587 2592
2588 ixgbevf_clean_rx_ring(adapter, rx_ring); 2593 ixgbevf_clean_rx_ring(adapter, rx_ring);
2589 2594
2590 vfree(rx_ring->rx_buffer_info); 2595 vfree(rx_ring->rx_buffer_info);
2591 rx_ring->rx_buffer_info = NULL; 2596 rx_ring->rx_buffer_info = NULL;
2592 2597
2593 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 2598 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2594 rx_ring->dma); 2599 rx_ring->dma);
2595 2600
2596 rx_ring->desc = NULL; 2601 rx_ring->desc = NULL;
2597 } 2602 }
2598 2603
2599 /** 2604 /**
2600 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues 2605 * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
2601 * @adapter: board private structure 2606 * @adapter: board private structure
2602 * 2607 *
2603 * Free all receive software resources 2608 * Free all receive software resources
2604 **/ 2609 **/
2605 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter) 2610 static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
2606 { 2611 {
2607 int i; 2612 int i;
2608 2613
2609 for (i = 0; i < adapter->num_rx_queues; i++) 2614 for (i = 0; i < adapter->num_rx_queues; i++)
2610 if (adapter->rx_ring[i].desc) 2615 if (adapter->rx_ring[i].desc)
2611 ixgbevf_free_rx_resources(adapter, 2616 ixgbevf_free_rx_resources(adapter,
2612 &adapter->rx_ring[i]); 2617 &adapter->rx_ring[i]);
2613 } 2618 }
2614 2619
2615 /** 2620 /**
2616 * ixgbevf_open - Called when a network interface is made active 2621 * ixgbevf_open - Called when a network interface is made active
2617 * @netdev: network interface device structure 2622 * @netdev: network interface device structure
2618 * 2623 *
2619 * Returns 0 on success, negative value on failure 2624 * Returns 0 on success, negative value on failure
2620 * 2625 *
2621 * The open entry point is called when a network interface is made 2626 * The open entry point is called when a network interface is made
2622 * active by the system (IFF_UP). At this point all resources needed 2627 * active by the system (IFF_UP). At this point all resources needed
2623 * for transmit and receive operations are allocated, the interrupt 2628 * for transmit and receive operations are allocated, the interrupt
2624 * handler is registered with the OS, the watchdog timer is started, 2629 * handler is registered with the OS, the watchdog timer is started,
2625 * and the stack is notified that the interface is ready. 2630 * and the stack is notified that the interface is ready.
2626 **/ 2631 **/
2627 static int ixgbevf_open(struct net_device *netdev) 2632 static int ixgbevf_open(struct net_device *netdev)
2628 { 2633 {
2629 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2634 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2630 struct ixgbe_hw *hw = &adapter->hw; 2635 struct ixgbe_hw *hw = &adapter->hw;
2631 int err; 2636 int err;
2632 2637
2633 /* disallow open during test */ 2638 /* disallow open during test */
2634 if (test_bit(__IXGBEVF_TESTING, &adapter->state)) 2639 if (test_bit(__IXGBEVF_TESTING, &adapter->state))
2635 return -EBUSY; 2640 return -EBUSY;
2636 2641
2637 if (hw->adapter_stopped) { 2642 if (hw->adapter_stopped) {
2638 ixgbevf_reset(adapter); 2643 ixgbevf_reset(adapter);
2639 /* if adapter is still stopped then PF isn't up and 2644 /* if adapter is still stopped then PF isn't up and
2640 * the vf can't start. */ 2645 * the vf can't start. */
2641 if (hw->adapter_stopped) { 2646 if (hw->adapter_stopped) {
2642 err = IXGBE_ERR_MBX; 2647 err = IXGBE_ERR_MBX;
2643 pr_err("Unable to start - perhaps the PF Driver isn't " 2648 pr_err("Unable to start - perhaps the PF Driver isn't "
2644 "up yet\n"); 2649 "up yet\n");
2645 goto err_setup_reset; 2650 goto err_setup_reset;
2646 } 2651 }
2647 } 2652 }
2648 2653
2649 /* allocate transmit descriptors */ 2654 /* allocate transmit descriptors */
2650 err = ixgbevf_setup_all_tx_resources(adapter); 2655 err = ixgbevf_setup_all_tx_resources(adapter);
2651 if (err) 2656 if (err)
2652 goto err_setup_tx; 2657 goto err_setup_tx;
2653 2658
2654 /* allocate receive descriptors */ 2659 /* allocate receive descriptors */
2655 err = ixgbevf_setup_all_rx_resources(adapter); 2660 err = ixgbevf_setup_all_rx_resources(adapter);
2656 if (err) 2661 if (err)
2657 goto err_setup_rx; 2662 goto err_setup_rx;
2658 2663
2659 ixgbevf_configure(adapter); 2664 ixgbevf_configure(adapter);
2660 2665
2661 /* 2666 /*
2662 * Map the Tx/Rx rings to the vectors we were allotted. 2667 * Map the Tx/Rx rings to the vectors we were allotted.
2663 * if request_irq will be called in this function map_rings 2668 * if request_irq will be called in this function map_rings
2664 * must be called *before* up_complete 2669 * must be called *before* up_complete
2665 */ 2670 */
2666 ixgbevf_map_rings_to_vectors(adapter); 2671 ixgbevf_map_rings_to_vectors(adapter);
2667 2672
2668 err = ixgbevf_up_complete(adapter); 2673 err = ixgbevf_up_complete(adapter);
2669 if (err) 2674 if (err)
2670 goto err_up; 2675 goto err_up;
2671 2676
2672 /* clear any pending interrupts, may auto mask */ 2677 /* clear any pending interrupts, may auto mask */
2673 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2678 IXGBE_READ_REG(hw, IXGBE_VTEICR);
2674 err = ixgbevf_request_irq(adapter); 2679 err = ixgbevf_request_irq(adapter);
2675 if (err) 2680 if (err)
2676 goto err_req_irq; 2681 goto err_req_irq;
2677 2682
2678 ixgbevf_irq_enable(adapter, true, true); 2683 ixgbevf_irq_enable(adapter, true, true);
2679 2684
2680 return 0; 2685 return 0;
2681 2686
2682 err_req_irq: 2687 err_req_irq:
2683 ixgbevf_down(adapter); 2688 ixgbevf_down(adapter);
2684 err_up: 2689 err_up:
2685 ixgbevf_free_irq(adapter); 2690 ixgbevf_free_irq(adapter);
2686 err_setup_rx: 2691 err_setup_rx:
2687 ixgbevf_free_all_rx_resources(adapter); 2692 ixgbevf_free_all_rx_resources(adapter);
2688 err_setup_tx: 2693 err_setup_tx:
2689 ixgbevf_free_all_tx_resources(adapter); 2694 ixgbevf_free_all_tx_resources(adapter);
2690 ixgbevf_reset(adapter); 2695 ixgbevf_reset(adapter);
2691 2696
2692 err_setup_reset: 2697 err_setup_reset:
2693 2698
2694 return err; 2699 return err;
2695 } 2700 }
2696 2701
2697 /** 2702 /**
2698 * ixgbevf_close - Disables a network interface 2703 * ixgbevf_close - Disables a network interface
2699 * @netdev: network interface device structure 2704 * @netdev: network interface device structure
2700 * 2705 *
2701 * Returns 0, this is not allowed to fail 2706 * Returns 0, this is not allowed to fail
2702 * 2707 *
2703 * The close entry point is called when an interface is de-activated 2708 * The close entry point is called when an interface is de-activated
2705 * by the OS. The hardware is still under the driver's control, but 2710 * by the OS. The hardware is still under the driver's control, but
2705 * needs to be disabled. A global MAC reset is issued to stop the 2710 * needs to be disabled. A global MAC reset is issued to stop the
2706 * hardware, and all transmit and receive resources are freed. 2711 * hardware, and all transmit and receive resources are freed.
2707 **/ 2712 **/
2708 static int ixgbevf_close(struct net_device *netdev) 2713 static int ixgbevf_close(struct net_device *netdev)
2709 { 2714 {
2710 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 2715 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
2711 2716
2712 ixgbevf_down(adapter); 2717 ixgbevf_down(adapter);
2713 ixgbevf_free_irq(adapter); 2718 ixgbevf_free_irq(adapter);
2714 2719
2715 ixgbevf_free_all_tx_resources(adapter); 2720 ixgbevf_free_all_tx_resources(adapter);
2716 ixgbevf_free_all_rx_resources(adapter); 2721 ixgbevf_free_all_rx_resources(adapter);
2717 2722
2718 return 0; 2723 return 0;
2719 } 2724 }
2720 2725
2721 static int ixgbevf_tso(struct ixgbevf_adapter *adapter, 2726 static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
2722 struct ixgbevf_ring *tx_ring, 2727 struct ixgbevf_ring *tx_ring,
2723 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 2728 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
2724 { 2729 {
2725 struct ixgbe_adv_tx_context_desc *context_desc; 2730 struct ixgbe_adv_tx_context_desc *context_desc;
2726 unsigned int i; 2731 unsigned int i;
2727 int err; 2732 int err;
2728 struct ixgbevf_tx_buffer *tx_buffer_info; 2733 struct ixgbevf_tx_buffer *tx_buffer_info;
2729 u32 vlan_macip_lens = 0, type_tucmd_mlhl; 2734 u32 vlan_macip_lens = 0, type_tucmd_mlhl;
2730 u32 mss_l4len_idx, l4len; 2735 u32 mss_l4len_idx, l4len;
2731 2736
2732 if (skb_is_gso(skb)) { 2737 if (skb_is_gso(skb)) {
2733 if (skb_header_cloned(skb)) { 2738 if (skb_header_cloned(skb)) {
2734 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 2739 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2735 if (err) 2740 if (err)
2736 return err; 2741 return err;
2737 } 2742 }
2738 l4len = tcp_hdrlen(skb); 2743 l4len = tcp_hdrlen(skb);
2739 *hdr_len += l4len; 2744 *hdr_len += l4len;
2740 2745
2741 if (skb->protocol == htons(ETH_P_IP)) { 2746 if (skb->protocol == htons(ETH_P_IP)) {
2742 struct iphdr *iph = ip_hdr(skb); 2747 struct iphdr *iph = ip_hdr(skb);
2743 iph->tot_len = 0; 2748 iph->tot_len = 0;
2744 iph->check = 0; 2749 iph->check = 0;
2745 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2750 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2746 iph->daddr, 0, 2751 iph->daddr, 0,
2747 IPPROTO_TCP, 2752 IPPROTO_TCP,
2748 0); 2753 0);
2749 adapter->hw_tso_ctxt++; 2754 adapter->hw_tso_ctxt++;
2750 } else if (skb_is_gso_v6(skb)) { 2755 } else if (skb_is_gso_v6(skb)) {
2751 ipv6_hdr(skb)->payload_len = 0; 2756 ipv6_hdr(skb)->payload_len = 0;
2752 tcp_hdr(skb)->check = 2757 tcp_hdr(skb)->check =
2753 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 2758 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2754 &ipv6_hdr(skb)->daddr, 2759 &ipv6_hdr(skb)->daddr,
2755 0, IPPROTO_TCP, 0); 2760 0, IPPROTO_TCP, 0);
2756 adapter->hw_tso6_ctxt++; 2761 adapter->hw_tso6_ctxt++;
2757 } 2762 }
2758 2763
2759 i = tx_ring->next_to_use; 2764 i = tx_ring->next_to_use;
2760 2765
2761 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2766 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2762 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 2767 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2763 2768
2764 /* VLAN MACLEN IPLEN */ 2769 /* VLAN MACLEN IPLEN */
2765 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2770 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2766 vlan_macip_lens |= 2771 vlan_macip_lens |=
2767 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 2772 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
2768 vlan_macip_lens |= ((skb_network_offset(skb)) << 2773 vlan_macip_lens |= ((skb_network_offset(skb)) <<
2769 IXGBE_ADVTXD_MACLEN_SHIFT); 2774 IXGBE_ADVTXD_MACLEN_SHIFT);
2770 *hdr_len += skb_network_offset(skb); 2775 *hdr_len += skb_network_offset(skb);
2771 vlan_macip_lens |= 2776 vlan_macip_lens |=
2772 (skb_transport_header(skb) - skb_network_header(skb)); 2777 (skb_transport_header(skb) - skb_network_header(skb));
2773 *hdr_len += 2778 *hdr_len +=
2774 (skb_transport_header(skb) - skb_network_header(skb)); 2779 (skb_transport_header(skb) - skb_network_header(skb));
2775 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2780 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2776 context_desc->seqnum_seed = 0; 2781 context_desc->seqnum_seed = 0;
2777 2782
2778 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 2783 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
2779 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 2784 type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
2780 IXGBE_ADVTXD_DTYP_CTXT); 2785 IXGBE_ADVTXD_DTYP_CTXT);
2781 2786
2782 if (skb->protocol == htons(ETH_P_IP)) 2787 if (skb->protocol == htons(ETH_P_IP))
2783 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 2788 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2784 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP; 2789 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
2785 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 2790 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2786 2791
2787 /* MSS L4LEN IDX */ 2792 /* MSS L4LEN IDX */
2788 mss_l4len_idx = 2793 mss_l4len_idx =
2789 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); 2794 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
2790 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); 2795 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
2791 /* use index 1 for TSO */ 2796 /* use index 1 for TSO */
2792 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 2797 mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
2793 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 2798 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
2794 2799
2795 tx_buffer_info->time_stamp = jiffies; 2800 tx_buffer_info->time_stamp = jiffies;
2796 tx_buffer_info->next_to_watch = i; 2801 tx_buffer_info->next_to_watch = i;
2797 2802
2798 i++; 2803 i++;
2799 if (i == tx_ring->count) 2804 if (i == tx_ring->count)
2800 i = 0; 2805 i = 0;
2801 tx_ring->next_to_use = i; 2806 tx_ring->next_to_use = i;
2802 2807
2803 return true; 2808 return true;
2804 } 2809 }
2805 2810
2806 return false; 2811 return false;
2807 } 2812 }
2808 2813
2809 static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter, 2814 static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
2810 struct ixgbevf_ring *tx_ring, 2815 struct ixgbevf_ring *tx_ring,
2811 struct sk_buff *skb, u32 tx_flags) 2816 struct sk_buff *skb, u32 tx_flags)
2812 { 2817 {
2813 struct ixgbe_adv_tx_context_desc *context_desc; 2818 struct ixgbe_adv_tx_context_desc *context_desc;
2814 unsigned int i; 2819 unsigned int i;
2815 struct ixgbevf_tx_buffer *tx_buffer_info; 2820 struct ixgbevf_tx_buffer *tx_buffer_info;
2816 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 2821 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2817 2822
2818 if (skb->ip_summed == CHECKSUM_PARTIAL || 2823 if (skb->ip_summed == CHECKSUM_PARTIAL ||
2819 (tx_flags & IXGBE_TX_FLAGS_VLAN)) { 2824 (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
2820 i = tx_ring->next_to_use; 2825 i = tx_ring->next_to_use;
2821 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2826 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2822 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i); 2827 context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
2823 2828
2824 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 2829 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
2825 vlan_macip_lens |= (tx_flags & 2830 vlan_macip_lens |= (tx_flags &
2826 IXGBE_TX_FLAGS_VLAN_MASK); 2831 IXGBE_TX_FLAGS_VLAN_MASK);
2827 vlan_macip_lens |= (skb_network_offset(skb) << 2832 vlan_macip_lens |= (skb_network_offset(skb) <<
2828 IXGBE_ADVTXD_MACLEN_SHIFT); 2833 IXGBE_ADVTXD_MACLEN_SHIFT);
2829 if (skb->ip_summed == CHECKSUM_PARTIAL) 2834 if (skb->ip_summed == CHECKSUM_PARTIAL)
2830 vlan_macip_lens |= (skb_transport_header(skb) - 2835 vlan_macip_lens |= (skb_transport_header(skb) -
2831 skb_network_header(skb)); 2836 skb_network_header(skb));
2832 2837
2833 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 2838 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
2834 context_desc->seqnum_seed = 0; 2839 context_desc->seqnum_seed = 0;
2835 2840
2836 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 2841 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
2837 IXGBE_ADVTXD_DTYP_CTXT); 2842 IXGBE_ADVTXD_DTYP_CTXT);
2838 2843
2839 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2844 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2840 switch (skb->protocol) { 2845 switch (skb->protocol) {
2841 case __constant_htons(ETH_P_IP): 2846 case __constant_htons(ETH_P_IP):
2842 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 2847 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
2843 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 2848 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2844 type_tucmd_mlhl |= 2849 type_tucmd_mlhl |=
2845 IXGBE_ADVTXD_TUCMD_L4T_TCP; 2850 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2846 break; 2851 break;
2847 case __constant_htons(ETH_P_IPV6): 2852 case __constant_htons(ETH_P_IPV6):
2848 /* XXX what about other V6 headers?? */ 2853 /* XXX what about other V6 headers?? */
2849 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 2854 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2850 type_tucmd_mlhl |= 2855 type_tucmd_mlhl |=
2851 IXGBE_ADVTXD_TUCMD_L4T_TCP; 2856 IXGBE_ADVTXD_TUCMD_L4T_TCP;
2852 break; 2857 break;
2853 default: 2858 default:
2854 if (unlikely(net_ratelimit())) { 2859 if (unlikely(net_ratelimit())) {
2855 pr_warn("partial checksum but " 2860 pr_warn("partial checksum but "
2856 "proto=%x!\n", skb->protocol); 2861 "proto=%x!\n", skb->protocol);
2857 } 2862 }
2858 break; 2863 break;
2859 } 2864 }
2860 } 2865 }
2861 2866
2862 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 2867 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
2863 /* use index zero for tx checksum offload */ 2868 /* use index zero for tx checksum offload */
2864 context_desc->mss_l4len_idx = 0; 2869 context_desc->mss_l4len_idx = 0;
2865 2870
2866 tx_buffer_info->time_stamp = jiffies; 2871 tx_buffer_info->time_stamp = jiffies;
2867 tx_buffer_info->next_to_watch = i; 2872 tx_buffer_info->next_to_watch = i;
2868 2873
2869 adapter->hw_csum_tx_good++; 2874 adapter->hw_csum_tx_good++;
2870 i++; 2875 i++;
2871 if (i == tx_ring->count) 2876 if (i == tx_ring->count)
2872 i = 0; 2877 i = 0;
2873 tx_ring->next_to_use = i; 2878 tx_ring->next_to_use = i;
2874 2879
2875 return true; 2880 return true;
2876 } 2881 }
2877 2882
2878 return false; 2883 return false;
2879 } 2884 }
2880 2885
2881 static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter, 2886 static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2882 struct ixgbevf_ring *tx_ring, 2887 struct ixgbevf_ring *tx_ring,
2883 struct sk_buff *skb, u32 tx_flags, 2888 struct sk_buff *skb, u32 tx_flags,
2884 unsigned int first) 2889 unsigned int first)
2885 { 2890 {
2886 struct pci_dev *pdev = adapter->pdev; 2891 struct pci_dev *pdev = adapter->pdev;
2887 struct ixgbevf_tx_buffer *tx_buffer_info; 2892 struct ixgbevf_tx_buffer *tx_buffer_info;
2888 unsigned int len; 2893 unsigned int len;
2889 unsigned int total = skb->len; 2894 unsigned int total = skb->len;
2890 unsigned int offset = 0, size; 2895 unsigned int offset = 0, size;
2891 int count = 0; 2896 int count = 0;
2892 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 2897 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2893 unsigned int f; 2898 unsigned int f;
2894 int i; 2899 int i;
2895 2900
2896 i = tx_ring->next_to_use; 2901 i = tx_ring->next_to_use;
2897 2902
2898 len = min(skb_headlen(skb), total); 2903 len = min(skb_headlen(skb), total);
2899 while (len) { 2904 while (len) {
2900 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2905 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2901 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2906 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2902 2907
2903 tx_buffer_info->length = size; 2908 tx_buffer_info->length = size;
2904 tx_buffer_info->mapped_as_page = false; 2909 tx_buffer_info->mapped_as_page = false;
2905 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev, 2910 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
2906 skb->data + offset, 2911 skb->data + offset,
2907 size, DMA_TO_DEVICE); 2912 size, DMA_TO_DEVICE);
2908 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 2913 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2909 goto dma_error; 2914 goto dma_error;
2910 tx_buffer_info->time_stamp = jiffies; 2915 tx_buffer_info->time_stamp = jiffies;
2911 tx_buffer_info->next_to_watch = i; 2916 tx_buffer_info->next_to_watch = i;
2912 2917
2913 len -= size; 2918 len -= size;
2914 total -= size; 2919 total -= size;
2915 offset += size; 2920 offset += size;
2916 count++; 2921 count++;
2917 i++; 2922 i++;
2918 if (i == tx_ring->count) 2923 if (i == tx_ring->count)
2919 i = 0; 2924 i = 0;
2920 } 2925 }
2921 2926
2922 for (f = 0; f < nr_frags; f++) { 2927 for (f = 0; f < nr_frags; f++) {
2923 const struct skb_frag_struct *frag; 2928 const struct skb_frag_struct *frag;
2924 2929
2925 frag = &skb_shinfo(skb)->frags[f]; 2930 frag = &skb_shinfo(skb)->frags[f];
2926 len = min((unsigned int)skb_frag_size(frag), total); 2931 len = min((unsigned int)skb_frag_size(frag), total);
2927 offset = 0; 2932 offset = 0;
2928 2933
2929 while (len) { 2934 while (len) {
2930 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2935 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2931 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2936 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2932 2937
2933 tx_buffer_info->length = size; 2938 tx_buffer_info->length = size;
2934 tx_buffer_info->dma = 2939 tx_buffer_info->dma =
2935 skb_frag_dma_map(&adapter->pdev->dev, frag, 2940 skb_frag_dma_map(&adapter->pdev->dev, frag,
2936 offset, size, DMA_TO_DEVICE); 2941 offset, size, DMA_TO_DEVICE);
2937 tx_buffer_info->mapped_as_page = true; 2942 tx_buffer_info->mapped_as_page = true;
2938 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 2943 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2939 goto dma_error; 2944 goto dma_error;
2940 tx_buffer_info->time_stamp = jiffies; 2945 tx_buffer_info->time_stamp = jiffies;
2941 tx_buffer_info->next_to_watch = i; 2946 tx_buffer_info->next_to_watch = i;
2942 2947
2943 len -= size; 2948 len -= size;
2944 total -= size; 2949 total -= size;
2945 offset += size; 2950 offset += size;
2946 count++; 2951 count++;
2947 i++; 2952 i++;
2948 if (i == tx_ring->count) 2953 if (i == tx_ring->count)
2949 i = 0; 2954 i = 0;
2950 } 2955 }
2951 if (total == 0) 2956 if (total == 0)
2952 break; 2957 break;
2953 } 2958 }
2954 2959
2955 if (i == 0) 2960 if (i == 0)
2956 i = tx_ring->count - 1; 2961 i = tx_ring->count - 1;
2957 else 2962 else
2958 i = i - 1; 2963 i = i - 1;
2959 tx_ring->tx_buffer_info[i].skb = skb; 2964 tx_ring->tx_buffer_info[i].skb = skb;
2960 tx_ring->tx_buffer_info[first].next_to_watch = i; 2965 tx_ring->tx_buffer_info[first].next_to_watch = i;
2961 2966
2962 return count; 2967 return count;
2963 2968
2964 dma_error: 2969 dma_error:
2965 dev_err(&pdev->dev, "TX DMA map failed\n"); 2970 dev_err(&pdev->dev, "TX DMA map failed\n");
2966 2971
2967 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2972 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2968 tx_buffer_info->dma = 0; 2973 tx_buffer_info->dma = 0;
2969 tx_buffer_info->time_stamp = 0; 2974 tx_buffer_info->time_stamp = 0;
2970 tx_buffer_info->next_to_watch = 0; 2975 tx_buffer_info->next_to_watch = 0;
2971 count--; 2976 count--;
2972 2977
2973 /* clear timestamp and dma mappings for remaining portion of packet */ 2978 /* clear timestamp and dma mappings for remaining portion of packet */
2974 while (count >= 0) { 2979 while (count >= 0) {
2975 count--; 2980 count--;
2976 i--; 2981 i--;
2977 if (i < 0) 2982 if (i < 0)
2978 i += tx_ring->count; 2983 i += tx_ring->count;
2979 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 2984 tx_buffer_info = &tx_ring->tx_buffer_info[i];
2980 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info); 2985 ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
2981 } 2986 }
2982 2987
2983 return count; 2988 return count;
2984 } 2989 }
2985 2990
2986 static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter, 2991 static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
2987 struct ixgbevf_ring *tx_ring, int tx_flags, 2992 struct ixgbevf_ring *tx_ring, int tx_flags,
2988 int count, u32 paylen, u8 hdr_len) 2993 int count, u32 paylen, u8 hdr_len)
2989 { 2994 {
2990 union ixgbe_adv_tx_desc *tx_desc = NULL; 2995 union ixgbe_adv_tx_desc *tx_desc = NULL;
2991 struct ixgbevf_tx_buffer *tx_buffer_info; 2996 struct ixgbevf_tx_buffer *tx_buffer_info;
2992 u32 olinfo_status = 0, cmd_type_len = 0; 2997 u32 olinfo_status = 0, cmd_type_len = 0;
2993 unsigned int i; 2998 unsigned int i;
2994 2999
2995 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS; 3000 u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
2996 3001
2997 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA; 3002 cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
2998 3003
2999 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT; 3004 cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
3000 3005
3001 if (tx_flags & IXGBE_TX_FLAGS_VLAN) 3006 if (tx_flags & IXGBE_TX_FLAGS_VLAN)
3002 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE; 3007 cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
3003 3008
3004 if (tx_flags & IXGBE_TX_FLAGS_TSO) { 3009 if (tx_flags & IXGBE_TX_FLAGS_TSO) {
3005 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3010 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
3006 3011
3007 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3012 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3008 IXGBE_ADVTXD_POPTS_SHIFT; 3013 IXGBE_ADVTXD_POPTS_SHIFT;
3009 3014
3010 /* use index 1 context for tso */ 3015 /* use index 1 context for tso */
3011 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 3016 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
3012 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3017 if (tx_flags & IXGBE_TX_FLAGS_IPV4)
3013 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 3018 olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
3014 IXGBE_ADVTXD_POPTS_SHIFT; 3019 IXGBE_ADVTXD_POPTS_SHIFT;
3015 3020
3016 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3021 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
3017 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3022 olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
3018 IXGBE_ADVTXD_POPTS_SHIFT; 3023 IXGBE_ADVTXD_POPTS_SHIFT;
3019 3024
3020 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 3025 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
3021 3026
3022 i = tx_ring->next_to_use; 3027 i = tx_ring->next_to_use;
3023 while (count--) { 3028 while (count--) {
3024 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 3029 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3025 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 3030 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
3026 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 3031 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
3027 tx_desc->read.cmd_type_len = 3032 tx_desc->read.cmd_type_len =
3028 cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3033 cpu_to_le32(cmd_type_len | tx_buffer_info->length);
3029 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3034 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3030 i++; 3035 i++;
3031 if (i == tx_ring->count) 3036 if (i == tx_ring->count)
3032 i = 0; 3037 i = 0;
3033 } 3038 }
3034 3039
3035 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 3040 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
3036 3041
3037 /* 3042 /*
3038 * Force memory writes to complete before letting h/w 3043 * Force memory writes to complete before letting h/w
3039 * know there are new descriptors to fetch. (Only 3044 * know there are new descriptors to fetch. (Only
3040 * applicable for weak-ordered memory model archs, 3045 * applicable for weak-ordered memory model archs,
3041 * such as IA-64). 3046 * such as IA-64).
3042 */ 3047 */
3043 wmb(); 3048 wmb();
3044 3049
3045 tx_ring->next_to_use = i; 3050 tx_ring->next_to_use = i;
3046 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3051 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3047 } 3052 }
3048 3053
3049 static int __ixgbevf_maybe_stop_tx(struct net_device *netdev, 3054 static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
3050 struct ixgbevf_ring *tx_ring, int size) 3055 struct ixgbevf_ring *tx_ring, int size)
3051 { 3056 {
3052 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3057 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3053 3058
3054 netif_stop_subqueue(netdev, tx_ring->queue_index); 3059 netif_stop_subqueue(netdev, tx_ring->queue_index);
3055 /* Herbert's original patch had: 3060 /* Herbert's original patch had:
3056 * smp_mb__after_netif_stop_queue(); 3061 * smp_mb__after_netif_stop_queue();
3057 * but since that doesn't exist yet, just open code it. */ 3062 * but since that doesn't exist yet, just open code it. */
3058 smp_mb(); 3063 smp_mb();
3059 3064
3060 /* We need to check again in case another CPU has just 3065 /* We need to check again in case another CPU has just
3061 * made room available. */ 3066 * made room available. */
3062 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size)) 3067 if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
3063 return -EBUSY; 3068 return -EBUSY;
3064 3069
3065 /* A reprieve! - use start_queue because it doesn't call schedule */ 3070 /* A reprieve! - use start_queue because it doesn't call schedule */
3066 netif_start_subqueue(netdev, tx_ring->queue_index); 3071 netif_start_subqueue(netdev, tx_ring->queue_index);
3067 ++adapter->restart_queue; 3072 ++adapter->restart_queue;
3068 return 0; 3073 return 0;
3069 } 3074 }
3070 3075
3071 static int ixgbevf_maybe_stop_tx(struct net_device *netdev, 3076 static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
3072 struct ixgbevf_ring *tx_ring, int size) 3077 struct ixgbevf_ring *tx_ring, int size)
3073 { 3078 {
3074 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 3079 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
3075 return 0; 3080 return 0;
3076 return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size); 3081 return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
3077 } 3082 }
3078 3083
3079 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3084 static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3080 { 3085 {
3081 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3086 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3082 struct ixgbevf_ring *tx_ring; 3087 struct ixgbevf_ring *tx_ring;
3083 unsigned int first; 3088 unsigned int first;
3084 unsigned int tx_flags = 0; 3089 unsigned int tx_flags = 0;
3085 u8 hdr_len = 0; 3090 u8 hdr_len = 0;
3086 int r_idx = 0, tso; 3091 int r_idx = 0, tso;
3087 int count = 0; 3092 int count = 0;
3088 3093
3089 unsigned int f; 3094 unsigned int f;
3090 3095
3091 tx_ring = &adapter->tx_ring[r_idx]; 3096 tx_ring = &adapter->tx_ring[r_idx];
3092 3097
3093 if (vlan_tx_tag_present(skb)) { 3098 if (vlan_tx_tag_present(skb)) {
3094 tx_flags |= vlan_tx_tag_get(skb); 3099 tx_flags |= vlan_tx_tag_get(skb);
3095 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3100 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
3096 tx_flags |= IXGBE_TX_FLAGS_VLAN; 3101 tx_flags |= IXGBE_TX_FLAGS_VLAN;
3097 } 3102 }
3098 3103
3099 /* four things can cause us to need a context descriptor */ 3104 /* four things can cause us to need a context descriptor */
3100 if (skb_is_gso(skb) || 3105 if (skb_is_gso(skb) ||
3101 (skb->ip_summed == CHECKSUM_PARTIAL) || 3106 (skb->ip_summed == CHECKSUM_PARTIAL) ||
3102 (tx_flags & IXGBE_TX_FLAGS_VLAN)) 3107 (tx_flags & IXGBE_TX_FLAGS_VLAN))
3103 count++; 3108 count++;
3104 3109
3105 count += TXD_USE_COUNT(skb_headlen(skb)); 3110 count += TXD_USE_COUNT(skb_headlen(skb));
3106 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3111 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
3107 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f])); 3112 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
3108 3113
3109 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) { 3114 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
3110 adapter->tx_busy++; 3115 adapter->tx_busy++;
3111 return NETDEV_TX_BUSY; 3116 return NETDEV_TX_BUSY;
3112 } 3117 }
3113 3118
3114 first = tx_ring->next_to_use; 3119 first = tx_ring->next_to_use;
3115 3120
3116 if (skb->protocol == htons(ETH_P_IP)) 3121 if (skb->protocol == htons(ETH_P_IP))
3117 tx_flags |= IXGBE_TX_FLAGS_IPV4; 3122 tx_flags |= IXGBE_TX_FLAGS_IPV4;
3118 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len); 3123 tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
3119 if (tso < 0) { 3124 if (tso < 0) {
3120 dev_kfree_skb_any(skb); 3125 dev_kfree_skb_any(skb);
3121 return NETDEV_TX_OK; 3126 return NETDEV_TX_OK;
3122 } 3127 }
3123 3128
3124 if (tso) 3129 if (tso)
3125 tx_flags |= IXGBE_TX_FLAGS_TSO; 3130 tx_flags |= IXGBE_TX_FLAGS_TSO;
3126 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) && 3131 else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
3127 (skb->ip_summed == CHECKSUM_PARTIAL)) 3132 (skb->ip_summed == CHECKSUM_PARTIAL))
3128 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3133 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3129 3134
3130 ixgbevf_tx_queue(adapter, tx_ring, tx_flags, 3135 ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
3131 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), 3136 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
3132 skb->len, hdr_len); 3137 skb->len, hdr_len);
3133 3138
3134 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 3139 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3135 3140
3136 return NETDEV_TX_OK; 3141 return NETDEV_TX_OK;
3137 } 3142 }
3138 3143
3139 /** 3144 /**
3140 * ixgbevf_set_mac - Change the Ethernet Address of the NIC 3145 * ixgbevf_set_mac - Change the Ethernet Address of the NIC
3141 * @netdev: network interface device structure 3146 * @netdev: network interface device structure
3142 * @p: pointer to an address structure 3147 * @p: pointer to an address structure
3143 * 3148 *
3144 * Returns 0 on success, negative on failure 3149 * Returns 0 on success, negative on failure
3145 **/ 3150 **/
3146 static int ixgbevf_set_mac(struct net_device *netdev, void *p) 3151 static int ixgbevf_set_mac(struct net_device *netdev, void *p)
3147 { 3152 {
3148 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3153 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3149 struct ixgbe_hw *hw = &adapter->hw; 3154 struct ixgbe_hw *hw = &adapter->hw;
3150 struct sockaddr *addr = p; 3155 struct sockaddr *addr = p;
3151 3156
3152 if (!is_valid_ether_addr(addr->sa_data)) 3157 if (!is_valid_ether_addr(addr->sa_data))
3153 return -EADDRNOTAVAIL; 3158 return -EADDRNOTAVAIL;
3154 3159
3155 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3160 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
3156 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3161 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
3157 3162
3158 if (hw->mac.ops.set_rar) 3163 if (hw->mac.ops.set_rar)
3159 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0); 3164 hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
3160 3165
3161 return 0; 3166 return 0;
3162 } 3167 }
3163 3168
3164 /** 3169 /**
3165 * ixgbevf_change_mtu - Change the Maximum Transfer Unit 3170 * ixgbevf_change_mtu - Change the Maximum Transfer Unit
3166 * @netdev: network interface device structure 3171 * @netdev: network interface device structure
3167 * @new_mtu: new value for maximum frame size 3172 * @new_mtu: new value for maximum frame size
3168 * 3173 *
3169 * Returns 0 on success, negative on failure 3174 * Returns 0 on success, negative on failure
3170 **/ 3175 **/
3171 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu) 3176 static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3172 { 3177 {
3173 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3178 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3174 struct ixgbe_hw *hw = &adapter->hw; 3179 struct ixgbe_hw *hw = &adapter->hw;
3175 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3180 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3176 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE; 3181 int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
3177 u32 msg[2]; 3182 u32 msg[2];
3178 3183
3179 if (adapter->hw.mac.type == ixgbe_mac_X540_vf) 3184 if (adapter->hw.mac.type == ixgbe_mac_X540_vf)
3180 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE; 3185 max_possible_frame = IXGBE_MAX_JUMBO_FRAME_SIZE;
3181 3186
3182 /* MTU < 68 is an error and causes problems on some kernels */ 3187 /* MTU < 68 is an error and causes problems on some kernels */
3183 if ((new_mtu < 68) || (max_frame > max_possible_frame)) 3188 if ((new_mtu < 68) || (max_frame > max_possible_frame))
3184 return -EINVAL; 3189 return -EINVAL;
3185 3190
3186 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n", 3191 hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
3187 netdev->mtu, new_mtu); 3192 netdev->mtu, new_mtu);
3188 /* must set new MTU before calling down or up */ 3193 /* must set new MTU before calling down or up */
3189 netdev->mtu = new_mtu; 3194 netdev->mtu = new_mtu;
3190 3195
3191 msg[0] = IXGBE_VF_SET_LPE; 3196 msg[0] = IXGBE_VF_SET_LPE;
3192 msg[1] = max_frame; 3197 msg[1] = max_frame;
3193 hw->mbx.ops.write_posted(hw, msg, 2); 3198 hw->mbx.ops.write_posted(hw, msg, 2);
3194 3199
3195 if (netif_running(netdev)) 3200 if (netif_running(netdev))
3196 ixgbevf_reinit_locked(adapter); 3201 ixgbevf_reinit_locked(adapter);
3197 3202
3198 return 0; 3203 return 0;
3199 } 3204 }
3200 3205
3201 static void ixgbevf_shutdown(struct pci_dev *pdev) 3206 static void ixgbevf_shutdown(struct pci_dev *pdev)
3202 { 3207 {
3203 struct net_device *netdev = pci_get_drvdata(pdev); 3208 struct net_device *netdev = pci_get_drvdata(pdev);
3204 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3209 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3205 3210
3206 netif_device_detach(netdev); 3211 netif_device_detach(netdev);
3207 3212
3208 if (netif_running(netdev)) { 3213 if (netif_running(netdev)) {
3209 ixgbevf_down(adapter); 3214 ixgbevf_down(adapter);
3210 ixgbevf_free_irq(adapter); 3215 ixgbevf_free_irq(adapter);
3211 ixgbevf_free_all_tx_resources(adapter); 3216 ixgbevf_free_all_tx_resources(adapter);
3212 ixgbevf_free_all_rx_resources(adapter); 3217 ixgbevf_free_all_rx_resources(adapter);
3213 } 3218 }
3214 3219
3215 #ifdef CONFIG_PM 3220 #ifdef CONFIG_PM
3216 pci_save_state(pdev); 3221 pci_save_state(pdev);
3217 #endif 3222 #endif
3218 3223
3219 pci_disable_device(pdev); 3224 pci_disable_device(pdev);
3220 } 3225 }
3221 3226
3222 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev, 3227 static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
3223 struct rtnl_link_stats64 *stats) 3228 struct rtnl_link_stats64 *stats)
3224 { 3229 {
3225 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3230 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3226 unsigned int start; 3231 unsigned int start;
3227 u64 bytes, packets; 3232 u64 bytes, packets;
3228 const struct ixgbevf_ring *ring; 3233 const struct ixgbevf_ring *ring;
3229 int i; 3234 int i;
3230 3235
3231 ixgbevf_update_stats(adapter); 3236 ixgbevf_update_stats(adapter);
3232 3237
3233 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc; 3238 stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
3234 3239
3235 for (i = 0; i < adapter->num_rx_queues; i++) { 3240 for (i = 0; i < adapter->num_rx_queues; i++) {
3236 ring = &adapter->rx_ring[i]; 3241 ring = &adapter->rx_ring[i];
3237 do { 3242 do {
3238 start = u64_stats_fetch_begin_bh(&ring->syncp); 3243 start = u64_stats_fetch_begin_bh(&ring->syncp);
3239 bytes = ring->total_bytes; 3244 bytes = ring->total_bytes;
3240 packets = ring->total_packets; 3245 packets = ring->total_packets;
3241 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3246 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3242 stats->rx_bytes += bytes; 3247 stats->rx_bytes += bytes;
3243 stats->rx_packets += packets; 3248 stats->rx_packets += packets;
3244 } 3249 }
3245 3250
3246 for (i = 0; i < adapter->num_tx_queues; i++) { 3251 for (i = 0; i < adapter->num_tx_queues; i++) {
3247 ring = &adapter->tx_ring[i]; 3252 ring = &adapter->tx_ring[i];
3248 do { 3253 do {
3249 start = u64_stats_fetch_begin_bh(&ring->syncp); 3254 start = u64_stats_fetch_begin_bh(&ring->syncp);
3250 bytes = ring->total_bytes; 3255 bytes = ring->total_bytes;
3251 packets = ring->total_packets; 3256 packets = ring->total_packets;
3252 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 3257 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
3253 stats->tx_bytes += bytes; 3258 stats->tx_bytes += bytes;
3254 stats->tx_packets += packets; 3259 stats->tx_packets += packets;
3255 } 3260 }
3256 3261
3257 return stats; 3262 return stats;
3258 } 3263 }
3259 3264
3260 static int ixgbevf_set_features(struct net_device *netdev, 3265 static int ixgbevf_set_features(struct net_device *netdev,
3261 netdev_features_t features) 3266 netdev_features_t features)
3262 { 3267 {
3263 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3268 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3264 3269
3265 if (features & NETIF_F_RXCSUM) 3270 if (features & NETIF_F_RXCSUM)
3266 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 3271 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
3267 else 3272 else
3268 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; 3273 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
3269 3274
3270 return 0; 3275 return 0;
3271 } 3276 }
3272 3277
3273 static const struct net_device_ops ixgbe_netdev_ops = { 3278 static const struct net_device_ops ixgbe_netdev_ops = {
3274 .ndo_open = ixgbevf_open, 3279 .ndo_open = ixgbevf_open,
3275 .ndo_stop = ixgbevf_close, 3280 .ndo_stop = ixgbevf_close,
3276 .ndo_start_xmit = ixgbevf_xmit_frame, 3281 .ndo_start_xmit = ixgbevf_xmit_frame,
3277 .ndo_set_rx_mode = ixgbevf_set_rx_mode, 3282 .ndo_set_rx_mode = ixgbevf_set_rx_mode,
3278 .ndo_get_stats64 = ixgbevf_get_stats, 3283 .ndo_get_stats64 = ixgbevf_get_stats,
3279 .ndo_validate_addr = eth_validate_addr, 3284 .ndo_validate_addr = eth_validate_addr,
3280 .ndo_set_mac_address = ixgbevf_set_mac, 3285 .ndo_set_mac_address = ixgbevf_set_mac,
3281 .ndo_change_mtu = ixgbevf_change_mtu, 3286 .ndo_change_mtu = ixgbevf_change_mtu,
3282 .ndo_tx_timeout = ixgbevf_tx_timeout, 3287 .ndo_tx_timeout = ixgbevf_tx_timeout,
3283 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 3288 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
3284 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 3289 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
3285 .ndo_set_features = ixgbevf_set_features, 3290 .ndo_set_features = ixgbevf_set_features,
3286 }; 3291 };
3287 3292
3288 static void ixgbevf_assign_netdev_ops(struct net_device *dev) 3293 static void ixgbevf_assign_netdev_ops(struct net_device *dev)
3289 { 3294 {
3290 dev->netdev_ops = &ixgbe_netdev_ops; 3295 dev->netdev_ops = &ixgbe_netdev_ops;
3291 ixgbevf_set_ethtool_ops(dev); 3296 ixgbevf_set_ethtool_ops(dev);
3292 dev->watchdog_timeo = 5 * HZ; 3297 dev->watchdog_timeo = 5 * HZ;
3293 } 3298 }
3294 3299
3295 /** 3300 /**
3296 * ixgbevf_probe - Device Initialization Routine 3301 * ixgbevf_probe - Device Initialization Routine
3297 * @pdev: PCI device information struct 3302 * @pdev: PCI device information struct
3298 * @ent: entry in ixgbevf_pci_tbl 3303 * @ent: entry in ixgbevf_pci_tbl
3299 * 3304 *
3300 * Returns 0 on success, negative on failure 3305 * Returns 0 on success, negative on failure
3301 * 3306 *
3302 * ixgbevf_probe initializes an adapter identified by a pci_dev structure. 3307 * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
3303 * The OS initialization, configuring of the adapter private structure, 3308 * The OS initialization, configuring of the adapter private structure,
3304 * and a hardware reset occur. 3309 * and a hardware reset occur.
3305 **/ 3310 **/
3306 static int __devinit ixgbevf_probe(struct pci_dev *pdev, 3311 static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3307 const struct pci_device_id *ent) 3312 const struct pci_device_id *ent)
3308 { 3313 {
3309 struct net_device *netdev; 3314 struct net_device *netdev;
3310 struct ixgbevf_adapter *adapter = NULL; 3315 struct ixgbevf_adapter *adapter = NULL;
3311 struct ixgbe_hw *hw = NULL; 3316 struct ixgbe_hw *hw = NULL;
3312 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data]; 3317 const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
3313 static int cards_found; 3318 static int cards_found;
3314 int err, pci_using_dac; 3319 int err, pci_using_dac;
3315 3320
3316 err = pci_enable_device(pdev); 3321 err = pci_enable_device(pdev);
3317 if (err) 3322 if (err)
3318 return err; 3323 return err;
3319 3324
3320 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) && 3325 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3321 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) { 3326 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3322 pci_using_dac = 1; 3327 pci_using_dac = 1;
3323 } else { 3328 } else {
3324 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 3329 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3325 if (err) { 3330 if (err) {
3326 err = dma_set_coherent_mask(&pdev->dev, 3331 err = dma_set_coherent_mask(&pdev->dev,
3327 DMA_BIT_MASK(32)); 3332 DMA_BIT_MASK(32));
3328 if (err) { 3333 if (err) {
3329 dev_err(&pdev->dev, "No usable DMA " 3334 dev_err(&pdev->dev, "No usable DMA "
3330 "configuration, aborting\n"); 3335 "configuration, aborting\n");
3331 goto err_dma; 3336 goto err_dma;
3332 } 3337 }
3333 } 3338 }
3334 pci_using_dac = 0; 3339 pci_using_dac = 0;
3335 } 3340 }
3336 3341
3337 err = pci_request_regions(pdev, ixgbevf_driver_name); 3342 err = pci_request_regions(pdev, ixgbevf_driver_name);
3338 if (err) { 3343 if (err) {
3339 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err); 3344 dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
3340 goto err_pci_reg; 3345 goto err_pci_reg;
3341 } 3346 }
3342 3347
3343 pci_set_master(pdev); 3348 pci_set_master(pdev);
3344 3349
3345 #ifdef HAVE_TX_MQ 3350 #ifdef HAVE_TX_MQ
3346 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter), 3351 netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
3347 MAX_TX_QUEUES); 3352 MAX_TX_QUEUES);
3348 #else 3353 #else
3349 netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter)); 3354 netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
3350 #endif 3355 #endif
3351 if (!netdev) { 3356 if (!netdev) {
3352 err = -ENOMEM; 3357 err = -ENOMEM;
3353 goto err_alloc_etherdev; 3358 goto err_alloc_etherdev;
3354 } 3359 }
3355 3360
3356 SET_NETDEV_DEV(netdev, &pdev->dev); 3361 SET_NETDEV_DEV(netdev, &pdev->dev);
3357 3362
3358 pci_set_drvdata(pdev, netdev); 3363 pci_set_drvdata(pdev, netdev);
3359 adapter = netdev_priv(netdev); 3364 adapter = netdev_priv(netdev);
3360 3365
3361 adapter->netdev = netdev; 3366 adapter->netdev = netdev;
3362 adapter->pdev = pdev; 3367 adapter->pdev = pdev;
3363 hw = &adapter->hw; 3368 hw = &adapter->hw;
3364 hw->back = adapter; 3369 hw->back = adapter;
3365 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3370 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3366 3371
3367 /* 3372 /*
3368 * call save state here in standalone driver because it relies on 3373 * call save state here in standalone driver because it relies on
3369 * adapter struct to exist, and needs to call netdev_priv 3374 * adapter struct to exist, and needs to call netdev_priv
3370 */ 3375 */
3371 pci_save_state(pdev); 3376 pci_save_state(pdev);
3372 3377
3373 hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3378 hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3374 pci_resource_len(pdev, 0)); 3379 pci_resource_len(pdev, 0));
3375 if (!hw->hw_addr) { 3380 if (!hw->hw_addr) {
3376 err = -EIO; 3381 err = -EIO;
3377 goto err_ioremap; 3382 goto err_ioremap;
3378 } 3383 }
3379 3384
3380 ixgbevf_assign_netdev_ops(netdev); 3385 ixgbevf_assign_netdev_ops(netdev);
3381 3386
3382 adapter->bd_number = cards_found; 3387 adapter->bd_number = cards_found;
3383 3388
3384 /* Setup hw api */ 3389 /* Setup hw api */
3385 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3390 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3386 hw->mac.type = ii->mac; 3391 hw->mac.type = ii->mac;
3387 3392
3388 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, 3393 memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops,
3389 sizeof(struct ixgbe_mbx_operations)); 3394 sizeof(struct ixgbe_mbx_operations));
3390 3395
3391 adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE; 3396 adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
3392 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; 3397 adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
3393 adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE; 3398 adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
3394 3399
3395 /* setup the private structure */ 3400 /* setup the private structure */
3396 err = ixgbevf_sw_init(adapter); 3401 err = ixgbevf_sw_init(adapter);
3402 if (err)
3403 goto err_sw_init;
3397 3404
3405 /* The HW MAC address was set and/or determined in sw_init */
3406 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3407
3408 if (!is_valid_ether_addr(netdev->dev_addr)) {
3409 pr_err("invalid MAC address\n");
3410 err = -EIO;
3411 goto err_sw_init;
3412 }
3413
3398 netdev->hw_features = NETIF_F_SG | 3414 netdev->hw_features = NETIF_F_SG |
3399 NETIF_F_IP_CSUM | 3415 NETIF_F_IP_CSUM |
3400 NETIF_F_IPV6_CSUM | 3416 NETIF_F_IPV6_CSUM |
3401 NETIF_F_TSO | 3417 NETIF_F_TSO |
3402 NETIF_F_TSO6 | 3418 NETIF_F_TSO6 |
3403 NETIF_F_RXCSUM; 3419 NETIF_F_RXCSUM;
3404 3420
3405 netdev->features = netdev->hw_features | 3421 netdev->features = netdev->hw_features |
3406 NETIF_F_HW_VLAN_TX | 3422 NETIF_F_HW_VLAN_TX |
3407 NETIF_F_HW_VLAN_RX | 3423 NETIF_F_HW_VLAN_RX |
3408 NETIF_F_HW_VLAN_FILTER; 3424 NETIF_F_HW_VLAN_FILTER;
3409 3425
3410 netdev->vlan_features |= NETIF_F_TSO; 3426 netdev->vlan_features |= NETIF_F_TSO;
3411 netdev->vlan_features |= NETIF_F_TSO6; 3427 netdev->vlan_features |= NETIF_F_TSO6;
3412 netdev->vlan_features |= NETIF_F_IP_CSUM; 3428 netdev->vlan_features |= NETIF_F_IP_CSUM;
3413 netdev->vlan_features |= NETIF_F_IPV6_CSUM; 3429 netdev->vlan_features |= NETIF_F_IPV6_CSUM;
3414 netdev->vlan_features |= NETIF_F_SG; 3430 netdev->vlan_features |= NETIF_F_SG;
3415 3431
3416 if (pci_using_dac) 3432 if (pci_using_dac)
3417 netdev->features |= NETIF_F_HIGHDMA; 3433 netdev->features |= NETIF_F_HIGHDMA;
3418 3434
3419 netdev->priv_flags |= IFF_UNICAST_FLT; 3435 netdev->priv_flags |= IFF_UNICAST_FLT;
3420
3421 /* The HW MAC address was set and/or determined in sw_init */
3422 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
3423 memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
3424
3425 if (!is_valid_ether_addr(netdev->dev_addr)) {
3426 pr_err("invalid MAC address\n");
3427 err = -EIO;
3428 goto err_sw_init;
3429 }
3430 3436
3431 init_timer(&adapter->watchdog_timer); 3437 init_timer(&adapter->watchdog_timer);
3432 adapter->watchdog_timer.function = ixgbevf_watchdog; 3438 adapter->watchdog_timer.function = ixgbevf_watchdog;
3433 adapter->watchdog_timer.data = (unsigned long)adapter; 3439 adapter->watchdog_timer.data = (unsigned long)adapter;
3434 3440
3435 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task); 3441 INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
3436 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task); 3442 INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
3437 3443
3438 err = ixgbevf_init_interrupt_scheme(adapter); 3444 err = ixgbevf_init_interrupt_scheme(adapter);
3439 if (err) 3445 if (err)
3440 goto err_sw_init; 3446 goto err_sw_init;
3441 3447
3442 /* pick up the PCI bus settings for reporting later */ 3448 /* pick up the PCI bus settings for reporting later */
3443 if (hw->mac.ops.get_bus_info) 3449 if (hw->mac.ops.get_bus_info)
3444 hw->mac.ops.get_bus_info(hw); 3450 hw->mac.ops.get_bus_info(hw);
3445 3451
3446 strcpy(netdev->name, "eth%d"); 3452 strcpy(netdev->name, "eth%d");
3447 3453
3448 err = register_netdev(netdev); 3454 err = register_netdev(netdev);
3449 if (err) 3455 if (err)
3450 goto err_register; 3456 goto err_register;
3451 3457
3452 adapter->netdev_registered = true; 3458 adapter->netdev_registered = true;
3453 3459
3454 netif_carrier_off(netdev); 3460 netif_carrier_off(netdev);
3455 3461
3456 ixgbevf_init_last_counter_stats(adapter); 3462 ixgbevf_init_last_counter_stats(adapter);
3457 3463
3458 /* print the MAC address */ 3464 /* print the MAC address */
3459 hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 3465 hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
3460 netdev->dev_addr[0], 3466 netdev->dev_addr[0],
3461 netdev->dev_addr[1], 3467 netdev->dev_addr[1],
3462 netdev->dev_addr[2], 3468 netdev->dev_addr[2],
3463 netdev->dev_addr[3], 3469 netdev->dev_addr[3],
3464 netdev->dev_addr[4], 3470 netdev->dev_addr[4],
3465 netdev->dev_addr[5]); 3471 netdev->dev_addr[5]);
3466 3472
3467 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3473 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3468 3474
3469 hw_dbg(hw, "LRO is disabled\n"); 3475 hw_dbg(hw, "LRO is disabled\n");
3470 3476
3471 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3477 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3472 cards_found++; 3478 cards_found++;
3473 return 0; 3479 return 0;
3474 3480
3475 err_register: 3481 err_register:
3476 err_sw_init: 3482 err_sw_init:
3477 ixgbevf_reset_interrupt_capability(adapter); 3483 ixgbevf_reset_interrupt_capability(adapter);
3478 iounmap(hw->hw_addr); 3484 iounmap(hw->hw_addr);
3479 err_ioremap: 3485 err_ioremap:
3480 free_netdev(netdev); 3486 free_netdev(netdev);
3481 err_alloc_etherdev: 3487 err_alloc_etherdev:
3482 pci_release_regions(pdev); 3488 pci_release_regions(pdev);
3483 err_pci_reg: 3489 err_pci_reg:
3484 err_dma: 3490 err_dma:
3485 pci_disable_device(pdev); 3491 pci_disable_device(pdev);
3486 return err; 3492 return err;
3487 } 3493 }
3488 3494
3489 /** 3495 /**
3490 * ixgbevf_remove - Device Removal Routine 3496 * ixgbevf_remove - Device Removal Routine
3491 * @pdev: PCI device information struct 3497 * @pdev: PCI device information struct
3492 * 3498 *
3493 * ixgbevf_remove is called by the PCI subsystem to alert the driver 3499 * ixgbevf_remove is called by the PCI subsystem to alert the driver
3494 * that it should release a PCI device. This could be caused by a 3500 * that it should release a PCI device. This could be caused by a
3495 * Hot-Plug event, or because the driver is going to be removed from 3501 * Hot-Plug event, or because the driver is going to be removed from
3496 * memory. 3502 * memory.
3497 **/ 3503 **/
3498 static void __devexit ixgbevf_remove(struct pci_dev *pdev) 3504 static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3499 { 3505 {
3500 struct net_device *netdev = pci_get_drvdata(pdev); 3506 struct net_device *netdev = pci_get_drvdata(pdev);
3501 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 3507 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
3502 3508
3503 set_bit(__IXGBEVF_DOWN, &adapter->state); 3509 set_bit(__IXGBEVF_DOWN, &adapter->state);
3504 3510
3505 del_timer_sync(&adapter->watchdog_timer); 3511 del_timer_sync(&adapter->watchdog_timer);
3506 3512
3507 cancel_work_sync(&adapter->reset_task); 3513 cancel_work_sync(&adapter->reset_task);
3508 cancel_work_sync(&adapter->watchdog_task); 3514 cancel_work_sync(&adapter->watchdog_task);
3509 3515
3510 if (adapter->netdev_registered) { 3516 if (adapter->netdev_registered) {
3511 unregister_netdev(netdev); 3517 unregister_netdev(netdev);
3512 adapter->netdev_registered = false; 3518 adapter->netdev_registered = false;
3513 } 3519 }
3514 3520
3515 ixgbevf_reset_interrupt_capability(adapter); 3521 ixgbevf_reset_interrupt_capability(adapter);
3516 3522
3517 iounmap(adapter->hw.hw_addr); 3523 iounmap(adapter->hw.hw_addr);
3518 pci_release_regions(pdev); 3524 pci_release_regions(pdev);
3519 3525
3520 hw_dbg(&adapter->hw, "Remove complete\n"); 3526 hw_dbg(&adapter->hw, "Remove complete\n");
3521 3527
3522 kfree(adapter->tx_ring); 3528 kfree(adapter->tx_ring);
3523 kfree(adapter->rx_ring); 3529 kfree(adapter->rx_ring);
3524 3530
3525 free_netdev(netdev); 3531 free_netdev(netdev);
3526 3532
3527 pci_disable_device(pdev); 3533 pci_disable_device(pdev);
3528 } 3534 }
3529 3535
3530 static struct pci_driver ixgbevf_driver = { 3536 static struct pci_driver ixgbevf_driver = {
3531 .name = ixgbevf_driver_name, 3537 .name = ixgbevf_driver_name,
3532 .id_table = ixgbevf_pci_tbl, 3538 .id_table = ixgbevf_pci_tbl,
3533 .probe = ixgbevf_probe, 3539 .probe = ixgbevf_probe,
3534 .remove = __devexit_p(ixgbevf_remove), 3540 .remove = __devexit_p(ixgbevf_remove),
3535 .shutdown = ixgbevf_shutdown, 3541 .shutdown = ixgbevf_shutdown,
3536 }; 3542 };
3537 3543
3538 /** 3544 /**
3539 * ixgbevf_init_module - Driver Registration Routine 3545 * ixgbevf_init_module - Driver Registration Routine
3540 * 3546 *
3541 * ixgbevf_init_module is the first routine called when the driver is 3547 * ixgbevf_init_module is the first routine called when the driver is
3542 * loaded. All it does is register with the PCI subsystem. 3548 * loaded. All it does is register with the PCI subsystem.
3543 **/ 3549 **/
3544 static int __init ixgbevf_init_module(void) 3550 static int __init ixgbevf_init_module(void)
3545 { 3551 {
3546 int ret; 3552 int ret;
3547 pr_info("%s - version %s\n", ixgbevf_driver_string, 3553 pr_info("%s - version %s\n", ixgbevf_driver_string,
3548 ixgbevf_driver_version); 3554 ixgbevf_driver_version);
3549 3555
3550 pr_info("%s\n", ixgbevf_copyright); 3556 pr_info("%s\n", ixgbevf_copyright);
3551 3557
3552 ret = pci_register_driver(&ixgbevf_driver); 3558 ret = pci_register_driver(&ixgbevf_driver);
3553 return ret; 3559 return ret;
3554 } 3560 }
3555 3561
3556 module_init(ixgbevf_init_module); 3562 module_init(ixgbevf_init_module);
3557 3563
3558 /** 3564 /**
3559 * ixgbevf_exit_module - Driver Exit Cleanup Routine 3565 * ixgbevf_exit_module - Driver Exit Cleanup Routine
3560 * 3566 *
3561 * ixgbevf_exit_module is called just before the driver is removed 3567 * ixgbevf_exit_module is called just before the driver is removed
3562 * from memory. 3568 * from memory.
3563 **/ 3569 **/
3564 static void __exit ixgbevf_exit_module(void) 3570 static void __exit ixgbevf_exit_module(void)
3565 { 3571 {
3566 pci_unregister_driver(&ixgbevf_driver); 3572 pci_unregister_driver(&ixgbevf_driver);
3567 } 3573 }
3568 3574
3569 #ifdef DEBUG 3575 #ifdef DEBUG
3570 /** 3576 /**
3571 * ixgbevf_get_hw_dev_name - return device name string 3577 * ixgbevf_get_hw_dev_name - return device name string
3572 * used by hardware layer to print debugging information 3578 * used by hardware layer to print debugging information
3573 **/ 3579 **/
3574 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) 3580 char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
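For readers skimming the ixgbevf_probe() hunk above: with the return value of ixgbevf_sw_init() now checked, the MAC address is mirrored and validated immediately after software init, and any failure unwinds through err_sw_init. A condensed sketch of the resulting flow (illustrative paraphrase of the hunk, not a verbatim excerpt of the file):

	err = ixgbevf_sw_init(adapter);
	if (err)
		goto err_sw_init;	/* no usable configuration; unwind */

	/* the MAC address was set and/or determined in sw_init */
	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		pr_err("invalid MAC address\n");
		err = -EIO;
		goto err_sw_init;	/* same unwind path as a failed sw_init */
	}
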
include/linux/etherdevice.h
1 /* 1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX 2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. NET is implemented using the BSD Socket 3 * operating system. NET is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * Definitions for the Ethernet handlers. 6 * Definitions for the Ethernet handlers.
7 * 7 *
8 * Version: @(#)eth.h 1.0.4 05/13/93 8 * Version: @(#)eth.h 1.0.4 05/13/93
9 * 9 *
10 * Authors: Ross Biro 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * 12 *
13 * Relocated to include/linux where it belongs by Alan Cox 13 * Relocated to include/linux where it belongs by Alan Cox
14 * <gw4pts@gw4pts.ampr.org> 14 * <gw4pts@gw4pts.ampr.org>
15 * 15 *
16 * This program is free software; you can redistribute it and/or 16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License 17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 18 * as published by the Free Software Foundation; either version
19 * 2 of the License, or (at your option) any later version. 19 * 2 of the License, or (at your option) any later version.
20 * 20 *
21 * WARNING: This move may well be temporary. This file will get merged with others RSN. 21 * WARNING: This move may well be temporary. This file will get merged with others RSN.
22 * 22 *
23 */ 23 */
24 #ifndef _LINUX_ETHERDEVICE_H 24 #ifndef _LINUX_ETHERDEVICE_H
25 #define _LINUX_ETHERDEVICE_H 25 #define _LINUX_ETHERDEVICE_H
26 26
27 #include <linux/if_ether.h> 27 #include <linux/if_ether.h>
28 #include <linux/netdevice.h> 28 #include <linux/netdevice.h>
29 #include <linux/random.h> 29 #include <linux/random.h>
30 #include <asm/unaligned.h> 30 #include <asm/unaligned.h>
31 31
32 #ifdef __KERNEL__ 32 #ifdef __KERNEL__
33 extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev); 33 extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
34 extern const struct header_ops eth_header_ops; 34 extern const struct header_ops eth_header_ops;
35 35
36 extern int eth_header(struct sk_buff *skb, struct net_device *dev, 36 extern int eth_header(struct sk_buff *skb, struct net_device *dev,
37 unsigned short type, 37 unsigned short type,
38 const void *daddr, const void *saddr, unsigned len); 38 const void *daddr, const void *saddr, unsigned len);
39 extern int eth_rebuild_header(struct sk_buff *skb); 39 extern int eth_rebuild_header(struct sk_buff *skb);
40 extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); 40 extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
41 extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 41 extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
42 extern void eth_header_cache_update(struct hh_cache *hh, 42 extern void eth_header_cache_update(struct hh_cache *hh,
43 const struct net_device *dev, 43 const struct net_device *dev,
44 const unsigned char *haddr); 44 const unsigned char *haddr);
45 extern int eth_mac_addr(struct net_device *dev, void *p); 45 extern int eth_mac_addr(struct net_device *dev, void *p);
46 extern int eth_change_mtu(struct net_device *dev, int new_mtu); 46 extern int eth_change_mtu(struct net_device *dev, int new_mtu);
47 extern int eth_validate_addr(struct net_device *dev); 47 extern int eth_validate_addr(struct net_device *dev);
48 48
49 49
50 50
51 extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs, 51 extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
52 unsigned int rxqs); 52 unsigned int rxqs);
53 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1) 53 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
54 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count) 54 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
55 55
56 /** 56 /**
57 * is_zero_ether_addr - Determine if given Ethernet address is all zeros. 57 * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
58 * @addr: Pointer to a six-byte array containing the Ethernet address 58 * @addr: Pointer to a six-byte array containing the Ethernet address
59 * 59 *
60 * Return true if the address is all zeroes. 60 * Return true if the address is all zeroes.
61 */ 61 */
62 static inline int is_zero_ether_addr(const u8 *addr) 62 static inline int is_zero_ether_addr(const u8 *addr)
63 { 63 {
64 return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); 64 return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
65 } 65 }
66 66
67 /** 67 /**
68 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast. 68 * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
69 * @addr: Pointer to a six-byte array containing the Ethernet address 69 * @addr: Pointer to a six-byte array containing the Ethernet address
70 * 70 *
71 * Return true if the address is a multicast address. 71 * Return true if the address is a multicast address.
72 * By definition the broadcast address is also a multicast address. 72 * By definition the broadcast address is also a multicast address.
73 */ 73 */
74 static inline int is_multicast_ether_addr(const u8 *addr) 74 static inline int is_multicast_ether_addr(const u8 *addr)
75 { 75 {
76 return 0x01 & addr[0]; 76 return 0x01 & addr[0];
77 } 77 }
78 78
79 /** 79 /**
80 * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802). 80 * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802).
81 * @addr: Pointer to a six-byte array containing the Ethernet address 81 * @addr: Pointer to a six-byte array containing the Ethernet address
82 * 82 *
83 * Return true if the address is a local address. 83 * Return true if the address is a local address.
84 */ 84 */
85 static inline int is_local_ether_addr(const u8 *addr) 85 static inline int is_local_ether_addr(const u8 *addr)
86 { 86 {
87 return 0x02 & addr[0]; 87 return 0x02 & addr[0];
88 } 88 }
89 89
90 /** 90 /**
91 * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast 91 * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
92 * @addr: Pointer to a six-byte array containing the Ethernet address 92 * @addr: Pointer to a six-byte array containing the Ethernet address
93 * 93 *
94 * Return true if the address is the broadcast address. 94 * Return true if the address is the broadcast address.
95 */ 95 */
96 static inline int is_broadcast_ether_addr(const u8 *addr) 96 static inline int is_broadcast_ether_addr(const u8 *addr)
97 { 97 {
98 return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff; 98 return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff;
99 } 99 }
100 100
101 /** 101 /**
102 * is_unicast_ether_addr - Determine if the Ethernet address is unicast 102 * is_unicast_ether_addr - Determine if the Ethernet address is unicast
103 * @addr: Pointer to a six-byte array containing the Ethernet address 103 * @addr: Pointer to a six-byte array containing the Ethernet address
104 * 104 *
105 * Return true if the address is a unicast address. 105 * Return true if the address is a unicast address.
106 */ 106 */
107 static inline int is_unicast_ether_addr(const u8 *addr) 107 static inline int is_unicast_ether_addr(const u8 *addr)
108 { 108 {
109 return !is_multicast_ether_addr(addr); 109 return !is_multicast_ether_addr(addr);
110 } 110 }
111 111
112 /** 112 /**
113 * is_valid_ether_addr - Determine if the given Ethernet address is valid 113 * is_valid_ether_addr - Determine if the given Ethernet address is valid
114 * @addr: Pointer to a six-byte array containing the Ethernet address 114 * @addr: Pointer to a six-byte array containing the Ethernet address
115 * 115 *
116 * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not 116 * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
117 * a multicast address, and is not FF:FF:FF:FF:FF:FF. 117 * a multicast address, and is not FF:FF:FF:FF:FF:FF.
118 * 118 *
119 * Return true if the address is valid. 119 * Return true if the address is valid.
120 */ 120 */
121 static inline int is_valid_ether_addr(const u8 *addr) 121 static inline int is_valid_ether_addr(const u8 *addr)
122 { 122 {
123 /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to 123 /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to
124 * explicitly check for it here. */ 124 * explicitly check for it here. */
125 return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr); 125 return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
126 } 126 }
127 127
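Taken together, the predicates above classify an address as follows; a short illustration with hand-picked sample values (illustrative only, not from this file):

	static const u8 zero_addr[ETH_ALEN]  = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	static const u8 mcast_addr[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	static const u8 ucast_addr[ETH_ALEN] = { 0x02, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };

	/*
	 * is_valid_ether_addr(zero_addr)  -> false (all zeroes)
	 * is_valid_ether_addr(bcast_addr) -> false (broadcast, i.e. multicast)
	 * is_valid_ether_addr(mcast_addr) -> false (multicast bit set in byte 0)
	 * is_valid_ether_addr(ucast_addr) -> true  (unicast, locally administered)
	 */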
128 /** 128 /**
129 * random_ether_addr - Generate software assigned random Ethernet address 129 * random_ether_addr - Generate software assigned random Ethernet address
130 * @addr: Pointer to a six-byte array containing the Ethernet address 130 * @addr: Pointer to a six-byte array containing the Ethernet address
131 * 131 *
132 * Generate a random Ethernet address (MAC) that is not multicast 132 * Generate a random Ethernet address (MAC) that is not multicast
133 * and has the locally assigned bit set. 133 * and has the locally assigned bit set.
134 */ 134 */
135 static inline void random_ether_addr(u8 *addr) 135 static inline void random_ether_addr(u8 *addr)
136 { 136 {
137 get_random_bytes (addr, ETH_ALEN); 137 get_random_bytes (addr, ETH_ALEN);
138 addr [0] &= 0xfe; /* clear multicast bit */ 138 addr [0] &= 0xfe; /* clear multicast bit */
139 addr [0] |= 0x02; /* set local assignment bit (IEEE802) */ 139 addr [0] |= 0x02; /* set local assignment bit (IEEE802) */
140 } 140 }
141 141
142 /** 142 /**
143 * dev_hw_addr_random - Create random MAC and set device flag 143 * eth_hw_addr_random - Generate software assigned random Ethernet address and
144 * set device flag
144 * @dev: pointer to net_device structure 145 * @dev: pointer to net_device structure
145 * @hwaddr: Pointer to a six-byte array containing the Ethernet address
146 * 146 *
147 * Generate random MAC to be used by a device and set addr_assign_type 147 * Generate a random Ethernet address (MAC) to be used by a net device
148 * so the state can be read by sysfs and be used by udev. 148 * and set addr_assign_type so the state can be read by sysfs and be
149 * used by userspace.
149 */ 150 */
150 static inline void dev_hw_addr_random(struct net_device *dev, u8 *hwaddr) 151 static inline void eth_hw_addr_random(struct net_device *dev)
151 { 152 {
152 dev->addr_assign_type |= NET_ADDR_RANDOM; 153 dev->addr_assign_type |= NET_ADDR_RANDOM;
153 random_ether_addr(hwaddr); 154 random_ether_addr(dev->dev_addr);
154 } 155 }
155 156
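A sketch of the caller-side effect of this rename, in the spirit of the igbvf/ixgbevf adaptations mentioned in the commit message (the netdev/hw names are illustrative; see the driver hunks in this commit for the real changes):

	/* before: caller had to pass the buffer and copy it into dev_addr */
	dev_hw_addr_random(netdev, hw->mac.addr);
	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	/* after: the helper fills netdev->dev_addr itself; copy out if the
	 * MAC-layer structure still needs its own copy */
	eth_hw_addr_random(netdev);
	memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);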
156 /** 157 /**
157 * compare_ether_addr - Compare two Ethernet addresses 158 * compare_ether_addr - Compare two Ethernet addresses
158 * @addr1: Pointer to a six-byte array containing the Ethernet address 159 * @addr1: Pointer to a six-byte array containing the Ethernet address
159 * @addr2: Pointer to other six-byte array containing the Ethernet address 160 * @addr2: Pointer to other six-byte array containing the Ethernet address
160 * 161 *
161 * Compare two ethernet addresses, returns 0 if equal 162 * Compare two ethernet addresses, returns 0 if equal
162 */ 163 */
163 static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2) 164 static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
164 { 165 {
165 const u16 *a = (const u16 *) addr1; 166 const u16 *a = (const u16 *) addr1;
166 const u16 *b = (const u16 *) addr2; 167 const u16 *b = (const u16 *) addr2;
167 168
168 BUILD_BUG_ON(ETH_ALEN != 6); 169 BUILD_BUG_ON(ETH_ALEN != 6);
169 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; 170 return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
170 } 171 }
171 172
172 static inline unsigned long zap_last_2bytes(unsigned long value) 173 static inline unsigned long zap_last_2bytes(unsigned long value)
173 { 174 {
174 #ifdef __BIG_ENDIAN 175 #ifdef __BIG_ENDIAN
175 return value >> 16; 176 return value >> 16;
176 #else 177 #else
177 return value << 16; 178 return value << 16;
178 #endif 179 #endif
179 } 180 }
180 181
181 /** 182 /**
182 * compare_ether_addr_64bits - Compare two Ethernet addresses 183 * compare_ether_addr_64bits - Compare two Ethernet addresses
183 * @addr1: Pointer to an array of 8 bytes 184 * @addr1: Pointer to an array of 8 bytes
184 * @addr2: Pointer to an other array of 8 bytes 185 * @addr2: Pointer to an other array of 8 bytes
185 * 186 *
186 * Compare two ethernet addresses, returns 0 if equal. 187 * Compare two ethernet addresses, returns 0 if equal.
187 * Same result as "memcmp(addr1, addr2, ETH_ALEN)" but without conditional 188 * Same result as "memcmp(addr1, addr2, ETH_ALEN)" but without conditional
188 * branches, and possibly long word memory accesses on CPUs allowing cheap 189 * branches, and possibly long word memory accesses on CPUs allowing cheap
189 * unaligned memory reads. 190 * unaligned memory reads.
190 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2} 191 * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2}
191 * 192 *
192 * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits. 193 * Please note that alignment of addr1 & addr2 is only guaranteed to be 16 bits.
193 */ 194 */
194 195
195 static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2], 196 static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
196 const u8 addr2[6+2]) 197 const u8 addr2[6+2])
197 { 198 {
198 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 199 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
199 unsigned long fold = ((*(unsigned long *)addr1) ^ 200 unsigned long fold = ((*(unsigned long *)addr1) ^
200 (*(unsigned long *)addr2)); 201 (*(unsigned long *)addr2));
201 202
202 if (sizeof(fold) == 8) 203 if (sizeof(fold) == 8)
203 return zap_last_2bytes(fold) != 0; 204 return zap_last_2bytes(fold) != 0;
204 205
205 fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^ 206 fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
206 (*(unsigned long *)(addr2 + 4))); 207 (*(unsigned long *)(addr2 + 4)));
207 return fold != 0; 208 return fold != 0;
208 #else 209 #else
209 return compare_ether_addr(addr1, addr2); 210 return compare_ether_addr(addr1, addr2);
210 #endif 211 #endif
211 } 212 }
212 213
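The [6 + 2] prototypes encode a real requirement: both buffers must have two readable pad bytes after the address. A hedged sketch of a safe caller (values illustrative):

	u8 a[6 + 2] = { 0x00, 0x1b, 0x21, 0x11, 0x22, 0x33 };	/* pads default to 0 */
	u8 b[6 + 2] = { 0x00, 0x1b, 0x21, 0x11, 0x22, 0x33 };

	if (compare_ether_addr_64bits(a, b) == 0) {
		/* equal: the pad bytes never affect the result because
		 * zap_last_2bytes() masks them out of the folded XOR */
	}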
213 /** 214 /**
214 * is_etherdev_addr - Tell if given Ethernet address belongs to the device. 215 * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
215 * @dev: Pointer to a device structure 216 * @dev: Pointer to a device structure
216 * @addr: Pointer to a six-byte array containing the Ethernet address 217 * @addr: Pointer to a six-byte array containing the Ethernet address
217 * 218 *
218 * Compare passed address with all addresses of the device. Return true if the 219 * Compare passed address with all addresses of the device. Return true if the
219 * address is one of the device addresses. 220 * address is one of the device addresses.
220 * 221 *
221 * Note that this function calls compare_ether_addr_64bits() so take care of 222 * Note that this function calls compare_ether_addr_64bits() so take care of
222 * the right padding. 223 * the right padding.
223 */ 224 */
224 static inline bool is_etherdev_addr(const struct net_device *dev, 225 static inline bool is_etherdev_addr(const struct net_device *dev,
225 const u8 addr[6 + 2]) 226 const u8 addr[6 + 2])
226 { 227 {
227 struct netdev_hw_addr *ha; 228 struct netdev_hw_addr *ha;
228 int res = 1; 229 int res = 1;
229 230
230 rcu_read_lock(); 231 rcu_read_lock();
231 for_each_dev_addr(dev, ha) { 232 for_each_dev_addr(dev, ha) {
232 res = compare_ether_addr_64bits(addr, ha->addr); 233 res = compare_ether_addr_64bits(addr, ha->addr);
233 if (!res) 234 if (!res)
234 break; 235 break;
235 } 236 }
236 rcu_read_unlock(); 237 rcu_read_unlock();
237 return !res; 238 return !res;
238 } 239 }
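A small usage sketch, assuming a received frame: the destination field of an Ethernet header is followed by the source address, so the two pad bytes required by compare_ether_addr_64bits() are always readable (eth_hdr() and the skb handling are standard kernel notions, not part of this patch):

	static bool frame_is_for_us_example(const struct net_device *dev,
					    const struct sk_buff *skb)
	{
		const struct ethhdr *eh = eth_hdr(skb);

		/* true if eh->h_dest matches any address registered on dev */
		return is_etherdev_addr(dev, eh->h_dest);
	}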
239 #endif /* __KERNEL__ */ 240 #endif /* __KERNEL__ */
240 241
241 /** 242 /**
242 * compare_ether_header - Compare two Ethernet headers 243 * compare_ether_header - Compare two Ethernet headers
243 * @a: Pointer to Ethernet header 244 * @a: Pointer to Ethernet header
244 * @b: Pointer to Ethernet header 245 * @b: Pointer to Ethernet header
245 * 246 *
246 * Compare two ethernet headers, returns 0 if equal. 247 * Compare two ethernet headers, returns 0 if equal.
247 * This assumes that the network header (i.e., IP header) is 4-byte 248 * This assumes that the network header (i.e., IP header) is 4-byte
248 * aligned OR the platform can handle unaligned access. This is the 249 * aligned OR the platform can handle unaligned access. This is the
249 * case for all packets coming into netif_receive_skb or similar 250 * case for all packets coming into netif_receive_skb or similar
250 * entry points. 251 * entry points.
251 */ 252 */
252 253
253 static inline unsigned long compare_ether_header(const void *a, const void *b) 254 static inline unsigned long compare_ether_header(const void *a, const void *b)
254 { 255 {
255 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 256 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
256 unsigned long fold; 257 unsigned long fold;
257 258
258 /* 259 /*
259 * We want to compare 14 bytes: 260 * We want to compare 14 bytes:
260 * [a0 ... a13] ^ [b0 ... b13] 261 * [a0 ... a13] ^ [b0 ... b13]
261 * Use two long XOR, ORed together, with an overlap of two bytes. 262 * Use two long XOR, ORed together, with an overlap of two bytes.
262 * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] | 263 * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] |
263 * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13] 264 * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13]
264 * This means the [a6 a7] ^ [b6 b7] part is done two times. 265 * This means the [a6 a7] ^ [b6 b7] part is done two times.
265 */ 266 */
266 fold = *(unsigned long *)a ^ *(unsigned long *)b; 267 fold = *(unsigned long *)a ^ *(unsigned long *)b;
267 fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6); 268 fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
268 return fold; 269 return fold;
269 #else 270 #else
270 u32 *a32 = (u32 *)((u8 *)a + 2); 271 u32 *a32 = (u32 *)((u8 *)a + 2);
271 u32 *b32 = (u32 *)((u8 *)b + 2); 272 u32 *b32 = (u32 *)((u8 *)b + 2);
272 273
273 return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | 274 return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
274 (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); 275 (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
275 #endif 276 #endif
276 } 277 }
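A brief sketch of the intended use: deciding whether two frames carry an identical 14-byte Ethernet header, e.g. as a cheap pre-check before aggregating them (the helper name is illustrative):

	static bool same_ether_header_example(const struct sk_buff *a,
					      const struct sk_buff *b)
	{
		/* compare_ether_header() returns 0 when dst, src and
		 * ethertype are all identical */
		return compare_ether_header(eth_hdr(a), eth_hdr(b)) == 0;
	}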
277 278
278 #endif /* _LINUX_ETHERDEVICE_H */ 279 #endif /* _LINUX_ETHERDEVICE_H */