Commit f629d208d27a22f495b7734eede585b5d207e912

Authored by Joe Perches
1 parent 7965bd4d71

[networking]device.h: Remove extern from function prototypes

There is a mix of function prototypes with and without extern
in the kernel sources.  Standardize on not using extern for
function prototypes.

Function prototypes don't need to be written with extern.
extern is assumed by the compiler.  Its use is as unnecessary as
using auto to declare automatic/local variables in a block.
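
As an illustrative aside (hypothetical identifiers, not part of this
patch), the two prototype styles below declare exactly the same thing,
just as auto is redundant on a block-scope local:

    /* At file scope, a function prototype has external linkage by
     * default, so the extern keyword changes nothing: */
    extern int example_op(int arg);     /* explicit extern */
    int example_op(int arg);            /* extern is implied */

    void example_fn(void)
    {
            auto int x = 0;             /* legal C, but auto adds nothing */
            int y = 0;                  /* identical storage duration */
            (void)x; (void)y;           /* silence unused warnings */
    }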

Signed-off-by: Joe Perches <joe@perches.com>

Showing 6 changed files with 248 additions and 266 deletions.

include/linux/etherdevice.h
 /*
  * INET An implementation of the TCP/IP protocol suite for the LINUX
  * operating system. NET is implemented using the BSD Socket
  * interface as the means of communication with the user level.
  *
  * Definitions for the Ethernet handlers.
  *
  * Version: @(#)eth.h 1.0.4 05/13/93
  *
  * Authors: Ross Biro
  *          Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *
  *          Relocated to include/linux where it belongs by Alan Cox
  *          <gw4pts@gw4pts.ampr.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
  */
 #ifndef _LINUX_ETHERDEVICE_H
 #define _LINUX_ETHERDEVICE_H
 
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
 #include <asm/unaligned.h>
 
 #ifdef __KERNEL__
-extern __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
 extern const struct header_ops eth_header_ops;
 
-extern int eth_header(struct sk_buff *skb, struct net_device *dev,
-                      unsigned short type,
-                      const void *daddr, const void *saddr, unsigned len);
-extern int eth_rebuild_header(struct sk_buff *skb);
-extern int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
-extern int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
-extern void eth_header_cache_update(struct hh_cache *hh,
-                                    const struct net_device *dev,
-                                    const unsigned char *haddr);
-extern int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
-extern void eth_commit_mac_addr_change(struct net_device *dev, void *p);
-extern int eth_mac_addr(struct net_device *dev, void *p);
-extern int eth_change_mtu(struct net_device *dev, int new_mtu);
-extern int eth_validate_addr(struct net_device *dev);
+int eth_header(struct sk_buff *skb, struct net_device *dev, unsigned short type,
+               const void *daddr, const void *saddr, unsigned len);
+int eth_rebuild_header(struct sk_buff *skb);
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh,
+                     __be16 type);
+void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev,
+                             const unsigned char *haddr);
+int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
+void eth_commit_mac_addr_change(struct net_device *dev, void *p);
+int eth_mac_addr(struct net_device *dev, void *p);
+int eth_change_mtu(struct net_device *dev, int new_mtu);
+int eth_validate_addr(struct net_device *dev);
 
-
-
-extern struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
-                                             unsigned int rxqs);
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+                                      unsigned int rxqs);
 #define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
 #define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
 
 /* Reserved Ethernet Addresses per IEEE 802.1Q */
 static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) =
 { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
 
 /**
  * is_link_local_ether_addr - Determine if given Ethernet address is link-local
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Return true if address is link local reserved addr (01:80:c2:00:00:0X) per
  * IEEE 802.1Q 8.6.3 Frame filtering.
  */
 static inline bool is_link_local_ether_addr(const u8 *addr)
 {
         __be16 *a = (__be16 *)addr;
         static const __be16 *b = (const __be16 *)eth_reserved_addr_base;
         static const __be16 m = cpu_to_be16(0xfff0);
 
         return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0;
 }
 
 /**
  * is_zero_ether_addr - Determine if given Ethernet address is all zeros.
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Return true if the address is all zeroes.
  */
 static inline bool is_zero_ether_addr(const u8 *addr)
 {
         return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]);
 }
 
 /**
  * is_multicast_ether_addr - Determine if the Ethernet address is a multicast.
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Return true if the address is a multicast address.
  * By definition the broadcast address is also a multicast address.
  */
 static inline bool is_multicast_ether_addr(const u8 *addr)
 {
         return 0x01 & addr[0];
 }
 
 /**
  * is_local_ether_addr - Determine if the Ethernet address is a locally-assigned one (IEEE 802).
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Return true if the address is a local address.
  */
 static inline bool is_local_ether_addr(const u8 *addr)
 {
         return 0x02 & addr[0];
 }
 
 /**
  * is_broadcast_ether_addr - Determine if the Ethernet address is broadcast
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Return true if the address is the broadcast address.
  */
 static inline bool is_broadcast_ether_addr(const u8 *addr)
 {
         return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff;
 }
 
 /**
  * is_unicast_ether_addr - Determine if the Ethernet address is unicast
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Return true if the address is a unicast address.
  */
 static inline bool is_unicast_ether_addr(const u8 *addr)
 {
         return !is_multicast_ether_addr(addr);
 }
 
 /**
  * is_valid_ether_addr - Determine if the given Ethernet address is valid
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Check that the Ethernet address (MAC) is not 00:00:00:00:00:00, is not
  * a multicast address, and is not FF:FF:FF:FF:FF:FF.
  *
  * Return true if the address is valid.
  */
 static inline bool is_valid_ether_addr(const u8 *addr)
 {
         /* FF:FF:FF:FF:FF:FF is a multicast address so we don't need to
          * explicitly check for it here. */
         return !is_multicast_ether_addr(addr) && !is_zero_ether_addr(addr);
 }
 
 /**
  * eth_random_addr - Generate software assigned random Ethernet address
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Generate a random Ethernet address (MAC) that is not multicast
  * and has the local assigned bit set.
  */
 static inline void eth_random_addr(u8 *addr)
 {
         get_random_bytes(addr, ETH_ALEN);
         addr[0] &= 0xfe;        /* clear multicast bit */
         addr[0] |= 0x02;        /* set local assignment bit (IEEE802) */
 }
 
 #define random_ether_addr(addr) eth_random_addr(addr)
 
 /**
  * eth_broadcast_addr - Assign broadcast address
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Assign the broadcast address to the given address array.
  */
 static inline void eth_broadcast_addr(u8 *addr)
 {
         memset(addr, 0xff, ETH_ALEN);
 }
 
 /**
  * eth_zero_addr - Assign zero address
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Assign the zero address to the given address array.
  */
 static inline void eth_zero_addr(u8 *addr)
 {
         memset(addr, 0x00, ETH_ALEN);
 }
 
 /**
  * eth_hw_addr_random - Generate software assigned random Ethernet and
  * set device flag
  * @dev: pointer to net_device structure
  *
  * Generate a random Ethernet address (MAC) to be used by a net device
  * and set addr_assign_type so the state can be read by sysfs and be
  * used by userspace.
  */
 static inline void eth_hw_addr_random(struct net_device *dev)
 {
         dev->addr_assign_type = NET_ADDR_RANDOM;
         eth_random_addr(dev->dev_addr);
 }
 
 /**
  * eth_hw_addr_inherit - Copy dev_addr from another net_device
  * @dst: pointer to net_device to copy dev_addr to
  * @src: pointer to net_device to copy dev_addr from
  *
  * Copy the Ethernet address from one net_device to another along with
  * the address attributes (addr_assign_type).
  */
 static inline void eth_hw_addr_inherit(struct net_device *dst,
                                        struct net_device *src)
 {
         dst->addr_assign_type = src->addr_assign_type;
         memcpy(dst->dev_addr, src->dev_addr, ETH_ALEN);
 }
 
 /**
  * compare_ether_addr - Compare two Ethernet addresses
  * @addr1: Pointer to a six-byte array containing the Ethernet address
  * @addr2: Pointer to the other six-byte array containing the Ethernet address
  *
  * Compare two Ethernet addresses, returns 0 if equal, non-zero otherwise.
  * Unlike memcmp(), it doesn't return a value suitable for sorting.
  */
 static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2)
 {
         const u16 *a = (const u16 *) addr1;
         const u16 *b = (const u16 *) addr2;
 
         BUILD_BUG_ON(ETH_ALEN != 6);
         return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
 }
 
 /**
  * ether_addr_equal - Compare two Ethernet addresses
  * @addr1: Pointer to a six-byte array containing the Ethernet address
  * @addr2: Pointer to the other six-byte array containing the Ethernet address
  *
  * Compare two Ethernet addresses, returns true if equal
  */
 static inline bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 {
         return !compare_ether_addr(addr1, addr2);
 }
 
 static inline unsigned long zap_last_2bytes(unsigned long value)
 {
 #ifdef __BIG_ENDIAN
         return value >> 16;
 #else
         return value << 16;
 #endif
 }
 
 /**
  * ether_addr_equal_64bits - Compare two Ethernet addresses
  * @addr1: Pointer to an array of 8 bytes
  * @addr2: Pointer to an other array of 8 bytes
  *
  * Compare two Ethernet addresses, returns true if equal, false otherwise.
  *
  * The function doesn't need any conditional branches and possibly uses
  * word memory accesses on CPUs allowing cheap unaligned memory reads.
  * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
  *
  * Please note that the alignment of addr1 & addr2 is only guaranteed to be 16 bits.
  */
 
 static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
                                            const u8 addr2[6+2])
 {
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
         unsigned long fold = ((*(unsigned long *)addr1) ^
                               (*(unsigned long *)addr2));
 
         if (sizeof(fold) == 8)
                 return zap_last_2bytes(fold) == 0;
 
         fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
                                 (*(unsigned long *)(addr2 + 4)));
         return fold == 0;
 #else
         return ether_addr_equal(addr1, addr2);
 #endif
 }
 
 /**
  * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
  * @dev: Pointer to a device structure
  * @addr: Pointer to a six-byte array containing the Ethernet address
  *
  * Compare passed address with all addresses of the device. Return true if the
  * address is one of the device addresses.
  *
  * Note that this function calls ether_addr_equal_64bits() so take care of
  * the right padding.
  */
 static inline bool is_etherdev_addr(const struct net_device *dev,
                                     const u8 addr[6 + 2])
 {
         struct netdev_hw_addr *ha;
         bool res = false;
 
         rcu_read_lock();
         for_each_dev_addr(dev, ha) {
                 res = ether_addr_equal_64bits(addr, ha->addr);
                 if (res)
                         break;
         }
         rcu_read_unlock();
         return res;
 }
 #endif  /* __KERNEL__ */
 
 /**
  * compare_ether_header - Compare two Ethernet headers
  * @a: Pointer to Ethernet header
  * @b: Pointer to Ethernet header
  *
  * Compare two Ethernet headers, returns 0 if equal.
  * This assumes that the network header (i.e., IP header) is 4-byte
  * aligned OR the platform can handle unaligned access. This is the
  * case for all packets coming into netif_receive_skb or similar
  * entry points.
  */
 
 static inline unsigned long compare_ether_header(const void *a, const void *b)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
         unsigned long fold;
 
         /*
          * We want to compare 14 bytes:
          *  [a0 ... a13] ^ [b0 ... b13]
          * Use two long XOR, ORed together, with an overlap of two bytes.
          *  [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] |
          *  [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13]
          * This means the [a6 a7] ^ [b6 b7] part is done two times.
          */
         fold = *(unsigned long *)a ^ *(unsigned long *)b;
         fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6);
         return fold;
 #else
         u32 *a32 = (u32 *)((u8 *)a + 2);
         u32 *b32 = (u32 *)((u8 *)b + 2);
 
         return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
                (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
 #endif
 }
 
 #endif  /* _LINUX_ETHERDEVICE_H */
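As a brief usage sketch (a hypothetical driver fragment, assuming only the
helpers declared above), address validation and the random fallback combine
like this:

    /* Hypothetical: pick up a MAC from hardware if it is usable,
     * otherwise fall back to a random locally administered one. */
    static void example_init_mac(struct net_device *dev, const u8 *hw_addr)
    {
            if (hw_addr && is_valid_ether_addr(hw_addr))
                    memcpy(dev->dev_addr, hw_addr, ETH_ALEN);
            else
                    eth_hw_addr_random(dev); /* also sets addr_assign_type */
    }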
include/linux/fcdevice.h
 /*
  * INET An implementation of the TCP/IP protocol suite for the LINUX
  * operating system. NET is implemented using the BSD Socket
  * interface as the means of communication with the user level.
  *
  * Definitions for the Fibre Channel handlers.
  *
  * Version: @(#)fcdevice.h 1.0.0 09/26/98
  *
  * Authors: Vineet Abraham <vma@iol.unh.edu>
  *
  *          Relocated to include/linux where it belongs by Alan Cox
  *          <gw4pts@gw4pts.ampr.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
  * WARNING: This move may well be temporary. This file will get merged with others RSN.
  *
  */
 #ifndef _LINUX_FCDEVICE_H
 #define _LINUX_FCDEVICE_H
 
 
 #include <linux/if_fc.h>
 
 #ifdef __KERNEL__
-extern struct net_device *alloc_fcdev(int sizeof_priv);
+struct net_device *alloc_fcdev(int sizeof_priv);
 #endif
 
 #endif  /* _LINUX_FCDEVICE_H */
include/linux/fddidevice.h
 /*
  * INET An implementation of the TCP/IP protocol suite for the LINUX
  * operating system. INET is implemented using the BSD Socket
  * interface as the means of communication with the user level.
  *
  * Definitions for the FDDI handlers.
  *
  * Version: @(#)fddidevice.h 1.0.0 08/12/96
  *
  * Author: Lawrence V. Stefani, <stefani@lkg.dec.com>
  *
  * fddidevice.h is based on previous trdevice.h work by
  *  Ross Biro
  *  Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *  Alan Cox, <gw4pts@gw4pts.ampr.org>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
 #ifndef _LINUX_FDDIDEVICE_H
 #define _LINUX_FDDIDEVICE_H
 
 #include <linux/if_fddi.h>
 
 #ifdef __KERNEL__
-extern __be16 fddi_type_trans(struct sk_buff *skb,
-                              struct net_device *dev);
-extern int fddi_change_mtu(struct net_device *dev, int new_mtu);
-extern struct net_device *alloc_fddidev(int sizeof_priv);
+__be16 fddi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int fddi_change_mtu(struct net_device *dev, int new_mtu);
+struct net_device *alloc_fddidev(int sizeof_priv);
 #endif
 
 #endif  /* _LINUX_FDDIDEVICE_H */
include/linux/hippidevice.h
 /*
  * INET An implementation of the TCP/IP protocol suite for the LINUX
  * operating system. INET is implemented using the BSD Socket
  * interface as the means of communication with the user level.
  *
  * Definitions for the HIPPI handlers.
  *
  * Version: @(#)hippidevice.h 1.0.0 05/26/97
  *
  * Author: Jes Sorensen, <Jes.Sorensen@cern.ch>
  *
  * hippidevice.h is based on previous fddidevice.h work by
  *  Ross Biro
  *  Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  *  Alan Cox, <gw4pts@gw4pts.ampr.org>
  *  Lawrence V. Stefani, <stefani@lkg.dec.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
 #ifndef _LINUX_HIPPIDEVICE_H
 #define _LINUX_HIPPIDEVICE_H
 
 #include <linux/if_hippi.h>
 
 #ifdef __KERNEL__
 
 struct hippi_cb {
         __u32 ifield;
 };
 
-extern __be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
-extern int hippi_change_mtu(struct net_device *dev, int new_mtu);
-extern int hippi_mac_addr(struct net_device *dev, void *p);
-extern int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
-extern struct net_device *alloc_hippi_dev(int sizeof_priv);
+__be16 hippi_type_trans(struct sk_buff *skb, struct net_device *dev);
+int hippi_change_mtu(struct net_device *dev, int new_mtu);
+int hippi_mac_addr(struct net_device *dev, void *p);
+int hippi_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p);
+struct net_device *alloc_hippi_dev(int sizeof_priv);
 #endif
 
 #endif  /* _LINUX_HIPPIDEVICE_H */
include/linux/inetdevice.h
 #ifndef _LINUX_INETDEVICE_H
 #define _LINUX_INETDEVICE_H
 
 #ifdef __KERNEL__
 
 #include <linux/bitmap.h>
 #include <linux/if.h>
 #include <linux/ip.h>
 #include <linux/netdevice.h>
 #include <linux/rcupdate.h>
 #include <linux/timer.h>
 #include <linux/sysctl.h>
 #include <linux/rtnetlink.h>
 
 struct ipv4_devconf {
         void *sysctl;
         int data[IPV4_DEVCONF_MAX];
         DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
 };
 
 #define MC_HASH_SZ_LOG 9
 
 struct in_device {
         struct net_device *dev;
         atomic_t refcnt;
         int dead;
         struct in_ifaddr *ifa_list;     /* IP ifaddr chain */
 
         struct ip_mc_list __rcu *mc_list;       /* IP multicast filter chain */
         struct ip_mc_list __rcu * __rcu *mc_hash;
 
         int mc_count;                   /* Number of installed mcasts */
         spinlock_t mc_tomb_lock;
         struct ip_mc_list *mc_tomb;
         unsigned long mr_v1_seen;
         unsigned long mr_v2_seen;
         unsigned long mr_maxdelay;
         unsigned char mr_qrv;
         unsigned char mr_gq_running;
         unsigned char mr_ifc_count;
         struct timer_list mr_gq_timer;  /* general query timer */
         struct timer_list mr_ifc_timer; /* interface change timer */
 
         struct neigh_parms *arp_parms;
         struct ipv4_devconf cnf;
         struct rcu_head rcu_head;
 };
 
 #define IPV4_DEVCONF(cnf, attr) ((cnf).data[IPV4_DEVCONF_ ## attr - 1])
 #define IPV4_DEVCONF_ALL(net, attr) \
         IPV4_DEVCONF((*(net)->ipv4.devconf_all), attr)
 
 static inline int ipv4_devconf_get(struct in_device *in_dev, int index)
 {
         index--;
         return in_dev->cnf.data[index];
 }
 
 static inline void ipv4_devconf_set(struct in_device *in_dev, int index,
                                     int val)
 {
         index--;
         set_bit(index, in_dev->cnf.state);
         in_dev->cnf.data[index] = val;
 }
 
 static inline void ipv4_devconf_setall(struct in_device *in_dev)
 {
         bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX);
 }
 
 #define IN_DEV_CONF_GET(in_dev, attr) \
         ipv4_devconf_get((in_dev), IPV4_DEVCONF_ ## attr)
 #define IN_DEV_CONF_SET(in_dev, attr, val) \
         ipv4_devconf_set((in_dev), IPV4_DEVCONF_ ## attr, (val))
 
 #define IN_DEV_ANDCONF(in_dev, attr) \
         (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr) && \
          IN_DEV_CONF_GET((in_dev), attr))
 
 #define IN_DEV_NET_ORCONF(in_dev, net, attr) \
         (IPV4_DEVCONF_ALL(net, attr) || \
          IN_DEV_CONF_GET((in_dev), attr))
 
 #define IN_DEV_ORCONF(in_dev, attr) \
         IN_DEV_NET_ORCONF(in_dev, dev_net(in_dev->dev), attr)
 
 #define IN_DEV_MAXCONF(in_dev, attr) \
         (max(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), attr), \
              IN_DEV_CONF_GET((in_dev), attr)))
 
 #define IN_DEV_FORWARD(in_dev)          IN_DEV_CONF_GET((in_dev), FORWARDING)
 #define IN_DEV_MFORWARD(in_dev)         IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
 #define IN_DEV_RPFILTER(in_dev)         IN_DEV_MAXCONF((in_dev), RP_FILTER)
 #define IN_DEV_SRC_VMARK(in_dev)        IN_DEV_ORCONF((in_dev), SRC_VMARK)
 #define IN_DEV_SOURCE_ROUTE(in_dev)     IN_DEV_ANDCONF((in_dev), \
                                                        ACCEPT_SOURCE_ROUTE)
 #define IN_DEV_ACCEPT_LOCAL(in_dev)     IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)
 #define IN_DEV_BOOTP_RELAY(in_dev)      IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)
 
 #define IN_DEV_LOG_MARTIANS(in_dev)     IN_DEV_ORCONF((in_dev), LOG_MARTIANS)
 #define IN_DEV_PROXY_ARP(in_dev)        IN_DEV_ORCONF((in_dev), PROXY_ARP)
 #define IN_DEV_PROXY_ARP_PVLAN(in_dev)  IN_DEV_CONF_GET(in_dev, PROXY_ARP_PVLAN)
 #define IN_DEV_SHARED_MEDIA(in_dev)     IN_DEV_ORCONF((in_dev), SHARED_MEDIA)
 #define IN_DEV_TX_REDIRECTS(in_dev)     IN_DEV_ORCONF((in_dev), SEND_REDIRECTS)
 #define IN_DEV_SEC_REDIRECTS(in_dev)    IN_DEV_ORCONF((in_dev), \
                                                       SECURE_REDIRECTS)
 #define IN_DEV_IDTAG(in_dev)            IN_DEV_CONF_GET(in_dev, TAG)
 #define IN_DEV_MEDIUM_ID(in_dev)        IN_DEV_CONF_GET(in_dev, MEDIUM_ID)
 #define IN_DEV_PROMOTE_SECONDARIES(in_dev) \
                                         IN_DEV_ORCONF((in_dev), \
                                                       PROMOTE_SECONDARIES)
 #define IN_DEV_ROUTE_LOCALNET(in_dev)   IN_DEV_ORCONF(in_dev, ROUTE_LOCALNET)
 #define IN_DEV_NET_ROUTE_LOCALNET(in_dev, net)  \
         IN_DEV_NET_ORCONF(in_dev, net, ROUTE_LOCALNET)
 
 #define IN_DEV_RX_REDIRECTS(in_dev) \
         ((IN_DEV_FORWARD(in_dev) && \
           IN_DEV_ANDCONF((in_dev), ACCEPT_REDIRECTS)) \
          || (!IN_DEV_FORWARD(in_dev) && \
              IN_DEV_ORCONF((in_dev), ACCEPT_REDIRECTS)))
 
 #define IN_DEV_ARPFILTER(in_dev)        IN_DEV_ORCONF((in_dev), ARPFILTER)
 #define IN_DEV_ARP_ACCEPT(in_dev)       IN_DEV_ORCONF((in_dev), ARP_ACCEPT)
 #define IN_DEV_ARP_ANNOUNCE(in_dev)     IN_DEV_MAXCONF((in_dev), ARP_ANNOUNCE)
 #define IN_DEV_ARP_IGNORE(in_dev)       IN_DEV_MAXCONF((in_dev), ARP_IGNORE)
 #define IN_DEV_ARP_NOTIFY(in_dev)       IN_DEV_MAXCONF((in_dev), ARP_NOTIFY)
 
 struct in_ifaddr {
         struct hlist_node hash;
         struct in_ifaddr *ifa_next;
         struct in_device *ifa_dev;
         struct rcu_head rcu_head;
         __be32 ifa_local;
         __be32 ifa_address;
         __be32 ifa_mask;
         __be32 ifa_broadcast;
         unsigned char ifa_scope;
         unsigned char ifa_flags;
         unsigned char ifa_prefixlen;
         char ifa_label[IFNAMSIZ];
 
         /* In seconds, relative to tstamp. Expiry is at tstamp + HZ * lft. */
         __u32 ifa_valid_lft;
         __u32 ifa_preferred_lft;
         unsigned long ifa_cstamp;       /* created timestamp */
         unsigned long ifa_tstamp;       /* updated timestamp */
 };
 
-extern int register_inetaddr_notifier(struct notifier_block *nb);
-extern int unregister_inetaddr_notifier(struct notifier_block *nb);
+int register_inetaddr_notifier(struct notifier_block *nb);
+int unregister_inetaddr_notifier(struct notifier_block *nb);
 
-extern void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
-                                        struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
+                                 struct ipv4_devconf *devconf);
 
-extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
+struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
 static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
 {
         return __ip_dev_find(net, addr, true);
 }
 
-extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
-extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
-extern void devinet_init(void);
-extern struct in_device *inetdev_by_index(struct net *, int);
-extern __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
-extern __be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local, int scope);
-extern struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix, __be32 mask);
+int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b);
+int devinet_ioctl(struct net *net, unsigned int cmd, void __user *);
+void devinet_init(void);
+struct in_device *inetdev_by_index(struct net *, int);
+__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope);
+__be32 inet_confirm_addr(struct in_device *in_dev, __be32 dst, __be32 local,
+                         int scope);
+struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
+                                    __be32 mask);
 
 static __inline__ int inet_ifa_match(__be32 addr, struct in_ifaddr *ifa)
 {
         return !((addr^ifa->ifa_address)&ifa->ifa_mask);
 }
 
 /*
  *      Check if a mask is acceptable.
  */
 
 static __inline__ int bad_mask(__be32 mask, __be32 addr)
 {
         __u32 hmask;
         if (addr & (mask = ~mask))
                 return 1;
         hmask = ntohl(mask);
         if (hmask & (hmask+1))
                 return 1;
         return 0;
 }
 
 #define for_primary_ifa(in_dev) { struct in_ifaddr *ifa; \
         for (ifa = (in_dev)->ifa_list; ifa && !(ifa->ifa_flags&IFA_F_SECONDARY); ifa = ifa->ifa_next)
 
 #define for_ifa(in_dev) { struct in_ifaddr *ifa; \
         for (ifa = (in_dev)->ifa_list; ifa; ifa = ifa->ifa_next)
 
 
 #define endfor_ifa(in_dev) }
 
 static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev)
 {
         return rcu_dereference(dev->ip_ptr);
 }
 
 static inline struct in_device *in_dev_get(const struct net_device *dev)
 {
         struct in_device *in_dev;
 
         rcu_read_lock();
         in_dev = __in_dev_get_rcu(dev);
         if (in_dev)
                 atomic_inc(&in_dev->refcnt);
         rcu_read_unlock();
         return in_dev;
 }
 
 static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
 {
         return rtnl_dereference(dev->ip_ptr);
 }
 
-extern void in_dev_finish_destroy(struct in_device *idev);
+void in_dev_finish_destroy(struct in_device *idev);
 
 static inline void in_dev_put(struct in_device *idev)
 {
         if (atomic_dec_and_test(&idev->refcnt))
                 in_dev_finish_destroy(idev);
 }
 
 #define __in_dev_put(idev)  atomic_dec(&(idev)->refcnt)
 #define in_dev_hold(idev)   atomic_inc(&(idev)->refcnt)
 
 #endif /* __KERNEL__ */
 
 static __inline__ __be32 inet_make_mask(int logmask)
 {
         if (logmask)
                 return htonl(~((1<<(32-logmask))-1));
         return 0;
 }
 
 static __inline__ int inet_mask_len(__be32 mask)
 {
         __u32 hmask = ntohl(mask);
         if (!hmask)
                 return 0;
         return 32 - ffz(~hmask);
 }
 
 
 #endif /* _LINUX_INETDEVICE_H */
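The two mask helpers at the bottom of this header are inverses of each
other; as a userspace sketch of the same arithmetic (re-implemented here
in host byte order for illustration, not the kernel functions themselves):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t make_mask(int logmask)      /* cf. inet_make_mask() */
    {
            return logmask ? ~((1u << (32 - logmask)) - 1) : 0;
    }

    static int mask_len(uint32_t hmask)         /* cf. inet_mask_len() */
    {
            int len = 0;

            while (hmask) {     /* valid for contiguous netmasks only */
                    len++;
                    hmask <<= 1;
            }
            return len;
    }

    int main(void)
    {
            assert(make_mask(24) == 0xffffff00u);   /* /24 -> 255.255.255.0 */
            assert(mask_len(0xffffff00u) == 24);    /* ...and back again */
            return 0;
    }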
include/linux/netdevice.h
1 /* 1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX 2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket 3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * Definitions for the Interfaces handler. 6 * Definitions for the Interfaces handler.
7 * 7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93 8 * Version: @(#)dev.h 1.0.10 08/12/93
9 * 9 *
10 * Authors: Ross Biro 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> 13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
14 * Alan Cox, <alan@lxorguk.ukuu.org.uk> 14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
15 * Bjorn Ekwall. <bj0rn@blox.se> 15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi> 16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License 19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version 20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version. 21 * 2 of the License, or (at your option) any later version.
22 * 22 *
23 * Moved to /usr/include/linux for NET3 23 * Moved to /usr/include/linux for NET3
24 */ 24 */
25 #ifndef _LINUX_NETDEVICE_H 25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H 26 #define _LINUX_NETDEVICE_H
27 27
28 #include <linux/pm_qos.h> 28 #include <linux/pm_qos.h>
29 #include <linux/timer.h> 29 #include <linux/timer.h>
30 #include <linux/bug.h> 30 #include <linux/bug.h>
31 #include <linux/delay.h> 31 #include <linux/delay.h>
32 #include <linux/atomic.h> 32 #include <linux/atomic.h>
33 #include <asm/cache.h> 33 #include <asm/cache.h>
34 #include <asm/byteorder.h> 34 #include <asm/byteorder.h>
35 35
36 #include <linux/percpu.h> 36 #include <linux/percpu.h>
37 #include <linux/rculist.h> 37 #include <linux/rculist.h>
38 #include <linux/dmaengine.h> 38 #include <linux/dmaengine.h>
39 #include <linux/workqueue.h> 39 #include <linux/workqueue.h>
40 #include <linux/dynamic_queue_limits.h> 40 #include <linux/dynamic_queue_limits.h>
41 41
42 #include <linux/ethtool.h> 42 #include <linux/ethtool.h>
43 #include <net/net_namespace.h> 43 #include <net/net_namespace.h>
44 #include <net/dsa.h> 44 #include <net/dsa.h>
45 #ifdef CONFIG_DCB 45 #ifdef CONFIG_DCB
46 #include <net/dcbnl.h> 46 #include <net/dcbnl.h>
47 #endif 47 #endif
48 #include <net/netprio_cgroup.h> 48 #include <net/netprio_cgroup.h>
49 49
50 #include <linux/netdev_features.h> 50 #include <linux/netdev_features.h>
51 #include <linux/neighbour.h> 51 #include <linux/neighbour.h>
52 #include <uapi/linux/netdevice.h> 52 #include <uapi/linux/netdevice.h>
53 53
54 struct netpoll_info; 54 struct netpoll_info;
55 struct device; 55 struct device;
56 struct phy_device; 56 struct phy_device;
57 /* 802.11 specific */ 57 /* 802.11 specific */
58 struct wireless_dev; 58 struct wireless_dev;
59 /* source back-compat hooks */ 59 /* source back-compat hooks */
60 #define SET_ETHTOOL_OPS(netdev,ops) \ 60 #define SET_ETHTOOL_OPS(netdev,ops) \
61 ( (netdev)->ethtool_ops = (ops) ) 61 ( (netdev)->ethtool_ops = (ops) )
62 62
63 extern void netdev_set_default_ethtool_ops(struct net_device *dev, 63 void netdev_set_default_ethtool_ops(struct net_device *dev,
64 const struct ethtool_ops *ops); 64 const struct ethtool_ops *ops);
65 65
66 /* hardware address assignment types */ 66 /* hardware address assignment types */
67 #define NET_ADDR_PERM 0 /* address is permanent (default) */ 67 #define NET_ADDR_PERM 0 /* address is permanent (default) */
68 #define NET_ADDR_RANDOM 1 /* address is generated randomly */ 68 #define NET_ADDR_RANDOM 1 /* address is generated randomly */
69 #define NET_ADDR_STOLEN 2 /* address is stolen from other device */ 69 #define NET_ADDR_STOLEN 2 /* address is stolen from other device */
70 #define NET_ADDR_SET 3 /* address is set using 70 #define NET_ADDR_SET 3 /* address is set using
71 * dev_set_mac_address() */ 71 * dev_set_mac_address() */
72 72
73 /* Backlog congestion levels */ 73 /* Backlog congestion levels */
74 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ 74 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
75 #define NET_RX_DROP 1 /* packet dropped */ 75 #define NET_RX_DROP 1 /* packet dropped */
76 76
77 /* 77 /*
78 * Transmit return codes: transmit return codes originate from three different 78 * Transmit return codes: transmit return codes originate from three different
79 * namespaces: 79 * namespaces:
80 * 80 *
81 * - qdisc return codes 81 * - qdisc return codes
82 * - driver transmit return codes 82 * - driver transmit return codes
83 * - errno values 83 * - errno values
84 * 84 *
85 * Drivers are allowed to return any one of those in their hard_start_xmit() 85 * Drivers are allowed to return any one of those in their hard_start_xmit()
86 * function. Real network devices commonly used with qdiscs should only return 86 * function. Real network devices commonly used with qdiscs should only return
87 * the driver transmit return codes though - when qdiscs are used, the actual 87 * the driver transmit return codes though - when qdiscs are used, the actual
88 * transmission happens asynchronously, so the value is not propagated to 88 * transmission happens asynchronously, so the value is not propagated to
89 * higher layers. Virtual network devices transmit synchronously, in this case 89 * higher layers. Virtual network devices transmit synchronously, in this case
90 * the driver transmit return codes are consumed by dev_queue_xmit(), all 90 * the driver transmit return codes are consumed by dev_queue_xmit(), all
91 * others are propagated to higher layers. 91 * others are propagated to higher layers.
92 */ 92 */
93 93
94 /* qdisc ->enqueue() return codes. */ 94 /* qdisc ->enqueue() return codes. */
95 #define NET_XMIT_SUCCESS 0x00 95 #define NET_XMIT_SUCCESS 0x00
96 #define NET_XMIT_DROP 0x01 /* skb dropped */ 96 #define NET_XMIT_DROP 0x01 /* skb dropped */
97 #define NET_XMIT_CN 0x02 /* congestion notification */ 97 #define NET_XMIT_CN 0x02 /* congestion notification */
98 #define NET_XMIT_POLICED 0x03 /* skb is shot by police */ 98 #define NET_XMIT_POLICED 0x03 /* skb is shot by police */
99 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ 99 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
100 100
101 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It 101 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
102 * indicates that the device will soon be dropping packets, or already drops 102 * indicates that the device will soon be dropping packets, or already drops
103 * some packets of the same priority; prompting us to send less aggressively. */ 103 * some packets of the same priority; prompting us to send less aggressively. */
104 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) 104 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
105 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) 105 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
106 106
107 /* Driver transmit return codes */ 107 /* Driver transmit return codes */
108 #define NETDEV_TX_MASK 0xf0 108 #define NETDEV_TX_MASK 0xf0
109 109
110 enum netdev_tx { 110 enum netdev_tx {
111 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ 111 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
112 NETDEV_TX_OK = 0x00, /* driver took care of packet */ 112 NETDEV_TX_OK = 0x00, /* driver took care of packet */
113 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ 113 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
114 NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ 114 NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
115 }; 115 };
116 typedef enum netdev_tx netdev_tx_t; 116 typedef enum netdev_tx netdev_tx_t;
117 117
118 /* 118 /*
119 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; 119 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
120 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. 120 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
121 */ 121 */
122 static inline bool dev_xmit_complete(int rc) 122 static inline bool dev_xmit_complete(int rc)
123 { 123 {
124 /* 124 /*
125 * Positive cases with an skb consumed by a driver: 125 * Positive cases with an skb consumed by a driver:
126 * - successful transmission (rc == NETDEV_TX_OK) 126 * - successful transmission (rc == NETDEV_TX_OK)
127 * - error while transmitting (rc < 0) 127 * - error while transmitting (rc < 0)
128 * - error while queueing to a different device (rc & NET_XMIT_MASK) 128 * - error while queueing to a different device (rc & NET_XMIT_MASK)
129 */ 129 */
130 if (likely(rc < NET_XMIT_MASK)) 130 if (likely(rc < NET_XMIT_MASK))
131 return true; 131 return true;
132 132
133 return false; 133 return false;
134 } 134 }
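
As a usage sketch (the wrapper below is hypothetical), dev_xmit_complete() tells the queueing layer whether it still owns the skb after a call into the driver:

static bool my_try_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int rc = dev->netdev_ops->ndo_start_xmit(skb, dev);

	if (dev_xmit_complete(rc))
		return true;	/* skb consumed: success, or an error the
				 * driver has already accounted for */

	return false;		/* NETDEV_TX_BUSY/LOCKED: the caller still
				 * owns the skb and must retry or requeue */
}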
135 135
136 /* 136 /*
137 * Compute the worst case header length according to the protocols 137 * Compute the worst case header length according to the protocols
138 * used. 138 * used.
139 */ 139 */
140 140
141 #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25) 141 #if defined(CONFIG_WLAN) || IS_ENABLED(CONFIG_AX25)
142 # if defined(CONFIG_MAC80211_MESH) 142 # if defined(CONFIG_MAC80211_MESH)
143 # define LL_MAX_HEADER 128 143 # define LL_MAX_HEADER 128
144 # else 144 # else
145 # define LL_MAX_HEADER 96 145 # define LL_MAX_HEADER 96
146 # endif 146 # endif
147 #else 147 #else
148 # define LL_MAX_HEADER 32 148 # define LL_MAX_HEADER 32
149 #endif 149 #endif
150 150
151 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \ 151 #if !IS_ENABLED(CONFIG_NET_IPIP) && !IS_ENABLED(CONFIG_NET_IPGRE) && \
152 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL) 152 !IS_ENABLED(CONFIG_IPV6_SIT) && !IS_ENABLED(CONFIG_IPV6_TUNNEL)
153 #define MAX_HEADER LL_MAX_HEADER 153 #define MAX_HEADER LL_MAX_HEADER
154 #else 154 #else
155 #define MAX_HEADER (LL_MAX_HEADER + 48) 155 #define MAX_HEADER (LL_MAX_HEADER + 48)
156 #endif 156 #endif
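
To make the arithmetic concrete, a worked example of the two definitions above (illustrative configurations only):

/*
 * CONFIG_WLAN=y, CONFIG_MAC80211_MESH=y     ->  LL_MAX_HEADER = 128
 *   plus any listed tunnel enabled          ->  MAX_HEADER = 128 + 48 = 176
 * plain Ethernet, no WLAN/AX.25, no tunnels ->  LL_MAX_HEADER = MAX_HEADER = 32
 */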
157 157
158 /* 158 /*
159 * Old network device statistics. Fields are native words 159 * Old network device statistics. Fields are native words
160 * (unsigned long) so they can be read and written atomically. 160 * (unsigned long) so they can be read and written atomically.
161 */ 161 */
162 162
163 struct net_device_stats { 163 struct net_device_stats {
164 unsigned long rx_packets; 164 unsigned long rx_packets;
165 unsigned long tx_packets; 165 unsigned long tx_packets;
166 unsigned long rx_bytes; 166 unsigned long rx_bytes;
167 unsigned long tx_bytes; 167 unsigned long tx_bytes;
168 unsigned long rx_errors; 168 unsigned long rx_errors;
169 unsigned long tx_errors; 169 unsigned long tx_errors;
170 unsigned long rx_dropped; 170 unsigned long rx_dropped;
171 unsigned long tx_dropped; 171 unsigned long tx_dropped;
172 unsigned long multicast; 172 unsigned long multicast;
173 unsigned long collisions; 173 unsigned long collisions;
174 unsigned long rx_length_errors; 174 unsigned long rx_length_errors;
175 unsigned long rx_over_errors; 175 unsigned long rx_over_errors;
176 unsigned long rx_crc_errors; 176 unsigned long rx_crc_errors;
177 unsigned long rx_frame_errors; 177 unsigned long rx_frame_errors;
178 unsigned long rx_fifo_errors; 178 unsigned long rx_fifo_errors;
179 unsigned long rx_missed_errors; 179 unsigned long rx_missed_errors;
180 unsigned long tx_aborted_errors; 180 unsigned long tx_aborted_errors;
181 unsigned long tx_carrier_errors; 181 unsigned long tx_carrier_errors;
182 unsigned long tx_fifo_errors; 182 unsigned long tx_fifo_errors;
183 unsigned long tx_heartbeat_errors; 183 unsigned long tx_heartbeat_errors;
184 unsigned long tx_window_errors; 184 unsigned long tx_window_errors;
185 unsigned long rx_compressed; 185 unsigned long rx_compressed;
186 unsigned long tx_compressed; 186 unsigned long tx_compressed;
187 }; 187 };
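
A minimal sketch of how a driver feeds these counters (the completion handler is hypothetical); since the fields are plain native words, drivers typically bump them from a context already serialized by their own TX locking:

static void my_tx_complete(struct net_device *dev, unsigned int bytes)
{
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += bytes;
}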
188 188
189 189
190 #include <linux/cache.h> 190 #include <linux/cache.h>
191 #include <linux/skbuff.h> 191 #include <linux/skbuff.h>
192 192
193 #ifdef CONFIG_RPS 193 #ifdef CONFIG_RPS
194 #include <linux/static_key.h> 194 #include <linux/static_key.h>
195 extern struct static_key rps_needed; 195 extern struct static_key rps_needed;
196 #endif 196 #endif
197 197
198 struct neighbour; 198 struct neighbour;
199 struct neigh_parms; 199 struct neigh_parms;
200 struct sk_buff; 200 struct sk_buff;
201 201
202 struct netdev_hw_addr { 202 struct netdev_hw_addr {
203 struct list_head list; 203 struct list_head list;
204 unsigned char addr[MAX_ADDR_LEN]; 204 unsigned char addr[MAX_ADDR_LEN];
205 unsigned char type; 205 unsigned char type;
206 #define NETDEV_HW_ADDR_T_LAN 1 206 #define NETDEV_HW_ADDR_T_LAN 1
207 #define NETDEV_HW_ADDR_T_SAN 2 207 #define NETDEV_HW_ADDR_T_SAN 2
208 #define NETDEV_HW_ADDR_T_SLAVE 3 208 #define NETDEV_HW_ADDR_T_SLAVE 3
209 #define NETDEV_HW_ADDR_T_UNICAST 4 209 #define NETDEV_HW_ADDR_T_UNICAST 4
210 #define NETDEV_HW_ADDR_T_MULTICAST 5 210 #define NETDEV_HW_ADDR_T_MULTICAST 5
211 bool global_use; 211 bool global_use;
212 int sync_cnt; 212 int sync_cnt;
213 int refcount; 213 int refcount;
214 int synced; 214 int synced;
215 struct rcu_head rcu_head; 215 struct rcu_head rcu_head;
216 }; 216 };
217 217
218 struct netdev_hw_addr_list { 218 struct netdev_hw_addr_list {
219 struct list_head list; 219 struct list_head list;
220 int count; 220 int count;
221 }; 221 };
222 222
223 #define netdev_hw_addr_list_count(l) ((l)->count) 223 #define netdev_hw_addr_list_count(l) ((l)->count)
224 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) 224 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
225 #define netdev_hw_addr_list_for_each(ha, l) \ 225 #define netdev_hw_addr_list_for_each(ha, l) \
226 list_for_each_entry(ha, &(l)->list, list) 226 list_for_each_entry(ha, &(l)->list, list)
227 227
228 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) 228 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
229 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) 229 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
230 #define netdev_for_each_uc_addr(ha, dev) \ 230 #define netdev_for_each_uc_addr(ha, dev) \
231 netdev_hw_addr_list_for_each(ha, &(dev)->uc) 231 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
232 232
233 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) 233 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
234 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) 234 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
235 #define netdev_for_each_mc_addr(ha, dev) \ 235 #define netdev_for_each_mc_addr(ha, dev) \
236 netdev_hw_addr_list_for_each(ha, &(dev)->mc) 236 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
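
For illustration, a hypothetical .ndo_set_rx_mode walking the multicast list with the helpers above (my_hw_add_mc_filter() is an assumed device-specific helper):

static void my_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	if (netdev_mc_empty(dev))
		return;			/* nothing to program */

	netdev_for_each_mc_addr(ha, dev)
		my_hw_add_mc_filter(dev, ha->addr);
}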
237 237
238 struct hh_cache { 238 struct hh_cache {
239 u16 hh_len; 239 u16 hh_len;
240 u16 __pad; 240 u16 __pad;
241 seqlock_t hh_lock; 241 seqlock_t hh_lock;
242 242
243 /* cached hardware header; allow for machine alignment needs. */ 243 /* cached hardware header; allow for machine alignment needs. */
244 #define HH_DATA_MOD 16 244 #define HH_DATA_MOD 16
245 #define HH_DATA_OFF(__len) \ 245 #define HH_DATA_OFF(__len) \
246 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) 246 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
247 #define HH_DATA_ALIGN(__len) \ 247 #define HH_DATA_ALIGN(__len) \
248 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) 248 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
249 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; 249 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
250 }; 250 };
251 251
252 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. 252 /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
253 * Alternative is: 253 * Alternative is:
254 * dev->hard_header_len ? (dev->hard_header_len + 254 * dev->hard_header_len ? (dev->hard_header_len +
255 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 255 * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
256 * 256 *
257 * We could use other alignment values, but we must maintain the 257 * We could use other alignment values, but we must maintain the
258 * relationship HH alignment <= LL alignment. 258 * relationship HH alignment <= LL alignment.
259 */ 259 */
260 #define LL_RESERVED_SPACE(dev) \ 260 #define LL_RESERVED_SPACE(dev) \
261 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 261 ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
262 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ 262 #define LL_RESERVED_SPACE_EXTRA(dev,extra) \
263 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) 263 ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
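
A sketch of the intended use (the allocator is hypothetical): reserve LL_RESERVED_SPACE(dev) as headroom so the link-layer header can later be prepended without reallocation:

static struct sk_buff *my_alloc_tx_skb(struct net_device *dev,
				       unsigned int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(LL_RESERVED_SPACE(dev) + payload_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	return skb;
}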
264 264
265 struct header_ops { 265 struct header_ops {
266 int (*create) (struct sk_buff *skb, struct net_device *dev, 266 int (*create) (struct sk_buff *skb, struct net_device *dev,
267 unsigned short type, const void *daddr, 267 unsigned short type, const void *daddr,
268 const void *saddr, unsigned int len); 268 const void *saddr, unsigned int len);
269 int (*parse)(const struct sk_buff *skb, unsigned char *haddr); 269 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
270 int (*rebuild)(struct sk_buff *skb); 270 int (*rebuild)(struct sk_buff *skb);
271 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 271 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
272 void (*cache_update)(struct hh_cache *hh, 272 void (*cache_update)(struct hh_cache *hh,
273 const struct net_device *dev, 273 const struct net_device *dev,
274 const unsigned char *haddr); 274 const unsigned char *haddr);
275 }; 275 };
276 276
277 /* These flag bits are private to the generic network queueing 277 /* These flag bits are private to the generic network queueing
278 * layer, they may not be explicitly referenced by any other 278 * layer, they may not be explicitly referenced by any other
279 * code. 279 * code.
280 */ 280 */
281 281
282 enum netdev_state_t { 282 enum netdev_state_t {
283 __LINK_STATE_START, 283 __LINK_STATE_START,
284 __LINK_STATE_PRESENT, 284 __LINK_STATE_PRESENT,
285 __LINK_STATE_NOCARRIER, 285 __LINK_STATE_NOCARRIER,
286 __LINK_STATE_LINKWATCH_PENDING, 286 __LINK_STATE_LINKWATCH_PENDING,
287 __LINK_STATE_DORMANT, 287 __LINK_STATE_DORMANT,
288 }; 288 };
289 289
290 290
291 /* 291 /*
292 * This structure holds at boot time configured netdevice settings. They 292 * This structure holds at boot time configured netdevice settings. They
293 * are then used in the device probing. 293 * are then used in the device probing.
294 */ 294 */
295 struct netdev_boot_setup { 295 struct netdev_boot_setup {
296 char name[IFNAMSIZ]; 296 char name[IFNAMSIZ];
297 struct ifmap map; 297 struct ifmap map;
298 }; 298 };
299 #define NETDEV_BOOT_SETUP_MAX 8 299 #define NETDEV_BOOT_SETUP_MAX 8
300 300
301 extern int __init netdev_boot_setup(char *str); 301 int __init netdev_boot_setup(char *str);
302 302
303 /* 303 /*
304 * Structure for NAPI scheduling similar to tasklet but with weighting 304 * Structure for NAPI scheduling similar to tasklet but with weighting
305 */ 305 */
306 struct napi_struct { 306 struct napi_struct {
307 /* The poll_list must only be managed by the entity which 307 /* The poll_list must only be managed by the entity which
308 * changes the state of the NAPI_STATE_SCHED bit. This means 308 * changes the state of the NAPI_STATE_SCHED bit. This means
309 * whoever atomically sets that bit can add this napi_struct 309 * whoever atomically sets that bit can add this napi_struct
310 * to the per-cpu poll_list, and whoever clears that bit 310 * to the per-cpu poll_list, and whoever clears that bit
311 * can remove from the list right before clearing the bit. 311 * can remove from the list right before clearing the bit.
312 */ 312 */
313 struct list_head poll_list; 313 struct list_head poll_list;
314 314
315 unsigned long state; 315 unsigned long state;
316 int weight; 316 int weight;
317 unsigned int gro_count; 317 unsigned int gro_count;
318 int (*poll)(struct napi_struct *, int); 318 int (*poll)(struct napi_struct *, int);
319 #ifdef CONFIG_NETPOLL 319 #ifdef CONFIG_NETPOLL
320 spinlock_t poll_lock; 320 spinlock_t poll_lock;
321 int poll_owner; 321 int poll_owner;
322 #endif 322 #endif
323 struct net_device *dev; 323 struct net_device *dev;
324 struct sk_buff *gro_list; 324 struct sk_buff *gro_list;
325 struct sk_buff *skb; 325 struct sk_buff *skb;
326 struct list_head dev_list; 326 struct list_head dev_list;
327 struct hlist_node napi_hash_node; 327 struct hlist_node napi_hash_node;
328 unsigned int napi_id; 328 unsigned int napi_id;
329 }; 329 };
330 330
331 enum { 331 enum {
332 NAPI_STATE_SCHED, /* Poll is scheduled */ 332 NAPI_STATE_SCHED, /* Poll is scheduled */
333 NAPI_STATE_DISABLE, /* Disable pending */ 333 NAPI_STATE_DISABLE, /* Disable pending */
334 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ 334 NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
335 NAPI_STATE_HASHED, /* In NAPI hash */ 335 NAPI_STATE_HASHED, /* In NAPI hash */
336 }; 336 };
337 337
338 enum gro_result { 338 enum gro_result {
339 GRO_MERGED, 339 GRO_MERGED,
340 GRO_MERGED_FREE, 340 GRO_MERGED_FREE,
341 GRO_HELD, 341 GRO_HELD,
342 GRO_NORMAL, 342 GRO_NORMAL,
343 GRO_DROP, 343 GRO_DROP,
344 }; 344 };
345 typedef enum gro_result gro_result_t; 345 typedef enum gro_result gro_result_t;
346 346
347 /* 347 /*
348 * enum rx_handler_result - Possible return values for rx_handlers. 348 * enum rx_handler_result - Possible return values for rx_handlers.
349 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it 349 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
350 * further. 350 * further.
351 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in 351 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
352 * case skb->dev was changed by rx_handler. 352 * case skb->dev was changed by rx_handler.
353 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard. 353 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
354 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called. 354 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
355 * 355 *
356 * rx_handlers are functions called from inside __netif_receive_skb(), to do 356 * rx_handlers are functions called from inside __netif_receive_skb(), to do
357 * special processing of the skb, prior to delivery to protocol handlers. 357 * special processing of the skb, prior to delivery to protocol handlers.
358 * 358 *
359 * Currently, a net_device can only have a single rx_handler registered. Trying 359 * Currently, a net_device can only have a single rx_handler registered. Trying
360 * to register a second rx_handler will return -EBUSY. 360 * to register a second rx_handler will return -EBUSY.
361 * 361 *
362 * To register a rx_handler on a net_device, use netdev_rx_handler_register(). 362 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
363 * To unregister a rx_handler on a net_device, use 363 * To unregister a rx_handler on a net_device, use
364 * netdev_rx_handler_unregister(). 364 * netdev_rx_handler_unregister().
365 * 365 *
366 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to 366 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
367 * do with the skb. 367 * do with the skb.
368 * 368 *
369 * If the rx_handler consumed the skb in some way, it should return 369 * If the rx_handler consumed the skb in some way, it should return
370 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for 370 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
371 * the skb to be delivered in some other way. 371 * the skb to be delivered in some other way.
372 * 372 *
373 * If the rx_handler changed skb->dev, to divert the skb to another 373 * If the rx_handler changed skb->dev, to divert the skb to another
374 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the 374 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
375 * new device will be called if it exists. 375 * new device will be called if it exists.
376 * 376 *
377 * If the rx_handler considers that the skb should be ignored, it should return 377 * If the rx_handler considers that the skb should be ignored, it should return
378 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that 378 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
379 * are registered on the exact device (ptype->dev == skb->dev). 379 * are registered on the exact device (ptype->dev == skb->dev).
380 * 380 *
381 * If the rx_handler didn't change skb->dev, but wants the skb to be normally 381 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
382 * delivered, it should return RX_HANDLER_PASS. 382 * delivered, it should return RX_HANDLER_PASS.
383 * 383 *
384 * A device without a registered rx_handler will behave as if rx_handler 384 * A device without a registered rx_handler will behave as if rx_handler
385 * returned RX_HANDLER_PASS. 385 * returned RX_HANDLER_PASS.
386 */ 386 */
387 387
388 enum rx_handler_result { 388 enum rx_handler_result {
389 RX_HANDLER_CONSUMED, 389 RX_HANDLER_CONSUMED,
390 RX_HANDLER_ANOTHER, 390 RX_HANDLER_ANOTHER,
391 RX_HANDLER_EXACT, 391 RX_HANDLER_EXACT,
392 RX_HANDLER_PASS, 392 RX_HANDLER_PASS,
393 }; 393 };
394 typedef enum rx_handler_result rx_handler_result_t; 394 typedef enum rx_handler_result rx_handler_result_t;
395 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); 395 typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
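
A minimal rx_handler sketch that matches the contract documented above (struct my_port and its counter are hypothetical; bridge and bonding are the real in-tree users):

struct my_port {
	unsigned long rx_seen;		/* hypothetical per-port counter */
};

static rx_handler_result_t my_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct my_port *port = rcu_dereference(skb->dev->rx_handler_data);

	port->rx_seen++;		/* inspect, steal or redirect here */
	return RX_HANDLER_PASS;		/* let normal delivery continue */
}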
396 396
397 extern void __napi_schedule(struct napi_struct *n); 397 void __napi_schedule(struct napi_struct *n);
398 398
399 static inline bool napi_disable_pending(struct napi_struct *n) 399 static inline bool napi_disable_pending(struct napi_struct *n)
400 { 400 {
401 return test_bit(NAPI_STATE_DISABLE, &n->state); 401 return test_bit(NAPI_STATE_DISABLE, &n->state);
402 } 402 }
403 403
404 /** 404 /**
405 * napi_schedule_prep - check if napi can be scheduled 405 * napi_schedule_prep - check if napi can be scheduled
406 * @n: napi context 406 * @n: napi context
407 * 407 *
408 * Test if NAPI routine is already running, and if not mark 408 * Test if NAPI routine is already running, and if not mark
409 * it as running. This is used as a condition variable 409 * it as running. This is used as a condition variable
410 * to ensure only one NAPI poll instance runs. We also make 410 * to ensure only one NAPI poll instance runs. We also make
411 * sure there is no pending NAPI disable. 411 * sure there is no pending NAPI disable.
412 */ 412 */
413 static inline bool napi_schedule_prep(struct napi_struct *n) 413 static inline bool napi_schedule_prep(struct napi_struct *n)
414 { 414 {
415 return !napi_disable_pending(n) && 415 return !napi_disable_pending(n) &&
416 !test_and_set_bit(NAPI_STATE_SCHED, &n->state); 416 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
417 } 417 }
418 418
419 /** 419 /**
420 * napi_schedule - schedule NAPI poll 420 * napi_schedule - schedule NAPI poll
421 * @n: napi context 421 * @n: napi context
422 * 422 *
423 * Schedule NAPI poll routine to be called if it is not already 423 * Schedule NAPI poll routine to be called if it is not already
424 * running. 424 * running.
425 */ 425 */
426 static inline void napi_schedule(struct napi_struct *n) 426 static inline void napi_schedule(struct napi_struct *n)
427 { 427 {
428 if (napi_schedule_prep(n)) 428 if (napi_schedule_prep(n))
429 __napi_schedule(n); 429 __napi_schedule(n);
430 } 430 }
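
The canonical caller is a device interrupt handler; a sketch assuming a hypothetical driver private struct and register helper:

#include <linux/interrupt.h>

struct my_priv {
	struct napi_struct napi;
	/* ... device state ... */
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	my_hw_disable_rx_irq(priv);	/* hypothetical register write */
	napi_schedule(&priv->napi);	/* no-op if already scheduled */
	return IRQ_HANDLED;
}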
431 431
432 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */ 432 /* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
433 static inline bool napi_reschedule(struct napi_struct *napi) 433 static inline bool napi_reschedule(struct napi_struct *napi)
434 { 434 {
435 if (napi_schedule_prep(napi)) { 435 if (napi_schedule_prep(napi)) {
436 __napi_schedule(napi); 436 __napi_schedule(napi);
437 return true; 437 return true;
438 } 438 }
439 return false; 439 return false;
440 } 440 }
441 441
442 /** 442 /**
443 * napi_complete - NAPI processing complete 443 * napi_complete - NAPI processing complete
444 * @n: napi context 444 * @n: napi context
445 * 445 *
446 * Mark NAPI processing as complete. 446 * Mark NAPI processing as complete.
447 */ 447 */
448 extern void __napi_complete(struct napi_struct *n); 448 void __napi_complete(struct napi_struct *n);
449 extern void napi_complete(struct napi_struct *n); 449 void napi_complete(struct napi_struct *n);
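
These pair with the driver's poll routine; a sketch of the usual shape (my_hw_rx() and my_hw_enable_rx_irq() are hypothetical):

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = my_hw_rx(priv, budget);	/* processes <= budget packets */

	if (done < budget) {
		napi_complete(napi);		/* clears NAPI_STATE_SCHED */
		my_hw_enable_rx_irq(priv);
	}
	return done;
}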
450 450
451 /** 451 /**
452 * napi_by_id - lookup a NAPI by napi_id 452 * napi_by_id - lookup a NAPI by napi_id
453 * @napi_id: hashed napi_id 453 * @napi_id: hashed napi_id
454 * 454 *
455 * lookup @napi_id in napi_hash table 455 * lookup @napi_id in napi_hash table
456 * must be called under rcu_read_lock() 456 * must be called under rcu_read_lock()
457 */ 457 */
458 extern struct napi_struct *napi_by_id(unsigned int napi_id); 458 struct napi_struct *napi_by_id(unsigned int napi_id);
459 459
460 /** 460 /**
461 * napi_hash_add - add a NAPI to global hashtable 461 * napi_hash_add - add a NAPI to global hashtable
462 * @napi: napi context 462 * @napi: napi context
463 * 463 *
464 * generate a new napi_id and store a @napi under it in napi_hash 464 * generate a new napi_id and store a @napi under it in napi_hash
465 */ 465 */
466 extern void napi_hash_add(struct napi_struct *napi); 466 void napi_hash_add(struct napi_struct *napi);
467 467
468 /** 468 /**
469 * napi_hash_del - remove a NAPI from global table 469 * napi_hash_del - remove a NAPI from global table
470 * @napi: napi context 470 * @napi: napi context
471 * 471 *
472 * Warning: caller must observe rcu grace period 472 * Warning: caller must observe rcu grace period
473 * before freeing memory containing @napi 473 * before freeing memory containing @napi
474 */ 474 */
475 extern void napi_hash_del(struct napi_struct *napi); 475 void napi_hash_del(struct napi_struct *napi);
476 476
477 /** 477 /**
478 * napi_disable - prevent NAPI from scheduling 478 * napi_disable - prevent NAPI from scheduling
479 * @n: napi context 479 * @n: napi context
480 * 480 *
481 * Stop NAPI from being scheduled on this context. 481 * Stop NAPI from being scheduled on this context.
482 * Waits till any outstanding processing completes. 482 * Waits till any outstanding processing completes.
483 */ 483 */
484 static inline void napi_disable(struct napi_struct *n) 484 static inline void napi_disable(struct napi_struct *n)
485 { 485 {
486 set_bit(NAPI_STATE_DISABLE, &n->state); 486 set_bit(NAPI_STATE_DISABLE, &n->state);
487 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state)) 487 while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
488 msleep(1); 488 msleep(1);
489 clear_bit(NAPI_STATE_DISABLE, &n->state); 489 clear_bit(NAPI_STATE_DISABLE, &n->state);
490 } 490 }
491 491
492 /** 492 /**
493 * napi_enable - enable NAPI scheduling 493 * napi_enable - enable NAPI scheduling
494 * @n: napi context 494 * @n: napi context
495 * 495 *
496 * Resume NAPI from being scheduled on this context. 496 * Resume NAPI from being scheduled on this context.
497 * Must be paired with napi_disable. 497 * Must be paired with napi_disable.
498 */ 498 */
499 static inline void napi_enable(struct napi_struct *n) 499 static inline void napi_enable(struct napi_struct *n)
500 { 500 {
501 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); 501 BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
502 smp_mb__before_clear_bit(); 502 smp_mb__before_clear_bit();
503 clear_bit(NAPI_STATE_SCHED, &n->state); 503 clear_bit(NAPI_STATE_SCHED, &n->state);
504 } 504 }
505 505
506 #ifdef CONFIG_SMP 506 #ifdef CONFIG_SMP
507 /** 507 /**
508 * napi_synchronize - wait until NAPI is not running 508 * napi_synchronize - wait until NAPI is not running
509 * @n: napi context 509 * @n: napi context
510 * 510 *
511 * Wait until NAPI is done being scheduled on this context. 511 * Wait until NAPI is done being scheduled on this context.
512 * Waits till any outstanding processing completes but 512 * Waits till any outstanding processing completes but
513 * does not disable future activations. 513 * does not disable future activations.
514 */ 514 */
515 static inline void napi_synchronize(const struct napi_struct *n) 515 static inline void napi_synchronize(const struct napi_struct *n)
516 { 516 {
517 while (test_bit(NAPI_STATE_SCHED, &n->state)) 517 while (test_bit(NAPI_STATE_SCHED, &n->state))
518 msleep(1); 518 msleep(1);
519 } 519 }
520 #else 520 #else
521 # define napi_synchronize(n) barrier() 521 # define napi_synchronize(n) barrier()
522 #endif 522 #endif
523 523
524 enum netdev_queue_state_t { 524 enum netdev_queue_state_t {
525 __QUEUE_STATE_DRV_XOFF, 525 __QUEUE_STATE_DRV_XOFF,
526 __QUEUE_STATE_STACK_XOFF, 526 __QUEUE_STATE_STACK_XOFF,
527 __QUEUE_STATE_FROZEN, 527 __QUEUE_STATE_FROZEN,
528 #define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \ 528 #define QUEUE_STATE_ANY_XOFF ((1 << __QUEUE_STATE_DRV_XOFF) | \
529 (1 << __QUEUE_STATE_STACK_XOFF)) 529 (1 << __QUEUE_STATE_STACK_XOFF))
530 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \ 530 #define QUEUE_STATE_ANY_XOFF_OR_FROZEN (QUEUE_STATE_ANY_XOFF | \
531 (1 << __QUEUE_STATE_FROZEN)) 531 (1 << __QUEUE_STATE_FROZEN))
532 }; 532 };
533 /* 533 /*
534 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The 534 * __QUEUE_STATE_DRV_XOFF is used by drivers to stop the transmit queue. The
535 * netif_tx_* functions below are used to manipulate this flag. The 535 * netif_tx_* functions below are used to manipulate this flag. The
536 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit 536 * __QUEUE_STATE_STACK_XOFF flag is used by the stack to stop the transmit
537 * queue independently. The netif_xmit_*stopped functions below are called 537 * queue independently. The netif_xmit_*stopped functions below are called
538 * to check if the queue has been stopped by the driver or stack (either 538 * to check if the queue has been stopped by the driver or stack (either
539 * of the XOFF bits are set in the state). Drivers should not need to call 539 * of the XOFF bits are set in the state). Drivers should not need to call
540 * netif_xmit*stopped functions, they should only be using netif_tx_*. 540 * netif_xmit*stopped functions, they should only be using netif_tx_*.
541 */ 541 */
542 542
543 struct netdev_queue { 543 struct netdev_queue {
544 /* 544 /*
545 * read mostly part 545 * read mostly part
546 */ 546 */
547 struct net_device *dev; 547 struct net_device *dev;
548 struct Qdisc *qdisc; 548 struct Qdisc *qdisc;
549 struct Qdisc *qdisc_sleeping; 549 struct Qdisc *qdisc_sleeping;
550 #ifdef CONFIG_SYSFS 550 #ifdef CONFIG_SYSFS
551 struct kobject kobj; 551 struct kobject kobj;
552 #endif 552 #endif
553 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 553 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
554 int numa_node; 554 int numa_node;
555 #endif 555 #endif
556 /* 556 /*
557 * write mostly part 557 * write mostly part
558 */ 558 */
559 spinlock_t _xmit_lock ____cacheline_aligned_in_smp; 559 spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
560 int xmit_lock_owner; 560 int xmit_lock_owner;
561 /* 561 /*
562 * please use this field instead of dev->trans_start 562 * please use this field instead of dev->trans_start
563 */ 563 */
564 unsigned long trans_start; 564 unsigned long trans_start;
565 565
566 /* 566 /*
567 * Number of TX timeouts for this queue 567 * Number of TX timeouts for this queue
568 * (/sys/class/net/DEV/Q/trans_timeout) 568 * (/sys/class/net/DEV/Q/trans_timeout)
569 */ 569 */
570 unsigned long trans_timeout; 570 unsigned long trans_timeout;
571 571
572 unsigned long state; 572 unsigned long state;
573 573
574 #ifdef CONFIG_BQL 574 #ifdef CONFIG_BQL
575 struct dql dql; 575 struct dql dql;
576 #endif 576 #endif
577 } ____cacheline_aligned_in_smp; 577 } ____cacheline_aligned_in_smp;
578 578
579 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) 579 static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
580 { 580 {
581 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 581 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
582 return q->numa_node; 582 return q->numa_node;
583 #else 583 #else
584 return NUMA_NO_NODE; 584 return NUMA_NO_NODE;
585 #endif 585 #endif
586 } 586 }
587 587
588 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) 588 static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
589 { 589 {
590 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA) 590 #if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
591 q->numa_node = node; 591 q->numa_node = node;
592 #endif 592 #endif
593 } 593 }
594 594
595 #ifdef CONFIG_RPS 595 #ifdef CONFIG_RPS
596 /* 596 /*
597 * This structure holds an RPS map which can be of variable length. The 597 * This structure holds an RPS map which can be of variable length. The
598 * map is an array of CPUs. 598 * map is an array of CPUs.
599 */ 599 */
600 struct rps_map { 600 struct rps_map {
601 unsigned int len; 601 unsigned int len;
602 struct rcu_head rcu; 602 struct rcu_head rcu;
603 u16 cpus[0]; 603 u16 cpus[0];
604 }; 604 };
605 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16))) 605 #define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
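
RPS_MAP_SIZE() sizes the flexible cpus[] array, so a map for n CPUs can be allocated in one shot; a sketch (the helper name is hypothetical; padding up to L1_CACHE_BYTES follows the pattern used by the RPS sysfs code):

#include <linux/slab.h>

static struct rps_map *my_alloc_rps_map(unsigned int ncpus)
{
	struct rps_map *map;

	map = kzalloc(max_t(unsigned int, RPS_MAP_SIZE(ncpus),
			    L1_CACHE_BYTES), GFP_KERNEL);
	if (map)
		map->len = ncpus;
	return map;
}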
606 606
607 /* 607 /*
608 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the 608 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
609 * tail pointer for that CPU's input queue at the time of last enqueue, and 609 * tail pointer for that CPU's input queue at the time of last enqueue, and
610 * a hardware filter index. 610 * a hardware filter index.
611 */ 611 */
612 struct rps_dev_flow { 612 struct rps_dev_flow {
613 u16 cpu; 613 u16 cpu;
614 u16 filter; 614 u16 filter;
615 unsigned int last_qtail; 615 unsigned int last_qtail;
616 }; 616 };
617 #define RPS_NO_FILTER 0xffff 617 #define RPS_NO_FILTER 0xffff
618 618
619 /* 619 /*
620 * The rps_dev_flow_table structure contains a table of flow mappings. 620 * The rps_dev_flow_table structure contains a table of flow mappings.
621 */ 621 */
622 struct rps_dev_flow_table { 622 struct rps_dev_flow_table {
623 unsigned int mask; 623 unsigned int mask;
624 struct rcu_head rcu; 624 struct rcu_head rcu;
625 struct rps_dev_flow flows[0]; 625 struct rps_dev_flow flows[0];
626 }; 626 };
627 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \ 627 #define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
628 ((_num) * sizeof(struct rps_dev_flow))) 628 ((_num) * sizeof(struct rps_dev_flow)))
629 629
630 /* 630 /*
631 * The rps_sock_flow_table contains mappings of flows to the last CPU 631 * The rps_sock_flow_table contains mappings of flows to the last CPU
632 * on which they were processed by the application (set in recvmsg). 632 * on which they were processed by the application (set in recvmsg).
633 */ 633 */
634 struct rps_sock_flow_table { 634 struct rps_sock_flow_table {
635 unsigned int mask; 635 unsigned int mask;
636 u16 ents[0]; 636 u16 ents[0];
637 }; 637 };
638 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \ 638 #define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
639 ((_num) * sizeof(u16))) 639 ((_num) * sizeof(u16)))
640 640
641 #define RPS_NO_CPU 0xffff 641 #define RPS_NO_CPU 0xffff
642 642
643 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, 643 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
644 u32 hash) 644 u32 hash)
645 { 645 {
646 if (table && hash) { 646 if (table && hash) {
647 unsigned int cpu, index = hash & table->mask; 647 unsigned int cpu, index = hash & table->mask;
648 648
649 /* We only give a hint, preemption can change cpu under us */ 649 /* We only give a hint, preemption can change cpu under us */
650 cpu = raw_smp_processor_id(); 650 cpu = raw_smp_processor_id();
651 651
652 if (table->ents[index] != cpu) 652 if (table->ents[index] != cpu)
653 table->ents[index] = cpu; 653 table->ents[index] = cpu;
654 } 654 }
655 } 655 }
656 656
657 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, 657 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
658 u32 hash) 658 u32 hash)
659 { 659 {
660 if (table && hash) 660 if (table && hash)
661 table->ents[hash & table->mask] = RPS_NO_CPU; 661 table->ents[hash & table->mask] = RPS_NO_CPU;
662 } 662 }
663 663
664 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; 664 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
665 665
666 #ifdef CONFIG_RFS_ACCEL 666 #ifdef CONFIG_RFS_ACCEL
667 extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 667 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id,
668 u32 flow_id, u16 filter_id); 668 u16 filter_id);
669 #endif 669 #endif
670 670
671 /* This structure contains an instance of an RX queue. */ 671 /* This structure contains an instance of an RX queue. */
672 struct netdev_rx_queue { 672 struct netdev_rx_queue {
673 struct rps_map __rcu *rps_map; 673 struct rps_map __rcu *rps_map;
674 struct rps_dev_flow_table __rcu *rps_flow_table; 674 struct rps_dev_flow_table __rcu *rps_flow_table;
675 struct kobject kobj; 675 struct kobject kobj;
676 struct net_device *dev; 676 struct net_device *dev;
677 } ____cacheline_aligned_in_smp; 677 } ____cacheline_aligned_in_smp;
678 #endif /* CONFIG_RPS */ 678 #endif /* CONFIG_RPS */
679 679
680 #ifdef CONFIG_XPS 680 #ifdef CONFIG_XPS
681 /* 681 /*
682 * This structure holds an XPS map which can be of variable length. The 682 * This structure holds an XPS map which can be of variable length. The
683 * map is an array of queues. 683 * map is an array of queues.
684 */ 684 */
685 struct xps_map { 685 struct xps_map {
686 unsigned int len; 686 unsigned int len;
687 unsigned int alloc_len; 687 unsigned int alloc_len;
688 struct rcu_head rcu; 688 struct rcu_head rcu;
689 u16 queues[0]; 689 u16 queues[0];
690 }; 690 };
691 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16))) 691 #define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + ((_num) * sizeof(u16)))
692 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \ 692 #define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map)) \
693 / sizeof(u16)) 693 / sizeof(u16))
694 694
695 /* 695 /*
696 * This structure holds all XPS maps for device. Maps are indexed by CPU. 696 * This structure holds all XPS maps for device. Maps are indexed by CPU.
697 */ 697 */
698 struct xps_dev_maps { 698 struct xps_dev_maps {
699 struct rcu_head rcu; 699 struct rcu_head rcu;
700 struct xps_map __rcu *cpu_map[0]; 700 struct xps_map __rcu *cpu_map[0];
701 }; 701 };
702 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \ 702 #define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) + \
703 (nr_cpu_ids * sizeof(struct xps_map *))) 703 (nr_cpu_ids * sizeof(struct xps_map *)))
704 #endif /* CONFIG_XPS */ 704 #endif /* CONFIG_XPS */
705 705
706 #define TC_MAX_QUEUE 16 706 #define TC_MAX_QUEUE 16
707 #define TC_BITMASK 15 707 #define TC_BITMASK 15
708 /* HW offloaded queuing disciplines txq count and offset maps */ 708 /* HW offloaded queuing disciplines txq count and offset maps */
709 struct netdev_tc_txq { 709 struct netdev_tc_txq {
710 u16 count; 710 u16 count;
711 u16 offset; 711 u16 offset;
712 }; 712 };
713 713
714 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 714 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
715 /* 715 /*
716 * This structure is to hold information about the device 716 * This structure is to hold information about the device
717 * configured to run FCoE protocol stack. 717 * configured to run FCoE protocol stack.
718 */ 718 */
719 struct netdev_fcoe_hbainfo { 719 struct netdev_fcoe_hbainfo {
720 char manufacturer[64]; 720 char manufacturer[64];
721 char serial_number[64]; 721 char serial_number[64];
722 char hardware_version[64]; 722 char hardware_version[64];
723 char driver_version[64]; 723 char driver_version[64];
724 char optionrom_version[64]; 724 char optionrom_version[64];
725 char firmware_version[64]; 725 char firmware_version[64];
726 char model[256]; 726 char model[256];
727 char model_description[256]; 727 char model_description[256];
728 }; 728 };
729 #endif 729 #endif
730 730
731 #define MAX_PHYS_PORT_ID_LEN 32 731 #define MAX_PHYS_PORT_ID_LEN 32
732 732
733 /* This structure holds a unique identifier to identify the 733 /* This structure holds a unique identifier to identify the
734 * physical port used by a netdevice. 734 * physical port used by a netdevice.
735 */ 735 */
736 struct netdev_phys_port_id { 736 struct netdev_phys_port_id {
737 unsigned char id[MAX_PHYS_PORT_ID_LEN]; 737 unsigned char id[MAX_PHYS_PORT_ID_LEN];
738 unsigned char id_len; 738 unsigned char id_len;
739 }; 739 };
740 740
741 /* 741 /*
742 * This structure defines the management hooks for network devices. 742 * This structure defines the management hooks for network devices.
743 * The following hooks can be defined; unless noted otherwise, they are 743 * The following hooks can be defined; unless noted otherwise, they are
744 * optional and can be filled with a null pointer. 744 * optional and can be filled with a null pointer.
745 * 745 *
746 * int (*ndo_init)(struct net_device *dev); 746 * int (*ndo_init)(struct net_device *dev);
747 * This function is called once when a network device is registered. 747 * This function is called once when a network device is registered.
748 * The network device can use this for any late stage initialization 748 * The network device can use this for any late stage initialization
749 * or semantic validation. It can fail with an error code which will 749 * or semantic validation. It can fail with an error code which will
750 * be propagated back to register_netdev 750 * be propagated back to register_netdev
751 * 751 *
752 * void (*ndo_uninit)(struct net_device *dev); 752 * void (*ndo_uninit)(struct net_device *dev);
753 * This function is called when device is unregistered or when registration 753 * This function is called when device is unregistered or when registration
754 * fails. It is not called if init fails. 754 * fails. It is not called if init fails.
755 * 755 *
756 * int (*ndo_open)(struct net_device *dev); 756 * int (*ndo_open)(struct net_device *dev);
757 * This function is called when a network device transitions to the up 757 * This function is called when a network device transitions to the up
758 * state. 758 * state.
759 * 759 *
760 * int (*ndo_stop)(struct net_device *dev); 760 * int (*ndo_stop)(struct net_device *dev);
761 * This function is called when a network device transitions to the down 761 * This function is called when a network device transitions to the down
762 * state. 762 * state.
763 * 763 *
764 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 764 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
765 * struct net_device *dev); 765 * struct net_device *dev);
766 * Called when a packet needs to be transmitted. 766 * Called when a packet needs to be transmitted.
767 * Must return NETDEV_TX_OK or NETDEV_TX_BUSY. 767 * Must return NETDEV_TX_OK or NETDEV_TX_BUSY.
768 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) 768 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
769 * Required; cannot be NULL. 769 * Required; cannot be NULL.
770 * 770 *
771 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb); 771 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
772 * Called to decide which queue to use when the device supports multiple 772 * Called to decide which queue to use when the device supports multiple
773 * transmit queues. 773 * transmit queues.
774 * 774 *
775 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags); 775 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
776 * This function is called to allow the device receiver to make 776 * This function is called to allow the device receiver to make
777 * changes to configuration when multicast or promiscuous mode is enabled. 777 * changes to configuration when multicast or promiscuous mode is enabled.
778 * 778 *
779 * void (*ndo_set_rx_mode)(struct net_device *dev); 779 * void (*ndo_set_rx_mode)(struct net_device *dev);
780 * This function is called when the device changes its address list filtering. 780 * This function is called when the device changes its address list filtering.
781 * If driver handles unicast address filtering, it should set 781 * If driver handles unicast address filtering, it should set
782 * IFF_UNICAST_FLT to its priv_flags. 782 * IFF_UNICAST_FLT to its priv_flags.
783 * 783 *
784 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); 784 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
785 * This function is called when the Media Access Control address 785 * This function is called when the Media Access Control address
786 * needs to be changed. If this interface is not defined, the 786 * needs to be changed. If this interface is not defined, the
787 * MAC address cannot be changed. 787 * MAC address cannot be changed.
788 * 788 *
789 * int (*ndo_validate_addr)(struct net_device *dev); 789 * int (*ndo_validate_addr)(struct net_device *dev);
790 * Test if Media Access Control address is valid for the device. 790 * Test if Media Access Control address is valid for the device.
791 * 791 *
792 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 792 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
793 * Called when a user requests an ioctl which can't be handled by 793 * Called when a user requests an ioctl which can't be handled by
794 * the generic interface code. If not defined, ioctls return a 794 * the generic interface code. If not defined, ioctls return a
795 * not supported error code. 795 * not supported error code.
796 * 796 *
797 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); 797 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
798 * Used to set a network device's bus interface parameters. This interface 798 * Used to set a network device's bus interface parameters. This interface
799 * is retained for legacy reasons; new devices should use the bus 799 * is retained for legacy reasons; new devices should use the bus
800 * interface (PCI) for low level management. 800 * interface (PCI) for low level management.
801 * 801 *
802 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu); 802 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
803 * Called when a user wants to change the Maximum Transmission Unit 803 * Called when a user wants to change the Maximum Transmission Unit
804 * of a device. If not defined, any request to change the MTU 804 * of a device. If not defined, any request to change the MTU
805 * will return an error. 805 * will return an error.
806 * 806 *
807 * void (*ndo_tx_timeout)(struct net_device *dev); 807 * void (*ndo_tx_timeout)(struct net_device *dev);
808 * Callback used when the transmitter has not made any progress 808 * Callback used when the transmitter has not made any progress
809 * for dev->watchdog ticks. 809 * for dev->watchdog ticks.
810 * 810 *
811 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 811 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
812 * struct rtnl_link_stats64 *storage); 812 * struct rtnl_link_stats64 *storage);
813 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 813 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
814 * Called when a user wants to get the network device usage 814 * Called when a user wants to get the network device usage
815 * statistics. Drivers must do one of the following: 815 * statistics. Drivers must do one of the following:
816 * 1. Define @ndo_get_stats64 to fill in a zero-initialised 816 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
817 * rtnl_link_stats64 structure passed by the caller. 817 * rtnl_link_stats64 structure passed by the caller.
818 * 2. Define @ndo_get_stats to update a net_device_stats structure 818 * 2. Define @ndo_get_stats to update a net_device_stats structure
819 * (which should normally be dev->stats) and return a pointer to 819 * (which should normally be dev->stats) and return a pointer to
820 * it. The structure may be changed asynchronously only if each 820 * it. The structure may be changed asynchronously only if each
821 * field is written atomically. 821 * field is written atomically.
822 * 3. Update dev->stats asynchronously and atomically, and define 822 * 3. Update dev->stats asynchronously and atomically, and define
823 * neither operation. 823 * neither operation.
824 * 824 *
825 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); 825 * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
826 * If the device supports VLAN filtering, this function is called when a 826 * If the device supports VLAN filtering, this function is called when a
827 * VLAN id is registered. 827 * VLAN id is registered.
828 * 828 *
829 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); 829 * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
830 * If the device supports VLAN filtering, this function is called when a 830 * If the device supports VLAN filtering, this function is called when a
831 * VLAN id is unregistered. 831 * VLAN id is unregistered.
832 * 832 *
833 * void (*ndo_poll_controller)(struct net_device *dev); 833 * void (*ndo_poll_controller)(struct net_device *dev);
834 * 834 *
835 * SR-IOV management functions. 835 * SR-IOV management functions.
836 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 836 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
837 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); 837 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
838 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); 838 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
839 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting); 839 * int (*ndo_set_vf_spoofchk)(struct net_device *dev, int vf, bool setting);
840 * int (*ndo_get_vf_config)(struct net_device *dev, 840 * int (*ndo_get_vf_config)(struct net_device *dev,
841 * int vf, struct ifla_vf_info *ivf); 841 * int vf, struct ifla_vf_info *ivf);
842 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state); 842 * int (*ndo_set_vf_link_state)(struct net_device *dev, int vf, int link_state);
843 * int (*ndo_set_vf_port)(struct net_device *dev, int vf, 843 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
844 * struct nlattr *port[]); 844 * struct nlattr *port[]);
845 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); 845 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
846 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) 846 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
847 * Called to setup 'tc' number of traffic classes in the net device. This 847 * Called to setup 'tc' number of traffic classes in the net device. This
848 * is always called from the stack with the rtnl lock held and netif tx 848 * is always called from the stack with the rtnl lock held and netif tx
849 * queues stopped. This allows the netdevice to perform queue management 849 * queues stopped. This allows the netdevice to perform queue management
850 * safely. 850 * safely.
851 * 851 *
852 * Fibre Channel over Ethernet (FCoE) offload functions. 852 * Fibre Channel over Ethernet (FCoE) offload functions.
853 * int (*ndo_fcoe_enable)(struct net_device *dev); 853 * int (*ndo_fcoe_enable)(struct net_device *dev);
854 * Called when the FCoE protocol stack wants to start using LLD for FCoE 854 * Called when the FCoE protocol stack wants to start using LLD for FCoE
855 * so the underlying device can perform whatever needed configuration or 855 * so the underlying device can perform whatever needed configuration or
856 * initialization to support acceleration of FCoE traffic. 856 * initialization to support acceleration of FCoE traffic.
857 * 857 *
858 * int (*ndo_fcoe_disable)(struct net_device *dev); 858 * int (*ndo_fcoe_disable)(struct net_device *dev);
859 * Called when the FCoE protocol stack wants to stop using LLD for FCoE 859 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
860 * so the underlying device can perform whatever needed clean-ups to 860 * so the underlying device can perform whatever needed clean-ups to
861 * stop supporting acceleration of FCoE traffic. 861 * stop supporting acceleration of FCoE traffic.
862 * 862 *
863 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, 863 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
864 * struct scatterlist *sgl, unsigned int sgc); 864 * struct scatterlist *sgl, unsigned int sgc);
865 * Called when the FCoE Initiator wants to initialize an I/O that 865 * Called when the FCoE Initiator wants to initialize an I/O that
866 * is a possible candidate for Direct Data Placement (DDP). The LLD can 866 * is a possible candidate for Direct Data Placement (DDP). The LLD can
867 * perform necessary setup and returns 1 to indicate the device is set up 867 * perform necessary setup and returns 1 to indicate the device is set up
868 * successfully to perform DDP on this I/O, otherwise this returns 0. 868 * successfully to perform DDP on this I/O, otherwise this returns 0.
869 * 869 *
870 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); 870 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
871 * Called when the FCoE Initiator/Target is done with the DDPed I/O as 871 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
872 * indicated by the FC exchange id 'xid', so the underlying device can 872 * indicated by the FC exchange id 'xid', so the underlying device can
873 * clean up and reuse resources for later DDP requests. 873 * clean up and reuse resources for later DDP requests.
874 * 874 *
875 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, 875 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
876 * struct scatterlist *sgl, unsigned int sgc); 876 * struct scatterlist *sgl, unsigned int sgc);
877 * Called when the FCoE Target wants to initialize an I/O that 877 * Called when the FCoE Target wants to initialize an I/O that
878 * is a possible candidate for Direct Data Placement (DDP). The LLD can 878 * is a possible candidate for Direct Data Placement (DDP). The LLD can
879 * perform necessary setup and returns 1 to indicate the device is set up 879 * perform necessary setup and returns 1 to indicate the device is set up
880 * successfully to perform DDP on this I/O, otherwise this returns 0. 880 * successfully to perform DDP on this I/O, otherwise this returns 0.
881 * 881 *
882 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 882 * int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
883 * struct netdev_fcoe_hbainfo *hbainfo); 883 * struct netdev_fcoe_hbainfo *hbainfo);
884 * Called when the FCoE Protocol stack wants information on the underlying 884 * Called when the FCoE Protocol stack wants information on the underlying
885 * device. This information is utilized by the FCoE protocol stack to 885 * device. This information is utilized by the FCoE protocol stack to
886 * register attributes with the Fibre Channel management service as per the 886 * register attributes with the Fibre Channel management service as per the
887 * FC-GS Fabric Device Management Information (FDMI) specification. 887 * FC-GS Fabric Device Management Information (FDMI) specification.
888 * 888 *
889 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); 889 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
890 * Called when the underlying device wants to override default World Wide 890 * Called when the underlying device wants to override default World Wide
891 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own 891 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
892 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE 892 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
893 * protocol stack to use. 893 * protocol stack to use.
894 * 894 *
895 * RFS acceleration. 895 * RFS acceleration.
896 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, 896 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
897 * u16 rxq_index, u32 flow_id); 897 * u16 rxq_index, u32 flow_id);
898 * Set hardware filter for RFS. rxq_index is the target queue index; 898 * Set hardware filter for RFS. rxq_index is the target queue index;
899 * flow_id is a flow ID to be passed to rps_may_expire_flow() later. 899 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
900 * Return the filter ID on success, or a negative error code. 900 * Return the filter ID on success, or a negative error code.
901 * 901 *
902 * Slave management functions (for bridge, bonding, etc). 902 * Slave management functions (for bridge, bonding, etc).
903 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); 903 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
904 * Called to make another netdev an underling. 904 * Called to make another netdev an underling.
905 * 905 *
906 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); 906 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
907 * Called to release previously enslaved netdev. 907 * Called to release previously enslaved netdev.
908 * 908 *
909 * Feature/offload setting functions. 909 * Feature/offload setting functions.
910 * netdev_features_t (*ndo_fix_features)(struct net_device *dev, 910 * netdev_features_t (*ndo_fix_features)(struct net_device *dev,
911 * netdev_features_t features); 911 * netdev_features_t features);
912 * Adjusts the requested feature flags according to device-specific 912 * Adjusts the requested feature flags according to device-specific
913 * constraints, and returns the resulting flags. Must not modify 913 * constraints, and returns the resulting flags. Must not modify
914 * the device state. 914 * the device state.
915 * 915 *
916 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features); 916 * int (*ndo_set_features)(struct net_device *dev, netdev_features_t features);
917 * Called to update device configuration to new features. Passed 917 * Called to update device configuration to new features. Passed
918 * feature set might be less than what was returned by ndo_fix_features(). 918 * feature set might be less than what was returned by ndo_fix_features().
919 * Must return >0 or -errno if it changed dev->features itself. 919 * Must return >0 or -errno if it changed dev->features itself.
920 * 920 *
921 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], 921 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[],
922 * struct net_device *dev, 922 * struct net_device *dev,
923 * const unsigned char *addr, u16 flags) 923 * const unsigned char *addr, u16 flags)
924 * Adds an FDB entry to dev for addr. 924 * Adds an FDB entry to dev for addr.
925 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], 925 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[],
926 * struct net_device *dev, 926 * struct net_device *dev,
927 * const unsigned char *addr) 927 * const unsigned char *addr)
928 * Deletes the FDB entry from dev corresponding to addr. 928 * Deletes the FDB entry from dev corresponding to addr.
929 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, 929 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
930 * struct net_device *dev, int idx) 930 * struct net_device *dev, int idx)
931 * Used to add FDB entries to dump requests. Implementers should add 931 * Used to add FDB entries to dump requests. Implementers should add
932 * entries to skb and update idx with the number of entries. 932 * entries to skb and update idx with the number of entries.
933 * 933 *
934 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh) 934 * int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
935 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq, 935 * int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
936 * struct net_device *dev, u32 filter_mask) 936 * struct net_device *dev, u32 filter_mask)
937 * 937 *
938 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); 938 * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
939 * Called to change device carrier. Soft-devices (like dummy, team, etc) 939 * Called to change device carrier. Soft-devices (like dummy, team, etc)
940 * which do not represent real hardware may define this to allow their 940 * which do not represent real hardware may define this to allow their
941 * userspace components to manage their virtual carrier state. Devices 941 * userspace components to manage their virtual carrier state. Devices
942 * that determine carrier state from physical hardware properties (eg 942 * that determine carrier state from physical hardware properties (eg
943 * network cables) or protocol-dependent mechanisms (eg 943 * network cables) or protocol-dependent mechanisms (eg
944 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function. 944 * USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
945 * 945 *
946 * int (*ndo_get_phys_port_id)(struct net_device *dev, 946 * int (*ndo_get_phys_port_id)(struct net_device *dev,
947 * struct netdev_phys_port_id *ppid); 947 * struct netdev_phys_port_id *ppid);
948 * Called to get the ID of the physical port of this device. If the driver 948 * Called to get the ID of the physical port of this device. If the driver
949 * does not implement this, it is assumed that the hw is not able to have 949 * does not implement this, it is assumed that the hw is not able to have
950 * multiple net devices on a single physical port. 950 * multiple net devices on a single physical port.
951 * 951 *
952 * void (*ndo_add_vxlan_port)(struct net_device *dev, 952 * void (*ndo_add_vxlan_port)(struct net_device *dev,
953 * sa_family_t sa_family, __be16 port); 953 * sa_family_t sa_family, __be16 port);
954 * Called by vxlan to notify a driver about the UDP port and socket 954 * Called by vxlan to notify a driver about the UDP port and socket
955 * address family that vxlan is listening to. It is called only when 955 * address family that vxlan is listening to. It is called only when
956 * a new port starts listening. The operation is protected by the 956 * a new port starts listening. The operation is protected by the
957 * vxlan_net->sock_lock. 957 * vxlan_net->sock_lock.
958 * 958 *
959 * void (*ndo_del_vxlan_port)(struct net_device *dev, 959 * void (*ndo_del_vxlan_port)(struct net_device *dev,
960 * sa_family_t sa_family, __be16 port); 960 * sa_family_t sa_family, __be16 port);
961 * Called by vxlan to notify the driver about a UDP port and socket 961 * Called by vxlan to notify the driver about a UDP port and socket
962 * address family that vxlan is not listening to anymore. The operation 962 * address family that vxlan is not listening to anymore. The operation
963 * is protected by the vxlan_net->sock_lock. 963 * is protected by the vxlan_net->sock_lock.
964 */ 964 */
965 struct net_device_ops { 965 struct net_device_ops {
966 int (*ndo_init)(struct net_device *dev); 966 int (*ndo_init)(struct net_device *dev);
967 void (*ndo_uninit)(struct net_device *dev); 967 void (*ndo_uninit)(struct net_device *dev);
968 int (*ndo_open)(struct net_device *dev); 968 int (*ndo_open)(struct net_device *dev);
969 int (*ndo_stop)(struct net_device *dev); 969 int (*ndo_stop)(struct net_device *dev);
970 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, 970 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
971 struct net_device *dev); 971 struct net_device *dev);
972 u16 (*ndo_select_queue)(struct net_device *dev, 972 u16 (*ndo_select_queue)(struct net_device *dev,
973 struct sk_buff *skb); 973 struct sk_buff *skb);
974 void (*ndo_change_rx_flags)(struct net_device *dev, 974 void (*ndo_change_rx_flags)(struct net_device *dev,
975 int flags); 975 int flags);
976 void (*ndo_set_rx_mode)(struct net_device *dev); 976 void (*ndo_set_rx_mode)(struct net_device *dev);
977 int (*ndo_set_mac_address)(struct net_device *dev, 977 int (*ndo_set_mac_address)(struct net_device *dev,
978 void *addr); 978 void *addr);
979 int (*ndo_validate_addr)(struct net_device *dev); 979 int (*ndo_validate_addr)(struct net_device *dev);
980 int (*ndo_do_ioctl)(struct net_device *dev, 980 int (*ndo_do_ioctl)(struct net_device *dev,
981 struct ifreq *ifr, int cmd); 981 struct ifreq *ifr, int cmd);
982 int (*ndo_set_config)(struct net_device *dev, 982 int (*ndo_set_config)(struct net_device *dev,
983 struct ifmap *map); 983 struct ifmap *map);
984 int (*ndo_change_mtu)(struct net_device *dev, 984 int (*ndo_change_mtu)(struct net_device *dev,
985 int new_mtu); 985 int new_mtu);
986 int (*ndo_neigh_setup)(struct net_device *dev, 986 int (*ndo_neigh_setup)(struct net_device *dev,
987 struct neigh_parms *); 987 struct neigh_parms *);
988 void (*ndo_tx_timeout) (struct net_device *dev); 988 void (*ndo_tx_timeout) (struct net_device *dev);
989 989
990 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 990 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
991 struct rtnl_link_stats64 *storage); 991 struct rtnl_link_stats64 *storage);
992 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 992 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
993 993
994 int (*ndo_vlan_rx_add_vid)(struct net_device *dev, 994 int (*ndo_vlan_rx_add_vid)(struct net_device *dev,
995 __be16 proto, u16 vid); 995 __be16 proto, u16 vid);
996 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, 996 int (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
997 __be16 proto, u16 vid); 997 __be16 proto, u16 vid);
998 #ifdef CONFIG_NET_POLL_CONTROLLER 998 #ifdef CONFIG_NET_POLL_CONTROLLER
999 void (*ndo_poll_controller)(struct net_device *dev); 999 void (*ndo_poll_controller)(struct net_device *dev);
1000 int (*ndo_netpoll_setup)(struct net_device *dev, 1000 int (*ndo_netpoll_setup)(struct net_device *dev,
1001 struct netpoll_info *info, 1001 struct netpoll_info *info,
1002 gfp_t gfp); 1002 gfp_t gfp);
1003 void (*ndo_netpoll_cleanup)(struct net_device *dev); 1003 void (*ndo_netpoll_cleanup)(struct net_device *dev);
1004 #endif 1004 #endif
1005 #ifdef CONFIG_NET_RX_BUSY_POLL 1005 #ifdef CONFIG_NET_RX_BUSY_POLL
1006 int (*ndo_busy_poll)(struct napi_struct *dev); 1006 int (*ndo_busy_poll)(struct napi_struct *dev);
1007 #endif 1007 #endif
1008 int (*ndo_set_vf_mac)(struct net_device *dev, 1008 int (*ndo_set_vf_mac)(struct net_device *dev,
1009 int queue, u8 *mac); 1009 int queue, u8 *mac);
1010 int (*ndo_set_vf_vlan)(struct net_device *dev, 1010 int (*ndo_set_vf_vlan)(struct net_device *dev,
1011 int queue, u16 vlan, u8 qos); 1011 int queue, u16 vlan, u8 qos);
1012 int (*ndo_set_vf_tx_rate)(struct net_device *dev, 1012 int (*ndo_set_vf_tx_rate)(struct net_device *dev,
1013 int vf, int rate); 1013 int vf, int rate);
1014 int (*ndo_set_vf_spoofchk)(struct net_device *dev, 1014 int (*ndo_set_vf_spoofchk)(struct net_device *dev,
1015 int vf, bool setting); 1015 int vf, bool setting);
1016 int (*ndo_get_vf_config)(struct net_device *dev, 1016 int (*ndo_get_vf_config)(struct net_device *dev,
1017 int vf, 1017 int vf,
1018 struct ifla_vf_info *ivf); 1018 struct ifla_vf_info *ivf);
1019 int (*ndo_set_vf_link_state)(struct net_device *dev, 1019 int (*ndo_set_vf_link_state)(struct net_device *dev,
1020 int vf, int link_state); 1020 int vf, int link_state);
1021 int (*ndo_set_vf_port)(struct net_device *dev, 1021 int (*ndo_set_vf_port)(struct net_device *dev,
1022 int vf, 1022 int vf,
1023 struct nlattr *port[]); 1023 struct nlattr *port[]);
1024 int (*ndo_get_vf_port)(struct net_device *dev, 1024 int (*ndo_get_vf_port)(struct net_device *dev,
1025 int vf, struct sk_buff *skb); 1025 int vf, struct sk_buff *skb);
1026 int (*ndo_setup_tc)(struct net_device *dev, u8 tc); 1026 int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
1027 #if IS_ENABLED(CONFIG_FCOE) 1027 #if IS_ENABLED(CONFIG_FCOE)
1028 int (*ndo_fcoe_enable)(struct net_device *dev); 1028 int (*ndo_fcoe_enable)(struct net_device *dev);
1029 int (*ndo_fcoe_disable)(struct net_device *dev); 1029 int (*ndo_fcoe_disable)(struct net_device *dev);
1030 int (*ndo_fcoe_ddp_setup)(struct net_device *dev, 1030 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
1031 u16 xid, 1031 u16 xid,
1032 struct scatterlist *sgl, 1032 struct scatterlist *sgl,
1033 unsigned int sgc); 1033 unsigned int sgc);
1034 int (*ndo_fcoe_ddp_done)(struct net_device *dev, 1034 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
1035 u16 xid); 1035 u16 xid);
1036 int (*ndo_fcoe_ddp_target)(struct net_device *dev, 1036 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
1037 u16 xid, 1037 u16 xid,
1038 struct scatterlist *sgl, 1038 struct scatterlist *sgl,
1039 unsigned int sgc); 1039 unsigned int sgc);
1040 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev, 1040 int (*ndo_fcoe_get_hbainfo)(struct net_device *dev,
1041 struct netdev_fcoe_hbainfo *hbainfo); 1041 struct netdev_fcoe_hbainfo *hbainfo);
1042 #endif 1042 #endif
1043 1043
1044 #if IS_ENABLED(CONFIG_LIBFCOE) 1044 #if IS_ENABLED(CONFIG_LIBFCOE)
1045 #define NETDEV_FCOE_WWNN 0 1045 #define NETDEV_FCOE_WWNN 0
1046 #define NETDEV_FCOE_WWPN 1 1046 #define NETDEV_FCOE_WWPN 1
1047 int (*ndo_fcoe_get_wwn)(struct net_device *dev, 1047 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
1048 u64 *wwn, int type); 1048 u64 *wwn, int type);
1049 #endif 1049 #endif
1050 1050
1051 #ifdef CONFIG_RFS_ACCEL 1051 #ifdef CONFIG_RFS_ACCEL
1052 int (*ndo_rx_flow_steer)(struct net_device *dev, 1052 int (*ndo_rx_flow_steer)(struct net_device *dev,
1053 const struct sk_buff *skb, 1053 const struct sk_buff *skb,
1054 u16 rxq_index, 1054 u16 rxq_index,
1055 u32 flow_id); 1055 u32 flow_id);
1056 #endif 1056 #endif
1057 int (*ndo_add_slave)(struct net_device *dev, 1057 int (*ndo_add_slave)(struct net_device *dev,
1058 struct net_device *slave_dev); 1058 struct net_device *slave_dev);
1059 int (*ndo_del_slave)(struct net_device *dev, 1059 int (*ndo_del_slave)(struct net_device *dev,
1060 struct net_device *slave_dev); 1060 struct net_device *slave_dev);
1061 netdev_features_t (*ndo_fix_features)(struct net_device *dev, 1061 netdev_features_t (*ndo_fix_features)(struct net_device *dev,
1062 netdev_features_t features); 1062 netdev_features_t features);
1063 int (*ndo_set_features)(struct net_device *dev, 1063 int (*ndo_set_features)(struct net_device *dev,
1064 netdev_features_t features); 1064 netdev_features_t features);
1065 int (*ndo_neigh_construct)(struct neighbour *n); 1065 int (*ndo_neigh_construct)(struct neighbour *n);
1066 void (*ndo_neigh_destroy)(struct neighbour *n); 1066 void (*ndo_neigh_destroy)(struct neighbour *n);
1067 1067
1068 int (*ndo_fdb_add)(struct ndmsg *ndm, 1068 int (*ndo_fdb_add)(struct ndmsg *ndm,
1069 struct nlattr *tb[], 1069 struct nlattr *tb[],
1070 struct net_device *dev, 1070 struct net_device *dev,
1071 const unsigned char *addr, 1071 const unsigned char *addr,
1072 u16 flags); 1072 u16 flags);
1073 int (*ndo_fdb_del)(struct ndmsg *ndm, 1073 int (*ndo_fdb_del)(struct ndmsg *ndm,
1074 struct nlattr *tb[], 1074 struct nlattr *tb[],
1075 struct net_device *dev, 1075 struct net_device *dev,
1076 const unsigned char *addr); 1076 const unsigned char *addr);
1077 int (*ndo_fdb_dump)(struct sk_buff *skb, 1077 int (*ndo_fdb_dump)(struct sk_buff *skb,
1078 struct netlink_callback *cb, 1078 struct netlink_callback *cb,
1079 struct net_device *dev, 1079 struct net_device *dev,
1080 int idx); 1080 int idx);
1081 1081
1082 int (*ndo_bridge_setlink)(struct net_device *dev, 1082 int (*ndo_bridge_setlink)(struct net_device *dev,
1083 struct nlmsghdr *nlh); 1083 struct nlmsghdr *nlh);
1084 int (*ndo_bridge_getlink)(struct sk_buff *skb, 1084 int (*ndo_bridge_getlink)(struct sk_buff *skb,
1085 u32 pid, u32 seq, 1085 u32 pid, u32 seq,
1086 struct net_device *dev, 1086 struct net_device *dev,
1087 u32 filter_mask); 1087 u32 filter_mask);
1088 int (*ndo_bridge_dellink)(struct net_device *dev, 1088 int (*ndo_bridge_dellink)(struct net_device *dev,
1089 struct nlmsghdr *nlh); 1089 struct nlmsghdr *nlh);
1090 int (*ndo_change_carrier)(struct net_device *dev, 1090 int (*ndo_change_carrier)(struct net_device *dev,
1091 bool new_carrier); 1091 bool new_carrier);
1092 int (*ndo_get_phys_port_id)(struct net_device *dev, 1092 int (*ndo_get_phys_port_id)(struct net_device *dev,
1093 struct netdev_phys_port_id *ppid); 1093 struct netdev_phys_port_id *ppid);
1094 void (*ndo_add_vxlan_port)(struct net_device *dev, 1094 void (*ndo_add_vxlan_port)(struct net_device *dev,
1095 sa_family_t sa_family, 1095 sa_family_t sa_family,
1096 __be16 port); 1096 __be16 port);
1097 void (*ndo_del_vxlan_port)(struct net_device *dev, 1097 void (*ndo_del_vxlan_port)(struct net_device *dev,
1098 sa_family_t sa_family, 1098 sa_family_t sa_family,
1099 __be16 port); 1099 __be16 port);
1100 }; 1100 };
1101 1101
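[Editor's note, not part of this commit: the ops table above is easiest to read
next to a consumer. Below is a minimal, hypothetical sketch of a driver wiring
up the hooks every real driver provides; every dummy_-prefixed name is
illustrative, and the eth_* helpers are the stock Ethernet ones.]

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	static int dummy_net_open(struct net_device *dev)
	{
		netif_start_queue(dev);	/* let the stack call ndo_start_xmit */
		return 0;
	}

	static int dummy_net_stop(struct net_device *dev)
	{
		netif_stop_queue(dev);
		return 0;
	}

	static netdev_tx_t dummy_net_xmit(struct sk_buff *skb,
					  struct net_device *dev)
	{
		/* a real driver would hand the skb to hardware here */
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	static const struct net_device_ops dummy_netdev_ops = {
		.ndo_open		= dummy_net_open,
		.ndo_stop		= dummy_net_stop,
		.ndo_start_xmit		= dummy_net_xmit,
		.ndo_validate_addr	= eth_validate_addr,
		.ndo_set_mac_address	= eth_mac_addr,
		/* every other op may simply stay NULL */
	};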
1102 /* 1102 /*
1103 * The DEVICE structure. 1103 * The DEVICE structure.
1104 * Actually, this whole structure is a big mistake. It mixes I/O 1104 * Actually, this whole structure is a big mistake. It mixes I/O
1105 * data with strictly "high-level" data, and it has to know about 1105 * data with strictly "high-level" data, and it has to know about
1106 * almost every data structure used in the INET module. 1106 * almost every data structure used in the INET module.
1107 * 1107 *
1108 * FIXME: cleanup struct net_device such that network protocol info 1108 * FIXME: cleanup struct net_device such that network protocol info
1109 * moves out. 1109 * moves out.
1110 */ 1110 */
1111 1111
1112 struct net_device { 1112 struct net_device {
1113 1113
1114 /* 1114 /*
1115 * This is the first field of the "visible" part of this structure 1115 * This is the first field of the "visible" part of this structure
1116 * (i.e. as seen by users in the "Space.c" file). It is the name 1116 * (i.e. as seen by users in the "Space.c" file). It is the name
1117 * of the interface. 1117 * of the interface.
1118 */ 1118 */
1119 char name[IFNAMSIZ]; 1119 char name[IFNAMSIZ];
1120 1120
1121 /* device name hash chain, please keep it close to name[] */ 1121 /* device name hash chain, please keep it close to name[] */
1122 struct hlist_node name_hlist; 1122 struct hlist_node name_hlist;
1123 1123
1124 /* snmp alias */ 1124 /* snmp alias */
1125 char *ifalias; 1125 char *ifalias;
1126 1126
1127 /* 1127 /*
1128 * I/O specific fields 1128 * I/O specific fields
1129 * FIXME: Merge these and struct ifmap into one 1129 * FIXME: Merge these and struct ifmap into one
1130 */ 1130 */
1131 unsigned long mem_end; /* shared mem end */ 1131 unsigned long mem_end; /* shared mem end */
1132 unsigned long mem_start; /* shared mem start */ 1132 unsigned long mem_start; /* shared mem start */
1133 unsigned long base_addr; /* device I/O address */ 1133 unsigned long base_addr; /* device I/O address */
1134 unsigned int irq; /* device IRQ number */ 1134 unsigned int irq; /* device IRQ number */
1135 1135
1136 /* 1136 /*
1137 * Some hardware also needs these fields, but they are not 1137 * Some hardware also needs these fields, but they are not
1138 * part of the usual set specified in Space.c. 1138 * part of the usual set specified in Space.c.
1139 */ 1139 */
1140 1140
1141 unsigned long state; 1141 unsigned long state;
1142 1142
1143 struct list_head dev_list; 1143 struct list_head dev_list;
1144 struct list_head napi_list; 1144 struct list_head napi_list;
1145 struct list_head unreg_list; 1145 struct list_head unreg_list;
1146 1146
1147 /* directly linked devices, like slaves for bonding */ 1147 /* directly linked devices, like slaves for bonding */
1148 struct { 1148 struct {
1149 struct list_head upper; 1149 struct list_head upper;
1150 struct list_head lower; 1150 struct list_head lower;
1151 } adj_list; 1151 } adj_list;
1152 1152
1153 /* all linked devices, *including* neighbours */ 1153 /* all linked devices, *including* neighbours */
1154 struct { 1154 struct {
1155 struct list_head upper; 1155 struct list_head upper;
1156 struct list_head lower; 1156 struct list_head lower;
1157 } all_adj_list; 1157 } all_adj_list;
1158 1158
1159 1159
1160 /* currently active device features */ 1160 /* currently active device features */
1161 netdev_features_t features; 1161 netdev_features_t features;
1162 /* user-changeable features */ 1162 /* user-changeable features */
1163 netdev_features_t hw_features; 1163 netdev_features_t hw_features;
1164 /* user-requested features */ 1164 /* user-requested features */
1165 netdev_features_t wanted_features; 1165 netdev_features_t wanted_features;
1166 /* mask of features inheritable by VLAN devices */ 1166 /* mask of features inheritable by VLAN devices */
1167 netdev_features_t vlan_features; 1167 netdev_features_t vlan_features;
1168 /* mask of features inherited by encapsulating devices 1168 /* mask of features inherited by encapsulating devices
1169 * This field indicates what encapsulation offloads 1169 * This field indicates what encapsulation offloads
1170 * the hardware is capable of doing, and drivers will 1170 * the hardware is capable of doing, and drivers will
1171 * need to set them appropriately. 1171 * need to set them appropriately.
1172 */ 1172 */
1173 netdev_features_t hw_enc_features; 1173 netdev_features_t hw_enc_features;
1174 * mask of features inheritable by MPLS */ 1174 * mask of features inheritable by MPLS */
1175 netdev_features_t mpls_features; 1175 netdev_features_t mpls_features;
1176 1176
1177 /* Interface index. Unique device identifier */ 1177 /* Interface index. Unique device identifier */
1178 int ifindex; 1178 int ifindex;
1179 int iflink; 1179 int iflink;
1180 1180
1181 struct net_device_stats stats; 1181 struct net_device_stats stats;
1182 atomic_long_t rx_dropped; /* dropped packets by core network 1182 atomic_long_t rx_dropped; /* dropped packets by core network
1183 * Do not use this in drivers. 1183 * Do not use this in drivers.
1184 */ 1184 */
1185 1185
1186 #ifdef CONFIG_WIRELESS_EXT 1186 #ifdef CONFIG_WIRELESS_EXT
1187 /* List of functions to handle Wireless Extensions (instead of ioctl). 1187 /* List of functions to handle Wireless Extensions (instead of ioctl).
1188 * See <net/iw_handler.h> for details. Jean II */ 1188 * See <net/iw_handler.h> for details. Jean II */
1189 const struct iw_handler_def * wireless_handlers; 1189 const struct iw_handler_def * wireless_handlers;
1190 /* Instance data managed by the core of Wireless Extensions. */ 1190 /* Instance data managed by the core of Wireless Extensions. */
1191 struct iw_public_data * wireless_data; 1191 struct iw_public_data * wireless_data;
1192 #endif 1192 #endif
1193 /* Management operations */ 1193 /* Management operations */
1194 const struct net_device_ops *netdev_ops; 1194 const struct net_device_ops *netdev_ops;
1195 const struct ethtool_ops *ethtool_ops; 1195 const struct ethtool_ops *ethtool_ops;
1196 1196
1197 /* Hardware header description */ 1197 /* Hardware header description */
1198 const struct header_ops *header_ops; 1198 const struct header_ops *header_ops;
1199 1199
1200 unsigned int flags; /* interface flags (a la BSD) */ 1200 unsigned int flags; /* interface flags (a la BSD) */
1201 unsigned int priv_flags; /* Like 'flags' but invisible to userspace. 1201 unsigned int priv_flags; /* Like 'flags' but invisible to userspace.
1202 * See if.h for definitions. */ 1202 * See if.h for definitions. */
1203 unsigned short gflags; 1203 unsigned short gflags;
1204 unsigned short padded; /* How much padding added by alloc_netdev() */ 1204 unsigned short padded; /* How much padding added by alloc_netdev() */
1205 1205
1206 unsigned char operstate; /* RFC2863 operstate */ 1206 unsigned char operstate; /* RFC2863 operstate */
1207 unsigned char link_mode; /* mapping policy to operstate */ 1207 unsigned char link_mode; /* mapping policy to operstate */
1208 1208
1209 unsigned char if_port; /* Selectable AUI, TP,..*/ 1209 unsigned char if_port; /* Selectable AUI, TP,..*/
1210 unsigned char dma; /* DMA channel */ 1210 unsigned char dma; /* DMA channel */
1211 1211
1212 unsigned int mtu; /* interface MTU value */ 1212 unsigned int mtu; /* interface MTU value */
1213 unsigned short type; /* interface hardware type */ 1213 unsigned short type; /* interface hardware type */
1214 unsigned short hard_header_len; /* hardware hdr length */ 1214 unsigned short hard_header_len; /* hardware hdr length */
1215 1215
1216 /* extra head- and tailroom the hardware may need, but this cannot 1216 /* extra head- and tailroom the hardware may need, but this cannot
1217 * be guaranteed in all cases, especially for tailroom. Some cases also use 1217 * be guaranteed in all cases, especially for tailroom. Some cases also use
1218 * LL_MAX_HEADER instead to allocate the skb. 1218 * LL_MAX_HEADER instead to allocate the skb.
1219 */ 1219 */
1220 unsigned short needed_headroom; 1220 unsigned short needed_headroom;
1221 unsigned short needed_tailroom; 1221 unsigned short needed_tailroom;
1222 1222
1223 /* Interface address info. */ 1223 /* Interface address info. */
1224 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 1224 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
1225 unsigned char addr_assign_type; /* hw address assignment type */ 1225 unsigned char addr_assign_type; /* hw address assignment type */
1226 unsigned char addr_len; /* hardware address length */ 1226 unsigned char addr_len; /* hardware address length */
1227 unsigned char neigh_priv_len; 1227 unsigned char neigh_priv_len;
1228 unsigned short dev_id; /* Used to differentiate devices 1228 unsigned short dev_id; /* Used to differentiate devices
1229 * that share the same link 1229 * that share the same link
1230 * layer address 1230 * layer address
1231 */ 1231 */
1232 spinlock_t addr_list_lock; 1232 spinlock_t addr_list_lock;
1233 struct netdev_hw_addr_list uc; /* Unicast mac addresses */ 1233 struct netdev_hw_addr_list uc; /* Unicast mac addresses */
1234 struct netdev_hw_addr_list mc; /* Multicast mac addresses */ 1234 struct netdev_hw_addr_list mc; /* Multicast mac addresses */
1235 struct netdev_hw_addr_list dev_addrs; /* list of device 1235 struct netdev_hw_addr_list dev_addrs; /* list of device
1236 * hw addresses 1236 * hw addresses
1237 */ 1237 */
1238 #ifdef CONFIG_SYSFS 1238 #ifdef CONFIG_SYSFS
1239 struct kset *queues_kset; 1239 struct kset *queues_kset;
1240 #endif 1240 #endif
1241 1241
1242 bool uc_promisc; 1242 bool uc_promisc;
1243 unsigned int promiscuity; 1243 unsigned int promiscuity;
1244 unsigned int allmulti; 1244 unsigned int allmulti;
1245 1245
1246 1246
1247 /* Protocol specific pointers */ 1247 /* Protocol specific pointers */
1248 1248
1249 #if IS_ENABLED(CONFIG_VLAN_8021Q) 1249 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1250 struct vlan_info __rcu *vlan_info; /* VLAN info */ 1250 struct vlan_info __rcu *vlan_info; /* VLAN info */
1251 #endif 1251 #endif
1252 #if IS_ENABLED(CONFIG_NET_DSA) 1252 #if IS_ENABLED(CONFIG_NET_DSA)
1253 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */ 1253 struct dsa_switch_tree *dsa_ptr; /* dsa specific data */
1254 #endif 1254 #endif
1255 void *atalk_ptr; /* AppleTalk link */ 1255 void *atalk_ptr; /* AppleTalk link */
1256 struct in_device __rcu *ip_ptr; /* IPv4 specific data */ 1256 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
1257 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */ 1257 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
1258 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ 1258 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
1259 void *ax25_ptr; /* AX.25 specific data */ 1259 void *ax25_ptr; /* AX.25 specific data */
1260 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, 1260 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
1261 assign before registering */ 1261 assign before registering */
1262 1262
1263 /* 1263 /*
1264 * Cache lines mostly used on receive path (including eth_type_trans()) 1264 * Cache lines mostly used on receive path (including eth_type_trans())
1265 */ 1265 */
1266 unsigned long last_rx; /* Time of last Rx 1266 unsigned long last_rx; /* Time of last Rx
1267 * This should not be set in 1267 * This should not be set in
1268 * drivers, unless really needed, 1268 * drivers, unless really needed,
1269 * because the network stack (bonding) 1269 * because the network stack (bonding)
1270 * uses it if/when necessary, to 1270 * uses it if/when necessary, to
1271 * avoid dirtying this cache line. 1271 * avoid dirtying this cache line.
1272 */ 1272 */
1273 1273
1274 /* Interface address info used in eth_type_trans() */ 1274 /* Interface address info used in eth_type_trans() */
1275 unsigned char *dev_addr; /* hw address, (before bcast 1275 unsigned char *dev_addr; /* hw address, (before bcast
1276 because most packets are 1276 because most packets are
1277 unicast) */ 1277 unicast) */
1278 1278
1279 1279
1280 #ifdef CONFIG_RPS 1280 #ifdef CONFIG_RPS
1281 struct netdev_rx_queue *_rx; 1281 struct netdev_rx_queue *_rx;
1282 1282
1283 /* Number of RX queues allocated at register_netdev() time */ 1283 /* Number of RX queues allocated at register_netdev() time */
1284 unsigned int num_rx_queues; 1284 unsigned int num_rx_queues;
1285 1285
1286 /* Number of RX queues currently active in device */ 1286 /* Number of RX queues currently active in device */
1287 unsigned int real_num_rx_queues; 1287 unsigned int real_num_rx_queues;
1288 1288
1289 #endif 1289 #endif
1290 1290
1291 rx_handler_func_t __rcu *rx_handler; 1291 rx_handler_func_t __rcu *rx_handler;
1292 void __rcu *rx_handler_data; 1292 void __rcu *rx_handler_data;
1293 1293
1294 struct netdev_queue __rcu *ingress_queue; 1294 struct netdev_queue __rcu *ingress_queue;
1295 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast address */ 1295 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast address */
1296 1296
1297 1297
1298 /* 1298 /*
1299 * Cache lines mostly used on transmit path 1299 * Cache lines mostly used on transmit path
1300 */ 1300 */
1301 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 1301 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1302 1302
1303 /* Number of TX queues allocated at alloc_netdev_mq() time */ 1303 /* Number of TX queues allocated at alloc_netdev_mq() time */
1304 unsigned int num_tx_queues; 1304 unsigned int num_tx_queues;
1305 1305
1306 /* Number of TX queues currently active in device */ 1306 /* Number of TX queues currently active in device */
1307 unsigned int real_num_tx_queues; 1307 unsigned int real_num_tx_queues;
1308 1308
1309 /* root qdisc from userspace point of view */ 1309 /* root qdisc from userspace point of view */
1310 struct Qdisc *qdisc; 1310 struct Qdisc *qdisc;
1311 1311
1312 unsigned long tx_queue_len; /* Max frames per queue allowed */ 1312 unsigned long tx_queue_len; /* Max frames per queue allowed */
1313 spinlock_t tx_global_lock; 1313 spinlock_t tx_global_lock;
1314 1314
1315 #ifdef CONFIG_XPS 1315 #ifdef CONFIG_XPS
1316 struct xps_dev_maps __rcu *xps_maps; 1316 struct xps_dev_maps __rcu *xps_maps;
1317 #endif 1317 #endif
1318 #ifdef CONFIG_RFS_ACCEL 1318 #ifdef CONFIG_RFS_ACCEL
1319 /* CPU reverse-mapping for RX completion interrupts, indexed 1319 /* CPU reverse-mapping for RX completion interrupts, indexed
1320 * by RX queue number. Assigned by driver. This must only be 1320 * by RX queue number. Assigned by driver. This must only be
1321 * set if the ndo_rx_flow_steer operation is defined. */ 1321 * set if the ndo_rx_flow_steer operation is defined. */
1322 struct cpu_rmap *rx_cpu_rmap; 1322 struct cpu_rmap *rx_cpu_rmap;
1323 #endif 1323 #endif
1324 1324
1325 /* These may be needed for future network-power-down code. */ 1325 /* These may be needed for future network-power-down code. */
1326 1326
1327 /* 1327 /*
1328 * trans_start here is expensive for high speed devices on SMP, 1328 * trans_start here is expensive for high speed devices on SMP,
1329 * please use netdev_queue->trans_start instead. 1329 * please use netdev_queue->trans_start instead.
1330 */ 1330 */
1331 unsigned long trans_start; /* Time (in jiffies) of last Tx */ 1331 unsigned long trans_start; /* Time (in jiffies) of last Tx */
1332 1332
1333 int watchdog_timeo; /* used by dev_watchdog() */ 1333 int watchdog_timeo; /* used by dev_watchdog() */
1334 struct timer_list watchdog_timer; 1334 struct timer_list watchdog_timer;
1335 1335
1336 /* Number of references to this device */ 1336 /* Number of references to this device */
1337 int __percpu *pcpu_refcnt; 1337 int __percpu *pcpu_refcnt;
1338 1338
1339 /* delayed register/unregister */ 1339 /* delayed register/unregister */
1340 struct list_head todo_list; 1340 struct list_head todo_list;
1341 /* device index hash chain */ 1341 /* device index hash chain */
1342 struct hlist_node index_hlist; 1342 struct hlist_node index_hlist;
1343 1343
1344 struct list_head link_watch_list; 1344 struct list_head link_watch_list;
1345 1345
1346 /* register/unregister state machine */ 1346 /* register/unregister state machine */
1347 enum { NETREG_UNINITIALIZED=0, 1347 enum { NETREG_UNINITIALIZED=0,
1348 NETREG_REGISTERED, /* completed register_netdevice */ 1348 NETREG_REGISTERED, /* completed register_netdevice */
1349 NETREG_UNREGISTERING, /* called unregister_netdevice */ 1349 NETREG_UNREGISTERING, /* called unregister_netdevice */
1350 NETREG_UNREGISTERED, /* completed unregister todo */ 1350 NETREG_UNREGISTERED, /* completed unregister todo */
1351 NETREG_RELEASED, /* called free_netdev */ 1351 NETREG_RELEASED, /* called free_netdev */
1352 NETREG_DUMMY, /* dummy device for NAPI poll */ 1352 NETREG_DUMMY, /* dummy device for NAPI poll */
1353 } reg_state:8; 1353 } reg_state:8;
1354 1354
1355 bool dismantle; /* device is going to be freed */ 1355 bool dismantle; /* device is going to be freed */
1356 1356
1357 enum { 1357 enum {
1358 RTNL_LINK_INITIALIZED, 1358 RTNL_LINK_INITIALIZED,
1359 RTNL_LINK_INITIALIZING, 1359 RTNL_LINK_INITIALIZING,
1360 } rtnl_link_state:16; 1360 } rtnl_link_state:16;
1361 1361
1362 /* Called from unregister, can be used to call free_netdev */ 1362 /* Called from unregister, can be used to call free_netdev */
1363 void (*destructor)(struct net_device *dev); 1363 void (*destructor)(struct net_device *dev);
1364 1364
1365 #ifdef CONFIG_NETPOLL 1365 #ifdef CONFIG_NETPOLL
1366 struct netpoll_info __rcu *npinfo; 1366 struct netpoll_info __rcu *npinfo;
1367 #endif 1367 #endif
1368 1368
1369 #ifdef CONFIG_NET_NS 1369 #ifdef CONFIG_NET_NS
1370 /* Network namespace this network device is inside */ 1370 /* Network namespace this network device is inside */
1371 struct net *nd_net; 1371 struct net *nd_net;
1372 #endif 1372 #endif
1373 1373
1374 /* mid-layer private */ 1374 /* mid-layer private */
1375 union { 1375 union {
1376 void *ml_priv; 1376 void *ml_priv;
1377 struct pcpu_lstats __percpu *lstats; /* loopback stats */ 1377 struct pcpu_lstats __percpu *lstats; /* loopback stats */
1378 struct pcpu_tstats __percpu *tstats; /* tunnel stats */ 1378 struct pcpu_tstats __percpu *tstats; /* tunnel stats */
1379 struct pcpu_dstats __percpu *dstats; /* dummy stats */ 1379 struct pcpu_dstats __percpu *dstats; /* dummy stats */
1380 struct pcpu_vstats __percpu *vstats; /* veth stats */ 1380 struct pcpu_vstats __percpu *vstats; /* veth stats */
1381 }; 1381 };
1382 /* GARP */ 1382 /* GARP */
1383 struct garp_port __rcu *garp_port; 1383 struct garp_port __rcu *garp_port;
1384 /* MRP */ 1384 /* MRP */
1385 struct mrp_port __rcu *mrp_port; 1385 struct mrp_port __rcu *mrp_port;
1386 1386
1387 /* class/net/name entry */ 1387 /* class/net/name entry */
1388 struct device dev; 1388 struct device dev;
1389 /* space for optional device, statistics, and wireless sysfs groups */ 1389 /* space for optional device, statistics, and wireless sysfs groups */
1390 const struct attribute_group *sysfs_groups[4]; 1390 const struct attribute_group *sysfs_groups[4];
1391 1391
1392 /* rtnetlink link ops */ 1392 /* rtnetlink link ops */
1393 const struct rtnl_link_ops *rtnl_link_ops; 1393 const struct rtnl_link_ops *rtnl_link_ops;
1394 1394
1395 /* for setting kernel sock attribute on TCP connection setup */ 1395 /* for setting kernel sock attribute on TCP connection setup */
1396 #define GSO_MAX_SIZE 65536 1396 #define GSO_MAX_SIZE 65536
1397 unsigned int gso_max_size; 1397 unsigned int gso_max_size;
1398 #define GSO_MAX_SEGS 65535 1398 #define GSO_MAX_SEGS 65535
1399 u16 gso_max_segs; 1399 u16 gso_max_segs;
1400 1400
1401 #ifdef CONFIG_DCB 1401 #ifdef CONFIG_DCB
1402 /* Data Center Bridging netlink ops */ 1402 /* Data Center Bridging netlink ops */
1403 const struct dcbnl_rtnl_ops *dcbnl_ops; 1403 const struct dcbnl_rtnl_ops *dcbnl_ops;
1404 #endif 1404 #endif
1405 u8 num_tc; 1405 u8 num_tc;
1406 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 1406 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1407 u8 prio_tc_map[TC_BITMASK + 1]; 1407 u8 prio_tc_map[TC_BITMASK + 1];
1408 1408
1409 #if IS_ENABLED(CONFIG_FCOE) 1409 #if IS_ENABLED(CONFIG_FCOE)
1410 /* max exchange id for FCoE LRO by ddp */ 1410 /* max exchange id for FCoE LRO by ddp */
1411 unsigned int fcoe_ddp_xid; 1411 unsigned int fcoe_ddp_xid;
1412 #endif 1412 #endif
1413 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP) 1413 #if IS_ENABLED(CONFIG_NETPRIO_CGROUP)
1414 struct netprio_map __rcu *priomap; 1414 struct netprio_map __rcu *priomap;
1415 #endif 1415 #endif
1416 /* phy device may attach itself for hardware timestamping */ 1416 /* phy device may attach itself for hardware timestamping */
1417 struct phy_device *phydev; 1417 struct phy_device *phydev;
1418 1418
1419 struct lock_class_key *qdisc_tx_busylock; 1419 struct lock_class_key *qdisc_tx_busylock;
1420 1420
1421 /* group the device belongs to */ 1421 /* group the device belongs to */
1422 int group; 1422 int group;
1423 1423
1424 struct pm_qos_request pm_qos_req; 1424 struct pm_qos_request pm_qos_req;
1425 }; 1425 };
1426 #define to_net_dev(d) container_of(d, struct net_device, dev) 1426 #define to_net_dev(d) container_of(d, struct net_device, dev)
1427 1427
1428 #define NETDEV_ALIGN 32 1428 #define NETDEV_ALIGN 32
1429 1429
1430 static inline 1430 static inline
1431 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 1431 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1432 { 1432 {
1433 return dev->prio_tc_map[prio & TC_BITMASK]; 1433 return dev->prio_tc_map[prio & TC_BITMASK];
1434 } 1434 }
1435 1435
1436 static inline 1436 static inline
1437 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 1437 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1438 { 1438 {
1439 if (tc >= dev->num_tc) 1439 if (tc >= dev->num_tc)
1440 return -EINVAL; 1440 return -EINVAL;
1441 1441
1442 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 1442 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1443 return 0; 1443 return 0;
1444 } 1444 }
1445 1445
1446 static inline 1446 static inline
1447 void netdev_reset_tc(struct net_device *dev) 1447 void netdev_reset_tc(struct net_device *dev)
1448 { 1448 {
1449 dev->num_tc = 0; 1449 dev->num_tc = 0;
1450 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 1450 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1451 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 1451 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1452 } 1452 }
1453 1453
1454 static inline 1454 static inline
1455 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 1455 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1456 { 1456 {
1457 if (tc >= dev->num_tc) 1457 if (tc >= dev->num_tc)
1458 return -EINVAL; 1458 return -EINVAL;
1459 1459
1460 dev->tc_to_txq[tc].count = count; 1460 dev->tc_to_txq[tc].count = count;
1461 dev->tc_to_txq[tc].offset = offset; 1461 dev->tc_to_txq[tc].offset = offset;
1462 return 0; 1462 return 0;
1463 } 1463 }
1464 1464
1465 static inline 1465 static inline
1466 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 1466 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1467 { 1467 {
1468 if (num_tc > TC_MAX_QUEUE) 1468 if (num_tc > TC_MAX_QUEUE)
1469 return -EINVAL; 1469 return -EINVAL;
1470 1470
1471 dev->num_tc = num_tc; 1471 dev->num_tc = num_tc;
1472 return 0; 1472 return 0;
1473 } 1473 }
1474 1474
1475 static inline 1475 static inline
1476 int netdev_get_num_tc(struct net_device *dev) 1476 int netdev_get_num_tc(struct net_device *dev)
1477 { 1477 {
1478 return dev->num_tc; 1478 return dev->num_tc;
1479 } 1479 }
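[Editor's note: a hedged sketch of how the tc helpers above combine; the
two-class split over eight tx queues and the dummy_ name are illustrative.]

	static int dummy_configure_tc(struct net_device *dev)
	{
		int err = netdev_set_num_tc(dev, 2);

		if (err)
			return err;

		netdev_set_tc_queue(dev, 0, 4, 0);	/* TC0 -> tx queues 0-3 */
		netdev_set_tc_queue(dev, 1, 4, 4);	/* TC1 -> tx queues 4-7 */
		netdev_set_prio_tc_map(dev, 0, 0);	/* priority 0 -> TC0 */
		netdev_set_prio_tc_map(dev, 7, 1);	/* priority 7 -> TC1 */
		return 0;
	}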
1480 1480
1481 static inline 1481 static inline
1482 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 1482 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1483 unsigned int index) 1483 unsigned int index)
1484 { 1484 {
1485 return &dev->_tx[index]; 1485 return &dev->_tx[index];
1486 } 1486 }
1487 1487
1488 static inline void netdev_for_each_tx_queue(struct net_device *dev, 1488 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1489 void (*f)(struct net_device *, 1489 void (*f)(struct net_device *,
1490 struct netdev_queue *, 1490 struct netdev_queue *,
1491 void *), 1491 void *),
1492 void *arg) 1492 void *arg)
1493 { 1493 {
1494 unsigned int i; 1494 unsigned int i;
1495 1495
1496 for (i = 0; i < dev->num_tx_queues; i++) 1496 for (i = 0; i < dev->num_tx_queues; i++)
1497 f(dev, &dev->_tx[i], arg); 1497 f(dev, &dev->_tx[i], arg);
1498 } 1498 }
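[Editor's note: a sketch of applying a per-queue callback with
netdev_for_each_tx_queue(); dummy_init_one_txq() is an illustrative name.]

	static void dummy_init_one_txq(struct net_device *dev,
				       struct netdev_queue *txq, void *arg)
	{
		txq->trans_start = jiffies;	/* example per-queue touch-up */
	}

	/* called as: netdev_for_each_tx_queue(dev, dummy_init_one_txq, NULL); */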
1499 1499
1500 extern struct netdev_queue *netdev_pick_tx(struct net_device *dev, 1500 struct netdev_queue *netdev_pick_tx(struct net_device *dev,
1501 struct sk_buff *skb); 1501 struct sk_buff *skb);
1502 extern u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); 1502 u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
1503 1503
1504 /* 1504 /*
1505 * Net namespace inlines 1505 * Net namespace inlines
1506 */ 1506 */
1507 static inline 1507 static inline
1508 struct net *dev_net(const struct net_device *dev) 1508 struct net *dev_net(const struct net_device *dev)
1509 { 1509 {
1510 return read_pnet(&dev->nd_net); 1510 return read_pnet(&dev->nd_net);
1511 } 1511 }
1512 1512
1513 static inline 1513 static inline
1514 void dev_net_set(struct net_device *dev, struct net *net) 1514 void dev_net_set(struct net_device *dev, struct net *net)
1515 { 1515 {
1516 #ifdef CONFIG_NET_NS 1516 #ifdef CONFIG_NET_NS
1517 release_net(dev->nd_net); 1517 release_net(dev->nd_net);
1518 dev->nd_net = hold_net(net); 1518 dev->nd_net = hold_net(net);
1519 #endif 1519 #endif
1520 } 1520 }
1521 1521
1522 static inline bool netdev_uses_dsa_tags(struct net_device *dev) 1522 static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1523 { 1523 {
1524 #ifdef CONFIG_NET_DSA_TAG_DSA 1524 #ifdef CONFIG_NET_DSA_TAG_DSA
1525 if (dev->dsa_ptr != NULL) 1525 if (dev->dsa_ptr != NULL)
1526 return dsa_uses_dsa_tags(dev->dsa_ptr); 1526 return dsa_uses_dsa_tags(dev->dsa_ptr);
1527 #endif 1527 #endif
1528 1528
1529 return 0; 1529 return 0;
1530 } 1530 }
1531 1531
1532 static inline bool netdev_uses_trailer_tags(struct net_device *dev) 1532 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1533 { 1533 {
1534 #ifdef CONFIG_NET_DSA_TAG_TRAILER 1534 #ifdef CONFIG_NET_DSA_TAG_TRAILER
1535 if (dev->dsa_ptr != NULL) 1535 if (dev->dsa_ptr != NULL)
1536 return dsa_uses_trailer_tags(dev->dsa_ptr); 1536 return dsa_uses_trailer_tags(dev->dsa_ptr);
1537 #endif 1537 #endif
1538 1538
1539 return 0; 1539 return 0;
1540 } 1540 }
1541 1541
1542 /** 1542 /**
1543 * netdev_priv - access network device private data 1543 * netdev_priv - access network device private data
1544 * @dev: network device 1544 * @dev: network device
1545 * 1545 *
1546 * Get network device private data 1546 * Get network device private data
1547 */ 1547 */
1548 static inline void *netdev_priv(const struct net_device *dev) 1548 static inline void *netdev_priv(const struct net_device *dev)
1549 { 1549 {
1550 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); 1550 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1551 } 1551 }
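[Editor's note: a sketch of the usual alloc_netdev()/netdev_priv() pairing,
reusing the hypothetical dummy_netdev_ops above; struct dummy_priv and the
"dum%d" name template are illustrative.]

	struct dummy_priv {
		struct napi_struct napi;
		spinlock_t lock;
	};

	static void dummy_setup(struct net_device *dev)
	{
		ether_setup(dev);		/* Ethernet defaults */
		dev->netdev_ops = &dummy_netdev_ops;
	}

	static int dummy_create(void)
	{
		struct net_device *dev;
		struct dummy_priv *priv;

		dev = alloc_netdev(sizeof(*priv), "dum%d", dummy_setup);
		if (!dev)
			return -ENOMEM;

		priv = netdev_priv(dev);	/* private area follows the struct */
		spin_lock_init(&priv->lock);
		return register_netdev(dev);
	}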
1552 1552
1553 /* Set the sysfs physical device reference for the network logical device; 1553 /* Set the sysfs physical device reference for the network logical device;
1554 * if set prior to registration, a symlink is created during initialization. 1554 * if set prior to registration, a symlink is created during initialization.
1555 */ 1555 */
1556 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 1556 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1557 1557
1558 /* Set the sysfs device type for the network logical device to allow 1558 /* Set the sysfs device type for the network logical device to allow
1559 * fine-grained identification of different network device types. For 1559 * fine-grained identification of different network device types. For
1560 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc. 1560 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
1561 */ 1561 */
1562 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 1562 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
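[Editor's note: typical probe-time usage of the two macros above, assuming a
PCI driver and a hypothetical static struct device_type wlan_type = { .name =
"wlan" }:]

	SET_NETDEV_DEV(netdev, &pdev->dev);	/* sysfs "device" symlink */
	SET_NETDEV_DEVTYPE(netdev, &wlan_type);	/* DEVTYPE=wlan in uevents */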
1563 1563
1564 /* Default NAPI poll() weight 1564 /* Default NAPI poll() weight
1565 * Device drivers are strongly advised not to use a bigger value 1565 * Device drivers are strongly advised not to use a bigger value
1566 */ 1566 */
1567 #define NAPI_POLL_WEIGHT 64 1567 #define NAPI_POLL_WEIGHT 64
1568 1568
1569 /** 1569 /**
1570 * netif_napi_add - initialize a napi context 1570 * netif_napi_add - initialize a napi context
1571 * @dev: network device 1571 * @dev: network device
1572 * @napi: napi context 1572 * @napi: napi context
1573 * @poll: polling function 1573 * @poll: polling function
1574 * @weight: default weight 1574 * @weight: default weight
1575 * 1575 *
1576 * netif_napi_add() must be used to initialize a napi context prior to calling 1576 * netif_napi_add() must be used to initialize a napi context prior to calling
1577 * *any* of the other napi related functions. 1577 * *any* of the other napi related functions.
1578 */ 1578 */
1579 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 1579 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1580 int (*poll)(struct napi_struct *, int), int weight); 1580 int (*poll)(struct napi_struct *, int), int weight);
1581 1581
1582 /** 1582 /**
1583 * netif_napi_del - remove a napi context 1583 * netif_napi_del - remove a napi context
1584 * @napi: napi context 1584 * @napi: napi context
1585 * 1585 *
1586 * netif_napi_del() removes a napi context from the network device napi list 1586 * netif_napi_del() removes a napi context from the network device napi list
1587 */ 1587 */
1588 void netif_napi_del(struct napi_struct *napi); 1588 void netif_napi_del(struct napi_struct *napi);
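[Editor's note: a sketch of the poll function these helpers expect, assuming
the hypothetical dummy_priv above with its embedded napi_struct.]

	static int dummy_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		/* a real driver dequeues up to 'budget' packets here,
		 * feeding each to napi_gro_receive() and bumping work_done */

		if (work_done < budget)
			napi_complete(napi);	/* done: re-enable RX interrupts */
		return work_done;
	}

	/* probe:  netif_napi_add(dev, &priv->napi, dummy_poll, NAPI_POLL_WEIGHT);
	 * remove: netif_napi_del(&priv->napi); */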
1589 1589
1590 struct napi_gro_cb { 1590 struct napi_gro_cb {
1591 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ 1591 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1592 void *frag0; 1592 void *frag0;
1593 1593
1594 /* Length of frag0. */ 1594 /* Length of frag0. */
1595 unsigned int frag0_len; 1595 unsigned int frag0_len;
1596 1596
1597 /* This indicates where we are processing relative to skb->data. */ 1597 /* This indicates where we are processing relative to skb->data. */
1598 int data_offset; 1598 int data_offset;
1599 1599
1600 /* This is non-zero if the packet cannot be merged with the new skb. */ 1600 /* This is non-zero if the packet cannot be merged with the new skb. */
1601 int flush; 1601 int flush;
1602 1602
1603 /* Number of segments aggregated. */ 1603 /* Number of segments aggregated. */
1604 u16 count; 1604 u16 count;
1605 1605
1606 /* This is non-zero if the packet may be of the same flow. */ 1606 /* This is non-zero if the packet may be of the same flow. */
1607 u8 same_flow; 1607 u8 same_flow;
1608 1608
1609 /* Free the skb? */ 1609 /* Free the skb? */
1610 u8 free; 1610 u8 free;
1611 #define NAPI_GRO_FREE 1 1611 #define NAPI_GRO_FREE 1
1612 #define NAPI_GRO_FREE_STOLEN_HEAD 2 1612 #define NAPI_GRO_FREE_STOLEN_HEAD 2
1613 1613
1614 /* jiffies when first packet was created/queued */ 1614 /* jiffies when first packet was created/queued */
1615 unsigned long age; 1615 unsigned long age;
1616 1616
1617 /* Used in ipv6_gro_receive() */ 1617 /* Used in ipv6_gro_receive() */
1618 int proto; 1618 int proto;
1619 1619
1620 /* used in skb_gro_receive() slow path */ 1620 /* used in skb_gro_receive() slow path */
1621 struct sk_buff *last; 1621 struct sk_buff *last;
1622 }; 1622 };
1623 1623
1624 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) 1624 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1625 1625
1626 struct packet_type { 1626 struct packet_type {
1627 __be16 type; /* This is really htons(ether_type). */ 1627 __be16 type; /* This is really htons(ether_type). */
1628 struct net_device *dev; /* NULL is wildcarded here */ 1628 struct net_device *dev; /* NULL is wildcarded here */
1629 int (*func) (struct sk_buff *, 1629 int (*func) (struct sk_buff *,
1630 struct net_device *, 1630 struct net_device *,
1631 struct packet_type *, 1631 struct packet_type *,
1632 struct net_device *); 1632 struct net_device *);
1633 bool (*id_match)(struct packet_type *ptype, 1633 bool (*id_match)(struct packet_type *ptype,
1634 struct sock *sk); 1634 struct sock *sk);
1635 void *af_packet_priv; 1635 void *af_packet_priv;
1636 struct list_head list; 1636 struct list_head list;
1637 }; 1637 };
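[Editor's note: a sketch of registering a protocol tap through the struct
above; dummy_rcv() is illustrative. dev_add_pack()/dev_remove_pack() are
declared further down in this header.]

	static int dummy_rcv(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
	{
		kfree_skb(skb);		/* we are handed our own copy; consume it */
		return NET_RX_SUCCESS;
	}

	static struct packet_type dummy_ptype __read_mostly = {
		.type = cpu_to_be16(ETH_P_ALL),	/* .dev left NULL: match all devices */
		.func = dummy_rcv,
	};

	/* dev_add_pack(&dummy_ptype) at init, dev_remove_pack(&dummy_ptype) at exit */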
1638 1638
1639 struct offload_callbacks { 1639 struct offload_callbacks {
1640 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1640 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1641 netdev_features_t features); 1641 netdev_features_t features);
1642 int (*gso_send_check)(struct sk_buff *skb); 1642 int (*gso_send_check)(struct sk_buff *skb);
1643 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1643 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1644 struct sk_buff *skb); 1644 struct sk_buff *skb);
1645 int (*gro_complete)(struct sk_buff *skb); 1645 int (*gro_complete)(struct sk_buff *skb);
1646 }; 1646 };
1647 1647
1648 struct packet_offload { 1648 struct packet_offload {
1649 __be16 type; /* This is really htons(ether_type). */ 1649 __be16 type; /* This is really htons(ether_type). */
1650 struct offload_callbacks callbacks; 1650 struct offload_callbacks callbacks;
1651 struct list_head list; 1651 struct list_head list;
1652 }; 1652 };
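[Editor's note: a sketch of hooking the GRO path via packet_offload; the
callback is a stub, and ETH_P_IP is illustrative only (the kernel's own inet
offload already claims it).]

	static struct sk_buff **dummy_gro_receive(struct sk_buff **head,
						  struct sk_buff *skb)
	{
		NAPI_GRO_CB(skb)->flush = 1;	/* never aggregate: flush each skb */
		return NULL;
	}

	static struct packet_offload dummy_offload __read_mostly = {
		.type = cpu_to_be16(ETH_P_IP),
		.callbacks = {
			.gro_receive = dummy_gro_receive,
		},
	};

	/* dev_add_offload(&dummy_offload) / dev_remove_offload(&dummy_offload) */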
1653 1653
1654 #include <linux/notifier.h> 1654 #include <linux/notifier.h>
1655 1655
1656 /* netdevice notifier chain. Please remember to update the rtnetlink 1656 /* netdevice notifier chain. Please remember to update the rtnetlink
1657 * notification exclusion list in rtnetlink_event() when adding new 1657 * notification exclusion list in rtnetlink_event() when adding new
1658 * types. 1658 * types.
1659 */ 1659 */
1660 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ 1660 #define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
1661 #define NETDEV_DOWN 0x0002 1661 #define NETDEV_DOWN 0x0002
1662 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface 1662 #define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
1663 detected a hardware crash and restarted 1663 detected a hardware crash and restarted
1664 - we can use this eg to kick tcp sessions 1664 - we can use this eg to kick tcp sessions
1665 once done */ 1665 once done */
1666 #define NETDEV_CHANGE 0x0004 /* Notify device state change */ 1666 #define NETDEV_CHANGE 0x0004 /* Notify device state change */
1667 #define NETDEV_REGISTER 0x0005 1667 #define NETDEV_REGISTER 0x0005
1668 #define NETDEV_UNREGISTER 0x0006 1668 #define NETDEV_UNREGISTER 0x0006
1669 #define NETDEV_CHANGEMTU 0x0007 1669 #define NETDEV_CHANGEMTU 0x0007
1670 #define NETDEV_CHANGEADDR 0x0008 1670 #define NETDEV_CHANGEADDR 0x0008
1671 #define NETDEV_GOING_DOWN 0x0009 1671 #define NETDEV_GOING_DOWN 0x0009
1672 #define NETDEV_CHANGENAME 0x000A 1672 #define NETDEV_CHANGENAME 0x000A
1673 #define NETDEV_FEAT_CHANGE 0x000B 1673 #define NETDEV_FEAT_CHANGE 0x000B
1674 #define NETDEV_BONDING_FAILOVER 0x000C 1674 #define NETDEV_BONDING_FAILOVER 0x000C
1675 #define NETDEV_PRE_UP 0x000D 1675 #define NETDEV_PRE_UP 0x000D
1676 #define NETDEV_PRE_TYPE_CHANGE 0x000E 1676 #define NETDEV_PRE_TYPE_CHANGE 0x000E
1677 #define NETDEV_POST_TYPE_CHANGE 0x000F 1677 #define NETDEV_POST_TYPE_CHANGE 0x000F
1678 #define NETDEV_POST_INIT 0x0010 1678 #define NETDEV_POST_INIT 0x0010
1679 #define NETDEV_UNREGISTER_FINAL 0x0011 1679 #define NETDEV_UNREGISTER_FINAL 0x0011
1680 #define NETDEV_RELEASE 0x0012 1680 #define NETDEV_RELEASE 0x0012
1681 #define NETDEV_NOTIFY_PEERS 0x0013 1681 #define NETDEV_NOTIFY_PEERS 0x0013
1682 #define NETDEV_JOIN 0x0014 1682 #define NETDEV_JOIN 0x0014
1683 #define NETDEV_CHANGEUPPER 0x0015 1683 #define NETDEV_CHANGEUPPER 0x0015
1684 #define NETDEV_RESEND_IGMP 0x0016 1684 #define NETDEV_RESEND_IGMP 0x0016
1685 1685
1686 extern int register_netdevice_notifier(struct notifier_block *nb); 1686 int register_netdevice_notifier(struct notifier_block *nb);
1687 extern int unregister_netdevice_notifier(struct notifier_block *nb); 1687 int unregister_netdevice_notifier(struct notifier_block *nb);
1688 1688
1689 struct netdev_notifier_info { 1689 struct netdev_notifier_info {
1690 struct net_device *dev; 1690 struct net_device *dev;
1691 }; 1691 };
1692 1692
1693 struct netdev_notifier_change_info { 1693 struct netdev_notifier_change_info {
1694 struct netdev_notifier_info info; /* must be first */ 1694 struct netdev_notifier_info info; /* must be first */
1695 unsigned int flags_changed; 1695 unsigned int flags_changed;
1696 }; 1696 };
1697 1697
1698 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info, 1698 static inline void netdev_notifier_info_init(struct netdev_notifier_info *info,
1699 struct net_device *dev) 1699 struct net_device *dev)
1700 { 1700 {
1701 info->dev = dev; 1701 info->dev = dev;
1702 } 1702 }
1703 1703
1704 static inline struct net_device * 1704 static inline struct net_device *
1705 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info) 1705 netdev_notifier_info_to_dev(const struct netdev_notifier_info *info)
1706 { 1706 {
1707 return info->dev; 1707 return info->dev;
1708 } 1708 }
1709 1709
1710 extern int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev, 1710 int call_netdevice_notifiers_info(unsigned long val, struct net_device *dev,
1711 struct netdev_notifier_info *info); 1711 struct netdev_notifier_info *info);
1712 extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev); 1712 int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
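[Editor's note: a sketch of a consumer of the notifier chain above;
dummy_netdev_event() is illustrative. On this kernel the callback's ptr is a
struct netdev_notifier_info, unpacked with netdev_notifier_info_to_dev().]

	static int dummy_netdev_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		switch (event) {
		case NETDEV_UP:
			pr_info("%s: up\n", dev->name);
			break;
		case NETDEV_GOING_DOWN:
			pr_info("%s: going down\n", dev->name);
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block dummy_nb = {
		.notifier_call = dummy_netdev_event,
	};

	/* register_netdevice_notifier(&dummy_nb) also replays NETDEV_REGISTER
	 * and NETDEV_UP for devices that already exist */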
1713 1713
1714 1714
1715 extern rwlock_t dev_base_lock; /* Device list lock */ 1715 extern rwlock_t dev_base_lock; /* Device list lock */
1716 1716
1717 #define for_each_netdev(net, d) \ 1717 #define for_each_netdev(net, d) \
1718 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 1718 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
1719 #define for_each_netdev_reverse(net, d) \ 1719 #define for_each_netdev_reverse(net, d) \
1720 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 1720 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
1721 #define for_each_netdev_rcu(net, d) \ 1721 #define for_each_netdev_rcu(net, d) \
1722 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 1722 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
1723 #define for_each_netdev_safe(net, d, n) \ 1723 #define for_each_netdev_safe(net, d, n) \
1724 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 1724 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1725 #define for_each_netdev_continue(net, d) \ 1725 #define for_each_netdev_continue(net, d) \
1726 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 1726 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1727 #define for_each_netdev_continue_rcu(net, d) \ 1727 #define for_each_netdev_continue_rcu(net, d) \
1728 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 1728 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1729 #define for_each_netdev_in_bond_rcu(bond, slave) \ 1729 #define for_each_netdev_in_bond_rcu(bond, slave) \
1730 for_each_netdev_rcu(&init_net, slave) \ 1730 for_each_netdev_rcu(&init_net, slave) \
1731 if (netdev_master_upper_dev_get_rcu(slave) == bond) 1731 if (netdev_master_upper_dev_get_rcu(slave) == bond)
1732 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 1732 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
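[Editor's note: a sketch of walking a namespace's device list with the RCU
variant of the iterators above; dummy_dump_devices() is an illustrative name.]

	static void dummy_dump_devices(struct net *net)
	{
		struct net_device *dev;

		rcu_read_lock();
		for_each_netdev_rcu(net, dev)
			pr_info("%-8s ifindex %d mtu %u\n",
				dev->name, dev->ifindex, dev->mtu);
		rcu_read_unlock();
	}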
1733 1733
1734 static inline struct net_device *next_net_device(struct net_device *dev) 1734 static inline struct net_device *next_net_device(struct net_device *dev)
1735 { 1735 {
1736 struct list_head *lh; 1736 struct list_head *lh;
1737 struct net *net; 1737 struct net *net;
1738 1738
1739 net = dev_net(dev); 1739 net = dev_net(dev);
1740 lh = dev->dev_list.next; 1740 lh = dev->dev_list.next;
1741 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1741 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1742 } 1742 }
1743 1743
1744 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 1744 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1745 { 1745 {
1746 struct list_head *lh; 1746 struct list_head *lh;
1747 struct net *net; 1747 struct net *net;
1748 1748
1749 net = dev_net(dev); 1749 net = dev_net(dev);
1750 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 1750 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
1751 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1751 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1752 } 1752 }
1753 1753
1754 static inline struct net_device *first_net_device(struct net *net) 1754 static inline struct net_device *first_net_device(struct net *net)
1755 { 1755 {
1756 return list_empty(&net->dev_base_head) ? NULL : 1756 return list_empty(&net->dev_base_head) ? NULL :
1757 net_device_entry(net->dev_base_head.next); 1757 net_device_entry(net->dev_base_head.next);
1758 } 1758 }
1759 1759
1760 static inline struct net_device *first_net_device_rcu(struct net *net) 1760 static inline struct net_device *first_net_device_rcu(struct net *net)
1761 { 1761 {
1762 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 1762 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1763 1763
1764 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1764 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1765 } 1765 }
1766 1766
1767 extern int netdev_boot_setup_check(struct net_device *dev); 1767 int netdev_boot_setup_check(struct net_device *dev);
1768 extern unsigned long netdev_boot_base(const char *prefix, int unit); 1768 unsigned long netdev_boot_base(const char *prefix, int unit);
1769 extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 1769 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1770 const char *hwaddr); 1770 const char *hwaddr);
1771 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 1771 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1772 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); 1772 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1773 extern void dev_add_pack(struct packet_type *pt); 1773 void dev_add_pack(struct packet_type *pt);
1774 extern void dev_remove_pack(struct packet_type *pt); 1774 void dev_remove_pack(struct packet_type *pt);
1775 extern void __dev_remove_pack(struct packet_type *pt); 1775 void __dev_remove_pack(struct packet_type *pt);
1776 extern void dev_add_offload(struct packet_offload *po); 1776 void dev_add_offload(struct packet_offload *po);
1777 extern void dev_remove_offload(struct packet_offload *po); 1777 void dev_remove_offload(struct packet_offload *po);
1778 extern void __dev_remove_offload(struct packet_offload *po); 1778 void __dev_remove_offload(struct packet_offload *po);
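For context, dev_add_pack()/dev_remove_pack() are the registration points for protocol taps; a minimal, hypothetical ETH_P_ALL sniffer might look like this (the sniff_* names are illustrative):

	static int sniff_rcv(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
	{
		kfree_skb(skb);		/* we receive a clone; just drop it */
		return NET_RX_SUCCESS;
	}

	static struct packet_type sniff_pt = {
		.type = htons(ETH_P_ALL),	/* tap every protocol */
		.func = sniff_rcv,
	};

	/* dev_add_pack(&sniff_pt) at module init, dev_remove_pack(&sniff_pt) at exit */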
1779 1779
1780 extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, 1780 struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1781 unsigned short mask); 1781 unsigned short mask);
1782 extern struct net_device *dev_get_by_name(struct net *net, const char *name); 1782 struct net_device *dev_get_by_name(struct net *net, const char *name);
1783 extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 1783 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
1784 extern struct net_device *__dev_get_by_name(struct net *net, const char *name); 1784 struct net_device *__dev_get_by_name(struct net *net, const char *name);
1785 extern int dev_alloc_name(struct net_device *dev, const char *name); 1785 int dev_alloc_name(struct net_device *dev, const char *name);
1786 extern int dev_open(struct net_device *dev); 1786 int dev_open(struct net_device *dev);
1787 extern int dev_close(struct net_device *dev); 1787 int dev_close(struct net_device *dev);
1788 extern void dev_disable_lro(struct net_device *dev); 1788 void dev_disable_lro(struct net_device *dev);
1789 extern int dev_loopback_xmit(struct sk_buff *newskb); 1789 int dev_loopback_xmit(struct sk_buff *newskb);
1790 extern int dev_queue_xmit(struct sk_buff *skb); 1790 int dev_queue_xmit(struct sk_buff *skb);
1791 extern int register_netdevice(struct net_device *dev); 1791 int register_netdevice(struct net_device *dev);
1792 extern void unregister_netdevice_queue(struct net_device *dev, 1792 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
1793 struct list_head *head); 1793 void unregister_netdevice_many(struct list_head *head);
1794 extern void unregister_netdevice_many(struct list_head *head);
1795 static inline void unregister_netdevice(struct net_device *dev) 1794 static inline void unregister_netdevice(struct net_device *dev)
1796 { 1795 {
1797 unregister_netdevice_queue(dev, NULL); 1796 unregister_netdevice_queue(dev, NULL);
1798 } 1797 }
1799 1798
1800 extern int netdev_refcnt_read(const struct net_device *dev); 1799 int netdev_refcnt_read(const struct net_device *dev);
1801 extern void free_netdev(struct net_device *dev); 1800 void free_netdev(struct net_device *dev);
1802 extern void synchronize_net(void); 1801 void synchronize_net(void);
1803 extern int init_dummy_netdev(struct net_device *dev); 1802 int init_dummy_netdev(struct net_device *dev);
1804 1803
1805 extern struct net_device *dev_get_by_index(struct net *net, int ifindex); 1804 struct net_device *dev_get_by_index(struct net *net, int ifindex);
1806 extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); 1805 struct net_device *__dev_get_by_index(struct net *net, int ifindex);
1807 extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); 1806 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
1808 extern int netdev_get_name(struct net *net, char *name, int ifindex); 1807 int netdev_get_name(struct net *net, char *name, int ifindex);
1809 extern int dev_restart(struct net_device *dev); 1808 int dev_restart(struct net_device *dev);
1810 #ifdef CONFIG_NETPOLL_TRAP 1809 #ifdef CONFIG_NETPOLL_TRAP
1811 extern int netpoll_trap(void); 1810 int netpoll_trap(void);
1812 #endif 1811 #endif
1813 extern int skb_gro_receive(struct sk_buff **head, 1812 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1814 struct sk_buff *skb);
1815 1813
1816 static inline unsigned int skb_gro_offset(const struct sk_buff *skb) 1814 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
1817 { 1815 {
1818 return NAPI_GRO_CB(skb)->data_offset; 1816 return NAPI_GRO_CB(skb)->data_offset;
1819 } 1817 }
1820 1818
1821 static inline unsigned int skb_gro_len(const struct sk_buff *skb) 1819 static inline unsigned int skb_gro_len(const struct sk_buff *skb)
1822 { 1820 {
1823 return skb->len - NAPI_GRO_CB(skb)->data_offset; 1821 return skb->len - NAPI_GRO_CB(skb)->data_offset;
1824 } 1822 }
1825 1823
1826 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len) 1824 static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
1827 { 1825 {
1828 NAPI_GRO_CB(skb)->data_offset += len; 1826 NAPI_GRO_CB(skb)->data_offset += len;
1829 } 1827 }
1830 1828
1831 static inline void *skb_gro_header_fast(struct sk_buff *skb, 1829 static inline void *skb_gro_header_fast(struct sk_buff *skb,
1832 unsigned int offset) 1830 unsigned int offset)
1833 { 1831 {
1834 return NAPI_GRO_CB(skb)->frag0 + offset; 1832 return NAPI_GRO_CB(skb)->frag0 + offset;
1835 } 1833 }
1836 1834
1837 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) 1835 static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
1838 { 1836 {
1839 return NAPI_GRO_CB(skb)->frag0_len < hlen; 1837 return NAPI_GRO_CB(skb)->frag0_len < hlen;
1840 } 1838 }
1841 1839
1842 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, 1840 static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
1843 unsigned int offset) 1841 unsigned int offset)
1844 { 1842 {
1845 if (!pskb_may_pull(skb, hlen)) 1843 if (!pskb_may_pull(skb, hlen))
1846 return NULL; 1844 return NULL;
1847 1845
1848 NAPI_GRO_CB(skb)->frag0 = NULL; 1846 NAPI_GRO_CB(skb)->frag0 = NULL;
1849 NAPI_GRO_CB(skb)->frag0_len = 0; 1847 NAPI_GRO_CB(skb)->frag0_len = 0;
1850 return skb->data + offset; 1848 return skb->data + offset;
1851 } 1849 }
1852 1850
1853 static inline void *skb_gro_mac_header(struct sk_buff *skb) 1851 static inline void *skb_gro_mac_header(struct sk_buff *skb)
1854 { 1852 {
1855 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb); 1853 return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
1856 } 1854 }
1857 1855
1858 static inline void *skb_gro_network_header(struct sk_buff *skb) 1856 static inline void *skb_gro_network_header(struct sk_buff *skb)
1859 { 1857 {
1860 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) + 1858 return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
1861 skb_network_offset(skb); 1859 skb_network_offset(skb);
1862 } 1860 }
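These helpers are meant to be used together; the usual pattern in a ->gro_receive() handler (cf. tcp_gro_receive) tries the frag0 fast path first and falls back to pulling the header into the linear area:

	unsigned int off = skb_gro_offset(skb);
	unsigned int hlen = off + sizeof(*th);	/* th: the header being parsed */

	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;		/* header not available */
	}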
1863 1861
1864 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 1862 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1865 unsigned short type, 1863 unsigned short type,
1866 const void *daddr, const void *saddr, 1864 const void *daddr, const void *saddr,
1867 unsigned int len) 1865 unsigned int len)
1868 { 1866 {
1869 if (!dev->header_ops || !dev->header_ops->create) 1867 if (!dev->header_ops || !dev->header_ops->create)
1870 return 0; 1868 return 0;
1871 1869
1872 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 1870 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
1873 } 1871 }
1874 1872
1875 static inline int dev_parse_header(const struct sk_buff *skb, 1873 static inline int dev_parse_header(const struct sk_buff *skb,
1876 unsigned char *haddr) 1874 unsigned char *haddr)
1877 { 1875 {
1878 const struct net_device *dev = skb->dev; 1876 const struct net_device *dev = skb->dev;
1879 1877
1880 if (!dev->header_ops || !dev->header_ops->parse) 1878 if (!dev->header_ops || !dev->header_ops->parse)
1881 return 0; 1879 return 0;
1882 return dev->header_ops->parse(skb, haddr); 1880 return dev->header_ops->parse(skb, haddr);
1883 } 1881 }
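A short sketch of the caller side: an output path that prepends a link-layer header via dev_hard_header() (dest_hw is an assumed, already-resolved destination hardware address):

	skb_reset_network_header(skb);
	if (dev_hard_header(skb, dev, ETH_P_IP, dest_hw, dev->dev_addr,
			    skb->len) < 0)
		goto fail;	/* header_ops->create() failed */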
1884 1882
1885 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); 1883 typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
1886 extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); 1884 int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
1887 static inline int unregister_gifconf(unsigned int family) 1885 static inline int unregister_gifconf(unsigned int family)
1888 { 1886 {
1889 return register_gifconf(family, NULL); 1887 return register_gifconf(family, NULL);
1890 } 1888 }
1891 1889
1892 #ifdef CONFIG_NET_FLOW_LIMIT 1890 #ifdef CONFIG_NET_FLOW_LIMIT
1893 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ 1891 #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */
1894 struct sd_flow_limit { 1892 struct sd_flow_limit {
1895 u64 count; 1893 u64 count;
1896 unsigned int num_buckets; 1894 unsigned int num_buckets;
1897 unsigned int history_head; 1895 unsigned int history_head;
1898 u16 history[FLOW_LIMIT_HISTORY]; 1896 u16 history[FLOW_LIMIT_HISTORY];
1899 u8 buckets[]; 1897 u8 buckets[];
1900 }; 1898 };
1901 1899
1902 extern int netdev_flow_limit_table_len; 1900 extern int netdev_flow_limit_table_len;
1903 #endif /* CONFIG_NET_FLOW_LIMIT */ 1901 #endif /* CONFIG_NET_FLOW_LIMIT */
1904 1902
1905 /* 1903 /*
1906 * Incoming packets are placed on per-cpu queues 1904 * Incoming packets are placed on per-cpu queues
1907 */ 1905 */
1908 struct softnet_data { 1906 struct softnet_data {
1909 struct Qdisc *output_queue; 1907 struct Qdisc *output_queue;
1910 struct Qdisc **output_queue_tailp; 1908 struct Qdisc **output_queue_tailp;
1911 struct list_head poll_list; 1909 struct list_head poll_list;
1912 struct sk_buff *completion_queue; 1910 struct sk_buff *completion_queue;
1913 struct sk_buff_head process_queue; 1911 struct sk_buff_head process_queue;
1914 1912
1915 /* stats */ 1913 /* stats */
1916 unsigned int processed; 1914 unsigned int processed;
1917 unsigned int time_squeeze; 1915 unsigned int time_squeeze;
1918 unsigned int cpu_collision; 1916 unsigned int cpu_collision;
1919 unsigned int received_rps; 1917 unsigned int received_rps;
1920 1918
1921 #ifdef CONFIG_RPS 1919 #ifdef CONFIG_RPS
1922 struct softnet_data *rps_ipi_list; 1920 struct softnet_data *rps_ipi_list;
1923 1921
1924 /* Elements below can be accessed between CPUs for RPS */ 1922 /* Elements below can be accessed between CPUs for RPS */
1925 struct call_single_data csd ____cacheline_aligned_in_smp; 1923 struct call_single_data csd ____cacheline_aligned_in_smp;
1926 struct softnet_data *rps_ipi_next; 1924 struct softnet_data *rps_ipi_next;
1927 unsigned int cpu; 1925 unsigned int cpu;
1928 unsigned int input_queue_head; 1926 unsigned int input_queue_head;
1929 unsigned int input_queue_tail; 1927 unsigned int input_queue_tail;
1930 #endif 1928 #endif
1931 unsigned int dropped; 1929 unsigned int dropped;
1932 struct sk_buff_head input_pkt_queue; 1930 struct sk_buff_head input_pkt_queue;
1933 struct napi_struct backlog; 1931 struct napi_struct backlog;
1934 1932
1935 #ifdef CONFIG_NET_FLOW_LIMIT 1933 #ifdef CONFIG_NET_FLOW_LIMIT
1936 struct sd_flow_limit __rcu *flow_limit; 1934 struct sd_flow_limit __rcu *flow_limit;
1937 #endif 1935 #endif
1938 }; 1936 };
1939 1937
1940 static inline void input_queue_head_incr(struct softnet_data *sd) 1938 static inline void input_queue_head_incr(struct softnet_data *sd)
1941 { 1939 {
1942 #ifdef CONFIG_RPS 1940 #ifdef CONFIG_RPS
1943 sd->input_queue_head++; 1941 sd->input_queue_head++;
1944 #endif 1942 #endif
1945 } 1943 }
1946 1944
1947 static inline void input_queue_tail_incr_save(struct softnet_data *sd, 1945 static inline void input_queue_tail_incr_save(struct softnet_data *sd,
1948 unsigned int *qtail) 1946 unsigned int *qtail)
1949 { 1947 {
1950 #ifdef CONFIG_RPS 1948 #ifdef CONFIG_RPS
1951 *qtail = ++sd->input_queue_tail; 1949 *qtail = ++sd->input_queue_tail;
1952 #endif 1950 #endif
1953 } 1951 }
1954 1952
1955 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); 1953 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1956 1954
1957 extern void __netif_schedule(struct Qdisc *q); 1955 void __netif_schedule(struct Qdisc *q);
1958 1956
1959 static inline void netif_schedule_queue(struct netdev_queue *txq) 1957 static inline void netif_schedule_queue(struct netdev_queue *txq)
1960 { 1958 {
1961 if (!(txq->state & QUEUE_STATE_ANY_XOFF)) 1959 if (!(txq->state & QUEUE_STATE_ANY_XOFF))
1962 __netif_schedule(txq->qdisc); 1960 __netif_schedule(txq->qdisc);
1963 } 1961 }
1964 1962
1965 static inline void netif_tx_schedule_all(struct net_device *dev) 1963 static inline void netif_tx_schedule_all(struct net_device *dev)
1966 { 1964 {
1967 unsigned int i; 1965 unsigned int i;
1968 1966
1969 for (i = 0; i < dev->num_tx_queues; i++) 1967 for (i = 0; i < dev->num_tx_queues; i++)
1970 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 1968 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1971 } 1969 }
1972 1970
1973 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue) 1971 static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
1974 { 1972 {
1975 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 1973 clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
1976 } 1974 }
1977 1975
1978 /** 1976 /**
1979 * netif_start_queue - allow transmit 1977 * netif_start_queue - allow transmit
1980 * @dev: network device 1978 * @dev: network device
1981 * 1979 *
1982 * Allow upper layers to call the device hard_start_xmit routine. 1980 * Allow upper layers to call the device hard_start_xmit routine.
1983 */ 1981 */
1984 static inline void netif_start_queue(struct net_device *dev) 1982 static inline void netif_start_queue(struct net_device *dev)
1985 { 1983 {
1986 netif_tx_start_queue(netdev_get_tx_queue(dev, 0)); 1984 netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
1987 } 1985 }
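In practice netif_start_queue() is called from a driver's ->ndo_open() once the hardware is ready; a minimal single-queue sketch (foo_* names are hypothetical):

	static int foo_open(struct net_device *dev)
	{
		foo_hw_start(dev);	/* assumed hardware bring-up helper */
		netif_start_queue(dev);	/* let the stack call ->ndo_start_xmit() */
		return 0;
	}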
1988 1986
1989 static inline void netif_tx_start_all_queues(struct net_device *dev) 1987 static inline void netif_tx_start_all_queues(struct net_device *dev)
1990 { 1988 {
1991 unsigned int i; 1989 unsigned int i;
1992 1990
1993 for (i = 0; i < dev->num_tx_queues; i++) { 1991 for (i = 0; i < dev->num_tx_queues; i++) {
1994 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 1992 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1995 netif_tx_start_queue(txq); 1993 netif_tx_start_queue(txq);
1996 } 1994 }
1997 } 1995 }
1998 1996
1999 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue) 1997 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2000 { 1998 {
2001 #ifdef CONFIG_NETPOLL_TRAP 1999 #ifdef CONFIG_NETPOLL_TRAP
2002 if (netpoll_trap()) { 2000 if (netpoll_trap()) {
2003 netif_tx_start_queue(dev_queue); 2001 netif_tx_start_queue(dev_queue);
2004 return; 2002 return;
2005 } 2003 }
2006 #endif 2004 #endif
2007 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) 2005 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
2008 __netif_schedule(dev_queue->qdisc); 2006 __netif_schedule(dev_queue->qdisc);
2009 } 2007 }
2010 2008
2011 /** 2009 /**
2012 * netif_wake_queue - restart transmit 2010 * netif_wake_queue - restart transmit
2013 * @dev: network device 2011 * @dev: network device
2014 * 2012 *
2015 * Allow upper layers to call the device hard_start_xmit routine. 2013 * Allow upper layers to call the device hard_start_xmit routine.
2016 * Used for flow control when transmit resources are available. 2014 * Used for flow control when transmit resources are available.
2017 */ 2015 */
2018 static inline void netif_wake_queue(struct net_device *dev) 2016 static inline void netif_wake_queue(struct net_device *dev)
2019 { 2017 {
2020 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0)); 2018 netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
2021 } 2019 }
2022 2020
2023 static inline void netif_tx_wake_all_queues(struct net_device *dev) 2021 static inline void netif_tx_wake_all_queues(struct net_device *dev)
2024 { 2022 {
2025 unsigned int i; 2023 unsigned int i;
2026 2024
2027 for (i = 0; i < dev->num_tx_queues; i++) { 2025 for (i = 0; i < dev->num_tx_queues; i++) {
2028 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2026 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2029 netif_tx_wake_queue(txq); 2027 netif_tx_wake_queue(txq);
2030 } 2028 }
2031 } 2029 }
2032 2030
2033 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue) 2031 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
2034 { 2032 {
2035 if (WARN_ON(!dev_queue)) { 2033 if (WARN_ON(!dev_queue)) {
2036 pr_info("netif_stop_queue() cannot be called before register_netdev()\n"); 2034 pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
2037 return; 2035 return;
2038 } 2036 }
2039 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 2037 set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2040 } 2038 }
2041 2039
2042 /** 2040 /**
2043 * netif_stop_queue - stop transmitting packets 2041 * netif_stop_queue - stop transmitting packets
2044 * @dev: network device 2042 * @dev: network device
2045 * 2043 *
2046 * Stop upper layers calling the device hard_start_xmit routine. 2044 * Stop upper layers calling the device hard_start_xmit routine.
2047 * Used for flow control when transmit resources are unavailable. 2045 * Used for flow control when transmit resources are unavailable.
2048 */ 2046 */
2049 static inline void netif_stop_queue(struct net_device *dev) 2047 static inline void netif_stop_queue(struct net_device *dev)
2050 { 2048 {
2051 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0)); 2049 netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
2052 } 2050 }
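The stop/wake pair implements driver flow control; a common ring-buffer pattern, with foo_* helpers standing in for driver specifics:

	/* in ->ndo_start_xmit(), after posting the frame: */
	if (foo_tx_ring_full(priv))
		netif_stop_queue(dev);

	/* in the TX-completion path, after reclaiming descriptors: */
	if (netif_queue_stopped(dev) && foo_tx_ring_has_room(priv))
		netif_wake_queue(dev);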
2053 2051
2054 static inline void netif_tx_stop_all_queues(struct net_device *dev) 2052 static inline void netif_tx_stop_all_queues(struct net_device *dev)
2055 { 2053 {
2056 unsigned int i; 2054 unsigned int i;
2057 2055
2058 for (i = 0; i < dev->num_tx_queues; i++) { 2056 for (i = 0; i < dev->num_tx_queues; i++) {
2059 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2057 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2060 netif_tx_stop_queue(txq); 2058 netif_tx_stop_queue(txq);
2061 } 2059 }
2062 } 2060 }
2063 2061
2064 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue) 2062 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
2065 { 2063 {
2066 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state); 2064 return test_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
2067 } 2065 }
2068 2066
2069 /** 2067 /**
2070 * netif_queue_stopped - test if transmit queue is flowblocked 2068 * netif_queue_stopped - test if transmit queue is flowblocked
2071 * @dev: network device 2069 * @dev: network device
2072 * 2070 *
2073 * Test if transmit queue on device is currently unable to send. 2071 * Test if transmit queue on device is currently unable to send.
2074 */ 2072 */
2075 static inline bool netif_queue_stopped(const struct net_device *dev) 2073 static inline bool netif_queue_stopped(const struct net_device *dev)
2076 { 2074 {
2077 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0)); 2075 return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
2078 } 2076 }
2079 2077
2080 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue) 2078 static inline bool netif_xmit_stopped(const struct netdev_queue *dev_queue)
2081 { 2079 {
2082 return dev_queue->state & QUEUE_STATE_ANY_XOFF; 2080 return dev_queue->state & QUEUE_STATE_ANY_XOFF;
2083 } 2081 }
2084 2082
2085 static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue) 2083 static inline bool netif_xmit_frozen_or_stopped(const struct netdev_queue *dev_queue)
2086 { 2084 {
2087 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN; 2085 return dev_queue->state & QUEUE_STATE_ANY_XOFF_OR_FROZEN;
2088 } 2086 }
2089 2087
2090 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, 2088 static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
2091 unsigned int bytes) 2089 unsigned int bytes)
2092 { 2090 {
2093 #ifdef CONFIG_BQL 2091 #ifdef CONFIG_BQL
2094 dql_queued(&dev_queue->dql, bytes); 2092 dql_queued(&dev_queue->dql, bytes);
2095 2093
2096 if (likely(dql_avail(&dev_queue->dql) >= 0)) 2094 if (likely(dql_avail(&dev_queue->dql) >= 0))
2097 return; 2095 return;
2098 2096
2099 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 2097 set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2100 2098
2101 /* 2099 /*
2102 * The XOFF flag must be set before checking the dql_avail below, 2100 * The XOFF flag must be set before checking the dql_avail below,
2103 * because in netdev_tx_completed_queue we update the dql_completed 2101 * because in netdev_tx_completed_queue we update the dql_completed
2104 * before checking the XOFF flag. 2102 * before checking the XOFF flag.
2105 */ 2103 */
2106 smp_mb(); 2104 smp_mb();
2107 2105
2108 /* check again in case another CPU has just made room avail */ 2106 /* check again in case another CPU has just made room avail */
2109 if (unlikely(dql_avail(&dev_queue->dql) >= 0)) 2107 if (unlikely(dql_avail(&dev_queue->dql) >= 0))
2110 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state); 2108 clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
2111 #endif 2109 #endif
2112 } 2110 }
2113 2111
2114 /** 2112 /**
2115 * netdev_sent_queue - report the number of bytes queued to hardware 2113 * netdev_sent_queue - report the number of bytes queued to hardware
2116 * @dev: network device 2114 * @dev: network device
2117 * @bytes: number of bytes queued to the hardware device queue 2115 * @bytes: number of bytes queued to the hardware device queue
2118 * 2116 *
2119 * Report the number of bytes queued for sending/completion to the network 2117 * Report the number of bytes queued for sending/completion to the network
2120 * device hardware queue. @bytes should be a good approximation and must 2118 * device hardware queue. @bytes should be a good approximation and must
2121 * exactly match the @bytes passed to netdev_completed_queue(). 2119 * exactly match the @bytes passed to netdev_completed_queue().
2122 */ 2120 */
2123 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes) 2121 static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
2124 { 2122 {
2125 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes); 2123 netdev_tx_sent_queue(netdev_get_tx_queue(dev, 0), bytes);
2126 } 2124 }
2127 2125
2128 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 2126 static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
2129 unsigned int pkts, unsigned int bytes) 2127 unsigned int pkts, unsigned int bytes)
2130 { 2128 {
2131 #ifdef CONFIG_BQL 2129 #ifdef CONFIG_BQL
2132 if (unlikely(!bytes)) 2130 if (unlikely(!bytes))
2133 return; 2131 return;
2134 2132
2135 dql_completed(&dev_queue->dql, bytes); 2133 dql_completed(&dev_queue->dql, bytes);
2136 2134
2137 /* 2135 /*
2138 * Without the memory barrier there is a small possibility that 2136 * Without the memory barrier there is a small possibility that
2139 * netdev_tx_sent_queue will miss the update and cause the queue to 2137 * netdev_tx_sent_queue will miss the update and cause the queue to
2140 * be stopped forever 2138 * be stopped forever
2141 */ 2139 */
2142 smp_mb(); 2140 smp_mb();
2143 2141
2144 if (dql_avail(&dev_queue->dql) < 0) 2142 if (dql_avail(&dev_queue->dql) < 0)
2145 return; 2143 return;
2146 2144
2147 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state)) 2145 if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
2148 netif_schedule_queue(dev_queue); 2146 netif_schedule_queue(dev_queue);
2149 #endif 2147 #endif
2150 } 2148 }
2151 2149
2152 /** 2150 /**
2153 * netdev_completed_queue - report bytes and packets completed by device 2151 * netdev_completed_queue - report bytes and packets completed by device
2154 * @dev: network device 2152 * @dev: network device
2155 * @pkts: actual number of packets sent over the medium 2153 * @pkts: actual number of packets sent over the medium
2156 * @bytes: actual number of bytes sent over the medium 2154 * @bytes: actual number of bytes sent over the medium
2157 * 2155 *
2158 * Report the number of bytes and packets transmitted by the network device 2156 * Report the number of bytes and packets transmitted by the network device
2159 * hardware queue over the physical medium; @bytes must exactly match the 2157 * hardware queue over the physical medium; @bytes must exactly match the
2160 * @bytes amount passed to netdev_sent_queue() 2158 * @bytes amount passed to netdev_sent_queue()
2161 */ 2159 */
2162 static inline void netdev_completed_queue(struct net_device *dev, 2160 static inline void netdev_completed_queue(struct net_device *dev,
2163 unsigned int pkts, unsigned int bytes) 2161 unsigned int pkts, unsigned int bytes)
2164 { 2162 {
2165 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 2163 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
2166 } 2164 }
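To illustrate the BQL contract stated above, the two calls bracket a frame's lifetime in a hypothetical driver; the byte totals on both sides must agree:

	/* transmit path, once the skb is owned by hardware: */
	netdev_sent_queue(dev, skb->len);

	/* completion path, after reclaiming a batch of descriptors: */
	netdev_completed_queue(dev, pkts_done, bytes_done);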
2167 2165
2168 static inline void netdev_tx_reset_queue(struct netdev_queue *q) 2166 static inline void netdev_tx_reset_queue(struct netdev_queue *q)
2169 { 2167 {
2170 #ifdef CONFIG_BQL 2168 #ifdef CONFIG_BQL
2171 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); 2169 clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
2172 dql_reset(&q->dql); 2170 dql_reset(&q->dql);
2173 #endif 2171 #endif
2174 } 2172 }
2175 2173
2176 /** 2174 /**
2177 * netdev_reset_queue - reset the packets and bytes count of a network device 2175 * netdev_reset_queue - reset the packets and bytes count of a network device
2178 * @dev_queue: network device 2176 * @dev_queue: network device
2179 * 2177 *
2180 * Reset the bytes and packet count of a network device and clear the 2178 * Reset the bytes and packet count of a network device and clear the
2181 * software flow control OFF bit for this network device 2179 * software flow control OFF bit for this network device
2182 */ 2180 */
2183 static inline void netdev_reset_queue(struct net_device *dev_queue) 2181 static inline void netdev_reset_queue(struct net_device *dev_queue)
2184 { 2182 {
2185 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); 2183 netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
2186 } 2184 }
2187 2185
2188 /** 2186 /**
2189 * netif_running - test if up 2187 * netif_running - test if up
2190 * @dev: network device 2188 * @dev: network device
2191 * 2189 *
2192 * Test if the device has been brought up. 2190 * Test if the device has been brought up.
2193 */ 2191 */
2194 static inline bool netif_running(const struct net_device *dev) 2192 static inline bool netif_running(const struct net_device *dev)
2195 { 2193 {
2196 return test_bit(__LINK_STATE_START, &dev->state); 2194 return test_bit(__LINK_STATE_START, &dev->state);
2197 } 2195 }
2198 2196
2199 /* 2197 /*
2200 * Routines to manage the subqueues on a device. We only need start, 2198 * Routines to manage the subqueues on a device. We only need start,
2201 * stop, and a check if a subqueue is stopped. All other device management is 2199 * stop, and a check if a subqueue is stopped. All other device management is
2202 * done at the overall netdevice level. 2200 * done at the overall netdevice level.
2203 * There is also a test for whether the device is multiqueue. 2201 * There is also a test for whether the device is multiqueue.
2204 */ 2202 */
2205 2203
2206 /** 2204 /**
2207 * netif_start_subqueue - allow sending packets on subqueue 2205 * netif_start_subqueue - allow sending packets on subqueue
2208 * @dev: network device 2206 * @dev: network device
2209 * @queue_index: sub queue index 2207 * @queue_index: sub queue index
2210 * 2208 *
2211 * Start individual transmit queue of a device with multiple transmit queues. 2209 * Start individual transmit queue of a device with multiple transmit queues.
2212 */ 2210 */
2213 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 2211 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
2214 { 2212 {
2215 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 2213 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2216 2214
2217 netif_tx_start_queue(txq); 2215 netif_tx_start_queue(txq);
2218 } 2216 }
2219 2217
2220 /** 2218 /**
2221 * netif_stop_subqueue - stop sending packets on subqueue 2219 * netif_stop_subqueue - stop sending packets on subqueue
2222 * @dev: network device 2220 * @dev: network device
2223 * @queue_index: sub queue index 2221 * @queue_index: sub queue index
2224 * 2222 *
2225 * Stop individual transmit queue of a device with multiple transmit queues. 2223 * Stop individual transmit queue of a device with multiple transmit queues.
2226 */ 2224 */
2227 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index) 2225 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
2228 { 2226 {
2229 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 2227 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2230 #ifdef CONFIG_NETPOLL_TRAP 2228 #ifdef CONFIG_NETPOLL_TRAP
2231 if (netpoll_trap()) 2229 if (netpoll_trap())
2232 return; 2230 return;
2233 #endif 2231 #endif
2234 netif_tx_stop_queue(txq); 2232 netif_tx_stop_queue(txq);
2235 } 2233 }
2236 2234
2237 /** 2235 /**
2238 * netif_subqueue_stopped - test status of subqueue 2236 * netif_subqueue_stopped - test status of subqueue
2239 * @dev: network device 2237 * @dev: network device
2240 * @queue_index: sub queue index 2238 * @queue_index: sub queue index
2241 * 2239 *
2242 * Check individual transmit queue of a device with multiple transmit queues. 2240 * Check individual transmit queue of a device with multiple transmit queues.
2243 */ 2241 */
2244 static inline bool __netif_subqueue_stopped(const struct net_device *dev, 2242 static inline bool __netif_subqueue_stopped(const struct net_device *dev,
2245 u16 queue_index) 2243 u16 queue_index)
2246 { 2244 {
2247 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 2245 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2248 2246
2249 return netif_tx_queue_stopped(txq); 2247 return netif_tx_queue_stopped(txq);
2250 } 2248 }
2251 2249
2252 static inline bool netif_subqueue_stopped(const struct net_device *dev, 2250 static inline bool netif_subqueue_stopped(const struct net_device *dev,
2253 struct sk_buff *skb) 2251 struct sk_buff *skb)
2254 { 2252 {
2255 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb)); 2253 return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
2256 } 2254 }
2257 2255
2258 /** 2256 /**
2259 * netif_wake_subqueue - allow sending packets on subqueue 2257 * netif_wake_subqueue - allow sending packets on subqueue
2260 * @dev: network device 2258 * @dev: network device
2261 * @queue_index: sub queue index 2259 * @queue_index: sub queue index
2262 * 2260 *
2263 * Resume individual transmit queue of a device with multiple transmit queues. 2261 * Resume individual transmit queue of a device with multiple transmit queues.
2264 */ 2262 */
2265 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) 2263 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2266 { 2264 {
2267 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 2265 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2268 #ifdef CONFIG_NETPOLL_TRAP 2266 #ifdef CONFIG_NETPOLL_TRAP
2269 if (netpoll_trap()) 2267 if (netpoll_trap())
2270 return; 2268 return;
2271 #endif 2269 #endif
2272 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) 2270 if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
2273 __netif_schedule(txq->qdisc); 2271 __netif_schedule(txq->qdisc);
2274 } 2272 }
2275 2273
2276 #ifdef CONFIG_XPS 2274 #ifdef CONFIG_XPS
2277 extern int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask, 2275 int netif_set_xps_queue(struct net_device *dev, struct cpumask *mask,
2278 u16 index); 2276 u16 index);
2279 #else 2277 #else
2280 static inline int netif_set_xps_queue(struct net_device *dev, 2278 static inline int netif_set_xps_queue(struct net_device *dev,
2281 struct cpumask *mask, 2279 struct cpumask *mask,
2282 u16 index) 2280 u16 index)
2283 { 2281 {
2284 return 0; 2282 return 0;
2285 } 2283 }
2286 #endif 2284 #endif
2287 2285
2288 /* 2286 /*
2289 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used 2287 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
2290 * as a distribution range limit for the returned value. 2288 * as a distribution range limit for the returned value.
2291 */ 2289 */
2292 static inline u16 skb_tx_hash(const struct net_device *dev, 2290 static inline u16 skb_tx_hash(const struct net_device *dev,
2293 const struct sk_buff *skb) 2291 const struct sk_buff *skb)
2294 { 2292 {
2295 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); 2293 return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
2296 } 2294 }
2297 2295
2298 /** 2296 /**
2299 * netif_is_multiqueue - test if device has multiple transmit queues 2297 * netif_is_multiqueue - test if device has multiple transmit queues
2300 * @dev: network device 2298 * @dev: network device
2301 * 2299 *
2302 * Check if device has multiple transmit queues 2300 * Check if device has multiple transmit queues
2303 */ 2301 */
2304 static inline bool netif_is_multiqueue(const struct net_device *dev) 2302 static inline bool netif_is_multiqueue(const struct net_device *dev)
2305 { 2303 {
2306 return dev->num_tx_queues > 1; 2304 return dev->num_tx_queues > 1;
2307 } 2305 }
2308 2306
2309 extern int netif_set_real_num_tx_queues(struct net_device *dev, 2307 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
2310 unsigned int txq);
2311 2308
2312 #ifdef CONFIG_RPS 2309 #ifdef CONFIG_RPS
2313 extern int netif_set_real_num_rx_queues(struct net_device *dev, 2310 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
2314 unsigned int rxq);
2315 #else 2311 #else
2316 static inline int netif_set_real_num_rx_queues(struct net_device *dev, 2312 static inline int netif_set_real_num_rx_queues(struct net_device *dev,
2317 unsigned int rxq) 2313 unsigned int rxq)
2318 { 2314 {
2319 return 0; 2315 return 0;
2320 } 2316 }
2321 #endif 2317 #endif
2322 2318
2323 static inline int netif_copy_real_num_queues(struct net_device *to_dev, 2319 static inline int netif_copy_real_num_queues(struct net_device *to_dev,
2324 const struct net_device *from_dev) 2320 const struct net_device *from_dev)
2325 { 2321 {
2326 int err; 2322 int err;
2327 2323
2328 err = netif_set_real_num_tx_queues(to_dev, 2324 err = netif_set_real_num_tx_queues(to_dev,
2329 from_dev->real_num_tx_queues); 2325 from_dev->real_num_tx_queues);
2330 if (err) 2326 if (err)
2331 return err; 2327 return err;
2332 #ifdef CONFIG_RPS 2328 #ifdef CONFIG_RPS
2333 return netif_set_real_num_rx_queues(to_dev, 2329 return netif_set_real_num_rx_queues(to_dev,
2334 from_dev->real_num_rx_queues); 2330 from_dev->real_num_rx_queues);
2335 #else 2331 #else
2336 return 0; 2332 return 0;
2337 #endif 2333 #endif
2338 } 2334 }
2339 2335
2340 #define DEFAULT_MAX_NUM_RSS_QUEUES (8) 2336 #define DEFAULT_MAX_NUM_RSS_QUEUES (8)
2341 extern int netif_get_num_default_rss_queues(void); 2337 int netif_get_num_default_rss_queues(void);
2342 2338
2343 /* Use this variant when it is known for sure that it 2339 /* Use this variant when it is known for sure that it
2344 * is executing from hardware interrupt context or with hardware interrupts 2340 * is executing from hardware interrupt context or with hardware interrupts
2345 * disabled. 2341 * disabled.
2346 */ 2342 */
2347 extern void dev_kfree_skb_irq(struct sk_buff *skb); 2343 void dev_kfree_skb_irq(struct sk_buff *skb);
2348 2344
2349 /* Use this variant in places where it could be invoked 2345 /* Use this variant in places where it could be invoked
2350 * from either hardware interrupt or other context, with hardware interrupts 2346 * from either hardware interrupt or other context, with hardware interrupts
2351 * either disabled or enabled. 2347 * either disabled or enabled.
2352 */ 2348 */
2353 extern void dev_kfree_skb_any(struct sk_buff *skb); 2349 void dev_kfree_skb_any(struct sk_buff *skb);
2354 2350
2355 extern int netif_rx(struct sk_buff *skb); 2351 int netif_rx(struct sk_buff *skb);
2356 extern int netif_rx_ni(struct sk_buff *skb); 2352 int netif_rx_ni(struct sk_buff *skb);
2357 extern int netif_receive_skb(struct sk_buff *skb); 2353 int netif_receive_skb(struct sk_buff *skb);
2358 extern gro_result_t napi_gro_receive(struct napi_struct *napi, 2354 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
2359 struct sk_buff *skb); 2355 void napi_gro_flush(struct napi_struct *napi, bool flush_old);
2360 extern void napi_gro_flush(struct napi_struct *napi, bool flush_old); 2356 struct sk_buff *napi_get_frags(struct napi_struct *napi);
2361 extern struct sk_buff * napi_get_frags(struct napi_struct *napi); 2357 gro_result_t napi_gro_frags(struct napi_struct *napi);
2362 extern gro_result_t napi_gro_frags(struct napi_struct *napi);
2363 2358
2364 static inline void napi_free_frags(struct napi_struct *napi) 2359 static inline void napi_free_frags(struct napi_struct *napi)
2365 { 2360 {
2366 kfree_skb(napi->skb); 2361 kfree_skb(napi->skb);
2367 napi->skb = NULL; 2362 napi->skb = NULL;
2368 } 2363 }
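napi_gro_receive() is normally fed from a NAPI poll loop; a minimal sketch, with foo_priv/foo_rx_next() as assumed driver pieces:

	static int foo_poll(struct napi_struct *napi, int budget)
	{
		struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
		struct sk_buff *skb;
		int work = 0;

		while (work < budget && (skb = foo_rx_next(priv)) != NULL) {
			napi_gro_receive(napi, skb);	/* instead of netif_receive_skb() */
			work++;
		}
		if (work < budget)
			napi_complete(napi);	/* driver re-enables RX interrupts here */
		return work;
	}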
2369 2364
2370 extern int netdev_rx_handler_register(struct net_device *dev, 2365 int netdev_rx_handler_register(struct net_device *dev,
2371 rx_handler_func_t *rx_handler, 2366 rx_handler_func_t *rx_handler,
2372 void *rx_handler_data); 2367 void *rx_handler_data);
2373 extern void netdev_rx_handler_unregister(struct net_device *dev); 2368 void netdev_rx_handler_unregister(struct net_device *dev);
2374 2369
2375 extern bool dev_valid_name(const char *name); 2370 bool dev_valid_name(const char *name);
2376 extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); 2371 int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2377 extern int dev_ethtool(struct net *net, struct ifreq *); 2372 int dev_ethtool(struct net *net, struct ifreq *);
2378 extern unsigned int dev_get_flags(const struct net_device *); 2373 unsigned int dev_get_flags(const struct net_device *);
2379 extern int __dev_change_flags(struct net_device *, unsigned int flags); 2374 int __dev_change_flags(struct net_device *, unsigned int flags);
2380 extern int dev_change_flags(struct net_device *, unsigned int); 2375 int dev_change_flags(struct net_device *, unsigned int);
2381 extern void __dev_notify_flags(struct net_device *, unsigned int old_flags); 2376 void __dev_notify_flags(struct net_device *, unsigned int old_flags);
2382 extern int dev_change_name(struct net_device *, const char *); 2377 int dev_change_name(struct net_device *, const char *);
2383 extern int dev_set_alias(struct net_device *, const char *, size_t); 2378 int dev_set_alias(struct net_device *, const char *, size_t);
2384 extern int dev_change_net_namespace(struct net_device *, 2379 int dev_change_net_namespace(struct net_device *, struct net *, const char *);
2385 struct net *, const char *); 2380 int dev_set_mtu(struct net_device *, int);
2386 extern int dev_set_mtu(struct net_device *, int); 2381 void dev_set_group(struct net_device *, int);
2387 extern void dev_set_group(struct net_device *, int); 2382 int dev_set_mac_address(struct net_device *, struct sockaddr *);
2388 extern int dev_set_mac_address(struct net_device *, 2383 int dev_change_carrier(struct net_device *, bool new_carrier);
2389 struct sockaddr *); 2384 int dev_get_phys_port_id(struct net_device *dev,
2390 extern int dev_change_carrier(struct net_device *, 2385 struct netdev_phys_port_id *ppid);
2391 bool new_carrier); 2386 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
2392 extern int dev_get_phys_port_id(struct net_device *dev, 2387 struct netdev_queue *txq);
2393 struct netdev_phys_port_id *ppid); 2388 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
2394 extern int dev_hard_start_xmit(struct sk_buff *skb,
2395 struct net_device *dev,
2396 struct netdev_queue *txq);
2397 extern int dev_forward_skb(struct net_device *dev,
2398 struct sk_buff *skb);
2399 2389
2400 extern int netdev_budget; 2390 extern int netdev_budget;
2401 2391
2402 /* Called by rtnetlink.c:rtnl_unlock() */ 2392 /* Called by rtnetlink.c:rtnl_unlock() */
2403 extern void netdev_run_todo(void); 2393 void netdev_run_todo(void);
2404 2394
2405 /** 2395 /**
2406 * dev_put - release reference to device 2396 * dev_put - release reference to device
2407 * @dev: network device 2397 * @dev: network device
2408 * 2398 *
2409 * Release reference to device to allow it to be freed. 2399 * Release reference to device to allow it to be freed.
2410 */ 2400 */
2411 static inline void dev_put(struct net_device *dev) 2401 static inline void dev_put(struct net_device *dev)
2412 { 2402 {
2413 this_cpu_dec(*dev->pcpu_refcnt); 2403 this_cpu_dec(*dev->pcpu_refcnt);
2414 } 2404 }
2415 2405
2416 /** 2406 /**
2417 * dev_hold - get reference to device 2407 * dev_hold - get reference to device
2418 * @dev: network device 2408 * @dev: network device
2419 * 2409 *
2420 * Hold reference to device to keep it from being freed. 2410 * Hold reference to device to keep it from being freed.
2421 */ 2411 */
2422 static inline void dev_hold(struct net_device *dev) 2412 static inline void dev_hold(struct net_device *dev)
2423 { 2413 {
2424 this_cpu_inc(*dev->pcpu_refcnt); 2414 this_cpu_inc(*dev->pcpu_refcnt);
2425 } 2415 }
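Lookup functions such as dev_get_by_name() return with a reference already taken via dev_hold(); the caller balances it with dev_put() (sketch; "eth0" is an arbitrary example name):

	struct net_device *dev = dev_get_by_name(&init_net, "eth0");

	if (dev) {
		/* ... use dev ... */
		dev_put(dev);
	}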
2426 2416
2427 /* Carrier loss detection, dial on demand. The functions netif_carrier_on 2417 /* Carrier loss detection, dial on demand. The functions netif_carrier_on
2428 * and _off may be called from IRQ context, but it is the caller 2418 * and _off may be called from IRQ context, but it is the caller
2429 * who is responsible for serializing these calls. 2419 * who is responsible for serializing these calls.
2430 * 2420 *
2431 * The name carrier is inappropriate; these functions should really be 2421 * The name carrier is inappropriate; these functions should really be
2432 * called netif_lowerlayer_*() because they represent the state of any 2422 * called netif_lowerlayer_*() because they represent the state of any
2433 * kind of lower layer not just hardware media. 2423 * kind of lower layer not just hardware media.
2434 */ 2424 */
2435 2425
2436 extern void linkwatch_init_dev(struct net_device *dev); 2426 void linkwatch_init_dev(struct net_device *dev);
2437 extern void linkwatch_fire_event(struct net_device *dev); 2427 void linkwatch_fire_event(struct net_device *dev);
2438 extern void linkwatch_forget_dev(struct net_device *dev); 2428 void linkwatch_forget_dev(struct net_device *dev);
2439 2429
2440 /** 2430 /**
2441 * netif_carrier_ok - test if carrier present 2431 * netif_carrier_ok - test if carrier present
2442 * @dev: network device 2432 * @dev: network device
2443 * 2433 *
2444 * Check if carrier is present on device 2434 * Check if carrier is present on device
2445 */ 2435 */
2446 static inline bool netif_carrier_ok(const struct net_device *dev) 2436 static inline bool netif_carrier_ok(const struct net_device *dev)
2447 { 2437 {
2448 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); 2438 return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
2449 } 2439 }
2450 2440
2451 extern unsigned long dev_trans_start(struct net_device *dev); 2441 unsigned long dev_trans_start(struct net_device *dev);
2452 2442
2453 extern void __netdev_watchdog_up(struct net_device *dev); 2443 void __netdev_watchdog_up(struct net_device *dev);
2454 2444
2455 extern void netif_carrier_on(struct net_device *dev); 2445 void netif_carrier_on(struct net_device *dev);
2456 2446
2457 extern void netif_carrier_off(struct net_device *dev); 2447 void netif_carrier_off(struct net_device *dev);
2458 2448
2459 /** 2449 /**
2460 * netif_dormant_on - mark device as dormant. 2450 * netif_dormant_on - mark device as dormant.
2461 * @dev: network device 2451 * @dev: network device
2462 * 2452 *
2463 * Mark device as dormant (as per RFC2863). 2453 * Mark device as dormant (as per RFC2863).
2464 * 2454 *
2465 * The dormant state indicates that the relevant interface is not 2455 * The dormant state indicates that the relevant interface is not
2466 * actually in a condition to pass packets (i.e., it is not 'up') but is 2456 * actually in a condition to pass packets (i.e., it is not 'up') but is
2467 * in a "pending" state, waiting for some external event. For "on- 2457 * in a "pending" state, waiting for some external event. For "on-
2468 * demand" interfaces, this new state identifies the situation where the 2458 * demand" interfaces, this new state identifies the situation where the
2469 * interface is waiting for events to place it in the up state. 2459 * interface is waiting for events to place it in the up state.
2470 * 2460 *
2471 */ 2461 */
2472 static inline void netif_dormant_on(struct net_device *dev) 2462 static inline void netif_dormant_on(struct net_device *dev)
2473 { 2463 {
2474 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state)) 2464 if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
2475 linkwatch_fire_event(dev); 2465 linkwatch_fire_event(dev);
2476 } 2466 }
2477 2467
2478 /** 2468 /**
2479 * netif_dormant_off - set device as not dormant. 2469 * netif_dormant_off - set device as not dormant.
2480 * @dev: network device 2470 * @dev: network device
2481 * 2471 *
2482 * Device is not in dormant state. 2472 * Device is not in dormant state.
2483 */ 2473 */
2484 static inline void netif_dormant_off(struct net_device *dev) 2474 static inline void netif_dormant_off(struct net_device *dev)
2485 { 2475 {
2486 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state)) 2476 if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
2487 linkwatch_fire_event(dev); 2477 linkwatch_fire_event(dev);
2488 } 2478 }
2489 2479
2490 /** 2480 /**
2491 * netif_dormant - test if device is dormant 2481 * netif_dormant - test if device is dormant
2492 * @dev: network device 2482 * @dev: network device
2493 * 2483 *
2494 * Check if the device is in the dormant state 2484 * Check if the device is in the dormant state
2495 */ 2485 */
2496 static inline bool netif_dormant(const struct net_device *dev) 2486 static inline bool netif_dormant(const struct net_device *dev)
2497 { 2487 {
2498 return test_bit(__LINK_STATE_DORMANT, &dev->state); 2488 return test_bit(__LINK_STATE_DORMANT, &dev->state);
2499 } 2489 }
2500 2490
2501 2491
2502 /** 2492 /**
2503 * netif_oper_up - test if device is operational 2493 * netif_oper_up - test if device is operational
2504 * @dev: network device 2494 * @dev: network device
2505 * 2495 *
2506 * Check if carrier is operational 2496 * Check if carrier is operational
2507 */ 2497 */
2508 static inline bool netif_oper_up(const struct net_device *dev) 2498 static inline bool netif_oper_up(const struct net_device *dev)
2509 { 2499 {
2510 return (dev->operstate == IF_OPER_UP || 2500 return (dev->operstate == IF_OPER_UP ||
2511 dev->operstate == IF_OPER_UNKNOWN /* backward compat */); 2501 dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
2512 } 2502 }
2513 2503
2514 /** 2504 /**
2515 * netif_device_present - is device available or removed 2505 * netif_device_present - is device available or removed
2516 * @dev: network device 2506 * @dev: network device
2517 * 2507 *
2518 * Check if device has not been removed from system. 2508 * Check if device has not been removed from system.
2519 */ 2509 */
2520 static inline bool netif_device_present(struct net_device *dev) 2510 static inline bool netif_device_present(struct net_device *dev)
2521 { 2511 {
2522 return test_bit(__LINK_STATE_PRESENT, &dev->state); 2512 return test_bit(__LINK_STATE_PRESENT, &dev->state);
2523 } 2513 }
2524 2514
2525 extern void netif_device_detach(struct net_device *dev); 2515 void netif_device_detach(struct net_device *dev);
2526 2516
2527 extern void netif_device_attach(struct net_device *dev); 2517 void netif_device_attach(struct net_device *dev);
2528 2518
2529 /* 2519 /*
2530 * Network interface message level settings 2520 * Network interface message level settings
2531 */ 2521 */
2532 2522
2533 enum { 2523 enum {
2534 NETIF_MSG_DRV = 0x0001, 2524 NETIF_MSG_DRV = 0x0001,
2535 NETIF_MSG_PROBE = 0x0002, 2525 NETIF_MSG_PROBE = 0x0002,
2536 NETIF_MSG_LINK = 0x0004, 2526 NETIF_MSG_LINK = 0x0004,
2537 NETIF_MSG_TIMER = 0x0008, 2527 NETIF_MSG_TIMER = 0x0008,
2538 NETIF_MSG_IFDOWN = 0x0010, 2528 NETIF_MSG_IFDOWN = 0x0010,
2539 NETIF_MSG_IFUP = 0x0020, 2529 NETIF_MSG_IFUP = 0x0020,
2540 NETIF_MSG_RX_ERR = 0x0040, 2530 NETIF_MSG_RX_ERR = 0x0040,
2541 NETIF_MSG_TX_ERR = 0x0080, 2531 NETIF_MSG_TX_ERR = 0x0080,
2542 NETIF_MSG_TX_QUEUED = 0x0100, 2532 NETIF_MSG_TX_QUEUED = 0x0100,
2543 NETIF_MSG_INTR = 0x0200, 2533 NETIF_MSG_INTR = 0x0200,
2544 NETIF_MSG_TX_DONE = 0x0400, 2534 NETIF_MSG_TX_DONE = 0x0400,
2545 NETIF_MSG_RX_STATUS = 0x0800, 2535 NETIF_MSG_RX_STATUS = 0x0800,
2546 NETIF_MSG_PKTDATA = 0x1000, 2536 NETIF_MSG_PKTDATA = 0x1000,
2547 NETIF_MSG_HW = 0x2000, 2537 NETIF_MSG_HW = 0x2000,
2548 NETIF_MSG_WOL = 0x4000, 2538 NETIF_MSG_WOL = 0x4000,
2549 }; 2539 };
2550 2540
2551 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) 2541 #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
2552 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) 2542 #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
2553 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) 2543 #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
2554 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) 2544 #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
2555 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) 2545 #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
2556 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) 2546 #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
2557 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) 2547 #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
2558 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) 2548 #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
2559 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) 2549 #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
2560 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) 2550 #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
2561 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) 2551 #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
2562 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) 2552 #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
2563 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) 2553 #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
2564 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) 2554 #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
2565 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) 2555 #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
2566 2556
2567 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 2557 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2568 { 2558 {
2569 /* use default */ 2559 /* use default */
2570 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 2560 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2571 return default_msg_enable_bits; 2561 return default_msg_enable_bits;
2572 if (debug_value == 0) /* no output */ 2562 if (debug_value == 0) /* no output */
2573 return 0; 2563 return 0;
2574 /* set low N bits */ 2564 /* set low N bits */
2575 return (1 << debug_value) - 1; 2565 return (1 << debug_value) - 1;
2576 } 2566 }
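Drivers typically seed msg_enable from a module parameter at probe time and then gate their logging on the per-class tests above; a sketch, where "debug" is an assumed module parameter:

	priv->msg_enable = netif_msg_init(debug,
					  NETIF_MSG_DRV | NETIF_MSG_PROBE |
					  NETIF_MSG_LINK);

	if (netif_msg_probe(priv))
		netdev_info(dev, "probe complete\n");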
2577 2567
2578 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) 2568 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
2579 { 2569 {
2580 spin_lock(&txq->_xmit_lock); 2570 spin_lock(&txq->_xmit_lock);
2581 txq->xmit_lock_owner = cpu; 2571 txq->xmit_lock_owner = cpu;
2582 } 2572 }
2583 2573
2584 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) 2574 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
2585 { 2575 {
2586 spin_lock_bh(&txq->_xmit_lock); 2576 spin_lock_bh(&txq->_xmit_lock);
2587 txq->xmit_lock_owner = smp_processor_id(); 2577 txq->xmit_lock_owner = smp_processor_id();
2588 } 2578 }
2589 2579
2590 static inline bool __netif_tx_trylock(struct netdev_queue *txq) 2580 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
2591 { 2581 {
2592 bool ok = spin_trylock(&txq->_xmit_lock); 2582 bool ok = spin_trylock(&txq->_xmit_lock);
2593 if (likely(ok)) 2583 if (likely(ok))
2594 txq->xmit_lock_owner = smp_processor_id(); 2584 txq->xmit_lock_owner = smp_processor_id();
2595 return ok; 2585 return ok;
2596 } 2586 }
2597 2587
2598 static inline void __netif_tx_unlock(struct netdev_queue *txq) 2588 static inline void __netif_tx_unlock(struct netdev_queue *txq)
2599 { 2589 {
2600 txq->xmit_lock_owner = -1; 2590 txq->xmit_lock_owner = -1;
2601 spin_unlock(&txq->_xmit_lock); 2591 spin_unlock(&txq->_xmit_lock);
2602 } 2592 }
2603 2593
2604 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) 2594 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
2605 { 2595 {
2606 txq->xmit_lock_owner = -1; 2596 txq->xmit_lock_owner = -1;
2607 spin_unlock_bh(&txq->_xmit_lock); 2597 spin_unlock_bh(&txq->_xmit_lock);
2608 } 2598 }
2609 2599
2610 static inline void txq_trans_update(struct netdev_queue *txq) 2600 static inline void txq_trans_update(struct netdev_queue *txq)
2611 { 2601 {
2612 if (txq->xmit_lock_owner != -1) 2602 if (txq->xmit_lock_owner != -1)
2613 txq->trans_start = jiffies; 2603 txq->trans_start = jiffies;
2614 } 2604 }
2615 2605
2616 /** 2606 /**
2617 * netif_tx_lock - grab network device transmit lock 2607 * netif_tx_lock - grab network device transmit lock
2618 * @dev: network device 2608 * @dev: network device
2619 * 2609 *
2620 * Get network device transmit lock 2610 * Get network device transmit lock
2621 */ 2611 */
2622 static inline void netif_tx_lock(struct net_device *dev) 2612 static inline void netif_tx_lock(struct net_device *dev)
2623 { 2613 {
2624 unsigned int i; 2614 unsigned int i;
2625 int cpu; 2615 int cpu;
2626 2616
2627 spin_lock(&dev->tx_global_lock); 2617 spin_lock(&dev->tx_global_lock);
2628 cpu = smp_processor_id(); 2618 cpu = smp_processor_id();
2629 for (i = 0; i < dev->num_tx_queues; i++) { 2619 for (i = 0; i < dev->num_tx_queues; i++) {
2630 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2620 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2631 2621
2632 /* We are the only thread of execution doing a 2622 /* We are the only thread of execution doing a
2633 * freeze, but we have to grab the _xmit_lock in 2623 * freeze, but we have to grab the _xmit_lock in
2634 * order to synchronize with threads which are in 2624 * order to synchronize with threads which are in
2635 * the ->hard_start_xmit() handler and already 2625 * the ->hard_start_xmit() handler and already
2636 * checked the frozen bit. 2626 * checked the frozen bit.
2637 */ 2627 */
2638 __netif_tx_lock(txq, cpu); 2628 __netif_tx_lock(txq, cpu);
2639 set_bit(__QUEUE_STATE_FROZEN, &txq->state); 2629 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
2640 __netif_tx_unlock(txq); 2630 __netif_tx_unlock(txq);
2641 } 2631 }
2642 } 2632 }
2643 2633
2644 static inline void netif_tx_lock_bh(struct net_device *dev) 2634 static inline void netif_tx_lock_bh(struct net_device *dev)
2645 { 2635 {
2646 local_bh_disable(); 2636 local_bh_disable();
2647 netif_tx_lock(dev); 2637 netif_tx_lock(dev);
2648 } 2638 }
2649 2639
2650 static inline void netif_tx_unlock(struct net_device *dev) 2640 static inline void netif_tx_unlock(struct net_device *dev)
2651 { 2641 {
2652 unsigned int i; 2642 unsigned int i;
2653 2643
2654 for (i = 0; i < dev->num_tx_queues; i++) { 2644 for (i = 0; i < dev->num_tx_queues; i++) {
2655 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2645 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2656 2646
2657 /* No need to grab the _xmit_lock here. If the 2647 /* No need to grab the _xmit_lock here. If the
2658 * queue is not stopped for another reason, we 2648 * queue is not stopped for another reason, we
2659 * force a schedule. 2649 * force a schedule.
2660 */ 2650 */
2661 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); 2651 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
2662 netif_schedule_queue(txq); 2652 netif_schedule_queue(txq);
2663 } 2653 }
2664 spin_unlock(&dev->tx_global_lock); 2654 spin_unlock(&dev->tx_global_lock);
2665 } 2655 }
2666 2656
2667 static inline void netif_tx_unlock_bh(struct net_device *dev) 2657 static inline void netif_tx_unlock_bh(struct net_device *dev)
2668 { 2658 {
2669 netif_tx_unlock(dev); 2659 netif_tx_unlock(dev);
2670 local_bh_enable(); 2660 local_bh_enable();
2671 } 2661 }
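
The four helpers above pair up: netif_tx_lock()/netif_tx_unlock() freeze and thaw every transmit queue, and the _bh variants additionally disable bottom halves. A minimal sketch of a driver reset path built on them (mydrv_reset and the ring-rewrite step are illustrative, not part of this header):

	static void mydrv_reset(struct net_device *dev)
	{
		netif_tx_lock_bh(dev);		/* freeze all TX queues, BHs off */
		/* ... safely rewrite TX ring state here ... */
		netif_tx_unlock_bh(dev);	/* thaw queues and reschedule them */
	}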
2672 2662
2673 #define HARD_TX_LOCK(dev, txq, cpu) { \ 2663 #define HARD_TX_LOCK(dev, txq, cpu) { \
2674 if ((dev->features & NETIF_F_LLTX) == 0) { \ 2664 if ((dev->features & NETIF_F_LLTX) == 0) { \
2675 __netif_tx_lock(txq, cpu); \ 2665 __netif_tx_lock(txq, cpu); \
2676 } \ 2666 } \
2677 } 2667 }
2678 2668
2679 #define HARD_TX_UNLOCK(dev, txq) { \ 2669 #define HARD_TX_UNLOCK(dev, txq) { \
2680 if ((dev->features & NETIF_F_LLTX) == 0) { \ 2670 if ((dev->features & NETIF_F_LLTX) == 0) { \
2681 __netif_tx_unlock(txq); \ 2671 __netif_tx_unlock(txq); \
2682 } \ 2672 } \
2683 } 2673 }
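
HARD_TX_LOCK/HARD_TX_UNLOCK skip the per-queue lock for NETIF_F_LLTX drivers, which serialize transmits themselves. A hedged sketch of the shape of a core transmit path that uses them (the real caller lives in net/core/dev.c; rc, skb and txq are assumed to come from the surrounding code):

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_stopped(txq))
		rc = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);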
2684 2674
2685 static inline void netif_tx_disable(struct net_device *dev) 2675 static inline void netif_tx_disable(struct net_device *dev)
2686 { 2676 {
2687 unsigned int i; 2677 unsigned int i;
2688 int cpu; 2678 int cpu;
2689 2679
2690 local_bh_disable(); 2680 local_bh_disable();
2691 cpu = smp_processor_id(); 2681 cpu = smp_processor_id();
2692 for (i = 0; i < dev->num_tx_queues; i++) { 2682 for (i = 0; i < dev->num_tx_queues; i++) {
2693 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 2683 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
2694 2684
2695 __netif_tx_lock(txq, cpu); 2685 __netif_tx_lock(txq, cpu);
2696 netif_tx_stop_queue(txq); 2686 netif_tx_stop_queue(txq);
2697 __netif_tx_unlock(txq); 2687 __netif_tx_unlock(txq);
2698 } 2688 }
2699 local_bh_enable(); 2689 local_bh_enable();
2700 } 2690 }
2701 2691
2702 static inline void netif_addr_lock(struct net_device *dev) 2692 static inline void netif_addr_lock(struct net_device *dev)
2703 { 2693 {
2704 spin_lock(&dev->addr_list_lock); 2694 spin_lock(&dev->addr_list_lock);
2705 } 2695 }
2706 2696
2707 static inline void netif_addr_lock_nested(struct net_device *dev) 2697 static inline void netif_addr_lock_nested(struct net_device *dev)
2708 { 2698 {
2709 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING); 2699 spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
2710 } 2700 }
2711 2701
2712 static inline void netif_addr_lock_bh(struct net_device *dev) 2702 static inline void netif_addr_lock_bh(struct net_device *dev)
2713 { 2703 {
2714 spin_lock_bh(&dev->addr_list_lock); 2704 spin_lock_bh(&dev->addr_list_lock);
2715 } 2705 }
2716 2706
2717 static inline void netif_addr_unlock(struct net_device *dev) 2707 static inline void netif_addr_unlock(struct net_device *dev)
2718 { 2708 {
2719 spin_unlock(&dev->addr_list_lock); 2709 spin_unlock(&dev->addr_list_lock);
2720 } 2710 }
2721 2711
2722 static inline void netif_addr_unlock_bh(struct net_device *dev) 2712 static inline void netif_addr_unlock_bh(struct net_device *dev)
2723 { 2713 {
2724 spin_unlock_bh(&dev->addr_list_lock); 2714 spin_unlock_bh(&dev->addr_list_lock);
2725 } 2715 }
2726 2716
2727 /* 2717 /*
2728 * dev_addrs walker. Should be used only for read access. Call with 2718 * dev_addrs walker. Should be used only for read access. Call with
2729 * rcu_read_lock held. 2719 * rcu_read_lock held.
2730 */ 2720 */
2731 #define for_each_dev_addr(dev, ha) \ 2721 #define for_each_dev_addr(dev, ha) \
2732 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 2722 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
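
As the comment says, the walker is read-only and must run under the RCU read lock. Minimal usage sketch:

	struct netdev_hw_addr *ha;

	rcu_read_lock();
	for_each_dev_addr(dev, ha)
		pr_info("%s: hw addr %pM\n", dev->name, ha->addr);
	rcu_read_unlock();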
2733 2723
2734 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 2724 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
2735 2725
2736 extern void ether_setup(struct net_device *dev); 2726 void ether_setup(struct net_device *dev);
2737 2727
2738 /* Support for loadable net-drivers */ 2728 /* Support for loadable net-drivers */
2739 extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 2729 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
2740 void (*setup)(struct net_device *), 2730 void (*setup)(struct net_device *),
2741 unsigned int txqs, unsigned int rxqs); 2731 unsigned int txqs, unsigned int rxqs);
2742 #define alloc_netdev(sizeof_priv, name, setup) \ 2732 #define alloc_netdev(sizeof_priv, name, setup) \
2743 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) 2733 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2744 2734
2745 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ 2735 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2746 alloc_netdev_mqs(sizeof_priv, name, setup, count, count) 2736 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2747 2737
2748 extern int register_netdev(struct net_device *dev); 2738 int register_netdev(struct net_device *dev);
2749 extern void unregister_netdev(struct net_device *dev); 2739 void unregister_netdev(struct net_device *dev);
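
Together with ether_setup() above, these form the canonical bring-up sequence for a loadable driver. A sketch under the assumption of an illustrative struct mydrv_priv (free_netdev() is declared earlier in this header):

	struct net_device *dev;

	dev = alloc_netdev(sizeof(struct mydrv_priv), "mydrv%d", ether_setup);
	if (!dev)
		return -ENOMEM;

	if (register_netdev(dev)) {
		free_netdev(dev);
		return -EIO;
	}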
2750 2740
2751 /* General hardware address lists handling functions */ 2741 /* General hardware address lists handling functions */
2752 extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, 2742 int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2753 struct netdev_hw_addr_list *from_list, 2743 struct netdev_hw_addr_list *from_list,
2754 int addr_len, unsigned char addr_type); 2744 int addr_len, unsigned char addr_type);
2755 extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, 2745 void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2756 struct netdev_hw_addr_list *from_list, 2746 struct netdev_hw_addr_list *from_list,
2757 int addr_len, unsigned char addr_type); 2747 int addr_len, unsigned char addr_type);
2758 extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 2748 int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2759 struct netdev_hw_addr_list *from_list, 2749 struct netdev_hw_addr_list *from_list, int addr_len);
2760 int addr_len); 2750 void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2761 extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 2751 struct netdev_hw_addr_list *from_list, int addr_len);
2762 struct netdev_hw_addr_list *from_list, 2752 void __hw_addr_flush(struct netdev_hw_addr_list *list);
2763 int addr_len); 2753 void __hw_addr_init(struct netdev_hw_addr_list *list);
2764 extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2765 extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2766 2754
2767 /* Functions used for device addresses handling */ 2755 /* Functions used for device addresses handling */
2768 extern int dev_addr_add(struct net_device *dev, const unsigned char *addr, 2756 int dev_addr_add(struct net_device *dev, const unsigned char *addr,
2769 unsigned char addr_type); 2757 unsigned char addr_type);
2770 extern int dev_addr_del(struct net_device *dev, const unsigned char *addr, 2758 int dev_addr_del(struct net_device *dev, const unsigned char *addr,
2771 unsigned char addr_type); 2759 unsigned char addr_type);
2772 extern int dev_addr_add_multiple(struct net_device *to_dev, 2760 int dev_addr_add_multiple(struct net_device *to_dev,
2773 struct net_device *from_dev, 2761 struct net_device *from_dev, unsigned char addr_type);
2774 unsigned char addr_type); 2762 int dev_addr_del_multiple(struct net_device *to_dev,
2775 extern int dev_addr_del_multiple(struct net_device *to_dev, 2763 struct net_device *from_dev, unsigned char addr_type);
2776 struct net_device *from_dev, 2764 void dev_addr_flush(struct net_device *dev);
2777 unsigned char addr_type); 2765 int dev_addr_init(struct net_device *dev);
2778 extern void dev_addr_flush(struct net_device *dev);
2779 extern int dev_addr_init(struct net_device *dev);
2780 2766
2781 /* Functions used for unicast addresses handling */ 2767 /* Functions used for unicast addresses handling */
2782 extern int dev_uc_add(struct net_device *dev, const unsigned char *addr); 2768 int dev_uc_add(struct net_device *dev, const unsigned char *addr);
2783 extern int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr); 2769 int dev_uc_add_excl(struct net_device *dev, const unsigned char *addr);
2784 extern int dev_uc_del(struct net_device *dev, const unsigned char *addr); 2770 int dev_uc_del(struct net_device *dev, const unsigned char *addr);
2785 extern int dev_uc_sync(struct net_device *to, struct net_device *from); 2771 int dev_uc_sync(struct net_device *to, struct net_device *from);
2786 extern int dev_uc_sync_multiple(struct net_device *to, struct net_device *from); 2772 int dev_uc_sync_multiple(struct net_device *to, struct net_device *from);
2787 extern void dev_uc_unsync(struct net_device *to, struct net_device *from); 2773 void dev_uc_unsync(struct net_device *to, struct net_device *from);
2788 extern void dev_uc_flush(struct net_device *dev); 2774 void dev_uc_flush(struct net_device *dev);
2789 extern void dev_uc_init(struct net_device *dev); 2775 void dev_uc_init(struct net_device *dev);
2790 2776
2791 /* Functions used for multicast addresses handling */ 2777 /* Functions used for multicast addresses handling */
2792 extern int dev_mc_add(struct net_device *dev, const unsigned char *addr); 2778 int dev_mc_add(struct net_device *dev, const unsigned char *addr);
2793 extern int dev_mc_add_global(struct net_device *dev, const unsigned char *addr); 2779 int dev_mc_add_global(struct net_device *dev, const unsigned char *addr);
2794 extern int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr); 2780 int dev_mc_add_excl(struct net_device *dev, const unsigned char *addr);
2795 extern int dev_mc_del(struct net_device *dev, const unsigned char *addr); 2781 int dev_mc_del(struct net_device *dev, const unsigned char *addr);
2796 extern int dev_mc_del_global(struct net_device *dev, const unsigned char *addr); 2782 int dev_mc_del_global(struct net_device *dev, const unsigned char *addr);
2797 extern int dev_mc_sync(struct net_device *to, struct net_device *from); 2783 int dev_mc_sync(struct net_device *to, struct net_device *from);
2798 extern int dev_mc_sync_multiple(struct net_device *to, struct net_device *from); 2784 int dev_mc_sync_multiple(struct net_device *to, struct net_device *from);
2799 extern void dev_mc_unsync(struct net_device *to, struct net_device *from); 2785 void dev_mc_unsync(struct net_device *to, struct net_device *from);
2800 extern void dev_mc_flush(struct net_device *dev); 2786 void dev_mc_flush(struct net_device *dev);
2801 extern void dev_mc_init(struct net_device *dev); 2787 void dev_mc_init(struct net_device *dev);
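
A common consumer of the sync helpers is a stacked driver's ndo_set_rx_mode, which pushes its unicast and multicast lists down to the real device. Sketch (mydrv_real_dev() is a hypothetical accessor for the lower device):

	static void mydrv_set_rx_mode(struct net_device *dev)
	{
		struct net_device *real_dev = mydrv_real_dev(dev); /* hypothetical */

		dev_uc_sync(real_dev, dev);
		dev_mc_sync(real_dev, dev);
	}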
2802 2788
2803 /* Functions used for secondary unicast and multicast support */ 2789 /* Functions used for secondary unicast and multicast support */
2804 extern void dev_set_rx_mode(struct net_device *dev); 2790 void dev_set_rx_mode(struct net_device *dev);
2805 extern void __dev_set_rx_mode(struct net_device *dev); 2791 void __dev_set_rx_mode(struct net_device *dev);
2806 extern int dev_set_promiscuity(struct net_device *dev, int inc); 2792 int dev_set_promiscuity(struct net_device *dev, int inc);
2807 extern int dev_set_allmulti(struct net_device *dev, int inc); 2793 int dev_set_allmulti(struct net_device *dev, int inc);
2808 extern void netdev_state_change(struct net_device *dev); 2794 void netdev_state_change(struct net_device *dev);
2809 extern void netdev_notify_peers(struct net_device *dev); 2795 void netdev_notify_peers(struct net_device *dev);
2810 extern void netdev_features_change(struct net_device *dev); 2796 void netdev_features_change(struct net_device *dev);
2811 /* Load a device via the kmod */ 2797 /* Load a device via the kmod */
2812 extern void dev_load(struct net *net, const char *name); 2798 void dev_load(struct net *net, const char *name);
2813 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 2799 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2814 struct rtnl_link_stats64 *storage); 2800 struct rtnl_link_stats64 *storage);
2815 extern void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64, 2801 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
2816 const struct net_device_stats *netdev_stats); 2802 const struct net_device_stats *netdev_stats);
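
netdev_stats_to_stats64() widens the legacy counters, so a driver that still accumulates into dev->stats can satisfy the 64-bit stats hook with a one-liner. Hedged sketch of such an ndo_get_stats64 implementation:

	static struct rtnl_link_stats64 *
	mydrv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
	{
		netdev_stats_to_stats64(storage, &dev->stats);
		return storage;
	}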
2817 2803
2818 extern int netdev_max_backlog; 2804 extern int netdev_max_backlog;
2819 extern int netdev_tstamp_prequeue; 2805 extern int netdev_tstamp_prequeue;
2820 extern int weight_p; 2806 extern int weight_p;
2821 extern int bpf_jit_enable; 2807 extern int bpf_jit_enable;
2822 2808
2823 extern bool netdev_has_upper_dev(struct net_device *dev, 2809 bool netdev_has_upper_dev(struct net_device *dev, struct net_device *upper_dev);
2824 struct net_device *upper_dev); 2810 bool netdev_has_any_upper_dev(struct net_device *dev);
2825 extern bool netdev_has_any_upper_dev(struct net_device *dev); 2811 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
2826 extern struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev, 2812 struct list_head **iter);
2827 struct list_head **iter);
2828 2813
2829 /* iterate through upper list, must be called under RCU read lock */ 2814 /* iterate through upper list, must be called under RCU read lock */
2830 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \ 2815 #define netdev_for_each_all_upper_dev_rcu(dev, updev, iter) \
2831 for (iter = &(dev)->all_adj_list.upper, \ 2816 for (iter = &(dev)->all_adj_list.upper, \
2832 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \ 2817 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)); \
2833 updev; \ 2818 updev; \
2834 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter))) 2819 updev = netdev_all_upper_get_next_dev_rcu(dev, &(iter)))
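
Usage sketch for the iterator, under the RCU read lock as the comment requires:

	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_all_upper_dev_rcu(dev, upper, iter)
		pr_info("%s: upper device %s\n", dev->name, upper->name);
	rcu_read_unlock();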
2835 2820
2836 extern void *netdev_lower_get_next_private(struct net_device *dev, 2821 void *netdev_lower_get_next_private(struct net_device *dev,
2837 struct list_head **iter); 2822 struct list_head **iter);
2838 extern void *netdev_lower_get_next_private_rcu(struct net_device *dev, 2823 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
2839 struct list_head **iter); 2824 struct list_head **iter);
2840 2825
2841 #define netdev_for_each_lower_private(dev, priv, iter) \ 2826 #define netdev_for_each_lower_private(dev, priv, iter) \
2842 for (iter = (dev)->adj_list.lower.next, \ 2827 for (iter = (dev)->adj_list.lower.next, \
2843 priv = netdev_lower_get_next_private(dev, &(iter)); \ 2828 priv = netdev_lower_get_next_private(dev, &(iter)); \
2844 priv; \ 2829 priv; \
2845 priv = netdev_lower_get_next_private(dev, &(iter))) 2830 priv = netdev_lower_get_next_private(dev, &(iter)))
2846 2831
2847 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \ 2832 #define netdev_for_each_lower_private_rcu(dev, priv, iter) \
2848 for (iter = &(dev)->adj_list.lower, \ 2833 for (iter = &(dev)->adj_list.lower, \
2849 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \ 2834 priv = netdev_lower_get_next_private_rcu(dev, &(iter)); \
2850 priv; \ 2835 priv; \
2851 priv = netdev_lower_get_next_private_rcu(dev, &(iter))) 2836 priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
2852 2837
2853 extern void *netdev_adjacent_get_private(struct list_head *adj_list); 2838 void *netdev_adjacent_get_private(struct list_head *adj_list);
2854 extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev); 2839 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
2855 extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev); 2840 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
2856 extern int netdev_upper_dev_link(struct net_device *dev, 2841 int netdev_upper_dev_link(struct net_device *dev, struct net_device *upper_dev);
2842 int netdev_master_upper_dev_link(struct net_device *dev,
2857 struct net_device *upper_dev); 2843 struct net_device *upper_dev);
2858 extern int netdev_master_upper_dev_link(struct net_device *dev, 2844 int netdev_master_upper_dev_link_private(struct net_device *dev,
2859 struct net_device *upper_dev); 2845 struct net_device *upper_dev,
2860 extern int netdev_master_upper_dev_link_private(struct net_device *dev, 2846 void *private);
2861 struct net_device *upper_dev, 2847 void netdev_upper_dev_unlink(struct net_device *dev,
2862 void *private); 2848 struct net_device *upper_dev);
2863 extern void netdev_upper_dev_unlink(struct net_device *dev, 2849 void *netdev_lower_dev_get_private_rcu(struct net_device *dev,
2864 struct net_device *upper_dev); 2850 struct net_device *lower_dev);
2865 extern void *netdev_lower_dev_get_private_rcu(struct net_device *dev, 2851 void *netdev_lower_dev_get_private(struct net_device *dev,
2866 struct net_device *lower_dev); 2852 struct net_device *lower_dev);
2867 extern void *netdev_lower_dev_get_private(struct net_device *dev, 2853 int skb_checksum_help(struct sk_buff *skb);
2868 struct net_device *lower_dev); 2854 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2869 extern int skb_checksum_help(struct sk_buff *skb); 2855 netdev_features_t features, bool tx_path);
2870 extern struct sk_buff *__skb_gso_segment(struct sk_buff *skb, 2856 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2871 netdev_features_t features, bool tx_path); 2857 netdev_features_t features);
2872 extern struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2873 netdev_features_t features);
2874 2858
2875 static inline 2859 static inline
2876 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features) 2860 struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
2877 { 2861 {
2878 return __skb_gso_segment(skb, features, true); 2862 return __skb_gso_segment(skb, features, true);
2879 } 2863 }
2880 __be16 skb_network_protocol(struct sk_buff *skb); 2864 __be16 skb_network_protocol(struct sk_buff *skb);
2881 2865
2882 static inline bool can_checksum_protocol(netdev_features_t features, 2866 static inline bool can_checksum_protocol(netdev_features_t features,
2883 __be16 protocol) 2867 __be16 protocol)
2884 { 2868 {
2885 return ((features & NETIF_F_GEN_CSUM) || 2869 return ((features & NETIF_F_GEN_CSUM) ||
2886 ((features & NETIF_F_V4_CSUM) && 2870 ((features & NETIF_F_V4_CSUM) &&
2887 protocol == htons(ETH_P_IP)) || 2871 protocol == htons(ETH_P_IP)) ||
2888 ((features & NETIF_F_V6_CSUM) && 2872 ((features & NETIF_F_V6_CSUM) &&
2889 protocol == htons(ETH_P_IPV6)) || 2873 protocol == htons(ETH_P_IPV6)) ||
2890 ((features & NETIF_F_FCOE_CRC) && 2874 ((features & NETIF_F_FCOE_CRC) &&
2891 protocol == htons(ETH_P_FCOE))); 2875 protocol == htons(ETH_P_FCOE)));
2892 } 2876 }
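
can_checksum_protocol() gates the software fallback: when the device cannot checksum this protocol, skb_checksum_help() (declared above) resolves CHECKSUM_PARTIAL in software. Sketch of the usual TX-path pattern (the 'drop' label is illustrative):

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    !can_checksum_protocol(features, skb_network_protocol(skb))) {
		if (skb_checksum_help(skb))
			goto drop;
	}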
2893 2877
2894 #ifdef CONFIG_BUG 2878 #ifdef CONFIG_BUG
2895 extern void netdev_rx_csum_fault(struct net_device *dev); 2879 void netdev_rx_csum_fault(struct net_device *dev);
2896 #else 2880 #else
2897 static inline void netdev_rx_csum_fault(struct net_device *dev) 2881 static inline void netdev_rx_csum_fault(struct net_device *dev)
2898 { 2882 {
2899 } 2883 }
2900 #endif 2884 #endif
2901 /* rx skb timestamps */ 2885 /* rx skb timestamps */
2902 extern void net_enable_timestamp(void); 2886 void net_enable_timestamp(void);
2903 extern void net_disable_timestamp(void); 2887 void net_disable_timestamp(void);
2904 2888
2905 #ifdef CONFIG_PROC_FS 2889 #ifdef CONFIG_PROC_FS
2906 extern int __init dev_proc_init(void); 2890 int __init dev_proc_init(void);
2907 #else 2891 #else
2908 #define dev_proc_init() 0 2892 #define dev_proc_init() 0
2909 #endif 2893 #endif
2910 2894
2911 extern int netdev_class_create_file(struct class_attribute *class_attr); 2895 int netdev_class_create_file(struct class_attribute *class_attr);
2912 extern void netdev_class_remove_file(struct class_attribute *class_attr); 2896 void netdev_class_remove_file(struct class_attribute *class_attr);
2913 2897
2914 extern struct kobj_ns_type_operations net_ns_type_operations; 2898 extern struct kobj_ns_type_operations net_ns_type_operations;
2915 2899
2916 extern const char *netdev_drivername(const struct net_device *dev); 2900 const char *netdev_drivername(const struct net_device *dev);
2917 2901
2918 extern void linkwatch_run_queue(void); 2902 void linkwatch_run_queue(void);
2919 2903
2920 static inline netdev_features_t netdev_get_wanted_features( 2904 static inline netdev_features_t netdev_get_wanted_features(
2921 struct net_device *dev) 2905 struct net_device *dev)
2922 { 2906 {
2923 return (dev->features & ~dev->hw_features) | dev->wanted_features; 2907 return (dev->features & ~dev->hw_features) | dev->wanted_features;
2924 } 2908 }
2925 netdev_features_t netdev_increment_features(netdev_features_t all, 2909 netdev_features_t netdev_increment_features(netdev_features_t all,
2926 netdev_features_t one, netdev_features_t mask); 2910 netdev_features_t one, netdev_features_t mask);
2927 2911
2928 /* Allow TSO to be used on a stacked device: 2912 /* Allow TSO to be used on a stacked device:
2929 * performing the GSO segmentation just before the last device 2913 * performing the GSO segmentation just before the last device
2930 * is a performance improvement. 2914 * is a performance improvement.
2931 */ 2915 */
2932 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features, 2916 static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
2933 netdev_features_t mask) 2917 netdev_features_t mask)
2934 { 2918 {
2935 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask); 2919 return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
2936 } 2920 }
2937 2921
2938 int __netdev_update_features(struct net_device *dev); 2922 int __netdev_update_features(struct net_device *dev);
2939 void netdev_update_features(struct net_device *dev); 2923 void netdev_update_features(struct net_device *dev);
2940 void netdev_change_features(struct net_device *dev); 2924 void netdev_change_features(struct net_device *dev);
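
A driver calls netdev_update_features() whenever the set of things the hardware can do changes, for example after a firmware reload enables receive checksumming (the trigger here is purely illustrative):

	dev->hw_features |= NETIF_F_RXCSUM;
	netdev_update_features(dev);	/* renegotiate dev->features */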
2941 2925
2942 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2926 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2943 struct net_device *dev); 2927 struct net_device *dev);
2944 2928
2945 netdev_features_t netif_skb_features(struct sk_buff *skb); 2929 netdev_features_t netif_skb_features(struct sk_buff *skb);
2946 2930
2947 static inline bool net_gso_ok(netdev_features_t features, int gso_type) 2931 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
2948 { 2932 {
2949 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT; 2933 netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
2950 2934
2951 /* check flags correspondence */ 2935 /* check flags correspondence */
2952 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT)); 2936 BUILD_BUG_ON(SKB_GSO_TCPV4 != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
2953 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT)); 2937 BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_UFO >> NETIF_F_GSO_SHIFT));
2954 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT)); 2938 BUILD_BUG_ON(SKB_GSO_DODGY != (NETIF_F_GSO_ROBUST >> NETIF_F_GSO_SHIFT));
2955 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT)); 2939 BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
2956 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT)); 2940 BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
2957 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT)); 2941 BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
2958 2942
2959 return (features & feature) == feature; 2943 return (features & feature) == feature;
2960 } 2944 }
2961 2945
2962 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features) 2946 static inline bool skb_gso_ok(struct sk_buff *skb, netdev_features_t features)
2963 { 2947 {
2964 return net_gso_ok(features, skb_shinfo(skb)->gso_type) && 2948 return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
2965 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); 2949 (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
2966 } 2950 }
2967 2951
2968 static inline bool netif_needs_gso(struct sk_buff *skb, 2952 static inline bool netif_needs_gso(struct sk_buff *skb,
2969 netdev_features_t features) 2953 netdev_features_t features)
2970 { 2954 {
2971 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) || 2955 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
2972 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) && 2956 unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
2973 (skb->ip_summed != CHECKSUM_UNNECESSARY))); 2957 (skb->ip_summed != CHECKSUM_UNNECESSARY)));
2974 } 2958 }
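
netif_needs_gso() combines skb_gso_ok() with the checksum sanity check; callers segment in software when it returns true. Sketch of the classic fallback (the transmit loop and 'drop' label are illustrative):

	netdev_features_t features = netif_skb_features(skb);

	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = skb_gso_segment(skb, features);

		if (IS_ERR(segs))
			goto drop;
		/* ... hand each segment to the device ... */
	}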
2975 2959
2976 static inline void netif_set_gso_max_size(struct net_device *dev, 2960 static inline void netif_set_gso_max_size(struct net_device *dev,
2977 unsigned int size) 2961 unsigned int size)
2978 { 2962 {
2979 dev->gso_max_size = size; 2963 dev->gso_max_size = size;
2980 } 2964 }
2981 2965
2982 static inline bool netif_is_bond_master(struct net_device *dev) 2966 static inline bool netif_is_bond_master(struct net_device *dev)
2983 { 2967 {
2984 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING; 2968 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
2985 } 2969 }
2986 2970
2987 static inline bool netif_is_bond_slave(struct net_device *dev) 2971 static inline bool netif_is_bond_slave(struct net_device *dev)
2988 { 2972 {
2989 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 2973 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
2990 } 2974 }
2991 2975
2992 static inline bool netif_supports_nofcs(struct net_device *dev) 2976 static inline bool netif_supports_nofcs(struct net_device *dev)
2993 { 2977 {
2994 return dev->priv_flags & IFF_SUPP_NOFCS; 2978 return dev->priv_flags & IFF_SUPP_NOFCS;
2995 } 2979 }
2996 2980
2997 extern struct pernet_operations __net_initdata loopback_net_ops; 2981 extern struct pernet_operations __net_initdata loopback_net_ops;
2998 2982
2999 /* Logging, debugging and troubleshooting/diagnostic helpers. */ 2983 /* Logging, debugging and troubleshooting/diagnostic helpers. */
3000 2984
3001 /* netdev_printk helpers, similar to dev_printk */ 2985 /* netdev_printk helpers, similar to dev_printk */
3002 2986
3003 static inline const char *netdev_name(const struct net_device *dev) 2987 static inline const char *netdev_name(const struct net_device *dev)
3004 { 2988 {
3005 if (dev->reg_state != NETREG_REGISTERED) 2989 if (dev->reg_state != NETREG_REGISTERED)
3006 return "(unregistered net_device)"; 2990 return "(unregistered net_device)";
3007 return dev->name; 2991 return dev->name;
3008 } 2992 }
3009 2993
3010 extern __printf(3, 4) 2994 __printf(3, 4)
3011 int netdev_printk(const char *level, const struct net_device *dev, 2995 int netdev_printk(const char *level, const struct net_device *dev,
3012 const char *format, ...); 2996 const char *format, ...);
3013 extern __printf(2, 3) 2997 __printf(2, 3)
3014 int netdev_emerg(const struct net_device *dev, const char *format, ...); 2998 int netdev_emerg(const struct net_device *dev, const char *format, ...);
3015 extern __printf(2, 3) 2999 __printf(2, 3)
3016 int netdev_alert(const struct net_device *dev, const char *format, ...); 3000 int netdev_alert(const struct net_device *dev, const char *format, ...);
3017 extern __printf(2, 3) 3001 __printf(2, 3)
3018 int netdev_crit(const struct net_device *dev, const char *format, ...); 3002 int netdev_crit(const struct net_device *dev, const char *format, ...);
3019 extern __printf(2, 3) 3003 __printf(2, 3)
3020 int netdev_err(const struct net_device *dev, const char *format, ...); 3004 int netdev_err(const struct net_device *dev, const char *format, ...);
3021 extern __printf(2, 3) 3005 __printf(2, 3)
3022 int netdev_warn(const struct net_device *dev, const char *format, ...); 3006 int netdev_warn(const struct net_device *dev, const char *format, ...);
3023 extern __printf(2, 3) 3007 __printf(2, 3)
3024 int netdev_notice(const struct net_device *dev, const char *format, ...); 3008 int netdev_notice(const struct net_device *dev, const char *format, ...);
3025 extern __printf(2, 3) 3009 __printf(2, 3)
3026 int netdev_info(const struct net_device *dev, const char *format, ...); 3010 int netdev_info(const struct net_device *dev, const char *format, ...);
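
Each helper takes the device first, so the output is prefixed with the driver and interface name. Typical calls (the values are illustrative):

	netdev_info(dev, "link up, %u Mb/s\n", speed);
	netdev_err(dev, "failed to map TX DMA buffer\n");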
3027 3011
3028 #define MODULE_ALIAS_NETDEV(device) \ 3012 #define MODULE_ALIAS_NETDEV(device) \
3029 MODULE_ALIAS("netdev-" device) 3013 MODULE_ALIAS("netdev-" device)
3030 3014
3031 #if defined(CONFIG_DYNAMIC_DEBUG) 3015 #if defined(CONFIG_DYNAMIC_DEBUG)
3032 #define netdev_dbg(__dev, format, args...) \ 3016 #define netdev_dbg(__dev, format, args...) \
3033 do { \ 3017 do { \
3034 dynamic_netdev_dbg(__dev, format, ##args); \ 3018 dynamic_netdev_dbg(__dev, format, ##args); \
3035 } while (0) 3019 } while (0)
3036 #elif defined(DEBUG) 3020 #elif defined(DEBUG)
3037 #define netdev_dbg(__dev, format, args...) \ 3021 #define netdev_dbg(__dev, format, args...) \
3038 netdev_printk(KERN_DEBUG, __dev, format, ##args) 3022 netdev_printk(KERN_DEBUG, __dev, format, ##args)
3039 #else 3023 #else
3040 #define netdev_dbg(__dev, format, args...) \ 3024 #define netdev_dbg(__dev, format, args...) \
3041 ({ \ 3025 ({ \
3042 if (0) \ 3026 if (0) \
3043 netdev_printk(KERN_DEBUG, __dev, format, ##args); \ 3027 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
3044 0; \ 3028 0; \
3045 }) 3029 })
3046 #endif 3030 #endif
3047 3031
3048 #if defined(VERBOSE_DEBUG) 3032 #if defined(VERBOSE_DEBUG)
3049 #define netdev_vdbg netdev_dbg 3033 #define netdev_vdbg netdev_dbg
3050 #else 3034 #else
3051 3035
3052 #define netdev_vdbg(dev, format, args...) \ 3036 #define netdev_vdbg(dev, format, args...) \
3053 ({ \ 3037 ({ \
3054 if (0) \ 3038 if (0) \
3055 netdev_printk(KERN_DEBUG, dev, format, ##args); \ 3039 netdev_printk(KERN_DEBUG, dev, format, ##args); \
3056 0; \ 3040 0; \
3057 }) 3041 })
3058 #endif 3042 #endif
3059 3043
3060 /* 3044 /*
3061 * netdev_WARN() acts like dev_printk(), but with the key difference 3045 * netdev_WARN() acts like dev_printk(), but with the key difference
3062 * of using a WARN/WARN_ON to get the message out, including the 3046 * of using a WARN/WARN_ON to get the message out, including the
3063 * file/line information and a backtrace. 3047 * file/line information and a backtrace.
3064 */ 3048 */
3065 #define netdev_WARN(dev, format, args...) \ 3049 #define netdev_WARN(dev, format, args...) \
3066 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args) 3050 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
3067 3051
3068 /* netif printk helpers, similar to netdev_printk */ 3052 /* netif printk helpers, similar to netdev_printk */
3069 3053
3070 #define netif_printk(priv, type, level, dev, fmt, args...) \ 3054 #define netif_printk(priv, type, level, dev, fmt, args...) \
3071 do { \ 3055 do { \
3072 if (netif_msg_##type(priv)) \ 3056 if (netif_msg_##type(priv)) \
3073 netdev_printk(level, (dev), fmt, ##args); \ 3057 netdev_printk(level, (dev), fmt, ##args); \
3074 } while (0) 3058 } while (0)
3075 3059
3076 #define netif_level(level, priv, type, dev, fmt, args...) \ 3060 #define netif_level(level, priv, type, dev, fmt, args...) \
3077 do { \ 3061 do { \
3078 if (netif_msg_##type(priv)) \ 3062 if (netif_msg_##type(priv)) \
3079 netdev_##level(dev, fmt, ##args); \ 3063 netdev_##level(dev, fmt, ##args); \
3080 } while (0) 3064 } while (0)
3081 3065
3082 #define netif_emerg(priv, type, dev, fmt, args...) \ 3066 #define netif_emerg(priv, type, dev, fmt, args...) \
3083 netif_level(emerg, priv, type, dev, fmt, ##args) 3067 netif_level(emerg, priv, type, dev, fmt, ##args)
3084 #define netif_alert(priv, type, dev, fmt, args...) \ 3068 #define netif_alert(priv, type, dev, fmt, args...) \
3085 netif_level(alert, priv, type, dev, fmt, ##args) 3069 netif_level(alert, priv, type, dev, fmt, ##args)
3086 #define netif_crit(priv, type, dev, fmt, args...) \ 3070 #define netif_crit(priv, type, dev, fmt, args...) \
3087 netif_level(crit, priv, type, dev, fmt, ##args) 3071 netif_level(crit, priv, type, dev, fmt, ##args)
3088 #define netif_err(priv, type, dev, fmt, args...) \ 3072 #define netif_err(priv, type, dev, fmt, args...) \
3089 netif_level(err, priv, type, dev, fmt, ##args) 3073 netif_level(err, priv, type, dev, fmt, ##args)
3090 #define netif_warn(priv, type, dev, fmt, args...) \ 3074 #define netif_warn(priv, type, dev, fmt, args...) \
3091 netif_level(warn, priv, type, dev, fmt, ##args) 3075 netif_level(warn, priv, type, dev, fmt, ##args)
3092 #define netif_notice(priv, type, dev, fmt, args...) \ 3076 #define netif_notice(priv, type, dev, fmt, args...) \
3093 netif_level(notice, priv, type, dev, fmt, ##args) 3077 netif_level(notice, priv, type, dev, fmt, ##args)
3094 #define netif_info(priv, type, dev, fmt, args...) \ 3078 #define netif_info(priv, type, dev, fmt, args...) \
3095 netif_level(info, priv, type, dev, fmt, ##args) 3079 netif_level(info, priv, type, dev, fmt, ##args)
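
The netif_* variants additionally consult the driver's msg_enable bitmap through netif_msg_##type(), so a message is emitted only when its class is enabled. Sketch, assuming priv is a driver private struct carrying msg_enable:

	netif_info(priv, link, dev, "link is up\n");
	netif_err(priv, tx_err, dev, "transmit timeout on queue %d\n", q);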
3096 3080
3097 #if defined(CONFIG_DYNAMIC_DEBUG) 3081 #if defined(CONFIG_DYNAMIC_DEBUG)
3098 #define netif_dbg(priv, type, netdev, format, args...) \ 3082 #define netif_dbg(priv, type, netdev, format, args...) \
3099 do { \ 3083 do { \
3100 if (netif_msg_##type(priv)) \ 3084 if (netif_msg_##type(priv)) \
3101 dynamic_netdev_dbg(netdev, format, ##args); \ 3085 dynamic_netdev_dbg(netdev, format, ##args); \
3102 } while (0) 3086 } while (0)
3103 #elif defined(DEBUG) 3087 #elif defined(DEBUG)
3104 #define netif_dbg(priv, type, dev, format, args...) \ 3088 #define netif_dbg(priv, type, dev, format, args...) \
3105 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) 3089 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
3106 #else 3090 #else
3107 #define netif_dbg(priv, type, dev, format, args...) \ 3091 #define netif_dbg(priv, type, dev, format, args...) \
3108 ({ \ 3092 ({ \
3109 if (0) \ 3093 if (0) \
3110 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 3094 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
3111 0; \ 3095 0; \
3112 }) 3096 })
3113 #endif 3097 #endif
3114 3098
3115 #if defined(VERBOSE_DEBUG) 3099 #if defined(VERBOSE_DEBUG)
3116 #define netif_vdbg netif_dbg 3100 #define netif_vdbg netif_dbg
3117 #else 3101 #else
3118 #define netif_vdbg(priv, type, dev, format, args...) \ 3102 #define netif_vdbg(priv, type, dev, format, args...) \
3119 ({ \ 3103 ({ \
3120 if (0) \ 3104 if (0) \
3121 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 3105 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
3122 0; \ 3106 0; \
3123 }) 3107 })
3124 #endif 3108 #endif
3125 3109
3126 /* 3110 /*
3127 * The list of packet types we will receive (as opposed to discard) 3111 * The list of packet types we will receive (as opposed to discard)
3128 * and the routines to invoke. 3112 * and the routines to invoke.
3129 * 3113 *
3130 * Why 16? Because with 16 the only overlap we get on a hash of the 3114 * Why 16? Because with 16 the only overlap we get on a hash of the
3131 * low nibble of the protocol value is RARP/SNAP/X.25. 3115 * low nibble of the protocol value is RARP/SNAP/X.25.
3132 * 3116 *
3133 * NOTE: That is no longer true with the addition of VLAN tags. Not 3117 * NOTE: That is no longer true with the addition of VLAN tags. Not
3134 * sure which should go first, but I bet it won't make much 3118 * sure which should go first, but I bet it won't make much
3135 * difference if we are running VLANs. The good news is that 3119 * difference if we are running VLANs. The good news is that
3136 * this protocol won't be in the list unless compiled in, so 3120 * this protocol won't be in the list unless compiled in, so
3137 * the average user (w/out VLANs) will not be adversely affected. 3121 * the average user (w/out VLANs) will not be adversely affected.
3138 * --BLG 3122 * --BLG
3139 * 3123 *
3140 * 0800 IP 3124 * 0800 IP
3141 * 8100 802.1Q VLAN 3125 * 8100 802.1Q VLAN
3142 * 0001 802.3 3126 * 0001 802.3
3143 * 0002 AX.25 3127 * 0002 AX.25
3144 * 0004 802.2 3128 * 0004 802.2
3145 * 8035 RARP 3129 * 8035 RARP
3146 * 0005 SNAP 3130 * 0005 SNAP
3147 * 0805 X.25 3131 * 0805 X.25
3148 * 0806 ARP 3132 * 0806 ARP
3149 * 8137 IPX 3133 * 8137 IPX
3150 * 0009 Localtalk 3134 * 0009 Localtalk
3151 * 86DD IPv6 3135 * 86DD IPv6
3152 */ 3136 */
3153 #define PTYPE_HASH_SIZE (16) 3137 #define PTYPE_HASH_SIZE (16)
3154 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1) 3138 #define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
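
The hash the comment describes is simply the protocol value masked into the 16-entry table; in use it looks like this sketch (ptype_base is the table in net/core/dev.c):

	struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];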
3155 3139
3156 #endif /* _LINUX_NETDEVICE_H */ 3140 #endif /* _LINUX_NETDEVICE_H */