Commit eed2a12f1ed9aabf0676f4d0db34aad51976c5c6

Authored by Mahesh Bandewar
Committed by David S. Miller
1 parent 47a0200d53

net: Allow ethtool to set interface in loopback mode.

This patch enables ethtool to set the loopback mode on a given interface.
By configuring the interface in loopback mode in conjunction with a policy
route / rule, a userland application can stress the egress / ingress path,
exposing the flows affected by the change in progress and potentially helping
developer(s) understand the impact of those changes without even sending a
packet out on the network.

Following set of commands illustrates one such example -
    a) ip -4 addr add 192.168.1.1/24 dev eth1
    b) ip -4 rule add from all iif eth1 lookup 250
    c) ip -4 route add local 0/0 dev lo proto kernel scope host table 250
    d) arp -Ds 192.168.1.100 eth1
    e) arp -Ds 192.168.1.200 eth1
    f) sysctl -w net.ipv4.ip_nonlocal_bind=1
    g) sysctl -w net.ipv4.conf.all.accept_local=1
    # Assuming that the machine has 8 cores
    h) taskset 000f netserver -L 192.168.1.200
    i) taskset 00f0 netperf -t TCP_CRR -L 192.168.1.100 -H 192.168.1.200 -l 30

Signed-off-by: Mahesh Bandewar <maheshb@google.com>
Acked-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 3 changed files with 5 additions and 3 deletions Inline Diff

drivers/net/loopback.c
1 /* 1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX 2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket 3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * Pseudo-driver for the loopback interface. 6 * Pseudo-driver for the loopback interface.
7 * 7 *
8 * Version: @(#)loopback.c 1.0.4b 08/16/93 8 * Version: @(#)loopback.c 1.0.4b 08/16/93
9 * 9 *
10 * Authors: Ross Biro 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Donald Becker, <becker@scyld.com> 12 * Donald Becker, <becker@scyld.com>
13 * 13 *
14 * Alan Cox : Fixed oddments for NET3.014 14 * Alan Cox : Fixed oddments for NET3.014
15 * Alan Cox : Rejig for NET3.029 snap #3 15 * Alan Cox : Rejig for NET3.029 snap #3
16 * Alan Cox : Fixed NET3.029 bugs and sped up 16 * Alan Cox : Fixed NET3.029 bugs and sped up
17 * Larry McVoy : Tiny tweak to double performance 17 * Larry McVoy : Tiny tweak to double performance
18 * Alan Cox : Backed out LMV's tweak - the linux mm 18 * Alan Cox : Backed out LMV's tweak - the linux mm
19 * can't take it... 19 * can't take it...
20 * Michael Griffith: Don't bother computing the checksums 20 * Michael Griffith: Don't bother computing the checksums
21 * on packets received on the loopback 21 * on packets received on the loopback
22 * interface. 22 * interface.
23 * Alexey Kuznetsov: Potential hang under some extreme 23 * Alexey Kuznetsov: Potential hang under some extreme
24 * cases removed. 24 * cases removed.
25 * 25 *
26 * This program is free software; you can redistribute it and/or 26 * This program is free software; you can redistribute it and/or
27 * modify it under the terms of the GNU General Public License 27 * modify it under the terms of the GNU General Public License
28 * as published by the Free Software Foundation; either version 28 * as published by the Free Software Foundation; either version
29 * 2 of the License, or (at your option) any later version. 29 * 2 of the License, or (at your option) any later version.
30 */ 30 */
31 #include <linux/kernel.h> 31 #include <linux/kernel.h>
32 #include <linux/jiffies.h> 32 #include <linux/jiffies.h>
33 #include <linux/module.h> 33 #include <linux/module.h>
34 #include <linux/interrupt.h> 34 #include <linux/interrupt.h>
35 #include <linux/fs.h> 35 #include <linux/fs.h>
36 #include <linux/types.h> 36 #include <linux/types.h>
37 #include <linux/string.h> 37 #include <linux/string.h>
38 #include <linux/socket.h> 38 #include <linux/socket.h>
39 #include <linux/errno.h> 39 #include <linux/errno.h>
40 #include <linux/fcntl.h> 40 #include <linux/fcntl.h>
41 #include <linux/in.h> 41 #include <linux/in.h>
42 #include <linux/init.h> 42 #include <linux/init.h>
43 43
44 #include <asm/system.h> 44 #include <asm/system.h>
45 #include <asm/uaccess.h> 45 #include <asm/uaccess.h>
46 #include <asm/io.h> 46 #include <asm/io.h>
47 47
48 #include <linux/inet.h> 48 #include <linux/inet.h>
49 #include <linux/netdevice.h> 49 #include <linux/netdevice.h>
50 #include <linux/etherdevice.h> 50 #include <linux/etherdevice.h>
51 #include <linux/skbuff.h> 51 #include <linux/skbuff.h>
52 #include <linux/ethtool.h> 52 #include <linux/ethtool.h>
53 #include <net/sock.h> 53 #include <net/sock.h>
54 #include <net/checksum.h> 54 #include <net/checksum.h>
55 #include <linux/if_ether.h> /* For the statistics structure. */ 55 #include <linux/if_ether.h> /* For the statistics structure. */
56 #include <linux/if_arp.h> /* For ARPHRD_ETHER */ 56 #include <linux/if_arp.h> /* For ARPHRD_ETHER */
57 #include <linux/ip.h> 57 #include <linux/ip.h>
58 #include <linux/tcp.h> 58 #include <linux/tcp.h>
59 #include <linux/percpu.h> 59 #include <linux/percpu.h>
60 #include <net/net_namespace.h> 60 #include <net/net_namespace.h>
61 #include <linux/u64_stats_sync.h> 61 #include <linux/u64_stats_sync.h>
62 62
63 struct pcpu_lstats { 63 struct pcpu_lstats {
64 u64 packets; 64 u64 packets;
65 u64 bytes; 65 u64 bytes;
66 struct u64_stats_sync syncp; 66 struct u64_stats_sync syncp;
67 }; 67 };
68 68
69 /* 69 /*
70 * The higher levels take care of making this non-reentrant (it's 70 * The higher levels take care of making this non-reentrant (it's
71 * called with bh's disabled). 71 * called with bh's disabled).
72 */ 72 */
73 static netdev_tx_t loopback_xmit(struct sk_buff *skb, 73 static netdev_tx_t loopback_xmit(struct sk_buff *skb,
74 struct net_device *dev) 74 struct net_device *dev)
75 { 75 {
76 struct pcpu_lstats *lb_stats; 76 struct pcpu_lstats *lb_stats;
77 int len; 77 int len;
78 78
79 skb_orphan(skb); 79 skb_orphan(skb);
80 80
81 skb->protocol = eth_type_trans(skb, dev); 81 skb->protocol = eth_type_trans(skb, dev);
82 82
83 /* it's OK to use per_cpu_ptr() because BHs are off */ 83 /* it's OK to use per_cpu_ptr() because BHs are off */
84 lb_stats = this_cpu_ptr(dev->lstats); 84 lb_stats = this_cpu_ptr(dev->lstats);
85 85
86 len = skb->len; 86 len = skb->len;
87 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) { 87 if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
88 u64_stats_update_begin(&lb_stats->syncp); 88 u64_stats_update_begin(&lb_stats->syncp);
89 lb_stats->bytes += len; 89 lb_stats->bytes += len;
90 lb_stats->packets++; 90 lb_stats->packets++;
91 u64_stats_update_end(&lb_stats->syncp); 91 u64_stats_update_end(&lb_stats->syncp);
92 } 92 }
93 93
94 return NETDEV_TX_OK; 94 return NETDEV_TX_OK;
95 } 95 }
96 96
97 static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev, 97 static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
98 struct rtnl_link_stats64 *stats) 98 struct rtnl_link_stats64 *stats)
99 { 99 {
100 u64 bytes = 0; 100 u64 bytes = 0;
101 u64 packets = 0; 101 u64 packets = 0;
102 int i; 102 int i;
103 103
104 for_each_possible_cpu(i) { 104 for_each_possible_cpu(i) {
105 const struct pcpu_lstats *lb_stats; 105 const struct pcpu_lstats *lb_stats;
106 u64 tbytes, tpackets; 106 u64 tbytes, tpackets;
107 unsigned int start; 107 unsigned int start;
108 108
109 lb_stats = per_cpu_ptr(dev->lstats, i); 109 lb_stats = per_cpu_ptr(dev->lstats, i);
110 do { 110 do {
111 start = u64_stats_fetch_begin(&lb_stats->syncp); 111 start = u64_stats_fetch_begin(&lb_stats->syncp);
112 tbytes = lb_stats->bytes; 112 tbytes = lb_stats->bytes;
113 tpackets = lb_stats->packets; 113 tpackets = lb_stats->packets;
114 } while (u64_stats_fetch_retry(&lb_stats->syncp, start)); 114 } while (u64_stats_fetch_retry(&lb_stats->syncp, start));
115 bytes += tbytes; 115 bytes += tbytes;
116 packets += tpackets; 116 packets += tpackets;
117 } 117 }
118 stats->rx_packets = packets; 118 stats->rx_packets = packets;
119 stats->tx_packets = packets; 119 stats->tx_packets = packets;
120 stats->rx_bytes = bytes; 120 stats->rx_bytes = bytes;
121 stats->tx_bytes = bytes; 121 stats->tx_bytes = bytes;
122 return stats; 122 return stats;
123 } 123 }
124 124
125 static u32 always_on(struct net_device *dev) 125 static u32 always_on(struct net_device *dev)
126 { 126 {
127 return 1; 127 return 1;
128 } 128 }
129 129
130 static const struct ethtool_ops loopback_ethtool_ops = { 130 static const struct ethtool_ops loopback_ethtool_ops = {
131 .get_link = always_on, 131 .get_link = always_on,
132 }; 132 };
133 133
134 static int loopback_dev_init(struct net_device *dev) 134 static int loopback_dev_init(struct net_device *dev)
135 { 135 {
136 dev->lstats = alloc_percpu(struct pcpu_lstats); 136 dev->lstats = alloc_percpu(struct pcpu_lstats);
137 if (!dev->lstats) 137 if (!dev->lstats)
138 return -ENOMEM; 138 return -ENOMEM;
139 139
140 return 0; 140 return 0;
141 } 141 }
142 142
143 static void loopback_dev_free(struct net_device *dev) 143 static void loopback_dev_free(struct net_device *dev)
144 { 144 {
145 free_percpu(dev->lstats); 145 free_percpu(dev->lstats);
146 free_netdev(dev); 146 free_netdev(dev);
147 } 147 }
148 148
149 static const struct net_device_ops loopback_ops = { 149 static const struct net_device_ops loopback_ops = {
150 .ndo_init = loopback_dev_init, 150 .ndo_init = loopback_dev_init,
151 .ndo_start_xmit= loopback_xmit, 151 .ndo_start_xmit= loopback_xmit,
152 .ndo_get_stats64 = loopback_get_stats64, 152 .ndo_get_stats64 = loopback_get_stats64,
153 }; 153 };
154 154
155 /* 155 /*
156 * The loopback device is special. There is only one instance 156 * The loopback device is special. There is only one instance
157 * per network namespace. 157 * per network namespace.
158 */ 158 */
159 static void loopback_setup(struct net_device *dev) 159 static void loopback_setup(struct net_device *dev)
160 { 160 {
161 dev->mtu = (16 * 1024) + 20 + 20 + 12; 161 dev->mtu = (16 * 1024) + 20 + 20 + 12;
162 dev->hard_header_len = ETH_HLEN; /* 14 */ 162 dev->hard_header_len = ETH_HLEN; /* 14 */
163 dev->addr_len = ETH_ALEN; /* 6 */ 163 dev->addr_len = ETH_ALEN; /* 6 */
164 dev->tx_queue_len = 0; 164 dev->tx_queue_len = 0;
165 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/ 165 dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
166 dev->flags = IFF_LOOPBACK; 166 dev->flags = IFF_LOOPBACK;
167 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 167 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
168 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO; 168 dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
169 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST 169 dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
170 | NETIF_F_ALL_TSO 170 | NETIF_F_ALL_TSO
171 | NETIF_F_UFO 171 | NETIF_F_UFO
172 | NETIF_F_NO_CSUM 172 | NETIF_F_NO_CSUM
173 | NETIF_F_RXCSUM 173 | NETIF_F_RXCSUM
174 | NETIF_F_HIGHDMA 174 | NETIF_F_HIGHDMA
175 | NETIF_F_LLTX 175 | NETIF_F_LLTX
176 | NETIF_F_NETNS_LOCAL 176 | NETIF_F_NETNS_LOCAL
177 | NETIF_F_VLAN_CHALLENGED; 177 | NETIF_F_VLAN_CHALLENGED
178 | NETIF_F_LOOPBACK;
178 dev->ethtool_ops = &loopback_ethtool_ops; 179 dev->ethtool_ops = &loopback_ethtool_ops;
179 dev->header_ops = &eth_header_ops; 180 dev->header_ops = &eth_header_ops;
180 dev->netdev_ops = &loopback_ops; 181 dev->netdev_ops = &loopback_ops;
181 dev->destructor = loopback_dev_free; 182 dev->destructor = loopback_dev_free;
182 } 183 }
183 184
184 /* Setup and register the loopback device. */ 185 /* Setup and register the loopback device. */
185 static __net_init int loopback_net_init(struct net *net) 186 static __net_init int loopback_net_init(struct net *net)
186 { 187 {
187 struct net_device *dev; 188 struct net_device *dev;
188 int err; 189 int err;
189 190
190 err = -ENOMEM; 191 err = -ENOMEM;
191 dev = alloc_netdev(0, "lo", loopback_setup); 192 dev = alloc_netdev(0, "lo", loopback_setup);
192 if (!dev) 193 if (!dev)
193 goto out; 194 goto out;
194 195
195 dev_net_set(dev, net); 196 dev_net_set(dev, net);
196 err = register_netdev(dev); 197 err = register_netdev(dev);
197 if (err) 198 if (err)
198 goto out_free_netdev; 199 goto out_free_netdev;
199 200
200 net->loopback_dev = dev; 201 net->loopback_dev = dev;
201 return 0; 202 return 0;
202 203
203 204
204 out_free_netdev: 205 out_free_netdev:
205 free_netdev(dev); 206 free_netdev(dev);
206 out: 207 out:
207 if (net_eq(net, &init_net)) 208 if (net_eq(net, &init_net))
208 panic("loopback: Failed to register netdevice: %d\n", err); 209 panic("loopback: Failed to register netdevice: %d\n", err);
209 return err; 210 return err;
210 } 211 }
211 212
212 /* Registered in net/core/dev.c */ 213 /* Registered in net/core/dev.c */
213 struct pernet_operations __net_initdata loopback_net_ops = { 214 struct pernet_operations __net_initdata loopback_net_ops = {
214 .init = loopback_net_init, 215 .init = loopback_net_init,
215 }; 216 };
216 217
include/linux/netdevice.h
1 /* 1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX 2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket 3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level. 4 * interface as the means of communication with the user level.
5 * 5 *
6 * Definitions for the Interfaces handler. 6 * Definitions for the Interfaces handler.
7 * 7 *
8 * Version: @(#)dev.h 1.0.10 08/12/93 8 * Version: @(#)dev.h 1.0.10 08/12/93
9 * 9 *
10 * Authors: Ross Biro 10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Corey Minyard <wf-rch!minyard@relay.EU.net> 12 * Corey Minyard <wf-rch!minyard@relay.EU.net>
13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> 13 * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
14 * Alan Cox, <alan@lxorguk.ukuu.org.uk> 14 * Alan Cox, <alan@lxorguk.ukuu.org.uk>
15 * Bjorn Ekwall. <bj0rn@blox.se> 15 * Bjorn Ekwall. <bj0rn@blox.se>
16 * Pekka Riikonen <priikone@poseidon.pspt.fi> 16 * Pekka Riikonen <priikone@poseidon.pspt.fi>
17 * 17 *
18 * This program is free software; you can redistribute it and/or 18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License 19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version 20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version. 21 * 2 of the License, or (at your option) any later version.
22 * 22 *
23 * Moved to /usr/include/linux for NET3 23 * Moved to /usr/include/linux for NET3
24 */ 24 */
25 #ifndef _LINUX_NETDEVICE_H 25 #ifndef _LINUX_NETDEVICE_H
26 #define _LINUX_NETDEVICE_H 26 #define _LINUX_NETDEVICE_H
27 27
28 #include <linux/if.h> 28 #include <linux/if.h>
29 #include <linux/if_ether.h> 29 #include <linux/if_ether.h>
30 #include <linux/if_packet.h> 30 #include <linux/if_packet.h>
31 #include <linux/if_link.h> 31 #include <linux/if_link.h>
32 32
33 #ifdef __KERNEL__ 33 #ifdef __KERNEL__
34 #include <linux/pm_qos_params.h> 34 #include <linux/pm_qos_params.h>
35 #include <linux/timer.h> 35 #include <linux/timer.h>
36 #include <linux/delay.h> 36 #include <linux/delay.h>
37 #include <linux/mm.h> 37 #include <linux/mm.h>
38 #include <asm/atomic.h> 38 #include <asm/atomic.h>
39 #include <asm/cache.h> 39 #include <asm/cache.h>
40 #include <asm/byteorder.h> 40 #include <asm/byteorder.h>
41 41
42 #include <linux/device.h> 42 #include <linux/device.h>
43 #include <linux/percpu.h> 43 #include <linux/percpu.h>
44 #include <linux/rculist.h> 44 #include <linux/rculist.h>
45 #include <linux/dmaengine.h> 45 #include <linux/dmaengine.h>
46 #include <linux/workqueue.h> 46 #include <linux/workqueue.h>
47 47
48 #include <linux/ethtool.h> 48 #include <linux/ethtool.h>
49 #include <net/net_namespace.h> 49 #include <net/net_namespace.h>
50 #include <net/dsa.h> 50 #include <net/dsa.h>
51 #ifdef CONFIG_DCB 51 #ifdef CONFIG_DCB
52 #include <net/dcbnl.h> 52 #include <net/dcbnl.h>
53 #endif 53 #endif
54 54
55 struct vlan_group; 55 struct vlan_group;
56 struct netpoll_info; 56 struct netpoll_info;
57 struct phy_device; 57 struct phy_device;
58 /* 802.11 specific */ 58 /* 802.11 specific */
59 struct wireless_dev; 59 struct wireless_dev;
60 /* source back-compat hooks */ 60 /* source back-compat hooks */
61 #define SET_ETHTOOL_OPS(netdev,ops) \ 61 #define SET_ETHTOOL_OPS(netdev,ops) \
62 ( (netdev)->ethtool_ops = (ops) ) 62 ( (netdev)->ethtool_ops = (ops) )
63 63
64 #define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev 64 #define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
65 functions are available. */ 65 functions are available. */
66 #define HAVE_FREE_NETDEV /* free_netdev() */ 66 #define HAVE_FREE_NETDEV /* free_netdev() */
67 #define HAVE_NETDEV_PRIV /* netdev_priv() */ 67 #define HAVE_NETDEV_PRIV /* netdev_priv() */
68 68
69 /* hardware address assignment types */ 69 /* hardware address assignment types */
70 #define NET_ADDR_PERM 0 /* address is permanent (default) */ 70 #define NET_ADDR_PERM 0 /* address is permanent (default) */
71 #define NET_ADDR_RANDOM 1 /* address is generated randomly */ 71 #define NET_ADDR_RANDOM 1 /* address is generated randomly */
72 #define NET_ADDR_STOLEN 2 /* address is stolen from other device */ 72 #define NET_ADDR_STOLEN 2 /* address is stolen from other device */
73 73
74 /* Backlog congestion levels */ 74 /* Backlog congestion levels */
75 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ 75 #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
76 #define NET_RX_DROP 1 /* packet dropped */ 76 #define NET_RX_DROP 1 /* packet dropped */
77 77
78 /* 78 /*
79 * Transmit return codes: transmit return codes originate from three different 79 * Transmit return codes: transmit return codes originate from three different
80 * namespaces: 80 * namespaces:
81 * 81 *
82 * - qdisc return codes 82 * - qdisc return codes
83 * - driver transmit return codes 83 * - driver transmit return codes
84 * - errno values 84 * - errno values
85 * 85 *
86 * Drivers are allowed to return any one of those in their hard_start_xmit() 86 * Drivers are allowed to return any one of those in their hard_start_xmit()
87 * function. Real network devices commonly used with qdiscs should only return 87 * function. Real network devices commonly used with qdiscs should only return
88 * the driver transmit return codes though - when qdiscs are used, the actual 88 * the driver transmit return codes though - when qdiscs are used, the actual
89 * transmission happens asynchronously, so the value is not propagated to 89 * transmission happens asynchronously, so the value is not propagated to
90 * higher layers. Virtual network devices transmit synchronously, in this case 90 * higher layers. Virtual network devices transmit synchronously, in this case
91 * the driver transmit return codes are consumed by dev_queue_xmit(), all 91 * the driver transmit return codes are consumed by dev_queue_xmit(), all
92 * others are propagated to higher layers. 92 * others are propagated to higher layers.
93 */ 93 */
94 94
95 /* qdisc ->enqueue() return codes. */ 95 /* qdisc ->enqueue() return codes. */
96 #define NET_XMIT_SUCCESS 0x00 96 #define NET_XMIT_SUCCESS 0x00
97 #define NET_XMIT_DROP 0x01 /* skb dropped */ 97 #define NET_XMIT_DROP 0x01 /* skb dropped */
98 #define NET_XMIT_CN 0x02 /* congestion notification */ 98 #define NET_XMIT_CN 0x02 /* congestion notification */
99 #define NET_XMIT_POLICED 0x03 /* skb is shot by police */ 99 #define NET_XMIT_POLICED 0x03 /* skb is shot by police */
100 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */ 100 #define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
101 101
102 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It 102 /* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
103 * indicates that the device will soon be dropping packets, or already drops 103 * indicates that the device will soon be dropping packets, or already drops
104 * some packets of the same priority; prompting us to send less aggressively. */ 104 * some packets of the same priority; prompting us to send less aggressively. */
105 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e)) 105 #define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
106 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) 106 #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
107 107
108 /* Driver transmit return codes */ 108 /* Driver transmit return codes */
109 #define NETDEV_TX_MASK 0xf0 109 #define NETDEV_TX_MASK 0xf0
110 110
111 enum netdev_tx { 111 enum netdev_tx {
112 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */ 112 __NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
113 NETDEV_TX_OK = 0x00, /* driver took care of packet */ 113 NETDEV_TX_OK = 0x00, /* driver took care of packet */
114 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/ 114 NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
115 NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */ 115 NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
116 }; 116 };
117 typedef enum netdev_tx netdev_tx_t; 117 typedef enum netdev_tx netdev_tx_t;
118 118
119 /* 119 /*
120 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant; 120 * Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
121 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed. 121 * hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
122 */ 122 */
123 static inline bool dev_xmit_complete(int rc) 123 static inline bool dev_xmit_complete(int rc)
124 { 124 {
125 /* 125 /*
126 * Positive cases with an skb consumed by a driver: 126 * Positive cases with an skb consumed by a driver:
127 * - successful transmission (rc == NETDEV_TX_OK) 127 * - successful transmission (rc == NETDEV_TX_OK)
128 * - error while transmitting (rc < 0) 128 * - error while transmitting (rc < 0)
129 * - error while queueing to a different device (rc & NET_XMIT_MASK) 129 * - error while queueing to a different device (rc & NET_XMIT_MASK)
130 */ 130 */
131 if (likely(rc < NET_XMIT_MASK)) 131 if (likely(rc < NET_XMIT_MASK))
132 return true; 132 return true;
133 133
134 return false; 134 return false;
135 } 135 }
136 136
137 #endif 137 #endif
138 138
139 #define MAX_ADDR_LEN 32 /* Largest hardware address length */ 139 #define MAX_ADDR_LEN 32 /* Largest hardware address length */
140 140
141 /* Initial net device group. All devices belong to group 0 by default. */ 141 /* Initial net device group. All devices belong to group 0 by default. */
142 #define INIT_NETDEV_GROUP 0 142 #define INIT_NETDEV_GROUP 0
143 143
144 #ifdef __KERNEL__ 144 #ifdef __KERNEL__
145 /* 145 /*
146 * Compute the worst case header length according to the protocols 146 * Compute the worst case header length according to the protocols
147 * used. 147 * used.
148 */ 148 */
149 149
150 #if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) 150 #if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
151 # if defined(CONFIG_MAC80211_MESH) 151 # if defined(CONFIG_MAC80211_MESH)
152 # define LL_MAX_HEADER 128 152 # define LL_MAX_HEADER 128
153 # else 153 # else
154 # define LL_MAX_HEADER 96 154 # define LL_MAX_HEADER 96
155 # endif 155 # endif
156 #elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE) 156 #elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
157 # define LL_MAX_HEADER 48 157 # define LL_MAX_HEADER 48
158 #else 158 #else
159 # define LL_MAX_HEADER 32 159 # define LL_MAX_HEADER 32
160 #endif 160 #endif
161 161
162 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \ 162 #if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
163 !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \ 163 !defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
164 !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \ 164 !defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
165 !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE) 165 !defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
166 #define MAX_HEADER LL_MAX_HEADER 166 #define MAX_HEADER LL_MAX_HEADER
167 #else 167 #else
168 #define MAX_HEADER (LL_MAX_HEADER + 48) 168 #define MAX_HEADER (LL_MAX_HEADER + 48)
169 #endif 169 #endif
170 170
171 /* 171 /*
172 * Old network device statistics. Fields are native words 172 * Old network device statistics. Fields are native words
173 * (unsigned long) so they can be read and written atomically. 173 * (unsigned long) so they can be read and written atomically.
174 */ 174 */
175 175
176 struct net_device_stats { 176 struct net_device_stats {
177 unsigned long rx_packets; 177 unsigned long rx_packets;
178 unsigned long tx_packets; 178 unsigned long tx_packets;
179 unsigned long rx_bytes; 179 unsigned long rx_bytes;
180 unsigned long tx_bytes; 180 unsigned long tx_bytes;
181 unsigned long rx_errors; 181 unsigned long rx_errors;
182 unsigned long tx_errors; 182 unsigned long tx_errors;
183 unsigned long rx_dropped; 183 unsigned long rx_dropped;
184 unsigned long tx_dropped; 184 unsigned long tx_dropped;
185 unsigned long multicast; 185 unsigned long multicast;
186 unsigned long collisions; 186 unsigned long collisions;
187 unsigned long rx_length_errors; 187 unsigned long rx_length_errors;
188 unsigned long rx_over_errors; 188 unsigned long rx_over_errors;
189 unsigned long rx_crc_errors; 189 unsigned long rx_crc_errors;
190 unsigned long rx_frame_errors; 190 unsigned long rx_frame_errors;
191 unsigned long rx_fifo_errors; 191 unsigned long rx_fifo_errors;
192 unsigned long rx_missed_errors; 192 unsigned long rx_missed_errors;
193 unsigned long tx_aborted_errors; 193 unsigned long tx_aborted_errors;
194 unsigned long tx_carrier_errors; 194 unsigned long tx_carrier_errors;
195 unsigned long tx_fifo_errors; 195 unsigned long tx_fifo_errors;
196 unsigned long tx_heartbeat_errors; 196 unsigned long tx_heartbeat_errors;
197 unsigned long tx_window_errors; 197 unsigned long tx_window_errors;
198 unsigned long rx_compressed; 198 unsigned long rx_compressed;
199 unsigned long tx_compressed; 199 unsigned long tx_compressed;
200 }; 200 };
201 201
202 #endif /* __KERNEL__ */ 202 #endif /* __KERNEL__ */
203 203
204 204
205 /* Media selection options. */ 205 /* Media selection options. */
206 enum { 206 enum {
207 IF_PORT_UNKNOWN = 0, 207 IF_PORT_UNKNOWN = 0,
208 IF_PORT_10BASE2, 208 IF_PORT_10BASE2,
209 IF_PORT_10BASET, 209 IF_PORT_10BASET,
210 IF_PORT_AUI, 210 IF_PORT_AUI,
211 IF_PORT_100BASET, 211 IF_PORT_100BASET,
212 IF_PORT_100BASETX, 212 IF_PORT_100BASETX,
213 IF_PORT_100BASEFX 213 IF_PORT_100BASEFX
214 }; 214 };
215 215
216 #ifdef __KERNEL__ 216 #ifdef __KERNEL__
217 217
218 #include <linux/cache.h> 218 #include <linux/cache.h>
219 #include <linux/skbuff.h> 219 #include <linux/skbuff.h>
220 220
221 struct neighbour; 221 struct neighbour;
222 struct neigh_parms; 222 struct neigh_parms;
223 struct sk_buff; 223 struct sk_buff;
224 224
225 struct netdev_hw_addr { 225 struct netdev_hw_addr {
226 struct list_head list; 226 struct list_head list;
227 unsigned char addr[MAX_ADDR_LEN]; 227 unsigned char addr[MAX_ADDR_LEN];
228 unsigned char type; 228 unsigned char type;
229 #define NETDEV_HW_ADDR_T_LAN 1 229 #define NETDEV_HW_ADDR_T_LAN 1
230 #define NETDEV_HW_ADDR_T_SAN 2 230 #define NETDEV_HW_ADDR_T_SAN 2
231 #define NETDEV_HW_ADDR_T_SLAVE 3 231 #define NETDEV_HW_ADDR_T_SLAVE 3
232 #define NETDEV_HW_ADDR_T_UNICAST 4 232 #define NETDEV_HW_ADDR_T_UNICAST 4
233 #define NETDEV_HW_ADDR_T_MULTICAST 5 233 #define NETDEV_HW_ADDR_T_MULTICAST 5
234 bool synced; 234 bool synced;
235 bool global_use; 235 bool global_use;
236 int refcount; 236 int refcount;
237 struct rcu_head rcu_head; 237 struct rcu_head rcu_head;
238 }; 238 };
239 239
240 struct netdev_hw_addr_list { 240 struct netdev_hw_addr_list {
241 struct list_head list; 241 struct list_head list;
242 int count; 242 int count;
243 }; 243 };
244 244
245 #define netdev_hw_addr_list_count(l) ((l)->count) 245 #define netdev_hw_addr_list_count(l) ((l)->count)
246 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0) 246 #define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
247 #define netdev_hw_addr_list_for_each(ha, l) \ 247 #define netdev_hw_addr_list_for_each(ha, l) \
248 list_for_each_entry(ha, &(l)->list, list) 248 list_for_each_entry(ha, &(l)->list, list)
249 249
250 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc) 250 #define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
251 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc) 251 #define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
252 #define netdev_for_each_uc_addr(ha, dev) \ 252 #define netdev_for_each_uc_addr(ha, dev) \
253 netdev_hw_addr_list_for_each(ha, &(dev)->uc) 253 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
254 254
255 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc) 255 #define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
256 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc) 256 #define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
257 #define netdev_for_each_mc_addr(ha, dev) \ 257 #define netdev_for_each_mc_addr(ha, dev) \
258 netdev_hw_addr_list_for_each(ha, &(dev)->mc) 258 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
259 259
260 struct hh_cache { 260 struct hh_cache {
261 struct hh_cache *hh_next; /* Next entry */ 261 struct hh_cache *hh_next; /* Next entry */
262 atomic_t hh_refcnt; /* number of users */ 262 atomic_t hh_refcnt; /* number of users */
263 /* 263 /*
264 * We want hh_output, hh_len, hh_lock and hh_data be a in a separate 264 * We want hh_output, hh_len, hh_lock and hh_data be a in a separate
265 * cache line on SMP. 265 * cache line on SMP.
266 * They are mostly read, but hh_refcnt may be changed quite frequently, 266 * They are mostly read, but hh_refcnt may be changed quite frequently,
267 * incurring cache line ping pongs. 267 * incurring cache line ping pongs.
268 */ 268 */
269 __be16 hh_type ____cacheline_aligned_in_smp; 269 __be16 hh_type ____cacheline_aligned_in_smp;
270 /* protocol identifier, f.e ETH_P_IP 270 /* protocol identifier, f.e ETH_P_IP
271 * NOTE: For VLANs, this will be the 271 * NOTE: For VLANs, this will be the
272 * encapuslated type. --BLG 272 * encapuslated type. --BLG
273 */ 273 */
274 u16 hh_len; /* length of header */ 274 u16 hh_len; /* length of header */
275 int (*hh_output)(struct sk_buff *skb); 275 int (*hh_output)(struct sk_buff *skb);
276 seqlock_t hh_lock; 276 seqlock_t hh_lock;
277 277
278 /* cached hardware header; allow for machine alignment needs. */ 278 /* cached hardware header; allow for machine alignment needs. */
279 #define HH_DATA_MOD 16 279 #define HH_DATA_MOD 16
280 #define HH_DATA_OFF(__len) \ 280 #define HH_DATA_OFF(__len) \
281 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) 281 (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
282 #define HH_DATA_ALIGN(__len) \ 282 #define HH_DATA_ALIGN(__len) \
283 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) 283 (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
284 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; 284 unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
285 }; 285 };
286 286
287 static inline void hh_cache_put(struct hh_cache *hh) 287 static inline void hh_cache_put(struct hh_cache *hh)
288 { 288 {
289 if (atomic_dec_and_test(&hh->hh_refcnt)) 289 if (atomic_dec_and_test(&hh->hh_refcnt))
290 kfree(hh); 290 kfree(hh);
291 } 291 }
292 292
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *	dev->hard_header_len ? (dev->hard_header_len +
 *				(HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
/* Headroom to reserve for the link-layer header. */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
/* As above, with extra bytes of headroom requested by the caller. */
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
/* Headroom plus the tailroom (needed_tailroom) the device may need. */
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
310 310
/* Link-layer header operations attached to a net_device: build, parse,
 * rebuild and cache the hardware header of packets on this device.
 */
struct header_ops {
	/* Build a hardware header of the given type in front of the skb data. */
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	/* Extract a hardware address from a received skb into haddr. */
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	/* Re-create the hardware header of an skb in place. */
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	/* Populate an hh_cache entry for this neighbour. */
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	/* Refresh a cached header after the device hardware address changed. */
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
323 323
/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 */

/* Bit numbers used in the netdevice state bitmask (see netdev_state
 * helpers elsewhere in this file).
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
336 336
337 337
/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];	/* interface name */
	struct ifmap map;	/* I/O, memory and IRQ parameters */
};
/* Maximum number of boot-time configured devices. */
#define NETDEV_BOOT_SETUP_MAX 8
347 347
348 extern int __init netdev_boot_setup(char *str); 348 extern int __init netdev_boot_setup(char *str);
349 349
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;

	unsigned long		state;	/* NAPI_STATE_* bits */
	int			weight;	/* budget passed to *poll per run */
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif

	unsigned int		gro_count;

	struct net_device	*dev;
	struct list_head	dev_list;
	struct sk_buff		*gro_list;	/* skbs held for GRO merging */
	struct sk_buff		*skb;
};
377 377
/* Bits of napi_struct::state, manipulated with the atomic bitops below. */
enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};
383 383
/* Result codes of the GRO (generic receive offload) receive path. */
enum gro_result {
	GRO_MERGED,
	GRO_MERGED_FREE,
	GRO_HELD,
	GRO_NORMAL,
	GRO_DROP,
};
typedef enum gro_result gro_result_t;
392 392
/*
 * enum rx_handler_result - Possible return values for rx_handlers.
 * @RX_HANDLER_CONSUMED: skb was consumed by rx_handler, do not process it
 * further.
 * @RX_HANDLER_ANOTHER: Do another round in receive path. This is indicated in
 * case skb->dev was changed by rx_handler.
 * @RX_HANDLER_EXACT: Force exact delivery, no wildcard.
 * @RX_HANDLER_PASS: Do nothing, pass the skb as if no rx_handler was called.
 *
 * rx_handlers are functions called from inside __netif_receive_skb(), to do
 * special processing of the skb, prior to delivery to protocol handlers.
 *
 * Currently, a net_device can only have a single rx_handler registered. Trying
 * to register a second rx_handler will return -EBUSY.
 *
 * To register a rx_handler on a net_device, use netdev_rx_handler_register().
 * To unregister a rx_handler on a net_device, use
 * netdev_rx_handler_unregister().
 *
 * Upon return, rx_handler is expected to tell __netif_receive_skb() what to
 * do with the skb.
 *
 * If the rx_handler consumed the skb in some way, it should return
 * RX_HANDLER_CONSUMED. This is appropriate when the rx_handler arranged for
 * the skb to be delivered in some other way.
 *
 * If the rx_handler changed skb->dev, to divert the skb to another
 * net_device, it should return RX_HANDLER_ANOTHER. The rx_handler for the
 * new device will be called if it exists.
 *
 * If the rx_handler considers the skb should be ignored, it should return
 * RX_HANDLER_EXACT. The skb will only be delivered to protocol handlers that
 * are registered on the exact device (ptype->dev == skb->dev).
 *
 * If the rx_handler didn't change skb->dev, but wants the skb to be normally
 * delivered, it should return RX_HANDLER_PASS.
 *
 * A device without a registered rx_handler will behave as if rx_handler
 * returned RX_HANDLER_PASS.
 */

enum rx_handler_result {
	RX_HANDLER_CONSUMED,
	RX_HANDLER_ANOTHER,
	RX_HANDLER_EXACT,
	RX_HANDLER_PASS,
};
typedef enum rx_handler_result rx_handler_result_t;
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
442 442
443 extern void __napi_schedule(struct napi_struct *n); 443 extern void __napi_schedule(struct napi_struct *n);
444 444
/**
 *	napi_disable_pending - test if a NAPI disable is pending
 *	@n: napi context
 *
 *	Returns non-zero if napi_disable() has been requested on this
 *	context; used by napi_schedule_prep() to refuse new scheduling.
 */
static inline int napi_disable_pending(struct napi_struct *n)
{
	return test_bit(NAPI_STATE_DISABLE, &n->state);
}
449 449
450 /** 450 /**
451 * napi_schedule_prep - check if napi can be scheduled 451 * napi_schedule_prep - check if napi can be scheduled
452 * @n: napi context 452 * @n: napi context
453 * 453 *
454 * Test if NAPI routine is already running, and if not mark 454 * Test if NAPI routine is already running, and if not mark
455 * it as running. This is used as a condition variable 455 * it as running. This is used as a condition variable
456 * insure only one NAPI poll instance runs. We also make 456 * insure only one NAPI poll instance runs. We also make
457 * sure there is no pending NAPI disable. 457 * sure there is no pending NAPI disable.
458 */ 458 */
459 static inline int napi_schedule_prep(struct napi_struct *n) 459 static inline int napi_schedule_prep(struct napi_struct *n)
460 { 460 {
461 return !napi_disable_pending(n) && 461 return !napi_disable_pending(n) &&
462 !test_and_set_bit(NAPI_STATE_SCHED, &n->state); 462 !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
463 } 463 }
464 464
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 *	Schedule NAPI poll routine to be called if it is not already
 *	running.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (!napi_schedule_prep(n))
		return;

	__napi_schedule(n);
}
477 477
/* Try to reschedule poll. Called by dev->poll() after napi_complete().
 * Returns 1 if the poll was rescheduled, 0 otherwise.
 */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (!napi_schedule_prep(napi))
		return 0;

	__napi_schedule(napi);
	return 1;
}
487 487
488 /** 488 /**
489 * napi_complete - NAPI processing complete 489 * napi_complete - NAPI processing complete
490 * @n: napi context 490 * @n: napi context
491 * 491 *
492 * Mark NAPI processing as complete. 492 * Mark NAPI processing as complete.
493 */ 493 */
494 extern void __napi_complete(struct napi_struct *n); 494 extern void __napi_complete(struct napi_struct *n);
495 extern void napi_complete(struct napi_struct *n); 495 extern void napi_complete(struct napi_struct *n);
496 496
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 *	Stop NAPI from being scheduled on this context.
 *	Waits till any outstanding processing completes.
 */
static inline void napi_disable(struct napi_struct *n)
{
	/* Make napi_schedule_prep() refuse any new scheduling. */
	set_bit(NAPI_STATE_DISABLE, &n->state);
	/* Take the SCHED bit ourselves; spin while a poll still owns it. */
	while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
511 511
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 *	Resume NAPI from being scheduled on this context.
 *	Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	/* napi_disable() left SCHED set; it must still be held here. */
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	/* Order prior stores before the bit clear that re-enables polling. */
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
525 525
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 *	Wait until NAPI is done being scheduled on this context.
 *	Waits till any outstanding processing completes but
 *	does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
/* On UP no poll can run concurrently with us; a compiler barrier suffices. */
# define napi_synchronize(n)	barrier()
#endif
543 543
/* Bit numbers of netdev_queue::state. */
enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,
	__QUEUE_STATE_FROZEN,
/* Mask matching either of the two bits above. */
#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)	|	\
				    (1 << __QUEUE_STATE_FROZEN))
};
550 550
/* One transmit queue of a net_device.  Read-mostly fields are kept on a
 * separate cache line from the write-hot xmit lock.
 */
struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;	/* owning device */
	struct Qdisc		*qdisc;
	unsigned long		state;	/* __QUEUE_STATE_* bits */
	struct Qdisc		*qdisc_sleeping;
#ifdef CONFIG_RPS
	struct kobject		kobj;
#endif
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	int			numa_node;	/* see accessors below */
#endif
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;
} ____cacheline_aligned_in_smp;
575 575
/* Return the NUMA node associated with this TX queue, or NUMA_NO_NODE
 * when XPS/NUMA support is compiled out.
 */
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	return q->numa_node;
#else
	return NUMA_NO_NODE;
#endif
}
584 584
/* Record the NUMA node for this TX queue; a no-op when XPS/NUMA support
 * is compiled out.
 */
static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
{
#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
	q->numa_node = node;
#endif
}
591 591
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;	/* number of valid entries in cpus[] */
	struct rcu_head rcu;	/* for RCU-deferred freeing */
	u16 cpus[0];		/* variable-length array of CPU ids */
};
/* Bytes needed for a map holding _num CPU entries. */
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
603 603
/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU, the
 * tail pointer for that CPU's input queue at the time of last enqueue, and
 * a hardware filter index.
 */
struct rps_dev_flow {
	u16 cpu;		/* CPU handling this flow */
	u16 filter;		/* hardware filter index, or RPS_NO_FILTER */
	unsigned int last_qtail;	/* input queue tail at last enqueue */
};
#define RPS_NO_FILTER 0xffff
615 615
/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;		/* applied to a flow hash to index flows[] */
	struct rcu_head rcu;
	struct work_struct free_work;	/* deferred freeing of the table */
	struct rps_dev_flow flows[0];	/* variable-length flow array */
};
/* Bytes needed for a table holding _num flow entries. */
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	(_num * sizeof(struct rps_dev_flow)))
627 627
/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;	/* applied to a flow hash to index ents[] */
	u16 ents[0];		/* last CPU of each flow, or RPS_NO_CPU */
};
/* Bytes needed for a table holding _num entries. */
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	(_num * sizeof(u16)))

/* Sentinel entry value: no CPU is recorded for this flow. */
#define RPS_NO_CPU 0xffff
640 640
641 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table, 641 static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
642 u32 hash) 642 u32 hash)
643 { 643 {
644 if (table && hash) { 644 if (table && hash) {
645 unsigned int cpu, index = hash & table->mask; 645 unsigned int cpu, index = hash & table->mask;
646 646
647 /* We only give a hint, preemption can change cpu under us */ 647 /* We only give a hint, preemption can change cpu under us */
648 cpu = raw_smp_processor_id(); 648 cpu = raw_smp_processor_id();
649 649
650 if (table->ents[index] != cpu) 650 if (table->ents[index] != cpu)
651 table->ents[index] = cpu; 651 table->ents[index] = cpu;
652 } 652 }
653 } 653 }
654 654
655 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table, 655 static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
656 u32 hash) 656 u32 hash)
657 { 657 {
658 if (table && hash) 658 if (table && hash)
659 table->ents[hash & table->mask] = RPS_NO_CPU; 659 table->ents[hash & table->mask] = RPS_NO_CPU;
660 } 660 }
661 661
662 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table; 662 extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
663 663
664 #ifdef CONFIG_RFS_ACCEL 664 #ifdef CONFIG_RFS_ACCEL
665 extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, 665 extern bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
666 u32 flow_id, u16 filter_id); 666 u32 flow_id, u16 filter_id);
667 #endif 667 #endif
668 668
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map __rcu		*rps_map;	/* RPS CPU map */
	struct rps_dev_flow_table __rcu	*rps_flow_table;	/* RFS flow table */
	struct kobject			kobj;		/* sysfs representation */
	struct net_device		*dev;		/* owning device */
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
677 677
#ifdef CONFIG_XPS
/*
 * This structure holds an XPS map which can be of variable length. The
 * map is an array of queues.
 */
struct xps_map {
	unsigned int len;	/* number of valid entries in queues[] */
	unsigned int alloc_len;	/* number of entries allocated */
	struct rcu_head rcu;	/* for RCU-deferred freeing */
	u16 queues[0];		/* variable-length array of queue indices */
};
/* Bytes needed for a map holding _num queue entries. */
#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
/* Smallest allocation: fill out the rest of one cache line with entries. */
#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
	/ sizeof(u16))
692 692
/*
 * This structure holds all XPS maps for device. Maps are indexed by CPU.
 */
struct xps_dev_maps {
	struct rcu_head rcu;			/* for RCU-deferred freeing */
	struct xps_map __rcu *cpu_map[0];	/* one map pointer per CPU */
};
/* Bytes needed for the per-device map array (one slot per possible CPU). */
#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
	(nr_cpu_ids * sizeof(struct xps_map *)))
#endif /* CONFIG_XPS */
703 703
#define TC_MAX_QUEUE	16	/* maximum number of traffic classes */
#define TC_BITMASK	15	/* mask to map a priority to a class */
/* HW offloaded queuing disciplines txq count and offset maps */
struct netdev_tc_txq {
	u16 count;	/* number of txqs in this traffic class */
	u16 offset;	/* index of the first txq of this class */
};
711 711
712 /* 712 /*
713 * This structure defines the management hooks for network devices. 713 * This structure defines the management hooks for network devices.
714 * The following hooks can be defined; unless noted otherwise, they are 714 * The following hooks can be defined; unless noted otherwise, they are
715 * optional and can be filled with a null pointer. 715 * optional and can be filled with a null pointer.
716 * 716 *
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev
722 * 722 *
723 * void (*ndo_uninit)(struct net_device *dev); 723 * void (*ndo_uninit)(struct net_device *dev);
724 * This function is called when device is unregistered or when registration 724 * This function is called when device is unregistered or when registration
725 * fails. It is not called if init fails. 725 * fails. It is not called if init fails.
726 * 726 *
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when network device transitions to the down
 *     state.
734 * 734 *
735 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb, 735 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
736 * struct net_device *dev); 736 * struct net_device *dev);
737 * Called when a packet needs to be transmitted. 737 * Called when a packet needs to be transmitted.
738 * Must return NETDEV_TX_OK , NETDEV_TX_BUSY. 738 * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
739 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX) 739 * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
740 * Required can not be NULL. 740 * Required can not be NULL.
741 * 741 *
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
 *
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
749 * 749 *
750 * void (*ndo_set_rx_mode)(struct net_device *dev); 750 * void (*ndo_set_rx_mode)(struct net_device *dev);
751 * This function is called device changes address list filtering. 751 * This function is called device changes address list filtering.
752 * 752 *
753 * void (*ndo_set_multicast_list)(struct net_device *dev); 753 * void (*ndo_set_multicast_list)(struct net_device *dev);
754 * This function is called when the multicast address list changes. 754 * This function is called when the multicast address list changes.
755 * 755 *
756 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr); 756 * int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
757 * This function is called when the Media Access Control address 757 * This function is called when the Media Access Control address
758 * needs to be changed. If this interface is not defined, the 758 * needs to be changed. If this interface is not defined, the
759 * mac address can not be changed. 759 * mac address can not be changed.
760 * 760 *
761 * int (*ndo_validate_addr)(struct net_device *dev); 761 * int (*ndo_validate_addr)(struct net_device *dev);
762 * Test if Media Access Control address is valid for the device. 762 * Test if Media Access Control address is valid for the device.
763 * 763 *
764 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); 764 * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
765 * Called when a user request an ioctl which can't be handled by 765 * Called when a user request an ioctl which can't be handled by
766 * the generic interface code. If not defined ioctl's return 766 * the generic interface code. If not defined ioctl's return
767 * not supported error code. 767 * not supported error code.
768 * 768 *
769 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); 769 * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
770 * Used to set network devices bus interface parameters. This interface 770 * Used to set network devices bus interface parameters. This interface
771 * is retained for legacy reason, new devices should use the bus 771 * is retained for legacy reason, new devices should use the bus
772 * interface (PCI) for low level management. 772 * interface (PCI) for low level management.
773 * 773 *
 * int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
 *	Called when a user wants to change the Maximum Transfer Unit
 *	of a device. If not defined, any request to change MTU will
 *	return an error.
778 * 778 *
 * void (*ndo_tx_timeout)(struct net_device *dev);
 *	Callback used when the transmitter has not made any progress
 *	for dev->watchdog ticks.
782 * 782 *
783 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 783 * struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
784 * struct rtnl_link_stats64 *storage); 784 * struct rtnl_link_stats64 *storage);
785 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 785 * struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
786 * Called when a user wants to get the network device usage 786 * Called when a user wants to get the network device usage
787 * statistics. Drivers must do one of the following: 787 * statistics. Drivers must do one of the following:
788 * 1. Define @ndo_get_stats64 to fill in a zero-initialised 788 * 1. Define @ndo_get_stats64 to fill in a zero-initialised
789 * rtnl_link_stats64 structure passed by the caller. 789 * rtnl_link_stats64 structure passed by the caller.
790 * 2. Define @ndo_get_stats to update a net_device_stats structure 790 * 2. Define @ndo_get_stats to update a net_device_stats structure
791 * (which should normally be dev->stats) and return a pointer to 791 * (which should normally be dev->stats) and return a pointer to
792 * it. The structure may be changed asynchronously only if each 792 * it. The structure may be changed asynchronously only if each
793 * field is written atomically. 793 * field is written atomically.
794 * 3. Update dev->stats asynchronously and atomically, and define 794 * 3. Update dev->stats asynchronously and atomically, and define
795 * neither operation. 795 * neither operation.
796 * 796 *
797 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp); 797 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 * If the device supports VLAN receive acceleration
 * (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
 * when the vlan groups for the device change. Note: grp is NULL
 * if no vlan groups are being used.
802 * 802 *
803 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid); 803 * void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 * this function is called when a VLAN id is registered.
806 * 806 *
807 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); 807 * void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
 * If the device supports VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
 * this function is called when a VLAN id is unregistered.
810 * 810 *
811 * void (*ndo_poll_controller)(struct net_device *dev); 811 * void (*ndo_poll_controller)(struct net_device *dev);
812 * 812 *
813 * SR-IOV management functions. 813 * SR-IOV management functions.
814 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac); 814 * int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
815 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos); 815 * int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
816 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate); 816 * int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
817 * int (*ndo_get_vf_config)(struct net_device *dev, 817 * int (*ndo_get_vf_config)(struct net_device *dev,
818 * int vf, struct ifla_vf_info *ivf); 818 * int vf, struct ifla_vf_info *ivf);
819 * int (*ndo_set_vf_port)(struct net_device *dev, int vf, 819 * int (*ndo_set_vf_port)(struct net_device *dev, int vf,
820 * struct nlattr *port[]); 820 * struct nlattr *port[]);
821 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb); 821 * int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
822 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc) 822 * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
823 * Called to setup 'tc' number of traffic classes in the net device. This 823 * Called to setup 'tc' number of traffic classes in the net device. This
824 * is always called from the stack with the rtnl lock held and netif tx 824 * is always called from the stack with the rtnl lock held and netif tx
825 * queues stopped. This allows the netdevice to perform queue management 825 * queues stopped. This allows the netdevice to perform queue management
826 * safely. 826 * safely.
827 * 827 *
828 * Fiber Channel over Ethernet (FCoE) offload functions. 828 * Fiber Channel over Ethernet (FCoE) offload functions.
829 * int (*ndo_fcoe_enable)(struct net_device *dev); 829 * int (*ndo_fcoe_enable)(struct net_device *dev);
830 * Called when the FCoE protocol stack wants to start using LLD for FCoE 830 * Called when the FCoE protocol stack wants to start using LLD for FCoE
831 * so the underlying device can perform whatever needed configuration or 831 * so the underlying device can perform whatever needed configuration or
832 * initialization to support acceleration of FCoE traffic. 832 * initialization to support acceleration of FCoE traffic.
833 * 833 *
834 * int (*ndo_fcoe_disable)(struct net_device *dev); 834 * int (*ndo_fcoe_disable)(struct net_device *dev);
835 * Called when the FCoE protocol stack wants to stop using LLD for FCoE 835 * Called when the FCoE protocol stack wants to stop using LLD for FCoE
836 * so the underlying device can perform whatever needed clean-ups to 836 * so the underlying device can perform whatever needed clean-ups to
837 * stop supporting acceleration of FCoE traffic. 837 * stop supporting acceleration of FCoE traffic.
838 * 838 *
839 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid, 839 * int (*ndo_fcoe_ddp_setup)(struct net_device *dev, u16 xid,
840 * struct scatterlist *sgl, unsigned int sgc); 840 * struct scatterlist *sgl, unsigned int sgc);
841 * Called when the FCoE Initiator wants to initialize an I/O that 841 * Called when the FCoE Initiator wants to initialize an I/O that
842 * is a possible candidate for Direct Data Placement (DDP). The LLD can 842 * is a possible candidate for Direct Data Placement (DDP). The LLD can
843 * perform necessary setup and returns 1 to indicate the device is set up 843 * perform necessary setup and returns 1 to indicate the device is set up
844 * successfully to perform DDP on this I/O, otherwise this returns 0. 844 * successfully to perform DDP on this I/O, otherwise this returns 0.
845 * 845 *
846 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid); 846 * int (*ndo_fcoe_ddp_done)(struct net_device *dev, u16 xid);
847 * Called when the FCoE Initiator/Target is done with the DDPed I/O as 847 * Called when the FCoE Initiator/Target is done with the DDPed I/O as
848 * indicated by the FC exchange id 'xid', so the underlying device can 848 * indicated by the FC exchange id 'xid', so the underlying device can
849 * clean up and reuse resources for later DDP requests. 849 * clean up and reuse resources for later DDP requests.
850 * 850 *
851 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid, 851 * int (*ndo_fcoe_ddp_target)(struct net_device *dev, u16 xid,
852 * struct scatterlist *sgl, unsigned int sgc); 852 * struct scatterlist *sgl, unsigned int sgc);
853 * Called when the FCoE Target wants to initialize an I/O that 853 * Called when the FCoE Target wants to initialize an I/O that
854 * is a possible candidate for Direct Data Placement (DDP). The LLD can 854 * is a possible candidate for Direct Data Placement (DDP). The LLD can
855 * perform necessary setup and returns 1 to indicate the device is set up 855 * perform necessary setup and returns 1 to indicate the device is set up
856 * successfully to perform DDP on this I/O, otherwise this returns 0. 856 * successfully to perform DDP on this I/O, otherwise this returns 0.
857 * 857 *
858 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type); 858 * int (*ndo_fcoe_get_wwn)(struct net_device *dev, u64 *wwn, int type);
859 * Called when the underlying device wants to override default World Wide 859 * Called when the underlying device wants to override default World Wide
860 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own 860 * Name (WWN) generation mechanism in FCoE protocol stack to pass its own
861 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE 861 * World Wide Port Name (WWPN) or World Wide Node Name (WWNN) to the FCoE
862 * protocol stack to use. 862 * protocol stack to use.
863 * 863 *
864 * RFS acceleration. 864 * RFS acceleration.
865 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb, 865 * int (*ndo_rx_flow_steer)(struct net_device *dev, const struct sk_buff *skb,
866 * u16 rxq_index, u32 flow_id); 866 * u16 rxq_index, u32 flow_id);
867 * Set hardware filter for RFS. rxq_index is the target queue index; 867 * Set hardware filter for RFS. rxq_index is the target queue index;
868 * flow_id is a flow ID to be passed to rps_may_expire_flow() later. 868 * flow_id is a flow ID to be passed to rps_may_expire_flow() later.
869 * Return the filter ID on success, or a negative error code. 869 * Return the filter ID on success, or a negative error code.
870 * 870 *
871 * Slave management functions (for bridge, bonding, etc). User should 871 * Slave management functions (for bridge, bonding, etc). User should
872 * call netdev_set_master() to set dev->master properly. 872 * call netdev_set_master() to set dev->master properly.
873 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev); 873 * int (*ndo_add_slave)(struct net_device *dev, struct net_device *slave_dev);
874 * Called to make another netdev an underling. 874 * Called to make another netdev an underling.
875 * 875 *
876 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev); 876 * int (*ndo_del_slave)(struct net_device *dev, struct net_device *slave_dev);
877 * Called to release previously enslaved netdev. 877 * Called to release previously enslaved netdev.
878 * 878 *
879 * Feature/offload setting functions. 879 * Feature/offload setting functions.
880 * u32 (*ndo_fix_features)(struct net_device *dev, u32 features); 880 * u32 (*ndo_fix_features)(struct net_device *dev, u32 features);
881 * Adjusts the requested feature flags according to device-specific 881 * Adjusts the requested feature flags according to device-specific
882 * constraints, and returns the resulting flags. Must not modify 882 * constraints, and returns the resulting flags. Must not modify
883 * the device state. 883 * the device state.
884 * 884 *
885 * int (*ndo_set_features)(struct net_device *dev, u32 features); 885 * int (*ndo_set_features)(struct net_device *dev, u32 features);
 * Called to update device configuration to new features. Passed
 * feature set might be less than what was returned by ndo_fix_features().
 * Must return >0 or -errno if it changed dev->features itself.
889 * 889 *
890 */ 890 */
891 #define HAVE_NET_DEVICE_OPS 891 #define HAVE_NET_DEVICE_OPS
892 struct net_device_ops { 892 struct net_device_ops {
893 int (*ndo_init)(struct net_device *dev); 893 int (*ndo_init)(struct net_device *dev);
894 void (*ndo_uninit)(struct net_device *dev); 894 void (*ndo_uninit)(struct net_device *dev);
895 int (*ndo_open)(struct net_device *dev); 895 int (*ndo_open)(struct net_device *dev);
896 int (*ndo_stop)(struct net_device *dev); 896 int (*ndo_stop)(struct net_device *dev);
897 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb, 897 netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
898 struct net_device *dev); 898 struct net_device *dev);
899 u16 (*ndo_select_queue)(struct net_device *dev, 899 u16 (*ndo_select_queue)(struct net_device *dev,
900 struct sk_buff *skb); 900 struct sk_buff *skb);
901 void (*ndo_change_rx_flags)(struct net_device *dev, 901 void (*ndo_change_rx_flags)(struct net_device *dev,
902 int flags); 902 int flags);
903 void (*ndo_set_rx_mode)(struct net_device *dev); 903 void (*ndo_set_rx_mode)(struct net_device *dev);
904 void (*ndo_set_multicast_list)(struct net_device *dev); 904 void (*ndo_set_multicast_list)(struct net_device *dev);
905 int (*ndo_set_mac_address)(struct net_device *dev, 905 int (*ndo_set_mac_address)(struct net_device *dev,
906 void *addr); 906 void *addr);
907 int (*ndo_validate_addr)(struct net_device *dev); 907 int (*ndo_validate_addr)(struct net_device *dev);
908 int (*ndo_do_ioctl)(struct net_device *dev, 908 int (*ndo_do_ioctl)(struct net_device *dev,
909 struct ifreq *ifr, int cmd); 909 struct ifreq *ifr, int cmd);
910 int (*ndo_set_config)(struct net_device *dev, 910 int (*ndo_set_config)(struct net_device *dev,
911 struct ifmap *map); 911 struct ifmap *map);
912 int (*ndo_change_mtu)(struct net_device *dev, 912 int (*ndo_change_mtu)(struct net_device *dev,
913 int new_mtu); 913 int new_mtu);
914 int (*ndo_neigh_setup)(struct net_device *dev, 914 int (*ndo_neigh_setup)(struct net_device *dev,
915 struct neigh_parms *); 915 struct neigh_parms *);
916 void (*ndo_tx_timeout) (struct net_device *dev); 916 void (*ndo_tx_timeout) (struct net_device *dev);
917 917
918 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev, 918 struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev,
919 struct rtnl_link_stats64 *storage); 919 struct rtnl_link_stats64 *storage);
920 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev); 920 struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
921 921
922 void (*ndo_vlan_rx_register)(struct net_device *dev, 922 void (*ndo_vlan_rx_register)(struct net_device *dev,
923 struct vlan_group *grp); 923 struct vlan_group *grp);
924 void (*ndo_vlan_rx_add_vid)(struct net_device *dev, 924 void (*ndo_vlan_rx_add_vid)(struct net_device *dev,
925 unsigned short vid); 925 unsigned short vid);
926 void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, 926 void (*ndo_vlan_rx_kill_vid)(struct net_device *dev,
927 unsigned short vid); 927 unsigned short vid);
928 #ifdef CONFIG_NET_POLL_CONTROLLER 928 #ifdef CONFIG_NET_POLL_CONTROLLER
929 void (*ndo_poll_controller)(struct net_device *dev); 929 void (*ndo_poll_controller)(struct net_device *dev);
930 int (*ndo_netpoll_setup)(struct net_device *dev, 930 int (*ndo_netpoll_setup)(struct net_device *dev,
931 struct netpoll_info *info); 931 struct netpoll_info *info);
932 void (*ndo_netpoll_cleanup)(struct net_device *dev); 932 void (*ndo_netpoll_cleanup)(struct net_device *dev);
933 #endif 933 #endif
934 int (*ndo_set_vf_mac)(struct net_device *dev, 934 int (*ndo_set_vf_mac)(struct net_device *dev,
935 int queue, u8 *mac); 935 int queue, u8 *mac);
936 int (*ndo_set_vf_vlan)(struct net_device *dev, 936 int (*ndo_set_vf_vlan)(struct net_device *dev,
937 int queue, u16 vlan, u8 qos); 937 int queue, u16 vlan, u8 qos);
938 int (*ndo_set_vf_tx_rate)(struct net_device *dev, 938 int (*ndo_set_vf_tx_rate)(struct net_device *dev,
939 int vf, int rate); 939 int vf, int rate);
940 int (*ndo_get_vf_config)(struct net_device *dev, 940 int (*ndo_get_vf_config)(struct net_device *dev,
941 int vf, 941 int vf,
942 struct ifla_vf_info *ivf); 942 struct ifla_vf_info *ivf);
943 int (*ndo_set_vf_port)(struct net_device *dev, 943 int (*ndo_set_vf_port)(struct net_device *dev,
944 int vf, 944 int vf,
945 struct nlattr *port[]); 945 struct nlattr *port[]);
946 int (*ndo_get_vf_port)(struct net_device *dev, 946 int (*ndo_get_vf_port)(struct net_device *dev,
947 int vf, struct sk_buff *skb); 947 int vf, struct sk_buff *skb);
948 int (*ndo_setup_tc)(struct net_device *dev, u8 tc); 948 int (*ndo_setup_tc)(struct net_device *dev, u8 tc);
949 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 949 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
950 int (*ndo_fcoe_enable)(struct net_device *dev); 950 int (*ndo_fcoe_enable)(struct net_device *dev);
951 int (*ndo_fcoe_disable)(struct net_device *dev); 951 int (*ndo_fcoe_disable)(struct net_device *dev);
952 int (*ndo_fcoe_ddp_setup)(struct net_device *dev, 952 int (*ndo_fcoe_ddp_setup)(struct net_device *dev,
953 u16 xid, 953 u16 xid,
954 struct scatterlist *sgl, 954 struct scatterlist *sgl,
955 unsigned int sgc); 955 unsigned int sgc);
956 int (*ndo_fcoe_ddp_done)(struct net_device *dev, 956 int (*ndo_fcoe_ddp_done)(struct net_device *dev,
957 u16 xid); 957 u16 xid);
958 int (*ndo_fcoe_ddp_target)(struct net_device *dev, 958 int (*ndo_fcoe_ddp_target)(struct net_device *dev,
959 u16 xid, 959 u16 xid,
960 struct scatterlist *sgl, 960 struct scatterlist *sgl,
961 unsigned int sgc); 961 unsigned int sgc);
962 #define NETDEV_FCOE_WWNN 0 962 #define NETDEV_FCOE_WWNN 0
963 #define NETDEV_FCOE_WWPN 1 963 #define NETDEV_FCOE_WWPN 1
964 int (*ndo_fcoe_get_wwn)(struct net_device *dev, 964 int (*ndo_fcoe_get_wwn)(struct net_device *dev,
965 u64 *wwn, int type); 965 u64 *wwn, int type);
966 #endif 966 #endif
967 #ifdef CONFIG_RFS_ACCEL 967 #ifdef CONFIG_RFS_ACCEL
968 int (*ndo_rx_flow_steer)(struct net_device *dev, 968 int (*ndo_rx_flow_steer)(struct net_device *dev,
969 const struct sk_buff *skb, 969 const struct sk_buff *skb,
970 u16 rxq_index, 970 u16 rxq_index,
971 u32 flow_id); 971 u32 flow_id);
972 #endif 972 #endif
973 int (*ndo_add_slave)(struct net_device *dev, 973 int (*ndo_add_slave)(struct net_device *dev,
974 struct net_device *slave_dev); 974 struct net_device *slave_dev);
975 int (*ndo_del_slave)(struct net_device *dev, 975 int (*ndo_del_slave)(struct net_device *dev,
976 struct net_device *slave_dev); 976 struct net_device *slave_dev);
977 u32 (*ndo_fix_features)(struct net_device *dev, 977 u32 (*ndo_fix_features)(struct net_device *dev,
978 u32 features); 978 u32 features);
979 int (*ndo_set_features)(struct net_device *dev, 979 int (*ndo_set_features)(struct net_device *dev,
980 u32 features); 980 u32 features);
981 }; 981 };
982 982
983 /* 983 /*
984 * The DEVICE structure. 984 * The DEVICE structure.
985 * Actually, this whole structure is a big mistake. It mixes I/O 985 * Actually, this whole structure is a big mistake. It mixes I/O
986 * data with strictly "high-level" data, and it has to know about 986 * data with strictly "high-level" data, and it has to know about
987 * almost every data structure used in the INET module. 987 * almost every data structure used in the INET module.
988 * 988 *
989 * FIXME: cleanup struct net_device such that network protocol info 989 * FIXME: cleanup struct net_device such that network protocol info
990 * moves out. 990 * moves out.
991 */ 991 */
992 992
993 struct net_device { 993 struct net_device {
994 994
995 /* 995 /*
996 * This is the first field of the "visible" part of this structure 996 * This is the first field of the "visible" part of this structure
997 * (i.e. as seen by users in the "Space.c" file). It is the name 997 * (i.e. as seen by users in the "Space.c" file). It is the name
998 * of the interface. 998 * of the interface.
999 */ 999 */
1000 char name[IFNAMSIZ]; 1000 char name[IFNAMSIZ];
1001 1001
1002 struct pm_qos_request_list pm_qos_req; 1002 struct pm_qos_request_list pm_qos_req;
1003 1003
1004 /* device name hash chain */ 1004 /* device name hash chain */
1005 struct hlist_node name_hlist; 1005 struct hlist_node name_hlist;
1006 /* snmp alias */ 1006 /* snmp alias */
1007 char *ifalias; 1007 char *ifalias;
1008 1008
1009 /* 1009 /*
1010 * I/O specific fields 1010 * I/O specific fields
1011 * FIXME: Merge these and struct ifmap into one 1011 * FIXME: Merge these and struct ifmap into one
1012 */ 1012 */
1013 unsigned long mem_end; /* shared mem end */ 1013 unsigned long mem_end; /* shared mem end */
1014 unsigned long mem_start; /* shared mem start */ 1014 unsigned long mem_start; /* shared mem start */
1015 unsigned long base_addr; /* device I/O address */ 1015 unsigned long base_addr; /* device I/O address */
1016 unsigned int irq; /* device IRQ number */ 1016 unsigned int irq; /* device IRQ number */
1017 1017
1018 /* 1018 /*
1019 * Some hardware also needs these fields, but they are not 1019 * Some hardware also needs these fields, but they are not
1020 * part of the usual set specified in Space.c. 1020 * part of the usual set specified in Space.c.
1021 */ 1021 */
1022 1022
1023 unsigned char if_port; /* Selectable AUI, TP,..*/ 1023 unsigned char if_port; /* Selectable AUI, TP,..*/
1024 unsigned char dma; /* DMA channel */ 1024 unsigned char dma; /* DMA channel */
1025 1025
1026 unsigned long state; 1026 unsigned long state;
1027 1027
1028 struct list_head dev_list; 1028 struct list_head dev_list;
1029 struct list_head napi_list; 1029 struct list_head napi_list;
1030 struct list_head unreg_list; 1030 struct list_head unreg_list;
1031 1031
1032 /* currently active device features */ 1032 /* currently active device features */
1033 u32 features; 1033 u32 features;
1034 /* user-changeable features */ 1034 /* user-changeable features */
1035 u32 hw_features; 1035 u32 hw_features;
1036 /* user-requested features */ 1036 /* user-requested features */
1037 u32 wanted_features; 1037 u32 wanted_features;
1038 /* mask of features inheritable by VLAN devices */ 1038 /* mask of features inheritable by VLAN devices */
1039 u32 vlan_features; 1039 u32 vlan_features;
1040 1040
1041 /* Net device feature bits; if you change something, 1041 /* Net device feature bits; if you change something,
1042 * also update netdev_features_strings[] in ethtool.c */ 1042 * also update netdev_features_strings[] in ethtool.c */
1043 1043
1044 #define NETIF_F_SG 1 /* Scatter/gather IO. */ 1044 #define NETIF_F_SG 1 /* Scatter/gather IO. */
1045 #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */ 1045 #define NETIF_F_IP_CSUM 2 /* Can checksum TCP/UDP over IPv4. */
1046 #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ 1046 #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */
1047 #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ 1047 #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */
1048 #define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */ 1048 #define NETIF_F_IPV6_CSUM 16 /* Can checksum TCP/UDP over IPV6 */
1049 #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ 1049 #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */
1050 #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ 1050 #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */
1051 #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ 1051 #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */
1052 #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ 1052 #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
1053 #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ 1053 #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
1054 #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ 1054 #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
1055 #define NETIF_F_GSO 2048 /* Enable software GSO. */ 1055 #define NETIF_F_GSO 2048 /* Enable software GSO. */
1056 #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */ 1056 #define NETIF_F_LLTX 4096 /* LockLess TX - deprecated. Please */
1057 /* do not use LLTX in new drivers */ 1057 /* do not use LLTX in new drivers */
1058 #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ 1058 #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
1059 #define NETIF_F_GRO 16384 /* Generic receive offload */ 1059 #define NETIF_F_GRO 16384 /* Generic receive offload */
1060 #define NETIF_F_LRO 32768 /* large receive offload */ 1060 #define NETIF_F_LRO 32768 /* large receive offload */
1061 1061
1062 /* the GSO_MASK reserves bits 16 through 23 */ 1062 /* the GSO_MASK reserves bits 16 through 23 */
1063 #define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */ 1063 #define NETIF_F_FCOE_CRC (1 << 24) /* FCoE CRC32 */
1064 #define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */ 1064 #define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
1065 #define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/ 1065 #define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
1066 #define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */ 1066 #define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
1067 #define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */ 1067 #define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
1068 #define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */ 1068 #define NETIF_F_RXCSUM (1 << 29) /* Receive checksumming offload */
1069 #define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */ 1069 #define NETIF_F_NOCACHE_COPY (1 << 30) /* Use no-cache copyfromuser */
1070 #define NETIF_F_LOOPBACK (1 << 31) /* Enable loopback */
1070 1071
1071 /* Segmentation offload features */ 1072 /* Segmentation offload features */
1072 #define NETIF_F_GSO_SHIFT 16 1073 #define NETIF_F_GSO_SHIFT 16
1073 #define NETIF_F_GSO_MASK 0x00ff0000 1074 #define NETIF_F_GSO_MASK 0x00ff0000
1074 #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) 1075 #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
1075 #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) 1076 #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
1076 #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) 1077 #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
1077 #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT) 1078 #define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
1078 #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT) 1079 #define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
1079 #define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT) 1080 #define NETIF_F_FSO (SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
1080 1081
1081 /* Features valid for ethtool to change */ 1082 /* Features valid for ethtool to change */
1082 /* = all defined minus driver/device-class-related */ 1083 /* = all defined minus driver/device-class-related */
1083 #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ 1084 #define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \
1084 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) 1085 NETIF_F_LLTX | NETIF_F_NETNS_LOCAL)
1085 #define NETIF_F_ETHTOOL_BITS (0x7f3fffff & ~NETIF_F_NEVER_CHANGE) 1086 #define NETIF_F_ETHTOOL_BITS (0xff3fffff & ~NETIF_F_NEVER_CHANGE)
1086 1087
1087 /* List of features with software fallbacks. */ 1088 /* List of features with software fallbacks. */
1088 #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \ 1089 #define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | \
1089 NETIF_F_TSO6 | NETIF_F_UFO) 1090 NETIF_F_TSO6 | NETIF_F_UFO)
1090 1091
1091 1092
1092 #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM) 1093 #define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
1093 #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM) 1094 #define NETIF_F_V4_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
1094 #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM) 1095 #define NETIF_F_V6_CSUM (NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
1095 #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM) 1096 #define NETIF_F_ALL_CSUM (NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
1096 1097
1097 #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 1098 #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1098 1099
1099 #define NETIF_F_ALL_TX_OFFLOADS (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 1100 #define NETIF_F_ALL_TX_OFFLOADS (NETIF_F_ALL_CSUM | NETIF_F_SG | \
1100 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ 1101 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
1101 NETIF_F_HIGHDMA | \ 1102 NETIF_F_HIGHDMA | \
1102 NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC) 1103 NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
1103 1104
1104 /* 1105 /*
1105 * If one device supports one of these features, then enable them 1106 * If one device supports one of these features, then enable them
1106 * for all in netdev_increment_features. 1107 * for all in netdev_increment_features.
1107 */ 1108 */
1108 #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \ 1109 #define NETIF_F_ONE_FOR_ALL (NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
1109 NETIF_F_SG | NETIF_F_HIGHDMA | \ 1110 NETIF_F_SG | NETIF_F_HIGHDMA | \
1110 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED) 1111 NETIF_F_FRAGLIST | NETIF_F_VLAN_CHALLENGED)
1111 /* 1112 /*
1112 * If one device doesn't support one of these features, then disable it 1113 * If one device doesn't support one of these features, then disable it
1113 * for all in netdev_increment_features. 1114 * for all in netdev_increment_features.
1114 */ 1115 */
1115 #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO) 1116 #define NETIF_F_ALL_FOR_ALL (NETIF_F_NOCACHE_COPY | NETIF_F_FSO)
1116 1117
1117 /* changeable features with no special hardware requirements */ 1118 /* changeable features with no special hardware requirements */
1118 #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO) 1119 #define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
1119 1120
1120 /* Interface index. Unique device identifier */ 1121 /* Interface index. Unique device identifier */
1121 int ifindex; 1122 int ifindex;
1122 int iflink; 1123 int iflink;
1123 1124
1124 struct net_device_stats stats; 1125 struct net_device_stats stats;
1125 atomic_long_t rx_dropped; /* dropped packets by core network 1126 atomic_long_t rx_dropped; /* dropped packets by core network
1126 * Do not use this in drivers. 1127 * Do not use this in drivers.
1127 */ 1128 */
1128 1129
1129 #ifdef CONFIG_WIRELESS_EXT 1130 #ifdef CONFIG_WIRELESS_EXT
1130 /* List of functions to handle Wireless Extensions (instead of ioctl). 1131 /* List of functions to handle Wireless Extensions (instead of ioctl).
1131 * See <net/iw_handler.h> for details. Jean II */ 1132 * See <net/iw_handler.h> for details. Jean II */
1132 const struct iw_handler_def * wireless_handlers; 1133 const struct iw_handler_def * wireless_handlers;
1133 /* Instance data managed by the core of Wireless Extensions. */ 1134 /* Instance data managed by the core of Wireless Extensions. */
1134 struct iw_public_data * wireless_data; 1135 struct iw_public_data * wireless_data;
1135 #endif 1136 #endif
1136 /* Management operations */ 1137 /* Management operations */
1137 const struct net_device_ops *netdev_ops; 1138 const struct net_device_ops *netdev_ops;
1138 const struct ethtool_ops *ethtool_ops; 1139 const struct ethtool_ops *ethtool_ops;
1139 1140
1140 /* Hardware header description */ 1141 /* Hardware header description */
1141 const struct header_ops *header_ops; 1142 const struct header_ops *header_ops;
1142 1143
1143 unsigned int flags; /* interface flags (a la BSD) */ 1144 unsigned int flags; /* interface flags (a la BSD) */
1144 unsigned short gflags; 1145 unsigned short gflags;
1145 unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */ 1146 unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */
1146 unsigned short padded; /* How much padding added by alloc_netdev() */ 1147 unsigned short padded; /* How much padding added by alloc_netdev() */
1147 1148
1148 unsigned char operstate; /* RFC2863 operstate */ 1149 unsigned char operstate; /* RFC2863 operstate */
1149 unsigned char link_mode; /* mapping policy to operstate */ 1150 unsigned char link_mode; /* mapping policy to operstate */
1150 1151
1151 unsigned int mtu; /* interface MTU value */ 1152 unsigned int mtu; /* interface MTU value */
1152 unsigned short type; /* interface hardware type */ 1153 unsigned short type; /* interface hardware type */
1153 unsigned short hard_header_len; /* hardware hdr length */ 1154 unsigned short hard_header_len; /* hardware hdr length */
1154 1155
1155 /* extra head- and tailroom the hardware may need, but not in all cases 1156 /* extra head- and tailroom the hardware may need, but not in all cases
1156 * can this be guaranteed, especially tailroom. Some cases also use 1157 * can this be guaranteed, especially tailroom. Some cases also use
1157 * LL_MAX_HEADER instead to allocate the skb. 1158 * LL_MAX_HEADER instead to allocate the skb.
1158 */ 1159 */
1159 unsigned short needed_headroom; 1160 unsigned short needed_headroom;
1160 unsigned short needed_tailroom; 1161 unsigned short needed_tailroom;
1161 1162
1162 /* Interface address info. */ 1163 /* Interface address info. */
1163 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ 1164 unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
1164 unsigned char addr_assign_type; /* hw address assignment type */ 1165 unsigned char addr_assign_type; /* hw address assignment type */
1165 unsigned char addr_len; /* hardware address length */ 1166 unsigned char addr_len; /* hardware address length */
1166 unsigned short dev_id; /* for shared network cards */ 1167 unsigned short dev_id; /* for shared network cards */
1167 1168
1168 spinlock_t addr_list_lock; 1169 spinlock_t addr_list_lock;
1169 struct netdev_hw_addr_list uc; /* Unicast mac addresses */ 1170 struct netdev_hw_addr_list uc; /* Unicast mac addresses */
1170 struct netdev_hw_addr_list mc; /* Multicast mac addresses */ 1171 struct netdev_hw_addr_list mc; /* Multicast mac addresses */
1171 int uc_promisc; 1172 int uc_promisc;
1172 unsigned int promiscuity; 1173 unsigned int promiscuity;
1173 unsigned int allmulti; 1174 unsigned int allmulti;
1174 1175
1175 1176
1176 /* Protocol specific pointers */ 1177 /* Protocol specific pointers */
1177 1178
1178 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) 1179 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
1179 struct vlan_group __rcu *vlgrp; /* VLAN group */ 1180 struct vlan_group __rcu *vlgrp; /* VLAN group */
1180 #endif 1181 #endif
1181 #ifdef CONFIG_NET_DSA 1182 #ifdef CONFIG_NET_DSA
1182 void *dsa_ptr; /* dsa specific data */ 1183 void *dsa_ptr; /* dsa specific data */
1183 #endif 1184 #endif
1184 void *atalk_ptr; /* AppleTalk link */ 1185 void *atalk_ptr; /* AppleTalk link */
1185 struct in_device __rcu *ip_ptr; /* IPv4 specific data */ 1186 struct in_device __rcu *ip_ptr; /* IPv4 specific data */
1186 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */ 1187 struct dn_dev __rcu *dn_ptr; /* DECnet specific data */
1187 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ 1188 struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */
1188 void *ec_ptr; /* Econet specific data */ 1189 void *ec_ptr; /* Econet specific data */
1189 void *ax25_ptr; /* AX.25 specific data */ 1190 void *ax25_ptr; /* AX.25 specific data */
1190 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, 1191 struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
1191 assign before registering */ 1192 assign before registering */
1192 1193
1193 /* 1194 /*
1194 * Cache lines mostly used on receive path (including eth_type_trans()) 1195 * Cache lines mostly used on receive path (including eth_type_trans())
1195 */ 1196 */
1196 unsigned long last_rx; /* Time of last Rx 1197 unsigned long last_rx; /* Time of last Rx
1197 * This should not be set in 1198 * This should not be set in
1198 * drivers, unless really needed, 1199 * drivers, unless really needed,
1199 * because network stack (bonding) 1200 * because network stack (bonding)
1200 * use it if/when necessary, to 1201 * use it if/when necessary, to
1201 * avoid dirtying this cache line. 1202 * avoid dirtying this cache line.
1202 */ 1203 */
1203 1204
1204 struct net_device *master; /* Pointer to master device of a group, 1205 struct net_device *master; /* Pointer to master device of a group,
1205 * which this device is member of. 1206 * which this device is member of.
1206 */ 1207 */
1207 1208
1208 /* Interface address info used in eth_type_trans() */ 1209 /* Interface address info used in eth_type_trans() */
1209 unsigned char *dev_addr; /* hw address, (before bcast 1210 unsigned char *dev_addr; /* hw address, (before bcast
1210 because most packets are 1211 because most packets are
1211 unicast) */ 1212 unicast) */
1212 1213
1213 struct netdev_hw_addr_list dev_addrs; /* list of device 1214 struct netdev_hw_addr_list dev_addrs; /* list of device
1214 hw addresses */ 1215 hw addresses */
1215 1216
1216 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 1217 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
1217 1218
1218 #ifdef CONFIG_RPS 1219 #ifdef CONFIG_RPS
1219 struct kset *queues_kset; 1220 struct kset *queues_kset;
1220 1221
1221 struct netdev_rx_queue *_rx; 1222 struct netdev_rx_queue *_rx;
1222 1223
1223 /* Number of RX queues allocated at register_netdev() time */ 1224 /* Number of RX queues allocated at register_netdev() time */
1224 unsigned int num_rx_queues; 1225 unsigned int num_rx_queues;
1225 1226
1226 /* Number of RX queues currently active in device */ 1227 /* Number of RX queues currently active in device */
1227 unsigned int real_num_rx_queues; 1228 unsigned int real_num_rx_queues;
1228 1229
1229 #ifdef CONFIG_RFS_ACCEL 1230 #ifdef CONFIG_RFS_ACCEL
1230 /* CPU reverse-mapping for RX completion interrupts, indexed 1231 /* CPU reverse-mapping for RX completion interrupts, indexed
1231 * by RX queue number. Assigned by driver. This must only be 1232 * by RX queue number. Assigned by driver. This must only be
1232 * set if the ndo_rx_flow_steer operation is defined. */ 1233 * set if the ndo_rx_flow_steer operation is defined. */
1233 struct cpu_rmap *rx_cpu_rmap; 1234 struct cpu_rmap *rx_cpu_rmap;
1234 #endif 1235 #endif
1235 #endif 1236 #endif
1236 1237
1237 rx_handler_func_t __rcu *rx_handler; 1238 rx_handler_func_t __rcu *rx_handler;
1238 void __rcu *rx_handler_data; 1239 void __rcu *rx_handler_data;
1239 1240
1240 struct netdev_queue __rcu *ingress_queue; 1241 struct netdev_queue __rcu *ingress_queue;
1241 1242
1242 /* 1243 /*
1243 * Cache lines mostly used on transmit path 1244 * Cache lines mostly used on transmit path
1244 */ 1245 */
1245 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 1246 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
1246 1247
1247 /* Number of TX queues allocated at alloc_netdev_mq() time */ 1248 /* Number of TX queues allocated at alloc_netdev_mq() time */
1248 unsigned int num_tx_queues; 1249 unsigned int num_tx_queues;
1249 1250
1250 /* Number of TX queues currently active in device */ 1251 /* Number of TX queues currently active in device */
1251 unsigned int real_num_tx_queues; 1252 unsigned int real_num_tx_queues;
1252 1253
1253 /* root qdisc from userspace point of view */ 1254 /* root qdisc from userspace point of view */
1254 struct Qdisc *qdisc; 1255 struct Qdisc *qdisc;
1255 1256
1256 unsigned long tx_queue_len; /* Max frames per queue allowed */ 1257 unsigned long tx_queue_len; /* Max frames per queue allowed */
1257 spinlock_t tx_global_lock; 1258 spinlock_t tx_global_lock;
1258 1259
1259 #ifdef CONFIG_XPS 1260 #ifdef CONFIG_XPS
1260 struct xps_dev_maps __rcu *xps_maps; 1261 struct xps_dev_maps __rcu *xps_maps;
1261 #endif 1262 #endif
1262 1263
1263 /* These may be needed for future network-power-down code. */ 1264 /* These may be needed for future network-power-down code. */
1264 1265
1265 /* 1266 /*
1266 * trans_start here is expensive for high speed devices on SMP, 1267 * trans_start here is expensive for high speed devices on SMP,
1267 * please use netdev_queue->trans_start instead. 1268 * please use netdev_queue->trans_start instead.
1268 */ 1269 */
1269 unsigned long trans_start; /* Time (in jiffies) of last Tx */ 1270 unsigned long trans_start; /* Time (in jiffies) of last Tx */
1270 1271
1271 int watchdog_timeo; /* used by dev_watchdog() */ 1272 int watchdog_timeo; /* used by dev_watchdog() */
1272 struct timer_list watchdog_timer; 1273 struct timer_list watchdog_timer;
1273 1274
1274 /* Number of references to this device */ 1275 /* Number of references to this device */
1275 int __percpu *pcpu_refcnt; 1276 int __percpu *pcpu_refcnt;
1276 1277
1277 /* delayed register/unregister */ 1278 /* delayed register/unregister */
1278 struct list_head todo_list; 1279 struct list_head todo_list;
1279 /* device index hash chain */ 1280 /* device index hash chain */
1280 struct hlist_node index_hlist; 1281 struct hlist_node index_hlist;
1281 1282
1282 struct list_head link_watch_list; 1283 struct list_head link_watch_list;
1283 1284
1284 /* register/unregister state machine */ 1285 /* register/unregister state machine */
1285 enum { NETREG_UNINITIALIZED=0, 1286 enum { NETREG_UNINITIALIZED=0,
1286 NETREG_REGISTERED, /* completed register_netdevice */ 1287 NETREG_REGISTERED, /* completed register_netdevice */
1287 NETREG_UNREGISTERING, /* called unregister_netdevice */ 1288 NETREG_UNREGISTERING, /* called unregister_netdevice */
1288 NETREG_UNREGISTERED, /* completed unregister todo */ 1289 NETREG_UNREGISTERED, /* completed unregister todo */
1289 NETREG_RELEASED, /* called free_netdev */ 1290 NETREG_RELEASED, /* called free_netdev */
1290 NETREG_DUMMY, /* dummy device for NAPI poll */ 1291 NETREG_DUMMY, /* dummy device for NAPI poll */
1291 } reg_state:16; 1292 } reg_state:16;
1292 1293
1293 enum { 1294 enum {
1294 RTNL_LINK_INITIALIZED, 1295 RTNL_LINK_INITIALIZED,
1295 RTNL_LINK_INITIALIZING, 1296 RTNL_LINK_INITIALIZING,
1296 } rtnl_link_state:16; 1297 } rtnl_link_state:16;
1297 1298
1298 /* Called from unregister, can be used to call free_netdev */ 1299 /* Called from unregister, can be used to call free_netdev */
1299 void (*destructor)(struct net_device *dev); 1300 void (*destructor)(struct net_device *dev);
1300 1301
1301 #ifdef CONFIG_NETPOLL 1302 #ifdef CONFIG_NETPOLL
1302 struct netpoll_info *npinfo; 1303 struct netpoll_info *npinfo;
1303 #endif 1304 #endif
1304 1305
1305 #ifdef CONFIG_NET_NS 1306 #ifdef CONFIG_NET_NS
1306 /* Network namespace this network device is inside */ 1307 /* Network namespace this network device is inside */
1307 struct net *nd_net; 1308 struct net *nd_net;
1308 #endif 1309 #endif
1309 1310
1310 /* mid-layer private */ 1311 /* mid-layer private */
1311 union { 1312 union {
1312 void *ml_priv; 1313 void *ml_priv;
1313 struct pcpu_lstats __percpu *lstats; /* loopback stats */ 1314 struct pcpu_lstats __percpu *lstats; /* loopback stats */
1314 struct pcpu_tstats __percpu *tstats; /* tunnel stats */ 1315 struct pcpu_tstats __percpu *tstats; /* tunnel stats */
1315 struct pcpu_dstats __percpu *dstats; /* dummy stats */ 1316 struct pcpu_dstats __percpu *dstats; /* dummy stats */
1316 }; 1317 };
1317 /* GARP */ 1318 /* GARP */
1318 struct garp_port __rcu *garp_port; 1319 struct garp_port __rcu *garp_port;
1319 1320
1320 /* class/net/name entry */ 1321 /* class/net/name entry */
1321 struct device dev; 1322 struct device dev;
1322 /* space for optional device, statistics, and wireless sysfs groups */ 1323 /* space for optional device, statistics, and wireless sysfs groups */
1323 const struct attribute_group *sysfs_groups[4]; 1324 const struct attribute_group *sysfs_groups[4];
1324 1325
1325 /* rtnetlink link ops */ 1326 /* rtnetlink link ops */
1326 const struct rtnl_link_ops *rtnl_link_ops; 1327 const struct rtnl_link_ops *rtnl_link_ops;
1327 1328
1328 /* for setting kernel sock attribute on TCP connection setup */ 1329 /* for setting kernel sock attribute on TCP connection setup */
1329 #define GSO_MAX_SIZE 65536 1330 #define GSO_MAX_SIZE 65536
1330 unsigned int gso_max_size; 1331 unsigned int gso_max_size;
1331 1332
1332 #ifdef CONFIG_DCB 1333 #ifdef CONFIG_DCB
1333 /* Data Center Bridging netlink ops */ 1334 /* Data Center Bridging netlink ops */
1334 const struct dcbnl_rtnl_ops *dcbnl_ops; 1335 const struct dcbnl_rtnl_ops *dcbnl_ops;
1335 #endif 1336 #endif
1336 u8 num_tc; 1337 u8 num_tc;
1337 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE]; 1338 struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
1338 u8 prio_tc_map[TC_BITMASK + 1]; 1339 u8 prio_tc_map[TC_BITMASK + 1];
1339 1340
1340 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) 1341 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
1341 /* max exchange id for FCoE LRO by ddp */ 1342 /* max exchange id for FCoE LRO by ddp */
1342 unsigned int fcoe_ddp_xid; 1343 unsigned int fcoe_ddp_xid;
1343 #endif 1344 #endif
1344 /* n-tuple filter list attached to this device */ 1345 /* n-tuple filter list attached to this device */
1345 struct ethtool_rx_ntuple_list ethtool_ntuple_list; 1346 struct ethtool_rx_ntuple_list ethtool_ntuple_list;
1346 1347
1347 /* phy device may attach itself for hardware timestamping */ 1348 /* phy device may attach itself for hardware timestamping */
1348 struct phy_device *phydev; 1349 struct phy_device *phydev;
1349 1350
1350 /* group the device belongs to */ 1351 /* group the device belongs to */
1351 int group; 1352 int group;
1352 }; 1353 };
1353 #define to_net_dev(d) container_of(d, struct net_device, dev) 1354 #define to_net_dev(d) container_of(d, struct net_device, dev)
1354 1355
1355 #define NETDEV_ALIGN 32 1356 #define NETDEV_ALIGN 32
1356 1357
1357 static inline 1358 static inline
1358 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio) 1359 int netdev_get_prio_tc_map(const struct net_device *dev, u32 prio)
1359 { 1360 {
1360 return dev->prio_tc_map[prio & TC_BITMASK]; 1361 return dev->prio_tc_map[prio & TC_BITMASK];
1361 } 1362 }
1362 1363
1363 static inline 1364 static inline
1364 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc) 1365 int netdev_set_prio_tc_map(struct net_device *dev, u8 prio, u8 tc)
1365 { 1366 {
1366 if (tc >= dev->num_tc) 1367 if (tc >= dev->num_tc)
1367 return -EINVAL; 1368 return -EINVAL;
1368 1369
1369 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK; 1370 dev->prio_tc_map[prio & TC_BITMASK] = tc & TC_BITMASK;
1370 return 0; 1371 return 0;
1371 } 1372 }
1372 1373
1373 static inline 1374 static inline
1374 void netdev_reset_tc(struct net_device *dev) 1375 void netdev_reset_tc(struct net_device *dev)
1375 { 1376 {
1376 dev->num_tc = 0; 1377 dev->num_tc = 0;
1377 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq)); 1378 memset(dev->tc_to_txq, 0, sizeof(dev->tc_to_txq));
1378 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map)); 1379 memset(dev->prio_tc_map, 0, sizeof(dev->prio_tc_map));
1379 } 1380 }
1380 1381
1381 static inline 1382 static inline
1382 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset) 1383 int netdev_set_tc_queue(struct net_device *dev, u8 tc, u16 count, u16 offset)
1383 { 1384 {
1384 if (tc >= dev->num_tc) 1385 if (tc >= dev->num_tc)
1385 return -EINVAL; 1386 return -EINVAL;
1386 1387
1387 dev->tc_to_txq[tc].count = count; 1388 dev->tc_to_txq[tc].count = count;
1388 dev->tc_to_txq[tc].offset = offset; 1389 dev->tc_to_txq[tc].offset = offset;
1389 return 0; 1390 return 0;
1390 } 1391 }
1391 1392
1392 static inline 1393 static inline
1393 int netdev_set_num_tc(struct net_device *dev, u8 num_tc) 1394 int netdev_set_num_tc(struct net_device *dev, u8 num_tc)
1394 { 1395 {
1395 if (num_tc > TC_MAX_QUEUE) 1396 if (num_tc > TC_MAX_QUEUE)
1396 return -EINVAL; 1397 return -EINVAL;
1397 1398
1398 dev->num_tc = num_tc; 1399 dev->num_tc = num_tc;
1399 return 0; 1400 return 0;
1400 } 1401 }
1401 1402
1402 static inline 1403 static inline
1403 int netdev_get_num_tc(struct net_device *dev) 1404 int netdev_get_num_tc(struct net_device *dev)
1404 { 1405 {
1405 return dev->num_tc; 1406 return dev->num_tc;
1406 } 1407 }
1407 1408
1408 static inline 1409 static inline
1409 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev, 1410 struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
1410 unsigned int index) 1411 unsigned int index)
1411 { 1412 {
1412 return &dev->_tx[index]; 1413 return &dev->_tx[index];
1413 } 1414 }
1414 1415
1415 static inline void netdev_for_each_tx_queue(struct net_device *dev, 1416 static inline void netdev_for_each_tx_queue(struct net_device *dev,
1416 void (*f)(struct net_device *, 1417 void (*f)(struct net_device *,
1417 struct netdev_queue *, 1418 struct netdev_queue *,
1418 void *), 1419 void *),
1419 void *arg) 1420 void *arg)
1420 { 1421 {
1421 unsigned int i; 1422 unsigned int i;
1422 1423
1423 for (i = 0; i < dev->num_tx_queues; i++) 1424 for (i = 0; i < dev->num_tx_queues; i++)
1424 f(dev, &dev->_tx[i], arg); 1425 f(dev, &dev->_tx[i], arg);
1425 } 1426 }
1426 1427
1427 /* 1428 /*
1428 * Net namespace inlines 1429 * Net namespace inlines
1429 */ 1430 */
1430 static inline 1431 static inline
1431 struct net *dev_net(const struct net_device *dev) 1432 struct net *dev_net(const struct net_device *dev)
1432 { 1433 {
1433 return read_pnet(&dev->nd_net); 1434 return read_pnet(&dev->nd_net);
1434 } 1435 }
1435 1436
/* Move @dev into namespace @net: drop the reference on the old
 * namespace, then take one on the new.  A no-op when network
 * namespaces are compiled out.
 */
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}
1444 1445
1445 static inline bool netdev_uses_dsa_tags(struct net_device *dev) 1446 static inline bool netdev_uses_dsa_tags(struct net_device *dev)
1446 { 1447 {
1447 #ifdef CONFIG_NET_DSA_TAG_DSA 1448 #ifdef CONFIG_NET_DSA_TAG_DSA
1448 if (dev->dsa_ptr != NULL) 1449 if (dev->dsa_ptr != NULL)
1449 return dsa_uses_dsa_tags(dev->dsa_ptr); 1450 return dsa_uses_dsa_tags(dev->dsa_ptr);
1450 #endif 1451 #endif
1451 1452
1452 return 0; 1453 return 0;
1453 } 1454 }
1454 1455
1455 #ifndef CONFIG_NET_NS 1456 #ifndef CONFIG_NET_NS
1456 static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev) 1457 static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
1457 { 1458 {
1458 skb->dev = dev; 1459 skb->dev = dev;
1459 } 1460 }
1460 #else /* CONFIG_NET_NS */ 1461 #else /* CONFIG_NET_NS */
1461 void skb_set_dev(struct sk_buff *skb, struct net_device *dev); 1462 void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
1462 #endif 1463 #endif
1463 1464
1464 static inline bool netdev_uses_trailer_tags(struct net_device *dev) 1465 static inline bool netdev_uses_trailer_tags(struct net_device *dev)
1465 { 1466 {
1466 #ifdef CONFIG_NET_DSA_TAG_TRAILER 1467 #ifdef CONFIG_NET_DSA_TAG_TRAILER
1467 if (dev->dsa_ptr != NULL) 1468 if (dev->dsa_ptr != NULL)
1468 return dsa_uses_trailer_tags(dev->dsa_ptr); 1469 return dsa_uses_trailer_tags(dev->dsa_ptr);
1469 #endif 1470 #endif
1470 1471
1471 return 0; 1472 return 0;
1472 } 1473 }
1473 1474
1474 /** 1475 /**
1475 * netdev_priv - access network device private data 1476 * netdev_priv - access network device private data
1476 * @dev: network device 1477 * @dev: network device
1477 * 1478 *
1478 * Get network device private data 1479 * Get network device private data
1479 */ 1480 */
1480 static inline void *netdev_priv(const struct net_device *dev) 1481 static inline void *netdev_priv(const struct net_device *dev)
1481 { 1482 {
1482 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); 1483 return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
1483 } 1484 }
1484 1485
1485 /* Set the sysfs physical device reference for the network logical device 1486 /* Set the sysfs physical device reference for the network logical device
1486 * if set prior to registration will cause a symlink during initialization. 1487 * if set prior to registration will cause a symlink during initialization.
1487 */ 1488 */
1488 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev)) 1489 #define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
1489 1490
/* Set the sysfs device type for the network logical device to allow
 * fine-grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
1494 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 1495 #define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1495 1496
1496 /** 1497 /**
1497 * netif_napi_add - initialize a napi context 1498 * netif_napi_add - initialize a napi context
1498 * @dev: network device 1499 * @dev: network device
1499 * @napi: napi context 1500 * @napi: napi context
1500 * @poll: polling function 1501 * @poll: polling function
1501 * @weight: default weight 1502 * @weight: default weight
1502 * 1503 *
1503 * netif_napi_add() must be used to initialize a napi context prior to calling 1504 * netif_napi_add() must be used to initialize a napi context prior to calling
1504 * *any* of the other napi related functions. 1505 * *any* of the other napi related functions.
1505 */ 1506 */
1506 void netif_napi_add(struct net_device *dev, struct napi_struct *napi, 1507 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
1507 int (*poll)(struct napi_struct *, int), int weight); 1508 int (*poll)(struct napi_struct *, int), int weight);
1508 1509
1509 /** 1510 /**
1510 * netif_napi_del - remove a napi context 1511 * netif_napi_del - remove a napi context
1511 * @napi: napi context 1512 * @napi: napi context
1512 * 1513 *
1513 * netif_napi_del() removes a napi context from the network device napi list 1514 * netif_napi_del() removes a napi context from the network device napi list
1514 */ 1515 */
1515 void netif_napi_del(struct napi_struct *napi); 1516 void netif_napi_del(struct napi_struct *napi);
1516 1517
1517 struct napi_gro_cb { 1518 struct napi_gro_cb {
1518 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */ 1519 /* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
1519 void *frag0; 1520 void *frag0;
1520 1521
1521 /* Length of frag0. */ 1522 /* Length of frag0. */
1522 unsigned int frag0_len; 1523 unsigned int frag0_len;
1523 1524
1524 /* This indicates where we are processing relative to skb->data. */ 1525 /* This indicates where we are processing relative to skb->data. */
1525 int data_offset; 1526 int data_offset;
1526 1527
1527 /* This is non-zero if the packet may be of the same flow. */ 1528 /* This is non-zero if the packet may be of the same flow. */
1528 int same_flow; 1529 int same_flow;
1529 1530
1530 /* This is non-zero if the packet cannot be merged with the new skb. */ 1531 /* This is non-zero if the packet cannot be merged with the new skb. */
1531 int flush; 1532 int flush;
1532 1533
1533 /* Number of segments aggregated. */ 1534 /* Number of segments aggregated. */
1534 int count; 1535 int count;
1535 1536
1536 /* Free the skb? */ 1537 /* Free the skb? */
1537 int free; 1538 int free;
1538 }; 1539 };
1539 1540
1540 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) 1541 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
1541 1542
1542 struct packet_type { 1543 struct packet_type {
1543 __be16 type; /* This is really htons(ether_type). */ 1544 __be16 type; /* This is really htons(ether_type). */
1544 struct net_device *dev; /* NULL is wildcarded here */ 1545 struct net_device *dev; /* NULL is wildcarded here */
1545 int (*func) (struct sk_buff *, 1546 int (*func) (struct sk_buff *,
1546 struct net_device *, 1547 struct net_device *,
1547 struct packet_type *, 1548 struct packet_type *,
1548 struct net_device *); 1549 struct net_device *);
1549 struct sk_buff *(*gso_segment)(struct sk_buff *skb, 1550 struct sk_buff *(*gso_segment)(struct sk_buff *skb,
1550 u32 features); 1551 u32 features);
1551 int (*gso_send_check)(struct sk_buff *skb); 1552 int (*gso_send_check)(struct sk_buff *skb);
1552 struct sk_buff **(*gro_receive)(struct sk_buff **head, 1553 struct sk_buff **(*gro_receive)(struct sk_buff **head,
1553 struct sk_buff *skb); 1554 struct sk_buff *skb);
1554 int (*gro_complete)(struct sk_buff *skb); 1555 int (*gro_complete)(struct sk_buff *skb);
1555 void *af_packet_priv; 1556 void *af_packet_priv;
1556 struct list_head list; 1557 struct list_head list;
1557 }; 1558 };
1558 1559
1559 #include <linux/interrupt.h> 1560 #include <linux/interrupt.h>
1560 #include <linux/notifier.h> 1561 #include <linux/notifier.h>
1561 1562
1562 extern rwlock_t dev_base_lock; /* Device list lock */ 1563 extern rwlock_t dev_base_lock; /* Device list lock */
1563 1564
1564 1565
1565 #define for_each_netdev(net, d) \ 1566 #define for_each_netdev(net, d) \
1566 list_for_each_entry(d, &(net)->dev_base_head, dev_list) 1567 list_for_each_entry(d, &(net)->dev_base_head, dev_list)
1567 #define for_each_netdev_reverse(net, d) \ 1568 #define for_each_netdev_reverse(net, d) \
1568 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list) 1569 list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
1569 #define for_each_netdev_rcu(net, d) \ 1570 #define for_each_netdev_rcu(net, d) \
1570 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list) 1571 list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
1571 #define for_each_netdev_safe(net, d, n) \ 1572 #define for_each_netdev_safe(net, d, n) \
1572 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list) 1573 list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
1573 #define for_each_netdev_continue(net, d) \ 1574 #define for_each_netdev_continue(net, d) \
1574 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 1575 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1575 #define for_each_netdev_continue_rcu(net, d) \ 1576 #define for_each_netdev_continue_rcu(net, d) \
1576 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 1577 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1577 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 1578 #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
1578 1579
1579 static inline struct net_device *next_net_device(struct net_device *dev) 1580 static inline struct net_device *next_net_device(struct net_device *dev)
1580 { 1581 {
1581 struct list_head *lh; 1582 struct list_head *lh;
1582 struct net *net; 1583 struct net *net;
1583 1584
1584 net = dev_net(dev); 1585 net = dev_net(dev);
1585 lh = dev->dev_list.next; 1586 lh = dev->dev_list.next;
1586 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1587 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1587 } 1588 }
1588 1589
1589 static inline struct net_device *next_net_device_rcu(struct net_device *dev) 1590 static inline struct net_device *next_net_device_rcu(struct net_device *dev)
1590 { 1591 {
1591 struct list_head *lh; 1592 struct list_head *lh;
1592 struct net *net; 1593 struct net *net;
1593 1594
1594 net = dev_net(dev); 1595 net = dev_net(dev);
1595 lh = rcu_dereference(list_next_rcu(&dev->dev_list)); 1596 lh = rcu_dereference(list_next_rcu(&dev->dev_list));
1596 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1597 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1597 } 1598 }
1598 1599
1599 static inline struct net_device *first_net_device(struct net *net) 1600 static inline struct net_device *first_net_device(struct net *net)
1600 { 1601 {
1601 return list_empty(&net->dev_base_head) ? NULL : 1602 return list_empty(&net->dev_base_head) ? NULL :
1602 net_device_entry(net->dev_base_head.next); 1603 net_device_entry(net->dev_base_head.next);
1603 } 1604 }
1604 1605
1605 static inline struct net_device *first_net_device_rcu(struct net *net) 1606 static inline struct net_device *first_net_device_rcu(struct net *net)
1606 { 1607 {
1607 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head)); 1608 struct list_head *lh = rcu_dereference(list_next_rcu(&net->dev_base_head));
1608 1609
1609 return lh == &net->dev_base_head ? NULL : net_device_entry(lh); 1610 return lh == &net->dev_base_head ? NULL : net_device_entry(lh);
1610 } 1611 }
1611 1612
1612 extern int netdev_boot_setup_check(struct net_device *dev); 1613 extern int netdev_boot_setup_check(struct net_device *dev);
1613 extern unsigned long netdev_boot_base(const char *prefix, int unit); 1614 extern unsigned long netdev_boot_base(const char *prefix, int unit);
1614 extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, 1615 extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
1615 const char *hwaddr); 1616 const char *hwaddr);
1616 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); 1617 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
1617 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type); 1618 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
1618 extern void dev_add_pack(struct packet_type *pt); 1619 extern void dev_add_pack(struct packet_type *pt);
1619 extern void dev_remove_pack(struct packet_type *pt); 1620 extern void dev_remove_pack(struct packet_type *pt);
1620 extern void __dev_remove_pack(struct packet_type *pt); 1621 extern void __dev_remove_pack(struct packet_type *pt);
1621 1622
1622 extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags, 1623 extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
1623 unsigned short mask); 1624 unsigned short mask);
1624 extern struct net_device *dev_get_by_name(struct net *net, const char *name); 1625 extern struct net_device *dev_get_by_name(struct net *net, const char *name);
1625 extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name); 1626 extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
1626 extern struct net_device *__dev_get_by_name(struct net *net, const char *name); 1627 extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
1627 extern int dev_alloc_name(struct net_device *dev, const char *name); 1628 extern int dev_alloc_name(struct net_device *dev, const char *name);
1628 extern int dev_open(struct net_device *dev); 1629 extern int dev_open(struct net_device *dev);
1629 extern int dev_close(struct net_device *dev); 1630 extern int dev_close(struct net_device *dev);
1630 extern void dev_disable_lro(struct net_device *dev); 1631 extern void dev_disable_lro(struct net_device *dev);
1631 extern int dev_queue_xmit(struct sk_buff *skb); 1632 extern int dev_queue_xmit(struct sk_buff *skb);
1632 extern int register_netdevice(struct net_device *dev); 1633 extern int register_netdevice(struct net_device *dev);
1633 extern void unregister_netdevice_queue(struct net_device *dev, 1634 extern void unregister_netdevice_queue(struct net_device *dev,
1634 struct list_head *head); 1635 struct list_head *head);
1635 extern void unregister_netdevice_many(struct list_head *head); 1636 extern void unregister_netdevice_many(struct list_head *head);
1636 static inline void unregister_netdevice(struct net_device *dev) 1637 static inline void unregister_netdevice(struct net_device *dev)
1637 { 1638 {
1638 unregister_netdevice_queue(dev, NULL); 1639 unregister_netdevice_queue(dev, NULL);
1639 } 1640 }

/* Reference counting, teardown and netdevice event notifiers. */
extern int netdev_refcnt_read(const struct net_device *dev);
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
extern int skb_gro_receive(struct sk_buff **head,
			   struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);

/* Bytes of the GRO packet already consumed by protocol layers. */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

/* Length of the GRO packet not yet consumed. */
static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

/* Advance the GRO parse offset by @len bytes. */
static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

/* Fast-path header access into frag0; only valid when
 * skb_gro_header_hard() reports the needed length is available.
 */
static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

/* Nonzero when @hlen bytes are NOT available via frag0 and the
 * slow path (skb_gro_header_slow) must be used instead.
 */
static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

/* Slow-path header access: invalidate the frag0 fast path and pull
 * @hlen bytes into the linear area.  Returns NULL if the pull fails.
 */
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}

/* MAC header of a GRO packet: frag0 when still valid, else linear data. */
static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

/* Network header of a GRO packet, honouring the frag0 fast path. */
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
1705 1706
1706 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 1707 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1707 unsigned short type, 1708 unsigned short type,
1708 const void *daddr, const void *saddr, 1709 const void *daddr, const void *saddr,
1709 unsigned len) 1710 unsigned len)
1710 { 1711 {
1711 if (!dev->header_ops || !dev->header_ops->create) 1712 if (!dev->header_ops || !dev->header_ops->create)
1712 return 0; 1713 return 0;
1713 1714
1714 return dev->header_ops->create(skb, dev, type, daddr, saddr, len); 1715 return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
1715 } 1716 }
1716 1717
1717 static inline int dev_parse_header(const struct sk_buff *skb, 1718 static inline int dev_parse_header(const struct sk_buff *skb,
1718 unsigned char *haddr) 1719 unsigned char *haddr)
1719 { 1720 {
1720 const struct net_device *dev = skb->dev; 1721 const struct net_device *dev = skb->dev;
1721 1722
1722 if (!dev->header_ops || !dev->header_ops->parse) 1723 if (!dev->header_ops || !dev->header_ops->parse)
1723 return 0; 1724 return 0;
1724 return dev->header_ops->parse(skb, haddr); 1725 return dev->header_ops->parse(skb, haddr);
1725 } 1726 }
1726 1727
/* Per-address-family SIOCGIFCONF handlers, registered by protocols. */
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
/* Drop a family's gifconf handler by registering NULL in its place. */
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
1733 1734
/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;		/* qdiscs queued for a TX run */
	struct Qdisc		**output_queue_tailp;	/* tail of output_queue list */
	struct list_head	poll_list;		/* NAPI instances awaiting poll */
	struct sk_buff		*completion_queue;	/* skbs to free after TX */
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned		dropped;	/* packets dropped from input_pkt_queue */
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};
1764 1765
/* Advance sd->input_queue_head; compiled out without CONFIG_RPS. */
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

/* Advance sd->input_queue_tail and report the new tail through @qtail.
 * Without CONFIG_RPS this is a no-op and @qtail is left untouched.
 */
static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}
1779 1780
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);

#define HAVE_NETIF_QUEUE

/* Queue @q for a transmit pass (defined in net/core/dev.c). */
extern void __netif_schedule(struct Qdisc *q);

/* Hand @txq's qdisc to __netif_schedule() unless the queue is stopped. */
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}
1791 1792
1792 static inline void netif_tx_schedule_all(struct net_device *dev) 1793 static inline void netif_tx_schedule_all(struct net_device *dev)
1793 { 1794 {
1794 unsigned int i; 1795 unsigned int i;
1795 1796
1796 for (i = 0; i < dev->num_tx_queues; i++) 1797 for (i = 0; i < dev->num_tx_queues; i++)
1797 netif_schedule_queue(netdev_get_tx_queue(dev, i)); 1798 netif_schedule_queue(netdev_get_tx_queue(dev, i));
1798 } 1799 }
1799 1800
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	/* Clear XOFF: the stack may hand packets to this queue again. */
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_start_queue - allow transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	/* Single-queue helper: operates on queue 0 only. */
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
1815 1816
1816 static inline void netif_tx_start_all_queues(struct net_device *dev) 1817 static inline void netif_tx_start_all_queues(struct net_device *dev)
1817 { 1818 {
1818 unsigned int i; 1819 unsigned int i;
1819 1820
1820 for (i = 0; i < dev->num_tx_queues; i++) { 1821 for (i = 0; i < dev->num_tx_queues; i++) {
1821 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 1822 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1822 netif_tx_start_queue(txq); 1823 netif_tx_start_queue(txq);
1823 } 1824 }
1824 } 1825 }
1825 1826
/* Restart @dev_queue and, if this call un-stopped it, reschedule its qdisc. */
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	/* While netpoll is trapping, just unblock the queue. */
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	/* Only reschedule when we actually flipped XOFF -> running. */
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}
1837 1838
/**
 *	netif_wake_queue - restart transmit
 *	@dev: network device
 *
 *	Allow upper layers to call the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	/* Single-queue helper: wakes queue 0 only. */
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}
1849 1850
1850 static inline void netif_tx_wake_all_queues(struct net_device *dev) 1851 static inline void netif_tx_wake_all_queues(struct net_device *dev)
1851 { 1852 {
1852 unsigned int i; 1853 unsigned int i;
1853 1854
1854 for (i = 0; i < dev->num_tx_queues; i++) { 1855 for (i = 0; i < dev->num_tx_queues; i++) {
1855 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 1856 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1856 netif_tx_wake_queue(txq); 1857 netif_tx_wake_queue(txq);
1857 } 1858 }
1858 } 1859 }
1859 1860
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	/* Guard against drivers stopping queues before register_netdev()
	 * has allocated them; warn instead of dereferencing NULL.
	 */
	if (WARN_ON(!dev_queue)) {
		pr_info("netif_stop_queue() cannot be called before register_netdev()\n");
		return;
	}
	/* Set XOFF: the stack must stop handing packets to this queue. */
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_stop_queue - stop transmitted packets
 *	@dev: network device
 *
 *	Stop upper layers calling the device hard_start_xmit routine.
 *	Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	/* Single-queue helper: stops queue 0 only. */
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
1880 1881
1881 static inline void netif_tx_stop_all_queues(struct net_device *dev) 1882 static inline void netif_tx_stop_all_queues(struct net_device *dev)
1882 { 1883 {
1883 unsigned int i; 1884 unsigned int i;
1884 1885
1885 for (i = 0; i < dev->num_tx_queues; i++) { 1886 for (i = 0; i < dev->num_tx_queues; i++) {
1886 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); 1887 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
1887 netif_tx_stop_queue(txq); 1888 netif_tx_stop_queue(txq);
1888 } 1889 }
1889 } 1890 }
1890 1891
/* Nonzero when @dev_queue is flow-blocked (XOFF bit set). */
static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 *	netif_queue_stopped - test if transmit queue is flowblocked
 *	@dev: network device
 *
 *	Test if transmit queue on device is currently unable to send.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	/* Single-queue helper: checks queue 0 only. */
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}
1906 1907
/* Nonzero when @dev_queue is either stopped (XOFF) or frozen. */
static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
{
	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
}

/**
 *	netif_running - test if up
 *	@dev: network device
 *
 *	Test if the device has been brought up.
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
1922 1923
1923 /* 1924 /*
1924 * Routines to manage the subqueues on a device. We only need start 1925 * Routines to manage the subqueues on a device. We only need start
1925 * stop, and a check if it's stopped. All other device management is 1926 * stop, and a check if it's stopped. All other device management is
1926 * done at the overall netdevice level. 1927 * done at the overall netdevice level.
1927 * Also test the device if we're multiqueue. 1928 * Also test the device if we're multiqueue.
1928 */ 1929 */
1929 1930
1930 /** 1931 /**
1931 * netif_start_subqueue - allow sending packets on subqueue 1932 * netif_start_subqueue - allow sending packets on subqueue
1932 * @dev: network device 1933 * @dev: network device
1933 * @queue_index: sub queue index 1934 * @queue_index: sub queue index
1934 * 1935 *
1935 * Start individual transmit queue of a device with multiple transmit queues. 1936 * Start individual transmit queue of a device with multiple transmit queues.
1936 */ 1937 */
1937 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index) 1938 static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
1938 { 1939 {
1939 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 1940 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1940 1941
1941 netif_tx_start_queue(txq); 1942 netif_tx_start_queue(txq);
1942 } 1943 }
1943 1944
/**
 *	netif_stop_subqueue - stop sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	/* Never stop queues while netpoll is trapping packets. */
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}
1960 1961
1961 /** 1962 /**
1962 * netif_subqueue_stopped - test status of subqueue 1963 * netif_subqueue_stopped - test status of subqueue
1963 * @dev: network device 1964 * @dev: network device
1964 * @queue_index: sub queue index 1965 * @queue_index: sub queue index
1965 * 1966 *
1966 * Check individual transmit queue of a device with multiple transmit queues. 1967 * Check individual transmit queue of a device with multiple transmit queues.
1967 */ 1968 */
1968 static inline int __netif_subqueue_stopped(const struct net_device *dev, 1969 static inline int __netif_subqueue_stopped(const struct net_device *dev,
1969 u16 queue_index) 1970 u16 queue_index)
1970 { 1971 {
1971 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); 1972 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
1972 1973
1973 return netif_tx_queue_stopped(txq); 1974 return netif_tx_queue_stopped(txq);
1974 } 1975 }
1975 1976
/* As __netif_subqueue_stopped(), taking the queue from @skb's mapping. */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
1981 1982
/**
 *	netif_wake_subqueue - allow sending packets on subqueue
 *	@dev: network device
 *	@queue_index: sub queue index
 *
 *	Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	/* Queues stay stopped while netpoll is trapping packets. */
	if (netpoll_trap())
		return;
#endif
	/* Reschedule the qdisc only if this call actually cleared XOFF. */
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}
1999 2000
/*
 * Returns a Tx hash for the given packet when dev->real_num_tx_queues is used
 * as a distribution range limit for the returned value.
 */
static inline u16 skb_tx_hash(const struct net_device *dev,
			      const struct sk_buff *skb)
{
	/* Result is always < dev->real_num_tx_queues. */
	return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
}

/**
 *	netif_is_multiqueue - test if device has multiple transmit queues
 *	@dev: network device
 *
 *	Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}
2020 2021
extern int netif_set_real_num_tx_queues(struct net_device *dev,
					unsigned int txq);

#ifdef CONFIG_RPS
extern int netif_set_real_num_rx_queues(struct net_device *dev,
					unsigned int rxq);
#else
/* Without RPS there is no RX queue state to resize; always succeed. */
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
					       unsigned int rxq)
{
	return 0;
}
#endif
2034 2035
/*
 * Propagate @from_dev's active TX queue count (and, with CONFIG_RPS,
 * its RX queue count) to @to_dev.  Returns 0, or the error from the
 * RX-side resize.
 */
static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					     const struct net_device *from_dev)
{
	/* NOTE(review): the TX resize return value is ignored here. */
	netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
#ifdef CONFIG_RPS
	return netif_set_real_num_rx_queues(to_dev,
					    from_dev->real_num_rx_queues);
#else
	return 0;
#endif
}
2046 2047
/* Use this variant when it is known for sure that it
 * is executing from hardware interrupt context or with hardware interrupts
 * disabled.
 */
extern void dev_kfree_skb_irq(struct sk_buff *skb);

/* Use this variant in places where it could be invoked
 * from either hardware interrupt or other context, with hardware interrupts
 * either disabled or enabled.
 */
extern void dev_kfree_skb_any(struct sk_buff *skb);

/* RX entry points and the GRO receive API (net/core/dev.c). */
#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t dev_gro_receive(struct napi_struct *napi,
				    struct sk_buff *skb);
extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
				     struct sk_buff *skb);
extern void napi_gro_flush(struct napi_struct *napi);
extern struct sk_buff *napi_get_frags(struct napi_struct *napi);
extern gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret);
extern struct sk_buff *napi_frags_skb(struct napi_struct *napi);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);

2076 2077
/* Discard the skb cached on @napi (the napi_get_frags() buffer). */
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
2082 2083
extern int netdev_rx_handler_register(struct net_device *dev,
				      rx_handler_func_t *rx_handler,
				      void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);

/* Device control entry points (net/core/dev.c). */
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int __dev_change_flags(struct net_device *, unsigned int flags);
extern int dev_change_flags(struct net_device *, unsigned);
extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
				    struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern void dev_set_group(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
			       struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
			       struct net_device *dev,
			       struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
			   struct sk_buff *skb);

extern int netdev_budget;

/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
2113 2114
/**
 *	dev_put - release reference to device
 *	@dev: network device
 *
 *	Release reference to device to allow it to be freed.
 */
static inline void dev_put(struct net_device *dev)
{
	/* Refcount is per-cpu; the total is summed by netdev_refcnt_read(). */
	irqsafe_cpu_dec(*dev->pcpu_refcnt);
}

/**
 *	dev_hold - get reference to device
 *	@dev: network device
 *
 *	Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	/* Per-cpu increment; safe from IRQ context. */
	irqsafe_cpu_inc(*dev->pcpu_refcnt);
}
2135 2136
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
 * and _off may be called from IRQ context, but it is the caller
 * who is responsible for serialization of these calls.
 *
 * The name carrier is inappropriate; these functions should really be
 * called netif_lowerlayer_*() because they represent the state of any
 * kind of lower layer, not just hardware media.
 */

extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);

2147 2148
/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device, i.e. the lower layer has not
 * marked the link down (__LINK_STATE_NOCARRIER clear).
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
2158 2159
2159 extern unsigned long dev_trans_start(struct net_device *dev); 2160 extern unsigned long dev_trans_start(struct net_device *dev);
2160 2161
2161 extern void __netdev_watchdog_up(struct net_device *dev); 2162 extern void __netdev_watchdog_up(struct net_device *dev);
2162 2163
2163 extern void netif_carrier_on(struct net_device *dev); 2164 extern void netif_carrier_on(struct net_device *dev);
2164 2165
2165 extern void netif_carrier_off(struct net_device *dev); 2166 extern void netif_carrier_off(struct net_device *dev);
2166 2167
2167 extern void netif_notify_peers(struct net_device *dev); 2168 extern void netif_notify_peers(struct net_device *dev);
2168 2169
/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	/* Fire a linkwatch event only on the 0 -> 1 transition. */
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
2187 2188
/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	/* Fire a linkwatch event only on the 1 -> 0 transition. */
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}
2199 2200
/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if the device is dormant (as per RFC2863).  The previous
 * kernel-doc here said "test if carrier present", which was a
 * copy-paste error: this tests __LINK_STATE_DORMANT, not carrier.
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}
2210 2211
2211 2212
/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if the device is operationally up: its RFC2863 operstate is
 * IF_OPER_UP, or IF_OPER_UNKNOWN for drivers that never report an
 * operstate (backward compatibility).
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}
2223 2224
/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from system
 * (see netif_device_detach()/netif_device_attach()).
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
2234 2235
2235 extern void netif_device_detach(struct net_device *dev); 2236 extern void netif_device_detach(struct net_device *dev);
2236 2237
2237 extern void netif_device_attach(struct net_device *dev); 2238 extern void netif_device_attach(struct net_device *dev);
2238 2239
2239 /* 2240 /*
2240 * Network interface message level settings 2241 * Network interface message level settings
2241 */ 2242 */
2242 #define HAVE_NETIF_MSG 1 2243 #define HAVE_NETIF_MSG 1
2243 2244
/*
 * NETIF_MSG_* classify driver log messages; a driver's msg_enable
 * bitmap selects which classes are emitted (see netif_msg_init()).
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};

/*
 * netif_msg_<class>(p) is nonzero when that message class is enabled
 * in p->msg_enable.
 */
#define netif_msg_drv(p)	((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p)	((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p)	((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p)	((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p)	((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p)	((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p)	((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p)	((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p)	((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p)	((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p)	((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p)	((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p)		((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p)	((p)->msg_enable & NETIF_MSG_WOL)
2277 2278
2278 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) 2279 static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
2279 { 2280 {
2280 /* use default */ 2281 /* use default */
2281 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) 2282 if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
2282 return default_msg_enable_bits; 2283 return default_msg_enable_bits;
2283 if (debug_value == 0) /* no output */ 2284 if (debug_value == 0) /* no output */
2284 return 0; 2285 return 0;
2285 /* set low N bits */ 2286 /* set low N bits */
2286 return (1 << debug_value) - 1; 2287 return (1 << debug_value) - 1;
2287 } 2288 }
2288 2289
/* Take the queue's xmit lock and record the owning CPU; the owner field
 * lets helpers such as txq_trans_update() tell whether the lock is held.
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}
2294 2295
/* As __netif_tx_lock(), but also disables bottom halves. */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}
2300 2301
/* Try to take the queue's xmit lock; on success record the owning CPU.
 * Returns nonzero if the lock was acquired.
 */
static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}
2308 2309
/* Clear the owner (while still holding the lock) then release it. */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}
2314 2315
/* As __netif_tx_unlock(), but also re-enables bottom halves. */
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}
2320 2321
/* Refresh the queue's trans_start timestamp, but only when the xmit
 * lock is currently held (xmit_lock_owner != -1).
 */
static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
2326 2327
/**
 * netif_tx_lock - grab network device transmit lock
 * @dev: network device
 *
 * Get network device transmit lock: takes tx_global_lock, then marks
 * every tx queue frozen, briefly acquiring each queue's _xmit_lock so
 * that concurrent transmitters observe the frozen bit.
 * Paired with netif_tx_unlock().
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}
2354 2355
/* As netif_tx_lock(), but also disables bottom halves first. */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}
2360 2361
/* Undo netif_tx_lock(): unfreeze every tx queue, reschedule each of
 * them, then drop tx_global_lock.
 */
static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}
2377 2378
/* As netif_tx_unlock(), then re-enable bottom halves. */
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
2383 2384
/*
 * HARD_TX_LOCK/HARD_TX_UNLOCK wrap the per-queue xmit lock, skipping it
 * entirely for NETIF_F_LLTX devices (lock-less transmit drivers).
 */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}
2395 2396
/* Stop transmission on every tx queue.  Bottom halves are disabled and
 * each queue's xmit lock is taken around the stop so that in-flight
 * transmitters on other CPUs observe the stopped state.
 */
static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
2412 2413
/* Helpers taking/releasing dev->addr_list_lock, which serializes access
 * to the device address lists (plain and _bh variants).
 */
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
2432 2433
2433 /* 2434 /*
2434 * dev_addrs walker. Should be used only for read access. Call with 2435 * dev_addrs walker. Should be used only for read access. Call with
2435 * rcu_read_lock held. 2436 * rcu_read_lock held.
2436 */ 2437 */
2437 #define for_each_dev_addr(dev, ha) \ 2438 #define for_each_dev_addr(dev, ha) \
2438 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list) 2439 list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
2439 2440
2440 /* These functions live elsewhere (drivers/net/net_init.c, but related) */ 2441 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
2441 2442
2442 extern void ether_setup(struct net_device *dev); 2443 extern void ether_setup(struct net_device *dev);
2443 2444
2444 /* Support for loadable net-drivers */ 2445 /* Support for loadable net-drivers */
2445 extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, 2446 extern struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
2446 void (*setup)(struct net_device *), 2447 void (*setup)(struct net_device *),
2447 unsigned int txqs, unsigned int rxqs); 2448 unsigned int txqs, unsigned int rxqs);
2448 #define alloc_netdev(sizeof_priv, name, setup) \ 2449 #define alloc_netdev(sizeof_priv, name, setup) \
2449 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) 2450 alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
2450 2451
2451 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \ 2452 #define alloc_netdev_mq(sizeof_priv, name, setup, count) \
2452 alloc_netdev_mqs(sizeof_priv, name, setup, count, count) 2453 alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
2453 2454
2454 extern int register_netdev(struct net_device *dev); 2455 extern int register_netdev(struct net_device *dev);
2455 extern void unregister_netdev(struct net_device *dev); 2456 extern void unregister_netdev(struct net_device *dev);
2456 2457
2457 /* General hardware address lists handling functions */ 2458 /* General hardware address lists handling functions */
2458 extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list, 2459 extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2459 struct netdev_hw_addr_list *from_list, 2460 struct netdev_hw_addr_list *from_list,
2460 int addr_len, unsigned char addr_type); 2461 int addr_len, unsigned char addr_type);
2461 extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, 2462 extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2462 struct netdev_hw_addr_list *from_list, 2463 struct netdev_hw_addr_list *from_list,
2463 int addr_len, unsigned char addr_type); 2464 int addr_len, unsigned char addr_type);
2464 extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list, 2465 extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2465 struct netdev_hw_addr_list *from_list, 2466 struct netdev_hw_addr_list *from_list,
2466 int addr_len); 2467 int addr_len);
2467 extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, 2468 extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2468 struct netdev_hw_addr_list *from_list, 2469 struct netdev_hw_addr_list *from_list,
2469 int addr_len); 2470 int addr_len);
2470 extern void __hw_addr_flush(struct netdev_hw_addr_list *list); 2471 extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2471 extern void __hw_addr_init(struct netdev_hw_addr_list *list); 2472 extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2472 2473
2473 /* Functions used for device addresses handling */ 2474 /* Functions used for device addresses handling */
2474 extern int dev_addr_add(struct net_device *dev, unsigned char *addr, 2475 extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
2475 unsigned char addr_type); 2476 unsigned char addr_type);
2476 extern int dev_addr_del(struct net_device *dev, unsigned char *addr, 2477 extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
2477 unsigned char addr_type); 2478 unsigned char addr_type);
2478 extern int dev_addr_add_multiple(struct net_device *to_dev, 2479 extern int dev_addr_add_multiple(struct net_device *to_dev,
2479 struct net_device *from_dev, 2480 struct net_device *from_dev,
2480 unsigned char addr_type); 2481 unsigned char addr_type);
2481 extern int dev_addr_del_multiple(struct net_device *to_dev, 2482 extern int dev_addr_del_multiple(struct net_device *to_dev,
2482 struct net_device *from_dev, 2483 struct net_device *from_dev,
2483 unsigned char addr_type); 2484 unsigned char addr_type);
2484 extern void dev_addr_flush(struct net_device *dev); 2485 extern void dev_addr_flush(struct net_device *dev);
2485 extern int dev_addr_init(struct net_device *dev); 2486 extern int dev_addr_init(struct net_device *dev);
2486 2487
2487 /* Functions used for unicast addresses handling */ 2488 /* Functions used for unicast addresses handling */
2488 extern int dev_uc_add(struct net_device *dev, unsigned char *addr); 2489 extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
2489 extern int dev_uc_del(struct net_device *dev, unsigned char *addr); 2490 extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
2490 extern int dev_uc_sync(struct net_device *to, struct net_device *from); 2491 extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2491 extern void dev_uc_unsync(struct net_device *to, struct net_device *from); 2492 extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2492 extern void dev_uc_flush(struct net_device *dev); 2493 extern void dev_uc_flush(struct net_device *dev);
2493 extern void dev_uc_init(struct net_device *dev); 2494 extern void dev_uc_init(struct net_device *dev);
2494 2495
2495 /* Functions used for multicast addresses handling */ 2496 /* Functions used for multicast addresses handling */
2496 extern int dev_mc_add(struct net_device *dev, unsigned char *addr); 2497 extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
2497 extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr); 2498 extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
2498 extern int dev_mc_del(struct net_device *dev, unsigned char *addr); 2499 extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
2499 extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr); 2500 extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
2500 extern int dev_mc_sync(struct net_device *to, struct net_device *from); 2501 extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2501 extern void dev_mc_unsync(struct net_device *to, struct net_device *from); 2502 extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2502 extern void dev_mc_flush(struct net_device *dev); 2503 extern void dev_mc_flush(struct net_device *dev);
2503 extern void dev_mc_init(struct net_device *dev); 2504 extern void dev_mc_init(struct net_device *dev);
2504 2505
2505 /* Functions used for secondary unicast and multicast support */ 2506 /* Functions used for secondary unicast and multicast support */
2506 extern void dev_set_rx_mode(struct net_device *dev); 2507 extern void dev_set_rx_mode(struct net_device *dev);
2507 extern void __dev_set_rx_mode(struct net_device *dev); 2508 extern void __dev_set_rx_mode(struct net_device *dev);
2508 extern int dev_set_promiscuity(struct net_device *dev, int inc); 2509 extern int dev_set_promiscuity(struct net_device *dev, int inc);
2509 extern int dev_set_allmulti(struct net_device *dev, int inc); 2510 extern int dev_set_allmulti(struct net_device *dev, int inc);
2510 extern void netdev_state_change(struct net_device *dev); 2511 extern void netdev_state_change(struct net_device *dev);
2511 extern int netdev_bonding_change(struct net_device *dev, 2512 extern int netdev_bonding_change(struct net_device *dev,
2512 unsigned long event); 2513 unsigned long event);
2513 extern void netdev_features_change(struct net_device *dev); 2514 extern void netdev_features_change(struct net_device *dev);
2514 /* Load a device via the kmod */ 2515 /* Load a device via the kmod */
2515 extern void dev_load(struct net *net, const char *name); 2516 extern void dev_load(struct net *net, const char *name);
2516 extern void dev_mcast_init(void); 2517 extern void dev_mcast_init(void);
2517 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, 2518 extern struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
2518 struct rtnl_link_stats64 *storage); 2519 struct rtnl_link_stats64 *storage);
2519 2520
2520 extern int netdev_max_backlog; 2521 extern int netdev_max_backlog;
2521 extern int netdev_tstamp_prequeue; 2522 extern int netdev_tstamp_prequeue;
2522 extern int weight_p; 2523 extern int weight_p;
2523 extern int bpf_jit_enable; 2524 extern int bpf_jit_enable;
2524 extern int netdev_set_master(struct net_device *dev, struct net_device *master); 2525 extern int netdev_set_master(struct net_device *dev, struct net_device *master);
2525 extern int netdev_set_bond_master(struct net_device *dev, 2526 extern int netdev_set_bond_master(struct net_device *dev,
2526 struct net_device *master); 2527 struct net_device *master);
2527 extern int skb_checksum_help(struct sk_buff *skb); 2528 extern int skb_checksum_help(struct sk_buff *skb);
2528 extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features); 2529 extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, u32 features);
2529 #ifdef CONFIG_BUG 2530 #ifdef CONFIG_BUG
2530 extern void netdev_rx_csum_fault(struct net_device *dev); 2531 extern void netdev_rx_csum_fault(struct net_device *dev);
2531 #else 2532 #else
2532 static inline void netdev_rx_csum_fault(struct net_device *dev) 2533 static inline void netdev_rx_csum_fault(struct net_device *dev)
2533 { 2534 {
2534 } 2535 }
2535 #endif 2536 #endif
2536 /* rx skb timestamps */ 2537 /* rx skb timestamps */
2537 extern void net_enable_timestamp(void); 2538 extern void net_enable_timestamp(void);
2538 extern void net_disable_timestamp(void); 2539 extern void net_disable_timestamp(void);
2539 2540
2540 #ifdef CONFIG_PROC_FS 2541 #ifdef CONFIG_PROC_FS
2541 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos); 2542 extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
2542 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos); 2543 extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
2543 extern void dev_seq_stop(struct seq_file *seq, void *v); 2544 extern void dev_seq_stop(struct seq_file *seq, void *v);
2544 #endif 2545 #endif
2545 2546
2546 extern int netdev_class_create_file(struct class_attribute *class_attr); 2547 extern int netdev_class_create_file(struct class_attribute *class_attr);
2547 extern void netdev_class_remove_file(struct class_attribute *class_attr); 2548 extern void netdev_class_remove_file(struct class_attribute *class_attr);
2548 2549
2549 extern struct kobj_ns_type_operations net_ns_type_operations; 2550 extern struct kobj_ns_type_operations net_ns_type_operations;
2550 2551
2551 extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); 2552 extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
2552 2553
2553 extern void linkwatch_run_queue(void); 2554 extern void linkwatch_run_queue(void);
2554 2555
/* Features the device should have: the user-requested bits for the
 * changeable (hw_features) part, and the current feature bits for
 * everything the user cannot toggle.
 */
static inline u32 netdev_get_wanted_features(struct net_device *dev)
{
	return (dev->features & ~dev->hw_features) | dev->wanted_features;
}
2559 u32 netdev_increment_features(u32 all, u32 one, u32 mask); 2560 u32 netdev_increment_features(u32 all, u32 one, u32 mask);
2560 u32 netdev_fix_features(struct net_device *dev, u32 features); 2561 u32 netdev_fix_features(struct net_device *dev, u32 features);
2561 int __netdev_update_features(struct net_device *dev); 2562 int __netdev_update_features(struct net_device *dev);
2562 void netdev_update_features(struct net_device *dev); 2563 void netdev_update_features(struct net_device *dev);
2563 2564
2564 void netif_stacked_transfer_operstate(const struct net_device *rootdev, 2565 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2565 struct net_device *dev); 2566 struct net_device *dev);
2566 2567
2567 u32 netif_skb_features(struct sk_buff *skb); 2568 u32 netif_skb_features(struct sk_buff *skb);
2568 2569
/* Test whether every feature bit required by @gso_type (shifted into
 * the NETIF_F_GSO_* range) is present in @features.
 */
static inline int net_gso_ok(u32 features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}
2574 2575
/* Test whether the device features suffice to GSO this skb: the GSO
 * type must be supported and, if the skb has a frag list, so must
 * NETIF_F_FRAGLIST.
 */
static inline int skb_gso_ok(struct sk_buff *skb, u32 features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST));
}
2580 2581
/* An skb needs software GSO when it is a GSO skb and either the device
 * lacks the required features or its checksum is not CHECKSUM_PARTIAL.
 */
static inline int netif_needs_gso(struct sk_buff *skb, int features)
{
	return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
2586 2587
/* Set the maximum size of a GSO packet the device can handle. */
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
2592 2593
2593 static inline int netif_is_bond_slave(struct net_device *dev) 2594 static inline int netif_is_bond_slave(struct net_device *dev)
2594 { 2595 {
2595 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 2596 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
2596 } 2597 }
2597 2598
2598 extern struct pernet_operations __net_initdata loopback_net_ops; 2599 extern struct pernet_operations __net_initdata loopback_net_ops;
2599 2600
2600 int dev_ethtool_get_settings(struct net_device *dev, 2601 int dev_ethtool_get_settings(struct net_device *dev,
2601 struct ethtool_cmd *cmd); 2602 struct ethtool_cmd *cmd);
2602 2603
2603 static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev) 2604 static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
2604 { 2605 {
2605 if (dev->features & NETIF_F_RXCSUM) 2606 if (dev->features & NETIF_F_RXCSUM)
2606 return 1; 2607 return 1;
2607 if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum) 2608 if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
2608 return 0; 2609 return 0;
2609 return dev->ethtool_ops->get_rx_csum(dev); 2610 return dev->ethtool_ops->get_rx_csum(dev);
2610 } 2611 }
2611 2612
2612 static inline u32 dev_ethtool_get_flags(struct net_device *dev) 2613 static inline u32 dev_ethtool_get_flags(struct net_device *dev)
2613 { 2614 {
2614 if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags) 2615 if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
2615 return 0; 2616 return 0;
2616 return dev->ethtool_ops->get_flags(dev); 2617 return dev->ethtool_ops->get_flags(dev);
2617 } 2618 }
2618 2619
2619 /* Logging, debugging and troubleshooting/diagnostic helpers. */ 2620 /* Logging, debugging and troubleshooting/diagnostic helpers. */
2620 2621
2621 /* netdev_printk helpers, similar to dev_printk */ 2622 /* netdev_printk helpers, similar to dev_printk */
2622 2623
2623 static inline const char *netdev_name(const struct net_device *dev) 2624 static inline const char *netdev_name(const struct net_device *dev)
2624 { 2625 {
2625 if (dev->reg_state != NETREG_REGISTERED) 2626 if (dev->reg_state != NETREG_REGISTERED)
2626 return "(unregistered net_device)"; 2627 return "(unregistered net_device)";
2627 return dev->name; 2628 return dev->name;
2628 } 2629 }
2629 2630
2630 extern int netdev_printk(const char *level, const struct net_device *dev, 2631 extern int netdev_printk(const char *level, const struct net_device *dev,
2631 const char *format, ...) 2632 const char *format, ...)
2632 __attribute__ ((format (printf, 3, 4))); 2633 __attribute__ ((format (printf, 3, 4)));
2633 extern int netdev_emerg(const struct net_device *dev, const char *format, ...) 2634 extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
2634 __attribute__ ((format (printf, 2, 3))); 2635 __attribute__ ((format (printf, 2, 3)));
2635 extern int netdev_alert(const struct net_device *dev, const char *format, ...) 2636 extern int netdev_alert(const struct net_device *dev, const char *format, ...)
2636 __attribute__ ((format (printf, 2, 3))); 2637 __attribute__ ((format (printf, 2, 3)));
2637 extern int netdev_crit(const struct net_device *dev, const char *format, ...) 2638 extern int netdev_crit(const struct net_device *dev, const char *format, ...)
2638 __attribute__ ((format (printf, 2, 3))); 2639 __attribute__ ((format (printf, 2, 3)));
2639 extern int netdev_err(const struct net_device *dev, const char *format, ...) 2640 extern int netdev_err(const struct net_device *dev, const char *format, ...)
2640 __attribute__ ((format (printf, 2, 3))); 2641 __attribute__ ((format (printf, 2, 3)));
2641 extern int netdev_warn(const struct net_device *dev, const char *format, ...) 2642 extern int netdev_warn(const struct net_device *dev, const char *format, ...)
2642 __attribute__ ((format (printf, 2, 3))); 2643 __attribute__ ((format (printf, 2, 3)));
2643 extern int netdev_notice(const struct net_device *dev, const char *format, ...) 2644 extern int netdev_notice(const struct net_device *dev, const char *format, ...)
2644 __attribute__ ((format (printf, 2, 3))); 2645 __attribute__ ((format (printf, 2, 3)));
2645 extern int netdev_info(const struct net_device *dev, const char *format, ...) 2646 extern int netdev_info(const struct net_device *dev, const char *format, ...)
2646 __attribute__ ((format (printf, 2, 3))); 2647 __attribute__ ((format (printf, 2, 3)));
2647 2648
2648 #define MODULE_ALIAS_NETDEV(device) \ 2649 #define MODULE_ALIAS_NETDEV(device) \
2649 MODULE_ALIAS("netdev-" device) 2650 MODULE_ALIAS("netdev-" device)
2650 2651
2651 #if defined(DEBUG) 2652 #if defined(DEBUG)
2652 #define netdev_dbg(__dev, format, args...) \ 2653 #define netdev_dbg(__dev, format, args...) \
2653 netdev_printk(KERN_DEBUG, __dev, format, ##args) 2654 netdev_printk(KERN_DEBUG, __dev, format, ##args)
2654 #elif defined(CONFIG_DYNAMIC_DEBUG) 2655 #elif defined(CONFIG_DYNAMIC_DEBUG)
2655 #define netdev_dbg(__dev, format, args...) \ 2656 #define netdev_dbg(__dev, format, args...) \
2656 do { \ 2657 do { \
2657 dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ 2658 dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
2658 netdev_name(__dev), ##args); \ 2659 netdev_name(__dev), ##args); \
2659 } while (0) 2660 } while (0)
2660 #else 2661 #else
2661 #define netdev_dbg(__dev, format, args...) \ 2662 #define netdev_dbg(__dev, format, args...) \
2662 ({ \ 2663 ({ \
2663 if (0) \ 2664 if (0) \
2664 netdev_printk(KERN_DEBUG, __dev, format, ##args); \ 2665 netdev_printk(KERN_DEBUG, __dev, format, ##args); \
2665 0; \ 2666 0; \
2666 }) 2667 })
2667 #endif 2668 #endif
2668 2669
2669 #if defined(VERBOSE_DEBUG) 2670 #if defined(VERBOSE_DEBUG)
2670 #define netdev_vdbg netdev_dbg 2671 #define netdev_vdbg netdev_dbg
2671 #else 2672 #else
2672 2673
2673 #define netdev_vdbg(dev, format, args...) \ 2674 #define netdev_vdbg(dev, format, args...) \
2674 ({ \ 2675 ({ \
2675 if (0) \ 2676 if (0) \
2676 netdev_printk(KERN_DEBUG, dev, format, ##args); \ 2677 netdev_printk(KERN_DEBUG, dev, format, ##args); \
2677 0; \ 2678 0; \
2678 }) 2679 })
2679 #endif 2680 #endif
2680 2681
2681 /* 2682 /*
2682 * netdev_WARN() acts like dev_printk(), but with the key difference 2683 * netdev_WARN() acts like dev_printk(), but with the key difference
2683 * of using a WARN/WARN_ON to get the message out, including the 2684 * of using a WARN/WARN_ON to get the message out, including the
2684 * file/line information and a backtrace. 2685 * file/line information and a backtrace.
2685 */ 2686 */
2686 #define netdev_WARN(dev, format, args...) \ 2687 #define netdev_WARN(dev, format, args...) \
2687 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args); 2688 WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
2688 2689
2689 /* netif printk helpers, similar to netdev_printk */ 2690 /* netif printk helpers, similar to netdev_printk */
2690 2691
2691 #define netif_printk(priv, type, level, dev, fmt, args...) \ 2692 #define netif_printk(priv, type, level, dev, fmt, args...) \
2692 do { \ 2693 do { \
2693 if (netif_msg_##type(priv)) \ 2694 if (netif_msg_##type(priv)) \
2694 netdev_printk(level, (dev), fmt, ##args); \ 2695 netdev_printk(level, (dev), fmt, ##args); \
2695 } while (0) 2696 } while (0)
2696 2697
2697 #define netif_level(level, priv, type, dev, fmt, args...) \ 2698 #define netif_level(level, priv, type, dev, fmt, args...) \
2698 do { \ 2699 do { \
2699 if (netif_msg_##type(priv)) \ 2700 if (netif_msg_##type(priv)) \
2700 netdev_##level(dev, fmt, ##args); \ 2701 netdev_##level(dev, fmt, ##args); \
2701 } while (0) 2702 } while (0)
2702 2703
2703 #define netif_emerg(priv, type, dev, fmt, args...) \ 2704 #define netif_emerg(priv, type, dev, fmt, args...) \
2704 netif_level(emerg, priv, type, dev, fmt, ##args) 2705 netif_level(emerg, priv, type, dev, fmt, ##args)
2705 #define netif_alert(priv, type, dev, fmt, args...) \ 2706 #define netif_alert(priv, type, dev, fmt, args...) \
2706 netif_level(alert, priv, type, dev, fmt, ##args) 2707 netif_level(alert, priv, type, dev, fmt, ##args)
2707 #define netif_crit(priv, type, dev, fmt, args...) \ 2708 #define netif_crit(priv, type, dev, fmt, args...) \
2708 netif_level(crit, priv, type, dev, fmt, ##args) 2709 netif_level(crit, priv, type, dev, fmt, ##args)
2709 #define netif_err(priv, type, dev, fmt, args...) \ 2710 #define netif_err(priv, type, dev, fmt, args...) \
2710 netif_level(err, priv, type, dev, fmt, ##args) 2711 netif_level(err, priv, type, dev, fmt, ##args)
2711 #define netif_warn(priv, type, dev, fmt, args...) \ 2712 #define netif_warn(priv, type, dev, fmt, args...) \
2712 netif_level(warn, priv, type, dev, fmt, ##args) 2713 netif_level(warn, priv, type, dev, fmt, ##args)
2713 #define netif_notice(priv, type, dev, fmt, args...) \ 2714 #define netif_notice(priv, type, dev, fmt, args...) \
2714 netif_level(notice, priv, type, dev, fmt, ##args) 2715 netif_level(notice, priv, type, dev, fmt, ##args)
2715 #define netif_info(priv, type, dev, fmt, args...) \ 2716 #define netif_info(priv, type, dev, fmt, args...) \
2716 netif_level(info, priv, type, dev, fmt, ##args) 2717 netif_level(info, priv, type, dev, fmt, ##args)
2717 2718
2718 #if defined(DEBUG) 2719 #if defined(DEBUG)
2719 #define netif_dbg(priv, type, dev, format, args...) \ 2720 #define netif_dbg(priv, type, dev, format, args...) \
2720 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args) 2721 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
2721 #elif defined(CONFIG_DYNAMIC_DEBUG) 2722 #elif defined(CONFIG_DYNAMIC_DEBUG)
2722 #define netif_dbg(priv, type, netdev, format, args...) \ 2723 #define netif_dbg(priv, type, netdev, format, args...) \
2723 do { \ 2724 do { \
2724 if (netif_msg_##type(priv)) \ 2725 if (netif_msg_##type(priv)) \
2725 dynamic_dev_dbg((netdev)->dev.parent, \ 2726 dynamic_dev_dbg((netdev)->dev.parent, \
2726 "%s: " format, \ 2727 "%s: " format, \
2727 netdev_name(netdev), ##args); \ 2728 netdev_name(netdev), ##args); \
2728 } while (0) 2729 } while (0)
2729 #else 2730 #else
2730 #define netif_dbg(priv, type, dev, format, args...) \ 2731 #define netif_dbg(priv, type, dev, format, args...) \
2731 ({ \ 2732 ({ \
2732 if (0) \ 2733 if (0) \
2733 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 2734 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2734 0; \ 2735 0; \
2735 }) 2736 })
2736 #endif 2737 #endif
2737 2738
2738 #if defined(VERBOSE_DEBUG) 2739 #if defined(VERBOSE_DEBUG)
2739 #define netif_vdbg netif_dbg 2740 #define netif_vdbg netif_dbg
2740 #else 2741 #else
2741 #define netif_vdbg(priv, type, dev, format, args...) \ 2742 #define netif_vdbg(priv, type, dev, format, args...) \
2742 ({ \ 2743 ({ \
2743 if (0) \ 2744 if (0) \
2744 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \ 2745 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
2745 0; \ 2746 0; \
2746 }) 2747 })
2747 #endif 2748 #endif
2748 2749
2749 #endif /* __KERNEL__ */ 2750 #endif /* __KERNEL__ */
2750 2751
2751 #endif /* _LINUX_NETDEVICE_H */ 2752 #endif /* _LINUX_NETDEVICE_H */
2752 2753
1 /* 1 /*
2 * net/core/ethtool.c - Ethtool ioctl handler 2 * net/core/ethtool.c - Ethtool ioctl handler
3 * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx> 3 * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
4 * 4 *
5 * This file is where we call all the ethtool_ops commands to get 5 * This file is where we call all the ethtool_ops commands to get
6 * the information ethtool needs. 6 * the information ethtool needs.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or 10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/types.h> 15 #include <linux/types.h>
16 #include <linux/capability.h> 16 #include <linux/capability.h>
17 #include <linux/errno.h> 17 #include <linux/errno.h>
18 #include <linux/ethtool.h> 18 #include <linux/ethtool.h>
19 #include <linux/netdevice.h> 19 #include <linux/netdevice.h>
20 #include <linux/bitops.h> 20 #include <linux/bitops.h>
21 #include <linux/uaccess.h> 21 #include <linux/uaccess.h>
22 #include <linux/vmalloc.h> 22 #include <linux/vmalloc.h>
23 #include <linux/slab.h> 23 #include <linux/slab.h>
24 #include <linux/rtnetlink.h> 24 #include <linux/rtnetlink.h>
25 #include <linux/sched.h> 25 #include <linux/sched.h>
26 26
27 /* 27 /*
28 * Some useful ethtool_ops methods that're device independent. 28 * Some useful ethtool_ops methods that're device independent.
29 * If we find that all drivers want to do the same thing here, 29 * If we find that all drivers want to do the same thing here,
30 * we can turn these into dev_() function calls. 30 * we can turn these into dev_() function calls.
31 */ 31 */
32 32
33 u32 ethtool_op_get_link(struct net_device *dev) 33 u32 ethtool_op_get_link(struct net_device *dev)
34 { 34 {
35 return netif_carrier_ok(dev) ? 1 : 0; 35 return netif_carrier_ok(dev) ? 1 : 0;
36 } 36 }
37 EXPORT_SYMBOL(ethtool_op_get_link); 37 EXPORT_SYMBOL(ethtool_op_get_link);
38 38
39 u32 ethtool_op_get_tx_csum(struct net_device *dev) 39 u32 ethtool_op_get_tx_csum(struct net_device *dev)
40 { 40 {
41 return (dev->features & NETIF_F_ALL_CSUM) != 0; 41 return (dev->features & NETIF_F_ALL_CSUM) != 0;
42 } 42 }
43 EXPORT_SYMBOL(ethtool_op_get_tx_csum); 43 EXPORT_SYMBOL(ethtool_op_get_tx_csum);
44 44
45 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data) 45 int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
46 { 46 {
47 if (data) 47 if (data)
48 dev->features |= NETIF_F_IP_CSUM; 48 dev->features |= NETIF_F_IP_CSUM;
49 else 49 else
50 dev->features &= ~NETIF_F_IP_CSUM; 50 dev->features &= ~NETIF_F_IP_CSUM;
51 51
52 return 0; 52 return 0;
53 } 53 }
54 EXPORT_SYMBOL(ethtool_op_set_tx_csum); 54 EXPORT_SYMBOL(ethtool_op_set_tx_csum);
55 55
56 int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data) 56 int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
57 { 57 {
58 if (data) 58 if (data)
59 dev->features |= NETIF_F_HW_CSUM; 59 dev->features |= NETIF_F_HW_CSUM;
60 else 60 else
61 dev->features &= ~NETIF_F_HW_CSUM; 61 dev->features &= ~NETIF_F_HW_CSUM;
62 62
63 return 0; 63 return 0;
64 } 64 }
65 EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum); 65 EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
66 66
67 int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data) 67 int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
68 { 68 {
69 if (data) 69 if (data)
70 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 70 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
71 else 71 else
72 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 72 dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
73 73
74 return 0; 74 return 0;
75 } 75 }
76 EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum); 76 EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
77 77
78 u32 ethtool_op_get_sg(struct net_device *dev) 78 u32 ethtool_op_get_sg(struct net_device *dev)
79 { 79 {
80 return (dev->features & NETIF_F_SG) != 0; 80 return (dev->features & NETIF_F_SG) != 0;
81 } 81 }
82 EXPORT_SYMBOL(ethtool_op_get_sg); 82 EXPORT_SYMBOL(ethtool_op_get_sg);
83 83
84 int ethtool_op_set_sg(struct net_device *dev, u32 data) 84 int ethtool_op_set_sg(struct net_device *dev, u32 data)
85 { 85 {
86 if (data) 86 if (data)
87 dev->features |= NETIF_F_SG; 87 dev->features |= NETIF_F_SG;
88 else 88 else
89 dev->features &= ~NETIF_F_SG; 89 dev->features &= ~NETIF_F_SG;
90 90
91 return 0; 91 return 0;
92 } 92 }
93 EXPORT_SYMBOL(ethtool_op_set_sg); 93 EXPORT_SYMBOL(ethtool_op_set_sg);
94 94
95 u32 ethtool_op_get_tso(struct net_device *dev) 95 u32 ethtool_op_get_tso(struct net_device *dev)
96 { 96 {
97 return (dev->features & NETIF_F_TSO) != 0; 97 return (dev->features & NETIF_F_TSO) != 0;
98 } 98 }
99 EXPORT_SYMBOL(ethtool_op_get_tso); 99 EXPORT_SYMBOL(ethtool_op_get_tso);
100 100
101 int ethtool_op_set_tso(struct net_device *dev, u32 data) 101 int ethtool_op_set_tso(struct net_device *dev, u32 data)
102 { 102 {
103 if (data) 103 if (data)
104 dev->features |= NETIF_F_TSO; 104 dev->features |= NETIF_F_TSO;
105 else 105 else
106 dev->features &= ~NETIF_F_TSO; 106 dev->features &= ~NETIF_F_TSO;
107 107
108 return 0; 108 return 0;
109 } 109 }
110 EXPORT_SYMBOL(ethtool_op_set_tso); 110 EXPORT_SYMBOL(ethtool_op_set_tso);
111 111
112 u32 ethtool_op_get_ufo(struct net_device *dev) 112 u32 ethtool_op_get_ufo(struct net_device *dev)
113 { 113 {
114 return (dev->features & NETIF_F_UFO) != 0; 114 return (dev->features & NETIF_F_UFO) != 0;
115 } 115 }
116 EXPORT_SYMBOL(ethtool_op_get_ufo); 116 EXPORT_SYMBOL(ethtool_op_get_ufo);
117 117
118 int ethtool_op_set_ufo(struct net_device *dev, u32 data) 118 int ethtool_op_set_ufo(struct net_device *dev, u32 data)
119 { 119 {
120 if (data) 120 if (data)
121 dev->features |= NETIF_F_UFO; 121 dev->features |= NETIF_F_UFO;
122 else 122 else
123 dev->features &= ~NETIF_F_UFO; 123 dev->features &= ~NETIF_F_UFO;
124 return 0; 124 return 0;
125 } 125 }
126 EXPORT_SYMBOL(ethtool_op_set_ufo); 126 EXPORT_SYMBOL(ethtool_op_set_ufo);
127 127
128 /* the following list of flags are the same as their associated 128 /* the following list of flags are the same as their associated
129 * NETIF_F_xxx values in include/linux/netdevice.h 129 * NETIF_F_xxx values in include/linux/netdevice.h
130 */ 130 */
131 static const u32 flags_dup_features = 131 static const u32 flags_dup_features =
132 (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE | 132 (ETH_FLAG_LRO | ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN | ETH_FLAG_NTUPLE |
133 ETH_FLAG_RXHASH); 133 ETH_FLAG_RXHASH);
134 134
135 u32 ethtool_op_get_flags(struct net_device *dev) 135 u32 ethtool_op_get_flags(struct net_device *dev)
136 { 136 {
137 /* in the future, this function will probably contain additional 137 /* in the future, this function will probably contain additional
138 * handling for flags which are not so easily handled 138 * handling for flags which are not so easily handled
139 * by a simple masking operation 139 * by a simple masking operation
140 */ 140 */
141 141
142 return dev->features & flags_dup_features; 142 return dev->features & flags_dup_features;
143 } 143 }
144 EXPORT_SYMBOL(ethtool_op_get_flags); 144 EXPORT_SYMBOL(ethtool_op_get_flags);
145 145
146 /* Check if device can enable (or disable) particular feature coded in "data" 146 /* Check if device can enable (or disable) particular feature coded in "data"
147 * argument. Flags "supported" describe features that can be toggled by device. 147 * argument. Flags "supported" describe features that can be toggled by device.
148 * If feature can not be toggled, it state (enabled or disabled) must match 148 * If feature can not be toggled, it state (enabled or disabled) must match
149 * hardcoded device features state, otherwise flags are marked as invalid. 149 * hardcoded device features state, otherwise flags are marked as invalid.
150 */ 150 */
151 bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported) 151 bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported)
152 { 152 {
153 u32 features = dev->features & flags_dup_features; 153 u32 features = dev->features & flags_dup_features;
154 /* "data" can contain only flags_dup_features bits, 154 /* "data" can contain only flags_dup_features bits,
155 * see __ethtool_set_flags */ 155 * see __ethtool_set_flags */
156 156
157 return (features & ~supported) != (data & ~supported); 157 return (features & ~supported) != (data & ~supported);
158 } 158 }
159 EXPORT_SYMBOL(ethtool_invalid_flags); 159 EXPORT_SYMBOL(ethtool_invalid_flags);
160 160
161 int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) 161 int ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
162 { 162 {
163 if (ethtool_invalid_flags(dev, data, supported)) 163 if (ethtool_invalid_flags(dev, data, supported))
164 return -EINVAL; 164 return -EINVAL;
165 165
166 dev->features = ((dev->features & ~flags_dup_features) | 166 dev->features = ((dev->features & ~flags_dup_features) |
167 (data & flags_dup_features)); 167 (data & flags_dup_features));
168 return 0; 168 return 0;
169 } 169 }
170 EXPORT_SYMBOL(ethtool_op_set_flags); 170 EXPORT_SYMBOL(ethtool_op_set_flags);
171 171
172 void ethtool_ntuple_flush(struct net_device *dev) 172 void ethtool_ntuple_flush(struct net_device *dev)
173 { 173 {
174 struct ethtool_rx_ntuple_flow_spec_container *fsc, *f; 174 struct ethtool_rx_ntuple_flow_spec_container *fsc, *f;
175 175
176 list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) { 176 list_for_each_entry_safe(fsc, f, &dev->ethtool_ntuple_list.list, list) {
177 list_del(&fsc->list); 177 list_del(&fsc->list);
178 kfree(fsc); 178 kfree(fsc);
179 } 179 }
180 dev->ethtool_ntuple_list.count = 0; 180 dev->ethtool_ntuple_list.count = 0;
181 } 181 }
182 EXPORT_SYMBOL(ethtool_ntuple_flush); 182 EXPORT_SYMBOL(ethtool_ntuple_flush);
183 183
184 /* Handlers for each ethtool command */ 184 /* Handlers for each ethtool command */
185 185
186 #define ETHTOOL_DEV_FEATURE_WORDS 1 186 #define ETHTOOL_DEV_FEATURE_WORDS 1
187 187
188 static void ethtool_get_features_compat(struct net_device *dev, 188 static void ethtool_get_features_compat(struct net_device *dev,
189 struct ethtool_get_features_block *features) 189 struct ethtool_get_features_block *features)
190 { 190 {
191 if (!dev->ethtool_ops) 191 if (!dev->ethtool_ops)
192 return; 192 return;
193 193
194 /* getting RX checksum */ 194 /* getting RX checksum */
195 if (dev->ethtool_ops->get_rx_csum) 195 if (dev->ethtool_ops->get_rx_csum)
196 if (dev->ethtool_ops->get_rx_csum(dev)) 196 if (dev->ethtool_ops->get_rx_csum(dev))
197 features[0].active |= NETIF_F_RXCSUM; 197 features[0].active |= NETIF_F_RXCSUM;
198 198
199 /* mark legacy-changeable features */ 199 /* mark legacy-changeable features */
200 if (dev->ethtool_ops->set_sg) 200 if (dev->ethtool_ops->set_sg)
201 features[0].available |= NETIF_F_SG; 201 features[0].available |= NETIF_F_SG;
202 if (dev->ethtool_ops->set_tx_csum) 202 if (dev->ethtool_ops->set_tx_csum)
203 features[0].available |= NETIF_F_ALL_CSUM; 203 features[0].available |= NETIF_F_ALL_CSUM;
204 if (dev->ethtool_ops->set_tso) 204 if (dev->ethtool_ops->set_tso)
205 features[0].available |= NETIF_F_ALL_TSO; 205 features[0].available |= NETIF_F_ALL_TSO;
206 if (dev->ethtool_ops->set_rx_csum) 206 if (dev->ethtool_ops->set_rx_csum)
207 features[0].available |= NETIF_F_RXCSUM; 207 features[0].available |= NETIF_F_RXCSUM;
208 if (dev->ethtool_ops->set_flags) 208 if (dev->ethtool_ops->set_flags)
209 features[0].available |= flags_dup_features; 209 features[0].available |= flags_dup_features;
210 } 210 }
211 211
212 static int ethtool_set_feature_compat(struct net_device *dev, 212 static int ethtool_set_feature_compat(struct net_device *dev,
213 int (*legacy_set)(struct net_device *, u32), 213 int (*legacy_set)(struct net_device *, u32),
214 struct ethtool_set_features_block *features, u32 mask) 214 struct ethtool_set_features_block *features, u32 mask)
215 { 215 {
216 u32 do_set; 216 u32 do_set;
217 217
218 if (!legacy_set) 218 if (!legacy_set)
219 return 0; 219 return 0;
220 220
221 if (!(features[0].valid & mask)) 221 if (!(features[0].valid & mask))
222 return 0; 222 return 0;
223 223
224 features[0].valid &= ~mask; 224 features[0].valid &= ~mask;
225 225
226 do_set = !!(features[0].requested & mask); 226 do_set = !!(features[0].requested & mask);
227 227
228 if (legacy_set(dev, do_set) < 0) 228 if (legacy_set(dev, do_set) < 0)
229 netdev_info(dev, 229 netdev_info(dev,
230 "Legacy feature change (%s) failed for 0x%08x\n", 230 "Legacy feature change (%s) failed for 0x%08x\n",
231 do_set ? "set" : "clear", mask); 231 do_set ? "set" : "clear", mask);
232 232
233 return 1; 233 return 1;
234 } 234 }
235 235
236 static int ethtool_set_features_compat(struct net_device *dev, 236 static int ethtool_set_features_compat(struct net_device *dev,
237 struct ethtool_set_features_block *features) 237 struct ethtool_set_features_block *features)
238 { 238 {
239 int compat; 239 int compat;
240 240
241 if (!dev->ethtool_ops) 241 if (!dev->ethtool_ops)
242 return 0; 242 return 0;
243 243
244 compat = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg, 244 compat = ethtool_set_feature_compat(dev, dev->ethtool_ops->set_sg,
245 features, NETIF_F_SG); 245 features, NETIF_F_SG);
246 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum, 246 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tx_csum,
247 features, NETIF_F_ALL_CSUM); 247 features, NETIF_F_ALL_CSUM);
248 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso, 248 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_tso,
249 features, NETIF_F_ALL_TSO); 249 features, NETIF_F_ALL_TSO);
250 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum, 250 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum,
251 features, NETIF_F_RXCSUM); 251 features, NETIF_F_RXCSUM);
252 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags, 252 compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags,
253 features, flags_dup_features); 253 features, flags_dup_features);
254 254
255 return compat; 255 return compat;
256 } 256 }
257 257
258 static int ethtool_get_features(struct net_device *dev, void __user *useraddr) 258 static int ethtool_get_features(struct net_device *dev, void __user *useraddr)
259 { 259 {
260 struct ethtool_gfeatures cmd = { 260 struct ethtool_gfeatures cmd = {
261 .cmd = ETHTOOL_GFEATURES, 261 .cmd = ETHTOOL_GFEATURES,
262 .size = ETHTOOL_DEV_FEATURE_WORDS, 262 .size = ETHTOOL_DEV_FEATURE_WORDS,
263 }; 263 };
264 struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = { 264 struct ethtool_get_features_block features[ETHTOOL_DEV_FEATURE_WORDS] = {
265 { 265 {
266 .available = dev->hw_features, 266 .available = dev->hw_features,
267 .requested = dev->wanted_features, 267 .requested = dev->wanted_features,
268 .active = dev->features, 268 .active = dev->features,
269 .never_changed = NETIF_F_NEVER_CHANGE, 269 .never_changed = NETIF_F_NEVER_CHANGE,
270 }, 270 },
271 }; 271 };
272 u32 __user *sizeaddr; 272 u32 __user *sizeaddr;
273 u32 copy_size; 273 u32 copy_size;
274 274
275 ethtool_get_features_compat(dev, features); 275 ethtool_get_features_compat(dev, features);
276 276
277 sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size); 277 sizeaddr = useraddr + offsetof(struct ethtool_gfeatures, size);
278 if (get_user(copy_size, sizeaddr)) 278 if (get_user(copy_size, sizeaddr))
279 return -EFAULT; 279 return -EFAULT;
280 280
281 if (copy_size > ETHTOOL_DEV_FEATURE_WORDS) 281 if (copy_size > ETHTOOL_DEV_FEATURE_WORDS)
282 copy_size = ETHTOOL_DEV_FEATURE_WORDS; 282 copy_size = ETHTOOL_DEV_FEATURE_WORDS;
283 283
284 if (copy_to_user(useraddr, &cmd, sizeof(cmd))) 284 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
285 return -EFAULT; 285 return -EFAULT;
286 useraddr += sizeof(cmd); 286 useraddr += sizeof(cmd);
287 if (copy_to_user(useraddr, features, copy_size * sizeof(*features))) 287 if (copy_to_user(useraddr, features, copy_size * sizeof(*features)))
288 return -EFAULT; 288 return -EFAULT;
289 289
290 return 0; 290 return 0;
291 } 291 }
292 292
293 static int ethtool_set_features(struct net_device *dev, void __user *useraddr) 293 static int ethtool_set_features(struct net_device *dev, void __user *useraddr)
294 { 294 {
295 struct ethtool_sfeatures cmd; 295 struct ethtool_sfeatures cmd;
296 struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS]; 296 struct ethtool_set_features_block features[ETHTOOL_DEV_FEATURE_WORDS];
297 int ret = 0; 297 int ret = 0;
298 298
299 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 299 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
300 return -EFAULT; 300 return -EFAULT;
301 useraddr += sizeof(cmd); 301 useraddr += sizeof(cmd);
302 302
303 if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS) 303 if (cmd.size != ETHTOOL_DEV_FEATURE_WORDS)
304 return -EINVAL; 304 return -EINVAL;
305 305
306 if (copy_from_user(features, useraddr, sizeof(features))) 306 if (copy_from_user(features, useraddr, sizeof(features)))
307 return -EFAULT; 307 return -EFAULT;
308 308
309 if (features[0].valid & ~NETIF_F_ETHTOOL_BITS) 309 if (features[0].valid & ~NETIF_F_ETHTOOL_BITS)
310 return -EINVAL; 310 return -EINVAL;
311 311
312 if (ethtool_set_features_compat(dev, features)) 312 if (ethtool_set_features_compat(dev, features))
313 ret |= ETHTOOL_F_COMPAT; 313 ret |= ETHTOOL_F_COMPAT;
314 314
315 if (features[0].valid & ~dev->hw_features) { 315 if (features[0].valid & ~dev->hw_features) {
316 features[0].valid &= dev->hw_features; 316 features[0].valid &= dev->hw_features;
317 ret |= ETHTOOL_F_UNSUPPORTED; 317 ret |= ETHTOOL_F_UNSUPPORTED;
318 } 318 }
319 319
320 dev->wanted_features &= ~features[0].valid; 320 dev->wanted_features &= ~features[0].valid;
321 dev->wanted_features |= features[0].valid & features[0].requested; 321 dev->wanted_features |= features[0].valid & features[0].requested;
322 __netdev_update_features(dev); 322 __netdev_update_features(dev);
323 323
324 if ((dev->wanted_features ^ dev->features) & features[0].valid) 324 if ((dev->wanted_features ^ dev->features) & features[0].valid)
325 ret |= ETHTOOL_F_WISH; 325 ret |= ETHTOOL_F_WISH;
326 326
327 return ret; 327 return ret;
328 } 328 }
329 329
330 static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = { 330 static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GSTRING_LEN] = {
331 /* NETIF_F_SG */ "tx-scatter-gather", 331 /* NETIF_F_SG */ "tx-scatter-gather",
332 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4", 332 /* NETIF_F_IP_CSUM */ "tx-checksum-ipv4",
333 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded", 333 /* NETIF_F_NO_CSUM */ "tx-checksum-unneeded",
334 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic", 334 /* NETIF_F_HW_CSUM */ "tx-checksum-ip-generic",
335 /* NETIF_F_IPV6_CSUM */ "tx_checksum-ipv6", 335 /* NETIF_F_IPV6_CSUM */ "tx_checksum-ipv6",
336 /* NETIF_F_HIGHDMA */ "highdma", 336 /* NETIF_F_HIGHDMA */ "highdma",
337 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist", 337 /* NETIF_F_FRAGLIST */ "tx-scatter-gather-fraglist",
338 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert", 338 /* NETIF_F_HW_VLAN_TX */ "tx-vlan-hw-insert",
339 339
340 /* NETIF_F_HW_VLAN_RX */ "rx-vlan-hw-parse", 340 /* NETIF_F_HW_VLAN_RX */ "rx-vlan-hw-parse",
341 /* NETIF_F_HW_VLAN_FILTER */ "rx-vlan-filter", 341 /* NETIF_F_HW_VLAN_FILTER */ "rx-vlan-filter",
342 /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged", 342 /* NETIF_F_VLAN_CHALLENGED */ "vlan-challenged",
343 /* NETIF_F_GSO */ "tx-generic-segmentation", 343 /* NETIF_F_GSO */ "tx-generic-segmentation",
344 /* NETIF_F_LLTX */ "tx-lockless", 344 /* NETIF_F_LLTX */ "tx-lockless",
345 /* NETIF_F_NETNS_LOCAL */ "netns-local", 345 /* NETIF_F_NETNS_LOCAL */ "netns-local",
346 /* NETIF_F_GRO */ "rx-gro", 346 /* NETIF_F_GRO */ "rx-gro",
347 /* NETIF_F_LRO */ "rx-lro", 347 /* NETIF_F_LRO */ "rx-lro",
348 348
349 /* NETIF_F_TSO */ "tx-tcp-segmentation", 349 /* NETIF_F_TSO */ "tx-tcp-segmentation",
350 /* NETIF_F_UFO */ "tx-udp-fragmentation", 350 /* NETIF_F_UFO */ "tx-udp-fragmentation",
351 /* NETIF_F_GSO_ROBUST */ "tx-gso-robust", 351 /* NETIF_F_GSO_ROBUST */ "tx-gso-robust",
352 /* NETIF_F_TSO_ECN */ "tx-tcp-ecn-segmentation", 352 /* NETIF_F_TSO_ECN */ "tx-tcp-ecn-segmentation",
353 /* NETIF_F_TSO6 */ "tx-tcp6-segmentation", 353 /* NETIF_F_TSO6 */ "tx-tcp6-segmentation",
354 /* NETIF_F_FSO */ "tx-fcoe-segmentation", 354 /* NETIF_F_FSO */ "tx-fcoe-segmentation",
355 "", 355 "",
356 "", 356 "",
357 357
358 /* NETIF_F_FCOE_CRC */ "tx-checksum-fcoe-crc", 358 /* NETIF_F_FCOE_CRC */ "tx-checksum-fcoe-crc",
359 /* NETIF_F_SCTP_CSUM */ "tx-checksum-sctp", 359 /* NETIF_F_SCTP_CSUM */ "tx-checksum-sctp",
360 /* NETIF_F_FCOE_MTU */ "fcoe-mtu", 360 /* NETIF_F_FCOE_MTU */ "fcoe-mtu",
361 /* NETIF_F_NTUPLE */ "rx-ntuple-filter", 361 /* NETIF_F_NTUPLE */ "rx-ntuple-filter",
362 /* NETIF_F_RXHASH */ "rx-hashing", 362 /* NETIF_F_RXHASH */ "rx-hashing",
363 /* NETIF_F_RXCSUM */ "rx-checksum", 363 /* NETIF_F_RXCSUM */ "rx-checksum",
364 /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy" 364 /* NETIF_F_NOCACHE_COPY */ "tx-nocache-copy"
365 "", 365 /* NETIF_F_LOOPBACK */ "loopback",
366 }; 366 };
367 367
368 static int __ethtool_get_sset_count(struct net_device *dev, int sset) 368 static int __ethtool_get_sset_count(struct net_device *dev, int sset)
369 { 369 {
370 const struct ethtool_ops *ops = dev->ethtool_ops; 370 const struct ethtool_ops *ops = dev->ethtool_ops;
371 371
372 if (sset == ETH_SS_FEATURES) 372 if (sset == ETH_SS_FEATURES)
373 return ARRAY_SIZE(netdev_features_strings); 373 return ARRAY_SIZE(netdev_features_strings);
374 374
375 if (ops && ops->get_sset_count && ops->get_strings) 375 if (ops && ops->get_sset_count && ops->get_strings)
376 return ops->get_sset_count(dev, sset); 376 return ops->get_sset_count(dev, sset);
377 else 377 else
378 return -EOPNOTSUPP; 378 return -EOPNOTSUPP;
379 } 379 }
380 380
381 static void __ethtool_get_strings(struct net_device *dev, 381 static void __ethtool_get_strings(struct net_device *dev,
382 u32 stringset, u8 *data) 382 u32 stringset, u8 *data)
383 { 383 {
384 const struct ethtool_ops *ops = dev->ethtool_ops; 384 const struct ethtool_ops *ops = dev->ethtool_ops;
385 385
386 if (stringset == ETH_SS_FEATURES) 386 if (stringset == ETH_SS_FEATURES)
387 memcpy(data, netdev_features_strings, 387 memcpy(data, netdev_features_strings,
388 sizeof(netdev_features_strings)); 388 sizeof(netdev_features_strings));
389 else 389 else
390 /* ops->get_strings is valid because checked earlier */ 390 /* ops->get_strings is valid because checked earlier */
391 ops->get_strings(dev, stringset, data); 391 ops->get_strings(dev, stringset, data);
392 } 392 }
393 393
/* Map a legacy discrete ethtool command (ETHTOOL_[GS]TXCSUM etc.) to the
 * NETIF_F_* feature bit(s) it controls.  Passing any other command is a
 * kernel bug, hence BUG() in the default case.
 */
static u32 ethtool_get_feature_mask(u32 eth_cmd)
{
	/* feature masks of legacy discrete ethtool ops */

	switch (eth_cmd) {
	case ETHTOOL_GTXCSUM:
	case ETHTOOL_STXCSUM:
		/* tx csum covers all checksum variants plus SCTP CRC */
		return NETIF_F_ALL_CSUM | NETIF_F_SCTP_CSUM;
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_SRXCSUM:
		return NETIF_F_RXCSUM;
	case ETHTOOL_GSG:
	case ETHTOOL_SSG:
		return NETIF_F_SG;
	case ETHTOOL_GTSO:
	case ETHTOOL_STSO:
		return NETIF_F_ALL_TSO;
	case ETHTOOL_GUFO:
	case ETHTOOL_SUFO:
		return NETIF_F_UFO;
	case ETHTOOL_GGSO:
	case ETHTOOL_SGSO:
		return NETIF_F_GSO;
	case ETHTOOL_GGRO:
	case ETHTOOL_SGRO:
		return NETIF_F_GRO;
	default:
		BUG();
	}
}
424 424
425 static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd) 425 static void *__ethtool_get_one_feature_actor(struct net_device *dev, u32 ethcmd)
426 { 426 {
427 const struct ethtool_ops *ops = dev->ethtool_ops; 427 const struct ethtool_ops *ops = dev->ethtool_ops;
428 428
429 if (!ops) 429 if (!ops)
430 return NULL; 430 return NULL;
431 431
432 switch (ethcmd) { 432 switch (ethcmd) {
433 case ETHTOOL_GTXCSUM: 433 case ETHTOOL_GTXCSUM:
434 return ops->get_tx_csum; 434 return ops->get_tx_csum;
435 case ETHTOOL_GRXCSUM: 435 case ETHTOOL_GRXCSUM:
436 return ops->get_rx_csum; 436 return ops->get_rx_csum;
437 case ETHTOOL_SSG: 437 case ETHTOOL_SSG:
438 return ops->get_sg; 438 return ops->get_sg;
439 case ETHTOOL_STSO: 439 case ETHTOOL_STSO:
440 return ops->get_tso; 440 return ops->get_tso;
441 case ETHTOOL_SUFO: 441 case ETHTOOL_SUFO:
442 return ops->get_ufo; 442 return ops->get_ufo;
443 default: 443 default:
444 return NULL; 444 return NULL;
445 } 445 }
446 } 446 }
447 447
448 static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev) 448 static u32 __ethtool_get_rx_csum_oldbug(struct net_device *dev)
449 { 449 {
450 return !!(dev->features & NETIF_F_ALL_CSUM); 450 return !!(dev->features & NETIF_F_ALL_CSUM);
451 } 451 }
452 452
453 static int ethtool_get_one_feature(struct net_device *dev, 453 static int ethtool_get_one_feature(struct net_device *dev,
454 char __user *useraddr, u32 ethcmd) 454 char __user *useraddr, u32 ethcmd)
455 { 455 {
456 u32 mask = ethtool_get_feature_mask(ethcmd); 456 u32 mask = ethtool_get_feature_mask(ethcmd);
457 struct ethtool_value edata = { 457 struct ethtool_value edata = {
458 .cmd = ethcmd, 458 .cmd = ethcmd,
459 .data = !!(dev->features & mask), 459 .data = !!(dev->features & mask),
460 }; 460 };
461 461
462 /* compatibility with discrete get_ ops */ 462 /* compatibility with discrete get_ ops */
463 if (!(dev->hw_features & mask)) { 463 if (!(dev->hw_features & mask)) {
464 u32 (*actor)(struct net_device *); 464 u32 (*actor)(struct net_device *);
465 465
466 actor = __ethtool_get_one_feature_actor(dev, ethcmd); 466 actor = __ethtool_get_one_feature_actor(dev, ethcmd);
467 467
468 /* bug compatibility with old get_rx_csum */ 468 /* bug compatibility with old get_rx_csum */
469 if (ethcmd == ETHTOOL_GRXCSUM && !actor) 469 if (ethcmd == ETHTOOL_GRXCSUM && !actor)
470 actor = __ethtool_get_rx_csum_oldbug; 470 actor = __ethtool_get_rx_csum_oldbug;
471 471
472 if (actor) 472 if (actor)
473 edata.data = actor(dev); 473 edata.data = actor(dev);
474 } 474 }
475 475
476 if (copy_to_user(useraddr, &edata, sizeof(edata))) 476 if (copy_to_user(useraddr, &edata, sizeof(edata)))
477 return -EFAULT; 477 return -EFAULT;
478 return 0; 478 return 0;
479 } 479 }
480 480
481 static int __ethtool_set_tx_csum(struct net_device *dev, u32 data); 481 static int __ethtool_set_tx_csum(struct net_device *dev, u32 data);
482 static int __ethtool_set_rx_csum(struct net_device *dev, u32 data); 482 static int __ethtool_set_rx_csum(struct net_device *dev, u32 data);
483 static int __ethtool_set_sg(struct net_device *dev, u32 data); 483 static int __ethtool_set_sg(struct net_device *dev, u32 data);
484 static int __ethtool_set_tso(struct net_device *dev, u32 data); 484 static int __ethtool_set_tso(struct net_device *dev, u32 data);
485 static int __ethtool_set_ufo(struct net_device *dev, u32 data); 485 static int __ethtool_set_ufo(struct net_device *dev, u32 data);
486 486
/* ETHTOOL_S* handler for a single offload feature.  Prefers the modern
 * wanted_features/__netdev_update_features() path when the driver
 * declares the bits in hw_features; otherwise falls back to the
 * driver's legacy discrete set_* ops.
 */
static int ethtool_set_one_feature(struct net_device *dev,
	void __user *useraddr, u32 ethcmd)
{
	struct ethtool_value edata;
	u32 mask;

	if (copy_from_user(&edata, useraddr, sizeof(edata)))
		return -EFAULT;

	/* Only the bits the driver advertises as changeable count here. */
	mask = ethtool_get_feature_mask(ethcmd);
	mask &= dev->hw_features;
	if (mask) {
		if (edata.data)
			dev->wanted_features |= mask;
		else
			dev->wanted_features &= ~mask;

		__netdev_update_features(dev);
		return 0;
	}

	/* Driver is not converted to ndo_fix_features or does not
	 * support changing this offload. In the latter case it won't
	 * have corresponding ethtool_ops field set.
	 *
	 * Following part is to be removed after all drivers advertise
	 * their changeable features in netdev->hw_features and stop
	 * using discrete offload setting ops.
	 */

	switch (ethcmd) {
	case ETHTOOL_STXCSUM:
		return __ethtool_set_tx_csum(dev, edata.data);
	case ETHTOOL_SRXCSUM:
		return __ethtool_set_rx_csum(dev, edata.data);
	case ETHTOOL_SSG:
		return __ethtool_set_sg(dev, edata.data);
	case ETHTOOL_STSO:
		return __ethtool_set_tso(dev, edata.data);
	case ETHTOOL_SUFO:
		return __ethtool_set_ufo(dev, edata.data);
	default:
		return -EOPNOTSUPP;
	}
}
532 532
/* Set the ETH_FLAG_* feature flags (the bits in flags_dup_features).
 * A driver-provided legacy set_flags() op takes precedence; otherwise
 * the request is routed through wanted_features and
 * __netdev_update_features().  Returns -EINVAL for unknown bits or a
 * partially unsupported change, -EOPNOTSUPP when none of the changed
 * bits are supported.
 */
int __ethtool_set_flags(struct net_device *dev, u32 data)
{
	u32 changed;

	if (data & ~flags_dup_features)
		return -EINVAL;

	/* legacy set_flags() op */
	if (dev->ethtool_ops->set_flags) {
		/* A converted driver should not also implement set_flags(). */
		if (unlikely(dev->hw_features & flags_dup_features))
			netdev_warn(dev,
				    "driver BUG: mixed hw_features and set_flags()\n");
		return dev->ethtool_ops->set_flags(dev, data);
	}

	/* allow changing only bits set in hw_features */
	changed = (data ^ dev->features) & flags_dup_features;
	if (changed & ~dev->hw_features)
		return (changed & dev->hw_features) ? -EINVAL : -EOPNOTSUPP;

	dev->wanted_features =
		(dev->wanted_features & ~changed) | (data & dev->hw_features);

	__netdev_update_features(dev);

	return 0;
}
560 560
561 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) 561 static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
562 { 562 {
563 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; 563 struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET };
564 int err; 564 int err;
565 565
566 if (!dev->ethtool_ops->get_settings) 566 if (!dev->ethtool_ops->get_settings)
567 return -EOPNOTSUPP; 567 return -EOPNOTSUPP;
568 568
569 err = dev->ethtool_ops->get_settings(dev, &cmd); 569 err = dev->ethtool_ops->get_settings(dev, &cmd);
570 if (err < 0) 570 if (err < 0)
571 return err; 571 return err;
572 572
573 if (copy_to_user(useraddr, &cmd, sizeof(cmd))) 573 if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
574 return -EFAULT; 574 return -EFAULT;
575 return 0; 575 return 0;
576 } 576 }
577 577
578 static int ethtool_set_settings(struct net_device *dev, void __user *useraddr) 578 static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
579 { 579 {
580 struct ethtool_cmd cmd; 580 struct ethtool_cmd cmd;
581 581
582 if (!dev->ethtool_ops->set_settings) 582 if (!dev->ethtool_ops->set_settings)
583 return -EOPNOTSUPP; 583 return -EOPNOTSUPP;
584 584
585 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 585 if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
586 return -EFAULT; 586 return -EFAULT;
587 587
588 return dev->ethtool_ops->set_settings(dev, &cmd); 588 return dev->ethtool_ops->set_settings(dev, &cmd);
589 } 589 }
590 590
/* ETHTOOL_GDRVINFO: report driver name/bus info plus string-set sizes,
 * register dump length and EEPROM length.  Falls back to the parent
 * device's driver name when the driver has no get_drvinfo op.
 */
static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
						  void __user *useraddr)
{
	struct ethtool_drvinfo info;
	const struct ethtool_ops *ops = dev->ethtool_ops;

	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GDRVINFO;
	if (ops && ops->get_drvinfo) {
		ops->get_drvinfo(dev, &info);
	} else if (dev->dev.parent && dev->dev.parent->driver) {
		/* No op: synthesize minimal info from the parent device. */
		strlcpy(info.bus_info, dev_name(dev->dev.parent),
			sizeof(info.bus_info));
		strlcpy(info.driver, dev->dev.parent->driver->name,
			sizeof(info.driver));
	} else {
		return -EOPNOTSUPP;
	}

	/*
	 * this method of obtaining string set info is deprecated;
	 * Use ETHTOOL_GSSET_INFO instead.
	 */
	if (ops && ops->get_sset_count) {
		int rc;

		/* Negative counts mean "set unsupported"; leave field 0. */
		rc = ops->get_sset_count(dev, ETH_SS_TEST);
		if (rc >= 0)
			info.testinfo_len = rc;
		rc = ops->get_sset_count(dev, ETH_SS_STATS);
		if (rc >= 0)
			info.n_stats = rc;
		rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
		if (rc >= 0)
			info.n_priv_flags = rc;
	}
	if (ops && ops->get_regs_len)
		info.regdump_len = ops->get_regs_len(dev);
	if (ops && ops->get_eeprom_len)
		info.eedump_len = ops->get_eeprom_len(dev);

	if (copy_to_user(useraddr, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
636 636
/* ETHTOOL_GSSET_INFO: for each string set requested in sset_mask,
 * report its length.  The reply's sset_mask keeps only the supported
 * sets, and the variable-length data[] array holds one u32 count per
 * set bit, in ascending bit order.
 */
static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
						    void __user *useraddr)
{
	struct ethtool_sset_info info;
	u64 sset_mask;
	int i, idx = 0, n_bits = 0, ret, rc;
	u32 *info_buf = NULL;

	if (copy_from_user(&info, useraddr, sizeof(info)))
		return -EFAULT;

	/* store copy of mask, because we zero struct later on */
	sset_mask = info.sset_mask;
	if (!sset_mask)
		return 0;

	/* calculate size of return buffer */
	n_bits = hweight64(sset_mask);

	memset(&info, 0, sizeof(info));
	info.cmd = ETHTOOL_GSSET_INFO;

	/* n_bits <= 64, so this allocation is small and bounded */
	info_buf = kzalloc(n_bits * sizeof(u32), GFP_USER);
	if (!info_buf)
		return -ENOMEM;

	/*
	 * fill return buffer based on input bitmask and successful
	 * get_sset_count return
	 */
	for (i = 0; i < 64; i++) {
		if (!(sset_mask & (1ULL << i)))
			continue;

		rc = __ethtool_get_sset_count(dev, i);
		if (rc >= 0) {
			info.sset_mask |= (1ULL << i);
			info_buf[idx++] = rc;
		}
	}

	ret = -EFAULT;
	if (copy_to_user(useraddr, &info, sizeof(info)))
		goto out;

	/* counts land in the flexible data[] member following the header */
	useraddr += offsetof(struct ethtool_sset_info, data);
	if (copy_to_user(useraddr, info_buf, idx * sizeof(u32)))
		goto out;

	ret = 0;

out:
	kfree(info_buf);
	return ret;
}
692 692
693 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, 693 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
694 u32 cmd, void __user *useraddr) 694 u32 cmd, void __user *useraddr)
695 { 695 {
696 struct ethtool_rxnfc info; 696 struct ethtool_rxnfc info;
697 size_t info_size = sizeof(info); 697 size_t info_size = sizeof(info);
698 698
699 if (!dev->ethtool_ops->set_rxnfc) 699 if (!dev->ethtool_ops->set_rxnfc)
700 return -EOPNOTSUPP; 700 return -EOPNOTSUPP;
701 701
702 /* struct ethtool_rxnfc was originally defined for 702 /* struct ethtool_rxnfc was originally defined for
703 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data 703 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
704 * members. User-space might still be using that 704 * members. User-space might still be using that
705 * definition. */ 705 * definition. */
706 if (cmd == ETHTOOL_SRXFH) 706 if (cmd == ETHTOOL_SRXFH)
707 info_size = (offsetof(struct ethtool_rxnfc, data) + 707 info_size = (offsetof(struct ethtool_rxnfc, data) +
708 sizeof(info.data)); 708 sizeof(info.data));
709 709
710 if (copy_from_user(&info, useraddr, info_size)) 710 if (copy_from_user(&info, useraddr, info_size))
711 return -EFAULT; 711 return -EFAULT;
712 712
713 return dev->ethtool_ops->set_rxnfc(dev, &info); 713 return dev->ethtool_ops->set_rxnfc(dev, &info);
714 } 714 }
715 715
/* ETHTOOL_GRXFH/GRXCLSRL*: query RX flow classification state.  For
 * ETHTOOL_GRXCLSRLALL a rule-location buffer sized by the caller's
 * rule_cnt is allocated and copied back after the driver fills it.
 */
static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
						u32 cmd, void __user *useraddr)
{
	struct ethtool_rxnfc info;
	size_t info_size = sizeof(info);
	const struct ethtool_ops *ops = dev->ethtool_ops;
	int ret;
	void *rule_buf = NULL;

	if (!ops->get_rxnfc)
		return -EOPNOTSUPP;

	/* struct ethtool_rxnfc was originally defined for
	 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
	 * members. User-space might still be using that
	 * definition. */
	if (cmd == ETHTOOL_GRXFH)
		info_size = (offsetof(struct ethtool_rxnfc, data) +
			     sizeof(info.data));

	if (copy_from_user(&info, useraddr, info_size))
		return -EFAULT;

	if (info.cmd == ETHTOOL_GRXCLSRLALL) {
		if (info.rule_cnt > 0) {
			/* bound the allocation before the multiply */
			if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
				rule_buf = kzalloc(info.rule_cnt * sizeof(u32),
						   GFP_USER);
			if (!rule_buf)
				return -ENOMEM;
		}
	}

	ret = ops->get_rxnfc(dev, &info, rule_buf);
	if (ret < 0)
		goto err_out;

	ret = -EFAULT;
	if (copy_to_user(useraddr, &info, info_size))
		goto err_out;

	if (rule_buf) {
		/* rule locations follow the fixed header in userspace */
		useraddr += offsetof(struct ethtool_rxnfc, rule_locs);
		if (copy_to_user(useraddr, rule_buf,
				 info.rule_cnt * sizeof(u32)))
			goto err_out;
	}
	ret = 0;

err_out:
	kfree(rule_buf);

	return ret;
}
770 770
/* ETHTOOL_GRXFHINDIR: read the RX flow hash indirection table.  The
 * caller supplies the table size; a kernel buffer of header plus
 * ring_index[size] is filled by the driver and copied back whole.
 */
static noinline_for_stack int ethtool_get_rxfh_indir(struct net_device *dev,
						     void __user *useraddr)
{
	struct ethtool_rxfh_indir *indir;
	u32 table_size;
	size_t full_size;
	int ret;

	if (!dev->ethtool_ops->get_rxfh_indir)
		return -EOPNOTSUPP;

	/* fetch only the size field first to know how much to allocate */
	if (copy_from_user(&table_size,
			   useraddr + offsetof(struct ethtool_rxfh_indir, size),
			   sizeof(table_size)))
		return -EFAULT;

	/* overflow-safe bound on the allocation size */
	if (table_size >
	    (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
		return -ENOMEM;
	full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
	indir = kzalloc(full_size, GFP_USER);
	if (!indir)
		return -ENOMEM;

	indir->cmd = ETHTOOL_GRXFHINDIR;
	indir->size = table_size;
	ret = dev->ethtool_ops->get_rxfh_indir(dev, indir);
	if (ret)
		goto out;

	if (copy_to_user(useraddr, indir, full_size))
		ret = -EFAULT;

out:
	kfree(indir);
	return ret;
}
808 808
809 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev, 809 static noinline_for_stack int ethtool_set_rxfh_indir(struct net_device *dev,
810 void __user *useraddr) 810 void __user *useraddr)
811 { 811 {
812 struct ethtool_rxfh_indir *indir; 812 struct ethtool_rxfh_indir *indir;
813 u32 table_size; 813 u32 table_size;
814 size_t full_size; 814 size_t full_size;
815 int ret; 815 int ret;
816 816
817 if (!dev->ethtool_ops->set_rxfh_indir) 817 if (!dev->ethtool_ops->set_rxfh_indir)
818 return -EOPNOTSUPP; 818 return -EOPNOTSUPP;
819 819
820 if (copy_from_user(&table_size, 820 if (copy_from_user(&table_size,
821 useraddr + offsetof(struct ethtool_rxfh_indir, size), 821 useraddr + offsetof(struct ethtool_rxfh_indir, size),
822 sizeof(table_size))) 822 sizeof(table_size)))
823 return -EFAULT; 823 return -EFAULT;
824 824
825 if (table_size > 825 if (table_size >
826 (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index)) 826 (KMALLOC_MAX_SIZE - sizeof(*indir)) / sizeof(*indir->ring_index))
827 return -ENOMEM; 827 return -ENOMEM;
828 full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size; 828 full_size = sizeof(*indir) + sizeof(*indir->ring_index) * table_size;
829 indir = kmalloc(full_size, GFP_USER); 829 indir = kmalloc(full_size, GFP_USER);
830 if (!indir) 830 if (!indir)
831 return -ENOMEM; 831 return -ENOMEM;
832 832
833 if (copy_from_user(indir, useraddr, full_size)) { 833 if (copy_from_user(indir, useraddr, full_size)) {
834 ret = -EFAULT; 834 ret = -EFAULT;
835 goto out; 835 goto out;
836 } 836 }
837 837
838 ret = dev->ethtool_ops->set_rxfh_indir(dev, indir); 838 ret = dev->ethtool_ops->set_rxfh_indir(dev, indir);
839 839
840 out: 840 out:
841 kfree(indir); 841 kfree(indir);
842 return ret; 842 return ret;
843 } 843 }
844 844
/* Append an n-tuple flow spec to the core's cached filter list.  Takes
 * ownership of @fsc: it is either linked into @list or freed when the
 * list is already at capacity.
 */
static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
				   struct ethtool_rx_ntuple_flow_spec *spec,
				   struct ethtool_rx_ntuple_flow_spec_container *fsc)
{

	/* don't add filters forever */
	if (list->count >= ETHTOOL_MAX_NTUPLE_LIST_ENTRY) {
		/* free the container */
		kfree(fsc);
		return;
	}

	/* Copy the whole filter over */
	fsc->fs.flow_type = spec->flow_type;
	memcpy(&fsc->fs.h_u, &spec->h_u, sizeof(spec->h_u));
	memcpy(&fsc->fs.m_u, &spec->m_u, sizeof(spec->m_u));

	fsc->fs.vlan_tag = spec->vlan_tag;
	fsc->fs.vlan_tag_mask = spec->vlan_tag_mask;
	fsc->fs.data = spec->data;
	fsc->fs.data_mask = spec->data_mask;
	fsc->fs.action = spec->action;

	/* add to the list */
	list_add_tail_rcu(&fsc->list, &list->list);
	list->count++;
}
872 872
/*
 * ethtool does not (or did not) set masks for flow parameters that are
 * not specified, so if both value and mask are 0 then this must be
 * treated as equivalent to a mask with all bits set.  Implement that
 * here rather than in drivers.
 */
static void rx_ntuple_fix_masks(struct ethtool_rx_ntuple_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *entry = &fs->h_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;

	/* only TCP/UDP/SCTP over IPv4 specs share this field layout */
	if (fs->flow_type != TCP_V4_FLOW &&
	    fs->flow_type != UDP_V4_FLOW &&
	    fs->flow_type != SCTP_V4_FLOW)
		return;

	/* value|mask == 0 means "field unspecified": force full mask */
	if (!(entry->ip4src | mask->ip4src))
		mask->ip4src = htonl(0xffffffff);
	if (!(entry->ip4dst | mask->ip4dst))
		mask->ip4dst = htonl(0xffffffff);
	if (!(entry->psrc | mask->psrc))
		mask->psrc = htons(0xffff);
	if (!(entry->pdst | mask->pdst))
		mask->pdst = htons(0xffff);
	if (!(entry->tos | mask->tos))
		mask->tos = 0xff;
	if (!(fs->vlan_tag | fs->vlan_tag_mask))
		fs->vlan_tag_mask = 0xffff;
	if (!(fs->data | fs->data_mask))
		fs->data_mask = 0xffffffffffffffffULL;
}
904 904
/* ETHTOOL_SRXNTUPLE: install an RX n-tuple filter via the driver.  When
 * the driver has no GET op, a copy of the spec is cached in
 * dev->ethtool_ntuple_list so the core can answer later queries; the
 * container is pre-allocated so a post-install OOM cannot occur.
 */
static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
						    void __user *useraddr)
{
	struct ethtool_rx_ntuple cmd;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct ethtool_rx_ntuple_flow_spec_container *fsc = NULL;
	int ret;

	if (!ops->set_rx_ntuple)
		return -EOPNOTSUPP;

	/* the feature must currently be enabled on the device */
	if (!(dev->features & NETIF_F_NTUPLE))
		return -EINVAL;

	if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
		return -EFAULT;

	/* normalize zero value+mask pairs to "match all" masks */
	rx_ntuple_fix_masks(&cmd.fs);

	/*
	 * Cache filter in dev struct for GET operation only if
	 * the underlying driver doesn't have its own GET operation, and
	 * only if the filter was added successfully. First make sure we
	 * can allocate the filter, then continue if successful.
	 */
	if (!ops->get_rx_ntuple) {
		fsc = kmalloc(sizeof(*fsc), GFP_ATOMIC);
		if (!fsc)
			return -ENOMEM;
	}

	ret = ops->set_rx_ntuple(dev, &cmd);
	if (ret) {
		/* driver rejected the filter: discard the unused cache entry */
		kfree(fsc);
		return ret;
	}

	if (!ops->get_rx_ntuple)
		__rx_ntuple_filter_add(&dev->ethtool_ntuple_list, &cmd.fs, fsc);

	return ret;
}
947 947
/*
 * ethtool_get_rx_ntuple - dump RX n-tuple filters as human-readable
 * strings (ETHTOOL_GRXNTUPLE).
 *
 * If the driver provides ->get_rx_ntuple the dump is delegated to it.
 * Otherwise the filters cached by the SET path in
 * dev->ethtool_ntuple_list are formatted here, one string of
 * ETH_GSTRING_LEN bytes per output line.
 *
 * Returns 0 on success; -EOPNOTSUPP, -EFAULT, -ENOMEM or a negative
 * driver error code on failure.
 */
static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_gstrings gstrings;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	struct ethtool_rx_ntuple_flow_spec_container *fsc;
	u8 *data;
	char *p;
	int ret, i, num_strings = 0;

	if (!ops->get_sset_count)
		return -EOPNOTSUPP;

	if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
		return -EFAULT;

	/* Driver advertises how many strings the dump may produce. */
	ret = ops->get_sset_count(dev, gstrings.string_set);
	if (ret < 0)
		return ret;

	gstrings.len = ret;

	/* NOTE(review): the buffer is sized solely from get_sset_count();
	 * the formatting loop below does not bounds-check against it and
	 * trusts the driver's count to cover the cached filter list. */
	data = kzalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
	if (!data)
		return -ENOMEM;

	if (ops->get_rx_ntuple) {
		/* driver-specific filter grab */
		ret = ops->get_rx_ntuple(dev, gstrings.string_set, data);
		goto copy;
	}

	/* default ethtool filter grab */
	i = 0;
	p = (char *)data;
	list_for_each_entry(fsc, &dev->ethtool_ntuple_list.list, list) {
		sprintf(p, "Filter %d:\n", i);
		p += ETH_GSTRING_LEN;
		num_strings++;

		/* First line of each filter: its flow type. */
		switch (fsc->fs.flow_type) {
		case TCP_V4_FLOW:
			sprintf(p, "\tFlow Type: TCP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case UDP_V4_FLOW:
			sprintf(p, "\tFlow Type: UDP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case SCTP_V4_FLOW:
			sprintf(p, "\tFlow Type: SCTP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case AH_ESP_V4_FLOW:
			sprintf(p, "\tFlow Type: AH ESP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case ESP_V4_FLOW:
			sprintf(p, "\tFlow Type: ESP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case IP_USER_FLOW:
			sprintf(p, "\tFlow Type: Raw IP\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case IPV4_FLOW:
			sprintf(p, "\tFlow Type: IPv4\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		default:
			/* Unrecognized type: emit the marker line only and
			 * skip all per-type detail below. */
			sprintf(p, "\tFlow Type: Unknown\n");
			p += ETH_GSTRING_LEN;
			num_strings++;
			goto unknown_filter;
		}

		/* now the rest of the filters */
		switch (fsc->fs.flow_type) {
		case TCP_V4_FLOW:
		case UDP_V4_FLOW:
		case SCTP_V4_FLOW:
			/* h_u holds the match values, m_u the masks. */
			sprintf(p, "\tSrc IP addr: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc IP mask: 0x%x\n",
				fsc->fs.m_u.tcp_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP addr: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP mask: 0x%x\n",
				fsc->fs.m_u.tcp_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.psrc,
				fsc->fs.m_u.tcp_ip4_spec.psrc);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.pdst,
				fsc->fs.m_u.tcp_ip4_spec.pdst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
				fsc->fs.h_u.tcp_ip4_spec.tos,
				fsc->fs.m_u.tcp_ip4_spec.tos);
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case AH_ESP_V4_FLOW:
		case ESP_V4_FLOW:
			sprintf(p, "\tSrc IP addr: 0x%x\n",
				fsc->fs.h_u.ah_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc IP mask: 0x%x\n",
				fsc->fs.m_u.ah_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP addr: 0x%x\n",
				fsc->fs.h_u.ah_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP mask: 0x%x\n",
				fsc->fs.m_u.ah_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSPI: %d, mask: 0x%x\n",
				fsc->fs.h_u.ah_ip4_spec.spi,
				fsc->fs.m_u.ah_ip4_spec.spi);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
				fsc->fs.h_u.ah_ip4_spec.tos,
				fsc->fs.m_u.ah_ip4_spec.tos);
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case IP_USER_FLOW:
			sprintf(p, "\tSrc IP addr: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc IP mask: 0x%x\n",
				fsc->fs.m_u.usr_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP addr: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP mask: 0x%x\n",
				fsc->fs.m_u.usr_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		case IPV4_FLOW:
			sprintf(p, "\tSrc IP addr: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tSrc IP mask: 0x%x\n",
				fsc->fs.m_u.usr_ip4_spec.ip4src);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP addr: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tDest IP mask: 0x%x\n",
				fsc->fs.m_u.usr_ip4_spec.ip4dst);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
				fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tTOS: %d, mask: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.tos,
				fsc->fs.m_u.usr_ip4_spec.tos);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.ip_ver,
				fsc->fs.m_u.usr_ip4_spec.ip_ver);
			p += ETH_GSTRING_LEN;
			num_strings++;
			sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
				fsc->fs.h_u.usr_ip4_spec.proto,
				fsc->fs.m_u.usr_ip4_spec.proto);
			p += ETH_GSTRING_LEN;
			num_strings++;
			break;
		}
		/* Fields common to all recognized flow types. */
		sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
			fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
		p += ETH_GSTRING_LEN;
		num_strings++;
		sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
		p += ETH_GSTRING_LEN;
		num_strings++;
		sprintf(p, "\tUser-defined mask: 0x%Lx\n", fsc->fs.data_mask);
		p += ETH_GSTRING_LEN;
		num_strings++;
		if (fsc->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
			sprintf(p, "\tAction: Drop\n");
		else
			sprintf(p, "\tAction: Direct to queue %d\n",
				fsc->fs.action);
		p += ETH_GSTRING_LEN;
		num_strings++;
unknown_filter:
		i++;
	}
copy:
	/* indicate to userspace how many strings we actually have */
	gstrings.len = num_strings;
	ret = -EFAULT;
	if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
		goto out;
	useraddr += sizeof(gstrings);
	if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
		goto out;
	ret = 0;

out:
	kfree(data);
	return ret;
}
1188 1188
1189 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr) 1189 static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
1190 { 1190 {
1191 struct ethtool_regs regs; 1191 struct ethtool_regs regs;
1192 const struct ethtool_ops *ops = dev->ethtool_ops; 1192 const struct ethtool_ops *ops = dev->ethtool_ops;
1193 void *regbuf; 1193 void *regbuf;
1194 int reglen, ret; 1194 int reglen, ret;
1195 1195
1196 if (!ops->get_regs || !ops->get_regs_len) 1196 if (!ops->get_regs || !ops->get_regs_len)
1197 return -EOPNOTSUPP; 1197 return -EOPNOTSUPP;
1198 1198
1199 if (copy_from_user(&regs, useraddr, sizeof(regs))) 1199 if (copy_from_user(&regs, useraddr, sizeof(regs)))
1200 return -EFAULT; 1200 return -EFAULT;
1201 1201
1202 reglen = ops->get_regs_len(dev); 1202 reglen = ops->get_regs_len(dev);
1203 if (regs.len > reglen) 1203 if (regs.len > reglen)
1204 regs.len = reglen; 1204 regs.len = reglen;
1205 1205
1206 regbuf = vzalloc(reglen); 1206 regbuf = vzalloc(reglen);
1207 if (!regbuf) 1207 if (!regbuf)
1208 return -ENOMEM; 1208 return -ENOMEM;
1209 1209
1210 ops->get_regs(dev, &regs, regbuf); 1210 ops->get_regs(dev, &regs, regbuf);
1211 1211
1212 ret = -EFAULT; 1212 ret = -EFAULT;
1213 if (copy_to_user(useraddr, &regs, sizeof(regs))) 1213 if (copy_to_user(useraddr, &regs, sizeof(regs)))
1214 goto out; 1214 goto out;
1215 useraddr += offsetof(struct ethtool_regs, data); 1215 useraddr += offsetof(struct ethtool_regs, data);
1216 if (copy_to_user(useraddr, regbuf, regs.len)) 1216 if (copy_to_user(useraddr, regbuf, regs.len))
1217 goto out; 1217 goto out;
1218 ret = 0; 1218 ret = 0;
1219 1219
1220 out: 1220 out:
1221 vfree(regbuf); 1221 vfree(regbuf);
1222 return ret; 1222 return ret;
1223 } 1223 }
1224 1224
1225 static int ethtool_reset(struct net_device *dev, char __user *useraddr) 1225 static int ethtool_reset(struct net_device *dev, char __user *useraddr)
1226 { 1226 {
1227 struct ethtool_value reset; 1227 struct ethtool_value reset;
1228 int ret; 1228 int ret;
1229 1229
1230 if (!dev->ethtool_ops->reset) 1230 if (!dev->ethtool_ops->reset)
1231 return -EOPNOTSUPP; 1231 return -EOPNOTSUPP;
1232 1232
1233 if (copy_from_user(&reset, useraddr, sizeof(reset))) 1233 if (copy_from_user(&reset, useraddr, sizeof(reset)))
1234 return -EFAULT; 1234 return -EFAULT;
1235 1235
1236 ret = dev->ethtool_ops->reset(dev, &reset.data); 1236 ret = dev->ethtool_ops->reset(dev, &reset.data);
1237 if (ret) 1237 if (ret)
1238 return ret; 1238 return ret;
1239 1239
1240 if (copy_to_user(useraddr, &reset, sizeof(reset))) 1240 if (copy_to_user(useraddr, &reset, sizeof(reset)))
1241 return -EFAULT; 1241 return -EFAULT;
1242 return 0; 1242 return 0;
1243 } 1243 }
1244 1244
1245 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr) 1245 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
1246 { 1246 {
1247 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; 1247 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1248 1248
1249 if (!dev->ethtool_ops->get_wol) 1249 if (!dev->ethtool_ops->get_wol)
1250 return -EOPNOTSUPP; 1250 return -EOPNOTSUPP;
1251 1251
1252 dev->ethtool_ops->get_wol(dev, &wol); 1252 dev->ethtool_ops->get_wol(dev, &wol);
1253 1253
1254 if (copy_to_user(useraddr, &wol, sizeof(wol))) 1254 if (copy_to_user(useraddr, &wol, sizeof(wol)))
1255 return -EFAULT; 1255 return -EFAULT;
1256 return 0; 1256 return 0;
1257 } 1257 }
1258 1258
1259 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) 1259 static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
1260 { 1260 {
1261 struct ethtool_wolinfo wol; 1261 struct ethtool_wolinfo wol;
1262 1262
1263 if (!dev->ethtool_ops->set_wol) 1263 if (!dev->ethtool_ops->set_wol)
1264 return -EOPNOTSUPP; 1264 return -EOPNOTSUPP;
1265 1265
1266 if (copy_from_user(&wol, useraddr, sizeof(wol))) 1266 if (copy_from_user(&wol, useraddr, sizeof(wol)))
1267 return -EFAULT; 1267 return -EFAULT;
1268 1268
1269 return dev->ethtool_ops->set_wol(dev, &wol); 1269 return dev->ethtool_ops->set_wol(dev, &wol);
1270 } 1270 }
1271 1271
1272 static int ethtool_nway_reset(struct net_device *dev) 1272 static int ethtool_nway_reset(struct net_device *dev)
1273 { 1273 {
1274 if (!dev->ethtool_ops->nway_reset) 1274 if (!dev->ethtool_ops->nway_reset)
1275 return -EOPNOTSUPP; 1275 return -EOPNOTSUPP;
1276 1276
1277 return dev->ethtool_ops->nway_reset(dev); 1277 return dev->ethtool_ops->nway_reset(dev);
1278 } 1278 }
1279 1279
1280 static int ethtool_get_link(struct net_device *dev, char __user *useraddr) 1280 static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
1281 { 1281 {
1282 struct ethtool_value edata = { .cmd = ETHTOOL_GLINK }; 1282 struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
1283 1283
1284 if (!dev->ethtool_ops->get_link) 1284 if (!dev->ethtool_ops->get_link)
1285 return -EOPNOTSUPP; 1285 return -EOPNOTSUPP;
1286 1286
1287 edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev); 1287 edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev);
1288 1288
1289 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1289 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1290 return -EFAULT; 1290 return -EFAULT;
1291 return 0; 1291 return 0;
1292 } 1292 }
1293 1293
/*
 * ethtool_get_eeprom - read device EEPROM contents into user space
 * (ETHTOOL_GEEPROM).
 *
 * The requested range is read in PAGE_SIZE chunks through
 * ops->get_eeprom and copied to the user buffer that follows the
 * struct ethtool_eeprom header at @useraddr.
 *
 * Returns 0 on success; -EOPNOTSUPP, -EFAULT, -EINVAL, -ENOMEM or a
 * negative driver error code on failure.
 */
static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	void __user *userbuf = useraddr + sizeof(eeprom);	/* data area */
	u32 bytes_remaining;
	u8 *data;
	int ret = 0;

	if (!ops->get_eeprom || !ops->get_eeprom_len)
		return -EOPNOTSUPP;

	if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
		return -EFAULT;

	/* Check for wrap and zero */
	if (eeprom.offset + eeprom.len <= eeprom.offset)
		return -EINVAL;

	/* Check for exceeding total eeprom len */
	if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
		return -EINVAL;

	/* Bounce buffer: one page-sized chunk at a time. */
	data = kmalloc(PAGE_SIZE, GFP_USER);
	if (!data)
		return -ENOMEM;

	bytes_remaining = eeprom.len;
	while (bytes_remaining > 0) {
		eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);

		ret = ops->get_eeprom(dev, &eeprom, data);
		if (ret)
			break;
		if (copy_to_user(userbuf, data, eeprom.len)) {
			ret = -EFAULT;
			break;
		}
		userbuf += eeprom.len;
		eeprom.offset += eeprom.len;
		bytes_remaining -= eeprom.len;
	}

	/* Report how many bytes were actually transferred (may be short
	 * on error) and restore the caller's original offset. */
	eeprom.len = userbuf - (useraddr + sizeof(eeprom));
	eeprom.offset -= eeprom.len;
	if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
		ret = -EFAULT;

	kfree(data);
	return ret;
}
1345 1345
/*
 * ethtool_set_eeprom - write device EEPROM contents from user space
 * (ETHTOOL_SEEPROM).
 *
 * The user-supplied data following the struct ethtool_eeprom header at
 * @useraddr is written to the device in PAGE_SIZE chunks via
 * ops->set_eeprom.
 *
 * Returns 0 on success; -EOPNOTSUPP, -EFAULT, -EINVAL, -ENOMEM or a
 * negative driver error code on failure.
 */
static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_eeprom eeprom;
	const struct ethtool_ops *ops = dev->ethtool_ops;
	void __user *userbuf = useraddr + sizeof(eeprom);	/* data area */
	u32 bytes_remaining;
	u8 *data;
	int ret = 0;

	if (!ops->set_eeprom || !ops->get_eeprom_len)
		return -EOPNOTSUPP;

	if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
		return -EFAULT;

	/* Check for wrap and zero */
	if (eeprom.offset + eeprom.len <= eeprom.offset)
		return -EINVAL;

	/* Check for exceeding total eeprom len */
	if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
		return -EINVAL;

	/* Bounce buffer: one page-sized chunk at a time. */
	data = kmalloc(PAGE_SIZE, GFP_USER);
	if (!data)
		return -ENOMEM;

	bytes_remaining = eeprom.len;
	while (bytes_remaining > 0) {
		eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);

		if (copy_from_user(data, userbuf, eeprom.len)) {
			ret = -EFAULT;
			break;
		}
		ret = ops->set_eeprom(dev, &eeprom, data);
		if (ret)
			break;
		userbuf += eeprom.len;
		eeprom.offset += eeprom.len;
		bytes_remaining -= eeprom.len;
	}

	kfree(data);
	return ret;
}
1392 1392
1393 static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, 1393 static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
1394 void __user *useraddr) 1394 void __user *useraddr)
1395 { 1395 {
1396 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 1396 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
1397 1397
1398 if (!dev->ethtool_ops->get_coalesce) 1398 if (!dev->ethtool_ops->get_coalesce)
1399 return -EOPNOTSUPP; 1399 return -EOPNOTSUPP;
1400 1400
1401 dev->ethtool_ops->get_coalesce(dev, &coalesce); 1401 dev->ethtool_ops->get_coalesce(dev, &coalesce);
1402 1402
1403 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) 1403 if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
1404 return -EFAULT; 1404 return -EFAULT;
1405 return 0; 1405 return 0;
1406 } 1406 }
1407 1407
1408 static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, 1408 static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
1409 void __user *useraddr) 1409 void __user *useraddr)
1410 { 1410 {
1411 struct ethtool_coalesce coalesce; 1411 struct ethtool_coalesce coalesce;
1412 1412
1413 if (!dev->ethtool_ops->set_coalesce) 1413 if (!dev->ethtool_ops->set_coalesce)
1414 return -EOPNOTSUPP; 1414 return -EOPNOTSUPP;
1415 1415
1416 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) 1416 if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
1417 return -EFAULT; 1417 return -EFAULT;
1418 1418
1419 return dev->ethtool_ops->set_coalesce(dev, &coalesce); 1419 return dev->ethtool_ops->set_coalesce(dev, &coalesce);
1420 } 1420 }
1421 1421
1422 static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr) 1422 static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
1423 { 1423 {
1424 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM }; 1424 struct ethtool_ringparam ringparam = { .cmd = ETHTOOL_GRINGPARAM };
1425 1425
1426 if (!dev->ethtool_ops->get_ringparam) 1426 if (!dev->ethtool_ops->get_ringparam)
1427 return -EOPNOTSUPP; 1427 return -EOPNOTSUPP;
1428 1428
1429 dev->ethtool_ops->get_ringparam(dev, &ringparam); 1429 dev->ethtool_ops->get_ringparam(dev, &ringparam);
1430 1430
1431 if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) 1431 if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
1432 return -EFAULT; 1432 return -EFAULT;
1433 return 0; 1433 return 0;
1434 } 1434 }
1435 1435
1436 static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr) 1436 static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
1437 { 1437 {
1438 struct ethtool_ringparam ringparam; 1438 struct ethtool_ringparam ringparam;
1439 1439
1440 if (!dev->ethtool_ops->set_ringparam) 1440 if (!dev->ethtool_ops->set_ringparam)
1441 return -EOPNOTSUPP; 1441 return -EOPNOTSUPP;
1442 1442
1443 if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) 1443 if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
1444 return -EFAULT; 1444 return -EFAULT;
1445 1445
1446 return dev->ethtool_ops->set_ringparam(dev, &ringparam); 1446 return dev->ethtool_ops->set_ringparam(dev, &ringparam);
1447 } 1447 }
1448 1448
1449 static noinline_for_stack int ethtool_get_channels(struct net_device *dev, 1449 static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
1450 void __user *useraddr) 1450 void __user *useraddr)
1451 { 1451 {
1452 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS }; 1452 struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
1453 1453
1454 if (!dev->ethtool_ops->get_channels) 1454 if (!dev->ethtool_ops->get_channels)
1455 return -EOPNOTSUPP; 1455 return -EOPNOTSUPP;
1456 1456
1457 dev->ethtool_ops->get_channels(dev, &channels); 1457 dev->ethtool_ops->get_channels(dev, &channels);
1458 1458
1459 if (copy_to_user(useraddr, &channels, sizeof(channels))) 1459 if (copy_to_user(useraddr, &channels, sizeof(channels)))
1460 return -EFAULT; 1460 return -EFAULT;
1461 return 0; 1461 return 0;
1462 } 1462 }
1463 1463
1464 static noinline_for_stack int ethtool_set_channels(struct net_device *dev, 1464 static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
1465 void __user *useraddr) 1465 void __user *useraddr)
1466 { 1466 {
1467 struct ethtool_channels channels; 1467 struct ethtool_channels channels;
1468 1468
1469 if (!dev->ethtool_ops->set_channels) 1469 if (!dev->ethtool_ops->set_channels)
1470 return -EOPNOTSUPP; 1470 return -EOPNOTSUPP;
1471 1471
1472 if (copy_from_user(&channels, useraddr, sizeof(channels))) 1472 if (copy_from_user(&channels, useraddr, sizeof(channels)))
1473 return -EFAULT; 1473 return -EFAULT;
1474 1474
1475 return dev->ethtool_ops->set_channels(dev, &channels); 1475 return dev->ethtool_ops->set_channels(dev, &channels);
1476 } 1476 }
1477 1477
1478 static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr) 1478 static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
1479 { 1479 {
1480 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; 1480 struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
1481 1481
1482 if (!dev->ethtool_ops->get_pauseparam) 1482 if (!dev->ethtool_ops->get_pauseparam)
1483 return -EOPNOTSUPP; 1483 return -EOPNOTSUPP;
1484 1484
1485 dev->ethtool_ops->get_pauseparam(dev, &pauseparam); 1485 dev->ethtool_ops->get_pauseparam(dev, &pauseparam);
1486 1486
1487 if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) 1487 if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
1488 return -EFAULT; 1488 return -EFAULT;
1489 return 0; 1489 return 0;
1490 } 1490 }
1491 1491
1492 static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr) 1492 static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
1493 { 1493 {
1494 struct ethtool_pauseparam pauseparam; 1494 struct ethtool_pauseparam pauseparam;
1495 1495
1496 if (!dev->ethtool_ops->set_pauseparam) 1496 if (!dev->ethtool_ops->set_pauseparam)
1497 return -EOPNOTSUPP; 1497 return -EOPNOTSUPP;
1498 1498
1499 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) 1499 if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
1500 return -EFAULT; 1500 return -EFAULT;
1501 1501
1502 return dev->ethtool_ops->set_pauseparam(dev, &pauseparam); 1502 return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
1503 } 1503 }
1504 1504
1505 static int __ethtool_set_sg(struct net_device *dev, u32 data) 1505 static int __ethtool_set_sg(struct net_device *dev, u32 data)
1506 { 1506 {
1507 int err; 1507 int err;
1508 1508
1509 if (!dev->ethtool_ops->set_sg) 1509 if (!dev->ethtool_ops->set_sg)
1510 return -EOPNOTSUPP; 1510 return -EOPNOTSUPP;
1511 1511
1512 if (data && !(dev->features & NETIF_F_ALL_CSUM)) 1512 if (data && !(dev->features & NETIF_F_ALL_CSUM))
1513 return -EINVAL; 1513 return -EINVAL;
1514 1514
1515 if (!data && dev->ethtool_ops->set_tso) { 1515 if (!data && dev->ethtool_ops->set_tso) {
1516 err = dev->ethtool_ops->set_tso(dev, 0); 1516 err = dev->ethtool_ops->set_tso(dev, 0);
1517 if (err) 1517 if (err)
1518 return err; 1518 return err;
1519 } 1519 }
1520 1520
1521 if (!data && dev->ethtool_ops->set_ufo) { 1521 if (!data && dev->ethtool_ops->set_ufo) {
1522 err = dev->ethtool_ops->set_ufo(dev, 0); 1522 err = dev->ethtool_ops->set_ufo(dev, 0);
1523 if (err) 1523 if (err)
1524 return err; 1524 return err;
1525 } 1525 }
1526 return dev->ethtool_ops->set_sg(dev, data); 1526 return dev->ethtool_ops->set_sg(dev, data);
1527 } 1527 }
1528 1528
1529 static int __ethtool_set_tx_csum(struct net_device *dev, u32 data) 1529 static int __ethtool_set_tx_csum(struct net_device *dev, u32 data)
1530 { 1530 {
1531 int err; 1531 int err;
1532 1532
1533 if (!dev->ethtool_ops->set_tx_csum) 1533 if (!dev->ethtool_ops->set_tx_csum)
1534 return -EOPNOTSUPP; 1534 return -EOPNOTSUPP;
1535 1535
1536 if (!data && dev->ethtool_ops->set_sg) { 1536 if (!data && dev->ethtool_ops->set_sg) {
1537 err = __ethtool_set_sg(dev, 0); 1537 err = __ethtool_set_sg(dev, 0);
1538 if (err) 1538 if (err)
1539 return err; 1539 return err;
1540 } 1540 }
1541 1541
1542 return dev->ethtool_ops->set_tx_csum(dev, data); 1542 return dev->ethtool_ops->set_tx_csum(dev, data);
1543 } 1543 }
1544 1544
1545 static int __ethtool_set_rx_csum(struct net_device *dev, u32 data) 1545 static int __ethtool_set_rx_csum(struct net_device *dev, u32 data)
1546 { 1546 {
1547 if (!dev->ethtool_ops->set_rx_csum) 1547 if (!dev->ethtool_ops->set_rx_csum)
1548 return -EOPNOTSUPP; 1548 return -EOPNOTSUPP;
1549 1549
1550 if (!data) 1550 if (!data)
1551 dev->features &= ~NETIF_F_GRO; 1551 dev->features &= ~NETIF_F_GRO;
1552 1552
1553 return dev->ethtool_ops->set_rx_csum(dev, data); 1553 return dev->ethtool_ops->set_rx_csum(dev, data);
1554 } 1554 }
1555 1555
1556 static int __ethtool_set_tso(struct net_device *dev, u32 data) 1556 static int __ethtool_set_tso(struct net_device *dev, u32 data)
1557 { 1557 {
1558 if (!dev->ethtool_ops->set_tso) 1558 if (!dev->ethtool_ops->set_tso)
1559 return -EOPNOTSUPP; 1559 return -EOPNOTSUPP;
1560 1560
1561 if (data && !(dev->features & NETIF_F_SG)) 1561 if (data && !(dev->features & NETIF_F_SG))
1562 return -EINVAL; 1562 return -EINVAL;
1563 1563
1564 return dev->ethtool_ops->set_tso(dev, data); 1564 return dev->ethtool_ops->set_tso(dev, data);
1565 } 1565 }
1566 1566
1567 static int __ethtool_set_ufo(struct net_device *dev, u32 data) 1567 static int __ethtool_set_ufo(struct net_device *dev, u32 data)
1568 { 1568 {
1569 if (!dev->ethtool_ops->set_ufo) 1569 if (!dev->ethtool_ops->set_ufo)
1570 return -EOPNOTSUPP; 1570 return -EOPNOTSUPP;
1571 if (data && !(dev->features & NETIF_F_SG)) 1571 if (data && !(dev->features & NETIF_F_SG))
1572 return -EINVAL; 1572 return -EINVAL;
1573 if (data && !((dev->features & NETIF_F_GEN_CSUM) || 1573 if (data && !((dev->features & NETIF_F_GEN_CSUM) ||
1574 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)) 1574 (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
1575 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) 1575 == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
1576 return -EINVAL; 1576 return -EINVAL;
1577 return dev->ethtool_ops->set_ufo(dev, data); 1577 return dev->ethtool_ops->set_ufo(dev, data);
1578 } 1578 }
1579 1579
1580 static int ethtool_self_test(struct net_device *dev, char __user *useraddr) 1580 static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
1581 { 1581 {
1582 struct ethtool_test test; 1582 struct ethtool_test test;
1583 const struct ethtool_ops *ops = dev->ethtool_ops; 1583 const struct ethtool_ops *ops = dev->ethtool_ops;
1584 u64 *data; 1584 u64 *data;
1585 int ret, test_len; 1585 int ret, test_len;
1586 1586
1587 if (!ops->self_test || !ops->get_sset_count) 1587 if (!ops->self_test || !ops->get_sset_count)
1588 return -EOPNOTSUPP; 1588 return -EOPNOTSUPP;
1589 1589
1590 test_len = ops->get_sset_count(dev, ETH_SS_TEST); 1590 test_len = ops->get_sset_count(dev, ETH_SS_TEST);
1591 if (test_len < 0) 1591 if (test_len < 0)
1592 return test_len; 1592 return test_len;
1593 WARN_ON(test_len == 0); 1593 WARN_ON(test_len == 0);
1594 1594
1595 if (copy_from_user(&test, useraddr, sizeof(test))) 1595 if (copy_from_user(&test, useraddr, sizeof(test)))
1596 return -EFAULT; 1596 return -EFAULT;
1597 1597
1598 test.len = test_len; 1598 test.len = test_len;
1599 data = kmalloc(test_len * sizeof(u64), GFP_USER); 1599 data = kmalloc(test_len * sizeof(u64), GFP_USER);
1600 if (!data) 1600 if (!data)
1601 return -ENOMEM; 1601 return -ENOMEM;
1602 1602
1603 ops->self_test(dev, &test, data); 1603 ops->self_test(dev, &test, data);
1604 1604
1605 ret = -EFAULT; 1605 ret = -EFAULT;
1606 if (copy_to_user(useraddr, &test, sizeof(test))) 1606 if (copy_to_user(useraddr, &test, sizeof(test)))
1607 goto out; 1607 goto out;
1608 useraddr += sizeof(test); 1608 useraddr += sizeof(test);
1609 if (copy_to_user(useraddr, data, test.len * sizeof(u64))) 1609 if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
1610 goto out; 1610 goto out;
1611 ret = 0; 1611 ret = 0;
1612 1612
1613 out: 1613 out:
1614 kfree(data); 1614 kfree(data);
1615 return ret; 1615 return ret;
1616 } 1616 }
1617 1617
1618 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr) 1618 static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
1619 { 1619 {
1620 struct ethtool_gstrings gstrings; 1620 struct ethtool_gstrings gstrings;
1621 u8 *data; 1621 u8 *data;
1622 int ret; 1622 int ret;
1623 1623
1624 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) 1624 if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
1625 return -EFAULT; 1625 return -EFAULT;
1626 1626
1627 ret = __ethtool_get_sset_count(dev, gstrings.string_set); 1627 ret = __ethtool_get_sset_count(dev, gstrings.string_set);
1628 if (ret < 0) 1628 if (ret < 0)
1629 return ret; 1629 return ret;
1630 1630
1631 gstrings.len = ret; 1631 gstrings.len = ret;
1632 1632
1633 data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); 1633 data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
1634 if (!data) 1634 if (!data)
1635 return -ENOMEM; 1635 return -ENOMEM;
1636 1636
1637 __ethtool_get_strings(dev, gstrings.string_set, data); 1637 __ethtool_get_strings(dev, gstrings.string_set, data);
1638 1638
1639 ret = -EFAULT; 1639 ret = -EFAULT;
1640 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) 1640 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
1641 goto out; 1641 goto out;
1642 useraddr += sizeof(gstrings); 1642 useraddr += sizeof(gstrings);
1643 if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) 1643 if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
1644 goto out; 1644 goto out;
1645 ret = 0; 1645 ret = 0;
1646 1646
1647 out: 1647 out:
1648 kfree(data); 1648 kfree(data);
1649 return ret; 1649 return ret;
1650 } 1650 }
1651 1651
/* ETHTOOL_PHYS_ID: blink the port LED so a human can identify the NIC.
 * id.data is the duration in seconds (0 = until interrupted).
 *
 * Called with RTNL held; the lock is dropped for the (potentially very
 * long) wait and re-taken around each driver callback.  A single static
 * 'busy' flag serializes all identify requests system-wide while the
 * lock is dropped.
 */
static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
{
	struct ethtool_value id;
	static bool busy;
	int rc;

	if (!dev->ethtool_ops->set_phys_id)
		return -EOPNOTSUPP;

	if (busy)
		return -EBUSY;

	if (copy_from_user(&id, useraddr, sizeof(id)))
		return -EFAULT;

	/* rc == 0: driver blinks on its own; rc > 0: we must toggle the
	 * LED ourselves at rc cycles per second; rc < 0: error.
	 */
	rc = dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_ACTIVE);
	if (rc < 0)
		return rc;

	/* Drop the RTNL lock while waiting, but prevent reentry or
	 * removal of the device.
	 */
	busy = true;
	dev_hold(dev);
	rtnl_unlock();

	if (rc == 0) {
		/* Driver will handle this itself */
		schedule_timeout_interruptible(
			id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
	} else {
		/* Driver expects to be called at twice the frequency in rc */
		int n = rc * 2, i, interval = HZ / n;

		/* Count down seconds */
		do {
			/* Count down iterations per second */
			i = n;
			do {
				/* Re-take RTNL around each toggle; odd
				 * iterations switch the LED off, even on.
				 */
				rtnl_lock();
				rc = dev->ethtool_ops->set_phys_id(dev,
				    (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
				rtnl_unlock();
				if (rc)
					break;
				schedule_timeout_interruptible(interval);
			} while (!signal_pending(current) && --i != 0);
		} while (!signal_pending(current) &&
			 (id.data == 0 || --id.data != 0));
	}

	/* Caller expects RTNL held on return; restore it before
	 * releasing the device reference and the busy flag.
	 */
	rtnl_lock();
	dev_put(dev);
	busy = false;

	/* Ignore the result: the identify cycle is already over. */
	(void)dev->ethtool_ops->set_phys_id(dev, ETHTOOL_ID_INACTIVE);
	return rc;
}
1710 1710
1711 static int ethtool_get_stats(struct net_device *dev, void __user *useraddr) 1711 static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
1712 { 1712 {
1713 struct ethtool_stats stats; 1713 struct ethtool_stats stats;
1714 const struct ethtool_ops *ops = dev->ethtool_ops; 1714 const struct ethtool_ops *ops = dev->ethtool_ops;
1715 u64 *data; 1715 u64 *data;
1716 int ret, n_stats; 1716 int ret, n_stats;
1717 1717
1718 if (!ops->get_ethtool_stats || !ops->get_sset_count) 1718 if (!ops->get_ethtool_stats || !ops->get_sset_count)
1719 return -EOPNOTSUPP; 1719 return -EOPNOTSUPP;
1720 1720
1721 n_stats = ops->get_sset_count(dev, ETH_SS_STATS); 1721 n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
1722 if (n_stats < 0) 1722 if (n_stats < 0)
1723 return n_stats; 1723 return n_stats;
1724 WARN_ON(n_stats == 0); 1724 WARN_ON(n_stats == 0);
1725 1725
1726 if (copy_from_user(&stats, useraddr, sizeof(stats))) 1726 if (copy_from_user(&stats, useraddr, sizeof(stats)))
1727 return -EFAULT; 1727 return -EFAULT;
1728 1728
1729 stats.n_stats = n_stats; 1729 stats.n_stats = n_stats;
1730 data = kmalloc(n_stats * sizeof(u64), GFP_USER); 1730 data = kmalloc(n_stats * sizeof(u64), GFP_USER);
1731 if (!data) 1731 if (!data)
1732 return -ENOMEM; 1732 return -ENOMEM;
1733 1733
1734 ops->get_ethtool_stats(dev, &stats, data); 1734 ops->get_ethtool_stats(dev, &stats, data);
1735 1735
1736 ret = -EFAULT; 1736 ret = -EFAULT;
1737 if (copy_to_user(useraddr, &stats, sizeof(stats))) 1737 if (copy_to_user(useraddr, &stats, sizeof(stats)))
1738 goto out; 1738 goto out;
1739 useraddr += sizeof(stats); 1739 useraddr += sizeof(stats);
1740 if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) 1740 if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
1741 goto out; 1741 goto out;
1742 ret = 0; 1742 ret = 0;
1743 1743
1744 out: 1744 out:
1745 kfree(data); 1745 kfree(data);
1746 return ret; 1746 return ret;
1747 } 1747 }
1748 1748
1749 static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr) 1749 static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
1750 { 1750 {
1751 struct ethtool_perm_addr epaddr; 1751 struct ethtool_perm_addr epaddr;
1752 1752
1753 if (copy_from_user(&epaddr, useraddr, sizeof(epaddr))) 1753 if (copy_from_user(&epaddr, useraddr, sizeof(epaddr)))
1754 return -EFAULT; 1754 return -EFAULT;
1755 1755
1756 if (epaddr.size < dev->addr_len) 1756 if (epaddr.size < dev->addr_len)
1757 return -ETOOSMALL; 1757 return -ETOOSMALL;
1758 epaddr.size = dev->addr_len; 1758 epaddr.size = dev->addr_len;
1759 1759
1760 if (copy_to_user(useraddr, &epaddr, sizeof(epaddr))) 1760 if (copy_to_user(useraddr, &epaddr, sizeof(epaddr)))
1761 return -EFAULT; 1761 return -EFAULT;
1762 useraddr += sizeof(epaddr); 1762 useraddr += sizeof(epaddr);
1763 if (copy_to_user(useraddr, dev->perm_addr, epaddr.size)) 1763 if (copy_to_user(useraddr, dev->perm_addr, epaddr.size))
1764 return -EFAULT; 1764 return -EFAULT;
1765 return 0; 1765 return 0;
1766 } 1766 }
1767 1767
1768 static int ethtool_get_value(struct net_device *dev, char __user *useraddr, 1768 static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
1769 u32 cmd, u32 (*actor)(struct net_device *)) 1769 u32 cmd, u32 (*actor)(struct net_device *))
1770 { 1770 {
1771 struct ethtool_value edata = { .cmd = cmd }; 1771 struct ethtool_value edata = { .cmd = cmd };
1772 1772
1773 if (!actor) 1773 if (!actor)
1774 return -EOPNOTSUPP; 1774 return -EOPNOTSUPP;
1775 1775
1776 edata.data = actor(dev); 1776 edata.data = actor(dev);
1777 1777
1778 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1778 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1779 return -EFAULT; 1779 return -EFAULT;
1780 return 0; 1780 return 0;
1781 } 1781 }
1782 1782
1783 static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr, 1783 static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr,
1784 void (*actor)(struct net_device *, u32)) 1784 void (*actor)(struct net_device *, u32))
1785 { 1785 {
1786 struct ethtool_value edata; 1786 struct ethtool_value edata;
1787 1787
1788 if (!actor) 1788 if (!actor)
1789 return -EOPNOTSUPP; 1789 return -EOPNOTSUPP;
1790 1790
1791 if (copy_from_user(&edata, useraddr, sizeof(edata))) 1791 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1792 return -EFAULT; 1792 return -EFAULT;
1793 1793
1794 actor(dev, edata.data); 1794 actor(dev, edata.data);
1795 return 0; 1795 return 0;
1796 } 1796 }
1797 1797
1798 static int ethtool_set_value(struct net_device *dev, char __user *useraddr, 1798 static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
1799 int (*actor)(struct net_device *, u32)) 1799 int (*actor)(struct net_device *, u32))
1800 { 1800 {
1801 struct ethtool_value edata; 1801 struct ethtool_value edata;
1802 1802
1803 if (!actor) 1803 if (!actor)
1804 return -EOPNOTSUPP; 1804 return -EOPNOTSUPP;
1805 1805
1806 if (copy_from_user(&edata, useraddr, sizeof(edata))) 1806 if (copy_from_user(&edata, useraddr, sizeof(edata)))
1807 return -EFAULT; 1807 return -EFAULT;
1808 1808
1809 return actor(dev, edata.data); 1809 return actor(dev, edata.data);
1810 } 1810 }
1811 1811
1812 static noinline_for_stack int ethtool_flash_device(struct net_device *dev, 1812 static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
1813 char __user *useraddr) 1813 char __user *useraddr)
1814 { 1814 {
1815 struct ethtool_flash efl; 1815 struct ethtool_flash efl;
1816 1816
1817 if (copy_from_user(&efl, useraddr, sizeof(efl))) 1817 if (copy_from_user(&efl, useraddr, sizeof(efl)))
1818 return -EFAULT; 1818 return -EFAULT;
1819 1819
1820 if (!dev->ethtool_ops->flash_device) 1820 if (!dev->ethtool_ops->flash_device)
1821 return -EOPNOTSUPP; 1821 return -EOPNOTSUPP;
1822 1822
1823 return dev->ethtool_ops->flash_device(dev, &efl); 1823 return dev->ethtool_ops->flash_device(dev, &efl);
1824 } 1824 }
1825 1825
1826 /* The main entry point in this file. Called from net/core/dev.c */ 1826 /* The main entry point in this file. Called from net/core/dev.c */
1827 1827
/* Top-level ethtool ioctl dispatcher, called from dev_ioctl() with the
 * RTNL lock held.  Resolves the target device, enforces privilege for
 * state-changing commands, brackets the operation with the driver's
 * begin()/complete() hooks, and notifies the stack if dev->features
 * changed as a side effect.
 */
int dev_ethtool(struct net *net, struct ifreq *ifr)
{
	struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
	void __user *useraddr = ifr->ifr_data;
	u32 ethcmd;
	int rc;
	u32 old_features;

	if (!dev || !netif_device_present(dev))
		return -ENODEV;

	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	if (!dev->ethtool_ops) {
		/* ETHTOOL_GDRVINFO does not require any driver support.
		 * It is also unprivileged and does not change anything,
		 * so we can take a shortcut to it. */
		if (ethcmd == ETHTOOL_GDRVINFO)
			return ethtool_get_drvinfo(dev, useraddr);
		else
			return -EOPNOTSUPP;
	}

	/* Allow some commands to be done by anyone */
	switch (ethcmd) {
	case ETHTOOL_GSET:
	case ETHTOOL_GDRVINFO:
	case ETHTOOL_GMSGLVL:
	case ETHTOOL_GCOALESCE:
	case ETHTOOL_GRINGPARAM:
	case ETHTOOL_GPAUSEPARAM:
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GTXCSUM:
	case ETHTOOL_GSG:
	case ETHTOOL_GSTRINGS:
	case ETHTOOL_GTSO:
	case ETHTOOL_GPERMADDR:
	case ETHTOOL_GUFO:
	case ETHTOOL_GGSO:
	case ETHTOOL_GGRO:
	case ETHTOOL_GFLAGS:
	case ETHTOOL_GPFLAGS:
	case ETHTOOL_GRXFH:
	case ETHTOOL_GRXRINGS:
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
	case ETHTOOL_GFEATURES:
		break;
	default:
		/* Everything else changes device state and therefore
		 * requires CAP_NET_ADMIN.
		 */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
	}

	/* Let the driver power up / lock whatever it needs first. */
	if (dev->ethtool_ops->begin) {
		rc = dev->ethtool_ops->begin(dev);
		if (rc  < 0)
			return rc;
	}
	/* Snapshot features so we can tell whether the command below
	 * changed them and notify listeners afterwards.
	 */
	old_features = dev->features;

	switch (ethcmd) {
	case ETHTOOL_GSET:
		rc = ethtool_get_settings(dev, useraddr);
		break;
	case ETHTOOL_SSET:
		rc = ethtool_set_settings(dev, useraddr);
		break;
	case ETHTOOL_GDRVINFO:
		rc = ethtool_get_drvinfo(dev, useraddr);
		break;
	case ETHTOOL_GREGS:
		rc = ethtool_get_regs(dev, useraddr);
		break;
	case ETHTOOL_GWOL:
		rc = ethtool_get_wol(dev, useraddr);
		break;
	case ETHTOOL_SWOL:
		rc = ethtool_set_wol(dev, useraddr);
		break;
	case ETHTOOL_GMSGLVL:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       dev->ethtool_ops->get_msglevel);
		break;
	case ETHTOOL_SMSGLVL:
		rc = ethtool_set_value_void(dev, useraddr,
				       dev->ethtool_ops->set_msglevel);
		break;
	case ETHTOOL_NWAY_RST:
		rc = ethtool_nway_reset(dev);
		break;
	case ETHTOOL_GLINK:
		rc = ethtool_get_link(dev, useraddr);
		break;
	case ETHTOOL_GEEPROM:
		rc = ethtool_get_eeprom(dev, useraddr);
		break;
	case ETHTOOL_SEEPROM:
		rc = ethtool_set_eeprom(dev, useraddr);
		break;
	case ETHTOOL_GCOALESCE:
		rc = ethtool_get_coalesce(dev, useraddr);
		break;
	case ETHTOOL_SCOALESCE:
		rc = ethtool_set_coalesce(dev, useraddr);
		break;
	case ETHTOOL_GRINGPARAM:
		rc = ethtool_get_ringparam(dev, useraddr);
		break;
	case ETHTOOL_SRINGPARAM:
		rc = ethtool_set_ringparam(dev, useraddr);
		break;
	case ETHTOOL_GPAUSEPARAM:
		rc = ethtool_get_pauseparam(dev, useraddr);
		break;
	case ETHTOOL_SPAUSEPARAM:
		rc = ethtool_set_pauseparam(dev, useraddr);
		break;
	case ETHTOOL_TEST:
		rc = ethtool_self_test(dev, useraddr);
		break;
	case ETHTOOL_GSTRINGS:
		rc = ethtool_get_strings(dev, useraddr);
		break;
	case ETHTOOL_PHYS_ID:
		rc = ethtool_phys_id(dev, useraddr);
		break;
	case ETHTOOL_GSTATS:
		rc = ethtool_get_stats(dev, useraddr);
		break;
	case ETHTOOL_GPERMADDR:
		rc = ethtool_get_perm_addr(dev, useraddr);
		break;
	case ETHTOOL_GFLAGS:
		/* Fall back to the generic flags getter for drivers
		 * that do not provide their own.
		 */
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       (dev->ethtool_ops->get_flags ?
					dev->ethtool_ops->get_flags :
					ethtool_op_get_flags));
		break;
	case ETHTOOL_SFLAGS:
		rc = ethtool_set_value(dev, useraddr, __ethtool_set_flags);
		break;
	case ETHTOOL_GPFLAGS:
		rc = ethtool_get_value(dev, useraddr, ethcmd,
				       dev->ethtool_ops->get_priv_flags);
		break;
	case ETHTOOL_SPFLAGS:
		rc = ethtool_set_value(dev, useraddr,
				       dev->ethtool_ops->set_priv_flags);
		break;
	case ETHTOOL_GRXFH:
	case ETHTOOL_GRXRINGS:
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
		rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
		break;
	case ETHTOOL_SRXFH:
	case ETHTOOL_SRXCLSRLDEL:
	case ETHTOOL_SRXCLSRLINS:
		rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
		break;
	case ETHTOOL_FLASHDEV:
		rc = ethtool_flash_device(dev, useraddr);
		break;
	case ETHTOOL_RESET:
		rc = ethtool_reset(dev, useraddr);
		break;
	case ETHTOOL_SRXNTUPLE:
		rc = ethtool_set_rx_ntuple(dev, useraddr);
		break;
	case ETHTOOL_GRXNTUPLE:
		rc = ethtool_get_rx_ntuple(dev, useraddr);
		break;
	case ETHTOOL_GSSET_INFO:
		rc = ethtool_get_sset_info(dev, useraddr);
		break;
	case ETHTOOL_GRXFHINDIR:
		rc = ethtool_get_rxfh_indir(dev, useraddr);
		break;
	case ETHTOOL_SRXFHINDIR:
		rc = ethtool_set_rxfh_indir(dev, useraddr);
		break;
	case ETHTOOL_GFEATURES:
		rc = ethtool_get_features(dev, useraddr);
		break;
	case ETHTOOL_SFEATURES:
		rc = ethtool_set_features(dev, useraddr);
		break;
	case ETHTOOL_GTXCSUM:
	case ETHTOOL_GRXCSUM:
	case ETHTOOL_GSG:
	case ETHTOOL_GTSO:
	case ETHTOOL_GUFO:
	case ETHTOOL_GGSO:
	case ETHTOOL_GGRO:
		/* Legacy single-feature getters share one handler. */
		rc = ethtool_get_one_feature(dev, useraddr, ethcmd);
		break;
	case ETHTOOL_STXCSUM:
	case ETHTOOL_SRXCSUM:
	case ETHTOOL_SSG:
	case ETHTOOL_STSO:
	case ETHTOOL_SUFO:
	case ETHTOOL_SGSO:
	case ETHTOOL_SGRO:
		/* Legacy single-feature setters share one handler. */
		rc = ethtool_set_one_feature(dev, useraddr, ethcmd);
		break;
	case ETHTOOL_GCHANNELS:
		rc = ethtool_get_channels(dev, useraddr);
		break;
	case ETHTOOL_SCHANNELS:
		rc = ethtool_set_channels(dev, useraddr);
		break;
	default:
		rc = -EOPNOTSUPP;
	}

	if (dev->ethtool_ops->complete)
		dev->ethtool_ops->complete(dev);

	/* Tell interested parties (e.g. bonding) about feature changes. */
	if (old_features != dev->features)
		netdev_features_change(dev);

	return rc;
}
2054 2054