Commit 0a242efc4fb859b2da506cdf8f3366231602e4ff
Committed by: David S. Miller
1 parent: 5917ed961d
Exists in: master and 7 other branches
[NET]: Deinline netif_carrier_{on,off}().
# grep -r 'netif_carrier_o[nf]' linux-2.6.12 | wc -l
246
# size vmlinux.org vmlinux.carrier
   text    data     bss      dec    hex filename
4339634 1054414  259296  5653344 564360 vmlinux.org
4337710 1054414  259296  5651420 563bdc vmlinux.carrier

And this ain't an allyesconfig kernel!

Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 2 changed files with 18 additions and 12 deletions
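Only include/linux/netdevice.h is shown below; there the inline bodies of netif_carrier_on() and netif_carrier_off() are replaced with extern declarations. The second changed file (not included in this excerpt) presumably provides the out-of-line definitions and exports. A minimal sketch of what those would look like, assuming the bodies are moved verbatim from the removed inlines and exported for modular drivers (the exact target .c file is an assumption, not confirmed here):

	/* Assumed out-of-line home for the former inlines; the bodies mirror
	 * the inline versions removed from netdevice.h in the diff below. */
	void netif_carrier_on(struct net_device *dev)
	{
		if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
			linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
	EXPORT_SYMBOL(netif_carrier_on);

	void netif_carrier_off(struct net_device *dev)
	{
		if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
			linkwatch_fire_event(dev);
	}
	EXPORT_SYMBOL(netif_carrier_off);

Drivers keep calling netif_carrier_on(dev) / netif_carrier_off(dev) exactly as before; the ~246 call sites found by the grep above now share one copy of the code instead of each expanding it inline, which is where the roughly 1.9 KB of text savings in the size comparison comes from.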
include/linux/netdevice.h
1 | /* | 1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | 2 | * INET An implementation of the TCP/IP protocol suite for the LINUX |
3 | * operating system. INET is implemented using the BSD Socket | 3 | * operating system. INET is implemented using the BSD Socket |
4 | * interface as the means of communication with the user level. | 4 | * interface as the means of communication with the user level. |
5 | * | 5 | * |
6 | * Definitions for the Interfaces handler. | 6 | * Definitions for the Interfaces handler. |
7 | * | 7 | * |
8 | * Version: @(#)dev.h 1.0.10 08/12/93 | 8 | * Version: @(#)dev.h 1.0.10 08/12/93 |
9 | * | 9 | * |
10 | * Authors: Ross Biro | 10 | * Authors: Ross Biro |
11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> | 11 | * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> |
12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> | 12 | * Corey Minyard <wf-rch!minyard@relay.EU.net> |
13 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> | 13 | * Donald J. Becker, <becker@cesdis.gsfc.nasa.gov> |
14 | * Alan Cox, <Alan.Cox@linux.org> | 14 | * Alan Cox, <Alan.Cox@linux.org> |
15 | * Bjorn Ekwall. <bj0rn@blox.se> | 15 | * Bjorn Ekwall. <bj0rn@blox.se> |
16 | * Pekka Riikonen <priikone@poseidon.pspt.fi> | 16 | * Pekka Riikonen <priikone@poseidon.pspt.fi> |
17 | * | 17 | * |
18 | * This program is free software; you can redistribute it and/or | 18 | * This program is free software; you can redistribute it and/or |
19 | * modify it under the terms of the GNU General Public License | 19 | * modify it under the terms of the GNU General Public License |
20 | * as published by the Free Software Foundation; either version | 20 | * as published by the Free Software Foundation; either version |
21 | * 2 of the License, or (at your option) any later version. | 21 | * 2 of the License, or (at your option) any later version. |
22 | * | 22 | * |
23 | * Moved to /usr/include/linux for NET3 | 23 | * Moved to /usr/include/linux for NET3 |
24 | */ | 24 | */ |
25 | #ifndef _LINUX_NETDEVICE_H | 25 | #ifndef _LINUX_NETDEVICE_H |
26 | #define _LINUX_NETDEVICE_H | 26 | #define _LINUX_NETDEVICE_H |
27 | 27 | ||
28 | #include <linux/if.h> | 28 | #include <linux/if.h> |
29 | #include <linux/if_ether.h> | 29 | #include <linux/if_ether.h> |
30 | #include <linux/if_packet.h> | 30 | #include <linux/if_packet.h> |
31 | 31 | ||
32 | #ifdef __KERNEL__ | 32 | #ifdef __KERNEL__ |
33 | #include <asm/atomic.h> | 33 | #include <asm/atomic.h> |
34 | #include <asm/cache.h> | 34 | #include <asm/cache.h> |
35 | #include <asm/byteorder.h> | 35 | #include <asm/byteorder.h> |
36 | 36 | ||
37 | #include <linux/config.h> | 37 | #include <linux/config.h> |
38 | #include <linux/device.h> | 38 | #include <linux/device.h> |
39 | #include <linux/percpu.h> | 39 | #include <linux/percpu.h> |
40 | 40 | ||
41 | struct divert_blk; | 41 | struct divert_blk; |
42 | struct vlan_group; | 42 | struct vlan_group; |
43 | struct ethtool_ops; | 43 | struct ethtool_ops; |
44 | struct netpoll_info; | 44 | struct netpoll_info; |
45 | /* source back-compat hooks */ | 45 | /* source back-compat hooks */ |
46 | #define SET_ETHTOOL_OPS(netdev,ops) \ | 46 | #define SET_ETHTOOL_OPS(netdev,ops) \ |
47 | ( (netdev)->ethtool_ops = (ops) ) | 47 | ( (netdev)->ethtool_ops = (ops) ) |
48 | 48 | ||
49 | #define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev | 49 | #define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev |
50 | functions are available. */ | 50 | functions are available. */ |
51 | #define HAVE_FREE_NETDEV /* free_netdev() */ | 51 | #define HAVE_FREE_NETDEV /* free_netdev() */ |
52 | #define HAVE_NETDEV_PRIV /* netdev_priv() */ | 52 | #define HAVE_NETDEV_PRIV /* netdev_priv() */ |
53 | 53 | ||
54 | #define NET_XMIT_SUCCESS 0 | 54 | #define NET_XMIT_SUCCESS 0 |
55 | #define NET_XMIT_DROP 1 /* skb dropped */ | 55 | #define NET_XMIT_DROP 1 /* skb dropped */ |
56 | #define NET_XMIT_CN 2 /* congestion notification */ | 56 | #define NET_XMIT_CN 2 /* congestion notification */ |
57 | #define NET_XMIT_POLICED 3 /* skb is shot by police */ | 57 | #define NET_XMIT_POLICED 3 /* skb is shot by police */ |
58 | #define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; | 58 | #define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue; |
59 | (TC use only - dev_queue_xmit | 59 | (TC use only - dev_queue_xmit |
60 | returns this as NET_XMIT_SUCCESS) */ | 60 | returns this as NET_XMIT_SUCCESS) */ |
61 | 61 | ||
62 | /* Backlog congestion levels */ | 62 | /* Backlog congestion levels */ |
63 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ | 63 | #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */ |
64 | #define NET_RX_DROP 1 /* packet dropped */ | 64 | #define NET_RX_DROP 1 /* packet dropped */ |
65 | #define NET_RX_CN_LOW 2 /* storm alert, just in case */ | 65 | #define NET_RX_CN_LOW 2 /* storm alert, just in case */ |
66 | #define NET_RX_CN_MOD 3 /* Storm on its way! */ | 66 | #define NET_RX_CN_MOD 3 /* Storm on its way! */ |
67 | #define NET_RX_CN_HIGH 4 /* The storm is here */ | 67 | #define NET_RX_CN_HIGH 4 /* The storm is here */ |
68 | #define NET_RX_BAD 5 /* packet dropped due to kernel error */ | 68 | #define NET_RX_BAD 5 /* packet dropped due to kernel error */ |
69 | 69 | ||
70 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) | 70 | #define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0) |
71 | 71 | ||
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | #define MAX_ADDR_LEN 32 /* Largest hardware address length */ | 74 | #define MAX_ADDR_LEN 32 /* Largest hardware address length */ |
75 | 75 | ||
76 | /* Driver transmit return codes */ | 76 | /* Driver transmit return codes */ |
77 | #define NETDEV_TX_OK 0 /* driver took care of packet */ | 77 | #define NETDEV_TX_OK 0 /* driver took care of packet */ |
78 | #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ | 78 | #define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ |
79 | #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ | 79 | #define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * Compute the worst case header length according to the protocols | 82 | * Compute the worst case header length according to the protocols |
83 | * used. | 83 | * used. |
84 | */ | 84 | */ |
85 | 85 | ||
86 | #if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR) | 86 | #if !defined(CONFIG_AX25) && !defined(CONFIG_AX25_MODULE) && !defined(CONFIG_TR) |
87 | #define LL_MAX_HEADER 32 | 87 | #define LL_MAX_HEADER 32 |
88 | #else | 88 | #else |
89 | #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) | 89 | #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) |
90 | #define LL_MAX_HEADER 96 | 90 | #define LL_MAX_HEADER 96 |
91 | #else | 91 | #else |
92 | #define LL_MAX_HEADER 48 | 92 | #define LL_MAX_HEADER 48 |
93 | #endif | 93 | #endif |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | #if !defined(CONFIG_NET_IPIP) && \ | 96 | #if !defined(CONFIG_NET_IPIP) && \ |
97 | !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) | 97 | !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) |
98 | #define MAX_HEADER LL_MAX_HEADER | 98 | #define MAX_HEADER LL_MAX_HEADER |
99 | #else | 99 | #else |
100 | #define MAX_HEADER (LL_MAX_HEADER + 48) | 100 | #define MAX_HEADER (LL_MAX_HEADER + 48) |
101 | #endif | 101 | #endif |
102 | 102 | ||
103 | /* | 103 | /* |
104 | * Network device statistics. Akin to the 2.0 ether stats but | 104 | * Network device statistics. Akin to the 2.0 ether stats but |
105 | * with byte counters. | 105 | * with byte counters. |
106 | */ | 106 | */ |
107 | 107 | ||
108 | struct net_device_stats | 108 | struct net_device_stats |
109 | { | 109 | { |
110 | unsigned long rx_packets; /* total packets received */ | 110 | unsigned long rx_packets; /* total packets received */ |
111 | unsigned long tx_packets; /* total packets transmitted */ | 111 | unsigned long tx_packets; /* total packets transmitted */ |
112 | unsigned long rx_bytes; /* total bytes received */ | 112 | unsigned long rx_bytes; /* total bytes received */ |
113 | unsigned long tx_bytes; /* total bytes transmitted */ | 113 | unsigned long tx_bytes; /* total bytes transmitted */ |
114 | unsigned long rx_errors; /* bad packets received */ | 114 | unsigned long rx_errors; /* bad packets received */ |
115 | unsigned long tx_errors; /* packet transmit problems */ | 115 | unsigned long tx_errors; /* packet transmit problems */ |
116 | unsigned long rx_dropped; /* no space in linux buffers */ | 116 | unsigned long rx_dropped; /* no space in linux buffers */ |
117 | unsigned long tx_dropped; /* no space available in linux */ | 117 | unsigned long tx_dropped; /* no space available in linux */ |
118 | unsigned long multicast; /* multicast packets received */ | 118 | unsigned long multicast; /* multicast packets received */ |
119 | unsigned long collisions; | 119 | unsigned long collisions; |
120 | 120 | ||
121 | /* detailed rx_errors: */ | 121 | /* detailed rx_errors: */ |
122 | unsigned long rx_length_errors; | 122 | unsigned long rx_length_errors; |
123 | unsigned long rx_over_errors; /* receiver ring buff overflow */ | 123 | unsigned long rx_over_errors; /* receiver ring buff overflow */ |
124 | unsigned long rx_crc_errors; /* recved pkt with crc error */ | 124 | unsigned long rx_crc_errors; /* recved pkt with crc error */ |
125 | unsigned long rx_frame_errors; /* recv'd frame alignment error */ | 125 | unsigned long rx_frame_errors; /* recv'd frame alignment error */ |
126 | unsigned long rx_fifo_errors; /* recv'r fifo overrun */ | 126 | unsigned long rx_fifo_errors; /* recv'r fifo overrun */ |
127 | unsigned long rx_missed_errors; /* receiver missed packet */ | 127 | unsigned long rx_missed_errors; /* receiver missed packet */ |
128 | 128 | ||
129 | /* detailed tx_errors */ | 129 | /* detailed tx_errors */ |
130 | unsigned long tx_aborted_errors; | 130 | unsigned long tx_aborted_errors; |
131 | unsigned long tx_carrier_errors; | 131 | unsigned long tx_carrier_errors; |
132 | unsigned long tx_fifo_errors; | 132 | unsigned long tx_fifo_errors; |
133 | unsigned long tx_heartbeat_errors; | 133 | unsigned long tx_heartbeat_errors; |
134 | unsigned long tx_window_errors; | 134 | unsigned long tx_window_errors; |
135 | 135 | ||
136 | /* for cslip etc */ | 136 | /* for cslip etc */ |
137 | unsigned long rx_compressed; | 137 | unsigned long rx_compressed; |
138 | unsigned long tx_compressed; | 138 | unsigned long tx_compressed; |
139 | }; | 139 | }; |
140 | 140 | ||
141 | 141 | ||
142 | /* Media selection options. */ | 142 | /* Media selection options. */ |
143 | enum { | 143 | enum { |
144 | IF_PORT_UNKNOWN = 0, | 144 | IF_PORT_UNKNOWN = 0, |
145 | IF_PORT_10BASE2, | 145 | IF_PORT_10BASE2, |
146 | IF_PORT_10BASET, | 146 | IF_PORT_10BASET, |
147 | IF_PORT_AUI, | 147 | IF_PORT_AUI, |
148 | IF_PORT_100BASET, | 148 | IF_PORT_100BASET, |
149 | IF_PORT_100BASETX, | 149 | IF_PORT_100BASETX, |
150 | IF_PORT_100BASEFX | 150 | IF_PORT_100BASEFX |
151 | }; | 151 | }; |
152 | 152 | ||
153 | #ifdef __KERNEL__ | 153 | #ifdef __KERNEL__ |
154 | 154 | ||
155 | #include <linux/cache.h> | 155 | #include <linux/cache.h> |
156 | #include <linux/skbuff.h> | 156 | #include <linux/skbuff.h> |
157 | 157 | ||
158 | struct neighbour; | 158 | struct neighbour; |
159 | struct neigh_parms; | 159 | struct neigh_parms; |
160 | struct sk_buff; | 160 | struct sk_buff; |
161 | 161 | ||
162 | struct netif_rx_stats | 162 | struct netif_rx_stats |
163 | { | 163 | { |
164 | unsigned total; | 164 | unsigned total; |
165 | unsigned dropped; | 165 | unsigned dropped; |
166 | unsigned time_squeeze; | 166 | unsigned time_squeeze; |
167 | unsigned cpu_collision; | 167 | unsigned cpu_collision; |
168 | }; | 168 | }; |
169 | 169 | ||
170 | DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); | 170 | DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); |
171 | 171 | ||
172 | 172 | ||
173 | /* | 173 | /* |
174 | * We tag multicasts with these structures. | 174 | * We tag multicasts with these structures. |
175 | */ | 175 | */ |
176 | 176 | ||
177 | struct dev_mc_list | 177 | struct dev_mc_list |
178 | { | 178 | { |
179 | struct dev_mc_list *next; | 179 | struct dev_mc_list *next; |
180 | __u8 dmi_addr[MAX_ADDR_LEN]; | 180 | __u8 dmi_addr[MAX_ADDR_LEN]; |
181 | unsigned char dmi_addrlen; | 181 | unsigned char dmi_addrlen; |
182 | int dmi_users; | 182 | int dmi_users; |
183 | int dmi_gusers; | 183 | int dmi_gusers; |
184 | }; | 184 | }; |
185 | 185 | ||
186 | struct hh_cache | 186 | struct hh_cache |
187 | { | 187 | { |
188 | struct hh_cache *hh_next; /* Next entry */ | 188 | struct hh_cache *hh_next; /* Next entry */ |
189 | atomic_t hh_refcnt; /* number of users */ | 189 | atomic_t hh_refcnt; /* number of users */ |
190 | unsigned short hh_type; /* protocol identifier, f.e ETH_P_IP | 190 | unsigned short hh_type; /* protocol identifier, f.e ETH_P_IP |
191 | * NOTE: For VLANs, this will be the | 191 | * NOTE: For VLANs, this will be the |
192 | * encapuslated type. --BLG | 192 | * encapuslated type. --BLG |
193 | */ | 193 | */ |
194 | int hh_len; /* length of header */ | 194 | int hh_len; /* length of header */ |
195 | int (*hh_output)(struct sk_buff *skb); | 195 | int (*hh_output)(struct sk_buff *skb); |
196 | rwlock_t hh_lock; | 196 | rwlock_t hh_lock; |
197 | 197 | ||
198 | /* cached hardware header; allow for machine alignment needs. */ | 198 | /* cached hardware header; allow for machine alignment needs. */ |
199 | #define HH_DATA_MOD 16 | 199 | #define HH_DATA_MOD 16 |
200 | #define HH_DATA_OFF(__len) \ | 200 | #define HH_DATA_OFF(__len) \ |
201 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) | 201 | (HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1)) |
202 | #define HH_DATA_ALIGN(__len) \ | 202 | #define HH_DATA_ALIGN(__len) \ |
203 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) | 203 | (((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1)) |
204 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | 204 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; |
205 | }; | 205 | }; |
206 | 206 | ||
207 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. | 207 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. |
208 | * Alternative is: | 208 | * Alternative is: |
209 | * dev->hard_header_len ? (dev->hard_header_len + | 209 | * dev->hard_header_len ? (dev->hard_header_len + |
210 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 | 210 | * (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0 |
211 | * | 211 | * |
212 | * We could use other alignment values, but we must maintain the | 212 | * We could use other alignment values, but we must maintain the |
213 | * relationship HH alignment <= LL alignment. | 213 | * relationship HH alignment <= LL alignment. |
214 | */ | 214 | */ |
215 | #define LL_RESERVED_SPACE(dev) \ | 215 | #define LL_RESERVED_SPACE(dev) \ |
216 | (((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | 216 | (((dev)->hard_header_len&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
217 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ | 217 | #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ |
218 | ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) | 218 | ((((dev)->hard_header_len+extra)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) |
219 | 219 | ||
220 | /* These flag bits are private to the generic network queueing | 220 | /* These flag bits are private to the generic network queueing |
221 | * layer, they may not be explicitly referenced by any other | 221 | * layer, they may not be explicitly referenced by any other |
222 | * code. | 222 | * code. |
223 | */ | 223 | */ |
224 | 224 | ||
225 | enum netdev_state_t | 225 | enum netdev_state_t |
226 | { | 226 | { |
227 | __LINK_STATE_XOFF=0, | 227 | __LINK_STATE_XOFF=0, |
228 | __LINK_STATE_START, | 228 | __LINK_STATE_START, |
229 | __LINK_STATE_PRESENT, | 229 | __LINK_STATE_PRESENT, |
230 | __LINK_STATE_SCHED, | 230 | __LINK_STATE_SCHED, |
231 | __LINK_STATE_NOCARRIER, | 231 | __LINK_STATE_NOCARRIER, |
232 | __LINK_STATE_RX_SCHED, | 232 | __LINK_STATE_RX_SCHED, |
233 | __LINK_STATE_LINKWATCH_PENDING | 233 | __LINK_STATE_LINKWATCH_PENDING |
234 | }; | 234 | }; |
235 | 235 | ||
236 | 236 | ||
237 | /* | 237 | /* |
238 | * This structure holds at boot time configured netdevice settings. They | 238 | * This structure holds at boot time configured netdevice settings. They |
239 | * are then used in the device probing. | 239 | * are then used in the device probing. |
240 | */ | 240 | */ |
241 | struct netdev_boot_setup { | 241 | struct netdev_boot_setup { |
242 | char name[IFNAMSIZ]; | 242 | char name[IFNAMSIZ]; |
243 | struct ifmap map; | 243 | struct ifmap map; |
244 | }; | 244 | }; |
245 | #define NETDEV_BOOT_SETUP_MAX 8 | 245 | #define NETDEV_BOOT_SETUP_MAX 8 |
246 | 246 | ||
247 | 247 | ||
248 | /* | 248 | /* |
249 | * The DEVICE structure. | 249 | * The DEVICE structure. |
250 | * Actually, this whole structure is a big mistake. It mixes I/O | 250 | * Actually, this whole structure is a big mistake. It mixes I/O |
251 | * data with strictly "high-level" data, and it has to know about | 251 | * data with strictly "high-level" data, and it has to know about |
252 | * almost every data structure used in the INET module. | 252 | * almost every data structure used in the INET module. |
253 | * | 253 | * |
254 | * FIXME: cleanup struct net_device such that network protocol info | 254 | * FIXME: cleanup struct net_device such that network protocol info |
255 | * moves out. | 255 | * moves out. |
256 | */ | 256 | */ |
257 | 257 | ||
258 | struct net_device | 258 | struct net_device |
259 | { | 259 | { |
260 | 260 | ||
261 | /* | 261 | /* |
262 | * This is the first field of the "visible" part of this structure | 262 | * This is the first field of the "visible" part of this structure |
263 | * (i.e. as seen by users in the "Space.c" file). It is the name | 263 | * (i.e. as seen by users in the "Space.c" file). It is the name |
264 | * the interface. | 264 | * the interface. |
265 | */ | 265 | */ |
266 | char name[IFNAMSIZ]; | 266 | char name[IFNAMSIZ]; |
267 | 267 | ||
268 | /* | 268 | /* |
269 | * I/O specific fields | 269 | * I/O specific fields |
270 | * FIXME: Merge these and struct ifmap into one | 270 | * FIXME: Merge these and struct ifmap into one |
271 | */ | 271 | */ |
272 | unsigned long mem_end; /* shared mem end */ | 272 | unsigned long mem_end; /* shared mem end */ |
273 | unsigned long mem_start; /* shared mem start */ | 273 | unsigned long mem_start; /* shared mem start */ |
274 | unsigned long base_addr; /* device I/O address */ | 274 | unsigned long base_addr; /* device I/O address */ |
275 | unsigned int irq; /* device IRQ number */ | 275 | unsigned int irq; /* device IRQ number */ |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * Some hardware also needs these fields, but they are not | 278 | * Some hardware also needs these fields, but they are not |
279 | * part of the usual set specified in Space.c. | 279 | * part of the usual set specified in Space.c. |
280 | */ | 280 | */ |
281 | 281 | ||
282 | unsigned char if_port; /* Selectable AUI, TP,..*/ | 282 | unsigned char if_port; /* Selectable AUI, TP,..*/ |
283 | unsigned char dma; /* DMA channel */ | 283 | unsigned char dma; /* DMA channel */ |
284 | 284 | ||
285 | unsigned long state; | 285 | unsigned long state; |
286 | 286 | ||
287 | struct net_device *next; | 287 | struct net_device *next; |
288 | 288 | ||
289 | /* The device initialization function. Called only once. */ | 289 | /* The device initialization function. Called only once. */ |
290 | int (*init)(struct net_device *dev); | 290 | int (*init)(struct net_device *dev); |
291 | 291 | ||
292 | /* ------- Fields preinitialized in Space.c finish here ------- */ | 292 | /* ------- Fields preinitialized in Space.c finish here ------- */ |
293 | 293 | ||
294 | struct net_device *next_sched; | 294 | struct net_device *next_sched; |
295 | 295 | ||
296 | /* Interface index. Unique device identifier */ | 296 | /* Interface index. Unique device identifier */ |
297 | int ifindex; | 297 | int ifindex; |
298 | int iflink; | 298 | int iflink; |
299 | 299 | ||
300 | 300 | ||
301 | struct net_device_stats* (*get_stats)(struct net_device *dev); | 301 | struct net_device_stats* (*get_stats)(struct net_device *dev); |
302 | struct iw_statistics* (*get_wireless_stats)(struct net_device *dev); | 302 | struct iw_statistics* (*get_wireless_stats)(struct net_device *dev); |
303 | 303 | ||
304 | /* List of functions to handle Wireless Extensions (instead of ioctl). | 304 | /* List of functions to handle Wireless Extensions (instead of ioctl). |
305 | * See <net/iw_handler.h> for details. Jean II */ | 305 | * See <net/iw_handler.h> for details. Jean II */ |
306 | const struct iw_handler_def * wireless_handlers; | 306 | const struct iw_handler_def * wireless_handlers; |
307 | /* Instance data managed by the core of Wireless Extensions. */ | 307 | /* Instance data managed by the core of Wireless Extensions. */ |
308 | struct iw_public_data * wireless_data; | 308 | struct iw_public_data * wireless_data; |
309 | 309 | ||
310 | struct ethtool_ops *ethtool_ops; | 310 | struct ethtool_ops *ethtool_ops; |
311 | 311 | ||
312 | /* | 312 | /* |
313 | * This marks the end of the "visible" part of the structure. All | 313 | * This marks the end of the "visible" part of the structure. All |
314 | * fields hereafter are internal to the system, and may change at | 314 | * fields hereafter are internal to the system, and may change at |
315 | * will (read: may be cleaned up at will). | 315 | * will (read: may be cleaned up at will). |
316 | */ | 316 | */ |
317 | 317 | ||
318 | /* These may be needed for future network-power-down code. */ | 318 | /* These may be needed for future network-power-down code. */ |
319 | unsigned long trans_start; /* Time (in jiffies) of last Tx */ | 319 | unsigned long trans_start; /* Time (in jiffies) of last Tx */ |
320 | unsigned long last_rx; /* Time of last Rx */ | 320 | unsigned long last_rx; /* Time of last Rx */ |
321 | 321 | ||
322 | unsigned short flags; /* interface flags (a la BSD) */ | 322 | unsigned short flags; /* interface flags (a la BSD) */ |
323 | unsigned short gflags; | 323 | unsigned short gflags; |
324 | unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ | 324 | unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ |
325 | unsigned short padded; /* How much padding added by alloc_netdev() */ | 325 | unsigned short padded; /* How much padding added by alloc_netdev() */ |
326 | 326 | ||
327 | unsigned mtu; /* interface MTU value */ | 327 | unsigned mtu; /* interface MTU value */ |
328 | unsigned short type; /* interface hardware type */ | 328 | unsigned short type; /* interface hardware type */ |
329 | unsigned short hard_header_len; /* hardware hdr length */ | 329 | unsigned short hard_header_len; /* hardware hdr length */ |
330 | void *priv; /* pointer to private data */ | 330 | void *priv; /* pointer to private data */ |
331 | 331 | ||
332 | struct net_device *master; /* Pointer to master device of a group, | 332 | struct net_device *master; /* Pointer to master device of a group, |
333 | * which this device is member of. | 333 | * which this device is member of. |
334 | */ | 334 | */ |
335 | 335 | ||
336 | /* Interface address info. */ | 336 | /* Interface address info. */ |
337 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ | 337 | unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ |
338 | unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */ | 338 | unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */ |
339 | unsigned char addr_len; /* hardware address length */ | 339 | unsigned char addr_len; /* hardware address length */ |
340 | unsigned short dev_id; /* for shared network cards */ | 340 | unsigned short dev_id; /* for shared network cards */ |
341 | 341 | ||
342 | struct dev_mc_list *mc_list; /* Multicast mac addresses */ | 342 | struct dev_mc_list *mc_list; /* Multicast mac addresses */ |
343 | int mc_count; /* Number of installed mcasts */ | 343 | int mc_count; /* Number of installed mcasts */ |
344 | int promiscuity; | 344 | int promiscuity; |
345 | int allmulti; | 345 | int allmulti; |
346 | 346 | ||
347 | int watchdog_timeo; | 347 | int watchdog_timeo; |
348 | struct timer_list watchdog_timer; | 348 | struct timer_list watchdog_timer; |
349 | 349 | ||
350 | /* Protocol specific pointers */ | 350 | /* Protocol specific pointers */ |
351 | 351 | ||
352 | void *atalk_ptr; /* AppleTalk link */ | 352 | void *atalk_ptr; /* AppleTalk link */ |
353 | void *ip_ptr; /* IPv4 specific data */ | 353 | void *ip_ptr; /* IPv4 specific data */ |
354 | void *dn_ptr; /* DECnet specific data */ | 354 | void *dn_ptr; /* DECnet specific data */ |
355 | void *ip6_ptr; /* IPv6 specific data */ | 355 | void *ip6_ptr; /* IPv6 specific data */ |
356 | void *ec_ptr; /* Econet specific data */ | 356 | void *ec_ptr; /* Econet specific data */ |
357 | void *ax25_ptr; /* AX.25 specific data */ | 357 | void *ax25_ptr; /* AX.25 specific data */ |
358 | 358 | ||
359 | struct list_head poll_list; /* Link to poll list */ | 359 | struct list_head poll_list; /* Link to poll list */ |
360 | int quota; | 360 | int quota; |
361 | int weight; | 361 | int weight; |
362 | 362 | ||
363 | struct Qdisc *qdisc; | 363 | struct Qdisc *qdisc; |
364 | struct Qdisc *qdisc_sleeping; | 364 | struct Qdisc *qdisc_sleeping; |
365 | struct Qdisc *qdisc_ingress; | 365 | struct Qdisc *qdisc_ingress; |
366 | struct list_head qdisc_list; | 366 | struct list_head qdisc_list; |
367 | unsigned long tx_queue_len; /* Max frames per queue allowed */ | 367 | unsigned long tx_queue_len; /* Max frames per queue allowed */ |
368 | 368 | ||
369 | /* ingress path synchronizer */ | 369 | /* ingress path synchronizer */ |
370 | spinlock_t ingress_lock; | 370 | spinlock_t ingress_lock; |
371 | /* hard_start_xmit synchronizer */ | 371 | /* hard_start_xmit synchronizer */ |
372 | spinlock_t xmit_lock; | 372 | spinlock_t xmit_lock; |
373 | /* cpu id of processor entered to hard_start_xmit or -1, | 373 | /* cpu id of processor entered to hard_start_xmit or -1, |
374 | if nobody entered there. | 374 | if nobody entered there. |
375 | */ | 375 | */ |
376 | int xmit_lock_owner; | 376 | int xmit_lock_owner; |
377 | /* device queue lock */ | 377 | /* device queue lock */ |
378 | spinlock_t queue_lock; | 378 | spinlock_t queue_lock; |
379 | /* Number of references to this device */ | 379 | /* Number of references to this device */ |
380 | atomic_t refcnt; | 380 | atomic_t refcnt; |
381 | /* delayed register/unregister */ | 381 | /* delayed register/unregister */ |
382 | struct list_head todo_list; | 382 | struct list_head todo_list; |
383 | /* device name hash chain */ | 383 | /* device name hash chain */ |
384 | struct hlist_node name_hlist; | 384 | struct hlist_node name_hlist; |
385 | /* device index hash chain */ | 385 | /* device index hash chain */ |
386 | struct hlist_node index_hlist; | 386 | struct hlist_node index_hlist; |
387 | 387 | ||
388 | /* register/unregister state machine */ | 388 | /* register/unregister state machine */ |
389 | enum { NETREG_UNINITIALIZED=0, | 389 | enum { NETREG_UNINITIALIZED=0, |
390 | NETREG_REGISTERING, /* called register_netdevice */ | 390 | NETREG_REGISTERING, /* called register_netdevice */ |
391 | NETREG_REGISTERED, /* completed register todo */ | 391 | NETREG_REGISTERED, /* completed register todo */ |
392 | NETREG_UNREGISTERING, /* called unregister_netdevice */ | 392 | NETREG_UNREGISTERING, /* called unregister_netdevice */ |
393 | NETREG_UNREGISTERED, /* completed unregister todo */ | 393 | NETREG_UNREGISTERED, /* completed unregister todo */ |
394 | NETREG_RELEASED, /* called free_netdev */ | 394 | NETREG_RELEASED, /* called free_netdev */ |
395 | } reg_state; | 395 | } reg_state; |
396 | 396 | ||
397 | /* Net device features */ | 397 | /* Net device features */ |
398 | unsigned long features; | 398 | unsigned long features; |
399 | #define NETIF_F_SG 1 /* Scatter/gather IO. */ | 399 | #define NETIF_F_SG 1 /* Scatter/gather IO. */ |
400 | #define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ | 400 | #define NETIF_F_IP_CSUM 2 /* Can checksum only TCP/UDP over IPv4. */ |
401 | #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ | 401 | #define NETIF_F_NO_CSUM 4 /* Does not require checksum. F.e. loopack. */ |
402 | #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ | 402 | #define NETIF_F_HW_CSUM 8 /* Can checksum all the packets. */ |
403 | #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ | 403 | #define NETIF_F_HIGHDMA 32 /* Can DMA to high memory. */ |
404 | #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ | 404 | #define NETIF_F_FRAGLIST 64 /* Scatter/gather IO. */ |
405 | #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ | 405 | #define NETIF_F_HW_VLAN_TX 128 /* Transmit VLAN hw acceleration */ |
406 | #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ | 406 | #define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */ |
407 | #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ | 407 | #define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */ |
408 | #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ | 408 | #define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */ |
409 | #define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */ | 409 | #define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */ |
410 | #define NETIF_F_LLTX 4096 /* LockLess TX */ | 410 | #define NETIF_F_LLTX 4096 /* LockLess TX */ |
411 | 411 | ||
412 | /* Called after device is detached from network. */ | 412 | /* Called after device is detached from network. */ |
413 | void (*uninit)(struct net_device *dev); | 413 | void (*uninit)(struct net_device *dev); |
414 | /* Called after last user reference disappears. */ | 414 | /* Called after last user reference disappears. */ |
415 | void (*destructor)(struct net_device *dev); | 415 | void (*destructor)(struct net_device *dev); |
416 | 416 | ||
417 | /* Pointers to interface service routines. */ | 417 | /* Pointers to interface service routines. */ |
418 | int (*open)(struct net_device *dev); | 418 | int (*open)(struct net_device *dev); |
419 | int (*stop)(struct net_device *dev); | 419 | int (*stop)(struct net_device *dev); |
420 | int (*hard_start_xmit) (struct sk_buff *skb, | 420 | int (*hard_start_xmit) (struct sk_buff *skb, |
421 | struct net_device *dev); | 421 | struct net_device *dev); |
422 | #define HAVE_NETDEV_POLL | 422 | #define HAVE_NETDEV_POLL |
423 | int (*poll) (struct net_device *dev, int *quota); | 423 | int (*poll) (struct net_device *dev, int *quota); |
424 | int (*hard_header) (struct sk_buff *skb, | 424 | int (*hard_header) (struct sk_buff *skb, |
425 | struct net_device *dev, | 425 | struct net_device *dev, |
426 | unsigned short type, | 426 | unsigned short type, |
427 | void *daddr, | 427 | void *daddr, |
428 | void *saddr, | 428 | void *saddr, |
429 | unsigned len); | 429 | unsigned len); |
430 | int (*rebuild_header)(struct sk_buff *skb); | 430 | int (*rebuild_header)(struct sk_buff *skb); |
431 | #define HAVE_MULTICAST | 431 | #define HAVE_MULTICAST |
432 | void (*set_multicast_list)(struct net_device *dev); | 432 | void (*set_multicast_list)(struct net_device *dev); |
433 | #define HAVE_SET_MAC_ADDR | 433 | #define HAVE_SET_MAC_ADDR |
434 | int (*set_mac_address)(struct net_device *dev, | 434 | int (*set_mac_address)(struct net_device *dev, |
435 | void *addr); | 435 | void *addr); |
436 | #define HAVE_PRIVATE_IOCTL | 436 | #define HAVE_PRIVATE_IOCTL |
437 | int (*do_ioctl)(struct net_device *dev, | 437 | int (*do_ioctl)(struct net_device *dev, |
438 | struct ifreq *ifr, int cmd); | 438 | struct ifreq *ifr, int cmd); |
439 | #define HAVE_SET_CONFIG | 439 | #define HAVE_SET_CONFIG |
440 | int (*set_config)(struct net_device *dev, | 440 | int (*set_config)(struct net_device *dev, |
441 | struct ifmap *map); | 441 | struct ifmap *map); |
442 | #define HAVE_HEADER_CACHE | 442 | #define HAVE_HEADER_CACHE |
443 | int (*hard_header_cache)(struct neighbour *neigh, | 443 | int (*hard_header_cache)(struct neighbour *neigh, |
444 | struct hh_cache *hh); | 444 | struct hh_cache *hh); |
445 | void (*header_cache_update)(struct hh_cache *hh, | 445 | void (*header_cache_update)(struct hh_cache *hh, |
446 | struct net_device *dev, | 446 | struct net_device *dev, |
447 | unsigned char * haddr); | 447 | unsigned char * haddr); |
448 | #define HAVE_CHANGE_MTU | 448 | #define HAVE_CHANGE_MTU |
449 | int (*change_mtu)(struct net_device *dev, int new_mtu); | 449 | int (*change_mtu)(struct net_device *dev, int new_mtu); |
450 | 450 | ||
451 | #define HAVE_TX_TIMEOUT | 451 | #define HAVE_TX_TIMEOUT |
452 | void (*tx_timeout) (struct net_device *dev); | 452 | void (*tx_timeout) (struct net_device *dev); |
453 | 453 | ||
454 | void (*vlan_rx_register)(struct net_device *dev, | 454 | void (*vlan_rx_register)(struct net_device *dev, |
455 | struct vlan_group *grp); | 455 | struct vlan_group *grp); |
456 | void (*vlan_rx_add_vid)(struct net_device *dev, | 456 | void (*vlan_rx_add_vid)(struct net_device *dev, |
457 | unsigned short vid); | 457 | unsigned short vid); |
458 | void (*vlan_rx_kill_vid)(struct net_device *dev, | 458 | void (*vlan_rx_kill_vid)(struct net_device *dev, |
459 | unsigned short vid); | 459 | unsigned short vid); |
460 | 460 | ||
461 | int (*hard_header_parse)(struct sk_buff *skb, | 461 | int (*hard_header_parse)(struct sk_buff *skb, |
462 | unsigned char *haddr); | 462 | unsigned char *haddr); |
463 | int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); | 463 | int (*neigh_setup)(struct net_device *dev, struct neigh_parms *); |
464 | #ifdef CONFIG_NETPOLL | 464 | #ifdef CONFIG_NETPOLL |
465 | struct netpoll_info *npinfo; | 465 | struct netpoll_info *npinfo; |
466 | #endif | 466 | #endif |
467 | #ifdef CONFIG_NET_POLL_CONTROLLER | 467 | #ifdef CONFIG_NET_POLL_CONTROLLER |
468 | void (*poll_controller)(struct net_device *dev); | 468 | void (*poll_controller)(struct net_device *dev); |
469 | #endif | 469 | #endif |
470 | 470 | ||
471 | /* bridge stuff */ | 471 | /* bridge stuff */ |
472 | struct net_bridge_port *br_port; | 472 | struct net_bridge_port *br_port; |
473 | 473 | ||
474 | #ifdef CONFIG_NET_DIVERT | 474 | #ifdef CONFIG_NET_DIVERT |
475 | /* this will get initialized at each interface type init routine */ | 475 | /* this will get initialized at each interface type init routine */ |
476 | struct divert_blk *divert; | 476 | struct divert_blk *divert; |
477 | #endif /* CONFIG_NET_DIVERT */ | 477 | #endif /* CONFIG_NET_DIVERT */ |
478 | 478 | ||
479 | /* class/net/name entry */ | 479 | /* class/net/name entry */ |
480 | struct class_device class_dev; | 480 | struct class_device class_dev; |
481 | }; | 481 | }; |
482 | 482 | ||
483 | #define NETDEV_ALIGN 32 | 483 | #define NETDEV_ALIGN 32 |
484 | #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) | 484 | #define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) |
485 | 485 | ||
486 | static inline void *netdev_priv(struct net_device *dev) | 486 | static inline void *netdev_priv(struct net_device *dev) |
487 | { | 487 | { |
488 | return (char *)dev + ((sizeof(struct net_device) | 488 | return (char *)dev + ((sizeof(struct net_device) |
489 | + NETDEV_ALIGN_CONST) | 489 | + NETDEV_ALIGN_CONST) |
490 | & ~NETDEV_ALIGN_CONST); | 490 | & ~NETDEV_ALIGN_CONST); |
491 | } | 491 | } |
492 | 492 | ||
493 | #define SET_MODULE_OWNER(dev) do { } while (0) | 493 | #define SET_MODULE_OWNER(dev) do { } while (0) |
494 | /* Set the sysfs physical device reference for the network logical device | 494 | /* Set the sysfs physical device reference for the network logical device |
495 | * if set prior to registration will cause a symlink during initialization. | 495 | * if set prior to registration will cause a symlink during initialization. |
496 | */ | 496 | */ |
497 | #define SET_NETDEV_DEV(net, pdev) ((net)->class_dev.dev = (pdev)) | 497 | #define SET_NETDEV_DEV(net, pdev) ((net)->class_dev.dev = (pdev)) |
498 | 498 | ||
499 | struct packet_type { | 499 | struct packet_type { |
500 | __be16 type; /* This is really htons(ether_type). */ | 500 | __be16 type; /* This is really htons(ether_type). */ |
501 | struct net_device *dev; /* NULL is wildcarded here */ | 501 | struct net_device *dev; /* NULL is wildcarded here */ |
502 | int (*func) (struct sk_buff *, | 502 | int (*func) (struct sk_buff *, |
503 | struct net_device *, | 503 | struct net_device *, |
504 | struct packet_type *, | 504 | struct packet_type *, |
505 | struct net_device *); | 505 | struct net_device *); |
506 | void *af_packet_priv; | 506 | void *af_packet_priv; |
507 | struct list_head list; | 507 | struct list_head list; |
508 | }; | 508 | }; |
509 | 509 | ||
510 | #include <linux/interrupt.h> | 510 | #include <linux/interrupt.h> |
511 | #include <linux/notifier.h> | 511 | #include <linux/notifier.h> |
512 | 512 | ||
513 | extern struct net_device loopback_dev; /* The loopback */ | 513 | extern struct net_device loopback_dev; /* The loopback */ |
514 | extern struct net_device *dev_base; /* All devices */ | 514 | extern struct net_device *dev_base; /* All devices */ |
515 | extern rwlock_t dev_base_lock; /* Device list lock */ | 515 | extern rwlock_t dev_base_lock; /* Device list lock */ |
516 | 516 | ||
517 | extern int netdev_boot_setup_check(struct net_device *dev); | 517 | extern int netdev_boot_setup_check(struct net_device *dev); |
518 | extern unsigned long netdev_boot_base(const char *prefix, int unit); | 518 | extern unsigned long netdev_boot_base(const char *prefix, int unit); |
519 | extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr); | 519 | extern struct net_device *dev_getbyhwaddr(unsigned short type, char *hwaddr); |
520 | extern struct net_device *dev_getfirstbyhwtype(unsigned short type); | 520 | extern struct net_device *dev_getfirstbyhwtype(unsigned short type); |
521 | extern void dev_add_pack(struct packet_type *pt); | 521 | extern void dev_add_pack(struct packet_type *pt); |
522 | extern void dev_remove_pack(struct packet_type *pt); | 522 | extern void dev_remove_pack(struct packet_type *pt); |
523 | extern void __dev_remove_pack(struct packet_type *pt); | 523 | extern void __dev_remove_pack(struct packet_type *pt); |
524 | 524 | ||
525 | extern struct net_device *dev_get_by_flags(unsigned short flags, | 525 | extern struct net_device *dev_get_by_flags(unsigned short flags, |
526 | unsigned short mask); | 526 | unsigned short mask); |
527 | extern struct net_device *dev_get_by_name(const char *name); | 527 | extern struct net_device *dev_get_by_name(const char *name); |
528 | extern struct net_device *__dev_get_by_name(const char *name); | 528 | extern struct net_device *__dev_get_by_name(const char *name); |
529 | extern int dev_alloc_name(struct net_device *dev, const char *name); | 529 | extern int dev_alloc_name(struct net_device *dev, const char *name); |
530 | extern int dev_open(struct net_device *dev); | 530 | extern int dev_open(struct net_device *dev); |
531 | extern int dev_close(struct net_device *dev); | 531 | extern int dev_close(struct net_device *dev); |
532 | extern int dev_queue_xmit(struct sk_buff *skb); | 532 | extern int dev_queue_xmit(struct sk_buff *skb); |
533 | extern int register_netdevice(struct net_device *dev); | 533 | extern int register_netdevice(struct net_device *dev); |
534 | extern int unregister_netdevice(struct net_device *dev); | 534 | extern int unregister_netdevice(struct net_device *dev); |
535 | extern void free_netdev(struct net_device *dev); | 535 | extern void free_netdev(struct net_device *dev); |
536 | extern void synchronize_net(void); | 536 | extern void synchronize_net(void); |
537 | extern int register_netdevice_notifier(struct notifier_block *nb); | 537 | extern int register_netdevice_notifier(struct notifier_block *nb); |
538 | extern int unregister_netdevice_notifier(struct notifier_block *nb); | 538 | extern int unregister_netdevice_notifier(struct notifier_block *nb); |
539 | extern int call_netdevice_notifiers(unsigned long val, void *v); | 539 | extern int call_netdevice_notifiers(unsigned long val, void *v); |
540 | extern struct net_device *dev_get_by_index(int ifindex); | 540 | extern struct net_device *dev_get_by_index(int ifindex); |
541 | extern struct net_device *__dev_get_by_index(int ifindex); | 541 | extern struct net_device *__dev_get_by_index(int ifindex); |
542 | extern int dev_restart(struct net_device *dev); | 542 | extern int dev_restart(struct net_device *dev); |
543 | #ifdef CONFIG_NETPOLL_TRAP | 543 | #ifdef CONFIG_NETPOLL_TRAP |
544 | extern int netpoll_trap(void); | 544 | extern int netpoll_trap(void); |
545 | #endif | 545 | #endif |
546 | 546 | ||
547 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); | 547 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); |
548 | extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); | 548 | extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); |
549 | static inline int unregister_gifconf(unsigned int family) | 549 | static inline int unregister_gifconf(unsigned int family) |
550 | { | 550 | { |
551 | return register_gifconf(family, NULL); | 551 | return register_gifconf(family, NULL); |
552 | } | 552 | } |
553 | 553 | ||
554 | /* | 554 | /* |
555 | * Incoming packets are placed on per-cpu queues so that | 555 | * Incoming packets are placed on per-cpu queues so that |
556 | * no locking is needed. | 556 | * no locking is needed. |
557 | */ | 557 | */ |
558 | 558 | ||
559 | struct softnet_data | 559 | struct softnet_data |
560 | { | 560 | { |
561 | struct net_device *output_queue; | 561 | struct net_device *output_queue; |
562 | struct sk_buff_head input_pkt_queue; | 562 | struct sk_buff_head input_pkt_queue; |
563 | struct list_head poll_list; | 563 | struct list_head poll_list; |
564 | struct sk_buff *completion_queue; | 564 | struct sk_buff *completion_queue; |
565 | 565 | ||
566 | struct net_device backlog_dev; /* Sorry. 8) */ | 566 | struct net_device backlog_dev; /* Sorry. 8) */ |
567 | }; | 567 | }; |
568 | 568 | ||
569 | DECLARE_PER_CPU(struct softnet_data,softnet_data); | 569 | DECLARE_PER_CPU(struct softnet_data,softnet_data); |
570 | 570 | ||
571 | #define HAVE_NETIF_QUEUE | 571 | #define HAVE_NETIF_QUEUE |
572 | 572 | ||
573 | static inline void __netif_schedule(struct net_device *dev) | 573 | static inline void __netif_schedule(struct net_device *dev) |
574 | { | 574 | { |
575 | if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) { | 575 | if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) { |
576 | unsigned long flags; | 576 | unsigned long flags; |
577 | struct softnet_data *sd; | 577 | struct softnet_data *sd; |
578 | 578 | ||
579 | local_irq_save(flags); | 579 | local_irq_save(flags); |
580 | sd = &__get_cpu_var(softnet_data); | 580 | sd = &__get_cpu_var(softnet_data); |
581 | dev->next_sched = sd->output_queue; | 581 | dev->next_sched = sd->output_queue; |
582 | sd->output_queue = dev; | 582 | sd->output_queue = dev; |
583 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | 583 | raise_softirq_irqoff(NET_TX_SOFTIRQ); |
584 | local_irq_restore(flags); | 584 | local_irq_restore(flags); |
585 | } | 585 | } |
586 | } | 586 | } |
587 | 587 | ||
588 | static inline void netif_schedule(struct net_device *dev) | 588 | static inline void netif_schedule(struct net_device *dev) |
589 | { | 589 | { |
590 | if (!test_bit(__LINK_STATE_XOFF, &dev->state)) | 590 | if (!test_bit(__LINK_STATE_XOFF, &dev->state)) |
591 | __netif_schedule(dev); | 591 | __netif_schedule(dev); |
592 | } | 592 | } |
593 | 593 | ||
594 | static inline void netif_start_queue(struct net_device *dev) | 594 | static inline void netif_start_queue(struct net_device *dev) |
595 | { | 595 | { |
596 | clear_bit(__LINK_STATE_XOFF, &dev->state); | 596 | clear_bit(__LINK_STATE_XOFF, &dev->state); |
597 | } | 597 | } |
598 | 598 | ||
599 | static inline void netif_wake_queue(struct net_device *dev) | 599 | static inline void netif_wake_queue(struct net_device *dev) |
600 | { | 600 | { |
601 | #ifdef CONFIG_NETPOLL_TRAP | 601 | #ifdef CONFIG_NETPOLL_TRAP |
602 | if (netpoll_trap()) | 602 | if (netpoll_trap()) |
603 | return; | 603 | return; |
604 | #endif | 604 | #endif |
605 | if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state)) | 605 | if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state)) |
606 | __netif_schedule(dev); | 606 | __netif_schedule(dev); |
607 | } | 607 | } |
608 | 608 | ||
609 | static inline void netif_stop_queue(struct net_device *dev) | 609 | static inline void netif_stop_queue(struct net_device *dev) |
610 | { | 610 | { |
611 | #ifdef CONFIG_NETPOLL_TRAP | 611 | #ifdef CONFIG_NETPOLL_TRAP |
612 | if (netpoll_trap()) | 612 | if (netpoll_trap()) |
613 | return; | 613 | return; |
614 | #endif | 614 | #endif |
615 | set_bit(__LINK_STATE_XOFF, &dev->state); | 615 | set_bit(__LINK_STATE_XOFF, &dev->state); |
616 | } | 616 | } |
617 | 617 | ||
618 | static inline int netif_queue_stopped(const struct net_device *dev) | 618 | static inline int netif_queue_stopped(const struct net_device *dev) |
619 | { | 619 | { |
620 | return test_bit(__LINK_STATE_XOFF, &dev->state); | 620 | return test_bit(__LINK_STATE_XOFF, &dev->state); |
621 | } | 621 | } |
622 | 622 | ||
623 | static inline int netif_running(const struct net_device *dev) | 623 | static inline int netif_running(const struct net_device *dev) |
624 | { | 624 | { |
625 | return test_bit(__LINK_STATE_START, &dev->state); | 625 | return test_bit(__LINK_STATE_START, &dev->state); |
626 | } | 626 | } |
627 | 627 | ||
628 | 628 | ||
629 | /* Use this variant when it is known for sure that it | 629 | /* Use this variant when it is known for sure that it |
630 | * is executing from interrupt context. | 630 | * is executing from interrupt context. |
631 | */ | 631 | */ |
632 | static inline void dev_kfree_skb_irq(struct sk_buff *skb) | 632 | static inline void dev_kfree_skb_irq(struct sk_buff *skb) |
633 | { | 633 | { |
634 | if (atomic_dec_and_test(&skb->users)) { | 634 | if (atomic_dec_and_test(&skb->users)) { |
635 | struct softnet_data *sd; | 635 | struct softnet_data *sd; |
636 | unsigned long flags; | 636 | unsigned long flags; |
637 | 637 | ||
638 | local_irq_save(flags); | 638 | local_irq_save(flags); |
639 | sd = &__get_cpu_var(softnet_data); | 639 | sd = &__get_cpu_var(softnet_data); |
640 | skb->next = sd->completion_queue; | 640 | skb->next = sd->completion_queue; |
641 | sd->completion_queue = skb; | 641 | sd->completion_queue = skb; |
642 | raise_softirq_irqoff(NET_TX_SOFTIRQ); | 642 | raise_softirq_irqoff(NET_TX_SOFTIRQ); |
643 | local_irq_restore(flags); | 643 | local_irq_restore(flags); |
644 | } | 644 | } |
645 | } | 645 | } |
646 | 646 | ||
647 | /* Use this variant in places where it could be invoked | 647 | /* Use this variant in places where it could be invoked |
648 | * either from interrupt or non-interrupt context. | 648 | * either from interrupt or non-interrupt context. |
649 | */ | 649 | */ |
650 | static inline void dev_kfree_skb_any(struct sk_buff *skb) | 650 | static inline void dev_kfree_skb_any(struct sk_buff *skb) |
651 | { | 651 | { |
652 | if (in_irq() || irqs_disabled()) | 652 | if (in_irq() || irqs_disabled()) |
653 | dev_kfree_skb_irq(skb); | 653 | dev_kfree_skb_irq(skb); |
654 | else | 654 | else |
655 | dev_kfree_skb(skb); | 655 | dev_kfree_skb(skb); |
656 | } | 656 | } |
657 | 657 | ||
658 | #define HAVE_NETIF_RX 1 | 658 | #define HAVE_NETIF_RX 1 |
659 | extern int netif_rx(struct sk_buff *skb); | 659 | extern int netif_rx(struct sk_buff *skb); |
660 | extern int netif_rx_ni(struct sk_buff *skb); | 660 | extern int netif_rx_ni(struct sk_buff *skb); |
661 | #define HAVE_NETIF_RECEIVE_SKB 1 | 661 | #define HAVE_NETIF_RECEIVE_SKB 1 |
662 | extern int netif_receive_skb(struct sk_buff *skb); | 662 | extern int netif_receive_skb(struct sk_buff *skb); |
663 | extern int dev_ioctl(unsigned int cmd, void __user *); | 663 | extern int dev_ioctl(unsigned int cmd, void __user *); |
664 | extern int dev_ethtool(struct ifreq *); | 664 | extern int dev_ethtool(struct ifreq *); |
665 | extern unsigned dev_get_flags(const struct net_device *); | 665 | extern unsigned dev_get_flags(const struct net_device *); |
666 | extern int dev_change_flags(struct net_device *, unsigned); | 666 | extern int dev_change_flags(struct net_device *, unsigned); |
667 | extern int dev_change_name(struct net_device *, char *); | 667 | extern int dev_change_name(struct net_device *, char *); |
668 | extern int dev_set_mtu(struct net_device *, int); | 668 | extern int dev_set_mtu(struct net_device *, int); |
669 | extern int dev_set_mac_address(struct net_device *, | 669 | extern int dev_set_mac_address(struct net_device *, |
670 | struct sockaddr *); | 670 | struct sockaddr *); |
671 | extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); | 671 | extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
672 | 672 | ||
673 | extern void dev_init(void); | 673 | extern void dev_init(void); |
674 | 674 | ||
675 | extern int netdev_nit; | 675 | extern int netdev_nit; |
676 | 676 | ||
677 | /* Called by rtnetlink.c:rtnl_unlock() */ | 677 | /* Called by rtnetlink.c:rtnl_unlock() */ |
678 | extern void netdev_run_todo(void); | 678 | extern void netdev_run_todo(void); |
679 | 679 | ||
680 | static inline void dev_put(struct net_device *dev) | 680 | static inline void dev_put(struct net_device *dev) |
681 | { | 681 | { |
682 | atomic_dec(&dev->refcnt); | 682 | atomic_dec(&dev->refcnt); |
683 | } | 683 | } |
684 | 684 | ||
685 | #define __dev_put(dev) atomic_dec(&(dev)->refcnt) | 685 | #define __dev_put(dev) atomic_dec(&(dev)->refcnt) |
686 | #define dev_hold(dev) atomic_inc(&(dev)->refcnt) | 686 | #define dev_hold(dev) atomic_inc(&(dev)->refcnt) |
687 | 687 | ||
688 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | 688 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on |
689 | * and _off may be called from IRQ context, but it is caller | 689 | * and _off may be called from IRQ context, but it is caller |
690 | * who is responsible for serialization of these calls. | 690 | * who is responsible for serialization of these calls. |
691 | */ | 691 | */ |
692 | 692 | ||
693 | extern void linkwatch_fire_event(struct net_device *dev); | 693 | extern void linkwatch_fire_event(struct net_device *dev); |
694 | 694 | ||
695 | static inline int netif_carrier_ok(const struct net_device *dev) | 695 | static inline int netif_carrier_ok(const struct net_device *dev) |
696 | { | 696 | { |
697 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); | 697 | return !test_bit(__LINK_STATE_NOCARRIER, &dev->state); |
698 | } | 698 | } |
699 | 699 | ||
700 | extern void __netdev_watchdog_up(struct net_device *dev); | 700 | extern void __netdev_watchdog_up(struct net_device *dev); |
701 | 701 | ||
702 | static inline void netif_carrier_on(struct net_device *dev) | 702 | extern void netif_carrier_on(struct net_device *dev); |
703 | { | ||
704 | if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) | ||
705 | linkwatch_fire_event(dev); | ||
706 | if (netif_running(dev)) | ||
707 | __netdev_watchdog_up(dev); | ||
708 | } | ||
709 | 703 | ||
710 | static inline void netif_carrier_off(struct net_device *dev) | 704 | extern void netif_carrier_off(struct net_device *dev); |
711 | { | ||
712 | if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) | ||
713 | linkwatch_fire_event(dev); | ||
714 | } | ||
715 | 705 | ||
716 | /* Hot-plugging. */ | 706 | /* Hot-plugging. */ |
717 | static inline int netif_device_present(struct net_device *dev) | 707 | static inline int netif_device_present(struct net_device *dev) |
718 | { | 708 | { |
719 | return test_bit(__LINK_STATE_PRESENT, &dev->state); | 709 | return test_bit(__LINK_STATE_PRESENT, &dev->state); |
720 | } | 710 | } |
721 | 711 | ||
722 | static inline void netif_device_detach(struct net_device *dev) | 712 | static inline void netif_device_detach(struct net_device *dev) |
723 | { | 713 | { |
724 | if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && | 714 | if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) && |
725 | netif_running(dev)) { | 715 | netif_running(dev)) { |
726 | netif_stop_queue(dev); | 716 | netif_stop_queue(dev); |
727 | } | 717 | } |
728 | } | 718 | } |
729 | 719 | ||
730 | static inline void netif_device_attach(struct net_device *dev) | 720 | static inline void netif_device_attach(struct net_device *dev) |
731 | { | 721 | { |
732 | if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && | 722 | if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) && |
733 | netif_running(dev)) { | 723 | netif_running(dev)) { |
734 | netif_wake_queue(dev); | 724 | netif_wake_queue(dev); |
735 | __netdev_watchdog_up(dev); | 725 | __netdev_watchdog_up(dev); |
736 | } | 726 | } |
737 | } | 727 | } |
738 | 728 | ||
739 | /* | 729 | /* |
740 | * Network interface message level settings | 730 | * Network interface message level settings |
741 | */ | 731 | */ |
742 | #define HAVE_NETIF_MSG 1 | 732 | #define HAVE_NETIF_MSG 1 |
743 | 733 | ||
744 | enum { | 734 | enum { |
745 | NETIF_MSG_DRV = 0x0001, | 735 | NETIF_MSG_DRV = 0x0001, |
746 | NETIF_MSG_PROBE = 0x0002, | 736 | NETIF_MSG_PROBE = 0x0002, |
747 | NETIF_MSG_LINK = 0x0004, | 737 | NETIF_MSG_LINK = 0x0004, |
748 | NETIF_MSG_TIMER = 0x0008, | 738 | NETIF_MSG_TIMER = 0x0008, |
749 | NETIF_MSG_IFDOWN = 0x0010, | 739 | NETIF_MSG_IFDOWN = 0x0010, |
750 | NETIF_MSG_IFUP = 0x0020, | 740 | NETIF_MSG_IFUP = 0x0020, |
751 | NETIF_MSG_RX_ERR = 0x0040, | 741 | NETIF_MSG_RX_ERR = 0x0040, |
752 | NETIF_MSG_TX_ERR = 0x0080, | 742 | NETIF_MSG_TX_ERR = 0x0080, |
753 | NETIF_MSG_TX_QUEUED = 0x0100, | 743 | NETIF_MSG_TX_QUEUED = 0x0100, |
754 | NETIF_MSG_INTR = 0x0200, | 744 | NETIF_MSG_INTR = 0x0200, |
755 | NETIF_MSG_TX_DONE = 0x0400, | 745 | NETIF_MSG_TX_DONE = 0x0400, |
756 | NETIF_MSG_RX_STATUS = 0x0800, | 746 | NETIF_MSG_RX_STATUS = 0x0800, |
757 | NETIF_MSG_PKTDATA = 0x1000, | 747 | NETIF_MSG_PKTDATA = 0x1000, |
758 | NETIF_MSG_HW = 0x2000, | 748 | NETIF_MSG_HW = 0x2000, |
759 | NETIF_MSG_WOL = 0x4000, | 749 | NETIF_MSG_WOL = 0x4000, |
760 | }; | 750 | }; |
761 | 751 | ||
762 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) | 752 | #define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) |
763 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) | 753 | #define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) |
764 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) | 754 | #define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) |
765 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) | 755 | #define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER) |
766 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) | 756 | #define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) |
767 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) | 757 | #define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) |
768 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) | 758 | #define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) |
769 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) | 759 | #define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) |
770 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) | 760 | #define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) |
771 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) | 761 | #define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) |
772 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) | 762 | #define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) |
773 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) | 763 | #define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) |
774 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) | 764 | #define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) |
775 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) | 765 | #define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) |
776 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) | 766 | #define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) |
777 | 767 | ||
778 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | 768 | static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) |
779 | { | 769 | { |
780 | /* use default */ | 770 | /* use default */ |
781 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) | 771 | if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) |
782 | return default_msg_enable_bits; | 772 | return default_msg_enable_bits; |
783 | if (debug_value == 0) /* no output */ | 773 | if (debug_value == 0) /* no output */ |
784 | return 0; | 774 | return 0; |
785 | /* set low N bits */ | 775 | /* set low N bits */ |
786 | return (1 << debug_value) - 1; | 776 | return (1 << debug_value) - 1; |
787 | } | 777 | } |
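For orientation, here is a minimal, hedged sketch of how a driver of this era typically consumes the message-level machinery above: a module "debug" parameter is folded through netif_msg_init(), and the netif_msg_*() tests then gate the driver's printk()s. The mydrv_* names and the chosen default mask are illustrative assumptions, not part of this commit.

    /* Sketch only; assumes <linux/netdevice.h> and <linux/moduleparam.h>.
     * mydrv_* names and MYDRV_MSG_DEFAULT are hypothetical. */
    static int debug = -1;                  /* -1: take the driver defaults */
    module_param(debug, int, 0);

    #define MYDRV_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

    struct mydrv_priv {
            u32 msg_enable;
            /* ... rest of the per-device state ... */
    };

    static void mydrv_init_msg(struct net_device *dev)
    {
            struct mydrv_priv *priv = netdev_priv(dev);

            priv->msg_enable = netif_msg_init(debug, MYDRV_MSG_DEFAULT);

            if (netif_msg_drv(priv))
                    printk(KERN_INFO "%s: message level 0x%x\n",
                           dev->name, priv->msg_enable);
    }

With this sketch, loading the module with debug=3 would enable the low three bits (DRV, PROBE, LINK) via the (1 << debug_value) - 1 computation above, while debug=-1 keeps the driver's defaults.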
788 | 778 | ||
789 | /* Schedule rx intr now? */ | 779 | /* Schedule rx intr now? */ |
790 | 780 | ||
791 | static inline int netif_rx_schedule_prep(struct net_device *dev) | 781 | static inline int netif_rx_schedule_prep(struct net_device *dev) |
792 | { | 782 | { |
793 | return netif_running(dev) && | 783 | return netif_running(dev) && |
794 | !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state); | 784 | !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state); |
795 | } | 785 | } |
796 | 786 | ||
797 | /* Add interface to tail of rx poll list. This assumes that _prep has | 787 | /* Add interface to tail of rx poll list. This assumes that _prep has |
798 | * already been called and returned 1. | 788 | * already been called and returned 1. |
799 | */ | 789 | */ |
800 | 790 | ||
801 | static inline void __netif_rx_schedule(struct net_device *dev) | 791 | static inline void __netif_rx_schedule(struct net_device *dev) |
802 | { | 792 | { |
803 | unsigned long flags; | 793 | unsigned long flags; |
804 | 794 | ||
805 | local_irq_save(flags); | 795 | local_irq_save(flags); |
806 | dev_hold(dev); | 796 | dev_hold(dev); |
807 | list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); | 797 | list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); |
808 | if (dev->quota < 0) | 798 | if (dev->quota < 0) |
809 | dev->quota += dev->weight; | 799 | dev->quota += dev->weight; |
810 | else | 800 | else |
811 | dev->quota = dev->weight; | 801 | dev->quota = dev->weight; |
812 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | 802 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); |
813 | local_irq_restore(flags); | 803 | local_irq_restore(flags); |
814 | } | 804 | } |
815 | 805 | ||
816 | /* Try to reschedule poll. Called by irq handler. */ | 806 | /* Try to reschedule poll. Called by irq handler. */ |
817 | 807 | ||
818 | static inline void netif_rx_schedule(struct net_device *dev) | 808 | static inline void netif_rx_schedule(struct net_device *dev) |
819 | { | 809 | { |
820 | if (netif_rx_schedule_prep(dev)) | 810 | if (netif_rx_schedule_prep(dev)) |
821 | __netif_rx_schedule(dev); | 811 | __netif_rx_schedule(dev); |
822 | } | 812 | } |
823 | 813 | ||
824 | /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). | 814 | /* Try to reschedule poll. Called by dev->poll() after netif_rx_complete(). |
825 | * Do not inline this? | 815 | * Do not inline this? |
826 | */ | 816 | */ |
827 | static inline int netif_rx_reschedule(struct net_device *dev, int undo) | 817 | static inline int netif_rx_reschedule(struct net_device *dev, int undo) |
828 | { | 818 | { |
829 | if (netif_rx_schedule_prep(dev)) { | 819 | if (netif_rx_schedule_prep(dev)) { |
830 | unsigned long flags; | 820 | unsigned long flags; |
831 | 821 | ||
832 | dev->quota += undo; | 822 | dev->quota += undo; |
833 | 823 | ||
834 | local_irq_save(flags); | 824 | local_irq_save(flags); |
835 | list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); | 825 | list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list); |
836 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); | 826 | __raise_softirq_irqoff(NET_RX_SOFTIRQ); |
837 | local_irq_restore(flags); | 827 | local_irq_restore(flags); |
838 | return 1; | 828 | return 1; |
839 | } | 829 | } |
840 | return 0; | 830 | return 0; |
841 | } | 831 | } |
842 | 832 | ||
843 | /* Remove interface from poll list: it must be in the poll list | 833 | /* Remove interface from poll list: it must be in the poll list |
844 | * on current cpu. This primitive is called by dev->poll(), when | 834 | * on current cpu. This primitive is called by dev->poll(), when |
845 | * it completes the work. The device cannot be out of poll list at this | 835 | * it completes the work. The device cannot be out of poll list at this |
846 | * moment, it is BUG(). | 836 | * moment, it is BUG(). |
847 | */ | 837 | */ |
848 | static inline void netif_rx_complete(struct net_device *dev) | 838 | static inline void netif_rx_complete(struct net_device *dev) |
849 | { | 839 | { |
850 | unsigned long flags; | 840 | unsigned long flags; |
851 | 841 | ||
852 | local_irq_save(flags); | 842 | local_irq_save(flags); |
853 | BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); | 843 | BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); |
854 | list_del(&dev->poll_list); | 844 | list_del(&dev->poll_list); |
855 | smp_mb__before_clear_bit(); | 845 | smp_mb__before_clear_bit(); |
856 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | 846 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); |
857 | local_irq_restore(flags); | 847 | local_irq_restore(flags); |
858 | } | 848 | } |
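To make the intended calling pattern concrete, a hedged sketch of a driver of this kernel generation using the rx-poll primitives defined above: the interrupt handler masks its rx interrupt and schedules polling, and dev->poll() completes once the ring is drained. All mydrv_* helpers are hypothetical and the budget handling is only one common arrangement.

    /* Sketch only; assumes <linux/netdevice.h> and <linux/interrupt.h>.
     * mydrv_* helpers are hypothetical. */
    static irqreturn_t mydrv_interrupt(int irq, void *dev_id, struct pt_regs *regs)
    {
            struct net_device *dev = dev_id;

            if (netif_rx_schedule_prep(dev)) {
                    mydrv_disable_rx_irq(dev);      /* quiet the device first */
                    __netif_rx_schedule(dev);       /* put dev on the poll list */
            }
            return IRQ_HANDLED;
    }

    static int mydrv_poll(struct net_device *dev, int *budget)
    {
            int work = mydrv_rx(dev, min(*budget, dev->quota));

            *budget -= work;
            dev->quota -= work;

            if (mydrv_rx_ring_empty(dev)) {
                    netif_rx_complete(dev);         /* off the poll list */
                    mydrv_enable_rx_irq(dev);
                    return 0;                       /* all done */
            }
            return 1;                               /* keep us scheduled */
    }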
859 | 849 | ||
860 | static inline void netif_poll_disable(struct net_device *dev) | 850 | static inline void netif_poll_disable(struct net_device *dev) |
861 | { | 851 | { |
862 | while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) { | 852 | while (test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state)) { |
863 | /* No hurry. */ | 853 | /* No hurry. */ |
864 | current->state = TASK_INTERRUPTIBLE; | 854 | current->state = TASK_INTERRUPTIBLE; |
865 | schedule_timeout(1); | 855 | schedule_timeout(1); |
866 | } | 856 | } |
867 | } | 857 | } |
868 | 858 | ||
869 | static inline void netif_poll_enable(struct net_device *dev) | 859 | static inline void netif_poll_enable(struct net_device *dev) |
870 | { | 860 | { |
871 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | 861 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); |
872 | } | 862 | } |
873 | 863 | ||
874 | /* same as netif_rx_complete, except that local_irq_save(flags) | 864 | /* same as netif_rx_complete, except that local_irq_save(flags) |
875 | * has already been issued | 865 | * has already been issued |
876 | */ | 866 | */ |
877 | static inline void __netif_rx_complete(struct net_device *dev) | 867 | static inline void __netif_rx_complete(struct net_device *dev) |
878 | { | 868 | { |
879 | BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); | 869 | BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); |
880 | list_del(&dev->poll_list); | 870 | list_del(&dev->poll_list); |
881 | smp_mb__before_clear_bit(); | 871 | smp_mb__before_clear_bit(); |
882 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | 872 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); |
883 | } | 873 | } |
884 | 874 | ||
885 | static inline void netif_tx_disable(struct net_device *dev) | 875 | static inline void netif_tx_disable(struct net_device *dev) |
886 | { | 876 | { |
887 | spin_lock_bh(&dev->xmit_lock); | 877 | spin_lock_bh(&dev->xmit_lock); |
888 | netif_stop_queue(dev); | 878 | netif_stop_queue(dev); |
889 | spin_unlock_bh(&dev->xmit_lock); | 879 | spin_unlock_bh(&dev->xmit_lock); |
890 | } | 880 | } |
891 | 881 | ||
892 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ | 882 | /* These functions live elsewhere (drivers/net/net_init.c, but related) */ |
893 | 883 | ||
894 | extern void ether_setup(struct net_device *dev); | 884 | extern void ether_setup(struct net_device *dev); |
895 | 885 | ||
896 | /* Support for loadable net-drivers */ | 886 | /* Support for loadable net-drivers */ |
897 | extern struct net_device *alloc_netdev(int sizeof_priv, const char *name, | 887 | extern struct net_device *alloc_netdev(int sizeof_priv, const char *name, |
898 | void (*setup)(struct net_device *)); | 888 | void (*setup)(struct net_device *)); |
899 | extern int register_netdev(struct net_device *dev); | 889 | extern int register_netdev(struct net_device *dev); |
900 | extern void unregister_netdev(struct net_device *dev); | 890 | extern void unregister_netdev(struct net_device *dev); |
901 | /* Functions used for multicast support */ | 891 | /* Functions used for multicast support */ |
902 | extern void dev_mc_upload(struct net_device *dev); | 892 | extern void dev_mc_upload(struct net_device *dev); |
903 | extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); | 893 | extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all); |
904 | extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); | 894 | extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly); |
905 | extern void dev_mc_discard(struct net_device *dev); | 895 | extern void dev_mc_discard(struct net_device *dev); |
906 | extern void dev_set_promiscuity(struct net_device *dev, int inc); | 896 | extern void dev_set_promiscuity(struct net_device *dev, int inc); |
907 | extern void dev_set_allmulti(struct net_device *dev, int inc); | 897 | extern void dev_set_allmulti(struct net_device *dev, int inc); |
908 | extern void netdev_state_change(struct net_device *dev); | 898 | extern void netdev_state_change(struct net_device *dev); |
909 | extern void netdev_features_change(struct net_device *dev); | 899 | extern void netdev_features_change(struct net_device *dev); |
910 | /* Load a device via the kmod */ | 900 | /* Load a device via the kmod */ |
911 | extern void dev_load(const char *name); | 901 | extern void dev_load(const char *name); |
912 | extern void dev_mcast_init(void); | 902 | extern void dev_mcast_init(void); |
913 | extern int netdev_max_backlog; | 903 | extern int netdev_max_backlog; |
914 | extern int weight_p; | 904 | extern int weight_p; |
915 | extern int netdev_set_master(struct net_device *dev, struct net_device *master); | 905 | extern int netdev_set_master(struct net_device *dev, struct net_device *master); |
916 | extern int skb_checksum_help(struct sk_buff *skb, int inward); | 906 | extern int skb_checksum_help(struct sk_buff *skb, int inward); |
917 | /* rx skb timestamps */ | 907 | /* rx skb timestamps */ |
918 | extern void net_enable_timestamp(void); | 908 | extern void net_enable_timestamp(void); |
919 | extern void net_disable_timestamp(void); | 909 | extern void net_disable_timestamp(void); |
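Tying together the loadable-driver entry points declared a few lines up (alloc_netdev, register_netdev, unregister_netdev), a hedged sketch of the usual allocate/register/unregister life cycle; the "mydrv%d" name template, struct mydrv_priv and the module hooks are assumptions, not from this commit.

    /* Sketch only; assumes <linux/module.h>, <linux/init.h>, <linux/netdevice.h>.
     * mydrv_* names are hypothetical. */
    static struct net_device *mydrv_dev;

    static int __init mydrv_init_module(void)
    {
            int err;

            mydrv_dev = alloc_netdev(sizeof(struct mydrv_priv), "mydrv%d",
                                     ether_setup);
            if (!mydrv_dev)
                    return -ENOMEM;

            err = register_netdev(mydrv_dev);
            if (err) {
                    free_netdev(mydrv_dev);
                    return err;
            }
            return 0;
    }

    static void __exit mydrv_cleanup_module(void)
    {
            unregister_netdev(mydrv_dev);
            free_netdev(mydrv_dev);
    }

    module_init(mydrv_init_module);
    module_exit(mydrv_cleanup_module);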
920 | 910 | ||
921 | #endif /* __KERNEL__ */ | 911 | #endif /* __KERNEL__ */ |
922 | 912 | ||
923 | #endif /* _LINUX_DEV_H */ | 913 | #endif /* _LINUX_DEV_H */ |
924 | 914 |
net/sched/sch_generic.c
1 | /* | 1 | /* |
2 | * net/sched/sch_generic.c Generic packet scheduler routines. | 2 | * net/sched/sch_generic.c Generic packet scheduler routines. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or | 4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public License | 5 | * modify it under the terms of the GNU General Public License |
6 | * as published by the Free Software Foundation; either version | 6 | * as published by the Free Software Foundation; either version |
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | * | 8 | * |
9 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> | 9 | * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> |
10 | * Jamal Hadi Salim, <hadi@cyberus.ca> 990601 | 10 | * Jamal Hadi Salim, <hadi@cyberus.ca> 990601 |
11 | * - Ingress support | 11 | * - Ingress support |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <asm/uaccess.h> | 14 | #include <asm/uaccess.h> |
15 | #include <asm/system.h> | 15 | #include <asm/system.h> |
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/config.h> | 17 | #include <linux/config.h> |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/socket.h> | 24 | #include <linux/socket.h> |
25 | #include <linux/sockios.h> | 25 | #include <linux/sockios.h> |
26 | #include <linux/in.h> | 26 | #include <linux/in.h> |
27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
28 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
29 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
30 | #include <linux/skbuff.h> | 30 | #include <linux/skbuff.h> |
31 | #include <linux/rtnetlink.h> | 31 | #include <linux/rtnetlink.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/rcupdate.h> | 33 | #include <linux/rcupdate.h> |
34 | #include <linux/list.h> | 34 | #include <linux/list.h> |
35 | #include <net/sock.h> | 35 | #include <net/sock.h> |
36 | #include <net/pkt_sched.h> | 36 | #include <net/pkt_sched.h> |
37 | 37 | ||
38 | /* Main transmission queue. */ | 38 | /* Main transmission queue. */ |
39 | 39 | ||
40 | /* Main qdisc structure lock. | 40 | /* Main qdisc structure lock. |
41 | 41 | ||
42 | However, modifications | 42 | However, modifications |
43 | to data, participating in scheduling must be additionally | 43 | to data, participating in scheduling must be additionally |
44 | protected with dev->queue_lock spinlock. | 44 | protected with dev->queue_lock spinlock. |
45 | 45 | ||
46 | The idea is the following: | 46 | The idea is the following: |
47 | - enqueue, dequeue are serialized via top level device | 47 | - enqueue, dequeue are serialized via top level device |
48 | spinlock dev->queue_lock. | 48 | spinlock dev->queue_lock. |
49 | - tree walking is protected by read_lock_bh(qdisc_tree_lock) | 49 | - tree walking is protected by read_lock_bh(qdisc_tree_lock) |
50 | and this lock is used only in process context. | 50 | and this lock is used only in process context. |
51 | - updates to tree are made under rtnl semaphore or | 51 | - updates to tree are made under rtnl semaphore or |
52 | from softirq context (__qdisc_destroy rcu-callback) | 52 | from softirq context (__qdisc_destroy rcu-callback) |
53 | hence this lock needs local bh disabling. | 53 | hence this lock needs local bh disabling. |
54 | 54 | ||
55 | qdisc_tree_lock must be grabbed BEFORE dev->queue_lock! | 55 | qdisc_tree_lock must be grabbed BEFORE dev->queue_lock! |
56 | */ | 56 | */ |
57 | DEFINE_RWLOCK(qdisc_tree_lock); | 57 | DEFINE_RWLOCK(qdisc_tree_lock); |
58 | 58 | ||
59 | void qdisc_lock_tree(struct net_device *dev) | 59 | void qdisc_lock_tree(struct net_device *dev) |
60 | { | 60 | { |
61 | write_lock_bh(&qdisc_tree_lock); | 61 | write_lock_bh(&qdisc_tree_lock); |
62 | spin_lock_bh(&dev->queue_lock); | 62 | spin_lock_bh(&dev->queue_lock); |
63 | } | 63 | } |
64 | 64 | ||
65 | void qdisc_unlock_tree(struct net_device *dev) | 65 | void qdisc_unlock_tree(struct net_device *dev) |
66 | { | 66 | { |
67 | spin_unlock_bh(&dev->queue_lock); | 67 | spin_unlock_bh(&dev->queue_lock); |
68 | write_unlock_bh(&qdisc_tree_lock); | 68 | write_unlock_bh(&qdisc_tree_lock); |
69 | } | 69 | } |
70 | 70 | ||
71 | /* | 71 | /* |
72 | dev->queue_lock serializes queue accesses for this device | 72 | dev->queue_lock serializes queue accesses for this device |
73 | AND dev->qdisc pointer itself. | 73 | AND dev->qdisc pointer itself. |
74 | 74 | ||
75 | dev->xmit_lock serializes accesses to device driver. | 75 | dev->xmit_lock serializes accesses to device driver. |
76 | 76 | ||
77 | dev->queue_lock and dev->xmit_lock are mutually exclusive, | 77 | dev->queue_lock and dev->xmit_lock are mutually exclusive, |
78 | if one is grabbed, another must be free. | 78 | if one is grabbed, another must be free. |
79 | */ | 79 | */ |
80 | 80 | ||
81 | 81 | ||
82 | /* Kick device. | 82 | /* Kick device. |
83 | Note, that this procedure can be called by a watchdog timer, so that | 83 | Note, that this procedure can be called by a watchdog timer, so that |
84 | we do not check dev->tbusy flag here. | 84 | we do not check dev->tbusy flag here. |
85 | 85 | ||
86 | Returns: 0 - queue is empty. | 86 | Returns: 0 - queue is empty. |
87 | >0 - queue is not empty, but throttled. | 87 | >0 - queue is not empty, but throttled. |
88 | <0 - queue is not empty. Device is throttled, if dev->tbusy != 0. | 88 | <0 - queue is not empty. Device is throttled, if dev->tbusy != 0. |
89 | 89 | ||
90 | NOTE: Called under dev->queue_lock with locally disabled BH. | 90 | NOTE: Called under dev->queue_lock with locally disabled BH. |
91 | */ | 91 | */ |
92 | 92 | ||
93 | int qdisc_restart(struct net_device *dev) | 93 | int qdisc_restart(struct net_device *dev) |
94 | { | 94 | { |
95 | struct Qdisc *q = dev->qdisc; | 95 | struct Qdisc *q = dev->qdisc; |
96 | struct sk_buff *skb; | 96 | struct sk_buff *skb; |
97 | 97 | ||
98 | /* Dequeue packet */ | 98 | /* Dequeue packet */ |
99 | if ((skb = q->dequeue(q)) != NULL) { | 99 | if ((skb = q->dequeue(q)) != NULL) { |
100 | unsigned nolock = (dev->features & NETIF_F_LLTX); | 100 | unsigned nolock = (dev->features & NETIF_F_LLTX); |
101 | /* | 101 | /* |
102 | * When the driver has LLTX set it does its own locking | 102 | * When the driver has LLTX set it does its own locking |
103 | * in start_xmit. No need to add additional overhead by | 103 | * in start_xmit. No need to add additional overhead by |
104 | * locking again. These checks are worth it because | 104 | * locking again. These checks are worth it because |
105 | * even uncongested locks can be quite expensive. | 105 | * even uncongested locks can be quite expensive. |
106 | * The driver can do trylock like here too, in case | 106 | * The driver can do trylock like here too, in case |
107 | * of lock congestion it should return -1 and the packet | 107 | * of lock congestion it should return -1 and the packet |
108 | * will be requeued. | 108 | * will be requeued. |
109 | */ | 109 | */ |
110 | if (!nolock) { | 110 | if (!nolock) { |
111 | if (!spin_trylock(&dev->xmit_lock)) { | 111 | if (!spin_trylock(&dev->xmit_lock)) { |
112 | collision: | 112 | collision: |
113 | /* So, someone grabbed the driver. */ | 113 | /* So, someone grabbed the driver. */ |
114 | 114 | ||
115 | /* It may be transient configuration error, | 115 | /* It may be transient configuration error, |
116 | when hard_start_xmit() recurses. We detect | 116 | when hard_start_xmit() recurses. We detect |
117 | it by checking xmit owner and drop the | 117 | it by checking xmit owner and drop the |
118 | packet when deadloop is detected. | 118 | packet when deadloop is detected. |
119 | */ | 119 | */ |
120 | if (dev->xmit_lock_owner == smp_processor_id()) { | 120 | if (dev->xmit_lock_owner == smp_processor_id()) { |
121 | kfree_skb(skb); | 121 | kfree_skb(skb); |
122 | if (net_ratelimit()) | 122 | if (net_ratelimit()) |
123 | printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name); | 123 | printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name); |
124 | return -1; | 124 | return -1; |
125 | } | 125 | } |
126 | __get_cpu_var(netdev_rx_stat).cpu_collision++; | 126 | __get_cpu_var(netdev_rx_stat).cpu_collision++; |
127 | goto requeue; | 127 | goto requeue; |
128 | } | 128 | } |
129 | /* Remember that the driver is grabbed by us. */ | 129 | /* Remember that the driver is grabbed by us. */ |
130 | dev->xmit_lock_owner = smp_processor_id(); | 130 | dev->xmit_lock_owner = smp_processor_id(); |
131 | } | 131 | } |
132 | 132 | ||
133 | { | 133 | { |
134 | /* And release queue */ | 134 | /* And release queue */ |
135 | spin_unlock(&dev->queue_lock); | 135 | spin_unlock(&dev->queue_lock); |
136 | 136 | ||
137 | if (!netif_queue_stopped(dev)) { | 137 | if (!netif_queue_stopped(dev)) { |
138 | int ret; | 138 | int ret; |
139 | if (netdev_nit) | 139 | if (netdev_nit) |
140 | dev_queue_xmit_nit(skb, dev); | 140 | dev_queue_xmit_nit(skb, dev); |
141 | 141 | ||
142 | ret = dev->hard_start_xmit(skb, dev); | 142 | ret = dev->hard_start_xmit(skb, dev); |
143 | if (ret == NETDEV_TX_OK) { | 143 | if (ret == NETDEV_TX_OK) { |
144 | if (!nolock) { | 144 | if (!nolock) { |
145 | dev->xmit_lock_owner = -1; | 145 | dev->xmit_lock_owner = -1; |
146 | spin_unlock(&dev->xmit_lock); | 146 | spin_unlock(&dev->xmit_lock); |
147 | } | 147 | } |
148 | spin_lock(&dev->queue_lock); | 148 | spin_lock(&dev->queue_lock); |
149 | return -1; | 149 | return -1; |
150 | } | 150 | } |
151 | if (ret == NETDEV_TX_LOCKED && nolock) { | 151 | if (ret == NETDEV_TX_LOCKED && nolock) { |
152 | spin_lock(&dev->queue_lock); | 152 | spin_lock(&dev->queue_lock); |
153 | goto collision; | 153 | goto collision; |
154 | } | 154 | } |
155 | } | 155 | } |
156 | 156 | ||
157 | /* NETDEV_TX_BUSY - we need to requeue */ | 157 | /* NETDEV_TX_BUSY - we need to requeue */ |
158 | /* Release the driver */ | 158 | /* Release the driver */ |
159 | if (!nolock) { | 159 | if (!nolock) { |
160 | dev->xmit_lock_owner = -1; | 160 | dev->xmit_lock_owner = -1; |
161 | spin_unlock(&dev->xmit_lock); | 161 | spin_unlock(&dev->xmit_lock); |
162 | } | 162 | } |
163 | spin_lock(&dev->queue_lock); | 163 | spin_lock(&dev->queue_lock); |
164 | q = dev->qdisc; | 164 | q = dev->qdisc; |
165 | } | 165 | } |
166 | 166 | ||
167 | /* Device kicked us out :( | 167 | /* Device kicked us out :( |
168 | This is possible in three cases: | 168 | This is possible in three cases: |
169 | 169 | ||
170 | 0. driver is locked | 170 | 0. driver is locked |
171 | 1. fastroute is enabled | 171 | 1. fastroute is enabled |
172 | 2. device cannot determine busy state | 172 | 2. device cannot determine busy state |
173 | before start of transmission (f.e. dialout) | 173 | before start of transmission (f.e. dialout) |
174 | 3. device is buggy (ppp) | 174 | 3. device is buggy (ppp) |
175 | */ | 175 | */ |
176 | 176 | ||
177 | requeue: | 177 | requeue: |
178 | q->ops->requeue(skb, q); | 178 | q->ops->requeue(skb, q); |
179 | netif_schedule(dev); | 179 | netif_schedule(dev); |
180 | return 1; | 180 | return 1; |
181 | } | 181 | } |
182 | BUG_ON((int) q->q.qlen < 0); | 182 | BUG_ON((int) q->q.qlen < 0); |
183 | return q->q.qlen; | 183 | return q->q.qlen; |
184 | } | 184 | } |
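The comment near the top of qdisc_restart() describes the NETIF_F_LLTX contract from the core's side; for completeness, a hedged sketch of what the driver side can look like, doing its own trylock and returning NETDEV_TX_LOCKED on contention so the packet is requeued. The mydrv_* names and the private tx_lock are assumptions, not code from this commit.

    /* Sketch only: an LLTX-style hard_start_xmit() (mydrv_* hypothetical). */
    static int mydrv_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            struct mydrv_priv *priv = netdev_priv(dev);

            /* NETIF_F_LLTX: the core did not take dev->xmit_lock for us. */
            if (!spin_trylock(&priv->tx_lock))
                    return NETDEV_TX_LOCKED;        /* qdisc_restart() requeues */

            if (mydrv_tx_ring_full(priv)) {
                    netif_stop_queue(dev);
                    spin_unlock(&priv->tx_lock);
                    return NETDEV_TX_BUSY;          /* requeued by the caller too */
            }

            mydrv_post_to_hw(priv, skb);
            dev->trans_start = jiffies;

            spin_unlock(&priv->tx_lock);
            return NETDEV_TX_OK;
    }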
185 | 185 | ||
186 | static void dev_watchdog(unsigned long arg) | 186 | static void dev_watchdog(unsigned long arg) |
187 | { | 187 | { |
188 | struct net_device *dev = (struct net_device *)arg; | 188 | struct net_device *dev = (struct net_device *)arg; |
189 | 189 | ||
190 | spin_lock(&dev->xmit_lock); | 190 | spin_lock(&dev->xmit_lock); |
191 | if (dev->qdisc != &noop_qdisc) { | 191 | if (dev->qdisc != &noop_qdisc) { |
192 | if (netif_device_present(dev) && | 192 | if (netif_device_present(dev) && |
193 | netif_running(dev) && | 193 | netif_running(dev) && |
194 | netif_carrier_ok(dev)) { | 194 | netif_carrier_ok(dev)) { |
195 | if (netif_queue_stopped(dev) && | 195 | if (netif_queue_stopped(dev) && |
196 | (jiffies - dev->trans_start) > dev->watchdog_timeo) { | 196 | (jiffies - dev->trans_start) > dev->watchdog_timeo) { |
197 | printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name); | 197 | printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name); |
198 | dev->tx_timeout(dev); | 198 | dev->tx_timeout(dev); |
199 | } | 199 | } |
200 | if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) | 200 | if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) |
201 | dev_hold(dev); | 201 | dev_hold(dev); |
202 | } | 202 | } |
203 | } | 203 | } |
204 | spin_unlock(&dev->xmit_lock); | 204 | spin_unlock(&dev->xmit_lock); |
205 | 205 | ||
206 | dev_put(dev); | 206 | dev_put(dev); |
207 | } | 207 | } |
208 | 208 | ||
209 | static void dev_watchdog_init(struct net_device *dev) | 209 | static void dev_watchdog_init(struct net_device *dev) |
210 | { | 210 | { |
211 | init_timer(&dev->watchdog_timer); | 211 | init_timer(&dev->watchdog_timer); |
212 | dev->watchdog_timer.data = (unsigned long)dev; | 212 | dev->watchdog_timer.data = (unsigned long)dev; |
213 | dev->watchdog_timer.function = dev_watchdog; | 213 | dev->watchdog_timer.function = dev_watchdog; |
214 | } | 214 | } |
215 | 215 | ||
216 | void __netdev_watchdog_up(struct net_device *dev) | 216 | void __netdev_watchdog_up(struct net_device *dev) |
217 | { | 217 | { |
218 | if (dev->tx_timeout) { | 218 | if (dev->tx_timeout) { |
219 | if (dev->watchdog_timeo <= 0) | 219 | if (dev->watchdog_timeo <= 0) |
220 | dev->watchdog_timeo = 5*HZ; | 220 | dev->watchdog_timeo = 5*HZ; |
221 | if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) | 221 | if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) |
222 | dev_hold(dev); | 222 | dev_hold(dev); |
223 | } | 223 | } |
224 | } | 224 | } |
225 | 225 | ||
226 | static void dev_watchdog_up(struct net_device *dev) | 226 | static void dev_watchdog_up(struct net_device *dev) |
227 | { | 227 | { |
228 | spin_lock_bh(&dev->xmit_lock); | 228 | spin_lock_bh(&dev->xmit_lock); |
229 | __netdev_watchdog_up(dev); | 229 | __netdev_watchdog_up(dev); |
230 | spin_unlock_bh(&dev->xmit_lock); | 230 | spin_unlock_bh(&dev->xmit_lock); |
231 | } | 231 | } |
232 | 232 | ||
233 | static void dev_watchdog_down(struct net_device *dev) | 233 | static void dev_watchdog_down(struct net_device *dev) |
234 | { | 234 | { |
235 | spin_lock_bh(&dev->xmit_lock); | 235 | spin_lock_bh(&dev->xmit_lock); |
236 | if (del_timer(&dev->watchdog_timer)) | 236 | if (del_timer(&dev->watchdog_timer)) |
237 | __dev_put(dev); | 237 | __dev_put(dev); |
238 | spin_unlock_bh(&dev->xmit_lock); | 238 | spin_unlock_bh(&dev->xmit_lock); |
239 | } | 239 | } |
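For context on what feeds this watchdog, a small hedged sketch of the driver-visible side: a driver opts in simply by setting dev->tx_timeout (and optionally dev->watchdog_timeo, otherwise the 5*HZ default in __netdev_watchdog_up() above is applied). The mydrv_* names are hypothetical.

    /* Sketch only (mydrv_* hypothetical). */
    static void mydrv_tx_timeout(struct net_device *dev)
    {
            printk(KERN_WARNING "%s: transmit hang, resetting\n", dev->name);
            mydrv_reset_hw(dev);
            dev->trans_start = jiffies;     /* so the watchdog re-arms cleanly */
            netif_wake_queue(dev);
    }

    static void mydrv_setup(struct net_device *dev)
    {
            dev->tx_timeout = mydrv_tx_timeout;
            dev->watchdog_timeo = 2 * HZ;   /* fire after ~2s of a stopped queue */
    }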
240 | 240 | ||
241 | void netif_carrier_on(struct net_device *dev) | ||
242 | { | ||
243 | if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) | ||
244 | linkwatch_fire_event(dev); | ||
245 | if (netif_running(dev)) | ||
246 | __netdev_watchdog_up(dev); | ||
247 | } | ||
248 | |||
249 | void netif_carrier_off(struct net_device *dev) | ||
250 | { | ||
251 | if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) | ||
252 | linkwatch_fire_event(dev); | ||
253 | } | ||
254 | |||
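Since netif_carrier_on()/netif_carrier_off() above are the helpers this change moves out of line, a hedged sketch of their usual caller, a link/PHY poll routine, may help; mydrv_link_up() is an assumed helper, not part of this commit.

    /* Sketch only (mydrv_link_up() is hypothetical). */
    static void mydrv_check_link(struct net_device *dev)
    {
            if (mydrv_link_up(dev)) {
                    if (!netif_carrier_ok(dev))
                            netif_carrier_on(dev);  /* linkwatch event + watchdog up */
            } else {
                    if (netif_carrier_ok(dev))
                            netif_carrier_off(dev); /* linkwatch event */
            }
    }

The EXPORT_SYMBOL entries added at the bottom of this file keep modular callers like this sketch building against the now out-of-line versions.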
241 | /* "NOOP" scheduler: the best scheduler, recommended for all interfaces | 255 | /* "NOOP" scheduler: the best scheduler, recommended for all interfaces |
242 | under all circumstances. It is difficult to invent anything faster or | 256 | under all circumstances. It is difficult to invent anything faster or |
243 | cheaper. | 257 | cheaper. |
244 | */ | 258 | */ |
245 | 259 | ||
246 | static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc) | 260 | static int noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc) |
247 | { | 261 | { |
248 | kfree_skb(skb); | 262 | kfree_skb(skb); |
249 | return NET_XMIT_CN; | 263 | return NET_XMIT_CN; |
250 | } | 264 | } |
251 | 265 | ||
252 | static struct sk_buff *noop_dequeue(struct Qdisc * qdisc) | 266 | static struct sk_buff *noop_dequeue(struct Qdisc * qdisc) |
253 | { | 267 | { |
254 | return NULL; | 268 | return NULL; |
255 | } | 269 | } |
256 | 270 | ||
257 | static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc) | 271 | static int noop_requeue(struct sk_buff *skb, struct Qdisc* qdisc) |
258 | { | 272 | { |
259 | if (net_ratelimit()) | 273 | if (net_ratelimit()) |
260 | printk(KERN_DEBUG "%s deferred output. It is buggy.\n", | 274 | printk(KERN_DEBUG "%s deferred output. It is buggy.\n", |
261 | skb->dev->name); | 275 | skb->dev->name); |
262 | kfree_skb(skb); | 276 | kfree_skb(skb); |
263 | return NET_XMIT_CN; | 277 | return NET_XMIT_CN; |
264 | } | 278 | } |
265 | 279 | ||
266 | struct Qdisc_ops noop_qdisc_ops = { | 280 | struct Qdisc_ops noop_qdisc_ops = { |
267 | .id = "noop", | 281 | .id = "noop", |
268 | .priv_size = 0, | 282 | .priv_size = 0, |
269 | .enqueue = noop_enqueue, | 283 | .enqueue = noop_enqueue, |
270 | .dequeue = noop_dequeue, | 284 | .dequeue = noop_dequeue, |
271 | .requeue = noop_requeue, | 285 | .requeue = noop_requeue, |
272 | .owner = THIS_MODULE, | 286 | .owner = THIS_MODULE, |
273 | }; | 287 | }; |
274 | 288 | ||
275 | struct Qdisc noop_qdisc = { | 289 | struct Qdisc noop_qdisc = { |
276 | .enqueue = noop_enqueue, | 290 | .enqueue = noop_enqueue, |
277 | .dequeue = noop_dequeue, | 291 | .dequeue = noop_dequeue, |
278 | .flags = TCQ_F_BUILTIN, | 292 | .flags = TCQ_F_BUILTIN, |
279 | .ops = &noop_qdisc_ops, | 293 | .ops = &noop_qdisc_ops, |
280 | .list = LIST_HEAD_INIT(noop_qdisc.list), | 294 | .list = LIST_HEAD_INIT(noop_qdisc.list), |
281 | }; | 295 | }; |
282 | 296 | ||
283 | static struct Qdisc_ops noqueue_qdisc_ops = { | 297 | static struct Qdisc_ops noqueue_qdisc_ops = { |
284 | .id = "noqueue", | 298 | .id = "noqueue", |
285 | .priv_size = 0, | 299 | .priv_size = 0, |
286 | .enqueue = noop_enqueue, | 300 | .enqueue = noop_enqueue, |
287 | .dequeue = noop_dequeue, | 301 | .dequeue = noop_dequeue, |
288 | .requeue = noop_requeue, | 302 | .requeue = noop_requeue, |
289 | .owner = THIS_MODULE, | 303 | .owner = THIS_MODULE, |
290 | }; | 304 | }; |
291 | 305 | ||
292 | static struct Qdisc noqueue_qdisc = { | 306 | static struct Qdisc noqueue_qdisc = { |
293 | .enqueue = NULL, | 307 | .enqueue = NULL, |
294 | .dequeue = noop_dequeue, | 308 | .dequeue = noop_dequeue, |
295 | .flags = TCQ_F_BUILTIN, | 309 | .flags = TCQ_F_BUILTIN, |
296 | .ops = &noqueue_qdisc_ops, | 310 | .ops = &noqueue_qdisc_ops, |
297 | .list = LIST_HEAD_INIT(noqueue_qdisc.list), | 311 | .list = LIST_HEAD_INIT(noqueue_qdisc.list), |
298 | }; | 312 | }; |
299 | 313 | ||
300 | 314 | ||
301 | static const u8 prio2band[TC_PRIO_MAX+1] = | 315 | static const u8 prio2band[TC_PRIO_MAX+1] = |
302 | { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; | 316 | { 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1 }; |
303 | 317 | ||
304 | /* 3-band FIFO queue: old style, but should be a bit faster than | 318 | /* 3-band FIFO queue: old style, but should be a bit faster than |
305 | generic prio+fifo combination. | 319 | generic prio+fifo combination. |
306 | */ | 320 | */ |
307 | 321 | ||
308 | #define PFIFO_FAST_BANDS 3 | 322 | #define PFIFO_FAST_BANDS 3 |
309 | 323 | ||
310 | static inline struct sk_buff_head *prio2list(struct sk_buff *skb, | 324 | static inline struct sk_buff_head *prio2list(struct sk_buff *skb, |
311 | struct Qdisc *qdisc) | 325 | struct Qdisc *qdisc) |
312 | { | 326 | { |
313 | struct sk_buff_head *list = qdisc_priv(qdisc); | 327 | struct sk_buff_head *list = qdisc_priv(qdisc); |
314 | return list + prio2band[skb->priority & TC_PRIO_MAX]; | 328 | return list + prio2band[skb->priority & TC_PRIO_MAX]; |
315 | } | 329 | } |
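A worked example of the prio2band mapping used by prio2list() above (indices are the TC_PRIO_* values from <linux/pkt_sched.h>; band 0 is dequeued first):

    /* prio2band[skb->priority & TC_PRIO_MAX] picks the band:
     *   TC_PRIO_BESTEFFORT  (0) -> band 1
     *   TC_PRIO_BULK        (2) -> band 2 (lowest priority)
     *   TC_PRIO_INTERACTIVE (6) -> band 0 (served first)
     */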
316 | 330 | ||
317 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) | 331 | static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc) |
318 | { | 332 | { |
319 | struct sk_buff_head *list = prio2list(skb, qdisc); | 333 | struct sk_buff_head *list = prio2list(skb, qdisc); |
320 | 334 | ||
321 | if (skb_queue_len(list) < qdisc->dev->tx_queue_len) { | 335 | if (skb_queue_len(list) < qdisc->dev->tx_queue_len) { |
322 | qdisc->q.qlen++; | 336 | qdisc->q.qlen++; |
323 | return __qdisc_enqueue_tail(skb, qdisc, list); | 337 | return __qdisc_enqueue_tail(skb, qdisc, list); |
324 | } | 338 | } |
325 | 339 | ||
326 | return qdisc_drop(skb, qdisc); | 340 | return qdisc_drop(skb, qdisc); |
327 | } | 341 | } |
328 | 342 | ||
329 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) | 343 | static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc) |
330 | { | 344 | { |
331 | int prio; | 345 | int prio; |
332 | struct sk_buff_head *list = qdisc_priv(qdisc); | 346 | struct sk_buff_head *list = qdisc_priv(qdisc); |
333 | 347 | ||
334 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { | 348 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) { |
335 | if (!skb_queue_empty(list + prio)) { | 349 | if (!skb_queue_empty(list + prio)) { |
336 | qdisc->q.qlen--; | 350 | qdisc->q.qlen--; |
337 | return __qdisc_dequeue_head(qdisc, list + prio); | 351 | return __qdisc_dequeue_head(qdisc, list + prio); |
338 | } | 352 | } |
339 | } | 353 | } |
340 | 354 | ||
341 | return NULL; | 355 | return NULL; |
342 | } | 356 | } |
343 | 357 | ||
344 | static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc) | 358 | static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc* qdisc) |
345 | { | 359 | { |
346 | qdisc->q.qlen++; | 360 | qdisc->q.qlen++; |
347 | return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc)); | 361 | return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc)); |
348 | } | 362 | } |
349 | 363 | ||
350 | static void pfifo_fast_reset(struct Qdisc* qdisc) | 364 | static void pfifo_fast_reset(struct Qdisc* qdisc) |
351 | { | 365 | { |
352 | int prio; | 366 | int prio; |
353 | struct sk_buff_head *list = qdisc_priv(qdisc); | 367 | struct sk_buff_head *list = qdisc_priv(qdisc); |
354 | 368 | ||
355 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) | 369 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) |
356 | __qdisc_reset_queue(qdisc, list + prio); | 370 | __qdisc_reset_queue(qdisc, list + prio); |
357 | 371 | ||
358 | qdisc->qstats.backlog = 0; | 372 | qdisc->qstats.backlog = 0; |
359 | qdisc->q.qlen = 0; | 373 | qdisc->q.qlen = 0; |
360 | } | 374 | } |
361 | 375 | ||
362 | static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) | 376 | static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb) |
363 | { | 377 | { |
364 | struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; | 378 | struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; |
365 | 379 | ||
366 | memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); | 380 | memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1); |
367 | RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); | 381 | RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); |
368 | return skb->len; | 382 | return skb->len; |
369 | 383 | ||
370 | rtattr_failure: | 384 | rtattr_failure: |
371 | return -1; | 385 | return -1; |
372 | } | 386 | } |
373 | 387 | ||
374 | static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt) | 388 | static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt) |
375 | { | 389 | { |
376 | int prio; | 390 | int prio; |
377 | struct sk_buff_head *list = qdisc_priv(qdisc); | 391 | struct sk_buff_head *list = qdisc_priv(qdisc); |
378 | 392 | ||
379 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) | 393 | for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) |
380 | skb_queue_head_init(list + prio); | 394 | skb_queue_head_init(list + prio); |
381 | 395 | ||
382 | return 0; | 396 | return 0; |
383 | } | 397 | } |
384 | 398 | ||
385 | static struct Qdisc_ops pfifo_fast_ops = { | 399 | static struct Qdisc_ops pfifo_fast_ops = { |
386 | .id = "pfifo_fast", | 400 | .id = "pfifo_fast", |
387 | .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), | 401 | .priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head), |
388 | .enqueue = pfifo_fast_enqueue, | 402 | .enqueue = pfifo_fast_enqueue, |
389 | .dequeue = pfifo_fast_dequeue, | 403 | .dequeue = pfifo_fast_dequeue, |
390 | .requeue = pfifo_fast_requeue, | 404 | .requeue = pfifo_fast_requeue, |
391 | .init = pfifo_fast_init, | 405 | .init = pfifo_fast_init, |
392 | .reset = pfifo_fast_reset, | 406 | .reset = pfifo_fast_reset, |
393 | .dump = pfifo_fast_dump, | 407 | .dump = pfifo_fast_dump, |
394 | .owner = THIS_MODULE, | 408 | .owner = THIS_MODULE, |
395 | }; | 409 | }; |
396 | 410 | ||
397 | struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops) | 411 | struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops) |
398 | { | 412 | { |
399 | void *p; | 413 | void *p; |
400 | struct Qdisc *sch; | 414 | struct Qdisc *sch; |
401 | unsigned int size; | 415 | unsigned int size; |
402 | int err = -ENOBUFS; | 416 | int err = -ENOBUFS; |
403 | 417 | ||
404 | /* ensure that the Qdisc and the private data are 32-byte aligned */ | 418 | /* ensure that the Qdisc and the private data are 32-byte aligned */ |
405 | size = QDISC_ALIGN(sizeof(*sch)); | 419 | size = QDISC_ALIGN(sizeof(*sch)); |
406 | size += ops->priv_size + (QDISC_ALIGNTO - 1); | 420 | size += ops->priv_size + (QDISC_ALIGNTO - 1); |
407 | 421 | ||
408 | p = kmalloc(size, GFP_KERNEL); | 422 | p = kmalloc(size, GFP_KERNEL); |
409 | if (!p) | 423 | if (!p) |
410 | goto errout; | 424 | goto errout; |
411 | memset(p, 0, size); | 425 | memset(p, 0, size); |
412 | sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); | 426 | sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p); |
413 | sch->padded = (char *) sch - (char *) p; | 427 | sch->padded = (char *) sch - (char *) p; |
414 | 428 | ||
415 | INIT_LIST_HEAD(&sch->list); | 429 | INIT_LIST_HEAD(&sch->list); |
416 | skb_queue_head_init(&sch->q); | 430 | skb_queue_head_init(&sch->q); |
417 | sch->ops = ops; | 431 | sch->ops = ops; |
418 | sch->enqueue = ops->enqueue; | 432 | sch->enqueue = ops->enqueue; |
419 | sch->dequeue = ops->dequeue; | 433 | sch->dequeue = ops->dequeue; |
420 | sch->dev = dev; | 434 | sch->dev = dev; |
421 | dev_hold(dev); | 435 | dev_hold(dev); |
422 | sch->stats_lock = &dev->queue_lock; | 436 | sch->stats_lock = &dev->queue_lock; |
423 | atomic_set(&sch->refcnt, 1); | 437 | atomic_set(&sch->refcnt, 1); |
424 | 438 | ||
425 | return sch; | 439 | return sch; |
426 | errout: | 440 | errout: |
427 | return ERR_PTR(-err); | 441 | return ERR_PTR(-err); |
428 | } | 442 | } |
429 | 443 | ||
430 | struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops) | 444 | struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops) |
431 | { | 445 | { |
432 | struct Qdisc *sch; | 446 | struct Qdisc *sch; |
433 | 447 | ||
434 | sch = qdisc_alloc(dev, ops); | 448 | sch = qdisc_alloc(dev, ops); |
435 | if (IS_ERR(sch)) | 449 | if (IS_ERR(sch)) |
436 | goto errout; | 450 | goto errout; |
437 | 451 | ||
438 | if (!ops->init || ops->init(sch, NULL) == 0) | 452 | if (!ops->init || ops->init(sch, NULL) == 0) |
439 | return sch; | 453 | return sch; |
440 | 454 | ||
441 | qdisc_destroy(sch); | 455 | qdisc_destroy(sch); |
442 | errout: | 456 | errout: |
443 | return NULL; | 457 | return NULL; |
444 | } | 458 | } |
445 | 459 | ||
446 | /* Under dev->queue_lock and BH! */ | 460 | /* Under dev->queue_lock and BH! */ |
447 | 461 | ||
448 | void qdisc_reset(struct Qdisc *qdisc) | 462 | void qdisc_reset(struct Qdisc *qdisc) |
449 | { | 463 | { |
450 | struct Qdisc_ops *ops = qdisc->ops; | 464 | struct Qdisc_ops *ops = qdisc->ops; |
451 | 465 | ||
452 | if (ops->reset) | 466 | if (ops->reset) |
453 | ops->reset(qdisc); | 467 | ops->reset(qdisc); |
454 | } | 468 | } |
455 | 469 | ||
456 | /* this is the rcu callback function to clean up a qdisc when there | 470 | /* this is the rcu callback function to clean up a qdisc when there |
457 | * are no further references to it */ | 471 | * are no further references to it */ |
458 | 472 | ||
459 | static void __qdisc_destroy(struct rcu_head *head) | 473 | static void __qdisc_destroy(struct rcu_head *head) |
460 | { | 474 | { |
461 | struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); | 475 | struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu); |
462 | struct Qdisc_ops *ops = qdisc->ops; | 476 | struct Qdisc_ops *ops = qdisc->ops; |
463 | 477 | ||
464 | #ifdef CONFIG_NET_ESTIMATOR | 478 | #ifdef CONFIG_NET_ESTIMATOR |
465 | gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); | 479 | gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est); |
466 | #endif | 480 | #endif |
467 | write_lock(&qdisc_tree_lock); | 481 | write_lock(&qdisc_tree_lock); |
468 | if (ops->reset) | 482 | if (ops->reset) |
469 | ops->reset(qdisc); | 483 | ops->reset(qdisc); |
470 | if (ops->destroy) | 484 | if (ops->destroy) |
471 | ops->destroy(qdisc); | 485 | ops->destroy(qdisc); |
472 | write_unlock(&qdisc_tree_lock); | 486 | write_unlock(&qdisc_tree_lock); |
473 | module_put(ops->owner); | 487 | module_put(ops->owner); |
474 | 488 | ||
475 | dev_put(qdisc->dev); | 489 | dev_put(qdisc->dev); |
476 | kfree((char *) qdisc - qdisc->padded); | 490 | kfree((char *) qdisc - qdisc->padded); |
477 | } | 491 | } |
478 | 492 | ||
479 | /* Under dev->queue_lock and BH! */ | 493 | /* Under dev->queue_lock and BH! */ |
480 | 494 | ||
481 | void qdisc_destroy(struct Qdisc *qdisc) | 495 | void qdisc_destroy(struct Qdisc *qdisc) |
482 | { | 496 | { |
483 | struct list_head cql = LIST_HEAD_INIT(cql); | 497 | struct list_head cql = LIST_HEAD_INIT(cql); |
484 | struct Qdisc *cq, *q, *n; | 498 | struct Qdisc *cq, *q, *n; |
485 | 499 | ||
486 | if (qdisc->flags & TCQ_F_BUILTIN || | 500 | if (qdisc->flags & TCQ_F_BUILTIN || |
487 | !atomic_dec_and_test(&qdisc->refcnt)) | 501 | !atomic_dec_and_test(&qdisc->refcnt)) |
488 | return; | 502 | return; |
489 | 503 | ||
490 | if (!list_empty(&qdisc->list)) { | 504 | if (!list_empty(&qdisc->list)) { |
491 | if (qdisc->ops->cl_ops == NULL) | 505 | if (qdisc->ops->cl_ops == NULL) |
492 | list_del(&qdisc->list); | 506 | list_del(&qdisc->list); |
493 | else | 507 | else |
494 | list_move(&qdisc->list, &cql); | 508 | list_move(&qdisc->list, &cql); |
495 | } | 509 | } |
496 | 510 | ||
497 | /* unlink inner qdiscs from dev->qdisc_list immediately */ | 511 | /* unlink inner qdiscs from dev->qdisc_list immediately */ |
498 | list_for_each_entry(cq, &cql, list) | 512 | list_for_each_entry(cq, &cql, list) |
499 | list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list) | 513 | list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list) |
500 | if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) { | 514 | if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) { |
501 | if (q->ops->cl_ops == NULL) | 515 | if (q->ops->cl_ops == NULL) |
502 | list_del_init(&q->list); | 516 | list_del_init(&q->list); |
503 | else | 517 | else |
504 | list_move_tail(&q->list, &cql); | 518 | list_move_tail(&q->list, &cql); |
505 | } | 519 | } |
506 | list_for_each_entry_safe(cq, n, &cql, list) | 520 | list_for_each_entry_safe(cq, n, &cql, list) |
507 | list_del_init(&cq->list); | 521 | list_del_init(&cq->list); |
508 | 522 | ||
509 | call_rcu(&qdisc->q_rcu, __qdisc_destroy); | 523 | call_rcu(&qdisc->q_rcu, __qdisc_destroy); |
510 | } | 524 | } |
511 | 525 | ||
512 | void dev_activate(struct net_device *dev) | 526 | void dev_activate(struct net_device *dev) |
513 | { | 527 | { |
514 | /* No queueing discipline is attached to device; | 528 | /* No queueing discipline is attached to device; |
515 | create default one i.e. pfifo_fast for devices, | 529 | create default one i.e. pfifo_fast for devices, |
516 | which need queueing and noqueue_qdisc for | 530 | which need queueing and noqueue_qdisc for |
517 | virtual interfaces | 531 | virtual interfaces |
518 | */ | 532 | */ |
519 | 533 | ||
520 | if (dev->qdisc_sleeping == &noop_qdisc) { | 534 | if (dev->qdisc_sleeping == &noop_qdisc) { |
521 | struct Qdisc *qdisc; | 535 | struct Qdisc *qdisc; |
522 | if (dev->tx_queue_len) { | 536 | if (dev->tx_queue_len) { |
523 | qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops); | 537 | qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops); |
524 | if (qdisc == NULL) { | 538 | if (qdisc == NULL) { |
525 | printk(KERN_INFO "%s: activation failed\n", dev->name); | 539 | printk(KERN_INFO "%s: activation failed\n", dev->name); |
526 | return; | 540 | return; |
527 | } | 541 | } |
528 | write_lock_bh(&qdisc_tree_lock); | 542 | write_lock_bh(&qdisc_tree_lock); |
529 | list_add_tail(&qdisc->list, &dev->qdisc_list); | 543 | list_add_tail(&qdisc->list, &dev->qdisc_list); |
530 | write_unlock_bh(&qdisc_tree_lock); | 544 | write_unlock_bh(&qdisc_tree_lock); |
531 | } else { | 545 | } else { |
532 | qdisc = &noqueue_qdisc; | 546 | qdisc = &noqueue_qdisc; |
533 | } | 547 | } |
534 | write_lock_bh(&qdisc_tree_lock); | 548 | write_lock_bh(&qdisc_tree_lock); |
535 | dev->qdisc_sleeping = qdisc; | 549 | dev->qdisc_sleeping = qdisc; |
536 | write_unlock_bh(&qdisc_tree_lock); | 550 | write_unlock_bh(&qdisc_tree_lock); |
537 | } | 551 | } |
538 | 552 | ||
539 | if (!netif_carrier_ok(dev)) | 553 | if (!netif_carrier_ok(dev)) |
540 | /* Delay activation until next carrier-on event */ | 554 | /* Delay activation until next carrier-on event */ |
541 | return; | 555 | return; |
542 | 556 | ||
543 | spin_lock_bh(&dev->queue_lock); | 557 | spin_lock_bh(&dev->queue_lock); |
544 | rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping); | 558 | rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping); |
545 | if (dev->qdisc != &noqueue_qdisc) { | 559 | if (dev->qdisc != &noqueue_qdisc) { |
546 | dev->trans_start = jiffies; | 560 | dev->trans_start = jiffies; |
547 | dev_watchdog_up(dev); | 561 | dev_watchdog_up(dev); |
548 | } | 562 | } |
549 | spin_unlock_bh(&dev->queue_lock); | 563 | spin_unlock_bh(&dev->queue_lock); |
550 | } | 564 | } |
551 | 565 | ||
552 | void dev_deactivate(struct net_device *dev) | 566 | void dev_deactivate(struct net_device *dev) |
553 | { | 567 | { |
554 | struct Qdisc *qdisc; | 568 | struct Qdisc *qdisc; |
555 | 569 | ||
556 | spin_lock_bh(&dev->queue_lock); | 570 | spin_lock_bh(&dev->queue_lock); |
557 | qdisc = dev->qdisc; | 571 | qdisc = dev->qdisc; |
558 | dev->qdisc = &noop_qdisc; | 572 | dev->qdisc = &noop_qdisc; |
559 | 573 | ||
560 | qdisc_reset(qdisc); | 574 | qdisc_reset(qdisc); |
561 | 575 | ||
562 | spin_unlock_bh(&dev->queue_lock); | 576 | spin_unlock_bh(&dev->queue_lock); |
563 | 577 | ||
564 | dev_watchdog_down(dev); | 578 | dev_watchdog_down(dev); |
565 | 579 | ||
566 | while (test_bit(__LINK_STATE_SCHED, &dev->state)) | 580 | while (test_bit(__LINK_STATE_SCHED, &dev->state)) |
567 | yield(); | 581 | yield(); |
568 | 582 | ||
569 | spin_unlock_wait(&dev->xmit_lock); | 583 | spin_unlock_wait(&dev->xmit_lock); |
570 | } | 584 | } |
571 | 585 | ||
572 | void dev_init_scheduler(struct net_device *dev) | 586 | void dev_init_scheduler(struct net_device *dev) |
573 | { | 587 | { |
574 | qdisc_lock_tree(dev); | 588 | qdisc_lock_tree(dev); |
575 | dev->qdisc = &noop_qdisc; | 589 | dev->qdisc = &noop_qdisc; |
576 | dev->qdisc_sleeping = &noop_qdisc; | 590 | dev->qdisc_sleeping = &noop_qdisc; |
577 | INIT_LIST_HEAD(&dev->qdisc_list); | 591 | INIT_LIST_HEAD(&dev->qdisc_list); |
578 | qdisc_unlock_tree(dev); | 592 | qdisc_unlock_tree(dev); |
579 | 593 | ||
580 | dev_watchdog_init(dev); | 594 | dev_watchdog_init(dev); |
581 | } | 595 | } |
582 | 596 | ||
583 | void dev_shutdown(struct net_device *dev) | 597 | void dev_shutdown(struct net_device *dev) |
584 | { | 598 | { |
585 | struct Qdisc *qdisc; | 599 | struct Qdisc *qdisc; |
586 | 600 | ||
587 | qdisc_lock_tree(dev); | 601 | qdisc_lock_tree(dev); |
588 | qdisc = dev->qdisc_sleeping; | 602 | qdisc = dev->qdisc_sleeping; |
589 | dev->qdisc = &noop_qdisc; | 603 | dev->qdisc = &noop_qdisc; |
590 | dev->qdisc_sleeping = &noop_qdisc; | 604 | dev->qdisc_sleeping = &noop_qdisc; |
591 | qdisc_destroy(qdisc); | 605 | qdisc_destroy(qdisc); |
592 | #if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE) | 606 | #if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE) |
593 | if ((qdisc = dev->qdisc_ingress) != NULL) { | 607 | if ((qdisc = dev->qdisc_ingress) != NULL) { |
594 | dev->qdisc_ingress = NULL; | 608 | dev->qdisc_ingress = NULL; |
595 | qdisc_destroy(qdisc); | 609 | qdisc_destroy(qdisc); |
596 | } | 610 | } |
597 | #endif | 611 | #endif |
598 | BUG_TRAP(!timer_pending(&dev->watchdog_timer)); | 612 | BUG_TRAP(!timer_pending(&dev->watchdog_timer)); |
599 | qdisc_unlock_tree(dev); | 613 | qdisc_unlock_tree(dev); |
600 | } | 614 | } |
601 | 615 | ||
602 | EXPORT_SYMBOL(__netdev_watchdog_up); | 616 | EXPORT_SYMBOL(__netdev_watchdog_up); |
617 | EXPORT_SYMBOL(netif_carrier_on); | ||
618 | EXPORT_SYMBOL(netif_carrier_off); | ||
603 | EXPORT_SYMBOL(noop_qdisc); | 619 | EXPORT_SYMBOL(noop_qdisc); |
604 | EXPORT_SYMBOL(noop_qdisc_ops); | 620 | EXPORT_SYMBOL(noop_qdisc_ops); |
605 | EXPORT_SYMBOL(qdisc_create_dflt); | 621 | EXPORT_SYMBOL(qdisc_create_dflt); |
606 | EXPORT_SYMBOL(qdisc_alloc); | 622 | EXPORT_SYMBOL(qdisc_alloc); |
607 | EXPORT_SYMBOL(qdisc_destroy); | 623 | EXPORT_SYMBOL(qdisc_destroy); |
608 | EXPORT_SYMBOL(qdisc_reset); | 624 | EXPORT_SYMBOL(qdisc_reset); |
609 | EXPORT_SYMBOL(qdisc_restart); | 625 | EXPORT_SYMBOL(qdisc_restart); |
610 | EXPORT_SYMBOL(qdisc_lock_tree); | 626 | EXPORT_SYMBOL(qdisc_lock_tree); |
611 | EXPORT_SYMBOL(qdisc_unlock_tree); | 627 | EXPORT_SYMBOL(qdisc_unlock_tree); |
612 | 628 |