Commit ad712087f78469a783281d0d15657edfbff69594

Authored by Patrick McHardy
Committed by David S. Miller
1 parent 2029cc2c84

[VLAN]: Update list address

VLAN related mail should go to netdev.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 2 changed files with 2 additions and 2 deletions

net/8021q/vlan.c

@@ -1,19 +1,19 @@
 /*
  * INET 802.1Q VLAN
  * Ethernet-type device handling.
  *
  * Authors:     Ben Greear <greearb@candelatech.com>
- *              Please send support related email to: vlan@scry.wanfear.com
+ *              Please send support related email to: netdev@vger.kernel.org
  *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
  *
  * Fixes:
  *              Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
  *              Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
  *              Correct all the locking - David S. Miller <davem@redhat.com>;
  *              Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
net/8021q/vlan_dev.c
1 /* -*- linux-c -*- 1 /* -*- linux-c -*-
2 * INET 802.1Q VLAN 2 * INET 802.1Q VLAN
3 * Ethernet-type device handling. 3 * Ethernet-type device handling.
4 * 4 *
5 * Authors: Ben Greear <greearb@candelatech.com> 5 * Authors: Ben Greear <greearb@candelatech.com>
6 * Please send support related email to: vlan@scry.wanfear.com 6 * Please send support related email to: netdev@vger.kernel.org
7 * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html 7 * VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
8 * 8 *
9 * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com> 9 * Fixes: Mar 22 2001: Martin Bokaemper <mbokaemper@unispherenetworks.com>
10 * - reset skb->pkt_type on incoming packets when MAC was changed 10 * - reset skb->pkt_type on incoming packets when MAC was changed
11 * - see that changed MAC is saddr for outgoing packets 11 * - see that changed MAC is saddr for outgoing packets
12 * Oct 20, 2001: Ard van Breeman: 12 * Oct 20, 2001: Ard van Breeman:
13 * - Fix MC-list, finally. 13 * - Fix MC-list, finally.
14 * - Flush MC-list on VLAN destroy. 14 * - Flush MC-list on VLAN destroy.
15 * 15 *
16 * 16 *
17 * This program is free software; you can redistribute it and/or 17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License 18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version 19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version. 20 * 2 of the License, or (at your option) any later version.
21 */ 21 */
22 22
23 #include <linux/module.h> 23 #include <linux/module.h>
24 #include <linux/mm.h> 24 #include <linux/mm.h>
25 #include <linux/in.h> 25 #include <linux/in.h>
26 #include <linux/init.h> 26 #include <linux/init.h>
27 #include <asm/uaccess.h> /* for copy_from_user */ 27 #include <asm/uaccess.h> /* for copy_from_user */
28 #include <linux/skbuff.h> 28 #include <linux/skbuff.h>
29 #include <linux/netdevice.h> 29 #include <linux/netdevice.h>
30 #include <linux/etherdevice.h> 30 #include <linux/etherdevice.h>
31 #include <net/datalink.h> 31 #include <net/datalink.h>
32 #include <net/p8022.h> 32 #include <net/p8022.h>
33 #include <net/arp.h> 33 #include <net/arp.h>
34 34
35 #include "vlan.h" 35 #include "vlan.h"
36 #include "vlanproc.h" 36 #include "vlanproc.h"
37 #include <linux/if_vlan.h> 37 #include <linux/if_vlan.h>
38 #include <net/ip.h> 38 #include <net/ip.h>
39 39
40 /* 40 /*
41 * Rebuild the Ethernet MAC header. This is called after an ARP 41 * Rebuild the Ethernet MAC header. This is called after an ARP
42 * (or in future other address resolution) has completed on this 42 * (or in future other address resolution) has completed on this
43 * sk_buff. We now let ARP fill in the other fields. 43 * sk_buff. We now let ARP fill in the other fields.
44 * 44 *
45 * This routine CANNOT use cached dst->neigh! 45 * This routine CANNOT use cached dst->neigh!
46 * Really, it is used only when dst->neigh is wrong. 46 * Really, it is used only when dst->neigh is wrong.
47 * 47 *
48 * TODO: This needs a checkup, I'm ignorant here. --BLG 48 * TODO: This needs a checkup, I'm ignorant here. --BLG
49 */ 49 */
50 static int vlan_dev_rebuild_header(struct sk_buff *skb) 50 static int vlan_dev_rebuild_header(struct sk_buff *skb)
51 { 51 {
52 struct net_device *dev = skb->dev; 52 struct net_device *dev = skb->dev;
53 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); 53 struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
54 54
55 switch (veth->h_vlan_encapsulated_proto) { 55 switch (veth->h_vlan_encapsulated_proto) {
56 #ifdef CONFIG_INET 56 #ifdef CONFIG_INET
57 case __constant_htons(ETH_P_IP): 57 case __constant_htons(ETH_P_IP):
58 58
59 /* TODO: Confirm this will work with VLAN headers... */ 59 /* TODO: Confirm this will work with VLAN headers... */
60 return arp_find(veth->h_dest, skb); 60 return arp_find(veth->h_dest, skb);
61 #endif 61 #endif
62 default: 62 default:
63 pr_debug("%s: unable to resolve type %X addresses.\n", 63 pr_debug("%s: unable to resolve type %X addresses.\n",
64 dev->name, ntohs(veth->h_vlan_encapsulated_proto)); 64 dev->name, ntohs(veth->h_vlan_encapsulated_proto));
65 65
66 memcpy(veth->h_source, dev->dev_addr, ETH_ALEN); 66 memcpy(veth->h_source, dev->dev_addr, ETH_ALEN);
67 break; 67 break;
68 } 68 }
69 69
70 return 0; 70 return 0;
71 } 71 }
72 72
73 static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) 73 static inline struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb)
74 { 74 {
75 if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) { 75 if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) {
76 if (skb_shared(skb) || skb_cloned(skb)) { 76 if (skb_shared(skb) || skb_cloned(skb)) {
77 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); 77 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
78 kfree_skb(skb); 78 kfree_skb(skb);
79 skb = nskb; 79 skb = nskb;
80 } 80 }
81 if (skb) { 81 if (skb) {
82 /* Lifted from Gleb's VLAN code... */ 82 /* Lifted from Gleb's VLAN code... */
83 memmove(skb->data - ETH_HLEN, 83 memmove(skb->data - ETH_HLEN,
84 skb->data - VLAN_ETH_HLEN, 12); 84 skb->data - VLAN_ETH_HLEN, 12);
85 skb->mac_header += VLAN_HLEN; 85 skb->mac_header += VLAN_HLEN;
86 } 86 }
87 } 87 }
88 88
89 return skb; 89 return skb;
90 } 90 }
91 91
92 /* 92 /*
93 * Determine the packet's protocol ID. The rule here is that we 93 * Determine the packet's protocol ID. The rule here is that we
94 * assume 802.3 if the type field is short enough to be a length. 94 * assume 802.3 if the type field is short enough to be a length.
95 * This is normal practice and works for any 'now in use' protocol. 95 * This is normal practice and works for any 'now in use' protocol.
96 * 96 *
97 * Also, at this point we assume that we ARE dealing exclusively with 97 * Also, at this point we assume that we ARE dealing exclusively with
98 * VLAN packets, or packets that should be made into VLAN packets based 98 * VLAN packets, or packets that should be made into VLAN packets based
99 * on a default VLAN ID. 99 * on a default VLAN ID.
100 * 100 *
101 * NOTE: Should be similar to ethernet/eth.c. 101 * NOTE: Should be similar to ethernet/eth.c.
102 * 102 *
103 * SANITY NOTE: This method is called when a packet is moving up the stack 103 * SANITY NOTE: This method is called when a packet is moving up the stack
104 * towards userland. To get here, it would have already passed 104 * towards userland. To get here, it would have already passed
105 * through the ethernet/eth.c eth_type_trans() method. 105 * through the ethernet/eth.c eth_type_trans() method.
106 * SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be 106 * SANITY NOTE 2: We are referencing to the VLAN_HDR frields, which MAY be
107 * stored UNALIGNED in the memory. RISC systems don't like 107 * stored UNALIGNED in the memory. RISC systems don't like
108 * such cases very much... 108 * such cases very much...
109 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be 109 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
110 * aligned, so there doesn't need to be any of the unaligned 110 * aligned, so there doesn't need to be any of the unaligned
111 * stuff. It has been commented out now... --Ben 111 * stuff. It has been commented out now... --Ben
112 * 112 *
113 */ 113 */
114 int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev, 114 int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
115 struct packet_type *ptype, struct net_device *orig_dev) 115 struct packet_type *ptype, struct net_device *orig_dev)
116 { 116 {
117 unsigned char *rawp = NULL; 117 unsigned char *rawp = NULL;
118 struct vlan_hdr *vhdr; 118 struct vlan_hdr *vhdr;
119 unsigned short vid; 119 unsigned short vid;
120 struct net_device_stats *stats; 120 struct net_device_stats *stats;
121 unsigned short vlan_TCI; 121 unsigned short vlan_TCI;
122 __be16 proto; 122 __be16 proto;
123 123
124 if (dev->nd_net != &init_net) { 124 if (dev->nd_net != &init_net) {
125 kfree_skb(skb); 125 kfree_skb(skb);
126 return -1; 126 return -1;
127 } 127 }
128 128
129 skb = skb_share_check(skb, GFP_ATOMIC); 129 skb = skb_share_check(skb, GFP_ATOMIC);
130 if (skb == NULL) 130 if (skb == NULL)
131 return -1; 131 return -1;
132 132
133 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) { 133 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) {
134 kfree_skb(skb); 134 kfree_skb(skb);
135 return -1; 135 return -1;
136 } 136 }
137 137
138 vhdr = (struct vlan_hdr *)(skb->data); 138 vhdr = (struct vlan_hdr *)(skb->data);
139 139
140 /* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */ 140 /* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
141 vlan_TCI = ntohs(vhdr->h_vlan_TCI); 141 vlan_TCI = ntohs(vhdr->h_vlan_TCI);
142 142
143 vid = (vlan_TCI & VLAN_VID_MASK); 143 vid = (vlan_TCI & VLAN_VID_MASK);
144 144
145 /* Ok, we will find the correct VLAN device, strip the header, 145 /* Ok, we will find the correct VLAN device, strip the header,
146 * and then go on as usual. 146 * and then go on as usual.
147 */ 147 */
148 148
149 /* We have 12 bits of vlan ID. 149 /* We have 12 bits of vlan ID.
150 * 150 *
151 * We must not drop allow preempt until we hold a 151 * We must not drop allow preempt until we hold a
152 * reference to the device (netif_rx does that) or we 152 * reference to the device (netif_rx does that) or we
153 * fail. 153 * fail.
154 */ 154 */
155 155
156 rcu_read_lock(); 156 rcu_read_lock();
157 skb->dev = __find_vlan_dev(dev, vid); 157 skb->dev = __find_vlan_dev(dev, vid);
158 if (!skb->dev) { 158 if (!skb->dev) {
159 rcu_read_unlock(); 159 rcu_read_unlock();
160 pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n", 160 pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
161 __FUNCTION__, (unsigned int)vid, dev->name); 161 __FUNCTION__, (unsigned int)vid, dev->name);
162 kfree_skb(skb); 162 kfree_skb(skb);
163 return -1; 163 return -1;
164 } 164 }
165 165
166 skb->dev->last_rx = jiffies; 166 skb->dev->last_rx = jiffies;
167 167
168 /* Bump the rx counters for the VLAN device. */ 168 /* Bump the rx counters for the VLAN device. */
169 stats = &skb->dev->stats; 169 stats = &skb->dev->stats;
170 stats->rx_packets++; 170 stats->rx_packets++;
171 stats->rx_bytes += skb->len; 171 stats->rx_bytes += skb->len;
172 172
173 /* Take off the VLAN header (4 bytes currently) */ 173 /* Take off the VLAN header (4 bytes currently) */
174 skb_pull_rcsum(skb, VLAN_HLEN); 174 skb_pull_rcsum(skb, VLAN_HLEN);
175 175
176 /* 176 /*
177 * Deal with ingress priority mapping. 177 * Deal with ingress priority mapping.
178 */ 178 */
179 skb->priority = vlan_get_ingress_priority(skb->dev, 179 skb->priority = vlan_get_ingress_priority(skb->dev,
180 ntohs(vhdr->h_vlan_TCI)); 180 ntohs(vhdr->h_vlan_TCI));
181 181
182 pr_debug("%s: priority: %u for TCI: %hu\n", 182 pr_debug("%s: priority: %u for TCI: %hu\n",
183 __FUNCTION__, skb->priority, ntohs(vhdr->h_vlan_TCI)); 183 __FUNCTION__, skb->priority, ntohs(vhdr->h_vlan_TCI));
184 184
185 /* The ethernet driver already did the pkt_type calculations 185 /* The ethernet driver already did the pkt_type calculations
186 * for us... 186 * for us...
187 */ 187 */
188 switch (skb->pkt_type) { 188 switch (skb->pkt_type) {
189 case PACKET_BROADCAST: /* Yeah, stats collect these together.. */ 189 case PACKET_BROADCAST: /* Yeah, stats collect these together.. */
190 /* stats->broadcast ++; // no such counter :-( */ 190 /* stats->broadcast ++; // no such counter :-( */
191 break; 191 break;
192 192
193 case PACKET_MULTICAST: 193 case PACKET_MULTICAST:
194 stats->multicast++; 194 stats->multicast++;
195 break; 195 break;
196 196
197 case PACKET_OTHERHOST: 197 case PACKET_OTHERHOST:
198 /* Our lower layer thinks this is not local, let's make sure. 198 /* Our lower layer thinks this is not local, let's make sure.
199 * This allows the VLAN to have a different MAC than the 199 * This allows the VLAN to have a different MAC than the
200 * underlying device, and still route correctly. 200 * underlying device, and still route correctly.
201 */ 201 */
202 if (!compare_ether_addr(eth_hdr(skb)->h_dest, 202 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
203 skb->dev->dev_addr)) 203 skb->dev->dev_addr))
204 /* It is for our (changed) MAC-address! */ 204 /* It is for our (changed) MAC-address! */
205 skb->pkt_type = PACKET_HOST; 205 skb->pkt_type = PACKET_HOST;
206 break; 206 break;
207 default: 207 default:
208 break; 208 break;
209 } 209 }
210 210
211 /* Was a VLAN packet, grab the encapsulated protocol, which the layer 211 /* Was a VLAN packet, grab the encapsulated protocol, which the layer
212 * three protocols care about. 212 * three protocols care about.
213 */ 213 */
214 /* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */ 214 /* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */
215 proto = vhdr->h_vlan_encapsulated_proto; 215 proto = vhdr->h_vlan_encapsulated_proto;
216 216
217 skb->protocol = proto; 217 skb->protocol = proto;
218 if (ntohs(proto) >= 1536) { 218 if (ntohs(proto) >= 1536) {
219 /* place it back on the queue to be handled by 219 /* place it back on the queue to be handled by
220 * true layer 3 protocols. 220 * true layer 3 protocols.
221 */ 221 */
222 222
223 /* See if we are configured to re-write the VLAN header 223 /* See if we are configured to re-write the VLAN header
224 * to make it look like ethernet... 224 * to make it look like ethernet...
225 */ 225 */
226 skb = vlan_check_reorder_header(skb); 226 skb = vlan_check_reorder_header(skb);
227 227
228 /* Can be null if skb-clone fails when re-ordering */ 228 /* Can be null if skb-clone fails when re-ordering */
229 if (skb) { 229 if (skb) {
230 netif_rx(skb); 230 netif_rx(skb);
231 } else { 231 } else {
232 /* TODO: Add a more specific counter here. */ 232 /* TODO: Add a more specific counter here. */
233 stats->rx_errors++; 233 stats->rx_errors++;
234 } 234 }
235 rcu_read_unlock(); 235 rcu_read_unlock();
236 return 0; 236 return 0;
237 } 237 }
238 238
239 rawp = skb->data; 239 rawp = skb->data;
240 240
241 /* 241 /*
242 * This is a magic hack to spot IPX packets. Older Novell breaks 242 * This is a magic hack to spot IPX packets. Older Novell breaks
243 * the protocol design and runs IPX over 802.3 without an 802.2 LLC 243 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
244 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This 244 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
245 * won't work for fault tolerant netware but does for the rest. 245 * won't work for fault tolerant netware but does for the rest.
246 */ 246 */
247 if (*(unsigned short *)rawp == 0xFFFF) { 247 if (*(unsigned short *)rawp == 0xFFFF) {
248 skb->protocol = htons(ETH_P_802_3); 248 skb->protocol = htons(ETH_P_802_3);
249 /* place it back on the queue to be handled by true layer 3 249 /* place it back on the queue to be handled by true layer 3
250 * protocols. */ 250 * protocols. */
251 251
252 /* See if we are configured to re-write the VLAN header 252 /* See if we are configured to re-write the VLAN header
253 * to make it look like ethernet... 253 * to make it look like ethernet...
254 */ 254 */
255 skb = vlan_check_reorder_header(skb); 255 skb = vlan_check_reorder_header(skb);
256 256
257 /* Can be null if skb-clone fails when re-ordering */ 257 /* Can be null if skb-clone fails when re-ordering */
258 if (skb) { 258 if (skb) {
259 netif_rx(skb); 259 netif_rx(skb);
260 } else { 260 } else {
261 /* TODO: Add a more specific counter here. */ 261 /* TODO: Add a more specific counter here. */
262 stats->rx_errors++; 262 stats->rx_errors++;
263 } 263 }
264 rcu_read_unlock(); 264 rcu_read_unlock();
265 return 0; 265 return 0;
266 } 266 }
267 267
268 /* 268 /*
269 * Real 802.2 LLC 269 * Real 802.2 LLC
270 */ 270 */
271 skb->protocol = htons(ETH_P_802_2); 271 skb->protocol = htons(ETH_P_802_2);
272 /* place it back on the queue to be handled by upper layer protocols. 272 /* place it back on the queue to be handled by upper layer protocols.
273 */ 273 */
274 274
275 /* See if we are configured to re-write the VLAN header 275 /* See if we are configured to re-write the VLAN header
276 * to make it look like ethernet... 276 * to make it look like ethernet...
277 */ 277 */
278 skb = vlan_check_reorder_header(skb); 278 skb = vlan_check_reorder_header(skb);
279 279
280 /* Can be null if skb-clone fails when re-ordering */ 280 /* Can be null if skb-clone fails when re-ordering */
281 if (skb) { 281 if (skb) {
282 netif_rx(skb); 282 netif_rx(skb);
283 } else { 283 } else {
284 /* TODO: Add a more specific counter here. */ 284 /* TODO: Add a more specific counter here. */
285 stats->rx_errors++; 285 stats->rx_errors++;
286 } 286 }
287 rcu_read_unlock(); 287 rcu_read_unlock();
288 return 0; 288 return 0;
289 } 289 }
290 290
291 static inline unsigned short 291 static inline unsigned short
292 vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb) 292 vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
293 { 293 {
294 struct vlan_priority_tci_mapping *mp; 294 struct vlan_priority_tci_mapping *mp;
295 295
296 mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)]; 296 mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)];
297 while (mp) { 297 while (mp) {
298 if (mp->priority == skb->priority) { 298 if (mp->priority == skb->priority) {
299 return mp->vlan_qos; /* This should already be shifted 299 return mp->vlan_qos; /* This should already be shifted
300 * to mask correctly with the 300 * to mask correctly with the
301 * VLAN's TCI */ 301 * VLAN's TCI */
302 } 302 }
303 mp = mp->next; 303 mp = mp->next;
304 } 304 }
305 return 0; 305 return 0;
306 } 306 }
307 307
308 /* 308 /*
309 * Create the VLAN header for an arbitrary protocol layer 309 * Create the VLAN header for an arbitrary protocol layer
310 * 310 *
311 * saddr=NULL means use device source address 311 * saddr=NULL means use device source address
312 * daddr=NULL means leave destination address (eg unresolved arp) 312 * daddr=NULL means leave destination address (eg unresolved arp)
313 * 313 *
314 * This is called when the SKB is moving down the stack towards the 314 * This is called when the SKB is moving down the stack towards the
315 * physical devices. 315 * physical devices.
316 */ 316 */
317 static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, 317 static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
318 unsigned short type, 318 unsigned short type,
319 const void *daddr, const void *saddr, 319 const void *daddr, const void *saddr,
320 unsigned int len) 320 unsigned int len)
321 { 321 {
322 struct vlan_hdr *vhdr; 322 struct vlan_hdr *vhdr;
323 unsigned short veth_TCI = 0; 323 unsigned short veth_TCI = 0;
324 int rc = 0; 324 int rc = 0;
325 int build_vlan_header = 0; 325 int build_vlan_header = 0;
326 struct net_device *vdev = dev; 326 struct net_device *vdev = dev;
327 327
328 pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n", 328 pr_debug("%s: skb: %p type: %hx len: %u vlan_id: %hx, daddr: %p\n",
329 __FUNCTION__, skb, type, len, vlan_dev_info(dev)->vlan_id, 329 __FUNCTION__, skb, type, len, vlan_dev_info(dev)->vlan_id,
330 daddr); 330 daddr);
331 331
332 /* build vlan header only if re_order_header flag is NOT set. This 332 /* build vlan header only if re_order_header flag is NOT set. This
333 * fixes some programs that get confused when they see a VLAN device 333 * fixes some programs that get confused when they see a VLAN device
334 * sending a frame that is VLAN encoded (the consensus is that the VLAN 334 * sending a frame that is VLAN encoded (the consensus is that the VLAN
335 * device should look completely like an Ethernet device when the 335 * device should look completely like an Ethernet device when the
336 * REORDER_HEADER flag is set) The drawback to this is some extra 336 * REORDER_HEADER flag is set) The drawback to this is some extra
337 * header shuffling in the hard_start_xmit. Users can turn off this 337 * header shuffling in the hard_start_xmit. Users can turn off this
338 * REORDER behaviour with the vconfig tool. 338 * REORDER behaviour with the vconfig tool.
339 */ 339 */
340 if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) 340 if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR))
341 build_vlan_header = 1; 341 build_vlan_header = 1;
342
343         if (build_vlan_header) {
344                 vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
345
346                 /* build the four bytes that make this a VLAN header. */
347
348                 /* Now, construct the second two bytes. This field looks
349                  * something like:
350                  * usr_priority: 3 bits (high bits)
351                  * CFI           1 bit
352                  * VLAN ID       12 bits (low bits)
353                  *
354                  */
355                 veth_TCI = vlan_dev_info(dev)->vlan_id;
356                 veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
357
358                 vhdr->h_vlan_TCI = htons(veth_TCI);
359
360                 /*
361                  * Set the protocol type. For a packet of type ETH_P_802_3 we
362                  * put the length in here instead. It is up to the 802.2
363                  * layer to carry protocol information.
364                  */
365
366                 if (type != ETH_P_802_3)
367                         vhdr->h_vlan_encapsulated_proto = htons(type);
368                 else
369                         vhdr->h_vlan_encapsulated_proto = htons(len);
370
371                 skb->protocol = htons(ETH_P_8021Q);
372                 skb_reset_network_header(skb);
373         }
374
375         /* Before delegating work to the lower layer, enter our MAC address */
376         if (saddr == NULL)
377                 saddr = dev->dev_addr;
378
379         dev = vlan_dev_info(dev)->real_dev;
380
381         /* MPLS can send us skbuffs without enough headroom. This check
382          * grows the skb if it does not have enough. Not a beautiful
383          * solution, so a counter is bumped so that users can see it
384          * happening, if they care.
385          */
386
387         /* NOTE: This may still break if the underlying device is not the
388          * final device (and thus there are more headers to add...). It
389          * should work for plain Ethernet though.
390          */
391         if (skb_headroom(skb) < dev->hard_header_len) {
392                 struct sk_buff *sk_tmp = skb;
393                 skb = skb_realloc_headroom(sk_tmp, dev->hard_header_len);
394                 kfree_skb(sk_tmp);
395                 if (skb == NULL) {
396                         struct net_device_stats *stats = &vdev->stats;
397                         stats->tx_dropped++;
398                         return -ENOMEM;
399                 }
400                 vlan_dev_info(vdev)->cnt_inc_headroom_on_tx++;
401                 pr_debug("%s: %s: had to grow skb\n", __FUNCTION__, vdev->name);
402         }
403
404         if (build_vlan_header) {
405                 /* Now make the underlying real hard header */
406                 rc = dev_hard_header(skb, dev, ETH_P_8021Q, daddr, saddr,
407                                      len + VLAN_HLEN);
408                 if (rc > 0)
409                         rc += VLAN_HLEN;
410                 else if (rc < 0)
411                         rc -= VLAN_HLEN;
412         } else
413                 /* If here, then we'll just make a normal looking Ethernet
414                  * frame, but the hard_start_xmit method will insert the tag
415                  * (it has to be able to do this for bridged and other skbs
416                  * that don't come down the protocol stack in an orderly manner).
417                  */
418                 rc = dev_hard_header(skb, dev, type, daddr, saddr, len);
419
420         return rc;
421 }
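The TCI layout spelled out in the comments above (3 user-priority bits, 1 CFI bit, 12 VLAN ID bits) can be exercised with a small stand-alone sketch; build_tci() below is a hypothetical helper that only mirrors that bit layout and is not part of the kernel code.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the TCI layout used above:
 * priority in bits 15-13, CFI in bit 12, VLAN ID in bits 11-0.
 */
static uint16_t build_tci(uint16_t vlan_id, uint8_t prio, int cfi)
{
        return (uint16_t)(((prio & 0x7) << 13) | ((cfi & 0x1) << 12) |
                          (vlan_id & 0x0FFF));
}

int main(void)
{
        /* VLAN 100 with user priority 5 and CFI clear -> 0xa064 */
        printf("TCI = 0x%04x\n", (unsigned)build_tci(100, 5, 0));
        return 0;
}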
422
423 static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
424 {
425         struct net_device_stats *stats = &dev->stats;
426         struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
427
428         /* Handle non-VLAN frames if they are sent to us, for example by DHCP.
429          *
430          * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING
431          * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs...
432          */
433
434         if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
435             vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
436                 int orig_headroom = skb_headroom(skb);
437                 unsigned short veth_TCI;
438
439                 /* This is not a VLAN frame...but we can fix that! */
440                 vlan_dev_info(dev)->cnt_encap_on_xmit++;
441
442                 pr_debug("%s: proto to encap: 0x%hx\n",
443                          __FUNCTION__, htons(veth->h_vlan_proto));
444                 /* Construct the second two bytes. This field looks something
445                  * like:
446                  * usr_priority: 3 bits (high bits)
447                  * CFI           1 bit
448                  * VLAN ID       12 bits (low bits)
449                  */
450                 veth_TCI = vlan_dev_info(dev)->vlan_id;
451                 veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
452
453                 skb = __vlan_put_tag(skb, veth_TCI);
454                 if (!skb) {
455                         stats->tx_dropped++;
456                         return 0;
457                 }
458
459                 if (orig_headroom < VLAN_HLEN)
460                         vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
461         }
462
463         pr_debug("%s: about to send skb: %p to dev: %s\n",
464                  __FUNCTION__, skb, skb->dev->name);
465         pr_debug(" " MAC_FMT " " MAC_FMT " %4hx %4hx %4hx\n",
466                  veth->h_dest[0], veth->h_dest[1], veth->h_dest[2],
467                  veth->h_dest[3], veth->h_dest[4], veth->h_dest[5],
468                  veth->h_source[0], veth->h_source[1], veth->h_source[2],
469                  veth->h_source[3], veth->h_source[4], veth->h_source[5],
470                  veth->h_vlan_proto, veth->h_vlan_TCI,
471                  veth->h_vlan_encapsulated_proto);
472
473         stats->tx_packets++; /* for statistics only */
474         stats->tx_bytes += skb->len;
475
476         skb->dev = vlan_dev_info(dev)->real_dev;
477         dev_queue_xmit(skb);
478
479         return 0;
480 }
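For reference, the vlan_ethhdr fields dereferenced above sit on the wire as a plain DIX Ethernet header followed by the 802.1Q tag. The stand-alone sketch below mirrors that layout in user space; the struct name and the values are illustrative, not kernel definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Illustrative user-space mirror of the on-wire layout the xmit path
 * assumes: dst MAC, src MAC, 0x8100, TCI, encapsulated protocol.
 */
struct vlan_eth_frame {
        uint8_t  h_dest[6];
        uint8_t  h_source[6];
        uint16_t h_vlan_proto;              /* htons(0x8100) when tagged */
        uint16_t h_vlan_TCI;
        uint16_t h_vlan_encapsulated_proto; /* e.g. htons(0x0800) for IPv4 */
} __attribute__((packed));

int main(void)
{
        struct vlan_eth_frame f;

        memset(&f, 0, sizeof(f));
        f.h_vlan_proto = htons(0x8100);
        f.h_vlan_TCI = htons(0xa064);       /* priority 5, VLAN 100 */
        f.h_vlan_encapsulated_proto = htons(0x0800);

        printf("tagged: %s, vid: %u\n",
               f.h_vlan_proto == htons(0x8100) ? "yes" : "no",
               (unsigned)(ntohs(f.h_vlan_TCI) & 0x0fff));
        return 0;
}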
481
482 static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
483                                             struct net_device *dev)
484 {
485         struct net_device_stats *stats = &dev->stats;
486         unsigned short veth_TCI;
487
488         /* Construct the second two bytes. This field looks something
489          * like:
490          * usr_priority: 3 bits (high bits)
491          * CFI           1 bit
492          * VLAN ID       12 bits (low bits)
493          */
494         veth_TCI = vlan_dev_info(dev)->vlan_id;
495         veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);
496         skb = __vlan_hwaccel_put_tag(skb, veth_TCI);
497
498         stats->tx_packets++;
499         stats->tx_bytes += skb->len;
500
501         skb->dev = vlan_dev_info(dev)->real_dev;
502         dev_queue_xmit(skb);
503
504         return 0;
505 }
506
507 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
508 {
509         /* TODO: we need to make sure the underlying layer can handle it,
510          * maybe an IFF_VLAN_CAPABLE flag for devices?
511          */
512         if (vlan_dev_info(dev)->real_dev->mtu < new_mtu)
513                 return -ERANGE;
514
515         dev->mtu = new_mtu;
516
517         return 0;
518 }
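To make the size relationship concrete: the 802.1Q tag adds VLAN_HLEN (4) bytes of header on the wire, which is why a VLAN device cannot advertise an MTU larger than its real device's MTU. The calculation below is a plain user-space illustration, not a kernel API.

#include <stdio.h>

#define VLAN_HLEN 4     /* 802.1Q tag: TPID + TCI */
#define ETH_HLEN  14    /* dst MAC + src MAC + ethertype */

int main(void)
{
        int mtu = 1500;

        /* Same payload, but the tagged frame carries four extra header bytes. */
        printf("untagged frame: %d header bytes + %d payload\n", ETH_HLEN, mtu);
        printf("tagged frame:   %d header bytes + %d payload\n",
               ETH_HLEN + VLAN_HLEN, mtu);
        return 0;
}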
519
520 void vlan_dev_set_ingress_priority(const struct net_device *dev,
521                                    u32 skb_prio, short vlan_prio)
522 {
523         struct vlan_dev_info *vlan = vlan_dev_info(dev);
524
525         if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio)
526                 vlan->nr_ingress_mappings--;
527         else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio)
528                 vlan->nr_ingress_mappings++;
529
530         vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio;
531 }
532
533 int vlan_dev_set_egress_priority(const struct net_device *dev,
534                                  u32 skb_prio, short vlan_prio)
535 {
536         struct vlan_dev_info *vlan = vlan_dev_info(dev);
537         struct vlan_priority_tci_mapping *mp = NULL;
538         struct vlan_priority_tci_mapping *np;
539         u32 vlan_qos = (vlan_prio << 13) & 0xE000;
540
541         /* See if a priority mapping exists already... */
542         mp = vlan->egress_priority_map[skb_prio & 0xF];
543         while (mp) {
544                 if (mp->priority == skb_prio) {
545                         if (mp->vlan_qos && !vlan_qos)
546                                 vlan->nr_egress_mappings--;
547                         else if (!mp->vlan_qos && vlan_qos)
548                                 vlan->nr_egress_mappings++;
549                         mp->vlan_qos = vlan_qos;
550                         return 0;
551                 }
552                 mp = mp->next;
553         }
554
555         /* Create a new mapping then. */
556         mp = vlan->egress_priority_map[skb_prio & 0xF];
557         np = kmalloc(sizeof(struct vlan_priority_tci_mapping), GFP_KERNEL);
558         if (!np)
559                 return -ENOBUFS;
560
561         np->next = mp;
562         np->priority = skb_prio;
563         np->vlan_qos = vlan_qos;
564         vlan->egress_priority_map[skb_prio & 0xF] = np;
565         if (vlan_qos)
566                 vlan->nr_egress_mappings++;
567         return 0;
568 }
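A worked example of the shift above: mapping skb->priority 7 to 802.1p priority 5 stores vlan_qos = (5 << 13) & 0xE000 = 0xA000, which is later ORed into the TCI by vlan_dev_get_egress_qos_mask(). The sketch below re-creates the bucket-plus-list lookup in user space; the names and sizes are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the egress map: 16 buckets keyed by
 * skb_prio & 0xF, each holding a singly linked list of mappings.
 */
struct prio_map {
        struct prio_map *next;
        uint32_t priority;   /* skb->priority */
        uint16_t vlan_qos;   /* already shifted into TCI bits 15-13 */
};

static uint16_t lookup_qos(struct prio_map *buckets[16], uint32_t skb_prio)
{
        struct prio_map *mp;

        for (mp = buckets[skb_prio & 0xF]; mp; mp = mp->next)
                if (mp->priority == skb_prio)
                        return mp->vlan_qos;
        return 0;
}

int main(void)
{
        struct prio_map entry = { .next = NULL, .priority = 7,
                                  .vlan_qos = (5 << 13) & 0xE000 };
        struct prio_map *buckets[16] = { NULL };

        buckets[7 & 0xF] = &entry;
        printf("qos for prio 7: 0x%04x\n", (unsigned)lookup_qos(buckets, 7));
        return 0;
}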
569
570 /* Flags are defined in the vlan_flags enum in include/linux/if_vlan.h. */
571 int vlan_dev_set_vlan_flag(const struct net_device *dev,
572                            u32 flag, short flag_val)
573 {
574         /* verify flag is supported */
575         if (flag == VLAN_FLAG_REORDER_HDR) {
576                 if (flag_val)
577                         vlan_dev_info(dev)->flags |= VLAN_FLAG_REORDER_HDR;
578                 else
579                         vlan_dev_info(dev)->flags &= ~VLAN_FLAG_REORDER_HDR;
580                 return 0;
581         }
582         return -EINVAL;
583 }
584
585 void vlan_dev_get_realdev_name(const struct net_device *dev, char *result)
586 {
587         strncpy(result, vlan_dev_info(dev)->real_dev->name, 23);
588 }
589
590 void vlan_dev_get_vid(const struct net_device *dev, unsigned short *result)
591 {
592         *result = vlan_dev_info(dev)->vlan_id;
593 }
594
595 static int vlan_dev_open(struct net_device *dev)
596 {
597         struct vlan_dev_info *vlan = vlan_dev_info(dev);
598         struct net_device *real_dev = vlan->real_dev;
599         int err;
600
601         if (!(real_dev->flags & IFF_UP))
602                 return -ENETDOWN;
603
604         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
605                 err = dev_unicast_add(real_dev, dev->dev_addr, ETH_ALEN);
606                 if (err < 0)
607                         return err;
608         }
609         memcpy(vlan->real_dev_addr, real_dev->dev_addr, ETH_ALEN);
610
611         if (dev->flags & IFF_ALLMULTI)
612                 dev_set_allmulti(real_dev, 1);
613         if (dev->flags & IFF_PROMISC)
614                 dev_set_promiscuity(real_dev, 1);
615
616         return 0;
617 }
618
619 static int vlan_dev_stop(struct net_device *dev)
620 {
621         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
622
623         dev_mc_unsync(real_dev, dev);
624         if (dev->flags & IFF_ALLMULTI)
625                 dev_set_allmulti(real_dev, -1);
626         if (dev->flags & IFF_PROMISC)
627                 dev_set_promiscuity(real_dev, -1);
628
629         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
630                 dev_unicast_delete(real_dev, dev->dev_addr, dev->addr_len);
631
632         return 0;
633 }
634
635 static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
636 {
637         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
638         struct sockaddr *addr = p;
639         int err;
640
641         if (!is_valid_ether_addr(addr->sa_data))
642                 return -EADDRNOTAVAIL;
643
644         if (!(dev->flags & IFF_UP))
645                 goto out;
646
647         if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
648                 err = dev_unicast_add(real_dev, addr->sa_data, ETH_ALEN);
649                 if (err < 0)
650                         return err;
651         }
652
653         if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
654                 dev_unicast_delete(real_dev, dev->dev_addr, ETH_ALEN);
655
656 out:
657         memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
658         return 0;
659 }
660
661 static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
662 {
663         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
664         struct ifreq ifrr;
665         int err = -EOPNOTSUPP;
666
667         strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ);
668         ifrr.ifr_ifru = ifr->ifr_ifru;
669
670         switch (cmd) {
671         case SIOCGMIIPHY:
672         case SIOCGMIIREG:
673         case SIOCSMIIREG:
674                 if (real_dev->do_ioctl && netif_device_present(real_dev))
675                         err = real_dev->do_ioctl(real_dev, &ifrr, cmd);
676                 break;
677         }
678
679         if (!err)
680                 ifr->ifr_ifru = ifrr.ifr_ifru;
681
682         return err;
683 }
684
685 static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
686 {
687         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
688
689         if (change & IFF_ALLMULTI)
690                 dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
691         if (change & IFF_PROMISC)
692                 dev_set_promiscuity(real_dev, dev->flags & IFF_PROMISC ? 1 : -1);
693 }
694
695 static void vlan_dev_set_multicast_list(struct net_device *vlan_dev)
696 {
697         dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
698 }
699
700 /*
701  * vlan network devices have devices nesting below them, and are a special
702  * "super class" of normal network devices; split their locks off into a
703  * separate class since they always nest.
704  */
705 static struct lock_class_key vlan_netdev_xmit_lock_key;
706
707 static const struct header_ops vlan_header_ops = {
708         .create = vlan_dev_hard_header,
709         .rebuild = vlan_dev_rebuild_header,
710         .parse = eth_header_parse,
711 };
712
713 static int vlan_dev_init(struct net_device *dev)
714 {
715         struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
716         int subclass = 0;
717
718         /* IFF_BROADCAST|IFF_MULTICAST; ??? */
719         dev->flags = real_dev->flags & ~IFF_UP;
720         dev->iflink = real_dev->ifindex;
721         dev->state = (real_dev->state & ((1<<__LINK_STATE_NOCARRIER) |
722                                          (1<<__LINK_STATE_DORMANT))) |
723                      (1<<__LINK_STATE_PRESENT);
724
725         /* ipv6 shared card related stuff */
726         dev->dev_id = real_dev->dev_id;
727
728         if (is_zero_ether_addr(dev->dev_addr))
729                 memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
730         if (is_zero_ether_addr(dev->broadcast))
731                 memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
732
733         if (real_dev->features & NETIF_F_HW_VLAN_TX) {
734                 dev->header_ops = real_dev->header_ops;
735                 dev->hard_header_len = real_dev->hard_header_len;
736                 dev->hard_start_xmit = vlan_dev_hwaccel_hard_start_xmit;
737         } else {
738                 dev->header_ops = &vlan_header_ops;
739                 dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
740                 dev->hard_start_xmit = vlan_dev_hard_start_xmit;
741         }
742
743         if (real_dev->priv_flags & IFF_802_1Q_VLAN)
744                 subclass = 1;
745
746         lockdep_set_class_and_subclass(&dev->_xmit_lock,
747                                        &vlan_netdev_xmit_lock_key, subclass);
748         return 0;
749 }
750
751 void vlan_setup(struct net_device *dev)
752 {
753         ether_setup(dev);
754
755         dev->priv_flags |= IFF_802_1Q_VLAN;
756         dev->tx_queue_len = 0;
757
758         dev->change_mtu = vlan_dev_change_mtu;
759         dev->init = vlan_dev_init;
760         dev->open = vlan_dev_open;
761         dev->stop = vlan_dev_stop;
762         dev->set_mac_address = vlan_dev_set_mac_address;
763         dev->set_multicast_list = vlan_dev_set_multicast_list;
764         dev->change_rx_flags = vlan_dev_change_rx_flags;
765         dev->do_ioctl = vlan_dev_ioctl;
766         dev->destructor = free_netdev;
767
768         memset(dev->broadcast, 0, ETH_ALEN);
769 }
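As a usage note (a hedged sketch, not the exact call site in this tree): vlan_setup() is intended to be handed to alloc_netdev() as the setup callback when a new VLAN net_device is created, and the allocator reserves the private area that vlan_dev_info() points at before vlan_setup() fills in the ops and flags shown above. The fragment below assumes it is compiled inside the 8021q module, so the local vlan.h declares vlan_setup() and the private struct; the function name and error handling are illustrative only.

#include <linux/netdevice.h>

#include "vlan.h"   /* as in the file above, for vlan_setup() etc. */

/* Hedged sketch: allocate a net_device whose setup callback is vlan_setup().
 * The caller would still fill in real_dev/vlan_id in the private data and
 * register the device before it becomes usable.
 */
static struct net_device *example_alloc_vlan(const char *name)
{
        struct net_device *dev;

        dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
        if (!dev)
                return NULL;

        return dev;
}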
770