net/8021q/vlan_core.c
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		if (vlan_id)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag() expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);

	/*
	 * The u64_stats syncp lets readers obtain a consistent snapshot of
	 * these 64-bit counters even on 32-bit SMP hosts.
	 */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
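
/*
 * A minimal sketch (assumed caller, not part of this file) of how the RX
 * core is expected to use vlan_do_receive() when a hardware-accelerated
 * VLAN tag is present:
 *
 *	if (vlan_tx_tag_present(skb)) {
 *		if (vlan_do_receive(&skb))
 *			goto another_round;	(reprocess on the VLAN device)
 *		else if (unlikely(!skb))
 *			goto out;		(skb_share_check() dropped it)
 *	}
 */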

/* Must be invoked with rcu_read_lock or with RTNL. */
struct net_device *__vlan_find_dev_deep(struct net_device *real_dev,
					u16 vlan_id)
{
	struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp);

	if (grp) {
		return vlan_group_get_device(grp, vlan_id);
	} else {
		/*
		 * Bonding slaves do not have a vlan_group assigned to
		 * themselves; it is assigned to the bonding master instead.
		 */
		if (netif_is_bond_slave(real_dev))
			return __vlan_find_dev_deep(real_dev->master, vlan_id);
	}

	return NULL;
}
EXPORT_SYMBOL(__vlan_find_dev_deep);
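
/*
 * Usage sketch (assumed, not from this file): code outside 8021q that
 * needs the VLAN net_device for a given tag, even when the tag sits on a
 * bonding slave, can call this under rcu_read_lock():
 *
 *	rcu_read_lock();
 *	vlan_dev = __vlan_find_dev_deep(real_dev, vid);
 *	if (vlan_dev)
 *		... use vlan_dev ...
 *	rcu_read_unlock();
 */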

struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
	return vlan_dev_info(dev)->real_dev;
}
EXPORT_SYMBOL(vlan_dev_real_dev);

u16 vlan_dev_vlan_id(const struct net_device *dev)
{
	return vlan_dev_info(dev)->vlan_id;
}
EXPORT_SYMBOL(vlan_dev_vlan_id);

/*
 * Undo the effect of an in-band 802.1Q header: move the Ethernet source
 * and destination addresses forward over the 4-byte VLAN header so the
 * rest of the stack sees an ordinary untagged Ethernet frame.
 */
static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0)
		return NULL;
	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	skb_reset_mac_len(skb);
	return skb;
}

static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned char *rawp;

	/*
	 * Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */
	proto = vhdr->h_vlan_encapsulated_proto;
	if (ntohs(proto) >= 1536) {
		skb->protocol = proto;
		return;
	}

	rawp = skb->data;
	if (*(unsigned short *) rawp == 0xFFFF)
		/*
		 * This is a magic hack to spot IPX packets. Older Novell
		 * breaks the protocol design and runs IPX over 802.3 without
		 * an 802.2 LLC layer. We look for FFFF which isn't a used
		 * 802.2 SSAP/DSAP. This won't work for fault tolerant netware
		 * but does for the rest.
		 */
		skb->protocol = htons(ETH_P_802_3);
	else
		/*
		 * Real 802.2 LLC
		 */
		skb->protocol = htons(ETH_P_802_2);
}

/*
 * vlan_untag() removes a software (in-band) 802.1Q header from the skb:
 * the tag is saved in skb->vlan_tci as if it had been stripped by
 * hardware, the encapsulated protocol is restored and the Ethernet
 * addresses are moved up so the frame looks untagged to the caller.
 */
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
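
/*
 * A minimal usage sketch (assumed caller, not part of this file): an RX
 * path that sees an 802.1Q frame which the hardware did not untag could
 * normalize it like this before further processing:
 *
 *	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) &&
 *	    !vlan_tx_tag_present(skb)) {
 *		skb = vlan_untag(skb);
 *		if (unlikely(!skb))
 *			return;		(frame was dropped)
 *	}
 */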