net/core/flow_dissector.c
#include <linux/skbuff.h>
#include <linux/export.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/igmp.h>
#include <linux/icmp.h>
#include <linux/sctp.h>
#include <linux/dccp.h>
#include <linux/if_tunnel.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <net/flow_keys.h>

/* copy saddr & daddr, possibly using 64bit load/store
 * Equivalent to :	flow->src = iph->saddr;
 *			flow->dst = iph->daddr;
 */
static void iph_to_flow_copy_addrs(struct flow_keys *flow, const struct iphdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(*flow), dst) !=
		     offsetof(typeof(*flow), src) + sizeof(flow->src));
	memcpy(&flow->src, &iph->saddr, sizeof(flow->src) + sizeof(flow->dst));
}
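
Why the BUILD_BUG_ON above matters: the single memcpy is only equivalent to the two assignments if dst immediately follows src in struct flow_keys, which lets the compiler collapse the copy into one 64-bit load/store. A minimal userspace sketch of the same idiom (struct and function names here are hypothetical, not kernel code):

/* Userspace sketch: copy two adjacent 32-bit fields with one memcpy,
 * guarded by a compile-time layout check similar to BUILD_BUG_ON().
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pair {
	uint32_t src;
	uint32_t dst;	/* must immediately follow src */
};

static void copy_both(struct pair *p, const uint32_t *saddr_daddr)
{
	/* compile-time check that dst sits right after src */
	static_assert(offsetof(struct pair, dst) ==
		      offsetof(struct pair, src) + sizeof(uint32_t),
		      "fields must be adjacent");
	memcpy(&p->src, saddr_daddr, sizeof(p->src) + sizeof(p->dst));
}

int main(void)
{
	uint32_t addrs[2] = { 0x01020304, 0x05060708 };
	struct pair p;

	copy_both(&p, addrs);
	printf("src=%08x dst=%08x\n", (unsigned)p.src, (unsigned)p.dst);
	return 0;
}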

/**
 * skb_flow_get_ports - extract the upper layer ports and return them
 * @skb: buffer to extract the ports from
 * @thoff: transport header offset
 * @ip_proto: protocol for which to get port offset
 *
 * The function will try to retrieve the ports at offset thoff + poff where poff
 * is the protocol port offset returned from proto_ports_offset
 */
__be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto)
{
	int poff = proto_ports_offset(ip_proto);

	if (poff >= 0) {
		__be32 *ports, _ports;

		ports = skb_header_pointer(skb, thoff + poff,
					   sizeof(_ports), &_ports);
		if (ports)
			return *ports;
	}

	return 0;
}
EXPORT_SYMBOL(skb_flow_get_ports);
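
skb_flow_get_ports() fetches both 16-bit ports with a single 32-bit access because the source and destination ports occupy the first four bytes of the TCP/UDP header; struct flow_keys exposes that same word as both ports and port16[2]. A minimal userspace sketch of the overlay (type names assumed, not the kernel's):

/* Userspace sketch: one 32-bit word overlaying two adjacent 16-bit
 * port fields, mirroring the ports/port16[] union in struct flow_keys.
 */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

union port_word {
	uint32_t ports;		/* both ports read as one word */
	uint16_t port16[2];	/* [0] = source, [1] = destination */
};

int main(void)
{
	/* first 4 bytes of a TCP/UDP header: sport 80, dport 12345 */
	union port_word p;

	p.port16[0] = htons(80);
	p.port16[1] = htons(12345);
	printf("sport=%u dport=%u word=%08x\n",
	       ntohs(p.port16[0]), ntohs(p.port16[1]), (unsigned)p.ports);
	return 0;
}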

bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow)
{
	int nhoff = skb_network_offset(skb);
	u8 ip_proto;
	__be16 proto = skb->protocol;

	memset(flow, 0, sizeof(*flow));

again:
	switch (proto) {
	case htons(ETH_P_IP): {
		const struct iphdr *iph;
		struct iphdr _iph;
ip:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph || iph->ihl < 5)
			return false;
		nhoff += iph->ihl * 4;

		ip_proto = iph->protocol;
		if (ip_is_fragment(iph))
			ip_proto = 0;

		iph_to_flow_copy_addrs(flow, iph);
		break;
	}
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph;
		struct ipv6hdr _iph;
ipv6:
		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
		if (!iph)
			return false;

		ip_proto = iph->nexthdr;
		flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
		flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
		nhoff += sizeof(struct ipv6hdr);
		break;
	}
	case htons(ETH_P_8021AD):
	case htons(ETH_P_8021Q): {
		const struct vlan_hdr *vlan;
		struct vlan_hdr _vlan;

		vlan = skb_header_pointer(skb, nhoff, sizeof(_vlan), &_vlan);
		if (!vlan)
			return false;

		proto = vlan->h_vlan_encapsulated_proto;
		nhoff += sizeof(*vlan);
		goto again;
	}
	case htons(ETH_P_PPP_SES): {
		struct {
			struct pppoe_hdr hdr;
			__be16 proto;
		} *hdr, _hdr;
		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		proto = hdr->proto;
		nhoff += PPPOE_SES_HLEN;
		switch (proto) {
		case htons(PPP_IP):
			goto ip;
		case htons(PPP_IPV6):
			goto ipv6;
		default:
			return false;
		}
	}
	default:
		return false;
	}

	switch (ip_proto) {
	case IPPROTO_GRE: {
		struct gre_hdr {
			__be16 flags;
			__be16 proto;
		} *hdr, _hdr;

		hdr = skb_header_pointer(skb, nhoff, sizeof(_hdr), &_hdr);
		if (!hdr)
			return false;
		/*
		 * Only look inside GRE if version zero and no
		 * routing
		 */
		if (!(hdr->flags & (GRE_VERSION|GRE_ROUTING))) {
			proto = hdr->proto;
			nhoff += 4;
			if (hdr->flags & GRE_CSUM)
				nhoff += 4;
			if (hdr->flags & GRE_KEY)
				nhoff += 4;
			if (hdr->flags & GRE_SEQ)
				nhoff += 4;
			if (proto == htons(ETH_P_TEB)) {
				const struct ethhdr *eth;
				struct ethhdr _eth;

				eth = skb_header_pointer(skb, nhoff,
							 sizeof(_eth), &_eth);
				if (!eth)
					return false;
				proto = eth->h_proto;
				nhoff += sizeof(*eth);
			}
			goto again;
		}
		break;
	}
	case IPPROTO_IPIP:
		proto = htons(ETH_P_IP);
		goto ip;
	case IPPROTO_IPV6:
		proto = htons(ETH_P_IPV6);
		goto ipv6;
	default:
		break;
	}

	flow->ip_proto = ip_proto;
	flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
	flow->thoff = (u16) nhoff;

	return true;
}
EXPORT_SYMBOL(skb_flow_dissect);
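
In the GRE branch above, nhoff first skips the fixed four bytes of flags and protocol, then four more bytes for each optional word the flags announce (checksum, key, sequence). A standalone sketch of that arithmetic (flag names and values are local stand-ins for the kernel's GRE_* constants, following the RFC 2784/2890 bit layout):

/* Userspace sketch: bytes of GRE header preceding the encapsulated
 * payload, as the dissector computes when it advances nhoff.
 */
#include <stdint.h>
#include <stdio.h>

#define MY_GRE_CSUM	0x8000	/* checksum word present */
#define MY_GRE_KEY	0x2000	/* key word present */
#define MY_GRE_SEQ	0x1000	/* sequence word present */

static unsigned int gre_header_len(uint16_t flags_host_order)
{
	unsigned int len = 4;	/* fixed part: flags + protocol */

	if (flags_host_order & MY_GRE_CSUM)
		len += 4;
	if (flags_host_order & MY_GRE_KEY)
		len += 4;
	if (flags_host_order & MY_GRE_SEQ)
		len += 4;
	return len;
}

int main(void)
{
	printf("no options: %u bytes\n", gre_header_len(0));
	printf("key+seq:    %u bytes\n",
	       gre_header_len(MY_GRE_KEY | MY_GRE_SEQ));
	return 0;
}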

static u32 hashrnd __read_mostly;
static __always_inline void __flow_hash_secret_init(void)
{
	net_get_random_once(&hashrnd, sizeof(hashrnd));
}

static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
{
	__flow_hash_secret_init();
	return jhash_3words(a, b, c, hashrnd);
}

static __always_inline u32 __flow_hash_1word(u32 a)
{
	__flow_hash_secret_init();
	return jhash_1word(a, hashrnd);
}
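
net_get_random_once() above fills hashrnd exactly once, on the first packet that needs it, so the hash secret is unpredictable to an attacker yet stable for the lifetime of the system. A rough userspace analogue using pthread_once() (the entropy source and mixing step are toy stand-ins, not the kernel's jhash):

/* Userspace sketch: seed a hash secret once on first use. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static uint32_t hash_seed;
static pthread_once_t seed_once = PTHREAD_ONCE_INIT;

static void seed_init(void)
{
	/* rand()/time() stand in for a real entropy source here */
	srand((unsigned int)time(NULL));
	hash_seed = (uint32_t)rand();
}

static uint32_t flow_hash_1word(uint32_t a)
{
	pthread_once(&seed_once, seed_init);
	/* toy mixing step standing in for jhash_1word() */
	return (a ^ hash_seed) * 2654435761u;
}

int main(void)
{
	/* the same word hashes identically within one run */
	printf("%08x %08x\n",
	       (unsigned)flow_hash_1word(42), (unsigned)flow_hash_1word(42));
	return 0;
}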

/*
 * __skb_get_hash: calculate a flow hash based on src/dst addresses
 * and src/dst port numbers.  Sets hash in skb to non-zero hash value
 * on success, zero indicates no valid hash.  Also, sets l4_hash in skb
 * if hash is a canonical 4-tuple hash over transport ports.
 */
void __skb_get_hash(struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 hash;

	if (!skb_flow_dissect(skb, &keys))
		return;

	if (keys.ports)
		skb->l4_hash = 1;

	/* get a consistent hash (same value on both flow directions) */
	if (((__force u32)keys.dst < (__force u32)keys.src) ||
	    (((__force u32)keys.dst == (__force u32)keys.src) &&
	     ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
		swap(keys.dst, keys.src);
		swap(keys.port16[0], keys.port16[1]);
	}

	hash = __flow_hash_3words((__force u32)keys.dst,
				  (__force u32)keys.src,
				  (__force u32)keys.ports);
	if (!hash)
		hash = 1;

	skb->hash = hash;
}
EXPORT_SYMBOL(__skb_get_hash);
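
The swap above canonicalizes the 4-tuple so that a flow and its reply direction feed identical inputs to the hash. A minimal userspace sketch of the same normalization (type names assumed):

/* Userspace sketch: order a 4-tuple so both directions of a flow
 * produce the same hash input, mirroring __skb_get_hash().
 */
#include <stdint.h>
#include <stdio.h>

struct tuple {
	uint32_t src, dst;
	uint16_t sport, dport;
};

static void make_canonical(struct tuple *t)
{
	if (t->dst < t->src ||
	    (t->dst == t->src && t->dport < t->sport)) {
		uint32_t a = t->src;
		uint16_t p = t->sport;

		t->src = t->dst;
		t->dst = a;
		t->sport = t->dport;
		t->dport = p;
	}
}

int main(void)
{
	struct tuple fwd = { 0x0a000001, 0x0a000002, 1234, 80 };
	struct tuple rev = { 0x0a000002, 0x0a000001, 80, 1234 };

	make_canonical(&fwd);
	make_canonical(&rev);
	printf("equal after normalization: %d\n",
	       fwd.src == rev.src && fwd.dst == rev.dst &&
	       fwd.sport == rev.sport && fwd.dport == rev.dport);
	return 0;
}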

/*
 * Returns a Tx hash based on the given packet descriptor and a Tx
 * queue count to be used as a distribution range.
 */
u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
		  unsigned int num_tx_queues)
{
	u32 hash;
	u16 qoffset = 0;
	u16 qcount = num_tx_queues;

	if (skb_rx_queue_recorded(skb)) {
		hash = skb_get_rx_queue(skb);
		while (unlikely(hash >= num_tx_queues))
			hash -= num_tx_queues;
		return hash;
	}

	if (dev->num_tc) {
		u8 tc = netdev_get_prio_tc_map(dev, skb->priority);
		qoffset = dev->tc_to_txq[tc].offset;
		qcount = dev->tc_to_txq[tc].count;
	}

	if (skb->sk && skb->sk->sk_hash)
		hash = skb->sk->sk_hash;
	else
		hash = (__force u16) skb->protocol;
	hash = __flow_hash_1word(hash);

	return (u16) (((u64) hash * qcount) >> 32) + qoffset;
}
EXPORT_SYMBOL(__skb_tx_hash);
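
The final line of __skb_tx_hash() maps the 32-bit hash into [0, qcount) with one multiply and a shift instead of a modulo: conceptually (hash / 2^32) * qcount, evaluated in integer arithmetic. A standalone sketch:

/* Userspace sketch: multiplicative range reduction as used above. */
#include <stdint.h>
#include <stdio.h>

static uint16_t pick_queue(uint32_t hash, uint16_t qcount, uint16_t qoffset)
{
	return (uint16_t)(((uint64_t)hash * qcount) >> 32) + qoffset;
}

int main(void)
{
	/* extremes of the hash range land on the first and last queue */
	printf("%u\n", (unsigned)pick_queue(0x00000000u, 8, 0));	/* 0 */
	printf("%u\n", (unsigned)pick_queue(0xffffffffu, 8, 0));	/* 7 */
	printf("%u\n", (unsigned)pick_queue(0x80000000u, 8, 0));	/* 4 */
	return 0;
}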

/* __skb_get_poff() returns the offset to the payload as far as it could
 * be dissected. The main user is currently BPF, so that we can
 * dynamically truncate packets without needing to push the actual
 * payload to user space, and can analyze headers only instead.
 */
u32 __skb_get_poff(const struct sk_buff *skb)
{
	struct flow_keys keys;
	u32 poff = 0;

	if (!skb_flow_dissect(skb, &keys))
		return 0;

	poff += keys.thoff;
	switch (keys.ip_proto) {
	case IPPROTO_TCP: {
		const struct tcphdr *tcph;
		struct tcphdr _tcph;

		tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
		if (!tcph)
			return poff;

		poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
		poff += sizeof(struct udphdr);
		break;
	/* For the rest, we do not really care about header
	 * extensions at this point for now.
	 */
	case IPPROTO_ICMP:
		poff += sizeof(struct icmphdr);
		break;
	case IPPROTO_ICMPV6:
		poff += sizeof(struct icmp6hdr);
		break;
	case IPPROTO_IGMP:
		poff += sizeof(struct igmphdr);
		break;
	case IPPROTO_DCCP:
		poff += sizeof(struct dccp_hdr);
		break;
	case IPPROTO_SCTP:
		poff += sizeof(struct sctphdr);
		break;
	}

	return poff;
}
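
For TCP, __skb_get_poff() honors the variable header length doff * 4 but clamps it from below, so a corrupt doff smaller than 5 cannot place the payload inside the fixed 20-byte header. A standalone sketch of that clamp:

/* Userspace sketch: TCP header length from doff, clamped at 20 bytes. */
#include <stdint.h>
#include <stdio.h>

#define TCP_MIN_HDR_LEN 20u	/* sizeof(struct tcphdr) without options */

static uint32_t tcp_header_len(uint8_t doff)
{
	uint32_t len = (uint32_t)doff * 4;

	return len > TCP_MIN_HDR_LEN ? len : TCP_MIN_HDR_LEN;
}

int main(void)
{
	printf("doff=5: %u bytes\n", (unsigned)tcp_header_len(5));	/* 20 */
	printf("doff=8: %u bytes\n", (unsigned)tcp_header_len(8));	/* 32 */
	printf("doff=2: %u bytes\n", (unsigned)tcp_header_len(2));	/* clamped */
	return 0;
}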

static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	int queue_index = -1;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(
		    dev_maps->cpu_map[raw_smp_processor_id()]);
		if (map) {
			if (map->len == 1)
				queue_index = map->queues[0];
			else {
				u32 hash;
				if (skb->sk && skb->sk->sk_hash)
					hash = skb->sk->sk_hash;
				else
					hash = (__force u16) skb->protocol ^
					    skb->hash;
				hash = __flow_hash_1word(hash);
				queue_index = map->queues[
				    ((u64)hash * map->len) >> 32];
			}
			if (unlikely(queue_index >= dev->real_num_tx_queues))
				queue_index = -1;
		}
	}
	rcu_read_unlock();

	return queue_index;
#else
	return -1;
#endif
}

static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queue_index = sk_tx_queue_get(sk);

	if (queue_index < 0 || skb->ooo_okay ||
	    queue_index >= dev->real_num_tx_queues) {
		int new_index = get_xps_queue(dev, skb);
		if (new_index < 0)
			new_index = skb_tx_hash(dev, skb);

		if (queue_index != new_index && sk &&
		    rcu_access_pointer(sk->sk_dst_cache))
			sk_tx_queue_set(sk, new_index);

		queue_index = new_index;
	}

	return queue_index;
}

struct netdev_queue *netdev_pick_tx(struct net_device *dev,
				    struct sk_buff *skb,
				    void *accel_priv)
{
	int queue_index = 0;

	if (dev->real_num_tx_queues != 1) {
		const struct net_device_ops *ops = dev->netdev_ops;
		if (ops->ndo_select_queue)
			queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
							    __netdev_pick_tx);
		else
			queue_index = __netdev_pick_tx(dev, skb);

		if (!accel_priv)
			queue_index = netdev_cap_txqueue(dev, queue_index);
	}

	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}