Commit f2b3ee9e4200b32d113b1bd3c93f9a836c97357c

Authored by Willem de Bruijn
Committed by David S. Miller
1 parent 40206dd98f

ipv6: Fix ip_gre lockless xmits.

Tunnel devices set NETIF_F_LLTX to bypass HARD_TX_LOCK.  Sit and
ipip set this unconditionally in ops->setup, but gre enables it
conditionally after parameter passing in ops->newlink. This is
not called during tunnel setup as below, however, so GRE tunnels are
still taking the lock.

modprobe ip_gre
ip tunnel add test0 mode gre remote 10.5.1.1 dev lo
ip link set test0 up
ip addr add 10.6.0.1 dev test0
 # cat /sys/class/net/test0/features
 # $DIR/test_tunnel_xmit 10 10.5.2.1
ip route add 10.5.2.0/24 dev test0
ip tunnel del test0

The newlink callback is only called in rtnl_newlink, and only if
the device is new, as it calls register_netdevice internally. Gre
tunnels are created at 'ip tunnel add' with ioctl SIOCADDTUNNEL,
which calls ipgre_tunnel_locate, which calls register_netdev.
rtnl_newlink is called at 'ip link set', but skips ops->newlink
and the device is up with locking still enabled. The equivalent
ipip tunnel works fine, btw (just substitute 'mode gre' for
'mode ipip').

On kernels before /sys/class/net/*/features was removed [1],
the first commented out line returns 0x6000 with method gre,
which indicates that NETIF_F_LLTX (0x1000) is not set. With ipip,
it reports 0x7000. This test cannot be used on recent kernels where
the sysfs file is removed (and ETHTOOL_GFEATURES does not currently
work for tunnel devices, because they lack dev->ethtool_ops).

The second commented out line calls a simple transmission test [2]
that sends on 24 cores at maximum rate. Results of a single run:

ipip:			19,372,306
gre before patch:	 4,839,753
gre after patch:	19,133,873

This patch replicates the condition check in ipgre_newlink to
ipgre_tunnel_locate. It works for me, both with oseq on and off.
This is the first time I looked at rtnetlink and iproute2 code,
though, so someone more knowledgeable should probably check the
patch. Thanks.

The tail of both functions is now identical, by the way. To avoid
code duplication, I'll be happy to rework this and merge the two.

[1] http://patchwork.ozlabs.org/patch/104610/
[2] http://kernel.googlecode.com/files/xmit_udp_parallel.c

Signed-off-by: Willem de Bruijn <willemb@google.com>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 1 changed file with 4 additions and 0 deletions Inline Diff

1 /* 1 /*
2 * Linux NET3: GRE over IP protocol decoder. 2 * Linux NET3: GRE over IP protocol decoder.
3 * 3 *
4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru) 4 * Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
5 * 5 *
6 * This program is free software; you can redistribute it and/or 6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License 7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 */ 11 */
12 12
13 #include <linux/capability.h> 13 #include <linux/capability.h>
14 #include <linux/module.h> 14 #include <linux/module.h>
15 #include <linux/types.h> 15 #include <linux/types.h>
16 #include <linux/kernel.h> 16 #include <linux/kernel.h>
17 #include <linux/slab.h> 17 #include <linux/slab.h>
18 #include <asm/uaccess.h> 18 #include <asm/uaccess.h>
19 #include <linux/skbuff.h> 19 #include <linux/skbuff.h>
20 #include <linux/netdevice.h> 20 #include <linux/netdevice.h>
21 #include <linux/in.h> 21 #include <linux/in.h>
22 #include <linux/tcp.h> 22 #include <linux/tcp.h>
23 #include <linux/udp.h> 23 #include <linux/udp.h>
24 #include <linux/if_arp.h> 24 #include <linux/if_arp.h>
25 #include <linux/mroute.h> 25 #include <linux/mroute.h>
26 #include <linux/init.h> 26 #include <linux/init.h>
27 #include <linux/in6.h> 27 #include <linux/in6.h>
28 #include <linux/inetdevice.h> 28 #include <linux/inetdevice.h>
29 #include <linux/igmp.h> 29 #include <linux/igmp.h>
30 #include <linux/netfilter_ipv4.h> 30 #include <linux/netfilter_ipv4.h>
31 #include <linux/etherdevice.h> 31 #include <linux/etherdevice.h>
32 #include <linux/if_ether.h> 32 #include <linux/if_ether.h>
33 33
34 #include <net/sock.h> 34 #include <net/sock.h>
35 #include <net/ip.h> 35 #include <net/ip.h>
36 #include <net/icmp.h> 36 #include <net/icmp.h>
37 #include <net/protocol.h> 37 #include <net/protocol.h>
38 #include <net/ipip.h> 38 #include <net/ipip.h>
39 #include <net/arp.h> 39 #include <net/arp.h>
40 #include <net/checksum.h> 40 #include <net/checksum.h>
41 #include <net/dsfield.h> 41 #include <net/dsfield.h>
42 #include <net/inet_ecn.h> 42 #include <net/inet_ecn.h>
43 #include <net/xfrm.h> 43 #include <net/xfrm.h>
44 #include <net/net_namespace.h> 44 #include <net/net_namespace.h>
45 #include <net/netns/generic.h> 45 #include <net/netns/generic.h>
46 #include <net/rtnetlink.h> 46 #include <net/rtnetlink.h>
47 #include <net/gre.h> 47 #include <net/gre.h>
48 48
49 #if IS_ENABLED(CONFIG_IPV6) 49 #if IS_ENABLED(CONFIG_IPV6)
50 #include <net/ipv6.h> 50 #include <net/ipv6.h>
51 #include <net/ip6_fib.h> 51 #include <net/ip6_fib.h>
52 #include <net/ip6_route.h> 52 #include <net/ip6_route.h>
53 #endif 53 #endif
54 54
55 /* 55 /*
56 Problems & solutions 56 Problems & solutions
57 -------------------- 57 --------------------
58 58
59 1. The most important issue is detecting local dead loops. 59 1. The most important issue is detecting local dead loops.
60 They would cause complete host lockup in transmit, which 60 They would cause complete host lockup in transmit, which
61 would be "resolved" by stack overflow or, if queueing is enabled, 61 would be "resolved" by stack overflow or, if queueing is enabled,
62 with infinite looping in net_bh. 62 with infinite looping in net_bh.
63 63
64 We cannot track such dead loops during route installation, 64 We cannot track such dead loops during route installation,
65 it is infeasible task. The most general solutions would be 65 it is infeasible task. The most general solutions would be
66 to keep skb->encapsulation counter (sort of local ttl), 66 to keep skb->encapsulation counter (sort of local ttl),
67 and silently drop packet when it expires. It is a good 67 and silently drop packet when it expires. It is a good
68 solution, but it supposes maintaining new variable in ALL 68 solution, but it supposes maintaining new variable in ALL
69 skb, even if no tunneling is used. 69 skb, even if no tunneling is used.
70 70
71 Current solution: xmit_recursion breaks dead loops. This is a percpu 71 Current solution: xmit_recursion breaks dead loops. This is a percpu
72 counter, since when we enter the first ndo_xmit(), cpu migration is 72 counter, since when we enter the first ndo_xmit(), cpu migration is
73 forbidden. We force an exit if this counter reaches RECURSION_LIMIT 73 forbidden. We force an exit if this counter reaches RECURSION_LIMIT
74 74
75 2. Networking dead loops would not kill routers, but would really 75 2. Networking dead loops would not kill routers, but would really
76 kill network. IP hop limit plays role of "t->recursion" in this case, 76 kill network. IP hop limit plays role of "t->recursion" in this case,
77 if we copy it from packet being encapsulated to upper header. 77 if we copy it from packet being encapsulated to upper header.
78 It is very good solution, but it introduces two problems: 78 It is very good solution, but it introduces two problems:
79 79
80 - Routing protocols, using packets with ttl=1 (OSPF, RIP2), 80 - Routing protocols, using packets with ttl=1 (OSPF, RIP2),
81 do not work over tunnels. 81 do not work over tunnels.
82 - traceroute does not work. I planned to relay ICMP from tunnel, 82 - traceroute does not work. I planned to relay ICMP from tunnel,
83 so that this problem would be solved and traceroute output 83 so that this problem would be solved and traceroute output
84 would even more informative. This idea appeared to be wrong: 84 would even more informative. This idea appeared to be wrong:
85 only Linux complies to rfc1812 now (yes, guys, Linux is the only 85 only Linux complies to rfc1812 now (yes, guys, Linux is the only
86 true router now :-)), all routers (at least, in neighbourhood of mine) 86 true router now :-)), all routers (at least, in neighbourhood of mine)
87 return only 8 bytes of payload. It is the end. 87 return only 8 bytes of payload. It is the end.
88 88
89 Hence, if we want that OSPF worked or traceroute said something reasonable, 89 Hence, if we want that OSPF worked or traceroute said something reasonable,
90 we should search for another solution. 90 we should search for another solution.
91 91
92 One of them is to parse packet trying to detect inner encapsulation 92 One of them is to parse packet trying to detect inner encapsulation
93 made by our node. It is difficult or even impossible, especially, 93 made by our node. It is difficult or even impossible, especially,
94 taking into account fragmentation. To be short, it is not a solution at all. 94 taking into account fragmentation. To be short, it is not a solution at all.
95 95
96 Current solution: The solution was UNEXPECTEDLY SIMPLE. 96 Current solution: The solution was UNEXPECTEDLY SIMPLE.
97 We force DF flag on tunnels with preconfigured hop limit, 97 We force DF flag on tunnels with preconfigured hop limit,
98 that is ALL. :-) Well, it does not remove the problem completely, 98 that is ALL. :-) Well, it does not remove the problem completely,
99 but exponential growth of network traffic is changed to linear 99 but exponential growth of network traffic is changed to linear
100 (branches, that exceed pmtu are pruned) and tunnel mtu 100 (branches, that exceed pmtu are pruned) and tunnel mtu
101 fastly degrades to value <68, where looping stops. 101 fastly degrades to value <68, where looping stops.
102 Yes, it is not good if there exists a router in the loop, 102 Yes, it is not good if there exists a router in the loop,
103 which does not force DF, even when encapsulating packets have DF set. 103 which does not force DF, even when encapsulating packets have DF set.
104 But it is not our problem! Nobody could accuse us, we made 104 But it is not our problem! Nobody could accuse us, we made
105 all that we could make. Even if it is your gated who injected 105 all that we could make. Even if it is your gated who injected
106 fatal route to network, even if it were you who configured 106 fatal route to network, even if it were you who configured
107 fatal static route: you are innocent. :-) 107 fatal static route: you are innocent. :-)
108 108
109 109
110 110
111 3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain 111 3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
112 practically identical code. It would be good to glue them 112 practically identical code. It would be good to glue them
113 together, but it is not very evident, how to make them modular. 113 together, but it is not very evident, how to make them modular.
114 sit is integral part of IPv6, ipip and gre are naturally modular. 114 sit is integral part of IPv6, ipip and gre are naturally modular.
115 We could extract common parts (hash table, ioctl etc) 115 We could extract common parts (hash table, ioctl etc)
116 to a separate module (ip_tunnel.c). 116 to a separate module (ip_tunnel.c).
117 117
118 Alexey Kuznetsov. 118 Alexey Kuznetsov.
119 */ 119 */
120 120
121 static struct rtnl_link_ops ipgre_link_ops __read_mostly; 121 static struct rtnl_link_ops ipgre_link_ops __read_mostly;
122 static int ipgre_tunnel_init(struct net_device *dev); 122 static int ipgre_tunnel_init(struct net_device *dev);
123 static void ipgre_tunnel_setup(struct net_device *dev); 123 static void ipgre_tunnel_setup(struct net_device *dev);
124 static int ipgre_tunnel_bind_dev(struct net_device *dev); 124 static int ipgre_tunnel_bind_dev(struct net_device *dev);
125 125
126 /* Fallback tunnel: no source, no destination, no key, no options */ 126 /* Fallback tunnel: no source, no destination, no key, no options */
127 127
128 #define HASH_SIZE 16 128 #define HASH_SIZE 16
129 129
130 static int ipgre_net_id __read_mostly; 130 static int ipgre_net_id __read_mostly;
131 struct ipgre_net { 131 struct ipgre_net {
132 struct ip_tunnel __rcu *tunnels[4][HASH_SIZE]; 132 struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
133 133
134 struct net_device *fb_tunnel_dev; 134 struct net_device *fb_tunnel_dev;
135 }; 135 };
136 136
137 /* Tunnel hash table */ 137 /* Tunnel hash table */
138 138
139 /* 139 /*
140 4 hash tables: 140 4 hash tables:
141 141
142 3: (remote,local) 142 3: (remote,local)
143 2: (remote,*) 143 2: (remote,*)
144 1: (*,local) 144 1: (*,local)
145 0: (*,*) 145 0: (*,*)
146 146
147 We require exact key match i.e. if a key is present in packet 147 We require exact key match i.e. if a key is present in packet
148 it will match only tunnel with the same key; if it is not present, 148 it will match only tunnel with the same key; if it is not present,
149 it will match only keyless tunnel. 149 it will match only keyless tunnel.
150 150
151 All keysless packets, if not matched configured keyless tunnels 151 All keysless packets, if not matched configured keyless tunnels
152 will match fallback tunnel. 152 will match fallback tunnel.
153 */ 153 */
154 154
155 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 155 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
156 156
157 #define tunnels_r_l tunnels[3] 157 #define tunnels_r_l tunnels[3]
158 #define tunnels_r tunnels[2] 158 #define tunnels_r tunnels[2]
159 #define tunnels_l tunnels[1] 159 #define tunnels_l tunnels[1]
160 #define tunnels_wc tunnels[0] 160 #define tunnels_wc tunnels[0]
161 /* 161 /*
162 * Locking : hash tables are protected by RCU and RTNL 162 * Locking : hash tables are protected by RCU and RTNL
163 */ 163 */
164 164
165 #define for_each_ip_tunnel_rcu(start) \ 165 #define for_each_ip_tunnel_rcu(start) \
166 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 166 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
167 167
168 /* often modified stats are per cpu, other are shared (netdev->stats) */ 168 /* often modified stats are per cpu, other are shared (netdev->stats) */
169 struct pcpu_tstats { 169 struct pcpu_tstats {
170 unsigned long rx_packets; 170 unsigned long rx_packets;
171 unsigned long rx_bytes; 171 unsigned long rx_bytes;
172 unsigned long tx_packets; 172 unsigned long tx_packets;
173 unsigned long tx_bytes; 173 unsigned long tx_bytes;
174 } __attribute__((aligned(4*sizeof(unsigned long)))); 174 } __attribute__((aligned(4*sizeof(unsigned long))));
175 175
176 static struct net_device_stats *ipgre_get_stats(struct net_device *dev) 176 static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
177 { 177 {
178 struct pcpu_tstats sum = { 0 }; 178 struct pcpu_tstats sum = { 0 };
179 int i; 179 int i;
180 180
181 for_each_possible_cpu(i) { 181 for_each_possible_cpu(i) {
182 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 182 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
183 183
184 sum.rx_packets += tstats->rx_packets; 184 sum.rx_packets += tstats->rx_packets;
185 sum.rx_bytes += tstats->rx_bytes; 185 sum.rx_bytes += tstats->rx_bytes;
186 sum.tx_packets += tstats->tx_packets; 186 sum.tx_packets += tstats->tx_packets;
187 sum.tx_bytes += tstats->tx_bytes; 187 sum.tx_bytes += tstats->tx_bytes;
188 } 188 }
189 dev->stats.rx_packets = sum.rx_packets; 189 dev->stats.rx_packets = sum.rx_packets;
190 dev->stats.rx_bytes = sum.rx_bytes; 190 dev->stats.rx_bytes = sum.rx_bytes;
191 dev->stats.tx_packets = sum.tx_packets; 191 dev->stats.tx_packets = sum.tx_packets;
192 dev->stats.tx_bytes = sum.tx_bytes; 192 dev->stats.tx_bytes = sum.tx_bytes;
193 return &dev->stats; 193 return &dev->stats;
194 } 194 }
195 195
196 /* Given src, dst and key, find appropriate for input tunnel. */ 196 /* Given src, dst and key, find appropriate for input tunnel. */
197 197
198 static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, 198 static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
199 __be32 remote, __be32 local, 199 __be32 remote, __be32 local,
200 __be32 key, __be16 gre_proto) 200 __be32 key, __be16 gre_proto)
201 { 201 {
202 struct net *net = dev_net(dev); 202 struct net *net = dev_net(dev);
203 int link = dev->ifindex; 203 int link = dev->ifindex;
204 unsigned int h0 = HASH(remote); 204 unsigned int h0 = HASH(remote);
205 unsigned int h1 = HASH(key); 205 unsigned int h1 = HASH(key);
206 struct ip_tunnel *t, *cand = NULL; 206 struct ip_tunnel *t, *cand = NULL;
207 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 207 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
208 int dev_type = (gre_proto == htons(ETH_P_TEB)) ? 208 int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
209 ARPHRD_ETHER : ARPHRD_IPGRE; 209 ARPHRD_ETHER : ARPHRD_IPGRE;
210 int score, cand_score = 4; 210 int score, cand_score = 4;
211 211
212 for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) { 212 for_each_ip_tunnel_rcu(ign->tunnels_r_l[h0 ^ h1]) {
213 if (local != t->parms.iph.saddr || 213 if (local != t->parms.iph.saddr ||
214 remote != t->parms.iph.daddr || 214 remote != t->parms.iph.daddr ||
215 key != t->parms.i_key || 215 key != t->parms.i_key ||
216 !(t->dev->flags & IFF_UP)) 216 !(t->dev->flags & IFF_UP))
217 continue; 217 continue;
218 218
219 if (t->dev->type != ARPHRD_IPGRE && 219 if (t->dev->type != ARPHRD_IPGRE &&
220 t->dev->type != dev_type) 220 t->dev->type != dev_type)
221 continue; 221 continue;
222 222
223 score = 0; 223 score = 0;
224 if (t->parms.link != link) 224 if (t->parms.link != link)
225 score |= 1; 225 score |= 1;
226 if (t->dev->type != dev_type) 226 if (t->dev->type != dev_type)
227 score |= 2; 227 score |= 2;
228 if (score == 0) 228 if (score == 0)
229 return t; 229 return t;
230 230
231 if (score < cand_score) { 231 if (score < cand_score) {
232 cand = t; 232 cand = t;
233 cand_score = score; 233 cand_score = score;
234 } 234 }
235 } 235 }
236 236
237 for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) { 237 for_each_ip_tunnel_rcu(ign->tunnels_r[h0 ^ h1]) {
238 if (remote != t->parms.iph.daddr || 238 if (remote != t->parms.iph.daddr ||
239 key != t->parms.i_key || 239 key != t->parms.i_key ||
240 !(t->dev->flags & IFF_UP)) 240 !(t->dev->flags & IFF_UP))
241 continue; 241 continue;
242 242
243 if (t->dev->type != ARPHRD_IPGRE && 243 if (t->dev->type != ARPHRD_IPGRE &&
244 t->dev->type != dev_type) 244 t->dev->type != dev_type)
245 continue; 245 continue;
246 246
247 score = 0; 247 score = 0;
248 if (t->parms.link != link) 248 if (t->parms.link != link)
249 score |= 1; 249 score |= 1;
250 if (t->dev->type != dev_type) 250 if (t->dev->type != dev_type)
251 score |= 2; 251 score |= 2;
252 if (score == 0) 252 if (score == 0)
253 return t; 253 return t;
254 254
255 if (score < cand_score) { 255 if (score < cand_score) {
256 cand = t; 256 cand = t;
257 cand_score = score; 257 cand_score = score;
258 } 258 }
259 } 259 }
260 260
261 for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) { 261 for_each_ip_tunnel_rcu(ign->tunnels_l[h1]) {
262 if ((local != t->parms.iph.saddr && 262 if ((local != t->parms.iph.saddr &&
263 (local != t->parms.iph.daddr || 263 (local != t->parms.iph.daddr ||
264 !ipv4_is_multicast(local))) || 264 !ipv4_is_multicast(local))) ||
265 key != t->parms.i_key || 265 key != t->parms.i_key ||
266 !(t->dev->flags & IFF_UP)) 266 !(t->dev->flags & IFF_UP))
267 continue; 267 continue;
268 268
269 if (t->dev->type != ARPHRD_IPGRE && 269 if (t->dev->type != ARPHRD_IPGRE &&
270 t->dev->type != dev_type) 270 t->dev->type != dev_type)
271 continue; 271 continue;
272 272
273 score = 0; 273 score = 0;
274 if (t->parms.link != link) 274 if (t->parms.link != link)
275 score |= 1; 275 score |= 1;
276 if (t->dev->type != dev_type) 276 if (t->dev->type != dev_type)
277 score |= 2; 277 score |= 2;
278 if (score == 0) 278 if (score == 0)
279 return t; 279 return t;
280 280
281 if (score < cand_score) { 281 if (score < cand_score) {
282 cand = t; 282 cand = t;
283 cand_score = score; 283 cand_score = score;
284 } 284 }
285 } 285 }
286 286
287 for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) { 287 for_each_ip_tunnel_rcu(ign->tunnels_wc[h1]) {
288 if (t->parms.i_key != key || 288 if (t->parms.i_key != key ||
289 !(t->dev->flags & IFF_UP)) 289 !(t->dev->flags & IFF_UP))
290 continue; 290 continue;
291 291
292 if (t->dev->type != ARPHRD_IPGRE && 292 if (t->dev->type != ARPHRD_IPGRE &&
293 t->dev->type != dev_type) 293 t->dev->type != dev_type)
294 continue; 294 continue;
295 295
296 score = 0; 296 score = 0;
297 if (t->parms.link != link) 297 if (t->parms.link != link)
298 score |= 1; 298 score |= 1;
299 if (t->dev->type != dev_type) 299 if (t->dev->type != dev_type)
300 score |= 2; 300 score |= 2;
301 if (score == 0) 301 if (score == 0)
302 return t; 302 return t;
303 303
304 if (score < cand_score) { 304 if (score < cand_score) {
305 cand = t; 305 cand = t;
306 cand_score = score; 306 cand_score = score;
307 } 307 }
308 } 308 }
309 309
310 if (cand != NULL) 310 if (cand != NULL)
311 return cand; 311 return cand;
312 312
313 dev = ign->fb_tunnel_dev; 313 dev = ign->fb_tunnel_dev;
314 if (dev->flags & IFF_UP) 314 if (dev->flags & IFF_UP)
315 return netdev_priv(dev); 315 return netdev_priv(dev);
316 316
317 return NULL; 317 return NULL;
318 } 318 }
319 319
320 static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign, 320 static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign,
321 struct ip_tunnel_parm *parms) 321 struct ip_tunnel_parm *parms)
322 { 322 {
323 __be32 remote = parms->iph.daddr; 323 __be32 remote = parms->iph.daddr;
324 __be32 local = parms->iph.saddr; 324 __be32 local = parms->iph.saddr;
325 __be32 key = parms->i_key; 325 __be32 key = parms->i_key;
326 unsigned int h = HASH(key); 326 unsigned int h = HASH(key);
327 int prio = 0; 327 int prio = 0;
328 328
329 if (local) 329 if (local)
330 prio |= 1; 330 prio |= 1;
331 if (remote && !ipv4_is_multicast(remote)) { 331 if (remote && !ipv4_is_multicast(remote)) {
332 prio |= 2; 332 prio |= 2;
333 h ^= HASH(remote); 333 h ^= HASH(remote);
334 } 334 }
335 335
336 return &ign->tunnels[prio][h]; 336 return &ign->tunnels[prio][h];
337 } 337 }
338 338
339 static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign, 339 static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign,
340 struct ip_tunnel *t) 340 struct ip_tunnel *t)
341 { 341 {
342 return __ipgre_bucket(ign, &t->parms); 342 return __ipgre_bucket(ign, &t->parms);
343 } 343 }
344 344
345 static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t) 345 static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
346 { 346 {
347 struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t); 347 struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
348 348
349 rcu_assign_pointer(t->next, rtnl_dereference(*tp)); 349 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
350 rcu_assign_pointer(*tp, t); 350 rcu_assign_pointer(*tp, t);
351 } 351 }
352 352
353 static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) 353 static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t)
354 { 354 {
355 struct ip_tunnel __rcu **tp; 355 struct ip_tunnel __rcu **tp;
356 struct ip_tunnel *iter; 356 struct ip_tunnel *iter;
357 357
358 for (tp = ipgre_bucket(ign, t); 358 for (tp = ipgre_bucket(ign, t);
359 (iter = rtnl_dereference(*tp)) != NULL; 359 (iter = rtnl_dereference(*tp)) != NULL;
360 tp = &iter->next) { 360 tp = &iter->next) {
361 if (t == iter) { 361 if (t == iter) {
362 rcu_assign_pointer(*tp, t->next); 362 rcu_assign_pointer(*tp, t->next);
363 break; 363 break;
364 } 364 }
365 } 365 }
366 } 366 }
367 367
368 static struct ip_tunnel *ipgre_tunnel_find(struct net *net, 368 static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
369 struct ip_tunnel_parm *parms, 369 struct ip_tunnel_parm *parms,
370 int type) 370 int type)
371 { 371 {
372 __be32 remote = parms->iph.daddr; 372 __be32 remote = parms->iph.daddr;
373 __be32 local = parms->iph.saddr; 373 __be32 local = parms->iph.saddr;
374 __be32 key = parms->i_key; 374 __be32 key = parms->i_key;
375 int link = parms->link; 375 int link = parms->link;
376 struct ip_tunnel *t; 376 struct ip_tunnel *t;
377 struct ip_tunnel __rcu **tp; 377 struct ip_tunnel __rcu **tp;
378 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 378 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
379 379
380 for (tp = __ipgre_bucket(ign, parms); 380 for (tp = __ipgre_bucket(ign, parms);
381 (t = rtnl_dereference(*tp)) != NULL; 381 (t = rtnl_dereference(*tp)) != NULL;
382 tp = &t->next) 382 tp = &t->next)
383 if (local == t->parms.iph.saddr && 383 if (local == t->parms.iph.saddr &&
384 remote == t->parms.iph.daddr && 384 remote == t->parms.iph.daddr &&
385 key == t->parms.i_key && 385 key == t->parms.i_key &&
386 link == t->parms.link && 386 link == t->parms.link &&
387 type == t->dev->type) 387 type == t->dev->type)
388 break; 388 break;
389 389
390 return t; 390 return t;
391 } 391 }
392 392
393 static struct ip_tunnel *ipgre_tunnel_locate(struct net *net, 393 static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
394 struct ip_tunnel_parm *parms, int create) 394 struct ip_tunnel_parm *parms, int create)
395 { 395 {
396 struct ip_tunnel *t, *nt; 396 struct ip_tunnel *t, *nt;
397 struct net_device *dev; 397 struct net_device *dev;
398 char name[IFNAMSIZ]; 398 char name[IFNAMSIZ];
399 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 399 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
400 400
401 t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE); 401 t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
402 if (t || !create) 402 if (t || !create)
403 return t; 403 return t;
404 404
405 if (parms->name[0]) 405 if (parms->name[0])
406 strlcpy(name, parms->name, IFNAMSIZ); 406 strlcpy(name, parms->name, IFNAMSIZ);
407 else 407 else
408 strcpy(name, "gre%d"); 408 strcpy(name, "gre%d");
409 409
410 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup); 410 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
411 if (!dev) 411 if (!dev)
412 return NULL; 412 return NULL;
413 413
414 dev_net_set(dev, net); 414 dev_net_set(dev, net);
415 415
416 nt = netdev_priv(dev); 416 nt = netdev_priv(dev);
417 nt->parms = *parms; 417 nt->parms = *parms;
418 dev->rtnl_link_ops = &ipgre_link_ops; 418 dev->rtnl_link_ops = &ipgre_link_ops;
419 419
420 dev->mtu = ipgre_tunnel_bind_dev(dev); 420 dev->mtu = ipgre_tunnel_bind_dev(dev);
421 421
422 if (register_netdevice(dev) < 0) 422 if (register_netdevice(dev) < 0)
423 goto failed_free; 423 goto failed_free;
424 424
425 /* Can use a lockless transmit, unless we generate output sequences */
426 if (!(nt->parms.o_flags & GRE_SEQ))
427 dev->features |= NETIF_F_LLTX;
428
425 dev_hold(dev); 429 dev_hold(dev);
426 ipgre_tunnel_link(ign, nt); 430 ipgre_tunnel_link(ign, nt);
427 return nt; 431 return nt;
428 432
429 failed_free: 433 failed_free:
430 free_netdev(dev); 434 free_netdev(dev);
431 return NULL; 435 return NULL;
432 } 436 }
433 437
434 static void ipgre_tunnel_uninit(struct net_device *dev) 438 static void ipgre_tunnel_uninit(struct net_device *dev)
435 { 439 {
436 struct net *net = dev_net(dev); 440 struct net *net = dev_net(dev);
437 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 441 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
438 442
439 ipgre_tunnel_unlink(ign, netdev_priv(dev)); 443 ipgre_tunnel_unlink(ign, netdev_priv(dev));
440 dev_put(dev); 444 dev_put(dev);
441 } 445 }
442 446
443 447
444 static void ipgre_err(struct sk_buff *skb, u32 info) 448 static void ipgre_err(struct sk_buff *skb, u32 info)
445 { 449 {
446 450
447 /* All the routers (except for Linux) return only 451 /* All the routers (except for Linux) return only
448 8 bytes of packet payload. It means, that precise relaying of 452 8 bytes of packet payload. It means, that precise relaying of
449 ICMP in the real Internet is absolutely infeasible. 453 ICMP in the real Internet is absolutely infeasible.
450 454
451 Moreover, Cisco "wise men" put GRE key to the third word 455 Moreover, Cisco "wise men" put GRE key to the third word
452 in GRE header. It makes impossible maintaining even soft state for keyed 456 in GRE header. It makes impossible maintaining even soft state for keyed
453 GRE tunnels with enabled checksum. Tell them "thank you". 457 GRE tunnels with enabled checksum. Tell them "thank you".
454 458
455 Well, I wonder, rfc1812 was written by Cisco employee, 459 Well, I wonder, rfc1812 was written by Cisco employee,
456 what the hell these idiots break standrads established 460 what the hell these idiots break standrads established
457 by themself??? 461 by themself???
458 */ 462 */
459 463
460 const struct iphdr *iph = (const struct iphdr *)skb->data; 464 const struct iphdr *iph = (const struct iphdr *)skb->data;
461 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); 465 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2));
462 int grehlen = (iph->ihl<<2) + 4; 466 int grehlen = (iph->ihl<<2) + 4;
463 const int type = icmp_hdr(skb)->type; 467 const int type = icmp_hdr(skb)->type;
464 const int code = icmp_hdr(skb)->code; 468 const int code = icmp_hdr(skb)->code;
465 struct ip_tunnel *t; 469 struct ip_tunnel *t;
466 __be16 flags; 470 __be16 flags;
467 471
468 flags = p[0]; 472 flags = p[0];
469 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { 473 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
470 if (flags&(GRE_VERSION|GRE_ROUTING)) 474 if (flags&(GRE_VERSION|GRE_ROUTING))
471 return; 475 return;
472 if (flags&GRE_KEY) { 476 if (flags&GRE_KEY) {
473 grehlen += 4; 477 grehlen += 4;
474 if (flags&GRE_CSUM) 478 if (flags&GRE_CSUM)
475 grehlen += 4; 479 grehlen += 4;
476 } 480 }
477 } 481 }
478 482
479 /* If only 8 bytes returned, keyed message will be dropped here */ 483 /* If only 8 bytes returned, keyed message will be dropped here */
480 if (skb_headlen(skb) < grehlen) 484 if (skb_headlen(skb) < grehlen)
481 return; 485 return;
482 486
483 switch (type) { 487 switch (type) {
484 default: 488 default:
485 case ICMP_PARAMETERPROB: 489 case ICMP_PARAMETERPROB:
486 return; 490 return;
487 491
488 case ICMP_DEST_UNREACH: 492 case ICMP_DEST_UNREACH:
489 switch (code) { 493 switch (code) {
490 case ICMP_SR_FAILED: 494 case ICMP_SR_FAILED:
491 case ICMP_PORT_UNREACH: 495 case ICMP_PORT_UNREACH:
492 /* Impossible event. */ 496 /* Impossible event. */
493 return; 497 return;
494 case ICMP_FRAG_NEEDED: 498 case ICMP_FRAG_NEEDED:
495 /* Soft state for pmtu is maintained by IP core. */ 499 /* Soft state for pmtu is maintained by IP core. */
496 return; 500 return;
497 default: 501 default:
498 /* All others are translated to HOST_UNREACH. 502 /* All others are translated to HOST_UNREACH.
499 rfc2003 contains "deep thoughts" about NET_UNREACH, 503 rfc2003 contains "deep thoughts" about NET_UNREACH,
500 I believe they are just ether pollution. --ANK 504 I believe they are just ether pollution. --ANK
501 */ 505 */
502 break; 506 break;
503 } 507 }
504 break; 508 break;
505 case ICMP_TIME_EXCEEDED: 509 case ICMP_TIME_EXCEEDED:
506 if (code != ICMP_EXC_TTL) 510 if (code != ICMP_EXC_TTL)
507 return; 511 return;
508 break; 512 break;
509 } 513 }
510 514
511 rcu_read_lock(); 515 rcu_read_lock();
512 t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, 516 t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr,
513 flags & GRE_KEY ? 517 flags & GRE_KEY ?
514 *(((__be32 *)p) + (grehlen / 4) - 1) : 0, 518 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
515 p[1]); 519 p[1]);
516 if (t == NULL || t->parms.iph.daddr == 0 || 520 if (t == NULL || t->parms.iph.daddr == 0 ||
517 ipv4_is_multicast(t->parms.iph.daddr)) 521 ipv4_is_multicast(t->parms.iph.daddr))
518 goto out; 522 goto out;
519 523
520 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) 524 if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
521 goto out; 525 goto out;
522 526
523 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO)) 527 if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
524 t->err_count++; 528 t->err_count++;
525 else 529 else
526 t->err_count = 1; 530 t->err_count = 1;
527 t->err_time = jiffies; 531 t->err_time = jiffies;
528 out: 532 out:
529 rcu_read_unlock(); 533 rcu_read_unlock();
530 } 534 }
531 535
532 static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb) 536 static inline void ipgre_ecn_decapsulate(const struct iphdr *iph, struct sk_buff *skb)
533 { 537 {
534 if (INET_ECN_is_ce(iph->tos)) { 538 if (INET_ECN_is_ce(iph->tos)) {
535 if (skb->protocol == htons(ETH_P_IP)) { 539 if (skb->protocol == htons(ETH_P_IP)) {
536 IP_ECN_set_ce(ip_hdr(skb)); 540 IP_ECN_set_ce(ip_hdr(skb));
537 } else if (skb->protocol == htons(ETH_P_IPV6)) { 541 } else if (skb->protocol == htons(ETH_P_IPV6)) {
538 IP6_ECN_set_ce(ipv6_hdr(skb)); 542 IP6_ECN_set_ce(ipv6_hdr(skb));
539 } 543 }
540 } 544 }
541 } 545 }
542 546
543 static inline u8 547 static inline u8
544 ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb) 548 ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
545 { 549 {
546 u8 inner = 0; 550 u8 inner = 0;
547 if (skb->protocol == htons(ETH_P_IP)) 551 if (skb->protocol == htons(ETH_P_IP))
548 inner = old_iph->tos; 552 inner = old_iph->tos;
549 else if (skb->protocol == htons(ETH_P_IPV6)) 553 else if (skb->protocol == htons(ETH_P_IPV6))
550 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph); 554 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
551 return INET_ECN_encapsulate(tos, inner); 555 return INET_ECN_encapsulate(tos, inner);
552 } 556 }
553 557
/*
 * GRE receive handler: parse the GRE header, look up the matching tunnel
 * device and hand the decapsulated packet back to the stack via netif_rx().
 * Always returns 0; packets with no matching tunnel trigger an ICMP
 * port-unreachable, malformed packets are dropped silently.
 */
static int ipgre_rcv(struct sk_buff *skb)
{
	const struct iphdr *iph;
	u8     *h;
	__be16    flags;
	__sum16   csum = 0;
	__be32 key = 0;
	u32    seqno = 0;
	struct ip_tunnel *tunnel;
	int    offset = 4;	/* bytes consumed so far: base GRE header */
	__be16 gre_proto;

	/* 16 bytes covers the largest GRE header parsed below:
	 * 4 (base) + 4 (csum) + 4 (key) + 4 (seq). */
	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16*)h;

	if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
		/* - Version must be 0.
		   - We do not support routing headers.
		 */
		if (flags&(GRE_VERSION|GRE_ROUTING))
			goto drop_nolock;

		if (flags&GRE_CSUM) {
			/* Validate the GRE checksum over the whole packet,
			 * reusing hardware csum state when available. */
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags&GRE_KEY) {
			key = *(__be32*)(h + offset);
			offset += 4;
		}
		if (flags&GRE_SEQ) {
			seqno = ntohl(*(__be32*)(h + offset));
			offset += 4;
		}
	}

	/* Protocol type field sits right after the flags word. */
	gre_proto = *(__be16 *)(h + 2);

	rcu_read_lock();
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct pcpu_tstats *tstats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			/* 0x4x = IPv4 version nibble; anything else means a
			 * WCCPv2 redirect header precedes the IP packet. */
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		/* Strip the GRE header so skb->data points at the payload. */
		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (rt_is_output_route(skb_rtable(skb)))
				goto drop;
			tunnel->dev->stats.multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		/* A checksum must be present and valid exactly when the
		 * tunnel was configured with GRE_CSUM on input. */
		if (((flags&GRE_CSUM) && csum) ||
		    (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
			tunnel->dev->stats.rx_crc_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		/* Enforce in-order delivery when sequencing is enabled;
		 * the s32 cast handles sequence-number wraparound. */
		if (tunnel->parms.i_flags&GRE_SEQ) {
			if (!(flags&GRE_SEQ) ||
			    (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
				tunnel->dev->stats.rx_fifo_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				tunnel->dev->stats.rx_length_errors++;
				tunnel->dev->stats.rx_errors++;
				goto drop;
			}

			/* Re-read after pskb_may_pull may have reallocated. */
			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		tstats = this_cpu_ptr(tunnel->dev->tstats);
		tstats->rx_packets++;
		tstats->rx_bytes += skb->len;

		/* Re-home the skb on the tunnel device before delivery. */
		__skb_tunnel_rx(skb, tunnel->dev);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		netif_rx(skb);

		rcu_read_unlock();
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	rcu_read_unlock();
drop_nolock:
	kfree_skb(skb);
	return 0;
}
690 694
/*
 * GRE transmit path: resolve the outer destination, route it, perform
 * path-MTU handling, prepend the outer IP + GRE headers (with optional
 * key/seq/csum trailer) and hand the packet to the IP output machinery
 * via __IPTUNNEL_XMIT. Always returns NETDEV_TX_OK; errors are counted
 * in dev->stats and the skb is freed.
 */
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct pcpu_tstats *tstats;
	const struct iphdr *old_iph = ip_hdr(skb);
	const struct iphdr *tiph;
	struct flowi4 fl4;
	u8     tos;
	__be16 df;
	struct rtable *rt;     			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	/* With header_ops (ARPHRD_IPGRE) the outer header was already
	 * built by ipgre_header() and sits at skb->data. */
	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
		gre_hlen = 0;
		tiph = (const struct iphdr *)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel: derive the outer destination from the
		 * route/neighbour of the inner packet. */

		if (skb_dst(skb) == NULL) {
			dev->stats.tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct neighbour *neigh = dst_get_neighbour_noref(skb_dst(skb));
			const struct in6_addr *addr6;
			int addr_type;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (const struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			/* Only IPv4-compatible IPv6 addresses carry an
			 * embedded IPv4 destination we can tunnel to. */
			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
#endif
		else
			goto tx_error;
	}

	/* tos == 1 is the "inherit" sentinel: copy from the inner packet. */
	tos = tiph->tos;
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
		else if (skb->protocol == htons(ETH_P_IPV6))
			tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
	}

	rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr,
				 tunnel->parms.o_key, RT_TOS(tos),
				 tunnel->parms.link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}
	tdev = rt->dst.dev;

	/* Routing back onto ourselves would recurse forever. */
	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	df = tiph->frag_off;
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen;
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off&htons(IP_DF));

		/* DF set and packet too big: report frag-needed and bail. */
		if ((old_iph->frag_off&htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);

		/* Record the reduced MTU on the IPv6 route when it is
		 * host-specific or the tunnel has a fixed unicast peer. */
		if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
			if ((tunnel->parms.iph.daddr &&
			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
			    rt6->rt6i_dst.plen == 128) {
				rt6->rt6i_flags |= RTF_MODIFIED;
				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
			}
		}

		if (mtu >= IPV6_MIN_MTU && mtu < skb->len - tunnel->hlen + gre_hlen) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#endif

	/* Replay recent ICMP-reported tunnel errors back to the sender,
	 * rate-limited by err_count/err_time. */
	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;

			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len;

	/* Reallocate if there is no room for the outer headers, or the
	 * skb is shared/unwritable (we are about to modify headers). */
	if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (max_headroom > dev->needed_headroom)
			dev->needed_headroom = max_headroom;
		if (!new_skb) {
			ip_rt_put(rt);
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb_reset_transport_header(skb);
	skb_push(skb, gre_hlen);
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */

	iph 			=	ip_hdr(skb);
	iph->version		=	4;
	iph->ihl		=	sizeof(struct iphdr) >> 2;
	iph->frag_off		=	df;
	iph->protocol		=	IPPROTO_GRE;
	iph->tos		=	ipgre_ecn_encapsulate(tos, old_iph, skb);
	iph->daddr		=	fl4.daddr;
	iph->saddr		=	fl4.saddr;

	/* ttl == 0 means "inherit" from the inner packet (or the route). */
	if ((iph->ttl = tiph->ttl) == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			iph->ttl = old_iph->ttl;
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
#endif
		else
			iph->ttl = ip4_dst_hoplimit(&rt->dst);
	}

	/* GRE flags word, then the protocol type. */
	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
	((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
				   htons(ETH_P_TEB) : skb->protocol;

	/* Fill the optional fields back-to-front: seq, key, then csum
	 * (which must be computed last, over the finished GRE header). */
	if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
		__be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4);

		if (tunnel->parms.o_flags&GRE_SEQ) {
			++tunnel->o_seqno;
			*ptr = htonl(tunnel->o_seqno);
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_KEY) {
			*ptr = tunnel->parms.o_key;
			ptr--;
		}
		if (tunnel->parms.o_flags&GRE_CSUM) {
			*ptr = 0;
			*(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr));
		}
	}

	nf_reset(skb);
	tstats = this_cpu_ptr(dev->tstats);
	__IPTUNNEL_XMIT(tstats, &dev->stats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);

tx_error:
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
921 925
/*
 * Bind the tunnel to its likely output device: probe the route to the
 * remote endpoint to size needed_headroom, precompute the GRE header
 * length (tunnel->hlen) from the configured output flags, and return
 * a suitable MTU for the tunnel device (never below 68, the IPv4
 * minimum).
 */
static int ipgre_tunnel_bind_dev(struct net_device *dev)
{
	struct net_device *tdev = NULL;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	int hlen = LL_MAX_HEADER;
	int mtu = ETH_DATA_LEN;
	int addend = sizeof(struct iphdr) + 4;	/* outer IP + base GRE header */

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	/* Guess output device to choose reasonable mtu and needed_headroom */

	if (iph->daddr) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(dev_net(dev), &fl4,
					 iph->daddr, iph->saddr,
					 tunnel->parms.o_key,
					 RT_TOS(iph->tos),
					 tunnel->parms.link);
		if (!IS_ERR(rt)) {
			tdev = rt->dst.dev;
			ip_rt_put(rt);
		}

		if (dev->type != ARPHRD_ETHER)
			dev->flags |= IFF_POINTOPOINT;
	}

	/* No route found: fall back to the explicitly bound link, if any. */
	if (!tdev && tunnel->parms.link)
		tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);

	if (tdev) {
		hlen = tdev->hard_header_len + tdev->needed_headroom;
		mtu = tdev->mtu;
	}
	dev->iflink = tunnel->parms.link;

	/* Precalculate GRE options length */
	if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
		if (tunnel->parms.o_flags&GRE_CSUM)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_KEY)
			addend += 4;
		if (tunnel->parms.o_flags&GRE_SEQ)
			addend += 4;
	}
	dev->needed_headroom = addend + hlen;
	mtu -= dev->hard_header_len + addend;

	if (mtu < 68)
		mtu = 68;

	tunnel->hlen = addend;

	return mtu;
}
982 986
/*
 * Tunnel configuration ioctl handler (SIOCGETTUNNEL / SIOCADDTUNNEL /
 * SIOCCHGTUNNEL / SIOCDELTUNNEL), exchanging a struct ip_tunnel_parm
 * with userspace via ifr->ifr_ifru.ifru_data.  Add/change/delete
 * require CAP_NET_ADMIN.  On the fallback device the ioctl operates on
 * the tunnel named in the parameters; otherwise on this device itself.
 */
static int
ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == ign->fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipgre_tunnel_locate(net, &p, 0);
		}
		if (t == NULL)
			t = netdev_priv(dev);
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		/* Reject anything that is not plain GRE-over-IPv4 or that
		 * uses the unsupported version/routing GRE bits. */
		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
		    p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
		    ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= htons(IP_DF);

		/* Keys are only meaningful when the corresponding flag is set. */
		if (!(p.i_flags&GRE_KEY))
			p.i_key = 0;
		if (!(p.o_flags&GRE_KEY))
			p.o_key = 0;

		/* Creates the device on SIOCADDTUNNEL, else lookup only. */
		t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);

		if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
			if (t != NULL) {
				/* Parameters already belong to another tunnel. */
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				unsigned int nflags = 0;

				t = netdev_priv(dev);

				if (ipv4_is_multicast(p.iph.daddr))
					nflags = IFF_BROADCAST;
				else if (p.iph.daddr)
					nflags = IFF_POINTOPOINT;

				/* Cannot flip between broadcast and p2p mode
				 * on a live device. */
				if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
					err = -EINVAL;
					break;
				}
				/* Rekey: unlink from the hash, wait for RCU
				 * readers, update addresses, relink. */
				ipgre_tunnel_unlink(ign, t);
				synchronize_net();
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				t->parms.i_key = p.i_key;
				t->parms.o_key = p.o_key;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipgre_tunnel_link(ign, t);
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
				if (t->parms.link != p.link) {
					t->parms.link = p.link;
					/* Rebinding may change the usable MTU. */
					dev->mtu = ipgre_tunnel_bind_dev(dev);
					netdev_state_change(dev);
				}
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == ign->fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
				goto done;
			err = -EPERM;
			/* The fallback device itself may not be deleted. */
			if (t == netdev_priv(ign->fb_tunnel_dev))
				goto done;
			dev = t->dev;
		}
		unregister_netdevice(dev);
		err = 0;
		break;

	default:
		err = -EINVAL;
	}

done:
	return err;
}
1113 1117
1114 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 1118 static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1115 { 1119 {
1116 struct ip_tunnel *tunnel = netdev_priv(dev); 1120 struct ip_tunnel *tunnel = netdev_priv(dev);
1117 if (new_mtu < 68 || 1121 if (new_mtu < 68 ||
1118 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen) 1122 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen)
1119 return -EINVAL; 1123 return -EINVAL;
1120 dev->mtu = new_mtu; 1124 dev->mtu = new_mtu;
1121 return 0; 1125 return 0;
1122 } 1126 }
1123 1127
/* Nice toy. Unfortunately, useless in real life :-)
   It allows one to construct a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have the impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...

 */
1152 1156
1153 static int ipgre_header(struct sk_buff *skb, struct net_device *dev, 1157 static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1154 unsigned short type, 1158 unsigned short type,
1155 const void *daddr, const void *saddr, unsigned int len) 1159 const void *daddr, const void *saddr, unsigned int len)
1156 { 1160 {
1157 struct ip_tunnel *t = netdev_priv(dev); 1161 struct ip_tunnel *t = netdev_priv(dev);
1158 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); 1162 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1159 __be16 *p = (__be16*)(iph+1); 1163 __be16 *p = (__be16*)(iph+1);
1160 1164
1161 memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); 1165 memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1162 p[0] = t->parms.o_flags; 1166 p[0] = t->parms.o_flags;
1163 p[1] = htons(type); 1167 p[1] = htons(type);
1164 1168
1165 /* 1169 /*
1166 * Set the source hardware address. 1170 * Set the source hardware address.
1167 */ 1171 */
1168 1172
1169 if (saddr) 1173 if (saddr)
1170 memcpy(&iph->saddr, saddr, 4); 1174 memcpy(&iph->saddr, saddr, 4);
1171 if (daddr) 1175 if (daddr)
1172 memcpy(&iph->daddr, daddr, 4); 1176 memcpy(&iph->daddr, daddr, 4);
1173 if (iph->daddr) 1177 if (iph->daddr)
1174 return t->hlen; 1178 return t->hlen;
1175 1179
1176 return -t->hlen; 1180 return -t->hlen;
1177 } 1181 }
1178 1182
1179 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) 1183 static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
1180 { 1184 {
1181 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb); 1185 const struct iphdr *iph = (const struct iphdr *) skb_mac_header(skb);
1182 memcpy(haddr, &iph->saddr, 4); 1186 memcpy(haddr, &iph->saddr, 4);
1183 return 4; 1187 return 4;
1184 } 1188 }
1185 1189
/* Link-layer header ops used by broadcast/NBMA GRE tunnels. */
static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};
1190 1194
1191 #ifdef CONFIG_NET_IPGRE_BROADCAST 1195 #ifdef CONFIG_NET_IPGRE_BROADCAST
1192 static int ipgre_open(struct net_device *dev) 1196 static int ipgre_open(struct net_device *dev)
1193 { 1197 {
1194 struct ip_tunnel *t = netdev_priv(dev); 1198 struct ip_tunnel *t = netdev_priv(dev);
1195 1199
1196 if (ipv4_is_multicast(t->parms.iph.daddr)) { 1200 if (ipv4_is_multicast(t->parms.iph.daddr)) {
1197 struct flowi4 fl4; 1201 struct flowi4 fl4;
1198 struct rtable *rt; 1202 struct rtable *rt;
1199 1203
1200 rt = ip_route_output_gre(dev_net(dev), &fl4, 1204 rt = ip_route_output_gre(dev_net(dev), &fl4,
1201 t->parms.iph.daddr, 1205 t->parms.iph.daddr,
1202 t->parms.iph.saddr, 1206 t->parms.iph.saddr,
1203 t->parms.o_key, 1207 t->parms.o_key,
1204 RT_TOS(t->parms.iph.tos), 1208 RT_TOS(t->parms.iph.tos),
1205 t->parms.link); 1209 t->parms.link);
1206 if (IS_ERR(rt)) 1210 if (IS_ERR(rt))
1207 return -EADDRNOTAVAIL; 1211 return -EADDRNOTAVAIL;
1208 dev = rt->dst.dev; 1212 dev = rt->dst.dev;
1209 ip_rt_put(rt); 1213 ip_rt_put(rt);
1210 if (__in_dev_get_rtnl(dev) == NULL) 1214 if (__in_dev_get_rtnl(dev) == NULL)
1211 return -EADDRNOTAVAIL; 1215 return -EADDRNOTAVAIL;
1212 t->mlink = dev->ifindex; 1216 t->mlink = dev->ifindex;
1213 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr); 1217 ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
1214 } 1218 }
1215 return 0; 1219 return 0;
1216 } 1220 }
1217 1221
1218 static int ipgre_close(struct net_device *dev) 1222 static int ipgre_close(struct net_device *dev)
1219 { 1223 {
1220 struct ip_tunnel *t = netdev_priv(dev); 1224 struct ip_tunnel *t = netdev_priv(dev);
1221 1225
1222 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) { 1226 if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
1223 struct in_device *in_dev; 1227 struct in_device *in_dev;
1224 in_dev = inetdev_by_index(dev_net(dev), t->mlink); 1228 in_dev = inetdev_by_index(dev_net(dev), t->mlink);
1225 if (in_dev) 1229 if (in_dev)
1226 ip_mc_dec_group(in_dev, t->parms.iph.daddr); 1230 ip_mc_dec_group(in_dev, t->parms.iph.daddr);
1227 } 1231 }
1228 return 0; 1232 return 0;
1229 } 1233 }
1230 1234
1231 #endif 1235 #endif
1232 1236
/* net_device_ops for ioctl-created (non-netlink) GRE tunnel devices. */
static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_do_ioctl		= ipgre_tunnel_ioctl,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats		= ipgre_get_stats,
};
1245 1249
1246 static void ipgre_dev_free(struct net_device *dev) 1250 static void ipgre_dev_free(struct net_device *dev)
1247 { 1251 {
1248 free_percpu(dev->tstats); 1252 free_percpu(dev->tstats);
1249 free_netdev(dev); 1253 free_netdev(dev);
1250 } 1254 }
1251 1255
/* ops->setup for the "gre" link type and the per-netns fallback device.
 * NOTE(review): unlike ipip/sit, NETIF_F_LLTX is NOT set here; it is
 * only enabled conditionally in ipgre_newlink(), which the ioctl
 * (SIOCADDTUNNEL) creation path never runs -- confirm the same check
 * exists on the ipgre_tunnel_locate() path.
 */
static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops		= &ipgre_netdev_ops;
	dev->destructor		= ipgre_dev_free;

	dev->type		= ARPHRD_IPGRE;
	/* Room for the outer IP header plus the 4-byte basic GRE header. */
	dev->needed_headroom	= LL_MAX_HEADER + sizeof(struct iphdr) + 4;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 4;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= 4;
	dev->features		|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;
}
1266 1270
/* ndo_init for GRE tunnel devices: copy the tunnel endpoints into the
 * device addresses, wire up header_ops for broadcast/NBMA modes and
 * allocate per-cpu stats.  Returns 0 or a negative errno.
 */
static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;
	struct iphdr *iph;

	tunnel = netdev_priv(dev);
	iph = &tunnel->parms.iph;

	tunnel->dev = dev;
	strcpy(tunnel->parms.name, dev->name);

	/* dev_addr/broadcast hold the IPv4 endpoints (addr_len == 4). */
	memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
	memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);

	if (iph->daddr) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Multicast destination requires a local address. */
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
		}
#endif
	} else
		/* NBMA mode: destination comes per-packet via header_ops. */
		dev->header_ops = &ipgre_header_ops;

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
1299 1303
1300 static void ipgre_fb_tunnel_init(struct net_device *dev) 1304 static void ipgre_fb_tunnel_init(struct net_device *dev)
1301 { 1305 {
1302 struct ip_tunnel *tunnel = netdev_priv(dev); 1306 struct ip_tunnel *tunnel = netdev_priv(dev);
1303 struct iphdr *iph = &tunnel->parms.iph; 1307 struct iphdr *iph = &tunnel->parms.iph;
1304 1308
1305 tunnel->dev = dev; 1309 tunnel->dev = dev;
1306 strcpy(tunnel->parms.name, dev->name); 1310 strcpy(tunnel->parms.name, dev->name);
1307 1311
1308 iph->version = 4; 1312 iph->version = 4;
1309 iph->protocol = IPPROTO_GRE; 1313 iph->protocol = IPPROTO_GRE;
1310 iph->ihl = 5; 1314 iph->ihl = 5;
1311 tunnel->hlen = sizeof(struct iphdr) + 4; 1315 tunnel->hlen = sizeof(struct iphdr) + 4;
1312 1316
1313 dev_hold(dev); 1317 dev_hold(dev);
1314 } 1318 }
1315 1319
1316 1320
/* Receive/error hooks registered with the shared GRE demux. */
static const struct gre_protocol ipgre_protocol = {
	.handler	= ipgre_rcv,
	.err_handler	= ipgre_err,
};
1321 1325
1322 static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head) 1326 static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1323 { 1327 {
1324 int prio; 1328 int prio;
1325 1329
1326 for (prio = 0; prio < 4; prio++) { 1330 for (prio = 0; prio < 4; prio++) {
1327 int h; 1331 int h;
1328 for (h = 0; h < HASH_SIZE; h++) { 1332 for (h = 0; h < HASH_SIZE; h++) {
1329 struct ip_tunnel *t; 1333 struct ip_tunnel *t;
1330 1334
1331 t = rtnl_dereference(ign->tunnels[prio][h]); 1335 t = rtnl_dereference(ign->tunnels[prio][h]);
1332 1336
1333 while (t != NULL) { 1337 while (t != NULL) {
1334 unregister_netdevice_queue(t->dev, head); 1338 unregister_netdevice_queue(t->dev, head);
1335 t = rtnl_dereference(t->next); 1339 t = rtnl_dereference(t->next);
1336 } 1340 }
1337 } 1341 }
1338 } 1342 }
1339 } 1343 }
1340 1344
1341 static int __net_init ipgre_init_net(struct net *net) 1345 static int __net_init ipgre_init_net(struct net *net)
1342 { 1346 {
1343 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 1347 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1344 int err; 1348 int err;
1345 1349
1346 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0", 1350 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1347 ipgre_tunnel_setup); 1351 ipgre_tunnel_setup);
1348 if (!ign->fb_tunnel_dev) { 1352 if (!ign->fb_tunnel_dev) {
1349 err = -ENOMEM; 1353 err = -ENOMEM;
1350 goto err_alloc_dev; 1354 goto err_alloc_dev;
1351 } 1355 }
1352 dev_net_set(ign->fb_tunnel_dev, net); 1356 dev_net_set(ign->fb_tunnel_dev, net);
1353 1357
1354 ipgre_fb_tunnel_init(ign->fb_tunnel_dev); 1358 ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1355 ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops; 1359 ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1356 1360
1357 if ((err = register_netdev(ign->fb_tunnel_dev))) 1361 if ((err = register_netdev(ign->fb_tunnel_dev)))
1358 goto err_reg_dev; 1362 goto err_reg_dev;
1359 1363
1360 rcu_assign_pointer(ign->tunnels_wc[0], 1364 rcu_assign_pointer(ign->tunnels_wc[0],
1361 netdev_priv(ign->fb_tunnel_dev)); 1365 netdev_priv(ign->fb_tunnel_dev));
1362 return 0; 1366 return 0;
1363 1367
1364 err_reg_dev: 1368 err_reg_dev:
1365 ipgre_dev_free(ign->fb_tunnel_dev); 1369 ipgre_dev_free(ign->fb_tunnel_dev);
1366 err_alloc_dev: 1370 err_alloc_dev:
1367 return err; 1371 return err;
1368 } 1372 }
1369 1373
1370 static void __net_exit ipgre_exit_net(struct net *net) 1374 static void __net_exit ipgre_exit_net(struct net *net)
1371 { 1375 {
1372 struct ipgre_net *ign; 1376 struct ipgre_net *ign;
1373 LIST_HEAD(list); 1377 LIST_HEAD(list);
1374 1378
1375 ign = net_generic(net, ipgre_net_id); 1379 ign = net_generic(net, ipgre_net_id);
1376 rtnl_lock(); 1380 rtnl_lock();
1377 ipgre_destroy_tunnels(ign, &list); 1381 ipgre_destroy_tunnels(ign, &list);
1378 unregister_netdevice_many(&list); 1382 unregister_netdevice_many(&list);
1379 rtnl_unlock(); 1383 rtnl_unlock();
1380 } 1384 }
1381 1385
/* Per-network-namespace lifecycle hooks and private-state descriptor. */
static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit = ipgre_exit_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ipgre_net),
};
1388 1392
1389 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) 1393 static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
1390 { 1394 {
1391 __be16 flags; 1395 __be16 flags;
1392 1396
1393 if (!data) 1397 if (!data)
1394 return 0; 1398 return 0;
1395 1399
1396 flags = 0; 1400 flags = 0;
1397 if (data[IFLA_GRE_IFLAGS]) 1401 if (data[IFLA_GRE_IFLAGS])
1398 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]); 1402 flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
1399 if (data[IFLA_GRE_OFLAGS]) 1403 if (data[IFLA_GRE_OFLAGS])
1400 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]); 1404 flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
1401 if (flags & (GRE_VERSION|GRE_ROUTING)) 1405 if (flags & (GRE_VERSION|GRE_ROUTING))
1402 return -EINVAL; 1406 return -EINVAL;
1403 1407
1404 return 0; 1408 return 0;
1405 } 1409 }
1406 1410
1407 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[]) 1411 static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[])
1408 { 1412 {
1409 __be32 daddr; 1413 __be32 daddr;
1410 1414
1411 if (tb[IFLA_ADDRESS]) { 1415 if (tb[IFLA_ADDRESS]) {
1412 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) 1416 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
1413 return -EINVAL; 1417 return -EINVAL;
1414 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) 1418 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
1415 return -EADDRNOTAVAIL; 1419 return -EADDRNOTAVAIL;
1416 } 1420 }
1417 1421
1418 if (!data) 1422 if (!data)
1419 goto out; 1423 goto out;
1420 1424
1421 if (data[IFLA_GRE_REMOTE]) { 1425 if (data[IFLA_GRE_REMOTE]) {
1422 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4); 1426 memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
1423 if (!daddr) 1427 if (!daddr)
1424 return -EINVAL; 1428 return -EINVAL;
1425 } 1429 }
1426 1430
1427 out: 1431 out:
1428 return ipgre_tunnel_validate(tb, data); 1432 return ipgre_tunnel_validate(tb, data);
1429 } 1433 }
1430 1434
/* Translate IFLA_GRE_* netlink attributes into an ip_tunnel_parm.
 * Absent attributes leave the corresponding field zeroed, except
 * frag_off: path-MTU discovery (IP_DF) defaults to ON unless
 * IFLA_GRE_PMTUDISC is present and zero.
 */
static void ipgre_netlink_parms(struct nlattr *data[],
				struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]);

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]);

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
1471 1475
1472 static int ipgre_tap_init(struct net_device *dev) 1476 static int ipgre_tap_init(struct net_device *dev)
1473 { 1477 {
1474 struct ip_tunnel *tunnel; 1478 struct ip_tunnel *tunnel;
1475 1479
1476 tunnel = netdev_priv(dev); 1480 tunnel = netdev_priv(dev);
1477 1481
1478 tunnel->dev = dev; 1482 tunnel->dev = dev;
1479 strcpy(tunnel->parms.name, dev->name); 1483 strcpy(tunnel->parms.name, dev->name);
1480 1484
1481 ipgre_tunnel_bind_dev(dev); 1485 ipgre_tunnel_bind_dev(dev);
1482 1486
1483 dev->tstats = alloc_percpu(struct pcpu_tstats); 1487 dev->tstats = alloc_percpu(struct pcpu_tstats);
1484 if (!dev->tstats) 1488 if (!dev->tstats)
1485 return -ENOMEM; 1489 return -ENOMEM;
1486 1490
1487 return 0; 1491 return 0;
1488 } 1492 }
1489 1493
/* net_device_ops for gretap devices (Ethernet semantics over GRE). */
static const struct net_device_ops ipgre_tap_netdev_ops = {
	.ndo_init		= ipgre_tap_init,
	.ndo_uninit		= ipgre_tunnel_uninit,
	.ndo_start_xmit		= ipgre_tunnel_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ipgre_tunnel_change_mtu,
	.ndo_get_stats		= ipgre_get_stats,
};
1499 1503
1500 static void ipgre_tap_setup(struct net_device *dev) 1504 static void ipgre_tap_setup(struct net_device *dev)
1501 { 1505 {
1502 1506
1503 ether_setup(dev); 1507 ether_setup(dev);
1504 1508
1505 dev->netdev_ops = &ipgre_tap_netdev_ops; 1509 dev->netdev_ops = &ipgre_tap_netdev_ops;
1506 dev->destructor = ipgre_dev_free; 1510 dev->destructor = ipgre_dev_free;
1507 1511
1508 dev->iflink = 0; 1512 dev->iflink = 0;
1509 dev->features |= NETIF_F_NETNS_LOCAL; 1513 dev->features |= NETIF_F_NETNS_LOCAL;
1510 } 1514 }
1511 1515
/* rtnl newlink for gre/gretap: parse parameters, reject duplicates,
 * bind to the lower device, register and hash the new tunnel.
 */
static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[],
			 struct nlattr *data[])
{
	struct ip_tunnel *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int mtu;
	int err;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &nt->parms);

	/* Refuse a second tunnel with identical parameters. */
	if (ipgre_tunnel_find(net, &nt->parms, dev->type))
		return -EEXIST;

	if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
		random_ether_addr(dev->dev_addr);

	mtu = ipgre_tunnel_bind_dev(dev);
	if (!tb[IFLA_MTU])
		dev->mtu = mtu;

	/* Can use a lockless transmit, unless we generate output sequences */
	if (!(nt->parms.o_flags & GRE_SEQ))
		dev->features |= NETIF_F_LLTX;
	/* NOTE(review): tunnels created via SIOCADDTUNNEL never run this
	 * function, so they miss the LLTX enable above -- confirm the same
	 * check exists on the ipgre_tunnel_locate() path. */

	err = register_netdevice(dev);
	if (err)
		goto out;

	dev_hold(dev);
	ipgre_tunnel_link(ign, nt);

out:
	return err;
}
1548 1552
/* rtnl changelink: update an existing tunnel's parameters.  Fields that
 * participate in hash lookup (endpoints, i_key) require unlinking and
 * relinking the tunnel; the rest are updated in place.
 */
static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[])
{
	struct ip_tunnel *t, *nt;
	struct net *net = dev_net(dev);
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	struct ip_tunnel_parm p;
	int mtu;

	/* The fallback device's parameters may not be changed. */
	if (dev == ign->fb_tunnel_dev)
		return -EINVAL;

	nt = netdev_priv(dev);
	ipgre_netlink_parms(data, &p);

	t = ipgre_tunnel_locate(net, &p, 0);

	if (t) {
		/* The requested parameters already belong to another dev. */
		if (t->dev != dev)
			return -EEXIST;
	} else {
		t = nt;

		if (dev->type != ARPHRD_ETHER) {
			unsigned int nflags = 0;

			if (ipv4_is_multicast(p.iph.daddr))
				nflags = IFF_BROADCAST;
			else if (p.iph.daddr)
				nflags = IFF_POINTOPOINT;

			/* Cannot switch between broadcast and p-t-p modes. */
			if ((dev->flags ^ nflags) &
			    (IFF_POINTOPOINT | IFF_BROADCAST))
				return -EINVAL;
		}

		/* Re-hash the tunnel under its new endpoints/input key. */
		ipgre_tunnel_unlink(ign, t);
		t->parms.iph.saddr = p.iph.saddr;
		t->parms.iph.daddr = p.iph.daddr;
		t->parms.i_key = p.i_key;
		if (dev->type != ARPHRD_ETHER) {
			memcpy(dev->dev_addr, &p.iph.saddr, 4);
			memcpy(dev->broadcast, &p.iph.daddr, 4);
		}
		ipgre_tunnel_link(ign, t);
		netdev_state_change(dev);
	}

	/* Non-hashed fields can simply be overwritten. */
	t->parms.o_key = p.o_key;
	t->parms.iph.ttl = p.iph.ttl;
	t->parms.iph.tos = p.iph.tos;
	t->parms.iph.frag_off = p.iph.frag_off;

	if (t->parms.link != p.link) {
		t->parms.link = p.link;
		mtu = ipgre_tunnel_bind_dev(dev);
		if (!tb[IFLA_MTU])
			dev->mtu = mtu;
		netdev_state_change(dev);
	}

	return 0;
}
1612 1616
/* Worst-case netlink attribute space needed by ipgre_fill_info(). */
static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		0;
}
1638 1642
/* Dump the tunnel's parameters as IFLA_GRE_* attributes.  The NLA_PUT_*
 * macros jump to nla_put_failure when the skb runs out of room.
 */
static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;

	NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link);
	NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags);
	NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags);
	NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key);
	NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key);
	NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr);
	NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr);
	NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl);
	NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos);
	/* PMTUDISC is reported as a boolean derived from the DF bit. */
	NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF)));

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
1660 1664
/* Netlink attribute policy shared by the gre and gretap link types. */
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
};
1673 1677
/* rtnl_link_ops for plain (layer-3) "gre" tunnels. */
static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
1686 1690
/* rtnl_link_ops for "gretap" (Ethernet-over-GRE) tunnels. */
static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
};
1699 1703
/*
 *	And now the module code and kernel interface.
 */
1703 1707
1704 static int __init ipgre_init(void) 1708 static int __init ipgre_init(void)
1705 { 1709 {
1706 int err; 1710 int err;
1707 1711
1708 printk(KERN_INFO "GRE over IPv4 tunneling driver\n"); 1712 printk(KERN_INFO "GRE over IPv4 tunneling driver\n");
1709 1713
1710 err = register_pernet_device(&ipgre_net_ops); 1714 err = register_pernet_device(&ipgre_net_ops);
1711 if (err < 0) 1715 if (err < 0)
1712 return err; 1716 return err;
1713 1717
1714 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); 1718 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1715 if (err < 0) { 1719 if (err < 0) {
1716 printk(KERN_INFO "ipgre init: can't add protocol\n"); 1720 printk(KERN_INFO "ipgre init: can't add protocol\n");
1717 goto add_proto_failed; 1721 goto add_proto_failed;
1718 } 1722 }
1719 1723
1720 err = rtnl_link_register(&ipgre_link_ops); 1724 err = rtnl_link_register(&ipgre_link_ops);
1721 if (err < 0) 1725 if (err < 0)
1722 goto rtnl_link_failed; 1726 goto rtnl_link_failed;
1723 1727
1724 err = rtnl_link_register(&ipgre_tap_ops); 1728 err = rtnl_link_register(&ipgre_tap_ops);
1725 if (err < 0) 1729 if (err < 0)
1726 goto tap_ops_failed; 1730 goto tap_ops_failed;
1727 1731
1728 out: 1732 out:
1729 return err; 1733 return err;
1730 1734
1731 tap_ops_failed: 1735 tap_ops_failed:
1732 rtnl_link_unregister(&ipgre_link_ops); 1736 rtnl_link_unregister(&ipgre_link_ops);
1733 rtnl_link_failed: 1737 rtnl_link_failed:
1734 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); 1738 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1735 add_proto_failed: 1739 add_proto_failed:
1736 unregister_pernet_device(&ipgre_net_ops); 1740 unregister_pernet_device(&ipgre_net_ops);
1737 goto out; 1741 goto out;
1738 } 1742 }
1739 1743
1740 static void __exit ipgre_fini(void) 1744 static void __exit ipgre_fini(void)
1741 { 1745 {
1742 rtnl_link_unregister(&ipgre_tap_ops); 1746 rtnl_link_unregister(&ipgre_tap_ops);
1743 rtnl_link_unregister(&ipgre_link_ops); 1747 rtnl_link_unregister(&ipgre_link_ops);
1744 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) 1748 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1745 printk(KERN_INFO "ipgre close: can't remove protocol\n"); 1749 printk(KERN_INFO "ipgre close: can't remove protocol\n");
1746 unregister_pernet_device(&ipgre_net_ops); 1750 unregister_pernet_device(&ipgre_net_ops);
1747 } 1751 }
1748 1752
module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
/* Autoload on "ip link add type gre" / "type gretap" requests. */
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
/* Autoload when the fallback gre0 device is opened by name. */
MODULE_ALIAS_NETDEV("gre0");
1755 1759