Commit bee31369ce16fc3898ec9a54161248c9eddb06bc
Committed by
David S. Miller
1 parent
ae3568adf4
Exists in
master
and in
41 other branches
tun: keep link (carrier) state up to date
Currently, only ethtool can get accurate link state of a tap device. With this patch, IFF_RUNNING and IF_OPER_UP/DOWN are kept up to date as well. Signed-off-by: Nolan Leake <nolan@cumulusnetworks.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 1 changed file with 3 additions and 7 deletions Inline Diff
drivers/net/tun.c
1 | /* | 1 | /* |
2 | * TUN - Universal TUN/TAP device driver. | 2 | * TUN - Universal TUN/TAP device driver. |
3 | * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> | 3 | * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; either version 2 of the License, or |
8 | * (at your option) any later version. | 8 | * (at your option) any later version. |
9 | * | 9 | * |
10 | * This program is distributed in the hope that it will be useful, | 10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. | 13 | * GNU General Public License for more details. |
14 | * | 14 | * |
15 | * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ | 15 | * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ |
16 | */ | 16 | */ |
17 | 17 | ||
18 | /* | 18 | /* |
19 | * Changes: | 19 | * Changes: |
20 | * | 20 | * |
21 | * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 | 21 | * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 |
22 | * Add TUNSETLINK ioctl to set the link encapsulation | 22 | * Add TUNSETLINK ioctl to set the link encapsulation |
23 | * | 23 | * |
24 | * Mark Smith <markzzzsmith@yahoo.com.au> | 24 | * Mark Smith <markzzzsmith@yahoo.com.au> |
25 | * Use random_ether_addr() for tap MAC address. | 25 | * Use random_ether_addr() for tap MAC address. |
26 | * | 26 | * |
27 | * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 | 27 | * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 |
28 | * Fixes in packet dropping, queue length setting and queue wakeup. | 28 | * Fixes in packet dropping, queue length setting and queue wakeup. |
29 | * Increased default tx queue length. | 29 | * Increased default tx queue length. |
30 | * Added ethtool API. | 30 | * Added ethtool API. |
31 | * Minor cleanups | 31 | * Minor cleanups |
32 | * | 32 | * |
33 | * Daniel Podlejski <underley@underley.eu.org> | 33 | * Daniel Podlejski <underley@underley.eu.org> |
34 | * Modifications for 2.3.99-pre5 kernel. | 34 | * Modifications for 2.3.99-pre5 kernel. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | #define DRV_NAME "tun" | 37 | #define DRV_NAME "tun" |
38 | #define DRV_VERSION "1.6" | 38 | #define DRV_VERSION "1.6" |
39 | #define DRV_DESCRIPTION "Universal TUN/TAP device driver" | 39 | #define DRV_DESCRIPTION "Universal TUN/TAP device driver" |
40 | #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" | 40 | #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" |
41 | 41 | ||
42 | #include <linux/module.h> | 42 | #include <linux/module.h> |
43 | #include <linux/errno.h> | 43 | #include <linux/errno.h> |
44 | #include <linux/kernel.h> | 44 | #include <linux/kernel.h> |
45 | #include <linux/major.h> | 45 | #include <linux/major.h> |
46 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
47 | #include <linux/poll.h> | 47 | #include <linux/poll.h> |
48 | #include <linux/fcntl.h> | 48 | #include <linux/fcntl.h> |
49 | #include <linux/init.h> | 49 | #include <linux/init.h> |
50 | #include <linux/skbuff.h> | 50 | #include <linux/skbuff.h> |
51 | #include <linux/netdevice.h> | 51 | #include <linux/netdevice.h> |
52 | #include <linux/etherdevice.h> | 52 | #include <linux/etherdevice.h> |
53 | #include <linux/miscdevice.h> | 53 | #include <linux/miscdevice.h> |
54 | #include <linux/ethtool.h> | 54 | #include <linux/ethtool.h> |
55 | #include <linux/rtnetlink.h> | 55 | #include <linux/rtnetlink.h> |
56 | #include <linux/compat.h> | 56 | #include <linux/compat.h> |
57 | #include <linux/if.h> | 57 | #include <linux/if.h> |
58 | #include <linux/if_arp.h> | 58 | #include <linux/if_arp.h> |
59 | #include <linux/if_ether.h> | 59 | #include <linux/if_ether.h> |
60 | #include <linux/if_tun.h> | 60 | #include <linux/if_tun.h> |
61 | #include <linux/crc32.h> | 61 | #include <linux/crc32.h> |
62 | #include <linux/nsproxy.h> | 62 | #include <linux/nsproxy.h> |
63 | #include <linux/virtio_net.h> | 63 | #include <linux/virtio_net.h> |
64 | #include <linux/rcupdate.h> | 64 | #include <linux/rcupdate.h> |
65 | #include <net/net_namespace.h> | 65 | #include <net/net_namespace.h> |
66 | #include <net/netns/generic.h> | 66 | #include <net/netns/generic.h> |
67 | #include <net/rtnetlink.h> | 67 | #include <net/rtnetlink.h> |
68 | #include <net/sock.h> | 68 | #include <net/sock.h> |
69 | 69 | ||
70 | #include <asm/system.h> | 70 | #include <asm/system.h> |
71 | #include <asm/uaccess.h> | 71 | #include <asm/uaccess.h> |
72 | 72 | ||
73 | /* Uncomment to enable debugging */ | 73 | /* Uncomment to enable debugging */ |
74 | /* #define TUN_DEBUG 1 */ | 74 | /* #define TUN_DEBUG 1 */ |
75 | 75 | ||
76 | #ifdef TUN_DEBUG | 76 | #ifdef TUN_DEBUG |
77 | static int debug; | 77 | static int debug; |
78 | 78 | ||
79 | #define DBG if(tun->debug)printk | 79 | #define DBG if(tun->debug)printk |
80 | #define DBG1 if(debug==2)printk | 80 | #define DBG1 if(debug==2)printk |
81 | #else | 81 | #else |
82 | #define DBG( a... ) | 82 | #define DBG( a... ) |
83 | #define DBG1( a... ) | 83 | #define DBG1( a... ) |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | #define FLT_EXACT_COUNT 8 | 86 | #define FLT_EXACT_COUNT 8 |
87 | struct tap_filter { | 87 | struct tap_filter { |
88 | unsigned int count; /* Number of addrs. Zero means disabled */ | 88 | unsigned int count; /* Number of addrs. Zero means disabled */ |
89 | u32 mask[2]; /* Mask of the hashed addrs */ | 89 | u32 mask[2]; /* Mask of the hashed addrs */ |
90 | unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; | 90 | unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | struct tun_file { | 93 | struct tun_file { |
94 | atomic_t count; | 94 | atomic_t count; |
95 | struct tun_struct *tun; | 95 | struct tun_struct *tun; |
96 | struct net *net; | 96 | struct net *net; |
97 | }; | 97 | }; |
98 | 98 | ||
99 | struct tun_sock; | 99 | struct tun_sock; |
100 | 100 | ||
101 | struct tun_struct { | 101 | struct tun_struct { |
102 | struct tun_file *tfile; | 102 | struct tun_file *tfile; |
103 | unsigned int flags; | 103 | unsigned int flags; |
104 | uid_t owner; | 104 | uid_t owner; |
105 | gid_t group; | 105 | gid_t group; |
106 | 106 | ||
107 | struct net_device *dev; | 107 | struct net_device *dev; |
108 | struct fasync_struct *fasync; | 108 | struct fasync_struct *fasync; |
109 | 109 | ||
110 | struct tap_filter txflt; | 110 | struct tap_filter txflt; |
111 | struct socket socket; | 111 | struct socket socket; |
112 | struct socket_wq wq; | 112 | struct socket_wq wq; |
113 | 113 | ||
114 | int vnet_hdr_sz; | 114 | int vnet_hdr_sz; |
115 | 115 | ||
116 | #ifdef TUN_DEBUG | 116 | #ifdef TUN_DEBUG |
117 | int debug; | 117 | int debug; |
118 | #endif | 118 | #endif |
119 | }; | 119 | }; |
120 | 120 | ||
121 | struct tun_sock { | 121 | struct tun_sock { |
122 | struct sock sk; | 122 | struct sock sk; |
123 | struct tun_struct *tun; | 123 | struct tun_struct *tun; |
124 | }; | 124 | }; |
125 | 125 | ||
126 | static inline struct tun_sock *tun_sk(struct sock *sk) | 126 | static inline struct tun_sock *tun_sk(struct sock *sk) |
127 | { | 127 | { |
128 | return container_of(sk, struct tun_sock, sk); | 128 | return container_of(sk, struct tun_sock, sk); |
129 | } | 129 | } |
130 | 130 | ||
131 | static int tun_attach(struct tun_struct *tun, struct file *file) | 131 | static int tun_attach(struct tun_struct *tun, struct file *file) |
132 | { | 132 | { |
133 | struct tun_file *tfile = file->private_data; | 133 | struct tun_file *tfile = file->private_data; |
134 | int err; | 134 | int err; |
135 | 135 | ||
136 | ASSERT_RTNL(); | 136 | ASSERT_RTNL(); |
137 | 137 | ||
138 | netif_tx_lock_bh(tun->dev); | 138 | netif_tx_lock_bh(tun->dev); |
139 | 139 | ||
140 | err = -EINVAL; | 140 | err = -EINVAL; |
141 | if (tfile->tun) | 141 | if (tfile->tun) |
142 | goto out; | 142 | goto out; |
143 | 143 | ||
144 | err = -EBUSY; | 144 | err = -EBUSY; |
145 | if (tun->tfile) | 145 | if (tun->tfile) |
146 | goto out; | 146 | goto out; |
147 | 147 | ||
148 | err = 0; | 148 | err = 0; |
149 | tfile->tun = tun; | 149 | tfile->tun = tun; |
150 | tun->tfile = tfile; | 150 | tun->tfile = tfile; |
151 | tun->socket.file = file; | 151 | tun->socket.file = file; |
152 | netif_carrier_on(tun->dev); | ||
152 | dev_hold(tun->dev); | 153 | dev_hold(tun->dev); |
153 | sock_hold(tun->socket.sk); | 154 | sock_hold(tun->socket.sk); |
154 | atomic_inc(&tfile->count); | 155 | atomic_inc(&tfile->count); |
155 | 156 | ||
156 | out: | 157 | out: |
157 | netif_tx_unlock_bh(tun->dev); | 158 | netif_tx_unlock_bh(tun->dev); |
158 | return err; | 159 | return err; |
159 | } | 160 | } |
160 | 161 | ||
161 | static void __tun_detach(struct tun_struct *tun) | 162 | static void __tun_detach(struct tun_struct *tun) |
162 | { | 163 | { |
163 | /* Detach from net device */ | 164 | /* Detach from net device */ |
164 | netif_tx_lock_bh(tun->dev); | 165 | netif_tx_lock_bh(tun->dev); |
166 | netif_carrier_off(tun->dev); | ||
165 | tun->tfile = NULL; | 167 | tun->tfile = NULL; |
166 | tun->socket.file = NULL; | 168 | tun->socket.file = NULL; |
167 | netif_tx_unlock_bh(tun->dev); | 169 | netif_tx_unlock_bh(tun->dev); |
168 | 170 | ||
169 | /* Drop read queue */ | 171 | /* Drop read queue */ |
170 | skb_queue_purge(&tun->socket.sk->sk_receive_queue); | 172 | skb_queue_purge(&tun->socket.sk->sk_receive_queue); |
171 | 173 | ||
172 | /* Drop the extra count on the net device */ | 174 | /* Drop the extra count on the net device */ |
173 | dev_put(tun->dev); | 175 | dev_put(tun->dev); |
174 | } | 176 | } |
175 | 177 | ||
176 | static void tun_detach(struct tun_struct *tun) | 178 | static void tun_detach(struct tun_struct *tun) |
177 | { | 179 | { |
178 | rtnl_lock(); | 180 | rtnl_lock(); |
179 | __tun_detach(tun); | 181 | __tun_detach(tun); |
180 | rtnl_unlock(); | 182 | rtnl_unlock(); |
181 | } | 183 | } |
182 | 184 | ||
183 | static struct tun_struct *__tun_get(struct tun_file *tfile) | 185 | static struct tun_struct *__tun_get(struct tun_file *tfile) |
184 | { | 186 | { |
185 | struct tun_struct *tun = NULL; | 187 | struct tun_struct *tun = NULL; |
186 | 188 | ||
187 | if (atomic_inc_not_zero(&tfile->count)) | 189 | if (atomic_inc_not_zero(&tfile->count)) |
188 | tun = tfile->tun; | 190 | tun = tfile->tun; |
189 | 191 | ||
190 | return tun; | 192 | return tun; |
191 | } | 193 | } |
192 | 194 | ||
193 | static struct tun_struct *tun_get(struct file *file) | 195 | static struct tun_struct *tun_get(struct file *file) |
194 | { | 196 | { |
195 | return __tun_get(file->private_data); | 197 | return __tun_get(file->private_data); |
196 | } | 198 | } |
197 | 199 | ||
198 | static void tun_put(struct tun_struct *tun) | 200 | static void tun_put(struct tun_struct *tun) |
199 | { | 201 | { |
200 | struct tun_file *tfile = tun->tfile; | 202 | struct tun_file *tfile = tun->tfile; |
201 | 203 | ||
202 | if (atomic_dec_and_test(&tfile->count)) | 204 | if (atomic_dec_and_test(&tfile->count)) |
203 | tun_detach(tfile->tun); | 205 | tun_detach(tfile->tun); |
204 | } | 206 | } |
205 | 207 | ||
206 | /* TAP filterting */ | 208 | /* TAP filterting */ |
207 | static void addr_hash_set(u32 *mask, const u8 *addr) | 209 | static void addr_hash_set(u32 *mask, const u8 *addr) |
208 | { | 210 | { |
209 | int n = ether_crc(ETH_ALEN, addr) >> 26; | 211 | int n = ether_crc(ETH_ALEN, addr) >> 26; |
210 | mask[n >> 5] |= (1 << (n & 31)); | 212 | mask[n >> 5] |= (1 << (n & 31)); |
211 | } | 213 | } |
212 | 214 | ||
213 | static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) | 215 | static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) |
214 | { | 216 | { |
215 | int n = ether_crc(ETH_ALEN, addr) >> 26; | 217 | int n = ether_crc(ETH_ALEN, addr) >> 26; |
216 | return mask[n >> 5] & (1 << (n & 31)); | 218 | return mask[n >> 5] & (1 << (n & 31)); |
217 | } | 219 | } |
218 | 220 | ||
219 | static int update_filter(struct tap_filter *filter, void __user *arg) | 221 | static int update_filter(struct tap_filter *filter, void __user *arg) |
220 | { | 222 | { |
221 | struct { u8 u[ETH_ALEN]; } *addr; | 223 | struct { u8 u[ETH_ALEN]; } *addr; |
222 | struct tun_filter uf; | 224 | struct tun_filter uf; |
223 | int err, alen, n, nexact; | 225 | int err, alen, n, nexact; |
224 | 226 | ||
225 | if (copy_from_user(&uf, arg, sizeof(uf))) | 227 | if (copy_from_user(&uf, arg, sizeof(uf))) |
226 | return -EFAULT; | 228 | return -EFAULT; |
227 | 229 | ||
228 | if (!uf.count) { | 230 | if (!uf.count) { |
229 | /* Disabled */ | 231 | /* Disabled */ |
230 | filter->count = 0; | 232 | filter->count = 0; |
231 | return 0; | 233 | return 0; |
232 | } | 234 | } |
233 | 235 | ||
234 | alen = ETH_ALEN * uf.count; | 236 | alen = ETH_ALEN * uf.count; |
235 | addr = kmalloc(alen, GFP_KERNEL); | 237 | addr = kmalloc(alen, GFP_KERNEL); |
236 | if (!addr) | 238 | if (!addr) |
237 | return -ENOMEM; | 239 | return -ENOMEM; |
238 | 240 | ||
239 | if (copy_from_user(addr, arg + sizeof(uf), alen)) { | 241 | if (copy_from_user(addr, arg + sizeof(uf), alen)) { |
240 | err = -EFAULT; | 242 | err = -EFAULT; |
241 | goto done; | 243 | goto done; |
242 | } | 244 | } |
243 | 245 | ||
244 | /* The filter is updated without holding any locks. Which is | 246 | /* The filter is updated without holding any locks. Which is |
245 | * perfectly safe. We disable it first and in the worst | 247 | * perfectly safe. We disable it first and in the worst |
246 | * case we'll accept a few undesired packets. */ | 248 | * case we'll accept a few undesired packets. */ |
247 | filter->count = 0; | 249 | filter->count = 0; |
248 | wmb(); | 250 | wmb(); |
249 | 251 | ||
250 | /* Use first set of addresses as an exact filter */ | 252 | /* Use first set of addresses as an exact filter */ |
251 | for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) | 253 | for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) |
252 | memcpy(filter->addr[n], addr[n].u, ETH_ALEN); | 254 | memcpy(filter->addr[n], addr[n].u, ETH_ALEN); |
253 | 255 | ||
254 | nexact = n; | 256 | nexact = n; |
255 | 257 | ||
256 | /* Remaining multicast addresses are hashed, | 258 | /* Remaining multicast addresses are hashed, |
257 | * unicast will leave the filter disabled. */ | 259 | * unicast will leave the filter disabled. */ |
258 | memset(filter->mask, 0, sizeof(filter->mask)); | 260 | memset(filter->mask, 0, sizeof(filter->mask)); |
259 | for (; n < uf.count; n++) { | 261 | for (; n < uf.count; n++) { |
260 | if (!is_multicast_ether_addr(addr[n].u)) { | 262 | if (!is_multicast_ether_addr(addr[n].u)) { |
261 | err = 0; /* no filter */ | 263 | err = 0; /* no filter */ |
262 | goto done; | 264 | goto done; |
263 | } | 265 | } |
264 | addr_hash_set(filter->mask, addr[n].u); | 266 | addr_hash_set(filter->mask, addr[n].u); |
265 | } | 267 | } |
266 | 268 | ||
267 | /* For ALLMULTI just set the mask to all ones. | 269 | /* For ALLMULTI just set the mask to all ones. |
268 | * This overrides the mask populated above. */ | 270 | * This overrides the mask populated above. */ |
269 | if ((uf.flags & TUN_FLT_ALLMULTI)) | 271 | if ((uf.flags & TUN_FLT_ALLMULTI)) |
270 | memset(filter->mask, ~0, sizeof(filter->mask)); | 272 | memset(filter->mask, ~0, sizeof(filter->mask)); |
271 | 273 | ||
272 | /* Now enable the filter */ | 274 | /* Now enable the filter */ |
273 | wmb(); | 275 | wmb(); |
274 | filter->count = nexact; | 276 | filter->count = nexact; |
275 | 277 | ||
276 | /* Return the number of exact filters */ | 278 | /* Return the number of exact filters */ |
277 | err = nexact; | 279 | err = nexact; |
278 | 280 | ||
279 | done: | 281 | done: |
280 | kfree(addr); | 282 | kfree(addr); |
281 | return err; | 283 | return err; |
282 | } | 284 | } |
283 | 285 | ||
284 | /* Returns: 0 - drop, !=0 - accept */ | 286 | /* Returns: 0 - drop, !=0 - accept */ |
285 | static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) | 287 | static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) |
286 | { | 288 | { |
287 | /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect | 289 | /* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect |
288 | * at this point. */ | 290 | * at this point. */ |
289 | struct ethhdr *eh = (struct ethhdr *) skb->data; | 291 | struct ethhdr *eh = (struct ethhdr *) skb->data; |
290 | int i; | 292 | int i; |
291 | 293 | ||
292 | /* Exact match */ | 294 | /* Exact match */ |
293 | for (i = 0; i < filter->count; i++) | 295 | for (i = 0; i < filter->count; i++) |
294 | if (!compare_ether_addr(eh->h_dest, filter->addr[i])) | 296 | if (!compare_ether_addr(eh->h_dest, filter->addr[i])) |
295 | return 1; | 297 | return 1; |
296 | 298 | ||
297 | /* Inexact match (multicast only) */ | 299 | /* Inexact match (multicast only) */ |
298 | if (is_multicast_ether_addr(eh->h_dest)) | 300 | if (is_multicast_ether_addr(eh->h_dest)) |
299 | return addr_hash_test(filter->mask, eh->h_dest); | 301 | return addr_hash_test(filter->mask, eh->h_dest); |
300 | 302 | ||
301 | return 0; | 303 | return 0; |
302 | } | 304 | } |
303 | 305 | ||
304 | /* | 306 | /* |
305 | * Checks whether the packet is accepted or not. | 307 | * Checks whether the packet is accepted or not. |
306 | * Returns: 0 - drop, !=0 - accept | 308 | * Returns: 0 - drop, !=0 - accept |
307 | */ | 309 | */ |
308 | static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) | 310 | static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) |
309 | { | 311 | { |
310 | if (!filter->count) | 312 | if (!filter->count) |
311 | return 1; | 313 | return 1; |
312 | 314 | ||
313 | return run_filter(filter, skb); | 315 | return run_filter(filter, skb); |
314 | } | 316 | } |
315 | 317 | ||
316 | /* Network device part of the driver */ | 318 | /* Network device part of the driver */ |
317 | 319 | ||
318 | static const struct ethtool_ops tun_ethtool_ops; | 320 | static const struct ethtool_ops tun_ethtool_ops; |
319 | 321 | ||
320 | /* Net device detach from fd. */ | 322 | /* Net device detach from fd. */ |
321 | static void tun_net_uninit(struct net_device *dev) | 323 | static void tun_net_uninit(struct net_device *dev) |
322 | { | 324 | { |
323 | struct tun_struct *tun = netdev_priv(dev); | 325 | struct tun_struct *tun = netdev_priv(dev); |
324 | struct tun_file *tfile = tun->tfile; | 326 | struct tun_file *tfile = tun->tfile; |
325 | 327 | ||
326 | /* Inform the methods they need to stop using the dev. | 328 | /* Inform the methods they need to stop using the dev. |
327 | */ | 329 | */ |
328 | if (tfile) { | 330 | if (tfile) { |
329 | wake_up_all(&tun->wq.wait); | 331 | wake_up_all(&tun->wq.wait); |
330 | if (atomic_dec_and_test(&tfile->count)) | 332 | if (atomic_dec_and_test(&tfile->count)) |
331 | __tun_detach(tun); | 333 | __tun_detach(tun); |
332 | } | 334 | } |
333 | } | 335 | } |
334 | 336 | ||
335 | static void tun_free_netdev(struct net_device *dev) | 337 | static void tun_free_netdev(struct net_device *dev) |
336 | { | 338 | { |
337 | struct tun_struct *tun = netdev_priv(dev); | 339 | struct tun_struct *tun = netdev_priv(dev); |
338 | 340 | ||
339 | sock_put(tun->socket.sk); | 341 | sock_put(tun->socket.sk); |
340 | } | 342 | } |
341 | 343 | ||
342 | /* Net device open. */ | 344 | /* Net device open. */ |
343 | static int tun_net_open(struct net_device *dev) | 345 | static int tun_net_open(struct net_device *dev) |
344 | { | 346 | { |
345 | netif_start_queue(dev); | 347 | netif_start_queue(dev); |
346 | return 0; | 348 | return 0; |
347 | } | 349 | } |
348 | 350 | ||
349 | /* Net device close. */ | 351 | /* Net device close. */ |
350 | static int tun_net_close(struct net_device *dev) | 352 | static int tun_net_close(struct net_device *dev) |
351 | { | 353 | { |
352 | netif_stop_queue(dev); | 354 | netif_stop_queue(dev); |
353 | return 0; | 355 | return 0; |
354 | } | 356 | } |
355 | 357 | ||
356 | /* Net device start xmit */ | 358 | /* Net device start xmit */ |
357 | static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) | 359 | static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) |
358 | { | 360 | { |
359 | struct tun_struct *tun = netdev_priv(dev); | 361 | struct tun_struct *tun = netdev_priv(dev); |
360 | 362 | ||
361 | DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); | 363 | DBG(KERN_INFO "%s: tun_net_xmit %d\n", tun->dev->name, skb->len); |
362 | 364 | ||
363 | /* Drop packet if interface is not attached */ | 365 | /* Drop packet if interface is not attached */ |
364 | if (!tun->tfile) | 366 | if (!tun->tfile) |
365 | goto drop; | 367 | goto drop; |
366 | 368 | ||
367 | /* Drop if the filter does not like it. | 369 | /* Drop if the filter does not like it. |
368 | * This is a noop if the filter is disabled. | 370 | * This is a noop if the filter is disabled. |
369 | * Filter can be enabled only for the TAP devices. */ | 371 | * Filter can be enabled only for the TAP devices. */ |
370 | if (!check_filter(&tun->txflt, skb)) | 372 | if (!check_filter(&tun->txflt, skb)) |
371 | goto drop; | 373 | goto drop; |
372 | 374 | ||
373 | if (tun->socket.sk->sk_filter && | 375 | if (tun->socket.sk->sk_filter && |
374 | sk_filter(tun->socket.sk, skb)) | 376 | sk_filter(tun->socket.sk, skb)) |
375 | goto drop; | 377 | goto drop; |
376 | 378 | ||
377 | if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) { | 379 | if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) { |
378 | if (!(tun->flags & TUN_ONE_QUEUE)) { | 380 | if (!(tun->flags & TUN_ONE_QUEUE)) { |
379 | /* Normal queueing mode. */ | 381 | /* Normal queueing mode. */ |
380 | /* Packet scheduler handles dropping of further packets. */ | 382 | /* Packet scheduler handles dropping of further packets. */ |
381 | netif_stop_queue(dev); | 383 | netif_stop_queue(dev); |
382 | 384 | ||
383 | /* We won't see all dropped packets individually, so overrun | 385 | /* We won't see all dropped packets individually, so overrun |
384 | * error is more appropriate. */ | 386 | * error is more appropriate. */ |
385 | dev->stats.tx_fifo_errors++; | 387 | dev->stats.tx_fifo_errors++; |
386 | } else { | 388 | } else { |
387 | /* Single queue mode. | 389 | /* Single queue mode. |
388 | * Driver handles dropping of all packets itself. */ | 390 | * Driver handles dropping of all packets itself. */ |
389 | goto drop; | 391 | goto drop; |
390 | } | 392 | } |
391 | } | 393 | } |
392 | 394 | ||
393 | /* Orphan the skb - required as we might hang on to it | 395 | /* Orphan the skb - required as we might hang on to it |
394 | * for indefinite time. */ | 396 | * for indefinite time. */ |
395 | skb_orphan(skb); | 397 | skb_orphan(skb); |
396 | 398 | ||
397 | /* Enqueue packet */ | 399 | /* Enqueue packet */ |
398 | skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); | 400 | skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); |
399 | 401 | ||
400 | /* Notify and wake up reader process */ | 402 | /* Notify and wake up reader process */ |
401 | if (tun->flags & TUN_FASYNC) | 403 | if (tun->flags & TUN_FASYNC) |
402 | kill_fasync(&tun->fasync, SIGIO, POLL_IN); | 404 | kill_fasync(&tun->fasync, SIGIO, POLL_IN); |
403 | wake_up_interruptible_poll(&tun->wq.wait, POLLIN | | 405 | wake_up_interruptible_poll(&tun->wq.wait, POLLIN | |
404 | POLLRDNORM | POLLRDBAND); | 406 | POLLRDNORM | POLLRDBAND); |
405 | return NETDEV_TX_OK; | 407 | return NETDEV_TX_OK; |
406 | 408 | ||
407 | drop: | 409 | drop: |
408 | dev->stats.tx_dropped++; | 410 | dev->stats.tx_dropped++; |
409 | kfree_skb(skb); | 411 | kfree_skb(skb); |
410 | return NETDEV_TX_OK; | 412 | return NETDEV_TX_OK; |
411 | } | 413 | } |
412 | 414 | ||
413 | static void tun_net_mclist(struct net_device *dev) | 415 | static void tun_net_mclist(struct net_device *dev) |
414 | { | 416 | { |
415 | /* | 417 | /* |
416 | * This callback is supposed to deal with mc filter in | 418 | * This callback is supposed to deal with mc filter in |
417 | * _rx_ path and has nothing to do with the _tx_ path. | 419 | * _rx_ path and has nothing to do with the _tx_ path. |
418 | * In rx path we always accept everything userspace gives us. | 420 | * In rx path we always accept everything userspace gives us. |
419 | */ | 421 | */ |
420 | } | 422 | } |
421 | 423 | ||
422 | #define MIN_MTU 68 | 424 | #define MIN_MTU 68 |
423 | #define MAX_MTU 65535 | 425 | #define MAX_MTU 65535 |
424 | 426 | ||
425 | static int | 427 | static int |
426 | tun_net_change_mtu(struct net_device *dev, int new_mtu) | 428 | tun_net_change_mtu(struct net_device *dev, int new_mtu) |
427 | { | 429 | { |
428 | if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU) | 430 | if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU) |
429 | return -EINVAL; | 431 | return -EINVAL; |
430 | dev->mtu = new_mtu; | 432 | dev->mtu = new_mtu; |
431 | return 0; | 433 | return 0; |
432 | } | 434 | } |
433 | 435 | ||
434 | static const struct net_device_ops tun_netdev_ops = { | 436 | static const struct net_device_ops tun_netdev_ops = { |
435 | .ndo_uninit = tun_net_uninit, | 437 | .ndo_uninit = tun_net_uninit, |
436 | .ndo_open = tun_net_open, | 438 | .ndo_open = tun_net_open, |
437 | .ndo_stop = tun_net_close, | 439 | .ndo_stop = tun_net_close, |
438 | .ndo_start_xmit = tun_net_xmit, | 440 | .ndo_start_xmit = tun_net_xmit, |
439 | .ndo_change_mtu = tun_net_change_mtu, | 441 | .ndo_change_mtu = tun_net_change_mtu, |
440 | }; | 442 | }; |
441 | 443 | ||
442 | static const struct net_device_ops tap_netdev_ops = { | 444 | static const struct net_device_ops tap_netdev_ops = { |
443 | .ndo_uninit = tun_net_uninit, | 445 | .ndo_uninit = tun_net_uninit, |
444 | .ndo_open = tun_net_open, | 446 | .ndo_open = tun_net_open, |
445 | .ndo_stop = tun_net_close, | 447 | .ndo_stop = tun_net_close, |
446 | .ndo_start_xmit = tun_net_xmit, | 448 | .ndo_start_xmit = tun_net_xmit, |
447 | .ndo_change_mtu = tun_net_change_mtu, | 449 | .ndo_change_mtu = tun_net_change_mtu, |
448 | .ndo_set_multicast_list = tun_net_mclist, | 450 | .ndo_set_multicast_list = tun_net_mclist, |
449 | .ndo_set_mac_address = eth_mac_addr, | 451 | .ndo_set_mac_address = eth_mac_addr, |
450 | .ndo_validate_addr = eth_validate_addr, | 452 | .ndo_validate_addr = eth_validate_addr, |
451 | }; | 453 | }; |
452 | 454 | ||
453 | /* Initialize net device. */ | 455 | /* Initialize net device. */ |
454 | static void tun_net_init(struct net_device *dev) | 456 | static void tun_net_init(struct net_device *dev) |
455 | { | 457 | { |
456 | struct tun_struct *tun = netdev_priv(dev); | 458 | struct tun_struct *tun = netdev_priv(dev); |
457 | 459 | ||
458 | switch (tun->flags & TUN_TYPE_MASK) { | 460 | switch (tun->flags & TUN_TYPE_MASK) { |
459 | case TUN_TUN_DEV: | 461 | case TUN_TUN_DEV: |
460 | dev->netdev_ops = &tun_netdev_ops; | 462 | dev->netdev_ops = &tun_netdev_ops; |
461 | 463 | ||
462 | /* Point-to-Point TUN Device */ | 464 | /* Point-to-Point TUN Device */ |
463 | dev->hard_header_len = 0; | 465 | dev->hard_header_len = 0; |
464 | dev->addr_len = 0; | 466 | dev->addr_len = 0; |
465 | dev->mtu = 1500; | 467 | dev->mtu = 1500; |
466 | 468 | ||
467 | /* Zero header length */ | 469 | /* Zero header length */ |
468 | dev->type = ARPHRD_NONE; | 470 | dev->type = ARPHRD_NONE; |
469 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; | 471 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
470 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ | 472 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ |
471 | break; | 473 | break; |
472 | 474 | ||
473 | case TUN_TAP_DEV: | 475 | case TUN_TAP_DEV: |
474 | dev->netdev_ops = &tap_netdev_ops; | 476 | dev->netdev_ops = &tap_netdev_ops; |
475 | /* Ethernet TAP Device */ | 477 | /* Ethernet TAP Device */ |
476 | ether_setup(dev); | 478 | ether_setup(dev); |
477 | 479 | ||
478 | random_ether_addr(dev->dev_addr); | 480 | random_ether_addr(dev->dev_addr); |
479 | 481 | ||
480 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ | 482 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ |
481 | break; | 483 | break; |
482 | } | 484 | } |
483 | } | 485 | } |
484 | 486 | ||
485 | /* Character device part */ | 487 | /* Character device part */ |
486 | 488 | ||
487 | /* Poll */ | 489 | /* Poll */ |
488 | static unsigned int tun_chr_poll(struct file *file, poll_table * wait) | 490 | static unsigned int tun_chr_poll(struct file *file, poll_table * wait) |
489 | { | 491 | { |
490 | struct tun_file *tfile = file->private_data; | 492 | struct tun_file *tfile = file->private_data; |
491 | struct tun_struct *tun = __tun_get(tfile); | 493 | struct tun_struct *tun = __tun_get(tfile); |
492 | struct sock *sk; | 494 | struct sock *sk; |
493 | unsigned int mask = 0; | 495 | unsigned int mask = 0; |
494 | 496 | ||
495 | if (!tun) | 497 | if (!tun) |
496 | return POLLERR; | 498 | return POLLERR; |
497 | 499 | ||
498 | sk = tun->socket.sk; | 500 | sk = tun->socket.sk; |
499 | 501 | ||
500 | DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); | 502 | DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); |
501 | 503 | ||
502 | poll_wait(file, &tun->wq.wait, wait); | 504 | poll_wait(file, &tun->wq.wait, wait); |
503 | 505 | ||
504 | if (!skb_queue_empty(&sk->sk_receive_queue)) | 506 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
505 | mask |= POLLIN | POLLRDNORM; | 507 | mask |= POLLIN | POLLRDNORM; |
506 | 508 | ||
507 | if (sock_writeable(sk) || | 509 | if (sock_writeable(sk) || |
508 | (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && | 510 | (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && |
509 | sock_writeable(sk))) | 511 | sock_writeable(sk))) |
510 | mask |= POLLOUT | POLLWRNORM; | 512 | mask |= POLLOUT | POLLWRNORM; |
511 | 513 | ||
512 | if (tun->dev->reg_state != NETREG_REGISTERED) | 514 | if (tun->dev->reg_state != NETREG_REGISTERED) |
513 | mask = POLLERR; | 515 | mask = POLLERR; |
514 | 516 | ||
515 | tun_put(tun); | 517 | tun_put(tun); |
516 | return mask; | 518 | return mask; |
517 | } | 519 | } |
518 | 520 | ||
/* prepad is the amount to reserve at front. len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static inline struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
					    size_t prepad, size_t len,
					    size_t linear, int noblock)
{
	struct sock *sk = tun->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Propagate the current task's cgroup net classid to the socket. */
	sock_update_classid(sk);

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	/* Allocates a paged skb charged against sk's sndbuf; may block
	 * unless noblock is set. */
	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err);
	if (!skb)
		return ERR_PTR(err);	/* err is negative errno (e.g. -EAGAIN) */

	/* Reserve headroom, then account for the linear part and the
	 * remaining paged data so skb->len covers the whole packet. */
	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
547 | 549 | ||
548 | /* Get packet from user space buffer */ | 550 | /* Get packet from user space buffer */ |
549 | static __inline__ ssize_t tun_get_user(struct tun_struct *tun, | 551 | static __inline__ ssize_t tun_get_user(struct tun_struct *tun, |
550 | const struct iovec *iv, size_t count, | 552 | const struct iovec *iv, size_t count, |
551 | int noblock) | 553 | int noblock) |
552 | { | 554 | { |
553 | struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; | 555 | struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; |
554 | struct sk_buff *skb; | 556 | struct sk_buff *skb; |
555 | size_t len = count, align = 0; | 557 | size_t len = count, align = 0; |
556 | struct virtio_net_hdr gso = { 0 }; | 558 | struct virtio_net_hdr gso = { 0 }; |
557 | int offset = 0; | 559 | int offset = 0; |
558 | 560 | ||
559 | if (!(tun->flags & TUN_NO_PI)) { | 561 | if (!(tun->flags & TUN_NO_PI)) { |
560 | if ((len -= sizeof(pi)) > count) | 562 | if ((len -= sizeof(pi)) > count) |
561 | return -EINVAL; | 563 | return -EINVAL; |
562 | 564 | ||
563 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) | 565 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) |
564 | return -EFAULT; | 566 | return -EFAULT; |
565 | offset += sizeof(pi); | 567 | offset += sizeof(pi); |
566 | } | 568 | } |
567 | 569 | ||
568 | if (tun->flags & TUN_VNET_HDR) { | 570 | if (tun->flags & TUN_VNET_HDR) { |
569 | if ((len -= tun->vnet_hdr_sz) > count) | 571 | if ((len -= tun->vnet_hdr_sz) > count) |
570 | return -EINVAL; | 572 | return -EINVAL; |
571 | 573 | ||
572 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) | 574 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) |
573 | return -EFAULT; | 575 | return -EFAULT; |
574 | 576 | ||
575 | if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && | 577 | if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && |
576 | gso.csum_start + gso.csum_offset + 2 > gso.hdr_len) | 578 | gso.csum_start + gso.csum_offset + 2 > gso.hdr_len) |
577 | gso.hdr_len = gso.csum_start + gso.csum_offset + 2; | 579 | gso.hdr_len = gso.csum_start + gso.csum_offset + 2; |
578 | 580 | ||
579 | if (gso.hdr_len > len) | 581 | if (gso.hdr_len > len) |
580 | return -EINVAL; | 582 | return -EINVAL; |
581 | offset += tun->vnet_hdr_sz; | 583 | offset += tun->vnet_hdr_sz; |
582 | } | 584 | } |
583 | 585 | ||
584 | if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { | 586 | if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { |
585 | align = NET_IP_ALIGN; | 587 | align = NET_IP_ALIGN; |
586 | if (unlikely(len < ETH_HLEN || | 588 | if (unlikely(len < ETH_HLEN || |
587 | (gso.hdr_len && gso.hdr_len < ETH_HLEN))) | 589 | (gso.hdr_len && gso.hdr_len < ETH_HLEN))) |
588 | return -EINVAL; | 590 | return -EINVAL; |
589 | } | 591 | } |
590 | 592 | ||
591 | skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock); | 593 | skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock); |
592 | if (IS_ERR(skb)) { | 594 | if (IS_ERR(skb)) { |
593 | if (PTR_ERR(skb) != -EAGAIN) | 595 | if (PTR_ERR(skb) != -EAGAIN) |
594 | tun->dev->stats.rx_dropped++; | 596 | tun->dev->stats.rx_dropped++; |
595 | return PTR_ERR(skb); | 597 | return PTR_ERR(skb); |
596 | } | 598 | } |
597 | 599 | ||
598 | if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) { | 600 | if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) { |
599 | tun->dev->stats.rx_dropped++; | 601 | tun->dev->stats.rx_dropped++; |
600 | kfree_skb(skb); | 602 | kfree_skb(skb); |
601 | return -EFAULT; | 603 | return -EFAULT; |
602 | } | 604 | } |
603 | 605 | ||
604 | if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { | 606 | if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
605 | if (!skb_partial_csum_set(skb, gso.csum_start, | 607 | if (!skb_partial_csum_set(skb, gso.csum_start, |
606 | gso.csum_offset)) { | 608 | gso.csum_offset)) { |
607 | tun->dev->stats.rx_frame_errors++; | 609 | tun->dev->stats.rx_frame_errors++; |
608 | kfree_skb(skb); | 610 | kfree_skb(skb); |
609 | return -EINVAL; | 611 | return -EINVAL; |
610 | } | 612 | } |
611 | } else if (tun->flags & TUN_NOCHECKSUM) | 613 | } else if (tun->flags & TUN_NOCHECKSUM) |
612 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 614 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
613 | 615 | ||
614 | switch (tun->flags & TUN_TYPE_MASK) { | 616 | switch (tun->flags & TUN_TYPE_MASK) { |
615 | case TUN_TUN_DEV: | 617 | case TUN_TUN_DEV: |
616 | if (tun->flags & TUN_NO_PI) { | 618 | if (tun->flags & TUN_NO_PI) { |
617 | switch (skb->data[0] & 0xf0) { | 619 | switch (skb->data[0] & 0xf0) { |
618 | case 0x40: | 620 | case 0x40: |
619 | pi.proto = htons(ETH_P_IP); | 621 | pi.proto = htons(ETH_P_IP); |
620 | break; | 622 | break; |
621 | case 0x60: | 623 | case 0x60: |
622 | pi.proto = htons(ETH_P_IPV6); | 624 | pi.proto = htons(ETH_P_IPV6); |
623 | break; | 625 | break; |
624 | default: | 626 | default: |
625 | tun->dev->stats.rx_dropped++; | 627 | tun->dev->stats.rx_dropped++; |
626 | kfree_skb(skb); | 628 | kfree_skb(skb); |
627 | return -EINVAL; | 629 | return -EINVAL; |
628 | } | 630 | } |
629 | } | 631 | } |
630 | 632 | ||
631 | skb_reset_mac_header(skb); | 633 | skb_reset_mac_header(skb); |
632 | skb->protocol = pi.proto; | 634 | skb->protocol = pi.proto; |
633 | skb->dev = tun->dev; | 635 | skb->dev = tun->dev; |
634 | break; | 636 | break; |
635 | case TUN_TAP_DEV: | 637 | case TUN_TAP_DEV: |
636 | skb->protocol = eth_type_trans(skb, tun->dev); | 638 | skb->protocol = eth_type_trans(skb, tun->dev); |
637 | break; | 639 | break; |
638 | }; | 640 | }; |
639 | 641 | ||
640 | if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 642 | if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
641 | pr_debug("GSO!\n"); | 643 | pr_debug("GSO!\n"); |
642 | switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { | 644 | switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
643 | case VIRTIO_NET_HDR_GSO_TCPV4: | 645 | case VIRTIO_NET_HDR_GSO_TCPV4: |
644 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; | 646 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; |
645 | break; | 647 | break; |
646 | case VIRTIO_NET_HDR_GSO_TCPV6: | 648 | case VIRTIO_NET_HDR_GSO_TCPV6: |
647 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; | 649 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
648 | break; | 650 | break; |
649 | case VIRTIO_NET_HDR_GSO_UDP: | 651 | case VIRTIO_NET_HDR_GSO_UDP: |
650 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; | 652 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; |
651 | break; | 653 | break; |
652 | default: | 654 | default: |
653 | tun->dev->stats.rx_frame_errors++; | 655 | tun->dev->stats.rx_frame_errors++; |
654 | kfree_skb(skb); | 656 | kfree_skb(skb); |
655 | return -EINVAL; | 657 | return -EINVAL; |
656 | } | 658 | } |
657 | 659 | ||
658 | if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN) | 660 | if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN) |
659 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; | 661 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; |
660 | 662 | ||
661 | skb_shinfo(skb)->gso_size = gso.gso_size; | 663 | skb_shinfo(skb)->gso_size = gso.gso_size; |
662 | if (skb_shinfo(skb)->gso_size == 0) { | 664 | if (skb_shinfo(skb)->gso_size == 0) { |
663 | tun->dev->stats.rx_frame_errors++; | 665 | tun->dev->stats.rx_frame_errors++; |
664 | kfree_skb(skb); | 666 | kfree_skb(skb); |
665 | return -EINVAL; | 667 | return -EINVAL; |
666 | } | 668 | } |
667 | 669 | ||
668 | /* Header must be checked, and gso_segs computed. */ | 670 | /* Header must be checked, and gso_segs computed. */ |
669 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; | 671 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; |
670 | skb_shinfo(skb)->gso_segs = 0; | 672 | skb_shinfo(skb)->gso_segs = 0; |
671 | } | 673 | } |
672 | 674 | ||
673 | netif_rx_ni(skb); | 675 | netif_rx_ni(skb); |
674 | 676 | ||
675 | tun->dev->stats.rx_packets++; | 677 | tun->dev->stats.rx_packets++; |
676 | tun->dev->stats.rx_bytes += len; | 678 | tun->dev->stats.rx_bytes += len; |
677 | 679 | ||
678 | return count; | 680 | return count; |
679 | } | 681 | } |
680 | 682 | ||
/* write(2)/aio_write path of the tun character device: hand the user
 * buffer to tun_get_user() for parsing and injection into the stack. */
static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
			      unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_struct *tun = tun_get(file);
	ssize_t result;

	if (!tun)
		return -EBADFD;	/* fd not attached to a tun device */

	DBG(KERN_INFO "%s: tun_chr_write %ld\n", tun->dev->name, count);

	/* Honor O_NONBLOCK from the file so the skb allocation does not
	 * sleep for non-blocking writers. */
	result = tun_get_user(tun, iv, iov_length(iv, count),
			      file->f_flags & O_NONBLOCK);

	tun_put(tun);	/* drop the reference taken by tun_get() */
	return result;
}
699 | 701 | ||
/* Put packet to the user space buffer.
 *
 * Writes the optional tun_pi header, the optional virtio_net header
 * (built from the skb's GSO/checksum state), then the packet data.
 *
 * Returns the total length of the packet *including* any truncated
 * tail (total += skb->len below) so that tun_recvmsg() can detect
 * truncation by comparing against the caller's buffer length.
 */
static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
				       struct sk_buff *skb,
				       const struct iovec *iv, int len)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) < 0)
			return -EINVAL;

		if (len < skb->len) {
			/* Packet will be striped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
			return -EFAULT;
		total += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 }; /* no info leak */
		if ((len -= tun->vnet_hdr_sz) < 0)
			return -EINVAL;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			gso.hdr_len = skb_headlen(skb);
			gso.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else {
				/* Should never happen: dump the offending
				 * header and fail loudly (once). */
				printk(KERN_ERR "tun: unexpected GSO type: "
				       "0x%x, gso_size %d, hdr_len %d\n",
				       sinfo->gso_type, gso.gso_size,
				       gso.hdr_len);
				print_hex_dump(KERN_ERR, "tun: ",
					       DUMP_PREFIX_NONE,
					       16, 1, skb->head,
					       min((int)gso.hdr_len, 64), true);
				WARN_ON_ONCE(1);
				return -EINVAL;
			}
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			/* csum_start is skb->head relative; make it data
			 * relative for userspace. */
			gso.csum_start = skb->csum_start - skb_headroom(skb);
			gso.csum_offset = skb->csum_offset;
		} /* else everything is zero */

		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
					       sizeof(gso))))
			return -EFAULT;
		total += tun->vnet_hdr_sz;
	}

	/* Copy at most what fits; anything beyond len is truncated. */
	len = min_t(int, skb->len, len);

	skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
	total += skb->len;	/* full packet length, not bytes copied */

	tun->dev->stats.tx_packets++;
	tun->dev->stats.tx_bytes += len;

	return total;
}
778 | 780 | ||
/* Dequeue one packet from the device's receive queue and copy it to
 * userspace, blocking (unless noblock) until a packet is available.
 * Uses a hand-rolled wait-queue loop on tun->wq.wait. */
static ssize_t tun_do_read(struct tun_struct *tun,
			   struct kiocb *iocb, const struct iovec *iv,
			   ssize_t len, int noblock)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;
	ssize_t ret = 0;

	DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);

	add_wait_queue(&tun->wq.wait, &wait);
	while (len) {
		/* Mark interruptible *before* checking the queue so a
		 * concurrent wakeup is not lost. */
		current->state = TASK_INTERRUPTIBLE;

		/* Read frames from the queue */
		if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			if (tun->dev->reg_state != NETREG_REGISTERED) {
				ret = -EIO;	/* device went away */
				break;
			}

			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		/* We drained a slot; let the stack transmit again. */
		netif_wake_queue(tun->dev);

		ret = tun_put_user(tun, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(&tun->wq.wait, &wait);

	return ret;
}
824 | 826 | ||
/* read(2)/aio_read path of the tun character device. */
static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
			    unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	ssize_t len, ret;

	if (!tun)
		return -EBADFD;
	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
	/* tun_put_user() may report the full (truncated) packet length;
	 * clamp to the caller's buffer size.  Negative errors pass
	 * through min_t unchanged. */
	ret = min_t(ssize_t, ret, len);
out:
	tun_put(tun);
	return ret;
}
847 | 849 | ||
848 | static void tun_setup(struct net_device *dev) | 850 | static void tun_setup(struct net_device *dev) |
849 | { | 851 | { |
850 | struct tun_struct *tun = netdev_priv(dev); | 852 | struct tun_struct *tun = netdev_priv(dev); |
851 | 853 | ||
852 | tun->owner = -1; | 854 | tun->owner = -1; |
853 | tun->group = -1; | 855 | tun->group = -1; |
854 | 856 | ||
855 | dev->ethtool_ops = &tun_ethtool_ops; | 857 | dev->ethtool_ops = &tun_ethtool_ops; |
856 | dev->destructor = tun_free_netdev; | 858 | dev->destructor = tun_free_netdev; |
857 | } | 859 | } |
858 | 860 | ||
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
	/* Creation via netlink is not supported, only deletion. */
	return -EINVAL;
}
866 | 868 | ||
/* rtnl_link_ops registration; validate always fails, so `ip link add`
 * cannot create tun devices, but `ip link del` works. */
static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
};
873 | 875 | ||
/* sk->sk_write_space callback: wake up poll()ers and SIGIO listeners
 * once the socket's send buffer has room again. */
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_struct *tun;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	/* Only wake if somebody armed the NOSPACE bit in poll(). */
	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);

	tun = tun_sk(sk)->tun;
	kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}
893 | 895 | ||
/* sk destructor: the netdev is freed when the last socket reference
 * goes away (see sk->sk_destruct assignment in tun_set_iff()). */
static void tun_sock_destruct(struct sock *sk)
{
	free_netdev(tun_sk(sk)->tun->dev);
}
898 | 900 | ||
899 | static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, | 901 | static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, |
900 | struct msghdr *m, size_t total_len) | 902 | struct msghdr *m, size_t total_len) |
901 | { | 903 | { |
902 | struct tun_struct *tun = container_of(sock, struct tun_struct, socket); | 904 | struct tun_struct *tun = container_of(sock, struct tun_struct, socket); |
903 | return tun_get_user(tun, m->msg_iov, total_len, | 905 | return tun_get_user(tun, m->msg_iov, total_len, |
904 | m->msg_flags & MSG_DONTWAIT); | 906 | m->msg_flags & MSG_DONTWAIT); |
905 | } | 907 | } |
906 | 908 | ||
/* proto_ops recvmsg: read one packet; supports MSG_DONTWAIT and
 * MSG_TRUNC (return full packet length instead of the copied part). */
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
			  flags & MSG_DONTWAIT);
	/* tun_put_user() returns the full packet length even when the
	 * copy was truncated; ret > total_len therefore means truncation. */
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}
923 | 925 | ||
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};
929 | 931 | ||
/* Minimal proto so sk_alloc() can size the per-device tun_sock. */
static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_sock),
};
935 | 937 | ||
936 | static int tun_flags(struct tun_struct *tun) | 938 | static int tun_flags(struct tun_struct *tun) |
937 | { | 939 | { |
938 | int flags = 0; | 940 | int flags = 0; |
939 | 941 | ||
940 | if (tun->flags & TUN_TUN_DEV) | 942 | if (tun->flags & TUN_TUN_DEV) |
941 | flags |= IFF_TUN; | 943 | flags |= IFF_TUN; |
942 | else | 944 | else |
943 | flags |= IFF_TAP; | 945 | flags |= IFF_TAP; |
944 | 946 | ||
945 | if (tun->flags & TUN_NO_PI) | 947 | if (tun->flags & TUN_NO_PI) |
946 | flags |= IFF_NO_PI; | 948 | flags |= IFF_NO_PI; |
947 | 949 | ||
948 | if (tun->flags & TUN_ONE_QUEUE) | 950 | if (tun->flags & TUN_ONE_QUEUE) |
949 | flags |= IFF_ONE_QUEUE; | 951 | flags |= IFF_ONE_QUEUE; |
950 | 952 | ||
951 | if (tun->flags & TUN_VNET_HDR) | 953 | if (tun->flags & TUN_VNET_HDR) |
952 | flags |= IFF_VNET_HDR; | 954 | flags |= IFF_VNET_HDR; |
953 | 955 | ||
954 | return flags; | 956 | return flags; |
955 | } | 957 | } |
956 | 958 | ||
957 | static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, | 959 | static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, |
958 | char *buf) | 960 | char *buf) |
959 | { | 961 | { |
960 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); | 962 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); |
961 | return sprintf(buf, "0x%x\n", tun_flags(tun)); | 963 | return sprintf(buf, "0x%x\n", tun_flags(tun)); |
962 | } | 964 | } |
963 | 965 | ||
964 | static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, | 966 | static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, |
965 | char *buf) | 967 | char *buf) |
966 | { | 968 | { |
967 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); | 969 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); |
968 | return sprintf(buf, "%d\n", tun->owner); | 970 | return sprintf(buf, "%d\n", tun->owner); |
969 | } | 971 | } |
970 | 972 | ||
971 | static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, | 973 | static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, |
972 | char *buf) | 974 | char *buf) |
973 | { | 975 | { |
974 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); | 976 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); |
975 | return sprintf(buf, "%d\n", tun->group); | 977 | return sprintf(buf, "%d\n", tun->group); |
976 | } | 978 | } |
977 | 979 | ||
/* Read-only sysfs attributes, created in tun_set_iff(). */
static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
981 | 983 | ||
982 | static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | 984 | static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) |
983 | { | 985 | { |
984 | struct sock *sk; | 986 | struct sock *sk; |
985 | struct tun_struct *tun; | 987 | struct tun_struct *tun; |
986 | struct net_device *dev; | 988 | struct net_device *dev; |
987 | int err; | 989 | int err; |
988 | 990 | ||
989 | dev = __dev_get_by_name(net, ifr->ifr_name); | 991 | dev = __dev_get_by_name(net, ifr->ifr_name); |
990 | if (dev) { | 992 | if (dev) { |
991 | const struct cred *cred = current_cred(); | 993 | const struct cred *cred = current_cred(); |
992 | 994 | ||
993 | if (ifr->ifr_flags & IFF_TUN_EXCL) | 995 | if (ifr->ifr_flags & IFF_TUN_EXCL) |
994 | return -EBUSY; | 996 | return -EBUSY; |
995 | if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) | 997 | if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) |
996 | tun = netdev_priv(dev); | 998 | tun = netdev_priv(dev); |
997 | else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) | 999 | else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) |
998 | tun = netdev_priv(dev); | 1000 | tun = netdev_priv(dev); |
999 | else | 1001 | else |
1000 | return -EINVAL; | 1002 | return -EINVAL; |
1001 | 1003 | ||
1002 | if (((tun->owner != -1 && cred->euid != tun->owner) || | 1004 | if (((tun->owner != -1 && cred->euid != tun->owner) || |
1003 | (tun->group != -1 && !in_egroup_p(tun->group))) && | 1005 | (tun->group != -1 && !in_egroup_p(tun->group))) && |
1004 | !capable(CAP_NET_ADMIN)) | 1006 | !capable(CAP_NET_ADMIN)) |
1005 | return -EPERM; | 1007 | return -EPERM; |
1006 | err = security_tun_dev_attach(tun->socket.sk); | 1008 | err = security_tun_dev_attach(tun->socket.sk); |
1007 | if (err < 0) | 1009 | if (err < 0) |
1008 | return err; | 1010 | return err; |
1009 | 1011 | ||
1010 | err = tun_attach(tun, file); | 1012 | err = tun_attach(tun, file); |
1011 | if (err < 0) | 1013 | if (err < 0) |
1012 | return err; | 1014 | return err; |
1013 | } | 1015 | } |
1014 | else { | 1016 | else { |
1015 | char *name; | 1017 | char *name; |
1016 | unsigned long flags = 0; | 1018 | unsigned long flags = 0; |
1017 | 1019 | ||
1018 | if (!capable(CAP_NET_ADMIN)) | 1020 | if (!capable(CAP_NET_ADMIN)) |
1019 | return -EPERM; | 1021 | return -EPERM; |
1020 | err = security_tun_dev_create(); | 1022 | err = security_tun_dev_create(); |
1021 | if (err < 0) | 1023 | if (err < 0) |
1022 | return err; | 1024 | return err; |
1023 | 1025 | ||
1024 | /* Set dev type */ | 1026 | /* Set dev type */ |
1025 | if (ifr->ifr_flags & IFF_TUN) { | 1027 | if (ifr->ifr_flags & IFF_TUN) { |
1026 | /* TUN device */ | 1028 | /* TUN device */ |
1027 | flags |= TUN_TUN_DEV; | 1029 | flags |= TUN_TUN_DEV; |
1028 | name = "tun%d"; | 1030 | name = "tun%d"; |
1029 | } else if (ifr->ifr_flags & IFF_TAP) { | 1031 | } else if (ifr->ifr_flags & IFF_TAP) { |
1030 | /* TAP device */ | 1032 | /* TAP device */ |
1031 | flags |= TUN_TAP_DEV; | 1033 | flags |= TUN_TAP_DEV; |
1032 | name = "tap%d"; | 1034 | name = "tap%d"; |
1033 | } else | 1035 | } else |
1034 | return -EINVAL; | 1036 | return -EINVAL; |
1035 | 1037 | ||
1036 | if (*ifr->ifr_name) | 1038 | if (*ifr->ifr_name) |
1037 | name = ifr->ifr_name; | 1039 | name = ifr->ifr_name; |
1038 | 1040 | ||
1039 | dev = alloc_netdev(sizeof(struct tun_struct), name, | 1041 | dev = alloc_netdev(sizeof(struct tun_struct), name, |
1040 | tun_setup); | 1042 | tun_setup); |
1041 | if (!dev) | 1043 | if (!dev) |
1042 | return -ENOMEM; | 1044 | return -ENOMEM; |
1043 | 1045 | ||
1044 | dev_net_set(dev, net); | 1046 | dev_net_set(dev, net); |
1045 | dev->rtnl_link_ops = &tun_link_ops; | 1047 | dev->rtnl_link_ops = &tun_link_ops; |
1046 | 1048 | ||
1047 | tun = netdev_priv(dev); | 1049 | tun = netdev_priv(dev); |
1048 | tun->dev = dev; | 1050 | tun->dev = dev; |
1049 | tun->flags = flags; | 1051 | tun->flags = flags; |
1050 | tun->txflt.count = 0; | 1052 | tun->txflt.count = 0; |
1051 | tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); | 1053 | tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); |
1052 | 1054 | ||
1053 | err = -ENOMEM; | 1055 | err = -ENOMEM; |
1054 | sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto); | 1056 | sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto); |
1055 | if (!sk) | 1057 | if (!sk) |
1056 | goto err_free_dev; | 1058 | goto err_free_dev; |
1057 | 1059 | ||
1058 | tun->socket.wq = &tun->wq; | 1060 | tun->socket.wq = &tun->wq; |
1059 | init_waitqueue_head(&tun->wq.wait); | 1061 | init_waitqueue_head(&tun->wq.wait); |
1060 | tun->socket.ops = &tun_socket_ops; | 1062 | tun->socket.ops = &tun_socket_ops; |
1061 | sock_init_data(&tun->socket, sk); | 1063 | sock_init_data(&tun->socket, sk); |
1062 | sk->sk_write_space = tun_sock_write_space; | 1064 | sk->sk_write_space = tun_sock_write_space; |
1063 | sk->sk_sndbuf = INT_MAX; | 1065 | sk->sk_sndbuf = INT_MAX; |
1064 | 1066 | ||
1065 | tun_sk(sk)->tun = tun; | 1067 | tun_sk(sk)->tun = tun; |
1066 | 1068 | ||
1067 | security_tun_dev_post_create(sk); | 1069 | security_tun_dev_post_create(sk); |
1068 | 1070 | ||
1069 | tun_net_init(dev); | 1071 | tun_net_init(dev); |
1070 | 1072 | ||
1071 | if (strchr(dev->name, '%')) { | 1073 | if (strchr(dev->name, '%')) { |
1072 | err = dev_alloc_name(dev, dev->name); | 1074 | err = dev_alloc_name(dev, dev->name); |
1073 | if (err < 0) | 1075 | if (err < 0) |
1074 | goto err_free_sk; | 1076 | goto err_free_sk; |
1075 | } | 1077 | } |
1076 | 1078 | ||
1077 | err = register_netdevice(tun->dev); | 1079 | err = register_netdevice(tun->dev); |
1078 | if (err < 0) | 1080 | if (err < 0) |
1079 | goto err_free_sk; | 1081 | goto err_free_sk; |
1080 | 1082 | ||
1081 | if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || | 1083 | if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || |
1082 | device_create_file(&tun->dev->dev, &dev_attr_owner) || | 1084 | device_create_file(&tun->dev->dev, &dev_attr_owner) || |
1083 | device_create_file(&tun->dev->dev, &dev_attr_group)) | 1085 | device_create_file(&tun->dev->dev, &dev_attr_group)) |
1084 | printk(KERN_ERR "Failed to create tun sysfs files\n"); | 1086 | printk(KERN_ERR "Failed to create tun sysfs files\n"); |
1085 | 1087 | ||
1086 | sk->sk_destruct = tun_sock_destruct; | 1088 | sk->sk_destruct = tun_sock_destruct; |
1087 | 1089 | ||
1088 | err = tun_attach(tun, file); | 1090 | err = tun_attach(tun, file); |
1089 | if (err < 0) | 1091 | if (err < 0) |
1090 | goto failed; | 1092 | goto failed; |
1091 | } | 1093 | } |
1092 | 1094 | ||
1093 | DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); | 1095 | DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); |
1094 | 1096 | ||
1095 | if (ifr->ifr_flags & IFF_NO_PI) | 1097 | if (ifr->ifr_flags & IFF_NO_PI) |
1096 | tun->flags |= TUN_NO_PI; | 1098 | tun->flags |= TUN_NO_PI; |
1097 | else | 1099 | else |
1098 | tun->flags &= ~TUN_NO_PI; | 1100 | tun->flags &= ~TUN_NO_PI; |
1099 | 1101 | ||
1100 | if (ifr->ifr_flags & IFF_ONE_QUEUE) | 1102 | if (ifr->ifr_flags & IFF_ONE_QUEUE) |
1101 | tun->flags |= TUN_ONE_QUEUE; | 1103 | tun->flags |= TUN_ONE_QUEUE; |
1102 | else | 1104 | else |
1103 | tun->flags &= ~TUN_ONE_QUEUE; | 1105 | tun->flags &= ~TUN_ONE_QUEUE; |
1104 | 1106 | ||
1105 | if (ifr->ifr_flags & IFF_VNET_HDR) | 1107 | if (ifr->ifr_flags & IFF_VNET_HDR) |
1106 | tun->flags |= TUN_VNET_HDR; | 1108 | tun->flags |= TUN_VNET_HDR; |
1107 | else | 1109 | else |
1108 | tun->flags &= ~TUN_VNET_HDR; | 1110 | tun->flags &= ~TUN_VNET_HDR; |
1109 | 1111 | ||
1110 | /* Make sure persistent devices do not get stuck in | 1112 | /* Make sure persistent devices do not get stuck in |
1111 | * xoff state. | 1113 | * xoff state. |
1112 | */ | 1114 | */ |
1113 | if (netif_running(tun->dev)) | 1115 | if (netif_running(tun->dev)) |
1114 | netif_wake_queue(tun->dev); | 1116 | netif_wake_queue(tun->dev); |
1115 | 1117 | ||
1116 | strcpy(ifr->ifr_name, tun->dev->name); | 1118 | strcpy(ifr->ifr_name, tun->dev->name); |
1117 | return 0; | 1119 | return 0; |
1118 | 1120 | ||
1119 | err_free_sk: | 1121 | err_free_sk: |
1120 | sock_put(sk); | 1122 | sock_put(sk); |
1121 | err_free_dev: | 1123 | err_free_dev: |
1122 | free_netdev(dev); | 1124 | free_netdev(dev); |
1123 | failed: | 1125 | failed: |
1124 | return err; | 1126 | return err; |
1125 | } | 1127 | } |
1126 | 1128 | ||
1127 | static int tun_get_iff(struct net *net, struct tun_struct *tun, | 1129 | static int tun_get_iff(struct net *net, struct tun_struct *tun, |
1128 | struct ifreq *ifr) | 1130 | struct ifreq *ifr) |
1129 | { | 1131 | { |
1130 | DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); | 1132 | DBG(KERN_INFO "%s: tun_get_iff\n", tun->dev->name); |
1131 | 1133 | ||
1132 | strcpy(ifr->ifr_name, tun->dev->name); | 1134 | strcpy(ifr->ifr_name, tun->dev->name); |
1133 | 1135 | ||
1134 | ifr->ifr_flags = tun_flags(tun); | 1136 | ifr->ifr_flags = tun_flags(tun); |
1135 | 1137 | ||
1136 | return 0; | 1138 | return 0; |
1137 | } | 1139 | } |
1138 | 1140 | ||
1139 | /* This is like a cut-down ethtool ops, except done via tun fd so no | 1141 | /* This is like a cut-down ethtool ops, except done via tun fd so no |
1140 | * privs required. */ | 1142 | * privs required. */ |
1141 | static int set_offload(struct net_device *dev, unsigned long arg) | 1143 | static int set_offload(struct net_device *dev, unsigned long arg) |
1142 | { | 1144 | { |
1143 | unsigned int old_features, features; | 1145 | unsigned int old_features, features; |
1144 | 1146 | ||
1145 | old_features = dev->features; | 1147 | old_features = dev->features; |
1146 | /* Unset features, set them as we chew on the arg. */ | 1148 | /* Unset features, set them as we chew on the arg. */ |
1147 | features = (old_features & ~(NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST | 1149 | features = (old_features & ~(NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST |
1148 | |NETIF_F_TSO_ECN|NETIF_F_TSO|NETIF_F_TSO6 | 1150 | |NETIF_F_TSO_ECN|NETIF_F_TSO|NETIF_F_TSO6 |
1149 | |NETIF_F_UFO)); | 1151 | |NETIF_F_UFO)); |
1150 | 1152 | ||
1151 | if (arg & TUN_F_CSUM) { | 1153 | if (arg & TUN_F_CSUM) { |
1152 | features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; | 1154 | features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; |
1153 | arg &= ~TUN_F_CSUM; | 1155 | arg &= ~TUN_F_CSUM; |
1154 | 1156 | ||
1155 | if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { | 1157 | if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { |
1156 | if (arg & TUN_F_TSO_ECN) { | 1158 | if (arg & TUN_F_TSO_ECN) { |
1157 | features |= NETIF_F_TSO_ECN; | 1159 | features |= NETIF_F_TSO_ECN; |
1158 | arg &= ~TUN_F_TSO_ECN; | 1160 | arg &= ~TUN_F_TSO_ECN; |
1159 | } | 1161 | } |
1160 | if (arg & TUN_F_TSO4) | 1162 | if (arg & TUN_F_TSO4) |
1161 | features |= NETIF_F_TSO; | 1163 | features |= NETIF_F_TSO; |
1162 | if (arg & TUN_F_TSO6) | 1164 | if (arg & TUN_F_TSO6) |
1163 | features |= NETIF_F_TSO6; | 1165 | features |= NETIF_F_TSO6; |
1164 | arg &= ~(TUN_F_TSO4|TUN_F_TSO6); | 1166 | arg &= ~(TUN_F_TSO4|TUN_F_TSO6); |
1165 | } | 1167 | } |
1166 | 1168 | ||
1167 | if (arg & TUN_F_UFO) { | 1169 | if (arg & TUN_F_UFO) { |
1168 | features |= NETIF_F_UFO; | 1170 | features |= NETIF_F_UFO; |
1169 | arg &= ~TUN_F_UFO; | 1171 | arg &= ~TUN_F_UFO; |
1170 | } | 1172 | } |
1171 | } | 1173 | } |
1172 | 1174 | ||
1173 | /* This gives the user a way to test for new features in future by | 1175 | /* This gives the user a way to test for new features in future by |
1174 | * trying to set them. */ | 1176 | * trying to set them. */ |
1175 | if (arg) | 1177 | if (arg) |
1176 | return -EINVAL; | 1178 | return -EINVAL; |
1177 | 1179 | ||
1178 | dev->features = features; | 1180 | dev->features = features; |
1179 | if (old_features != dev->features) | 1181 | if (old_features != dev->features) |
1180 | netdev_features_change(dev); | 1182 | netdev_features_change(dev); |
1181 | 1183 | ||
1182 | return 0; | 1184 | return 0; |
1183 | } | 1185 | } |
1184 | 1186 | ||
1185 | static long __tun_chr_ioctl(struct file *file, unsigned int cmd, | 1187 | static long __tun_chr_ioctl(struct file *file, unsigned int cmd, |
1186 | unsigned long arg, int ifreq_len) | 1188 | unsigned long arg, int ifreq_len) |
1187 | { | 1189 | { |
1188 | struct tun_file *tfile = file->private_data; | 1190 | struct tun_file *tfile = file->private_data; |
1189 | struct tun_struct *tun; | 1191 | struct tun_struct *tun; |
1190 | void __user* argp = (void __user*)arg; | 1192 | void __user* argp = (void __user*)arg; |
1191 | struct sock_fprog fprog; | 1193 | struct sock_fprog fprog; |
1192 | struct ifreq ifr; | 1194 | struct ifreq ifr; |
1193 | int sndbuf; | 1195 | int sndbuf; |
1194 | int vnet_hdr_sz; | 1196 | int vnet_hdr_sz; |
1195 | int ret; | 1197 | int ret; |
1196 | 1198 | ||
1197 | if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) | 1199 | if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) |
1198 | if (copy_from_user(&ifr, argp, ifreq_len)) | 1200 | if (copy_from_user(&ifr, argp, ifreq_len)) |
1199 | return -EFAULT; | 1201 | return -EFAULT; |
1200 | 1202 | ||
1201 | if (cmd == TUNGETFEATURES) { | 1203 | if (cmd == TUNGETFEATURES) { |
1202 | /* Currently this just means: "what IFF flags are valid?". | 1204 | /* Currently this just means: "what IFF flags are valid?". |
1203 | * This is needed because we never checked for invalid flags on | 1205 | * This is needed because we never checked for invalid flags on |
1204 | * TUNSETIFF. */ | 1206 | * TUNSETIFF. */ |
1205 | return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | | 1207 | return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | |
1206 | IFF_VNET_HDR, | 1208 | IFF_VNET_HDR, |
1207 | (unsigned int __user*)argp); | 1209 | (unsigned int __user*)argp); |
1208 | } | 1210 | } |
1209 | 1211 | ||
1210 | rtnl_lock(); | 1212 | rtnl_lock(); |
1211 | 1213 | ||
1212 | tun = __tun_get(tfile); | 1214 | tun = __tun_get(tfile); |
1213 | if (cmd == TUNSETIFF && !tun) { | 1215 | if (cmd == TUNSETIFF && !tun) { |
1214 | ifr.ifr_name[IFNAMSIZ-1] = '\0'; | 1216 | ifr.ifr_name[IFNAMSIZ-1] = '\0'; |
1215 | 1217 | ||
1216 | ret = tun_set_iff(tfile->net, file, &ifr); | 1218 | ret = tun_set_iff(tfile->net, file, &ifr); |
1217 | 1219 | ||
1218 | if (ret) | 1220 | if (ret) |
1219 | goto unlock; | 1221 | goto unlock; |
1220 | 1222 | ||
1221 | if (copy_to_user(argp, &ifr, ifreq_len)) | 1223 | if (copy_to_user(argp, &ifr, ifreq_len)) |
1222 | ret = -EFAULT; | 1224 | ret = -EFAULT; |
1223 | goto unlock; | 1225 | goto unlock; |
1224 | } | 1226 | } |
1225 | 1227 | ||
1226 | ret = -EBADFD; | 1228 | ret = -EBADFD; |
1227 | if (!tun) | 1229 | if (!tun) |
1228 | goto unlock; | 1230 | goto unlock; |
1229 | 1231 | ||
1230 | DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); | 1232 | DBG(KERN_INFO "%s: tun_chr_ioctl cmd %d\n", tun->dev->name, cmd); |
1231 | 1233 | ||
1232 | ret = 0; | 1234 | ret = 0; |
1233 | switch (cmd) { | 1235 | switch (cmd) { |
1234 | case TUNGETIFF: | 1236 | case TUNGETIFF: |
1235 | ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr); | 1237 | ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr); |
1236 | if (ret) | 1238 | if (ret) |
1237 | break; | 1239 | break; |
1238 | 1240 | ||
1239 | if (copy_to_user(argp, &ifr, ifreq_len)) | 1241 | if (copy_to_user(argp, &ifr, ifreq_len)) |
1240 | ret = -EFAULT; | 1242 | ret = -EFAULT; |
1241 | break; | 1243 | break; |
1242 | 1244 | ||
1243 | case TUNSETNOCSUM: | 1245 | case TUNSETNOCSUM: |
1244 | /* Disable/Enable checksum */ | 1246 | /* Disable/Enable checksum */ |
1245 | if (arg) | 1247 | if (arg) |
1246 | tun->flags |= TUN_NOCHECKSUM; | 1248 | tun->flags |= TUN_NOCHECKSUM; |
1247 | else | 1249 | else |
1248 | tun->flags &= ~TUN_NOCHECKSUM; | 1250 | tun->flags &= ~TUN_NOCHECKSUM; |
1249 | 1251 | ||
1250 | DBG(KERN_INFO "%s: checksum %s\n", | 1252 | DBG(KERN_INFO "%s: checksum %s\n", |
1251 | tun->dev->name, arg ? "disabled" : "enabled"); | 1253 | tun->dev->name, arg ? "disabled" : "enabled"); |
1252 | break; | 1254 | break; |
1253 | 1255 | ||
1254 | case TUNSETPERSIST: | 1256 | case TUNSETPERSIST: |
1255 | /* Disable/Enable persist mode */ | 1257 | /* Disable/Enable persist mode */ |
1256 | if (arg) | 1258 | if (arg) |
1257 | tun->flags |= TUN_PERSIST; | 1259 | tun->flags |= TUN_PERSIST; |
1258 | else | 1260 | else |
1259 | tun->flags &= ~TUN_PERSIST; | 1261 | tun->flags &= ~TUN_PERSIST; |
1260 | 1262 | ||
1261 | DBG(KERN_INFO "%s: persist %s\n", | 1263 | DBG(KERN_INFO "%s: persist %s\n", |
1262 | tun->dev->name, arg ? "enabled" : "disabled"); | 1264 | tun->dev->name, arg ? "enabled" : "disabled"); |
1263 | break; | 1265 | break; |
1264 | 1266 | ||
1265 | case TUNSETOWNER: | 1267 | case TUNSETOWNER: |
1266 | /* Set owner of the device */ | 1268 | /* Set owner of the device */ |
1267 | tun->owner = (uid_t) arg; | 1269 | tun->owner = (uid_t) arg; |
1268 | 1270 | ||
1269 | DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); | 1271 | DBG(KERN_INFO "%s: owner set to %d\n", tun->dev->name, tun->owner); |
1270 | break; | 1272 | break; |
1271 | 1273 | ||
1272 | case TUNSETGROUP: | 1274 | case TUNSETGROUP: |
1273 | /* Set group of the device */ | 1275 | /* Set group of the device */ |
1274 | tun->group= (gid_t) arg; | 1276 | tun->group= (gid_t) arg; |
1275 | 1277 | ||
1276 | DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group); | 1278 | DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group); |
1277 | break; | 1279 | break; |
1278 | 1280 | ||
1279 | case TUNSETLINK: | 1281 | case TUNSETLINK: |
1280 | /* Only allow setting the type when the interface is down */ | 1282 | /* Only allow setting the type when the interface is down */ |
1281 | if (tun->dev->flags & IFF_UP) { | 1283 | if (tun->dev->flags & IFF_UP) { |
1282 | DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", | 1284 | DBG(KERN_INFO "%s: Linktype set failed because interface is up\n", |
1283 | tun->dev->name); | 1285 | tun->dev->name); |
1284 | ret = -EBUSY; | 1286 | ret = -EBUSY; |
1285 | } else { | 1287 | } else { |
1286 | tun->dev->type = (int) arg; | 1288 | tun->dev->type = (int) arg; |
1287 | DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); | 1289 | DBG(KERN_INFO "%s: linktype set to %d\n", tun->dev->name, tun->dev->type); |
1288 | ret = 0; | 1290 | ret = 0; |
1289 | } | 1291 | } |
1290 | break; | 1292 | break; |
1291 | 1293 | ||
1292 | #ifdef TUN_DEBUG | 1294 | #ifdef TUN_DEBUG |
1293 | case TUNSETDEBUG: | 1295 | case TUNSETDEBUG: |
1294 | tun->debug = arg; | 1296 | tun->debug = arg; |
1295 | break; | 1297 | break; |
1296 | #endif | 1298 | #endif |
1297 | case TUNSETOFFLOAD: | 1299 | case TUNSETOFFLOAD: |
1298 | ret = set_offload(tun->dev, arg); | 1300 | ret = set_offload(tun->dev, arg); |
1299 | break; | 1301 | break; |
1300 | 1302 | ||
1301 | case TUNSETTXFILTER: | 1303 | case TUNSETTXFILTER: |
1302 | /* Can be set only for TAPs */ | 1304 | /* Can be set only for TAPs */ |
1303 | ret = -EINVAL; | 1305 | ret = -EINVAL; |
1304 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) | 1306 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
1305 | break; | 1307 | break; |
1306 | ret = update_filter(&tun->txflt, (void __user *)arg); | 1308 | ret = update_filter(&tun->txflt, (void __user *)arg); |
1307 | break; | 1309 | break; |
1308 | 1310 | ||
1309 | case SIOCGIFHWADDR: | 1311 | case SIOCGIFHWADDR: |
1310 | /* Get hw addres */ | 1312 | /* Get hw addres */ |
1311 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); | 1313 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); |
1312 | ifr.ifr_hwaddr.sa_family = tun->dev->type; | 1314 | ifr.ifr_hwaddr.sa_family = tun->dev->type; |
1313 | if (copy_to_user(argp, &ifr, ifreq_len)) | 1315 | if (copy_to_user(argp, &ifr, ifreq_len)) |
1314 | ret = -EFAULT; | 1316 | ret = -EFAULT; |
1315 | break; | 1317 | break; |
1316 | 1318 | ||
1317 | case SIOCSIFHWADDR: | 1319 | case SIOCSIFHWADDR: |
1318 | /* Set hw address */ | 1320 | /* Set hw address */ |
1319 | DBG(KERN_DEBUG "%s: set hw address: %pM\n", | 1321 | DBG(KERN_DEBUG "%s: set hw address: %pM\n", |
1320 | tun->dev->name, ifr.ifr_hwaddr.sa_data); | 1322 | tun->dev->name, ifr.ifr_hwaddr.sa_data); |
1321 | 1323 | ||
1322 | ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); | 1324 | ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); |
1323 | break; | 1325 | break; |
1324 | 1326 | ||
1325 | case TUNGETSNDBUF: | 1327 | case TUNGETSNDBUF: |
1326 | sndbuf = tun->socket.sk->sk_sndbuf; | 1328 | sndbuf = tun->socket.sk->sk_sndbuf; |
1327 | if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) | 1329 | if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) |
1328 | ret = -EFAULT; | 1330 | ret = -EFAULT; |
1329 | break; | 1331 | break; |
1330 | 1332 | ||
1331 | case TUNSETSNDBUF: | 1333 | case TUNSETSNDBUF: |
1332 | if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { | 1334 | if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { |
1333 | ret = -EFAULT; | 1335 | ret = -EFAULT; |
1334 | break; | 1336 | break; |
1335 | } | 1337 | } |
1336 | 1338 | ||
1337 | tun->socket.sk->sk_sndbuf = sndbuf; | 1339 | tun->socket.sk->sk_sndbuf = sndbuf; |
1338 | break; | 1340 | break; |
1339 | 1341 | ||
1340 | case TUNGETVNETHDRSZ: | 1342 | case TUNGETVNETHDRSZ: |
1341 | vnet_hdr_sz = tun->vnet_hdr_sz; | 1343 | vnet_hdr_sz = tun->vnet_hdr_sz; |
1342 | if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) | 1344 | if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) |
1343 | ret = -EFAULT; | 1345 | ret = -EFAULT; |
1344 | break; | 1346 | break; |
1345 | 1347 | ||
1346 | case TUNSETVNETHDRSZ: | 1348 | case TUNSETVNETHDRSZ: |
1347 | if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { | 1349 | if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { |
1348 | ret = -EFAULT; | 1350 | ret = -EFAULT; |
1349 | break; | 1351 | break; |
1350 | } | 1352 | } |
1351 | if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { | 1353 | if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { |
1352 | ret = -EINVAL; | 1354 | ret = -EINVAL; |
1353 | break; | 1355 | break; |
1354 | } | 1356 | } |
1355 | 1357 | ||
1356 | tun->vnet_hdr_sz = vnet_hdr_sz; | 1358 | tun->vnet_hdr_sz = vnet_hdr_sz; |
1357 | break; | 1359 | break; |
1358 | 1360 | ||
1359 | case TUNATTACHFILTER: | 1361 | case TUNATTACHFILTER: |
1360 | /* Can be set only for TAPs */ | 1362 | /* Can be set only for TAPs */ |
1361 | ret = -EINVAL; | 1363 | ret = -EINVAL; |
1362 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) | 1364 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
1363 | break; | 1365 | break; |
1364 | ret = -EFAULT; | 1366 | ret = -EFAULT; |
1365 | if (copy_from_user(&fprog, argp, sizeof(fprog))) | 1367 | if (copy_from_user(&fprog, argp, sizeof(fprog))) |
1366 | break; | 1368 | break; |
1367 | 1369 | ||
1368 | ret = sk_attach_filter(&fprog, tun->socket.sk); | 1370 | ret = sk_attach_filter(&fprog, tun->socket.sk); |
1369 | break; | 1371 | break; |
1370 | 1372 | ||
1371 | case TUNDETACHFILTER: | 1373 | case TUNDETACHFILTER: |
1372 | /* Can be set only for TAPs */ | 1374 | /* Can be set only for TAPs */ |
1373 | ret = -EINVAL; | 1375 | ret = -EINVAL; |
1374 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) | 1376 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
1375 | break; | 1377 | break; |
1376 | ret = sk_detach_filter(tun->socket.sk); | 1378 | ret = sk_detach_filter(tun->socket.sk); |
1377 | break; | 1379 | break; |
1378 | 1380 | ||
1379 | default: | 1381 | default: |
1380 | ret = -EINVAL; | 1382 | ret = -EINVAL; |
1381 | break; | 1383 | break; |
1382 | } | 1384 | } |
1383 | 1385 | ||
1384 | unlock: | 1386 | unlock: |
1385 | rtnl_unlock(); | 1387 | rtnl_unlock(); |
1386 | if (tun) | 1388 | if (tun) |
1387 | tun_put(tun); | 1389 | tun_put(tun); |
1388 | return ret; | 1390 | return ret; |
1389 | } | 1391 | } |
1390 | 1392 | ||
1391 | static long tun_chr_ioctl(struct file *file, | 1393 | static long tun_chr_ioctl(struct file *file, |
1392 | unsigned int cmd, unsigned long arg) | 1394 | unsigned int cmd, unsigned long arg) |
1393 | { | 1395 | { |
1394 | return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); | 1396 | return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); |
1395 | } | 1397 | } |
1396 | 1398 | ||
1397 | #ifdef CONFIG_COMPAT | 1399 | #ifdef CONFIG_COMPAT |
1398 | static long tun_chr_compat_ioctl(struct file *file, | 1400 | static long tun_chr_compat_ioctl(struct file *file, |
1399 | unsigned int cmd, unsigned long arg) | 1401 | unsigned int cmd, unsigned long arg) |
1400 | { | 1402 | { |
1401 | switch (cmd) { | 1403 | switch (cmd) { |
1402 | case TUNSETIFF: | 1404 | case TUNSETIFF: |
1403 | case TUNGETIFF: | 1405 | case TUNGETIFF: |
1404 | case TUNSETTXFILTER: | 1406 | case TUNSETTXFILTER: |
1405 | case TUNGETSNDBUF: | 1407 | case TUNGETSNDBUF: |
1406 | case TUNSETSNDBUF: | 1408 | case TUNSETSNDBUF: |
1407 | case SIOCGIFHWADDR: | 1409 | case SIOCGIFHWADDR: |
1408 | case SIOCSIFHWADDR: | 1410 | case SIOCSIFHWADDR: |
1409 | arg = (unsigned long)compat_ptr(arg); | 1411 | arg = (unsigned long)compat_ptr(arg); |
1410 | break; | 1412 | break; |
1411 | default: | 1413 | default: |
1412 | arg = (compat_ulong_t)arg; | 1414 | arg = (compat_ulong_t)arg; |
1413 | break; | 1415 | break; |
1414 | } | 1416 | } |
1415 | 1417 | ||
1416 | /* | 1418 | /* |
1417 | * compat_ifreq is shorter than ifreq, so we must not access beyond | 1419 | * compat_ifreq is shorter than ifreq, so we must not access beyond |
1418 | * the end of that structure. All fields that are used in this | 1420 | * the end of that structure. All fields that are used in this |
1419 | * driver are compatible though, we don't need to convert the | 1421 | * driver are compatible though, we don't need to convert the |
1420 | * contents. | 1422 | * contents. |
1421 | */ | 1423 | */ |
1422 | return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); | 1424 | return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); |
1423 | } | 1425 | } |
1424 | #endif /* CONFIG_COMPAT */ | 1426 | #endif /* CONFIG_COMPAT */ |
1425 | 1427 | ||
1426 | static int tun_chr_fasync(int fd, struct file *file, int on) | 1428 | static int tun_chr_fasync(int fd, struct file *file, int on) |
1427 | { | 1429 | { |
1428 | struct tun_struct *tun = tun_get(file); | 1430 | struct tun_struct *tun = tun_get(file); |
1429 | int ret; | 1431 | int ret; |
1430 | 1432 | ||
1431 | if (!tun) | 1433 | if (!tun) |
1432 | return -EBADFD; | 1434 | return -EBADFD; |
1433 | 1435 | ||
1434 | DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); | 1436 | DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); |
1435 | 1437 | ||
1436 | if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) | 1438 | if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) |
1437 | goto out; | 1439 | goto out; |
1438 | 1440 | ||
1439 | if (on) { | 1441 | if (on) { |
1440 | ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0); | 1442 | ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0); |
1441 | if (ret) | 1443 | if (ret) |
1442 | goto out; | 1444 | goto out; |
1443 | tun->flags |= TUN_FASYNC; | 1445 | tun->flags |= TUN_FASYNC; |
1444 | } else | 1446 | } else |
1445 | tun->flags &= ~TUN_FASYNC; | 1447 | tun->flags &= ~TUN_FASYNC; |
1446 | ret = 0; | 1448 | ret = 0; |
1447 | out: | 1449 | out: |
1448 | tun_put(tun); | 1450 | tun_put(tun); |
1449 | return ret; | 1451 | return ret; |
1450 | } | 1452 | } |
1451 | 1453 | ||
1452 | static int tun_chr_open(struct inode *inode, struct file * file) | 1454 | static int tun_chr_open(struct inode *inode, struct file * file) |
1453 | { | 1455 | { |
1454 | struct tun_file *tfile; | 1456 | struct tun_file *tfile; |
1455 | 1457 | ||
1456 | DBG1(KERN_INFO "tunX: tun_chr_open\n"); | 1458 | DBG1(KERN_INFO "tunX: tun_chr_open\n"); |
1457 | 1459 | ||
1458 | tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); | 1460 | tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); |
1459 | if (!tfile) | 1461 | if (!tfile) |
1460 | return -ENOMEM; | 1462 | return -ENOMEM; |
1461 | atomic_set(&tfile->count, 0); | 1463 | atomic_set(&tfile->count, 0); |
1462 | tfile->tun = NULL; | 1464 | tfile->tun = NULL; |
1463 | tfile->net = get_net(current->nsproxy->net_ns); | 1465 | tfile->net = get_net(current->nsproxy->net_ns); |
1464 | file->private_data = tfile; | 1466 | file->private_data = tfile; |
1465 | return 0; | 1467 | return 0; |
1466 | } | 1468 | } |
1467 | 1469 | ||
1468 | static int tun_chr_close(struct inode *inode, struct file *file) | 1470 | static int tun_chr_close(struct inode *inode, struct file *file) |
1469 | { | 1471 | { |
1470 | struct tun_file *tfile = file->private_data; | 1472 | struct tun_file *tfile = file->private_data; |
1471 | struct tun_struct *tun; | 1473 | struct tun_struct *tun; |
1472 | 1474 | ||
1473 | tun = __tun_get(tfile); | 1475 | tun = __tun_get(tfile); |
1474 | if (tun) { | 1476 | if (tun) { |
1475 | struct net_device *dev = tun->dev; | 1477 | struct net_device *dev = tun->dev; |
1476 | 1478 | ||
1477 | DBG(KERN_INFO "%s: tun_chr_close\n", dev->name); | 1479 | DBG(KERN_INFO "%s: tun_chr_close\n", dev->name); |
1478 | 1480 | ||
1479 | __tun_detach(tun); | 1481 | __tun_detach(tun); |
1480 | 1482 | ||
1481 | /* If desirable, unregister the netdevice. */ | 1483 | /* If desirable, unregister the netdevice. */ |
1482 | if (!(tun->flags & TUN_PERSIST)) { | 1484 | if (!(tun->flags & TUN_PERSIST)) { |
1483 | rtnl_lock(); | 1485 | rtnl_lock(); |
1484 | if (dev->reg_state == NETREG_REGISTERED) | 1486 | if (dev->reg_state == NETREG_REGISTERED) |
1485 | unregister_netdevice(dev); | 1487 | unregister_netdevice(dev); |
1486 | rtnl_unlock(); | 1488 | rtnl_unlock(); |
1487 | } | 1489 | } |
1488 | } | 1490 | } |
1489 | 1491 | ||
1490 | tun = tfile->tun; | 1492 | tun = tfile->tun; |
1491 | if (tun) | 1493 | if (tun) |
1492 | sock_put(tun->socket.sk); | 1494 | sock_put(tun->socket.sk); |
1493 | 1495 | ||
1494 | put_net(tfile->net); | 1496 | put_net(tfile->net); |
1495 | kfree(tfile); | 1497 | kfree(tfile); |
1496 | 1498 | ||
1497 | return 0; | 1499 | return 0; |
1498 | } | 1500 | } |
1499 | 1501 | ||
1500 | static const struct file_operations tun_fops = { | 1502 | static const struct file_operations tun_fops = { |
1501 | .owner = THIS_MODULE, | 1503 | .owner = THIS_MODULE, |
1502 | .llseek = no_llseek, | 1504 | .llseek = no_llseek, |
1503 | .read = do_sync_read, | 1505 | .read = do_sync_read, |
1504 | .aio_read = tun_chr_aio_read, | 1506 | .aio_read = tun_chr_aio_read, |
1505 | .write = do_sync_write, | 1507 | .write = do_sync_write, |
1506 | .aio_write = tun_chr_aio_write, | 1508 | .aio_write = tun_chr_aio_write, |
1507 | .poll = tun_chr_poll, | 1509 | .poll = tun_chr_poll, |
1508 | .unlocked_ioctl = tun_chr_ioctl, | 1510 | .unlocked_ioctl = tun_chr_ioctl, |
1509 | #ifdef CONFIG_COMPAT | 1511 | #ifdef CONFIG_COMPAT |
1510 | .compat_ioctl = tun_chr_compat_ioctl, | 1512 | .compat_ioctl = tun_chr_compat_ioctl, |
1511 | #endif | 1513 | #endif |
1512 | .open = tun_chr_open, | 1514 | .open = tun_chr_open, |
1513 | .release = tun_chr_close, | 1515 | .release = tun_chr_close, |
1514 | .fasync = tun_chr_fasync | 1516 | .fasync = tun_chr_fasync |
1515 | }; | 1517 | }; |
1516 | 1518 | ||
1517 | static struct miscdevice tun_miscdev = { | 1519 | static struct miscdevice tun_miscdev = { |
1518 | .minor = TUN_MINOR, | 1520 | .minor = TUN_MINOR, |
1519 | .name = "tun", | 1521 | .name = "tun", |
1520 | .nodename = "net/tun", | 1522 | .nodename = "net/tun", |
1521 | .fops = &tun_fops, | 1523 | .fops = &tun_fops, |
1522 | }; | 1524 | }; |
1523 | 1525 | ||
1524 | /* ethtool interface */ | 1526 | /* ethtool interface */ |
1525 | 1527 | ||
1526 | static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 1528 | static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
1527 | { | 1529 | { |
1528 | cmd->supported = 0; | 1530 | cmd->supported = 0; |
1529 | cmd->advertising = 0; | 1531 | cmd->advertising = 0; |
1530 | cmd->speed = SPEED_10; | 1532 | cmd->speed = SPEED_10; |
1531 | cmd->duplex = DUPLEX_FULL; | 1533 | cmd->duplex = DUPLEX_FULL; |
1532 | cmd->port = PORT_TP; | 1534 | cmd->port = PORT_TP; |
1533 | cmd->phy_address = 0; | 1535 | cmd->phy_address = 0; |
1534 | cmd->transceiver = XCVR_INTERNAL; | 1536 | cmd->transceiver = XCVR_INTERNAL; |
1535 | cmd->autoneg = AUTONEG_DISABLE; | 1537 | cmd->autoneg = AUTONEG_DISABLE; |
1536 | cmd->maxtxpkt = 0; | 1538 | cmd->maxtxpkt = 0; |
1537 | cmd->maxrxpkt = 0; | 1539 | cmd->maxrxpkt = 0; |
1538 | return 0; | 1540 | return 0; |
1539 | } | 1541 | } |
1540 | 1542 | ||
1541 | static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 1543 | static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) |
1542 | { | 1544 | { |
1543 | struct tun_struct *tun = netdev_priv(dev); | 1545 | struct tun_struct *tun = netdev_priv(dev); |
1544 | 1546 | ||
1545 | strcpy(info->driver, DRV_NAME); | 1547 | strcpy(info->driver, DRV_NAME); |
1546 | strcpy(info->version, DRV_VERSION); | 1548 | strcpy(info->version, DRV_VERSION); |
1547 | strcpy(info->fw_version, "N/A"); | 1549 | strcpy(info->fw_version, "N/A"); |
1548 | 1550 | ||
1549 | switch (tun->flags & TUN_TYPE_MASK) { | 1551 | switch (tun->flags & TUN_TYPE_MASK) { |
1550 | case TUN_TUN_DEV: | 1552 | case TUN_TUN_DEV: |
1551 | strcpy(info->bus_info, "tun"); | 1553 | strcpy(info->bus_info, "tun"); |
1552 | break; | 1554 | break; |
1553 | case TUN_TAP_DEV: | 1555 | case TUN_TAP_DEV: |
1554 | strcpy(info->bus_info, "tap"); | 1556 | strcpy(info->bus_info, "tap"); |
1555 | break; | 1557 | break; |
1556 | } | 1558 | } |
1557 | } | 1559 | } |
1558 | 1560 | ||
1559 | static u32 tun_get_msglevel(struct net_device *dev) | 1561 | static u32 tun_get_msglevel(struct net_device *dev) |
1560 | { | 1562 | { |
1561 | #ifdef TUN_DEBUG | 1563 | #ifdef TUN_DEBUG |
1562 | struct tun_struct *tun = netdev_priv(dev); | 1564 | struct tun_struct *tun = netdev_priv(dev); |
1563 | return tun->debug; | 1565 | return tun->debug; |
1564 | #else | 1566 | #else |
1565 | return -EOPNOTSUPP; | 1567 | return -EOPNOTSUPP; |
1566 | #endif | 1568 | #endif |
1567 | } | 1569 | } |
1568 | 1570 | ||
1569 | static void tun_set_msglevel(struct net_device *dev, u32 value) | 1571 | static void tun_set_msglevel(struct net_device *dev, u32 value) |
1570 | { | 1572 | { |
1571 | #ifdef TUN_DEBUG | 1573 | #ifdef TUN_DEBUG |
1572 | struct tun_struct *tun = netdev_priv(dev); | 1574 | struct tun_struct *tun = netdev_priv(dev); |
1573 | tun->debug = value; | 1575 | tun->debug = value; |
1574 | #endif | 1576 | #endif |
1575 | } | 1577 | } |
1576 | 1578 | ||
1577 | static u32 tun_get_link(struct net_device *dev) | ||
1578 | { | ||
1579 | struct tun_struct *tun = netdev_priv(dev); | ||
1580 | return !!tun->tfile; | ||
1581 | } | ||
1582 | |||
1583 | static u32 tun_get_rx_csum(struct net_device *dev) | 1579 | static u32 tun_get_rx_csum(struct net_device *dev) |
1584 | { | 1580 | { |
1585 | struct tun_struct *tun = netdev_priv(dev); | 1581 | struct tun_struct *tun = netdev_priv(dev); |
1586 | return (tun->flags & TUN_NOCHECKSUM) == 0; | 1582 | return (tun->flags & TUN_NOCHECKSUM) == 0; |
1587 | } | 1583 | } |
1588 | 1584 | ||
1589 | static int tun_set_rx_csum(struct net_device *dev, u32 data) | 1585 | static int tun_set_rx_csum(struct net_device *dev, u32 data) |
1590 | { | 1586 | { |
1591 | struct tun_struct *tun = netdev_priv(dev); | 1587 | struct tun_struct *tun = netdev_priv(dev); |
1592 | if (data) | 1588 | if (data) |
1593 | tun->flags &= ~TUN_NOCHECKSUM; | 1589 | tun->flags &= ~TUN_NOCHECKSUM; |
1594 | else | 1590 | else |
1595 | tun->flags |= TUN_NOCHECKSUM; | 1591 | tun->flags |= TUN_NOCHECKSUM; |
1596 | return 0; | 1592 | return 0; |
1597 | } | 1593 | } |
1598 | 1594 | ||
1599 | static const struct ethtool_ops tun_ethtool_ops = { | 1595 | static const struct ethtool_ops tun_ethtool_ops = { |
1600 | .get_settings = tun_get_settings, | 1596 | .get_settings = tun_get_settings, |
1601 | .get_drvinfo = tun_get_drvinfo, | 1597 | .get_drvinfo = tun_get_drvinfo, |
1602 | .get_msglevel = tun_get_msglevel, | 1598 | .get_msglevel = tun_get_msglevel, |
1603 | .set_msglevel = tun_set_msglevel, | 1599 | .set_msglevel = tun_set_msglevel, |
1604 | .get_link = tun_get_link, | 1600 | .get_link = ethtool_op_get_link, |
1605 | .get_rx_csum = tun_get_rx_csum, | 1601 | .get_rx_csum = tun_get_rx_csum, |
1606 | .set_rx_csum = tun_set_rx_csum | 1602 | .set_rx_csum = tun_set_rx_csum |
1607 | }; | 1603 | }; |
1608 | 1604 | ||
1609 | 1605 | ||
1610 | static int __init tun_init(void) | 1606 | static int __init tun_init(void) |
1611 | { | 1607 | { |
1612 | int ret = 0; | 1608 | int ret = 0; |
1613 | 1609 | ||
1614 | printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); | 1610 | printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); |
1615 | printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); | 1611 | printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); |
1616 | 1612 | ||
1617 | ret = rtnl_link_register(&tun_link_ops); | 1613 | ret = rtnl_link_register(&tun_link_ops); |
1618 | if (ret) { | 1614 | if (ret) { |
1619 | printk(KERN_ERR "tun: Can't register link_ops\n"); | 1615 | printk(KERN_ERR "tun: Can't register link_ops\n"); |
1620 | goto err_linkops; | 1616 | goto err_linkops; |
1621 | } | 1617 | } |
1622 | 1618 | ||
1623 | ret = misc_register(&tun_miscdev); | 1619 | ret = misc_register(&tun_miscdev); |
1624 | if (ret) { | 1620 | if (ret) { |
1625 | printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); | 1621 | printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); |
1626 | goto err_misc; | 1622 | goto err_misc; |
1627 | } | 1623 | } |
1628 | return 0; | 1624 | return 0; |
1629 | err_misc: | 1625 | err_misc: |
1630 | rtnl_link_unregister(&tun_link_ops); | 1626 | rtnl_link_unregister(&tun_link_ops); |
1631 | err_linkops: | 1627 | err_linkops: |
1632 | return ret; | 1628 | return ret; |
1633 | } | 1629 | } |
1634 | 1630 | ||
1635 | static void tun_cleanup(void) | 1631 | static void tun_cleanup(void) |
1636 | { | 1632 | { |
1637 | misc_deregister(&tun_miscdev); | 1633 | misc_deregister(&tun_miscdev); |
1638 | rtnl_link_unregister(&tun_link_ops); | 1634 | rtnl_link_unregister(&tun_link_ops); |
1639 | } | 1635 | } |
1640 | 1636 | ||
1641 | /* Get an underlying socket object from tun file. Returns error unless file is | 1637 | /* Get an underlying socket object from tun file. Returns error unless file is |
1642 | * attached to a device. The returned object works like a packet socket, it | 1638 | * attached to a device. The returned object works like a packet socket, it |
1643 | * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for | 1639 | * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for |
1644 | * holding a reference to the file for as long as the socket is in use. */ | 1640 | * holding a reference to the file for as long as the socket is in use. */ |
1645 | struct socket *tun_get_socket(struct file *file) | 1641 | struct socket *tun_get_socket(struct file *file) |
1646 | { | 1642 | { |
1647 | struct tun_struct *tun; | 1643 | struct tun_struct *tun; |
1648 | if (file->f_op != &tun_fops) | 1644 | if (file->f_op != &tun_fops) |
1649 | return ERR_PTR(-EINVAL); | 1645 | return ERR_PTR(-EINVAL); |
1650 | tun = tun_get(file); | 1646 | tun = tun_get(file); |
1651 | if (!tun) | 1647 | if (!tun) |
1652 | return ERR_PTR(-EBADFD); | 1648 | return ERR_PTR(-EBADFD); |
1653 | tun_put(tun); | 1649 | tun_put(tun); |
1654 | return &tun->socket; | 1650 | return &tun->socket; |
1655 | } | 1651 | } |
1656 | EXPORT_SYMBOL_GPL(tun_get_socket); | 1652 | EXPORT_SYMBOL_GPL(tun_get_socket); |
1657 | 1653 | ||
1658 | module_init(tun_init); | 1654 | module_init(tun_init); |
1659 | module_exit(tun_cleanup); | 1655 | module_exit(tun_cleanup); |
1660 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | 1656 | MODULE_DESCRIPTION(DRV_DESCRIPTION); |
1661 | MODULE_AUTHOR(DRV_COPYRIGHT); | 1657 | MODULE_AUTHOR(DRV_COPYRIGHT); |
1662 | MODULE_LICENSE("GPL"); | 1658 | MODULE_LICENSE("GPL"); |
1663 | MODULE_ALIAS_MISCDEV(TUN_MINOR); | 1659 | MODULE_ALIAS_MISCDEV(TUN_MINOR); |