Blame view
drivers/net/tap.c
30.4 KB
09c434b8a
|
1 |
// SPDX-License-Identifier: GPL-2.0-only |
20d29d7a9
|
2 |
#include <linux/etherdevice.h> |
6fe3faf86
|
3 |
#include <linux/if_tap.h> |
f09e2249c
|
4 |
#include <linux/if_vlan.h> |
20d29d7a9
|
5 6 7 8 9 10 11 |
#include <linux/interrupt.h> #include <linux/nsproxy.h> #include <linux/compat.h> #include <linux/if_tun.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/cache.h> |
c3edc4010
|
12 |
#include <linux/sched/signal.h> |
20d29d7a9
|
13 |
#include <linux/types.h> |
5a0e3ad6a
|
14 |
#include <linux/slab.h> |
20d29d7a9
|
15 16 |
#include <linux/wait.h> #include <linux/cdev.h> |
404015308
|
17 |
#include <linux/idr.h> |
20d29d7a9
|
18 |
#include <linux/fs.h> |
6c36d2e26
|
19 |
#include <linux/uio.h> |
20d29d7a9
|
20 21 22 23 |
#include <net/net_namespace.h> #include <net/rtnetlink.h> #include <net/sock.h> |
b9fb9ee07
|
24 |
#include <linux/virtio_net.h> |
362899b87
|
25 |
#include <linux/skb_array.h> |
20d29d7a9
|
26 |
|
635b8c8ec
|
27 |
#define TAP_IFFEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE) |
01b07fb35
|
28 |
|
635b8c8ec
|
29 30 |
#define TAP_VNET_LE 0x80000000 #define TAP_VNET_BE 0x40000000 |
8b8e658b1
|
31 32 |
/*
 * Per-queue virtio endianness handling.
 *
 * With CONFIG_TUN_VNET_CROSS_LE userspace may explicitly flag a queue as
 * big endian (TAP_VNET_BE) to serve a cross-endian legacy guest; without
 * it, legacy endianness is whatever virtio_legacy_is_little_endian()
 * reports for the host, and the ioctls are rejected.
 */
#ifdef CONFIG_TUN_VNET_CROSS_LE
/* Legacy virtio is little endian unless this queue was flagged TAP_VNET_BE. */
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

/* Report the queue's big-endian flag to userspace. */
static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s = !!(q->flags & TAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

/* Set or clear the queue's big-endian flag from userspace. */
static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= TAP_VNET_BE;
	else
		q->flags &= ~TAP_VNET_BE;

	return 0;
}
#else
/* Without cross-endian support the host's legacy endianness always wins. */
static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

/* Cross-endian handling not compiled in: reject the get ioctl. */
static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}

/* Cross-endian handling not compiled in: reject the set ioctl. */
static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */
6ae7feb31
|
75 |
|
635b8c8ec
|
76 |
/*
 * A queue is little endian if userspace forced it so (TAP_VNET_LE, i.e.
 * modern virtio semantics) or if the legacy host endianness is little
 * endian anyway.
 */
static inline bool tap_is_little_endian(struct tap_queue *q)
{
	return q->flags & TAP_VNET_LE ||
		tap_legacy_is_little_endian(q);
}
6ae7feb31
|
81 |
|
635b8c8ec
|
82 |
/* Translate a __virtio16 from the queue's wire endianness into host order. */
static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
{
	bool le = tap_is_little_endian(q);

	return __virtio16_to_cpu(le, val);
}
635b8c8ec
|
86 |
/* Translate a host-order 16-bit value into the queue's wire endianness. */
static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
{
	bool le = tap_is_little_endian(q);

	return __cpu_to_virtio16(le, val);
}
635b8c8ec
|
90 91 |
/*
 * Minimal proto so a tap queue can be allocated via sk_alloc(); obj_size
 * makes the allocation large enough for the whole struct tap_queue that
 * embeds the struct sock (see the cast in tap_open()).
 */
static struct proto tap_proto = {
	.name = "tap",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tap_queue),
};
635b8c8ec
|
95 |
#define TAP_NUM_DEVS (1U << MINORBITS) |
d9f1f61c0
|
96 97 |
static LIST_HEAD(major_list); |
ebc05ba7e
|
98 |
/*
 * One entry per registered tap character-device major.  Entries live on
 * major_list and are looked up under RCU; minor_idr maps minor numbers
 * to their struct tap_dev and is protected by minor_lock.
 */
struct major_info {
	struct rcu_head rcu;		/* deferred free of this entry */
	dev_t major;			/* chardev major this entry manages */
	struct idr minor_idr;		/* minor -> struct tap_dev */
	spinlock_t minor_lock;		/* protects minor_idr */
	const char *device_name;	/* name the chardev registered under */
	struct list_head next;		/* link in major_list (RCU list) */
};
e09eff7fc
|
106 |
|
97bc3633b
|
107 |
#define GOODCOPY_LEN 128 |
20d29d7a9
|
108 |
|
635b8c8ec
|
109 |
static const struct proto_ops tap_socket_ops; |
501c774cb
|
110 |
|
2be5c7679
|
111 |
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) |
f23d538bc
|
112 |
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST) |
a567dd625
|
113 |
|
6fe3faf86
|
114 |
/* Fetch the tap_dev hooked into the lower device's rx_handler slot (RCU). */
static struct tap_dev *tap_dev_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}
20d29d7a9
|
118 119 |
/* * RCU usage: |
635b8c8ec
|
120 |
* The tap_queue and the macvlan_dev are loosely coupled, the |
02df55d28
|
121 |
* pointers from one to the other can only be read while rcu_read_lock |
441ac0fca
|
122 |
* or rtnl is held. |
20d29d7a9
|
123 |
* |
635b8c8ec
|
124 |
* Both the file and the macvlan_dev hold a reference on the tap_queue |
02df55d28
|
125 126 |
* through sock_hold(&q->sk). When the macvlan_dev goes away first, * q->vlan becomes inaccessible. When the files gets closed, |
635b8c8ec
|
127 |
* tap_get_queue() fails. |
20d29d7a9
|
128 |
* |
02df55d28
|
129 130 131 132 |
* There may still be references to the struct sock inside of the * queue from outbound SKBs, but these never reference back to the * file or the dev. The data structure is freed through __sk_free * when both our references and any pending SKBs are gone. |
20d29d7a9
|
133 |
*/ |
20d29d7a9
|
134 |
|
6fe3faf86
|
135 |
/*
 * Re-enable a previously disabled (but still attached) queue by
 * publishing it in the tap's RCU lookup array again.  Caller holds RTNL.
 * Returns -EINVAL if the queue is already enabled.
 */
static int tap_enable_queue(struct tap_dev *tap, struct file *file,
			    struct tap_queue *q)
{
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	/* append at the first free slot of the lookup array */
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	q->queue_index = tap->numvtaps;
	q->enabled = true;

	tap->numvtaps++;
out:
	return err;
}
40b8fe45d
|
152 |
/* Requires RTNL */
/*
 * Attach a freshly opened queue to @tap: link queue and device both ways,
 * publish the queue for packet delivery and account it.  The sock_hold()
 * reference is owned by the tap device and dropped in tap_put_queue().
 * Returns -EBUSY once MAX_TAP_QUEUES queues are attached.
 */
static int tap_set_queue(struct tap_dev *tap, struct file *file,
			 struct tap_queue *q)
{
	if (tap->numqueues == MAX_TAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->tap, tap);
	rcu_assign_pointer(tap->taps[tap->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = tap->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &tap->queue_list);

	tap->numvtaps++;
	tap->numqueues++;

	return 0;
}
635b8c8ec
|
174 |
/*
 * Take a queue out of the RCU lookup array without detaching it from the
 * device: the last array entry is moved into the vacated slot so the
 * array stays dense for tap_get_queue().  Caller holds RTNL.
 */
static int tap_disable_queue(struct tap_queue *q)
{
	struct tap_dev *tap;
	struct tap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	tap = rtnl_dereference(q->tap);

	if (tap) {
		int index = q->queue_index;

		BUG_ON(index >= tap->numvtaps);
		/* move the last enabled queue into the freed slot */
		nq = rtnl_dereference(tap->taps[tap->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(tap->taps[index], nq);
		RCU_INIT_POINTER(tap->taps[tap->numvtaps - 1], NULL);
		q->enabled = false;

		tap->numvtaps--;
	}

	return 0;
}
20d29d7a9
|
197 |
/*
 * The file owning the queue got closed, give up both
 * the reference that the files holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Using the spinlock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void tap_put_queue(struct tap_queue *q)
{
	struct tap_dev *tap;

	rtnl_lock();
	tap = rtnl_dereference(q->tap);

	if (tap) {
		if (q->enabled)
			BUG_ON(tap_disable_queue(q));

		tap->numqueues--;
		RCU_INIT_POINTER(q->tap, NULL);
		/* drop the device's reference on the socket */
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	/* let in-flight RCU readers finish before dropping the file's ref */
	synchronize_rcu();
	sock_put(&q->sk);
}
1565c7c1c
|
228 229 230 231 232 |
/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct tap_queue *tap_get_queue(struct tap_dev *tap,
				       struct sk_buff *skb)
{
	struct tap_queue *queue = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = READ_ONCE(tap->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		queue = rcu_dereference(tap->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		/* fold the recorded rx queue into range without a division */
		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		queue = rcu_dereference(tap->taps[rxq]);
		goto out;
	}

single:
	queue = rcu_dereference(tap->taps[0]);
out:
	return queue;
}
02df55d28
|
269 270 |
/*
 * The net_device is going away, give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
void tap_del_queues(struct tap_dev *tap)
{
	struct tap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->tap, NULL);
		if (q->enabled)
			tap->numvtaps--;
		tap->numqueues--;
		/* drop the device-side reference taken in tap_set_queue() */
		sock_put(&q->sk);
	}
	BUG_ON(tap->numvtaps);
	BUG_ON(tap->numqueues);
	/* guarantee that any future tap_set_queue will fail */
	tap->numvtaps = MAX_TAP_QUEUES;
}
EXPORT_SYMBOL_GPL(tap_del_queues);
20d29d7a9
|
293 |
|
635b8c8ec
|
294 |
/*
 * rx_handler attached to the lower device: consume every frame and queue
 * it to one of the tap queues, segmenting or checksumming first when the
 * reader did not negotiate the matching offloads.
 */
rx_handler_result_t tap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct tap_dev *tap;
	struct tap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	tap = tap_dev_get_rcu(dev);
	if (!tap)
		return RX_HANDLER_PASS;

	q = tap_get_queue(tap, skb);
	if (!q)
		return RX_HANDLER_PASS;

	/* re-expose the MAC header; the reader expects a full frame */
	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to users wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= tap->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			/* no segmentation needed after all: queue as-is */
			if (ptr_ring_produce(&q->ring, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (ptr_ring_produce(&q->ring, segs)) {
				/* ring full: drop this and all later segments */
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 *	 check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (ptr_ring_produce(&q->ring, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	if (tap->count_rx_dropped)
		tap->count_rx_dropped(tap);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}
EXPORT_SYMBOL_GPL(tap_handle_frame);
20d29d7a9
|
364 |
|
d9f1f61c0
|
365 366 367 368 369 370 371 372 373 374 375 376 377 |
/* Find the major_list entry for a chardev major; caller holds rcu_read_lock. */
static struct major_info *tap_get_major(int major)
{
	struct major_info *tap_major;

	list_for_each_entry_rcu(tap_major, &major_list, next) {
		if (tap_major->major == major)
			return tap_major;
	}

	return NULL;
}

/*
 * Allocate a minor number for @tap under the given major and store it in
 * tap->minor.  Returns 0 on success or a negative errno.
 */
int tap_get_minor(dev_t major, struct tap_dev *tap)
{
	int retval = -ENOMEM;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major) {
		retval = -EINVAL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	/* minors start at 1: tap->minor == 0 means "none" (see tap_free_minor) */
	retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC);
	if (retval >= 0) {
		tap->minor = retval;
	} else if (retval == -ENOSPC) {
		/* kernel log messages must be newline-terminated */
		netdev_err(tap->dev, "Too many tap devices\n");
		retval = -EINVAL;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return retval < 0 ? retval : 0;
}
EXPORT_SYMBOL_GPL(tap_get_minor);
e09eff7fc
|
405 |
|
d9f1f61c0
|
406 |
/*
 * Release the minor number held by @tap (if any) back to its major's IDR
 * and mark the device as having no minor (tap->minor == 0).
 */
void tap_free_minor(dev_t major, struct tap_dev *tap)
{
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(MAJOR(major));
	if (!tap_major)
		goto unlock;

	spin_lock(&tap_major->minor_lock);
	if (tap->minor) {
		idr_remove(&tap_major->minor_idr, tap->minor);
		tap->minor = 0;
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tap_free_minor);
e09eff7fc
|
426 |
|
d9f1f61c0
|
427 |
/*
 * Map a chardev (major, minor) pair to its tap device.  On success the
 * underlying net_device's refcount is raised; the caller must dev_put()
 * tap->dev when done.  Returns NULL if major or minor is unknown.
 */
static struct tap_dev *dev_get_by_tap_file(int major, int minor)
{
	struct net_device *dev = NULL;
	struct tap_dev *tap;
	struct major_info *tap_major;

	rcu_read_lock();
	tap_major = tap_get_major(major);
	if (!tap_major) {
		tap = NULL;
		goto unlock;
	}

	spin_lock(&tap_major->minor_lock);
	tap = idr_find(&tap_major->minor_idr, minor);
	if (tap) {
		dev = tap->dev;
		dev_hold(dev);
	}
	spin_unlock(&tap_major->minor_lock);

unlock:
	rcu_read_unlock();
	return tap;
}
635b8c8ec
|
451 |
/*
 * sk write-space callback: wake poll()ers waiting for EPOLLOUT once the
 * socket becomes writeable, but only if SOCKWQ_ASYNC_NOSPACE was armed
 * (i.e. a writer previously observed the socket as full).
 */
static void tap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
}
635b8c8ec
|
461 |
/* Final sk destructor: free any packets still queued in the ring. */
static void tap_sock_destruct(struct sock *sk)
{
	struct tap_queue *q = container_of(sk, struct tap_queue, sk);

	ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
}
635b8c8ec
|
467 |
/*
 * open() on the tap chardev: allocate a queue (a socket embedded in
 * struct tap_queue) and attach it to the tap device identified by the
 * inode's (major, minor).  Runs under RTNL to serialize with attach and
 * detach paths.
 */
static int tap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tap_dev *tap;
	struct tap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	tap = dev_get_by_tap_file(imajor(inode), iminor(inode));
	if (!tap)
		goto err;

	err = -ENOMEM;
	q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &tap_proto, 0);
	if (!q)
		goto err;
	if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
		sk_free(&q->sk);
		goto err;
	}

	init_waitqueue_head(&q->sock.wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &tap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = tap_sock_write_space;
	q->sk.sk_destruct = tap_sock_destruct;
	/* default to vnet-header mode with the standard virtio header size */
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * so far only KVM virtio_net uses tap, enable zero copy between
	 * guest kernel and host kernel when lower device supports zerocopy
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy so we don't have to look at the lower device directly.
	 */
	if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = tap_set_queue(tap, file, q);
	if (err) {
		/* tap_sock_destruct() will take care of freeing ptr_ring */
		goto err_put;
	}

	/* attached successfully: drop the lookup reference on the device */
	dev_put(tap->dev);

	rtnl_unlock();
	return err;

err_put:
	sock_put(&q->sk);
err:
	if (tap)
		dev_put(tap->dev);

	rtnl_unlock();
	return err;
}
635b8c8ec
|
529 |
static int tap_release(struct inode *inode, struct file *file) |
20d29d7a9
|
530 |
{ |
635b8c8ec
|
531 532 |
struct tap_queue *q = file->private_data; tap_put_queue(q); |
20d29d7a9
|
533 534 |
return 0; } |
afc9a42b7
|
535 |
/*
 * poll() support: readable when the ring holds packets, writeable when
 * the socket send buffer has room.  EPOLLERR if the file has no queue.
 */
static __poll_t tap_poll(struct file *file, poll_table *wait)
{
	struct tap_queue *q = file->private_data;
	__poll_t mask = EPOLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->sock.wq.wait, wait);

	if (!ptr_ring_empty(&q->ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* arm SOCKWQ_ASYNC_NOSPACE and re-check to avoid a missed wakeup */
	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= EPOLLOUT | EPOLLWRNORM;

out:
	return mask;
}
635b8c8ec
|
557 558 |
/*
 * Allocate an skb for a packet written from userspace: @prepad headroom,
 * @linear bytes in the linear area, the remaining @len bytes as paged
 * data.  May block unless @noblock; *err receives the failure reason.
 */
static inline struct sk_buff *tap_alloc_skb(struct sock *sk, size_t prepad,
					    size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
2f1d8b9e8
|
579 |
/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define TAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/*
 * Get packet from user space buffer: parse the optional virtio-net
 * header, build an skb (zerocopy from the iov when possible) and inject
 * it into the lower device via dev_queue_xmit().  Returns the number of
 * bytes consumed from @from or a negative errno.
 */
static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
			    struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(TAP_RESERVE);
	struct sk_buff *skb;
	struct tap_dev *tap;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		if (!copy_from_iter_full(&vnet_hdr, sizeof(vnet_hdr), from))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		/* make sure hdr_len covers at least the checksummed region */
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		     tap16_to_cpu(q, vnet_hdr.csum_start) +
		     tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     tap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_tap16(q,
				 tap16_to_cpu(q, vnet_hdr.csum_start) +
				 tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		/* copy only the headers; clamp to [ETH_HLEN, good_linear] */
		copylen = vnet_hdr.hdr_len ?
			tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		/* zerocopy only if the remainder fits in the skb frags */
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
			    linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else
		err = skb_copy_datagram_from_iter(skb, 0, from, len);

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		/* not taking a zerocopy ref: complete the ubuf now */
		uarg->callback(uarg, false);
	}

	if (tap) {
		skb->dev = tap->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();

	return err;
}
635b8c8ec
|
714 |
static ssize_t tap_write_iter(struct kiocb *iocb, struct iov_iter *from) |
20d29d7a9
|
715 716 |
{ struct file *file = iocb->ki_filp; |
635b8c8ec
|
717 |
struct tap_queue *q = file->private_data; |
20d29d7a9
|
718 |
|
635b8c8ec
|
719 |
return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK); |
20d29d7a9
|
720 721 722 |
} /* Put packet to the user space buffer */ |
635b8c8ec
|
723 724 725 |
static ssize_t tap_put_user(struct tap_queue *q, const struct sk_buff *skb, struct iov_iter *iter) |
20d29d7a9
|
726 |
{ |
20d29d7a9
|
727 |
int ret; |
b9fb9ee07
|
728 |
int vnet_hdr_len = 0; |
f09e2249c
|
729 |
int vlan_offset = 0; |
6c36d2e26
|
730 |
int total; |
b9fb9ee07
|
731 732 |
if (q->flags & IFF_VNET_HDR) { |
fd3a88625
|
733 |
int vlan_hlen = skb_vlan_tag_present(skb) ? VLAN_HLEN : 0; |
b9fb9ee07
|
734 |
struct virtio_net_hdr vnet_hdr; |
fd3a88625
|
735 |
|
837585a53
|
736 |
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz); |
6c36d2e26
|
737 |
if (iov_iter_count(iter) < vnet_hdr_len) |
b9fb9ee07
|
738 |
return -EINVAL; |
3e9e40e74
|
739 |
if (virtio_net_hdr_from_skb(skb, &vnet_hdr, |
fd3a88625
|
740 741 |
tap_is_little_endian(q), true, vlan_hlen)) |
fd88d68b3
|
742 |
BUG(); |
b9fb9ee07
|
743 |
|
6c36d2e26
|
744 745 |
if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) != sizeof(vnet_hdr)) |
b9fb9ee07
|
746 |
return -EFAULT; |
7cc76f515
|
747 748 |
iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr)); |
b9fb9ee07
|
749 |
} |
6c36d2e26
|
750 |
total = vnet_hdr_len; |
ce232ce01
|
751 |
total += skb->len; |
f09e2249c
|
752 |
|
df8a39def
|
753 |
if (skb_vlan_tag_present(skb)) { |
f09e2249c
|
754 755 756 757 |
struct { __be16 h_vlan_proto; __be16 h_vlan_TCI; } veth; |
0fbe0d47b
|
758 |
veth.h_vlan_proto = skb->vlan_proto; |
df8a39def
|
759 |
veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb)); |
f09e2249c
|
760 761 |
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); |
ce232ce01
|
762 |
total += VLAN_HLEN; |
f09e2249c
|
763 |
|
6c36d2e26
|
764 765 |
ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset); if (ret || !iov_iter_count(iter)) |
f09e2249c
|
766 |
goto done; |
6c36d2e26
|
767 768 |
ret = copy_to_iter(&veth, sizeof(veth), iter); if (ret != sizeof(veth) || !iov_iter_count(iter)) |
f09e2249c
|
769 770 |
goto done; } |
20d29d7a9
|
771 |
|
6c36d2e26
|
772 773 |
ret = skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset); |
20d29d7a9
|
774 |
|
f09e2249c
|
775 |
done: |
ce232ce01
|
776 |
return ret ? ret : total; |
20d29d7a9
|
777 |
} |
635b8c8ec
|
778 779 |
/*
 * Core read path: take one packet from the ring (or use @skb if the
 * caller already dequeued one), copy it to @to and consume it.  Sleeps
 * until a packet arrives unless @noblock.
 */
static ssize_t tap_do_read(struct tap_queue *q,
			   struct iov_iter *to,
			   int noblock, struct sk_buff *skb)
{
	DEFINE_WAIT(wait);
	ssize_t ret = 0;

	if (!iov_iter_count(to)) {
		/* zero-length read: discard any pre-dequeued packet */
		kfree_skb(skb);
		return 0;
	}

	if (skb)
		goto put;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = ptr_ring_consume(&q->ring);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

put:
	if (skb) {
		ret = tap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}
635b8c8ec
|
824 |
/* read_iter() on the chardev: return one packet, capped at the iov size. */
static ssize_t tap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct tap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}
6fe3faf86
|
836 |
/*
 * Look up the tap device behind a queue, taking a reference on its
 * net_device.  Caller holds RTNL and must balance with tap_put_tap_dev().
 */
static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
{
	struct tap_dev *tap;

	ASSERT_RTNL();
	tap = rtnl_dereference(q->tap);
	if (tap)
		dev_hold(tap->dev);

	return tap;
}
6fe3faf86
|
847 |
/* Drop the net_device reference taken by tap_get_tap_dev(). */
static void tap_put_tap_dev(struct tap_dev *tap)
{
	dev_put(tap->dev);
}
635b8c8ec
|
851 |
/* TUNSETQUEUE ioctl: attach or detach this queue from packet delivery. */
static int tap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	int ret;

	tap = tap_get_tap_dev(q);
	if (!tap)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = tap_enable_queue(tap, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = tap_disable_queue(q);
	else
		ret = -EINVAL;

	tap_put_tap_dev(tap);
	return ret;
}
635b8c8ec
|
870 |
/* TUNSETOFFLOAD backend: translate the user's TUN_F_* offload request in
 * @arg into netdev feature bits for the underlying device.  Called with
 * RTNL held.  Returns 0, or -ENOLINK if the queue is no longer attached.
 */
static int set_offload(struct tap_queue *q, unsigned long arg)
{
	struct tap_dev *tap;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	tap = rtnl_dereference(q->tap);
	if (!tap)
		return -ENOLINK;

	features = tap->dev->features;

	/* Checksum offload is a prerequisite for any TSO variant. */
	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}
	}

	/* tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that the userspace wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For tap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user-space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	tap->tap_features = feature_mask;
	if (tap->update_features)
		tap->update_features(tap, features);

	return 0;
}
20d29d7a9
|
915 916 917 |
/*
 * provide compatibility with generic tun/tap interface
 */
/* Main ioctl dispatcher for the tap chardev.  Queue-local settings
 * (flags, sndbuf, vnet header size/endianness) are handled directly;
 * anything touching the underlying net_device takes RTNL and a device
 * reference via tap_get_tap_dev().
 */
static long tap_ioctl(struct file *file, unsigned int cmd,
		      unsigned long arg)
{
	struct tap_queue *q = file->private_data;
	struct tap_dev *tap;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		/* Only IFF_TAP|IFF_NO_PI plus TAP_IFFEATURES bits are legal. */
		if ((u & ~TAP_IFFEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~TAP_IFFEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = tap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | TAP_IFFEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;
		if (s <= 0)
			return -EINVAL;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		/* Header must at least cover struct virtio_net_hdr. */
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & TAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= TAP_VNET_LE;
		else
			q->flags &= ~TAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return tap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return tap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = tap->dev->type;
		if (copy_to_user(&ifr->ifr_name, tap->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, tap->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		tap = tap_get_tap_dev(q);
		if (!tap) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(tap->dev, &sa, NULL);
		tap_put_tap_dev(tap);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
635b8c8ec
|
1064 1065 |
static long tap_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
20d29d7a9
|
1066 |
{ |
635b8c8ec
|
1067 |
return tap_ioctl(file, cmd, (unsigned long)compat_ptr(arg)); |
20d29d7a9
|
1068 1069 |
} #endif |
d17eb73bb
|
1070 |
/* Character-device file operations for the tap device nodes. */
static const struct file_operations tap_fops = {
	.owner		= THIS_MODULE,
	.open		= tap_open,
	.release	= tap_release,
	.read_iter	= tap_read_iter,
	.write_iter	= tap_write_iter,
	.poll		= tap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= tap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= tap_compat_ioctl,
#endif
};
0efac2779
|
1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 |
/* Transmit one XDP buffer from a TUN_MSG_PTR sendmsg batch.
 *
 * The buffer is wrapped into an skb with build_skb() — ownership of the
 * backing memory passes to the skb — and injected into the stack via
 * dev_queue_xmit().  Returns 0 on success (including the detached-device
 * case, where the frame is dropped), or a negative errno.
 */
static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
{
	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
	struct virtio_net_hdr *gso = &hdr->gso;
	int buflen = hdr->buflen;
	int vnet_hdr_len = 0;
	struct tap_dev *tap;
	struct sk_buff *skb;
	int err, depth;

	/* READ_ONCE: vnet_hdr_sz may be changed concurrently via ioctl. */
	if (q->flags & IFF_VNET_HDR)
		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);

	skb = build_skb(xdp->data_hard_start, buflen);
	if (!skb) {
		err = -ENOMEM;
		goto err;
	}

	/* Reproduce the headroom/data layout of the xdp_buff in the skb. */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	skb_put(skb, xdp->data_end - xdp->data);

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap) {
		skb->dev = tap->dev;
		skb_probe_transport_header(skb);
		dev_queue_xmit(skb);
	} else {
		/* Queue no longer attached to a device: drop the frame. */
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return 0;

err_kfree:
	kfree_skb(skb);
err:
	/* Account the drop against the device, if one is still attached. */
	rcu_read_lock();
	tap = rcu_dereference(q->tap);
	if (tap && tap->count_tx_dropped)
		tap->count_tx_dropped(tap);
	rcu_read_unlock();
	return err;
}
635b8c8ec
|
1143 1144 |
/* sendmsg() socket op (used by vhost-net).  A TUN_MSG_PTR control block
 * carries a batch of XDP buffers to transmit; otherwise the message is a
 * single frame read from the iov (optionally with a prebuilt skb in
 * ctl->ptr).
 */
static int tap_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct tun_msg_ctl *ctl = m->msg_control;

	if (ctl && ctl->type == TUN_MSG_PTR) {
		struct xdp_buff *bufs = ctl->ptr;
		int i;

		for (i = 0; i < ctl->num; i++)
			tap_get_user_xdp(q, &bufs[i]);
		return 0;
	}

	return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
			    m->msg_flags & MSG_DONTWAIT);
}
635b8c8ec
|
1162 1163 |
/* recvmsg() socket op (used by vhost-net).
 *
 * @m->msg_control may carry a caller-provided skb to consume instead of
 * dequeueing from the ring; ownership passes to us, so it must be freed
 * on the early-error path.  Returns bytes copied (MSG_TRUNC semantics as
 * for packet sockets) or a negative errno.
 */
static int tap_recvmsg(struct socket *sock, struct msghdr *m,
		       size_t total_len, int flags)
{
	struct tap_queue *q = container_of(sock, struct tap_queue, sock);
	struct sk_buff *skb = m->msg_control;
	int ret;

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
		kfree_skb(skb);
		return -EINVAL;
	}
	ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
	/* Compare as signed: without the cast a negative errno from
	 * tap_do_read() is converted to a huge unsigned value, falsely
	 * takes the truncation branch, and the error is masked as
	 * total_len.  tun_recvmsg() in tun.c carries the same cast.
	 */
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}
635b8c8ec
|
1179 |
static int tap_peek_len(struct socket *sock) |
362899b87
|
1180 |
{ |
635b8c8ec
|
1181 |
struct tap_queue *q = container_of(sock, struct tap_queue, |
362899b87
|
1182 |
sock); |
5990a3051
|
1183 |
return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag); |
362899b87
|
1184 |
} |
501c774cb
|
1185 |
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tap_socket_ops = {
	.sendmsg = tap_sendmsg,
	.recvmsg = tap_recvmsg,
	.peek_len = tap_peek_len,
};

/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use.
 */
635b8c8ec
|
1196 |
struct socket *tap_get_socket(struct file *file) |
501c774cb
|
1197 |
{ |
635b8c8ec
|
1198 1199 |
struct tap_queue *q; if (file->f_op != &tap_fops) |
501c774cb
|
1200 1201 1202 1203 1204 1205 |
return ERR_PTR(-EINVAL); q = file->private_data; if (!q) return ERR_PTR(-EBADFD); return &q->sock; } |
635b8c8ec
|
1206 |
EXPORT_SYMBOL_GPL(tap_get_socket); |
501c774cb
|
1207 |
|
5990a3051
|
1208 |
struct ptr_ring *tap_get_ptr_ring(struct file *file) |
49f96fd0c
|
1209 1210 1211 1212 1213 1214 1215 1216 |
{ struct tap_queue *q; if (file->f_op != &tap_fops) return ERR_PTR(-EINVAL); q = file->private_data; if (!q) return ERR_PTR(-EBADFD); |
5990a3051
|
1217 |
return &q->ring; |
49f96fd0c
|
1218 |
} |
5990a3051
|
1219 |
EXPORT_SYMBOL_GPL(tap_get_ptr_ring); |
49f96fd0c
|
1220 |
|
6fe3faf86
|
1221 |
/* Resize every attached queue's ptr_ring to the device's current
 * tx_queue_len.  All rings are collected first so they can be resized
 * atomically with one ptr_ring_resize_multiple() call; any skbs that no
 * longer fit are freed via __skb_array_destroy_skb.
 */
int tap_queue_resize(struct tap_dev *tap)
{
	struct net_device *dev = tap->dev;
	struct tap_queue *q;
	struct ptr_ring **rings;
	int n = tap->numqueues;
	int ret, i = 0;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	list_for_each_entry(q, &tap->queue_list, next)
		rings[i++] = &q->ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       __skb_array_destroy_skb);

	kfree(rings);
	return ret;
}
EXPORT_SYMBOL_GPL(tap_queue_resize);
ebc05ba7e
|
1242 |
|
d9f1f61c0
|
1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 |
/* Record a newly allocated chardev major in the global major_list so that
 * minors can later be handed out from its idr.  The list is RCU-managed
 * (added with list_add_tail_rcu).  Returns 0 or -ENOMEM.
 * NOTE(review): allocation uses GFP_ATOMIC — presumably some caller cannot
 * sleep here; confirm before relaxing to GFP_KERNEL.
 */
static int tap_list_add(dev_t major, const char *device_name)
{
	struct major_info *tap_major;

	tap_major = kzalloc(sizeof(*tap_major), GFP_ATOMIC);
	if (!tap_major)
		return -ENOMEM;

	tap_major->major = MAJOR(major);

	idr_init(&tap_major->minor_idr);
	spin_lock_init(&tap_major->minor_lock);

	tap_major->device_name = device_name;

	list_add_tail_rcu(&tap_major->next, &major_list);

	return 0;
}
dea6e19f4
|
1261 1262 |
/* Allocate a chardev region of TAP_NUM_DEVS minors under @device_name,
 * register @tap_cdev with the tap file operations, and record the major
 * in the tap major_list.  On failure each completed step is rolled back
 * in reverse order via the goto-cleanup chain.  Returns 0 or a negative
 * errno.
 */
int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
		    const char *device_name, struct module *module)
{
	int err;

	err = alloc_chrdev_region(tap_major, 0, TAP_NUM_DEVS, device_name);
	if (err)
		goto out1;

	cdev_init(tap_cdev, &tap_fops);
	tap_cdev->owner = module;
	err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
	if (err)
		goto out2;

	err = tap_list_add(*tap_major, device_name);
	if (err)
		goto out3;

	return 0;

out3:
	cdev_del(tap_cdev);
out2:
	unregister_chrdev_region(*tap_major, TAP_NUM_DEVS);
out1:
	return err;
}
EXPORT_SYMBOL_GPL(tap_create_cdev);
ebc05ba7e
|
1288 1289 1290 |
/* Undo tap_create_cdev(): remove the cdev, release the chardev region and
 * drop the matching entry from major_list.  The entry is freed via
 * kfree_rcu so concurrent RCU readers walking the list stay safe.
 */
void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev)
{
	struct major_info *tap_major, *tmp;

	cdev_del(tap_cdev);
	unregister_chrdev_region(major, TAP_NUM_DEVS);
	list_for_each_entry_safe(tap_major, tmp, &major_list, next) {
		if (tap_major->major == MAJOR(major)) {
			idr_destroy(&tap_major->minor_idr);
			list_del_rcu(&tap_major->next);
			kfree_rcu(tap_major, rcu);
		}
	}
}
EXPORT_SYMBOL_GPL(tap_destroy_cdev);

MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_AUTHOR("Sainath Grandhi <sainath.grandhi@intel.com>");
MODULE_LICENSE("GPL");