Blame view
drivers/net/virtio_net.c
31.6 KB
48925e372 virtio_net: avoid... |
1 |
/* A network driver using virtio. |
296f96fcf Net driver using ... |
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 |
* * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ //#define DEBUG #include <linux/netdevice.h> #include <linux/etherdevice.h> |
a9ea3fc6f virtio net: Add e... |
22 |
#include <linux/ethtool.h> |
296f96fcf Net driver using ... |
23 24 25 26 |
#include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_net.h> #include <linux/scatterlist.h> |
e918085aa virtio_net: Fix M... |
27 |
#include <linux/if_vlan.h> |
5a0e3ad6a include cleanup: ... |
28 |
#include <linux/slab.h> |
296f96fcf Net driver using ... |
29 |
|
6c0cd7c00 virtio_net: param... |
30 31 |
static int napi_weight = 128; module_param(napi_weight, int, 0444); |
eb9399220 module_param: mak... |
32 |
static bool csum = true, gso = true; |
34a48579e virtio: Tweak vir... |
33 34 |
module_param(csum, bool, 0444); module_param(gso, bool, 0444); |
296f96fcf Net driver using ... |
35 |
/* FIXME: MTU in config. */ |
e918085aa virtio_net: Fix M... |
36 |
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) |
3f2c31d90 virtio_net: VIRTI... |
37 |
#define GOOD_COPY_LEN 128 |
296f96fcf Net driver using ... |
38 |
|
f565a7c25 virtio_net: Add a... |
39 |
#define VIRTNET_SEND_COMMAND_SG_MAX 2 |
66846048f enable virtio_net... |
40 |
#define VIRTNET_DRIVER_VERSION "1.0.0" |
2a41f71d3 virtio_net: Add a... |
41 |
|
3fa2a1df9 virtio-net: per c... |
42 43 44 45 46 47 48 49 |
/* Per-cpu packet/byte counters, protected by a u64 stats seqcount. */
struct virtnet_stats {
	struct u64_stats_sync syncp;
	u64 tx_bytes;
	u64 tx_packets;

	u64 rx_bytes;
	u64 rx_packets;
};
5e01d2f91 virtio-net: move ... |
50 |
struct virtnet_info { |
296f96fcf Net driver using ... |
51 |
struct virtio_device *vdev; |
2a41f71d3 virtio_net: Add a... |
52 |
struct virtqueue *rvq, *svq, *cvq; |
296f96fcf Net driver using ... |
53 54 |
struct net_device *dev; struct napi_struct napi; |
9f4d26d0f virtio_net: add l... |
55 |
unsigned int status; |
296f96fcf Net driver using ... |
56 57 58 |
/* Number of input buffers, and max we've ever had. */ unsigned int num, max; |
97402b96f virtio net: Allow... |
59 60 |
/* I like... big packets and I cannot lie! */ bool big_packets; |
3f2c31d90 virtio_net: VIRTI... |
61 62 |
/* Host will merge rx buffers for big packets (shake it! shake it!) */ bool mergeable_rx_bufs; |
3fa2a1df9 virtio-net: per c... |
63 64 |
/* Active statistics */ struct virtnet_stats __percpu *stats; |
3161e453e virtio: net refil... |
65 66 |
/* Work struct for refilling if we run low on memory. */ struct delayed_work refill; |
fb6813f48 virtio: Recycle u... |
67 68 |
/* Chain pages by the private ptr. */ struct page *pages; |
5e01d2f91 virtio-net: move ... |
69 70 71 72 |
/* fragments + linear part + virtio header */ struct scatterlist rx_sg[MAX_SKB_FRAGS + 2]; struct scatterlist tx_sg[MAX_SKB_FRAGS + 2]; |
296f96fcf Net driver using ... |
73 |
}; |
b3f24698a virtio_net: forma... |
74 75 76 77 78 |
/* Per-skb virtio header, stashed in skb->cb (see skb_vnet_hdr()). */
struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};
9ab86bbcf virtio_net: Defer... |
81 82 83 84 85 86 87 88 89 |
struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * virtio_net_hdr should be in a separated sg buffer because of a
	 * QEMU bug, and data sg buffer shares same page with this header sg.
	 * This padding makes next sg 16 byte aligned after virtio_net_hdr.
	 */
	char padding[6];
};
b3f24698a virtio_net: forma... |
90 |
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) |
296f96fcf Net driver using ... |
91 |
{ |
b3f24698a virtio_net: forma... |
92 |
return (struct skb_vnet_hdr *)skb->cb; |
296f96fcf Net driver using ... |
93 |
} |
9ab86bbcf virtio_net: Defer... |
94 95 96 97 98 |
/* * private is used to chain pages for big packets, put the whole * most recent used list in the beginning for reuse */ static void give_pages(struct virtnet_info *vi, struct page *page) |
0a888fd1f virtio_net: Recyc... |
99 |
{ |
9ab86bbcf virtio_net: Defer... |
100 |
struct page *end; |
0a888fd1f virtio_net: Recyc... |
101 |
|
9ab86bbcf virtio_net: Defer... |
102 103 104 105 |
/* Find end of list, sew whole thing into vi->pages. */ for (end = page; end->private; end = (struct page *)end->private); end->private = (unsigned long)vi->pages; vi->pages = page; |
0a888fd1f virtio_net: Recyc... |
106 |
} |
fb6813f48 virtio: Recycle u... |
107 108 109 |
/* Pop a recycled page from vi->pages, or allocate a fresh one. */
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (!p)
		return alloc_page(gfp_mask);

	vi->pages = (struct page *)p->private;
	/* ->private chains recycled pages; clear it before handing out. */
	p->private = 0;
	return p;
}
2cb9c6baf virtio: free tran... |
118 |
static void skb_xmit_done(struct virtqueue *svq) |
296f96fcf Net driver using ... |
119 |
{ |
2cb9c6baf virtio: free tran... |
120 |
struct virtnet_info *vi = svq->vdev->priv; |
296f96fcf Net driver using ... |
121 |
|
2cb9c6baf virtio: free tran... |
122 |
/* Suppress further interrupts. */ |
1915a712f virtio_net: use v... |
123 |
virtqueue_disable_cb(svq); |
11a3a1546 virtio: fix delay... |
124 |
|
363f15149 virtio: use callb... |
125 |
/* We were probably waiting for more output buffers. */ |
296f96fcf Net driver using ... |
126 |
netif_wake_queue(vi->dev); |
296f96fcf Net driver using ... |
127 |
} |
9ab86bbcf virtio_net: Defer... |
128 129 |
static void set_skb_frag(struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int *len) |
296f96fcf Net driver using ... |
130 |
{ |
8a59a7b94 virtio_net: Clean... |
131 |
int size = min((unsigned)PAGE_SIZE - offset, *len); |
9ab86bbcf virtio_net: Defer... |
132 |
int i = skb_shinfo(skb)->nr_frags; |
9ab86bbcf virtio_net: Defer... |
133 |
|
8a59a7b94 virtio_net: Clean... |
134 |
__skb_fill_page_desc(skb, i, page, offset, size); |
9ab86bbcf virtio_net: Defer... |
135 |
|
8a59a7b94 virtio_net: Clean... |
136 137 |
skb->data_len += size; skb->len += size; |
4b727361f virtio_net: fix t... |
138 |
skb->truesize += PAGE_SIZE; |
9ab86bbcf virtio_net: Defer... |
139 |
skb_shinfo(skb)->nr_frags++; |
8a59a7b94 virtio_net: Clean... |
140 |
*len -= size; |
9ab86bbcf virtio_net: Defer... |
141 |
} |
23cde76d8 virtio_net: Fix s... |
142 |
|
3464645a1 virtio_net: Pass ... |
143 |
/* Called from bottom half context */ |
9ab86bbcf virtio_net: Defer... |
144 145 146 147 148 149 150 |
static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct page *page, unsigned int len) { struct sk_buff *skb; struct skb_vnet_hdr *hdr; unsigned int copy, hdr_len, offset; char *p; |
fb6813f48 virtio: Recycle u... |
151 |
|
9ab86bbcf virtio_net: Defer... |
152 |
p = page_address(page); |
3f2c31d90 virtio_net: VIRTI... |
153 |
|
9ab86bbcf virtio_net: Defer... |
154 155 156 157 |
/* copy small packet so we can reuse these pages for small data */ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); if (unlikely(!skb)) return NULL; |
3f2c31d90 virtio_net: VIRTI... |
158 |
|
9ab86bbcf virtio_net: Defer... |
159 |
hdr = skb_vnet_hdr(skb); |
3f2c31d90 virtio_net: VIRTI... |
160 |
|
9ab86bbcf virtio_net: Defer... |
161 162 163 164 165 166 167 |
if (vi->mergeable_rx_bufs) { hdr_len = sizeof hdr->mhdr; offset = hdr_len; } else { hdr_len = sizeof hdr->hdr; offset = sizeof(struct padded_vnet_hdr); } |
3f2c31d90 virtio_net: VIRTI... |
168 |
|
9ab86bbcf virtio_net: Defer... |
169 |
memcpy(hdr, p, hdr_len); |
3f2c31d90 virtio_net: VIRTI... |
170 |
|
9ab86bbcf virtio_net: Defer... |
171 172 |
len -= hdr_len; p += offset; |
3f2c31d90 virtio_net: VIRTI... |
173 |
|
9ab86bbcf virtio_net: Defer... |
174 175 176 177 |
copy = len; if (copy > skb_tailroom(skb)) copy = skb_tailroom(skb); memcpy(skb_put(skb, copy), p, copy); |
3f2c31d90 virtio_net: VIRTI... |
178 |
|
9ab86bbcf virtio_net: Defer... |
179 180 |
len -= copy; offset += copy; |
3f2c31d90 virtio_net: VIRTI... |
181 |
|
e878d78b9 virtio-net: Verif... |
182 183 184 185 186 187 188 189 190 191 192 193 194 |
/* * Verify that we can indeed put this data into a skb. * This is here to handle cases when the device erroneously * tries to receive more than is possible. This is usually * the case of a broken device. */ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { if (net_ratelimit()) pr_debug("%s: too much data ", skb->dev->name); dev_kfree_skb(skb); return NULL; } |
9ab86bbcf virtio_net: Defer... |
195 196 197 198 199 |
while (len) { set_skb_frag(skb, page, offset, &len); page = (struct page *)page->private; offset = 0; } |
3f2c31d90 virtio_net: VIRTI... |
200 |
|
9ab86bbcf virtio_net: Defer... |
201 202 |
if (page) give_pages(vi, page); |
3f2c31d90 virtio_net: VIRTI... |
203 |
|
9ab86bbcf virtio_net: Defer... |
204 205 |
return skb; } |
3f2c31d90 virtio_net: VIRTI... |
206 |
|
9ab86bbcf virtio_net: Defer... |
207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 |
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb) { struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); struct page *page; int num_buf, i, len; num_buf = hdr->mhdr.num_buffers; while (--num_buf) { i = skb_shinfo(skb)->nr_frags; if (i >= MAX_SKB_FRAGS) { pr_debug("%s: packet too long ", skb->dev->name); skb->dev->stats.rx_length_errors++; return -EINVAL; } |
1915a712f virtio_net: use v... |
222 |
page = virtqueue_get_buf(vi->rvq, &len); |
9ab86bbcf virtio_net: Defer... |
223 224 225 226 227 228 |
if (!page) { pr_debug("%s: rx error: %d buffers missing ", skb->dev->name, hdr->mhdr.num_buffers); skb->dev->stats.rx_length_errors++; return -EINVAL; |
3f2c31d90 virtio_net: VIRTI... |
229 |
} |
3fa2a1df9 virtio-net: per c... |
230 |
|
9ab86bbcf virtio_net: Defer... |
231 232 233 234 235 236 237 238 239 240 241 242 243 |
if (len > PAGE_SIZE) len = PAGE_SIZE; set_skb_frag(skb, page, 0, &len); --vi->num; } return 0; } static void receive_buf(struct net_device *dev, void *buf, unsigned int len) { struct virtnet_info *vi = netdev_priv(dev); |
3fa2a1df9 virtio-net: per c... |
244 |
struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats); |
9ab86bbcf virtio_net: Defer... |
245 246 247 |
struct sk_buff *skb; struct page *page; struct skb_vnet_hdr *hdr; |
3f2c31d90 virtio_net: VIRTI... |
248 |
|
9ab86bbcf virtio_net: Defer... |
249 250 251 252 253 254 255 256 257 258 |
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { pr_debug("%s: short packet %i ", dev->name, len); dev->stats.rx_length_errors++; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(vi, buf); else dev_kfree_skb(buf); return; } |
3f2c31d90 virtio_net: VIRTI... |
259 |
|
9ab86bbcf virtio_net: Defer... |
260 261 262 263 264 265 266 267 |
if (!vi->mergeable_rx_bufs && !vi->big_packets) { skb = buf; len -= sizeof(struct virtio_net_hdr); skb_trim(skb, len); } else { page = buf; skb = page_to_skb(vi, page, len); if (unlikely(!skb)) { |
3f2c31d90 virtio_net: VIRTI... |
268 |
dev->stats.rx_dropped++; |
9ab86bbcf virtio_net: Defer... |
269 270 |
give_pages(vi, page); return; |
3f2c31d90 virtio_net: VIRTI... |
271 |
} |
9ab86bbcf virtio_net: Defer... |
272 273 274 275 276 |
if (vi->mergeable_rx_bufs) if (receive_mergeable(vi, skb)) { dev_kfree_skb(skb); return; } |
97402b96f virtio net: Allow... |
277 |
} |
3f2c31d90 virtio_net: VIRTI... |
278 |
|
9ab86bbcf virtio_net: Defer... |
279 |
hdr = skb_vnet_hdr(skb); |
3fa2a1df9 virtio-net: per c... |
280 281 282 283 284 |
u64_stats_update_begin(&stats->syncp); stats->rx_bytes += skb->len; stats->rx_packets++; u64_stats_update_end(&stats->syncp); |
296f96fcf Net driver using ... |
285 |
|
b3f24698a virtio_net: forma... |
286 |
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
296f96fcf Net driver using ... |
287 288 |
pr_debug("Needs csum! "); |
b3f24698a virtio_net: forma... |
289 290 291 |
if (!skb_partial_csum_set(skb, hdr->hdr.csum_start, hdr->hdr.csum_offset)) |
296f96fcf Net driver using ... |
292 |
goto frame_err; |
10a8d94a9 virtio_net: intro... |
293 294 |
} else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { skb->ip_summed = CHECKSUM_UNNECESSARY; |
296f96fcf Net driver using ... |
295 |
} |
23cde76d8 virtio_net: Fix s... |
296 297 298 299 |
skb->protocol = eth_type_trans(skb, dev); pr_debug("Receiving skb proto 0x%04x len %i type %i ", ntohs(skb->protocol), skb->len, skb->pkt_type); |
b3f24698a virtio_net: forma... |
300 |
if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
296f96fcf Net driver using ... |
301 302 |
pr_debug("GSO! "); |
b3f24698a virtio_net: forma... |
303 |
switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
296f96fcf Net driver using ... |
304 305 306 |
case VIRTIO_NET_HDR_GSO_TCPV4: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; break; |
296f96fcf Net driver using ... |
307 308 309 310 311 312 313 314 315 316 |
case VIRTIO_NET_HDR_GSO_UDP: skb_shinfo(skb)->gso_type = SKB_GSO_UDP; break; case VIRTIO_NET_HDR_GSO_TCPV6: skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; break; default: if (net_ratelimit()) printk(KERN_WARNING "%s: bad gso type %u. ", |
b3f24698a virtio_net: forma... |
317 |
dev->name, hdr->hdr.gso_type); |
296f96fcf Net driver using ... |
318 319 |
goto frame_err; } |
b3f24698a virtio_net: forma... |
320 |
if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) |
34a48579e virtio: Tweak vir... |
321 |
skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; |
b3f24698a virtio_net: forma... |
322 |
skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; |
296f96fcf Net driver using ... |
323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 |
if (skb_shinfo(skb)->gso_size == 0) { if (net_ratelimit()) printk(KERN_WARNING "%s: zero gso size. ", dev->name); goto frame_err; } /* Header must be checked, and gso_segs computed. */ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; skb_shinfo(skb)->gso_segs = 0; } netif_receive_skb(skb); return; frame_err: dev->stats.rx_frame_errors++; |
296f96fcf Net driver using ... |
341 342 |
dev_kfree_skb(skb); } |
9ab86bbcf virtio_net: Defer... |
343 |
/*
 * Queue one MAX_PACKET_LEN skb on the receive virtqueue (small-packet
 * mode).  Returns the virtqueue_add_buf() result; the skb is freed on
 * failure.
 */
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	int err;

	skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	/* sg[0] = virtio header, sg[1..] = skb data. */
	hdr = skb_vnet_hdr(skb);
	sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}
97402b96f virtio net: Allow... |
366 |
|
9ab86bbcf virtio_net: Defer... |
367 368 |
/*
 * Queue a chain of MAX_SKB_FRAGS + 2 pages for big (GSO-sized) packets.
 * The whole chain is freed back to the pool on failure.
 */
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* vi->rx_sg[0], vi->rx_sg[1] share the same page */
	/* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
	sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));

	/* vi->rx_sg[1] for data packet, from offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
				first, gfp);
	if (err < 0)
		give_pages(vi, first);

	return err;
}
9ab86bbcf virtio_net: Defer... |
410 |
/*
 * Queue a single page on the receive virtqueue (mergeable-buffers
 * mode); the host stitches multi-page packets together itself.
 */
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);

	err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
	if (err < 0)
		give_pages(vi, page);

	return err;
}
3f2c31d90 virtio_net: VIRTI... |
427 |
|
b2baed69e virtio_net: set/c... |
428 429 430 431 432 433 434 |
/* * Returns false if we couldn't fill entirely (OOM). * * Normally run in the receive path, but can also be run from ndo_open * before we're receiving packets, or from refill_work which is * careful to disable receiving (using napi_disable). */ |
9ab86bbcf virtio_net: Defer... |
435 436 437 |
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp) { int err; |
1788f4954 virtio_net: do no... |
438 |
bool oom; |
3f2c31d90 virtio_net: VIRTI... |
439 |
|
9ab86bbcf virtio_net: Defer... |
440 441 442 443 444 445 446 |
do { if (vi->mergeable_rx_bufs) err = add_recvbuf_mergeable(vi, gfp); else if (vi->big_packets) err = add_recvbuf_big(vi, gfp); else err = add_recvbuf_small(vi, gfp); |
3f2c31d90 virtio_net: VIRTI... |
447 |
|
1788f4954 virtio_net: do no... |
448 449 |
oom = err == -ENOMEM; if (err < 0) |
3f2c31d90 virtio_net: VIRTI... |
450 |
break; |
9ab86bbcf virtio_net: Defer... |
451 |
++vi->num; |
0aea51c37 virtio_net: Check... |
452 |
} while (err > 0); |
3f2c31d90 virtio_net: VIRTI... |
453 454 |
if (unlikely(vi->num > vi->max)) vi->max = vi->num; |
1915a712f virtio_net: use v... |
455 |
virtqueue_kick(vi->rvq); |
3161e453e virtio: net refil... |
456 |
return !oom; |
3f2c31d90 virtio_net: VIRTI... |
457 |
} |
18445c4d5 virtio: explicit ... |
458 |
static void skb_recv_done(struct virtqueue *rvq) |
296f96fcf Net driver using ... |
459 460 |
{ struct virtnet_info *vi = rvq->vdev->priv; |
18445c4d5 virtio: explicit ... |
461 |
/* Schedule NAPI, Suppress further interrupts if successful. */ |
288379f05 net: Remove redun... |
462 |
if (napi_schedule_prep(&vi->napi)) { |
1915a712f virtio_net: use v... |
463 |
virtqueue_disable_cb(rvq); |
288379f05 net: Remove redun... |
464 |
__napi_schedule(&vi->napi); |
18445c4d5 virtio: explicit ... |
465 |
} |
296f96fcf Net driver using ... |
466 |
} |
3e9d08ec0 virtio_net: Add s... |
467 468 469 470 471 472 473 474 475 476 477 478 479 |
static void virtnet_napi_enable(struct virtnet_info *vi)
{
	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&vi->napi)) {
		virtqueue_disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
}
3161e453e virtio: net refil... |
480 481 482 483 484 485 486 |
static void refill_work(struct work_struct *work) { struct virtnet_info *vi; bool still_empty; vi = container_of(work, struct virtnet_info, refill.work); napi_disable(&vi->napi); |
39d321577 virtio_net: Make ... |
487 |
still_empty = !try_fill_recv(vi, GFP_KERNEL); |
3e9d08ec0 virtio_net: Add s... |
488 |
virtnet_napi_enable(vi); |
3161e453e virtio: net refil... |
489 490 491 492 |
/* In theory, this can happen: if we don't get any buffers in * we will *never* try to fill again. */ if (still_empty) |
f1776dade virtio_net: use n... |
493 |
queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2); |
3161e453e virtio: net refil... |
494 |
} |
296f96fcf Net driver using ... |
495 496 497 |
static int virtnet_poll(struct napi_struct *napi, int budget) { struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); |
9ab86bbcf virtio_net: Defer... |
498 |
void *buf; |
296f96fcf Net driver using ... |
499 500 501 502 |
unsigned int len, received = 0; again: while (received < budget && |
1915a712f virtio_net: use v... |
503 |
(buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) { |
9ab86bbcf virtio_net: Defer... |
504 505 |
receive_buf(vi->dev, buf, len); --vi->num; |
296f96fcf Net driver using ... |
506 507 |
received++; } |
3161e453e virtio: net refil... |
508 509 |
if (vi->num < vi->max / 2) { if (!try_fill_recv(vi, GFP_ATOMIC)) |
f1776dade virtio_net: use n... |
510 |
queue_delayed_work(system_nrt_wq, &vi->refill, 0); |
3161e453e virtio: net refil... |
511 |
} |
296f96fcf Net driver using ... |
512 |
|
8329d98e4 virtio: fix net d... |
513 514 |
/* Out of packets? */ if (received < budget) { |
288379f05 net: Remove redun... |
515 |
napi_complete(napi); |
1915a712f virtio_net: use v... |
516 |
if (unlikely(!virtqueue_enable_cb(vi->rvq)) && |
8e95a2026 drivers/net: Move... |
517 |
napi_schedule_prep(napi)) { |
1915a712f virtio_net: use v... |
518 |
virtqueue_disable_cb(vi->rvq); |
288379f05 net: Remove redun... |
519 |
__napi_schedule(napi); |
296f96fcf Net driver using ... |
520 |
goto again; |
4265f161b virtio: fix race ... |
521 |
} |
296f96fcf Net driver using ... |
522 523 524 525 |
} return received; } |
48925e372 virtio_net: avoid... |
526 |
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi) |
296f96fcf Net driver using ... |
527 528 |
{ struct sk_buff *skb; |
48925e372 virtio_net: avoid... |
529 |
unsigned int len, tot_sgs = 0; |
3fa2a1df9 virtio-net: per c... |
530 |
struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats); |
296f96fcf Net driver using ... |
531 |
|
1915a712f virtio_net: use v... |
532 |
while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { |
296f96fcf Net driver using ... |
533 534 |
pr_debug("Sent skb %p ", skb); |
3fa2a1df9 virtio-net: per c... |
535 536 537 538 539 |
u64_stats_update_begin(&stats->syncp); stats->tx_bytes += skb->len; stats->tx_packets++; u64_stats_update_end(&stats->syncp); |
48925e372 virtio_net: avoid... |
540 |
tot_sgs += skb_vnet_hdr(skb)->num_sg; |
ed79bab84 virtio_net: use d... |
541 |
dev_kfree_skb_any(skb); |
296f96fcf Net driver using ... |
542 |
} |
48925e372 virtio_net: avoid... |
543 |
return tot_sgs; |
296f96fcf Net driver using ... |
544 |
} |
99ffc696d virtio: wean net ... |
545 |
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) |
296f96fcf Net driver using ... |
546 |
{ |
b3f24698a virtio_net: forma... |
547 |
struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); |
296f96fcf Net driver using ... |
548 |
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
296f96fcf Net driver using ... |
549 |
|
e174961ca net: convert prin... |
550 551 |
pr_debug("%s: xmit %p %pM ", vi->dev->name, skb, dest); |
296f96fcf Net driver using ... |
552 |
|
296f96fcf Net driver using ... |
553 |
if (skb->ip_summed == CHECKSUM_PARTIAL) { |
b3f24698a virtio_net: forma... |
554 |
hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
55508d601 net: Use skb_chec... |
555 |
hdr->hdr.csum_start = skb_checksum_start_offset(skb); |
b3f24698a virtio_net: forma... |
556 |
hdr->hdr.csum_offset = skb->csum_offset; |
296f96fcf Net driver using ... |
557 |
} else { |
b3f24698a virtio_net: forma... |
558 559 |
hdr->hdr.flags = 0; hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; |
296f96fcf Net driver using ... |
560 561 562 |
} if (skb_is_gso(skb)) { |
b3f24698a virtio_net: forma... |
563 564 |
hdr->hdr.hdr_len = skb_headlen(skb); hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; |
34a48579e virtio: Tweak vir... |
565 |
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
b3f24698a virtio_net: forma... |
566 |
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
296f96fcf Net driver using ... |
567 |
else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
b3f24698a virtio_net: forma... |
568 |
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
296f96fcf Net driver using ... |
569 |
else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
b3f24698a virtio_net: forma... |
570 |
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; |
296f96fcf Net driver using ... |
571 572 |
else BUG(); |
34a48579e virtio: Tweak vir... |
573 |
if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) |
b3f24698a virtio_net: forma... |
574 |
hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; |
296f96fcf Net driver using ... |
575 |
} else { |
b3f24698a virtio_net: forma... |
576 577 |
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; |
296f96fcf Net driver using ... |
578 |
} |
b3f24698a virtio_net: forma... |
579 |
hdr->mhdr.num_buffers = 0; |
3f2c31d90 virtio_net: VIRTI... |
580 581 582 |
/* Encode metadata header at front. */ if (vi->mergeable_rx_bufs) |
5e01d2f91 virtio-net: move ... |
583 |
sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr); |
3f2c31d90 virtio_net: VIRTI... |
584 |
else |
5e01d2f91 virtio-net: move ... |
585 |
sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr); |
3f2c31d90 virtio_net: VIRTI... |
586 |
|
5e01d2f91 virtio-net: move ... |
587 |
hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1; |
1756ac3d3 Merge branch 'vir... |
588 |
return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg, |
f96fde41f virtio: rename vi... |
589 |
0, skb, GFP_ATOMIC); |
11a3a1546 virtio: fix delay... |
590 |
} |
424efe9ca netdev: convert p... |
591 |
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
99ffc696d virtio: wean net ... |
592 593 |
{ struct virtnet_info *vi = netdev_priv(dev); |
48925e372 virtio_net: avoid... |
594 |
int capacity; |
2cb9c6baf virtio: free tran... |
595 |
|
2cb9c6baf virtio: free tran... |
596 597 |
/* Free up any pending old buffers before queueing new ones. */ free_old_xmit_skbs(vi); |
99ffc696d virtio: wean net ... |
598 |
|
03f191bab virtio-net: fix d... |
599 |
/* Try to transmit */ |
48925e372 virtio_net: avoid... |
600 601 602 603 |
capacity = xmit_skb(vi, skb); /* This can happen with OOM and indirect buffers. */ if (unlikely(capacity < 0)) { |
58eba97d0 virtio_net: fix o... |
604 605 606 607 608 609 610 611 612 613 614 615 |
if (net_ratelimit()) { if (likely(capacity == -ENOMEM)) { dev_warn(&dev->dev, "TX queue failure: out of memory "); } else { dev->stats.tx_fifo_errors++; dev_warn(&dev->dev, "Unexpected TX queue failure: %d ", capacity); } |
48925e372 virtio_net: avoid... |
616 |
} |
58eba97d0 virtio_net: fix o... |
617 618 619 |
dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; |
296f96fcf Net driver using ... |
620 |
} |
1915a712f virtio_net: use v... |
621 |
virtqueue_kick(vi->svq); |
03f191bab virtio-net: fix d... |
622 |
|
48925e372 virtio_net: avoid... |
623 624 625 626 627 628 629 630 |
/* Don't wait up for transmitted skbs to be freed. */ skb_orphan(skb); nf_reset(skb); /* Apparently nice girls don't return TX_BUSY; stop the queue * before it gets out of hand. Naturally, this wastes entries. */ if (capacity < 2+MAX_SKB_FRAGS) { netif_stop_queue(dev); |
7a66f7843 virtio_net: delay... |
631 |
if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) { |
48925e372 virtio_net: avoid... |
632 633 634 635 |
/* More just got used, free them then recheck. */ capacity += free_old_xmit_skbs(vi); if (capacity >= 2+MAX_SKB_FRAGS) { netif_start_queue(dev); |
1915a712f virtio_net: use v... |
636 |
virtqueue_disable_cb(vi->svq); |
48925e372 virtio_net: avoid... |
637 638 |
} } |
99ffc696d virtio: wean net ... |
639 |
} |
48925e372 virtio_net: avoid... |
640 641 |
return NETDEV_TX_OK; |
296f96fcf Net driver using ... |
642 |
} |
9c46f6d42 virtio_net: Allow... |
643 644 645 646 647 648 649 650 651 |
static int virtnet_set_mac_address(struct net_device *dev, void *p) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; int ret; ret = eth_mac_addr(dev, p); if (ret) return ret; |
62994b2d6 virtio_net: Set t... |
652 653 654 |
if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) vdev->config->set(vdev, offsetof(struct virtio_net_config, mac), dev->dev_addr, dev->addr_len); |
9c46f6d42 virtio_net: Allow... |
655 656 657 |
return 0; } |
3fa2a1df9 virtio-net: per c... |
658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 |
/*
 * ndo_get_stats64: sum the per-cpu counters into @tot, reading each
 * cpu's counters under its u64 stats seqcount, and fold in the
 * netdev-level error counters.
 */
static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
					       struct rtnl_link_stats64 *tot)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int cpu;
	unsigned int start;

	for_each_possible_cpu(cpu) {
		struct virtnet_stats __percpu *stats
			= per_cpu_ptr(vi->stats, cpu);
		u64 tpackets, tbytes, rpackets, rbytes;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			tpackets = stats->tx_packets;
			tbytes   = stats->tx_bytes;
			rpackets = stats->rx_packets;
			rbytes   = stats->rx_bytes;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		tot->rx_packets += rpackets;
		tot->tx_packets += tpackets;
		tot->rx_bytes   += rbytes;
		tot->tx_bytes   += tbytes;
	}

	tot->tx_dropped = dev->stats.tx_dropped;
	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
	tot->rx_dropped = dev->stats.rx_dropped;
	tot->rx_length_errors = dev->stats.rx_length_errors;
	tot->rx_frame_errors = dev->stats.rx_frame_errors;

	return tot;
}
da74e89d4 virtio: Enable ne... |
692 693 694 695 696 697 698 699 |
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Poll hook for netconsole and friends: kick NAPI by hand. */
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif
296f96fcf Net driver using ... |
700 701 702 |
static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); |
b2baed69e virtio_net: set/c... |
703 704 |
/* Make sure we have some buffers: if oom use wq. */ if (!try_fill_recv(vi, GFP_KERNEL)) |
f1776dade virtio_net: use n... |
705 |
queue_delayed_work(system_nrt_wq, &vi->refill, 0); |
b2baed69e virtio_net: set/c... |
706 |
|
3e9d08ec0 virtio_net: Add s... |
707 |
virtnet_napi_enable(vi); |
296f96fcf Net driver using ... |
708 709 |
return 0; } |
2a41f71d3 virtio_net: Add a... |
710 711 712 713 714 715 716 717 |
/* * Send command via the control virtqueue and check status. Commands * supported by the hypervisor, as indicated by feature bits, should * never fail unless improperly formated. */ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, struct scatterlist *data, int out, int in) { |
23e258e1a virtio_net: Clean... |
718 |
struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2]; |
2a41f71d3 virtio_net: Add a... |
719 720 721 |
struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = ~0; unsigned int tmp; |
23e258e1a virtio_net: Clean... |
722 |
int i; |
2a41f71d3 virtio_net: Add a... |
723 |
|
0ee904c35 drivers/net: repl... |
724 725 726 |
/* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) || (out + in > VIRTNET_SEND_COMMAND_SG_MAX)); |
2a41f71d3 virtio_net: Add a... |
727 728 729 730 731 732 733 734 735 736 |
out++; /* Add header */ in++; /* Add return status */ ctrl.class = class; ctrl.cmd = cmd; sg_init_table(sg, out + in); sg_set_buf(&sg[0], &ctrl, sizeof(ctrl)); |
23e258e1a virtio_net: Clean... |
737 738 |
for_each_sg(data, s, out + in - 2, i) sg_set_buf(&sg[i + 1], sg_virt(s), s->length); |
2a41f71d3 virtio_net: Add a... |
739 |
sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); |
f96fde41f virtio: rename vi... |
740 |
BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0); |
2a41f71d3 virtio_net: Add a... |
741 |
|
1915a712f virtio_net: use v... |
742 |
virtqueue_kick(vi->cvq); |
2a41f71d3 virtio_net: Add a... |
743 744 745 746 747 |
/* * Spin for a response, the kick causes an ioport write, trapping * into the hypervisor, so the request should be handled immediately. */ |
1915a712f virtio_net: use v... |
748 |
while (!virtqueue_get_buf(vi->cvq, &tmp)) |
2a41f71d3 virtio_net: Add a... |
749 750 751 752 |
cpu_relax(); return status == VIRTIO_NET_OK; } |
296f96fcf Net driver using ... |
753 754 755 |
static int virtnet_close(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); |
296f96fcf Net driver using ... |
756 |
|
b2baed69e virtio_net: set/c... |
757 758 |
/* Make sure refill_work doesn't re-enable napi! */ cancel_delayed_work_sync(&vi->refill); |
296f96fcf Net driver using ... |
759 |
napi_disable(&vi->napi); |
296f96fcf Net driver using ... |
760 761 |
return 0; } |
2af7698e2 virtio_net: Add a... |
762 763 764 |
static void virtnet_set_rx_mode(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); |
f565a7c25 virtio_net: Add a... |
765 |
struct scatterlist sg[2]; |
2af7698e2 virtio_net: Add a... |
766 |
u8 promisc, allmulti; |
f565a7c25 virtio_net: Add a... |
767 |
struct virtio_net_ctrl_mac *mac_data; |
ccffad25b net: convert unic... |
768 |
struct netdev_hw_addr *ha; |
32e7bfc41 net: use helpers ... |
769 |
int uc_count; |
4cd24eaf0 net: use netdev_m... |
770 |
int mc_count; |
f565a7c25 virtio_net: Add a... |
771 772 |
void *buf; int i; |
2af7698e2 virtio_net: Add a... |
773 774 775 776 |
/* We can't dynamicaly set ndo_set_rx_mode, so return gracefully */ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) return; |
f565a7c25 virtio_net: Add a... |
777 778 |
promisc = ((dev->flags & IFF_PROMISC) != 0); allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
2af7698e2 virtio_net: Add a... |
779 |
|
23e258e1a virtio_net: Clean... |
780 |
sg_init_one(sg, &promisc, sizeof(promisc)); |
2af7698e2 virtio_net: Add a... |
781 782 783 |
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_PROMISC, |
f565a7c25 virtio_net: Add a... |
784 |
sg, 1, 0)) |
2af7698e2 virtio_net: Add a... |
785 786 787 |
dev_warn(&dev->dev, "Failed to %sable promisc mode. ", promisc ? "en" : "dis"); |
23e258e1a virtio_net: Clean... |
788 |
sg_init_one(sg, &allmulti, sizeof(allmulti)); |
2af7698e2 virtio_net: Add a... |
789 790 791 |
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, VIRTIO_NET_CTRL_RX_ALLMULTI, |
f565a7c25 virtio_net: Add a... |
792 |
sg, 1, 0)) |
2af7698e2 virtio_net: Add a... |
793 794 795 |
dev_warn(&dev->dev, "Failed to %sable allmulti mode. ", allmulti ? "en" : "dis"); |
f565a7c25 virtio_net: Add a... |
796 |
|
32e7bfc41 net: use helpers ... |
797 |
uc_count = netdev_uc_count(dev); |
4cd24eaf0 net: use netdev_m... |
798 |
mc_count = netdev_mc_count(dev); |
f565a7c25 virtio_net: Add a... |
799 |
/* MAC filter - use one buffer for both lists */ |
4cd24eaf0 net: use netdev_m... |
800 801 802 |
buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); mac_data = buf; |
f565a7c25 virtio_net: Add a... |
803 804 805 806 807 |
if (!buf) { dev_warn(&dev->dev, "No memory for MAC address buffer "); return; } |
23e258e1a virtio_net: Clean... |
808 |
sg_init_table(sg, 2); |
f565a7c25 virtio_net: Add a... |
809 |
/* Store the unicast list and count in the front of the buffer */ |
32e7bfc41 net: use helpers ... |
810 |
mac_data->entries = uc_count; |
ccffad25b net: convert unic... |
811 |
i = 0; |
32e7bfc41 net: use helpers ... |
812 |
netdev_for_each_uc_addr(ha, dev) |
ccffad25b net: convert unic... |
813 |
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
f565a7c25 virtio_net: Add a... |
814 815 |
sg_set_buf(&sg[0], mac_data, |
32e7bfc41 net: use helpers ... |
816 |
sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
f565a7c25 virtio_net: Add a... |
817 818 |
/* multicast list and count fill the end */ |
32e7bfc41 net: use helpers ... |
819 |
mac_data = (void *)&mac_data->macs[uc_count][0]; |
f565a7c25 virtio_net: Add a... |
820 |
|
4cd24eaf0 net: use netdev_m... |
821 |
mac_data->entries = mc_count; |
567ec874d net: convert mult... |
822 |
i = 0; |
22bedad3c net: convert mult... |
823 824 |
netdev_for_each_mc_addr(ha, dev) memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
f565a7c25 virtio_net: Add a... |
825 826 |
sg_set_buf(&sg[1], mac_data, |
4cd24eaf0 net: use netdev_m... |
827 |
sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
f565a7c25 virtio_net: Add a... |
828 829 830 831 832 833 834 835 |
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, VIRTIO_NET_CTRL_MAC_TABLE_SET, sg, 2, 0)) dev_warn(&dev->dev, "Failed to set MAC fitler table. "); kfree(buf); |
2af7698e2 virtio_net: Add a... |
836 |
} |
8e586137e net: make vlan nd... |
837 |
static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid) |
0bde95690 virtio_net: Add s... |
838 839 840 |
{ struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; |
23e258e1a virtio_net: Clean... |
841 |
sg_init_one(&sg, &vid, sizeof(vid)); |
0bde95690 virtio_net: Add s... |
842 843 844 845 846 |
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to add VLAN ID %d. ", vid); |
8e586137e net: make vlan nd... |
847 |
return 0; |
0bde95690 virtio_net: Add s... |
848 |
} |
8e586137e net: make vlan nd... |
849 |
static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid) |
0bde95690 virtio_net: Add s... |
850 851 852 |
{ struct virtnet_info *vi = netdev_priv(dev); struct scatterlist sg; |
23e258e1a virtio_net: Clean... |
853 |
sg_init_one(&sg, &vid, sizeof(vid)); |
0bde95690 virtio_net: Add s... |
854 855 856 857 858 |
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0)) dev_warn(&dev->dev, "Failed to kill VLAN ID %d. ", vid); |
8e586137e net: make vlan nd... |
859 |
return 0; |
0bde95690 virtio_net: Add s... |
860 |
} |
8f9f4668b Add ethtool -g su... |
861 862 863 864 865 866 867 868 869 870 871 |
static void virtnet_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) { struct virtnet_info *vi = netdev_priv(dev); ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq); ring->tx_max_pending = virtqueue_get_vring_size(vi->svq); ring->rx_pending = ring->rx_max_pending; ring->tx_pending = ring->tx_max_pending; } |
66846048f enable virtio_net... |
872 873 874 875 876 877 878 879 880 881 882 883 |
static void virtnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct virtnet_info *vi = netdev_priv(dev); struct virtio_device *vdev = vi->vdev; strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); } |
0fc0b732e netdev: drivers s... |
884 |
static const struct ethtool_ops virtnet_ethtool_ops = { |
66846048f enable virtio_net... |
885 |
.get_drvinfo = virtnet_get_drvinfo, |
9f4d26d0f virtio_net: add l... |
886 |
.get_link = ethtool_op_get_link, |
8f9f4668b Add ethtool -g su... |
887 |
.get_ringparam = virtnet_get_ringparam, |
a9ea3fc6f virtio net: Add e... |
888 |
}; |
39da5814d virtio_net: large... |
889 890 891 892 893 894 895 896 897 898 |
#define MIN_MTU 68 #define MAX_MTU 65535 static int virtnet_change_mtu(struct net_device *dev, int new_mtu) { if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) return -EINVAL; dev->mtu = new_mtu; return 0; } |
76288b4e5 virtio: convert t... |
899 900 901 902 903 |
static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, .ndo_start_xmit = start_xmit, .ndo_validate_addr = eth_validate_addr, |
9c46f6d42 virtio_net: Allow... |
904 |
.ndo_set_mac_address = virtnet_set_mac_address, |
2af7698e2 virtio_net: Add a... |
905 |
.ndo_set_rx_mode = virtnet_set_rx_mode, |
76288b4e5 virtio: convert t... |
906 |
.ndo_change_mtu = virtnet_change_mtu, |
3fa2a1df9 virtio-net: per c... |
907 |
.ndo_get_stats64 = virtnet_stats, |
1824a9897 virtio_net: Fix f... |
908 909 |
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
76288b4e5 virtio: convert t... |
910 911 912 913 |
#ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = virtnet_netpoll, #endif }; |
9f4d26d0f virtio_net: add l... |
914 915 916 |
static void virtnet_update_status(struct virtnet_info *vi) { u16 v; |
77dd7693c virtio-net: Use v... |
917 |
if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, |
9f4d26d0f virtio_net: add l... |
918 |
offsetof(struct virtio_net_config, status), |
77dd7693c virtio-net: Use v... |
919 920 |
&v) < 0) return; |
9f4d26d0f virtio_net: add l... |
921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 |
/* Ignore unknown (future) status bits */ v &= VIRTIO_NET_S_LINK_UP; if (vi->status == v) return; vi->status = v; if (vi->status & VIRTIO_NET_S_LINK_UP) { netif_carrier_on(vi->dev); netif_wake_queue(vi->dev); } else { netif_carrier_off(vi->dev); netif_stop_queue(vi->dev); } } static void virtnet_config_changed(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; virtnet_update_status(vi); } |
3f9c10b0d virtio: net: Move... |
945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 |
static int init_vqs(struct virtnet_info *vi) { struct virtqueue *vqs[3]; vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL}; const char *names[] = { "input", "output", "control" }; int nvqs, err; /* We expect two virtqueues, receive then send, * and optionally control. */ nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2; err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names); if (err) return err; vi->rvq = vqs[0]; vi->svq = vqs[1]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) { vi->cvq = vqs[2]; if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) vi->dev->features |= NETIF_F_HW_VLAN_FILTER; } return 0; } |
296f96fcf Net driver using ... |
971 972 973 |
static int virtnet_probe(struct virtio_device *vdev) { int err; |
296f96fcf Net driver using ... |
974 975 |
struct net_device *dev; struct virtnet_info *vi; |
296f96fcf Net driver using ... |
976 977 978 979 980 981 982 |
/* Allocate ourselves a network device with room for our info */ dev = alloc_etherdev(sizeof(struct virtnet_info)); if (!dev) return -ENOMEM; /* Set up network device as normal. */ |
01789349e net: introduce IF... |
983 |
dev->priv_flags |= IFF_UNICAST_FLT; |
76288b4e5 virtio: convert t... |
984 |
dev->netdev_ops = &virtnet_netdev; |
296f96fcf Net driver using ... |
985 |
dev->features = NETIF_F_HIGHDMA; |
3fa2a1df9 virtio-net: per c... |
986 |
|
a9ea3fc6f virtio net: Add e... |
987 |
SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); |
296f96fcf Net driver using ... |
988 989 990 |
SET_NETDEV_DEV(dev, &vdev->dev); /* Do we support "hardware" checksums? */ |
98e778c9a virtio_net: conve... |
991 |
if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { |
296f96fcf Net driver using ... |
992 |
/* This opens up the world of extra features. */ |
98e778c9a virtio_net: conve... |
993 994 995 996 997 998 |
dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (csum) dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO |
34a48579e virtio: Tweak vir... |
999 1000 |
| NETIF_F_TSO_ECN | NETIF_F_TSO6; } |
5539ae961 virtio: finer-gra... |
1001 |
/* Individual feature bits: what can host handle? */ |
98e778c9a virtio_net: conve... |
1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 |
if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) dev->hw_features |= NETIF_F_TSO; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) dev->hw_features |= NETIF_F_TSO6; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) dev->hw_features |= NETIF_F_TSO_ECN; if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) dev->hw_features |= NETIF_F_UFO; if (gso) dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); /* (!csum && gso) case will be fixed by register_netdev() */ |
296f96fcf Net driver using ... |
1014 1015 1016 |
} /* Configuration may specify what MAC to use. Otherwise random. */ |
77dd7693c virtio-net: Use v... |
1017 |
if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, |
a586d4f60 virtio: simplify ... |
1018 |
offsetof(struct virtio_net_config, mac), |
77dd7693c virtio-net: Use v... |
1019 |
dev->dev_addr, dev->addr_len) < 0) |
296f96fcf Net driver using ... |
1020 1021 1022 1023 |
random_ether_addr(dev->dev_addr); /* Set up our device-specific information */ vi = netdev_priv(dev); |
6c0cd7c00 virtio_net: param... |
1024 |
netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight); |
296f96fcf Net driver using ... |
1025 1026 |
vi->dev = dev; vi->vdev = vdev; |
d9d5dcc88 virtio_net: Fix o... |
1027 |
vdev->priv = vi; |
fb6813f48 virtio: Recycle u... |
1028 |
vi->pages = NULL; |
3fa2a1df9 virtio-net: per c... |
1029 1030 1031 1032 |
vi->stats = alloc_percpu(struct virtnet_stats); err = -ENOMEM; if (vi->stats == NULL) goto free; |
3161e453e virtio: net refil... |
1033 |
INIT_DELAYED_WORK(&vi->refill, refill_work); |
5e01d2f91 virtio-net: move ... |
1034 1035 |
sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); |
296f96fcf Net driver using ... |
1036 |
|
97402b96f virtio net: Allow... |
1037 |
/* If we can receive ANY GSO packets, we must allocate large ones. */ |
8e95a2026 drivers/net: Move... |
1038 1039 1040 |
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) |
97402b96f virtio net: Allow... |
1041 |
vi->big_packets = true; |
3f2c31d90 virtio_net: VIRTI... |
1042 1043 |
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) vi->mergeable_rx_bufs = true; |
3f9c10b0d virtio: net: Move... |
1044 |
err = init_vqs(vi); |
d2a7ddda9 virtio: find_vqs/... |
1045 |
if (err) |
3fa2a1df9 virtio-net: per c... |
1046 |
goto free_stats; |
296f96fcf Net driver using ... |
1047 |
|
296f96fcf Net driver using ... |
1048 1049 1050 1051 |
err = register_netdev(dev); if (err) { pr_debug("virtio_net: registering device failed "); |
d2a7ddda9 virtio: find_vqs/... |
1052 |
goto free_vqs; |
296f96fcf Net driver using ... |
1053 |
} |
b3369c1fb virtio: populate ... |
1054 1055 |
/* Last of all, set up some receive buffers. */ |
3161e453e virtio: net refil... |
1056 |
try_fill_recv(vi, GFP_KERNEL); |
b3369c1fb virtio: populate ... |
1057 1058 1059 1060 1061 1062 |
/* If we didn't even get one input buffer, we're useless. */ if (vi->num == 0) { err = -ENOMEM; goto unregister; } |
167c25e4c virtio-net: init ... |
1063 1064 1065 1066 1067 1068 1069 1070 1071 |
/* Assume link up if device can't report link status, otherwise get link status from config. */ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { netif_carrier_off(dev); virtnet_update_status(vi); } else { vi->status = VIRTIO_NET_S_LINK_UP; netif_carrier_on(dev); } |
9f4d26d0f virtio_net: add l... |
1072 |
|
296f96fcf Net driver using ... |
1073 1074 |
pr_debug("virtnet: registered device %s ", dev->name); |
296f96fcf Net driver using ... |
1075 |
return 0; |
b3369c1fb virtio: populate ... |
1076 1077 |
unregister: unregister_netdev(dev); |
d2a7ddda9 virtio: find_vqs/... |
1078 1079 |
free_vqs: vdev->config->del_vqs(vdev); |
3fa2a1df9 virtio-net: per c... |
1080 1081 |
free_stats: free_percpu(vi->stats); |
296f96fcf Net driver using ... |
1082 1083 1084 1085 |
free: free_netdev(dev); return err; } |
9ab86bbcf virtio_net: Defer... |
1086 1087 1088 1089 |
static void free_unused_bufs(struct virtnet_info *vi) { void *buf; while (1) { |
1915a712f virtio_net: use v... |
1090 |
buf = virtqueue_detach_unused_buf(vi->svq); |
830a8a976 virtio_net: remov... |
1091 1092 1093 1094 1095 |
if (!buf) break; dev_kfree_skb(buf); } while (1) { |
1915a712f virtio_net: use v... |
1096 |
buf = virtqueue_detach_unused_buf(vi->rvq); |
9ab86bbcf virtio_net: Defer... |
1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 |
if (!buf) break; if (vi->mergeable_rx_bufs || vi->big_packets) give_pages(vi, buf); else dev_kfree_skb(buf); --vi->num; } BUG_ON(vi->num != 0); } |
04486ed01 virtio: net: Move... |
1107 |
static void remove_vq_common(struct virtnet_info *vi) |
296f96fcf Net driver using ... |
1108 |
{ |
04486ed01 virtio: net: Move... |
1109 |
vi->vdev->config->reset(vi->vdev); |
830a8a976 virtio_net: remov... |
1110 1111 |
/* Free unused buffers in both send and recv, if any. */ |
9ab86bbcf virtio_net: Defer... |
1112 |
free_unused_bufs(vi); |
fb6813f48 virtio: Recycle u... |
1113 |
|
04486ed01 virtio: net: Move... |
1114 |
vi->vdev->config->del_vqs(vi->vdev); |
d2a7ddda9 virtio: find_vqs/... |
1115 |
|
fb6813f48 virtio: Recycle u... |
1116 1117 |
while (vi->pages) __free_pages(get_a_page(vi, GFP_KERNEL), 0); |
04486ed01 virtio: net: Move... |
1118 1119 1120 1121 1122 1123 1124 1125 1126 |
} static void __devexit virtnet_remove(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; unregister_netdev(vi->dev); remove_vq_common(vi); |
fb6813f48 virtio: Recycle u... |
1127 |
|
2e66f55b3 virtio_net: Fix p... |
1128 |
free_percpu(vi->stats); |
74b2553f1 virtio: fix modul... |
1129 |
free_netdev(vi->dev); |
296f96fcf Net driver using ... |
1130 |
} |
#ifdef CONFIG_PM
/* Suspend: silence virtqueue callbacks, detach the netdev, stop the
 * refill work and NAPI, then tear the virtqueues down entirely (they
 * are rebuilt on restore).
 */
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtqueue_disable_cb(vi->rvq);
	virtqueue_disable_cb(vi->svq);
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
		virtqueue_disable_cb(vi->cvq);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	if (netif_running(vi->dev))
		napi_disable(&vi->napi);

	remove_vq_common(vi);

	return 0;
}

/* Resume: rebuild the virtqueues, restart NAPI if the interface was
 * running, reattach the netdev and refill the receive ring (deferring
 * to the workqueue on oom).
 */
static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err;

	err = init_vqs(vi);
	if (err)
		return err;

	if (netif_running(vi->dev))
		virtnet_napi_enable(vi);

	netif_device_attach(vi->dev);

	if (!try_fill_recv(vi, GFP_KERNEL))
		queue_delayed_work(system_nrt_wq, &vi->refill, 0);

	return 0;
}
#endif
296f96fcf Net driver using ... |
1172 1173 1174 1175 |
static struct virtio_device_id id_table[] = { { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, { 0 }, }; |
c45a6816c virtio: explicit ... |
1176 |
static unsigned int features[] = { |
5e4fe5c45 virtio_net: Set V... |
1177 1178 |
VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, |
c45a6816c virtio: explicit ... |
1179 |
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, |
97402b96f virtio net: Allow... |
1180 |
VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, |
5c5167515 virtio-net: Allow... |
1181 |
VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, |
2a41f71d3 virtio_net: Add a... |
1182 |
VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, |
0bde95690 virtio_net: Add s... |
1183 |
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, |
c45a6816c virtio: explicit ... |
1184 |
}; |
22402529d virtio_net: renam... |
1185 |
static struct virtio_driver virtio_net_driver = { |
c45a6816c virtio: explicit ... |
1186 1187 |
.feature_table = features, .feature_table_size = ARRAY_SIZE(features), |
296f96fcf Net driver using ... |
1188 1189 1190 1191 1192 |
.driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtnet_probe, .remove = __devexit_p(virtnet_remove), |
9f4d26d0f virtio_net: add l... |
1193 |
.config_changed = virtnet_config_changed, |
0741bcb55 virtio: net: Add ... |
1194 1195 1196 1197 |
#ifdef CONFIG_PM .freeze = virtnet_freeze, .restore = virtnet_restore, #endif |
296f96fcf Net driver using ... |
1198 1199 1200 1201 |
}; static int __init init(void) { |
22402529d virtio_net: renam... |
1202 |
return register_virtio_driver(&virtio_net_driver); |
296f96fcf Net driver using ... |
1203 1204 1205 1206 |
} static void __exit fini(void) { |
22402529d virtio_net: renam... |
1207 |
unregister_virtio_driver(&virtio_net_driver); |
296f96fcf Net driver using ... |
1208 1209 1210 1211 1212 1213 1214 |
} module_init(init); module_exit(fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio network driver"); MODULE_LICENSE("GPL"); |