Commit e53fbd11e983e896adaabef2d2f1695d6e0af829
Committed by: Rusty Russell
1 parent: 401bbdc901
Exists in ti-lsk-linux-4.1.y and in 10 other branches
virtio_net: enable VQs early on restore
The virtio spec requires drivers to set DRIVER_OK before using VQs. This is set automatically after restore returns; virtio net violated this rule by using receive VQs within restore itself. To fix, call virtio_device_ready before using VQs.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
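The two added lines land in virtnet_restore(), which falls past the end of the excerpt below. A minimal sketch of the shape of the fix, assuming the mainline layout of the restore path (everything here except the virtio_device_ready() call is pre-existing context, and the tail of the function is omitted):

        static int virtnet_restore(struct virtio_device *vdev)
        {
                struct virtnet_info *vi = vdev->priv;
                int err, i;

                err = init_vqs(vi);
                if (err)
                        return err;

                /* The fix: set DRIVER_OK before the refill below touches the
                 * receive VQs; previously this happened only after restore
                 * returned. */
                virtio_device_ready(vdev);

                if (netif_running(vi->dev)) {
                        for (i = 0; i < vi->curr_queue_pairs; i++)
                                if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
                                        schedule_delayed_work(&vi->refill, 0);

                        for (i = 0; i < vi->max_queue_pairs; i++)
                                virtnet_napi_enable(&vi->rq[i]);
                }

                /* remaining restore steps (device attach, queue setup)
                 * are unchanged and omitted here */
                return 0;
        }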
Showing 1 changed file with 2 additions and 0 deletions
drivers/net/virtio_net.c
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/average.h>
#include <net/busy_poll.h>

static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN 128

/* Weight used for the RX packet size EWMA. The average packet size is used to
 * determine the packet buffer size when refilling RX rings. As the entire RX
 * ring may be refilled at once, the weight is chosen so that the EWMA will be
 * insensitive to short-term, transient changes in packet size.
 */
#define RECEIVE_AVG_WEIGHT 64

/* Minimum alignment for mergeable packet buffers. */
#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)

#define VIRTNET_DRIVER_VERSION "1.0.0"

struct virtnet_stats {
        struct u64_stats_sync tx_syncp;
        struct u64_stats_sync rx_syncp;
        u64 tx_bytes;
        u64 tx_packets;

        u64 rx_bytes;
        u64 rx_packets;
};

/* Internal representation of a send virtqueue */
struct send_queue {
        /* Virtqueue associated with this send_queue */
        struct virtqueue *vq;

        /* TX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of the send queue: output.$index */
        char name[40];
};

/* Internal representation of a receive virtqueue */
struct receive_queue {
        /* Virtqueue associated with this receive_queue */
        struct virtqueue *vq;

        struct napi_struct napi;

        /* Chain pages by the private ptr. */
        struct page *pages;

        /* Average packet length for mergeable receive buffers. */
        struct ewma mrg_avg_pkt_len;

        /* Page frag for packet buffer allocation. */
        struct page_frag alloc_frag;

        /* RX: fragments + linear part + virtio header */
        struct scatterlist sg[MAX_SKB_FRAGS + 2];

        /* Name of this receive queue: input.$index */
        char name[40];
};

struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *cvq;
        struct net_device *dev;
        struct send_queue *sq;
        struct receive_queue *rq;
        unsigned int status;

        /* Max # of queue pairs supported by the device */
        u16 max_queue_pairs;

        /* # of queue pairs currently used by the driver */
        u16 curr_queue_pairs;

        /* I like... big packets and I cannot lie! */
        bool big_packets;

        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;

        /* Has control virtqueue */
        bool has_cvq;

        /* Host can handle any s/g split between our header and packet data */
        bool any_header_sg;

        /* Active statistics */
        struct virtnet_stats __percpu *stats;

        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;

        /* Work struct for config space updates */
        struct work_struct config_work;

        /* Is the affinity hint set for the virtqueues? */
        bool affinity_hint_set;

        /* CPU hot plug notifier */
        struct notifier_block nb;
};

struct skb_vnet_hdr {
        union {
                struct virtio_net_hdr hdr;
                struct virtio_net_hdr_mrg_rxbuf mhdr;
        };
};

struct padded_vnet_hdr {
        struct virtio_net_hdr hdr;
        /*
         * virtio_net_hdr should be in a separate sg buffer because of a
         * QEMU bug, and the data sg buffer shares a page with this header sg.
         * This padding makes the next sg 16 byte aligned after virtio_net_hdr.
         */
        char padding[6];
};
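
/* Illustrative note (not part of the file): struct virtio_net_hdr is 10
 * bytes (two u8 fields plus four 16-bit fields), so the 6 bytes of padding
 * above round the header out to 16 bytes and land the following data sg on
 * a 16-byte boundary.
 */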

/* Converting between virtqueue no. and kernel tx/rx queue no.
 * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
 */
static int vq2txq(struct virtqueue *vq)
{
        return (vq->index - 1) / 2;
}

static int txq2vq(int txq)
{
        return txq * 2 + 1;
}

static int vq2rxq(struct virtqueue *vq)
{
        return vq->index / 2;
}

static int rxq2vq(int rxq)
{
        return rxq * 2;
}

static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
        return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets, put the whole
 * most recently used list at the beginning for reuse
 */
static void give_pages(struct receive_queue *rq, struct page *page)
{
        struct page *end;

        /* Find end of list, sew whole thing into vi->rq.pages. */
        for (end = page; end->private; end = (struct page *)end->private);
        end->private = (unsigned long)rq->pages;
        rq->pages = page;
}

static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
{
        struct page *p = rq->pages;

        if (p) {
                rq->pages = (struct page *)p->private;
                /* clear private here, it is used to chain pages */
                p->private = 0;
        } else
                p = alloc_page(gfp_mask);
        return p;
}

static void skb_xmit_done(struct virtqueue *vq)
{
        struct virtnet_info *vi = vq->vdev->priv;

        /* Suppress further interrupts. */
        virtqueue_disable_cb(vq);

        /* We were probably waiting for more output buffers. */
        netif_wake_subqueue(vi->dev, vq2txq(vq));
}

static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
{
        unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
        return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
}

static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
{
        return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
}

static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
{
        unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
        return (unsigned long)buf | (size - 1);
}
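
/* Illustrative example (not part of the file): with MERGEABLE_BUFFER_ALIGN
 * == 256, a 256-byte-aligned buffer at address A with truesize 1536 encodes
 * as ctx = A | (1536 / 256 - 1) = A | 5; decoding recovers the address as
 * ctx & ~255UL and the truesize as ((ctx & 255) + 1) * 256 = 1536. The
 * packing works because buffer addresses are MERGEABLE_BUFFER_ALIGN-aligned,
 * leaving the low bits free to carry the size.
 */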

/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct receive_queue *rq,
                                   struct page *page, unsigned int offset,
                                   unsigned int len, unsigned int truesize)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        unsigned int copy, hdr_len, hdr_padded_len;
        char *p;

        p = page_address(page) + offset;

        /* copy small packet so we can reuse these pages for small data */
        skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
        if (unlikely(!skb))
                return NULL;

        hdr = skb_vnet_hdr(skb);

        if (vi->mergeable_rx_bufs) {
                hdr_len = sizeof hdr->mhdr;
                hdr_padded_len = sizeof hdr->mhdr;
        } else {
                hdr_len = sizeof hdr->hdr;
                hdr_padded_len = sizeof(struct padded_vnet_hdr);
        }

        memcpy(hdr, p, hdr_len);

        len -= hdr_len;
        offset += hdr_padded_len;
        p += hdr_padded_len;

        copy = len;
        if (copy > skb_tailroom(skb))
                copy = skb_tailroom(skb);
        memcpy(skb_put(skb, copy), p, copy);

        len -= copy;
        offset += copy;

        if (vi->mergeable_rx_bufs) {
                if (len)
                        skb_add_rx_frag(skb, 0, page, offset, len, truesize);
                else
                        put_page(page);
                return skb;
        }

        /*
         * Verify that we can indeed put this data into a skb.
         * This is here to handle cases when the device erroneously
         * tries to receive more than is possible. This is usually
         * the case of a broken device.
         */
        if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
                net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
                dev_kfree_skb(skb);
                return NULL;
        }
        BUG_ON(offset >= PAGE_SIZE);
        while (len) {
                unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                                frag_size, truesize);
                len -= frag_size;
                page = (struct page *)page->private;
                offset = 0;
        }

        if (page)
                give_pages(rq, page);

        return skb;
}

static struct sk_buff *receive_small(void *buf, unsigned int len)
{
        struct sk_buff *skb = buf;

        len -= sizeof(struct virtio_net_hdr);
        skb_trim(skb, len);

        return skb;
}

static struct sk_buff *receive_big(struct net_device *dev,
                                   struct receive_queue *rq,
                                   void *buf,
                                   unsigned int len)
{
        struct page *page = buf;
        struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);

        if (unlikely(!skb))
                goto err;

        return skb;

err:
        dev->stats.rx_dropped++;
        give_pages(rq, page);
        return NULL;
}

static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         struct receive_queue *rq,
                                         unsigned long ctx,
                                         unsigned int len)
{
        void *buf = mergeable_ctx_to_buf_address(ctx);
        struct skb_vnet_hdr *hdr = buf;
        int num_buf = hdr->mhdr.num_buffers;
        struct page *page = virt_to_head_page(buf);
        int offset = buf - page_address(page);
        unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));

        struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
        struct sk_buff *curr_skb = head_skb;

        if (unlikely(!curr_skb))
                goto err_skb;
        while (--num_buf) {
                int num_skb_frags;

                ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!ctx)) {
                        pr_debug("%s: rx error: %d buffers out of %d missing\n",
                                 dev->name, num_buf, hdr->mhdr.num_buffers);
                        dev->stats.rx_length_errors++;
                        goto err_buf;
                }

                buf = mergeable_ctx_to_buf_address(ctx);
                page = virt_to_head_page(buf);

                num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
                if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
                        struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);

                        if (unlikely(!nskb))
                                goto err_skb;
                        if (curr_skb == head_skb)
                                skb_shinfo(curr_skb)->frag_list = nskb;
                        else
                                curr_skb->next = nskb;
                        curr_skb = nskb;
                        head_skb->truesize += nskb->truesize;
                        num_skb_frags = 0;
                }
                truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
                if (curr_skb != head_skb) {
                        head_skb->data_len += len;
                        head_skb->len += len;
                        head_skb->truesize += truesize;
                }
                offset = buf - page_address(page);
                if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
                        put_page(page);
                        skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
                                             len, truesize);
                } else {
                        skb_add_rx_frag(curr_skb, num_skb_frags, page,
                                        offset, len, truesize);
                }
        }

        ewma_add(&rq->mrg_avg_pkt_len, head_skb->len);
        return head_skb;

err_skb:
        put_page(page);
        while (--num_buf) {
                ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
                if (unlikely(!ctx)) {
                        pr_debug("%s: rx error: %d buffers missing\n",
                                 dev->name, num_buf);
                        dev->stats.rx_length_errors++;
                        break;
                }
                page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
                put_page(page);
        }
err_buf:
        dev->stats.rx_dropped++;
        dev_kfree_skb(head_skb);
        return NULL;
}

static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct net_device *dev = vi->dev;
        struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;

        if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
                pr_debug("%s: short packet %i\n", dev->name, len);
                dev->stats.rx_length_errors++;
                if (vi->mergeable_rx_bufs) {
                        unsigned long ctx = (unsigned long)buf;
                        void *base = mergeable_ctx_to_buf_address(ctx);
                        put_page(virt_to_head_page(base));
                } else if (vi->big_packets) {
                        give_pages(rq, buf);
                } else {
                        dev_kfree_skb(buf);
                }
                return;
        }

        if (vi->mergeable_rx_bufs)
                skb = receive_mergeable(dev, rq, (unsigned long)buf, len);
        else if (vi->big_packets)
                skb = receive_big(dev, rq, buf, len);
        else
                skb = receive_small(buf, len);

        if (unlikely(!skb))
                return;

        hdr = skb_vnet_hdr(skb);

        u64_stats_update_begin(&stats->rx_syncp);
        stats->rx_bytes += skb->len;
        stats->rx_packets++;
        u64_stats_update_end(&stats->rx_syncp);

        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
                if (!skb_partial_csum_set(skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
        } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

        skb->protocol = eth_type_trans(skb, dev);
        pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
                 ntohs(skb->protocol), skb->len, skb->pkt_type);

        if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
                pr_debug("GSO!\n");
                switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
                case VIRTIO_NET_HDR_GSO_TCPV4:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
                        break;
                case VIRTIO_NET_HDR_GSO_UDP:
                        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                        break;
                case VIRTIO_NET_HDR_GSO_TCPV6:
                        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
                        break;
                default:
                        net_warn_ratelimited("%s: bad gso type %u.\n",
                                             dev->name, hdr->hdr.gso_type);
                        goto frame_err;
                }

                if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
                        skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

                skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
                if (skb_shinfo(skb)->gso_size == 0) {
                        net_warn_ratelimited("%s: zero gso size.\n", dev->name);
                        goto frame_err;
                }

                /* Header must be checked, and gso_segs computed. */
                skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
                skb_shinfo(skb)->gso_segs = 0;
        }

        skb_mark_napi_id(skb, &rq->napi);

        netif_receive_skb(skb);
        return;

frame_err:
        dev->stats.rx_frame_errors++;
        dev_kfree_skb(skb);
}

static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
{
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct sk_buff *skb;
        struct skb_vnet_hdr *hdr;
        int err;

        skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
        if (unlikely(!skb))
                return -ENOMEM;

        skb_put(skb, GOOD_PACKET_LEN);

        hdr = skb_vnet_hdr(skb);
        sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);

        skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);

        err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
        if (err < 0)
                dev_kfree_skb(skb);

        return err;
}

static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
{
        struct page *first, *list = NULL;
        char *p;
        int i, err, offset;

        /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
        for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
                first = get_a_page(rq, gfp);
                if (!first) {
                        if (list)
                                give_pages(rq, list);
                        return -ENOMEM;
                }
                sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);

                /* chain new page in list head to match sg */
                first->private = (unsigned long)list;
                list = first;
        }

        first = get_a_page(rq, gfp);
        if (!first) {
                give_pages(rq, list);
                return -ENOMEM;
        }
        p = page_address(first);

        /* rq->sg[0], rq->sg[1] share the same page */
        /* a separate rq->sg[0] for virtio_net_hdr only due to QEMU bug */
        sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr));

        /* rq->sg[1] for data packet, from offset */
        offset = sizeof(struct padded_vnet_hdr);
        sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);

        /* chain first in list head */
        first->private = (unsigned long)list;
        err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
                                  first, gfp);
        if (err < 0)
                give_pages(rq, first);

        return err;
}

static unsigned int get_mergeable_buf_len(struct ewma *avg_pkt_len)
{
        const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        unsigned int len;

        len = hdr_len + clamp_t(unsigned int, ewma_read(avg_pkt_len),
                                GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
        return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
}
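
/* Illustrative example (not part of the file), assuming 4 KiB pages and
 * MERGEABLE_BUFFER_ALIGN == 256: an average packet size of 1000 is clamped
 * up to GOOD_PACKET_LEN (14 + 4 + 1500 = 1518); adding the 12-byte
 * mergeable header gives 1530, and ALIGN(1530, 256) yields a 1536-byte
 * buffer. Buffers thus range from 1536 bytes up to one page.
 */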
609 | 609 | ||
610 | static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) | 610 | static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) |
611 | { | 611 | { |
612 | struct page_frag *alloc_frag = &rq->alloc_frag; | 612 | struct page_frag *alloc_frag = &rq->alloc_frag; |
613 | char *buf; | 613 | char *buf; |
614 | unsigned long ctx; | 614 | unsigned long ctx; |
615 | int err; | 615 | int err; |
616 | unsigned int len, hole; | 616 | unsigned int len, hole; |
617 | 617 | ||
618 | len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); | 618 | len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); |
619 | if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) | 619 | if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) |
620 | return -ENOMEM; | 620 | return -ENOMEM; |
621 | 621 | ||
622 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; | 622 | buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; |
623 | ctx = mergeable_buf_to_ctx(buf, len); | 623 | ctx = mergeable_buf_to_ctx(buf, len); |
624 | get_page(alloc_frag->page); | 624 | get_page(alloc_frag->page); |
625 | alloc_frag->offset += len; | 625 | alloc_frag->offset += len; |
626 | hole = alloc_frag->size - alloc_frag->offset; | 626 | hole = alloc_frag->size - alloc_frag->offset; |
627 | if (hole < len) { | 627 | if (hole < len) { |
628 | /* To avoid internal fragmentation, if there is very likely not | 628 | /* To avoid internal fragmentation, if there is very likely not |
629 | * enough space for another buffer, add the remaining space to | 629 | * enough space for another buffer, add the remaining space to |
630 | * the current buffer. This extra space is not included in | 630 | * the current buffer. This extra space is not included in |
631 | * the truesize stored in ctx. | 631 | * the truesize stored in ctx. |
632 | */ | 632 | */ |
633 | len += hole; | 633 | len += hole; |
634 | alloc_frag->offset += hole; | 634 | alloc_frag->offset += hole; |
635 | } | 635 | } |
636 | 636 | ||
637 | sg_init_one(rq->sg, buf, len); | 637 | sg_init_one(rq->sg, buf, len); |
638 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); | 638 | err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); |
639 | if (err < 0) | 639 | if (err < 0) |
640 | put_page(virt_to_head_page(buf)); | 640 | put_page(virt_to_head_page(buf)); |
641 | 641 | ||
642 | return err; | 642 | return err; |
643 | } | 643 | } |
644 | 644 | ||
645 | /* | 645 | /* |
646 | * Returns false if we couldn't fill entirely (OOM). | 646 | * Returns false if we couldn't fill entirely (OOM). |
647 | * | 647 | * |
648 | * Normally run in the receive path, but can also be run from ndo_open | 648 | * Normally run in the receive path, but can also be run from ndo_open |
649 | * before we're receiving packets, or from refill_work which is | 649 | * before we're receiving packets, or from refill_work which is |
650 | * careful to disable receiving (using napi_disable). | 650 | * careful to disable receiving (using napi_disable). |
651 | */ | 651 | */ |
652 | static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) | 652 | static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) |
653 | { | 653 | { |
654 | struct virtnet_info *vi = rq->vq->vdev->priv; | 654 | struct virtnet_info *vi = rq->vq->vdev->priv; |
655 | int err; | 655 | int err; |
656 | bool oom; | 656 | bool oom; |
657 | 657 | ||
658 | gfp |= __GFP_COLD; | 658 | gfp |= __GFP_COLD; |
659 | do { | 659 | do { |
660 | if (vi->mergeable_rx_bufs) | 660 | if (vi->mergeable_rx_bufs) |
661 | err = add_recvbuf_mergeable(rq, gfp); | 661 | err = add_recvbuf_mergeable(rq, gfp); |
662 | else if (vi->big_packets) | 662 | else if (vi->big_packets) |
663 | err = add_recvbuf_big(rq, gfp); | 663 | err = add_recvbuf_big(rq, gfp); |
664 | else | 664 | else |
665 | err = add_recvbuf_small(rq, gfp); | 665 | err = add_recvbuf_small(rq, gfp); |
666 | 666 | ||
667 | oom = err == -ENOMEM; | 667 | oom = err == -ENOMEM; |
668 | if (err) | 668 | if (err) |
669 | break; | 669 | break; |
670 | } while (rq->vq->num_free); | 670 | } while (rq->vq->num_free); |
671 | virtqueue_kick(rq->vq); | 671 | virtqueue_kick(rq->vq); |
672 | return !oom; | 672 | return !oom; |
673 | } | 673 | } |
674 | 674 | ||
675 | static void skb_recv_done(struct virtqueue *rvq) | 675 | static void skb_recv_done(struct virtqueue *rvq) |
676 | { | 676 | { |
677 | struct virtnet_info *vi = rvq->vdev->priv; | 677 | struct virtnet_info *vi = rvq->vdev->priv; |
678 | struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; | 678 | struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; |
679 | 679 | ||
680 | /* Schedule NAPI, Suppress further interrupts if successful. */ | 680 | /* Schedule NAPI, Suppress further interrupts if successful. */ |
681 | if (napi_schedule_prep(&rq->napi)) { | 681 | if (napi_schedule_prep(&rq->napi)) { |
682 | virtqueue_disable_cb(rvq); | 682 | virtqueue_disable_cb(rvq); |
683 | __napi_schedule(&rq->napi); | 683 | __napi_schedule(&rq->napi); |
684 | } | 684 | } |
685 | } | 685 | } |
686 | 686 | ||
687 | static void virtnet_napi_enable(struct receive_queue *rq) | 687 | static void virtnet_napi_enable(struct receive_queue *rq) |
688 | { | 688 | { |
689 | napi_enable(&rq->napi); | 689 | napi_enable(&rq->napi); |
690 | 690 | ||
691 | /* If all buffers were filled by other side before we napi_enabled, we | 691 | /* If all buffers were filled by other side before we napi_enabled, we |
692 | * won't get another interrupt, so process any outstanding packets | 692 | * won't get another interrupt, so process any outstanding packets |
693 | * now. virtnet_poll wants re-enable the queue, so we disable here. | 693 | * now. virtnet_poll wants re-enable the queue, so we disable here. |
694 | * We synchronize against interrupts via NAPI_STATE_SCHED */ | 694 | * We synchronize against interrupts via NAPI_STATE_SCHED */ |
695 | if (napi_schedule_prep(&rq->napi)) { | 695 | if (napi_schedule_prep(&rq->napi)) { |
696 | virtqueue_disable_cb(rq->vq); | 696 | virtqueue_disable_cb(rq->vq); |
697 | local_bh_disable(); | 697 | local_bh_disable(); |
698 | __napi_schedule(&rq->napi); | 698 | __napi_schedule(&rq->napi); |
699 | local_bh_enable(); | 699 | local_bh_enable(); |
700 | } | 700 | } |
701 | } | 701 | } |
702 | 702 | ||
703 | static void refill_work(struct work_struct *work) | 703 | static void refill_work(struct work_struct *work) |
704 | { | 704 | { |
705 | struct virtnet_info *vi = | 705 | struct virtnet_info *vi = |
706 | container_of(work, struct virtnet_info, refill.work); | 706 | container_of(work, struct virtnet_info, refill.work); |
707 | bool still_empty; | 707 | bool still_empty; |
708 | int i; | 708 | int i; |
709 | 709 | ||
710 | for (i = 0; i < vi->curr_queue_pairs; i++) { | 710 | for (i = 0; i < vi->curr_queue_pairs; i++) { |
711 | struct receive_queue *rq = &vi->rq[i]; | 711 | struct receive_queue *rq = &vi->rq[i]; |
712 | 712 | ||
713 | napi_disable(&rq->napi); | 713 | napi_disable(&rq->napi); |
714 | still_empty = !try_fill_recv(rq, GFP_KERNEL); | 714 | still_empty = !try_fill_recv(rq, GFP_KERNEL); |
715 | virtnet_napi_enable(rq); | 715 | virtnet_napi_enable(rq); |
716 | 716 | ||
717 | /* In theory, this can happen: if we don't get any buffers in | 717 | /* In theory, this can happen: if we don't get any buffers in |
718 | * we will *never* try to fill again. | 718 | * we will *never* try to fill again. |
719 | */ | 719 | */ |
720 | if (still_empty) | 720 | if (still_empty) |
721 | schedule_delayed_work(&vi->refill, HZ/2); | 721 | schedule_delayed_work(&vi->refill, HZ/2); |
722 | } | 722 | } |
723 | } | 723 | } |
724 | 724 | ||
725 | static int virtnet_receive(struct receive_queue *rq, int budget) | 725 | static int virtnet_receive(struct receive_queue *rq, int budget) |
726 | { | 726 | { |
727 | struct virtnet_info *vi = rq->vq->vdev->priv; | 727 | struct virtnet_info *vi = rq->vq->vdev->priv; |
728 | unsigned int len, received = 0; | 728 | unsigned int len, received = 0; |
729 | void *buf; | 729 | void *buf; |
730 | 730 | ||
731 | while (received < budget && | 731 | while (received < budget && |
732 | (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { | 732 | (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { |
733 | receive_buf(rq, buf, len); | 733 | receive_buf(rq, buf, len); |
734 | received++; | 734 | received++; |
735 | } | 735 | } |
736 | 736 | ||
737 | if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { | 737 | if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { |
738 | if (!try_fill_recv(rq, GFP_ATOMIC)) | 738 | if (!try_fill_recv(rq, GFP_ATOMIC)) |
739 | schedule_delayed_work(&vi->refill, 0); | 739 | schedule_delayed_work(&vi->refill, 0); |
740 | } | 740 | } |
741 | 741 | ||
742 | return received; | 742 | return received; |
743 | } | 743 | } |
744 | 744 | ||
745 | static int virtnet_poll(struct napi_struct *napi, int budget) | 745 | static int virtnet_poll(struct napi_struct *napi, int budget) |
746 | { | 746 | { |
747 | struct receive_queue *rq = | 747 | struct receive_queue *rq = |
748 | container_of(napi, struct receive_queue, napi); | 748 | container_of(napi, struct receive_queue, napi); |
749 | unsigned int r, received = 0; | 749 | unsigned int r, received = 0; |
750 | 750 | ||
751 | again: | 751 | again: |
752 | received += virtnet_receive(rq, budget - received); | 752 | received += virtnet_receive(rq, budget - received); |
753 | 753 | ||
754 | /* Out of packets? */ | 754 | /* Out of packets? */ |
755 | if (received < budget) { | 755 | if (received < budget) { |
756 | r = virtqueue_enable_cb_prepare(rq->vq); | 756 | r = virtqueue_enable_cb_prepare(rq->vq); |
757 | napi_complete(napi); | 757 | napi_complete(napi); |
758 | if (unlikely(virtqueue_poll(rq->vq, r)) && | 758 | if (unlikely(virtqueue_poll(rq->vq, r)) && |
759 | napi_schedule_prep(napi)) { | 759 | napi_schedule_prep(napi)) { |
760 | virtqueue_disable_cb(rq->vq); | 760 | virtqueue_disable_cb(rq->vq); |
761 | __napi_schedule(napi); | 761 | __napi_schedule(napi); |
762 | goto again; | 762 | goto again; |
763 | } | 763 | } |
764 | } | 764 | } |
765 | 765 | ||
766 | return received; | 766 | return received; |
767 | } | 767 | } |
768 | 768 | ||
769 | #ifdef CONFIG_NET_RX_BUSY_POLL | 769 | #ifdef CONFIG_NET_RX_BUSY_POLL |
770 | /* must be called with local_bh_disable()d */ | 770 | /* must be called with local_bh_disable()d */ |
771 | static int virtnet_busy_poll(struct napi_struct *napi) | 771 | static int virtnet_busy_poll(struct napi_struct *napi) |
772 | { | 772 | { |
773 | struct receive_queue *rq = | 773 | struct receive_queue *rq = |
774 | container_of(napi, struct receive_queue, napi); | 774 | container_of(napi, struct receive_queue, napi); |
775 | struct virtnet_info *vi = rq->vq->vdev->priv; | 775 | struct virtnet_info *vi = rq->vq->vdev->priv; |
776 | int r, received = 0, budget = 4; | 776 | int r, received = 0, budget = 4; |
777 | 777 | ||
778 | if (!(vi->status & VIRTIO_NET_S_LINK_UP)) | 778 | if (!(vi->status & VIRTIO_NET_S_LINK_UP)) |
779 | return LL_FLUSH_FAILED; | 779 | return LL_FLUSH_FAILED; |
780 | 780 | ||
781 | if (!napi_schedule_prep(napi)) | 781 | if (!napi_schedule_prep(napi)) |
782 | return LL_FLUSH_BUSY; | 782 | return LL_FLUSH_BUSY; |
783 | 783 | ||
784 | virtqueue_disable_cb(rq->vq); | 784 | virtqueue_disable_cb(rq->vq); |
785 | 785 | ||
786 | again: | 786 | again: |
787 | received += virtnet_receive(rq, budget); | 787 | received += virtnet_receive(rq, budget); |
788 | 788 | ||
789 | r = virtqueue_enable_cb_prepare(rq->vq); | 789 | r = virtqueue_enable_cb_prepare(rq->vq); |
790 | clear_bit(NAPI_STATE_SCHED, &napi->state); | 790 | clear_bit(NAPI_STATE_SCHED, &napi->state); |
791 | if (unlikely(virtqueue_poll(rq->vq, r)) && | 791 | if (unlikely(virtqueue_poll(rq->vq, r)) && |
792 | napi_schedule_prep(napi)) { | 792 | napi_schedule_prep(napi)) { |
793 | virtqueue_disable_cb(rq->vq); | 793 | virtqueue_disable_cb(rq->vq); |
794 | if (received < budget) { | 794 | if (received < budget) { |
795 | budget -= received; | 795 | budget -= received; |
796 | goto again; | 796 | goto again; |
797 | } else { | 797 | } else { |
798 | __napi_schedule(napi); | 798 | __napi_schedule(napi); |
799 | } | 799 | } |
800 | } | 800 | } |
801 | 801 | ||
802 | return received; | 802 | return received; |
803 | } | 803 | } |
804 | #endif /* CONFIG_NET_RX_BUSY_POLL */ | 804 | #endif /* CONFIG_NET_RX_BUSY_POLL */ |
805 | 805 | ||
806 | static int virtnet_open(struct net_device *dev) | 806 | static int virtnet_open(struct net_device *dev) |
807 | { | 807 | { |
808 | struct virtnet_info *vi = netdev_priv(dev); | 808 | struct virtnet_info *vi = netdev_priv(dev); |
809 | int i; | 809 | int i; |
810 | 810 | ||
811 | for (i = 0; i < vi->max_queue_pairs; i++) { | 811 | for (i = 0; i < vi->max_queue_pairs; i++) { |
812 | if (i < vi->curr_queue_pairs) | 812 | if (i < vi->curr_queue_pairs) |
813 | /* Make sure we have some buffers: if oom use wq. */ | 813 | /* Make sure we have some buffers: if oom use wq. */ |
814 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) | 814 | if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) |
815 | schedule_delayed_work(&vi->refill, 0); | 815 | schedule_delayed_work(&vi->refill, 0); |
816 | virtnet_napi_enable(&vi->rq[i]); | 816 | virtnet_napi_enable(&vi->rq[i]); |
817 | } | 817 | } |
818 | 818 | ||
819 | return 0; | 819 | return 0; |
820 | } | 820 | } |
821 | 821 | ||
822 | static void free_old_xmit_skbs(struct send_queue *sq) | 822 | static void free_old_xmit_skbs(struct send_queue *sq) |
823 | { | 823 | { |
824 | struct sk_buff *skb; | 824 | struct sk_buff *skb; |
825 | unsigned int len; | 825 | unsigned int len; |
826 | struct virtnet_info *vi = sq->vq->vdev->priv; | 826 | struct virtnet_info *vi = sq->vq->vdev->priv; |
827 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); | 827 | struct virtnet_stats *stats = this_cpu_ptr(vi->stats); |
828 | 828 | ||
829 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { | 829 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
830 | pr_debug("Sent skb %p\n", skb); | 830 | pr_debug("Sent skb %p\n", skb); |
831 | 831 | ||
832 | u64_stats_update_begin(&stats->tx_syncp); | 832 | u64_stats_update_begin(&stats->tx_syncp); |
833 | stats->tx_bytes += skb->len; | 833 | stats->tx_bytes += skb->len; |
834 | stats->tx_packets++; | 834 | stats->tx_packets++; |
835 | u64_stats_update_end(&stats->tx_syncp); | 835 | u64_stats_update_end(&stats->tx_syncp); |
836 | 836 | ||
837 | dev_kfree_skb_any(skb); | 837 | dev_kfree_skb_any(skb); |
838 | } | 838 | } |
839 | } | 839 | } |
840 | 840 | ||
841 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) | 841 | static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) |
842 | { | 842 | { |
843 | struct skb_vnet_hdr *hdr; | 843 | struct skb_vnet_hdr *hdr; |
844 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; | 844 | const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; |
845 | struct virtnet_info *vi = sq->vq->vdev->priv; | 845 | struct virtnet_info *vi = sq->vq->vdev->priv; |
846 | unsigned num_sg; | 846 | unsigned num_sg; |
847 | unsigned hdr_len; | 847 | unsigned hdr_len; |
848 | bool can_push; | 848 | bool can_push; |
849 | 849 | ||
850 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); | 850 | pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); |
851 | if (vi->mergeable_rx_bufs) | 851 | if (vi->mergeable_rx_bufs) |
852 | hdr_len = sizeof hdr->mhdr; | 852 | hdr_len = sizeof hdr->mhdr; |
853 | else | 853 | else |
854 | hdr_len = sizeof hdr->hdr; | 854 | hdr_len = sizeof hdr->hdr; |
855 | 855 | ||
856 | can_push = vi->any_header_sg && | 856 | can_push = vi->any_header_sg && |
857 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && | 857 | !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && |
858 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; | 858 | !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; |
859 | /* Even if we can, don't push here yet as this would skew | 859 | /* Even if we can, don't push here yet as this would skew |
860 | * csum_start offset below. */ | 860 | * csum_start offset below. */ |
861 | if (can_push) | 861 | if (can_push) |
862 | hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); | 862 | hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len); |
863 | else | 863 | else |
864 | hdr = skb_vnet_hdr(skb); | 864 | hdr = skb_vnet_hdr(skb); |
865 | 865 | ||
866 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 866 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
867 | hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; | 867 | hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
868 | hdr->hdr.csum_start = skb_checksum_start_offset(skb); | 868 | hdr->hdr.csum_start = skb_checksum_start_offset(skb); |
869 | hdr->hdr.csum_offset = skb->csum_offset; | 869 | hdr->hdr.csum_offset = skb->csum_offset; |
870 | } else { | 870 | } else { |
871 | hdr->hdr.flags = 0; | 871 | hdr->hdr.flags = 0; |
872 | hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; | 872 | hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; |
873 | } | 873 | } |
874 | 874 | ||
875 | if (skb_is_gso(skb)) { | 875 | if (skb_is_gso(skb)) { |
876 | hdr->hdr.hdr_len = skb_headlen(skb); | 876 | hdr->hdr.hdr_len = skb_headlen(skb); |
877 | hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; | 877 | hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; |
878 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) | 878 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) |
879 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; | 879 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
880 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) | 880 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) |
881 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; | 881 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
882 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) | 882 | else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) |
883 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; | 883 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; |
884 | else | 884 | else |
885 | BUG(); | 885 | BUG(); |
886 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) | 886 | if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) |
887 | hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; | 887 | hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; |
888 | } else { | 888 | } else { |
889 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; | 889 | hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; |
890 | hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; | 890 | hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; |
891 | } | 891 | } |
892 | 892 | ||
893 | if (vi->mergeable_rx_bufs) | 893 | if (vi->mergeable_rx_bufs) |
894 | hdr->mhdr.num_buffers = 0; | 894 | hdr->mhdr.num_buffers = 0; |
895 | 895 | ||
896 | if (can_push) { | 896 | if (can_push) { |
897 | __skb_push(skb, hdr_len); | 897 | __skb_push(skb, hdr_len); |
898 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); | 898 | num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); |
899 | /* Pull header back to avoid skew in tx bytes calculations. */ | 899 | /* Pull header back to avoid skew in tx bytes calculations. */ |
900 | __skb_pull(skb, hdr_len); | 900 | __skb_pull(skb, hdr_len); |
901 | } else { | 901 | } else { |
902 | sg_set_buf(sq->sg, hdr, hdr_len); | 902 | sg_set_buf(sq->sg, hdr, hdr_len); |
903 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; | 903 | num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; |
904 | } | 904 | } |
905 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); | 905 | return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); |
906 | } | 906 | } |
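The can_push path in xmit_skb() writes the virtio-net header into the skb's own headroom instead of a separate scatterlist entry. That needs three things: the device must accept any header layout (vi->any_header_sg), skb->data - hdr_len must stay aligned for the header struct, and the unshared headroom must hold the header. A minimal userspace sketch of that test (illustrative names, not kernel API; the skb_header_cloned check is omitted):

#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the 10-byte legacy virtio-net header: alignment is 2. */
struct vnet_hdr {
    uint8_t  flags, gso_type;
    uint16_t hdr_len, gso_size, csum_start, csum_offset;
};

/* Mirror of the can_push test: the header may be written at
 * data - hdr_len only if data is aligned for the header type
 * and the headroom can hold it. */
static bool can_push_hdr(const unsigned char *data, size_t headroom,
                         size_t hdr_len)
{
    return !((uintptr_t)data & (alignof(struct vnet_hdr) - 1)) &&
           headroom >= hdr_len;
}

int main(void)
{
    alignas(16) unsigned char buf[64];
    unsigned char *data = buf + 16;  /* packet payload starts 16 bytes in */

    printf("can push: %s\n",
           can_push_hdr(data, 16, sizeof(struct vnet_hdr)) ? "yes" : "no");
    return 0;
}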
907 | 907 | ||
908 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) | 908 | static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) |
909 | { | 909 | { |
910 | struct virtnet_info *vi = netdev_priv(dev); | 910 | struct virtnet_info *vi = netdev_priv(dev); |
911 | int qnum = skb_get_queue_mapping(skb); | 911 | int qnum = skb_get_queue_mapping(skb); |
912 | struct send_queue *sq = &vi->sq[qnum]; | 912 | struct send_queue *sq = &vi->sq[qnum]; |
913 | int err; | 913 | int err; |
914 | 914 | ||
915 | /* Free up any pending old buffers before queueing new ones. */ | 915 | /* Free up any pending old buffers before queueing new ones. */ |
916 | free_old_xmit_skbs(sq); | 916 | free_old_xmit_skbs(sq); |
917 | 917 | ||
918 | /* Try to transmit */ | 918 | /* Try to transmit */ |
919 | err = xmit_skb(sq, skb); | 919 | err = xmit_skb(sq, skb); |
920 | 920 | ||
921 | /* This should not happen! */ | 921 | /* This should not happen! */ |
922 | if (unlikely(err)) { | 922 | if (unlikely(err)) { |
923 | dev->stats.tx_fifo_errors++; | 923 | dev->stats.tx_fifo_errors++; |
924 | if (net_ratelimit()) | 924 | if (net_ratelimit()) |
925 | dev_warn(&dev->dev, | 925 | dev_warn(&dev->dev, |
926 | "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); | 926 | "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); |
927 | dev->stats.tx_dropped++; | 927 | dev->stats.tx_dropped++; |
928 | dev_kfree_skb_any(skb); | 928 | dev_kfree_skb_any(skb); |
929 | return NETDEV_TX_OK; | 929 | return NETDEV_TX_OK; |
930 | } | 930 | } |
931 | virtqueue_kick(sq->vq); | 931 | virtqueue_kick(sq->vq); |
932 | 932 | ||
933 | /* Don't wait up for transmitted skbs to be freed. */ | 933 | /* Don't wait up for transmitted skbs to be freed. */ |
934 | skb_orphan(skb); | 934 | skb_orphan(skb); |
935 | nf_reset(skb); | 935 | nf_reset(skb); |
936 | 936 | ||
937 | /* Apparently nice girls don't return TX_BUSY; stop the queue | 937 | /* Apparently nice girls don't return TX_BUSY; stop the queue |
938 | * before it gets out of hand. Naturally, this wastes entries. */ | 938 | * before it gets out of hand. Naturally, this wastes entries. */ |
939 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { | 939 | if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { |
940 | netif_stop_subqueue(dev, qnum); | 940 | netif_stop_subqueue(dev, qnum); |
941 | if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { | 941 | if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { |
942 | /* More just got used, free them then recheck. */ | 942 | /* More just got used, free them then recheck. */ |
943 | free_old_xmit_skbs(sq); | 943 | free_old_xmit_skbs(sq); |
944 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { | 944 | if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { |
945 | netif_start_subqueue(dev, qnum); | 945 | netif_start_subqueue(dev, qnum); |
946 | virtqueue_disable_cb(sq->vq); | 946 | virtqueue_disable_cb(sq->vq); |
947 | } | 947 | } |
948 | } | 948 | } |
949 | } | 949 | } |
950 | 950 | ||
951 | return NETDEV_TX_OK; | 951 | return NETDEV_TX_OK; |
952 | } | 952 | } |
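The tail of start_xmit() avoids ever returning NETDEV_TX_BUSY: once fewer than 2 + MAX_SKB_FRAGS descriptors remain, a worst-case packet (one entry per page fragment plus the header and linear data) might not fit, so the queue is stopped early, the completion callback is re-armed, and the check is redone to close the race with in-flight completions. A toy model of the stop threshold (fake counters, not kernel API; the MAX_SKB_FRAGS value is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS 17  /* illustrative; the real value depends on page size */

/* A worst-case skb needs one descriptor per fragment plus two more
 * (virtio-net header and linear data), so stop the queue early. */
static bool must_stop(int num_free)
{
    return num_free < 2 + MAX_SKB_FRAGS;
}

int main(void)
{
    int num_free = 24;

    for (int pkt = 0; pkt < 4; pkt++) {
        num_free -= 2;  /* pretend each skb consumed two descriptors */
        printf("after pkt %d: %2d free -> %s\n", pkt, num_free,
               must_stop(num_free) ? "stop queue" : "keep going");
    }
    return 0;
}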
953 | 953 | ||
954 | /* | 954 | /* |
955 | * Send command via the control virtqueue and check status. Commands | 955 | * Send command via the control virtqueue and check status. Commands |
956 | * supported by the hypervisor, as indicated by feature bits, should | 956 | * supported by the hypervisor, as indicated by feature bits, should |
957 | * never fail unless improperly formatted. | 957 | * never fail unless improperly formatted. |
958 | */ | 958 | */ |
959 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, | 959 | static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
960 | struct scatterlist *out) | 960 | struct scatterlist *out) |
961 | { | 961 | { |
962 | struct scatterlist *sgs[4], hdr, stat; | 962 | struct scatterlist *sgs[4], hdr, stat; |
963 | struct virtio_net_ctrl_hdr ctrl; | 963 | struct virtio_net_ctrl_hdr ctrl; |
964 | virtio_net_ctrl_ack status = ~0; | 964 | virtio_net_ctrl_ack status = ~0; |
965 | unsigned out_num = 0, tmp; | 965 | unsigned out_num = 0, tmp; |
966 | 966 | ||
967 | /* Caller should know better */ | 967 | /* Caller should know better */ |
968 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); | 968 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
969 | 969 | ||
970 | ctrl.class = class; | 970 | ctrl.class = class; |
971 | ctrl.cmd = cmd; | 971 | ctrl.cmd = cmd; |
972 | /* Add header */ | 972 | /* Add header */ |
973 | sg_init_one(&hdr, &ctrl, sizeof(ctrl)); | 973 | sg_init_one(&hdr, &ctrl, sizeof(ctrl)); |
974 | sgs[out_num++] = &hdr; | 974 | sgs[out_num++] = &hdr; |
975 | 975 | ||
976 | if (out) | 976 | if (out) |
977 | sgs[out_num++] = out; | 977 | sgs[out_num++] = out; |
978 | 978 | ||
979 | /* Add return status. */ | 979 | /* Add return status. */ |
980 | sg_init_one(&stat, &status, sizeof(status)); | 980 | sg_init_one(&stat, &status, sizeof(status)); |
981 | sgs[out_num] = &stat; | 981 | sgs[out_num] = &stat; |
982 | 982 | ||
983 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); | 983 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
984 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); | 984 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
985 | 985 | ||
986 | if (unlikely(!virtqueue_kick(vi->cvq))) | 986 | if (unlikely(!virtqueue_kick(vi->cvq))) |
987 | return status == VIRTIO_NET_OK; | 987 | return status == VIRTIO_NET_OK; |
988 | 988 | ||
989 | /* Spin for a response; the kick causes an ioport write, trapping | 989 | /* Spin for a response; the kick causes an ioport write, trapping |
990 | * into the hypervisor, so the request should be handled immediately. | 990 | * into the hypervisor, so the request should be handled immediately. |
991 | */ | 991 | */ |
992 | while (!virtqueue_get_buf(vi->cvq, &tmp) && | 992 | while (!virtqueue_get_buf(vi->cvq, &tmp) && |
993 | !virtqueue_is_broken(vi->cvq)) | 993 | !virtqueue_is_broken(vi->cvq)) |
994 | cpu_relax(); | 994 | cpu_relax(); |
995 | 995 | ||
996 | return status == VIRTIO_NET_OK; | 996 | return status == VIRTIO_NET_OK; |
997 | } | 997 | } |
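virtnet_send_command() hands the device at most four scatterlist entries: a command header and an optional payload that the driver writes (counted in out_num), plus a one-byte status the device writes back, passed as the single "in" entry. A userspace sketch of that buffer layout using plain pointer/length pairs (struct names are illustrative):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ctrl_hdr { uint8_t class, cmd; };        /* mirrors virtio_net_ctrl_hdr */
struct buf { const void *base; size_t len; };   /* stand-in for one sg entry */

int main(void)
{
    struct ctrl_hdr hdr = { .class = 4, .cmd = 0 };  /* e.g. the MQ command */
    uint16_t pairs = 2;      /* optional command payload */
    uint8_t status = 0xff;   /* device overwrites this; 0 == VIRTIO_NET_OK */

    struct buf sgs[4];
    unsigned out_num = 0;

    sgs[out_num++] = (struct buf){ &hdr, sizeof(hdr) };     /* header: driver -> device */
    sgs[out_num++] = (struct buf){ &pairs, sizeof(pairs) }; /* payload: driver -> device */
    sgs[out_num] = (struct buf){ &status, sizeof(status) }; /* ack: device -> driver */

    printf("%u out entries + 1 in entry = %u total (max 4)\n",
           out_num, out_num + 1);
    return 0;
}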
998 | 998 | ||
999 | static int virtnet_set_mac_address(struct net_device *dev, void *p) | 999 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
1000 | { | 1000 | { |
1001 | struct virtnet_info *vi = netdev_priv(dev); | 1001 | struct virtnet_info *vi = netdev_priv(dev); |
1002 | struct virtio_device *vdev = vi->vdev; | 1002 | struct virtio_device *vdev = vi->vdev; |
1003 | int ret; | 1003 | int ret; |
1004 | struct sockaddr *addr = p; | 1004 | struct sockaddr *addr = p; |
1005 | struct scatterlist sg; | 1005 | struct scatterlist sg; |
1006 | 1006 | ||
1007 | ret = eth_prepare_mac_addr_change(dev, p); | 1007 | ret = eth_prepare_mac_addr_change(dev, p); |
1008 | if (ret) | 1008 | if (ret) |
1009 | return ret; | 1009 | return ret; |
1010 | 1010 | ||
1011 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { | 1011 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { |
1012 | sg_init_one(&sg, addr->sa_data, dev->addr_len); | 1012 | sg_init_one(&sg, addr->sa_data, dev->addr_len); |
1013 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | 1013 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
1014 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { | 1014 | VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { |
1015 | dev_warn(&vdev->dev, | 1015 | dev_warn(&vdev->dev, |
1016 | "Failed to set mac address by vq command.\n"); | 1016 | "Failed to set mac address by vq command.\n"); |
1017 | return -EINVAL; | 1017 | return -EINVAL; |
1018 | } | 1018 | } |
1019 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { | 1019 | } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) { |
1020 | unsigned int i; | 1020 | unsigned int i; |
1021 | 1021 | ||
1022 | /* Naturally, this has an atomicity problem. */ | 1022 | /* Naturally, this has an atomicity problem. */ |
1023 | for (i = 0; i < dev->addr_len; i++) | 1023 | for (i = 0; i < dev->addr_len; i++) |
1024 | virtio_cwrite8(vdev, | 1024 | virtio_cwrite8(vdev, |
1025 | offsetof(struct virtio_net_config, mac) + | 1025 | offsetof(struct virtio_net_config, mac) + |
1026 | i, addr->sa_data[i]); | 1026 | i, addr->sa_data[i]); |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | eth_commit_mac_addr_change(dev, p); | 1029 | eth_commit_mac_addr_change(dev, p); |
1030 | 1030 | ||
1031 | return 0; | 1031 | return 0; |
1032 | } | 1032 | } |
1033 | 1033 | ||
1034 | static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, | 1034 | static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev, |
1035 | struct rtnl_link_stats64 *tot) | 1035 | struct rtnl_link_stats64 *tot) |
1036 | { | 1036 | { |
1037 | struct virtnet_info *vi = netdev_priv(dev); | 1037 | struct virtnet_info *vi = netdev_priv(dev); |
1038 | int cpu; | 1038 | int cpu; |
1039 | unsigned int start; | 1039 | unsigned int start; |
1040 | 1040 | ||
1041 | for_each_possible_cpu(cpu) { | 1041 | for_each_possible_cpu(cpu) { |
1042 | struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); | 1042 | struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); |
1043 | u64 tpackets, tbytes, rpackets, rbytes; | 1043 | u64 tpackets, tbytes, rpackets, rbytes; |
1044 | 1044 | ||
1045 | do { | 1045 | do { |
1046 | start = u64_stats_fetch_begin_irq(&stats->tx_syncp); | 1046 | start = u64_stats_fetch_begin_irq(&stats->tx_syncp); |
1047 | tpackets = stats->tx_packets; | 1047 | tpackets = stats->tx_packets; |
1048 | tbytes = stats->tx_bytes; | 1048 | tbytes = stats->tx_bytes; |
1049 | } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); | 1049 | } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start)); |
1050 | 1050 | ||
1051 | do { | 1051 | do { |
1052 | start = u64_stats_fetch_begin_irq(&stats->rx_syncp); | 1052 | start = u64_stats_fetch_begin_irq(&stats->rx_syncp); |
1053 | rpackets = stats->rx_packets; | 1053 | rpackets = stats->rx_packets; |
1054 | rbytes = stats->rx_bytes; | 1054 | rbytes = stats->rx_bytes; |
1055 | } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); | 1055 | } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start)); |
1056 | 1056 | ||
1057 | tot->rx_packets += rpackets; | 1057 | tot->rx_packets += rpackets; |
1058 | tot->tx_packets += tpackets; | 1058 | tot->tx_packets += tpackets; |
1059 | tot->rx_bytes += rbytes; | 1059 | tot->rx_bytes += rbytes; |
1060 | tot->tx_bytes += tbytes; | 1060 | tot->tx_bytes += tbytes; |
1061 | } | 1061 | } |
1062 | 1062 | ||
1063 | tot->tx_dropped = dev->stats.tx_dropped; | 1063 | tot->tx_dropped = dev->stats.tx_dropped; |
1064 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; | 1064 | tot->tx_fifo_errors = dev->stats.tx_fifo_errors; |
1065 | tot->rx_dropped = dev->stats.rx_dropped; | 1065 | tot->rx_dropped = dev->stats.rx_dropped; |
1066 | tot->rx_length_errors = dev->stats.rx_length_errors; | 1066 | tot->rx_length_errors = dev->stats.rx_length_errors; |
1067 | tot->rx_frame_errors = dev->stats.rx_frame_errors; | 1067 | tot->rx_frame_errors = dev->stats.rx_frame_errors; |
1068 | 1068 | ||
1069 | return tot; | 1069 | return tot; |
1070 | } | 1070 | } |
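virtnet_stats() pairs with the u64_stats_update_begin/end calls in the rx/tx paths: each per-cpu counter set is guarded by a sequence counter, and the reader retries its snapshot whenever the sequence moved underneath it, so 64-bit counters cannot be read half-updated on 32-bit machines. A simplified, single-threaded sketch of that protocol (hand-rolled sequence counter; the kernel version adds memory barriers and preemption handling):

#include <stdint.h>
#include <stdio.h>

struct stats {
    unsigned seq;          /* odd while a write is in progress */
    uint64_t tx_packets;
    uint64_t tx_bytes;
};

static void write_begin(struct stats *s) { s->seq++; }  /* becomes odd */
static void write_end(struct stats *s)   { s->seq++; }  /* becomes even */

static void snapshot(const struct stats *s, uint64_t *p, uint64_t *b)
{
    unsigned start;
    do {
        while ((start = s->seq) & 1)
            ;                      /* writer active: wait it out */
        *p = s->tx_packets;
        *b = s->tx_bytes;
    } while (s->seq != start);     /* retry if a write slipped in */
}

int main(void)
{
    struct stats s = { 0 };
    uint64_t p, b;

    write_begin(&s);
    s.tx_packets++;
    s.tx_bytes += 1500;
    write_end(&s);

    snapshot(&s, &p, &b);
    printf("packets=%llu bytes=%llu\n",
           (unsigned long long)p, (unsigned long long)b);
    return 0;
}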
1071 | 1071 | ||
1072 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1072 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1073 | static void virtnet_netpoll(struct net_device *dev) | 1073 | static void virtnet_netpoll(struct net_device *dev) |
1074 | { | 1074 | { |
1075 | struct virtnet_info *vi = netdev_priv(dev); | 1075 | struct virtnet_info *vi = netdev_priv(dev); |
1076 | int i; | 1076 | int i; |
1077 | 1077 | ||
1078 | for (i = 0; i < vi->curr_queue_pairs; i++) | 1078 | for (i = 0; i < vi->curr_queue_pairs; i++) |
1079 | napi_schedule(&vi->rq[i].napi); | 1079 | napi_schedule(&vi->rq[i].napi); |
1080 | } | 1080 | } |
1081 | #endif | 1081 | #endif |
1082 | 1082 | ||
1083 | static void virtnet_ack_link_announce(struct virtnet_info *vi) | 1083 | static void virtnet_ack_link_announce(struct virtnet_info *vi) |
1084 | { | 1084 | { |
1085 | rtnl_lock(); | 1085 | rtnl_lock(); |
1086 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, | 1086 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE, |
1087 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) | 1087 | VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL)) |
1088 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); | 1088 | dev_warn(&vi->dev->dev, "Failed to ack link announce.\n"); |
1089 | rtnl_unlock(); | 1089 | rtnl_unlock(); |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) | 1092 | static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
1093 | { | 1093 | { |
1094 | struct scatterlist sg; | 1094 | struct scatterlist sg; |
1095 | struct virtio_net_ctrl_mq s; | 1095 | struct virtio_net_ctrl_mq s; |
1096 | struct net_device *dev = vi->dev; | 1096 | struct net_device *dev = vi->dev; |
1097 | 1097 | ||
1098 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) | 1098 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
1099 | return 0; | 1099 | return 0; |
1100 | 1100 | ||
1101 | s.virtqueue_pairs = queue_pairs; | 1101 | s.virtqueue_pairs = queue_pairs; |
1102 | sg_init_one(&sg, &s, sizeof(s)); | 1102 | sg_init_one(&sg, &s, sizeof(s)); |
1103 | 1103 | ||
1104 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, | 1104 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
1105 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { | 1105 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
1106 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", | 1106 | dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n", |
1107 | queue_pairs); | 1107 | queue_pairs); |
1108 | return -EINVAL; | 1108 | return -EINVAL; |
1109 | } else { | 1109 | } else { |
1110 | vi->curr_queue_pairs = queue_pairs; | 1110 | vi->curr_queue_pairs = queue_pairs; |
1111 | /* virtnet_open() will refill when the device goes up. */ | 1111 | /* virtnet_open() will refill when the device goes up. */ |
1112 | if (dev->flags & IFF_UP) | 1112 | if (dev->flags & IFF_UP) |
1113 | schedule_delayed_work(&vi->refill, 0); | 1113 | schedule_delayed_work(&vi->refill, 0); |
1114 | } | 1114 | } |
1115 | 1115 | ||
1116 | return 0; | 1116 | return 0; |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | static int virtnet_close(struct net_device *dev) | 1119 | static int virtnet_close(struct net_device *dev) |
1120 | { | 1120 | { |
1121 | struct virtnet_info *vi = netdev_priv(dev); | 1121 | struct virtnet_info *vi = netdev_priv(dev); |
1122 | int i; | 1122 | int i; |
1123 | 1123 | ||
1124 | /* Make sure refill_work doesn't re-enable napi! */ | 1124 | /* Make sure refill_work doesn't re-enable napi! */ |
1125 | cancel_delayed_work_sync(&vi->refill); | 1125 | cancel_delayed_work_sync(&vi->refill); |
1126 | 1126 | ||
1127 | for (i = 0; i < vi->max_queue_pairs; i++) | 1127 | for (i = 0; i < vi->max_queue_pairs; i++) |
1128 | napi_disable(&vi->rq[i].napi); | 1128 | napi_disable(&vi->rq[i].napi); |
1129 | 1129 | ||
1130 | return 0; | 1130 | return 0; |
1131 | } | 1131 | } |
1132 | 1132 | ||
1133 | static void virtnet_set_rx_mode(struct net_device *dev) | 1133 | static void virtnet_set_rx_mode(struct net_device *dev) |
1134 | { | 1134 | { |
1135 | struct virtnet_info *vi = netdev_priv(dev); | 1135 | struct virtnet_info *vi = netdev_priv(dev); |
1136 | struct scatterlist sg[2]; | 1136 | struct scatterlist sg[2]; |
1137 | u8 promisc, allmulti; | 1137 | u8 promisc, allmulti; |
1138 | struct virtio_net_ctrl_mac *mac_data; | 1138 | struct virtio_net_ctrl_mac *mac_data; |
1139 | struct netdev_hw_addr *ha; | 1139 | struct netdev_hw_addr *ha; |
1140 | int uc_count; | 1140 | int uc_count; |
1141 | int mc_count; | 1141 | int mc_count; |
1142 | void *buf; | 1142 | void *buf; |
1143 | int i; | 1143 | int i; |
1144 | 1144 | ||
1145 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ | 1145 | /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ |
1146 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) | 1146 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
1147 | return; | 1147 | return; |
1148 | 1148 | ||
1149 | promisc = ((dev->flags & IFF_PROMISC) != 0); | 1149 | promisc = ((dev->flags & IFF_PROMISC) != 0); |
1150 | allmulti = ((dev->flags & IFF_ALLMULTI) != 0); | 1150 | allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
1151 | 1151 | ||
1152 | sg_init_one(sg, &promisc, sizeof(promisc)); | 1152 | sg_init_one(sg, &promisc, sizeof(promisc)); |
1153 | 1153 | ||
1154 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | 1154 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
1155 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) | 1155 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
1156 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", | 1156 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
1157 | promisc ? "en" : "dis"); | 1157 | promisc ? "en" : "dis"); |
1158 | 1158 | ||
1159 | sg_init_one(sg, &allmulti, sizeof(allmulti)); | 1159 | sg_init_one(sg, &allmulti, sizeof(allmulti)); |
1160 | 1160 | ||
1161 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, | 1161 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
1162 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) | 1162 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
1163 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", | 1163 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
1164 | allmulti ? "en" : "dis"); | 1164 | allmulti ? "en" : "dis"); |
1165 | 1165 | ||
1166 | uc_count = netdev_uc_count(dev); | 1166 | uc_count = netdev_uc_count(dev); |
1167 | mc_count = netdev_mc_count(dev); | 1167 | mc_count = netdev_mc_count(dev); |
1168 | /* MAC filter - use one buffer for both lists */ | 1168 | /* MAC filter - use one buffer for both lists */ |
1169 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + | 1169 | buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + |
1170 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); | 1170 | (2 * sizeof(mac_data->entries)), GFP_ATOMIC); |
1171 | mac_data = buf; | 1171 | mac_data = buf; |
1172 | if (!buf) | 1172 | if (!buf) |
1173 | return; | 1173 | return; |
1174 | 1174 | ||
1175 | sg_init_table(sg, 2); | 1175 | sg_init_table(sg, 2); |
1176 | 1176 | ||
1177 | /* Store the unicast list and count in the front of the buffer */ | 1177 | /* Store the unicast list and count in the front of the buffer */ |
1178 | mac_data->entries = uc_count; | 1178 | mac_data->entries = uc_count; |
1179 | i = 0; | 1179 | i = 0; |
1180 | netdev_for_each_uc_addr(ha, dev) | 1180 | netdev_for_each_uc_addr(ha, dev) |
1181 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); | 1181 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
1182 | 1182 | ||
1183 | sg_set_buf(&sg[0], mac_data, | 1183 | sg_set_buf(&sg[0], mac_data, |
1184 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); | 1184 | sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); |
1185 | 1185 | ||
1186 | /* multicast list and count fill the end */ | 1186 | /* multicast list and count fill the end */ |
1187 | mac_data = (void *)&mac_data->macs[uc_count][0]; | 1187 | mac_data = (void *)&mac_data->macs[uc_count][0]; |
1188 | 1188 | ||
1189 | mac_data->entries = mc_count; | 1189 | mac_data->entries = mc_count; |
1190 | i = 0; | 1190 | i = 0; |
1191 | netdev_for_each_mc_addr(ha, dev) | 1191 | netdev_for_each_mc_addr(ha, dev) |
1192 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); | 1192 | memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); |
1193 | 1193 | ||
1194 | sg_set_buf(&sg[1], mac_data, | 1194 | sg_set_buf(&sg[1], mac_data, |
1195 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); | 1195 | sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); |
1196 | 1196 | ||
1197 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, | 1197 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, |
1198 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) | 1198 | VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) |
1199 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); | 1199 | dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); |
1200 | 1200 | ||
1201 | kfree(buf); | 1201 | kfree(buf); |
1202 | } | 1202 | } |
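The MAC-table command above packs two variable-length tables into one allocation: the unicast count and addresses first, then the multicast table starting immediately after the last unicast address, each covered by its own scatterlist entry. A userspace sketch of the same layout arithmetic (field sizes are illustrative; virtio uses its own fixed-width types):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct mac_table {
    uint32_t entries;          /* stand-in for the virtio count field */
    uint8_t macs[][ETH_ALEN];  /* flexible array of addresses */
};

int main(void)
{
    unsigned uc = 2, mc = 1;
    size_t sz = (uc + mc) * ETH_ALEN + 2 * sizeof(uint32_t);
    unsigned char *buf = calloc(1, sz);
    if (!buf)
        return 1;

    struct mac_table *t = (struct mac_table *)buf;
    t->entries = uc;
    memset(t->macs[0], 0xaa, ETH_ALEN);  /* fake unicast addresses */
    memset(t->macs[1], 0xbb, ETH_ALEN);

    /* the multicast table starts right after the last unicast address */
    struct mac_table *m = (struct mac_table *)&t->macs[uc][0];
    m->entries = mc;
    memset(m->macs[0], 0xcc, ETH_ALEN);

    printf("buffer %zu bytes: uc table %zu bytes, mc table %zu bytes\n",
           sz, sizeof(uint32_t) + uc * ETH_ALEN,
           sizeof(uint32_t) + mc * ETH_ALEN);
    free(buf);
    return 0;
}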
1203 | 1203 | ||
1204 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, | 1204 | static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
1205 | __be16 proto, u16 vid) | 1205 | __be16 proto, u16 vid) |
1206 | { | 1206 | { |
1207 | struct virtnet_info *vi = netdev_priv(dev); | 1207 | struct virtnet_info *vi = netdev_priv(dev); |
1208 | struct scatterlist sg; | 1208 | struct scatterlist sg; |
1209 | 1209 | ||
1210 | sg_init_one(&sg, &vid, sizeof(vid)); | 1210 | sg_init_one(&sg, &vid, sizeof(vid)); |
1211 | 1211 | ||
1212 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | 1212 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
1213 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) | 1213 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
1214 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); | 1214 | dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); |
1215 | return 0; | 1215 | return 0; |
1216 | } | 1216 | } |
1217 | 1217 | ||
1218 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, | 1218 | static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
1219 | __be16 proto, u16 vid) | 1219 | __be16 proto, u16 vid) |
1220 | { | 1220 | { |
1221 | struct virtnet_info *vi = netdev_priv(dev); | 1221 | struct virtnet_info *vi = netdev_priv(dev); |
1222 | struct scatterlist sg; | 1222 | struct scatterlist sg; |
1223 | 1223 | ||
1224 | sg_init_one(&sg, &vid, sizeof(vid)); | 1224 | sg_init_one(&sg, &vid, sizeof(vid)); |
1225 | 1225 | ||
1226 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, | 1226 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
1227 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) | 1227 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
1228 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); | 1228 | dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); |
1229 | return 0; | 1229 | return 0; |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) | 1232 | static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) |
1233 | { | 1233 | { |
1234 | int i; | 1234 | int i; |
1235 | 1235 | ||
1236 | if (vi->affinity_hint_set) { | 1236 | if (vi->affinity_hint_set) { |
1237 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1237 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1238 | virtqueue_set_affinity(vi->rq[i].vq, -1); | 1238 | virtqueue_set_affinity(vi->rq[i].vq, -1); |
1239 | virtqueue_set_affinity(vi->sq[i].vq, -1); | 1239 | virtqueue_set_affinity(vi->sq[i].vq, -1); |
1240 | } | 1240 | } |
1241 | 1241 | ||
1242 | vi->affinity_hint_set = false; | 1242 | vi->affinity_hint_set = false; |
1243 | } | 1243 | } |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | static void virtnet_set_affinity(struct virtnet_info *vi) | 1246 | static void virtnet_set_affinity(struct virtnet_info *vi) |
1247 | { | 1247 | { |
1248 | int i; | 1248 | int i; |
1249 | int cpu; | 1249 | int cpu; |
1250 | 1250 | ||
1251 | /* In multiqueue mode, when the number of cpus equals the number of | 1251 | /* In multiqueue mode, when the number of cpus equals the number of |
1252 | * queue pairs, we let each queue pair be private to one cpu by | 1252 | * queue pairs, we let each queue pair be private to one cpu by |
1253 | * setting the affinity hint to eliminate contention. | 1253 | * setting the affinity hint to eliminate contention. |
1254 | */ | 1254 | */ |
1255 | if (vi->curr_queue_pairs == 1 || | 1255 | if (vi->curr_queue_pairs == 1 || |
1256 | vi->max_queue_pairs != num_online_cpus()) { | 1256 | vi->max_queue_pairs != num_online_cpus()) { |
1257 | virtnet_clean_affinity(vi, -1); | 1257 | virtnet_clean_affinity(vi, -1); |
1258 | return; | 1258 | return; |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | i = 0; | 1261 | i = 0; |
1262 | for_each_online_cpu(cpu) { | 1262 | for_each_online_cpu(cpu) { |
1263 | virtqueue_set_affinity(vi->rq[i].vq, cpu); | 1263 | virtqueue_set_affinity(vi->rq[i].vq, cpu); |
1264 | virtqueue_set_affinity(vi->sq[i].vq, cpu); | 1264 | virtqueue_set_affinity(vi->sq[i].vq, cpu); |
1265 | netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); | 1265 | netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); |
1266 | i++; | 1266 | i++; |
1267 | } | 1267 | } |
1268 | 1268 | ||
1269 | vi->affinity_hint_set = true; | 1269 | vi->affinity_hint_set = true; |
1270 | } | 1270 | } |
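virtnet_set_affinity() only pins queues when there is exactly one online cpu per queue pair (and more than one pair); otherwise it clears the hints and leaves placement to the scheduler. The mapping itself is a plain 1:1 walk over the online cpus, sketched here with an array of cpu ids (plain C, no kernel API):

#include <stdio.h>

int main(void)
{
    int online_cpus[] = { 0, 1, 2, 3 };
    int nr_cpus = 4, queue_pairs = 4;

    /* pin affinity only in the 1:1 case, as virtnet_set_affinity does */
    if (queue_pairs == 1 || queue_pairs != nr_cpus) {
        printf("not 1:1 -> leave affinity hints unset\n");
        return 0;
    }

    for (int i = 0; i < queue_pairs; i++)
        printf("rq[%d]/sq[%d] -> cpu %d\n", i, i, online_cpus[i]);
    return 0;
}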
1271 | 1271 | ||
1272 | static int virtnet_cpu_callback(struct notifier_block *nfb, | 1272 | static int virtnet_cpu_callback(struct notifier_block *nfb, |
1273 | unsigned long action, void *hcpu) | 1273 | unsigned long action, void *hcpu) |
1274 | { | 1274 | { |
1275 | struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); | 1275 | struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); |
1276 | 1276 | ||
1277 | switch(action & ~CPU_TASKS_FROZEN) { | 1277 | switch(action & ~CPU_TASKS_FROZEN) { |
1278 | case CPU_ONLINE: | 1278 | case CPU_ONLINE: |
1279 | case CPU_DOWN_FAILED: | 1279 | case CPU_DOWN_FAILED: |
1280 | case CPU_DEAD: | 1280 | case CPU_DEAD: |
1281 | virtnet_set_affinity(vi); | 1281 | virtnet_set_affinity(vi); |
1282 | break; | 1282 | break; |
1283 | case CPU_DOWN_PREPARE: | 1283 | case CPU_DOWN_PREPARE: |
1284 | virtnet_clean_affinity(vi, (long)hcpu); | 1284 | virtnet_clean_affinity(vi, (long)hcpu); |
1285 | break; | 1285 | break; |
1286 | default: | 1286 | default: |
1287 | break; | 1287 | break; |
1288 | } | 1288 | } |
1289 | 1289 | ||
1290 | return NOTIFY_OK; | 1290 | return NOTIFY_OK; |
1291 | } | 1291 | } |
1292 | 1292 | ||
1293 | static void virtnet_get_ringparam(struct net_device *dev, | 1293 | static void virtnet_get_ringparam(struct net_device *dev, |
1294 | struct ethtool_ringparam *ring) | 1294 | struct ethtool_ringparam *ring) |
1295 | { | 1295 | { |
1296 | struct virtnet_info *vi = netdev_priv(dev); | 1296 | struct virtnet_info *vi = netdev_priv(dev); |
1297 | 1297 | ||
1298 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); | 1298 | ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); |
1299 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); | 1299 | ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); |
1300 | ring->rx_pending = ring->rx_max_pending; | 1300 | ring->rx_pending = ring->rx_max_pending; |
1301 | ring->tx_pending = ring->tx_max_pending; | 1301 | ring->tx_pending = ring->tx_max_pending; |
1302 | } | 1302 | } |
1303 | 1303 | ||
1304 | 1304 | ||
1305 | static void virtnet_get_drvinfo(struct net_device *dev, | 1305 | static void virtnet_get_drvinfo(struct net_device *dev, |
1306 | struct ethtool_drvinfo *info) | 1306 | struct ethtool_drvinfo *info) |
1307 | { | 1307 | { |
1308 | struct virtnet_info *vi = netdev_priv(dev); | 1308 | struct virtnet_info *vi = netdev_priv(dev); |
1309 | struct virtio_device *vdev = vi->vdev; | 1309 | struct virtio_device *vdev = vi->vdev; |
1310 | 1310 | ||
1311 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); | 1311 | strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); |
1312 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); | 1312 | strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); |
1313 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); | 1313 | strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); |
1314 | 1314 | ||
1315 | } | 1315 | } |
1316 | 1316 | ||
1317 | /* TODO: Eliminate OOO packets during switching */ | 1317 | /* TODO: Eliminate OOO packets during switching */ |
1318 | static int virtnet_set_channels(struct net_device *dev, | 1318 | static int virtnet_set_channels(struct net_device *dev, |
1319 | struct ethtool_channels *channels) | 1319 | struct ethtool_channels *channels) |
1320 | { | 1320 | { |
1321 | struct virtnet_info *vi = netdev_priv(dev); | 1321 | struct virtnet_info *vi = netdev_priv(dev); |
1322 | u16 queue_pairs = channels->combined_count; | 1322 | u16 queue_pairs = channels->combined_count; |
1323 | int err; | 1323 | int err; |
1324 | 1324 | ||
1325 | /* We don't support separate rx/tx channels. | 1325 | /* We don't support separate rx/tx channels. |
1326 | * We don't allow setting 'other' channels. | 1326 | * We don't allow setting 'other' channels. |
1327 | */ | 1327 | */ |
1328 | if (channels->rx_count || channels->tx_count || channels->other_count) | 1328 | if (channels->rx_count || channels->tx_count || channels->other_count) |
1329 | return -EINVAL; | 1329 | return -EINVAL; |
1330 | 1330 | ||
1331 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) | 1331 | if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) |
1332 | return -EINVAL; | 1332 | return -EINVAL; |
1333 | 1333 | ||
1334 | get_online_cpus(); | 1334 | get_online_cpus(); |
1335 | err = virtnet_set_queues(vi, queue_pairs); | 1335 | err = virtnet_set_queues(vi, queue_pairs); |
1336 | if (!err) { | 1336 | if (!err) { |
1337 | netif_set_real_num_tx_queues(dev, queue_pairs); | 1337 | netif_set_real_num_tx_queues(dev, queue_pairs); |
1338 | netif_set_real_num_rx_queues(dev, queue_pairs); | 1338 | netif_set_real_num_rx_queues(dev, queue_pairs); |
1339 | 1339 | ||
1340 | virtnet_set_affinity(vi); | 1340 | virtnet_set_affinity(vi); |
1341 | } | 1341 | } |
1342 | put_online_cpus(); | 1342 | put_online_cpus(); |
1343 | 1343 | ||
1344 | return err; | 1344 | return err; |
1345 | } | 1345 | } |
1346 | 1346 | ||
1347 | static void virtnet_get_channels(struct net_device *dev, | 1347 | static void virtnet_get_channels(struct net_device *dev, |
1348 | struct ethtool_channels *channels) | 1348 | struct ethtool_channels *channels) |
1349 | { | 1349 | { |
1350 | struct virtnet_info *vi = netdev_priv(dev); | 1350 | struct virtnet_info *vi = netdev_priv(dev); |
1351 | 1351 | ||
1352 | channels->combined_count = vi->curr_queue_pairs; | 1352 | channels->combined_count = vi->curr_queue_pairs; |
1353 | channels->max_combined = vi->max_queue_pairs; | 1353 | channels->max_combined = vi->max_queue_pairs; |
1354 | channels->max_other = 0; | 1354 | channels->max_other = 0; |
1355 | channels->rx_count = 0; | 1355 | channels->rx_count = 0; |
1356 | channels->tx_count = 0; | 1356 | channels->tx_count = 0; |
1357 | channels->other_count = 0; | 1357 | channels->other_count = 0; |
1358 | } | 1358 | } |
1359 | 1359 | ||
1360 | static const struct ethtool_ops virtnet_ethtool_ops = { | 1360 | static const struct ethtool_ops virtnet_ethtool_ops = { |
1361 | .get_drvinfo = virtnet_get_drvinfo, | 1361 | .get_drvinfo = virtnet_get_drvinfo, |
1362 | .get_link = ethtool_op_get_link, | 1362 | .get_link = ethtool_op_get_link, |
1363 | .get_ringparam = virtnet_get_ringparam, | 1363 | .get_ringparam = virtnet_get_ringparam, |
1364 | .set_channels = virtnet_set_channels, | 1364 | .set_channels = virtnet_set_channels, |
1365 | .get_channels = virtnet_get_channels, | 1365 | .get_channels = virtnet_get_channels, |
1366 | }; | 1366 | }; |
1367 | 1367 | ||
1368 | #define MIN_MTU 68 | 1368 | #define MIN_MTU 68 |
1369 | #define MAX_MTU 65535 | 1369 | #define MAX_MTU 65535 |
1370 | 1370 | ||
1371 | static int virtnet_change_mtu(struct net_device *dev, int new_mtu) | 1371 | static int virtnet_change_mtu(struct net_device *dev, int new_mtu) |
1372 | { | 1372 | { |
1373 | if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) | 1373 | if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) |
1374 | return -EINVAL; | 1374 | return -EINVAL; |
1375 | dev->mtu = new_mtu; | 1375 | dev->mtu = new_mtu; |
1376 | return 0; | 1376 | return 0; |
1377 | } | 1377 | } |
1378 | 1378 | ||
1379 | static const struct net_device_ops virtnet_netdev = { | 1379 | static const struct net_device_ops virtnet_netdev = { |
1380 | .ndo_open = virtnet_open, | 1380 | .ndo_open = virtnet_open, |
1381 | .ndo_stop = virtnet_close, | 1381 | .ndo_stop = virtnet_close, |
1382 | .ndo_start_xmit = start_xmit, | 1382 | .ndo_start_xmit = start_xmit, |
1383 | .ndo_validate_addr = eth_validate_addr, | 1383 | .ndo_validate_addr = eth_validate_addr, |
1384 | .ndo_set_mac_address = virtnet_set_mac_address, | 1384 | .ndo_set_mac_address = virtnet_set_mac_address, |
1385 | .ndo_set_rx_mode = virtnet_set_rx_mode, | 1385 | .ndo_set_rx_mode = virtnet_set_rx_mode, |
1386 | .ndo_change_mtu = virtnet_change_mtu, | 1386 | .ndo_change_mtu = virtnet_change_mtu, |
1387 | .ndo_get_stats64 = virtnet_stats, | 1387 | .ndo_get_stats64 = virtnet_stats, |
1388 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, | 1388 | .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, |
1389 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, | 1389 | .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, |
1390 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1390 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1391 | .ndo_poll_controller = virtnet_netpoll, | 1391 | .ndo_poll_controller = virtnet_netpoll, |
1392 | #endif | 1392 | #endif |
1393 | #ifdef CONFIG_NET_RX_BUSY_POLL | 1393 | #ifdef CONFIG_NET_RX_BUSY_POLL |
1394 | .ndo_busy_poll = virtnet_busy_poll, | 1394 | .ndo_busy_poll = virtnet_busy_poll, |
1395 | #endif | 1395 | #endif |
1396 | }; | 1396 | }; |
1397 | 1397 | ||
1398 | static void virtnet_config_changed_work(struct work_struct *work) | 1398 | static void virtnet_config_changed_work(struct work_struct *work) |
1399 | { | 1399 | { |
1400 | struct virtnet_info *vi = | 1400 | struct virtnet_info *vi = |
1401 | container_of(work, struct virtnet_info, config_work); | 1401 | container_of(work, struct virtnet_info, config_work); |
1402 | u16 v; | 1402 | u16 v; |
1403 | 1403 | ||
1404 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, | 1404 | if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, |
1405 | struct virtio_net_config, status, &v) < 0) | 1405 | struct virtio_net_config, status, &v) < 0) |
1406 | return; | 1406 | return; |
1407 | 1407 | ||
1408 | if (v & VIRTIO_NET_S_ANNOUNCE) { | 1408 | if (v & VIRTIO_NET_S_ANNOUNCE) { |
1409 | netdev_notify_peers(vi->dev); | 1409 | netdev_notify_peers(vi->dev); |
1410 | virtnet_ack_link_announce(vi); | 1410 | virtnet_ack_link_announce(vi); |
1411 | } | 1411 | } |
1412 | 1412 | ||
1413 | /* Ignore unknown (future) status bits */ | 1413 | /* Ignore unknown (future) status bits */ |
1414 | v &= VIRTIO_NET_S_LINK_UP; | 1414 | v &= VIRTIO_NET_S_LINK_UP; |
1415 | 1415 | ||
1416 | if (vi->status == v) | 1416 | if (vi->status == v) |
1417 | return; | 1417 | return; |
1418 | 1418 | ||
1419 | vi->status = v; | 1419 | vi->status = v; |
1420 | 1420 | ||
1421 | if (vi->status & VIRTIO_NET_S_LINK_UP) { | 1421 | if (vi->status & VIRTIO_NET_S_LINK_UP) { |
1422 | netif_carrier_on(vi->dev); | 1422 | netif_carrier_on(vi->dev); |
1423 | netif_tx_wake_all_queues(vi->dev); | 1423 | netif_tx_wake_all_queues(vi->dev); |
1424 | } else { | 1424 | } else { |
1425 | netif_carrier_off(vi->dev); | 1425 | netif_carrier_off(vi->dev); |
1426 | netif_tx_stop_all_queues(vi->dev); | 1426 | netif_tx_stop_all_queues(vi->dev); |
1427 | } | 1427 | } |
1428 | } | 1428 | } |
1429 | 1429 | ||
1430 | static void virtnet_config_changed(struct virtio_device *vdev) | 1430 | static void virtnet_config_changed(struct virtio_device *vdev) |
1431 | { | 1431 | { |
1432 | struct virtnet_info *vi = vdev->priv; | 1432 | struct virtnet_info *vi = vdev->priv; |
1433 | 1433 | ||
1434 | schedule_work(&vi->config_work); | 1434 | schedule_work(&vi->config_work); |
1435 | } | 1435 | } |
1436 | 1436 | ||
1437 | static void virtnet_free_queues(struct virtnet_info *vi) | 1437 | static void virtnet_free_queues(struct virtnet_info *vi) |
1438 | { | 1438 | { |
1439 | int i; | 1439 | int i; |
1440 | 1440 | ||
1441 | for (i = 0; i < vi->max_queue_pairs; i++) | 1441 | for (i = 0; i < vi->max_queue_pairs; i++) |
1442 | netif_napi_del(&vi->rq[i].napi); | 1442 | netif_napi_del(&vi->rq[i].napi); |
1443 | 1443 | ||
1444 | kfree(vi->rq); | 1444 | kfree(vi->rq); |
1445 | kfree(vi->sq); | 1445 | kfree(vi->sq); |
1446 | } | 1446 | } |
1447 | 1447 | ||
1448 | static void free_receive_bufs(struct virtnet_info *vi) | 1448 | static void free_receive_bufs(struct virtnet_info *vi) |
1449 | { | 1449 | { |
1450 | int i; | 1450 | int i; |
1451 | 1451 | ||
1452 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1452 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1453 | while (vi->rq[i].pages) | 1453 | while (vi->rq[i].pages) |
1454 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); | 1454 | __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); |
1455 | } | 1455 | } |
1456 | } | 1456 | } |
1457 | 1457 | ||
1458 | static void free_receive_page_frags(struct virtnet_info *vi) | 1458 | static void free_receive_page_frags(struct virtnet_info *vi) |
1459 | { | 1459 | { |
1460 | int i; | 1460 | int i; |
1461 | for (i = 0; i < vi->max_queue_pairs; i++) | 1461 | for (i = 0; i < vi->max_queue_pairs; i++) |
1462 | if (vi->rq[i].alloc_frag.page) | 1462 | if (vi->rq[i].alloc_frag.page) |
1463 | put_page(vi->rq[i].alloc_frag.page); | 1463 | put_page(vi->rq[i].alloc_frag.page); |
1464 | } | 1464 | } |
1465 | 1465 | ||
1466 | static void free_unused_bufs(struct virtnet_info *vi) | 1466 | static void free_unused_bufs(struct virtnet_info *vi) |
1467 | { | 1467 | { |
1468 | void *buf; | 1468 | void *buf; |
1469 | int i; | 1469 | int i; |
1470 | 1470 | ||
1471 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1471 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1472 | struct virtqueue *vq = vi->sq[i].vq; | 1472 | struct virtqueue *vq = vi->sq[i].vq; |
1473 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) | 1473 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) |
1474 | dev_kfree_skb(buf); | 1474 | dev_kfree_skb(buf); |
1475 | } | 1475 | } |
1476 | 1476 | ||
1477 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1477 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1478 | struct virtqueue *vq = vi->rq[i].vq; | 1478 | struct virtqueue *vq = vi->rq[i].vq; |
1479 | 1479 | ||
1480 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | 1480 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
1481 | if (vi->mergeable_rx_bufs) { | 1481 | if (vi->mergeable_rx_bufs) { |
1482 | unsigned long ctx = (unsigned long)buf; | 1482 | unsigned long ctx = (unsigned long)buf; |
1483 | void *base = mergeable_ctx_to_buf_address(ctx); | 1483 | void *base = mergeable_ctx_to_buf_address(ctx); |
1484 | put_page(virt_to_head_page(base)); | 1484 | put_page(virt_to_head_page(base)); |
1485 | } else if (vi->big_packets) { | 1485 | } else if (vi->big_packets) { |
1486 | give_pages(&vi->rq[i], buf); | 1486 | give_pages(&vi->rq[i], buf); |
1487 | } else { | 1487 | } else { |
1488 | dev_kfree_skb(buf); | 1488 | dev_kfree_skb(buf); |
1489 | } | 1489 | } |
1490 | } | 1490 | } |
1491 | } | 1491 | } |
1492 | } | 1492 | } |
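For mergeable receive buffers, free_unused_bufs() gets back an unsigned long context rather than a pointer, and mergeable_ctx_to_buf_address() (defined earlier in the file) extracts the real address. The underlying trick is pointer tagging: a pointer aligned to N bytes has log2(N) spare low bits for a small payload such as a scaled buffer size. A hedged sketch of that encoding; the 256-byte alignment is purely illustrative (the driver's actual constant, MERGEABLE_BUFFER_ALIGN, is not in this hunk):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ALIGN_BYTES 256UL  /* illustrative; gives 8 low tag bits */

static unsigned long ctx_pack(void *addr, unsigned long tag)
{
    assert(((uintptr_t)addr & (ALIGN_BYTES - 1)) == 0);
    assert(tag < ALIGN_BYTES);
    return (uintptr_t)addr | tag;
}

static void *ctx_to_addr(unsigned long ctx)
{
    return (void *)(ctx & ~(ALIGN_BYTES - 1));
}

static unsigned long ctx_to_tag(unsigned long ctx)
{
    return ctx & (ALIGN_BYTES - 1);
}

int main(void)
{
    void *buf = aligned_alloc(ALIGN_BYTES, 4096);
    unsigned long ctx = ctx_pack(buf, 42);  /* e.g. a scaled truesize */

    printf("addr ok: %d, tag: %lu\n", ctx_to_addr(ctx) == buf, ctx_to_tag(ctx));
    free(buf);
    return 0;
}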
1493 | 1493 | ||
1494 | static void virtnet_del_vqs(struct virtnet_info *vi) | 1494 | static void virtnet_del_vqs(struct virtnet_info *vi) |
1495 | { | 1495 | { |
1496 | struct virtio_device *vdev = vi->vdev; | 1496 | struct virtio_device *vdev = vi->vdev; |
1497 | 1497 | ||
1498 | virtnet_clean_affinity(vi, -1); | 1498 | virtnet_clean_affinity(vi, -1); |
1499 | 1499 | ||
1500 | vdev->config->del_vqs(vdev); | 1500 | vdev->config->del_vqs(vdev); |
1501 | 1501 | ||
1502 | virtnet_free_queues(vi); | 1502 | virtnet_free_queues(vi); |
1503 | } | 1503 | } |
1504 | 1504 | ||
1505 | static int virtnet_find_vqs(struct virtnet_info *vi) | 1505 | static int virtnet_find_vqs(struct virtnet_info *vi) |
1506 | { | 1506 | { |
1507 | vq_callback_t **callbacks; | 1507 | vq_callback_t **callbacks; |
1508 | struct virtqueue **vqs; | 1508 | struct virtqueue **vqs; |
1509 | int ret = -ENOMEM; | 1509 | int ret = -ENOMEM; |
1510 | int i, total_vqs; | 1510 | int i, total_vqs; |
1511 | const char **names; | 1511 | const char **names; |
1512 | 1512 | ||
1513 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by | 1513 | /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by |
1514 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by | 1514 | * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by |
1515 | * possible control vq. | 1515 | * possible control vq. |
1516 | */ | 1516 | */ |
1517 | total_vqs = vi->max_queue_pairs * 2 + | 1517 | total_vqs = vi->max_queue_pairs * 2 + |
1518 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); | 1518 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); |
1519 | 1519 | ||
1520 | /* Allocate space for find_vqs parameters */ | 1520 | /* Allocate space for find_vqs parameters */ |
1521 | vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); | 1521 | vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); |
1522 | if (!vqs) | 1522 | if (!vqs) |
1523 | goto err_vq; | 1523 | goto err_vq; |
1524 | callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); | 1524 | callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); |
1525 | if (!callbacks) | 1525 | if (!callbacks) |
1526 | goto err_callback; | 1526 | goto err_callback; |
1527 | names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); | 1527 | names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); |
1528 | if (!names) | 1528 | if (!names) |
1529 | goto err_names; | 1529 | goto err_names; |
1530 | 1530 | ||
1531 | /* Parameters for control virtqueue, if any */ | 1531 | /* Parameters for control virtqueue, if any */ |
1532 | if (vi->has_cvq) { | 1532 | if (vi->has_cvq) { |
1533 | callbacks[total_vqs - 1] = NULL; | 1533 | callbacks[total_vqs - 1] = NULL; |
1534 | names[total_vqs - 1] = "control"; | 1534 | names[total_vqs - 1] = "control"; |
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | /* Allocate/initialize parameters for send/receive virtqueues */ | 1537 | /* Allocate/initialize parameters for send/receive virtqueues */ |
1538 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1538 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1539 | callbacks[rxq2vq(i)] = skb_recv_done; | 1539 | callbacks[rxq2vq(i)] = skb_recv_done; |
1540 | callbacks[txq2vq(i)] = skb_xmit_done; | 1540 | callbacks[txq2vq(i)] = skb_xmit_done; |
1541 | sprintf(vi->rq[i].name, "input.%d", i); | 1541 | sprintf(vi->rq[i].name, "input.%d", i); |
1542 | sprintf(vi->sq[i].name, "output.%d", i); | 1542 | sprintf(vi->sq[i].name, "output.%d", i); |
1543 | names[rxq2vq(i)] = vi->rq[i].name; | 1543 | names[rxq2vq(i)] = vi->rq[i].name; |
1544 | names[txq2vq(i)] = vi->sq[i].name; | 1544 | names[txq2vq(i)] = vi->sq[i].name; |
1545 | } | 1545 | } |
1546 | 1546 | ||
1547 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, | 1547 | ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, |
1548 | names); | 1548 | names); |
1549 | if (ret) | 1549 | if (ret) |
1550 | goto err_find; | 1550 | goto err_find; |
1551 | 1551 | ||
1552 | if (vi->has_cvq) { | 1552 | if (vi->has_cvq) { |
1553 | vi->cvq = vqs[total_vqs - 1]; | 1553 | vi->cvq = vqs[total_vqs - 1]; |
1554 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) | 1554 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) |
1555 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; | 1555 | vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
1556 | } | 1556 | } |
1557 | 1557 | ||
1558 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1558 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1559 | vi->rq[i].vq = vqs[rxq2vq(i)]; | 1559 | vi->rq[i].vq = vqs[rxq2vq(i)]; |
1560 | vi->sq[i].vq = vqs[txq2vq(i)]; | 1560 | vi->sq[i].vq = vqs[txq2vq(i)]; |
1561 | } | 1561 | } |
1562 | 1562 | ||
1563 | kfree(names); | 1563 | kfree(names); |
1564 | kfree(callbacks); | 1564 | kfree(callbacks); |
1565 | kfree(vqs); | 1565 | kfree(vqs); |
1566 | 1566 | ||
1567 | return 0; | 1567 | return 0; |
1568 | 1568 | ||
1569 | err_find: | 1569 | err_find: |
1570 | kfree(names); | 1570 | kfree(names); |
1571 | err_names: | 1571 | err_names: |
1572 | kfree(callbacks); | 1572 | kfree(callbacks); |
1573 | err_callback: | 1573 | err_callback: |
1574 | kfree(vqs); | 1574 | kfree(vqs); |
1575 | err_vq: | 1575 | err_vq: |
1576 | return ret; | 1576 | return ret; |
1577 | } | 1577 | } |
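The comment at the top of virtnet_find_vqs() pins down the virtqueue layout: rx0, tx0, rx1, tx1, ..., with the optional control queue last. The rxq2vq()/txq2vq() helpers used in the loop (defined earlier in the driver) therefore reduce to simple index arithmetic, assuming that interleaving:

#include <stdio.h>

/* Interleaved layout: vq[2q] is the RX queue, vq[2q + 1] the TX queue. */
static int rxq2vq(int q) { return q * 2; }
static int txq2vq(int q) { return q * 2 + 1; }

int main(void)
{
    int max_queue_pairs = 3, has_cvq = 1;
    int total_vqs = max_queue_pairs * 2 + has_cvq;

    for (int q = 0; q < max_queue_pairs; q++)
        printf("pair %d: rx=vq[%d] tx=vq[%d]\n", q, rxq2vq(q), txq2vq(q));
    if (has_cvq)
        printf("control=vq[%d]\n", total_vqs - 1);
    return 0;
}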
1578 | 1578 | ||
1579 | static int virtnet_alloc_queues(struct virtnet_info *vi) | 1579 | static int virtnet_alloc_queues(struct virtnet_info *vi) |
1580 | { | 1580 | { |
1581 | int i; | 1581 | int i; |
1582 | 1582 | ||
1583 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); | 1583 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); |
1584 | if (!vi->sq) | 1584 | if (!vi->sq) |
1585 | goto err_sq; | 1585 | goto err_sq; |
1586 | vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); | 1586 | vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); |
1587 | if (!vi->rq) | 1587 | if (!vi->rq) |
1588 | goto err_rq; | 1588 | goto err_rq; |
1589 | 1589 | ||
1590 | INIT_DELAYED_WORK(&vi->refill, refill_work); | 1590 | INIT_DELAYED_WORK(&vi->refill, refill_work); |
1591 | for (i = 0; i < vi->max_queue_pairs; i++) { | 1591 | for (i = 0; i < vi->max_queue_pairs; i++) { |
1592 | vi->rq[i].pages = NULL; | 1592 | vi->rq[i].pages = NULL; |
1593 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, | 1593 | netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, |
1594 | napi_weight); | 1594 | napi_weight); |
1595 | napi_hash_add(&vi->rq[i].napi); | 1595 | napi_hash_add(&vi->rq[i].napi); |
1596 | 1596 | ||
1597 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); | 1597 | sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); |
1598 | ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT); | 1598 | ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT); |
1599 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); | 1599 | sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); |
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | return 0; | 1602 | return 0; |
1603 | 1603 | ||
1604 | err_rq: | 1604 | err_rq: |
1605 | kfree(vi->sq); | 1605 | kfree(vi->sq); |
1606 | err_sq: | 1606 | err_sq: |
1607 | return -ENOMEM; | 1607 | return -ENOMEM; |
1608 | } | 1608 | } |
1609 | 1609 | ||
1610 | static int init_vqs(struct virtnet_info *vi) | 1610 | static int init_vqs(struct virtnet_info *vi) |
1611 | { | 1611 | { |
1612 | int ret; | 1612 | int ret; |
1613 | 1613 | ||
1614 | /* Allocate send & receive queues */ | 1614 | /* Allocate send & receive queues */ |
1615 | ret = virtnet_alloc_queues(vi); | 1615 | ret = virtnet_alloc_queues(vi); |
1616 | if (ret) | 1616 | if (ret) |
1617 | goto err; | 1617 | goto err; |
1618 | 1618 | ||
1619 | ret = virtnet_find_vqs(vi); | 1619 | ret = virtnet_find_vqs(vi); |
1620 | if (ret) | 1620 | if (ret) |
1621 | goto err_free; | 1621 | goto err_free; |
1622 | 1622 | ||
1623 | get_online_cpus(); | 1623 | get_online_cpus(); |
1624 | virtnet_set_affinity(vi); | 1624 | virtnet_set_affinity(vi); |
1625 | put_online_cpus(); | 1625 | put_online_cpus(); |
1626 | 1626 | ||
1627 | return 0; | 1627 | return 0; |
1628 | 1628 | ||
1629 | err_free: | 1629 | err_free: |
1630 | virtnet_free_queues(vi); | 1630 | virtnet_free_queues(vi); |
1631 | err: | 1631 | err: |
1632 | return ret; | 1632 | return ret; |
1633 | } | 1633 | } |
1634 | 1634 | ||
1635 | #ifdef CONFIG_SYSFS | 1635 | #ifdef CONFIG_SYSFS |
1636 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, | 1636 | static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, |
1637 | struct rx_queue_attribute *attribute, char *buf) | 1637 | struct rx_queue_attribute *attribute, char *buf) |
1638 | { | 1638 | { |
1639 | struct virtnet_info *vi = netdev_priv(queue->dev); | 1639 | struct virtnet_info *vi = netdev_priv(queue->dev); |
1640 | unsigned int queue_index = get_netdev_rx_queue_index(queue); | 1640 | unsigned int queue_index = get_netdev_rx_queue_index(queue); |
1641 | struct ewma *avg; | 1641 | struct ewma *avg; |
1642 | 1642 | ||
1643 | BUG_ON(queue_index >= vi->max_queue_pairs); | 1643 | BUG_ON(queue_index >= vi->max_queue_pairs); |
1644 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; | 1644 | avg = &vi->rq[queue_index].mrg_avg_pkt_len; |
1645 | return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); | 1645 | return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); |
1646 | } | 1646 | } |
1647 | 1647 | ||
1648 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = | 1648 | static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = |
1649 | __ATTR_RO(mergeable_rx_buffer_size); | 1649 | __ATTR_RO(mergeable_rx_buffer_size); |
1650 | 1650 | ||
1651 | static struct attribute *virtio_net_mrg_rx_attrs[] = { | 1651 | static struct attribute *virtio_net_mrg_rx_attrs[] = { |
1652 | &mergeable_rx_buffer_size_attribute.attr, | 1652 | &mergeable_rx_buffer_size_attribute.attr, |
1653 | NULL | 1653 | NULL |
1654 | }; | 1654 | }; |
1655 | 1655 | ||
1656 | static const struct attribute_group virtio_net_mrg_rx_group = { | 1656 | static const struct attribute_group virtio_net_mrg_rx_group = { |
1657 | .name = "virtio_net", | 1657 | .name = "virtio_net", |
1658 | .attrs = virtio_net_mrg_rx_attrs | 1658 | .attrs = virtio_net_mrg_rx_attrs |
1659 | }; | 1659 | }; |
1660 | #endif | 1660 | #endif |
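The mergeable_rx_buffer_size attribute exposes a buffer-length estimate derived from the per-queue EWMA that virtnet_alloc_queues() seeds with ewma_init(..., 1, RECEIVE_AVG_WEIGHT); the receive path feeds packet lengths into it and sizes future buffers from the average. A simplified sketch of that kind of weighted average (the weight of 64 is an illustrative stand-in for RECEIVE_AVG_WEIGHT, and the kernel's ewma uses shifts and a scaling factor):

#include <stdio.h>

struct ewma { unsigned long internal; };

#define WEIGHT 64UL  /* illustrative stand-in for RECEIVE_AVG_WEIGHT */

/* avg moves 1/WEIGHT of the way toward each new sample. */
static void ewma_add(struct ewma *e, unsigned long val)
{
    e->internal = e->internal
        ? (e->internal * (WEIGHT - 1) + val) / WEIGHT
        : val;  /* first sample seeds the average */
}

int main(void)
{
    struct ewma avg = { 0 };
    unsigned long pkts[] = { 64, 1500, 1500, 1500, 9000 };

    for (unsigned i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++) {
        ewma_add(&avg, pkts[i]);
        printf("after %lu-byte packet: avg ~ %lu\n", pkts[i], avg.internal);
    }
    return 0;
}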
1661 | 1661 | ||
1662 | static int virtnet_probe(struct virtio_device *vdev) | 1662 | static int virtnet_probe(struct virtio_device *vdev) |
1663 | { | 1663 | { |
1664 | int i, err; | 1664 | int i, err; |
1665 | struct net_device *dev; | 1665 | struct net_device *dev; |
1666 | struct virtnet_info *vi; | 1666 | struct virtnet_info *vi; |
1667 | u16 max_queue_pairs; | 1667 | u16 max_queue_pairs; |
1668 | 1668 | ||
1669 | /* Find if host supports multiqueue virtio_net device */ | 1669 | /* Find if host supports multiqueue virtio_net device */ |
1670 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, | 1670 | err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, |
1671 | struct virtio_net_config, | 1671 | struct virtio_net_config, |
1672 | max_virtqueue_pairs, &max_queue_pairs); | 1672 | max_virtqueue_pairs, &max_queue_pairs); |
1673 | 1673 | ||
1674 | /* We need at least 2 queues */ | 1674 | /* We need at least 2 queues */ |
1675 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || | 1675 | if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || |
1676 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || | 1676 | max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || |
1677 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) | 1677 | !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
1678 | max_queue_pairs = 1; | 1678 | max_queue_pairs = 1; |
1679 | 1679 | ||
1680 | /* Allocate ourselves a network device with room for our info */ | 1680 | /* Allocate ourselves a network device with room for our info */ |
1681 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); | 1681 | dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); |
1682 | if (!dev) | 1682 | if (!dev) |
1683 | return -ENOMEM; | 1683 | return -ENOMEM; |
1684 | 1684 | ||
1685 | /* Set up network device as normal. */ | 1685 | /* Set up network device as normal. */ |
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;

	dev->ethtool_ops = &virtnet_ethtool_ops;
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (csum)
			dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;

		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->hw_features |= NETIF_F_TSO;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->hw_features |= NETIF_F_TSO6;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->hw_features |= NETIF_F_TSO_ECN;
		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->hw_features |= NETIF_F_UFO;

		if (gso)
			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
		/* (!csum && gso) case will be fixed by register_netdev() */
	}
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
		dev->features |= NETIF_F_RXCSUM;

	dev->vlan_features = dev->features;

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		virtio_cread_bytes(vdev,
				   offsetof(struct virtio_net_config, mac),
				   dev->dev_addr, dev->addr_len);
	else
		eth_hw_addr_random(dev);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->stats = alloc_percpu(struct virtnet_stats);
	err = -ENOMEM;
	if (vi->stats == NULL)
		goto free;

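	/* Initialize each per-cpu u64_stats_sync seqcount so 64-bit stats
	 * reads can retry consistently against concurrent updates (only a
	 * real operation on 32-bit SMP builds). */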
	for_each_possible_cpu(i) {
		struct virtnet_stats *virtnet_stats;
		virtnet_stats = per_cpu_ptr(vi->stats, i);
		u64_stats_init(&virtnet_stats->tx_syncp);
		u64_stats_init(&virtnet_stats->rx_syncp);
	}

	INIT_WORK(&vi->config_work, virtnet_config_changed_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
		vi->any_header_sg = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
		vi->has_cvq = true;

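	/* With VIRTIO_F_ANY_LAYOUT the virtio-net header need not sit in a
	 * separate buffer, so reserve headroom and prepend it to the packet
	 * data instead. */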
	if (vi->any_header_sg) {
		if (vi->mergeable_rx_bufs)
			dev->needed_headroom = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		else
			dev->needed_headroom = sizeof(struct virtio_net_hdr);
	}

	/* Use single tx/rx queue pair as default */
	vi->curr_queue_pairs = 1;
	vi->max_queue_pairs = max_queue_pairs;

	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
	err = init_vqs(vi);
	if (err)
		goto free_stats;

#ifdef CONFIG_SYSFS
	if (vi->mergeable_rx_bufs)
		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
#endif
	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

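	/* The virtio spec requires DRIVER_OK to be set before the driver
	 * uses any virtqueue; virtio_device_ready() sets it here, ahead of
	 * the receive-buffer fill below. */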
	virtio_device_ready(vdev);

	/* Last of all, set up some receive buffers. */
	for (i = 0; i < vi->curr_queue_pairs; i++) {
		try_fill_recv(&vi->rq[i], GFP_KERNEL);

		/* If we didn't even get one input buffer, we're useless. */
		if (vi->rq[i].vq->num_free ==
		    virtqueue_get_vring_size(vi->rq[i].vq)) {
			free_unused_bufs(vi);
			err = -ENOMEM;
			goto free_recv_bufs;
		}
	}

	vi->nb.notifier_call = &virtnet_cpu_callback;
	err = register_hotcpu_notifier(&vi->nb);
	if (err) {
		pr_debug("virtio_net: registering cpu notifier failed\n");
		goto free_recv_bufs;
	}

	/* Assume link up if device can't report link status,
	   otherwise get link status from config. */
	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
		netif_carrier_off(dev);
		schedule_work(&vi->config_work);
	} else {
		vi->status = VIRTIO_NET_S_LINK_UP;
		netif_carrier_on(dev);
	}

	pr_debug("virtnet: registered device %s with %d RX and TX vq's\n",
		 dev->name, max_queue_pairs);

	return 0;

free_recv_bufs:
	vi->vdev->config->reset(vdev);

	free_receive_bufs(vi);
	unregister_netdev(dev);
free_vqs:
	cancel_delayed_work_sync(&vi->refill);
	free_receive_page_frags(vi);
	virtnet_del_vqs(vi);
free_stats:
	free_percpu(vi->stats);
free:
	free_netdev(dev);
	return err;
}

static void remove_vq_common(struct virtnet_info *vi)
{
	vi->vdev->config->reset(vi->vdev);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	free_receive_bufs(vi);

	free_receive_page_frags(vi);

	virtnet_del_vqs(vi);
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	unregister_hotcpu_notifier(&vi->nb);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vi->config_work);

	unregister_netdev(vi->dev);

	remove_vq_common(vi);

	free_percpu(vi->stats);
	free_netdev(vi->dev);
}

#ifdef CONFIG_PM_SLEEP
static int virtnet_freeze(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int i;

	unregister_hotcpu_notifier(&vi->nb);

	/* Make sure no work handler is accessing the device */
	flush_work(&vi->config_work);

	netif_device_detach(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

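	/* Quiesce NAPI (and drop the instances from the busy-poll hash)
	 * before the virtqueues are torn down. */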
	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->max_queue_pairs; i++) {
			napi_disable(&vi->rq[i].napi);
			napi_hash_del(&vi->rq[i].napi);
			netif_napi_del(&vi->rq[i].napi);
		}
	}

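	/* Reset the device and free all buffers; the VQs are re-created by
	 * init_vqs() in virtnet_restore(). */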
	remove_vq_common(vi);

	return 0;
}

static int virtnet_restore(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	int err, i;

	err = init_vqs(vi);
	if (err)
		return err;

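	/* The two lines below (marked "+") are this commit's change: enable
	 * the VQs by setting DRIVER_OK via virtio_device_ready() *before*
	 * the receive VQs are used.  Previously restore filled them first,
	 * violating the virtio spec. */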
+	virtio_device_ready(vdev);
+
	if (netif_running(vi->dev)) {
		for (i = 0; i < vi->curr_queue_pairs; i++)
			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
				schedule_delayed_work(&vi->refill, 0);

		for (i = 0; i < vi->max_queue_pairs; i++)
			virtnet_napi_enable(&vi->rq[i]);
	}

	netif_device_attach(vi->dev);

	rtnl_lock();
	virtnet_set_queues(vi, vi->curr_queue_pairs);
	rtnl_unlock();

	err = register_hotcpu_notifier(&vi->nb);
	if (err)
		return err;

	return 0;
}
#endif

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

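/* Feature bits this driver understands; the virtio core negotiates the
 * final set with the device from this table. */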
static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
	VIRTIO_NET_F_CTRL_MAC_ADDR,
	VIRTIO_F_ANY_LAYOUT,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = virtnet_remove,
	.config_changed = virtnet_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtnet_freeze,
	.restore = virtnet_restore,
#endif
};

module_virtio_driver(virtio_net_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");