Commit 2899656b494dcd118123af1126826b115c8ea6f9

Authored by Amerigo Wang
Committed by David S. Miller
1 parent 91fe4a4b9e

netpoll: take rcu_read_lock_bh() in netpoll_send_skb_on_dev()

This patch fixes several problems in the call path of
netpoll_send_skb_on_dev():

1. Disable IRQs before calling netpoll_send_skb_on_dev().

2. All the callees of netpoll_send_skb_on_dev() should use
   rcu_dereference_bh() to dereference ->npinfo.

3. Rename arp_reply() to netpoll_arp_reply(), the former is too generic.

Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 2 changed files with 20 additions and 14 deletions (side-by-side diff)

include/linux/netpoll.h
... ... @@ -57,7 +57,10 @@
57 57 struct net_device *dev);
58 58 static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
59 59 {
  60 + unsigned long flags;
  61 + local_irq_save(flags);
60 62 netpoll_send_skb_on_dev(np, skb, np->dev);
  63 + local_irq_restore(flags);
61 64 }
62 65  
63 66  
... ... @@ -54,7 +54,7 @@
54 54 MAX_UDP_CHUNK)
55 55  
56 56 static void zap_completion_queue(void);
57   -static void arp_reply(struct sk_buff *skb);
  57 +static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
58 58  
59 59 static unsigned int carrier_timeout = 4;
60 60 module_param(carrier_timeout, uint, 0644);
... ... @@ -170,7 +170,8 @@
170 170 list_for_each_entry(napi, &dev->napi_list, dev_list) {
171 171 if (napi->poll_owner != smp_processor_id() &&
172 172 spin_trylock(&napi->poll_lock)) {
173   - budget = poll_one_napi(dev->npinfo, napi, budget);
  173 + budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
  174 + napi, budget);
174 175 spin_unlock(&napi->poll_lock);
175 176  
176 177 if (!budget)
177 178  
... ... @@ -185,13 +186,14 @@
185 186 struct sk_buff *skb;
186 187  
187 188 while ((skb = skb_dequeue(&npi->arp_tx)))
188   - arp_reply(skb);
  189 + netpoll_arp_reply(skb, npi);
189 190 }
190 191 }
191 192  
192 193 static void netpoll_poll_dev(struct net_device *dev)
193 194 {
194 195 const struct net_device_ops *ops;
  196 + struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
195 197  
196 198 if (!dev || !netif_running(dev))
197 199 return;
198 200  
199 201  
200 202  
... ... @@ -206,17 +208,18 @@
206 208 poll_napi(dev);
207 209  
208 210 if (dev->flags & IFF_SLAVE) {
209   - if (dev->npinfo) {
  211 + if (ni) {
210 212 struct net_device *bond_dev = dev->master;
211 213 struct sk_buff *skb;
212   - while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
  214 + struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo);
  215 + while ((skb = skb_dequeue(&ni->arp_tx))) {
213 216 skb->dev = bond_dev;
214   - skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
  217 + skb_queue_tail(&bond_ni->arp_tx, skb);
215 218 }
216 219 }
217 220 }
218 221  
219   - service_arp_queue(dev->npinfo);
  222 + service_arp_queue(ni);
220 223  
221 224 zap_completion_queue();
222 225 }
... ... @@ -302,6 +305,7 @@
302 305 return 0;
303 306 }
304 307  
  308 +/* call with IRQ disabled */
305 309 void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
306 310 struct net_device *dev)
307 311 {
308 312  
... ... @@ -309,8 +313,11 @@
309 313 unsigned long tries;
310 314 const struct net_device_ops *ops = dev->netdev_ops;
311 315 /* It is up to the caller to keep npinfo alive. */
312   - struct netpoll_info *npinfo = np->dev->npinfo;
  316 + struct netpoll_info *npinfo;
313 317  
  318 + WARN_ON_ONCE(!irqs_disabled());
  319 +
  320 + npinfo = rcu_dereference_bh(np->dev->npinfo);
314 321 if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
315 322 __kfree_skb(skb);
316 323 return;
317 324  
... ... @@ -319,11 +326,9 @@
319 326 /* don't get messages out of order, and no recursion */
320 327 if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
321 328 struct netdev_queue *txq;
322   - unsigned long flags;
323 329  
324 330 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
325 331  
326   - local_irq_save(flags);
327 332 /* try until next clock tick */
328 333 for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
329 334 tries > 0; --tries) {
330 335  
... ... @@ -347,10 +352,9 @@
347 352 }
348 353  
349 354 WARN_ONCE(!irqs_disabled(),
350   - "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n",
  355 + "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
351 356 dev->name, ops->ndo_start_xmit);
352 357  
353   - local_irq_restore(flags);
354 358 }
355 359  
356 360 if (status != NETDEV_TX_OK) {
357 361  
... ... @@ -423,9 +427,8 @@
423 427 }
424 428 EXPORT_SYMBOL(netpoll_send_udp);
425 429  
426   -static void arp_reply(struct sk_buff *skb)
  430 +static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
427 431 {
428   - struct netpoll_info *npinfo = skb->dev->npinfo;
429 432 struct arphdr *arp;
430 433 unsigned char *arp_ptr;
431 434 int size, type = ARPOP_REPLY, ptype = ETH_P_ARP;