Commit 38e6bc185d9544dfad1774b3f8902a0b061aea25
Committed by
David S. Miller
1 parent
47be03a28c
netpoll: make __netpoll_cleanup non-block
Like the previous patch, slave_disable_netpoll() and __netpoll_cleanup() may be called with read_lock() held too, so we should make them non-blocking by moving the cleanup and kfree() to call_rcu_bh() callbacks.

Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 5 changed files with 38 additions and 23 deletions. Side-by-side Diff
drivers/net/bonding/bond_main.c
... | ... | @@ -1257,9 +1257,7 @@ |
1257 | 1257 | return; |
1258 | 1258 | |
1259 | 1259 | slave->np = NULL; |
1260 | - synchronize_rcu_bh(); | |
1261 | - __netpoll_cleanup(np); | |
1262 | - kfree(np); | |
1260 | + __netpoll_free_rcu(np); | |
1263 | 1261 | } |
1264 | 1262 | static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) |
1265 | 1263 | { |
include/linux/netpoll.h
... | ... | @@ -23,6 +23,7 @@ |
23 | 23 | u8 remote_mac[ETH_ALEN]; |
24 | 24 | |
25 | 25 | struct list_head rx; /* rx_np list element */ |
26 | + struct rcu_head rcu; | |
26 | 27 | }; |
27 | 28 | |
28 | 29 | struct netpoll_info { |
... | ... | @@ -38,6 +39,7 @@ |
38 | 39 | struct delayed_work tx_work; |
39 | 40 | |
40 | 41 | struct netpoll *netpoll; |
42 | + struct rcu_head rcu; | |
41 | 43 | }; |
42 | 44 | |
43 | 45 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len); |
... | ... | @@ -48,6 +50,7 @@ |
48 | 50 | int netpoll_trap(void); |
49 | 51 | void netpoll_set_trap(int trap); |
50 | 52 | void __netpoll_cleanup(struct netpoll *np); |
53 | +void __netpoll_free_rcu(struct netpoll *np); | |
51 | 54 | void netpoll_cleanup(struct netpoll *np); |
52 | 55 | int __netpoll_rx(struct sk_buff *skb); |
53 | 56 | void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, |
net/8021q/vlan_dev.c
... | ... | @@ -704,11 +704,7 @@ |
704 | 704 | |
705 | 705 | info->netpoll = NULL; |
706 | 706 | |
707 | - /* Wait for transmitting packets to finish before freeing. */ | |
708 | - synchronize_rcu_bh(); | |
709 | - | |
710 | - __netpoll_cleanup(netpoll); | |
711 | - kfree(netpoll); | |
707 | + __netpoll_free_rcu(netpoll); | |
712 | 708 | } |
713 | 709 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
714 | 710 |
net/bridge/br_device.c
net/core/netpoll.c
... | ... | @@ -878,6 +878,24 @@ |
878 | 878 | } |
879 | 879 | core_initcall(netpoll_init); |
880 | 880 | |
881 | +static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) | |
882 | +{ | |
883 | + struct netpoll_info *npinfo = | |
884 | + container_of(rcu_head, struct netpoll_info, rcu); | |
885 | + | |
886 | + skb_queue_purge(&npinfo->arp_tx); | |
887 | + skb_queue_purge(&npinfo->txq); | |
888 | + | |
889 | + /* we can't call cancel_delayed_work_sync here, as we are in softirq */ | |
890 | + cancel_delayed_work(&npinfo->tx_work); | |
891 | + | |
892 | + /* clean after last, unfinished work */ | |
893 | + __skb_queue_purge(&npinfo->txq); | |
894 | + /* now cancel it again */ | |
895 | + cancel_delayed_work(&npinfo->tx_work); | |
896 | + kfree(npinfo); | |
897 | +} | |
898 | + | |
881 | 899 | void __netpoll_cleanup(struct netpoll *np) |
882 | 900 | { |
883 | 901 | struct netpoll_info *npinfo; |
884 | 902 | |
885 | 903 | |
886 | 904 | |
887 | 905 | |
... | ... | @@ -903,20 +921,24 @@ |
903 | 921 | ops->ndo_netpoll_cleanup(np->dev); |
904 | 922 | |
905 | 923 | RCU_INIT_POINTER(np->dev->npinfo, NULL); |
924 | + call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); | |
925 | + } | |
926 | +} | |
927 | +EXPORT_SYMBOL_GPL(__netpoll_cleanup); | |
906 | 928 | |
907 | - /* avoid racing with NAPI reading npinfo */ | |
908 | - synchronize_rcu_bh(); | |
929 | +static void rcu_cleanup_netpoll(struct rcu_head *rcu_head) | |
930 | +{ | |
931 | + struct netpoll *np = container_of(rcu_head, struct netpoll, rcu); | |
909 | 932 | |
910 | - skb_queue_purge(&npinfo->arp_tx); | |
911 | - skb_queue_purge(&npinfo->txq); | |
912 | - cancel_delayed_work_sync(&npinfo->tx_work); | |
933 | + __netpoll_cleanup(np); | |
934 | + kfree(np); | |
935 | +} | |
913 | 936 | |
914 | - /* clean after last, unfinished work */ | |
915 | - __skb_queue_purge(&npinfo->txq); | |
916 | - kfree(npinfo); | |
917 | - } | |
937 | +void __netpoll_free_rcu(struct netpoll *np) | |
938 | +{ | |
939 | + call_rcu_bh(&np->rcu, rcu_cleanup_netpoll); | |
918 | 940 | } |
919 | -EXPORT_SYMBOL_GPL(__netpoll_cleanup); | |
941 | +EXPORT_SYMBOL_GPL(__netpoll_free_rcu); | |
920 | 942 | |
921 | 943 | void netpoll_cleanup(struct netpoll *np) |
922 | 944 | { |