Commit 47be03a28cc6c80e3aa2b3e8ed6d960ff0c5c0af
Committed by: David S. Miller
1 parent: ddf343f635
netpoll: use GFP_ATOMIC in slave_enable_netpoll() and __netpoll_setup()
slave_enable_netpoll() and __netpoll_setup() may be called with read_lock() held, so they should use GFP_ATOMIC to allocate memory. Eric suggested passing gfp flags to __netpoll_setup().

Cc: Eric Dumazet <eric.dumazet@gmail.com>
Cc: "David S. Miller" <davem@davemloft.net>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Showing 9 changed files with 32 additions and 28 deletions (side-by-side diff)
drivers/net/bonding/bond_main.c
... | ... | @@ -1235,12 +1235,12 @@ |
1235 | 1235 | struct netpoll *np; |
1236 | 1236 | int err = 0; |
1237 | 1237 | |
1238 | - np = kzalloc(sizeof(*np), GFP_KERNEL); | |
1238 | + np = kzalloc(sizeof(*np), GFP_ATOMIC); | |
1239 | 1239 | err = -ENOMEM; |
1240 | 1240 | if (!np) |
1241 | 1241 | goto out; |
1242 | 1242 | |
1243 | - err = __netpoll_setup(np, slave->dev); | |
1243 | + err = __netpoll_setup(np, slave->dev, GFP_ATOMIC); | |
1244 | 1244 | if (err) { |
1245 | 1245 | kfree(np); |
1246 | 1246 | goto out; |
... | ... | @@ -1292,7 +1292,7 @@ |
1292 | 1292 | read_unlock(&bond->lock); |
1293 | 1293 | } |
1294 | 1294 | |
1295 | -static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) | |
1295 | +static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp) | |
1296 | 1296 | { |
1297 | 1297 | struct bonding *bond = netdev_priv(dev); |
1298 | 1298 | struct slave *slave; |
drivers/net/team/team.c
... | ... | @@ -795,16 +795,17 @@ |
795 | 795 | } |
796 | 796 | |
797 | 797 | #ifdef CONFIG_NET_POLL_CONTROLLER |
798 | -static int team_port_enable_netpoll(struct team *team, struct team_port *port) | |
798 | +static int team_port_enable_netpoll(struct team *team, struct team_port *port, | |
799 | + gfp_t gfp) | |
799 | 800 | { |
800 | 801 | struct netpoll *np; |
801 | 802 | int err; |
802 | 803 | |
803 | - np = kzalloc(sizeof(*np), GFP_KERNEL); | |
804 | + np = kzalloc(sizeof(*np), gfp); | |
804 | 805 | if (!np) |
805 | 806 | return -ENOMEM; |
806 | 807 | |
807 | - err = __netpoll_setup(np, port->dev); | |
808 | + err = __netpoll_setup(np, port->dev, gfp); | |
808 | 809 | if (err) { |
809 | 810 | kfree(np); |
810 | 811 | return err; |
... | ... | @@ -833,7 +834,8 @@ |
833 | 834 | } |
834 | 835 | |
835 | 836 | #else |
836 | -static int team_port_enable_netpoll(struct team *team, struct team_port *port) | |
837 | +static int team_port_enable_netpoll(struct team *team, struct team_port *port, | |
838 | + gfp_t gfp) | |
837 | 839 | { |
838 | 840 | return 0; |
839 | 841 | } |
... | ... | @@ -913,7 +915,7 @@ |
913 | 915 | } |
914 | 916 | |
915 | 917 | if (team_netpoll_info(team)) { |
916 | - err = team_port_enable_netpoll(team, port); | |
918 | + err = team_port_enable_netpoll(team, port, GFP_KERNEL); | |
917 | 919 | if (err) { |
918 | 920 | netdev_err(dev, "Failed to enable netpoll on device %s\n", |
919 | 921 | portname); |
... | ... | @@ -1443,7 +1445,7 @@ |
1443 | 1445 | } |
1444 | 1446 | |
1445 | 1447 | static int team_netpoll_setup(struct net_device *dev, |
1446 | - struct netpoll_info *npifo) | |
1448 | + struct netpoll_info *npifo, gfp_t gfp) | |
1447 | 1449 | { |
1448 | 1450 | struct team *team = netdev_priv(dev); |
1449 | 1451 | struct team_port *port; |
... | ... | @@ -1451,7 +1453,7 @@ |
1451 | 1453 | |
1452 | 1454 | mutex_lock(&team->lock); |
1453 | 1455 | list_for_each_entry(port, &team->port_list, list) { |
1454 | - err = team_port_enable_netpoll(team, port); | |
1456 | + err = team_port_enable_netpoll(team, port, gfp); | |
1455 | 1457 | if (err) { |
1456 | 1458 | __team_netpoll_cleanup(team); |
1457 | 1459 | break; |
include/linux/netdevice.h
... | ... | @@ -953,7 +953,8 @@ |
953 | 953 | #ifdef CONFIG_NET_POLL_CONTROLLER |
954 | 954 | void (*ndo_poll_controller)(struct net_device *dev); |
955 | 955 | int (*ndo_netpoll_setup)(struct net_device *dev, |
956 | - struct netpoll_info *info); | |
956 | + struct netpoll_info *info, | |
957 | + gfp_t gfp); | |
957 | 958 | void (*ndo_netpoll_cleanup)(struct net_device *dev); |
958 | 959 | #endif |
959 | 960 | int (*ndo_set_vf_mac)(struct net_device *dev, |
include/linux/netpoll.h
... | ... | @@ -43,7 +43,7 @@ |
43 | 43 | void netpoll_send_udp(struct netpoll *np, const char *msg, int len); |
44 | 44 | void netpoll_print_options(struct netpoll *np); |
45 | 45 | int netpoll_parse_options(struct netpoll *np, char *opt); |
46 | -int __netpoll_setup(struct netpoll *np, struct net_device *ndev); | |
46 | +int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp); | |
47 | 47 | int netpoll_setup(struct netpoll *np); |
48 | 48 | int netpoll_trap(void); |
49 | 49 | void netpoll_set_trap(int trap); |
net/8021q/vlan_dev.c
... | ... | @@ -669,19 +669,20 @@ |
669 | 669 | return; |
670 | 670 | } |
671 | 671 | |
672 | -static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) | |
672 | +static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo, | |
673 | + gfp_t gfp) | |
673 | 674 | { |
674 | 675 | struct vlan_dev_priv *info = vlan_dev_priv(dev); |
675 | 676 | struct net_device *real_dev = info->real_dev; |
676 | 677 | struct netpoll *netpoll; |
677 | 678 | int err = 0; |
678 | 679 | |
679 | - netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); | |
680 | + netpoll = kzalloc(sizeof(*netpoll), gfp); | |
680 | 681 | err = -ENOMEM; |
681 | 682 | if (!netpoll) |
682 | 683 | goto out; |
683 | 684 | |
684 | - err = __netpoll_setup(netpoll, real_dev); | |
685 | + err = __netpoll_setup(netpoll, real_dev, gfp); | |
685 | 686 | if (err) { |
686 | 687 | kfree(netpoll); |
687 | 688 | goto out; |
net/bridge/br_device.c
... | ... | @@ -213,7 +213,8 @@ |
213 | 213 | } |
214 | 214 | } |
215 | 215 | |
216 | -static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) | |
216 | +static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, | |
217 | + gfp_t gfp) | |
217 | 218 | { |
218 | 219 | struct net_bridge *br = netdev_priv(dev); |
219 | 220 | struct net_bridge_port *p, *n; |
... | ... | @@ -222,8 +223,7 @@ |
222 | 223 | list_for_each_entry_safe(p, n, &br->port_list, list) { |
223 | 224 | if (!p->dev) |
224 | 225 | continue; |
225 | - | |
226 | - err = br_netpoll_enable(p); | |
226 | + err = br_netpoll_enable(p, gfp); | |
227 | 227 | if (err) |
228 | 228 | goto fail; |
229 | 229 | } |
230 | 230 | |
231 | 231 | |
... | ... | @@ -236,17 +236,17 @@ |
236 | 236 | goto out; |
237 | 237 | } |
238 | 238 | |
239 | -int br_netpoll_enable(struct net_bridge_port *p) | |
239 | +int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) | |
240 | 240 | { |
241 | 241 | struct netpoll *np; |
242 | 242 | int err = 0; |
243 | 243 | |
244 | - np = kzalloc(sizeof(*p->np), GFP_KERNEL); | |
244 | + np = kzalloc(sizeof(*p->np), gfp); | |
245 | 245 | err = -ENOMEM; |
246 | 246 | if (!np) |
247 | 247 | goto out; |
248 | 248 | |
249 | - err = __netpoll_setup(np, p->dev); | |
249 | + err = __netpoll_setup(np, p->dev, gfp); | |
250 | 250 | if (err) { |
251 | 251 | kfree(np); |
252 | 252 | goto out; |
net/bridge/br_if.c
net/bridge/br_private.h
... | ... | @@ -316,7 +316,7 @@ |
316 | 316 | netpoll_send_skb(np, skb); |
317 | 317 | } |
318 | 318 | |
319 | -extern int br_netpoll_enable(struct net_bridge_port *p); | |
319 | +extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp); | |
320 | 320 | extern void br_netpoll_disable(struct net_bridge_port *p); |
321 | 321 | #else |
322 | 322 | static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) |
... | ... | @@ -329,7 +329,7 @@ |
329 | 329 | { |
330 | 330 | } |
331 | 331 | |
332 | -static inline int br_netpoll_enable(struct net_bridge_port *p) | |
332 | +static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) | |
333 | 333 | { |
334 | 334 | return 0; |
335 | 335 | } |
net/core/netpoll.c
... | ... | @@ -715,7 +715,7 @@ |
715 | 715 | } |
716 | 716 | EXPORT_SYMBOL(netpoll_parse_options); |
717 | 717 | |
718 | -int __netpoll_setup(struct netpoll *np, struct net_device *ndev) | |
718 | +int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp) | |
719 | 719 | { |
720 | 720 | struct netpoll_info *npinfo; |
721 | 721 | const struct net_device_ops *ops; |
... | ... | @@ -734,7 +734,7 @@ |
734 | 734 | } |
735 | 735 | |
736 | 736 | if (!ndev->npinfo) { |
737 | - npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); | |
737 | + npinfo = kmalloc(sizeof(*npinfo), gfp); | |
738 | 738 | if (!npinfo) { |
739 | 739 | err = -ENOMEM; |
740 | 740 | goto out; |
... | ... | @@ -752,7 +752,7 @@ |
752 | 752 | |
753 | 753 | ops = np->dev->netdev_ops; |
754 | 754 | if (ops->ndo_netpoll_setup) { |
755 | - err = ops->ndo_netpoll_setup(ndev, npinfo); | |
755 | + err = ops->ndo_netpoll_setup(ndev, npinfo, gfp); | |
756 | 756 | if (err) |
757 | 757 | goto free_npinfo; |
758 | 758 | } |
... | ... | @@ -857,7 +857,7 @@ |
857 | 857 | refill_skbs(); |
858 | 858 | |
859 | 859 | rtnl_lock(); |
860 | - err = __netpoll_setup(np, ndev); | |
860 | + err = __netpoll_setup(np, ndev, GFP_KERNEL); | |
861 | 861 | rtnl_unlock(); |
862 | 862 | |
863 | 863 | if (err) |