Commit 9d367eddf363553c7668ba92c3b9d187ec4f71f7

Authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/bonding/bond_main.c
	drivers/net/ethernet/mellanox/mlxsw/spectrum.h
	drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c

The bond_main.c and mellanox switch conflicts were cases of
overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>

30 changed files:

drivers/isdn/act2000/module.c
... ... @@ -37,7 +37,7 @@
37 37 MODULE_AUTHOR("Fritz Elfert");
38 38 MODULE_LICENSE("GPL");
39 39 MODULE_PARM_DESC(act_bus, "BusType of first card, 1=ISA, 2=MCA, 3=PCMCIA, currently only ISA");
40   -MODULE_PARM_DESC(membase, "Base port address of first card");
  40 +MODULE_PARM_DESC(act_port, "Base port address of first card");
41 41 MODULE_PARM_DESC(act_irq, "IRQ of first card");
42 42 MODULE_PARM_DESC(act_id, "ID-String of first card");
43 43 module_param(act_bus, int, 0);
drivers/net/bonding/bond_main.c
... ... @@ -1226,7 +1226,6 @@
1226 1226 &lag_upper_info);
1227 1227 if (err)
1228 1228 return err;
1229   - slave->dev->flags |= IFF_SLAVE;
1230 1229 rtmsg_ifinfo(RTM_NEWLINK, slave->dev, IFF_SLAVE, GFP_KERNEL);
1231 1230 return 0;
1232 1231 }
... ... @@ -1493,6 +1492,9 @@
1493 1492 }
1494 1493 }
1495 1494  
  1495 + /* set slave flag before open to prevent IPv6 addrconf */
  1496 + slave_dev->flags |= IFF_SLAVE;
  1497 +
1496 1498 /* open the slave since the application closed it */
1497 1499 res = dev_open(slave_dev);
1498 1500 if (res) {
... ... @@ -1758,6 +1760,7 @@
1758 1760 dev_close(slave_dev);
1759 1761  
1760 1762 err_restore_mac:
  1763 + slave_dev->flags &= ~IFF_SLAVE;
1761 1764 if (!bond->params.fail_over_mac ||
1762 1765 BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
1763 1766 /* XXX TODO - fom follow mode needs to change master's
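
The ordering is the point of this fix: IFF_SLAVE is now set before dev_open() so IPv6 addrconf already sees a slave device when the port comes up, and the error path clears it again on failure. A minimal userspace sketch of the set-early, clear-on-failure idiom (the struct, flag value, and failing open are stand-ins, not the bonding code):

#include <stdio.h>

#define IFF_SLAVE 0x0800

struct fake_dev { unsigned int flags; };

/* Stand-in for dev_open(); fails here to exercise the unwind path. */
static int fake_open(struct fake_dev *d) { (void)d; return -1; }

static int enslave(struct fake_dev *slave)
{
	int err;

	/* Set the flag before opening so anything triggered by the open
	 * (e.g. addrconf) already sees a slave device. */
	slave->flags |= IFF_SLAVE;

	err = fake_open(slave);
	if (err)
		goto err_restore_flags;

	return 0;

err_restore_flags:
	/* Unwind in reverse order of setup, as the kernel error labels do. */
	slave->flags &= ~IFF_SLAVE;
	return err;
}

int main(void)
{
	struct fake_dev d = { 0 };
	printf("enslave: %d, flags: %#x\n", enslave(&d), d.flags);
	return 0;
}
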
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
... ... @@ -120,6 +120,7 @@
120 120 } fdb_notify;
121 121 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
122 122 u32 ageing_time;
  123 + struct mutex fdb_lock; /* Make sure FDB sessions are atomic. */
123 124 struct mlxsw_sp_upper master_bridge;
124 125 struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
125 126 };
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
... ... @@ -1057,6 +1057,7 @@
1057 1057 if (!sfd_pl)
1058 1058 return -ENOMEM;
1059 1059  
  1060 + mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
1060 1061 if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
1061 1062 u16 tmp;
1062 1063  
... ... @@ -1122,6 +1123,7 @@
1122 1123 } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
1123 1124  
1124 1125 out:
  1126 + mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
1125 1127 kfree(sfd_pl);
1126 1128 return stored_err ? stored_err : err;
1127 1129 }
... ... @@ -1371,6 +1373,7 @@
1371 1373  
1372 1374 mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
1373 1375  
  1376 + mutex_lock(&mlxsw_sp->fdb_lock);
1374 1377 do {
1375 1378 mlxsw_reg_sfn_pack(sfn_pl);
1376 1379 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
... ... @@ -1383,6 +1386,7 @@
1383 1386 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
1384 1387  
1385 1388 } while (num_rec);
  1389 + mutex_unlock(&mlxsw_sp->fdb_lock);
1386 1390  
1387 1391 kfree(sfn_pl);
1388 1392 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
... ... @@ -1397,6 +1401,7 @@
1397 1401 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
1398 1402 return err;
1399 1403 }
  1404 + mutex_init(&mlxsw_sp->fdb_lock);
1400 1405 INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
1401 1406 mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
1402 1407 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
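
The new fdb_lock serializes two multi-step FDB sessions that could otherwise interleave: the switchdev FDB dump (a loop of SFD register reads) and the delayed notification worker (SFN reads plus record processing). A rough userspace analogue of the pattern, with pthreads standing in for the kernel mutex and a plain array for the FDB:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fdb_lock = PTHREAD_MUTEX_INITIALIZER;
static int fdb_table[4];

static void fdb_dump(void)
{
	pthread_mutex_lock(&fdb_lock);
	for (int i = 0; i < 4; i++)	/* multi-record session, must not interleave */
		printf("rec %d = %d\n", i, fdb_table[i]);
	pthread_mutex_unlock(&fdb_lock);
}

static void *notify_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&fdb_lock);
	for (int i = 0; i < 4; i++)	/* processes learn/age records */
		fdb_table[i]++;
	pthread_mutex_unlock(&fdb_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, notify_worker, NULL);
	fdb_dump();
	pthread_join(t, NULL);
	return 0;
}
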
drivers/net/ethernet/renesas/ravb_main.c
... ... @@ -343,16 +343,13 @@
343 343 static void ravb_emac_init(struct net_device *ndev)
344 344 {
345 345 struct ravb_private *priv = netdev_priv(ndev);
346   - u32 ecmr;
347 346  
348 347 /* Receive frame limit set register */
349 348 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
350 349  
351 350 /* PAUSE prohibition */
352   - ecmr = ravb_read(ndev, ECMR);
353   - ecmr &= ECMR_DM;
354   - ecmr |= ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
355   - ravb_write(ndev, ecmr, ECMR);
  351 + ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) |
  352 + ECMR_TE | ECMR_RE, ECMR);
356 353  
357 354 ravb_set_rate(ndev);
358 355  
drivers/net/ethernet/renesas/sh_eth.c
... ... @@ -1240,7 +1240,6 @@
1240 1240 {
1241 1241 int ret = 0;
1242 1242 struct sh_eth_private *mdp = netdev_priv(ndev);
1243   - u32 val;
1244 1243  
1245 1244 /* Soft Reset */
1246 1245 ret = sh_eth_reset(ndev);
... ... @@ -1293,10 +1292,8 @@
1293 1292 }
1294 1293  
1295 1294 /* PAUSE Prohibition */
1296   - val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
1297   - ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
1298   -
1299   - sh_eth_write(ndev, val, ECMR);
  1295 + sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
  1296 + ECMR_TE | ECMR_RE, ECMR);
1300 1297  
1301 1298 if (mdp->cd->set_rate)
1302 1299 mdp->cd->set_rate(ndev);
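
Both Renesas drivers used to read ECMR, keep only ECMR_DM, and then OR in a value whose duplex term sets ECMR_DM anyway; since priv->duplex/mdp->duplex is the driver's authoritative duplex state, the read preserved nothing useful and can be dropped. A small before/after sketch (bit values are placeholders, not the real register layout):

#include <stdio.h>

/* Placeholder bit values, not the real ECMR layout. */
#define ECMR_ZPF 0x00080000u
#define ECMR_DM  0x00000002u
#define ECMR_TE  0x00000020u
#define ECMR_RE  0x00000040u

int main(void)
{
	unsigned int ecmr_reg = ECMR_DM;	/* pretend hardware state */
	int duplex = 1;				/* driver's authoritative state */

	/* Before: read-modify-write, but the only preserved bit (ECMR_DM)
	 * is decided again by the duplex term. */
	unsigned int v = ecmr_reg & ECMR_DM;
	v |= ECMR_ZPF | (duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	/* After: every bit is fully specified, so just write the value. */
	unsigned int w = ECMR_ZPF | (duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	printf("rmw=%#x direct=%#x\n", v, w);
	return 0;
}
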
drivers/net/geneve.c
... ... @@ -388,7 +388,7 @@
388 388 int err;
389 389  
390 390 if (sa_family == AF_INET) {
391   - err = udp_add_offload(&gs->udp_offloads);
  391 + err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
392 392 if (err)
393 393 pr_warn("geneve: udp_add_offload failed with status %d\n",
394 394 err);
drivers/net/irda/toim3232-sir.c
... ... @@ -130,16 +130,6 @@
130 130 module_param(toim3232delay, int, 0);
131 131 MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay");
132 132  
133   -#if 0
134   -static int toim3232flipdtr = 0; /* default is DTR high to reset */
135   -module_param(toim3232flipdtr, int, 0);
136   -MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)");
137   -
138   -static int toim3232fliprts = 0; /* default is RTS high for baud change */
139   -module_param(toim3232fliptrs, int, 0);
140   -MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)");
141   -#endif
142   -
143 133 static int toim3232_open(struct sir_dev *);
144 134 static int toim3232_close(struct sir_dev *);
145 135 static int toim3232_change_speed(struct sir_dev *, unsigned);
drivers/net/phy/micrel.c
... ... @@ -483,9 +483,17 @@
483 483 "txd2-skew-ps", "txd3-skew-ps"
484 484 };
485 485 static const char *control_skews[2] = {"txen-skew-ps", "rxdv-skew-ps"};
  486 + const struct device *dev_walker;
486 487  
487   - if (!of_node && dev->parent->of_node)
488   - of_node = dev->parent->of_node;
  488 + /* The Micrel driver has a deprecated option to place phy OF
  489 + * properties in the MAC node. Walk up the tree of devices to
  490 + * find a device with an OF node.
  491 + */
  492 + dev_walker = &phydev->mdio.dev;
  493 + do {
  494 + of_node = dev_walker->of_node;
  495 + dev_walker = dev_walker->parent;
  496 + } while (!of_node && dev_walker);
489 497  
490 498 if (of_node) {
491 499 ksz9031_of_load_skew_values(phydev, of_node,
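
Rather than one hard-coded hop to dev->parent, the driver now climbs the device hierarchy from the PHY until any ancestor carries an OF node, which also covers MDIO buses nested more than one level below the MAC. A toy version of the walk (the structs and node name are made up):

#include <stdio.h>
#include <stddef.h>

struct fake_device {
	struct fake_device *parent;
	const char *of_node;	/* NULL if this device has no OF node */
};

int main(void)
{
	struct fake_device mac  = { NULL, "ethernet-mac" };
	struct fake_device mdio = { &mac, NULL };
	struct fake_device phy  = { &mdio, NULL };

	const struct fake_device *walker = &phy;
	const char *of_node;

	do {
		of_node = walker->of_node;
		walker = walker->parent;
	} while (!of_node && walker);

	printf("found: %s\n", of_node ? of_node : "(none)");
	return 0;
}
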
drivers/net/usb/cdc_ether.c
... ... @@ -160,6 +160,12 @@
160 160 info->u = header.usb_cdc_union_desc;
161 161 info->header = header.usb_cdc_header_desc;
162 162 info->ether = header.usb_cdc_ether_desc;
  163 + if (!info->u) {
  164 + if (rndis)
  165 + goto skip;
  166 + else /* in that case a quirk is mandatory */
  167 + goto bad_desc;
  168 + }
163 169 /* we need a master/control interface (what we're
164 170 * probed with) and a slave/data interface; union
165 171 * descriptors sort this all out.
... ... @@ -256,7 +262,7 @@
256 262 goto bad_desc;
257 263 }
258 264  
259   - } else if (!info->header || !info->u || (!rndis && !info->ether)) {
  265 + } else if (!info->header || (!rndis && !info->ether)) {
260 266 dev_dbg(&intf->dev, "missing cdc %s%s%sdescriptor\n",
261 267 info->header ? "" : "header ",
262 268 info->u ? "" : "union ",
drivers/net/usb/lan78xx.c
... ... @@ -603,6 +603,59 @@
603 603 return 0;
604 604 }
605 605  
  606 +static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
  607 + u32 length, u8 *data)
  608 +{
  609 + int i;
  610 + int ret;
  611 + u32 buf;
  612 + unsigned long timeout;
  613 +
  614 + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
  615 +
  616 + if (buf & OTP_PWR_DN_PWRDN_N_) {
  617 + /* clear it and wait to be cleared */
  618 + ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
  619 +
  620 + timeout = jiffies + HZ;
  621 + do {
  622 + udelay(1);
  623 + ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
  624 + if (time_after(jiffies, timeout)) {
  625 + netdev_warn(dev->net,
  626 + "timeout on OTP_PWR_DN completion");
  627 + return -EIO;
  628 + }
  629 + } while (buf & OTP_PWR_DN_PWRDN_N_);
  630 + }
  631 +
  632 + /* set to BYTE program mode */
  633 + ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
  634 +
  635 + for (i = 0; i < length; i++) {
  636 + ret = lan78xx_write_reg(dev, OTP_ADDR1,
  637 + ((offset + i) >> 8) & OTP_ADDR1_15_11);
  638 + ret = lan78xx_write_reg(dev, OTP_ADDR2,
  639 + ((offset + i) & OTP_ADDR2_10_3));
  640 + ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
  641 + ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
  642 + ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
  643 +
  644 + timeout = jiffies + HZ;
  645 + do {
  646 + udelay(1);
  647 + ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
  648 + if (time_after(jiffies, timeout)) {
  649 + netdev_warn(dev->net,
  650 + "Timeout on OTP_STATUS completion");
  651 + return -EIO;
  652 + }
  653 + } while (buf & OTP_STATUS_BUSY_);
  654 + }
  655 +
  656 + return 0;
  657 +}
  658 +
606 659 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
607 660 u32 length, u8 *data)
608 661 {
... ... @@ -969,7 +1022,7 @@
969 1022 (ee->offset == 0) &&
970 1023 (ee->len == 512) &&
971 1024 (data[0] == OTP_INDICATOR_1))
972   - return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
  1025 + return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
973 1026  
974 1027 return -EINVAL;
975 1028 }
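
lan78xx_write_raw_otp() mirrors the existing raw-OTP read helper: power up the OTP block, select byte program mode, then for each byte kick OTP_CMD_GO and poll OTP_STATUS against a jiffies + HZ deadline; the ethtool EEPROM-write path then routes buffers that start with the OTP indicator to it instead of mis-routing them to the EEPROM writer. A userspace analogue of the poll-with-deadline loop (the busy flag is simulated):

#include <stdio.h>
#include <time.h>
#include <errno.h>

/* Pretend hardware that goes idle after a while. */
static int read_status_busy(void)
{
	static int countdown = 1000;
	return --countdown > 0;
}

static int wait_not_busy(void)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += 1;	/* roughly the driver's HZ budget */

	while (read_status_busy()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec &&
		     now.tv_nsec >= deadline.tv_nsec))
			return -EIO;	/* like the driver's timeout warning */
	}
	return 0;
}

int main(void)
{
	printf("wait_not_busy: %d\n", wait_not_busy());
	return 0;
}
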
drivers/net/usb/qmi_wwan.c
... ... @@ -886,6 +886,7 @@
886 886 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
887 887 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
888 888 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
  889 + {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
889 890  
890 891 /* 4. Gobi 1000 devices */
891 892 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
drivers/net/usb/r8152.c
... ... @@ -25,12 +25,13 @@
25 25 #include <uapi/linux/mdio.h>
26 26 #include <linux/mdio.h>
27 27 #include <linux/usb/cdc.h>
  28 +#include <linux/suspend.h>
28 29  
29 30 /* Information for net-next */
30 31 #define NETNEXT_VERSION "08"
31 32  
32 33 /* Information for net */
33   -#define NET_VERSION "2"
  34 +#define NET_VERSION "3"
34 35  
35 36 #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
36 37 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
... ... @@ -604,6 +605,9 @@
604 605 struct delayed_work schedule;
605 606 struct mii_if_info mii;
606 607 struct mutex control; /* use for hw setting */
  608 +#ifdef CONFIG_PM_SLEEP
  609 + struct notifier_block pm_notifier;
  610 +#endif
607 611  
608 612 struct rtl_ops {
609 613 void (*init)(struct r8152 *);
... ... @@ -3036,6 +3040,33 @@
3036 3040 usb_autopm_put_interface(tp->intf);
3037 3041 }
3038 3042  
  3043 +#ifdef CONFIG_PM_SLEEP
  3044 +static int rtl_notifier(struct notifier_block *nb, unsigned long action,
  3045 + void *data)
  3046 +{
  3047 + struct r8152 *tp = container_of(nb, struct r8152, pm_notifier);
  3048 +
  3049 + switch (action) {
  3050 + case PM_HIBERNATION_PREPARE:
  3051 + case PM_SUSPEND_PREPARE:
  3052 + usb_autopm_get_interface(tp->intf);
  3053 + break;
  3054 +
  3055 + case PM_POST_HIBERNATION:
  3056 + case PM_POST_SUSPEND:
  3057 + usb_autopm_put_interface(tp->intf);
  3058 + break;
  3059 +
  3060 + case PM_POST_RESTORE:
  3061 + case PM_RESTORE_PREPARE:
  3062 + default:
  3063 + break;
  3064 + }
  3065 +
  3066 + return NOTIFY_DONE;
  3067 +}
  3068 +#endif
  3069 +
3039 3070 static int rtl8152_open(struct net_device *netdev)
3040 3071 {
3041 3072 struct r8152 *tp = netdev_priv(netdev);
... ... @@ -3078,6 +3109,10 @@
3078 3109 mutex_unlock(&tp->control);
3079 3110  
3080 3111 usb_autopm_put_interface(tp->intf);
  3112 +#ifdef CONFIG_PM_SLEEP
  3113 + tp->pm_notifier.notifier_call = rtl_notifier;
  3114 + register_pm_notifier(&tp->pm_notifier);
  3115 +#endif
3081 3116  
3082 3117 out:
3083 3118 return res;
... ... @@ -3088,6 +3123,9 @@
3088 3123 struct r8152 *tp = netdev_priv(netdev);
3089 3124 int res = 0;
3090 3125  
  3126 +#ifdef CONFIG_PM_SLEEP
  3127 + unregister_pm_notifier(&tp->pm_notifier);
  3128 +#endif
3091 3129 napi_disable(&tp->napi);
3092 3130 clear_bit(WORK_ENABLE, &tp->flags);
3093 3131 usb_kill_urb(tp->intr_urb);
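
The notifier brackets system sleep: a runtime-PM reference taken at PM_SUSPEND_PREPARE / PM_HIBERNATION_PREPARE keeps the NIC from autosuspending while the system suspend is in flight, and the reference is dropped after resume. A stripped-down model of the bracket (plain counters stand in for the USB autopm calls):

#include <stdio.h>

enum pm_event { PM_SUSPEND_PREPARE, PM_POST_SUSPEND };

static int usage_count;

static void pm_notifier(enum pm_event action)
{
	switch (action) {
	case PM_SUSPEND_PREPARE:
		usage_count++;	/* like usb_autopm_get_interface() */
		break;
	case PM_POST_SUSPEND:
		usage_count--;	/* like usb_autopm_put_interface() */
		break;
	}
}

int main(void)
{
	pm_notifier(PM_SUSPEND_PREPARE);
	printf("during sleep prep, usage=%d (autosuspend blocked)\n", usage_count);
	pm_notifier(PM_POST_SUSPEND);
	printf("after resume, usage=%d\n", usage_count);
	return 0;
}
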
drivers/net/vxlan.c
... ... @@ -621,7 +621,7 @@
621 621 int err;
622 622  
623 623 if (sa_family == AF_INET) {
624   - err = udp_add_offload(&vs->udp_offloads);
  624 + err = udp_add_offload(net, &vs->udp_offloads);
625 625 if (err)
626 626 pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
627 627 }
... ... @@ -2750,7 +2750,7 @@
2750 2750 struct vxlan_config *conf)
2751 2751 {
2752 2752 struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
2753   - struct vxlan_dev *vxlan = netdev_priv(dev);
  2753 + struct vxlan_dev *vxlan = netdev_priv(dev), *tmp;
2754 2754 struct vxlan_rdst *dst = &vxlan->default_dst;
2755 2755 unsigned short needed_headroom = ETH_HLEN;
2756 2756 int err;
2757 2757  
... ... @@ -2816,9 +2816,15 @@
2816 2816 if (!vxlan->cfg.age_interval)
2817 2817 vxlan->cfg.age_interval = FDB_AGE_DEFAULT;
2818 2818  
2819   - if (vxlan_find_vni(src_net, conf->vni, use_ipv6 ? AF_INET6 : AF_INET,
2820   - vxlan->cfg.dst_port, vxlan->flags))
  2819 + list_for_each_entry(tmp, &vn->vxlan_list, next) {
  2820 + if (tmp->cfg.vni == conf->vni &&
  2821 + (tmp->default_dst.remote_ip.sa.sa_family == AF_INET6 ||
  2822 + tmp->cfg.saddr.sa.sa_family == AF_INET6) == use_ipv6 &&
  2823 + tmp->cfg.dst_port == vxlan->cfg.dst_port &&
  2824 + (tmp->flags & VXLAN_F_RCV_FLAGS) ==
  2825 + (vxlan->flags & VXLAN_F_RCV_FLAGS))
2821 2826 return -EEXIST;
  2827 + }
2822 2828  
2823 2829 dev->ethtool_ops = &vxlan_ethtool_ops;
2824 2830  
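
The open-coded walk replaces vxlan_find_vni() so the duplicate check compares the new device against each existing device's own configuration (VNI, address family, destination port, receive flags) rather than whatever a socket lookup returns. The same check in miniature (fields reduced to the essentials):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <errno.h>

struct vx { uint32_t vni; uint16_t dst_port; bool v6; struct vx *next; };

static int check_duplicate(const struct vx *list, const struct vx *newdev)
{
	for (const struct vx *tmp = list; tmp; tmp = tmp->next) {
		if (tmp->vni == newdev->vni &&
		    tmp->v6 == newdev->v6 &&
		    tmp->dst_port == newdev->dst_port)
			return -EEXIST;
	}
	return 0;
}

int main(void)
{
	struct vx a = { 42, 4789, false, NULL };
	struct vx b = { 42, 4789, false, NULL };

	printf("dup check: %d\n", check_duplicate(&a, &b));	/* -EEXIST */
	return 0;
}
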
include/linux/sched.h
... ... @@ -830,6 +830,7 @@
830 830 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
831 831 #endif
832 832 unsigned long locked_shm; /* How many pages of mlocked shm ? */
  833 + unsigned long unix_inflight; /* How many files in flight in unix sockets */
833 834  
834 835 #ifdef CONFIG_KEYS
835 836 struct key *uid_keyring; /* UID specific keyring */
include/net/protocol.h
... ... @@ -107,7 +107,7 @@
107 107 void inet_register_protosw(struct inet_protosw *p);
108 108 void inet_unregister_protosw(struct inet_protosw *p);
109 109  
110   -int udp_add_offload(struct udp_offload *prot);
  110 +int udp_add_offload(struct net *net, struct udp_offload *prot);
111 111 void udp_del_offload(struct udp_offload *prot);
112 112  
113 113 #if IS_ENABLED(CONFIG_IPV6)
net/batman-adv/bat_iv_ogm.c
... ... @@ -185,7 +185,8 @@
185 185 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
186 186 int max_if_num, int del_if_num)
187 187 {
188   - int chunk_size, ret = -ENOMEM, if_offset;
  188 + int ret = -ENOMEM;
  189 + size_t chunk_size, if_offset;
189 190 void *data_ptr = NULL;
190 191  
191 192 spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
192 193  
... ... @@ -203,8 +204,9 @@
203 204 memcpy(data_ptr, orig_node->bat_iv.bcast_own, del_if_num * chunk_size);
204 205  
205 206 /* copy second part */
  207 + if_offset = (del_if_num + 1) * chunk_size;
206 208 memcpy((char *)data_ptr + del_if_num * chunk_size,
207   - orig_node->bat_iv.bcast_own + ((del_if_num + 1) * chunk_size),
  209 + (uint8_t *)orig_node->bat_iv.bcast_own + if_offset,
208 210 (max_if_num - del_if_num) * chunk_size);
209 211  
210 212 free_bcast_own:
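
Promoting chunk_size and if_offset to size_t keeps the (del_if_num + 1) * chunk_size product well-defined for large interface counts and matches memcpy()'s length type, while the uint8_t * cast turns the pointer arithmetic into standard C instead of GCC's void * extension. The widened arithmetic in isolation:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	int del_if_num = 3;
	size_t chunk_size = 16, if_offset;	/* both were int in the old code */

	/* Computing the offset in size_t avoids signed-int overflow and
	 * matches what memcpy() expects for its length argument. */
	if_offset = ((size_t)del_if_num + 1) * chunk_size;

	printf("if_offset = %zu\n", if_offset);
	return 0;
}
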
net/core/pktgen.c
... ... @@ -2787,7 +2787,9 @@
2787 2787 } else {
2788 2788 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
2789 2789 }
2790   - skb_reserve(skb, LL_RESERVED_SPACE(dev));
  2790 +
  2791 + if (likely(skb))
  2792 + skb_reserve(skb, LL_RESERVED_SPACE(dev));
2791 2793  
2792 2794 return skb;
2793 2795 }
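
__netdev_alloc_skb() can return NULL under memory pressure, and the old code fed that straight into skb_reserve(); guarding the reserve and returning the (possibly NULL) skb lets the caller's existing failure handling take over. The shape of the fix, reduced to plain C:

#include <stdlib.h>

int main(void)
{
	/* Allocation can fail; only touch the buffer when it exists. */
	char *skb = malloc(64);
	int ok = (skb != NULL);

	if (skb)		/* previously dereferenced unconditionally */
		skb[0] = 0;	/* stand-in for skb_reserve() */

	free(skb);
	return ok ? 0 : 1;
}
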
net/ipv4/fou.c
... ... @@ -498,7 +498,7 @@
498 498 sk->sk_allocation = GFP_ATOMIC;
499 499  
500 500 if (cfg->udp_config.family == AF_INET) {
501   - err = udp_add_offload(&fou->udp_offloads);
  501 + err = udp_add_offload(net, &fou->udp_offloads);
502 502 if (err)
503 503 goto error;
504 504 }
net/ipv4/ip_output.c
... ... @@ -920,7 +920,7 @@
920 920 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
921 921 (sk->sk_protocol == IPPROTO_UDP) &&
922 922 (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
923   - (sk->sk_type == SOCK_DGRAM)) {
  923 + (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
924 924 err = ip_ufo_append_data(sk, queue, getfrag, from, length,
925 925 hh_len, fragheaderlen, transhdrlen,
926 926 maxfraglen, flags);
net/ipv4/tcp_yeah.c
... ... @@ -219,7 +219,7 @@
219 219 yeah->fast_count = 0;
220 220 yeah->reno_count = max(yeah->reno_count>>1, 2U);
221 221  
222   - return tp->snd_cwnd - reduction;
  222 + return max_t(int, tp->snd_cwnd - reduction, 2);
223 223 }
224 224  
225 225 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
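
tp->snd_cwnd is unsigned, so when reduction exceeds it the subtraction wraps to a huge congestion window rather than a small one; evaluating the difference as int and clamping at 2, the conventional cwnd floor, fixes both failure modes. A demonstration (max_t simplified; the kernel macro also avoids double evaluation):

#include <stdio.h>
#include <stdint.h>

#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	uint32_t snd_cwnd = 3, reduction = 5;

	uint32_t wrapped = snd_cwnd - reduction;		/* huge value */
	int clamped = max_t(int, snd_cwnd - reduction, 2);	/* 2 on common ABIs */

	printf("wrapped=%u clamped=%d\n", wrapped, clamped);
	return 0;
}
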
net/ipv4/udp_offload.c
... ... @@ -21,6 +21,7 @@
21 21  
22 22 struct udp_offload_priv {
23 23 struct udp_offload *offload;
  24 + possible_net_t net;
24 25 struct rcu_head rcu;
25 26 struct udp_offload_priv __rcu *next;
26 27 };
27 28  
... ... @@ -242,13 +243,14 @@
242 243 return segs;
243 244 }
244 245  
245   -int udp_add_offload(struct udp_offload *uo)
  246 +int udp_add_offload(struct net *net, struct udp_offload *uo)
246 247 {
247 248 struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
248 249  
249 250 if (!new_offload)
250 251 return -ENOMEM;
251 252  
  253 + write_pnet(&new_offload->net, net);
252 254 new_offload->offload = uo;
253 255  
254 256 spin_lock(&udp_offload_lock);
... ... @@ -312,7 +314,8 @@
312 314 rcu_read_lock();
313 315 uo_priv = rcu_dereference(udp_offload_base);
314 316 for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
315   - if (uo_priv->offload->port == uh->dest &&
  317 + if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
  318 + uo_priv->offload->port == uh->dest &&
316 319 uo_priv->offload->callbacks.gro_receive)
317 320 goto unflush;
318 321 }
... ... @@ -390,7 +393,8 @@
390 393  
391 394 uo_priv = rcu_dereference(udp_offload_base);
392 395 for (; uo_priv != NULL; uo_priv = rcu_dereference(uo_priv->next)) {
393   - if (uo_priv->offload->port == uh->dest &&
  396 + if (net_eq(read_pnet(&uo_priv->net), dev_net(skb->dev)) &&
  397 + uo_priv->offload->port == uh->dest &&
394 398 uo_priv->offload->callbacks.gro_complete)
395 399 break;
396 400 }
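
This is the core behind the udp_add_offload() call-site changes above: the registration now records its network namespace via write_pnet(), and both GRO paths only match an offload whose stored namespace equals dev_net(skb->dev), so a tunnel port registered in one netns no longer captures packets arriving in another. A miniature with an integer owner id standing in for possible_net_t:

#include <stdio.h>
#include <stdint.h>

struct offload { int owner; uint16_t port; struct offload *next; };

static const struct offload *find(const struct offload *head,
				  int owner, uint16_t port)
{
	for (; head; head = head->next)
		if (head->owner == owner && head->port == port)
			return head;
	return NULL;
}

int main(void)
{
	struct offload ns2 = { 2, 4789, NULL };		/* registered in netns 2 */
	struct offload ns1 = { 1, 4789, &ns2 };		/* registered in netns 1 */

	/* A packet seen in netns 2 must not hit netns 1's handler. */
	printf("match in ns2: %s\n", find(&ns1, 2, 4789) == &ns2 ? "yes" : "no");
	return 0;
}
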
net/ipv6/ip6_output.c
... ... @@ -1353,7 +1353,7 @@
1353 1353 (skb && skb_is_gso(skb))) &&
1354 1354 (sk->sk_protocol == IPPROTO_UDP) &&
1355 1355 (rt->dst.dev->features & NETIF_F_UFO) &&
1356   - (sk->sk_type == SOCK_DGRAM)) {
  1356 + (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
1357 1357 err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
1358 1358 hh_len, fragheaderlen,
1359 1359 transhdrlen, mtu, flags, fl6);
net/ipv6/tcp_ipv6.c
... ... @@ -462,8 +462,10 @@
462 462 if (np->repflow && ireq->pktopts)
463 463 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
464 464  
  465 + rcu_read_lock();
465 466 err = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt),
466 467 np->tclass);
  468 + rcu_read_unlock();
467 469 err = net_xmit_eval(err);
468 470 }
469 471  
net/sched/cls_flower.c
... ... @@ -252,23 +252,28 @@
252 252 fl_set_key_val(tb, key->eth.src, TCA_FLOWER_KEY_ETH_SRC,
253 253 mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
254 254 sizeof(key->eth.src));
  255 +
255 256 fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
256 257 &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
257 258 sizeof(key->basic.n_proto));
  259 +
258 260 if (key->basic.n_proto == htons(ETH_P_IP) ||
259 261 key->basic.n_proto == htons(ETH_P_IPV6)) {
260 262 fl_set_key_val(tb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
261 263 &mask->basic.ip_proto, TCA_FLOWER_UNSPEC,
262 264 sizeof(key->basic.ip_proto));
263 265 }
264   - if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
  266 +
  267 + if (tb[TCA_FLOWER_KEY_IPV4_SRC] || tb[TCA_FLOWER_KEY_IPV4_DST]) {
  268 + key->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
265 269 fl_set_key_val(tb, &key->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC,
266 270 &mask->ipv4.src, TCA_FLOWER_KEY_IPV4_SRC_MASK,
267 271 sizeof(key->ipv4.src));
268 272 fl_set_key_val(tb, &key->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST,
269 273 &mask->ipv4.dst, TCA_FLOWER_KEY_IPV4_DST_MASK,
270 274 sizeof(key->ipv4.dst));
271   - } else if (key->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
  275 + } else if (tb[TCA_FLOWER_KEY_IPV6_SRC] || tb[TCA_FLOWER_KEY_IPV6_DST]) {
  276 + key->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
272 277 fl_set_key_val(tb, &key->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC,
273 278 &mask->ipv6.src, TCA_FLOWER_KEY_IPV6_SRC_MASK,
274 279 sizeof(key->ipv6.src));
... ... @@ -276,6 +281,7 @@
276 281 &mask->ipv6.dst, TCA_FLOWER_KEY_IPV6_DST_MASK,
277 282 sizeof(key->ipv6.dst));
278 283 }
  284 +
279 285 if (key->basic.ip_proto == IPPROTO_TCP) {
280 286 fl_set_key_val(tb, &key->tp.src, TCA_FLOWER_KEY_TCP_SRC,
281 287 &mask->tp.src, TCA_FLOWER_UNSPEC,
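
The old branches tested key->control.addr_type, which nothing had initialized at that point; deriving the address type from which TCA_FLOWER_KEY_* attributes the user actually supplied makes the classification deterministic. The decision reduced to a toy dispatch:

#include <stdio.h>

enum { KEY_IPV4_SRC, KEY_IPV4_DST, KEY_IPV6_SRC, KEY_IPV6_DST, KEY_MAX };
enum addr_type { ADDR_UNSPEC, ADDR_IPV4, ADDR_IPV6 };

/* Derive the type from the attributes present, not from an unset field. */
static enum addr_type classify(const void *tb[KEY_MAX])
{
	if (tb[KEY_IPV4_SRC] || tb[KEY_IPV4_DST])
		return ADDR_IPV4;
	if (tb[KEY_IPV6_SRC] || tb[KEY_IPV6_DST])
		return ADDR_IPV6;
	return ADDR_UNSPEC;
}

int main(void)
{
	const void *tb[KEY_MAX] = { 0 };
	int v4 = 1;

	tb[KEY_IPV4_DST] = &v4;
	printf("addr_type = %d\n", classify(tb));	/* ADDR_IPV4 */
	return 0;
}
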
net/sctp/sm_sideeffect.c
... ... @@ -63,7 +63,7 @@
63 63 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
64 64 sctp_state_t state,
65 65 struct sctp_endpoint *ep,
66   - struct sctp_association *asoc,
  66 + struct sctp_association **asoc,
67 67 void *event_arg,
68 68 sctp_disposition_t status,
69 69 sctp_cmd_seq_t *commands,
... ... @@ -1125,7 +1125,7 @@
1125 1125 debug_post_sfn();
1126 1126  
1127 1127 error = sctp_side_effects(event_type, subtype, state,
1128   - ep, asoc, event_arg, status,
  1128 + ep, &asoc, event_arg, status,
1129 1129 &commands, gfp);
1130 1130 debug_post_sfx();
1131 1131  
... ... @@ -1138,7 +1138,7 @@
1138 1138 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
1139 1139 sctp_state_t state,
1140 1140 struct sctp_endpoint *ep,
1141   - struct sctp_association *asoc,
  1141 + struct sctp_association **asoc,
1142 1142 void *event_arg,
1143 1143 sctp_disposition_t status,
1144 1144 sctp_cmd_seq_t *commands,
... ... @@ -1153,7 +1153,7 @@
1153 1153 * disposition SCTP_DISPOSITION_CONSUME.
1154 1154 */
1155 1155 if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
1156   - ep, asoc,
  1156 + ep, *asoc,
1157 1157 event_arg, status,
1158 1158 commands, gfp)))
1159 1159 goto bail;
... ... @@ -1176,11 +1176,12 @@
1176 1176 break;
1177 1177  
1178 1178 case SCTP_DISPOSITION_DELETE_TCB:
  1179 + case SCTP_DISPOSITION_ABORT:
1179 1180 /* This should now be a command. */
  1181 + *asoc = NULL;
1180 1182 break;
1181 1183  
1182 1184 case SCTP_DISPOSITION_CONSUME:
1183   - case SCTP_DISPOSITION_ABORT:
1184 1185 /*
1185 1186 * We should no longer have much work to do here as the
1186 1187 * real work has been done as explicit commands above.
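
sctp_side_effects() now takes struct sctp_association ** so that the TCB-deleting dispositions, including SCTP_DISPOSITION_ABORT, which moves up to share that branch, can NULL the caller's pointer and keep sctp_do_sm() from touching a freed association. The ownership idiom in isolation:

#include <stdio.h>
#include <stdlib.h>

struct assoc { int id; };

/* The callee gets a pointer to the caller's pointer, so it can both
 * free the object and clear the caller's reference. */
static void side_effects(struct assoc **asoc)
{
	free(*asoc);
	*asoc = NULL;
}

int main(void)
{
	struct assoc *a = malloc(sizeof(*a));

	side_effects(&a);
	printf("asoc after delete: %p\n", (void *)a);	/* (nil) */
	return 0;
}
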
net/sctp/sm_statefuns.c
... ... @@ -2976,7 +2976,7 @@
2976 2976 SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
2977 2977 goto discard_force;
2978 2978 case SCTP_IERROR_NO_DATA:
2979   - goto consume;
  2979 + return SCTP_DISPOSITION_ABORT;
2980 2980 case SCTP_IERROR_PROTO_VIOLATION:
2981 2981 return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
2982 2982 (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
... ... @@ -3043,9 +3043,6 @@
3043 3043 sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
3044 3044  
3045 3045 return SCTP_DISPOSITION_DISCARD;
3046   -consume:
3047   - return SCTP_DISPOSITION_CONSUME;
3048   -
3049 3046 }
3050 3047  
3051 3048 /*
... ... @@ -3093,7 +3090,7 @@
3093 3090 case SCTP_IERROR_BAD_STREAM:
3094 3091 break;
3095 3092 case SCTP_IERROR_NO_DATA:
3096   - goto consume;
  3093 + return SCTP_DISPOSITION_ABORT;
3097 3094 case SCTP_IERROR_PROTO_VIOLATION:
3098 3095 return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
3099 3096 (u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
... ... @@ -3119,7 +3116,6 @@
3119 3116 SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
3120 3117 }
3121 3118  
3122   -consume:
3123 3119 return SCTP_DISPOSITION_CONSUME;
3124 3120 }
3125 3121  
... ... @@ -4825,10 +4821,7 @@
4825 4821 * if necessary to fill gaps.
4826 4822 */
4827 4823 struct sctp_chunk *abort = arg;
4828   - sctp_disposition_t retval;
4829 4824  
4830   - retval = SCTP_DISPOSITION_CONSUME;
4831   -
4832 4825 if (abort)
4833 4826 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
4834 4827  
... ... @@ -4845,7 +4838,7 @@
4845 4838 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
4846 4839 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
4847 4840  
4848   - return retval;
  4841 + return SCTP_DISPOSITION_ABORT;
4849 4842 }
4850 4843  
4851 4844 /* We tried an illegal operation on an association which is closed. */
... ... @@ -4960,12 +4953,10 @@
4960 4953 sctp_cmd_seq_t *commands)
4961 4954 {
4962 4955 struct sctp_chunk *abort = arg;
4963   - sctp_disposition_t retval;
4964 4956  
4965 4957 /* Stop T1-init timer */
4966 4958 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
4967 4959 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
4968   - retval = SCTP_DISPOSITION_CONSUME;
4969 4960  
4970 4961 if (abort)
4971 4962 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
... ... @@ -4985,7 +4976,7 @@
4985 4976 sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
4986 4977 SCTP_PERR(SCTP_ERROR_USER_ABORT));
4987 4978  
4988   - return retval;
  4979 + return SCTP_DISPOSITION_ABORT;
4989 4980 }
4990 4981  
4991 4982 /*
net/sctp/sysctl.c
... ... @@ -327,7 +327,7 @@
327 327 struct ctl_table tbl;
328 328 bool changed = false;
329 329 char *none = "none";
330   - char tmp[8];
  330 + char tmp[8] = {0};
331 331 int ret;
332 332  
333 333 memset(&tbl, 0, sizeof(struct ctl_table));
net/unix/af_unix.c
... ... @@ -1513,6 +1513,21 @@
1513 1513 sock_wfree(skb);
1514 1514 }
1515 1515  
  1516 +/*
  1517 + * The "user->unix_inflight" variable is protected by the garbage
  1518 + * collection lock, and we just read it locklessly here. If you go
  1519 + * over the limit, there might be a tiny race in actually noticing
  1520 + * it across threads. Tough.
  1521 + */
  1522 +static inline bool too_many_unix_fds(struct task_struct *p)
  1523 +{
  1524 + struct user_struct *user = current_user();
  1525 +
  1526 + if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
  1527 + return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
  1528 + return false;
  1529 +}
  1530 +
1516 1531 #define MAX_RECURSION_LEVEL 4
1517 1532  
1518 1533 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
... ... @@ -1521,6 +1536,9 @@
1521 1536 unsigned char max_level = 0;
1522 1537 int unix_sock_count = 0;
1523 1538  
  1539 + if (too_many_unix_fds(current))
  1540 + return -ETOOMANYREFS;
  1541 +
1524 1542 for (i = scm->fp->count - 1; i >= 0; i--) {
1525 1543 struct sock *sk = unix_get_socket(scm->fp->fp[i]);
1526 1544  
... ... @@ -1542,10 +1560,8 @@
1542 1560 if (!UNIXCB(skb).fp)
1543 1561 return -ENOMEM;
1544 1562  
1545   - if (unix_sock_count) {
1546   - for (i = scm->fp->count - 1; i >= 0; i--)
1547   - unix_inflight(scm->fp->fp[i]);
1548   - }
  1563 + for (i = scm->fp->count - 1; i >= 0; i--)
  1564 + unix_inflight(scm->fp->fp[i]);
1549 1565 return max_level;
1550 1566 }
1551 1567  
net/unix/garbage.c
... ... @@ -120,11 +120,11 @@
120 120 {
121 121 struct sock *s = unix_get_socket(fp);
122 122  
  123 + spin_lock(&unix_gc_lock);
  124 +
123 125 if (s) {
124 126 struct unix_sock *u = unix_sk(s);
125 127  
126   - spin_lock(&unix_gc_lock);
127   -
128 128 if (atomic_long_inc_return(&u->inflight) == 1) {
129 129 BUG_ON(!list_empty(&u->link));
130 130 list_add_tail(&u->link, &gc_inflight_list);
... ... @@ -132,25 +132,28 @@
132 132 BUG_ON(list_empty(&u->link));
133 133 }
134 134 unix_tot_inflight++;
135   - spin_unlock(&unix_gc_lock);
136 135 }
  136 + fp->f_cred->user->unix_inflight++;
  137 + spin_unlock(&unix_gc_lock);
137 138 }
138 139  
139 140 void unix_notinflight(struct file *fp)
140 141 {
141 142 struct sock *s = unix_get_socket(fp);
142 143  
  144 + spin_lock(&unix_gc_lock);
  145 +
143 146 if (s) {
144 147 struct unix_sock *u = unix_sk(s);
145 148  
146   - spin_lock(&unix_gc_lock);
147 149 BUG_ON(list_empty(&u->link));
148 150  
149 151 if (atomic_long_dec_and_test(&u->inflight))
150 152 list_del_init(&u->link);
151 153 unix_tot_inflight--;
152   - spin_unlock(&unix_gc_lock);
153 154 }
  155 + fp->f_cred->user->unix_inflight--;
  156 + spin_unlock(&unix_gc_lock);
154 157 }
155 158  
156 159 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
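
Both helpers now take unix_gc_lock before the if (s) test because the per-user unix_inflight counter, which backs too_many_unix_fds() above, is charged for every file in flight, not just unix sockets on the gc list, and it relies on the same lock. A compressed model of the widened critical section:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;
static long tot_inflight, user_inflight;

static void inflight(int is_unix_socket)
{
	/* Lock taken unconditionally: the per-user counter is updated for
	 * every fd, not only for unix sockets. */
	pthread_mutex_lock(&gc_lock);
	if (is_unix_socket)
		tot_inflight++;		/* only unix sockets hit the gc list */
	user_inflight++;		/* but every fd is charged to the user */
	pthread_mutex_unlock(&gc_lock);
}

int main(void)
{
	inflight(1);
	inflight(0);
	printf("tot=%ld user=%ld\n", tot_inflight, user_inflight);
	return 0;
}
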