Commit 9044f940ea7479cbda4cf015ec5727fbdb048080

Authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fill in ethtool link parameters for all link types in cxgb4, from
    Hariprasad Shenai.

 2) Fix probe regressions in stmmac driver, from Huacai Chen.

 3) Network namespace leaks on errors in rtnetlink, from Nicolas
    Dichtel.

 4) Remove erroneous BUG check which can actually trigger legitimately,
    in xen-netfront.  From Seth Forshee.

 5) Validate length of IFLA_BOND_ARP_IP_TARGET netlink attributes, from
    Thomas Graf.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  cxgb4: Fill in supported link mode for SFP modules
  xen-netfront: Remove BUGs on paged skb data which crosses a page boundary
  sh_eth: Fix sleeping function called from invalid context
  stmmac: platform: Move plat_dat checking earlier
  sh_eth: Fix skb alloc size and alignment adjust rule.
  rtnetlink: release net refcnt on error in do_setlink()
  bond: Check length of IFLA_BOND_ARP_IP_TARGET attributes

Showing 7 changed files Side-by-side Diff

drivers/net/bonding/bond_netlink.c
... ... @@ -225,7 +225,12 @@
225 225  
226 226 bond_option_arp_ip_targets_clear(bond);
227 227 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
228   - __be32 target = nla_get_be32(attr);
  228 + __be32 target;
  229 +
  230 + if (nla_len(attr) < sizeof(target))
  231 + return -EINVAL;
  232 +
  233 + target = nla_get_be32(attr);
229 234  
230 235 bond_opt_initval(&newval, (__force u64)target);
231 236 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
... ... @@ -2442,9 +2442,13 @@
2442 2442 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2443 2443 SUPPORTED_10000baseKX4_Full;
2444 2444 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2445   - type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
  2445 + type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
2446 2446 v |= SUPPORTED_FIBRE;
2447   - else if (type == FW_PORT_TYPE_BP40_BA)
  2447 + if (caps & FW_PORT_CAP_SPEED_1G)
  2448 + v |= SUPPORTED_1000baseT_Full;
  2449 + if (caps & FW_PORT_CAP_SPEED_10G)
  2450 + v |= SUPPORTED_10000baseT_Full;
  2451 + } else if (type == FW_PORT_TYPE_BP40_BA)
2448 2452 v |= SUPPORTED_40000baseSR4_Full;
2449 2453  
2450 2454 if (caps & FW_PORT_CAP_ANEG)
drivers/net/ethernet/renesas/sh_eth.c
... ... @@ -917,21 +917,13 @@
917 917 return ret;
918 918 }
919 919  
920   -#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
921 920 static void sh_eth_set_receive_align(struct sk_buff *skb)
922 921 {
923   - int reserve;
  922 + uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
924 923  
925   - reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
926 924 if (reserve)
927   - skb_reserve(skb, reserve);
  925 + skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
928 926 }
929   -#else
930   -static void sh_eth_set_receive_align(struct sk_buff *skb)
931   -{
932   - skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
933   -}
934   -#endif
935 927  
936 928  
937 929 /* CPU <-> EDMAC endian convert */
... ... @@ -1119,6 +1111,7 @@
1119 1111 struct sh_eth_txdesc *txdesc = NULL;
1120 1112 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1121 1113 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
  1114 + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1122 1115  
1123 1116 mdp->cur_rx = 0;
1124 1117 mdp->cur_tx = 0;
1125 1118  
1126 1119  
1127 1120  
... ... @@ -1131,21 +1124,21 @@
1131 1124 for (i = 0; i < mdp->num_rx_ring; i++) {
1132 1125 /* skb */
1133 1126 mdp->rx_skbuff[i] = NULL;
1134   - skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
  1127 + skb = netdev_alloc_skb(ndev, skbuff_size);
1135 1128 mdp->rx_skbuff[i] = skb;
1136 1129 if (skb == NULL)
1137 1130 break;
1138   - dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1139   - DMA_FROM_DEVICE);
1140 1131 sh_eth_set_receive_align(skb);
1141 1132  
1142 1133 /* RX descriptor */
1143 1134 rxdesc = &mdp->rx_ring[i];
  1135 + /* The size of the buffer is a multiple of 16 bytes. */
  1136 + rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
  1137 + dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
  1138 + DMA_FROM_DEVICE);
1144 1139 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1145 1140 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1146 1141  
1147   - /* The size of the buffer is 16 byte boundary. */
1148   - rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1149 1142 /* Rx descriptor address set */
1150 1143 if (i == 0) {
1151 1144 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
... ... @@ -1397,6 +1390,7 @@
1397 1390 struct sk_buff *skb;
1398 1391 u16 pkt_len = 0;
1399 1392 u32 desc_status;
  1393 + int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1400 1394  
1401 1395 rxdesc = &mdp->rx_ring[entry];
1402 1396 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
... ... @@ -1448,7 +1442,7 @@
1448 1442 if (mdp->cd->rpadir)
1449 1443 skb_reserve(skb, NET_IP_ALIGN);
1450 1444 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1451   - mdp->rx_buf_sz,
  1445 + ALIGN(mdp->rx_buf_sz, 16),
1452 1446 DMA_FROM_DEVICE);
1453 1447 skb_put(skb, pkt_len);
1454 1448 skb->protocol = eth_type_trans(skb, ndev);
1455 1449  
1456 1450  
... ... @@ -1468,13 +1462,13 @@
1468 1462 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1469 1463  
1470 1464 if (mdp->rx_skbuff[entry] == NULL) {
1471   - skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
  1465 + skb = netdev_alloc_skb(ndev, skbuff_size);
1472 1466 mdp->rx_skbuff[entry] = skb;
1473 1467 if (skb == NULL)
1474 1468 break; /* Better luck next round. */
1475   - dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1476   - DMA_FROM_DEVICE);
1477 1469 sh_eth_set_receive_align(skb);
  1470 + dma_map_single(&ndev->dev, skb->data,
  1471 + rxdesc->buffer_length, DMA_FROM_DEVICE);
1478 1472  
1479 1473 skb_checksum_none_assert(skb);
1480 1474 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
... ... @@ -2042,6 +2036,8 @@
2042 2036 if (ret)
2043 2037 goto out_free_irq;
2044 2038  
  2039 + mdp->is_opened = 1;
  2040 +
2045 2041 return ret;
2046 2042  
2047 2043 out_free_irq:
... ... @@ -2131,6 +2127,36 @@
2131 2127 return NETDEV_TX_OK;
2132 2128 }
2133 2129  
  2130 +static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
  2131 +{
  2132 + struct sh_eth_private *mdp = netdev_priv(ndev);
  2133 +
  2134 + if (sh_eth_is_rz_fast_ether(mdp))
  2135 + return &ndev->stats;
  2136 +
  2137 + if (!mdp->is_opened)
  2138 + return &ndev->stats;
  2139 +
  2140 + ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
  2141 + sh_eth_write(ndev, 0, TROCR); /* (write clear) */
  2142 + ndev->stats.collisions += sh_eth_read(ndev, CDCR);
  2143 + sh_eth_write(ndev, 0, CDCR); /* (write clear) */
  2144 + ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
  2145 + sh_eth_write(ndev, 0, LCCR); /* (write clear) */
  2146 +
  2147 + if (sh_eth_is_gether(mdp)) {
  2148 + ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
  2149 + sh_eth_write(ndev, 0, CERCR); /* (write clear) */
  2150 + ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
  2151 + sh_eth_write(ndev, 0, CEECR); /* (write clear) */
  2152 + } else {
  2153 + ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
  2154 + sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
  2155 + }
  2156 +
  2157 + return &ndev->stats;
  2158 +}
  2159 +
2134 2160 /* device close function */
2135 2161 static int sh_eth_close(struct net_device *ndev)
2136 2162 {
... ... @@ -2145,6 +2171,7 @@
2145 2171 sh_eth_write(ndev, 0, EDTRR);
2146 2172 sh_eth_write(ndev, 0, EDRRR);
2147 2173  
  2174 + sh_eth_get_stats(ndev);
2148 2175 /* PHY Disconnect */
2149 2176 if (mdp->phydev) {
2150 2177 phy_stop(mdp->phydev);
2151 2178  
... ... @@ -2163,36 +2190,9 @@
2163 2190  
2164 2191 pm_runtime_put_sync(&mdp->pdev->dev);
2165 2192  
2166   - return 0;
2167   -}
  2193 + mdp->is_opened = 0;
2168 2194  
2169   -static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2170   -{
2171   - struct sh_eth_private *mdp = netdev_priv(ndev);
2172   -
2173   - if (sh_eth_is_rz_fast_ether(mdp))
2174   - return &ndev->stats;
2175   -
2176   - pm_runtime_get_sync(&mdp->pdev->dev);
2177   -
2178   - ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2179   - sh_eth_write(ndev, 0, TROCR); /* (write clear) */
2180   - ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2181   - sh_eth_write(ndev, 0, CDCR); /* (write clear) */
2182   - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2183   - sh_eth_write(ndev, 0, LCCR); /* (write clear) */
2184   - if (sh_eth_is_gether(mdp)) {
2185   - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2186   - sh_eth_write(ndev, 0, CERCR); /* (write clear) */
2187   - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2188   - sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2189   - } else {
2190   - ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2191   - sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2192   - }
2193   - pm_runtime_put_sync(&mdp->pdev->dev);
2194   -
2195   - return &ndev->stats;
  2195 + return 0;
2196 2196 }
2197 2197  
2198 2198 /* ioctl to device function */
drivers/net/ethernet/renesas/sh_eth.h
... ... @@ -162,9 +162,9 @@
162 162  
163 163 /* Driver's parameters */
164 164 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
165   -#define SH4_SKB_RX_ALIGN 32
  165 +#define SH_ETH_RX_ALIGN 32
166 166 #else
167   -#define SH2_SH3_SKB_RX_ALIGN 2
  167 +#define SH_ETH_RX_ALIGN 2
168 168 #endif
169 169  
170 170 /* Register's bits
... ... @@ -522,6 +522,7 @@
522 522  
523 523 unsigned no_ether_link:1;
524 524 unsigned ether_link_active_low:1;
  525 + unsigned is_opened:1;
525 526 };
526 527  
527 528 static inline void sh_eth_soft_swap(char *src, int len)
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
... ... @@ -265,6 +265,15 @@
265 265  
266 266 plat_dat = dev_get_platdata(&pdev->dev);
267 267  
  268 + if (!plat_dat)
  269 + plat_dat = devm_kzalloc(&pdev->dev,
  270 + sizeof(struct plat_stmmacenet_data),
  271 + GFP_KERNEL);
  272 + if (!plat_dat) {
  273 + pr_err("%s: ERROR: no memory", __func__);
  274 + return -ENOMEM;
  275 + }
  276 +
268 277 /* Set default value for multicast hash bins */
269 278 plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
270 279  
... ... @@ -272,15 +281,6 @@
272 281 plat_dat->unicast_filter_entries = 1;
273 282  
274 283 if (pdev->dev.of_node) {
275   - if (!plat_dat)
276   - plat_dat = devm_kzalloc(&pdev->dev,
277   - sizeof(struct plat_stmmacenet_data),
278   - GFP_KERNEL);
279   - if (!plat_dat) {
280   - pr_err("%s: ERROR: no memory", __func__);
281   - return -ENOMEM;
282   - }
283   -
284 284 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
285 285 if (ret) {
286 286 pr_err("%s: main dt probe failed", __func__);
drivers/net/xen-netfront.c
... ... @@ -496,17 +496,12 @@
496 496 len = skb_frag_size(frag);
497 497 offset = frag->page_offset;
498 498  
499   - /* Data must not cross a page boundary. */
500   - BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
501   -
502 499 /* Skip unused frames from start of page */
503 500 page += offset >> PAGE_SHIFT;
504 501 offset &= ~PAGE_MASK;
505 502  
506 503 while (len > 0) {
507 504 unsigned long bytes;
508   -
509   - BUG_ON(offset >= PAGE_SIZE);
510 505  
511 506 bytes = PAGE_SIZE - offset;
512 507 if (bytes > len)
net/core/rtnetlink.c
... ... @@ -1498,6 +1498,7 @@
1498 1498 goto errout;
1499 1499 }
1500 1500 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
  1501 + put_net(net);
1501 1502 err = -EPERM;
1502 1503 goto errout;
1503 1504 }