Commit 5ed1836814d908f45cafde0e79cb85314ab9d41d
Exists in master and in 4 other branches.
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  net: Fix percpu counters deadlock
  cpumask: prepare for iterators to only go to nr_cpu_ids/nr_cpumask_bits: net
  drivers/net/usb: use USB API functions rather than constants
  cls_cgroup: clean up Kconfig
  cls_cgroup: clean up for cgroup part
  cls_cgroup: fix an oops when removing a cgroup
  EtherExpress16: fix printing timed out status
  mlx4_en: Added "set_ringparam" Ethtool interface implementation
  mlx4_en: Always allocate RX ring for each interrupt vector
  mlx4_en: Verify number of RX rings doesn't exceed MAX_RX_RINGS
  IPVS: Make "no destination available" message more consistent between schedulers
  net: KS8695: removed duplicated #include
  tun: Fix SIOCSIFHWADDR error.
  smsc911x: compile fix re netif_rx signature changes
  netns: foreach_netdev_safe is insufficient in default_device_exit
  net: make xfrm_statistics_seq_show use generic snmp_fold_field
  net: Fix more NAPI interface netdev argument drop fallout.
  net: Fix unused variable warnings in pasemi_mac.c and spider_net.c
Showing 38 changed files
- drivers/net/arm/ep93xx_eth.c
- drivers/net/arm/ixp4xx_eth.c
- drivers/net/arm/ks8695net.c
- drivers/net/eexpress.h
- drivers/net/mlx4/en_main.c
- drivers/net/mlx4/en_netdev.c
- drivers/net/mlx4/en_params.c
- drivers/net/mlx4/mlx4_en.h
- drivers/net/pasemi_mac.c
- drivers/net/smsc911x.c
- drivers/net/spider_net.c
- drivers/net/tun.c
- drivers/net/usb/hso.c
- drivers/net/wan/ixp4xx_hss.c
- drivers/net/wireless/zd1211rw/zd_usb.c
- net/core/dev.c
- net/core/neighbour.c
- net/dccp/proto.c
- net/ipv4/inet_connection_sock.c
- net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
- net/ipv4/proc.c
- net/ipv4/route.c
- net/ipv4/tcp.c
- net/ipv4/tcp_ipv4.c
- net/ipv6/tcp_ipv6.c
- net/netfilter/ipvs/ip_vs_lblc.c
- net/netfilter/ipvs/ip_vs_lblcr.c
- net/netfilter/ipvs/ip_vs_lc.c
- net/netfilter/ipvs/ip_vs_nq.c
- net/netfilter/ipvs/ip_vs_rr.c
- net/netfilter/ipvs/ip_vs_sed.c
- net/netfilter/ipvs/ip_vs_sh.c
- net/netfilter/ipvs/ip_vs_wlc.c
- net/netfilter/ipvs/ip_vs_wrr.c
- net/netfilter/nf_conntrack_standalone.c
- net/sched/Kconfig
- net/sched/cls_cgroup.c
- net/xfrm/xfrm_proc.c
drivers/net/arm/ep93xx_eth.c
drivers/net/arm/ixp4xx_eth.c
@@ -504,7 +504,7 @@
         netif_rx_complete(napi);
         qmgr_enable_irq(rxq);
         if (!qmgr_stat_empty(rxq) &&
-            netif_rx_reschedule(dev, napi)) {
+            netif_rx_reschedule(napi)) {
 #if DEBUG_RX
             printk(KERN_DEBUG "%s: eth_poll"
                    " netif_rx_reschedule successed\n",
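The hunk above, and the ixp4xx_hss and smsc911x hunks below, are fallout from the NAPI interface change that dropped the struct net_device argument: netif_rx_complete(), netif_rx_schedule_prep(), __netif_rx_schedule() and netif_rx_reschedule() now take only the napi_struct. A minimal sketch of the updated poll-completion pattern, under the assumption that do_rx_work(), rx_irq_enable() and rx_pending() are hypothetical driver helpers (not from this commit):

static int example_poll(struct napi_struct *napi, int budget)
{
        int done;

again:
        done = do_rx_work(napi, budget);   /* hypothetical RX handler */
        if (done < budget) {
                netif_rx_complete(napi);   /* dev argument is gone */
                rx_irq_enable();           /* hypothetical helper */
                /* Packets may have arrived between the last poll and
                 * re-enabling the IRQ; netif_rx_reschedule(napi) re-arms
                 * NAPI so they are not stranded. */
                if (rx_pending() && netif_rx_reschedule(napi))
                        goto again;
        }
        return done;
}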
drivers/net/arm/ks8695net.c
drivers/net/eexpress.h
@@ -68,17 +68,17 @@
  */
 
 /* these functions take the SCB status word and test the relevant status bit */
-#define SCB_complete(s) ((s&0x8000)!=0)
-#define SCB_rxdframe(s) ((s&0x4000)!=0)
-#define SCB_CUdead(s) ((s&0x2000)!=0)
-#define SCB_RUdead(s) ((s&0x1000)!=0)
-#define SCB_ack(s) (s & 0xf000)
+#define SCB_complete(s) (((s) & 0x8000) != 0)
+#define SCB_rxdframe(s) (((s) & 0x4000) != 0)
+#define SCB_CUdead(s) (((s) & 0x2000) != 0)
+#define SCB_RUdead(s) (((s) & 0x1000) != 0)
+#define SCB_ack(s) ((s) & 0xf000)
 
 /* Command unit status: 0=idle, 1=suspended, 2=active */
-#define SCB_CUstat(s) ((s&0x0300)>>8)
+#define SCB_CUstat(s) (((s)&0x0300)>>8)
 
 /* Receive unit status: 0=idle, 1=suspended, 2=out of resources, 4=ready */
-#define SCB_RUstat(s) ((s&0x0070)>>4)
+#define SCB_RUstat(s) (((s)&0x0070)>>4)
 
 /* SCB commands */
 #define SCB_CUnop 0x0000
@@ -98,18 +98,18 @@
  * Command block defines
  */
 
-#define Stat_Done(s) ((s&0x8000)!=0)
-#define Stat_Busy(s) ((s&0x4000)!=0)
-#define Stat_OK(s) ((s&0x2000)!=0)
-#define Stat_Abort(s) ((s&0x1000)!=0)
-#define Stat_STFail ((s&0x0800)!=0)
-#define Stat_TNoCar(s) ((s&0x0400)!=0)
-#define Stat_TNoCTS(s) ((s&0x0200)!=0)
-#define Stat_TNoDMA(s) ((s&0x0100)!=0)
-#define Stat_TDefer(s) ((s&0x0080)!=0)
-#define Stat_TColl(s) ((s&0x0040)!=0)
-#define Stat_TXColl(s) ((s&0x0020)!=0)
-#define Stat_NoColl(s) (s&0x000f)
+#define Stat_Done(s) (((s) & 0x8000) != 0)
+#define Stat_Busy(s) (((s) & 0x4000) != 0)
+#define Stat_OK(s) (((s) & 0x2000) != 0)
+#define Stat_Abort(s) (((s) & 0x1000) != 0)
+#define Stat_STFail (((s) & 0x0800) != 0)
+#define Stat_TNoCar(s) (((s) & 0x0400) != 0)
+#define Stat_TNoCTS(s) (((s) & 0x0200) != 0)
+#define Stat_TNoDMA(s) (((s) & 0x0100) != 0)
+#define Stat_TDefer(s) (((s) & 0x0080) != 0)
+#define Stat_TColl(s) (((s) & 0x0040) != 0)
+#define Stat_TXColl(s) (((s) & 0x0020) != 0)
+#define Stat_NoColl(s) ((s) & 0x000f)
 
 /* Cmd_END will end AFTER the command if this is the first
  * command block after an SCB_CUstart, but BEFORE the command
 
@@ -136,16 +136,16 @@
  * Frame Descriptor (Receive block) defines
  */
 
-#define FD_Done(s) ((s&0x8000)!=0)
-#define FD_Busy(s) ((s&0x4000)!=0)
-#define FD_OK(s) ((s&0x2000)!=0)
+#define FD_Done(s) (((s) & 0x8000) != 0)
+#define FD_Busy(s) (((s) & 0x4000) != 0)
+#define FD_OK(s) (((s) & 0x2000) != 0)
 
-#define FD_CRC(s) ((s&0x0800)!=0)
-#define FD_Align(s) ((s&0x0400)!=0)
-#define FD_Resrc(s) ((s&0x0200)!=0)
-#define FD_DMA(s) ((s&0x0100)!=0)
-#define FD_Short(s) ((s&0x0080)!=0)
-#define FD_NoEOF(s) ((s&0x0040)!=0)
+#define FD_CRC(s) (((s) & 0x0800) != 0)
+#define FD_Align(s) (((s) & 0x0400) != 0)
+#define FD_Resrc(s) (((s) & 0x0200) != 0)
+#define FD_DMA(s) (((s) & 0x0100) != 0)
+#define FD_Short(s) (((s) & 0x0080) != 0)
+#define FD_NoEOF(s) (((s) & 0x0040) != 0)
 
 struct rfd_header {
     volatile unsigned long flags;
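The header side of the EtherExpress16 fix is classic macro hygiene: every use of the parameter is wrapped in parentheses so that expression arguments keep their intended grouping. A small illustration (the OLD/NEW names are for contrast only, not from the header):

#define SCB_CUstat_OLD(s) ((s & 0x0300) >> 8)
#define SCB_CUstat_NEW(s) (((s) & 0x0300) >> 8)

/* With an expression argument the old form misbinds, because '&' binds
 * tighter than '|':
 *   SCB_CUstat_OLD(x | y)  ->  ((x | (y & 0x0300)) >> 8)    wrong
 *   SCB_CUstat_NEW(x | y)  ->  (((x | y) & 0x0300) >> 8)    intended
 */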
drivers/net/mlx4/en_main.c
@@ -169,13 +169,10 @@
     mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
         mlx4_info(mdev, "Using %d tx rings for port:%d\n",
                   mdev->profile.prof[i].tx_ring_num, i);
-        if (!mdev->profile.prof[i].rx_ring_num) {
-            mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors;
-            mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
-                      mdev->profile.prof[i].rx_ring_num, i);
-        } else
-            mlx4_info(mdev, "Using %d rx rings for port:%d\n",
-                      mdev->profile.prof[i].rx_ring_num, i);
+        mdev->profile.prof[i].rx_ring_num =
+            min_t(int, dev->caps.num_comp_vectors, MAX_RX_RINGS);
+        mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
+                  mdev->profile.prof[i].rx_ring_num, i);
     }
 
     /* Create our own workqueue for reset/multicast tasks
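With the RX-ring module parameters gone (see the en_params.c hunks below), every port now gets one RX ring per completion (interrupt) vector, clamped by the driver limit. A hedged illustration of the clamp; the value of MAX_RX_RINGS here is assumed for the example, the real constant lives in mlx4_en.h:

#include <linux/kernel.h>   /* min_t() */

#define MAX_RX_RINGS 16     /* assumed value, for illustration only */

static int default_rx_rings(int num_comp_vectors)
{
        /* One ring per interrupt vector, never more than the cap:
         * 8 vectors -> 8 rings, 64 vectors -> 16 rings. */
        return min_t(int, num_comp_vectors, MAX_RX_RINGS);
}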
drivers/net/mlx4/en_netdev.c
@@ -552,7 +552,7 @@
 }
 
 
-static int mlx4_en_start_port(struct net_device *dev)
+int mlx4_en_start_port(struct net_device *dev)
 {
     struct mlx4_en_priv *priv = netdev_priv(dev);
     struct mlx4_en_dev *mdev = priv->mdev;
@@ -707,7 +707,7 @@
 }
 
 
-static void mlx4_en_stop_port(struct net_device *dev)
+void mlx4_en_stop_port(struct net_device *dev)
 {
     struct mlx4_en_priv *priv = netdev_priv(dev);
     struct mlx4_en_dev *mdev = priv->mdev;
@@ -826,7 +826,7 @@
     return 0;
 }
 
-static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
     int i;
 
@@ -845,7 +845,7 @@
     }
 }
 
-static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
     struct mlx4_en_dev *mdev = priv->mdev;
     struct mlx4_en_port_profile *prof = priv->prof;
drivers/net/mlx4/en_params.c
@@ -65,15 +65,6 @@
 MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
                  " Per priority bit mask");
 
-MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
-MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
-
-MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
-MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
-MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
-MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
-
-
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 {
     struct mlx4_en_profile *params = &mdev->profile;
@@ -87,6 +78,8 @@
         params->prof[i].rx_ppp = pfcrx;
         params->prof[i].tx_pause = 1;
         params->prof[i].tx_ppp = pfctx;
+        params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
+        params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
     }
     if (pfcrx || pfctx) {
         params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
 
@@ -95,32 +88,7 @@
         params->prof[1].tx_ring_num = 1;
         params->prof[2].tx_ring_num = 1;
     }
-    params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
-    params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
 
-    if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
-        tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
-    params->prof[1].tx_ring_size =
-        (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
-        MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
-
-    if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
-        tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
-    params->prof[2].tx_ring_size =
-        (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
-        MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
-
-    if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
-        rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
-    params->prof[1].rx_ring_size =
-        (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
-        MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
-
-    if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
-        rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
-    params->prof[2].rx_ring_size =
-        (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
-        MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
     return 0;
 }
 
@@ -417,6 +385,54 @@
     pause->rx_pause = priv->prof->rx_pause;
 }
 
+static int mlx4_en_set_ringparam(struct net_device *dev,
+                                 struct ethtool_ringparam *param)
+{
+    struct mlx4_en_priv *priv = netdev_priv(dev);
+    struct mlx4_en_dev *mdev = priv->mdev;
+    u32 rx_size, tx_size;
+    int port_up = 0;
+    int err = 0;
+
+    if (param->rx_jumbo_pending || param->rx_mini_pending)
+        return -EINVAL;
+
+    rx_size = roundup_pow_of_two(param->rx_pending);
+    rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
+    tx_size = roundup_pow_of_two(param->tx_pending);
+    tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
+
+    if (rx_size == priv->prof->rx_ring_size &&
+        tx_size == priv->prof->tx_ring_size)
+        return 0;
+
+    mutex_lock(&mdev->state_lock);
+    if (priv->port_up) {
+        port_up = 1;
+        mlx4_en_stop_port(dev);
+    }
+
+    mlx4_en_free_resources(priv);
+
+    priv->prof->tx_ring_size = tx_size;
+    priv->prof->rx_ring_size = rx_size;
+
+    err = mlx4_en_alloc_resources(priv);
+    if (err) {
+        mlx4_err(mdev, "Failed reallocating port resources\n");
+        goto out;
+    }
+    if (port_up) {
+        err = mlx4_en_start_port(dev);
+        if (err)
+            mlx4_err(mdev, "Failed starting port\n");
+    }
+
+out:
+    mutex_unlock(&mdev->state_lock);
+    return err;
+}
+
 static void mlx4_en_get_ringparam(struct net_device *dev,
                                   struct ethtool_ringparam *param)
 {
@@ -456,6 +472,7 @@
     .get_pauseparam = mlx4_en_get_pauseparam,
     .set_pauseparam = mlx4_en_set_pauseparam,
     .get_ringparam = mlx4_en_get_ringparam,
+    .set_ringparam = mlx4_en_set_ringparam,
     .get_flags = ethtool_op_get_flags,
     .set_flags = ethtool_op_set_flags,
 };
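The new ethtool hook replaces the old per-port module parameters: a requested ring size is normalized the same way the parameters used to be (rounded up to a power of two, then clamped to the driver minimum), and the port is stopped and restarted around the reallocation. A small sketch of just the normalization step; the function name is illustrative, not from the driver:

static u32 normalize_ring_size(u32 requested, u32 min_size)
{
        /* e.g. roundup_pow_of_two(1000) == 1024; then clamp from below,
         * so a request of 10 with min_size 64 yields 64. */
        u32 size = roundup_pow_of_two(requested);

        return max_t(u32, size, min_size);
}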
drivers/net/mlx4/mlx4_en.h
@@ -489,6 +489,12 @@
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                         struct mlx4_en_port_profile *prof);
 
+int mlx4_en_start_port(struct net_device *dev);
+void mlx4_en_stop_port(struct net_device *dev);
+
+void mlx4_en_free_resources(struct mlx4_en_priv *priv);
+int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+
 int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
 
 int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
drivers/net/pasemi_mac.c
@@ -954,7 +954,6 @@
 {
     const struct pasemi_mac_rxring *rxring = data;
     struct pasemi_mac *mac = rxring->mac;
-    struct net_device *dev = mac->netdev;
     const struct pasemi_dmachan *chan = &rxring->chan;
     unsigned int reg;
 
@@ -1634,7 +1633,6 @@
 static int pasemi_mac_poll(struct napi_struct *napi, int budget)
 {
     struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
-    struct net_device *dev = mac->netdev;
     int pkts;
 
     pasemi_mac_clean_tx(tx_ring(mac));
drivers/net/smsc911x.c
@@ -1484,13 +1484,13 @@
     }
 
     if (likely(intsts & inten & INT_STS_RSFL_)) {
-        if (likely(netif_rx_schedule_prep(dev, &pdata->napi))) {
+        if (likely(netif_rx_schedule_prep(&pdata->napi))) {
             /* Disable Rx interrupts */
             temp = smsc911x_reg_read(pdata, INT_EN);
             temp &= (~INT_EN_RSFL_EN_);
             smsc911x_reg_write(pdata, INT_EN, temp);
             /* Schedule a NAPI poll */
-            __netif_rx_schedule(dev, &pdata->napi);
+            __netif_rx_schedule(&pdata->napi);
         } else {
             SMSC_WARNING(RX_ERR,
                          "netif_rx_schedule_prep failed");
drivers/net/spider_net.c
@@ -1277,7 +1277,6 @@
 static int spider_net_poll(struct napi_struct *napi, int budget)
 {
     struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
-    struct net_device *netdev = card->netdev;
     int packets_done = 0;
 
     while (packets_done < budget) {
drivers/net/tun.c
drivers/net/usb/hso.c
@@ -2831,7 +2831,7 @@
     for (i = 0; i < iface->desc.bNumEndpoints; i++) {
         endp = &iface->endpoint[i].desc;
         if (((endp->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == dir) &&
-            ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == type))
+            (usb_endpoint_type(endp) == type))
             return endp;
     }
 
drivers/net/wan/ixp4xx_hss.c
@@ -654,7 +654,7 @@
         netif_rx_complete(dev, napi);
         qmgr_enable_irq(rxq);
         if (!qmgr_stat_empty(rxq) &&
-            netif_rx_reschedule(dev, napi)) {
+            netif_rx_reschedule(napi)) {
 #if DEBUG_RX
             printk(KERN_DEBUG "%s: hss_hdlc_poll"
                    " netif_rx_reschedule succeeded\n",
drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1065,8 +1065,7 @@
     /* Find bulk out endpoint */
     endpoint = &iface_desc->endpoint[1].desc;
     if ((endpoint->bEndpointAddress & USB_TYPE_MASK) == USB_DIR_OUT &&
-        (endpoint->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
-        USB_ENDPOINT_XFER_BULK) {
+        usb_endpoint_xfer_bulk(endpoint)) {
         bulk_out_ep = endpoint->bEndpointAddress;
     } else {
         dev_err(&udev->dev,
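Both USB hunks swap open-coded bmAttributes masking for the accessors from <linux/usb.h>. The helpers are thin wrappers; roughly (paraphrased from the kernel headers of this era, not copied from this commit):

static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
{
        return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
}

static inline int usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
{
        return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
                USB_ENDPOINT_XFER_BULK;
}

Using the named helpers keeps the masking logic in one place and makes call sites like the two above read as intent rather than bit arithmetic.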
net/core/dev.c
@@ -5066,13 +5066,14 @@
 
 static void __net_exit default_device_exit(struct net *net)
 {
-    struct net_device *dev, *next;
+    struct net_device *dev;
     /*
      * Push all migratable of the network devices back to the
      * initial network namespace
      */
     rtnl_lock();
-    for_each_netdev_safe(net, dev, next) {
+restart:
+    for_each_netdev(net, dev) {
         int err;
         char fb_name[IFNAMSIZ];
 
@@ -5083,7 +5084,7 @@
         /* Delete virtual devices */
         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
             dev->rtnl_link_ops->dellink(dev);
-            continue;
+            goto restart;
         }
 
         /* Push remaing network devices to init_net */
@@ -5094,6 +5095,7 @@
             __func__, dev->name, err);
             BUG();
         }
+        goto restart;
     }
     rtnl_unlock();
 }
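for_each_netdev_safe() only protects against deleting the current node, but dellink() on one device (a VLAN or bonding master, say) can unregister other devices as well, leaving the prefetched next pointer dangling; moving a device to init_net likewise unlinks it from this namespace's list. Restarting the scan after every modification is the safe pattern. A generic sketch, where needs_action() and act_on() are hypothetical stand-ins:

static void sweep_devices(struct net *net)
{
        struct net_device *dev;

restart:
        for_each_netdev(net, dev) {
                if (needs_action(dev)) {   /* hypothetical predicate */
                        act_on(dev);       /* may unlink other entries too */
                        goto restart;      /* the iterator is now stale */
                }
        }
}

This is quadratic in the worst case, but namespace teardown is rare and the per-namespace device list is short, so the simplicity wins.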
net/core/neighbour.c
@@ -2414,7 +2414,7 @@
     if (*pos == 0)
         return SEQ_START_TOKEN;
 
-    for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
@@ -2429,7 +2429,7 @@
     struct neigh_table *tbl = pde->data;
     int cpu;
 
-    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
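All of the NR_CPUS hunks in this merge (here, in route.c, and in the two conntrack files) are the same preparation: nr_cpu_ids is the highest possible CPU id plus one, so bounding the scan by it still visits every possible CPU while skipping the potentially huge gap up to the compile-time NR_CPUS ceiling. The shared shape of these seq_file cursors, sketched with a hypothetical per-cpu table:

static void *percpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        int cpu;

        for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {   /* was NR_CPUS */
                if (!cpu_possible(cpu))
                        continue;
                *pos = cpu + 1;
                return per_cpu_ptr(example_stats, cpu);  /* hypothetical data */
        }
        return NULL;
}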
net/dccp/proto.c
@@ -964,7 +964,6 @@
     state = sk->sk_state;
     sock_hold(sk);
     sock_orphan(sk);
-    percpu_counter_inc(sk->sk_prot->orphan_count);
 
     /*
      * It is the last release_sock in its life. It will remove backlog.
@@ -977,6 +976,8 @@
     local_bh_disable();
     bh_lock_sock(sk);
     WARN_ON(sock_owned_by_user(sk));
+
+    percpu_counter_inc(sk->sk_prot->orphan_count);
 
     /* Have we already been destroyed by a softirq or backlog? */
     if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
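This hunk and the inet/tcp hunks below are the "net: Fix percpu counters deadlock" change. percpu_counter updates take an internal spinlock whenever a per-cpu delta overflows its batch; since these counters are also updated from softirq context, a process-context updater must run with bottom halves disabled, or a softirq arriving on the same CPU can spin on the lock its own CPU already holds. Moving the increment after local_bh_disable()/bh_lock_sock(), or wrapping it directly as tcp_ipv4.c and tcp_ipv6.c do below, enforces that. The general rule, sketched with a hypothetical counter:

/* Counter shared with softirq context: always update it with BHs off. */
local_bh_disable();
percpu_counter_inc(&shared_counter);   /* hypothetical counter */
local_bh_enable();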
net/ipv4/inet_connection_sock.c
@@ -633,8 +633,6 @@
 
     acc_req = req->dl_next;
 
-    percpu_counter_inc(sk->sk_prot->orphan_count);
-
     local_bh_disable();
     bh_lock_sock(child);
     WARN_ON(sock_owned_by_user(child));
@@ -643,6 +641,8 @@
     sk->sk_prot->disconnect(child, O_NONBLOCK);
 
     sock_orphan(child);
+
+    percpu_counter_inc(sk->sk_prot->orphan_count);
 
     inet_csk_destroy_sock(child);
 
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
@@ -291,7 +291,7 @@
     if (*pos == 0)
         return SEQ_START_TOKEN;
 
-    for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
@@ -306,7 +306,7 @@
     struct net *net = seq_file_net(seq);
     int cpu;
 
-    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
net/ipv4/proc.c
@@ -38,6 +38,7 @@
 #include <net/tcp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
+#include <linux/bottom_half.h>
 #include <linux/inetdevice.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
 
@@ -50,13 +51,17 @@
 static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
     struct net *net = seq->private;
+    int orphans, sockets;
 
+    local_bh_disable();
+    orphans = percpu_counter_sum_positive(&tcp_orphan_count),
+    sockets = percpu_counter_sum_positive(&tcp_sockets_allocated),
+    local_bh_enable();
+
     socket_seq_show(seq);
     seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
-               sock_prot_inuse_get(net, &tcp_prot),
-               (int)percpu_counter_sum_positive(&tcp_orphan_count),
-               tcp_death_row.tw_count,
-               (int)percpu_counter_sum_positive(&tcp_sockets_allocated),
+               sock_prot_inuse_get(net, &tcp_prot), orphans,
+               tcp_death_row.tw_count, sockets,
                atomic_read(&tcp_memory_allocated));
     seq_printf(seq, "UDP: inuse %d mem %d\n",
                sock_prot_inuse_get(net, &udp_prot),
net/ipv4/route.c
@@ -429,7 +429,7 @@
     if (*pos == 0)
         return SEQ_START_TOKEN;
 
-    for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
@@ -442,7 +442,7 @@
 {
     int cpu;
 
-    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu+1;
net/ipv4/tcp.c
@@ -1836,7 +1836,6 @@
     state = sk->sk_state;
     sock_hold(sk);
     sock_orphan(sk);
-    percpu_counter_inc(sk->sk_prot->orphan_count);
 
     /* It is the last release_sock in its life. It will remove backlog. */
     release_sock(sk);
@@ -1848,6 +1847,8 @@
     local_bh_disable();
     bh_lock_sock(sk);
     WARN_ON(sock_owned_by_user(sk));
+
+    percpu_counter_inc(sk->sk_prot->orphan_count);
 
     /* Have we already been destroyed by a softirq or backlog? */
     if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
net/ipv4/tcp_ipv4.c
@@ -51,6 +51,7 @@
  */
 
 
+#include <linux/bottom_half.h>
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/module.h>
 
@@ -1797,7 +1798,9 @@
     sk->sk_sndbuf = sysctl_tcp_wmem[1];
     sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
+    local_bh_disable();
     percpu_counter_inc(&tcp_sockets_allocated);
+    local_bh_enable();
 
     return 0;
 }
net/ipv6/tcp_ipv6.c
@@ -23,6 +23,7 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <linux/bottom_half.h>
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/types.h>
 
@@ -1830,7 +1831,9 @@
     sk->sk_sndbuf = sysctl_tcp_wmem[1];
     sk->sk_rcvbuf = sysctl_tcp_rmem[1];
 
+    local_bh_disable();
     percpu_counter_inc(&tcp_sockets_allocated);
+    local_bh_enable();
 
     return 0;
 }
net/netfilter/ipvs/ip_vs_lblc.c
@@ -507,7 +507,7 @@
     /* No cache entry or it is invalid, time to schedule */
     dest = __ip_vs_lblc_schedule(svc);
     if (!dest) {
-        IP_VS_DBG(1, "no destination available\n");
+        IP_VS_ERR_RL("LBLC: no destination available\n");
         return NULL;
     }
 
net/netfilter/ipvs/ip_vs_lblcr.c
@@ -690,7 +690,7 @@
     /* The cache entry is invalid, time to schedule */
     dest = __ip_vs_lblcr_schedule(svc);
     if (!dest) {
-        IP_VS_DBG(1, "no destination available\n");
+        IP_VS_ERR_RL("LBLCR: no destination available\n");
         read_unlock(&svc->sched_lock);
         return NULL;
     }
net/netfilter/ipvs/ip_vs_lc.c
@@ -66,11 +66,15 @@
         }
     }
 
-    if (least)
-        IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d inactconns %d\n",
-                      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
-                      atomic_read(&least->activeconns),
-                      atomic_read(&least->inactconns));
+    if (!least)
+        IP_VS_ERR_RL("LC: no destination available\n");
+    else
+        IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
+                      "inactconns %d\n",
+                      IP_VS_DBG_ADDR(svc->af, &least->addr),
+                      ntohs(least->port),
+                      atomic_read(&least->activeconns),
+                      atomic_read(&least->inactconns));
 
     return least;
 }
net/netfilter/ipvs/ip_vs_nq.c
net/netfilter/ipvs/ip_vs_rr.c
net/netfilter/ipvs/ip_vs_sed.c
net/netfilter/ipvs/ip_vs_sh.c
net/netfilter/ipvs/ip_vs_wlc.c
net/netfilter/ipvs/ip_vs_wrr.c
@@ -155,6 +155,8 @@
 
     if (mark->cl == mark->cl->next) {
         /* no dest entry */
+        IP_VS_ERR_RL("WRR: no destination available: "
+                     "no destinations present\n");
         dest = NULL;
         goto out;
     }
@@ -168,8 +170,8 @@
      */
     if (mark->cw == 0) {
         mark->cl = &svc->destinations;
-        IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
-                     "no available servers\n");
+        IP_VS_ERR_RL("WRR: no destination "
+                     "available\n");
         dest = NULL;
         goto out;
     }
@@ -191,6 +193,8 @@
     /* back to the start, and no dest is found.
        It is only possible when all dests are OVERLOADED */
     dest = NULL;
+    IP_VS_ERR_RL("WRR: no destination available: "
+                 "all destinations are overloaded\n");
     goto out;
 }
 }
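IP_VS_ERR_RL is IPVS's rate-limited error printk, which is what lets every scheduler report "no destination available" consistently without flooding the log when a virtual service loses all its real servers under load. Roughly, as an assumed simplification of the real macro in the IPVS headers:

#define IP_VS_ERR_RL(msg, ...)                                          \
        do {                                                            \
                if (net_ratelimit())                                    \
                        printk(KERN_ERR "IPVS: " msg, ##__VA_ARGS__);   \
        } while (0)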
net/netfilter/nf_conntrack_standalone.c
@@ -200,7 +200,7 @@
     if (*pos == 0)
         return SEQ_START_TOKEN;
 
-    for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu + 1;
@@ -215,7 +215,7 @@
     struct net *net = seq_file_net(seq);
     int cpu;
 
-    for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+    for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
         if (!cpu_possible(cpu))
             continue;
         *pos = cpu + 1;
net/sched/Kconfig
@@ -335,9 +335,6 @@
       Say Y here if you want to classify packets based on the control
       cgroup of their process.
 
-      To compile this code as a module, choose M here: the
-      module will be called cls_cgroup.
-
 config NET_EMATCH
     bool "Extended Matches"
     select NET_CLS
net/sched/cls_cgroup.c
@@ -24,12 +24,18 @@
     u32 classid;
 };
 
-static inline struct cgroup_cls_state *net_cls_state(struct cgroup *cgrp)
+static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
 {
-    return (struct cgroup_cls_state *)
-        cgroup_subsys_state(cgrp, net_cls_subsys_id);
+    return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
+                        struct cgroup_cls_state, css);
 }
 
+static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
+{
+    return container_of(task_subsys_state(p, net_cls_subsys_id),
+                        struct cgroup_cls_state, css);
+}
+
 static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
                                                struct cgroup *cgrp)
 {
 
 
@@ -39,19 +45,19 @@
         return ERR_PTR(-ENOMEM);
 
     if (cgrp->parent)
-        cs->classid = net_cls_state(cgrp->parent)->classid;
+        cs->classid = cgrp_cls_state(cgrp->parent)->classid;
 
     return &cs->css;
 }
 
 static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 {
-    kfree(ss);
+    kfree(cgrp_cls_state(cgrp));
 }
 
 static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
 {
-    return net_cls_state(cgrp)->classid;
+    return cgrp_cls_state(cgrp)->classid;
 }
 
 static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
@@ -59,7 +65,7 @@
     if (!cgroup_lock_live_group(cgrp))
         return -ENODEV;
 
-    net_cls_state(cgrp)->classid = (u32) value;
+    cgrp_cls_state(cgrp)->classid = (u32) value;
 
     cgroup_unlock();
 
@@ -115,8 +121,7 @@
         return -1;
 
     rcu_read_lock();
-    cs = (struct cgroup_cls_state *) task_subsys_state(current,
-                                                       net_cls_subsys_id);
+    cs = task_cls_state(current);
     if (cs->classid && tcf_em_tree_match(skb, &head->ematches, NULL)) {
         res->classid = cs->classid;
         res->class = 0;
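Two bugs hide in the old cls_cgroup code: cgrp_destroy() freed the subsystem pointer ss instead of the per-cgroup state (the oops the commit title refers to), and the raw casts from cgroup_subsys_state were correct only as long as css happened to be the first member of cgroup_cls_state. container_of() removes that fragility by computing the enclosing structure from the member's offset. A self-contained illustration of the pattern (names are illustrative):

struct example_state {
        struct cgroup_subsys_state css;   /* need not be the first member */
        u32 classid;
};

static inline struct example_state *to_example_state(struct cgroup_subsys_state *css)
{
        /* Equivalent to: (struct example_state *)
         *     ((char *)css - offsetof(struct example_state, css)) */
        return container_of(css, struct example_state, css);
}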
net/xfrm/xfrm_proc.c
@@ -44,27 +44,14 @@
     SNMP_MIB_SENTINEL
 };
 
-static unsigned long
-fold_field(void *mib[], int offt)
-{
-    unsigned long res = 0;
-    int i;
-
-    for_each_possible_cpu(i) {
-        res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
-        res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
-    }
-    return res;
-}
-
 static int xfrm_statistics_seq_show(struct seq_file *seq, void *v)
 {
     struct net *net = seq->private;
     int i;
     for (i=0; xfrm_mib_list[i].name; i++)
         seq_printf(seq, "%-24s\t%lu\n", xfrm_mib_list[i].name,
-                   fold_field((void **)net->mib.xfrm_statistics,
-                              xfrm_mib_list[i].entry));
+                   snmp_fold_field((void **)net->mib.xfrm_statistics,
+                                   xfrm_mib_list[i].entry));
     return 0;
 }
 
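The removed fold_field() was a private copy of logic that already exists as the generic SNMP helper: both sum one counter slot of the two per-cpu MIB arrays across every possible CPU. Roughly what the generic helper of this era does (paraphrased from the net core, shown here only to make the equivalence concrete):

unsigned long snmp_fold_field(void *mib[], int offt)
{
        unsigned long res = 0;
        int i;

        /* mib[0] and mib[1] are the two per-cpu halves of the MIB;
         * offt selects one counter within each per-cpu block. */
        for_each_possible_cpu(i) {
                res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
                res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
        }
        return res;
}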