Commit e0a5c57848f7690a247bb8af4fa412844b0b00bb

Authored by Linus Torvalds

Merge branch 'upstream-fixes' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

Showing 2 changed files

drivers/net/e1000/e1000.h
... ... @@ -225,9 +225,6 @@
225 225 struct e1000_ps_page *ps_page;
226 226 struct e1000_ps_page_dma *ps_page_dma;
227 227  
228   - struct sk_buff *rx_skb_top;
229   - struct sk_buff *rx_skb_prev;
230   -
231 228 /* cpu for rx queue */
232 229 int cpu;
233 230  
drivers/net/e1000/e1000_main.c
... ... @@ -103,7 +103,7 @@
103 103 #else
104 104 #define DRIVERNAPI "-NAPI"
105 105 #endif
106   -#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
  106 +#define DRV_VERSION "6.3.9-k4"DRIVERNAPI
107 107 char e1000_driver_version[] = DRV_VERSION;
108 108 static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
109 109  
... ... @@ -1635,8 +1635,6 @@
1635 1635  
1636 1636 rxdr->next_to_clean = 0;
1637 1637 rxdr->next_to_use = 0;
1638   - rxdr->rx_skb_top = NULL;
1639   - rxdr->rx_skb_prev = NULL;
1640 1638  
1641 1639 return 0;
1642 1640 }
... ... @@ -1713,8 +1711,23 @@
1713 1711 rctl |= adapter->rx_buffer_len << 0x11;
1714 1712 } else {
1715 1713 rctl &= ~E1000_RCTL_SZ_4096;
1716   - rctl &= ~E1000_RCTL_BSEX;
1717   - rctl |= E1000_RCTL_SZ_2048;
  1714 + rctl |= E1000_RCTL_BSEX;
  1715 + switch (adapter->rx_buffer_len) {
  1716 + case E1000_RXBUFFER_2048:
  1717 + default:
  1718 + rctl |= E1000_RCTL_SZ_2048;
  1719 + rctl &= ~E1000_RCTL_BSEX;
  1720 + break;
  1721 + case E1000_RXBUFFER_4096:
  1722 + rctl |= E1000_RCTL_SZ_4096;
  1723 + break;
  1724 + case E1000_RXBUFFER_8192:
  1725 + rctl |= E1000_RCTL_SZ_8192;
  1726 + break;
  1727 + case E1000_RXBUFFER_16384:
  1728 + rctl |= E1000_RCTL_SZ_16384;
  1729 + break;
  1730 + }
1718 1731 }
1719 1732  
1720 1733 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
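
The switch statement added in this hunk leans on a quirk of the RCTL register encoding: the extended buffer sizes share their BSIZE bit patterns with the small sizes, and the BSEX bit selects which interpretation the hardware applies. A minimal sketch of the encoding, with bit values as defined in e1000_hw.h (quoted from memory, so verify against the header):

    /* BSIZE field (bits 17:16) with BSEX (bit 25) clear */
    #define E1000_RCTL_SZ_2048  0x00000000
    #define E1000_RCTL_SZ_1024  0x00010000
    /* the same BSIZE encodings, reinterpreted when BSEX is set */
    #define E1000_RCTL_SZ_16384 0x00010000   /* aliases SZ_1024 */
    #define E1000_RCTL_SZ_8192  0x00020000   /* aliases SZ_512  */
    #define E1000_RCTL_SZ_4096  0x00030000   /* aliases SZ_256  */
    #define E1000_RCTL_BSEX     0x02000000   /* buffer size extension */

This aliasing is why the hunk first clears E1000_RCTL_SZ_4096 (which zeroes both BSIZE bits) and sets BSEX unconditionally, then clears BSEX again only in the 2048/default case: every extended size keeps BSEX, while 2048 falls back to the non-extended encoding.
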
... ... @@ -2107,16 +2120,6 @@
2107 2120 }
2108 2121 }
2109 2122  
2110   - /* there also may be some cached data in our adapter */
2111   - if (rx_ring->rx_skb_top) {
2112   - dev_kfree_skb(rx_ring->rx_skb_top);
2113   -
2114   - /* rx_skb_prev will be wiped out by rx_skb_top */
2115   - rx_ring->rx_skb_top = NULL;
2116   - rx_ring->rx_skb_prev = NULL;
2117   - }
2118   -
2119   -
2120 2123 size = sizeof(struct e1000_buffer) * rx_ring->count;
2121 2124 memset(rx_ring->buffer_info, 0, size);
2122 2125 size = sizeof(struct e1000_ps_page) * rx_ring->count;
2123 2126  
2124 2127  
2125 2128  
... ... @@ -3106,24 +3109,27 @@
3106 3109 break;
3107 3110 }
3108 3111  
3109   - /* since the driver code now supports splitting a packet across
3110   - * multiple descriptors, most of the fifo related limitations on
3111   - * jumbo frame traffic have gone away.
3112   - * simply use 2k descriptors for everything.
3113   - *
3114   - * NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3115   - * means we reserve 2 more, this pushes us to allocate from the next
3116   - * larger slab size
3117   - * i.e. RXBUFFER_2048 --> size-4096 slab */
3118 3112  
3119   - /* recent hardware supports 1KB granularity */
3120 3113 if (adapter->hw.mac_type > e1000_82547_rev_2) {
3121   - adapter->rx_buffer_len =
3122   - ((max_frame < E1000_RXBUFFER_2048) ?
3123   - max_frame : E1000_RXBUFFER_2048);
  3114 + adapter->rx_buffer_len = max_frame;
3124 3115 E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
3125   - } else
3126   - adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  3116 + } else {
  3117 + if(unlikely((adapter->hw.mac_type < e1000_82543) &&
  3118 + (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
  3119 + DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
  3120 + "on 82542\n");
  3121 + return -EINVAL;
  3122 + } else {
  3123 + if(max_frame <= E1000_RXBUFFER_2048)
  3124 + adapter->rx_buffer_len = E1000_RXBUFFER_2048;
  3125 + else if(max_frame <= E1000_RXBUFFER_4096)
  3126 + adapter->rx_buffer_len = E1000_RXBUFFER_4096;
  3127 + else if(max_frame <= E1000_RXBUFFER_8192)
  3128 + adapter->rx_buffer_len = E1000_RXBUFFER_8192;
  3129 + else if(max_frame <= E1000_RXBUFFER_16384)
  3130 + adapter->rx_buffer_len = E1000_RXBUFFER_16384;
  3131 + }
  3132 + }
3127 3133  
3128 3134 netdev->mtu = new_mtu;
3129 3135  
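
The mac_type > e1000_82547_rev_2 path above sizes the buffer at 1 KB granularity via E1000_ROUNDUP, which the driver defines along the lines of the sketch below (an in-place round-up to the next multiple of a power-of-two size; the exact macro should be checked in e1000.h). A standalone example with two worked values, assuming that definition:

    #include <stdio.h>

    /* assumed to match E1000_ROUNDUP in e1000.h: round i up, in place,
     * to the next multiple of size (size must be a power of two) */
    #define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))

    int main(void)
    {
            unsigned long len;

            len = 1522;               /* standard max frame incl. VLAN tag */
            E1000_ROUNDUP(len, 1024);
            printf("%lu\n", len);     /* prints 2048 */

            len = 9018;               /* 9000-byte jumbo MTU + L2 overhead */
            E1000_ROUNDUP(len, 1024);
            printf("%lu\n", len);     /* prints 9216 */
            return 0;
    }

Older hardware cannot size buffers at arbitrary 1 KB steps, so the else branch instead bumps max_frame up to the nearest supported RXBUFFER size (2048/4096/8192/16384), rejecting jumbo frames outright on 82542.
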
... ... @@ -3620,7 +3626,7 @@
3620 3626 uint8_t last_byte;
3621 3627 unsigned int i;
3622 3628 int cleaned_count = 0;
3623   - boolean_t cleaned = FALSE, multi_descriptor = FALSE;
  3629 + boolean_t cleaned = FALSE;
3624 3630  
3625 3631 i = rx_ring->next_to_clean;
3626 3632 rx_desc = E1000_RX_DESC(*rx_ring, i);
3627 3633  
... ... @@ -3652,43 +3658,12 @@
3652 3658  
3653 3659 length = le16_to_cpu(rx_desc->length);
3654 3660  
3655   - skb_put(skb, length);
3656   -
3657   - if (!(status & E1000_RXD_STAT_EOP)) {
3658   - if (!rx_ring->rx_skb_top) {
3659   - rx_ring->rx_skb_top = skb;
3660   - rx_ring->rx_skb_top->len = length;
3661   - rx_ring->rx_skb_prev = skb;
3662   - } else {
3663   - if (skb_shinfo(rx_ring->rx_skb_top)->frag_list) {
3664   - rx_ring->rx_skb_prev->next = skb;
3665   - skb->prev = rx_ring->rx_skb_prev;
3666   - } else {
3667   - skb_shinfo(rx_ring->rx_skb_top)->frag_list = skb;
3668   - }
3669   - rx_ring->rx_skb_prev = skb;
3670   - rx_ring->rx_skb_top->data_len += length;
3671   - }
  3661 + if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
  3662 + /* All receives must fit into a single buffer */
  3663 + E1000_DBG("%s: Receive packet consumed multiple"
  3664 + " buffers\n", netdev->name);
  3665 + dev_kfree_skb_irq(skb);
3672 3666 goto next_desc;
3673   - } else {
3674   - if (rx_ring->rx_skb_top) {
3675   - if (skb_shinfo(rx_ring->rx_skb_top)
3676   - ->frag_list) {
3677   - rx_ring->rx_skb_prev->next = skb;
3678   - skb->prev = rx_ring->rx_skb_prev;
3679   - } else
3680   - skb_shinfo(rx_ring->rx_skb_top)
3681   - ->frag_list = skb;
3682   -
3683   - rx_ring->rx_skb_top->data_len += length;
3684   - rx_ring->rx_skb_top->len +=
3685   - rx_ring->rx_skb_top->data_len;
3686   -
3687   - skb = rx_ring->rx_skb_top;
3688   - multi_descriptor = TRUE;
3689   - rx_ring->rx_skb_top = NULL;
3690   - rx_ring->rx_skb_prev = NULL;
3691   - }
3692 3667 }
3693 3668  
3694 3669 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
... ... @@ -3712,10 +3687,7 @@
3712 3687 * performance for small packets with large amounts
3713 3688 * of reassembly being done in the stack */
3714 3689 #define E1000_CB_LENGTH 256
3715   - if ((length < E1000_CB_LENGTH) &&
3716   - !rx_ring->rx_skb_top &&
3717   - /* or maybe (status & E1000_RXD_STAT_EOP) && */
3718   - !multi_descriptor) {
  3690 + if (length < E1000_CB_LENGTH) {
3719 3691 struct sk_buff *new_skb =
3720 3692 dev_alloc_skb(length + NET_IP_ALIGN);
3721 3693 if (new_skb) {
... ... @@ -3729,7 +3701,8 @@
3729 3701 skb = new_skb;
3730 3702 skb_put(skb, length);
3731 3703 }
3732   - }
  3704 + } else
  3705 + skb_put(skb, length);
3733 3706  
3734 3707 /* end copybreak code */
3735 3708
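
Taken together, the copybreak hunks leave the receive path with the shape sketched below: frames shorter than 256 bytes are copied into a freshly allocated, right-sized skb so the full-size receive buffer can be handed straight back to the ring, which helps cache behavior when the stack reassembles many small packets. This is a simplified reconstruction of the patched flow, not a verbatim quote (the real code also offsets the memcpy by NET_IP_ALIGN and sets new_skb->dev):

    #define E1000_CB_LENGTH 256      /* copybreak threshold */

    if (length < E1000_CB_LENGTH) {
            struct sk_buff *new_skb =
                dev_alloc_skb(length + NET_IP_ALIGN);
            if (new_skb) {
                    skb_reserve(new_skb, NET_IP_ALIGN); /* align IP header */
                    memcpy(new_skb->data, skb->data, length);
                    buffer_info->skb = skb;  /* recycle the original buffer */
                    skb = new_skb;
                    skb_put(skb, length);
            }
    } else
            skb_put(skb, length);

With multi-descriptor assembly removed, the !multi_descriptor and !rx_ring->rx_skb_top guards become unnecessary, which is why the condition collapses to a plain length check.
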