Commit 232c56408861e666d2546960d1180eb2c65260bd
Committed by
Jeff Garzik
1 parent
19af35546d
Exists in
master
and in
7 other branches
pcnet32: use NET_IP_ALIGN instead of 2
Change hard-coded 2 to NET_IP_ALIGN. Added new #defines with comments. Tested on amd_64. Signed-off-by: Don Fry <pcnet32@verizon.net> Signed-off-by: Jeff Garzik <jeff@garzik.org>
Showing 1 changed file with 24 additions and 20 deletions Side-by-side Diff
drivers/net/pcnet32.c
... | ... | @@ -174,7 +174,11 @@ |
174 | 174 | #define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) |
175 | 175 | #define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) |
176 | 176 | |
177 | -#define PKT_BUF_SZ 1544 | |
177 | +#define PKT_BUF_SKB 1544 | |
178 | +/* actual buffer length after being aligned */ | |
179 | +#define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN) | |
180 | +/* chip wants twos complement of the (aligned) buffer length */ | |
181 | +#define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB) | |
178 | 182 | |
179 | 183 | /* Offsets from base I/O address. */ |
180 | 184 | #define PCNET32_WIO_RDP 0x10 |
... | ... | @@ -604,7 +608,7 @@ |
604 | 608 | /* now allocate any new buffers needed */ |
605 | 609 | for (; new < size; new++ ) { |
606 | 610 | struct sk_buff *rx_skbuff; |
607 | - new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ); | |
611 | + new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB); | |
608 | 612 | if (!(rx_skbuff = new_skb_list[new])) { |
609 | 613 | /* keep the original lists and buffers */ |
610 | 614 | if (netif_msg_drv(lp)) |
611 | 615 | |
612 | 616 | |
613 | 617 | |
... | ... | @@ -613,20 +617,20 @@ |
613 | 617 | dev->name); |
614 | 618 | goto free_all_new; |
615 | 619 | } |
616 | - skb_reserve(rx_skbuff, 2); | |
620 | + skb_reserve(rx_skbuff, NET_IP_ALIGN); | |
617 | 621 | |
618 | 622 | new_dma_addr_list[new] = |
619 | 623 | pci_map_single(lp->pci_dev, rx_skbuff->data, |
620 | - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | |
624 | + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); | |
621 | 625 | new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); |
622 | - new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); | |
626 | + new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); | |
623 | 627 | new_rx_ring[new].status = cpu_to_le16(0x8000); |
624 | 628 | } |
625 | 629 | /* and free any unneeded buffers */ |
626 | 630 | for (; new < lp->rx_ring_size; new++) { |
627 | 631 | if (lp->rx_skbuff[new]) { |
628 | 632 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], |
629 | - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | |
633 | + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); | |
630 | 634 | dev_kfree_skb(lp->rx_skbuff[new]); |
631 | 635 | } |
632 | 636 | } |
... | ... | @@ -651,7 +655,7 @@ |
651 | 655 | for (; --new >= lp->rx_ring_size; ) { |
652 | 656 | if (new_skb_list[new]) { |
653 | 657 | pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], |
654 | - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | |
658 | + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); | |
655 | 659 | dev_kfree_skb(new_skb_list[new]); |
656 | 660 | } |
657 | 661 | } |
... | ... | @@ -678,7 +682,7 @@ |
678 | 682 | wmb(); /* Make sure adapter sees owner change */ |
679 | 683 | if (lp->rx_skbuff[i]) { |
680 | 684 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], |
681 | - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | |
685 | + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); | |
682 | 686 | dev_kfree_skb_any(lp->rx_skbuff[i]); |
683 | 687 | } |
684 | 688 | lp->rx_skbuff[i] = NULL; |
... | ... | @@ -1201,7 +1205,7 @@ |
1201 | 1205 | pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; |
1202 | 1206 | |
1203 | 1207 | /* Discard oversize frames. */ |
1204 | - if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { | |
1208 | + if (unlikely(pkt_len > PKT_BUF_SIZE)) { | |
1205 | 1209 | if (netif_msg_drv(lp)) |
1206 | 1210 | printk(KERN_ERR "%s: Impossible packet size %d!\n", |
1207 | 1211 | dev->name, pkt_len); |
1208 | 1212 | |
1209 | 1213 | |
1210 | 1214 | |
... | ... | @@ -1218,26 +1222,26 @@ |
1218 | 1222 | if (pkt_len > rx_copybreak) { |
1219 | 1223 | struct sk_buff *newskb; |
1220 | 1224 | |
1221 | - if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) { | |
1222 | - skb_reserve(newskb, 2); | |
1225 | + if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) { | |
1226 | + skb_reserve(newskb, NET_IP_ALIGN); | |
1223 | 1227 | skb = lp->rx_skbuff[entry]; |
1224 | 1228 | pci_unmap_single(lp->pci_dev, |
1225 | 1229 | lp->rx_dma_addr[entry], |
1226 | - PKT_BUF_SZ - 2, | |
1230 | + PKT_BUF_SIZE, | |
1227 | 1231 | PCI_DMA_FROMDEVICE); |
1228 | 1232 | skb_put(skb, pkt_len); |
1229 | 1233 | lp->rx_skbuff[entry] = newskb; |
1230 | 1234 | lp->rx_dma_addr[entry] = |
1231 | 1235 | pci_map_single(lp->pci_dev, |
1232 | 1236 | newskb->data, |
1233 | - PKT_BUF_SZ - 2, | |
1237 | + PKT_BUF_SIZE, | |
1234 | 1238 | PCI_DMA_FROMDEVICE); |
1235 | 1239 | rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); |
1236 | 1240 | rx_in_place = 1; |
1237 | 1241 | } else |
1238 | 1242 | skb = NULL; |
1239 | 1243 | } else { |
1240 | - skb = dev_alloc_skb(pkt_len + 2); | |
1244 | + skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN); | |
1241 | 1245 | } |
1242 | 1246 | |
1243 | 1247 | if (skb == NULL) { |
... | ... | @@ -1250,7 +1254,7 @@ |
1250 | 1254 | } |
1251 | 1255 | skb->dev = dev; |
1252 | 1256 | if (!rx_in_place) { |
1253 | - skb_reserve(skb, 2); /* 16 byte align */ | |
1257 | + skb_reserve(skb, NET_IP_ALIGN); | |
1254 | 1258 | skb_put(skb, pkt_len); /* Make room */ |
1255 | 1259 | pci_dma_sync_single_for_cpu(lp->pci_dev, |
1256 | 1260 | lp->rx_dma_addr[entry], |
... | ... | @@ -1291,7 +1295,7 @@ |
1291 | 1295 | * The docs say that the buffer length isn't touched, but Andrew |
1292 | 1296 | * Boyd of QNX reports that some revs of the 79C965 clear it. |
1293 | 1297 | */ |
1294 | - rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ); | |
1298 | + rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE); | |
1295 | 1299 | wmb(); /* Make sure owner changes after others are visible */ |
1296 | 1300 | rxp->status = cpu_to_le16(0x8000); |
1297 | 1301 | entry = (++lp->cur_rx) & lp->rx_mod_mask; |
... | ... | @@ -2396,7 +2400,7 @@ |
2396 | 2400 | if (rx_skbuff == NULL) { |
2397 | 2401 | if (! |
2398 | 2402 | (rx_skbuff = lp->rx_skbuff[i] = |
2399 | - dev_alloc_skb(PKT_BUF_SZ))) { | |
2403 | + dev_alloc_skb(PKT_BUF_SKB))) { | |
2400 | 2404 | /* there is not much, we can do at this point */ |
2401 | 2405 | if (netif_msg_drv(lp)) |
2402 | 2406 | printk(KERN_ERR |
2403 | 2407 | |
2404 | 2408 | |
... | ... | @@ -2404,16 +2408,16 @@ |
2404 | 2408 | dev->name); |
2405 | 2409 | return -1; |
2406 | 2410 | } |
2407 | - skb_reserve(rx_skbuff, 2); | |
2411 | + skb_reserve(rx_skbuff, NET_IP_ALIGN); | |
2408 | 2412 | } |
2409 | 2413 | |
2410 | 2414 | rmb(); |
2411 | 2415 | if (lp->rx_dma_addr[i] == 0) |
2412 | 2416 | lp->rx_dma_addr[i] = |
2413 | 2417 | pci_map_single(lp->pci_dev, rx_skbuff->data, |
2414 | - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); | |
2418 | + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); | |
2415 | 2419 | lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); |
2416 | - lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); | |
2420 | + lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); | |
2417 | 2421 | wmb(); /* Make sure owner changes after all others are visible */ |
2418 | 2422 | lp->rx_ring[i].status = cpu_to_le16(0x8000); |
2419 | 2423 | } |