commit 0fe9f15ee8bd652242a778ddfd30aa6d97a98e23
parent 031cf19e6f
via-velocity: separate rx/tx state into structs to allow wholesale copy during MTU changes.
It should help people fix the bugs in my code :o)

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 2 files changed, 114 insertions(+), 109 deletions(-)
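Why the wholesale copy matters: once all of the receive-side bookkeeping sits in one struct, an MTU change can snapshot and restore that state with a single struct assignment instead of saving and restoring half a dozen scalar fields by hand. Below is a minimal stand-alone C sketch of the idiom; struct rx_state, struct nic and change_mtu() are simplified stand-ins invented for illustration, not the driver's actual types or call flow.

#include <stdio.h>

/* Simplified stand-ins for the driver's receive-side ring state. */
struct rx_state {
        int buf_sz;
        int dirty, curr;
        unsigned int filled;
};

struct nic {
        struct rx_state rx;
};

/* Hypothetical MTU-change flow: snapshot the whole rx state, rebuild
 * it for the new buffer size, roll back wholesale if the rebuild fails. */
static int change_mtu(struct nic *dev, int new_mtu, int rebuild_failed)
{
        struct rx_state saved = dev->rx;        /* one-line snapshot */

        dev->rx = (struct rx_state){ .buf_sz = new_mtu + 32 };
        if (rebuild_failed) {
                dev->rx = saved;                /* one-line rollback */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct nic dev = { .rx = { .buf_sz = 1536, .filled = 4 } };

        change_mtu(&dev, 9000, 0);
        printf("buf_sz after MTU change: %d\n", dev.rx.buf_sz);
        return 0;
}

The patch below applies the same treatment to the transmit side, grouping the per-queue arrays into struct tx_info so they can be copied the same way.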
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -677,16 +677,16 @@
         struct mac_regs __iomem * regs = vptr->mac_regs;
         int i;

-        vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;
+        vptr->rx.dirty = vptr->rx.filled = vptr->rx.curr = 0;

         /*
          * Init state, all RD entries belong to the NIC
          */
         for (i = 0; i < vptr->options.numrx; ++i)
-                vptr->rd_ring[i].rdesc0.len |= OWNED_BY_NIC;
+                vptr->rx.ring[i].rdesc0.len |= OWNED_BY_NIC;

         writew(vptr->options.numrx, &regs->RBRDU);
-        writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
         writew(0, &regs->RDIdx);
         writew(vptr->options.numrx - 1, &regs->RDCSize);
 }
@@ -779,15 +779,15 @@

         vptr->int_mask = INT_MASK_DEF;

-        writel(vptr->rd_pool_dma, &regs->RDBaseLo);
+        writel(vptr->rx.pool_dma, &regs->RDBaseLo);
         writew(vptr->options.numrx - 1, &regs->RDCSize);
         mac_rx_queue_run(regs);
         mac_rx_queue_wake(regs);

         writew(vptr->options.numtx - 1, &regs->TDCSize);

-        for (i = 0; i < vptr->num_txq; i++) {
-                writel(vptr->td_pool_dma[i], &regs->TDBaseLo[i]);
+        for (i = 0; i < vptr->tx.numq; i++) {
+                writel(vptr->tx.pool_dma[i], &regs->TDBaseLo[i]);
                 mac_tx_queue_run(regs, i);
         }

@@ -1047,7 +1047,7 @@

         vptr->pdev = pdev;
         vptr->chip_id = info->chip_id;
-        vptr->num_txq = info->txqueue;
+        vptr->tx.numq = info->txqueue;
         vptr->multicast_limit = MCAM_SIZE;
         spin_lock_init(&vptr->lock);
         INIT_LIST_HEAD(&vptr->list);
@@ -1116,7 +1116,7 @@
          * pci_alloc_consistent() fulfills the requirement for 64 bytes
          * alignment
          */
-        pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->num_txq +
+        pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
                         rx_ring_size, &pool_dma);
         if (!pool) {
                 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
@@ -1124,15 +1124,15 @@
                 return -ENOMEM;
         }

-        vptr->rd_ring = pool;
-        vptr->rd_pool_dma = pool_dma;
+        vptr->rx.ring = pool;
+        vptr->rx.pool_dma = pool_dma;

         pool += rx_ring_size;
         pool_dma += rx_ring_size;

-        for (i = 0; i < vptr->num_txq; i++) {
-                vptr->td_rings[i] = pool;
-                vptr->td_pool_dma[i] = pool_dma;
+        for (i = 0; i < vptr->tx.numq; i++) {
+                vptr->tx.rings[i] = pool;
+                vptr->tx.pool_dma[i] = pool_dma;
                 pool += tx_ring_size;
                 pool_dma += tx_ring_size;
         }
@@ -1150,9 +1150,9 @@
 static void velocity_free_rings(struct velocity_info *vptr)
 {
         const int size = vptr->options.numrx * sizeof(struct rx_desc) +
-                vptr->options.numtx * sizeof(struct tx_desc) * vptr->num_txq;
+                vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;

-        pci_free_consistent(vptr->pdev, size, vptr->rd_ring, vptr->rd_pool_dma);
+        pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
 }

 static void velocity_give_many_rx_descs(struct velocity_info *vptr)
@@ -1164,44 +1164,44 @@
          * RD number must be equal to 4X per hardware spec
          * (programming guide rev 1.20, p.13)
          */
-        if (vptr->rd_filled < 4)
+        if (vptr->rx.filled < 4)
                 return;

         wmb();

-        unusable = vptr->rd_filled & 0x0003;
-        dirty = vptr->rd_dirty - unusable;
-        for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
+        unusable = vptr->rx.filled & 0x0003;
+        dirty = vptr->rx.dirty - unusable;
+        for (avail = vptr->rx.filled & 0xfffc; avail; avail--) {
                 dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
-                vptr->rd_ring[dirty].rdesc0.len |= OWNED_BY_NIC;
+                vptr->rx.ring[dirty].rdesc0.len |= OWNED_BY_NIC;
         }

-        writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
-        vptr->rd_filled = unusable;
+        writew(vptr->rx.filled & 0xfffc, &regs->RBRDU);
+        vptr->rx.filled = unusable;
 }

 static int velocity_rx_refill(struct velocity_info *vptr)
 {
-        int dirty = vptr->rd_dirty, done = 0;
+        int dirty = vptr->rx.dirty, done = 0;

         do {
-                struct rx_desc *rd = vptr->rd_ring + dirty;
+                struct rx_desc *rd = vptr->rx.ring + dirty;

                 /* Fine for an all zero Rx desc at init time as well */
                 if (rd->rdesc0.len & OWNED_BY_NIC)
                         break;

-                if (!vptr->rd_info[dirty].skb) {
+                if (!vptr->rx.info[dirty].skb) {
                         if (velocity_alloc_rx_buf(vptr, dirty) < 0)
                                 break;
                 }
                 done++;
                 dirty = (dirty < vptr->options.numrx - 1) ? dirty + 1 : 0;
-        } while (dirty != vptr->rd_curr);
+        } while (dirty != vptr->rx.curr);

         if (done) {
-                vptr->rd_dirty = dirty;
-                vptr->rd_filled += done;
+                vptr->rx.dirty = dirty;
+                vptr->rx.filled += done;
         }

         return done;
@@ -1209,7 +1209,7 @@

 static void velocity_set_rxbufsize(struct velocity_info *vptr, int mtu)
 {
-        vptr->rx_buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
+        vptr->rx.buf_sz = (mtu <= ETH_DATA_LEN) ? PKT_BUF_SZ : mtu + 32;
 }

 /**
@@ -1224,12 +1224,12 @@
 {
         int ret = -ENOMEM;

-        vptr->rd_info = kcalloc(vptr->options.numrx,
+        vptr->rx.info = kcalloc(vptr->options.numrx,
                         sizeof(struct velocity_rd_info), GFP_KERNEL);
-        if (!vptr->rd_info)
+        if (!vptr->rx.info)
                 goto out;

-        vptr->rd_filled = vptr->rd_dirty = vptr->rd_curr = 0;
+        vptr->rx.filled = vptr->rx.dirty = vptr->rx.curr = 0;

         if (velocity_rx_refill(vptr) != vptr->options.numrx) {
                 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR
@@ -1255,18 +1255,18 @@
 {
         int i;

-        if (vptr->rd_info == NULL)
+        if (vptr->rx.info == NULL)
                 return;

         for (i = 0; i < vptr->options.numrx; i++) {
-                struct velocity_rd_info *rd_info = &(vptr->rd_info[i]);
-                struct rx_desc *rd = vptr->rd_ring + i;
+                struct velocity_rd_info *rd_info = &(vptr->rx.info[i]);
+                struct rx_desc *rd = vptr->rx.ring + i;

                 memset(rd, 0, sizeof(*rd));

                 if (!rd_info->skb)
                         continue;
-                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+                pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
                                  PCI_DMA_FROMDEVICE);
                 rd_info->skb_dma = (dma_addr_t) NULL;

@@ -1274,8 +1274,8 @@
                 rd_info->skb = NULL;
         }

-        kfree(vptr->rd_info);
-        vptr->rd_info = NULL;
+        kfree(vptr->rx.info);
+        vptr->rx.info = NULL;
 }

 /**
@@ -1293,19 +1293,19 @@
         unsigned int j;

         /* Init the TD ring entries */
-        for (j = 0; j < vptr->num_txq; j++) {
-                curr = vptr->td_pool_dma[j];
+        for (j = 0; j < vptr->tx.numq; j++) {
+                curr = vptr->tx.pool_dma[j];

-                vptr->td_infos[j] = kcalloc(vptr->options.numtx,
+                vptr->tx.infos[j] = kcalloc(vptr->options.numtx,
                                 sizeof(struct velocity_td_info),
                                 GFP_KERNEL);
-                if (!vptr->td_infos[j]) {
+                if (!vptr->tx.infos[j]) {
                         while(--j >= 0)
-                                kfree(vptr->td_infos[j]);
+                                kfree(vptr->tx.infos[j]);
                         return -ENOMEM;
                 }

-                vptr->td_tail[j] = vptr->td_curr[j] = vptr->td_used[j] = 0;
+                vptr->tx.tail[j] = vptr->tx.curr[j] = vptr->tx.used[j] = 0;
         }
         return 0;
 }
@@ -1317,7 +1317,7 @@
 static void velocity_free_td_ring_entry(struct velocity_info *vptr,
                                         int q, int n)
 {
-        struct velocity_td_info * td_info = &(vptr->td_infos[q][n]);
+        struct velocity_td_info * td_info = &(vptr->tx.infos[q][n]);
         int i;

         if (td_info == NULL)
@@ -1349,15 +1349,15 @@
 {
         int i, j;

-        for (j = 0; j < vptr->num_txq; j++) {
-                if (vptr->td_infos[j] == NULL)
+        for (j = 0; j < vptr->tx.numq; j++) {
+                if (vptr->tx.infos[j] == NULL)
                         continue;
                 for (i = 0; i < vptr->options.numtx; i++) {
                         velocity_free_td_ring_entry(vptr, j, i);

                 }
-                kfree(vptr->td_infos[j]);
-                vptr->td_infos[j] = NULL;
+                kfree(vptr->tx.infos[j]);
+                vptr->tx.infos[j] = NULL;
         }
 }

@@ -1374,13 +1374,13 @@
 static int velocity_rx_srv(struct velocity_info *vptr, int status)
 {
         struct net_device_stats *stats = &vptr->stats;
-        int rd_curr = vptr->rd_curr;
+        int rd_curr = vptr->rx.curr;
         int works = 0;

         do {
-                struct rx_desc *rd = vptr->rd_ring + rd_curr;
+                struct rx_desc *rd = vptr->rx.ring + rd_curr;

-                if (!vptr->rd_info[rd_curr].skb)
+                if (!vptr->rx.info[rd_curr].skb)
                         break;

                 if (rd->rdesc0.len & OWNED_BY_NIC)
@@ -1412,7 +1412,7 @@
                         rd_curr = 0;
         } while (++works <= 15);

-        vptr->rd_curr = rd_curr;
+        vptr->rx.curr = rd_curr;

         if ((works > 0) && (velocity_rx_refill(vptr) > 0))
                 velocity_give_many_rx_descs(vptr);
@@ -1510,8 +1510,8 @@
 {
         void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
         struct net_device_stats *stats = &vptr->stats;
-        struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
-        struct rx_desc *rd = &(vptr->rd_ring[idx]);
+        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
+        struct rx_desc *rd = &(vptr->rx.ring[idx]);
         int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
         struct sk_buff *skb;

@@ -1527,7 +1527,7 @@
         skb = rd_info->skb;

         pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
-                        vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                        vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);

         /*
          * Drop frame not meeting IEEE 802.3
@@ -1550,7 +1550,7 @@
                 rd_info->skb = NULL;
         }

-        pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx_buf_sz,
+        pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
                         PCI_DMA_FROMDEVICE);

         skb_put(skb, pkt_len - 4);
@@ -1580,10 +1580,10 @@

 static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
 {
-        struct rx_desc *rd = &(vptr->rd_ring[idx]);
-        struct velocity_rd_info *rd_info = &(vptr->rd_info[idx]);
+        struct rx_desc *rd = &(vptr->rx.ring[idx]);
+        struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);

-        rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx_buf_sz + 64);
+        rd_info->skb = dev_alloc_skb(vptr->rx.buf_sz + 64);
         if (rd_info->skb == NULL)
                 return -ENOMEM;

@@ -1592,14 +1592,15 @@
          * 64byte alignment.
          */
         skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
-        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
+        rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
+                                          vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);

         /*
          * Fill in the descriptor to match
-          */
+         */

         *((u32 *) & (rd->rdesc0)) = 0;
-        rd->size = cpu_to_le16(vptr->rx_buf_sz) | RX_INTEN;
+        rd->size = cpu_to_le16(vptr->rx.buf_sz) | RX_INTEN;
         rd->pa_low = cpu_to_le32(rd_info->skb_dma);
         rd->pa_high = 0;
         return 0;
@@ -1625,15 +1626,15 @@
         struct velocity_td_info *tdinfo;
         struct net_device_stats *stats = &vptr->stats;

-        for (qnum = 0; qnum < vptr->num_txq; qnum++) {
-                for (idx = vptr->td_tail[qnum]; vptr->td_used[qnum] > 0;
+        for (qnum = 0; qnum < vptr->tx.numq; qnum++) {
+                for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0;
                         idx = (idx + 1) % vptr->options.numtx) {

                         /*
                          * Get Tx Descriptor
                          */
-                        td = &(vptr->td_rings[qnum][idx]);
-                        tdinfo = &(vptr->td_infos[qnum][idx]);
+                        td = &(vptr->tx.rings[qnum][idx]);
+                        tdinfo = &(vptr->tx.infos[qnum][idx]);

                         if (td->tdesc0.len & OWNED_BY_NIC)
                                 break;
@@ -1657,9 +1658,9 @@
                                 stats->tx_bytes += tdinfo->skb->len;
                         }
                         velocity_free_tx_buf(vptr, tdinfo);
-                        vptr->td_used[qnum]--;
+                        vptr->tx.used[qnum]--;
                 }
-                vptr->td_tail[qnum] = idx;
+                vptr->tx.tail[qnum] = idx;

                 if (AVAIL_TD(vptr, qnum) < 1) {
                         full = 1;
@@ -2056,9 +2057,9 @@

         spin_lock_irqsave(&vptr->lock, flags);

-        index = vptr->td_curr[qnum];
-        td_ptr = &(vptr->td_rings[qnum][index]);
-        tdinfo = &(vptr->td_infos[qnum][index]);
+        index = vptr->tx.curr[qnum];
+        td_ptr = &(vptr->tx.rings[qnum][index]);
+        tdinfo = &(vptr->tx.infos[qnum][index]);

         td_ptr->tdesc1.TCR = TCR0_TIC;
         td_ptr->td_buf[0].size &= ~TD_QUEUE;
@@ -2071,9 +2072,9 @@
                 skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
                 tdinfo->skb_dma[0] = tdinfo->buf_dma;
                 td_ptr->tdesc0.len = len;
-                td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-                td_ptr->td_buf[0].pa_high = 0;
-                td_ptr->td_buf[0].size = len;        /* queue is 0 anyway */
+                td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+                td_ptr->tx.buf[0].pa_high = 0;
+                td_ptr->tx.buf[0].size = len;        /* queue is 0 anyway */
                 tdinfo->nskb_dma = 1;
         } else {
                 int i = 0;
@@ -2084,9 +2085,9 @@
                 td_ptr->tdesc0.len = len;

                 /* FIXME: support 48bit DMA later */
-                td_ptr->td_buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
-                td_ptr->td_buf[i].pa_high = 0;
-                td_ptr->td_buf[i].size = cpu_to_le16(skb_headlen(skb));
+                td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
+                td_ptr->tx.buf[i].pa_high = 0;
+                td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));

                 for (i = 0; i < nfrags; i++) {
                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2094,9 +2095,9 @@

                         tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);

-                        td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
-                        td_ptr->td_buf[i + 1].pa_high = 0;
-                        td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size);
+                        td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
+                        td_ptr->tx.buf[i + 1].pa_high = 0;
+                        td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
                 }
                 tdinfo->nskb_dma = i - 1;
         }
@@ -2142,13 +2143,13 @@
         if (prev < 0)
                 prev = vptr->options.numtx - 1;
         td_ptr->tdesc0.len |= OWNED_BY_NIC;
-        vptr->td_used[qnum]++;
-        vptr->td_curr[qnum] = (index + 1) % vptr->options.numtx;
+        vptr->tx.used[qnum]++;
+        vptr->tx.curr[qnum] = (index + 1) % vptr->options.numtx;

         if (AVAIL_TD(vptr, qnum) < 1)
                 netif_stop_queue(dev);

-        td_ptr = &(vptr->td_rings[qnum][prev]);
+        td_ptr = &(vptr->tx.rings[qnum][prev]);
         td_ptr->td_buf[0].size |= TD_QUEUE;
         mac_tx_queue_wake(vptr->mac_regs, qnum);
 }
@@ -3405,8 +3406,8 @@

         velocity_tx_srv(vptr, 0);

-        for (i = 0; i < vptr->num_txq; i++) {
-                if (vptr->td_used[i]) {
+        for (i = 0; i < vptr->tx.numq; i++) {
+                if (vptr->tx.used[i]) {
                         mac_tx_queue_wake(vptr->mac_regs, i);
                 }
         }
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1494,6 +1494,10 @@
         u32 flags;
 };

+#define AVAIL_TD(p,q)   ((p)->options.numtx-((p)->tx.used[(q)]))
+
+#define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
+
 struct velocity_info {
         struct list_head list;

@@ -1501,9 +1505,6 @@
         struct net_device *dev;
         struct net_device_stats stats;

-        dma_addr_t rd_pool_dma;
-        dma_addr_t td_pool_dma[TX_QUEUE_NO];
-
         struct vlan_group *vlgrp;
         u8 ip_addr[4];
         enum chip_type chip_id;
@@ -1512,25 +1513,29 @@
         unsigned long memaddr;
         unsigned long ioaddr;

-        u8 rev_id;
+        struct tx_info {
+                int numq;

-#define AVAIL_TD(p,q)   ((p)->options.numtx-((p)->td_used[(q)]))
+                /* FIXME: the locality of the data seems rather poor. */
+                int used[TX_QUEUE_NO];
+                int curr[TX_QUEUE_NO];
+                int tail[TX_QUEUE_NO];
+                struct tx_desc *rings[TX_QUEUE_NO];
+                struct velocity_td_info *infos[TX_QUEUE_NO];
+                dma_addr_t pool_dma[TX_QUEUE_NO];
+        } tx;

-        int num_txq;
+        struct rx_info {
+                int buf_sz;

-        volatile int td_used[TX_QUEUE_NO];
-        int td_curr[TX_QUEUE_NO];
-        int td_tail[TX_QUEUE_NO];
-        struct tx_desc *td_rings[TX_QUEUE_NO];
-        struct velocity_td_info *td_infos[TX_QUEUE_NO];
+                int dirty;
+                int curr;
+                u32 filled;
+                struct rx_desc *ring;
+                struct velocity_rd_info *info;        /* It's an array */
+                dma_addr_t pool_dma;
+        } rx;

-        int rd_curr;
-        int rd_dirty;
-        u32 rd_filled;
-        struct rx_desc *rd_ring;
-        struct velocity_rd_info *rd_info;        /* It's an array */
-
-#define GET_RD_BY_IDX(vptr, idx)   (vptr->rd_ring[idx])
         u32 mib_counter[MAX_HW_MIB_COUNTER];
         struct velocity_opt options;

@@ -1538,7 +1543,6 @@

         u32 flags;

-        int rx_buf_sz;
         u32 mii_status;
         u32 phy_id;
         int multicast_limit;
@@ -1554,8 +1558,8 @@
         struct velocity_context context;

         u32 ticks;
-        u32 rx_bytes;

+        u8 rev_id;
 };

 /**