Commit 22ece0e2e23c5cc5a23a5b8aff3dc75c9832e82f
Committed by
Joe Hershberger
1 parent
a5144237ac
Exists in
master
and in
50 other branches
net: rtl8169: Improve cache maintenance
Instead of directly calling the low-level invalidate_dcache_range() and flush_cache() functions, provide thin wrappers that take into account alignment requirements. While at it, fix a case where the cache was flushed but should have been invalidated, two cases where the buffer data was flushed instead of the descriptor and a missing cache invalidation before reading the packet data that the NIC just wrote to memory. Signed-off-by: Thierry Reding <treding@nvidia.com> Patch: 276474
Showing 1 changed file with 53 additions and 8 deletions (side-by-side diff)
drivers/net/rtl8169.c
... | ... | @@ -395,6 +395,50 @@ |
395 | 395 | return 0; |
396 | 396 | } |
397 | 397 | |
398 | +/* | |
399 | + * Cache maintenance functions. These are simple wrappers around the more | |
400 | + * general purpose flush_cache() and invalidate_dcache_range() functions. | |
401 | + */ | |
402 | + | |
403 | +static void rtl_inval_rx_desc(struct RxDesc *desc) | |
404 | +{ | |
405 | + unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); | |
406 | + unsigned long end = ALIGN(start + sizeof(*desc), ARCH_DMA_MINALIGN); | |
407 | + | |
408 | + invalidate_dcache_range(start, end); | |
409 | +} | |
410 | + | |
411 | +static void rtl_flush_rx_desc(struct RxDesc *desc) | |
412 | +{ | |
413 | + flush_cache((unsigned long)desc, sizeof(*desc)); | |
414 | +} | |
415 | + | |
416 | +static void rtl_inval_tx_desc(struct TxDesc *desc) | |
417 | +{ | |
418 | + unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1); | |
419 | + unsigned long end = ALIGN(start + sizeof(*desc), ARCH_DMA_MINALIGN); | |
420 | + | |
421 | + invalidate_dcache_range(start, end); | |
422 | +} | |
423 | + | |
424 | +static void rtl_flush_tx_desc(struct TxDesc *desc) | |
425 | +{ | |
426 | + flush_cache((unsigned long)desc, sizeof(*desc)); | |
427 | +} | |
428 | + | |
429 | +static void rtl_inval_buffer(void *buf, size_t size) | |
430 | +{ | |
431 | + unsigned long start = (unsigned long)buf & ~(ARCH_DMA_MINALIGN - 1); | |
432 | + unsigned long end = ALIGN(start + size, ARCH_DMA_MINALIGN); | |
433 | + | |
434 | + invalidate_dcache_range(start, end); | |
435 | +} | |
436 | + | |
437 | +static void rtl_flush_buffer(void *buf, size_t size) | |
438 | +{ | |
439 | + flush_cache((unsigned long)buf, size); | |
440 | +} | |
441 | + | |
398 | 442 | /************************************************************************** |
399 | 443 | RECV - Receive a frame |
400 | 444 | ***************************************************************************/ |
401 | 445 | |
... | ... | @@ -412,14 +456,16 @@ |
412 | 456 | ioaddr = dev->iobase; |
413 | 457 | |
414 | 458 | cur_rx = tpc->cur_rx; |
415 | - flush_cache((unsigned long)&tpc->RxDescArray[cur_rx], | |
416 | - sizeof(struct RxDesc)); | |
459 | + | |
460 | + rtl_inval_rx_desc(&tpc->RxDescArray[cur_rx]); | |
461 | + | |
417 | 462 | if ((le32_to_cpu(tpc->RxDescArray[cur_rx].status) & OWNbit) == 0) { |
418 | 463 | if (!(le32_to_cpu(tpc->RxDescArray[cur_rx].status) & RxRES)) { |
419 | 464 | unsigned char rxdata[RX_BUF_LEN]; |
420 | 465 | length = (int) (le32_to_cpu(tpc->RxDescArray[cur_rx]. |
421 | 466 | status) & 0x00001FFF) - 4; |
422 | 467 | |
468 | + rtl_inval_buffer(tpc->RxBufferRing[cur_rx], length); | |
423 | 469 | memcpy(rxdata, tpc->RxBufferRing[cur_rx], length); |
424 | 470 | NetReceive(rxdata, length); |
425 | 471 | |
... | ... | @@ -431,8 +477,7 @@ |
431 | 477 | cpu_to_le32(OWNbit + RX_BUF_SIZE); |
432 | 478 | tpc->RxDescArray[cur_rx].buf_addr = |
433 | 479 | cpu_to_le32(bus_to_phys(tpc->RxBufferRing[cur_rx])); |
434 | - flush_cache((unsigned long)tpc->RxBufferRing[cur_rx], | |
435 | - RX_BUF_SIZE); | |
480 | + rtl_flush_rx_desc(&tpc->RxDescArray[cur_rx]); | |
436 | 481 | } else { |
437 | 482 | puts("Error Rx"); |
438 | 483 | } |
... | ... | @@ -474,7 +519,7 @@ |
474 | 519 | /* point to the current txb incase multiple tx_rings are used */ |
475 | 520 | ptxb = tpc->Tx_skbuff[entry * MAX_ETH_FRAME_SIZE]; |
476 | 521 | memcpy(ptxb, (char *)packet, (int)length); |
477 | - flush_cache((unsigned long)ptxb, length); | |
522 | + rtl_flush_buffer(ptxb, length); | |
478 | 523 | |
479 | 524 | while (len < ETH_ZLEN) |
480 | 525 | ptxb[len++] = '\0'; |
481 | 526 | |
... | ... | @@ -490,13 +535,13 @@ |
490 | 535 | cpu_to_le32((OWNbit | EORbit | FSbit | LSbit) | |
491 | 536 | ((len > ETH_ZLEN) ? len : ETH_ZLEN)); |
492 | 537 | } |
538 | + rtl_flush_tx_desc(&tpc->TxDescArray[entry]); | |
493 | 539 | RTL_W8(TxPoll, 0x40); /* set polling bit */ |
494 | 540 | |
495 | 541 | tpc->cur_tx++; |
496 | 542 | to = currticks() + TX_TIMEOUT; |
497 | 543 | do { |
498 | - flush_cache((unsigned long)&tpc->TxDescArray[entry], | |
499 | - sizeof(struct TxDesc)); | |
544 | + rtl_inval_tx_desc(&tpc->TxDescArray[entry]); | |
500 | 545 | } while ((le32_to_cpu(tpc->TxDescArray[entry].status) & OWNbit) |
501 | 546 | && (currticks() < to)); /* wait */ |
502 | 547 | |
... | ... | @@ -639,7 +684,7 @@ |
639 | 684 | tpc->RxBufferRing[i] = &rxb[i * RX_BUF_SIZE]; |
640 | 685 | tpc->RxDescArray[i].buf_addr = |
641 | 686 | cpu_to_le32(bus_to_phys(tpc->RxBufferRing[i])); |
642 | - flush_cache((unsigned long)tpc->RxBufferRing[i], RX_BUF_SIZE); | |
687 | + rtl_flush_rx_desc(&tpc->RxDescArray[i]); | |
643 | 688 | } |
644 | 689 | |
645 | 690 | #ifdef DEBUG_RTL8169 |