Commit e08e1f7adba522378e8d2ae941bf25443866136d

Authored by Ian Campbell
Committed by Ingo Molnar
1 parent a5ddde4a55

swiotlb: allow architectures to override phys<->bus<->phys conversions

Impact: generalize phys<->bus<->phys conversions in the swiotlb code

Architectures may need to override these conversions. Implement a
__weak hook point containing the default implementation.

Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
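
A note on usage: because the two conversion hooks are declared __weak, an architecture overrides them simply by linking in ordinary (strong) definitions of the same functions; no registration or config option is involved. A minimal sketch of such an override, with a hypothetical file name and a made-up BUS_OFFSET translation (neither is part of this commit):

    /* arch/foo/kernel/swiotlb.c - hypothetical override, for illustration only */
    #include <linux/types.h>
    #include <linux/swiotlb.h>

    /*
     * BUS_OFFSET stands in for whatever phys->bus translation the
     * architecture really needs (e.g. Xen's pseudo-physical mapping,
     * the motivating case for this hook).
     */
    #define BUS_OFFSET 0x80000000UL

    dma_addr_t swiotlb_phys_to_bus(phys_addr_t paddr)
    {
            return paddr + BUS_OFFSET;      /* strong definition beats the __weak default */
    }

    phys_addr_t swiotlb_bus_to_phys(dma_addr_t baddr)
    {
            return baddr - BUS_OFFSET;      /* must be the exact inverse */
    }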

Showing 2 changed files with 39 additions and 16 deletions

include/linux/swiotlb.h

@@ -27,6 +27,9 @@
 extern void *swiotlb_alloc_boot(size_t bytes, unsigned long nslabs);
 extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
 
+extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+
 extern void
 *swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags);
lib/swiotlb.c

@@ -125,6 +125,26 @@
         return (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN, order);
 }
 
+dma_addr_t __weak swiotlb_phys_to_bus(phys_addr_t paddr)
+{
+        return paddr;
+}
+
+phys_addr_t __weak swiotlb_bus_to_phys(dma_addr_t baddr)
+{
+        return baddr;
+}
+
+static dma_addr_t swiotlb_virt_to_bus(volatile void *address)
+{
+        return swiotlb_phys_to_bus(virt_to_phys(address));
+}
+
+static void *swiotlb_bus_to_virt(dma_addr_t address)
+{
+        return phys_to_virt(swiotlb_bus_to_phys(address));
+}
+
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
  * structures for the software IO TLB used to implement the DMA API.
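
Note how the two static wrappers just added compose the overridable phys<->bus hooks with the kernel's existing virt<->phys conversion: swiotlb_virt_to_bus() is swiotlb_phys_to_bus(virt_to_phys(addr)), and swiotlb_bus_to_virt() is the inverse. The remaining hunks are then mechanical: every virt_to_bus()/bus_to_virt() call site in swiotlb switches to the swiotlb_-prefixed wrapper, so an architecture's override takes effect everywhere at once.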
@@ -168,7 +188,7 @@
                 panic("Cannot allocate SWIOTLB overflow buffer!\n");
 
         printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
-               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+               swiotlb_virt_to_bus(io_tlb_start), swiotlb_virt_to_bus(io_tlb_end));
 }
 
 void __init

@@ -250,7 +270,7 @@
 
         printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - "
                "0x%lx\n", bytes >> 20,
-               virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end));
+               swiotlb_virt_to_bus(io_tlb_start), swiotlb_virt_to_bus(io_tlb_end));
 
         return 0;
 

@@ -298,7 +318,7 @@
         unsigned long max_slots;
 
         mask = dma_get_seg_boundary(hwdev);
-        start_dma_addr = virt_to_bus(io_tlb_start) & mask;
+        start_dma_addr = swiotlb_virt_to_bus(io_tlb_start) & mask;
 
         offset_slots = ALIGN(start_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 

@@ -475,7 +495,7 @@
         dma_mask = hwdev->coherent_dma_mask;
 
         ret = (void *)__get_free_pages(flags, order);
-        if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
+        if (ret && !is_buffer_dma_capable(dma_mask, swiotlb_virt_to_bus(ret), size)) {
                 /*
                  * The allocated memory isn't reachable by the device.
                  * Fall back on swiotlb_map_single().

@@ -496,7 +516,7 @@
         }
 
         memset(ret, 0, size);
-        dev_addr = virt_to_bus(ret);
+        dev_addr = swiotlb_virt_to_bus(ret);
 
         /* Confirm address can be DMA'd by device */
         if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {

@@ -556,7 +576,7 @@
 swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
                          int dir, struct dma_attrs *attrs)
 {
-        dma_addr_t dev_addr = virt_to_bus(ptr);
+        dma_addr_t dev_addr = swiotlb_virt_to_bus(ptr);
         void *map;
 
         BUG_ON(dir == DMA_NONE);

@@ -577,7 +597,7 @@
                 map = io_tlb_overflow_buffer;
         }
 
-        dev_addr = virt_to_bus(map);
+        dev_addr = swiotlb_virt_to_bus(map);
 
         /*
          * Ensure that the address returned is DMA'ble

@@ -607,7 +627,7 @@
 swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, int dir, struct dma_attrs *attrs)
 {
-        char *dma_addr = bus_to_virt(dev_addr);
+        char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
         if (is_swiotlb_buffer(dma_addr))

@@ -637,7 +657,7 @@
 swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                     size_t size, int dir, int target)
 {
-        char *dma_addr = bus_to_virt(dev_addr);
+        char *dma_addr = swiotlb_bus_to_virt(dev_addr);
 
         BUG_ON(dir == DMA_NONE);
         if (is_swiotlb_buffer(dma_addr))

@@ -668,7 +688,7 @@
                           unsigned long offset, size_t size,
                           int dir, int target)
 {
-        char *dma_addr = bus_to_virt(dev_addr) + offset;
+        char *dma_addr = swiotlb_bus_to_virt(dev_addr) + offset;
 
         BUG_ON(dir == DMA_NONE);
         if (is_swiotlb_buffer(dma_addr))

@@ -724,7 +744,7 @@
 
         for_each_sg(sgl, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
-                dev_addr = virt_to_bus(addr);
+                dev_addr = swiotlb_virt_to_bus(addr);
                 if (swiotlb_force ||
                     address_needs_mapping(hwdev, dev_addr, sg->length)) {
                         void *map = map_single(hwdev, addr, sg->length, dir);

@@ -737,7 +757,7 @@
                                 sgl[0].dma_length = 0;
                                 return 0;
                         }
-                        sg->dma_address = virt_to_bus(map);
+                        sg->dma_address = swiotlb_virt_to_bus(map);
                 } else
                         sg->dma_address = dev_addr;
                 sg->dma_length = sg->length;

@@ -768,7 +788,7 @@
 
         for_each_sg(sgl, sg, nelems, i) {
                 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-                        unmap_single(hwdev, bus_to_virt(sg->dma_address),
+                        unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
                                      sg->dma_length, dir);
                 else if (dir == DMA_FROM_DEVICE)
                         dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);

@@ -801,7 +821,7 @@
 
         for_each_sg(sgl, sg, nelems, i) {
                 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
-                        sync_single(hwdev, bus_to_virt(sg->dma_address),
+                        sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address),
                                     sg->dma_length, dir, target);
                 else if (dir == DMA_FROM_DEVICE)
                         dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);

@@ -825,7 +845,7 @@
 int
 swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
-        return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
+        return (dma_addr == swiotlb_virt_to_bus(io_tlb_overflow_buffer));
 }
 
 /*

@@ -837,7 +857,7 @@
 int
 swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
-        return virt_to_bus(io_tlb_end - 1) <= mask;
+        return swiotlb_virt_to_bus(io_tlb_end - 1) <= mask;
 }
 
 EXPORT_SYMBOL(swiotlb_map_single);