Commit b81ea27b2329bf44b30c427800954f845896d476
Committed by
Ingo Molnar
1 parent
e08e1f7adb
Exists in
master
and in
4 other branches
swiotlb: add arch hook to force mapping
Impact: generalize the sw-IOTLB range checks. Some architectures require special rules to determine whether a range needs mapping or not. This adds a weak function for architectures to override. Signed-off-by: Ian Campbell <ian.campbell@citrix.com> Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Showing 2 changed files with 15 additions and 2 deletions Side-by-side Diff
include/linux/swiotlb.h
... | ... | @@ -30,6 +30,8 @@ |
30 | 30 | extern dma_addr_t swiotlb_phys_to_bus(phys_addr_t address); |
31 | 31 | extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address); |
32 | 32 | |
33 | +extern int swiotlb_arch_range_needs_mapping(void *ptr, size_t size); | |
34 | + | |
33 | 35 | extern void |
34 | 36 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
35 | 37 | dma_addr_t *dma_handle, gfp_t flags); |
lib/swiotlb.c
... | ... | @@ -145,6 +145,11 @@ |
145 | 145 | return phys_to_virt(swiotlb_bus_to_phys(address)); |
146 | 146 | } |
147 | 147 | |
148 | +int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) | |
149 | +{ | |
150 | + return 0; | |
151 | +} | |
152 | + | |
148 | 153 | /* |
149 | 154 | * Statically reserve bounce buffer space and initialize bounce buffer data |
150 | 155 | * structures for the software IO TLB used to implement the DMA API. |
... | ... | @@ -297,6 +302,11 @@ |
297 | 302 | return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); |
298 | 303 | } |
299 | 304 | |
305 | +static inline int range_needs_mapping(void *ptr, size_t size) | |
306 | +{ | |
307 | + return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size); | |
308 | +} | |
309 | + | |
300 | 310 | static int is_swiotlb_buffer(char *addr) |
301 | 311 | { |
302 | 312 | return addr >= io_tlb_start && addr < io_tlb_end; |
... | ... | @@ -585,7 +595,8 @@ |
585 | 595 | * we can safely return the device addr and not worry about bounce |
586 | 596 | * buffering it. |
587 | 597 | */ |
588 | - if (!address_needs_mapping(hwdev, dev_addr, size) && !swiotlb_force) | |
598 | + if (!address_needs_mapping(hwdev, dev_addr, size) && | |
599 | + !range_needs_mapping(ptr, size)) | |
589 | 600 | return dev_addr; |
590 | 601 | |
591 | 602 | /* |
... | ... | @@ -745,7 +756,7 @@ |
745 | 756 | for_each_sg(sgl, sg, nelems, i) { |
746 | 757 | addr = SG_ENT_VIRT_ADDRESS(sg); |
747 | 758 | dev_addr = swiotlb_virt_to_bus(addr); |
748 | - if (swiotlb_force || | |
759 | + if (range_needs_mapping(sg_virt(sg), sg->length) || | |
749 | 760 | address_needs_mapping(hwdev, dev_addr, sg->length)) { |
750 | 761 | void *map = map_single(hwdev, addr, sg->length, dir); |
751 | 762 | if (!map) { |