Commit baa676fcf8d555269bd0a5a2496782beee55824d
Committed by Marek Szyprowski
1 parent 613c4578d4
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
X86 & IA64: adapt for dma_map_ops changes
Adapt core x86 and IA64 architecture code for dma_map_ops changes: replace
alloc/free_coherent with generic alloc/free methods.

Signed-off-by: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
[removed swiotlb related changes and replaced it with wrappers,
merged with IA64 patch to avoid inter-patch dependences in intel-iommu code]
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Tony Luck <tony.luck@intel.com>
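The interface change is mechanical: the ops table's alloc_coherent/free_coherent
callbacks become .alloc/.free and gain a trailing struct dma_attrs * argument,
while dma_alloc_coherent()/dma_free_coherent() turn into thin macros over the new
*_attrs helpers that pass NULL. A condensed before/after sketch, paraphrased from
the hunks below (the *_old/*_new struct names are invented for this comparison;
the real definitions live in include/linux/dma-mapping.h and include/linux/dma-attrs.h):

    #include <linux/device.h>     /* struct device, dma_addr_t, gfp_t */
    #include <linux/dma-attrs.h>  /* struct dma_attrs */

    /* Before this commit: coherent hooks carried no attribute argument. */
    struct dma_map_ops_old {
            void *(*alloc_coherent)(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t gfp);
            void (*free_coherent)(struct device *dev, size_t size,
                                  void *vaddr, dma_addr_t dma_handle);
            /* map_page, unmap_page, map_sg, sync_*, ... unchanged */
    };

    /* After: renamed to .alloc/.free and extended with struct dma_attrs *. */
    struct dma_map_ops_new {
            void *(*alloc)(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t gfp,
                           struct dma_attrs *attrs);
            void (*free)(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         struct dma_attrs *attrs);
            /* ... */
    };

    /* Existing callers keep working: the coherent API becomes a macro that
     * forwards a NULL attrs pointer to the new *_attrs helpers (see the
     * dma-mapping.h hunks below). */
    #define dma_alloc_coherent(d, s, h, f)  dma_alloc_attrs(d, s, h, f, NULL)
    #define dma_free_coherent(d, s, c, h)   dma_free_attrs(d, s, c, h, NULL)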
Showing 15 changed files with 99 additions and 59 deletions
- arch/ia64/hp/common/sba_iommu.c
- arch/ia64/include/asm/dma-mapping.h
- arch/ia64/kernel/pci-swiotlb.c
- arch/ia64/sn/pci/pci_dma.c
- arch/x86/include/asm/dma-mapping.h
- arch/x86/kernel/amd_gart_64.c
- arch/x86/kernel/pci-calgary_64.c
- arch/x86/kernel/pci-dma.c
- arch/x86/kernel/pci-nommu.c
- arch/x86/kernel/pci-swiotlb.c
- arch/x86/xen/pci-swiotlb-xen.c
- drivers/iommu/amd_iommu.c
- drivers/iommu/intel-iommu.c
- drivers/xen/swiotlb-xen.c
- include/xen/swiotlb-xen.h
arch/ia64/hp/common/sba_iommu.c
... | ... | @@ -1130,7 +1130,8 @@ |
1130 | 1130 | * See Documentation/DMA-API-HOWTO.txt |
1131 | 1131 | */ |
1132 | 1132 | static void * |
1133 | -sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) | |
1133 | +sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |
1134 | + gfp_t flags, struct dma_attrs *attrs) | |
1134 | 1135 | { |
1135 | 1136 | struct ioc *ioc; |
1136 | 1137 | void *addr; |
... | ... | @@ -1192,8 +1193,8 @@ |
1192 | 1193 | * |
1193 | 1194 | * See Documentation/DMA-API-HOWTO.txt |
1194 | 1195 | */ |
1195 | -static void sba_free_coherent (struct device *dev, size_t size, void *vaddr, | |
1196 | - dma_addr_t dma_handle) | |
1196 | +static void sba_free_coherent(struct device *dev, size_t size, void *vaddr, | |
1197 | + dma_addr_t dma_handle, struct dma_attrs *attrs) | |
1197 | 1198 | { |
1198 | 1199 | sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); |
1199 | 1200 | free_pages((unsigned long) vaddr, get_order(size)); |
... | ... | @@ -2213,8 +2214,8 @@ |
2213 | 2214 | __setup("sbapagesize=",sba_page_override); |
2214 | 2215 | |
2215 | 2216 | struct dma_map_ops sba_dma_ops = { |
2216 | - .alloc_coherent = sba_alloc_coherent, | |
2217 | - .free_coherent = sba_free_coherent, | |
2217 | + .alloc = sba_alloc_coherent, | |
2218 | + .free = sba_free_coherent, | |
2218 | 2219 | .map_page = sba_map_page, |
2219 | 2220 | .unmap_page = sba_unmap_page, |
2220 | 2221 | .map_sg = sba_map_sg_attrs, |
arch/ia64/include/asm/dma-mapping.h
... | ... | @@ -23,23 +23,29 @@ |
23 | 23 | extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int, |
24 | 24 | enum dma_data_direction); |
25 | 25 | |
26 | -static inline void *dma_alloc_coherent(struct device *dev, size_t size, | |
27 | - dma_addr_t *daddr, gfp_t gfp) | |
26 | +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) | |
27 | + | |
28 | +static inline void *dma_alloc_attrs(struct device *dev, size_t size, | |
29 | + dma_addr_t *daddr, gfp_t gfp, | |
30 | + struct dma_attrs *attrs) | |
28 | 31 | { |
29 | 32 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
30 | 33 | void *caddr; |
31 | 34 | |
32 | - caddr = ops->alloc_coherent(dev, size, daddr, gfp); | |
35 | + caddr = ops->alloc(dev, size, daddr, gfp, attrs); | |
33 | 36 | debug_dma_alloc_coherent(dev, size, *daddr, caddr); |
34 | 37 | return caddr; |
35 | 38 | } |
36 | 39 | |
37 | -static inline void dma_free_coherent(struct device *dev, size_t size, | |
38 | - void *caddr, dma_addr_t daddr) | |
40 | +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) | |
41 | + | |
42 | +static inline void dma_free_attrs(struct device *dev, size_t size, | |
43 | + void *caddr, dma_addr_t daddr, | |
44 | + struct dma_attrs *attrs) | |
39 | 45 | { |
40 | 46 | struct dma_map_ops *ops = platform_dma_get_ops(dev); |
41 | 47 | debug_dma_free_coherent(dev, size, caddr, daddr); |
42 | - ops->free_coherent(dev, size, caddr, daddr); | |
48 | + ops->free(dev, size, caddr, daddr, attrs); | |
43 | 49 | } |
44 | 50 | |
45 | 51 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
arch/ia64/kernel/pci-swiotlb.c
... | ... | @@ -15,16 +15,24 @@ |
15 | 15 | EXPORT_SYMBOL(swiotlb); |
16 | 16 | |
17 | 17 | static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, |
18 | - dma_addr_t *dma_handle, gfp_t gfp) | |
18 | + dma_addr_t *dma_handle, gfp_t gfp, | |
19 | + struct dma_attrs *attrs) | |
19 | 20 | { |
20 | 21 | if (dev->coherent_dma_mask != DMA_BIT_MASK(64)) |
21 | 22 | gfp |= GFP_DMA; |
22 | 23 | return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); |
23 | 24 | } |
24 | 25 | |
26 | +static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, | |
27 | + void *vaddr, dma_addr_t dma_addr, | |
28 | + struct dma_attrs *attrs) | |
29 | +{ | |
30 | + swiotlb_free_coherent(dev, size, vaddr, dma_addr); | |
31 | +} | |
32 | + | |
25 | 33 | struct dma_map_ops swiotlb_dma_ops = { |
26 | - .alloc_coherent = ia64_swiotlb_alloc_coherent, | |
27 | - .free_coherent = swiotlb_free_coherent, | |
34 | + .alloc = ia64_swiotlb_alloc_coherent, | |
35 | + .free = ia64_swiotlb_free_coherent, | |
28 | 36 | .map_page = swiotlb_map_page, |
29 | 37 | .unmap_page = swiotlb_unmap_page, |
30 | 38 | .map_sg = swiotlb_map_sg_attrs, |
arch/ia64/sn/pci/pci_dma.c
... | ... | @@ -76,7 +76,8 @@ |
76 | 76 | * more information. |
77 | 77 | */ |
78 | 78 | static void *sn_dma_alloc_coherent(struct device *dev, size_t size, |
79 | - dma_addr_t * dma_handle, gfp_t flags) | |
79 | + dma_addr_t * dma_handle, gfp_t flags, | |
80 | + struct dma_attrs *attrs) | |
80 | 81 | { |
81 | 82 | void *cpuaddr; |
82 | 83 | unsigned long phys_addr; |
... | ... | @@ -137,7 +138,7 @@ |
137 | 138 | * any associated IOMMU mappings. |
138 | 139 | */ |
139 | 140 | static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, |
140 | - dma_addr_t dma_handle) | |
141 | + dma_addr_t dma_handle, struct dma_attrs *attrs) | |
141 | 142 | { |
142 | 143 | struct pci_dev *pdev = to_pci_dev(dev); |
143 | 144 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
... | ... | @@ -466,8 +467,8 @@ |
466 | 467 | } |
467 | 468 | |
468 | 469 | static struct dma_map_ops sn_dma_ops = { |
469 | - .alloc_coherent = sn_dma_alloc_coherent, | |
470 | - .free_coherent = sn_dma_free_coherent, | |
470 | + .alloc = sn_dma_alloc_coherent, | |
471 | + .free = sn_dma_free_coherent, | |
471 | 472 | .map_page = sn_dma_map_page, |
472 | 473 | .unmap_page = sn_dma_unmap_page, |
473 | 474 | .map_sg = sn_dma_map_sg, |
arch/x86/include/asm/dma-mapping.h
... | ... | @@ -59,7 +59,8 @@ |
59 | 59 | extern int dma_set_mask(struct device *dev, u64 mask); |
60 | 60 | |
61 | 61 | extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, |
62 | - dma_addr_t *dma_addr, gfp_t flag); | |
62 | + dma_addr_t *dma_addr, gfp_t flag, | |
63 | + struct dma_attrs *attrs); | |
63 | 64 | |
64 | 65 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) |
65 | 66 | { |
66 | 67 | |
... | ... | @@ -111,9 +112,11 @@ |
111 | 112 | return gfp; |
112 | 113 | } |
113 | 114 | |
115 | +#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL) | |
116 | + | |
114 | 117 | static inline void * |
115 | -dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |
116 | - gfp_t gfp) | |
118 | +dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, | |
119 | + gfp_t gfp, struct dma_attrs *attrs) | |
117 | 120 | { |
118 | 121 | struct dma_map_ops *ops = get_dma_ops(dev); |
119 | 122 | void *memory; |
120 | 123 | |
121 | 124 | |
... | ... | @@ -129,18 +132,21 @@ |
129 | 132 | if (!is_device_dma_capable(dev)) |
130 | 133 | return NULL; |
131 | 134 | |
132 | - if (!ops->alloc_coherent) | |
135 | + if (!ops->alloc) | |
133 | 136 | return NULL; |
134 | 137 | |
135 | - memory = ops->alloc_coherent(dev, size, dma_handle, | |
136 | - dma_alloc_coherent_gfp_flags(dev, gfp)); | |
138 | + memory = ops->alloc(dev, size, dma_handle, | |
139 | + dma_alloc_coherent_gfp_flags(dev, gfp), attrs); | |
137 | 140 | debug_dma_alloc_coherent(dev, size, *dma_handle, memory); |
138 | 141 | |
139 | 142 | return memory; |
140 | 143 | } |
141 | 144 | |
142 | -static inline void dma_free_coherent(struct device *dev, size_t size, | |
143 | - void *vaddr, dma_addr_t bus) | |
145 | +#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL) | |
146 | + | |
147 | +static inline void dma_free_attrs(struct device *dev, size_t size, | |
148 | + void *vaddr, dma_addr_t bus, | |
149 | + struct dma_attrs *attrs) | |
144 | 150 | { |
145 | 151 | struct dma_map_ops *ops = get_dma_ops(dev); |
146 | 152 | |
... | ... | @@ -150,8 +156,8 @@ |
150 | 156 | return; |
151 | 157 | |
152 | 158 | debug_dma_free_coherent(dev, size, vaddr, bus); |
153 | - if (ops->free_coherent) | |
154 | - ops->free_coherent(dev, size, vaddr, bus); | |
159 | + if (ops->free) | |
160 | + ops->free(dev, size, vaddr, bus, attrs); | |
155 | 161 | } |
156 | 162 | |
157 | 163 | #endif |
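For context, a minimal hypothetical driver-side sketch (the function names, device
pointer, and the 4096-byte size are assumptions for illustration, not part of this
commit) showing that call sites are unaffected by the macro indirection added above:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    static void *example_alloc_buffer(struct device *dev, dma_addr_t *handle)
    {
            /* After this patch the macro expands to
             * dma_alloc_attrs(dev, 4096, handle, GFP_KERNEL, NULL);
             * the driver source itself does not change. */
            return dma_alloc_coherent(dev, 4096, handle, GFP_KERNEL);
    }

    static void example_free_buffer(struct device *dev, void *vaddr,
                                    dma_addr_t handle)
    {
            /* Likewise expands to dma_free_attrs(dev, 4096, vaddr, handle, NULL). */
            dma_free_coherent(dev, 4096, vaddr, handle);
    }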
arch/x86/kernel/amd_gart_64.c
... | ... | @@ -477,7 +477,7 @@ |
477 | 477 | /* allocate and map a coherent mapping */ |
478 | 478 | static void * |
479 | 479 | gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, |
480 | - gfp_t flag) | |
480 | + gfp_t flag, struct dma_attrs *attrs) | |
481 | 481 | { |
482 | 482 | dma_addr_t paddr; |
483 | 483 | unsigned long align_mask; |
... | ... | @@ -500,7 +500,8 @@ |
500 | 500 | } |
501 | 501 | __free_pages(page, get_order(size)); |
502 | 502 | } else |
503 | - return dma_generic_alloc_coherent(dev, size, dma_addr, flag); | |
503 | + return dma_generic_alloc_coherent(dev, size, dma_addr, flag, | |
504 | + attrs); | |
504 | 505 | |
505 | 506 | return NULL; |
506 | 507 | } |
... | ... | @@ -508,7 +509,7 @@ |
508 | 509 | /* free a coherent mapping */ |
509 | 510 | static void |
510 | 511 | gart_free_coherent(struct device *dev, size_t size, void *vaddr, |
511 | - dma_addr_t dma_addr) | |
512 | + dma_addr_t dma_addr, struct dma_attrs *attrs) | |
512 | 513 | { |
513 | 514 | gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL); |
514 | 515 | free_pages((unsigned long)vaddr, get_order(size)); |
... | ... | @@ -700,8 +701,8 @@ |
700 | 701 | .unmap_sg = gart_unmap_sg, |
701 | 702 | .map_page = gart_map_page, |
702 | 703 | .unmap_page = gart_unmap_page, |
703 | - .alloc_coherent = gart_alloc_coherent, | |
704 | - .free_coherent = gart_free_coherent, | |
704 | + .alloc = gart_alloc_coherent, | |
705 | + .free = gart_free_coherent, | |
705 | 706 | .mapping_error = gart_mapping_error, |
706 | 707 | }; |
707 | 708 |
arch/x86/kernel/pci-calgary_64.c
... | ... | @@ -431,7 +431,7 @@ |
431 | 431 | } |
432 | 432 | |
433 | 433 | static void* calgary_alloc_coherent(struct device *dev, size_t size, |
434 | - dma_addr_t *dma_handle, gfp_t flag) | |
434 | + dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs) | |
435 | 435 | { |
436 | 436 | void *ret = NULL; |
437 | 437 | dma_addr_t mapping; |
... | ... | @@ -464,7 +464,8 @@ |
464 | 464 | } |
465 | 465 | |
466 | 466 | static void calgary_free_coherent(struct device *dev, size_t size, |
467 | - void *vaddr, dma_addr_t dma_handle) | |
467 | + void *vaddr, dma_addr_t dma_handle, | |
468 | + struct dma_attrs *attrs) | |
468 | 469 | { |
469 | 470 | unsigned int npages; |
470 | 471 | struct iommu_table *tbl = find_iommu_table(dev); |
... | ... | @@ -477,8 +478,8 @@ |
477 | 478 | } |
478 | 479 | |
479 | 480 | static struct dma_map_ops calgary_dma_ops = { |
480 | - .alloc_coherent = calgary_alloc_coherent, | |
481 | - .free_coherent = calgary_free_coherent, | |
481 | + .alloc = calgary_alloc_coherent, | |
482 | + .free = calgary_free_coherent, | |
482 | 483 | .map_sg = calgary_map_sg, |
483 | 484 | .unmap_sg = calgary_unmap_sg, |
484 | 485 | .map_page = calgary_map_page, |
arch/x86/kernel/pci-dma.c
arch/x86/kernel/pci-nommu.c
... | ... | @@ -75,7 +75,7 @@ |
75 | 75 | } |
76 | 76 | |
77 | 77 | static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr, |
78 | - dma_addr_t dma_addr) | |
78 | + dma_addr_t dma_addr, struct dma_attrs *attrs) | |
79 | 79 | { |
80 | 80 | free_pages((unsigned long)vaddr, get_order(size)); |
81 | 81 | } |
... | ... | @@ -96,8 +96,8 @@ |
96 | 96 | } |
97 | 97 | |
98 | 98 | struct dma_map_ops nommu_dma_ops = { |
99 | - .alloc_coherent = dma_generic_alloc_coherent, | |
100 | - .free_coherent = nommu_free_coherent, | |
99 | + .alloc = dma_generic_alloc_coherent, | |
100 | + .free = nommu_free_coherent, | |
101 | 101 | .map_sg = nommu_map_sg, |
102 | 102 | .map_page = nommu_map_page, |
103 | 103 | .sync_single_for_device = nommu_sync_single_for_device, |
arch/x86/kernel/pci-swiotlb.c
... | ... | @@ -15,21 +15,30 @@ |
15 | 15 | int swiotlb __read_mostly; |
16 | 16 | |
17 | 17 | static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
18 | - dma_addr_t *dma_handle, gfp_t flags) | |
18 | + dma_addr_t *dma_handle, gfp_t flags, | |
19 | + struct dma_attrs *attrs) | |
19 | 20 | { |
20 | 21 | void *vaddr; |
21 | 22 | |
22 | - vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags); | |
23 | + vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags, | |
24 | + attrs); | |
23 | 25 | if (vaddr) |
24 | 26 | return vaddr; |
25 | 27 | |
26 | 28 | return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); |
27 | 29 | } |
28 | 30 | |
31 | +static void x86_swiotlb_free_coherent(struct device *dev, size_t size, | |
32 | + void *vaddr, dma_addr_t dma_addr, | |
33 | + struct dma_attrs *attrs) | |
34 | +{ | |
35 | + swiotlb_free_coherent(dev, size, vaddr, dma_addr); | |
36 | +} | |
37 | + | |
29 | 38 | static struct dma_map_ops swiotlb_dma_ops = { |
30 | 39 | .mapping_error = swiotlb_dma_mapping_error, |
31 | - .alloc_coherent = x86_swiotlb_alloc_coherent, | |
32 | - .free_coherent = swiotlb_free_coherent, | |
40 | + .alloc = x86_swiotlb_alloc_coherent, | |
41 | + .free = x86_swiotlb_free_coherent, | |
33 | 42 | .sync_single_for_cpu = swiotlb_sync_single_for_cpu, |
34 | 43 | .sync_single_for_device = swiotlb_sync_single_for_device, |
35 | 44 | .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, |
arch/x86/xen/pci-swiotlb-xen.c
... | ... | @@ -12,8 +12,8 @@ |
12 | 12 | |
13 | 13 | static struct dma_map_ops xen_swiotlb_dma_ops = { |
14 | 14 | .mapping_error = xen_swiotlb_dma_mapping_error, |
15 | - .alloc_coherent = xen_swiotlb_alloc_coherent, | |
16 | - .free_coherent = xen_swiotlb_free_coherent, | |
15 | + .alloc = xen_swiotlb_alloc_coherent, | |
16 | + .free = xen_swiotlb_free_coherent, | |
17 | 17 | .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu, |
18 | 18 | .sync_single_for_device = xen_swiotlb_sync_single_for_device, |
19 | 19 | .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu, |
drivers/iommu/amd_iommu.c
... | ... | @@ -2707,7 +2707,8 @@ |
2707 | 2707 | * The exported alloc_coherent function for dma_ops. |
2708 | 2708 | */ |
2709 | 2709 | static void *alloc_coherent(struct device *dev, size_t size, |
2710 | - dma_addr_t *dma_addr, gfp_t flag) | |
2710 | + dma_addr_t *dma_addr, gfp_t flag, | |
2711 | + struct dma_attrs *attrs) | |
2711 | 2712 | { |
2712 | 2713 | unsigned long flags; |
2713 | 2714 | void *virt_addr; |
... | ... | @@ -2765,7 +2766,8 @@ |
2765 | 2766 | * The exported free_coherent function for dma_ops. |
2766 | 2767 | */ |
2767 | 2768 | static void free_coherent(struct device *dev, size_t size, |
2768 | - void *virt_addr, dma_addr_t dma_addr) | |
2769 | + void *virt_addr, dma_addr_t dma_addr, | |
2770 | + struct dma_attrs *attrs) | |
2769 | 2771 | { |
2770 | 2772 | unsigned long flags; |
2771 | 2773 | struct protection_domain *domain; |
... | ... | @@ -2846,8 +2848,8 @@ |
2846 | 2848 | } |
2847 | 2849 | |
2848 | 2850 | static struct dma_map_ops amd_iommu_dma_ops = { |
2849 | - .alloc_coherent = alloc_coherent, | |
2850 | - .free_coherent = free_coherent, | |
2851 | + .alloc = alloc_coherent, | |
2852 | + .free = free_coherent, | |
2851 | 2853 | .map_page = map_page, |
2852 | 2854 | .unmap_page = unmap_page, |
2853 | 2855 | .map_sg = map_sg, |
drivers/iommu/intel-iommu.c
... | ... | @@ -2938,7 +2938,8 @@ |
2938 | 2938 | } |
2939 | 2939 | |
2940 | 2940 | static void *intel_alloc_coherent(struct device *hwdev, size_t size, |
2941 | - dma_addr_t *dma_handle, gfp_t flags) | |
2941 | + dma_addr_t *dma_handle, gfp_t flags, | |
2942 | + struct dma_attrs *attrs) | |
2942 | 2943 | { |
2943 | 2944 | void *vaddr; |
2944 | 2945 | int order; |
... | ... | @@ -2970,7 +2971,7 @@ |
2970 | 2971 | } |
2971 | 2972 | |
2972 | 2973 | static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
2973 | - dma_addr_t dma_handle) | |
2974 | + dma_addr_t dma_handle, struct dma_attrs *attrs) | |
2974 | 2975 | { |
2975 | 2976 | int order; |
2976 | 2977 | |
... | ... | @@ -3115,8 +3116,8 @@ |
3115 | 3116 | } |
3116 | 3117 | |
3117 | 3118 | struct dma_map_ops intel_dma_ops = { |
3118 | - .alloc_coherent = intel_alloc_coherent, | |
3119 | - .free_coherent = intel_free_coherent, | |
3119 | + .alloc = intel_alloc_coherent, | |
3120 | + .free = intel_free_coherent, | |
3120 | 3121 | .map_sg = intel_map_sg, |
3121 | 3122 | .unmap_sg = intel_unmap_sg, |
3122 | 3123 | .map_page = intel_map_page, |
drivers/xen/swiotlb-xen.c
... | ... | @@ -204,7 +204,8 @@ |
204 | 204 | |
205 | 205 | void * |
206 | 206 | xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
207 | - dma_addr_t *dma_handle, gfp_t flags) | |
207 | + dma_addr_t *dma_handle, gfp_t flags, | |
208 | + struct dma_attrs *attrs) | |
208 | 209 | { |
209 | 210 | void *ret; |
210 | 211 | int order = get_order(size); |
... | ... | @@ -253,7 +254,7 @@ |
253 | 254 | |
254 | 255 | void |
255 | 256 | xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, |
256 | - dma_addr_t dev_addr) | |
257 | + dma_addr_t dev_addr, struct dma_attrs *attrs) | |
257 | 258 | { |
258 | 259 | int order = get_order(size); |
259 | 260 | phys_addr_t phys; |
include/xen/swiotlb-xen.h
... | ... | @@ -7,11 +7,13 @@ |
7 | 7 | |
8 | 8 | extern void |
9 | 9 | *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
10 | - dma_addr_t *dma_handle, gfp_t flags); | |
10 | + dma_addr_t *dma_handle, gfp_t flags, | |
11 | + struct dma_attrs *attrs); | |
11 | 12 | |
12 | 13 | extern void |
13 | 14 | xen_swiotlb_free_coherent(struct device *hwdev, size_t size, |
14 | - void *vaddr, dma_addr_t dma_handle); | |
15 | + void *vaddr, dma_addr_t dma_handle, | |
16 | + struct dma_attrs *attrs); | |
15 | 17 | |
16 | 18 | extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page, |
17 | 19 | unsigned long offset, size_t size, |