Blame view
arch/x86/include/asm/dma-mapping.h
3.61 KB
1965aae3c x86: Fix ASM_X86_... |
1 2 |
#ifndef _ASM_X86_DMA_MAPPING_H #define _ASM_X86_DMA_MAPPING_H |
6f5366354 x86: move dma_ops... |
3 4 |
/* |
395cf9691 doc: fix broken r... |
5 |
* IOMMU interface. See Documentation/DMA-API-HOWTO.txt and |
5872fb94f Documentation: mo... |
6 |
* Documentation/DMA-API.txt for documentation. |
6f5366354 x86: move dma_ops... |
7 |
*/ |
d7002857d kmemcheck: add DM... |
8 |
#include <linux/kmemcheck.h> |
6f5366354 x86: move dma_ops... |
9 |
#include <linux/scatterlist.h> |
2118d0c54 dma-debug: x86 ar... |
10 |
#include <linux/dma-debug.h> |
abe6602bf x86: add map_page... |
11 |
#include <linux/dma-attrs.h> |
6f5366354 x86: move dma_ops... |
12 13 |
#include <asm/io.h> #include <asm/swiotlb.h> |
6c505ce39 x86: move dma_*_c... |
14 |
#include <asm-generic/dma-coherent.h> |
6f5366354 x86: move dma_ops... |
15 |
|
eb647138a x86/PCI: Adjust G... |
16 17 18 19 20 |
/*
 * Widest DMA mask usable for ISA-era allocations: ISA devices can only
 * address the low 16 MiB, so cap at 24 bits when ISA support is built in.
 */
#ifdef CONFIG_ISA
# define ISA_DMA_BIT_MASK	DMA_BIT_MASK(24)
#else
# define ISA_DMA_BIT_MASK	DMA_BIT_MASK(32)
#endif
8fd524b35 x86: Kill bad_dma... |
21 |
/* Sentinel bus address the mapping functions hand back on failure. */
#define DMA_ERROR_CODE	0
b7107a3d9 x86: delete the a... |
22 |
/* Tunables and fallbacks shared with the per-IOMMU implementations. */
extern int iommu_merge;				/* allow sg merging */
extern struct device x86_dma_fallback_dev;	/* used when dev == NULL */
extern int panic_on_overflow;			/* panic on IOMMU overflow */
7c1834166 x86: provide a ba... |
25 |
|
160c1d8e4 x86, ia64: conver... |
26 27 28 |
extern struct dma_map_ops *dma_ops; static inline struct dma_map_ops *get_dma_ops(struct device *dev) |
c786df08f x86: unify dma_ma... |
29 |
{ |
8d8bb39b9 dma-mapping: add ... |
30 31 32 33 34 35 36 |
#ifdef CONFIG_X86_32 return dma_ops; #else if (unlikely(!dev) || !dev->archdata.dma_ops) return dma_ops; else return dev->archdata.dma_ops; |
cfb80c9ea x86: unify pci io... |
37 |
#endif |
8d8bb39b9 dma-mapping: add ... |
38 |
} |
7c095e460 dma-mapping: x86:... |
39 |
#include <asm-generic/dma-mapping-common.h> |
8d8bb39b9 dma-mapping: add ... |
40 41 42 |
/*
 * Check whether @dma_addr represents a failed mapping.  Delegates to the
 * ops-specific mapping_error hook when the implementation provides one;
 * otherwise compares against the architecture-wide DMA_ERROR_CODE
 * sentinel so all backends keep the same behaviour.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->mapping_error)
		return dma_addr == DMA_ERROR_CODE;

	return ops->mapping_error(dev, dma_addr);
}
8d396ded7 x86: move alloc a... |
49 50 |
/*
 * x86 DMA is cache-coherent, so the "noncoherent" allocation API is just
 * an alias for the coherent one.
 */
#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
8d396ded7 x86: move alloc a... |
51 |
|
802c1f664 x86: move dma_sup... |
52 53 |
extern int dma_supported(struct device *hwdev, u64 mask); extern int dma_set_mask(struct device *dev, u64 mask); |
9f6ac5772 x86: export pci-n... |
54 55 |
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag); |
99becaca8 x86: add dma_capa... |
56 57 58 59 |
/*
 * True when the whole range [addr, addr + size) lies within the
 * device's DMA mask.  A device without a dma_mask cannot DMA at all.
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (dev->dma_mask)
		return addr + size - 1 <= *dev->dma_mask;

	return 0;
}
8d4f5339d x86, IA64, powerp... |
62 63 64 65 66 67 68 69 70 |
/*
 * On x86 the DMA (bus) address space is identity-mapped onto the
 * physical address space, so both conversions are trivial.  @dev is
 * accepted only for API symmetry with architectures that need it.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}
3cb6a9171 x86: move dma_cac... |
71 72 73 74 75 76 |
/*
 * x86 caches are DMA-coherent, so no cache maintenance is needed here;
 * just drain any pending write-combining buffers.  @dev, @vaddr, @size
 * and @dir are part of the generic API and intentionally unused.
 */
static inline void dma_cache_sync(struct device *dev, void *vaddr,
				  size_t size, enum dma_data_direction dir)
{
	flush_write_buffers();
}
ae17a63b0 x86: move ARCH_HA... |
77 |
|
823e7e8c6 x86: dma_alloc_co... |
78 79 80 81 |
/*
 * Return the effective coherent-allocation mask for @dev: the device's
 * coherent_dma_mask when set, otherwise a default derived from @gfp
 * (24-bit when GFP_DMA was requested, 32-bit otherwise).
 *
 * Fix: drop the dead-store initializer (the old "= 0" was immediately
 * overwritten by the coherent_dma_mask assignment).
 *
 * NOTE(review): coherent_dma_mask is presumably a u64, so assigning it
 * to unsigned long truncates masks above 32 bits on 32-bit kernels —
 * return type kept for interface compatibility; confirm with callers.
 */
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = dev->coherent_dma_mask;

	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}

/*
 * Adjust @gfp so the page allocator picks a zone satisfying the
 * device's coherent mask: force ZONE_DMA for <=24-bit masks and, on
 * 64-bit kernels, ZONE_DMA32 for <=32-bit masks.
 */
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
6c505ce39 x86: move dma_*_c... |
101 102 103 104 |
/*
 * Allocate a @size-byte coherent DMA buffer for @dev, storing the bus
 * address in *@dma_handle and returning the kernel virtual address
 * (NULL on failure).  The per-device coherent pool is tried first; the
 * zone-selection bits in @gfp are recomputed from the coherent mask.
 */
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *vaddr;

	/* Zone flags are rederived from the mask below; strip them here. */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	/* A device-private coherent pool, if present, takes priority. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!is_device_dma_capable(dev) || !ops->alloc_coherent)
		return NULL;

	vaddr = ops->alloc_coherent(dev, size, dma_handle,
				    dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);

	return vaddr;
}

/*
 * Release a buffer obtained from dma_alloc_coherent().  Buffers from a
 * per-device coherent pool are returned there; everything else goes
 * through the ops-specific free hook.
 */
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (!dma_release_from_coherent(dev, get_order(size), vaddr)) {
		debug_dma_free_coherent(dev, size, vaddr, bus);
		if (ops->free_coherent)
			ops->free_coherent(dev, size, vaddr, bus);
	}
}
b7107a3d9 x86: delete the a... |
137 |
|
6f5366354 x86: move dma_ops... |
138 |
#endif |