arch/microblaze/kernel/dma.c

/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>
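
/*
 * MicroBlaze caches are not coherent with respect to DMA, so this is
 * defined unconditionally: coherent allocations are served from the
 * uncached consistent_alloc() pool rather than from ordinary cached
 * pages.
 */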
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag,
				       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret);

	return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle,
				     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
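
/*
 * With a 1:1 bus-to-physical mapping, each scatterlist segment's DMA
 * address is simply its physical address; the cache is synchronized
 * over each segment before ownership passes to the device.
 */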
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
			   sg->length, direction);
	}

	return nents;
}
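
/*
 * The direct implementation performs no address translation or bounce
 * buffering, so every DMA mask a caller asks about is reported as
 * supported.
 */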
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
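
/*
 * Map a single page for DMA: synchronize the cache over the region and
 * hand back its physical address as the bus address.
 */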
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No cache cleanup is necessary here: dma_address is already a
	 * physical address, which __dma_sync() consumes directly, so no
	 * virt_to_phys() translation is needed.
	 */
	__dma_sync(dma_address, size, direction);
}
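
/*
 * Ownership-transfer syncs: sync_*_for_cpu only matters when the
 * device may have written to memory (DMA_FROM_DEVICE), and
 * sync_*_for_device only when the CPU wrote data the device will read
 * (DMA_TO_DEVICE); the other directions are no-ops.
 */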
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void dma_direct_sync_single_for_device(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}

static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
			struct scatterlist *sgl, int nents,
			enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}

static inline void dma_direct_sync_sg_for_device(struct device *dev,
			struct scatterlist *sgl, int nents,
			enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
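
/*
 * Map a coherent buffer into user space. On the non-coherent path the
 * user mapping must be uncached as well, so the vma's page protection
 * is forced non-cacheable before remapping.
 */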
static int dma_direct_mmap_coherent(struct device *dev,
				    struct vm_area_struct *vma,
				    void *cpu_addr, dma_addr_t handle,
				    size_t size, struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;

	if (off >= count || user_count > (count - off))
		return -ENXIO;

#ifdef NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = consistent_virt_to_pfn(cpu_addr);
#else
	pfn = virt_to_pfn(cpu_addr);
#endif
	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}
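
/*
 * The default dma_map_ops table for directly mapped busses (see the
 * file header comment); EXPORT_SYMBOL makes it available to modules.
 */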
struct dma_map_ops dma_direct_ops = {
	.alloc			= dma_direct_alloc_coherent,
	.free			= dma_direct_free_coherent,
	.mmap			= dma_direct_mmap_coherent,
	.map_sg			= dma_direct_map_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);