Commit b035c96b2da7258bb2bba31812b5f9dda3499f00
Committed by
Linus Torvalds
1 parent
3921ee21e0
Exists in
master
and in
7 other branches
[PATCH] m68k: Add the generic dma API functions
Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 6 changed files with 197 additions and 17 deletions Side-by-side Diff
arch/m68k/apollo/Makefile
arch/m68k/kernel/Makefile
... | ... | @@ -9,8 +9,8 @@ |
9 | 9 | endif |
10 | 10 | extra-y += vmlinux.lds |
11 | 11 | |
12 | -obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o \ | |
13 | - sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o | |
12 | +obj-y := entry.o process.o traps.o ints.o dma.o signal.o ptrace.o \ | |
13 | + sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o | |
14 | 14 | |
15 | 15 | obj-$(CONFIG_PCI) += bios32.o |
16 | 16 | obj-$(CONFIG_MODULES) += module.o |
arch/m68k/kernel/dma.c
1 | +/* | |
2 | + * This file is subject to the terms and conditions of the GNU General Public | |
3 | + * License. See the file COPYING in the main directory of this archive | |
4 | + * for more details. | |
5 | + */ | |
6 | + | |
7 | +#undef DEBUG | |
8 | + | |
9 | +#include <linux/dma-mapping.h> | |
10 | +#include <linux/device.h> | |
11 | +#include <linux/kernel.h> | |
12 | +#include <linux/vmalloc.h> | |
13 | + | |
14 | +#include <asm/pgalloc.h> | |
15 | +#include <asm/scatterlist.h> | |
16 | + | |
17 | +void *dma_alloc_coherent(struct device *dev, size_t size, | |
18 | + dma_addr_t *handle, int flag) | |
19 | +{ | |
20 | + struct page *page, **map; | |
21 | + pgprot_t pgprot; | |
22 | + void *addr; | |
23 | + int i, order; | |
24 | + | |
25 | + pr_debug("dma_alloc_coherent: %d,%x\n", size, flag); | |
26 | + | |
27 | + size = PAGE_ALIGN(size); | |
28 | + order = get_order(size); | |
29 | + | |
30 | + page = alloc_pages(flag, order); | |
31 | + if (!page) | |
32 | + return NULL; | |
33 | + | |
34 | + *handle = page_to_phys(page); | |
35 | + map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA); | |
36 | + if (!map) { | |
37 | + __free_pages(page, order); | |
38 | + return NULL; | |
39 | + } | |
40 | + split_page(page, order); | |
41 | + | |
42 | + order = 1 << order; | |
43 | + size >>= PAGE_SHIFT; | |
44 | + map[0] = page; | |
45 | + for (i = 1; i < size; i++) | |
46 | + map[i] = page + i; | |
47 | + for (; i < order; i++) | |
48 | + __free_page(page + i); | |
49 | + pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY); | |
50 | + if (CPU_IS_040_OR_060) | |
51 | + pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S; | |
52 | + else | |
53 | + pgprot_val(pgprot) |= _PAGE_NOCACHE030; | |
54 | + addr = vmap(map, size, flag, pgprot); | |
55 | + kfree(map); | |
56 | + | |
57 | + return addr; | |
58 | +} | |
59 | +EXPORT_SYMBOL(dma_alloc_coherent); | |
60 | + | |
61 | +void dma_free_coherent(struct device *dev, size_t size, | |
62 | + void *addr, dma_addr_t handle) | |
63 | +{ | |
64 | + pr_debug("dma_free_coherent: %p, %x\n", addr, handle); | |
65 | + vfree(addr); | |
66 | +} | |
67 | +EXPORT_SYMBOL(dma_free_coherent); | |
68 | + | |
69 | +inline void dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size, | |
70 | + enum dma_data_direction dir) | |
71 | +{ | |
72 | + switch (dir) { | |
73 | + case DMA_TO_DEVICE: | |
74 | + cache_push(handle, size); | |
75 | + break; | |
76 | + case DMA_FROM_DEVICE: | |
77 | + cache_clear(handle, size); | |
78 | + break; | |
79 | + default: | |
80 | + if (printk_ratelimit()) | |
81 | + printk("dma_sync_single_for_device: unsupported dir %u\n", dir); | |
82 | + break; | |
83 | + } | |
84 | +} | |
85 | +EXPORT_SYMBOL(dma_sync_single_for_device); | |
86 | + | |
87 | +void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, | |
88 | + enum dma_data_direction dir) | |
89 | +{ | |
90 | + int i; | |
91 | + | |
92 | + for (i = 0; i < nents; sg++, i++) | |
93 | + dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir); | |
94 | +} | |
95 | +EXPORT_SYMBOL(dma_sync_sg_for_device); | |
96 | + | |
97 | +dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size, | |
98 | + enum dma_data_direction dir) | |
99 | +{ | |
100 | + dma_addr_t handle = virt_to_bus(addr); | |
101 | + | |
102 | + dma_sync_single_for_device(dev, handle, size, dir); | |
103 | + return handle; | |
104 | +} | |
105 | +EXPORT_SYMBOL(dma_map_single); | |
106 | + | |
107 | +dma_addr_t dma_map_page(struct device *dev, struct page *page, | |
108 | + unsigned long offset, size_t size, | |
109 | + enum dma_data_direction dir) | |
110 | +{ | |
111 | + dma_addr_t handle = page_to_phys(page) + offset; | |
112 | + | |
113 | + dma_sync_single_for_device(dev, handle, size, dir); | |
114 | + return handle; | |
115 | +} | |
116 | +EXPORT_SYMBOL(dma_map_page); | |
117 | + | |
118 | +int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
119 | + enum dma_data_direction dir) | |
120 | +{ | |
121 | + int i; | |
122 | + | |
123 | + for (i = 0; i < nents; sg++, i++) { | |
124 | + sg->dma_address = page_to_phys(sg->page) + sg->offset; | |
125 | + dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir); | |
126 | + } | |
127 | + return nents; | |
128 | +} | |
129 | +EXPORT_SYMBOL(dma_map_sg); |
drivers/scsi/sun3x_esp.c
... | ... | @@ -332,11 +332,11 @@ |
332 | 332 | struct scatterlist *sg = sp->SCp.buffer; |
333 | 333 | |
334 | 334 | while (sz >= 0) { |
335 | - sg[sz].dvma_address = dvma_map((unsigned long)page_address(sg[sz].page) + | |
335 | + sg[sz].dma_address = dvma_map((unsigned long)page_address(sg[sz].page) + | |
336 | 336 | sg[sz].offset, sg[sz].length); |
337 | 337 | sz--; |
338 | 338 | } |
339 | - sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dvma_address); | |
339 | + sp->SCp.ptr=(char *)((unsigned long)sp->SCp.buffer->dma_address); | |
340 | 340 | } |
341 | 341 | |
342 | 342 | static void dma_mmu_release_scsi_one (struct NCR_ESP *esp, Scsi_Cmnd *sp) |
343 | 343 | |
... | ... | @@ -350,14 +350,14 @@ |
350 | 350 | struct scatterlist *sg = (struct scatterlist *)sp->buffer; |
351 | 351 | |
352 | 352 | while(sz >= 0) { |
353 | - dvma_unmap((char *)sg[sz].dvma_address); | |
353 | + dvma_unmap((char *)sg[sz].dma_address); | |
354 | 354 | sz--; |
355 | 355 | } |
356 | 356 | } |
357 | 357 | |
358 | 358 | static void dma_advance_sg (Scsi_Cmnd *sp) |
359 | 359 | { |
360 | - sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dvma_address); | |
360 | + sp->SCp.ptr = (char *)((unsigned long)sp->SCp.buffer->dma_address); | |
361 | 361 | } |
362 | 362 | |
363 | 363 | static int sun3x_esp_release(struct Scsi_Host *instance) |
include/asm-m68k/dma-mapping.h
1 | 1 | #ifndef _M68K_DMA_MAPPING_H |
2 | 2 | #define _M68K_DMA_MAPPING_H |
3 | 3 | |
4 | +struct scatterlist; | |
4 | 5 | |
5 | -#ifdef CONFIG_PCI | |
6 | -#include <asm-generic/dma-mapping.h> | |
7 | -#else | |
8 | -#include <asm-generic/dma-mapping-broken.h> | |
9 | -#endif | |
6 | +static inline int dma_supported(struct device *dev, u64 mask) | |
7 | +{ | |
8 | + return 1; | |
9 | +} | |
10 | + | |
11 | +static inline int dma_set_mask(struct device *dev, u64 mask) | |
12 | +{ | |
13 | + return 0; | |
14 | +} | |
15 | + | |
16 | +extern void *dma_alloc_coherent(struct device *, size_t, | |
17 | + dma_addr_t *, int); | |
18 | +extern void dma_free_coherent(struct device *, size_t, | |
19 | + void *, dma_addr_t); | |
20 | + | |
21 | +extern dma_addr_t dma_map_single(struct device *, void *, size_t, | |
22 | + enum dma_data_direction); | |
23 | +static inline void dma_unmap_single(struct device *dev, dma_addr_t addr, | |
24 | + size_t size, enum dma_data_direction dir) | |
25 | +{ | |
26 | +} | |
27 | + | |
28 | +extern dma_addr_t dma_map_page(struct device *, struct page *, | |
29 | + unsigned long, size_t size, | |
30 | + enum dma_data_direction); | |
31 | +static inline void dma_unmap_page(struct device *dev, dma_addr_t address, | |
32 | + size_t size, enum dma_data_direction dir) | |
33 | +{ | |
34 | +} | |
35 | + | |
36 | +extern int dma_map_sg(struct device *, struct scatterlist *, int, | |
37 | + enum dma_data_direction); | |
38 | +static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg, | |
39 | + int nhwentries, enum dma_data_direction dir) | |
40 | +{ | |
41 | +} | |
42 | + | |
43 | +extern void dma_sync_single_for_device(struct device *, dma_addr_t, size_t, | |
44 | + enum dma_data_direction); | |
45 | +extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, | |
46 | + enum dma_data_direction); | |
47 | + | |
48 | +static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, | |
49 | + size_t size, enum dma_data_direction dir) | |
50 | +{ | |
51 | +} | |
52 | + | |
53 | +static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |
54 | + int nents, enum dma_data_direction dir) | |
55 | +{ | |
56 | +} | |
57 | + | |
58 | +static inline int dma_mapping_error(dma_addr_t handle) | |
59 | +{ | |
60 | + return 0; | |
61 | +} | |
10 | 62 | |
11 | 63 | #endif /* _M68K_DMA_MAPPING_H */ |
include/asm-m68k/scatterlist.h
... | ... | @@ -2,19 +2,18 @@ |
2 | 2 | #define _M68K_SCATTERLIST_H |
3 | 3 | |
4 | 4 | struct scatterlist { |
5 | - /* These two are only valid if ADDRESS member of this | |
6 | - * struct is NULL. | |
7 | - */ | |
8 | 5 | struct page *page; |
9 | 6 | unsigned int offset; |
10 | - | |
11 | 7 | unsigned int length; |
12 | 8 | |
13 | - __u32 dvma_address; /* A place to hang host-specific addresses at. */ | |
9 | + __u32 dma_address; /* A place to hang host-specific addresses at. */ | |
14 | 10 | }; |
15 | 11 | |
16 | 12 | /* This is bogus and should go away. */ |
17 | 13 | #define ISA_DMA_THRESHOLD (0x00ffffff) |
14 | + | |
15 | +#define sg_dma_address(sg) ((sg)->dma_address) | |
16 | +#define sg_dma_len(sg) ((sg)->length) | |
18 | 17 | |
19 | 18 | #endif /* !(_M68K_SCATTERLIST_H) */ |