Commit be1f94812c2cc0aaf696d39fe23104763ea52b5b
1 parent
a6cf912c60
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
ARM: OMAP: Fix dmaengine init for multiplatform
Otherwise the omap dmaengine will be initialized when booted on other SoCs. Fix this by initializing the platform device in arch/arm/*omap*/dma.c instead. Cc: Russell King <linux@arm.linux.org.uk> Cc: Dan Williams <djbw@fb.com> Cc: Vinod Koul <vinod.koul@intel.com> Tested-by: Ezequiel Garcia <ezequiel.garcia@free-electrons.com> Signed-off-by: Tony Lindgren <tony@atomide.com>
Showing 3 changed files with 36 additions and 23 deletions Inline Diff
arch/arm/mach-omap1/dma.c
1 | /* | 1 | /* |
2 | * OMAP1/OMAP7xx - specific DMA driver | 2 | * OMAP1/OMAP7xx - specific DMA driver |
3 | * | 3 | * |
4 | * Copyright (C) 2003 - 2008 Nokia Corporation | 4 | * Copyright (C) 2003 - 2008 Nokia Corporation |
5 | * Author: Juha Yrjölä <juha.yrjola@nokia.com> | 5 | * Author: Juha Yrjölä <juha.yrjola@nokia.com> |
6 | * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> | 6 | * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> |
7 | * Graphics DMA and LCD DMA graphics tranformations | 7 | * Graphics DMA and LCD DMA graphics tranformations |
8 | * by Imre Deak <imre.deak@nokia.com> | 8 | * by Imre Deak <imre.deak@nokia.com> |
9 | * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc. | 9 | * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc. |
10 | * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. | 10 | * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. |
11 | * | 11 | * |
12 | * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ | 12 | * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ |
13 | * Converted DMA library into platform driver | 13 | * Converted DMA library into platform driver |
14 | * - G, Manjunath Kondaiah <manjugk@ti.com> | 14 | * - G, Manjunath Kondaiah <manjugk@ti.com> |
15 | * | 15 | * |
16 | * This program is free software; you can redistribute it and/or modify | 16 | * This program is free software; you can redistribute it and/or modify |
17 | * it under the terms of the GNU General Public License version 2 as | 17 | * it under the terms of the GNU General Public License version 2 as |
18 | * published by the Free Software Foundation. | 18 | * published by the Free Software Foundation. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/device.h> | 25 | #include <linux/device.h> |
26 | #include <linux/io.h> | 26 | #include <linux/io.h> |
27 | 27 | #include <linux/dma-mapping.h> | |
28 | #include <linux/omap-dma.h> | 28 | #include <linux/omap-dma.h> |
29 | #include <mach/tc.h> | 29 | #include <mach/tc.h> |
30 | 30 | ||
31 | #include <mach/irqs.h> | 31 | #include <mach/irqs.h> |
32 | 32 | ||
33 | #include "dma.h" | 33 | #include "dma.h" |
34 | 34 | ||
35 | #define OMAP1_DMA_BASE (0xfffed800) | 35 | #define OMAP1_DMA_BASE (0xfffed800) |
36 | #define OMAP1_LOGICAL_DMA_CH_COUNT 17 | 36 | #define OMAP1_LOGICAL_DMA_CH_COUNT 17 |
37 | #define OMAP1_DMA_STRIDE 0x40 | 37 | #define OMAP1_DMA_STRIDE 0x40 |
38 | 38 | ||
39 | static u32 errata; | 39 | static u32 errata; |
40 | static u32 enable_1510_mode; | 40 | static u32 enable_1510_mode; |
41 | static u8 dma_stride; | 41 | static u8 dma_stride; |
42 | static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end; | 42 | static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end; |
43 | 43 | ||
44 | static u16 reg_map[] = { | 44 | static u16 reg_map[] = { |
45 | [GCR] = 0x400, | 45 | [GCR] = 0x400, |
46 | [GSCR] = 0x404, | 46 | [GSCR] = 0x404, |
47 | [GRST1] = 0x408, | 47 | [GRST1] = 0x408, |
48 | [HW_ID] = 0x442, | 48 | [HW_ID] = 0x442, |
49 | [PCH2_ID] = 0x444, | 49 | [PCH2_ID] = 0x444, |
50 | [PCH0_ID] = 0x446, | 50 | [PCH0_ID] = 0x446, |
51 | [PCH1_ID] = 0x448, | 51 | [PCH1_ID] = 0x448, |
52 | [PCHG_ID] = 0x44a, | 52 | [PCHG_ID] = 0x44a, |
53 | [PCHD_ID] = 0x44c, | 53 | [PCHD_ID] = 0x44c, |
54 | [CAPS_0] = 0x44e, | 54 | [CAPS_0] = 0x44e, |
55 | [CAPS_1] = 0x452, | 55 | [CAPS_1] = 0x452, |
56 | [CAPS_2] = 0x456, | 56 | [CAPS_2] = 0x456, |
57 | [CAPS_3] = 0x458, | 57 | [CAPS_3] = 0x458, |
58 | [CAPS_4] = 0x45a, | 58 | [CAPS_4] = 0x45a, |
59 | [PCH2_SR] = 0x460, | 59 | [PCH2_SR] = 0x460, |
60 | [PCH0_SR] = 0x480, | 60 | [PCH0_SR] = 0x480, |
61 | [PCH1_SR] = 0x482, | 61 | [PCH1_SR] = 0x482, |
62 | [PCHD_SR] = 0x4c0, | 62 | [PCHD_SR] = 0x4c0, |
63 | 63 | ||
64 | /* Common Registers */ | 64 | /* Common Registers */ |
65 | [CSDP] = 0x00, | 65 | [CSDP] = 0x00, |
66 | [CCR] = 0x02, | 66 | [CCR] = 0x02, |
67 | [CICR] = 0x04, | 67 | [CICR] = 0x04, |
68 | [CSR] = 0x06, | 68 | [CSR] = 0x06, |
69 | [CEN] = 0x10, | 69 | [CEN] = 0x10, |
70 | [CFN] = 0x12, | 70 | [CFN] = 0x12, |
71 | [CSFI] = 0x14, | 71 | [CSFI] = 0x14, |
72 | [CSEI] = 0x16, | 72 | [CSEI] = 0x16, |
73 | [CPC] = 0x18, /* 15xx only */ | 73 | [CPC] = 0x18, /* 15xx only */ |
74 | [CSAC] = 0x18, | 74 | [CSAC] = 0x18, |
75 | [CDAC] = 0x1a, | 75 | [CDAC] = 0x1a, |
76 | [CDEI] = 0x1c, | 76 | [CDEI] = 0x1c, |
77 | [CDFI] = 0x1e, | 77 | [CDFI] = 0x1e, |
78 | [CLNK_CTRL] = 0x28, | 78 | [CLNK_CTRL] = 0x28, |
79 | 79 | ||
80 | /* Channel specific register offsets */ | 80 | /* Channel specific register offsets */ |
81 | [CSSA] = 0x08, | 81 | [CSSA] = 0x08, |
82 | [CDSA] = 0x0c, | 82 | [CDSA] = 0x0c, |
83 | [COLOR] = 0x20, | 83 | [COLOR] = 0x20, |
84 | [CCR2] = 0x24, | 84 | [CCR2] = 0x24, |
85 | [LCH_CTRL] = 0x2a, | 85 | [LCH_CTRL] = 0x2a, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static struct resource res[] __initdata = { | 88 | static struct resource res[] __initdata = { |
89 | [0] = { | 89 | [0] = { |
90 | .start = OMAP1_DMA_BASE, | 90 | .start = OMAP1_DMA_BASE, |
91 | .end = OMAP1_DMA_BASE + SZ_2K - 1, | 91 | .end = OMAP1_DMA_BASE + SZ_2K - 1, |
92 | .flags = IORESOURCE_MEM, | 92 | .flags = IORESOURCE_MEM, |
93 | }, | 93 | }, |
94 | [1] = { | 94 | [1] = { |
95 | .name = "0", | 95 | .name = "0", |
96 | .start = INT_DMA_CH0_6, | 96 | .start = INT_DMA_CH0_6, |
97 | .flags = IORESOURCE_IRQ, | 97 | .flags = IORESOURCE_IRQ, |
98 | }, | 98 | }, |
99 | [2] = { | 99 | [2] = { |
100 | .name = "1", | 100 | .name = "1", |
101 | .start = INT_DMA_CH1_7, | 101 | .start = INT_DMA_CH1_7, |
102 | .flags = IORESOURCE_IRQ, | 102 | .flags = IORESOURCE_IRQ, |
103 | }, | 103 | }, |
104 | [3] = { | 104 | [3] = { |
105 | .name = "2", | 105 | .name = "2", |
106 | .start = INT_DMA_CH2_8, | 106 | .start = INT_DMA_CH2_8, |
107 | .flags = IORESOURCE_IRQ, | 107 | .flags = IORESOURCE_IRQ, |
108 | }, | 108 | }, |
109 | [4] = { | 109 | [4] = { |
110 | .name = "3", | 110 | .name = "3", |
111 | .start = INT_DMA_CH3, | 111 | .start = INT_DMA_CH3, |
112 | .flags = IORESOURCE_IRQ, | 112 | .flags = IORESOURCE_IRQ, |
113 | }, | 113 | }, |
114 | [5] = { | 114 | [5] = { |
115 | .name = "4", | 115 | .name = "4", |
116 | .start = INT_DMA_CH4, | 116 | .start = INT_DMA_CH4, |
117 | .flags = IORESOURCE_IRQ, | 117 | .flags = IORESOURCE_IRQ, |
118 | }, | 118 | }, |
119 | [6] = { | 119 | [6] = { |
120 | .name = "5", | 120 | .name = "5", |
121 | .start = INT_DMA_CH5, | 121 | .start = INT_DMA_CH5, |
122 | .flags = IORESOURCE_IRQ, | 122 | .flags = IORESOURCE_IRQ, |
123 | }, | 123 | }, |
124 | /* Handled in lcd_dma.c */ | 124 | /* Handled in lcd_dma.c */ |
125 | [7] = { | 125 | [7] = { |
126 | .name = "6", | 126 | .name = "6", |
127 | .start = INT_1610_DMA_CH6, | 127 | .start = INT_1610_DMA_CH6, |
128 | .flags = IORESOURCE_IRQ, | 128 | .flags = IORESOURCE_IRQ, |
129 | }, | 129 | }, |
130 | /* irq's for omap16xx and omap7xx */ | 130 | /* irq's for omap16xx and omap7xx */ |
131 | [8] = { | 131 | [8] = { |
132 | .name = "7", | 132 | .name = "7", |
133 | .start = INT_1610_DMA_CH7, | 133 | .start = INT_1610_DMA_CH7, |
134 | .flags = IORESOURCE_IRQ, | 134 | .flags = IORESOURCE_IRQ, |
135 | }, | 135 | }, |
136 | [9] = { | 136 | [9] = { |
137 | .name = "8", | 137 | .name = "8", |
138 | .start = INT_1610_DMA_CH8, | 138 | .start = INT_1610_DMA_CH8, |
139 | .flags = IORESOURCE_IRQ, | 139 | .flags = IORESOURCE_IRQ, |
140 | }, | 140 | }, |
141 | [10] = { | 141 | [10] = { |
142 | .name = "9", | 142 | .name = "9", |
143 | .start = INT_1610_DMA_CH9, | 143 | .start = INT_1610_DMA_CH9, |
144 | .flags = IORESOURCE_IRQ, | 144 | .flags = IORESOURCE_IRQ, |
145 | }, | 145 | }, |
146 | [11] = { | 146 | [11] = { |
147 | .name = "10", | 147 | .name = "10", |
148 | .start = INT_1610_DMA_CH10, | 148 | .start = INT_1610_DMA_CH10, |
149 | .flags = IORESOURCE_IRQ, | 149 | .flags = IORESOURCE_IRQ, |
150 | }, | 150 | }, |
151 | [12] = { | 151 | [12] = { |
152 | .name = "11", | 152 | .name = "11", |
153 | .start = INT_1610_DMA_CH11, | 153 | .start = INT_1610_DMA_CH11, |
154 | .flags = IORESOURCE_IRQ, | 154 | .flags = IORESOURCE_IRQ, |
155 | }, | 155 | }, |
156 | [13] = { | 156 | [13] = { |
157 | .name = "12", | 157 | .name = "12", |
158 | .start = INT_1610_DMA_CH12, | 158 | .start = INT_1610_DMA_CH12, |
159 | .flags = IORESOURCE_IRQ, | 159 | .flags = IORESOURCE_IRQ, |
160 | }, | 160 | }, |
161 | [14] = { | 161 | [14] = { |
162 | .name = "13", | 162 | .name = "13", |
163 | .start = INT_1610_DMA_CH13, | 163 | .start = INT_1610_DMA_CH13, |
164 | .flags = IORESOURCE_IRQ, | 164 | .flags = IORESOURCE_IRQ, |
165 | }, | 165 | }, |
166 | [15] = { | 166 | [15] = { |
167 | .name = "14", | 167 | .name = "14", |
168 | .start = INT_1610_DMA_CH14, | 168 | .start = INT_1610_DMA_CH14, |
169 | .flags = IORESOURCE_IRQ, | 169 | .flags = IORESOURCE_IRQ, |
170 | }, | 170 | }, |
171 | [16] = { | 171 | [16] = { |
172 | .name = "15", | 172 | .name = "15", |
173 | .start = INT_1610_DMA_CH15, | 173 | .start = INT_1610_DMA_CH15, |
174 | .flags = IORESOURCE_IRQ, | 174 | .flags = IORESOURCE_IRQ, |
175 | }, | 175 | }, |
176 | [17] = { | 176 | [17] = { |
177 | .name = "16", | 177 | .name = "16", |
178 | .start = INT_DMA_LCD, | 178 | .start = INT_DMA_LCD, |
179 | .flags = IORESOURCE_IRQ, | 179 | .flags = IORESOURCE_IRQ, |
180 | }, | 180 | }, |
181 | }; | 181 | }; |
182 | 182 | ||
183 | static void __iomem *dma_base; | 183 | static void __iomem *dma_base; |
184 | static inline void dma_write(u32 val, int reg, int lch) | 184 | static inline void dma_write(u32 val, int reg, int lch) |
185 | { | 185 | { |
186 | u8 stride; | 186 | u8 stride; |
187 | u32 offset; | 187 | u32 offset; |
188 | 188 | ||
189 | stride = (reg >= dma_common_ch_start) ? dma_stride : 0; | 189 | stride = (reg >= dma_common_ch_start) ? dma_stride : 0; |
190 | offset = reg_map[reg] + (stride * lch); | 190 | offset = reg_map[reg] + (stride * lch); |
191 | 191 | ||
192 | __raw_writew(val, dma_base + offset); | 192 | __raw_writew(val, dma_base + offset); |
193 | if ((reg > CLNK_CTRL && reg < CCEN) || | 193 | if ((reg > CLNK_CTRL && reg < CCEN) || |
194 | (reg > PCHD_ID && reg < CAPS_2)) { | 194 | (reg > PCHD_ID && reg < CAPS_2)) { |
195 | u32 offset2 = reg_map[reg] + 2 + (stride * lch); | 195 | u32 offset2 = reg_map[reg] + 2 + (stride * lch); |
196 | __raw_writew(val >> 16, dma_base + offset2); | 196 | __raw_writew(val >> 16, dma_base + offset2); |
197 | } | 197 | } |
198 | } | 198 | } |
199 | 199 | ||
200 | static inline u32 dma_read(int reg, int lch) | 200 | static inline u32 dma_read(int reg, int lch) |
201 | { | 201 | { |
202 | u8 stride; | 202 | u8 stride; |
203 | u32 offset, val; | 203 | u32 offset, val; |
204 | 204 | ||
205 | stride = (reg >= dma_common_ch_start) ? dma_stride : 0; | 205 | stride = (reg >= dma_common_ch_start) ? dma_stride : 0; |
206 | offset = reg_map[reg] + (stride * lch); | 206 | offset = reg_map[reg] + (stride * lch); |
207 | 207 | ||
208 | val = __raw_readw(dma_base + offset); | 208 | val = __raw_readw(dma_base + offset); |
209 | if ((reg > CLNK_CTRL && reg < CCEN) || | 209 | if ((reg > CLNK_CTRL && reg < CCEN) || |
210 | (reg > PCHD_ID && reg < CAPS_2)) { | 210 | (reg > PCHD_ID && reg < CAPS_2)) { |
211 | u16 upper; | 211 | u16 upper; |
212 | u32 offset2 = reg_map[reg] + 2 + (stride * lch); | 212 | u32 offset2 = reg_map[reg] + 2 + (stride * lch); |
213 | upper = __raw_readw(dma_base + offset2); | 213 | upper = __raw_readw(dma_base + offset2); |
214 | val |= (upper << 16); | 214 | val |= (upper << 16); |
215 | } | 215 | } |
216 | return val; | 216 | return val; |
217 | } | 217 | } |
218 | 218 | ||
219 | static void omap1_clear_lch_regs(int lch) | 219 | static void omap1_clear_lch_regs(int lch) |
220 | { | 220 | { |
221 | int i = dma_common_ch_start; | 221 | int i = dma_common_ch_start; |
222 | 222 | ||
223 | for (; i <= dma_common_ch_end; i += 1) | 223 | for (; i <= dma_common_ch_end; i += 1) |
224 | dma_write(0, i, lch); | 224 | dma_write(0, i, lch); |
225 | } | 225 | } |
226 | 226 | ||
227 | static void omap1_clear_dma(int lch) | 227 | static void omap1_clear_dma(int lch) |
228 | { | 228 | { |
229 | u32 l; | 229 | u32 l; |
230 | 230 | ||
231 | l = dma_read(CCR, lch); | 231 | l = dma_read(CCR, lch); |
232 | l &= ~OMAP_DMA_CCR_EN; | 232 | l &= ~OMAP_DMA_CCR_EN; |
233 | dma_write(l, CCR, lch); | 233 | dma_write(l, CCR, lch); |
234 | 234 | ||
235 | /* Clear pending interrupts */ | 235 | /* Clear pending interrupts */ |
236 | l = dma_read(CSR, lch); | 236 | l = dma_read(CSR, lch); |
237 | } | 237 | } |
238 | 238 | ||
239 | static void omap1_show_dma_caps(void) | 239 | static void omap1_show_dma_caps(void) |
240 | { | 240 | { |
241 | if (enable_1510_mode) { | 241 | if (enable_1510_mode) { |
242 | printk(KERN_INFO "DMA support for OMAP15xx initialized\n"); | 242 | printk(KERN_INFO "DMA support for OMAP15xx initialized\n"); |
243 | } else { | 243 | } else { |
244 | u16 w; | 244 | u16 w; |
245 | printk(KERN_INFO "OMAP DMA hardware version %d\n", | 245 | printk(KERN_INFO "OMAP DMA hardware version %d\n", |
246 | dma_read(HW_ID, 0)); | 246 | dma_read(HW_ID, 0)); |
247 | printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n", | 247 | printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n", |
248 | dma_read(CAPS_0, 0), dma_read(CAPS_1, 0), | 248 | dma_read(CAPS_0, 0), dma_read(CAPS_1, 0), |
249 | dma_read(CAPS_2, 0), dma_read(CAPS_3, 0), | 249 | dma_read(CAPS_2, 0), dma_read(CAPS_3, 0), |
250 | dma_read(CAPS_4, 0)); | 250 | dma_read(CAPS_4, 0)); |
251 | 251 | ||
252 | /* Disable OMAP 3.0/3.1 compatibility mode. */ | 252 | /* Disable OMAP 3.0/3.1 compatibility mode. */ |
253 | w = dma_read(GSCR, 0); | 253 | w = dma_read(GSCR, 0); |
254 | w |= 1 << 3; | 254 | w |= 1 << 3; |
255 | dma_write(w, GSCR, 0); | 255 | dma_write(w, GSCR, 0); |
256 | } | 256 | } |
257 | return; | 257 | return; |
258 | } | 258 | } |
259 | 259 | ||
260 | static u32 configure_dma_errata(void) | 260 | static u32 configure_dma_errata(void) |
261 | { | 261 | { |
262 | 262 | ||
263 | /* | 263 | /* |
264 | * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is | 264 | * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is |
265 | * read before the DMA controller finished disabling the channel. | 265 | * read before the DMA controller finished disabling the channel. |
266 | */ | 266 | */ |
267 | if (!cpu_is_omap15xx()) | 267 | if (!cpu_is_omap15xx()) |
268 | SET_DMA_ERRATA(DMA_ERRATA_3_3); | 268 | SET_DMA_ERRATA(DMA_ERRATA_3_3); |
269 | 269 | ||
270 | return errata; | 270 | return errata; |
271 | } | 271 | } |
272 | 272 | ||
273 | static const struct platform_device_info omap_dma_dev_info = { | ||
274 | .name = "omap-dma-engine", | ||
275 | .id = -1, | ||
276 | .dma_mask = DMA_BIT_MASK(32), | ||
277 | }; | ||
278 | |||
273 | static int __init omap1_system_dma_init(void) | 279 | static int __init omap1_system_dma_init(void) |
274 | { | 280 | { |
275 | struct omap_system_dma_plat_info *p; | 281 | struct omap_system_dma_plat_info *p; |
276 | struct omap_dma_dev_attr *d; | 282 | struct omap_dma_dev_attr *d; |
277 | struct platform_device *pdev; | 283 | struct platform_device *pdev, *dma_pdev; |
278 | int ret; | 284 | int ret; |
279 | 285 | ||
280 | pdev = platform_device_alloc("omap_dma_system", 0); | 286 | pdev = platform_device_alloc("omap_dma_system", 0); |
281 | if (!pdev) { | 287 | if (!pdev) { |
282 | pr_err("%s: Unable to device alloc for dma\n", | 288 | pr_err("%s: Unable to device alloc for dma\n", |
283 | __func__); | 289 | __func__); |
284 | return -ENOMEM; | 290 | return -ENOMEM; |
285 | } | 291 | } |
286 | 292 | ||
287 | dma_base = ioremap(res[0].start, resource_size(&res[0])); | 293 | dma_base = ioremap(res[0].start, resource_size(&res[0])); |
288 | if (!dma_base) { | 294 | if (!dma_base) { |
289 | pr_err("%s: Unable to ioremap\n", __func__); | 295 | pr_err("%s: Unable to ioremap\n", __func__); |
290 | ret = -ENODEV; | 296 | ret = -ENODEV; |
291 | goto exit_device_put; | 297 | goto exit_device_put; |
292 | } | 298 | } |
293 | 299 | ||
294 | ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); | 300 | ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); |
295 | if (ret) { | 301 | if (ret) { |
296 | dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", | 302 | dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", |
297 | __func__, pdev->name, pdev->id); | 303 | __func__, pdev->name, pdev->id); |
298 | goto exit_device_put; | 304 | goto exit_device_put; |
299 | } | 305 | } |
300 | 306 | ||
301 | p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); | 307 | p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); |
302 | if (!p) { | 308 | if (!p) { |
303 | dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", | 309 | dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", |
304 | __func__, pdev->name); | 310 | __func__, pdev->name); |
305 | ret = -ENOMEM; | 311 | ret = -ENOMEM; |
306 | goto exit_device_del; | 312 | goto exit_device_del; |
307 | } | 313 | } |
308 | 314 | ||
309 | d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); | 315 | d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); |
310 | if (!d) { | 316 | if (!d) { |
311 | dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n", | 317 | dev_err(&pdev->dev, "%s: Unable to allocate 'd' for %s\n", |
312 | __func__, pdev->name); | 318 | __func__, pdev->name); |
313 | ret = -ENOMEM; | 319 | ret = -ENOMEM; |
314 | goto exit_release_p; | 320 | goto exit_release_p; |
315 | } | 321 | } |
316 | 322 | ||
317 | d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; | 323 | d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT; |
318 | 324 | ||
319 | /* Valid attributes for omap1 plus processors */ | 325 | /* Valid attributes for omap1 plus processors */ |
320 | if (cpu_is_omap15xx()) | 326 | if (cpu_is_omap15xx()) |
321 | d->dev_caps = ENABLE_1510_MODE; | 327 | d->dev_caps = ENABLE_1510_MODE; |
322 | enable_1510_mode = d->dev_caps & ENABLE_1510_MODE; | 328 | enable_1510_mode = d->dev_caps & ENABLE_1510_MODE; |
323 | 329 | ||
324 | if (cpu_is_omap16xx()) | 330 | if (cpu_is_omap16xx()) |
325 | d->dev_caps = ENABLE_16XX_MODE; | 331 | d->dev_caps = ENABLE_16XX_MODE; |
326 | 332 | ||
327 | d->dev_caps |= SRC_PORT; | 333 | d->dev_caps |= SRC_PORT; |
328 | d->dev_caps |= DST_PORT; | 334 | d->dev_caps |= DST_PORT; |
329 | d->dev_caps |= SRC_INDEX; | 335 | d->dev_caps |= SRC_INDEX; |
330 | d->dev_caps |= DST_INDEX; | 336 | d->dev_caps |= DST_INDEX; |
331 | d->dev_caps |= IS_BURST_ONLY4; | 337 | d->dev_caps |= IS_BURST_ONLY4; |
332 | d->dev_caps |= CLEAR_CSR_ON_READ; | 338 | d->dev_caps |= CLEAR_CSR_ON_READ; |
333 | d->dev_caps |= IS_WORD_16; | 339 | d->dev_caps |= IS_WORD_16; |
334 | 340 | ||
335 | 341 | ||
336 | d->chan = kzalloc(sizeof(struct omap_dma_lch) * | 342 | d->chan = kzalloc(sizeof(struct omap_dma_lch) * |
337 | (d->lch_count), GFP_KERNEL); | 343 | (d->lch_count), GFP_KERNEL); |
338 | if (!d->chan) { | 344 | if (!d->chan) { |
339 | dev_err(&pdev->dev, | 345 | dev_err(&pdev->dev, |
340 | "%s: Memory allocation failed for d->chan!\n", | 346 | "%s: Memory allocation failed for d->chan!\n", |
341 | __func__); | 347 | __func__); |
342 | goto exit_release_d; | 348 | goto exit_release_d; |
343 | } | 349 | } |
344 | 350 | ||
345 | if (cpu_is_omap15xx()) | 351 | if (cpu_is_omap15xx()) |
346 | d->chan_count = 9; | 352 | d->chan_count = 9; |
347 | else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { | 353 | else if (cpu_is_omap16xx() || cpu_is_omap7xx()) { |
348 | if (!(d->dev_caps & ENABLE_1510_MODE)) | 354 | if (!(d->dev_caps & ENABLE_1510_MODE)) |
349 | d->chan_count = 16; | 355 | d->chan_count = 16; |
350 | else | 356 | else |
351 | d->chan_count = 9; | 357 | d->chan_count = 9; |
352 | } | 358 | } |
353 | 359 | ||
354 | p->dma_attr = d; | 360 | p->dma_attr = d; |
355 | 361 | ||
356 | p->show_dma_caps = omap1_show_dma_caps; | 362 | p->show_dma_caps = omap1_show_dma_caps; |
357 | p->clear_lch_regs = omap1_clear_lch_regs; | 363 | p->clear_lch_regs = omap1_clear_lch_regs; |
358 | p->clear_dma = omap1_clear_dma; | 364 | p->clear_dma = omap1_clear_dma; |
359 | p->dma_write = dma_write; | 365 | p->dma_write = dma_write; |
360 | p->dma_read = dma_read; | 366 | p->dma_read = dma_read; |
361 | p->disable_irq_lch = NULL; | 367 | p->disable_irq_lch = NULL; |
362 | 368 | ||
363 | p->errata = configure_dma_errata(); | 369 | p->errata = configure_dma_errata(); |
364 | 370 | ||
365 | ret = platform_device_add_data(pdev, p, sizeof(*p)); | 371 | ret = platform_device_add_data(pdev, p, sizeof(*p)); |
366 | if (ret) { | 372 | if (ret) { |
367 | dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", | 373 | dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", |
368 | __func__, pdev->name, pdev->id); | 374 | __func__, pdev->name, pdev->id); |
369 | goto exit_release_chan; | 375 | goto exit_release_chan; |
370 | } | 376 | } |
371 | 377 | ||
372 | ret = platform_device_add(pdev); | 378 | ret = platform_device_add(pdev); |
373 | if (ret) { | 379 | if (ret) { |
374 | dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", | 380 | dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", |
375 | __func__, pdev->name, pdev->id); | 381 | __func__, pdev->name, pdev->id); |
376 | goto exit_release_chan; | 382 | goto exit_release_chan; |
377 | } | 383 | } |
378 | 384 | ||
379 | dma_stride = OMAP1_DMA_STRIDE; | 385 | dma_stride = OMAP1_DMA_STRIDE; |
380 | dma_common_ch_start = CPC; | 386 | dma_common_ch_start = CPC; |
381 | dma_common_ch_end = COLOR; | 387 | dma_common_ch_end = COLOR; |
382 | 388 | ||
389 | dma_pdev = platform_device_register_full(&omap_dma_dev_info); | ||
390 | if (IS_ERR(dma_pdev)) { | ||
391 | ret = PTR_ERR(dma_pdev); | ||
392 | goto exit_release_pdev; | ||
393 | } | ||
394 | |||
383 | return ret; | 395 | return ret; |
384 | 396 | ||
397 | exit_release_pdev: | ||
398 | platform_device_del(pdev); | ||
385 | exit_release_chan: | 399 | exit_release_chan: |
386 | kfree(d->chan); | 400 | kfree(d->chan); |
387 | exit_release_d: | 401 | exit_release_d: |
388 | kfree(d); | 402 | kfree(d); |
389 | exit_release_p: | 403 | exit_release_p: |
390 | kfree(p); | 404 | kfree(p); |
391 | exit_device_del: | 405 | exit_device_del: |
392 | platform_device_del(pdev); | 406 | platform_device_del(pdev); |
393 | exit_device_put: | 407 | exit_device_put: |
394 | platform_device_put(pdev); | 408 | platform_device_put(pdev); |
395 | 409 | ||
396 | return ret; | 410 | return ret; |
397 | } | 411 | } |
398 | arch_initcall(omap1_system_dma_init); | 412 | arch_initcall(omap1_system_dma_init); |
399 | 413 |
arch/arm/mach-omap2/dma.c
1 | /* | 1 | /* |
2 | * OMAP2+ DMA driver | 2 | * OMAP2+ DMA driver |
3 | * | 3 | * |
4 | * Copyright (C) 2003 - 2008 Nokia Corporation | 4 | * Copyright (C) 2003 - 2008 Nokia Corporation |
5 | * Author: Juha Yrjölä <juha.yrjola@nokia.com> | 5 | * Author: Juha Yrjölä <juha.yrjola@nokia.com> |
6 | * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> | 6 | * DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com> |
7 | * Graphics DMA and LCD DMA graphics tranformations | 7 | * Graphics DMA and LCD DMA graphics tranformations |
8 | * by Imre Deak <imre.deak@nokia.com> | 8 | * by Imre Deak <imre.deak@nokia.com> |
9 | * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc. | 9 | * OMAP2/3 support Copyright (C) 2004-2007 Texas Instruments, Inc. |
10 | * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. | 10 | * Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc. |
11 | * | 11 | * |
12 | * Copyright (C) 2009 Texas Instruments | 12 | * Copyright (C) 2009 Texas Instruments |
13 | * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> | 13 | * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> |
14 | * | 14 | * |
15 | * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ | 15 | * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/ |
16 | * Converted DMA library into platform driver | 16 | * Converted DMA library into platform driver |
17 | * - G, Manjunath Kondaiah <manjugk@ti.com> | 17 | * - G, Manjunath Kondaiah <manjugk@ti.com> |
18 | * | 18 | * |
19 | * This program is free software; you can redistribute it and/or modify | 19 | * This program is free software; you can redistribute it and/or modify |
20 | * it under the terms of the GNU General Public License version 2 as | 20 | * it under the terms of the GNU General Public License version 2 as |
21 | * published by the Free Software Foundation. | 21 | * published by the Free Software Foundation. |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #include <linux/err.h> | 24 | #include <linux/err.h> |
25 | #include <linux/io.h> | 25 | #include <linux/io.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <linux/device.h> | 29 | #include <linux/device.h> |
30 | 30 | #include <linux/dma-mapping.h> | |
31 | #include <linux/omap-dma.h> | 31 | #include <linux/omap-dma.h> |
32 | 32 | ||
33 | #include "soc.h" | 33 | #include "soc.h" |
34 | #include "omap_hwmod.h" | 34 | #include "omap_hwmod.h" |
35 | #include "omap_device.h" | 35 | #include "omap_device.h" |
36 | 36 | ||
37 | #define OMAP2_DMA_STRIDE 0x60 | 37 | #define OMAP2_DMA_STRIDE 0x60 |
38 | 38 | ||
39 | static u32 errata; | 39 | static u32 errata; |
40 | static u8 dma_stride; | 40 | static u8 dma_stride; |
41 | 41 | ||
42 | static struct omap_dma_dev_attr *d; | 42 | static struct omap_dma_dev_attr *d; |
43 | 43 | ||
44 | static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end; | 44 | static enum omap_reg_offsets dma_common_ch_start, dma_common_ch_end; |
45 | 45 | ||
46 | static u16 reg_map[] = { | 46 | static u16 reg_map[] = { |
47 | [REVISION] = 0x00, | 47 | [REVISION] = 0x00, |
48 | [GCR] = 0x78, | 48 | [GCR] = 0x78, |
49 | [IRQSTATUS_L0] = 0x08, | 49 | [IRQSTATUS_L0] = 0x08, |
50 | [IRQSTATUS_L1] = 0x0c, | 50 | [IRQSTATUS_L1] = 0x0c, |
51 | [IRQSTATUS_L2] = 0x10, | 51 | [IRQSTATUS_L2] = 0x10, |
52 | [IRQSTATUS_L3] = 0x14, | 52 | [IRQSTATUS_L3] = 0x14, |
53 | [IRQENABLE_L0] = 0x18, | 53 | [IRQENABLE_L0] = 0x18, |
54 | [IRQENABLE_L1] = 0x1c, | 54 | [IRQENABLE_L1] = 0x1c, |
55 | [IRQENABLE_L2] = 0x20, | 55 | [IRQENABLE_L2] = 0x20, |
56 | [IRQENABLE_L3] = 0x24, | 56 | [IRQENABLE_L3] = 0x24, |
57 | [SYSSTATUS] = 0x28, | 57 | [SYSSTATUS] = 0x28, |
58 | [OCP_SYSCONFIG] = 0x2c, | 58 | [OCP_SYSCONFIG] = 0x2c, |
59 | [CAPS_0] = 0x64, | 59 | [CAPS_0] = 0x64, |
60 | [CAPS_2] = 0x6c, | 60 | [CAPS_2] = 0x6c, |
61 | [CAPS_3] = 0x70, | 61 | [CAPS_3] = 0x70, |
62 | [CAPS_4] = 0x74, | 62 | [CAPS_4] = 0x74, |
63 | 63 | ||
64 | /* Common register offsets */ | 64 | /* Common register offsets */ |
65 | [CCR] = 0x80, | 65 | [CCR] = 0x80, |
66 | [CLNK_CTRL] = 0x84, | 66 | [CLNK_CTRL] = 0x84, |
67 | [CICR] = 0x88, | 67 | [CICR] = 0x88, |
68 | [CSR] = 0x8c, | 68 | [CSR] = 0x8c, |
69 | [CSDP] = 0x90, | 69 | [CSDP] = 0x90, |
70 | [CEN] = 0x94, | 70 | [CEN] = 0x94, |
71 | [CFN] = 0x98, | 71 | [CFN] = 0x98, |
72 | [CSEI] = 0xa4, | 72 | [CSEI] = 0xa4, |
73 | [CSFI] = 0xa8, | 73 | [CSFI] = 0xa8, |
74 | [CDEI] = 0xac, | 74 | [CDEI] = 0xac, |
75 | [CDFI] = 0xb0, | 75 | [CDFI] = 0xb0, |
76 | [CSAC] = 0xb4, | 76 | [CSAC] = 0xb4, |
77 | [CDAC] = 0xb8, | 77 | [CDAC] = 0xb8, |
78 | 78 | ||
79 | /* Channel specific register offsets */ | 79 | /* Channel specific register offsets */ |
80 | [CSSA] = 0x9c, | 80 | [CSSA] = 0x9c, |
81 | [CDSA] = 0xa0, | 81 | [CDSA] = 0xa0, |
82 | [CCEN] = 0xbc, | 82 | [CCEN] = 0xbc, |
83 | [CCFN] = 0xc0, | 83 | [CCFN] = 0xc0, |
84 | [COLOR] = 0xc4, | 84 | [COLOR] = 0xc4, |
85 | 85 | ||
86 | /* OMAP4 specific registers */ | 86 | /* OMAP4 specific registers */ |
87 | [CDP] = 0xd0, | 87 | [CDP] = 0xd0, |
88 | [CNDP] = 0xd4, | 88 | [CNDP] = 0xd4, |
89 | [CCDN] = 0xd8, | 89 | [CCDN] = 0xd8, |
90 | }; | 90 | }; |
91 | 91 | ||
92 | static void __iomem *dma_base; | 92 | static void __iomem *dma_base; |
93 | static inline void dma_write(u32 val, int reg, int lch) | 93 | static inline void dma_write(u32 val, int reg, int lch) |
94 | { | 94 | { |
95 | u8 stride; | 95 | u8 stride; |
96 | u32 offset; | 96 | u32 offset; |
97 | 97 | ||
98 | stride = (reg >= dma_common_ch_start) ? dma_stride : 0; | 98 | stride = (reg >= dma_common_ch_start) ? dma_stride : 0; |
99 | offset = reg_map[reg] + (stride * lch); | 99 | offset = reg_map[reg] + (stride * lch); |
100 | __raw_writel(val, dma_base + offset); | 100 | __raw_writel(val, dma_base + offset); |
101 | } | 101 | } |
102 | 102 | ||
103 | static inline u32 dma_read(int reg, int lch) | 103 | static inline u32 dma_read(int reg, int lch) |
104 | { | 104 | { |
105 | u8 stride; | 105 | u8 stride; |
106 | u32 offset, val; | 106 | u32 offset, val; |
107 | 107 | ||
108 | stride = (reg >= dma_common_ch_start) ? dma_stride : 0; | 108 | stride = (reg >= dma_common_ch_start) ? dma_stride : 0; |
109 | offset = reg_map[reg] + (stride * lch); | 109 | offset = reg_map[reg] + (stride * lch); |
110 | val = __raw_readl(dma_base + offset); | 110 | val = __raw_readl(dma_base + offset); |
111 | return val; | 111 | return val; |
112 | } | 112 | } |
113 | 113 | ||
114 | static inline void omap2_disable_irq_lch(int lch) | 114 | static inline void omap2_disable_irq_lch(int lch) |
115 | { | 115 | { |
116 | u32 val; | 116 | u32 val; |
117 | 117 | ||
118 | val = dma_read(IRQENABLE_L0, lch); | 118 | val = dma_read(IRQENABLE_L0, lch); |
119 | val &= ~(1 << lch); | 119 | val &= ~(1 << lch); |
120 | dma_write(val, IRQENABLE_L0, lch); | 120 | dma_write(val, IRQENABLE_L0, lch); |
121 | } | 121 | } |
122 | 122 | ||
123 | static void omap2_clear_dma(int lch) | 123 | static void omap2_clear_dma(int lch) |
124 | { | 124 | { |
125 | int i = dma_common_ch_start; | 125 | int i = dma_common_ch_start; |
126 | 126 | ||
127 | for (; i <= dma_common_ch_end; i += 1) | 127 | for (; i <= dma_common_ch_end; i += 1) |
128 | dma_write(0, i, lch); | 128 | dma_write(0, i, lch); |
129 | } | 129 | } |
130 | 130 | ||
131 | static void omap2_show_dma_caps(void) | 131 | static void omap2_show_dma_caps(void) |
132 | { | 132 | { |
133 | u8 revision = dma_read(REVISION, 0) & 0xff; | 133 | u8 revision = dma_read(REVISION, 0) & 0xff; |
134 | printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n", | 134 | printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n", |
135 | revision >> 4, revision & 0xf); | 135 | revision >> 4, revision & 0xf); |
136 | return; | 136 | return; |
137 | } | 137 | } |
138 | 138 | ||
/*
 * Build the DMA errata mask for the SoC we are running on.
 * Returns the accumulated errata bits (stored via SET_DMA_ERRATA into
 * the file-scope 'errata' variable).
 */
static u32 configure_dma_errata(void)
{

	/*
	 * Errata applicable for OMAP2430ES1.0 and all omap2420
	 *
	 * I.
	 * Erratum ID: Not Available
	 * Inter Frame DMA buffering issue DMA will wrongly
	 * buffer elements if packing and bursting is enabled. This might
	 * result in data gets stalled in FIFO at the end of the block.
	 * Workaround: DMA channels must have BUFFERING_DISABLED bit set to
	 * guarantee no data will stay in the DMA FIFO in case inter frame
	 * buffering occurs
	 *
	 * II.
	 * Erratum ID: Not Available
	 * DMA may hang when several channels are used in parallel
	 * In the following configuration, DMA channel hanging can occur:
	 * a. Channel i, hardware synchronized, is enabled
	 * b. Another channel (Channel x), software synchronized, is enabled.
	 * c. Channel i is disabled before end of transfer
	 * d. Channel i is reenabled.
	 * e. Steps 1 to 4 are repeated a certain number of times.
	 * f. A third channel (Channel y), software synchronized, is enabled.
	 * Channel x and Channel y may hang immediately after step 'f'.
	 * Workaround:
	 * For any channel used - make sure NextLCH_ID is set to the value j.
	 */
	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
				(omap_type() == OMAP2430_REV_ES1_0))) {

		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
	}

	/*
	 * Erratum ID: i378: OMAP2+: sDMA Channel is not disabled
	 * after a transaction error.
	 * Workaround: SW should explicitly disable the channel.
	 */
	if (cpu_class_is_omap2())
		SET_DMA_ERRATA(DMA_ERRATA_i378);

	/*
	 * Erratum ID: i541: sDMA FIFO draining does not finish
	 * If sDMA channel is disabled on the fly, sDMA enters standby even
	 * though FIFO Drain is still in progress
	 * Workaround: Put sDMA in NoStandby mode before a logical channel is
	 * disabled, then put it back to SmartStandby right after the channel
	 * finishes FIFO draining.
	 */
	if (cpu_is_omap34xx())
		SET_DMA_ERRATA(DMA_ERRATA_i541);

	/*
	 * Erratum ID: i88 : Special programming model needed to disable DMA
	 * before end of block.
	 * Workaround: software must ensure that the DMA is configured in No
	 * Standby mode(DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
	 */
	if (omap_type() == OMAP3430_REV_ES1_0)
		SET_DMA_ERRATA(DMA_ERRATA_i88);

	/*
	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	SET_DMA_ERRATA(DMA_ERRATA_3_3);

	/*
	 * Erratum ID: Not Available
	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
	 * after secure sram context save and restore.
	 * Work around: Hence we need to manually clear those IRQs to avoid
	 * spurious interrupts. This affects only secure devices.
	 */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		SET_DMA_ERRATA(DMA_ROMCODE_BUG);

	return errata;
}
221 | 221 | ||
222 | /* One time initializations */ | 222 | /* One time initializations */ |
/*
 * One-time init for each "dma" class hwmod: allocate the platform data,
 * build the "omap_dma_system" omap_device, map its registers and allocate
 * the logical channel array.
 *
 * NOTE(review): on the error paths after omap_device_build() succeeds, the
 * built device and the ioremap()ed dma_base are not torn down — presumably
 * acceptable for boot-time init, but worth confirming.
 */
static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
{
	struct platform_device *pdev;
	struct omap_system_dma_plat_info *p;
	struct resource *mem;
	char *name = "omap_dma_system";

	/* OMAP2+ register layout: per-channel stride, common regs from CSDP */
	dma_stride = OMAP2_DMA_STRIDE;
	dma_common_ch_start = CSDP;

	p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
	if (!p) {
		pr_err("%s: Unable to allocate pdata for %s:%s\n",
			__func__, name, oh->name);
		return -ENOMEM;
	}

	/* Hook up the OMAP2-specific low-level operations */
	p->dma_attr = (struct omap_dma_dev_attr *)oh->dev_attr;
	p->disable_irq_lch = omap2_disable_irq_lch;
	p->show_dma_caps = omap2_show_dma_caps;
	p->clear_dma = omap2_clear_dma;
	p->dma_write = dma_write;
	p->dma_read = dma_read;

	p->clear_lch_regs = NULL;

	p->errata = configure_dma_errata();

	/* omap_device_build() copies the pdata, so 'p' can be freed here */
	pdev = omap_device_build(name, 0, oh, p, sizeof(*p), NULL, 0, 0);
	kfree(p);
	if (IS_ERR(pdev)) {
		pr_err("%s: Can't build omap_device for %s:%s.\n",
			__func__, name, oh->name);
		return PTR_ERR(pdev);
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "%s: no mem resource\n", __func__);
		return -EINVAL;
	}
	dma_base = ioremap(mem->start, resource_size(mem));
	if (!dma_base) {
		dev_err(&pdev->dev, "%s: ioremap fail\n", __func__);
		return -ENOMEM;
	}

	/* Per-channel bookkeeping, sized by the hwmod's channel count */
	d = oh->dev_attr;
	d->chan = kzalloc(sizeof(struct omap_dma_lch) *
							(d->lch_count), GFP_KERNEL);

	if (!d->chan) {
		dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
		return -ENOMEM;
	}

	/* On HS (non-GP) 34xx devices some channels are reserved for secure use */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		d->dev_caps |= HS_CHANNELS_RESERVED;

	/* Check the capabilities register for descriptor loading feature */
	if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
		dma_common_ch_end = CCDN;
	else
		dma_common_ch_end = CCFN;

	return 0;
}
290 | 290 | ||
/*
 * Device info for registering the OMAP dmaengine platform device from
 * SoC code, so it only appears when actually booting on an OMAP SoC.
 */
static const struct platform_device_info omap_dma_dev_info = {
	.name = "omap-dma-engine",
	.id = -1,			/* single instance */
	.dma_mask = DMA_BIT_MASK(32),	/* 32-bit DMA addressing */
};
296 | |||
static int __init omap2_system_dma_init(void)
{
	struct platform_device *pdev;
	int res;

	/* Build the "omap_dma_system" omap_device for each DMA hwmod */
	res = omap_hwmod_for_each_by_class("dma",
			omap2_system_dma_init_dev, NULL);
	if (res)
		return res;

	/*
	 * Register the dmaengine device here instead of in the driver so
	 * it is not created when this multiplatform kernel boots on a
	 * non-OMAP SoC.
	 */
	pdev = platform_device_register_full(&omap_dma_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	return res;	/* res is 0 here */
}
296 | omap_arch_initcall(omap2_system_dma_init); | 313 | omap_arch_initcall(omap2_system_dma_init); |
297 | 314 |
drivers/dma/omap-dma.c
1 | /* | 1 | /* |
2 | * OMAP DMAengine support | 2 | * OMAP DMAengine support |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | #include <linux/dmaengine.h> | 8 | #include <linux/dmaengine.h> |
9 | #include <linux/dma-mapping.h> | 9 | #include <linux/dma-mapping.h> |
10 | #include <linux/err.h> | 10 | #include <linux/err.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/list.h> | 13 | #include <linux/list.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/omap-dma.h> | 15 | #include <linux/omap-dma.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | 19 | ||
20 | #include "virt-dma.h" | 20 | #include "virt-dma.h" |
21 | 21 | ||
/* Driver-wide state: the dmaengine device plus the deferred-start queue. */
struct omap_dmadev {
	struct dma_device ddev;		/* embedded dmaengine device */
	spinlock_t lock;		/* protects 'pending' */
	struct tasklet_struct task;	/* runs omap_dma_sched() */
	struct list_head pending;	/* omap_chan.node entries waiting to start */
};
28 | 28 | ||
/* Per-channel state, wrapping a virt-dma virtual channel. */
struct omap_chan {
	struct virt_dma_chan vc;	/* embedded virtual channel */
	struct list_head node;		/* link on omap_dmadev.pending */

	struct dma_slave_config cfg;	/* last slave config from the client */
	unsigned dma_sig;		/* DMA request line passed to omap_request_dma() */
	bool cyclic;			/* current transfer is cyclic */
	bool paused;			/* NOTE(review): presumably set by pause/resume control; not visible in this chunk */

	int dma_ch;			/* hardware channel from omap_request_dma() */
	struct omap_desc *desc;		/* descriptor currently in flight, or NULL */
	unsigned sgidx;			/* index of the sg entry being transferred */
};
42 | 42 | ||
/* One hardware transfer chunk: EN elements per frame, FN frames. */
struct omap_sg {
	dma_addr_t addr;	/* bus address of the memory side */
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
};
48 | 48 | ||
49 | struct omap_desc { | 49 | struct omap_desc { |
50 | struct virt_dma_desc vd; | 50 | struct virt_dma_desc vd; |
51 | enum dma_transfer_direction dir; | 51 | enum dma_transfer_direction dir; |
52 | dma_addr_t dev_addr; | 52 | dma_addr_t dev_addr; |
53 | 53 | ||
54 | int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ | 54 | int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ |
55 | uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ | 55 | uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ |
56 | uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ | 56 | uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ |
57 | uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ | 57 | uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ |
58 | uint8_t periph_port; /* Peripheral port */ | 58 | uint8_t periph_port; /* Peripheral port */ |
59 | 59 | ||
60 | unsigned sglen; | 60 | unsigned sglen; |
61 | struct omap_sg sg[0]; | 61 | struct omap_sg sg[0]; |
62 | }; | 62 | }; |
63 | 63 | ||
/* Element size in bytes, indexed by OMAP_DMA_DATA_TYPE_xxx. */
static const unsigned es_bytes[] = {
	[OMAP_DMA_DATA_TYPE_S8] = 1,
	[OMAP_DMA_DATA_TYPE_S16] = 2,
	[OMAP_DMA_DATA_TYPE_S32] = 4,
};
69 | 69 | ||
/* Convert a dmaengine device pointer back to the enclosing omap_dmadev. */
static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}
74 | 74 | ||
/* Convert a dmaengine channel pointer back to the enclosing omap_chan. */
static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}
79 | 79 | ||
/* Convert a tx descriptor pointer back to the enclosing omap_desc. */
static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}
84 | 84 | ||
/* virt-dma callback: free a completed descriptor (single kzalloc'd block). */
static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}
89 | 89 | ||
/*
 * Program and start sg entry @idx of descriptor @d on channel @c.
 * The memory side uses a post-incrementing EMIFF address; the device
 * side was already programmed by omap_dma_start_desc().
 */
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;

	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);
	else
		omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF,
			OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0);

	omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn,
		d->sync_mode, c->dma_sig, d->sync_type);

	omap_start_dma(c->dma_ch);
}
107 | 107 | ||
/*
 * Take the next queued descriptor for @c (if any), program the fixed
 * device-side address, and kick off its first sg entry.
 * Called with c->vc.lock held.
 */
static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;

	if (!vd) {
		/* Nothing queued: mark the channel idle */
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/* Device side is a constant address for the whole descriptor */
	if (d->dir == DMA_DEV_TO_MEM)
		omap_set_dma_src_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);
	else
		omap_set_dma_dest_params(c->dma_ch, d->periph_port,
			OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi);

	omap_dma_start_sg(c, d, 0);
}
132 | 132 | ||
/*
 * Hardware completion callback (registered via omap_request_dma()).
 * For sg transfers, advance to the next sg entry or complete the
 * descriptor and start the next one; for cyclic transfers, just fire
 * the client's period callback.
 */
static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				/* Start the next descriptor before completing this one */
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
155 | 155 | ||
156 | /* | 156 | /* |
157 | * This callback schedules all pending channels. We could be more | 157 | * This callback schedules all pending channels. We could be more |
158 | * clever here by postponing allocation of the real DMA channels to | 158 | * clever here by postponing allocation of the real DMA channels to |
159 | * this point, and freeing them when our virtual channel becomes idle. | 159 | * this point, and freeing them when our virtual channel becomes idle. |
160 | * | 160 | * |
161 | * We would then need to deal with 'all channels in-use' | 161 | * We would then need to deal with 'all channels in-use' |
162 | */ | 162 | */ |
163 | static void omap_dma_sched(unsigned long data) | 163 | static void omap_dma_sched(unsigned long data) |
164 | { | 164 | { |
165 | struct omap_dmadev *d = (struct omap_dmadev *)data; | 165 | struct omap_dmadev *d = (struct omap_dmadev *)data; |
166 | LIST_HEAD(head); | 166 | LIST_HEAD(head); |
167 | 167 | ||
168 | spin_lock_irq(&d->lock); | 168 | spin_lock_irq(&d->lock); |
169 | list_splice_tail_init(&d->pending, &head); | 169 | list_splice_tail_init(&d->pending, &head); |
170 | spin_unlock_irq(&d->lock); | 170 | spin_unlock_irq(&d->lock); |
171 | 171 | ||
172 | while (!list_empty(&head)) { | 172 | while (!list_empty(&head)) { |
173 | struct omap_chan *c = list_first_entry(&head, | 173 | struct omap_chan *c = list_first_entry(&head, |
174 | struct omap_chan, node); | 174 | struct omap_chan, node); |
175 | 175 | ||
176 | spin_lock_irq(&c->vc.lock); | 176 | spin_lock_irq(&c->vc.lock); |
177 | list_del_init(&c->node); | 177 | list_del_init(&c->node); |
178 | omap_dma_start_desc(c); | 178 | omap_dma_start_desc(c); |
179 | spin_unlock_irq(&c->vc.lock); | 179 | spin_unlock_irq(&c->vc.lock); |
180 | } | 180 | } |
181 | } | 181 | } |
182 | 182 | ||
/*
 * dmaengine callback: claim the hardware channel for this virtual
 * channel's request line. Returns 0 or a negative error from
 * omap_request_dma().
 */
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig);

	return omap_request_dma(c->dma_sig, "DMA engine",
		omap_dma_callback, c, &c->dma_ch);
}
192 | 192 | ||
/* dmaengine callback: release queued descriptors and the hardware channel. */
static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig);
}
202 | 202 | ||
/* Size of one sg entry in elements: EN elements/frame * FN frames. */
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}
207 | 207 | ||
208 | static size_t omap_dma_desc_size(struct omap_desc *d) | 208 | static size_t omap_dma_desc_size(struct omap_desc *d) |
209 | { | 209 | { |
210 | unsigned i; | 210 | unsigned i; |
211 | size_t size; | 211 | size_t size; |
212 | 212 | ||
213 | for (size = i = 0; i < d->sglen; i++) | 213 | for (size = i = 0; i < d->sglen; i++) |
214 | size += omap_dma_sg_size(&d->sg[i]); | 214 | size += omap_dma_sg_size(&d->sg[i]); |
215 | 215 | ||
216 | return size * es_bytes[d->es]; | 216 | return size * es_bytes[d->es]; |
217 | } | 217 | } |
218 | 218 | ||
/*
 * Remaining bytes of descriptor @d given the current hardware address
 * @addr: nothing is counted until the sg entry containing @addr is
 * found (the remainder of that entry is added), then every later entry
 * is counted in full.
 */
static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			/* already past the current entry: count it whole */
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			/* found the in-flight entry: count what is left of it */
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}
235 | 235 | ||
/*
 * dmaengine callback: report completion status and, when @txstate is
 * given, the residue for @cookie. A not-yet-started descriptor reports
 * its full size; the in-flight descriptor reports the remainder from
 * the current hardware position; anything else reports 0.
 */
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		/* still queued: nothing transferred yet */
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		/* in flight: ask the hardware for the memory-side position */
		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_get_dma_src_pos(c->dma_ch);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_get_dma_dst_pos(c->dma_ch);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}
271 | 271 | ||
/*
 * dmaengine callback: move submitted descriptors to the issued list and,
 * if the channel is idle, queue it on the device's pending list and
 * schedule the tasklet that actually starts transfers.
 */
static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		struct omap_dmadev *d = to_omap_dma_dev(chan->device);
		spin_lock(&d->lock);
		/* avoid double-queuing if already on the pending list */
		if (list_empty(&c->node))
			list_add_tail(&c->node, &d->pending);
		spin_unlock(&d->lock);
		tasklet_schedule(&d->task);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}
288 | 288 | ||
289 | static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( | 289 | static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( |
290 | struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, | 290 | struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, |
291 | enum dma_transfer_direction dir, unsigned long tx_flags, void *context) | 291 | enum dma_transfer_direction dir, unsigned long tx_flags, void *context) |
292 | { | 292 | { |
293 | struct omap_chan *c = to_omap_dma_chan(chan); | 293 | struct omap_chan *c = to_omap_dma_chan(chan); |
294 | enum dma_slave_buswidth dev_width; | 294 | enum dma_slave_buswidth dev_width; |
295 | struct scatterlist *sgent; | 295 | struct scatterlist *sgent; |
296 | struct omap_desc *d; | 296 | struct omap_desc *d; |
297 | dma_addr_t dev_addr; | 297 | dma_addr_t dev_addr; |
298 | unsigned i, j = 0, es, en, frame_bytes, sync_type; | 298 | unsigned i, j = 0, es, en, frame_bytes, sync_type; |
299 | u32 burst; | 299 | u32 burst; |
300 | 300 | ||
301 | if (dir == DMA_DEV_TO_MEM) { | 301 | if (dir == DMA_DEV_TO_MEM) { |
302 | dev_addr = c->cfg.src_addr; | 302 | dev_addr = c->cfg.src_addr; |
303 | dev_width = c->cfg.src_addr_width; | 303 | dev_width = c->cfg.src_addr_width; |
304 | burst = c->cfg.src_maxburst; | 304 | burst = c->cfg.src_maxburst; |
305 | sync_type = OMAP_DMA_SRC_SYNC; | 305 | sync_type = OMAP_DMA_SRC_SYNC; |
306 | } else if (dir == DMA_MEM_TO_DEV) { | 306 | } else if (dir == DMA_MEM_TO_DEV) { |
307 | dev_addr = c->cfg.dst_addr; | 307 | dev_addr = c->cfg.dst_addr; |
308 | dev_width = c->cfg.dst_addr_width; | 308 | dev_width = c->cfg.dst_addr_width; |
309 | burst = c->cfg.dst_maxburst; | 309 | burst = c->cfg.dst_maxburst; |
310 | sync_type = OMAP_DMA_DST_SYNC; | 310 | sync_type = OMAP_DMA_DST_SYNC; |
311 | } else { | 311 | } else { |
312 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | 312 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); |
313 | return NULL; | 313 | return NULL; |
314 | } | 314 | } |
315 | 315 | ||
316 | /* Bus width translates to the element size (ES) */ | 316 | /* Bus width translates to the element size (ES) */ |
317 | switch (dev_width) { | 317 | switch (dev_width) { |
318 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | 318 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
319 | es = OMAP_DMA_DATA_TYPE_S8; | 319 | es = OMAP_DMA_DATA_TYPE_S8; |
320 | break; | 320 | break; |
321 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | 321 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
322 | es = OMAP_DMA_DATA_TYPE_S16; | 322 | es = OMAP_DMA_DATA_TYPE_S16; |
323 | break; | 323 | break; |
324 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | 324 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
325 | es = OMAP_DMA_DATA_TYPE_S32; | 325 | es = OMAP_DMA_DATA_TYPE_S32; |
326 | break; | 326 | break; |
327 | default: /* not reached */ | 327 | default: /* not reached */ |
328 | return NULL; | 328 | return NULL; |
329 | } | 329 | } |
330 | 330 | ||
331 | /* Now allocate and setup the descriptor. */ | 331 | /* Now allocate and setup the descriptor. */ |
332 | d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); | 332 | d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); |
333 | if (!d) | 333 | if (!d) |
334 | return NULL; | 334 | return NULL; |
335 | 335 | ||
336 | d->dir = dir; | 336 | d->dir = dir; |
337 | d->dev_addr = dev_addr; | 337 | d->dev_addr = dev_addr; |
338 | d->es = es; | 338 | d->es = es; |
339 | d->sync_mode = OMAP_DMA_SYNC_FRAME; | 339 | d->sync_mode = OMAP_DMA_SYNC_FRAME; |
340 | d->sync_type = sync_type; | 340 | d->sync_type = sync_type; |
341 | d->periph_port = OMAP_DMA_PORT_TIPB; | 341 | d->periph_port = OMAP_DMA_PORT_TIPB; |
342 | 342 | ||
343 | /* | 343 | /* |
344 | * Build our scatterlist entries: each contains the address, | 344 | * Build our scatterlist entries: each contains the address, |
345 | * the number of elements (EN) in each frame, and the number of | 345 | * the number of elements (EN) in each frame, and the number of |
346 | * frames (FN). Number of bytes for this entry = ES * EN * FN. | 346 | * frames (FN). Number of bytes for this entry = ES * EN * FN. |
347 | * | 347 | * |
348 | * Burst size translates to number of elements with frame sync. | 348 | * Burst size translates to number of elements with frame sync. |
349 | * Note: DMA engine defines burst to be the number of dev-width | 349 | * Note: DMA engine defines burst to be the number of dev-width |
350 | * transfers. | 350 | * transfers. |
351 | */ | 351 | */ |
352 | en = burst; | 352 | en = burst; |
353 | frame_bytes = es_bytes[es] * en; | 353 | frame_bytes = es_bytes[es] * en; |
354 | for_each_sg(sgl, sgent, sglen, i) { | 354 | for_each_sg(sgl, sgent, sglen, i) { |
355 | d->sg[j].addr = sg_dma_address(sgent); | 355 | d->sg[j].addr = sg_dma_address(sgent); |
356 | d->sg[j].en = en; | 356 | d->sg[j].en = en; |
357 | d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; | 357 | d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; |
358 | j++; | 358 | j++; |
359 | } | 359 | } |
360 | 360 | ||
361 | d->sglen = j; | 361 | d->sglen = j; |
362 | 362 | ||
363 | return vchan_tx_prep(&c->vc, &d->vd, tx_flags); | 363 | return vchan_tx_prep(&c->vc, &d->vd, tx_flags); |
364 | } | 364 | } |
365 | 365 | ||
366 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( | 366 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( |
367 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 367 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
368 | size_t period_len, enum dma_transfer_direction dir, unsigned long flags, | 368 | size_t period_len, enum dma_transfer_direction dir, unsigned long flags, |
369 | void *context) | 369 | void *context) |
370 | { | 370 | { |
371 | struct omap_chan *c = to_omap_dma_chan(chan); | 371 | struct omap_chan *c = to_omap_dma_chan(chan); |
372 | enum dma_slave_buswidth dev_width; | 372 | enum dma_slave_buswidth dev_width; |
373 | struct omap_desc *d; | 373 | struct omap_desc *d; |
374 | dma_addr_t dev_addr; | 374 | dma_addr_t dev_addr; |
375 | unsigned es, sync_type; | 375 | unsigned es, sync_type; |
376 | u32 burst; | 376 | u32 burst; |
377 | 377 | ||
378 | if (dir == DMA_DEV_TO_MEM) { | 378 | if (dir == DMA_DEV_TO_MEM) { |
379 | dev_addr = c->cfg.src_addr; | 379 | dev_addr = c->cfg.src_addr; |
380 | dev_width = c->cfg.src_addr_width; | 380 | dev_width = c->cfg.src_addr_width; |
381 | burst = c->cfg.src_maxburst; | 381 | burst = c->cfg.src_maxburst; |
382 | sync_type = OMAP_DMA_SRC_SYNC; | 382 | sync_type = OMAP_DMA_SRC_SYNC; |
383 | } else if (dir == DMA_MEM_TO_DEV) { | 383 | } else if (dir == DMA_MEM_TO_DEV) { |
384 | dev_addr = c->cfg.dst_addr; | 384 | dev_addr = c->cfg.dst_addr; |
385 | dev_width = c->cfg.dst_addr_width; | 385 | dev_width = c->cfg.dst_addr_width; |
386 | burst = c->cfg.dst_maxburst; | 386 | burst = c->cfg.dst_maxburst; |
387 | sync_type = OMAP_DMA_DST_SYNC; | 387 | sync_type = OMAP_DMA_DST_SYNC; |
388 | } else { | 388 | } else { |
389 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | 389 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); |
390 | return NULL; | 390 | return NULL; |
391 | } | 391 | } |
392 | 392 | ||
393 | /* Bus width translates to the element size (ES) */ | 393 | /* Bus width translates to the element size (ES) */ |
394 | switch (dev_width) { | 394 | switch (dev_width) { |
395 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | 395 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
396 | es = OMAP_DMA_DATA_TYPE_S8; | 396 | es = OMAP_DMA_DATA_TYPE_S8; |
397 | break; | 397 | break; |
398 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | 398 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
399 | es = OMAP_DMA_DATA_TYPE_S16; | 399 | es = OMAP_DMA_DATA_TYPE_S16; |
400 | break; | 400 | break; |
401 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | 401 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
402 | es = OMAP_DMA_DATA_TYPE_S32; | 402 | es = OMAP_DMA_DATA_TYPE_S32; |
403 | break; | 403 | break; |
404 | default: /* not reached */ | 404 | default: /* not reached */ |
405 | return NULL; | 405 | return NULL; |
406 | } | 406 | } |
407 | 407 | ||
408 | /* Now allocate and setup the descriptor. */ | 408 | /* Now allocate and setup the descriptor. */ |
409 | d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); | 409 | d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); |
410 | if (!d) | 410 | if (!d) |
411 | return NULL; | 411 | return NULL; |
412 | 412 | ||
413 | d->dir = dir; | 413 | d->dir = dir; |
414 | d->dev_addr = dev_addr; | 414 | d->dev_addr = dev_addr; |
415 | d->fi = burst; | 415 | d->fi = burst; |
416 | d->es = es; | 416 | d->es = es; |
417 | if (burst) | 417 | if (burst) |
418 | d->sync_mode = OMAP_DMA_SYNC_PACKET; | 418 | d->sync_mode = OMAP_DMA_SYNC_PACKET; |
419 | else | 419 | else |
420 | d->sync_mode = OMAP_DMA_SYNC_ELEMENT; | 420 | d->sync_mode = OMAP_DMA_SYNC_ELEMENT; |
421 | d->sync_type = sync_type; | 421 | d->sync_type = sync_type; |
422 | d->periph_port = OMAP_DMA_PORT_MPUI; | 422 | d->periph_port = OMAP_DMA_PORT_MPUI; |
423 | d->sg[0].addr = buf_addr; | 423 | d->sg[0].addr = buf_addr; |
424 | d->sg[0].en = period_len / es_bytes[es]; | 424 | d->sg[0].en = period_len / es_bytes[es]; |
425 | d->sg[0].fn = buf_len / period_len; | 425 | d->sg[0].fn = buf_len / period_len; |
426 | d->sglen = 1; | 426 | d->sglen = 1; |
427 | 427 | ||
428 | if (!c->cyclic) { | 428 | if (!c->cyclic) { |
429 | c->cyclic = true; | 429 | c->cyclic = true; |
430 | omap_dma_link_lch(c->dma_ch, c->dma_ch); | 430 | omap_dma_link_lch(c->dma_ch, c->dma_ch); |
431 | 431 | ||
432 | if (flags & DMA_PREP_INTERRUPT) | 432 | if (flags & DMA_PREP_INTERRUPT) |
433 | omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); | 433 | omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); |
434 | 434 | ||
435 | omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); | 435 | omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); |
436 | } | 436 | } |
437 | 437 | ||
438 | if (dma_omap2plus()) { | 438 | if (dma_omap2plus()) { |
439 | omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); | 439 | omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); |
440 | omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); | 440 | omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); |
441 | } | 441 | } |
442 | 442 | ||
443 | return vchan_tx_prep(&c->vc, &d->vd, flags); | 443 | return vchan_tx_prep(&c->vc, &d->vd, flags); |
444 | } | 444 | } |
445 | 445 | ||
446 | static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) | 446 | static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) |
447 | { | 447 | { |
448 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | 448 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
449 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | 449 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
450 | return -EINVAL; | 450 | return -EINVAL; |
451 | 451 | ||
452 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); | 452 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); |
453 | 453 | ||
454 | return 0; | 454 | return 0; |
455 | } | 455 | } |
456 | 456 | ||
457 | static int omap_dma_terminate_all(struct omap_chan *c) | 457 | static int omap_dma_terminate_all(struct omap_chan *c) |
458 | { | 458 | { |
459 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); | 459 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); |
460 | unsigned long flags; | 460 | unsigned long flags; |
461 | LIST_HEAD(head); | 461 | LIST_HEAD(head); |
462 | 462 | ||
463 | spin_lock_irqsave(&c->vc.lock, flags); | 463 | spin_lock_irqsave(&c->vc.lock, flags); |
464 | 464 | ||
465 | /* Prevent this channel being scheduled */ | 465 | /* Prevent this channel being scheduled */ |
466 | spin_lock(&d->lock); | 466 | spin_lock(&d->lock); |
467 | list_del_init(&c->node); | 467 | list_del_init(&c->node); |
468 | spin_unlock(&d->lock); | 468 | spin_unlock(&d->lock); |
469 | 469 | ||
470 | /* | 470 | /* |
471 | * Stop DMA activity: we assume the callback will not be called | 471 | * Stop DMA activity: we assume the callback will not be called |
472 | * after omap_stop_dma() returns (even if it does, it will see | 472 | * after omap_stop_dma() returns (even if it does, it will see |
473 | * c->desc is NULL and exit.) | 473 | * c->desc is NULL and exit.) |
474 | */ | 474 | */ |
475 | if (c->desc) { | 475 | if (c->desc) { |
476 | c->desc = NULL; | 476 | c->desc = NULL; |
477 | /* Avoid stopping the dma twice */ | 477 | /* Avoid stopping the dma twice */ |
478 | if (!c->paused) | 478 | if (!c->paused) |
479 | omap_stop_dma(c->dma_ch); | 479 | omap_stop_dma(c->dma_ch); |
480 | } | 480 | } |
481 | 481 | ||
482 | if (c->cyclic) { | 482 | if (c->cyclic) { |
483 | c->cyclic = false; | 483 | c->cyclic = false; |
484 | c->paused = false; | 484 | c->paused = false; |
485 | omap_dma_unlink_lch(c->dma_ch, c->dma_ch); | 485 | omap_dma_unlink_lch(c->dma_ch, c->dma_ch); |
486 | } | 486 | } |
487 | 487 | ||
488 | vchan_get_all_descriptors(&c->vc, &head); | 488 | vchan_get_all_descriptors(&c->vc, &head); |
489 | spin_unlock_irqrestore(&c->vc.lock, flags); | 489 | spin_unlock_irqrestore(&c->vc.lock, flags); |
490 | vchan_dma_desc_free_list(&c->vc, &head); | 490 | vchan_dma_desc_free_list(&c->vc, &head); |
491 | 491 | ||
492 | return 0; | 492 | return 0; |
493 | } | 493 | } |
494 | 494 | ||
495 | static int omap_dma_pause(struct omap_chan *c) | 495 | static int omap_dma_pause(struct omap_chan *c) |
496 | { | 496 | { |
497 | /* Pause/Resume only allowed with cyclic mode */ | 497 | /* Pause/Resume only allowed with cyclic mode */ |
498 | if (!c->cyclic) | 498 | if (!c->cyclic) |
499 | return -EINVAL; | 499 | return -EINVAL; |
500 | 500 | ||
501 | if (!c->paused) { | 501 | if (!c->paused) { |
502 | omap_stop_dma(c->dma_ch); | 502 | omap_stop_dma(c->dma_ch); |
503 | c->paused = true; | 503 | c->paused = true; |
504 | } | 504 | } |
505 | 505 | ||
506 | return 0; | 506 | return 0; |
507 | } | 507 | } |
508 | 508 | ||
509 | static int omap_dma_resume(struct omap_chan *c) | 509 | static int omap_dma_resume(struct omap_chan *c) |
510 | { | 510 | { |
511 | /* Pause/Resume only allowed with cyclic mode */ | 511 | /* Pause/Resume only allowed with cyclic mode */ |
512 | if (!c->cyclic) | 512 | if (!c->cyclic) |
513 | return -EINVAL; | 513 | return -EINVAL; |
514 | 514 | ||
515 | if (c->paused) { | 515 | if (c->paused) { |
516 | omap_start_dma(c->dma_ch); | 516 | omap_start_dma(c->dma_ch); |
517 | c->paused = false; | 517 | c->paused = false; |
518 | } | 518 | } |
519 | 519 | ||
520 | return 0; | 520 | return 0; |
521 | } | 521 | } |
522 | 522 | ||
523 | static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 523 | static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
524 | unsigned long arg) | 524 | unsigned long arg) |
525 | { | 525 | { |
526 | struct omap_chan *c = to_omap_dma_chan(chan); | 526 | struct omap_chan *c = to_omap_dma_chan(chan); |
527 | int ret; | 527 | int ret; |
528 | 528 | ||
529 | switch (cmd) { | 529 | switch (cmd) { |
530 | case DMA_SLAVE_CONFIG: | 530 | case DMA_SLAVE_CONFIG: |
531 | ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); | 531 | ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); |
532 | break; | 532 | break; |
533 | 533 | ||
534 | case DMA_TERMINATE_ALL: | 534 | case DMA_TERMINATE_ALL: |
535 | ret = omap_dma_terminate_all(c); | 535 | ret = omap_dma_terminate_all(c); |
536 | break; | 536 | break; |
537 | 537 | ||
538 | case DMA_PAUSE: | 538 | case DMA_PAUSE: |
539 | ret = omap_dma_pause(c); | 539 | ret = omap_dma_pause(c); |
540 | break; | 540 | break; |
541 | 541 | ||
542 | case DMA_RESUME: | 542 | case DMA_RESUME: |
543 | ret = omap_dma_resume(c); | 543 | ret = omap_dma_resume(c); |
544 | break; | 544 | break; |
545 | 545 | ||
546 | default: | 546 | default: |
547 | ret = -ENXIO; | 547 | ret = -ENXIO; |
548 | break; | 548 | break; |
549 | } | 549 | } |
550 | 550 | ||
551 | return ret; | 551 | return ret; |
552 | } | 552 | } |
553 | 553 | ||
554 | static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) | 554 | static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) |
555 | { | 555 | { |
556 | struct omap_chan *c; | 556 | struct omap_chan *c; |
557 | 557 | ||
558 | c = kzalloc(sizeof(*c), GFP_KERNEL); | 558 | c = kzalloc(sizeof(*c), GFP_KERNEL); |
559 | if (!c) | 559 | if (!c) |
560 | return -ENOMEM; | 560 | return -ENOMEM; |
561 | 561 | ||
562 | c->dma_sig = dma_sig; | 562 | c->dma_sig = dma_sig; |
563 | c->vc.desc_free = omap_dma_desc_free; | 563 | c->vc.desc_free = omap_dma_desc_free; |
564 | vchan_init(&c->vc, &od->ddev); | 564 | vchan_init(&c->vc, &od->ddev); |
565 | INIT_LIST_HEAD(&c->node); | 565 | INIT_LIST_HEAD(&c->node); |
566 | 566 | ||
567 | od->ddev.chancnt++; | 567 | od->ddev.chancnt++; |
568 | 568 | ||
569 | return 0; | 569 | return 0; |
570 | } | 570 | } |
571 | 571 | ||
572 | static void omap_dma_free(struct omap_dmadev *od) | 572 | static void omap_dma_free(struct omap_dmadev *od) |
573 | { | 573 | { |
574 | tasklet_kill(&od->task); | 574 | tasklet_kill(&od->task); |
575 | while (!list_empty(&od->ddev.channels)) { | 575 | while (!list_empty(&od->ddev.channels)) { |
576 | struct omap_chan *c = list_first_entry(&od->ddev.channels, | 576 | struct omap_chan *c = list_first_entry(&od->ddev.channels, |
577 | struct omap_chan, vc.chan.device_node); | 577 | struct omap_chan, vc.chan.device_node); |
578 | 578 | ||
579 | list_del(&c->vc.chan.device_node); | 579 | list_del(&c->vc.chan.device_node); |
580 | tasklet_kill(&c->vc.task); | 580 | tasklet_kill(&c->vc.task); |
581 | kfree(c); | 581 | kfree(c); |
582 | } | 582 | } |
583 | kfree(od); | 583 | kfree(od); |
584 | } | 584 | } |
585 | 585 | ||
586 | static int omap_dma_probe(struct platform_device *pdev) | 586 | static int omap_dma_probe(struct platform_device *pdev) |
587 | { | 587 | { |
588 | struct omap_dmadev *od; | 588 | struct omap_dmadev *od; |
589 | int rc, i; | 589 | int rc, i; |
590 | 590 | ||
591 | od = kzalloc(sizeof(*od), GFP_KERNEL); | 591 | od = kzalloc(sizeof(*od), GFP_KERNEL); |
592 | if (!od) | 592 | if (!od) |
593 | return -ENOMEM; | 593 | return -ENOMEM; |
594 | 594 | ||
595 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); | 595 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); |
596 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); | 596 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); |
597 | od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; | 597 | od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; |
598 | od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; | 598 | od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; |
599 | od->ddev.device_tx_status = omap_dma_tx_status; | 599 | od->ddev.device_tx_status = omap_dma_tx_status; |
600 | od->ddev.device_issue_pending = omap_dma_issue_pending; | 600 | od->ddev.device_issue_pending = omap_dma_issue_pending; |
601 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; | 601 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; |
602 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; | 602 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; |
603 | od->ddev.device_control = omap_dma_control; | 603 | od->ddev.device_control = omap_dma_control; |
604 | od->ddev.dev = &pdev->dev; | 604 | od->ddev.dev = &pdev->dev; |
605 | INIT_LIST_HEAD(&od->ddev.channels); | 605 | INIT_LIST_HEAD(&od->ddev.channels); |
606 | INIT_LIST_HEAD(&od->pending); | 606 | INIT_LIST_HEAD(&od->pending); |
607 | spin_lock_init(&od->lock); | 607 | spin_lock_init(&od->lock); |
608 | 608 | ||
609 | tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); | 609 | tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); |
610 | 610 | ||
611 | for (i = 0; i < 127; i++) { | 611 | for (i = 0; i < 127; i++) { |
612 | rc = omap_dma_chan_init(od, i); | 612 | rc = omap_dma_chan_init(od, i); |
613 | if (rc) { | 613 | if (rc) { |
614 | omap_dma_free(od); | 614 | omap_dma_free(od); |
615 | return rc; | 615 | return rc; |
616 | } | 616 | } |
617 | } | 617 | } |
618 | 618 | ||
619 | rc = dma_async_device_register(&od->ddev); | 619 | rc = dma_async_device_register(&od->ddev); |
620 | if (rc) { | 620 | if (rc) { |
621 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", | 621 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", |
622 | rc); | 622 | rc); |
623 | omap_dma_free(od); | 623 | omap_dma_free(od); |
624 | } else { | 624 | } else { |
625 | platform_set_drvdata(pdev, od); | 625 | platform_set_drvdata(pdev, od); |
626 | } | 626 | } |
627 | 627 | ||
628 | dev_info(&pdev->dev, "OMAP DMA engine driver\n"); | 628 | dev_info(&pdev->dev, "OMAP DMA engine driver\n"); |
629 | 629 | ||
630 | return rc; | 630 | return rc; |
631 | } | 631 | } |
632 | 632 | ||
633 | static int omap_dma_remove(struct platform_device *pdev) | 633 | static int omap_dma_remove(struct platform_device *pdev) |
634 | { | 634 | { |
635 | struct omap_dmadev *od = platform_get_drvdata(pdev); | 635 | struct omap_dmadev *od = platform_get_drvdata(pdev); |
636 | 636 | ||
637 | dma_async_device_unregister(&od->ddev); | 637 | dma_async_device_unregister(&od->ddev); |
638 | omap_dma_free(od); | 638 | omap_dma_free(od); |
639 | 639 | ||
640 | return 0; | 640 | return 0; |
641 | } | 641 | } |
642 | 642 | ||
643 | static struct platform_driver omap_dma_driver = { | 643 | static struct platform_driver omap_dma_driver = { |
644 | .probe = omap_dma_probe, | 644 | .probe = omap_dma_probe, |
645 | .remove = omap_dma_remove, | 645 | .remove = omap_dma_remove, |
646 | .driver = { | 646 | .driver = { |
647 | .name = "omap-dma-engine", | 647 | .name = "omap-dma-engine", |
648 | .owner = THIS_MODULE, | 648 | .owner = THIS_MODULE, |
649 | }, | 649 | }, |
650 | }; | 650 | }; |
651 | 651 | ||
652 | bool omap_dma_filter_fn(struct dma_chan *chan, void *param) | 652 | bool omap_dma_filter_fn(struct dma_chan *chan, void *param) |
653 | { | 653 | { |
654 | if (chan->device->dev->driver == &omap_dma_driver.driver) { | 654 | if (chan->device->dev->driver == &omap_dma_driver.driver) { |
655 | struct omap_chan *c = to_omap_dma_chan(chan); | 655 | struct omap_chan *c = to_omap_dma_chan(chan); |
656 | unsigned req = *(unsigned *)param; | 656 | unsigned req = *(unsigned *)param; |
657 | 657 | ||
658 | return req == c->dma_sig; | 658 | return req == c->dma_sig; |
659 | } | 659 | } |
660 | return false; | 660 | return false; |
661 | } | 661 | } |
662 | EXPORT_SYMBOL_GPL(omap_dma_filter_fn); | 662 | EXPORT_SYMBOL_GPL(omap_dma_filter_fn); |
663 | 663 | ||
664 | static struct platform_device *pdev; | ||
665 | |||
666 | static const struct platform_device_info omap_dma_dev_info = { | ||
667 | .name = "omap-dma-engine", | ||
668 | .id = -1, | ||
669 | .dma_mask = DMA_BIT_MASK(32), | ||
670 | }; | ||
671 | |||
672 | static int omap_dma_init(void) | 664 | static int omap_dma_init(void) |
673 | { | 665 | { |
674 | int rc = platform_driver_register(&omap_dma_driver); | 666 | return platform_driver_register(&omap_dma_driver); |
675 | |||
676 | if (rc == 0) { | ||
677 | pdev = platform_device_register_full(&omap_dma_dev_info); | ||
678 | if (IS_ERR(pdev)) { | ||
679 | platform_driver_unregister(&omap_dma_driver); | ||
680 | rc = PTR_ERR(pdev); | ||
681 | } | ||
682 | } | ||
683 | return rc; | ||
684 | } | 667 | } |
685 | subsys_initcall(omap_dma_init); | 668 | subsys_initcall(omap_dma_init); |
686 | 669 | ||
687 | static void __exit omap_dma_exit(void) | 670 | static void __exit omap_dma_exit(void) |
688 | { | 671 | { |
689 | platform_device_unregister(pdev); | ||
690 | platform_driver_unregister(&omap_dma_driver); | 672 | platform_driver_unregister(&omap_dma_driver); |
691 | } | 673 | } |
692 | module_exit(omap_dma_exit); | 674 | module_exit(omap_dma_exit); |
693 | 675 | ||
694 | MODULE_AUTHOR("Russell King"); | 676 | MODULE_AUTHOR("Russell King"); |
695 | MODULE_LICENSE("GPL"); | 677 | MODULE_LICENSE("GPL"); |
696 | 678 |