Commit c2dde5f8f2095d7c623ff3565c1462e190272273
Committed by: Vinod Koul
Parent: 8eb4da28b2
Exists in smarc-l5.0.0_1.0.0-ga and in 5 other branches
dmaengine: add TI EDMA DMA engine driver
Add a DMA engine driver for the TI EDMA controller. This driver is implemented as a wrapper around the existing DaVinci private DMA implementation. This approach allows for incremental conversion of each peripheral driver to the DMA engine API. The EDMA driver supports slave transfers but does not yet support cyclic transfers.

Signed-off-by: Matt Porter <mporter@ti.com>
Tested-by: Tom Rini <trini@ti.com>
Tested-by: Sekhar Nori <nsekhar@ti.com>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Showing 4 changed files with 711 additions and 0 deletions
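For context on the API conversion the commit message describes, below is a minimal sketch of how a peripheral driver could request and configure an EDMA slave channel through the dmaengine API using the edma_filter_fn() added by this commit. The sketch is not part of the commit; the channel number, FIFO address, and burst size are hypothetical placeholders.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/edma.h>

/*
 * Hypothetical EDMA channel/event number for the peripheral (use the
 * EDMA_CTLR_CHAN()-encoded value on multi-controller parts).
 */
static unsigned int edma_ch = 20;

static struct dma_chan *request_edma_tx_channel(dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg;
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* edma_filter_fn() matches the channel whose number equals *param */
	chan = dma_request_channel(mask, edma_filter_fn, &edma_ch);
	if (!chan)
		return NULL;

	memset(&cfg, 0, sizeof(cfg));
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = fifo_addr;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = 8;	/* > 1 selects AB-synced transfers in this driver */
	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}

A descriptor would then be prepared with dmaengine_prep_slave_sg() and started with dma_async_issue_pending(), which map to the edma_prep_slave_sg() and edma_issue_pending() callbacks added below.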
drivers/dma/Kconfig
1 | # | 1 | # |
2 | # DMA engine configuration | 2 | # DMA engine configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig DMADEVICES | 5 | menuconfig DMADEVICES |
6 | bool "DMA Engine support" | 6 | bool "DMA Engine support" |
7 | depends on HAS_DMA | 7 | depends on HAS_DMA |
8 | help | 8 | help |
9 | DMA engines can do asynchronous data transfers without | 9 | DMA engines can do asynchronous data transfers without |
10 | involving the host CPU. Currently, this framework can be | 10 | involving the host CPU. Currently, this framework can be |
11 | used to offload memory copies in the network stack and | 11 | used to offload memory copies in the network stack and |
12 | RAID operations in the MD driver. This menu only presents | 12 | RAID operations in the MD driver. This menu only presents |
13 | DMA Device drivers supported by the configured arch, it may | 13 | DMA Device drivers supported by the configured arch, it may |
14 | be empty in some cases. | 14 | be empty in some cases. |
15 | 15 | ||
16 | config DMADEVICES_DEBUG | 16 | config DMADEVICES_DEBUG |
17 | bool "DMA Engine debugging" | 17 | bool "DMA Engine debugging" |
18 | depends on DMADEVICES != n | 18 | depends on DMADEVICES != n |
19 | help | 19 | help |
20 | This is an option for use by developers; most people should | 20 | This is an option for use by developers; most people should |
21 | say N here. This enables DMA engine core and driver debugging. | 21 | say N here. This enables DMA engine core and driver debugging. |
22 | 22 | ||
23 | config DMADEVICES_VDEBUG | 23 | config DMADEVICES_VDEBUG |
24 | bool "DMA Engine verbose debugging" | 24 | bool "DMA Engine verbose debugging" |
25 | depends on DMADEVICES_DEBUG != n | 25 | depends on DMADEVICES_DEBUG != n |
26 | help | 26 | help |
27 | This is an option for use by developers; most people should | 27 | This is an option for use by developers; most people should |
28 | say N here. This enables deeper (more verbose) debugging of | 28 | say N here. This enables deeper (more verbose) debugging of |
29 | the DMA engine core and drivers. | 29 | the DMA engine core and drivers. |
30 | 30 | ||
31 | 31 | ||
32 | if DMADEVICES | 32 | if DMADEVICES |
33 | 33 | ||
34 | comment "DMA Devices" | 34 | comment "DMA Devices" |
35 | 35 | ||
36 | config INTEL_MID_DMAC | 36 | config INTEL_MID_DMAC |
37 | tristate "Intel MID DMA support for Peripheral DMA controllers" | 37 | tristate "Intel MID DMA support for Peripheral DMA controllers" |
38 | depends on PCI && X86 | 38 | depends on PCI && X86 |
39 | select DMA_ENGINE | 39 | select DMA_ENGINE |
40 | default n | 40 | default n |
41 | help | 41 | help |
42 | Enable support for the Intel(R) MID DMA engine present | 42 | Enable support for the Intel(R) MID DMA engine present |
43 | in Intel MID chipsets. | 43 | in Intel MID chipsets. |
44 | 44 | ||
45 | Say Y here if you have such a chipset. | 45 | Say Y here if you have such a chipset. |
46 | 46 | ||
47 | If unsure, say N. | 47 | If unsure, say N. |
48 | 48 | ||
49 | config ASYNC_TX_ENABLE_CHANNEL_SWITCH | 49 | config ASYNC_TX_ENABLE_CHANNEL_SWITCH |
50 | bool | 50 | bool |
51 | 51 | ||
52 | config AMBA_PL08X | 52 | config AMBA_PL08X |
53 | bool "ARM PrimeCell PL080 or PL081 support" | 53 | bool "ARM PrimeCell PL080 or PL081 support" |
54 | depends on ARM_AMBA && EXPERIMENTAL | 54 | depends on ARM_AMBA && EXPERIMENTAL |
55 | select DMA_ENGINE | 55 | select DMA_ENGINE |
56 | select DMA_VIRTUAL_CHANNELS | 56 | select DMA_VIRTUAL_CHANNELS |
57 | help | 57 | help |
58 | Platform has a PL08x DMAC device | 58 | Platform has a PL08x DMAC device |
59 | which can provide DMA engine support | 59 | which can provide DMA engine support |
60 | 60 | ||
61 | config INTEL_IOATDMA | 61 | config INTEL_IOATDMA |
62 | tristate "Intel I/OAT DMA support" | 62 | tristate "Intel I/OAT DMA support" |
63 | depends on PCI && X86 | 63 | depends on PCI && X86 |
64 | select DMA_ENGINE | 64 | select DMA_ENGINE |
65 | select DCA | 65 | select DCA |
66 | select ASYNC_TX_DISABLE_PQ_VAL_DMA | 66 | select ASYNC_TX_DISABLE_PQ_VAL_DMA |
67 | select ASYNC_TX_DISABLE_XOR_VAL_DMA | 67 | select ASYNC_TX_DISABLE_XOR_VAL_DMA |
68 | help | 68 | help |
69 | Enable support for the Intel(R) I/OAT DMA engine present | 69 | Enable support for the Intel(R) I/OAT DMA engine present |
70 | in recent Intel Xeon chipsets. | 70 | in recent Intel Xeon chipsets. |
71 | 71 | ||
72 | Say Y here if you have such a chipset. | 72 | Say Y here if you have such a chipset. |
73 | 73 | ||
74 | If unsure, say N. | 74 | If unsure, say N. |
75 | 75 | ||
76 | config INTEL_IOP_ADMA | 76 | config INTEL_IOP_ADMA |
77 | tristate "Intel IOP ADMA support" | 77 | tristate "Intel IOP ADMA support" |
78 | depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX | 78 | depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX |
79 | select DMA_ENGINE | 79 | select DMA_ENGINE |
80 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 80 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
81 | help | 81 | help |
82 | Enable support for the Intel(R) IOP Series RAID engines. | 82 | Enable support for the Intel(R) IOP Series RAID engines. |
83 | 83 | ||
84 | config DW_DMAC | 84 | config DW_DMAC |
85 | tristate "Synopsys DesignWare AHB DMA support" | 85 | tristate "Synopsys DesignWare AHB DMA support" |
86 | depends on HAVE_CLK | 86 | depends on HAVE_CLK |
87 | select DMA_ENGINE | 87 | select DMA_ENGINE |
88 | default y if CPU_AT32AP7000 | 88 | default y if CPU_AT32AP7000 |
89 | help | 89 | help |
90 | Support the Synopsys DesignWare AHB DMA controller. This | 90 | Support the Synopsys DesignWare AHB DMA controller. This |
91 | can be integrated in chips such as the Atmel AT32ap7000. | 91 | can be integrated in chips such as the Atmel AT32ap7000. |
92 | 92 | ||
93 | config AT_HDMAC | 93 | config AT_HDMAC |
94 | tristate "Atmel AHB DMA support" | 94 | tristate "Atmel AHB DMA support" |
95 | depends on ARCH_AT91 | 95 | depends on ARCH_AT91 |
96 | select DMA_ENGINE | 96 | select DMA_ENGINE |
97 | help | 97 | help |
98 | Support the Atmel AHB DMA controller. | 98 | Support the Atmel AHB DMA controller. |
99 | 99 | ||
100 | config FSL_DMA | 100 | config FSL_DMA |
101 | tristate "Freescale Elo and Elo Plus DMA support" | 101 | tristate "Freescale Elo and Elo Plus DMA support" |
102 | depends on FSL_SOC | 102 | depends on FSL_SOC |
103 | select DMA_ENGINE | 103 | select DMA_ENGINE |
104 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 104 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
105 | ---help--- | 105 | ---help--- |
106 | Enable support for the Freescale Elo and Elo Plus DMA controllers. | 106 | Enable support for the Freescale Elo and Elo Plus DMA controllers. |
107 | The Elo is the DMA controller on some 82xx and 83xx parts, and the | 107 | The Elo is the DMA controller on some 82xx and 83xx parts, and the |
108 | Elo Plus is the DMA controller on 85xx and 86xx parts. | 108 | Elo Plus is the DMA controller on 85xx and 86xx parts. |
109 | 109 | ||
110 | config MPC512X_DMA | 110 | config MPC512X_DMA |
111 | tristate "Freescale MPC512x built-in DMA engine support" | 111 | tristate "Freescale MPC512x built-in DMA engine support" |
112 | depends on PPC_MPC512x || PPC_MPC831x | 112 | depends on PPC_MPC512x || PPC_MPC831x |
113 | select DMA_ENGINE | 113 | select DMA_ENGINE |
114 | ---help--- | 114 | ---help--- |
115 | Enable support for the Freescale MPC512x built-in DMA engine. | 115 | Enable support for the Freescale MPC512x built-in DMA engine. |
116 | 116 | ||
117 | config MV_XOR | 117 | config MV_XOR |
118 | bool "Marvell XOR engine support" | 118 | bool "Marvell XOR engine support" |
119 | depends on PLAT_ORION | 119 | depends on PLAT_ORION |
120 | select DMA_ENGINE | 120 | select DMA_ENGINE |
121 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 121 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
122 | ---help--- | 122 | ---help--- |
123 | Enable support for the Marvell XOR engine. | 123 | Enable support for the Marvell XOR engine. |
124 | 124 | ||
125 | config MX3_IPU | 125 | config MX3_IPU |
126 | bool "MX3x Image Processing Unit support" | 126 | bool "MX3x Image Processing Unit support" |
127 | depends on ARCH_MXC | 127 | depends on ARCH_MXC |
128 | select DMA_ENGINE | 128 | select DMA_ENGINE |
129 | default y | 129 | default y |
130 | help | 130 | help |
131 | If you plan to use the Image Processing unit in the i.MX3x, say | 131 | If you plan to use the Image Processing unit in the i.MX3x, say |
132 | Y here. If unsure, select Y. | 132 | Y here. If unsure, select Y. |
133 | 133 | ||
134 | config MX3_IPU_IRQS | 134 | config MX3_IPU_IRQS |
135 | int "Number of dynamically mapped interrupts for IPU" | 135 | int "Number of dynamically mapped interrupts for IPU" |
136 | depends on MX3_IPU | 136 | depends on MX3_IPU |
137 | range 2 137 | 137 | range 2 137 |
138 | default 4 | 138 | default 4 |
139 | help | 139 | help |
140 | Out of 137 interrupt sources on i.MX31 IPU only very few are used. | 140 | Out of 137 interrupt sources on i.MX31 IPU only very few are used. |
141 | To avoid bloating the irq_desc[] array we allocate a sufficient | 141 | To avoid bloating the irq_desc[] array we allocate a sufficient |
142 | number of IRQ slots and map them dynamically to specific sources. | 142 | number of IRQ slots and map them dynamically to specific sources. |
143 | 143 | ||
144 | config TXX9_DMAC | 144 | config TXX9_DMAC |
145 | tristate "Toshiba TXx9 SoC DMA support" | 145 | tristate "Toshiba TXx9 SoC DMA support" |
146 | depends on MACH_TX49XX || MACH_TX39XX | 146 | depends on MACH_TX49XX || MACH_TX39XX |
147 | select DMA_ENGINE | 147 | select DMA_ENGINE |
148 | help | 148 | help |
149 | Support the TXx9 SoC internal DMA controller. This can be | 149 | Support the TXx9 SoC internal DMA controller. This can be |
150 | integrated in chips such as the Toshiba TX4927/38/39. | 150 | integrated in chips such as the Toshiba TX4927/38/39. |
151 | 151 | ||
152 | config TEGRA20_APB_DMA | 152 | config TEGRA20_APB_DMA |
153 | bool "NVIDIA Tegra20 APB DMA support" | 153 | bool "NVIDIA Tegra20 APB DMA support" |
154 | depends on ARCH_TEGRA | 154 | depends on ARCH_TEGRA |
155 | select DMA_ENGINE | 155 | select DMA_ENGINE |
156 | help | 156 | help |
157 | Support for the NVIDIA Tegra20 APB DMA controller driver. The | 157 | Support for the NVIDIA Tegra20 APB DMA controller driver. The |
158 | DMA controller is having multiple DMA channel which can be | 158 | DMA controller is having multiple DMA channel which can be |
159 | configured for different peripherals like audio, UART, SPI, | 159 | configured for different peripherals like audio, UART, SPI, |
160 | I2C etc which is in APB bus. | 160 | I2C etc which is in APB bus. |
161 | This DMA controller transfers data from memory to peripheral fifo | 161 | This DMA controller transfers data from memory to peripheral fifo |
162 | or vice versa. It does not support memory to memory data transfer. | 162 | or vice versa. It does not support memory to memory data transfer. |
163 | 163 | ||
164 | 164 | ||
165 | 165 | ||
166 | config SH_DMAE | 166 | config SH_DMAE |
167 | tristate "Renesas SuperH DMAC support" | 167 | tristate "Renesas SuperH DMAC support" |
168 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) | 168 | depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) |
169 | depends on !SH_DMA_API | 169 | depends on !SH_DMA_API |
170 | select DMA_ENGINE | 170 | select DMA_ENGINE |
171 | help | 171 | help |
172 | Enable support for the Renesas SuperH DMA controllers. | 172 | Enable support for the Renesas SuperH DMA controllers. |
173 | 173 | ||
174 | config COH901318 | 174 | config COH901318 |
175 | bool "ST-Ericsson COH901318 DMA support" | 175 | bool "ST-Ericsson COH901318 DMA support" |
176 | select DMA_ENGINE | 176 | select DMA_ENGINE |
177 | depends on ARCH_U300 | 177 | depends on ARCH_U300 |
178 | help | 178 | help |
179 | Enable support for ST-Ericsson COH 901 318 DMA. | 179 | Enable support for ST-Ericsson COH 901 318 DMA. |
180 | 180 | ||
181 | config STE_DMA40 | 181 | config STE_DMA40 |
182 | bool "ST-Ericsson DMA40 support" | 182 | bool "ST-Ericsson DMA40 support" |
183 | depends on ARCH_U8500 | 183 | depends on ARCH_U8500 |
184 | select DMA_ENGINE | 184 | select DMA_ENGINE |
185 | help | 185 | help |
186 | Support for ST-Ericsson DMA40 controller | 186 | Support for ST-Ericsson DMA40 controller |
187 | 187 | ||
188 | config AMCC_PPC440SPE_ADMA | 188 | config AMCC_PPC440SPE_ADMA |
189 | tristate "AMCC PPC440SPe ADMA support" | 189 | tristate "AMCC PPC440SPe ADMA support" |
190 | depends on 440SPe || 440SP | 190 | depends on 440SPe || 440SP |
191 | select DMA_ENGINE | 191 | select DMA_ENGINE |
192 | select ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 192 | select ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
193 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH | 193 | select ASYNC_TX_ENABLE_CHANNEL_SWITCH |
194 | help | 194 | help |
195 | Enable support for the AMCC PPC440SPe RAID engines. | 195 | Enable support for the AMCC PPC440SPe RAID engines. |
196 | 196 | ||
197 | config TIMB_DMA | 197 | config TIMB_DMA |
198 | tristate "Timberdale FPGA DMA support" | 198 | tristate "Timberdale FPGA DMA support" |
199 | depends on MFD_TIMBERDALE || HAS_IOMEM | 199 | depends on MFD_TIMBERDALE || HAS_IOMEM |
200 | select DMA_ENGINE | 200 | select DMA_ENGINE |
201 | help | 201 | help |
202 | Enable support for the Timberdale FPGA DMA engine. | 202 | Enable support for the Timberdale FPGA DMA engine. |
203 | 203 | ||
204 | config SIRF_DMA | 204 | config SIRF_DMA |
205 | tristate "CSR SiRFprimaII DMA support" | 205 | tristate "CSR SiRFprimaII DMA support" |
206 | depends on ARCH_PRIMA2 | 206 | depends on ARCH_PRIMA2 |
207 | select DMA_ENGINE | 207 | select DMA_ENGINE |
208 | help | 208 | help |
209 | Enable support for the CSR SiRFprimaII DMA engine. | 209 | Enable support for the CSR SiRFprimaII DMA engine. |
210 | 210 | ||
211 | config TI_EDMA | ||
212 | tristate "TI EDMA support" | ||
213 | depends on ARCH_DAVINCI | ||
214 | select DMA_ENGINE | ||
215 | select DMA_VIRTUAL_CHANNELS | ||
216 | default n | ||
217 | help | ||
218 | Enable support for the TI EDMA controller. This DMA | ||
219 | engine is found on TI DaVinci and AM33xx parts. | ||
220 | |||
211 | config ARCH_HAS_ASYNC_TX_FIND_CHANNEL | 221 | config ARCH_HAS_ASYNC_TX_FIND_CHANNEL |
212 | bool | 222 | bool |
213 | 223 | ||
214 | config PL330_DMA | 224 | config PL330_DMA |
215 | tristate "DMA API Driver for PL330" | 225 | tristate "DMA API Driver for PL330" |
216 | select DMA_ENGINE | 226 | select DMA_ENGINE |
217 | depends on ARM_AMBA | 227 | depends on ARM_AMBA |
218 | help | 228 | help |
219 | Select if your platform has one or more PL330 DMACs. | 229 | Select if your platform has one or more PL330 DMACs. |
220 | You need to provide platform specific settings via | 230 | You need to provide platform specific settings via |
221 | platform_data for a dma-pl330 device. | 231 | platform_data for a dma-pl330 device. |
222 | 232 | ||
223 | config PCH_DMA | 233 | config PCH_DMA |
224 | tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" | 234 | tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA" |
225 | depends on PCI && X86 | 235 | depends on PCI && X86 |
226 | select DMA_ENGINE | 236 | select DMA_ENGINE |
227 | help | 237 | help |
228 | Enable support for Intel EG20T PCH DMA engine. | 238 | Enable support for Intel EG20T PCH DMA engine. |
229 | 239 | ||
230 | This driver also can be used for LAPIS Semiconductor IOH(Input/ | 240 | This driver also can be used for LAPIS Semiconductor IOH(Input/ |
231 | Output Hub), ML7213, ML7223 and ML7831. | 241 | Output Hub), ML7213, ML7223 and ML7831. |
232 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is | 242 | ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is |
233 | for MP(Media Phone) use and ML7831 IOH is for general purpose use. | 243 | for MP(Media Phone) use and ML7831 IOH is for general purpose use. |
234 | ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. | 244 | ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series. |
235 | ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. | 245 | ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH. |
236 | 246 | ||
237 | config IMX_SDMA | 247 | config IMX_SDMA |
238 | tristate "i.MX SDMA support" | 248 | tristate "i.MX SDMA support" |
239 | depends on ARCH_MXC | 249 | depends on ARCH_MXC |
240 | select DMA_ENGINE | 250 | select DMA_ENGINE |
241 | help | 251 | help |
242 | Support the i.MX SDMA engine. This engine is integrated into | 252 | Support the i.MX SDMA engine. This engine is integrated into |
243 | Freescale i.MX25/31/35/51/53 chips. | 253 | Freescale i.MX25/31/35/51/53 chips. |
244 | 254 | ||
245 | config IMX_DMA | 255 | config IMX_DMA |
246 | tristate "i.MX DMA support" | 256 | tristate "i.MX DMA support" |
247 | depends on ARCH_MXC | 257 | depends on ARCH_MXC |
248 | select DMA_ENGINE | 258 | select DMA_ENGINE |
249 | help | 259 | help |
250 | Support the i.MX DMA engine. This engine is integrated into | 260 | Support the i.MX DMA engine. This engine is integrated into |
251 | Freescale i.MX1/21/27 chips. | 261 | Freescale i.MX1/21/27 chips. |
252 | 262 | ||
253 | config MXS_DMA | 263 | config MXS_DMA |
254 | bool "MXS DMA support" | 264 | bool "MXS DMA support" |
255 | depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q | 265 | depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q |
256 | select STMP_DEVICE | 266 | select STMP_DEVICE |
257 | select DMA_ENGINE | 267 | select DMA_ENGINE |
258 | help | 268 | help |
259 | Support the MXS DMA engine. This engine including APBH-DMA | 269 | Support the MXS DMA engine. This engine including APBH-DMA |
260 | and APBX-DMA is integrated into Freescale i.MX23/28 chips. | 270 | and APBX-DMA is integrated into Freescale i.MX23/28 chips. |
261 | 271 | ||
262 | config EP93XX_DMA | 272 | config EP93XX_DMA |
263 | bool "Cirrus Logic EP93xx DMA support" | 273 | bool "Cirrus Logic EP93xx DMA support" |
264 | depends on ARCH_EP93XX | 274 | depends on ARCH_EP93XX |
265 | select DMA_ENGINE | 275 | select DMA_ENGINE |
266 | help | 276 | help |
267 | Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. | 277 | Enable support for the Cirrus Logic EP93xx M2P/M2M DMA controller. |
268 | 278 | ||
269 | config DMA_SA11X0 | 279 | config DMA_SA11X0 |
270 | tristate "SA-11x0 DMA support" | 280 | tristate "SA-11x0 DMA support" |
271 | depends on ARCH_SA1100 | 281 | depends on ARCH_SA1100 |
272 | select DMA_ENGINE | 282 | select DMA_ENGINE |
273 | select DMA_VIRTUAL_CHANNELS | 283 | select DMA_VIRTUAL_CHANNELS |
274 | help | 284 | help |
275 | Support the DMA engine found on Intel StrongARM SA-1100 and | 285 | Support the DMA engine found on Intel StrongARM SA-1100 and |
276 | SA-1110 SoCs. This DMA engine can only be used with on-chip | 286 | SA-1110 SoCs. This DMA engine can only be used with on-chip |
277 | devices. | 287 | devices. |
278 | 288 | ||
279 | config MMP_TDMA | 289 | config MMP_TDMA |
280 | bool "MMP Two-Channel DMA support" | 290 | bool "MMP Two-Channel DMA support" |
281 | depends on ARCH_MMP | 291 | depends on ARCH_MMP |
282 | select DMA_ENGINE | 292 | select DMA_ENGINE |
283 | help | 293 | help |
284 | Support the MMP Two-Channel DMA engine. | 294 | Support the MMP Two-Channel DMA engine. |
285 | This engine used for MMP Audio DMA and pxa910 SQU. | 295 | This engine used for MMP Audio DMA and pxa910 SQU. |
286 | 296 | ||
287 | Say Y here if you enabled MMP ADMA, otherwise say N. | 297 | Say Y here if you enabled MMP ADMA, otherwise say N. |
288 | 298 | ||
289 | config DMA_OMAP | 299 | config DMA_OMAP |
290 | tristate "OMAP DMA support" | 300 | tristate "OMAP DMA support" |
291 | depends on ARCH_OMAP | 301 | depends on ARCH_OMAP |
292 | select DMA_ENGINE | 302 | select DMA_ENGINE |
293 | select DMA_VIRTUAL_CHANNELS | 303 | select DMA_VIRTUAL_CHANNELS |
294 | 304 | ||
295 | config DMA_ENGINE | 305 | config DMA_ENGINE |
296 | bool | 306 | bool |
297 | 307 | ||
298 | config DMA_VIRTUAL_CHANNELS | 308 | config DMA_VIRTUAL_CHANNELS |
299 | tristate | 309 | tristate |
300 | 310 | ||
301 | comment "DMA Clients" | 311 | comment "DMA Clients" |
302 | depends on DMA_ENGINE | 312 | depends on DMA_ENGINE |
303 | 313 | ||
304 | config NET_DMA | 314 | config NET_DMA |
305 | bool "Network: TCP receive copy offload" | 315 | bool "Network: TCP receive copy offload" |
306 | depends on DMA_ENGINE && NET | 316 | depends on DMA_ENGINE && NET |
307 | default (INTEL_IOATDMA || FSL_DMA) | 317 | default (INTEL_IOATDMA || FSL_DMA) |
308 | help | 318 | help |
309 | This enables the use of DMA engines in the network stack to | 319 | This enables the use of DMA engines in the network stack to |
310 | offload receive copy-to-user operations, freeing CPU cycles. | 320 | offload receive copy-to-user operations, freeing CPU cycles. |
311 | 321 | ||
312 | Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise | 322 | Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise |
313 | say N. | 323 | say N. |
314 | 324 | ||
315 | config ASYNC_TX_DMA | 325 | config ASYNC_TX_DMA |
316 | bool "Async_tx: Offload support for the async_tx api" | 326 | bool "Async_tx: Offload support for the async_tx api" |
317 | depends on DMA_ENGINE | 327 | depends on DMA_ENGINE |
318 | help | 328 | help |
319 | This allows the async_tx api to take advantage of offload engines for | 329 | This allows the async_tx api to take advantage of offload engines for |
320 | memcpy, memset, xor, and raid6 p+q operations. If your platform has | 330 | memcpy, memset, xor, and raid6 p+q operations. If your platform has |
321 | a dma engine that can perform raid operations and you have enabled | 331 | a dma engine that can perform raid operations and you have enabled |
322 | MD_RAID456 say Y. | 332 | MD_RAID456 say Y. |
323 | 333 | ||
324 | If unsure, say N. | 334 | If unsure, say N. |
325 | 335 | ||
326 | config DMATEST | 336 | config DMATEST |
327 | tristate "DMA Test client" | 337 | tristate "DMA Test client" |
328 | depends on DMA_ENGINE | 338 | depends on DMA_ENGINE |
329 | help | 339 | help |
330 | Simple DMA test client. Say N unless you're debugging a | 340 | Simple DMA test client. Say N unless you're debugging a |
331 | DMA Device driver. | 341 | DMA Device driver. |
332 | 342 | ||
333 | endif | 343 | endif |
334 | 344 |
drivers/dma/Makefile
1 | ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG | 1 | ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG |
2 | ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG | 2 | ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG |
3 | 3 | ||
4 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o | 4 | obj-$(CONFIG_DMA_ENGINE) += dmaengine.o |
5 | obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o | 5 | obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o |
6 | obj-$(CONFIG_NET_DMA) += iovlock.o | 6 | obj-$(CONFIG_NET_DMA) += iovlock.o |
7 | obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o | 7 | obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o |
8 | obj-$(CONFIG_DMATEST) += dmatest.o | 8 | obj-$(CONFIG_DMATEST) += dmatest.o |
9 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ | 9 | obj-$(CONFIG_INTEL_IOATDMA) += ioat/ |
10 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o | 10 | obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o |
11 | obj-$(CONFIG_FSL_DMA) += fsldma.o | 11 | obj-$(CONFIG_FSL_DMA) += fsldma.o |
12 | obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o | 12 | obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o |
13 | obj-$(CONFIG_MV_XOR) += mv_xor.o | 13 | obj-$(CONFIG_MV_XOR) += mv_xor.o |
14 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o | 14 | obj-$(CONFIG_DW_DMAC) += dw_dmac.o |
15 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o | 15 | obj-$(CONFIG_AT_HDMAC) += at_hdmac.o |
16 | obj-$(CONFIG_MX3_IPU) += ipu/ | 16 | obj-$(CONFIG_MX3_IPU) += ipu/ |
17 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o | 17 | obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o |
18 | obj-$(CONFIG_SH_DMAE) += sh/ | 18 | obj-$(CONFIG_SH_DMAE) += sh/ |
19 | obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o | 19 | obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o |
20 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ | 20 | obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ |
21 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o | 21 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o |
22 | obj-$(CONFIG_IMX_DMA) += imx-dma.o | 22 | obj-$(CONFIG_IMX_DMA) += imx-dma.o |
23 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o | 23 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o |
24 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o | 24 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o |
25 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | 25 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o |
26 | obj-$(CONFIG_TI_EDMA) += edma.o | ||
26 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 27 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
27 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | 28 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o |
28 | obj-$(CONFIG_PL330_DMA) += pl330.o | 29 | obj-$(CONFIG_PL330_DMA) += pl330.o |
29 | obj-$(CONFIG_PCH_DMA) += pch_dma.o | 30 | obj-$(CONFIG_PCH_DMA) += pch_dma.o |
30 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o | 31 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o |
31 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o | 32 | obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o |
32 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o | 33 | obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o |
33 | obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o | 34 | obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o |
34 | obj-$(CONFIG_DMA_OMAP) += omap-dma.o | 35 | obj-$(CONFIG_DMA_OMAP) += omap-dma.o |
35 | 36 |
drivers/dma/edma.c
File was created | 1 | /* | |
2 | * TI EDMA DMA engine driver | ||
3 | * | ||
4 | * Copyright 2012 Texas Instruments | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation version 2. | ||
9 | * | ||
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
11 | * kind, whether express or implied; without even the implied warranty | ||
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/dma-mapping.h> | ||
18 | #include <linux/err.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/list.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/spinlock.h> | ||
26 | |||
27 | #include <mach/edma.h> | ||
28 | |||
29 | #include "dmaengine.h" | ||
30 | #include "virt-dma.h" | ||
31 | |||
32 | /* | ||
33 | * This will go away when the private EDMA API is folded | ||
34 | * into this driver and the platform device(s) are | ||
35 | * instantiated in the arch code. We can only get away | ||
36 | * with this simplification because DA8XX may not be built | ||
37 | * in the same kernel image with other DaVinci parts. This | ||
38 | * avoids having to sprinkle dmaengine driver platform devices | ||
39 | * and data throughout all the existing board files. | ||
40 | */ | ||
41 | #ifdef CONFIG_ARCH_DAVINCI_DA8XX | ||
42 | #define EDMA_CTLRS 2 | ||
43 | #define EDMA_CHANS 32 | ||
44 | #else | ||
45 | #define EDMA_CTLRS 1 | ||
46 | #define EDMA_CHANS 64 | ||
47 | #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ | ||
48 | |||
49 | /* Max of 16 segments per channel to conserve PaRAM slots */ | ||
50 | #define MAX_NR_SG 16 | ||
51 | #define EDMA_MAX_SLOTS MAX_NR_SG | ||
52 | #define EDMA_DESCRIPTORS 16 | ||
53 | |||
54 | struct edma_desc { | ||
55 | struct virt_dma_desc vdesc; | ||
56 | struct list_head node; | ||
57 | int absync; | ||
58 | int pset_nr; | ||
59 | struct edmacc_param pset[0]; | ||
60 | }; | ||
61 | |||
62 | struct edma_cc; | ||
63 | |||
64 | struct edma_chan { | ||
65 | struct virt_dma_chan vchan; | ||
66 | struct list_head node; | ||
67 | struct edma_desc *edesc; | ||
68 | struct edma_cc *ecc; | ||
69 | int ch_num; | ||
70 | bool alloced; | ||
71 | int slot[EDMA_MAX_SLOTS]; | ||
72 | dma_addr_t addr; | ||
73 | int addr_width; | ||
74 | int maxburst; | ||
75 | }; | ||
76 | |||
77 | struct edma_cc { | ||
78 | int ctlr; | ||
79 | struct dma_device dma_slave; | ||
80 | struct edma_chan slave_chans[EDMA_CHANS]; | ||
81 | int num_slave_chans; | ||
82 | int dummy_slot; | ||
83 | }; | ||
84 | |||
85 | static inline struct edma_cc *to_edma_cc(struct dma_device *d) | ||
86 | { | ||
87 | return container_of(d, struct edma_cc, dma_slave); | ||
88 | } | ||
89 | |||
90 | static inline struct edma_chan *to_edma_chan(struct dma_chan *c) | ||
91 | { | ||
92 | return container_of(c, struct edma_chan, vchan.chan); | ||
93 | } | ||
94 | |||
95 | static inline struct edma_desc | ||
96 | *to_edma_desc(struct dma_async_tx_descriptor *tx) | ||
97 | { | ||
98 | return container_of(tx, struct edma_desc, vdesc.tx); | ||
99 | } | ||
100 | |||
101 | static void edma_desc_free(struct virt_dma_desc *vdesc) | ||
102 | { | ||
103 | kfree(container_of(vdesc, struct edma_desc, vdesc)); | ||
104 | } | ||
105 | |||
106 | /* Dispatch a queued descriptor to the controller (caller holds lock) */ | ||
107 | static void edma_execute(struct edma_chan *echan) | ||
108 | { | ||
109 | struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan); | ||
110 | struct edma_desc *edesc; | ||
111 | int i; | ||
112 | |||
113 | if (!vdesc) { | ||
114 | echan->edesc = NULL; | ||
115 | return; | ||
116 | } | ||
117 | |||
118 | list_del(&vdesc->node); | ||
119 | |||
120 | echan->edesc = edesc = to_edma_desc(&vdesc->tx); | ||
121 | |||
122 | /* Write descriptor PaRAM set(s) */ | ||
123 | for (i = 0; i < edesc->pset_nr; i++) { | ||
124 | edma_write_slot(echan->slot[i], &edesc->pset[i]); | ||
125 | dev_dbg(echan->vchan.chan.device->dev, | ||
126 | "\n pset[%d]:\n" | ||
127 | " chnum\t%d\n" | ||
128 | " slot\t%d\n" | ||
129 | " opt\t%08x\n" | ||
130 | " src\t%08x\n" | ||
131 | " dst\t%08x\n" | ||
132 | " abcnt\t%08x\n" | ||
133 | " ccnt\t%08x\n" | ||
134 | " bidx\t%08x\n" | ||
135 | " cidx\t%08x\n" | ||
136 | " lkrld\t%08x\n", | ||
137 | i, echan->ch_num, echan->slot[i], | ||
138 | edesc->pset[i].opt, | ||
139 | edesc->pset[i].src, | ||
140 | edesc->pset[i].dst, | ||
141 | edesc->pset[i].a_b_cnt, | ||
142 | edesc->pset[i].ccnt, | ||
143 | edesc->pset[i].src_dst_bidx, | ||
144 | edesc->pset[i].src_dst_cidx, | ||
145 | edesc->pset[i].link_bcntrld); | ||
146 | /* Link to the previous slot if not the last set */ | ||
147 | if (i != (edesc->pset_nr - 1)) | ||
148 | edma_link(echan->slot[i], echan->slot[i+1]); | ||
149 | /* Final pset links to the dummy pset */ | ||
150 | else | ||
151 | edma_link(echan->slot[i], echan->ecc->dummy_slot); | ||
152 | } | ||
153 | |||
154 | edma_start(echan->ch_num); | ||
155 | } | ||
156 | |||
157 | static int edma_terminate_all(struct edma_chan *echan) | ||
158 | { | ||
159 | unsigned long flags; | ||
160 | LIST_HEAD(head); | ||
161 | |||
162 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
163 | |||
164 | /* | ||
165 | * Stop DMA activity: we assume the callback will not be called | ||
166 | * after edma_dma() returns (even if it does, it will see | ||
167 | * echan->edesc is NULL and exit.) | ||
168 | */ | ||
169 | if (echan->edesc) { | ||
170 | echan->edesc = NULL; | ||
171 | edma_stop(echan->ch_num); | ||
172 | } | ||
173 | |||
174 | vchan_get_all_descriptors(&echan->vchan, &head); | ||
175 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
176 | vchan_dma_desc_free_list(&echan->vchan, &head); | ||
177 | |||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | |||
182 | static int edma_slave_config(struct edma_chan *echan, | ||
183 | struct dma_slave_config *config) | ||
184 | { | ||
185 | if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) || | ||
186 | (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) | ||
187 | return -EINVAL; | ||
188 | |||
189 | if (config->direction == DMA_MEM_TO_DEV) { | ||
190 | if (config->dst_addr) | ||
191 | echan->addr = config->dst_addr; | ||
192 | if (config->dst_addr_width) | ||
193 | echan->addr_width = config->dst_addr_width; | ||
194 | if (config->dst_maxburst) | ||
195 | echan->maxburst = config->dst_maxburst; | ||
196 | } else if (config->direction == DMA_DEV_TO_MEM) { | ||
197 | if (config->src_addr) | ||
198 | echan->addr = config->src_addr; | ||
199 | if (config->src_addr_width) | ||
200 | echan->addr_width = config->src_addr_width; | ||
201 | if (config->src_maxburst) | ||
202 | echan->maxburst = config->src_maxburst; | ||
203 | } | ||
204 | |||
205 | return 0; | ||
206 | } | ||
207 | |||
208 | static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
209 | unsigned long arg) | ||
210 | { | ||
211 | int ret = 0; | ||
212 | struct dma_slave_config *config; | ||
213 | struct edma_chan *echan = to_edma_chan(chan); | ||
214 | |||
215 | switch (cmd) { | ||
216 | case DMA_TERMINATE_ALL: | ||
217 | edma_terminate_all(echan); | ||
218 | break; | ||
219 | case DMA_SLAVE_CONFIG: | ||
220 | config = (struct dma_slave_config *)arg; | ||
221 | ret = edma_slave_config(echan, config); | ||
222 | break; | ||
223 | default: | ||
224 | ret = -ENOSYS; | ||
225 | } | ||
226 | |||
227 | return ret; | ||
228 | } | ||
229 | |||
230 | static struct dma_async_tx_descriptor *edma_prep_slave_sg( | ||
231 | struct dma_chan *chan, struct scatterlist *sgl, | ||
232 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
233 | unsigned long tx_flags, void *context) | ||
234 | { | ||
235 | struct edma_chan *echan = to_edma_chan(chan); | ||
236 | struct device *dev = chan->device->dev; | ||
237 | struct edma_desc *edesc; | ||
238 | struct scatterlist *sg; | ||
239 | int i; | ||
240 | int acnt, bcnt, ccnt, src, dst, cidx; | ||
241 | int src_bidx, dst_bidx, src_cidx, dst_cidx; | ||
242 | |||
243 | if (unlikely(!echan || !sgl || !sg_len)) | ||
244 | return NULL; | ||
245 | |||
246 | if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { | ||
247 | dev_err(dev, "Undefined slave buswidth\n"); | ||
248 | return NULL; | ||
249 | } | ||
250 | |||
251 | if (sg_len > MAX_NR_SG) { | ||
252 | dev_err(dev, "Exceeded max SG segments %d > %d\n", | ||
253 | sg_len, MAX_NR_SG); | ||
254 | return NULL; | ||
255 | } | ||
256 | |||
257 | edesc = kzalloc(sizeof(*edesc) + sg_len * | ||
258 | sizeof(edesc->pset[0]), GFP_ATOMIC); | ||
259 | if (!edesc) { | ||
260 | dev_dbg(dev, "Failed to allocate a descriptor\n"); | ||
261 | return NULL; | ||
262 | } | ||
263 | |||
264 | edesc->pset_nr = sg_len; | ||
265 | |||
266 | for_each_sg(sgl, sg, sg_len, i) { | ||
267 | /* Allocate a PaRAM slot, if needed */ | ||
268 | if (echan->slot[i] < 0) { | ||
269 | echan->slot[i] = | ||
270 | edma_alloc_slot(EDMA_CTLR(echan->ch_num), | ||
271 | EDMA_SLOT_ANY); | ||
272 | if (echan->slot[i] < 0) { | ||
273 | dev_err(dev, "Failed to allocate slot\n"); | ||
274 | return NULL; | ||
275 | } | ||
276 | } | ||
277 | |||
278 | acnt = echan->addr_width; | ||
279 | |||
280 | /* | ||
281 | * If the maxburst is equal to the fifo width, use | ||
282 | * A-synced transfers. This allows for large contiguous | ||
283 | * buffer transfers using only one PaRAM set. | ||
284 | */ | ||
285 | if (echan->maxburst == 1) { | ||
286 | edesc->absync = false; | ||
287 | ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); | ||
288 | bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); | ||
289 | if (bcnt) | ||
290 | ccnt++; | ||
291 | else | ||
292 | bcnt = SZ_64K - 1; | ||
293 | cidx = acnt; | ||
294 | /* | ||
295 | * If maxburst is greater than the fifo address_width, | ||
296 | * use AB-synced transfers where A count is the fifo | ||
297 | * address_width and B count is the maxburst. In this | ||
298 | * case, we are limited to transfers of C count frames | ||
299 | * of (address_width * maxburst) where C count is limited | ||
300 | * to SZ_64K-1. This places an upper bound on the length | ||
301 | * of an SG segment that can be handled. | ||
302 | */ | ||
303 | } else { | ||
304 | edesc->absync = true; | ||
305 | bcnt = echan->maxburst; | ||
306 | ccnt = sg_dma_len(sg) / (acnt * bcnt); | ||
307 | if (ccnt > (SZ_64K - 1)) { | ||
308 | dev_err(dev, "Exceeded max SG segment size\n"); | ||
309 | return NULL; | ||
310 | } | ||
311 | cidx = acnt * bcnt; | ||
312 | } | ||
313 | |||
314 | if (direction == DMA_MEM_TO_DEV) { | ||
315 | src = sg_dma_address(sg); | ||
316 | dst = echan->addr; | ||
317 | src_bidx = acnt; | ||
318 | src_cidx = cidx; | ||
319 | dst_bidx = 0; | ||
320 | dst_cidx = 0; | ||
321 | } else { | ||
322 | src = echan->addr; | ||
323 | dst = sg_dma_address(sg); | ||
324 | src_bidx = 0; | ||
325 | src_cidx = 0; | ||
326 | dst_bidx = acnt; | ||
327 | dst_cidx = cidx; | ||
328 | } | ||
329 | |||
330 | edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); | ||
331 | /* Configure A or AB synchronized transfers */ | ||
332 | if (edesc->absync) | ||
333 | edesc->pset[i].opt |= SYNCDIM; | ||
334 | /* If this is the last set, enable completion interrupt flag */ | ||
335 | if (i == sg_len - 1) | ||
336 | edesc->pset[i].opt |= TCINTEN; | ||
337 | |||
338 | edesc->pset[i].src = src; | ||
339 | edesc->pset[i].dst = dst; | ||
340 | |||
341 | edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; | ||
342 | edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; | ||
343 | |||
344 | edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; | ||
345 | edesc->pset[i].ccnt = ccnt; | ||
346 | edesc->pset[i].link_bcntrld = 0xffffffff; | ||
347 | |||
348 | } | ||
349 | |||
350 | return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); | ||
351 | } | ||
352 | |||
353 | static void edma_callback(unsigned ch_num, u16 ch_status, void *data) | ||
354 | { | ||
355 | struct edma_chan *echan = data; | ||
356 | struct device *dev = echan->vchan.chan.device->dev; | ||
357 | struct edma_desc *edesc; | ||
358 | unsigned long flags; | ||
359 | |||
360 | /* Stop the channel */ | ||
361 | edma_stop(echan->ch_num); | ||
362 | |||
363 | switch (ch_status) { | ||
364 | case DMA_COMPLETE: | ||
365 | dev_dbg(dev, "transfer complete on channel %d\n", ch_num); | ||
366 | |||
367 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
368 | |||
369 | edesc = echan->edesc; | ||
370 | if (edesc) { | ||
371 | edma_execute(echan); | ||
372 | vchan_cookie_complete(&edesc->vdesc); | ||
373 | } | ||
374 | |||
375 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
376 | |||
377 | break; | ||
378 | case DMA_CC_ERROR: | ||
379 | dev_dbg(dev, "transfer error on channel %d\n", ch_num); | ||
380 | break; | ||
381 | default: | ||
382 | break; | ||
383 | } | ||
384 | } | ||
385 | |||
386 | /* Alloc channel resources */ | ||
387 | static int edma_alloc_chan_resources(struct dma_chan *chan) | ||
388 | { | ||
389 | struct edma_chan *echan = to_edma_chan(chan); | ||
390 | struct device *dev = chan->device->dev; | ||
391 | int ret; | ||
392 | int a_ch_num; | ||
393 | LIST_HEAD(descs); | ||
394 | |||
395 | a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, | ||
396 | chan, EVENTQ_DEFAULT); | ||
397 | |||
398 | if (a_ch_num < 0) { | ||
399 | ret = -ENODEV; | ||
400 | goto err_no_chan; | ||
401 | } | ||
402 | |||
403 | if (a_ch_num != echan->ch_num) { | ||
404 | dev_err(dev, "failed to allocate requested channel %u:%u\n", | ||
405 | EDMA_CTLR(echan->ch_num), | ||
406 | EDMA_CHAN_SLOT(echan->ch_num)); | ||
407 | ret = -ENODEV; | ||
408 | goto err_wrong_chan; | ||
409 | } | ||
410 | |||
411 | echan->alloced = true; | ||
412 | echan->slot[0] = echan->ch_num; | ||
413 | |||
414 | dev_info(dev, "allocated channel for %u:%u\n", | ||
415 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); | ||
416 | |||
417 | return 0; | ||
418 | |||
419 | err_wrong_chan: | ||
420 | edma_free_channel(a_ch_num); | ||
421 | err_no_chan: | ||
422 | return ret; | ||
423 | } | ||
424 | |||
425 | /* Free channel resources */ | ||
426 | static void edma_free_chan_resources(struct dma_chan *chan) | ||
427 | { | ||
428 | struct edma_chan *echan = to_edma_chan(chan); | ||
429 | struct device *dev = chan->device->dev; | ||
430 | int i; | ||
431 | |||
432 | /* Terminate transfers */ | ||
433 | edma_stop(echan->ch_num); | ||
434 | |||
435 | vchan_free_chan_resources(&echan->vchan); | ||
436 | |||
437 | /* Free EDMA PaRAM slots */ | ||
438 | for (i = 1; i < EDMA_MAX_SLOTS; i++) { | ||
439 | if (echan->slot[i] >= 0) { | ||
440 | edma_free_slot(echan->slot[i]); | ||
441 | echan->slot[i] = -1; | ||
442 | } | ||
443 | } | ||
444 | |||
445 | /* Free EDMA channel */ | ||
446 | if (echan->alloced) { | ||
447 | edma_free_channel(echan->ch_num); | ||
448 | echan->alloced = false; | ||
449 | } | ||
450 | |||
451 | dev_info(dev, "freeing channel for %u\n", echan->ch_num); | ||
452 | } | ||
453 | |||
454 | /* Send pending descriptor to hardware */ | ||
455 | static void edma_issue_pending(struct dma_chan *chan) | ||
456 | { | ||
457 | struct edma_chan *echan = to_edma_chan(chan); | ||
458 | unsigned long flags; | ||
459 | |||
460 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
461 | if (vchan_issue_pending(&echan->vchan) && !echan->edesc) | ||
462 | edma_execute(echan); | ||
463 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
464 | } | ||
465 | |||
466 | static size_t edma_desc_size(struct edma_desc *edesc) | ||
467 | { | ||
468 | int i; | ||
469 | size_t size; | ||
470 | |||
471 | if (edesc->absync) | ||
472 | for (size = i = 0; i < edesc->pset_nr; i++) | ||
473 | size += (edesc->pset[i].a_b_cnt & 0xffff) * | ||
474 | (edesc->pset[i].a_b_cnt >> 16) * | ||
475 | edesc->pset[i].ccnt; | ||
476 | else | ||
477 | size = (edesc->pset[0].a_b_cnt & 0xffff) * | ||
478 | (edesc->pset[0].a_b_cnt >> 16) + | ||
479 | (edesc->pset[0].a_b_cnt & 0xffff) * | ||
480 | (SZ_64K - 1) * edesc->pset[0].ccnt; | ||
481 | |||
482 | return size; | ||
483 | } | ||
484 | |||
485 | /* Check request completion status */ | ||
486 | static enum dma_status edma_tx_status(struct dma_chan *chan, | ||
487 | dma_cookie_t cookie, | ||
488 | struct dma_tx_state *txstate) | ||
489 | { | ||
490 | struct edma_chan *echan = to_edma_chan(chan); | ||
491 | struct virt_dma_desc *vdesc; | ||
492 | enum dma_status ret; | ||
493 | unsigned long flags; | ||
494 | |||
495 | ret = dma_cookie_status(chan, cookie, txstate); | ||
496 | if (ret == DMA_SUCCESS || !txstate) | ||
497 | return ret; | ||
498 | |||
499 | spin_lock_irqsave(&echan->vchan.lock, flags); | ||
500 | vdesc = vchan_find_desc(&echan->vchan, cookie); | ||
501 | if (vdesc) { | ||
502 | txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); | ||
503 | } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { | ||
504 | struct edma_desc *edesc = echan->edesc; | ||
505 | txstate->residue = edma_desc_size(edesc); | ||
506 | } else { | ||
507 | txstate->residue = 0; | ||
508 | } | ||
509 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | ||
510 | |||
511 | return ret; | ||
512 | } | ||
513 | |||
514 | static void __init edma_chan_init(struct edma_cc *ecc, | ||
515 | struct dma_device *dma, | ||
516 | struct edma_chan *echans) | ||
517 | { | ||
518 | int i, j; | ||
519 | |||
520 | for (i = 0; i < EDMA_CHANS; i++) { | ||
521 | struct edma_chan *echan = &echans[i]; | ||
522 | echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); | ||
523 | echan->ecc = ecc; | ||
524 | echan->vchan.desc_free = edma_desc_free; | ||
525 | |||
526 | vchan_init(&echan->vchan, dma); | ||
527 | |||
528 | INIT_LIST_HEAD(&echan->node); | ||
529 | for (j = 0; j < EDMA_MAX_SLOTS; j++) | ||
530 | echan->slot[j] = -1; | ||
531 | } | ||
532 | } | ||
533 | |||
534 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, | ||
535 | struct device *dev) | ||
536 | { | ||
537 | dma->device_prep_slave_sg = edma_prep_slave_sg; | ||
538 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; | ||
539 | dma->device_free_chan_resources = edma_free_chan_resources; | ||
540 | dma->device_issue_pending = edma_issue_pending; | ||
541 | dma->device_tx_status = edma_tx_status; | ||
542 | dma->device_control = edma_control; | ||
543 | dma->dev = dev; | ||
544 | |||
545 | INIT_LIST_HEAD(&dma->channels); | ||
546 | } | ||
547 | |||
548 | static int __devinit edma_probe(struct platform_device *pdev) | ||
549 | { | ||
550 | struct edma_cc *ecc; | ||
551 | int ret; | ||
552 | |||
553 | ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); | ||
554 | if (!ecc) { | ||
555 | dev_err(&pdev->dev, "Can't allocate controller\n"); | ||
556 | return -ENOMEM; | ||
557 | } | ||
558 | |||
559 | ecc->ctlr = pdev->id; | ||
560 | ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); | ||
561 | if (ecc->dummy_slot < 0) { | ||
562 | dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); | ||
563 | return -EIO; | ||
564 | } | ||
565 | |||
566 | dma_cap_zero(ecc->dma_slave.cap_mask); | ||
567 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | ||
568 | |||
569 | edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); | ||
570 | |||
571 | edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); | ||
572 | |||
573 | ret = dma_async_device_register(&ecc->dma_slave); | ||
574 | if (ret) | ||
575 | goto err_reg1; | ||
576 | |||
577 | platform_set_drvdata(pdev, ecc); | ||
578 | |||
579 | dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); | ||
580 | |||
581 | return 0; | ||
582 | |||
583 | err_reg1: | ||
584 | edma_free_slot(ecc->dummy_slot); | ||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | static int __devexit edma_remove(struct platform_device *pdev) | ||
589 | { | ||
590 | struct device *dev = &pdev->dev; | ||
591 | struct edma_cc *ecc = dev_get_drvdata(dev); | ||
592 | |||
593 | dma_async_device_unregister(&ecc->dma_slave); | ||
594 | edma_free_slot(ecc->dummy_slot); | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | static struct platform_driver edma_driver = { | ||
600 | .probe = edma_probe, | ||
601 | .remove = __devexit_p(edma_remove), | ||
602 | .driver = { | ||
603 | .name = "edma-dma-engine", | ||
604 | .owner = THIS_MODULE, | ||
605 | }, | ||
606 | }; | ||
607 | |||
608 | bool edma_filter_fn(struct dma_chan *chan, void *param) | ||
609 | { | ||
610 | if (chan->device->dev->driver == &edma_driver.driver) { | ||
611 | struct edma_chan *echan = to_edma_chan(chan); | ||
612 | unsigned ch_req = *(unsigned *)param; | ||
613 | return ch_req == echan->ch_num; | ||
614 | } | ||
615 | return false; | ||
616 | } | ||
617 | EXPORT_SYMBOL(edma_filter_fn); | ||
618 | |||
619 | static struct platform_device *pdev0, *pdev1; | ||
620 | |||
621 | static const struct platform_device_info edma_dev_info0 = { | ||
622 | .name = "edma-dma-engine", | ||
623 | .id = 0, | ||
624 | .dma_mask = DMA_BIT_MASK(32), | ||
625 | }; | ||
626 | |||
627 | static const struct platform_device_info edma_dev_info1 = { | ||
628 | .name = "edma-dma-engine", | ||
629 | .id = 1, | ||
630 | .dma_mask = DMA_BIT_MASK(32), | ||
631 | }; | ||
632 | |||
633 | static int edma_init(void) | ||
634 | { | ||
635 | int ret = platform_driver_register(&edma_driver); | ||
636 | |||
637 | if (ret == 0) { | ||
638 | pdev0 = platform_device_register_full(&edma_dev_info0); | ||
639 | if (IS_ERR(pdev0)) { | ||
640 | platform_driver_unregister(&edma_driver); | ||
641 | ret = PTR_ERR(pdev0); | ||
642 | goto out; | ||
643 | } | ||
644 | } | ||
645 | |||
646 | if (EDMA_CTLRS == 2) { | ||
647 | pdev1 = platform_device_register_full(&edma_dev_info1); | ||
648 | if (IS_ERR(pdev1)) { | ||
649 | platform_driver_unregister(&edma_driver); | ||
650 | platform_device_unregister(pdev0); | ||
651 | ret = PTR_ERR(pdev1); | ||
652 | } | ||
653 | } | ||
654 | |||
655 | out: | ||
656 | return ret; | ||
657 | } | ||
658 | subsys_initcall(edma_init); | ||
659 | |||
660 | static void __exit edma_exit(void) | ||
661 | { | ||
662 | platform_device_unregister(pdev0); | ||
663 | if (pdev1) | ||
664 | platform_device_unregister(pdev1); | ||
665 | platform_driver_unregister(&edma_driver); | ||
666 | } | ||
667 | module_exit(edma_exit); | ||
668 | |||
669 | MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); | ||
670 | MODULE_DESCRIPTION("TI EDMA DMA engine driver"); | ||
671 | MODULE_LICENSE("GPL v2"); | ||
672 |
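To make the A-synced versus AB-synced selection in edma_prep_slave_sg() above more concrete, here is a standalone sketch of the same count arithmetic. This is a user-space illustration, not kernel code; the buffer length, element width, and burst values are arbitrary examples, and SZ_64K mirrors the kernel constant.

#include <stdbool.h>
#include <stdio.h>

#define SZ_64K 0x10000

/*
 * Mirror of the count selection in edma_prep_slave_sg():
 *   maxburst == 1 -> A-synced:  acnt = element width; the element count is
 *                    split into bcnt elements per frame and ccnt frames
 *   maxburst  > 1 -> AB-synced: acnt = element width, bcnt = maxburst,
 *                    ccnt = number of (acnt * bcnt) frames (limited to 64K-1)
 */
static void edma_counts(unsigned int len, unsigned int addr_width,
			unsigned int maxburst)
{
	unsigned int acnt = addr_width, bcnt, ccnt;
	bool absync;

	if (maxburst == 1) {
		absync = false;
		ccnt = len / acnt / (SZ_64K - 1);
		bcnt = len / acnt - ccnt * (SZ_64K - 1);
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
	} else {
		absync = true;
		bcnt = maxburst;
		ccnt = len / (acnt * bcnt);
	}

	printf("len=%u width=%u burst=%u -> %s acnt=%u bcnt=%u ccnt=%u\n",
	       len, addr_width, maxburst, absync ? "AB-sync" : "A-sync",
	       acnt, bcnt, ccnt);
}

int main(void)
{
	edma_counts(4096, 4, 1);	/* A-sync:  acnt=4, bcnt=1024, ccnt=1 */
	edma_counts(4096, 4, 8);	/* AB-sync: acnt=4, bcnt=8,    ccnt=128 */
	return 0;
}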
include/linux/edma.h
File was created | 1 | /* | |
2 | * TI EDMA DMA engine driver | ||
3 | * | ||
4 | * Copyright 2012 Texas Instruments | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation version 2. | ||
9 | * | ||
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
11 | * kind, whether express or implied; without even the implied warranty | ||
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | #ifndef __LINUX_EDMA_H | ||
16 | #define __LINUX_EDMA_H | ||
17 | |||
18 | struct dma_chan; | ||
19 | |||
20 | #if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE) | ||
21 | bool edma_filter_fn(struct dma_chan *, void *); | ||
22 | #else | ||
23 | static inline bool edma_filter_fn(struct dma_chan *chan, void *param) | ||
24 | { | ||
25 | return false; | ||
26 | } | ||
27 | #endif | ||
28 | |||
29 | #endif | ||
30 |