Blame view
drivers/dma/imx-dma.c
34.6 KB
ce9c28ca6
|
1 2 3 4 5 6 7 8 9 |
// SPDX-License-Identifier: GPL-2.0+ // // drivers/dma/imx-dma.c // // This file contains a driver for the Freescale i.MX DMA engine // found on i.MX1/21/27 // // Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> // Copyright 2012 Javier Martin, Vista Silicon <javier.martin@vista-silicon.com> |
7331205a9
|
10 |
#include <linux/err.h> |
1f1846c6c
|
11 12 13 14 15 16 17 18 19 |
#include <linux/init.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/platform_device.h> |
6bd081277
|
20 |
#include <linux/clk.h> |
1f1846c6c
|
21 |
#include <linux/dmaengine.h> |
5c45ad77f
|
22 |
#include <linux/module.h> |
290ad0f9d
|
23 24 |
#include <linux/of_device.h> #include <linux/of_dma.h> |
1f1846c6c
|
25 26 |
#include <asm/irq.h> |
82906b13a
|
27 |
#include <linux/platform_data/dma-imx.h> |
1f1846c6c
|
28 |
|
d2ebfb335
|
29 |
#include "dmaengine.h" |
9e15db7ce
|
30 |
#define IMXDMA_MAX_CHAN_DESCRIPTORS	16
#define IMX_DMA_CHANNELS  16

/* The engine has two hardware 2D addressing parameter sets (A and B). */
#define IMX_DMA_2D_SLOTS	2
#define IMX_DMA_2D_SLOT_A	0
#define IMX_DMA_2D_SLOT_B	1

/* Sentinel length for cyclic transfers: the residue is never decremented. */
#define IMX_DMA_LENGTH_LOOP	((unsigned int)-1)
#define IMX_DMA_MEMSIZE_32	(0 << 4)
#define IMX_DMA_MEMSIZE_8	(1 << 4)
#define IMX_DMA_MEMSIZE_16	(2 << 4)
#define IMX_DMA_TYPE_LINEAR	(0 << 10)
#define IMX_DMA_TYPE_2D		(1 << 10)
#define IMX_DMA_TYPE_FIFO	(2 << 10)

/* Software error codes accumulated by imxdma_err_handler(). */
#define IMX_DMA_ERR_BURST     (1 << 0)
#define IMX_DMA_ERR_REQUEST   (1 << 1)
#define IMX_DMA_ERR_TRANSFER  (1 << 2)
#define IMX_DMA_ERR_BUFFER    (1 << 3)
#define IMX_DMA_ERR_TIMEOUT   (1 << 4)

/* Global engine registers. */
#define DMA_DCR     0x00		/* Control Register */
#define DMA_DISR    0x04		/* Interrupt status Register */
#define DMA_DIMR    0x08		/* Interrupt mask Register */
#define DMA_DBTOSR  0x0c		/* Burst timeout status Register */
#define DMA_DRTOSR  0x10		/* Request timeout Register */
#define DMA_DSESR   0x14		/* Transfer Error Status Register */
#define DMA_DBOSR   0x18		/* Buffer overflow status Register */
#define DMA_DBTOCR  0x1c		/* Burst timeout control Register */
#define DMA_WSRA    0x40		/* W-Size Register A */
#define DMA_XSRA    0x44		/* X-Size Register A */
#define DMA_YSRA    0x48		/* Y-Size Register A */
#define DMA_WSRB    0x4c		/* W-Size Register B */
#define DMA_XSRB    0x50		/* X-Size Register B */
#define DMA_YSRB    0x54		/* Y-Size Register B */
/* Per-channel registers: one 0x40-byte bank per channel (hence << 6). */
#define DMA_SAR(x)  (0x80 + ((x) << 6))	/* Source Address Registers */
#define DMA_DAR(x)  (0x84 + ((x) << 6))	/* Destination Address Registers */
#define DMA_CNTR(x) (0x88 + ((x) << 6))	/* Count Registers */
#define DMA_CCR(x)  (0x8c + ((x) << 6))	/* Control Registers */
#define DMA_RSSR(x) (0x90 + ((x) << 6))	/* Request source select Registers */
#define DMA_BLR(x)  (0x94 + ((x) << 6))	/* Burst length Registers */
/* NOTE(review): RTOR and BUCR intentionally share offset 0x98 -- the
 * hardware multiplexes them depending on mode; confirm against the
 * i.MX reference manual before "fixing" this. */
#define DMA_RTOR(x) (0x98 + ((x) << 6))	/* Request timeout Registers */
#define DMA_BUCR(x) (0x98 + ((x) << 6))	/* Bus Utilization Registers */
#define DMA_CCNR(x) (0x9C + ((x) << 6))	/* Channel counter Registers */

#define DCR_DRST           (1<<1)
#define DCR_DEN            (1<<0)
#define DBTOCR_EN          (1<<15)
#define DBTOCR_CNT(x)      ((x) & 0x7fff)
#define CNTR_CNT(x)        ((x) & 0xffffff)
/* Channel Control Register (CCR) bit fields. */
#define CCR_ACRPT          (1<<14)
#define CCR_DMOD_LINEAR    (0x0 << 12)
#define CCR_DMOD_2D        (0x1 << 12)
#define CCR_DMOD_FIFO      (0x2 << 12)
#define CCR_DMOD_EOBFIFO   (0x3 << 12)
#define CCR_SMOD_LINEAR    (0x0 << 10)
#define CCR_SMOD_2D        (0x1 << 10)
#define CCR_SMOD_FIFO      (0x2 << 10)
#define CCR_SMOD_EOBFIFO   (0x3 << 10)
#define CCR_MDIR_DEC       (1<<9)
#define CCR_MSEL_B         (1<<8)
#define CCR_DSIZ_32        (0x0 << 6)
#define CCR_DSIZ_8         (0x1 << 6)
#define CCR_DSIZ_16        (0x2 << 6)
#define CCR_SSIZ_32        (0x0 << 4)
#define CCR_SSIZ_8         (0x1 << 4)
#define CCR_SSIZ_16        (0x2 << 4)
#define CCR_REN            (1<<3)
#define CCR_RPT            (1<<2)
#define CCR_FRC            (1<<1)
#define CCR_CEN            (1<<0)

#define RTOR_EN            (1<<15)
#define RTOR_CLK           (1<<14)
#define RTOR_PSC           (1<<13)
9e15db7ce
|
102 103 104 105 106 107 108 |
/* Kind of transfer a software descriptor was prepared for. */
enum imxdma_prep_type {
	IMXDMA_DESC_MEMCPY,
	IMXDMA_DESC_INTERLEAVED,
	IMXDMA_DESC_SLAVE_SG,
	IMXDMA_DESC_CYCLIC,
};
f606ab897
|
109 110 111 112 113 114 |
/*
 * One hardware 2D addressing parameter set (X/Y/W size registers).
 * count tracks how many channels currently share this slot.
 */
struct imx_dma_2d_config {
	u16	xsr;
	u16	ysr;
	u16	wsr;
	int	count;
};
9e15db7ce
|
115 116 117 118 119 120 121 |
/*
 * Software descriptor for one prepared transfer.  Descriptors migrate
 * between the per-channel ld_free, ld_queue and ld_active lists.
 */
struct imxdma_desc {
	struct list_head		node;	/* linkage on one ld_* list */
	struct dma_async_tx_descriptor	desc;	/* generic dmaengine descriptor */
	enum dma_status			status;
	dma_addr_t			src;	/* source bus address */
	dma_addr_t			dest;	/* destination bus address */
	size_t				len;	/* total bytes; IMX_DMA_LENGTH_LOOP for cyclic */
	enum dma_transfer_direction	direction;
	enum imxdma_prep_type		type;
	/* For memcpy and interleaved */
	unsigned int			config_port;
	unsigned int			config_mem;
	/* For interleaved transfers */
	unsigned int			x;
	unsigned int			y;
	unsigned int			w;
	/* For slave sg and cyclic */
	struct scatterlist		*sg;
	unsigned int			sgcount;
};
1f1846c6c
|
135 |
/* Per-channel driver state; wraps the generic struct dma_chan. */
struct imxdma_channel {
	int				hw_chaining;	/* non-zero: hw descriptor chaining allowed (i.MX27 only) */
	struct timer_list		watchdog;	/* guards hw-chained transfers against stalls */
	struct imxdma_engine		*imxdma;	/* back-pointer to the owning engine */
	unsigned int			channel;	/* hardware channel index */

	struct tasklet_struct		dma_tasklet;	/* completion / error bottom half */
	struct list_head		ld_free;	/* unused descriptors */
	struct list_head		ld_queue;	/* submitted, not yet started */
	struct list_head		ld_active;	/* currently running (head only) */
	int				descs_allocated;
	enum dma_slave_buswidth		word_size;	/* peripheral access width */
	dma_addr_t			per_address;	/* peripheral FIFO bus address */
	u32				watermark_level;
	struct dma_chan			chan;		/* embedded dmaengine channel */
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	int				dma_request;	/* hardware request line */
	struct scatterlist		*sg_list;	/* synthetic circular sg for cyclic mode */
	u32				ccr_from_device; /* precomputed CCR for dev->mem */
	u32				ccr_to_device;	 /* precomputed CCR for mem->dev */
	bool				enabled_2d;	/* holds a 2D slot right now */
	int				slot_2d;	/* which 2D slot, if enabled_2d */
	unsigned int			irq;
	struct dma_slave_config		config;		/* cached slave config, applied at prep time */
};
e51d0f0ac
|
161 162 163 164 165 |
/* Supported SoC variants of the DMA engine. */
enum imx_dma_type {
	IMX1_DMA,
	IMX21_DMA,
	IMX27_DMA,
};
1f1846c6c
|
166 167 |
/* Device-wide driver state: one instance per DMA engine. */
struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;	/* registered with the dmaengine core */
	void __iomem			*base;		/* mapped register window */
	struct clk			*dma_ahb;
	struct clk			*dma_ipg;
	spinlock_t			lock;		/* protects channel lists and 2D slots */
	struct imx_dma_2d_config	slots_2d[IMX_DMA_2D_SLOTS];
	struct imxdma_channel		channel[IMX_DMA_CHANNELS];
	enum imx_dma_type		devtype;
	unsigned int			irq;
	unsigned int			irq_err;
};
290ad0f9d
|
180 181 182 183 |
/* Match data used when translating a DT request into a channel. */
struct imxdma_filter_data {
	struct imxdma_engine	*imxdma;
	int			 request;	/* hardware request line to match */
};
afe7cded9
|
184 |
/* Platform-bus id table: maps device names to SoC variants. */
static const struct platform_device_id imx_dma_devtype[] = {
	{
		.name = "imx1-dma",
		.driver_data = IMX1_DMA,
	}, {
		.name = "imx21-dma",
		.driver_data = IMX21_DMA,
	}, {
		.name = "imx27-dma",
		.driver_data = IMX27_DMA,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
290ad0f9d
|
199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 |
/* Device-tree match table; .data points into imx_dma_devtype above. */
static const struct of_device_id imx_dma_of_dev_id[] = {
	{
		.compatible = "fsl,imx1-dma",
		.data = &imx_dma_devtype[IMX1_DMA],
	}, {
		.compatible = "fsl,imx21-dma",
		.data = &imx_dma_devtype[IMX21_DMA],
	}, {
		.compatible = "fsl,imx27-dma",
		.data = &imx_dma_devtype[IMX27_DMA],
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, imx_dma_of_dev_id);
e51d0f0ac
|
214 215 216 217 |
/* Return non-zero when this engine is the i.MX1 variant. */
static inline int is_imx1_dma(struct imxdma_engine *imxdma)
{
	if (imxdma->devtype == IMX1_DMA)
		return 1;

	return 0;
}
e51d0f0ac
|
218 219 220 221 |
/* Return non-zero when this engine is the i.MX27 variant. */
static inline int is_imx27_dma(struct imxdma_engine *imxdma)
{
	if (imxdma->devtype == IMX27_DMA)
		return 1;

	return 0;
}
1f1846c6c
|
222 223 224 225 |
/* Map a generic dma_chan back to the imxdma channel embedding it. */
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac;

	imxdmac = container_of(chan, struct imxdma_channel, chan);
	return imxdmac;
}
9e15db7ce
|
226 |
/*
 * True iff the descriptor at the head of the channel's active list is a
 * cyclic transfer.  Only the head is inspected because the channel runs
 * one descriptor at a time.
 * NOTE(review): callers appear to rely on the engine lock (or irq
 * context) to keep ld_active stable -- confirm at each call site.
 */
static inline bool imxdma_chan_is_doing_cyclic(struct imxdma_channel *imxdmac)
{
	struct imxdma_desc *desc;

	if (!list_empty(&imxdmac->ld_active)) {
		desc = list_first_entry(&imxdmac->ld_active,
					struct imxdma_desc, node);
		if (desc->type == IMXDMA_DESC_CYCLIC)
			return true;
	}
	return false;
}
6bd081277
|
238 |
|
cd5cf9da0
|
239 240 241 |
static void imx_dmav1_writel(struct imxdma_engine *imxdma, unsigned val, unsigned offset) |
6bd081277
|
242 |
{ |
cd5cf9da0
|
243 |
__raw_writel(val, imxdma->base + offset); |
6bd081277
|
244 |
} |
cd5cf9da0
|
245 |
static unsigned imx_dmav1_readl(struct imxdma_engine *imxdma, unsigned offset) |
1f1846c6c
|
246 |
{ |
cd5cf9da0
|
247 |
return __raw_readl(imxdma->base + offset); |
6bd081277
|
248 |
} |
1f1846c6c
|
249 |
|
2d9c2fc59
|
250 |
static int imxdma_hw_chain(struct imxdma_channel *imxdmac) |
6bd081277
|
251 |
{ |
e51d0f0ac
|
252 253 254 |
struct imxdma_engine *imxdma = imxdmac->imxdma; if (is_imx27_dma(imxdma)) |
2d9c2fc59
|
255 |
return imxdmac->hw_chaining; |
6bd081277
|
256 257 258 259 260 261 262 |
else return 0; } /* * imxdma_sg_next - prepare next chunk for scatter-gather DMA emulation */ |
452fd6dc8
|
263 |
/* Program source/destination/count registers for the current sg entry. */
static inline void imxdma_sg_next(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct scatterlist *sg = d->sg;
	size_t now;

	/* Transfer at most one sg entry's worth of the remaining length. */
	now = min_t(size_t, d->len, sg_dma_len(sg));
	/* Cyclic descriptors use the LENGTH_LOOP sentinel and never shrink. */
	if (d->len != IMX_DMA_LENGTH_LOOP)
		d->len -= now;

	/* The memory side of the transfer gets the sg address; the
	 * peripheral side register was set up in imxdma_xfer_desc(). */
	if (d->direction == DMA_DEV_TO_MEM)
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_DAR(imxdmac->channel));
	else
		imx_dmav1_writel(imxdma, sg->dma_address,
				 DMA_SAR(imxdmac->channel));

	imx_dmav1_writel(imxdma, now, DMA_CNTR(imxdmac->channel));

	dev_dbg(imxdma->dev, " %s channel: %d dst 0x%08x, src 0x%08x, "
		"size 0x%08x ", __func__, imxdmac->channel,
		 imx_dmav1_readl(imxdma, DMA_DAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_SAR(imxdmac->channel)),
		 imx_dmav1_readl(imxdma, DMA_CNTR(imxdmac->channel)));
}
2efc3449d
|
290 |
/* Start the hardware on descriptor @d: ack stale status, unmask the
 * channel interrupt and set CCR_CEN.  With hw chaining available, the
 * next sg entry is pre-loaded so the engine can repeat automatically. */
static void imxdma_enable_hw(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d ", __func__, channel);

	local_irq_save(flags);

	/* Clear any pending interrupt for this channel, then unmask it. */
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) &
			 ~(1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) |
			 CCR_CEN | CCR_ACRPT, DMA_CCR(channel));

	if (!is_imx1_dma(imxdma) &&
			d->sg && imxdma_hw_chain(imxdmac)) {
		/* Queue the following sg entry into the repeat registers. */
		d->sg = sg_next(d->sg);
		if (d->sg) {
			u32 tmp;
			imxdma_sg_next(d);
			tmp = imx_dmav1_readl(imxdma, DMA_CCR(channel));
			imx_dmav1_writel(imxdma, tmp | CCR_RPT | CCR_ACRPT,
					 DMA_CCR(channel));
		}
	}

	local_irq_restore(flags);
}

/* Stop the channel: mask and ack its interrupt and clear CCR_CEN.
 * Also kills the hw-chaining watchdog if it was armed. */
static void imxdma_disable_hw(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;
	unsigned long flags;

	dev_dbg(imxdma->dev, "%s channel %d ", __func__, channel);

	if (imxdma_hw_chain(imxdmac))
		del_timer(&imxdmac->watchdog);

	local_irq_save(flags);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_DIMR) |
			 (1 << channel), DMA_DIMR);
	imx_dmav1_writel(imxdma, imx_dmav1_readl(imxdma, DMA_CCR(channel)) &
			 ~CCR_CEN, DMA_CCR(channel));
	imx_dmav1_writel(imxdma, 1 << channel, DMA_DISR);
	local_irq_restore(flags);
}
bcdc4bd35
|
340 |
/* Timer callback for stalled hw-chained transfers: force the channel
 * off and let the tasklet report/clean up. */
static void imxdma_watchdog(struct timer_list *t)
{
	struct imxdma_channel *imxdmac = from_timer(imxdmac, t, watchdog);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int channel = imxdmac->channel;

	/* Writing 0 clears CCR_CEN (and every other CCR bit). */
	imx_dmav1_writel(imxdma, 0, DMA_CCR(channel));

	/* Tasklet watchdog error handler */
	tasklet_schedule(&imxdmac->dma_tasklet);
	dev_dbg(imxdma->dev, "channel %d: watchdog timeout! ",
		imxdmac->channel);
}
6bd081277
|
354 |
/*
 * Error interrupt handler: collects the four per-channel error status
 * registers, acknowledges each reported condition, and kicks the
 * affected channel's tasklet.
 */
static irqreturn_t imxdma_err_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	unsigned int err_mask;
	int i, disr;
	int errcode;

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	/* Union of all channels with any error pending. */
	err_mask = imx_dmav1_readl(imxdma, DMA_DBTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DRTOSR) |
		   imx_dmav1_readl(imxdma, DMA_DSESR)  |
		   imx_dmav1_readl(imxdma, DMA_DBOSR);

	if (!err_mask)
		return IRQ_HANDLED;

	/* Ack only the interrupts belonging to erroring channels. */
	imx_dmav1_writel(imxdma, disr & err_mask, DMA_DISR);

	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (!(err_mask & (1 << i)))
			continue;
		errcode = 0;

		/* Each status register is write-one-to-clear. */
		if (imx_dmav1_readl(imxdma, DMA_DBTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBTOSR);
			errcode |= IMX_DMA_ERR_BURST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DRTOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DRTOSR);
			errcode |= IMX_DMA_ERR_REQUEST;
		}
		if (imx_dmav1_readl(imxdma, DMA_DSESR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DSESR);
			errcode |= IMX_DMA_ERR_TRANSFER;
		}
		if (imx_dmav1_readl(imxdma, DMA_DBOSR) & (1 << i)) {
			imx_dmav1_writel(imxdma, 1 << i, DMA_DBOSR);
			errcode |= IMX_DMA_ERR_BUFFER;
		}
		/* Tasklet error handler */
		tasklet_schedule(&imxdma->channel[i].dma_tasklet);

		dev_warn(imxdma->dev,
			 "DMA timeout on channel %d -%s%s%s%s ", i,
			 errcode & IMX_DMA_ERR_BURST ?    " burst" : "",
			 errcode & IMX_DMA_ERR_REQUEST ?  " request" : "",
			 errcode & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
			 errcode & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
	}
	return IRQ_HANDLED;
}
6bd081277
|
403 |
/*
 * Per-channel interrupt work: advance to the next sg entry if there is
 * one (re-arming repeat mode or restarting the channel as appropriate),
 * otherwise stop the channel and schedule the completion tasklet.
 */
static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
{
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int chno = imxdmac->channel;
	struct imxdma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	if (list_empty(&imxdmac->ld_active)) {
		/* Nothing running (e.g. terminated) -- just stop the hw. */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		goto out;
	}

	desc = list_first_entry(&imxdmac->ld_active,
				struct imxdma_desc,
				node);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	if (desc->sg) {
		u32 tmp;
		desc->sg = sg_next(desc->sg);

		if (desc->sg) {
			/* More chunks: program the next one. */
			imxdma_sg_next(desc);

			tmp = imx_dmav1_readl(imxdma, DMA_CCR(chno));

			if (imxdma_hw_chain(imxdmac)) {
				/* FIXME: The timeout should probably be
				 * configurable
				 */
				mod_timer(&imxdmac->watchdog,
					jiffies + msecs_to_jiffies(500));

				tmp |= CCR_CEN | CCR_RPT | CCR_ACRPT;
				imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));
			} else {
				/* No hw chaining: pulse CEN low, then the
				 * write below restarts the channel. */
				imx_dmav1_writel(imxdma, tmp & ~CCR_CEN,
						 DMA_CCR(chno));
				tmp |= CCR_CEN;
			}

			imx_dmav1_writel(imxdma, tmp, DMA_CCR(chno));

			if (imxdma_chan_is_doing_cyclic(imxdmac))
				/* Tasklet progression */
				tasklet_schedule(&imxdmac->dma_tasklet);

			return;
		}

		/* Last chunk done while hw chaining: the engine already
		 * finished, just drop the stall watchdog. */
		if (imxdma_hw_chain(imxdmac)) {
			del_timer(&imxdmac->watchdog);
			return;
		}
	}

out:
	imx_dmav1_writel(imxdma, 0, DMA_CCR(chno));
	/* Tasklet irq */
	tasklet_schedule(&imxdmac->dma_tasklet);
}
6bd081277
|
462 463 464 |
/* Top-level interrupt handler: dispatch every channel whose DISR bit
 * is set.  On non-i.MX1 parts the error conditions share this line, so
 * the error handler is run first. */
static irqreturn_t dma_irq_handler(int irq, void *dev_id)
{
	struct imxdma_engine *imxdma = dev_id;
	int i, disr;

	if (!is_imx1_dma(imxdma))
		imxdma_err_handler(irq, dev_id);

	disr = imx_dmav1_readl(imxdma, DMA_DISR);

	dev_dbg(imxdma->dev, "%s called, disr=0x%08x ", __func__, disr);

	/* Write-one-to-clear ack of everything we are about to service. */
	imx_dmav1_writel(imxdma, disr, DMA_DISR);
	for (i = 0; i < IMX_DMA_CHANNELS; i++) {
		if (disr & (1 << i))
			dma_irq_handle_channel(&imxdma->channel[i]);
	}

	return IRQ_HANDLED;
}
9e15db7ce
|
481 482 483 |
/*
 * Program the hardware for descriptor @d and start it.
 * Returns 0 on success, -EBUSY if no 2D slot is free for an
 * interleaved transfer, -EINVAL on a bad descriptor.
 * NOTE(review): callers appear to hold the engine lock while the 2D
 * slot accounting runs -- confirm before changing locking.
 */
static int imxdma_xfer_desc(struct imxdma_desc *d)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int slot = -1;
	int i;

	/* Configure and enable */
	switch (d->type) {
	case IMXDMA_DESC_INTERLEAVED:
		/* Try to get a free 2D slot */
		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
			/* A busy slot may still be shared when its X/Y/W
			 * parameters exactly match ours. */
			if ((imxdma->slots_2d[i].count > 0) &&
			((imxdma->slots_2d[i].xsr != d->x) ||
			(imxdma->slots_2d[i].ysr != d->y) ||
			(imxdma->slots_2d[i].wsr != d->w)))
				continue;
			slot = i;
			break;
		}
		if (slot < 0)
			return -EBUSY;

		imxdma->slots_2d[slot].xsr = d->x;
		imxdma->slots_2d[slot].ysr = d->y;
		imxdma->slots_2d[slot].wsr = d->w;
		imxdma->slots_2d[slot].count++;

		imxdmac->slot_2d = slot;
		imxdmac->enabled_2d = true;

		/* CCR_MSEL_B selects parameter set B over set A. */
		if (slot == IMX_DMA_2D_SLOT_A) {
			d->config_mem &= ~CCR_MSEL_B;
			d->config_port &= ~CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRA);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRA);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRA);
		} else {
			d->config_mem |= CCR_MSEL_B;
			d->config_port |= CCR_MSEL_B;
			imx_dmav1_writel(imxdma, d->x, DMA_XSRB);
			imx_dmav1_writel(imxdma, d->y, DMA_YSRB);
			imx_dmav1_writel(imxdma, d->w, DMA_WSRB);
		}
		/*
		 * We fall-through here intentionally, since a 2D transfer is
		 * similar to MEMCPY just adding the 2D slot configuration.
		 */
		/* Fall through */
	case IMXDMA_DESC_MEMCPY:
		imx_dmav1_writel(imxdma, d->src, DMA_SAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->dest, DMA_DAR(imxdmac->channel));
		imx_dmav1_writel(imxdma, d->config_mem | (d->config_port << 2),
			 DMA_CCR(imxdmac->channel));

		imx_dmav1_writel(imxdma, d->len, DMA_CNTR(imxdmac->channel));

		dev_dbg(imxdma->dev,
			"%s channel: %d dest=0x%08llx src=0x%08llx dma_length=%zu ",
			__func__, imxdmac->channel,
			(unsigned long long)d->dest,
			(unsigned long long)d->src, d->len);

		break;
	/* Cyclic transfer is the same as slave_sg with special sg configuration. */
	case IMXDMA_DESC_CYCLIC:
	case IMXDMA_DESC_SLAVE_SG:
		/* The peripheral side address and the precomputed CCR were
		 * set up by imxdma_config_write(). */
		if (d->direction == DMA_DEV_TO_MEM) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_SAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_from_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (dev2mem) ",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else if (d->direction == DMA_MEM_TO_DEV) {
			imx_dmav1_writel(imxdma, imxdmac->per_address,
					 DMA_DAR(imxdmac->channel));
			imx_dmav1_writel(imxdma, imxdmac->ccr_to_device,
					 DMA_CCR(imxdmac->channel));

			dev_dbg(imxdma->dev,
				"%s channel: %d sg=%p sgcount=%d total length=%zu dev_addr=0x%08llx (mem2dev) ",
				__func__, imxdmac->channel,
				d->sg, d->sgcount, d->len,
				(unsigned long long)imxdmac->per_address);
		} else {
			dev_err(imxdma->dev, "%s channel: %d bad dma mode ",
				__func__, imxdmac->channel);
			return -EINVAL;
		}

		/* Program the memory side from the first sg entry. */
		imxdma_sg_next(d);

		break;
	default:
		return -EINVAL;
	}
	imxdma_enable_hw(d);
	return 0;
}
9e15db7ce
|
586 |
/*
 * Completion bottom half: complete the head descriptor (unless cyclic),
 * recycle it, start the next queued descriptor, and finally invoke the
 * client callback outside the lock.
 */
static void imxdma_tasklet(unsigned long data)
{
	struct imxdma_channel *imxdmac = (void *)data;
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *next_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	if (list_empty(&imxdmac->ld_active)) {
		/* Someone might have called terminate all */
		spin_unlock_irqrestore(&imxdma->lock, flags);
		return;
	}
	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);

	/* If we are dealing with a cyclic descriptor, keep it on ld_active
	 * and dont mark the descriptor as complete.
	 * Only in non-cyclic cases it would be marked as complete
	 */
	if (imxdma_chan_is_doing_cyclic(imxdmac))
		goto out;
	else
		dma_cookie_complete(&desc->desc);

	/* Free 2D slot if it was an interleaved transfer */
	if (imxdmac->enabled_2d) {
		imxdma->slots_2d[imxdmac->slot_2d].count--;
		imxdmac->enabled_2d = false;
	}

	list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);

	if (!list_empty(&imxdmac->ld_queue)) {
		next_desc = list_first_entry(&imxdmac->ld_queue,
					     struct imxdma_desc, node);
		list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
		if (imxdma_xfer_desc(next_desc) < 0)
			dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc ",
				 __func__, imxdmac->channel);
	}
out:
	spin_unlock_irqrestore(&imxdma->lock, flags);

	/* Callback runs unlocked; for cyclic transfers this fires once
	 * per period while the descriptor stays active. */
	dmaengine_desc_get_callback_invoke(&desc->desc, NULL);
}
502c2ef26
|
631 |
static int imxdma_terminate_all(struct dma_chan *chan) |
1f1846c6c
|
632 633 |
{ struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
cd5cf9da0
|
634 |
struct imxdma_engine *imxdma = imxdmac->imxdma; |
9e15db7ce
|
635 |
unsigned long flags; |
9e15db7ce
|
636 |
|
502c2ef26
|
637 |
imxdma_disable_hw(imxdmac); |
1f1846c6c
|
638 |
|
502c2ef26
|
639 640 641 642 643 644 |
spin_lock_irqsave(&imxdma->lock, flags); list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); spin_unlock_irqrestore(&imxdma->lock, flags); return 0; } |
bef2a8d3f
|
645 |
|
dea7a9fbb
|
646 647 648 |
/*
 * Apply a slave configuration for the given @direction: cache the
 * peripheral address, burst and width, precompute the two CCR values,
 * and program the request-select and burst-length registers.
 */
static int imxdma_config_write(struct dma_chan *chan,
			       struct dma_slave_config *dmaengine_cfg,
			       enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	unsigned int mode = 0;

	if (direction == DMA_DEV_TO_MEM) {
		imxdmac->per_address = dmaengine_cfg->src_addr;
		imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
		imxdmac->word_size = dmaengine_cfg->src_addr_width;
	} else {
		imxdmac->per_address = dmaengine_cfg->dst_addr;
		imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
		imxdmac->word_size = dmaengine_cfg->dst_addr_width;
	}

	/* Map the dmaengine bus width onto the engine's MEMSIZE encoding;
	 * anything unrecognised falls back to 32-bit. */
	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		mode = IMX_DMA_MEMSIZE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		mode = IMX_DMA_MEMSIZE_16;
		break;
	default:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		mode = IMX_DMA_MEMSIZE_32;
		break;
	}

	imxdmac->hw_chaining = 0;

	/* Peripheral side is a FIFO at @mode width; memory side is a
	 * linear 32-bit access.  Destination field sits 2 bits up. */
	imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) |
		((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) |
		CCR_REN;
	imxdmac->ccr_to_device =
		(IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) |
		((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN;
	imx_dmav1_writel(imxdma, imxdmac->dma_request,
			 DMA_RSSR(imxdmac->channel));

	/* Set burst length */
	imx_dmav1_writel(imxdma, imxdmac->watermark_level *
			 imxdmac->word_size, DMA_BLR(imxdmac->channel));

	return 0;
}
dea7a9fbb
|
693 694 695 696 697 698 699 700 701 |
static int imxdma_config(struct dma_chan *chan, struct dma_slave_config *dmaengine_cfg) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg)); return 0; } |
1f1846c6c
|
702 703 704 705 |
/* dmaengine .device_tx_status: cookie bookkeeping only, no residue. */
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* tx_submit hook: move the prepared descriptor (head of ld_free) onto
 * the pending queue and hand out its cookie. */
static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);
	/* The prep functions reserved the first free descriptor; queue it. */
	list_move_tail(imxdmac->ld_free.next, &imxdmac->ld_queue);
	cookie = dma_cookie_assign(tx);
	spin_unlock_irqrestore(&imxdma->lock, flags);

	return cookie;
}

/* Preallocate a fixed pool of software descriptors for the channel.
 * Returns the number of descriptors available, or -ENOMEM. */
static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	/* Legacy (non-DT) clients pass the request line via chan->private. */
	if (data != NULL)
		imxdmac->dma_request = data->dma_request;

	while (imxdmac->descs_allocated < IMXDMA_MAX_CHAN_DESCRIPTORS) {
		struct imxdma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		memset(&desc->desc, 0, sizeof(struct dma_async_tx_descriptor));
		dma_async_tx_descriptor_init(&desc->desc, chan);
		desc->desc.tx_submit = imxdma_tx_submit;
		/* txd.flags will be overwritten in prep funcs */
		desc->desc.flags = DMA_CTRL_ACK;
		desc->status = DMA_COMPLETE;

		list_add_tail(&desc->node, &imxdmac->ld_free);
		imxdmac->descs_allocated++;
	}

	/* Partial allocation is fine; total failure is not. */
	if (!imxdmac->descs_allocated)
		return -ENOMEM;

	return imxdmac->descs_allocated;
}

/* Tear down the channel: stop the hardware and free every descriptor
 * and the synthetic cyclic sg list. */
static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&imxdma->lock, flags);

	imxdma_disable_hw(imxdmac);
	list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free);
	list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free);

	spin_unlock_irqrestore(&imxdma->lock, flags);

	list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) {
		kfree(desc);
		imxdmac->descs_allocated--;
	}
	INIT_LIST_HEAD(&imxdmac->ld_free);

	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

/* Prepare a slave scatter-gather transfer.  Returns NULL when no free
 * descriptor exists, a cyclic transfer is running, or the sg list
 * violates the configured bus-width alignment. */
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, dma_length = 0;
	struct imxdma_desc *desc;

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg_dma_len(sg);
	}

	/* Reject sg lists whose first entry is misaligned for the width.
	 * NOTE(review): only sgl (the first entry) is checked here --
	 * later entries are assumed equally aligned. */
	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	desc->type = IMXDMA_DESC_SLAVE_SG;
	desc->sg = sgl;
	desc->sgcount = sg_len;
	desc->len = dma_length;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	return &desc->desc;
}

/* Prepare a cyclic transfer by synthesising a circular sg list with one
 * entry per period, then reusing the slave-sg machinery. */
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	struct imxdma_desc *desc;
	int i;
	unsigned int periods = buf_len / period_len;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu ",
			__func__, imxdmac->channel, buf_len, period_len);

	if (list_empty(&imxdmac->ld_free) ||
	    imxdma_chan_is_doing_cyclic(imxdmac))
		return NULL;

	desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);

	/* Replace any previous cyclic sg list. */
	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_ATOMIC);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		sg_assign_page(&imxdmac->sg_list[i], NULL);
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		sg_dma_len(&imxdmac->sg_list[i]) = period_len;
		dma_addr += period_len;
	}

	/* close the loop */
	sg_chain(imxdmac->sg_list, periods + 1, imxdmac->sg_list);

	desc->type = IMXDMA_DESC_CYCLIC;
	desc->sg = imxdmac->sg_list;
	desc->sgcount = periods;
	desc->len = IMX_DMA_LENGTH_LOOP;
	desc->direction = direction;
	if (direction == DMA_DEV_TO_MEM) {
		desc->src = imxdmac->per_address;
	} else {
		desc->dest = imxdmac->per_address;
	}
	desc->desc.callback = NULL;
	desc->desc.callback_param = NULL;

	/* Apply the cached slave config for this direction now. */
	imxdma_config_write(chan, &imxdmac->config, direction);

	return &desc->desc;
}
6c05f0915
|
882 883 884 885 886 887 |
static struct dma_async_tx_descriptor *imxdma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imxdma_engine *imxdma = imxdmac->imxdma; |
9e15db7ce
|
888 |
struct imxdma_desc *desc; |
1f1846c6c
|
889 |
|
ac806a1c8
|
890 891 892 893 |
dev_dbg(imxdma->dev, "%s channel: %d src=0x%llx dst=0x%llx len=%zu ", __func__, imxdmac->channel, (unsigned long long)src, (unsigned long long)dest, len); |
6c05f0915
|
894 |
|
9e15db7ce
|
895 896 |
if (list_empty(&imxdmac->ld_free) || imxdma_chan_is_doing_cyclic(imxdmac)) |
1f1846c6c
|
897 |
return NULL; |
9e15db7ce
|
898 |
desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); |
6c05f0915
|
899 |
|
9e15db7ce
|
900 901 902 903 |
desc->type = IMXDMA_DESC_MEMCPY; desc->src = src; desc->dest = dest; desc->len = len; |
2efc3449d
|
904 |
desc->direction = DMA_MEM_TO_MEM; |
9e15db7ce
|
905 906 907 908 |
desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; desc->desc.callback = NULL; desc->desc.callback_param = NULL; |
6c05f0915
|
909 |
|
9e15db7ce
|
910 |
return &desc->desc; |
6c05f0915
|
911 |
} |
f606ab897
|
912 913 914 915 916 917 918 |
static struct dma_async_tx_descriptor *imxdma_prep_dma_interleaved( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; |
ac806a1c8
|
919 920 921 922 923 924 |
dev_dbg(imxdma->dev, "%s channel: %d src_start=0x%llx dst_start=0x%llx " " src_sgl=%s dst_sgl=%s numf=%zu frame_size=%zu ", __func__, imxdmac->channel, (unsigned long long)xt->src_start, (unsigned long long) xt->dst_start, |
f606ab897
|
925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 |
xt->src_sgl ? "true" : "false", xt->dst_sgl ? "true" : "false", xt->numf, xt->frame_size); if (list_empty(&imxdmac->ld_free) || imxdma_chan_is_doing_cyclic(imxdmac)) return NULL; if (xt->frame_size != 1 || xt->numf <= 0 || xt->dir != DMA_MEM_TO_MEM) return NULL; desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); desc->type = IMXDMA_DESC_INTERLEAVED; desc->src = xt->src_start; desc->dest = xt->dst_start; desc->x = xt->sgl[0].size; desc->y = xt->numf; desc->w = xt->sgl[0].icg + desc->x; desc->len = desc->x * desc->y; desc->direction = DMA_MEM_TO_MEM; desc->config_port = IMX_DMA_MEMSIZE_32; desc->config_mem = IMX_DMA_MEMSIZE_32; if (xt->src_sgl) desc->config_mem |= IMX_DMA_TYPE_2D; if (xt->dst_sgl) desc->config_port |= IMX_DMA_TYPE_2D; desc->desc.callback = NULL; desc->desc.callback_param = NULL; return &desc->desc; |
1f1846c6c
|
955 956 957 958 |
} static void imxdma_issue_pending(struct dma_chan *chan) { |
5b3168763
|
959 |
struct imxdma_channel *imxdmac = to_imxdma_chan(chan); |
9e15db7ce
|
960 961 962 |
struct imxdma_engine *imxdma = imxdmac->imxdma; struct imxdma_desc *desc; unsigned long flags; |
f606ab897
|
963 |
spin_lock_irqsave(&imxdma->lock, flags); |
9e15db7ce
|
964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 |
if (list_empty(&imxdmac->ld_active) && !list_empty(&imxdmac->ld_queue)) { desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, node); if (imxdma_xfer_desc(desc) < 0) { dev_warn(imxdma->dev, "%s: channel: %d couldn't issue DMA xfer ", __func__, imxdmac->channel); } else { list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); } } |
f606ab897
|
979 |
spin_unlock_irqrestore(&imxdma->lock, flags); |
1f1846c6c
|
980 |
} |
290ad0f9d
|
981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 |
static bool imxdma_filter_fn(struct dma_chan *chan, void *param) { struct imxdma_filter_data *fdata = param; struct imxdma_channel *imxdma_chan = to_imxdma_chan(chan); if (chan->device->dev != fdata->imxdma->dev) return false; imxdma_chan->dma_request = fdata->request; chan->private = NULL; return true; } static struct dma_chan *imxdma_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { int count = dma_spec->args_count; struct imxdma_engine *imxdma = ofdma->of_dma_data; struct imxdma_filter_data fdata = { .imxdma = imxdma, }; if (count != 1) return NULL; fdata.request = dma_spec->args[0]; return dma_request_channel(imxdma->dma_device.cap_mask, imxdma_filter_fn, &fdata); } |
1f1846c6c
|
1012 |
static int __init imxdma_probe(struct platform_device *pdev) |
71c6b6634
|
1013 |
{ |
1f1846c6c
|
1014 |
struct imxdma_engine *imxdma; |
73930eb31
|
1015 |
struct resource *res; |
290ad0f9d
|
1016 |
const struct of_device_id *of_id; |
1f1846c6c
|
1017 |
int ret, i; |
73930eb31
|
1018 |
int irq, irq_err; |
cd5cf9da0
|
1019 |
|
290ad0f9d
|
1020 1021 1022 |
of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev); if (of_id) pdev->id_entry = of_id->data; |
04bbd8ef5
|
1023 |
imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL); |
1f1846c6c
|
1024 1025 |
if (!imxdma) return -ENOMEM; |
5c6b3e772
|
1026 |
imxdma->dev = &pdev->dev; |
e51d0f0ac
|
1027 |
imxdma->devtype = pdev->id_entry->driver_data; |
73930eb31
|
1028 |
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
7331205a9
|
1029 1030 1031 |
imxdma->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(imxdma->base)) return PTR_ERR(imxdma->base); |
73930eb31
|
1032 1033 1034 1035 |
irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; |
6bd081277
|
1036 |
|
a2367db2e
|
1037 |
imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg"); |
04bbd8ef5
|
1038 1039 |
if (IS_ERR(imxdma->dma_ipg)) return PTR_ERR(imxdma->dma_ipg); |
a2367db2e
|
1040 1041 |
imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb"); |
04bbd8ef5
|
1042 1043 |
if (IS_ERR(imxdma->dma_ahb)) return PTR_ERR(imxdma->dma_ahb); |
a2367db2e
|
1044 |
|
fce9a74ba
|
1045 1046 1047 1048 1049 1050 |
ret = clk_prepare_enable(imxdma->dma_ipg); if (ret) return ret; ret = clk_prepare_enable(imxdma->dma_ahb); if (ret) goto disable_dma_ipg_clk; |
6bd081277
|
1051 1052 |
/* reset DMA module */ |
cd5cf9da0
|
1053 |
imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); |
6bd081277
|
1054 |
|
e51d0f0ac
|
1055 |
if (is_imx1_dma(imxdma)) { |
73930eb31
|
1056 |
ret = devm_request_irq(&pdev->dev, irq, |
04bbd8ef5
|
1057 |
dma_irq_handler, 0, "DMA", imxdma); |
6bd081277
|
1058 |
if (ret) { |
f9b283a6e
|
1059 1060 |
dev_warn(imxdma->dev, "Can't register IRQ for DMA "); |
fce9a74ba
|
1061 |
goto disable_dma_ahb_clk; |
6bd081277
|
1062 |
} |
ea62aa80b
|
1063 |
imxdma->irq = irq; |
6bd081277
|
1064 |
|
73930eb31
|
1065 1066 1067 |
irq_err = platform_get_irq(pdev, 1); if (irq_err < 0) { ret = irq_err; |
fce9a74ba
|
1068 |
goto disable_dma_ahb_clk; |
73930eb31
|
1069 1070 1071 |
} ret = devm_request_irq(&pdev->dev, irq_err, |
04bbd8ef5
|
1072 |
imxdma_err_handler, 0, "DMA", imxdma); |
6bd081277
|
1073 |
if (ret) { |
f9b283a6e
|
1074 1075 |
dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA "); |
fce9a74ba
|
1076 |
goto disable_dma_ahb_clk; |
6bd081277
|
1077 |
} |
ea62aa80b
|
1078 |
imxdma->irq_err = irq_err; |
6bd081277
|
1079 1080 1081 |
} /* enable DMA module */ |
cd5cf9da0
|
1082 |
imx_dmav1_writel(imxdma, DCR_DEN, DMA_DCR); |
6bd081277
|
1083 1084 |
/* clear all interrupts */ |
cd5cf9da0
|
1085 |
imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DISR); |
6bd081277
|
1086 1087 |
/* disable interrupts */ |
cd5cf9da0
|
1088 |
imx_dmav1_writel(imxdma, (1 << IMX_DMA_CHANNELS) - 1, DMA_DIMR); |
1f1846c6c
|
1089 1090 |
INIT_LIST_HEAD(&imxdma->dma_device.channels); |
f8a356ff9
|
1091 1092 |
dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); |
6c05f0915
|
1093 |
dma_cap_set(DMA_MEMCPY, imxdma->dma_device.cap_mask); |
f606ab897
|
1094 1095 1096 1097 1098 1099 1100 |
dma_cap_set(DMA_INTERLEAVE, imxdma->dma_device.cap_mask); /* Initialize 2D global parameters */ for (i = 0; i < IMX_DMA_2D_SLOTS; i++) imxdma->slots_2d[i].count = 0; spin_lock_init(&imxdma->lock); |
f8a356ff9
|
1101 |
|
1f1846c6c
|
1102 |
/* Initialize channel parameters */ |
6bd081277
|
1103 |
for (i = 0; i < IMX_DMA_CHANNELS; i++) { |
1f1846c6c
|
1104 |
struct imxdma_channel *imxdmac = &imxdma->channel[i]; |
e51d0f0ac
|
1105 |
if (!is_imx1_dma(imxdma)) { |
73930eb31
|
1106 |
ret = devm_request_irq(&pdev->dev, irq + i, |
6bd081277
|
1107 1108 |
dma_irq_handler, 0, "DMA", imxdma); if (ret) { |
f9b283a6e
|
1109 1110 1111 |
dev_warn(imxdma->dev, "Can't register IRQ %d " "for DMA channel %d ", |
73930eb31
|
1112 |
irq + i, i); |
fce9a74ba
|
1113 |
goto disable_dma_ahb_clk; |
6bd081277
|
1114 |
} |
ea62aa80b
|
1115 1116 |
imxdmac->irq = irq + i; |
bcdc4bd35
|
1117 |
timer_setup(&imxdmac->watchdog, imxdma_watchdog, 0); |
8267f16e8
|
1118 |
} |
1f1846c6c
|
1119 |
|
1f1846c6c
|
1120 |
imxdmac->imxdma = imxdma; |
1f1846c6c
|
1121 |
|
9e15db7ce
|
1122 1123 1124 1125 1126 1127 |
INIT_LIST_HEAD(&imxdmac->ld_queue); INIT_LIST_HEAD(&imxdmac->ld_free); INIT_LIST_HEAD(&imxdmac->ld_active); tasklet_init(&imxdmac->dma_tasklet, imxdma_tasklet, (unsigned long)imxdmac); |
1f1846c6c
|
1128 |
imxdmac->chan.device = &imxdma->dma_device; |
8ac695463
|
1129 |
dma_cookie_init(&imxdmac->chan); |
1f1846c6c
|
1130 1131 1132 |
imxdmac->channel = i; /* Add the channel to the DMAC list */ |
9e15db7ce
|
1133 1134 |
list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels); |
1f1846c6c
|
1135 |
} |
1f1846c6c
|
1136 1137 1138 1139 1140 1141 1142 |
imxdma->dma_device.dev = &pdev->dev; imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources; imxdma->dma_device.device_tx_status = imxdma_tx_status; imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; |
6c05f0915
|
1143 |
imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; |
f606ab897
|
1144 |
imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; |
502c2ef26
|
1145 1146 |
imxdma->dma_device.device_config = imxdma_config; imxdma->dma_device.device_terminate_all = imxdma_terminate_all; |
1f1846c6c
|
1147 1148 1149 |
imxdma->dma_device.device_issue_pending = imxdma_issue_pending; platform_set_drvdata(pdev, imxdma); |
77a68e56a
|
1150 |
imxdma->dma_device.copy_align = DMAENGINE_ALIGN_4_BYTES; |
1e070a609
|
1151 1152 |
imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms; dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff); |
1f1846c6c
|
1153 1154 1155 1156 |
ret = dma_async_device_register(&imxdma->dma_device); if (ret) { dev_err(&pdev->dev, "unable to register "); |
fce9a74ba
|
1157 |
goto disable_dma_ahb_clk; |
1f1846c6c
|
1158 |
} |
290ad0f9d
|
1159 1160 1161 1162 1163 1164 1165 1166 1167 |
if (pdev->dev.of_node) { ret = of_dma_controller_register(pdev->dev.of_node, imxdma_xlate, imxdma); if (ret) { dev_err(&pdev->dev, "unable to register of_dma_controller "); goto err_of_dma_controller; } } |
1f1846c6c
|
1168 |
return 0; |
290ad0f9d
|
1169 1170 |
err_of_dma_controller: dma_async_device_unregister(&imxdma->dma_device); |
fce9a74ba
|
1171 |
disable_dma_ahb_clk: |
a2367db2e
|
1172 |
clk_disable_unprepare(imxdma->dma_ahb); |
fce9a74ba
|
1173 1174 |
disable_dma_ipg_clk: clk_disable_unprepare(imxdma->dma_ipg); |
1f1846c6c
|
1175 1176 |
return ret; } |
ea62aa80b
|
1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 |
static void imxdma_free_irq(struct platform_device *pdev, struct imxdma_engine *imxdma) { int i; if (is_imx1_dma(imxdma)) { disable_irq(imxdma->irq); disable_irq(imxdma->irq_err); } for (i = 0; i < IMX_DMA_CHANNELS; i++) { struct imxdma_channel *imxdmac = &imxdma->channel[i]; if (!is_imx1_dma(imxdma)) disable_irq(imxdmac->irq); tasklet_kill(&imxdmac->dma_tasklet); } } |
1d1bbd305
|
1195 |
static int imxdma_remove(struct platform_device *pdev) |
1f1846c6c
|
1196 1197 |
{ struct imxdma_engine *imxdma = platform_get_drvdata(pdev); |
1f1846c6c
|
1198 |
|
ea62aa80b
|
1199 |
imxdma_free_irq(pdev, imxdma); |
1f1846c6c
|
1200 |
dma_async_device_unregister(&imxdma->dma_device); |
290ad0f9d
|
1201 1202 |
if (pdev->dev.of_node) of_dma_controller_free(pdev->dev.of_node); |
a2367db2e
|
1203 1204 |
clk_disable_unprepare(imxdma->dma_ipg); clk_disable_unprepare(imxdma->dma_ahb); |
1f1846c6c
|
1205 1206 1207 1208 1209 1210 1211 |
return 0; } static struct platform_driver imxdma_driver = { .driver = { .name = "imx-dma", |
290ad0f9d
|
1212 |
.of_match_table = imx_dma_of_dev_id, |
1f1846c6c
|
1213 |
}, |
e51d0f0ac
|
1214 |
.id_table = imx_dma_devtype, |
1d1bbd305
|
1215 |
.remove = imxdma_remove, |
1f1846c6c
|
1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 |
}; static int __init imxdma_module_init(void) { return platform_driver_probe(&imxdma_driver, imxdma_probe); } subsys_initcall(imxdma_module_init); MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); MODULE_DESCRIPTION("i.MX dma driver"); MODULE_LICENSE("GPL"); |