Blame view
drivers/dma/mxs-dma.c
25.5 KB
d9617a3f2 dmaengine: mxs-dm... |
1 2 3 4 5 |
// SPDX-License-Identifier: GPL-2.0 // // Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. // // Refer to drivers/dma/imx-sdma.c |
a580b8c54 dmaengine: mxs-dm... |
6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 |
#include <linux/init.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/wait.h> #include <linux/sched.h> #include <linux/semaphore.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/dmaengine.h> #include <linux/delay.h> |
90c9abc5b dma: mxs-dma: add... |
21 |
#include <linux/module.h> |
f5b7efccd dma: mxs-dma: use... |
22 |
#include <linux/stmp_device.h> |
90c9abc5b dma: mxs-dma: add... |
23 24 |
#include <linux/of.h> #include <linux/of_device.h> |
d84f638b0 dma: mxs-dma: mov... |
25 |
#include <linux/of_dma.h> |
b2d639890 dma: mxs-dma: Cle... |
26 |
#include <linux/list.h> |
e0ddaab76 dmaengine: mxs: A... |
27 |
#include <linux/dma/mxs-dma.h> |
819221d03 LF-251-1: dma: mx... |
28 |
#include <linux/pm_runtime.h> |
864537f2e LF-251-2: dma: mx... |
29 |
#include <linux/dmapool.h> |
a580b8c54 dmaengine: mxs-dm... |
30 31 |
#include <asm/irq.h> |
a580b8c54 dmaengine: mxs-dm... |
32 |
|
d2ebfb335 dmaengine: add pr... |
33 |
#include "dmaengine.h" |
a580b8c54 dmaengine: mxs-dm... |
34 35 36 37 38 |
/* * NOTE: The term "PIO" throughout the mxs-dma implementation means * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, * dma can program the controller registers of peripheral devices. */ |
8c9201364 dma: mxs-dma: mak... |
39 40 |
#define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH) #define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA) |
a580b8c54 dmaengine: mxs-dm... |
41 |
|
819221d03 LF-251-1: dma: mx... |
42 |
#define MXS_DMA_RPM_TIMEOUT 50 /* ms */ |
a580b8c54 dmaengine: mxs-dm... |
43 44 45 |
#define HW_APBHX_CTRL0 0x000 #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) |
a580b8c54 dmaengine: mxs-dm... |
46 47 48 49 50 |
#define BP_APBH_CTRL0_RESET_CHANNEL 16 #define HW_APBHX_CTRL1 0x010 #define HW_APBHX_CTRL2 0x020 #define HW_APBHX_CHANNEL_CTRL 0x030 #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 |
bb11fb63f dma: mxs-dma: let... |
51 52 53 54 55 56 57 58 |
/* * The offset of NXTCMDAR register is different per both dma type and version, * while stride for each channel is all the same 0x70. */ #define HW_APBHX_CHn_NXTCMDAR(d, n) \ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70) #define HW_APBHX_CHn_SEMA(d, n) \ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70) |
7b11304a3 dma: mxs-dma: Rep... |
59 60 |
#define HW_APBHX_CHn_BAR(d, n) \ (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x070 : 0x130) + (n) * 0x70) |
702e94d66 dma: mxs-dma: Fix... |
61 |
#define HW_APBX_CHn_DEBUG1(d, n) (0x150 + (n) * 0x70) |
a580b8c54 dmaengine: mxs-dm... |
62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 |
/* * ccw bits definitions * * COMMAND: 0..1 (2) * CHAIN: 2 (1) * IRQ: 3 (1) * NAND_LOCK: 4 (1) - not implemented * NAND_WAIT4READY: 5 (1) - not implemented * DEC_SEM: 6 (1) * WAIT4END: 7 (1) * HALT_ON_TERMINATE: 8 (1) * TERMINATE_FLUSH: 9 (1) * RESERVED: 10..11 (2) * PIO_NUM: 12..15 (4) */ #define BP_CCW_COMMAND 0 #define BM_CCW_COMMAND (3 << 0) #define CCW_CHAIN (1 << 2) #define CCW_IRQ (1 << 3) |
ef347c0cf mtd: rawnand: gpm... |
82 |
#define CCW_WAIT4RDY (1 << 5) |
a580b8c54 dmaengine: mxs-dm... |
83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 |
#define CCW_DEC_SEM (1 << 6) #define CCW_WAIT4END (1 << 7) #define CCW_HALT_ON_TERM (1 << 8) #define CCW_TERM_FLUSH (1 << 9) #define BP_CCW_PIO_NUM 12 #define BM_CCW_PIO_NUM (0xf << 12) #define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field) #define MXS_DMA_CMD_NO_XFER 0 #define MXS_DMA_CMD_WRITE 1 #define MXS_DMA_CMD_READ 2 #define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */ struct mxs_dma_ccw { u32 next; u16 bits; u16 xfer_bytes; #define MAX_XFER_BYTES 0xff00 u32 bufaddr; #define MXS_PIO_WORDS 16 u32 pio_words[MXS_PIO_WORDS]; }; |
5e97fa914 mxs/dma: Enlarge ... |
106 107 |
#define CCW_BLOCK_SIZE (4 * PAGE_SIZE) #define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw)) |
a580b8c54 dmaengine: mxs-dm... |
108 109 110 111 112 113 |
struct mxs_dma_chan { struct mxs_dma_engine *mxs_dma; struct dma_chan chan; struct dma_async_tx_descriptor desc; struct tasklet_struct tasklet; |
f2ad69925 dma: mxs-dma: Fix... |
114 |
unsigned int chan_irq; |
a580b8c54 dmaengine: mxs-dm... |
115 116 |
struct mxs_dma_ccw *ccw; dma_addr_t ccw_phys; |
6d23ea4b1 dma: mxs-dma: mak... |
117 |
int desc_count; |
a580b8c54 dmaengine: mxs-dm... |
118 119 |
enum dma_status status; unsigned int flags; |
2dcbdce36 dma: mxs-dma: Use... |
120 |
bool reset; |
864537f2e LF-251-2: dma: mx... |
121 |
struct dma_pool *ccw_pool; |
a580b8c54 dmaengine: mxs-dm... |
122 |
#define MXS_DMA_SG_LOOP (1 << 0) |
2dcbdce36 dma: mxs-dma: Use... |
123 |
#define MXS_DMA_USE_SEMAPHORE (1 << 1) |
a580b8c54 dmaengine: mxs-dm... |
124 125 126 127 |
}; #define MXS_DMA_CHANNELS 16 #define MXS_DMA_CHANNELS_MASK 0xffff |
8c9201364 dma: mxs-dma: mak... |
128 129 130 131 132 133 134 135 136 |
enum mxs_dma_devtype { MXS_DMA_APBH, MXS_DMA_APBX, }; enum mxs_dma_id { IMX23_DMA, IMX28_DMA, }; |
a580b8c54 dmaengine: mxs-dm... |
137 |
struct mxs_dma_engine { |
8c9201364 dma: mxs-dma: mak... |
138 139 |
enum mxs_dma_id dev_id; enum mxs_dma_devtype type; |
a580b8c54 dmaengine: mxs-dm... |
140 141 142 |
void __iomem *base; struct clk *clk; struct dma_device dma_device; |
a580b8c54 dmaengine: mxs-dm... |
143 |
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; |
d84f638b0 dma: mxs-dma: mov... |
144 145 |
struct platform_device *pdev; unsigned int nr_channels; |
a580b8c54 dmaengine: mxs-dm... |
146 |
}; |
8c9201364 dma: mxs-dma: mak... |
147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 |
/* (id, type) pair attached to each platform/OF match entry below. */
struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};

/* Legacy platform-bus match table; driver_data points into mxs_dma_types. */
static const struct platform_device_id mxs_dma_ids[] = {
	{
		.name = "imx23-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
	}, {
		.name = "imx23-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
	}, {
		.name = "imx28-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
	}, {
		.name = "imx28-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
	}, {
		/* end of list */
	}
};

/* Device-tree match table; .data points at the platform_device_id entries. */
static const struct of_device_id mxs_dma_dt_ids[] = {
	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
8c9201364 dma: mxs-dma: mak... |
192 193 194 195 |
/* Map a generic dmaengine channel back to its embedding mxs_dma_chan. */
static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}
5c9d2e37a dmaengine: mxs: S... |
196 |
static void mxs_dma_reset_chan(struct dma_chan *chan) |
a580b8c54 dmaengine: mxs-dm... |
197 |
{ |
5c9d2e37a dmaengine: mxs: S... |
198 |
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
a580b8c54 dmaengine: mxs-dm... |
199 200 |
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; |
2dcbdce36 dma: mxs-dma: Use... |
201 202 203 204 205 206 207 208 209 210 211 |
/* * mxs dma channel resets can cause a channel stall. To recover from a * channel stall, we have to reset the whole DMA engine. To avoid this, * we use cyclic DMA with semaphores, that are enhanced in * mxs_dma_int_handler. To reset the channel, we can simply stop writing * into the semaphore counter. */ if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE && mxs_chan->flags & MXS_DMA_SG_LOOP) { mxs_chan->reset = true; } else if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma)) { |
a580b8c54 dmaengine: mxs-dm... |
212 |
writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), |
f5b7efccd dma: mxs-dma: use... |
213 |
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); |
702e94d66 dma: mxs-dma: Fix... |
214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 |
} else { unsigned long elapsed = 0; const unsigned long max_wait = 50000; /* 50ms */ void __iomem *reg_dbg1 = mxs_dma->base + HW_APBX_CHn_DEBUG1(mxs_dma, chan_id); /* * On i.MX28 APBX, the DMA channel can stop working if we reset * the channel while it is in READ_FLUSH (0x08) state. * We wait here until we leave the state. Then we trigger the * reset. Waiting a maximum of 50ms, the kernel shouldn't crash * because of this. */ while ((readl(reg_dbg1) & 0xf) == 0x8 && elapsed < max_wait) { udelay(100); elapsed += 100; } if (elapsed >= max_wait) dev_err(&mxs_chan->mxs_dma->pdev->dev, "Failed waiting for the DMA channel %d to leave state READ_FLUSH, trying to reset channel in READ_FLUSH state now ", chan_id); |
a580b8c54 dmaengine: mxs-dm... |
237 |
writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), |
f5b7efccd dma: mxs-dma: use... |
238 |
mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); |
702e94d66 dma: mxs-dma: Fix... |
239 |
} |
bb3660f13 dma: mxs-dma: Upd... |
240 241 |
mxs_chan->status = DMA_COMPLETE; |
a580b8c54 dmaengine: mxs-dm... |
242 |
} |
5c9d2e37a dmaengine: mxs: S... |
243 |
/*
 * Start a channel: point the hardware at the first CCW, then arm the
 * channel semaphore. The semaphore write is what actually kicks off
 * execution, so it must come after NXTCMDAR is programmed.
 */
static void mxs_dma_enable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
	if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		/* A cyclic DMA consists of at least 2 segments, so initialize
		 * the semaphore with 2 so we have enough time to add 1 to the
		 * semaphore if we need to */
		writel(2, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	} else {
		writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
	}
	/* a fresh start cancels any pending semaphore-starvation reset */
	mxs_chan->reset = false;
}
5c9d2e37a dmaengine: mxs: S... |
265 |
/*
 * Mark the channel idle. No register access is required here: the channel
 * stops on its own once its CCW chain (and semaphore) is exhausted.
 */
static void mxs_dma_disable_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_chan->status = DMA_COMPLETE;
}
a29c39563 dmaengine: mxs-dm... |
270 |
/*
 * Pause (freeze) a channel. The freeze bit lives in CTRL0 on old APBH
 * and in CHANNEL_CTRL everywhere else; setting it via the stmp SET
 * offset halts the channel without losing its state.
 * Returns 0 (the operation cannot fail).
 */
static int mxs_dma_pause_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
	return 0;
}
a29c39563 dmaengine: mxs-dm... |
287 |
/*
 * Resume (unfreeze) a previously paused channel by clearing the same
 * freeze bit mxs_dma_pause_chan() set, via the stmp CLR offset.
 * Returns 0 (the operation cannot fail).
 */
static int mxs_dma_resume_chan(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
	return 0;
}
a580b8c54 dmaengine: mxs-dm... |
304 305 |
/*
 * ->tx_submit hook: only assigns the next cookie. The hardware is
 * actually started later, from mxs_dma_enable_chan() (issue_pending).
 */
static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}
6afe87786 dmaengine: mxs-dm... |
308 |
/*
 * Bottom half scheduled from the interrupt handler: invoke the client's
 * completion callback outside hard-irq context.
 */
static void mxs_dma_tasklet(struct tasklet_struct *t)
{
	struct mxs_dma_chan *mxs_chan = from_tasklet(mxs_chan, t, tasklet);

	dmaengine_desc_get_callback_invoke(&mxs_chan->desc, NULL);
}
b2d639890 dma: mxs-dma: Cle... |
314 315 316 317 318 319 320 321 322 323 |
/*
 * Look up which channel owns a given interrupt line.
 * Returns the channel index, or -EINVAL if the irq belongs to no channel.
 */
static int mxs_dma_irq_to_chan(struct mxs_dma_engine *mxs_dma, int irq)
{
	unsigned int ch;

	for (ch = 0; ch < mxs_dma->nr_channels; ch++) {
		if (mxs_dma->mxs_chans[ch].chan_irq == irq)
			return ch;
	}

	return -EINVAL;
}
a580b8c54 dmaengine: mxs-dm... |
324 325 326 |
static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) { struct mxs_dma_engine *mxs_dma = dev_id; |
b2d639890 dma: mxs-dma: Cle... |
327 328 329 330 331 332 333 |
struct mxs_dma_chan *mxs_chan; u32 completed; u32 err; int chan = mxs_dma_irq_to_chan(mxs_dma, irq); if (chan < 0) return IRQ_NONE; |
a580b8c54 dmaengine: mxs-dm... |
334 335 |
/* completion status */ |
b2d639890 dma: mxs-dma: Cle... |
336 337 338 339 340 341 |
completed = readl(mxs_dma->base + HW_APBHX_CTRL1); completed = (completed >> chan) & 0x1; /* Clear interrupt */ writel((1 << chan), mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR); |
a580b8c54 dmaengine: mxs-dm... |
342 343 |
/* error status */ |
b2d639890 dma: mxs-dma: Cle... |
344 345 346 347 348 349 350 351 352 353 354 355 356 |
err = readl(mxs_dma->base + HW_APBHX_CTRL2); err &= (1 << (MXS_DMA_CHANNELS + chan)) | (1 << chan); /* * error status bit is in the upper 16 bits, error irq bit in the lower * 16 bits. We transform it into a simpler error code: * err: 0x00 = no error, 0x01 = TERMINATION, 0x02 = BUS_ERROR */ err = (err >> (MXS_DMA_CHANNELS + chan)) + (err >> chan); /* Clear error irq */ writel((1 << chan), mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR); |
a580b8c54 dmaengine: mxs-dm... |
357 358 359 360 |
/* * When both completion and error of termination bits set at the * same time, we do not take it as an error. IOW, it only becomes |
b2d639890 dma: mxs-dma: Cle... |
361 362 363 |
* an error we need to handle here in case of either it's a bus * error or a termination error with no completion. 0x01 is termination * error, so we can subtract err & completed to get the real error case. |
a580b8c54 dmaengine: mxs-dm... |
364 |
*/ |
b2d639890 dma: mxs-dma: Cle... |
365 366 367 368 369 370 371 372 373 374 |
err -= err & completed; mxs_chan = &mxs_dma->mxs_chans[chan]; if (err) { dev_dbg(mxs_dma->dma_device.dev, "%s: error in channel %d ", __func__, chan); mxs_chan->status = DMA_ERROR; |
e0cad7a00 dmaengine: mxs-dm... |
375 |
mxs_dma_reset_chan(&mxs_chan->chan); |
bb3660f13 dma: mxs-dma: Upd... |
376 |
} else if (mxs_chan->status != DMA_COMPLETE) { |
2dcbdce36 dma: mxs-dma: Use... |
377 |
if (mxs_chan->flags & MXS_DMA_SG_LOOP) { |
b2d639890 dma: mxs-dma: Cle... |
378 |
mxs_chan->status = DMA_IN_PROGRESS; |
2dcbdce36 dma: mxs-dma: Use... |
379 380 381 382 |
if (mxs_chan->flags & MXS_DMA_USE_SEMAPHORE) writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan)); } else { |
b2d639890 dma: mxs-dma: Cle... |
383 |
mxs_chan->status = DMA_COMPLETE; |
2dcbdce36 dma: mxs-dma: Use... |
384 |
} |
a580b8c54 dmaengine: mxs-dm... |
385 |
} |
2dcbdce36 dma: mxs-dma: Use... |
386 387 388 |
if (mxs_chan->status == DMA_COMPLETE) { if (mxs_chan->reset) return IRQ_HANDLED; |
b2d639890 dma: mxs-dma: Cle... |
389 |
dma_cookie_complete(&mxs_chan->desc); |
2dcbdce36 dma: mxs-dma: Use... |
390 |
} |
b2d639890 dma: mxs-dma: Cle... |
391 392 393 |
/* schedule tasklet on this channel */ tasklet_schedule(&mxs_chan->tasklet); |
a580b8c54 dmaengine: mxs-dm... |
394 395 396 397 398 399 |
return IRQ_HANDLED; } static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); |
a580b8c54 dmaengine: mxs-dm... |
400 |
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
819221d03 LF-251-1: dma: mx... |
401 |
struct device *dev = &mxs_dma->pdev->dev; |
a580b8c54 dmaengine: mxs-dm... |
402 |
int ret; |
864537f2e LF-251-2: dma: mx... |
403 404 405 |
mxs_chan->ccw = dma_pool_zalloc(mxs_chan->ccw_pool, GFP_ATOMIC, &mxs_chan->ccw_phys); |
a580b8c54 dmaengine: mxs-dm... |
406 407 408 409 |
if (!mxs_chan->ccw) { ret = -ENOMEM; goto err_alloc; } |
028e84a1d dmaengine: mxs: r... |
410 411 412 413 |
ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, 0, "mxs-dma", mxs_dma); if (ret) goto err_irq; |
a580b8c54 dmaengine: mxs-dm... |
414 |
|
819221d03 LF-251-1: dma: mx... |
415 416 417 418 |
ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Failed to enable clock "); |
a580b8c54 dmaengine: mxs-dm... |
419 |
goto err_clk; |
819221d03 LF-251-1: dma: mx... |
420 |
} |
a580b8c54 dmaengine: mxs-dm... |
421 |
|
5c9d2e37a dmaengine: mxs: S... |
422 |
mxs_dma_reset_chan(chan); |
a580b8c54 dmaengine: mxs-dm... |
423 424 425 426 427 428 429 430 431 432 433 434 |
dma_async_tx_descriptor_init(&mxs_chan->desc, chan); mxs_chan->desc.tx_submit = mxs_dma_tx_submit; /* the descriptor is ready */ async_tx_ack(&mxs_chan->desc); return 0; err_clk: free_irq(mxs_chan->chan_irq, mxs_dma); err_irq: |
864537f2e LF-251-2: dma: mx... |
435 436 |
dma_pool_free(mxs_chan->ccw_pool, mxs_chan->ccw, mxs_chan->ccw_phys); |
a580b8c54 dmaengine: mxs-dm... |
437 438 439 440 441 442 443 444 |
err_alloc: return ret; } static void mxs_dma_free_chan_resources(struct dma_chan *chan) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
819221d03 LF-251-1: dma: mx... |
445 |
struct device *dev = &mxs_dma->pdev->dev; |
a580b8c54 dmaengine: mxs-dm... |
446 |
|
5c9d2e37a dmaengine: mxs: S... |
447 |
mxs_dma_disable_chan(chan); |
a580b8c54 dmaengine: mxs-dm... |
448 449 |
free_irq(mxs_chan->chan_irq, mxs_dma); |
864537f2e LF-251-2: dma: mx... |
450 451 |
dma_pool_free(mxs_chan->ccw_pool, mxs_chan->ccw, mxs_chan->ccw_phys); |
a580b8c54 dmaengine: mxs-dm... |
452 |
|
819221d03 LF-251-1: dma: mx... |
453 454 |
pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); |
a580b8c54 dmaengine: mxs-dm... |
455 |
} |
921de864b mxs-dma : rewrite... |
456 457 458 459 460 461 462 463 464 465 |
/*
 * How to use the flags for ->device_prep_slave_sg() :
 * [1] If there is only one DMA command in the DMA chain, the code should be:
 *	......
 *	->device_prep_slave_sg(DMA_CTRL_ACK);
 *	......
 * [2] If there are two DMA commands in the DMA chain, the code should be
 *	......
 *	->device_prep_slave_sg(0);
 *	......
 *	->device_prep_slave_sg(DMA_CTRL_ACK);
 *	......
 * [3] If there are more than two DMA commands in the DMA chain, the code
 *     should be:
 *	......
 *	->device_prep_slave_sg(0);				// First
 *	......
 *	->device_prep_slave_sg(DMA_CTRL_ACK);
 *	......
 *	->device_prep_slave_sg(DMA_CTRL_ACK);			// Last
 *	......
 */
a580b8c54 dmaengine: mxs-dm... |
478 479 |
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, |
db8196df4 dmaengine: move d... |
480 |
unsigned int sg_len, enum dma_transfer_direction direction, |
623ff7739 Merge tag 'for-li... |
481 |
unsigned long flags, void *context) |
a580b8c54 dmaengine: mxs-dm... |
482 483 484 485 486 |
{ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; struct mxs_dma_ccw *ccw; struct scatterlist *sg; |
f2ad69925 dma: mxs-dma: Fix... |
487 |
u32 i, j; |
a580b8c54 dmaengine: mxs-dm... |
488 |
u32 *pio; |
d443cb25c dmaengine: mxs: D... |
489 |
int idx = 0; |
a580b8c54 dmaengine: mxs-dm... |
490 |
|
d443cb25c dmaengine: mxs: D... |
491 492 |
if (mxs_chan->status == DMA_IN_PROGRESS) idx = mxs_chan->desc_count; |
a580b8c54 dmaengine: mxs-dm... |
493 |
|
d443cb25c dmaengine: mxs: D... |
494 |
if (sg_len + idx > NUM_CCW) { |
a580b8c54 dmaengine: mxs-dm... |
495 496 497 498 499 500 501 502 503 504 505 506 507 508 |
dev_err(mxs_dma->dma_device.dev, "maximum number of sg exceeded: %d > %d ", sg_len, NUM_CCW); goto err_out; } mxs_chan->status = DMA_IN_PROGRESS; mxs_chan->flags = 0; /* * If the sg is prepared with append flag set, the sg * will be appended to the last prepared sg. */ |
d443cb25c dmaengine: mxs: D... |
509 |
if (idx) { |
a580b8c54 dmaengine: mxs-dm... |
510 511 512 513 514 515 |
BUG_ON(idx < 1); ccw = &mxs_chan->ccw[idx - 1]; ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; ccw->bits |= CCW_CHAIN; ccw->bits &= ~CCW_IRQ; ccw->bits &= ~CCW_DEC_SEM; |
a580b8c54 dmaengine: mxs-dm... |
516 517 518 |
} else { idx = 0; } |
62268ce91 dmaengine: add DM... |
519 |
if (direction == DMA_TRANS_NONE) { |
a580b8c54 dmaengine: mxs-dm... |
520 521 522 523 524 525 526 527 528 |
ccw = &mxs_chan->ccw[idx++]; pio = (u32 *) sgl; for (j = 0; j < sg_len;) ccw->pio_words[j++] = *pio++; ccw->bits = 0; ccw->bits |= CCW_IRQ; ccw->bits |= CCW_DEC_SEM; |
ceeeb99cd dmaengine: mxs: r... |
529 |
if (flags & MXS_DMA_CTRL_WAIT4END) |
921de864b mxs-dma : rewrite... |
530 |
ccw->bits |= CCW_WAIT4END; |
a580b8c54 dmaengine: mxs-dm... |
531 532 533 534 |
ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; ccw->bits |= BF_CCW(sg_len, PIO_NUM); ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); |
ef347c0cf mtd: rawnand: gpm... |
535 536 |
if (flags & MXS_DMA_CTRL_WAIT4RDY) ccw->bits |= CCW_WAIT4RDY; |
a580b8c54 dmaengine: mxs-dm... |
537 538 |
} else { for_each_sg(sgl, sg, sg_len, i) { |
fdaf9c4b2 dmaengine: Use dm... |
539 |
if (sg_dma_len(sg) > MAX_XFER_BYTES) { |
a580b8c54 dmaengine: mxs-dm... |
540 541 |
dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d ", |
fdaf9c4b2 dmaengine: Use dm... |
542 |
sg_dma_len(sg), MAX_XFER_BYTES); |
a580b8c54 dmaengine: mxs-dm... |
543 544 545 546 547 548 549 |
goto err_out; } ccw = &mxs_chan->ccw[idx++]; ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; ccw->bufaddr = sg->dma_address; |
fdaf9c4b2 dmaengine: Use dm... |
550 |
ccw->xfer_bytes = sg_dma_len(sg); |
a580b8c54 dmaengine: mxs-dm... |
551 552 553 554 555 |
ccw->bits = 0; ccw->bits |= CCW_CHAIN; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; |
db8196df4 dmaengine: move d... |
556 |
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? |
a580b8c54 dmaengine: mxs-dm... |
557 558 559 560 561 562 563 |
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); if (i + 1 == sg_len) { ccw->bits &= ~CCW_CHAIN; ccw->bits |= CCW_IRQ; ccw->bits |= CCW_DEC_SEM; |
ceeeb99cd dmaengine: mxs: r... |
564 |
if (flags & MXS_DMA_CTRL_WAIT4END) |
921de864b mxs-dma : rewrite... |
565 |
ccw->bits |= CCW_WAIT4END; |
a580b8c54 dmaengine: mxs-dm... |
566 567 568 |
} } } |
6d23ea4b1 dma: mxs-dma: mak... |
569 |
mxs_chan->desc_count = idx; |
a580b8c54 dmaengine: mxs-dm... |
570 571 572 573 574 575 576 577 578 579 |
return &mxs_chan->desc; err_out: mxs_chan->status = DMA_ERROR; return NULL; } static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, |
185ecb5f4 dmaengine: add co... |
580 |
size_t period_len, enum dma_transfer_direction direction, |
31c1e5a13 dmaengine: Remove... |
581 |
unsigned long flags) |
a580b8c54 dmaengine: mxs-dm... |
582 583 584 |
{ struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; |
f2ad69925 dma: mxs-dma: Fix... |
585 586 |
u32 num_periods = buf_len / period_len; u32 i = 0, buf = 0; |
a580b8c54 dmaengine: mxs-dm... |
587 588 589 590 591 592 |
if (mxs_chan->status == DMA_IN_PROGRESS) return NULL; mxs_chan->status = DMA_IN_PROGRESS; mxs_chan->flags |= MXS_DMA_SG_LOOP; |
2dcbdce36 dma: mxs-dma: Use... |
593 |
mxs_chan->flags |= MXS_DMA_USE_SEMAPHORE; |
a580b8c54 dmaengine: mxs-dm... |
594 595 596 597 598 599 600 601 602 603 604 |
if (num_periods > NUM_CCW) { dev_err(mxs_dma->dma_device.dev, "maximum number of sg exceeded: %d > %d ", num_periods, NUM_CCW); goto err_out; } if (period_len > MAX_XFER_BYTES) { dev_err(mxs_dma->dma_device.dev, |
4aff2f935 dmaengine: mxs: U... |
605 606 |
"maximum period size exceeded: %zu > %d ", |
a580b8c54 dmaengine: mxs-dm... |
607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 |
period_len, MAX_XFER_BYTES); goto err_out; } while (buf < buf_len) { struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i]; if (i + 1 == num_periods) ccw->next = mxs_chan->ccw_phys; else ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1); ccw->bufaddr = dma_addr; ccw->xfer_bytes = period_len; ccw->bits = 0; ccw->bits |= CCW_CHAIN; ccw->bits |= CCW_IRQ; ccw->bits |= CCW_HALT_ON_TERM; ccw->bits |= CCW_TERM_FLUSH; |
2dcbdce36 dma: mxs-dma: Use... |
627 |
ccw->bits |= CCW_DEC_SEM; |
db8196df4 dmaengine: move d... |
628 |
ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ? |
a580b8c54 dmaengine: mxs-dm... |
629 630 631 632 633 634 635 |
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); dma_addr += period_len; buf += period_len; i++; } |
6d23ea4b1 dma: mxs-dma: mak... |
636 |
mxs_chan->desc_count = i; |
a580b8c54 dmaengine: mxs-dm... |
637 638 639 640 641 642 643 |
return &mxs_chan->desc; err_out: mxs_chan->status = DMA_ERROR; return NULL; } |
5c9d2e37a dmaengine: mxs: S... |
644 |
/* Abort the channel: reset it, then mark it idle. Always returns 0. */
static int mxs_dma_terminate_all(struct dma_chan *chan)
{
	mxs_dma_reset_chan(chan);
	mxs_dma_disable_chan(chan);

	return 0;
}

/*
 * Report channel status. For a running cyclic transfer the residue is
 * computed from the hardware's current buffer address (BAR) relative to
 * the end of the last period's buffer; otherwise residue is 0.
 */
static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	u32 residue = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS &&
			mxs_chan->flags & MXS_DMA_SG_LOOP) {
		struct mxs_dma_ccw *last_ccw;
		u32 bar;

		/* end address of the whole cyclic buffer */
		last_ccw = &mxs_chan->ccw[mxs_chan->desc_count - 1];
		residue = last_ccw->xfer_bytes + last_ccw->bufaddr;

		bar = readl(mxs_dma->base +
				HW_APBHX_CHn_BAR(mxs_dma, chan->chan_id));
		residue -= bar;
	}

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			residue);

	return mxs_chan->status;
}
819221d03 LF-251-1: dma: mx... |
677 678 679 680 681 682 683 684 685 686 687 688 |
static int mxs_dma_init_rpm(struct mxs_dma_engine *mxs_dma) { struct device *dev = &mxs_dma->pdev->dev; pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, MXS_DMA_RPM_TIMEOUT); pm_runtime_use_autosuspend(dev); return 0; } static int mxs_dma_init(struct mxs_dma_engine *mxs_dma) |
a580b8c54 dmaengine: mxs-dm... |
689 |
{ |
819221d03 LF-251-1: dma: mx... |
690 |
struct device *dev = &mxs_dma->pdev->dev; |
a580b8c54 dmaengine: mxs-dm... |
691 |
int ret; |
819221d03 LF-251-1: dma: mx... |
692 |
ret = mxs_dma_init_rpm(mxs_dma); |
a580b8c54 dmaengine: mxs-dm... |
693 |
if (ret) |
feb397de6 dma: mxs-dma: Alw... |
694 |
return ret; |
a580b8c54 dmaengine: mxs-dm... |
695 |
|
819221d03 LF-251-1: dma: mx... |
696 697 698 699 700 701 |
ret = pm_runtime_get_sync(dev); if (ret < 0) { dev_err(dev, "Failed to enable clock "); return ret; } |
f5b7efccd dma: mxs-dma: use... |
702 |
ret = stmp_reset_block(mxs_dma->base); |
a580b8c54 dmaengine: mxs-dm... |
703 704 |
if (ret) goto err_out; |
a580b8c54 dmaengine: mxs-dm... |
705 |
/* enable apbh burst */ |
bb11fb63f dma: mxs-dma: let... |
706 |
if (dma_is_apbh(mxs_dma)) { |
a580b8c54 dmaengine: mxs-dm... |
707 |
writel(BM_APBH_CTRL0_APB_BURST_EN, |
f5b7efccd dma: mxs-dma: use... |
708 |
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); |
a580b8c54 dmaengine: mxs-dm... |
709 |
writel(BM_APBH_CTRL0_APB_BURST8_EN, |
f5b7efccd dma: mxs-dma: use... |
710 |
mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET); |
a580b8c54 dmaengine: mxs-dm... |
711 712 713 714 |
} /* enable irq for all the channels */ writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, |
f5b7efccd dma: mxs-dma: use... |
715 |
mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET); |
a580b8c54 dmaengine: mxs-dm... |
716 |
|
a580b8c54 dmaengine: mxs-dm... |
717 |
err_out: |
819221d03 LF-251-1: dma: mx... |
718 719 |
pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); |
a580b8c54 dmaengine: mxs-dm... |
720 721 |
return ret; } |
d84f638b0 dma: mxs-dma: mov... |
722 |
/* Channel-request argument passed from mxs_dma_xlate() to the filter. */
struct mxs_dma_filter_param {
	unsigned int chan_id;
};

/*
 * dma_filter_fn used for OF channel requests: accept only channels that
 * belong to this driver and match the requested channel id, and record
 * the channel's irq (looked up from the platform device) on success.
 */
static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct mxs_dma_filter_param *param = fn_param;
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_irq;

	/* reject channels offered by other DMA drivers */
	if (strcmp(chan->device->dev->driver->name, "mxs-dma"))
		return false;

	if (!mxs_dma)
		return false;

	if (chan->chan_id != param->chan_id)
		return false;

	chan_irq = platform_get_irq(mxs_dma->pdev, param->chan_id);
	if (chan_irq < 0)
		return false;

	mxs_chan->chan_irq = chan_irq;

	return true;
}
3208b3701 dma: mxs-dma: Sta... |
748 |
/*
 * OF translation hook: map a one-cell DT dma specifier (the channel id)
 * onto a channel of this engine via mxs_dma_filter_fn().
 * Returns the requested channel, or NULL if the spec is malformed or the
 * id is out of range.
 */
static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
			       struct of_dma *ofdma)
{
	struct mxs_dma_engine *mxs_dma = ofdma->of_dma_data;
	dma_cap_mask_t mask = mxs_dma->dma_device.cap_mask;
	struct mxs_dma_filter_param param;

	if (dma_spec->args_count != 1)
		return NULL;

	param.chan_id = dma_spec->args[0];

	if (param.chan_id >= mxs_dma->nr_channels)
		return NULL;

	return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
				     ofdma->of_node);
}
20ca14856 dma: mxs-dma: cha... |
764 |
static int mxs_dma_probe(struct platform_device *pdev) |
a580b8c54 dmaengine: mxs-dm... |
765 |
{ |
d84f638b0 dma: mxs-dma: mov... |
766 |
struct device_node *np = pdev->dev.of_node; |
90c9abc5b dma: mxs-dma: add... |
767 768 769 |
const struct platform_device_id *id_entry; const struct of_device_id *of_id; const struct mxs_dma_type *dma_type; |
a580b8c54 dmaengine: mxs-dm... |
770 771 |
struct mxs_dma_engine *mxs_dma; struct resource *iores; |
864537f2e LF-251-2: dma: mx... |
772 |
struct dma_pool *ccw_pool; |
a580b8c54 dmaengine: mxs-dm... |
773 |
int ret, i; |
aaa20517c dma: mxs-dma: use... |
774 |
mxs_dma = devm_kzalloc(&pdev->dev, sizeof(*mxs_dma), GFP_KERNEL); |
a580b8c54 dmaengine: mxs-dm... |
775 776 |
if (!mxs_dma) return -ENOMEM; |
d84f638b0 dma: mxs-dma: mov... |
777 778 779 780 781 782 |
ret = of_property_read_u32(np, "dma-channels", &mxs_dma->nr_channels); if (ret) { dev_err(&pdev->dev, "failed to read dma-channels "); return ret; } |
90c9abc5b dma: mxs-dma: add... |
783 784 785 786 787 788 789 |
of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev); if (of_id) id_entry = of_id->data; else id_entry = platform_get_device_id(pdev); dma_type = (struct mxs_dma_type *)id_entry->driver_data; |
8c9201364 dma: mxs-dma: mak... |
790 |
mxs_dma->type = dma_type->type; |
90c9abc5b dma: mxs-dma: add... |
791 |
mxs_dma->dev_id = dma_type->id; |
a580b8c54 dmaengine: mxs-dm... |
792 793 |
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
aaa20517c dma: mxs-dma: use... |
794 795 796 |
mxs_dma->base = devm_ioremap_resource(&pdev->dev, iores); if (IS_ERR(mxs_dma->base)) return PTR_ERR(mxs_dma->base); |
a580b8c54 dmaengine: mxs-dm... |
797 |
|
aaa20517c dma: mxs-dma: use... |
798 799 800 |
mxs_dma->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(mxs_dma->clk)) return PTR_ERR(mxs_dma->clk); |
a580b8c54 dmaengine: mxs-dm... |
801 802 803 804 805 806 807 808 809 810 811 812 |
dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask); dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask); INIT_LIST_HEAD(&mxs_dma->dma_device.channels); /* Initialize channel parameters */ for (i = 0; i < MXS_DMA_CHANNELS; i++) { struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; mxs_chan->mxs_dma = mxs_dma; mxs_chan->chan.device = &mxs_dma->dma_device; |
8ac695463 dmaengine: ensure... |
813 |
dma_cookie_init(&mxs_chan->chan); |
a580b8c54 dmaengine: mxs-dm... |
814 |
|
6afe87786 dmaengine: mxs-dm... |
815 |
tasklet_setup(&mxs_chan->tasklet, mxs_dma_tasklet); |
a580b8c54 dmaengine: mxs-dm... |
816 |
|
a580b8c54 dmaengine: mxs-dm... |
817 818 819 820 |
/* Add the channel to mxs_chan list */ list_add_tail(&mxs_chan->chan.device_node, &mxs_dma->dma_device.channels); } |
819221d03 LF-251-1: dma: mx... |
821 822 |
platform_set_drvdata(pdev, mxs_dma); mxs_dma->pdev = pdev; |
a580b8c54 dmaengine: mxs-dm... |
823 824 |
ret = mxs_dma_init(mxs_dma); if (ret) |
aaa20517c dma: mxs-dma: use... |
825 |
return ret; |
a580b8c54 dmaengine: mxs-dm... |
826 827 |
mxs_dma->dma_device.dev = &pdev->dev; |
864537f2e LF-251-2: dma: mx... |
828 829 830 831 832 833 834 835 836 837 |
/* create the dma pool */ ccw_pool = dma_pool_create("ccw_pool", mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, 32, 0); for (i = 0; i < MXS_DMA_CHANNELS; i++) { struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; mxs_chan->ccw_pool = ccw_pool; } |
a580b8c54 dmaengine: mxs-dm... |
838 |
/* mxs_dma gets 65535 bytes maximum sg size */ |
a580b8c54 dmaengine: mxs-dm... |
839 840 841 842 843 844 845 |
dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES); mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources; mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources; mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; |
5c9d2e37a dmaengine: mxs: S... |
846 847 848 |
mxs_dma->dma_device.device_pause = mxs_dma_pause_chan; mxs_dma->dma_device.device_resume = mxs_dma_resume_chan; mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all; |
ef9d2a923 dmaengine: mxs-dm... |
849 850 851 852 |
mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
5c9d2e37a dmaengine: mxs: S... |
853 |
mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan; |
a580b8c54 dmaengine: mxs-dm... |
854 |
|
fbb69ece0 dmaengine: mxs-dm... |
855 |
ret = dmaenginem_async_device_register(&mxs_dma->dma_device); |
a580b8c54 dmaengine: mxs-dm... |
856 857 858 |
if (ret) { dev_err(mxs_dma->dma_device.dev, "unable to register "); |
aaa20517c dma: mxs-dma: use... |
859 |
return ret; |
a580b8c54 dmaengine: mxs-dm... |
860 |
} |
d84f638b0 dma: mxs-dma: mov... |
861 862 863 864 865 |
ret = of_dma_controller_register(np, mxs_dma_xlate, mxs_dma); if (ret) { dev_err(mxs_dma->dma_device.dev, "failed to register controller "); |
d84f638b0 dma: mxs-dma: mov... |
866 |
} |
a580b8c54 dmaengine: mxs-dm... |
867 868 869 870 |
dev_info(mxs_dma->dma_device.dev, "initialized "); return 0; |
a580b8c54 dmaengine: mxs-dm... |
871 |
} |
819221d03 LF-251-1: dma: mx... |
872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 |
static int mxs_dma_remove(struct platform_device *pdev) { struct mxs_dma_engine *mxs_dma = platform_get_drvdata(pdev); int i; dma_async_device_unregister(&mxs_dma->dma_device); dma_pool_destroy(mxs_dma->mxs_chans[0].ccw_pool); for (i = 0; i < MXS_DMA_CHANNELS; i++) { struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; tasklet_kill(&mxs_chan->tasklet); mxs_chan->ccw_pool = NULL; } return 0; } #ifdef CONFIG_PM_SLEEP static int mxs_dma_pm_suspend(struct device *dev) { int ret; ret = pm_runtime_force_suspend(dev); return ret; } static int mxs_dma_pm_resume(struct device *dev) { struct mxs_dma_engine *mxs_dma = dev_get_drvdata(dev); int ret; ret = mxs_dma_init(mxs_dma); if (ret) return ret; return 0; } #endif int mxs_dma_runtime_suspend(struct device *dev) { struct mxs_dma_engine *mxs_dma = dev_get_drvdata(dev); clk_disable_unprepare(mxs_dma->clk); return 0; } int mxs_dma_runtime_resume(struct device *dev) { struct mxs_dma_engine *mxs_dma = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(mxs_dma->clk); if (ret) { dev_err(&mxs_dma->pdev->dev, "failed to enable the clock "); return ret; } return 0; } static const struct dev_pm_ops mxs_dma_pm_ops = { SET_RUNTIME_PM_OPS(mxs_dma_runtime_suspend, mxs_dma_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(mxs_dma_pm_suspend, mxs_dma_pm_resume) }; |
a580b8c54 dmaengine: mxs-dm... |
941 942 943 |
static struct platform_driver mxs_dma_driver = { .driver = { .name = "mxs-dma", |
819221d03 LF-251-1: dma: mx... |
944 |
.pm = &mxs_dma_pm_ops, |
90c9abc5b dma: mxs-dma: add... |
945 |
.of_match_table = mxs_dma_dt_ids, |
a580b8c54 dmaengine: mxs-dm... |
946 |
}, |
8c9201364 dma: mxs-dma: mak... |
947 |
.id_table = mxs_dma_ids, |
819221d03 LF-251-1: dma: mx... |
948 |
.remove = mxs_dma_remove, |
20ca14856 dma: mxs-dma: cha... |
949 |
.probe = mxs_dma_probe, |
a580b8c54 dmaengine: mxs-dm... |
950 |
}; |
20ca14856 dma: mxs-dma: cha... |
951 |
module_platform_driver(mxs_dma_driver); |