Commit 41c556a8cb22dcb4751fed3fb3d2ccc37f945646
Committed by: Vinod Koul
Parent: ce3a1ab742
Exists in: smarc-l5.0.0_1.0.0-ga and 5 other branches
dma: mxs-dma: Export missing symbols from mxs-dma.c
mxs-dma.c provides two functions, mxs_dma_is_apbh() and mxs_dma_is_apbx(),
which are used at least in mxs-mmc.c. Building mxs-mmc as a module fails
because those two symbols are not exported.

Signed-off-by: Attila Kinali <attila@kinali.ch>
Acked-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
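For context, a minimal sketch of the failure mode this commit fixes: any driver built as a module that calls these helpers needs the symbols exported, otherwise modpost reports them as undefined at build time. The filter function below is a hypothetical, simplified stand-in for a client such as mxs-mmc; it is not taken from mxs-mmc.c.

/* Hypothetical modular client of mxs-dma (simplified; not from mxs-mmc.c).
 * Without the EXPORT_SYMBOL_GPL() lines added by this commit, building
 * this file as a module fails at modpost with errors like
 * "undefined symbol: mxs_dma_is_apbh". */
#include <linux/dmaengine.h>
#include <linux/fsl/mxs-dma.h>

static bool example_dma_filter(struct dma_chan *chan, void *param)
{
	/* Accept only channels that belong to the APBH engine. */
	return mxs_dma_is_apbh(chan);
}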
Showing 1 changed file with 2 additions and 0 deletions
drivers/dma/mxs-dma.c
/*
 * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * Refer to drivers/dma/imx-sdma.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/fsl/mxs-dma.h>
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/irq.h>

#include "dmaengine.h"

/*
 * NOTE: The term "PIO" throughout the mxs-dma implementation means
 * PIO mode of mxs apbh-dma and apbx-dma.  With this working mode,
 * dma can program the controller registers of peripheral devices.
 */

#define dma_is_apbh(mxs_dma)	((mxs_dma)->type == MXS_DMA_APBH)
#define apbh_is_old(mxs_dma)	((mxs_dma)->dev_id == IMX23_DMA)

#define HW_APBHX_CTRL0				0x000
#define BM_APBH_CTRL0_APB_BURST8_EN		(1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN		(1 << 28)
#define BP_APBH_CTRL0_RESET_CHANNEL		16
#define HW_APBHX_CTRL1				0x010
#define HW_APBHX_CTRL2				0x020
#define HW_APBHX_CHANNEL_CTRL			0x030
#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL	16
/*
 * The offset of NXTCMDAR register is different per both dma type and version,
 * while stride for each channel is all the same 0x70.
 */
#define HW_APBHX_CHn_NXTCMDAR(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
#define HW_APBHX_CHn_SEMA(d, n) \
	(((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)

/*
 * ccw bits definitions
 *
 * COMMAND:		0..1	(2)
 * CHAIN:		2	(1)
 * IRQ:			3	(1)
 * NAND_LOCK:		4	(1) - not implemented
 * NAND_WAIT4READY:	5	(1) - not implemented
 * DEC_SEM:		6	(1)
 * WAIT4END:		7	(1)
 * HALT_ON_TERMINATE:	8	(1)
 * TERMINATE_FLUSH:	9	(1)
 * RESERVED:		10..11	(2)
 * PIO_NUM:		12..15	(4)
 */
#define BP_CCW_COMMAND		0
#define BM_CCW_COMMAND		(3 << 0)
#define CCW_CHAIN		(1 << 2)
#define CCW_IRQ			(1 << 3)
#define CCW_DEC_SEM		(1 << 6)
#define CCW_WAIT4END		(1 << 7)
#define CCW_HALT_ON_TERM	(1 << 8)
#define CCW_TERM_FLUSH		(1 << 9)
#define BP_CCW_PIO_NUM		12
#define BM_CCW_PIO_NUM		(0xf << 12)

#define BF_CCW(value, field)	(((value) << BP_CCW_##field) & BM_CCW_##field)

#define MXS_DMA_CMD_NO_XFER	0
#define MXS_DMA_CMD_WRITE	1
#define MXS_DMA_CMD_READ	2
#define MXS_DMA_CMD_DMA_SENSE	3	/* not implemented */

struct mxs_dma_ccw {
	u32		next;
	u16		bits;
	u16		xfer_bytes;
#define MAX_XFER_BYTES	0xff00
	u32		bufaddr;
#define MXS_PIO_WORDS	16
	u32		pio_words[MXS_PIO_WORDS];
};

#define NUM_CCW	(int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw))

struct mxs_dma_chan {
	struct mxs_dma_engine		*mxs_dma;
	struct dma_chan			chan;
	struct dma_async_tx_descriptor	desc;
	struct tasklet_struct		tasklet;
	int				chan_irq;
	struct mxs_dma_ccw		*ccw;
	dma_addr_t			ccw_phys;
	int				desc_count;
	enum dma_status			status;
	unsigned int			flags;
#define MXS_DMA_SG_LOOP			(1 << 0)
};

#define MXS_DMA_CHANNELS		16
#define MXS_DMA_CHANNELS_MASK		0xffff

enum mxs_dma_devtype {
	MXS_DMA_APBH,
	MXS_DMA_APBX,
};

enum mxs_dma_id {
	IMX23_DMA,
	IMX28_DMA,
};

struct mxs_dma_engine {
	enum mxs_dma_id			dev_id;
	enum mxs_dma_devtype		type;
	void __iomem			*base;
	struct clk			*clk;
	struct dma_device		dma_device;
	struct device_dma_parameters	dma_parms;
	struct mxs_dma_chan		mxs_chans[MXS_DMA_CHANNELS];
};

struct mxs_dma_type {
	enum mxs_dma_id id;
	enum mxs_dma_devtype type;
};

static struct mxs_dma_type mxs_dma_types[] = {
	{
		.id = IMX23_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX23_DMA,
		.type = MXS_DMA_APBX,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBH,
	}, {
		.id = IMX28_DMA,
		.type = MXS_DMA_APBX,
	}
};

static struct platform_device_id mxs_dma_ids[] = {
	{
		.name = "imx23-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[0],
	}, {
		.name = "imx23-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[1],
	}, {
		.name = "imx28-dma-apbh",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[2],
	}, {
		.name = "imx28-dma-apbx",
		.driver_data = (kernel_ulong_t) &mxs_dma_types[3],
	}, {
		/* end of list */
	}
};

static const struct of_device_id mxs_dma_dt_ids[] = {
	{ .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
	{ .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
	{ .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
	{ .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);

static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct mxs_dma_chan, chan);
}

int mxs_dma_is_apbh(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	return dma_is_apbh(mxs_dma);
}
+EXPORT_SYMBOL_GPL(mxs_dma_is_apbh);

int mxs_dma_is_apbx(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	return !dma_is_apbh(mxs_dma);
}
+EXPORT_SYMBOL_GPL(mxs_dma_is_apbx);

static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
}

static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* set cmd_addr up */
	writel(mxs_chan->ccw_phys,
		mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));

	/* write 1 to SEMA to kick off the channel */
	writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
}

static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
	mxs_chan->status = DMA_SUCCESS;
}

static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* freeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);

	mxs_chan->status = DMA_PAUSED;
}

static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
{
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int chan_id = mxs_chan->chan.chan_id;

	/* unfreeze the channel */
	if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
	else
		writel(1 << chan_id,
			mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);

	mxs_chan->status = DMA_IN_PROGRESS;
}

static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}

static void mxs_dma_tasklet(unsigned long data)
{
	struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data;

	if (mxs_chan->desc.callback)
		mxs_chan->desc.callback(mxs_chan->desc.callback_param);
}

static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
{
	struct mxs_dma_engine *mxs_dma = dev_id;
	u32 stat1, stat2;

	/* completion status */
	stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
	stat1 &= MXS_DMA_CHANNELS_MASK;
	writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);

	/* error status */
	stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
	writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);

	/*
	 * When both completion and error of termination bits set at the
	 * same time, we do not take it as an error.  IOW, it only becomes
	 * an error we need to handle here in case of either it's (1) a bus
	 * error or (2) a termination error with no completion.
	 */
	stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
		(~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */

	/* combine error and completion status for checking */
	stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1;
	while (stat1) {
		int channel = fls(stat1) - 1;
		struct mxs_dma_chan *mxs_chan =
			&mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS];

		if (channel >= MXS_DMA_CHANNELS) {
			dev_dbg(mxs_dma->dma_device.dev,
				"%s: error in channel %d\n", __func__,
				channel - MXS_DMA_CHANNELS);
			mxs_chan->status = DMA_ERROR;
			mxs_dma_reset_chan(mxs_chan);
		} else {
			if (mxs_chan->flags & MXS_DMA_SG_LOOP)
				mxs_chan->status = DMA_IN_PROGRESS;
			else
				mxs_chan->status = DMA_SUCCESS;
		}

		stat1 &= ~(1 << channel);

		if (mxs_chan->status == DMA_SUCCESS)
			dma_cookie_complete(&mxs_chan->desc);

		/* schedule tasklet on this channel */
		tasklet_schedule(&mxs_chan->tasklet);
	}

	return IRQ_HANDLED;
}

static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_data *data = chan->private;
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int ret;

	if (!data)
		return -EINVAL;

	mxs_chan->chan_irq = data->chan_irq;

	mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
				&mxs_chan->ccw_phys, GFP_KERNEL);
	if (!mxs_chan->ccw) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	memset(mxs_chan->ccw, 0, PAGE_SIZE);

	if (mxs_chan->chan_irq != NO_IRQ) {
		ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler,
					0, "mxs-dma", mxs_dma);
		if (ret)
			goto err_irq;
	}

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		goto err_clk;

	mxs_dma_reset_chan(mxs_chan);

	dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
	mxs_chan->desc.tx_submit = mxs_dma_tx_submit;

	/* the descriptor is ready */
	async_tx_ack(&mxs_chan->desc);

	return 0;

err_clk:
	free_irq(mxs_chan->chan_irq, mxs_dma);
err_irq:
	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);
err_alloc:
	return ret;
}

static void mxs_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;

	mxs_dma_disable_chan(mxs_chan);

	free_irq(mxs_chan->chan_irq, mxs_dma);

	dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
			mxs_chan->ccw, mxs_chan->ccw_phys);

	clk_disable_unprepare(mxs_dma->clk);
}

/*
 * How to use the flags for ->device_prep_slave_sg() :
 * [1] If there is only one DMA command in the DMA chain, the code should be:
 * ......
 * ->device_prep_slave_sg(DMA_CTRL_ACK);
 * ......
 * [2] If there are two DMA commands in the DMA chain, the code should be
 * ......
 * ->device_prep_slave_sg(0);
 * ......
 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 * ......
 * [3] If there are more than two DMA commands in the DMA chain, the code
 * should be:
 * ......
 * ->device_prep_slave_sg(0); // First
 * ......
 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT [| DMA_CTRL_ACK]);
 * ......
 * ->device_prep_slave_sg(DMA_PREP_INTERRUPT | DMA_CTRL_ACK); // Last
 * ......
 */
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	struct mxs_dma_ccw *ccw;
	struct scatterlist *sg;
	int i, j;
	u32 *pio;
	bool append = flags & DMA_PREP_INTERRUPT;
	int idx = append ? mxs_chan->desc_count : 0;

	if (mxs_chan->status == DMA_IN_PROGRESS && !append)
		return NULL;

	if (sg_len + (append ? idx : 0) > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				sg_len, NUM_CCW);
		goto err_out;
	}

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags = 0;

	/*
	 * If the sg is prepared with append flag set, the sg
	 * will be appended to the last prepared sg.
	 */
	if (append) {
		BUG_ON(idx < 1);
		ccw = &mxs_chan->ccw[idx - 1];
		ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
		ccw->bits |= CCW_CHAIN;
		ccw->bits &= ~CCW_IRQ;
		ccw->bits &= ~CCW_DEC_SEM;
	} else {
		idx = 0;
	}

	if (direction == DMA_TRANS_NONE) {
		ccw = &mxs_chan->ccw[idx++];
		pio = (u32 *) sgl;

		for (j = 0; j < sg_len;)
			ccw->pio_words[j++] = *pio++;

		ccw->bits = 0;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_DEC_SEM;
		if (flags & DMA_CTRL_ACK)
			ccw->bits |= CCW_WAIT4END;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(sg_len, PIO_NUM);
		ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
	} else {
		for_each_sg(sgl, sg, sg_len, i) {
			if (sg_dma_len(sg) > MAX_XFER_BYTES) {
				dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
						sg_dma_len(sg), MAX_XFER_BYTES);
				goto err_out;
			}

			ccw = &mxs_chan->ccw[idx++];

			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
			ccw->bufaddr = sg->dma_address;
			ccw->xfer_bytes = sg_dma_len(sg);

			ccw->bits = 0;
			ccw->bits |= CCW_CHAIN;
			ccw->bits |= CCW_HALT_ON_TERM;
			ccw->bits |= CCW_TERM_FLUSH;
			ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
					MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
					COMMAND);

			if (i + 1 == sg_len) {
				ccw->bits &= ~CCW_CHAIN;
				ccw->bits |= CCW_IRQ;
				ccw->bits |= CCW_DEC_SEM;
				if (flags & DMA_CTRL_ACK)
					ccw->bits |= CCW_WAIT4END;
			}
		}
	}
	mxs_chan->desc_count = idx;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		void *context)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
	int num_periods = buf_len / period_len;
	int i = 0, buf = 0;

	if (mxs_chan->status == DMA_IN_PROGRESS)
		return NULL;

	mxs_chan->status = DMA_IN_PROGRESS;
	mxs_chan->flags |= MXS_DMA_SG_LOOP;

	if (num_periods > NUM_CCW) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum number of sg exceeded: %d > %d\n",
				num_periods, NUM_CCW);
		goto err_out;
	}

	if (period_len > MAX_XFER_BYTES) {
		dev_err(mxs_dma->dma_device.dev,
				"maximum period size exceeded: %d > %d\n",
				period_len, MAX_XFER_BYTES);
		goto err_out;
	}

	while (buf < buf_len) {
		struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i];

		if (i + 1 == num_periods)
			ccw->next = mxs_chan->ccw_phys;
		else
			ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1);

		ccw->bufaddr = dma_addr;
		ccw->xfer_bytes = period_len;

		ccw->bits = 0;
		ccw->bits |= CCW_CHAIN;
		ccw->bits |= CCW_IRQ;
		ccw->bits |= CCW_HALT_ON_TERM;
		ccw->bits |= CCW_TERM_FLUSH;
		ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
				MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);

		dma_addr += period_len;
		buf += period_len;

		i++;
	}
	mxs_chan->desc_count = i;

	return &mxs_chan->desc;

err_out:
	mxs_chan->status = DMA_ERROR;
	return NULL;
}

static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	int ret = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		mxs_dma_reset_chan(mxs_chan);
		mxs_dma_disable_chan(mxs_chan);
		break;
	case DMA_PAUSE:
		mxs_dma_pause_chan(mxs_chan);
		break;
	case DMA_RESUME:
		mxs_dma_resume_chan(mxs_chan);
		break;
	default:
		ret = -ENOSYS;
	}

	return ret;
}

static enum dma_status mxs_dma_tx_status(struct dma_chan *chan,
			dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
	dma_cookie_t last_used;

	last_used = chan->cookie;
	dma_set_tx_state(txstate, chan->completed_cookie, last_used, 0);

	return mxs_chan->status;
}

static void mxs_dma_issue_pending(struct dma_chan *chan)
{
	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);

	mxs_dma_enable_chan(mxs_chan);
}

static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
	int ret;

	ret = clk_prepare_enable(mxs_dma->clk);
	if (ret)
		return ret;

	ret = stmp_reset_block(mxs_dma->base);
	if (ret)
		goto err_out;

	/* enable apbh burst */
	if (dma_is_apbh(mxs_dma)) {
		writel(BM_APBH_CTRL0_APB_BURST_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
		writel(BM_APBH_CTRL0_APB_BURST8_EN,
			mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
	}

	/* enable irq for all the channels */
	writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
		mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);

err_out:
	clk_disable_unprepare(mxs_dma->clk);
	return ret;
}

static int __init mxs_dma_probe(struct platform_device *pdev)
{
	const struct platform_device_id *id_entry;
	const struct of_device_id *of_id;
	const struct mxs_dma_type *dma_type;
	struct mxs_dma_engine *mxs_dma;
	struct resource *iores;
	int ret, i;

	mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL);
	if (!mxs_dma)
		return -ENOMEM;

	of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
	if (of_id)
		id_entry = of_id->data;
	else
		id_entry = platform_get_device_id(pdev);

	dma_type = (struct mxs_dma_type *)id_entry->driver_data;
	mxs_dma->type = dma_type->type;
	mxs_dma->dev_id = dma_type->id;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!request_mem_region(iores->start, resource_size(iores),
				pdev->name)) {
		ret = -EBUSY;
		goto err_request_region;
	}

	mxs_dma->base = ioremap(iores->start, resource_size(iores));
	if (!mxs_dma->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	mxs_dma->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(mxs_dma->clk)) {
		ret = PTR_ERR(mxs_dma->clk);
		goto err_clk;
	}

	dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask);

	INIT_LIST_HEAD(&mxs_dma->dma_device.channels);

	/* Initialize channel parameters */
	for (i = 0; i < MXS_DMA_CHANNELS; i++) {
		struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i];

		mxs_chan->mxs_dma = mxs_dma;
		mxs_chan->chan.device = &mxs_dma->dma_device;
		dma_cookie_init(&mxs_chan->chan);

		tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet,
			     (unsigned long) mxs_chan);


		/* Add the channel to mxs_chan list */
		list_add_tail(&mxs_chan->chan.device_node,
			&mxs_dma->dma_device.channels);
	}

	ret = mxs_dma_init(mxs_dma);
	if (ret)
		goto err_init;

	mxs_dma->dma_device.dev = &pdev->dev;

	/* mxs_dma gets 65535 bytes maximum sg size */
	mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms;
	dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES);

	mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources;
	mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources;
	mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status;
	mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg;
	mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic;
	mxs_dma->dma_device.device_control = mxs_dma_control;
	mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending;

	ret = dma_async_device_register(&mxs_dma->dma_device);
	if (ret) {
		dev_err(mxs_dma->dma_device.dev, "unable to register\n");
		goto err_init;
	}

	dev_info(mxs_dma->dma_device.dev, "initialized\n");

	return 0;

err_init:
	clk_put(mxs_dma->clk);
err_clk:
	iounmap(mxs_dma->base);
err_ioremap:
	release_mem_region(iores->start, resource_size(iores));
err_request_region:
	kfree(mxs_dma);
	return ret;
}

static struct platform_driver mxs_dma_driver = {
	.driver		= {
		.name	= "mxs-dma",
		.of_match_table = mxs_dma_dt_ids,
	},
	.id_table	= mxs_dma_ids,
};

static int __init mxs_dma_module_init(void)
{
	return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe);
}
subsys_initcall(mxs_dma_module_init);
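Editor's note on the flag protocol documented above mxs_dma_prep_slave_sg(): in this driver, DMA_PREP_INTERRUPT means "append this segment to the previously prepared CCW chain", and DMA_CTRL_ACK on the final call additionally sets CCW_WAIT4END. The fragment below is a hedged sketch of a client chaining three commands under that protocol; the function name, channel, and scatterlists are hypothetical placeholders, not code from any in-tree driver.

/* Hedged sketch: chaining three DMA commands per the comment block
 * above mxs_dma_prep_slave_sg(). All names here are hypothetical. */
#include <linux/dmaengine.h>

static int example_run_chain(struct dma_chan *chan,
			     struct scatterlist *sg_cmd,
			     struct scatterlist *sg_data,
			     struct scatterlist *sg_status)
{
	struct dma_async_tx_descriptor *desc;

	/* First command: flags == 0 starts a fresh CCW chain. */
	desc = dmaengine_prep_slave_sg(chan, sg_cmd, 1, DMA_MEM_TO_DEV, 0);
	if (!desc)
		return -EINVAL;

	/* Middle command: DMA_PREP_INTERRUPT appends to the chain. */
	desc = dmaengine_prep_slave_sg(chan, sg_data, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	/* Last command: DMA_CTRL_ACK also sets CCW_WAIT4END on the tail. */
	desc = dmaengine_prep_slave_sg(chan, sg_status, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}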