Commit 4092dc8f0bcc0963c045460157107d7461a094d7
Exists in ti-lsk-linux-4.1.y and 10 other branches
Merge branch 'fixes' of git://git.infradead.org/users/vkoul/slave-dma
Pull slave-dmaengine fixes from Vinod Koul:
 "Two small fixes for the omap dmaengine driver which fix cyclic
  suspend and resume"

* 'fixes' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: omap-dma: Restore the CLNK_CTRL in resume path
  dmaengine: omap-dma: Add memory barrier to dma_resume path
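Both changed hunks live in the driver's pause/resume handlers, which fall below the portion of the file visible in this truncated diff. As a rough sketch only, reconstructed from the two commit titles and the helpers shown in the listing below (not the verbatim upstream hunk), the resume path ends up doing something like this:

	/*
	 * Sketch, not the verbatim patch: CLNK_CTRL is re-programmed from
	 * the saved descriptor (its programming can be lost while the
	 * channel is paused), and mb() orders the register writes before
	 * the channel is re-enabled via omap_dma_start().
	 */
	static int omap_dma_resume(struct omap_chan *c)
	{
		if (c->paused) {
			mb();

			/* Restore channel link register */
			omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

			omap_dma_start(c, c->desc);
			c->paused = false;
		}

		return 0;
	}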
Showing 1 changed file: drivers/dma/omap-dma.c
/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	struct tasklet_struct task;
	struct list_head pending;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	bool legacy;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan *lch_map[32];
};

struct omap_chan {
	struct virt_dma_chan vc;
	struct list_head node;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;	/* number of elements (24-bit) */
	uint32_t fn;	/* number of frames (16-bit) */
};

struct omap_desc {
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int16_t fi;		/* for OMAP_DMA_SYNC_PACKET */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

enum {
	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	kfree(container_of(vd, struct omap_desc, vd));
}

static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}

static void omap_dma_clear_csr(struct omap_chan *c)
{
	/* On OMAP1 CSR is cleared by reading it; on OMAP2+ it is
	 * cleared by writing back the set bits. */
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
	unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}

static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, d->cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);
}

static void omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;
		unsigned i;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		/* Wait for sDMA FIFO to drain */
		for (i = 0; ; i++) {
			val = omap_dma_chan_read(c, CCR);
			if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
				break;

			if (i > 100)
				break;

			udelay(5);
		}

		if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
			dev_err(c->vc.chan.device->dev,
				"DMA drain did not complete on lch %d\n",
				c->dma_ch);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
}

static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d,
	unsigned idx)
{
	struct omap_sg *sg = d->sg + idx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, 0);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, 0);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d, 0);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (!c->cyclic) {
			if (++c->sgidx < d->sglen) {
				omap_dma_start_sg(c, d, c->sgidx);
			} else {
				omap_dma_start_desc(c);
				vchan_cookie_complete(&d->vd);
			}
		} else {
			vchan_cyclic_callback(&d->vd);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

/*
 * This callback schedules all pending channels.  We could be more
 * clever here by postponing allocation of the real DMA channels to
 * this point, and freeing them when our virtual channel becomes idle.
 *
 * We would then need to deal with 'all channels in-use'
 */
static void omap_dma_sched(unsigned long data)
{
	struct omap_dmadev *d = (struct omap_dmadev *)data;
	LIST_HEAD(head);

	spin_lock_irq(&d->lock);
	list_splice_tail_init(&d->pending, &head);
	spin_unlock_irq(&d->lock);

	while (!list_empty(&head)) {
		struct omap_chan *c = list_first_entry(&head,
			struct omap_chan, node);

		spin_lock_irq(&c->vc.lock);
		list_del_init(&c->node);
		omap_dma_start_desc(c);
		spin_unlock_irq(&c->vc.lock);
	}
}

static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}

static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	int ret;

	if (od->legacy) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
				       &c->dma_ch);
	}

	dev_dbg(od->ddev.dev, "allocating channel %u for %u\n",
		c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel for %u\n", c->dma_sig);
}

static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far).  Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}

static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc) {
		/*
		 * c->cyclic is used only by audio and in this case the DMA
		 * needs to be started without delay.
		 */
		if (!c->cyclic) {
			struct omap_dmadev *d = to_omap_dma_dev(chan->device);
			spin_lock(&d->lock);
			if (list_empty(&c->node))
				list_add_tail(&c->node, &d->pending);
			spin_unlock(&d->lock);
			tasklet_schedule(&d->task);
		} else {
			omap_dma_start_desc(c);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, j = 0, es, en, frame_bytes;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN).  Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;
	for_each_sg(sgl, sgent, sglen, i) {
		d->sg[j].addr = sg_dma_address(sgent);
		d->sg[j].en = en;
		d->sg[j].fn = sg_dma_len(sgent) / frame_bytes;
		j++;
	}

	d->sglen = j;

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

854 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( | 854 | static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( |
855 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, | 855 | struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, |
856 | size_t period_len, enum dma_transfer_direction dir, unsigned long flags) | 856 | size_t period_len, enum dma_transfer_direction dir, unsigned long flags) |
857 | { | 857 | { |
858 | struct omap_dmadev *od = to_omap_dma_dev(chan->device); | 858 | struct omap_dmadev *od = to_omap_dma_dev(chan->device); |
859 | struct omap_chan *c = to_omap_dma_chan(chan); | 859 | struct omap_chan *c = to_omap_dma_chan(chan); |
860 | enum dma_slave_buswidth dev_width; | 860 | enum dma_slave_buswidth dev_width; |
861 | struct omap_desc *d; | 861 | struct omap_desc *d; |
862 | dma_addr_t dev_addr; | 862 | dma_addr_t dev_addr; |
863 | unsigned es; | 863 | unsigned es; |
864 | u32 burst; | 864 | u32 burst; |
865 | 865 | ||
866 | if (dir == DMA_DEV_TO_MEM) { | 866 | if (dir == DMA_DEV_TO_MEM) { |
867 | dev_addr = c->cfg.src_addr; | 867 | dev_addr = c->cfg.src_addr; |
868 | dev_width = c->cfg.src_addr_width; | 868 | dev_width = c->cfg.src_addr_width; |
869 | burst = c->cfg.src_maxburst; | 869 | burst = c->cfg.src_maxburst; |
870 | } else if (dir == DMA_MEM_TO_DEV) { | 870 | } else if (dir == DMA_MEM_TO_DEV) { |
871 | dev_addr = c->cfg.dst_addr; | 871 | dev_addr = c->cfg.dst_addr; |
872 | dev_width = c->cfg.dst_addr_width; | 872 | dev_width = c->cfg.dst_addr_width; |
873 | burst = c->cfg.dst_maxburst; | 873 | burst = c->cfg.dst_maxburst; |
874 | } else { | 874 | } else { |
875 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); | 875 | dev_err(chan->device->dev, "%s: bad direction?\n", __func__); |
876 | return NULL; | 876 | return NULL; |
877 | } | 877 | } |
878 | 878 | ||
879 | /* Bus width translates to the element size (ES) */ | 879 | /* Bus width translates to the element size (ES) */ |
880 | switch (dev_width) { | 880 | switch (dev_width) { |
881 | case DMA_SLAVE_BUSWIDTH_1_BYTE: | 881 | case DMA_SLAVE_BUSWIDTH_1_BYTE: |
882 | es = CSDP_DATA_TYPE_8; | 882 | es = CSDP_DATA_TYPE_8; |
883 | break; | 883 | break; |
884 | case DMA_SLAVE_BUSWIDTH_2_BYTES: | 884 | case DMA_SLAVE_BUSWIDTH_2_BYTES: |
885 | es = CSDP_DATA_TYPE_16; | 885 | es = CSDP_DATA_TYPE_16; |
886 | break; | 886 | break; |
887 | case DMA_SLAVE_BUSWIDTH_4_BYTES: | 887 | case DMA_SLAVE_BUSWIDTH_4_BYTES: |
888 | es = CSDP_DATA_TYPE_32; | 888 | es = CSDP_DATA_TYPE_32; |
889 | break; | 889 | break; |
890 | default: /* not reached */ | 890 | default: /* not reached */ |
891 | return NULL; | 891 | return NULL; |
892 | } | 892 | } |
893 | 893 | ||
894 | /* Now allocate and setup the descriptor. */ | 894 | /* Now allocate and setup the descriptor. */ |
895 | d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); | 895 | d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); |
896 | if (!d) | 896 | if (!d) |
897 | return NULL; | 897 | return NULL; |
898 | 898 | ||
899 | d->dir = dir; | 899 | d->dir = dir; |
900 | d->dev_addr = dev_addr; | 900 | d->dev_addr = dev_addr; |
901 | d->fi = burst; | 901 | d->fi = burst; |
902 | d->es = es; | 902 | d->es = es; |
903 | d->sg[0].addr = buf_addr; | 903 | d->sg[0].addr = buf_addr; |
904 | d->sg[0].en = period_len / es_bytes[es]; | 904 | d->sg[0].en = period_len / es_bytes[es]; |
905 | d->sg[0].fn = buf_len / period_len; | 905 | d->sg[0].fn = buf_len / period_len; |
906 | d->sglen = 1; | 906 | d->sglen = 1; |
907 | 907 | ||
908 | d->ccr = c->ccr; | 908 | d->ccr = c->ccr; |
909 | if (dir == DMA_DEV_TO_MEM) | 909 | if (dir == DMA_DEV_TO_MEM) |
910 | d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; | 910 | d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT; |
911 | else | 911 | else |
912 | d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; | 912 | d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC; |
913 | 913 | ||
914 | d->cicr = CICR_DROP_IE; | 914 | d->cicr = CICR_DROP_IE; |
915 | if (flags & DMA_PREP_INTERRUPT) | 915 | if (flags & DMA_PREP_INTERRUPT) |
916 | d->cicr |= CICR_FRAME_IE; | 916 | d->cicr |= CICR_FRAME_IE; |
917 | 917 | ||
918 | d->csdp = es; | 918 | d->csdp = es; |
919 | 919 | ||
920 | if (dma_omap1()) { | 920 | if (dma_omap1()) { |
921 | d->cicr |= CICR_TOUT_IE; | 921 | d->cicr |= CICR_TOUT_IE; |
922 | 922 | ||
923 | if (dir == DMA_DEV_TO_MEM) | 923 | if (dir == DMA_DEV_TO_MEM) |
924 | d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI; | 924 | d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI; |
925 | else | 925 | else |
926 | d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF; | 926 | d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF; |
927 | } else { | 927 | } else { |
928 | if (burst) | 928 | if (burst) |
929 | d->ccr |= CCR_SYNC_PACKET; | 929 | d->ccr |= CCR_SYNC_PACKET; |
930 | else | 930 | else |
931 | d->ccr |= CCR_SYNC_ELEMENT; | 931 | d->ccr |= CCR_SYNC_ELEMENT; |
932 | 932 | ||
933 | if (dir == DMA_DEV_TO_MEM) | 933 | if (dir == DMA_DEV_TO_MEM) |
934 | d->ccr |= CCR_TRIGGER_SRC; | 934 | d->ccr |= CCR_TRIGGER_SRC; |
935 | 935 | ||
936 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; | 936 | d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; |
937 | 937 | ||
938 | d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; | 938 | d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64; |
939 | } | 939 | } |
940 | 940 | ||
941 | if (__dma_omap15xx(od->plat->dma_attr)) | 941 | if (__dma_omap15xx(od->plat->dma_attr)) |
942 | d->ccr |= CCR_AUTO_INIT | CCR_REPEAT; | 942 | d->ccr |= CCR_AUTO_INIT | CCR_REPEAT; |
943 | else | 943 | else |
944 | d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK; | 944 | d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK; |
945 | 945 | ||
946 | c->cyclic = true; | 946 | c->cyclic = true; |
947 | 947 | ||
948 | return vchan_tx_prep(&c->vc, &d->vd, flags); | 948 | return vchan_tx_prep(&c->vc, &d->vd, flags); |
949 | } | 949 | } |
950 | 950 | ||
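For reference, the en/fn arithmetic above slices the cyclic buffer into fn frames of en elements each. A minimal standalone sketch with illustrative numbers (the buffer and period sizes here are made up, not taken from the driver):

    #include <stdio.h>

    int main(void)
    {
        /* Illustrative cyclic-audio numbers, not from the driver. */
        unsigned es_bytes   = 2;    /* CSDP_DATA_TYPE_16 -> 2 bytes per element */
        unsigned period_len = 1024; /* bytes per period (one interrupt each)    */
        unsigned buf_len    = 4096; /* whole ring buffer, in bytes              */

        unsigned en = period_len / es_bytes; /* elements per frame: 512 */
        unsigned fn = buf_len / period_len;  /* frames per buffer:  4   */

        printf("en=%u fn=%u\n", en, fn);
        return 0;
    }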
951 | static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) | 951 | static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) |
952 | { | 952 | { |
953 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | 953 | if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || |
954 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | 954 | cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) |
955 | return -EINVAL; | 955 | return -EINVAL; |
956 | 956 | ||
957 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); | 957 | memcpy(&c->cfg, cfg, sizeof(c->cfg)); |
958 | 958 | ||
959 | return 0; | 959 | return 0; |
960 | } | 960 | } |
961 | 961 | ||
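Clients reach omap_dma_slave_config() through the standard dmaengine wrapper. A hedged sketch (the function name, FIFO address parameter, width and burst below are hypothetical; note that 8-byte widths are rejected with -EINVAL above):

    #include <linux/dmaengine.h>

    /* Hypothetical peripheral: a 32-bit RX FIFO for DEV_TO_MEM transfers. */
    static int my_client_config(struct dma_chan *chan, dma_addr_t fifo_addr)
    {
        struct dma_slave_config cfg = {
            .direction      = DMA_DEV_TO_MEM,
            .src_addr       = fifo_addr,
            .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
            .src_maxburst   = 16,
        };

        /* Funnels into omap_dma_slave_config() via device_control(). */
        return dmaengine_slave_config(chan, &cfg);
    }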
962 | static int omap_dma_terminate_all(struct omap_chan *c) | 962 | static int omap_dma_terminate_all(struct omap_chan *c) |
963 | { | 963 | { |
964 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); | 964 | struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); |
965 | unsigned long flags; | 965 | unsigned long flags; |
966 | LIST_HEAD(head); | 966 | LIST_HEAD(head); |
967 | 967 | ||
968 | spin_lock_irqsave(&c->vc.lock, flags); | 968 | spin_lock_irqsave(&c->vc.lock, flags); |
969 | 969 | ||
970 | /* Prevent this channel being scheduled */ | 970 | /* Prevent this channel being scheduled */ |
971 | spin_lock(&d->lock); | 971 | spin_lock(&d->lock); |
972 | list_del_init(&c->node); | 972 | list_del_init(&c->node); |
973 | spin_unlock(&d->lock); | 973 | spin_unlock(&d->lock); |
974 | 974 | ||
975 | /* | 975 | /* |
976 | * Stop DMA activity: we assume the callback will not be called | 976 | * Stop DMA activity: we assume the callback will not be called |
977 | * after omap_dma_stop() returns (even if it does, it will see | 977 | * after omap_dma_stop() returns (even if it does, it will see |
978 | * c->desc is NULL and exit.) | 978 | * c->desc is NULL and exit.) |
979 | */ | 979 | */ |
980 | if (c->desc) { | 980 | if (c->desc) { |
981 | c->desc = NULL; | 981 | c->desc = NULL; |
982 | /* Avoid stopping the dma twice */ | 982 | /* Avoid stopping the dma twice */ |
983 | if (!c->paused) | 983 | if (!c->paused) |
984 | omap_dma_stop(c); | 984 | omap_dma_stop(c); |
985 | } | 985 | } |
986 | 986 | ||
987 | if (c->cyclic) { | 987 | if (c->cyclic) { |
988 | c->cyclic = false; | 988 | c->cyclic = false; |
989 | c->paused = false; | 989 | c->paused = false; |
990 | } | 990 | } |
991 | 991 | ||
992 | vchan_get_all_descriptors(&c->vc, &head); | 992 | vchan_get_all_descriptors(&c->vc, &head); |
993 | spin_unlock_irqrestore(&c->vc.lock, flags); | 993 | spin_unlock_irqrestore(&c->vc.lock, flags); |
994 | vchan_dma_desc_free_list(&c->vc, &head); | 994 | vchan_dma_desc_free_list(&c->vc, &head); |
995 | 995 | ||
996 | return 0; | 996 | return 0; |
997 | } | 997 | } |
998 | 998 | ||
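Termination likewise arrives through a standard wrapper. A minimal sketch of a hypothetical client abort path:

    #include <linux/dmaengine.h>

    /* Hypothetical: abort an in-flight transfer, e.g. on timeout. */
    static void my_client_abort(struct dma_chan *chan)
    {
        /*
         * Resolves to omap_dma_terminate_all(): the channel is taken
         * off the pending list, stopped if still running, and every
         * queued descriptor is freed.
         */
        dmaengine_terminate_all(chan);
    }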
999 | static int omap_dma_pause(struct omap_chan *c) | 999 | static int omap_dma_pause(struct omap_chan *c) |
1000 | { | 1000 | { |
1001 | /* Pause/Resume only allowed with cyclic mode */ | 1001 | /* Pause/Resume only allowed with cyclic mode */ |
1002 | if (!c->cyclic) | 1002 | if (!c->cyclic) |
1003 | return -EINVAL; | 1003 | return -EINVAL; |
1004 | 1004 | ||
1005 | if (!c->paused) { | 1005 | if (!c->paused) { |
1006 | omap_dma_stop(c); | 1006 | omap_dma_stop(c); |
1007 | c->paused = true; | 1007 | c->paused = true; |
1008 | } | 1008 | } |
1009 | 1009 | ||
1010 | return 0; | 1010 | return 0; |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | static int omap_dma_resume(struct omap_chan *c) | 1013 | static int omap_dma_resume(struct omap_chan *c) |
1014 | { | 1014 | { |
1015 | /* Pause/Resume only allowed with cyclic mode */ | 1015 | /* Pause/Resume only allowed with cyclic mode */ |
1016 | if (!c->cyclic) | 1016 | if (!c->cyclic) |
1017 | return -EINVAL; | 1017 | return -EINVAL; |
1018 | 1018 | ||
1019 | if (c->paused) { | 1019 | if (c->paused) { |
1020 | mb(); /* complete pending writes before the channel restarts */ | ||
1021 | |||
1022 | /* Restore channel link register */ | ||
1023 | omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl); | ||
1024 | |||
1020 | omap_dma_start(c, c->desc); | 1025 | omap_dma_start(c, c->desc); |
1021 | c->paused = false; | 1026 | c->paused = false; |
1022 | } | 1027 | } |
1023 | 1028 | ||
1024 | return 0; | 1029 | return 0; |
1025 | } | 1030 | } |
1026 | 1031 | ||
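Pause and resume are only honoured for cyclic transfers, so the natural caller is a cyclic (audio-style) client around system sleep. A hedged sketch of such hooks (the function names are hypothetical):

    #include <linux/dmaengine.h>

    static void my_client_suspend(struct dma_chan *chan)
    {
        dmaengine_pause(chan);   /* -> omap_dma_pause(): stop, mark paused */
    }

    static void my_client_resume(struct dma_chan *chan)
    {
        /*
         * -> omap_dma_resume(): barrier, restore CLNK_CTRL from the
         * saved descriptor, then omap_dma_start().
         */
        dmaengine_resume(chan);
    }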
1027 | static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | 1032 | static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
1028 | unsigned long arg) | 1033 | unsigned long arg) |
1029 | { | 1034 | { |
1030 | struct omap_chan *c = to_omap_dma_chan(chan); | 1035 | struct omap_chan *c = to_omap_dma_chan(chan); |
1031 | int ret; | 1036 | int ret; |
1032 | 1037 | ||
1033 | switch (cmd) { | 1038 | switch (cmd) { |
1034 | case DMA_SLAVE_CONFIG: | 1039 | case DMA_SLAVE_CONFIG: |
1035 | ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); | 1040 | ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); |
1036 | break; | 1041 | break; |
1037 | 1042 | ||
1038 | case DMA_TERMINATE_ALL: | 1043 | case DMA_TERMINATE_ALL: |
1039 | ret = omap_dma_terminate_all(c); | 1044 | ret = omap_dma_terminate_all(c); |
1040 | break; | 1045 | break; |
1041 | 1046 | ||
1042 | case DMA_PAUSE: | 1047 | case DMA_PAUSE: |
1043 | ret = omap_dma_pause(c); | 1048 | ret = omap_dma_pause(c); |
1044 | break; | 1049 | break; |
1045 | 1050 | ||
1046 | case DMA_RESUME: | 1051 | case DMA_RESUME: |
1047 | ret = omap_dma_resume(c); | 1052 | ret = omap_dma_resume(c); |
1048 | break; | 1053 | break; |
1049 | 1054 | ||
1050 | default: | 1055 | default: |
1051 | ret = -ENXIO; | 1056 | ret = -ENXIO; |
1052 | break; | 1057 | break; |
1053 | } | 1058 | } |
1054 | 1059 | ||
1055 | return ret; | 1060 | return ret; |
1056 | } | 1061 | } |
1057 | 1062 | ||
1058 | static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) | 1063 | static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) |
1059 | { | 1064 | { |
1060 | struct omap_chan *c; | 1065 | struct omap_chan *c; |
1061 | 1066 | ||
1062 | c = kzalloc(sizeof(*c), GFP_KERNEL); | 1067 | c = kzalloc(sizeof(*c), GFP_KERNEL); |
1063 | if (!c) | 1068 | if (!c) |
1064 | return -ENOMEM; | 1069 | return -ENOMEM; |
1065 | 1070 | ||
1066 | c->reg_map = od->reg_map; | 1071 | c->reg_map = od->reg_map; |
1067 | c->dma_sig = dma_sig; | 1072 | c->dma_sig = dma_sig; |
1068 | c->vc.desc_free = omap_dma_desc_free; | 1073 | c->vc.desc_free = omap_dma_desc_free; |
1069 | vchan_init(&c->vc, &od->ddev); | 1074 | vchan_init(&c->vc, &od->ddev); |
1070 | INIT_LIST_HEAD(&c->node); | 1075 | INIT_LIST_HEAD(&c->node); |
1071 | 1076 | ||
1072 | od->ddev.chancnt++; | 1077 | od->ddev.chancnt++; |
1073 | 1078 | ||
1074 | return 0; | 1079 | return 0; |
1075 | } | 1080 | } |
1076 | 1081 | ||
1077 | static void omap_dma_free(struct omap_dmadev *od) | 1082 | static void omap_dma_free(struct omap_dmadev *od) |
1078 | { | 1083 | { |
1079 | tasklet_kill(&od->task); | 1084 | tasklet_kill(&od->task); |
1080 | while (!list_empty(&od->ddev.channels)) { | 1085 | while (!list_empty(&od->ddev.channels)) { |
1081 | struct omap_chan *c = list_first_entry(&od->ddev.channels, | 1086 | struct omap_chan *c = list_first_entry(&od->ddev.channels, |
1082 | struct omap_chan, vc.chan.device_node); | 1087 | struct omap_chan, vc.chan.device_node); |
1083 | 1088 | ||
1084 | list_del(&c->vc.chan.device_node); | 1089 | list_del(&c->vc.chan.device_node); |
1085 | tasklet_kill(&c->vc.task); | 1090 | tasklet_kill(&c->vc.task); |
1086 | kfree(c); | 1091 | kfree(c); |
1087 | } | 1092 | } |
1088 | } | 1093 | } |
1089 | 1094 | ||
1090 | #define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | 1095 | #define OMAP_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ |
1091 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | 1096 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ |
1092 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) | 1097 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) |
1093 | 1098 | ||
1094 | static int omap_dma_device_slave_caps(struct dma_chan *dchan, | 1099 | static int omap_dma_device_slave_caps(struct dma_chan *dchan, |
1095 | struct dma_slave_caps *caps) | 1100 | struct dma_slave_caps *caps) |
1096 | { | 1101 | { |
1097 | caps->src_addr_widths = OMAP_DMA_BUSWIDTHS; | 1102 | caps->src_addr_widths = OMAP_DMA_BUSWIDTHS; |
1098 | caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS; | 1103 | caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS; |
1099 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | 1104 | caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); |
1100 | caps->cmd_pause = true; | 1105 | caps->cmd_pause = true; |
1101 | caps->cmd_terminate = true; | 1106 | caps->cmd_terminate = true; |
1102 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | 1107 | caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; |
1103 | 1108 | ||
1104 | return 0; | 1109 | return 0; |
1105 | } | 1110 | } |
1106 | 1111 | ||
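Consumers can query these limits before configuring a channel; a minimal sketch, assuming the dma_get_slave_caps() helper of this kernel era (the function name below is hypothetical):

    #include <linux/dmaengine.h>

    static bool my_client_has_4byte_src(struct dma_chan *chan)
    {
        struct dma_slave_caps caps;

        /*
         * Filled in by omap_dma_device_slave_caps(); the buswidth
         * masks come out as BIT(1) | BIT(2) | BIT(4) == 0x16.
         */
        if (dma_get_slave_caps(chan, &caps))
            return false;

        return caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
    }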
1107 | static int omap_dma_probe(struct platform_device *pdev) | 1112 | static int omap_dma_probe(struct platform_device *pdev) |
1108 | { | 1113 | { |
1109 | struct omap_dmadev *od; | 1114 | struct omap_dmadev *od; |
1110 | struct resource *res; | 1115 | struct resource *res; |
1111 | int rc, i, irq; | 1116 | int rc, i, irq; |
1112 | 1117 | ||
1113 | od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); | 1118 | od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); |
1114 | if (!od) | 1119 | if (!od) |
1115 | return -ENOMEM; | 1120 | return -ENOMEM; |
1116 | 1121 | ||
1117 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1122 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1118 | od->base = devm_ioremap_resource(&pdev->dev, res); | 1123 | od->base = devm_ioremap_resource(&pdev->dev, res); |
1119 | if (IS_ERR(od->base)) | 1124 | if (IS_ERR(od->base)) |
1120 | return PTR_ERR(od->base); | 1125 | return PTR_ERR(od->base); |
1121 | 1126 | ||
1122 | od->plat = omap_get_plat_info(); | 1127 | od->plat = omap_get_plat_info(); |
1123 | if (!od->plat) | 1128 | if (!od->plat) |
1124 | return -EPROBE_DEFER; | 1129 | return -EPROBE_DEFER; |
1125 | 1130 | ||
1126 | od->reg_map = od->plat->reg_map; | 1131 | od->reg_map = od->plat->reg_map; |
1127 | 1132 | ||
1128 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); | 1133 | dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); |
1129 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); | 1134 | dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); |
1130 | od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; | 1135 | od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; |
1131 | od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; | 1136 | od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; |
1132 | od->ddev.device_tx_status = omap_dma_tx_status; | 1137 | od->ddev.device_tx_status = omap_dma_tx_status; |
1133 | od->ddev.device_issue_pending = omap_dma_issue_pending; | 1138 | od->ddev.device_issue_pending = omap_dma_issue_pending; |
1134 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; | 1139 | od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; |
1135 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; | 1140 | od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; |
1136 | od->ddev.device_control = omap_dma_control; | 1141 | od->ddev.device_control = omap_dma_control; |
1137 | od->ddev.device_slave_caps = omap_dma_device_slave_caps; | 1142 | od->ddev.device_slave_caps = omap_dma_device_slave_caps; |
1138 | od->ddev.dev = &pdev->dev; | 1143 | od->ddev.dev = &pdev->dev; |
1139 | INIT_LIST_HEAD(&od->ddev.channels); | 1144 | INIT_LIST_HEAD(&od->ddev.channels); |
1140 | INIT_LIST_HEAD(&od->pending); | 1145 | INIT_LIST_HEAD(&od->pending); |
1141 | spin_lock_init(&od->lock); | 1146 | spin_lock_init(&od->lock); |
1142 | spin_lock_init(&od->irq_lock); | 1147 | spin_lock_init(&od->irq_lock); |
1143 | 1148 | ||
1144 | tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); | 1149 | tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); |
1145 | 1150 | ||
1146 | for (i = 0; i < 127; i++) { | 1151 | for (i = 0; i < 127; i++) { |
1147 | rc = omap_dma_chan_init(od, i); | 1152 | rc = omap_dma_chan_init(od, i); |
1148 | if (rc) { | 1153 | if (rc) { |
1149 | omap_dma_free(od); | 1154 | omap_dma_free(od); |
1150 | return rc; | 1155 | return rc; |
1151 | } | 1156 | } |
1152 | } | 1157 | } |
1153 | 1158 | ||
1154 | irq = platform_get_irq(pdev, 1); | 1159 | irq = platform_get_irq(pdev, 1); |
1155 | if (irq <= 0) { | 1160 | if (irq <= 0) { |
1156 | dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq); | 1161 | dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq); |
1157 | od->legacy = true; | 1162 | od->legacy = true; |
1158 | } else { | 1163 | } else { |
1159 | /* Disable all interrupts */ | 1164 | /* Disable all interrupts */ |
1160 | od->irq_enable_mask = 0; | 1165 | od->irq_enable_mask = 0; |
1161 | omap_dma_glbl_write(od, IRQENABLE_L1, 0); | 1166 | omap_dma_glbl_write(od, IRQENABLE_L1, 0); |
1162 | 1167 | ||
1163 | rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, | 1168 | rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, |
1164 | IRQF_SHARED, "omap-dma-engine", od); | 1169 | IRQF_SHARED, "omap-dma-engine", od); |
1165 | if (rc) | 1170 | if (rc) |
1166 | return rc; | 1171 | return rc; |
1167 | } | 1172 | } |
1168 | 1173 | ||
1169 | rc = dma_async_device_register(&od->ddev); | 1174 | rc = dma_async_device_register(&od->ddev); |
1170 | if (rc) { | 1175 | if (rc) { |
1171 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", | 1176 | pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", |
1172 | rc); | 1177 | rc); |
1173 | omap_dma_free(od); | 1178 | omap_dma_free(od); |
1174 | return rc; | 1179 | return rc; |
1175 | } | 1180 | } |
1176 | 1181 | ||
1177 | platform_set_drvdata(pdev, od); | 1182 | platform_set_drvdata(pdev, od); |
1178 | 1183 | ||
1179 | if (pdev->dev.of_node) { | 1184 | if (pdev->dev.of_node) { |
1180 | omap_dma_info.dma_cap = od->ddev.cap_mask; | 1185 | omap_dma_info.dma_cap = od->ddev.cap_mask; |
1181 | 1186 | ||
1182 | /* Device-tree DMA controller registration */ | 1187 | /* Device-tree DMA controller registration */ |
1183 | rc = of_dma_controller_register(pdev->dev.of_node, | 1188 | rc = of_dma_controller_register(pdev->dev.of_node, |
1184 | of_dma_simple_xlate, &omap_dma_info); | 1189 | of_dma_simple_xlate, &omap_dma_info); |
1185 | if (rc) { | 1190 | if (rc) { |
1186 | pr_warn("OMAP-DMA: failed to register DMA controller\n"); | 1191 | pr_warn("OMAP-DMA: failed to register DMA controller\n"); |
1187 | dma_async_device_unregister(&od->ddev); | 1192 | dma_async_device_unregister(&od->ddev); |
1188 | omap_dma_free(od); | 1193 | omap_dma_free(od); |
1189 | } | 1194 | } |
1190 | } | 1195 | } |
1191 | 1196 | ||
1192 | dev_info(&pdev->dev, "OMAP DMA engine driver\n"); | 1197 | dev_info(&pdev->dev, "OMAP DMA engine driver\n"); |
1193 | 1198 | ||
1194 | return rc; | 1199 | return rc; |
1195 | } | 1200 | } |
1196 | 1201 | ||
1197 | static int omap_dma_remove(struct platform_device *pdev) | 1202 | static int omap_dma_remove(struct platform_device *pdev) |
1198 | { | 1203 | { |
1199 | struct omap_dmadev *od = platform_get_drvdata(pdev); | 1204 | struct omap_dmadev *od = platform_get_drvdata(pdev); |
1200 | 1205 | ||
1201 | if (pdev->dev.of_node) | 1206 | if (pdev->dev.of_node) |
1202 | of_dma_controller_free(pdev->dev.of_node); | 1207 | of_dma_controller_free(pdev->dev.of_node); |
1203 | 1208 | ||
1204 | dma_async_device_unregister(&od->ddev); | 1209 | dma_async_device_unregister(&od->ddev); |
1205 | 1210 | ||
1206 | if (!od->legacy) { | 1211 | if (!od->legacy) { |
1207 | /* Disable all interrupts */ | 1212 | /* Disable all interrupts */ |
1208 | omap_dma_glbl_write(od, IRQENABLE_L0, 0); | 1213 | omap_dma_glbl_write(od, IRQENABLE_L0, 0); |
1209 | } | 1214 | } |
1210 | 1215 | ||
1211 | omap_dma_free(od); | 1216 | omap_dma_free(od); |
1212 | 1217 | ||
1213 | return 0; | 1218 | return 0; |
1214 | } | 1219 | } |
1215 | 1220 | ||
1216 | static const struct of_device_id omap_dma_match[] = { | 1221 | static const struct of_device_id omap_dma_match[] = { |
1217 | { .compatible = "ti,omap2420-sdma", }, | 1222 | { .compatible = "ti,omap2420-sdma", }, |
1218 | { .compatible = "ti,omap2430-sdma", }, | 1223 | { .compatible = "ti,omap2430-sdma", }, |
1219 | { .compatible = "ti,omap3430-sdma", }, | 1224 | { .compatible = "ti,omap3430-sdma", }, |
1220 | { .compatible = "ti,omap3630-sdma", }, | 1225 | { .compatible = "ti,omap3630-sdma", }, |
1221 | { .compatible = "ti,omap4430-sdma", }, | 1226 | { .compatible = "ti,omap4430-sdma", }, |
1222 | {}, | 1227 | {}, |
1223 | }; | 1228 | }; |
1224 | MODULE_DEVICE_TABLE(of, omap_dma_match); | 1229 | MODULE_DEVICE_TABLE(of, omap_dma_match); |
1225 | 1230 | ||
1226 | static struct platform_driver omap_dma_driver = { | 1231 | static struct platform_driver omap_dma_driver = { |
1227 | .probe = omap_dma_probe, | 1232 | .probe = omap_dma_probe, |
1228 | .remove = omap_dma_remove, | 1233 | .remove = omap_dma_remove, |
1229 | .driver = { | 1234 | .driver = { |
1230 | .name = "omap-dma-engine", | 1235 | .name = "omap-dma-engine", |
1231 | .owner = THIS_MODULE, | 1236 | .owner = THIS_MODULE, |
1232 | .of_match_table = of_match_ptr(omap_dma_match), | 1237 | .of_match_table = of_match_ptr(omap_dma_match), |
1233 | }, | 1238 | }, |
1234 | }; | 1239 | }; |
1235 | 1240 | ||
1236 | bool omap_dma_filter_fn(struct dma_chan *chan, void *param) | 1241 | bool omap_dma_filter_fn(struct dma_chan *chan, void *param) |
1237 | { | 1242 | { |
1238 | if (chan->device->dev->driver == &omap_dma_driver.driver) { | 1243 | if (chan->device->dev->driver == &omap_dma_driver.driver) { |
1239 | struct omap_chan *c = to_omap_dma_chan(chan); | 1244 | struct omap_chan *c = to_omap_dma_chan(chan); |
1240 | unsigned req = *(unsigned *)param; | 1245 | unsigned req = *(unsigned *)param; |
1241 | 1246 | ||
1242 | return req == c->dma_sig; | 1247 | return req == c->dma_sig; |
1243 | } | 1248 | } |
1244 | return false; | 1249 | return false; |
1245 | } | 1250 | } |
1246 | EXPORT_SYMBOL_GPL(omap_dma_filter_fn); | 1251 | EXPORT_SYMBOL_GPL(omap_dma_filter_fn); |
1247 | 1252 | ||
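omap_dma_filter_fn() is the hook legacy (non-DT) clients pass to dma_request_channel(); a sketch of the usual request sequence, where the request-line number is hypothetical and board-specific:

    #include <linux/dmaengine.h>
    #include <linux/omap-dma.h>

    static struct dma_chan *my_request_chan(void)
    {
        dma_cap_mask_t mask;
        unsigned sig = 17;  /* hypothetical DMA request line */

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /*
         * The filter matches only channels owned by this driver
         * whose dma_sig equals the requested line.
         */
        return dma_request_channel(mask, omap_dma_filter_fn, &sig);
    }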
1248 | static int omap_dma_init(void) | 1253 | static int omap_dma_init(void) |
1249 | { | 1254 | { |
1250 | return platform_driver_register(&omap_dma_driver); | 1255 | return platform_driver_register(&omap_dma_driver); |
1251 | } | 1256 | } |
1252 | subsys_initcall(omap_dma_init); | 1257 | subsys_initcall(omap_dma_init); |
1253 | 1258 | ||
1254 | static void __exit omap_dma_exit(void) | 1259 | static void __exit omap_dma_exit(void) |
1255 | { | 1260 | { |
1256 | platform_driver_unregister(&omap_dma_driver); | 1261 | platform_driver_unregister(&omap_dma_driver); |
1257 | } | 1262 | } |
1258 | module_exit(omap_dma_exit); | 1263 | module_exit(omap_dma_exit); |
1259 | 1264 | ||
1260 | MODULE_AUTHOR("Russell King"); | 1265 | MODULE_AUTHOR("Russell King"); |
1261 | MODULE_LICENSE("GPL"); | 1266 | MODULE_LICENSE("GPL"); |
1262 | 1267 |