Commit 0c32269d813c148194524fc8272f7ec1f7c90e6a

Authored by Jonas Aaberg
Committed by Dan Williams
1 parent 2292b880e4

DMAENGINE: ste_dma40: avoid doing unnecessary suspend

Avoid doing an unnecessary suspend when modifying logical channels. Enabling the event line of a logical channel does not require the physical channel to be suspended first, so d40_start() no longer issues a suspend request before activating the event line; only disabling an event line requires the physical channel to be stopped.

Signed-off-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>

Showing 1 changed file with 7 additions and 32 deletions
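
For reference, the simplified d40_start() after this change (taken directly from the diff below) activates the event line for logical channels and then issues the RUN command, with no suspend round-trip:

	static int d40_start(struct d40_chan *d40c)
	{
		if (d40c->log_num != D40_PHY_CHAN)
			d40_config_set_event(d40c, true);

		return d40_channel_execute_command(d40c, D40_DMA_RUN);
	}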

drivers/dma/ste_dma40.c
1 /* 1 /*
2 * driver/dma/ste_dma40.c 2 * driver/dma/ste_dma40.c
3 * 3 *
4 * Copyright (C) ST-Ericsson 2007-2010 4 * Copyright (C) ST-Ericsson 2007-2010
5 * License terms: GNU General Public License (GPL) version 2 5 * License terms: GNU General Public License (GPL) version 2
6 * Author: Per Friden <per.friden@stericsson.com> 6 * Author: Per Friden <per.friden@stericsson.com>
7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> 7 * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 * 8 *
9 */ 9 */
10 10
11 #include <linux/kernel.h> 11 #include <linux/kernel.h>
12 #include <linux/slab.h> 12 #include <linux/slab.h>
13 #include <linux/dmaengine.h> 13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h> 14 #include <linux/platform_device.h>
15 #include <linux/clk.h> 15 #include <linux/clk.h>
16 #include <linux/delay.h> 16 #include <linux/delay.h>
17 17
18 #include <plat/ste_dma40.h> 18 #include <plat/ste_dma40.h>
19 19
20 #include "ste_dma40_ll.h" 20 #include "ste_dma40_ll.h"
21 21
22 #define D40_NAME "dma40" 22 #define D40_NAME "dma40"
23 23
24 #define D40_PHY_CHAN -1 24 #define D40_PHY_CHAN -1
25 25
26 /* For masking out/in 2 bit channel positions */ 26 /* For masking out/in 2 bit channel positions */
27 #define D40_CHAN_POS(chan) (2 * (chan / 2)) 27 #define D40_CHAN_POS(chan) (2 * (chan / 2))
28 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) 28 #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
29 29
30 /* Maximum iterations taken before giving up suspending a channel */ 30 /* Maximum iterations taken before giving up suspending a channel */
31 #define D40_SUSPEND_MAX_IT 500 31 #define D40_SUSPEND_MAX_IT 500
32 32
33 #define D40_ALLOC_FREE (1 << 31) 33 #define D40_ALLOC_FREE (1 << 31)
34 #define D40_ALLOC_PHY (1 << 30) 34 #define D40_ALLOC_PHY (1 << 30)
35 #define D40_ALLOC_LOG_FREE 0 35 #define D40_ALLOC_LOG_FREE 0
36 36
37 /* Hardware designer of the block */ 37 /* Hardware designer of the block */
38 #define D40_PERIPHID2_DESIGNER 0x8 38 #define D40_PERIPHID2_DESIGNER 0x8
39 39
40 /** 40 /**
41 * enum 40_command - The different commands and/or statuses. 41 * enum 40_command - The different commands and/or statuses.
42 * 42 *
43 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED, 43 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED,
44 * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN. 44 * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN.
45 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. 45 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
46 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. 46 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
47 */ 47 */
48 enum d40_command { 48 enum d40_command {
49 D40_DMA_STOP = 0, 49 D40_DMA_STOP = 0,
50 D40_DMA_RUN = 1, 50 D40_DMA_RUN = 1,
51 D40_DMA_SUSPEND_REQ = 2, 51 D40_DMA_SUSPEND_REQ = 2,
52 D40_DMA_SUSPENDED = 3 52 D40_DMA_SUSPENDED = 3
53 }; 53 };
54 54
55 /** 55 /**
56 * struct d40_lli_pool - Structure for keeping LLIs in memory 56 * struct d40_lli_pool - Structure for keeping LLIs in memory
57 * 57 *
58 * @base: Pointer to memory area when the pre_alloc_lli's are not large 58 * @base: Pointer to memory area when the pre_alloc_lli's are not large
59 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if 59 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if
60 * pre_alloc_lli is used. 60 * pre_alloc_lli is used.
61 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 61 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
62 * @pre_alloc_lli: Pre allocated area for the most common case of transfers, 62 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
63 * one buffer to one buffer. 63 * one buffer to one buffer.
64 */ 64 */
65 struct d40_lli_pool { 65 struct d40_lli_pool {
66 void *base; 66 void *base;
67 int size; 67 int size;
68 /* Space for dst and src, plus an extra for padding */ 68 /* Space for dst and src, plus an extra for padding */
69 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; 69 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
70 }; 70 };
71 71
72 /** 72 /**
73 * struct d40_desc - A descriptor is one DMA job. 73 * struct d40_desc - A descriptor is one DMA job.
74 * 74 *
75 * @lli_phy: LLI settings for physical channel. Both src and dst= 75 * @lli_phy: LLI settings for physical channel. Both src and dst=
76 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if 76 * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
77 * lli_len equals one. 77 * lli_len equals one.
78 * @lli_log: Same as above but for logical channels. 78 * @lli_log: Same as above but for logical channels.
79 * @lli_pool: The pool with two entries pre-allocated. 79 * @lli_pool: The pool with two entries pre-allocated.
80 * @lli_len: Number of llis of current descriptor. 80 * @lli_len: Number of llis of current descriptor.
81 * @lli_count: Number of transfered llis. 81 * @lli_count: Number of transfered llis.
82 * @lli_tx_len: Max number of LLIs per transfer, there can be 82 * @lli_tx_len: Max number of LLIs per transfer, there can be
83 * many transfer for one descriptor. 83 * many transfer for one descriptor.
84 * @txd: DMA engine struct. Used for among other things for communication 84 * @txd: DMA engine struct. Used for among other things for communication
85 * during a transfer. 85 * during a transfer.
86 * @node: List entry. 86 * @node: List entry.
87 * @dir: The transfer direction of this job. 87 * @dir: The transfer direction of this job.
88 * @is_in_client_list: true if the client owns this descriptor. 88 * @is_in_client_list: true if the client owns this descriptor.
89 * 89 *
90 * This descriptor is used for both logical and physical transfers. 90 * This descriptor is used for both logical and physical transfers.
91 */ 91 */
92 92
93 struct d40_desc { 93 struct d40_desc {
94 /* LLI physical */ 94 /* LLI physical */
95 struct d40_phy_lli_bidir lli_phy; 95 struct d40_phy_lli_bidir lli_phy;
96 /* LLI logical */ 96 /* LLI logical */
97 struct d40_log_lli_bidir lli_log; 97 struct d40_log_lli_bidir lli_log;
98 98
99 struct d40_lli_pool lli_pool; 99 struct d40_lli_pool lli_pool;
100 int lli_len; 100 int lli_len;
101 int lli_count; 101 int lli_count;
102 u32 lli_tx_len; 102 u32 lli_tx_len;
103 103
104 struct dma_async_tx_descriptor txd; 104 struct dma_async_tx_descriptor txd;
105 struct list_head node; 105 struct list_head node;
106 106
107 enum dma_data_direction dir; 107 enum dma_data_direction dir;
108 bool is_in_client_list; 108 bool is_in_client_list;
109 }; 109 };
110 110
111 /** 111 /**
112 * struct d40_lcla_pool - LCLA pool settings and data. 112 * struct d40_lcla_pool - LCLA pool settings and data.
113 * 113 *
114 * @base: The virtual address of LCLA. 114 * @base: The virtual address of LCLA.
115 * @phy: Physical base address of LCLA. 115 * @phy: Physical base address of LCLA.
116 * @base_size: size of lcla. 116 * @base_size: size of lcla.
117 * @lock: Lock to protect the content in this struct. 117 * @lock: Lock to protect the content in this struct.
118 * @alloc_map: Mapping between physical channel and LCLA entries. 118 * @alloc_map: Mapping between physical channel and LCLA entries.
119 * @num_blocks: The number of entries of alloc_map. Equals to the 119 * @num_blocks: The number of entries of alloc_map. Equals to the
120 * number of physical channels. 120 * number of physical channels.
121 */ 121 */
122 struct d40_lcla_pool { 122 struct d40_lcla_pool {
123 void *base; 123 void *base;
124 dma_addr_t phy; 124 dma_addr_t phy;
125 resource_size_t base_size; 125 resource_size_t base_size;
126 spinlock_t lock; 126 spinlock_t lock;
127 u32 *alloc_map; 127 u32 *alloc_map;
128 int num_blocks; 128 int num_blocks;
129 }; 129 };
130 130
131 /** 131 /**
132 * struct d40_phy_res - struct for handling eventlines mapped to physical 132 * struct d40_phy_res - struct for handling eventlines mapped to physical
133 * channels. 133 * channels.
134 * 134 *
135 * @lock: A lock protection this entity. 135 * @lock: A lock protection this entity.
136 * @num: The physical channel number of this entity. 136 * @num: The physical channel number of this entity.
137 * @allocated_src: Bit mapped to show which src event line's are mapped to 137 * @allocated_src: Bit mapped to show which src event line's are mapped to
138 * this physical channel. Can also be free or physically allocated. 138 * this physical channel. Can also be free or physically allocated.
139 * @allocated_dst: Same as for src but is dst. 139 * @allocated_dst: Same as for src but is dst.
140 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as 140 * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as
141 * event line number. Both allocated_src and allocated_dst can not be 141 * event line number. Both allocated_src and allocated_dst can not be
142 * allocated to a physical channel, since the interrupt handler has then 142 * allocated to a physical channel, since the interrupt handler has then
143 * no way of figure out which one the interrupt belongs to. 143 * no way of figure out which one the interrupt belongs to.
144 */ 144 */
145 struct d40_phy_res { 145 struct d40_phy_res {
146 spinlock_t lock; 146 spinlock_t lock;
147 int num; 147 int num;
148 u32 allocated_src; 148 u32 allocated_src;
149 u32 allocated_dst; 149 u32 allocated_dst;
150 }; 150 };
151 151
152 struct d40_base; 152 struct d40_base;
153 153
154 /** 154 /**
155 * struct d40_chan - Struct that describes a channel. 155 * struct d40_chan - Struct that describes a channel.
156 * 156 *
157 * @lock: A spinlock to protect this struct. 157 * @lock: A spinlock to protect this struct.
158 * @log_num: The logical number, if any of this channel. 158 * @log_num: The logical number, if any of this channel.
159 * @completed: Starts with 1, after first interrupt it is set to dma engine's 159 * @completed: Starts with 1, after first interrupt it is set to dma engine's
160 * current cookie. 160 * current cookie.
161 * @pending_tx: The number of pending transfers. Used between interrupt handler 161 * @pending_tx: The number of pending transfers. Used between interrupt handler
162 * and tasklet. 162 * and tasklet.
163 * @busy: Set to true when transfer is ongoing on this channel. 163 * @busy: Set to true when transfer is ongoing on this channel.
164 * @phy_chan: Pointer to physical channel which this instance runs on. If this 164 * @phy_chan: Pointer to physical channel which this instance runs on. If this
165 * point is NULL, then the channel is not allocated. 165 * point is NULL, then the channel is not allocated.
166 * @chan: DMA engine handle. 166 * @chan: DMA engine handle.
167 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a 167 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
168 * transfer and call client callback. 168 * transfer and call client callback.
169 * @client: Cliented owned descriptor list. 169 * @client: Cliented owned descriptor list.
170 * @active: Active descriptor. 170 * @active: Active descriptor.
171 * @queue: Queued jobs. 171 * @queue: Queued jobs.
172 * @dma_cfg: The client configuration of this dma channel. 172 * @dma_cfg: The client configuration of this dma channel.
173 * @base: Pointer to the device instance struct. 173 * @base: Pointer to the device instance struct.
174 * @src_def_cfg: Default cfg register setting for src. 174 * @src_def_cfg: Default cfg register setting for src.
175 * @dst_def_cfg: Default cfg register setting for dst. 175 * @dst_def_cfg: Default cfg register setting for dst.
176 * @log_def: Default logical channel settings. 176 * @log_def: Default logical channel settings.
177 * @lcla: Space for one dst src pair for logical channel transfers. 177 * @lcla: Space for one dst src pair for logical channel transfers.
178 * @lcpa: Pointer to dst and src lcpa settings. 178 * @lcpa: Pointer to dst and src lcpa settings.
179 * 179 *
180 * This struct can either "be" a logical or a physical channel. 180 * This struct can either "be" a logical or a physical channel.
181 */ 181 */
182 struct d40_chan { 182 struct d40_chan {
183 spinlock_t lock; 183 spinlock_t lock;
184 int log_num; 184 int log_num;
185 /* ID of the most recent completed transfer */ 185 /* ID of the most recent completed transfer */
186 int completed; 186 int completed;
187 int pending_tx; 187 int pending_tx;
188 bool busy; 188 bool busy;
189 struct d40_phy_res *phy_chan; 189 struct d40_phy_res *phy_chan;
190 struct dma_chan chan; 190 struct dma_chan chan;
191 struct tasklet_struct tasklet; 191 struct tasklet_struct tasklet;
192 struct list_head client; 192 struct list_head client;
193 struct list_head active; 193 struct list_head active;
194 struct list_head queue; 194 struct list_head queue;
195 struct stedma40_chan_cfg dma_cfg; 195 struct stedma40_chan_cfg dma_cfg;
196 struct d40_base *base; 196 struct d40_base *base;
197 /* Default register configurations */ 197 /* Default register configurations */
198 u32 src_def_cfg; 198 u32 src_def_cfg;
199 u32 dst_def_cfg; 199 u32 dst_def_cfg;
200 struct d40_def_lcsp log_def; 200 struct d40_def_lcsp log_def;
201 struct d40_lcla_elem lcla; 201 struct d40_lcla_elem lcla;
202 struct d40_log_lli_full *lcpa; 202 struct d40_log_lli_full *lcpa;
203 }; 203 };
204 204
205 /** 205 /**
206 * struct d40_base - The big global struct, one for each probe'd instance. 206 * struct d40_base - The big global struct, one for each probe'd instance.
207 * 207 *
208 * @interrupt_lock: Lock used to make sure one interrupt is handle a time. 208 * @interrupt_lock: Lock used to make sure one interrupt is handle a time.
209 * @execmd_lock: Lock for execute command usage since several channels share 209 * @execmd_lock: Lock for execute command usage since several channels share
210 * the same physical register. 210 * the same physical register.
211 * @dev: The device structure. 211 * @dev: The device structure.
212 * @virtbase: The virtual base address of the DMA's register. 212 * @virtbase: The virtual base address of the DMA's register.
213 * @clk: Pointer to the DMA clock structure. 213 * @clk: Pointer to the DMA clock structure.
214 * @phy_start: Physical memory start of the DMA registers. 214 * @phy_start: Physical memory start of the DMA registers.
215 * @phy_size: Size of the DMA register map. 215 * @phy_size: Size of the DMA register map.
216 * @irq: The IRQ number. 216 * @irq: The IRQ number.
217 * @num_phy_chans: The number of physical channels. Read from HW. This 217 * @num_phy_chans: The number of physical channels. Read from HW. This
218 * is the number of available channels for this driver, not counting "Secure 218 * is the number of available channels for this driver, not counting "Secure
219 * mode" allocated physical channels. 219 * mode" allocated physical channels.
220 * @num_log_chans: The number of logical channels. Calculated from 220 * @num_log_chans: The number of logical channels. Calculated from
221 * num_phy_chans. 221 * num_phy_chans.
222 * @dma_both: dma_device channels that can do both memcpy and slave transfers. 222 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
223 * @dma_slave: dma_device channels that can do only do slave transfers. 223 * @dma_slave: dma_device channels that can do only do slave transfers.
224 * @dma_memcpy: dma_device channels that can do only do memcpy transfers. 224 * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
225 * @phy_chans: Room for all possible physical channels in system. 225 * @phy_chans: Room for all possible physical channels in system.
226 * @log_chans: Room for all possible logical channels in system. 226 * @log_chans: Room for all possible logical channels in system.
227 * @lookup_log_chans: Used to map interrupt number to logical channel. Points 227 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
228 * to log_chans entries. 228 * to log_chans entries.
229 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points 229 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
230 * to phy_chans entries. 230 * to phy_chans entries.
231 * @plat_data: Pointer to provided platform_data which is the driver 231 * @plat_data: Pointer to provided platform_data which is the driver
232 * configuration. 232 * configuration.
233 * @phy_res: Vector containing all physical channels. 233 * @phy_res: Vector containing all physical channels.
234 * @lcla_pool: lcla pool settings and data. 234 * @lcla_pool: lcla pool settings and data.
235 * @lcpa_base: The virtual mapped address of LCPA. 235 * @lcpa_base: The virtual mapped address of LCPA.
236 * @phy_lcpa: The physical address of the LCPA. 236 * @phy_lcpa: The physical address of the LCPA.
237 * @lcpa_size: The size of the LCPA area. 237 * @lcpa_size: The size of the LCPA area.
238 * @desc_slab: cache for descriptors. 238 * @desc_slab: cache for descriptors.
239 */ 239 */
240 struct d40_base { 240 struct d40_base {
241 spinlock_t interrupt_lock; 241 spinlock_t interrupt_lock;
242 spinlock_t execmd_lock; 242 spinlock_t execmd_lock;
243 struct device *dev; 243 struct device *dev;
244 void __iomem *virtbase; 244 void __iomem *virtbase;
245 struct clk *clk; 245 struct clk *clk;
246 phys_addr_t phy_start; 246 phys_addr_t phy_start;
247 resource_size_t phy_size; 247 resource_size_t phy_size;
248 int irq; 248 int irq;
249 int num_phy_chans; 249 int num_phy_chans;
250 int num_log_chans; 250 int num_log_chans;
251 struct dma_device dma_both; 251 struct dma_device dma_both;
252 struct dma_device dma_slave; 252 struct dma_device dma_slave;
253 struct dma_device dma_memcpy; 253 struct dma_device dma_memcpy;
254 struct d40_chan *phy_chans; 254 struct d40_chan *phy_chans;
255 struct d40_chan *log_chans; 255 struct d40_chan *log_chans;
256 struct d40_chan **lookup_log_chans; 256 struct d40_chan **lookup_log_chans;
257 struct d40_chan **lookup_phy_chans; 257 struct d40_chan **lookup_phy_chans;
258 struct stedma40_platform_data *plat_data; 258 struct stedma40_platform_data *plat_data;
259 /* Physical half channels */ 259 /* Physical half channels */
260 struct d40_phy_res *phy_res; 260 struct d40_phy_res *phy_res;
261 struct d40_lcla_pool lcla_pool; 261 struct d40_lcla_pool lcla_pool;
262 void *lcpa_base; 262 void *lcpa_base;
263 dma_addr_t phy_lcpa; 263 dma_addr_t phy_lcpa;
264 resource_size_t lcpa_size; 264 resource_size_t lcpa_size;
265 struct kmem_cache *desc_slab; 265 struct kmem_cache *desc_slab;
266 }; 266 };
267 267
268 /** 268 /**
269 * struct d40_interrupt_lookup - lookup table for interrupt handler 269 * struct d40_interrupt_lookup - lookup table for interrupt handler
270 * 270 *
271 * @src: Interrupt mask register. 271 * @src: Interrupt mask register.
272 * @clr: Interrupt clear register. 272 * @clr: Interrupt clear register.
273 * @is_error: true if this is an error interrupt. 273 * @is_error: true if this is an error interrupt.
274 * @offset: start delta in the lookup_log_chans in d40_base. If equals to 274 * @offset: start delta in the lookup_log_chans in d40_base. If equals to
275 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. 275 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
276 */ 276 */
277 struct d40_interrupt_lookup { 277 struct d40_interrupt_lookup {
278 u32 src; 278 u32 src;
279 u32 clr; 279 u32 clr;
280 bool is_error; 280 bool is_error;
281 int offset; 281 int offset;
282 }; 282 };
283 283
284 /** 284 /**
285 * struct d40_reg_val - simple lookup struct 285 * struct d40_reg_val - simple lookup struct
286 * 286 *
287 * @reg: The register. 287 * @reg: The register.
288 * @val: The value that belongs to the register in reg. 288 * @val: The value that belongs to the register in reg.
289 */ 289 */
290 struct d40_reg_val { 290 struct d40_reg_val {
291 unsigned int reg; 291 unsigned int reg;
292 unsigned int val; 292 unsigned int val;
293 }; 293 };
294 294
295 static int d40_pool_lli_alloc(struct d40_desc *d40d, 295 static int d40_pool_lli_alloc(struct d40_desc *d40d,
296 int lli_len, bool is_log) 296 int lli_len, bool is_log)
297 { 297 {
298 u32 align; 298 u32 align;
299 void *base; 299 void *base;
300 300
301 if (is_log) 301 if (is_log)
302 align = sizeof(struct d40_log_lli); 302 align = sizeof(struct d40_log_lli);
303 else 303 else
304 align = sizeof(struct d40_phy_lli); 304 align = sizeof(struct d40_phy_lli);
305 305
306 if (lli_len == 1) { 306 if (lli_len == 1) {
307 base = d40d->lli_pool.pre_alloc_lli; 307 base = d40d->lli_pool.pre_alloc_lli;
308 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 308 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
309 d40d->lli_pool.base = NULL; 309 d40d->lli_pool.base = NULL;
310 } else { 310 } else {
311 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); 311 d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align);
312 312
313 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 313 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
314 d40d->lli_pool.base = base; 314 d40d->lli_pool.base = base;
315 315
316 if (d40d->lli_pool.base == NULL) 316 if (d40d->lli_pool.base == NULL)
317 return -ENOMEM; 317 return -ENOMEM;
318 } 318 }
319 319
320 if (is_log) { 320 if (is_log) {
321 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, 321 d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base,
322 align); 322 align);
323 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, 323 d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len,
324 align); 324 align);
325 } else { 325 } else {
326 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, 326 d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base,
327 align); 327 align);
328 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, 328 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
329 align); 329 align);
330 330
331 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src); 331 d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
332 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst); 332 d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
333 } 333 }
334 334
335 return 0; 335 return 0;
336 } 336 }
337 337
338 static void d40_pool_lli_free(struct d40_desc *d40d) 338 static void d40_pool_lli_free(struct d40_desc *d40d)
339 { 339 {
340 kfree(d40d->lli_pool.base); 340 kfree(d40d->lli_pool.base);
341 d40d->lli_pool.base = NULL; 341 d40d->lli_pool.base = NULL;
342 d40d->lli_pool.size = 0; 342 d40d->lli_pool.size = 0;
343 d40d->lli_log.src = NULL; 343 d40d->lli_log.src = NULL;
344 d40d->lli_log.dst = NULL; 344 d40d->lli_log.dst = NULL;
345 d40d->lli_phy.src = NULL; 345 d40d->lli_phy.src = NULL;
346 d40d->lli_phy.dst = NULL; 346 d40d->lli_phy.dst = NULL;
347 d40d->lli_phy.src_addr = 0; 347 d40d->lli_phy.src_addr = 0;
348 d40d->lli_phy.dst_addr = 0; 348 d40d->lli_phy.dst_addr = 0;
349 } 349 }
350 350
351 static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, 351 static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
352 struct d40_desc *desc) 352 struct d40_desc *desc)
353 { 353 {
354 dma_cookie_t cookie = d40c->chan.cookie; 354 dma_cookie_t cookie = d40c->chan.cookie;
355 355
356 if (++cookie < 0) 356 if (++cookie < 0)
357 cookie = 1; 357 cookie = 1;
358 358
359 d40c->chan.cookie = cookie; 359 d40c->chan.cookie = cookie;
360 desc->txd.cookie = cookie; 360 desc->txd.cookie = cookie;
361 361
362 return cookie; 362 return cookie;
363 } 363 }
364 364
365 static void d40_desc_remove(struct d40_desc *d40d) 365 static void d40_desc_remove(struct d40_desc *d40d)
366 { 366 {
367 list_del(&d40d->node); 367 list_del(&d40d->node);
368 } 368 }
369 369
370 static struct d40_desc *d40_desc_get(struct d40_chan *d40c) 370 static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
371 { 371 {
372 struct d40_desc *d; 372 struct d40_desc *d;
373 struct d40_desc *_d; 373 struct d40_desc *_d;
374 374
375 if (!list_empty(&d40c->client)) { 375 if (!list_empty(&d40c->client)) {
376 list_for_each_entry_safe(d, _d, &d40c->client, node) 376 list_for_each_entry_safe(d, _d, &d40c->client, node)
377 if (async_tx_test_ack(&d->txd)) { 377 if (async_tx_test_ack(&d->txd)) {
378 d40_pool_lli_free(d); 378 d40_pool_lli_free(d);
379 d40_desc_remove(d); 379 d40_desc_remove(d);
380 break; 380 break;
381 } 381 }
382 } else { 382 } else {
383 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); 383 d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT);
384 if (d != NULL) { 384 if (d != NULL) {
385 memset(d, 0, sizeof(struct d40_desc)); 385 memset(d, 0, sizeof(struct d40_desc));
386 INIT_LIST_HEAD(&d->node); 386 INIT_LIST_HEAD(&d->node);
387 } 387 }
388 } 388 }
389 return d; 389 return d;
390 } 390 }
391 391
392 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 392 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d)
393 { 393 {
394 kmem_cache_free(d40c->base->desc_slab, d40d); 394 kmem_cache_free(d40c->base->desc_slab, d40d);
395 } 395 }
396 396
397 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) 397 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
398 { 398 {
399 list_add_tail(&desc->node, &d40c->active); 399 list_add_tail(&desc->node, &d40c->active);
400 } 400 }
401 401
402 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) 402 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
403 { 403 {
404 struct d40_desc *d; 404 struct d40_desc *d;
405 405
406 if (list_empty(&d40c->active)) 406 if (list_empty(&d40c->active))
407 return NULL; 407 return NULL;
408 408
409 d = list_first_entry(&d40c->active, 409 d = list_first_entry(&d40c->active,
410 struct d40_desc, 410 struct d40_desc,
411 node); 411 node);
412 return d; 412 return d;
413 } 413 }
414 414
415 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 415 static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
416 { 416 {
417 list_add_tail(&desc->node, &d40c->queue); 417 list_add_tail(&desc->node, &d40c->queue);
418 } 418 }
419 419
420 static struct d40_desc *d40_first_queued(struct d40_chan *d40c) 420 static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
421 { 421 {
422 struct d40_desc *d; 422 struct d40_desc *d;
423 423
424 if (list_empty(&d40c->queue)) 424 if (list_empty(&d40c->queue))
425 return NULL; 425 return NULL;
426 426
427 d = list_first_entry(&d40c->queue, 427 d = list_first_entry(&d40c->queue,
428 struct d40_desc, 428 struct d40_desc,
429 node); 429 node);
430 return d; 430 return d;
431 } 431 }
432 432
433 /* Support functions for logical channels */ 433 /* Support functions for logical channels */
434 434
435 static int d40_lcla_id_get(struct d40_chan *d40c, 435 static int d40_lcla_id_get(struct d40_chan *d40c,
436 struct d40_lcla_pool *pool) 436 struct d40_lcla_pool *pool)
437 { 437 {
438 int src_id = 0; 438 int src_id = 0;
439 int dst_id = 0; 439 int dst_id = 0;
440 struct d40_log_lli *lcla_lidx_base = 440 struct d40_log_lli *lcla_lidx_base =
441 pool->base + d40c->phy_chan->num * 1024; 441 pool->base + d40c->phy_chan->num * 1024;
442 int i; 442 int i;
443 int lli_per_log = d40c->base->plat_data->llis_per_log; 443 int lli_per_log = d40c->base->plat_data->llis_per_log;
444 unsigned long flags; 444 unsigned long flags;
445 445
446 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) 446 if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0)
447 return 0; 447 return 0;
448 448
449 if (pool->num_blocks > 32) 449 if (pool->num_blocks > 32)
450 return -EINVAL; 450 return -EINVAL;
451 451
452 spin_lock_irqsave(&pool->lock, flags); 452 spin_lock_irqsave(&pool->lock, flags);
453 453
454 for (i = 0; i < pool->num_blocks; i++) { 454 for (i = 0; i < pool->num_blocks; i++) {
455 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { 455 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
456 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); 456 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
457 break; 457 break;
458 } 458 }
459 } 459 }
460 src_id = i; 460 src_id = i;
461 if (src_id >= pool->num_blocks) 461 if (src_id >= pool->num_blocks)
462 goto err; 462 goto err;
463 463
464 for (; i < pool->num_blocks; i++) { 464 for (; i < pool->num_blocks; i++) {
465 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { 465 if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) {
466 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); 466 pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i);
467 break; 467 break;
468 } 468 }
469 } 469 }
470 470
471 dst_id = i; 471 dst_id = i;
472 if (dst_id == src_id) 472 if (dst_id == src_id)
473 goto err; 473 goto err;
474 474
475 d40c->lcla.src_id = src_id; 475 d40c->lcla.src_id = src_id;
476 d40c->lcla.dst_id = dst_id; 476 d40c->lcla.dst_id = dst_id;
477 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; 477 d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1;
478 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; 478 d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1;
479 479
480 480
481 spin_unlock_irqrestore(&pool->lock, flags); 481 spin_unlock_irqrestore(&pool->lock, flags);
482 return 0; 482 return 0;
483 err: 483 err:
484 spin_unlock_irqrestore(&pool->lock, flags); 484 spin_unlock_irqrestore(&pool->lock, flags);
485 return -EINVAL; 485 return -EINVAL;
486 } 486 }
487 487
488 static void d40_lcla_id_put(struct d40_chan *d40c, 488 static void d40_lcla_id_put(struct d40_chan *d40c,
489 struct d40_lcla_pool *pool, 489 struct d40_lcla_pool *pool,
490 int id) 490 int id)
491 { 491 {
492 unsigned long flags; 492 unsigned long flags;
493 if (id < 0) 493 if (id < 0)
494 return; 494 return;
495 495
496 d40c->lcla.src_id = -1; 496 d40c->lcla.src_id = -1;
497 d40c->lcla.dst_id = -1; 497 d40c->lcla.dst_id = -1;
498 498
499 spin_lock_irqsave(&pool->lock, flags); 499 spin_lock_irqsave(&pool->lock, flags);
500 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id)); 500 pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id));
501 spin_unlock_irqrestore(&pool->lock, flags); 501 spin_unlock_irqrestore(&pool->lock, flags);
502 } 502 }
503 503
504 static int d40_channel_execute_command(struct d40_chan *d40c, 504 static int d40_channel_execute_command(struct d40_chan *d40c,
505 enum d40_command command) 505 enum d40_command command)
506 { 506 {
507 int status, i; 507 int status, i;
508 void __iomem *active_reg; 508 void __iomem *active_reg;
509 int ret = 0; 509 int ret = 0;
510 unsigned long flags; 510 unsigned long flags;
511 511
512 spin_lock_irqsave(&d40c->base->execmd_lock, flags); 512 spin_lock_irqsave(&d40c->base->execmd_lock, flags);
513 513
514 if (d40c->phy_chan->num % 2 == 0) 514 if (d40c->phy_chan->num % 2 == 0)
515 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 515 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
516 else 516 else
517 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 517 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
518 518
519 if (command == D40_DMA_SUSPEND_REQ) { 519 if (command == D40_DMA_SUSPEND_REQ) {
520 status = (readl(active_reg) & 520 status = (readl(active_reg) &
521 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 521 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
522 D40_CHAN_POS(d40c->phy_chan->num); 522 D40_CHAN_POS(d40c->phy_chan->num);
523 523
524 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 524 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
525 goto done; 525 goto done;
526 } 526 }
527 527
528 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg); 528 writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg);
529 529
530 if (command == D40_DMA_SUSPEND_REQ) { 530 if (command == D40_DMA_SUSPEND_REQ) {
531 531
532 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { 532 for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
533 status = (readl(active_reg) & 533 status = (readl(active_reg) &
534 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 534 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
535 D40_CHAN_POS(d40c->phy_chan->num); 535 D40_CHAN_POS(d40c->phy_chan->num);
536 536
537 cpu_relax(); 537 cpu_relax();
538 /* 538 /*
539 * Reduce the number of bus accesses while 539 * Reduce the number of bus accesses while
540 * waiting for the DMA to suspend. 540 * waiting for the DMA to suspend.
541 */ 541 */
542 udelay(3); 542 udelay(3);
543 543
544 if (status == D40_DMA_STOP || 544 if (status == D40_DMA_STOP ||
545 status == D40_DMA_SUSPENDED) 545 status == D40_DMA_SUSPENDED)
546 break; 546 break;
547 } 547 }
548 548
549 if (i == D40_SUSPEND_MAX_IT) { 549 if (i == D40_SUSPEND_MAX_IT) {
550 dev_err(&d40c->chan.dev->device, 550 dev_err(&d40c->chan.dev->device,
551 "[%s]: unable to suspend the chl %d (log: %d) status %x\n", 551 "[%s]: unable to suspend the chl %d (log: %d) status %x\n",
552 __func__, d40c->phy_chan->num, d40c->log_num, 552 __func__, d40c->phy_chan->num, d40c->log_num,
553 status); 553 status);
554 dump_stack(); 554 dump_stack();
555 ret = -EBUSY; 555 ret = -EBUSY;
556 } 556 }
557 557
558 } 558 }
559 done: 559 done:
560 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); 560 spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
561 return ret; 561 return ret;
562 } 562 }
563 563
564 static void d40_term_all(struct d40_chan *d40c) 564 static void d40_term_all(struct d40_chan *d40c)
565 { 565 {
566 struct d40_desc *d40d; 566 struct d40_desc *d40d;
567 567
568 /* Release active descriptors */ 568 /* Release active descriptors */
569 while ((d40d = d40_first_active_get(d40c))) { 569 while ((d40d = d40_first_active_get(d40c))) {
570 d40_desc_remove(d40d); 570 d40_desc_remove(d40d);
571 571
572 /* Return desc to free-list */ 572 /* Return desc to free-list */
573 d40_desc_free(d40c, d40d); 573 d40_desc_free(d40c, d40d);
574 } 574 }
575 575
576 /* Release queued descriptors waiting for transfer */ 576 /* Release queued descriptors waiting for transfer */
577 while ((d40d = d40_first_queued(d40c))) { 577 while ((d40d = d40_first_queued(d40c))) {
578 d40_desc_remove(d40d); 578 d40_desc_remove(d40d);
579 579
580 /* Return desc to free-list */ 580 /* Return desc to free-list */
581 d40_desc_free(d40c, d40d); 581 d40_desc_free(d40c, d40d);
582 } 582 }
583 583
584 d40_lcla_id_put(d40c, &d40c->base->lcla_pool, 584 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
585 d40c->lcla.src_id); 585 d40c->lcla.src_id);
586 d40_lcla_id_put(d40c, &d40c->base->lcla_pool, 586 d40_lcla_id_put(d40c, &d40c->base->lcla_pool,
587 d40c->lcla.dst_id); 587 d40c->lcla.dst_id);
588 588
589 d40c->pending_tx = 0; 589 d40c->pending_tx = 0;
590 d40c->busy = false; 590 d40c->busy = false;
591 } 591 }
592 592
593 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 593 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable)
594 { 594 {
595 u32 val; 595 u32 val;
596 unsigned long flags; 596 unsigned long flags;
597 597
598 /* Notice, that disable requires the physical channel to be stopped */
598 if (do_enable) 599 if (do_enable)
599 val = D40_ACTIVATE_EVENTLINE; 600 val = D40_ACTIVATE_EVENTLINE;
600 else 601 else
601 val = D40_DEACTIVATE_EVENTLINE; 602 val = D40_DEACTIVATE_EVENTLINE;
602 603
603 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 604 spin_lock_irqsave(&d40c->phy_chan->lock, flags);
604 605
605 /* Enable event line connected to device (or memcpy) */ 606 /* Enable event line connected to device (or memcpy) */
606 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 607 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
607 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 608 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) {
608 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 609 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
609 610
610 writel((val << D40_EVENTLINE_POS(event)) | 611 writel((val << D40_EVENTLINE_POS(event)) |
611 ~D40_EVENTLINE_MASK(event), 612 ~D40_EVENTLINE_MASK(event),
612 d40c->base->virtbase + D40_DREG_PCBASE + 613 d40c->base->virtbase + D40_DREG_PCBASE +
613 d40c->phy_chan->num * D40_DREG_PCDELTA + 614 d40c->phy_chan->num * D40_DREG_PCDELTA +
614 D40_CHAN_REG_SSLNK); 615 D40_CHAN_REG_SSLNK);
615 } 616 }
616 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 617 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) {
617 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 618 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
618 619
619 writel((val << D40_EVENTLINE_POS(event)) | 620 writel((val << D40_EVENTLINE_POS(event)) |
620 ~D40_EVENTLINE_MASK(event), 621 ~D40_EVENTLINE_MASK(event),
621 d40c->base->virtbase + D40_DREG_PCBASE + 622 d40c->base->virtbase + D40_DREG_PCBASE +
622 d40c->phy_chan->num * D40_DREG_PCDELTA + 623 d40c->phy_chan->num * D40_DREG_PCDELTA +
623 D40_CHAN_REG_SDLNK); 624 D40_CHAN_REG_SDLNK);
624 } 625 }
625 626
626 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); 627 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
627 } 628 }
628 629
629 static u32 d40_chan_has_events(struct d40_chan *d40c) 630 static u32 d40_chan_has_events(struct d40_chan *d40c)
630 { 631 {
631 u32 val = 0; 632 u32 val = 0;
632 633
633 /* If SSLNK or SDLNK is zero all events are disabled */ 634 /* If SSLNK or SDLNK is zero all events are disabled */
634 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 635 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
635 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 636 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
636 val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 637 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
637 d40c->phy_chan->num * D40_DREG_PCDELTA + 638 d40c->phy_chan->num * D40_DREG_PCDELTA +
638 D40_CHAN_REG_SSLNK); 639 D40_CHAN_REG_SSLNK);
639 640
640 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) 641 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
641 val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 642 val = readl(d40c->base->virtbase + D40_DREG_PCBASE +
642 d40c->phy_chan->num * D40_DREG_PCDELTA + 643 d40c->phy_chan->num * D40_DREG_PCDELTA +
643 D40_CHAN_REG_SDLNK); 644 D40_CHAN_REG_SDLNK);
644 return val; 645 return val;
645 } 646 }
646 647
647 static void d40_config_enable_lidx(struct d40_chan *d40c) 648 static void d40_config_enable_lidx(struct d40_chan *d40c)
648 { 649 {
649 /* Set LIDX for lcla */ 650 /* Set LIDX for lcla */
650 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 651 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
651 D40_SREG_ELEM_LOG_LIDX_MASK, 652 D40_SREG_ELEM_LOG_LIDX_MASK,
652 d40c->base->virtbase + D40_DREG_PCBASE + 653 d40c->base->virtbase + D40_DREG_PCBASE +
653 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); 654 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT);
654 655
655 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 656 writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) &
656 D40_SREG_ELEM_LOG_LIDX_MASK, 657 D40_SREG_ELEM_LOG_LIDX_MASK,
657 d40c->base->virtbase + D40_DREG_PCBASE + 658 d40c->base->virtbase + D40_DREG_PCBASE +
658 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); 659 d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT);
659 } 660 }
660 661
661 static int d40_config_write(struct d40_chan *d40c) 662 static int d40_config_write(struct d40_chan *d40c)
662 { 663 {
663 u32 addr_base; 664 u32 addr_base;
664 u32 var; 665 u32 var;
665 int res; 666 int res;
666 667
667 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 668 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
668 if (res) 669 if (res)
669 return res; 670 return res;
670 671
671 /* Odd addresses are even addresses + 4 */ 672 /* Odd addresses are even addresses + 4 */
672 addr_base = (d40c->phy_chan->num % 2) * 4; 673 addr_base = (d40c->phy_chan->num % 2) * 4;
673 /* Setup channel mode to logical or physical */ 674 /* Setup channel mode to logical or physical */
674 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << 675 var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) <<
675 D40_CHAN_POS(d40c->phy_chan->num); 676 D40_CHAN_POS(d40c->phy_chan->num);
676 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 677 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);
677 678
678 /* Setup operational mode option register */ 679 /* Setup operational mode option register */
679 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & 680 var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) &
680 0x3) << D40_CHAN_POS(d40c->phy_chan->num); 681 0x3) << D40_CHAN_POS(d40c->phy_chan->num);
681 682
682 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 683 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);
683 684
684 if (d40c->log_num != D40_PHY_CHAN) { 685 if (d40c->log_num != D40_PHY_CHAN) {
685 /* Set default config for CFG reg */ 686 /* Set default config for CFG reg */
686 writel(d40c->src_def_cfg, 687 writel(d40c->src_def_cfg,
687 d40c->base->virtbase + D40_DREG_PCBASE + 688 d40c->base->virtbase + D40_DREG_PCBASE +
688 d40c->phy_chan->num * D40_DREG_PCDELTA + 689 d40c->phy_chan->num * D40_DREG_PCDELTA +
689 D40_CHAN_REG_SSCFG); 690 D40_CHAN_REG_SSCFG);
690 writel(d40c->dst_def_cfg, 691 writel(d40c->dst_def_cfg,
691 d40c->base->virtbase + D40_DREG_PCBASE + 692 d40c->base->virtbase + D40_DREG_PCBASE +
692 d40c->phy_chan->num * D40_DREG_PCDELTA + 693 d40c->phy_chan->num * D40_DREG_PCDELTA +
693 D40_CHAN_REG_SDCFG); 694 D40_CHAN_REG_SDCFG);
694 695
695 d40_config_enable_lidx(d40c); 696 d40_config_enable_lidx(d40c);
696 } 697 }
697 return res; 698 return res;
698 } 699 }
699 700
700 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 701 static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
701 { 702 {
702 703
703 if (d40d->lli_phy.dst && d40d->lli_phy.src) { 704 if (d40d->lli_phy.dst && d40d->lli_phy.src) {
704 d40_phy_lli_write(d40c->base->virtbase, 705 d40_phy_lli_write(d40c->base->virtbase,
705 d40c->phy_chan->num, 706 d40c->phy_chan->num,
706 d40d->lli_phy.dst, 707 d40d->lli_phy.dst,
707 d40d->lli_phy.src); 708 d40d->lli_phy.src);
708 } else if (d40d->lli_log.dst && d40d->lli_log.src) { 709 } else if (d40d->lli_log.dst && d40d->lli_log.src) {
709 struct d40_log_lli *src = d40d->lli_log.src; 710 struct d40_log_lli *src = d40d->lli_log.src;
710 struct d40_log_lli *dst = d40d->lli_log.dst; 711 struct d40_log_lli *dst = d40d->lli_log.dst;
711 712
712 src += d40d->lli_count; 713 src += d40d->lli_count;
713 dst += d40d->lli_count; 714 dst += d40d->lli_count;
714 d40_log_lli_write(d40c->lcpa, d40c->lcla.src, 715 d40_log_lli_write(d40c->lcpa, d40c->lcla.src,
715 d40c->lcla.dst, 716 d40c->lcla.dst,
716 dst, src, 717 dst, src,
717 d40c->base->plat_data->llis_per_log); 718 d40c->base->plat_data->llis_per_log);
718 } 719 }
719 d40d->lli_count += d40d->lli_tx_len; 720 d40d->lli_count += d40d->lli_tx_len;
720 } 721 }
721 722
722 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 723 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
723 { 724 {
724 struct d40_chan *d40c = container_of(tx->chan, 725 struct d40_chan *d40c = container_of(tx->chan,
725 struct d40_chan, 726 struct d40_chan,
726 chan); 727 chan);
727 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 728 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
728 unsigned long flags; 729 unsigned long flags;
729 730
730 spin_lock_irqsave(&d40c->lock, flags); 731 spin_lock_irqsave(&d40c->lock, flags);
731 732
732 tx->cookie = d40_assign_cookie(d40c, d40d); 733 tx->cookie = d40_assign_cookie(d40c, d40d);
733 734
734 d40_desc_queue(d40c, d40d); 735 d40_desc_queue(d40c, d40d);
735 736
736 spin_unlock_irqrestore(&d40c->lock, flags); 737 spin_unlock_irqrestore(&d40c->lock, flags);
737 738
738 return tx->cookie; 739 return tx->cookie;
739 } 740 }
740 741
741 static int d40_start(struct d40_chan *d40c) 742 static int d40_start(struct d40_chan *d40c)
742 { 743 {
743 int err; 744 if (d40c->log_num != D40_PHY_CHAN)
744
745 if (d40c->log_num != D40_PHY_CHAN) {
746 err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
747 if (err)
748 return err;
749 d40_config_set_event(d40c, true); 745 d40_config_set_event(d40c, true);
750 }
751 746
752 err = d40_channel_execute_command(d40c, D40_DMA_RUN); 747 return d40_channel_execute_command(d40c, D40_DMA_RUN);
753
754 return err;
755 } 748 }
756 749
757 static struct d40_desc *d40_queue_start(struct d40_chan *d40c) 750 static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
758 { 751 {
759 struct d40_desc *d40d; 752 struct d40_desc *d40d;
760 int err; 753 int err;
761 754
762 /* Start queued jobs, if any */ 755 /* Start queued jobs, if any */
763 d40d = d40_first_queued(d40c); 756 d40d = d40_first_queued(d40c);
764 757
765 if (d40d != NULL) { 758 if (d40d != NULL) {
766 d40c->busy = true; 759 d40c->busy = true;
767 760
768 /* Remove from queue */ 761 /* Remove from queue */
769 d40_desc_remove(d40d); 762 d40_desc_remove(d40d);
770 763
771 /* Add to active queue */ 764 /* Add to active queue */
772 d40_desc_submit(d40c, d40d); 765 d40_desc_submit(d40c, d40d);
773 766
774 /* Initiate DMA job */ 767 /* Initiate DMA job */
775 d40_desc_load(d40c, d40d); 768 d40_desc_load(d40c, d40d);
776 769
777 /* Start dma job */ 770 /* Start dma job */
778 err = d40_start(d40c); 771 err = d40_start(d40c);
779 772
780 if (err) 773 if (err)
781 return NULL; 774 return NULL;
782 } 775 }
783 776
784 return d40d; 777 return d40d;
785 } 778 }
786 779
787 /* called from interrupt context */ 780 /* called from interrupt context */
788 static void dma_tc_handle(struct d40_chan *d40c) 781 static void dma_tc_handle(struct d40_chan *d40c)
789 { 782 {
790 struct d40_desc *d40d; 783 struct d40_desc *d40d;
791 784
792 if (!d40c->phy_chan) 785 if (!d40c->phy_chan)
793 return; 786 return;
794 787
795 /* Get first active entry from list */ 788 /* Get first active entry from list */
796 d40d = d40_first_active_get(d40c); 789 d40d = d40_first_active_get(d40c);
797 790
798 if (d40d == NULL) 791 if (d40d == NULL)
799 return; 792 return;
800 793
801 if (d40d->lli_count < d40d->lli_len) { 794 if (d40d->lli_count < d40d->lli_len) {
802 795
803 d40_desc_load(d40c, d40d); 796 d40_desc_load(d40c, d40d);
804 /* Start dma job */ 797 /* Start dma job */
805 (void) d40_start(d40c); 798 (void) d40_start(d40c);
806 return; 799 return;
807 } 800 }
808 801
809 if (d40_queue_start(d40c) == NULL) 802 if (d40_queue_start(d40c) == NULL)
810 d40c->busy = false; 803 d40c->busy = false;
811 804
812 d40c->pending_tx++; 805 d40c->pending_tx++;
813 tasklet_schedule(&d40c->tasklet); 806 tasklet_schedule(&d40c->tasklet);
814 807
815 } 808 }
816 809
817 static void dma_tasklet(unsigned long data) 810 static void dma_tasklet(unsigned long data)
818 { 811 {
819 struct d40_chan *d40c = (struct d40_chan *) data; 812 struct d40_chan *d40c = (struct d40_chan *) data;
820 struct d40_desc *d40d_fin; 813 struct d40_desc *d40d_fin;
821 unsigned long flags; 814 unsigned long flags;
822 dma_async_tx_callback callback; 815 dma_async_tx_callback callback;
823 void *callback_param; 816 void *callback_param;
824 817
825 spin_lock_irqsave(&d40c->lock, flags); 818 spin_lock_irqsave(&d40c->lock, flags);
826 819
827 /* Get first active entry from list */ 820 /* Get first active entry from list */
828 d40d_fin = d40_first_active_get(d40c); 821 d40d_fin = d40_first_active_get(d40c);
829 822
830 if (d40d_fin == NULL) 823 if (d40d_fin == NULL)
831 goto err; 824 goto err;
832 825
833 d40c->completed = d40d_fin->txd.cookie; 826 d40c->completed = d40d_fin->txd.cookie;
834 827
835 /* 828 /*
836 * If terminating a channel pending_tx is set to zero. 829 * If terminating a channel pending_tx is set to zero.
837 * This prevents any finished active jobs to return to the client. 830 * This prevents any finished active jobs to return to the client.
838 */ 831 */
839 if (d40c->pending_tx == 0) { 832 if (d40c->pending_tx == 0) {
840 spin_unlock_irqrestore(&d40c->lock, flags); 833 spin_unlock_irqrestore(&d40c->lock, flags);
841 return; 834 return;
842 } 835 }
843 836
844 /* Callback to client */ 837 /* Callback to client */
845 callback = d40d_fin->txd.callback; 838 callback = d40d_fin->txd.callback;
846 callback_param = d40d_fin->txd.callback_param; 839 callback_param = d40d_fin->txd.callback_param;
847 840
848 if (async_tx_test_ack(&d40d_fin->txd)) { 841 if (async_tx_test_ack(&d40d_fin->txd)) {
849 d40_pool_lli_free(d40d_fin); 842 d40_pool_lli_free(d40d_fin);
850 d40_desc_remove(d40d_fin); 843 d40_desc_remove(d40d_fin);
851 /* Return desc to free-list */ 844 /* Return desc to free-list */
852 d40_desc_free(d40c, d40d_fin); 845 d40_desc_free(d40c, d40d_fin);
853 } else { 846 } else {
854 if (!d40d_fin->is_in_client_list) { 847 if (!d40d_fin->is_in_client_list) {
855 d40_desc_remove(d40d_fin); 848 d40_desc_remove(d40d_fin);
856 list_add_tail(&d40d_fin->node, &d40c->client); 849 list_add_tail(&d40d_fin->node, &d40c->client);
857 d40d_fin->is_in_client_list = true; 850 d40d_fin->is_in_client_list = true;
858 } 851 }
859 } 852 }
860 853
861 d40c->pending_tx--; 854 d40c->pending_tx--;
862 855
863 if (d40c->pending_tx) 856 if (d40c->pending_tx)
864 tasklet_schedule(&d40c->tasklet); 857 tasklet_schedule(&d40c->tasklet);
865 858
866 spin_unlock_irqrestore(&d40c->lock, flags); 859 spin_unlock_irqrestore(&d40c->lock, flags);
867 860
868 if (callback) 861 if (callback)
869 callback(callback_param); 862 callback(callback_param);
870 863
871 return; 864 return;
872 865
873 err: 866 err:
874 /* Rescue manouver if receiving double interrupts */ 867 /* Rescue manouver if receiving double interrupts */
875 if (d40c->pending_tx > 0) 868 if (d40c->pending_tx > 0)
876 d40c->pending_tx--; 869 d40c->pending_tx--;
877 spin_unlock_irqrestore(&d40c->lock, flags); 870 spin_unlock_irqrestore(&d40c->lock, flags);
878 } 871 }
879 872
880 static irqreturn_t d40_handle_interrupt(int irq, void *data) 873 static irqreturn_t d40_handle_interrupt(int irq, void *data)
881 { 874 {
882 static const struct d40_interrupt_lookup il[] = { 875 static const struct d40_interrupt_lookup il[] = {
883 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, 876 {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0},
884 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, 877 {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
885 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, 878 {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
886 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, 879 {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
887 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, 880 {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0},
888 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, 881 {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32},
889 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, 882 {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64},
890 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, 883 {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96},
891 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, 884 {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN},
892 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, 885 {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN},
893 }; 886 };
894 887
895 int i; 888 int i;
896 u32 regs[ARRAY_SIZE(il)]; 889 u32 regs[ARRAY_SIZE(il)];
897 u32 tmp; 890 u32 tmp;
898 u32 idx; 891 u32 idx;
899 u32 row; 892 u32 row;
900 long chan = -1; 893 long chan = -1;
901 struct d40_chan *d40c; 894 struct d40_chan *d40c;
902 unsigned long flags; 895 unsigned long flags;
903 struct d40_base *base = data; 896 struct d40_base *base = data;
904 897
905 spin_lock_irqsave(&base->interrupt_lock, flags); 898 spin_lock_irqsave(&base->interrupt_lock, flags);
906 899
907 /* Read interrupt status of both logical and physical channels */ 900 /* Read interrupt status of both logical and physical channels */
908 for (i = 0; i < ARRAY_SIZE(il); i++) 901 for (i = 0; i < ARRAY_SIZE(il); i++)
909 regs[i] = readl(base->virtbase + il[i].src); 902 regs[i] = readl(base->virtbase + il[i].src);
910 903
911 for (;;) { 904 for (;;) {
912 905
913 chan = find_next_bit((unsigned long *)regs, 906 chan = find_next_bit((unsigned long *)regs,
914 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); 907 BITS_PER_LONG * ARRAY_SIZE(il), chan + 1);
915 908
916 /* No more set bits found? */ 909 /* No more set bits found? */
917 if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) 910 if (chan == BITS_PER_LONG * ARRAY_SIZE(il))
918 break; 911 break;
919 912
920 row = chan / BITS_PER_LONG; 913 row = chan / BITS_PER_LONG;
921 idx = chan & (BITS_PER_LONG - 1); 914 idx = chan & (BITS_PER_LONG - 1);
922 915
923 /* ACK interrupt */ 916 /* ACK interrupt */
924 tmp = readl(base->virtbase + il[row].clr); 917 tmp = readl(base->virtbase + il[row].clr);
925 tmp |= 1 << idx; 918 tmp |= 1 << idx;
926 writel(tmp, base->virtbase + il[row].clr); 919 writel(tmp, base->virtbase + il[row].clr);
927 920
928 if (il[row].offset == D40_PHY_CHAN) 921 if (il[row].offset == D40_PHY_CHAN)
929 d40c = base->lookup_phy_chans[idx]; 922 d40c = base->lookup_phy_chans[idx];
930 else 923 else
931 d40c = base->lookup_log_chans[il[row].offset + idx]; 924 d40c = base->lookup_log_chans[il[row].offset + idx];
932 spin_lock(&d40c->lock); 925 spin_lock(&d40c->lock);
933 926
934 if (!il[row].is_error) 927 if (!il[row].is_error)
935 dma_tc_handle(d40c); 928 dma_tc_handle(d40c);
936 else 929 else
937 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n", 930 dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n",
938 __func__, chan, il[row].offset, idx); 931 __func__, chan, il[row].offset, idx);
939 932
940 spin_unlock(&d40c->lock); 933 spin_unlock(&d40c->lock);
941 } 934 }
942 935
943 spin_unlock_irqrestore(&base->interrupt_lock, flags); 936 spin_unlock_irqrestore(&base->interrupt_lock, flags);
944 937
945 return IRQ_HANDLED; 938 return IRQ_HANDLED;
946 } 939 }
947 940
948 941
949 static int d40_validate_conf(struct d40_chan *d40c, 942 static int d40_validate_conf(struct d40_chan *d40c,
950 struct stedma40_chan_cfg *conf) 943 struct stedma40_chan_cfg *conf)
951 { 944 {
952 int res = 0; 945 int res = 0;
953 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); 946 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type);
954 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); 947 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type);
955 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) 948 bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
956 == STEDMA40_CHANNEL_IN_LOG_MODE; 949 == STEDMA40_CHANNEL_IN_LOG_MODE;
957 950
958 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH && 951 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH &&
959 dst_event_group == STEDMA40_DEV_DST_MEMORY) { 952 dst_event_group == STEDMA40_DEV_DST_MEMORY) {
960 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", 953 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n",
961 __func__); 954 __func__);
962 res = -EINVAL; 955 res = -EINVAL;
963 } 956 }
964 957
965 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM && 958 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM &&
966 src_event_group == STEDMA40_DEV_SRC_MEMORY) { 959 src_event_group == STEDMA40_DEV_SRC_MEMORY) {
967 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", 960 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n",
968 __func__); 961 __func__);
969 res = -EINVAL; 962 res = -EINVAL;
970 } 963 }
971 964
972 if (src_event_group == STEDMA40_DEV_SRC_MEMORY && 965 if (src_event_group == STEDMA40_DEV_SRC_MEMORY &&
973 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { 966 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) {
974 dev_err(&d40c->chan.dev->device, 967 dev_err(&d40c->chan.dev->device,
975 "[%s] No event line\n", __func__); 968 "[%s] No event line\n", __func__);
976 res = -EINVAL; 969 res = -EINVAL;
977 } 970 }
978 971
979 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && 972 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH &&
980 (src_event_group != dst_event_group)) { 973 (src_event_group != dst_event_group)) {
981 dev_err(&d40c->chan.dev->device, 974 dev_err(&d40c->chan.dev->device,
982 "[%s] Invalid event group\n", __func__); 975 "[%s] Invalid event group\n", __func__);
983 res = -EINVAL; 976 res = -EINVAL;
984 } 977 }
985 978
986 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { 979 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
987 /* 980 /*
988 * The DMAC HW supports it; support will be added to 981 * The DMAC HW supports it; support will be added to
989 * this driver if any dma client requires it. 982 * this driver if any dma client requires it.
990 */ 983 */
991 dev_err(&d40c->chan.dev->device, 984 dev_err(&d40c->chan.dev->device,
992 "[%s] periph to periph not supported\n", 985 "[%s] periph to periph not supported\n",
993 __func__); 986 __func__);
994 res = -EINVAL; 987 res = -EINVAL;
995 } 988 }
996 989
997 return res; 990 return res;
998 } 991 }
999 992
1000 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, 993 static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
1001 int log_event_line, bool is_log) 994 int log_event_line, bool is_log)
1002 { 995 {
1003 unsigned long flags; 996 unsigned long flags;
1004 spin_lock_irqsave(&phy->lock, flags); 997 spin_lock_irqsave(&phy->lock, flags);
1005 if (!is_log) { 998 if (!is_log) {
1006 /* Physical interrupts are masked per physical full channel */ 999 /* Physical interrupts are masked per physical full channel */
1007 if (phy->allocated_src == D40_ALLOC_FREE && 1000 if (phy->allocated_src == D40_ALLOC_FREE &&
1008 phy->allocated_dst == D40_ALLOC_FREE) { 1001 phy->allocated_dst == D40_ALLOC_FREE) {
1009 phy->allocated_dst = D40_ALLOC_PHY; 1002 phy->allocated_dst = D40_ALLOC_PHY;
1010 phy->allocated_src = D40_ALLOC_PHY; 1003 phy->allocated_src = D40_ALLOC_PHY;
1011 goto found; 1004 goto found;
1012 } else 1005 } else
1013 goto not_found; 1006 goto not_found;
1014 } 1007 }
1015 1008
1016 /* Logical channel */ 1009 /* Logical channel */
1017 if (is_src) { 1010 if (is_src) {
1018 if (phy->allocated_src == D40_ALLOC_PHY) 1011 if (phy->allocated_src == D40_ALLOC_PHY)
1019 goto not_found; 1012 goto not_found;
1020 1013
1021 if (phy->allocated_src == D40_ALLOC_FREE) 1014 if (phy->allocated_src == D40_ALLOC_FREE)
1022 phy->allocated_src = D40_ALLOC_LOG_FREE; 1015 phy->allocated_src = D40_ALLOC_LOG_FREE;
1023 1016
1024 if (!(phy->allocated_src & (1 << log_event_line))) { 1017 if (!(phy->allocated_src & (1 << log_event_line))) {
1025 phy->allocated_src |= 1 << log_event_line; 1018 phy->allocated_src |= 1 << log_event_line;
1026 goto found; 1019 goto found;
1027 } else 1020 } else
1028 goto not_found; 1021 goto not_found;
1029 } else { 1022 } else {
1030 if (phy->allocated_dst == D40_ALLOC_PHY) 1023 if (phy->allocated_dst == D40_ALLOC_PHY)
1031 goto not_found; 1024 goto not_found;
1032 1025
1033 if (phy->allocated_dst == D40_ALLOC_FREE) 1026 if (phy->allocated_dst == D40_ALLOC_FREE)
1034 phy->allocated_dst = D40_ALLOC_LOG_FREE; 1027 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1035 1028
1036 if (!(phy->allocated_dst & (1 << log_event_line))) { 1029 if (!(phy->allocated_dst & (1 << log_event_line))) {
1037 phy->allocated_dst |= 1 << log_event_line; 1030 phy->allocated_dst |= 1 << log_event_line;
1038 goto found; 1031 goto found;
1039 } else 1032 } else
1040 goto not_found; 1033 goto not_found;
1041 } 1034 }
1042 1035
1043 not_found: 1036 not_found:
1044 spin_unlock_irqrestore(&phy->lock, flags); 1037 spin_unlock_irqrestore(&phy->lock, flags);
1045 return false; 1038 return false;
1046 found: 1039 found:
1047 spin_unlock_irqrestore(&phy->lock, flags); 1040 spin_unlock_irqrestore(&phy->lock, flags);
1048 return true; 1041 return true;
1049 } 1042 }
1050 1043
1051 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, 1044 static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1052 int log_event_line) 1045 int log_event_line)
1053 { 1046 {
1054 unsigned long flags; 1047 unsigned long flags;
1055 bool is_free = false; 1048 bool is_free = false;
1056 1049
1057 spin_lock_irqsave(&phy->lock, flags); 1050 spin_lock_irqsave(&phy->lock, flags);
1058 if (!log_event_line) { 1051 if (!log_event_line) {
1059 /* Physical interrupts are masked per physical full channel */ 1052 /* Physical interrupts are masked per physical full channel */
1060 phy->allocated_dst = D40_ALLOC_FREE; 1053 phy->allocated_dst = D40_ALLOC_FREE;
1061 phy->allocated_src = D40_ALLOC_FREE; 1054 phy->allocated_src = D40_ALLOC_FREE;
1062 is_free = true; 1055 is_free = true;
1063 goto out; 1056 goto out;
1064 } 1057 }
1065 1058
1066 /* Logical channel */ 1059 /* Logical channel */
1067 if (is_src) { 1060 if (is_src) {
1068 phy->allocated_src &= ~(1 << log_event_line); 1061 phy->allocated_src &= ~(1 << log_event_line);
1069 if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1062 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1070 phy->allocated_src = D40_ALLOC_FREE; 1063 phy->allocated_src = D40_ALLOC_FREE;
1071 } else { 1064 } else {
1072 phy->allocated_dst &= ~(1 << log_event_line); 1065 phy->allocated_dst &= ~(1 << log_event_line);
1073 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1066 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1074 phy->allocated_dst = D40_ALLOC_FREE; 1067 phy->allocated_dst = D40_ALLOC_FREE;
1075 } 1068 }
1076 1069
1077 is_free = ((phy->allocated_src | phy->allocated_dst) == 1070 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1078 D40_ALLOC_FREE); 1071 D40_ALLOC_FREE);
1079 1072
1080 out: 1073 out:
1081 spin_unlock_irqrestore(&phy->lock, flags); 1074 spin_unlock_irqrestore(&phy->lock, flags);
1082 1075
1083 return is_free; 1076 return is_free;
1084 } 1077 }
1085 1078
1086 static int d40_allocate_channel(struct d40_chan *d40c) 1079 static int d40_allocate_channel(struct d40_chan *d40c)
1087 { 1080 {
1088 int dev_type; 1081 int dev_type;
1089 int event_group; 1082 int event_group;
1090 int event_line; 1083 int event_line;
1091 struct d40_phy_res *phys; 1084 struct d40_phy_res *phys;
1092 int i; 1085 int i;
1093 int j; 1086 int j;
1094 int log_num; 1087 int log_num;
1095 bool is_src; 1088 bool is_src;
1096 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) 1089 bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE)
1097 == STEDMA40_CHANNEL_IN_LOG_MODE; 1090 == STEDMA40_CHANNEL_IN_LOG_MODE;
1098 1091
1099 1092
1100 phys = d40c->base->phy_res; 1093 phys = d40c->base->phy_res;
1101 1094
1102 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1095 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1103 dev_type = d40c->dma_cfg.src_dev_type; 1096 dev_type = d40c->dma_cfg.src_dev_type;
1104 log_num = 2 * dev_type; 1097 log_num = 2 * dev_type;
1105 is_src = true; 1098 is_src = true;
1106 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1099 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1107 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1100 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1108 /* dst event lines are used for logical memcpy */ 1101 /* dst event lines are used for logical memcpy */
1109 dev_type = d40c->dma_cfg.dst_dev_type; 1102 dev_type = d40c->dma_cfg.dst_dev_type;
1110 log_num = 2 * dev_type + 1; 1103 log_num = 2 * dev_type + 1;
1111 is_src = false; 1104 is_src = false;
1112 } else 1105 } else
1113 return -EINVAL; 1106 return -EINVAL;
1114 1107
1115 event_group = D40_TYPE_TO_GROUP(dev_type); 1108 event_group = D40_TYPE_TO_GROUP(dev_type);
1116 event_line = D40_TYPE_TO_EVENT(dev_type); 1109 event_line = D40_TYPE_TO_EVENT(dev_type);
1117 1110
1118 if (!is_log) { 1111 if (!is_log) {
1119 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1112 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1120 /* Find physical half channel */ 1113 /* Find physical half channel */
1121 for (i = 0; i < d40c->base->num_phy_chans; i++) { 1114 for (i = 0; i < d40c->base->num_phy_chans; i++) {
1122 1115
1123 if (d40_alloc_mask_set(&phys[i], is_src, 1116 if (d40_alloc_mask_set(&phys[i], is_src,
1124 0, is_log)) 1117 0, is_log))
1125 goto found_phy; 1118 goto found_phy;
1126 } 1119 }
1127 } else 1120 } else
1128 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1121 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1129 int phy_num = j + event_group * 2; 1122 int phy_num = j + event_group * 2;
1130 for (i = phy_num; i < phy_num + 2; i++) { 1123 for (i = phy_num; i < phy_num + 2; i++) {
1131 if (d40_alloc_mask_set(&phys[i], is_src, 1124 if (d40_alloc_mask_set(&phys[i], is_src,
1132 0, is_log)) 1125 0, is_log))
1133 goto found_phy; 1126 goto found_phy;
1134 } 1127 }
1135 } 1128 }
1136 return -EINVAL; 1129 return -EINVAL;
1137 found_phy: 1130 found_phy:
1138 d40c->phy_chan = &phys[i]; 1131 d40c->phy_chan = &phys[i];
1139 d40c->log_num = D40_PHY_CHAN; 1132 d40c->log_num = D40_PHY_CHAN;
1140 goto out; 1133 goto out;
1141 } 1134 }
1142 if (dev_type == -1) 1135 if (dev_type == -1)
1143 return -EINVAL; 1136 return -EINVAL;
1144 1137
1145 /* Find logical channel */ 1138 /* Find logical channel */
1146 for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1139 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1147 int phy_num = j + event_group * 2; 1140 int phy_num = j + event_group * 2;
1148 /* 1141 /*
1149 * Spread logical channels across all available physical 1142 * Spread logical channels across all available physical
1150 * channels rather than packing every logical channel onto 1143 * channels rather than packing every logical channel onto
1151 * the first available one. 1144 * the first available one.
1152 */ 1145 */
1153 if (is_src) { 1146 if (is_src) {
1154 for (i = phy_num; i < phy_num + 2; i++) { 1147 for (i = phy_num; i < phy_num + 2; i++) {
1155 if (d40_alloc_mask_set(&phys[i], is_src, 1148 if (d40_alloc_mask_set(&phys[i], is_src,
1156 event_line, is_log)) 1149 event_line, is_log))
1157 goto found_log; 1150 goto found_log;
1158 } 1151 }
1159 } else { 1152 } else {
1160 for (i = phy_num + 1; i >= phy_num; i--) { 1153 for (i = phy_num + 1; i >= phy_num; i--) {
1161 if (d40_alloc_mask_set(&phys[i], is_src, 1154 if (d40_alloc_mask_set(&phys[i], is_src,
1162 event_line, is_log)) 1155 event_line, is_log))
1163 goto found_log; 1156 goto found_log;
1164 } 1157 }
1165 } 1158 }
1166 } 1159 }
1167 return -EINVAL; 1160 return -EINVAL;
1168 1161
1169 found_log: 1162 found_log:
1170 d40c->phy_chan = &phys[i]; 1163 d40c->phy_chan = &phys[i];
1171 d40c->log_num = log_num; 1164 d40c->log_num = log_num;
1172 out: 1165 out:
1173 1166
1174 if (is_log) 1167 if (is_log)
1175 d40c->base->lookup_log_chans[d40c->log_num] = d40c; 1168 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
1176 else 1169 else
1177 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; 1170 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
1178 1171
1179 return 0; 1172 return 0;
1180 1173
1181 } 1174 }
1182 1175
1183 static int d40_config_memcpy(struct d40_chan *d40c) 1176 static int d40_config_memcpy(struct d40_chan *d40c)
1184 { 1177 {
1185 dma_cap_mask_t cap = d40c->chan.device->cap_mask; 1178 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
1186 1179
1187 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { 1180 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
1188 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; 1181 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log;
1189 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; 1182 d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY;
1190 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> 1183 d40c->dma_cfg.dst_dev_type = d40c->base->plat_data->
1191 memcpy[d40c->chan.chan_id]; 1184 memcpy[d40c->chan.chan_id];
1192 1185
1193 } else if (dma_has_cap(DMA_MEMCPY, cap) && 1186 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
1194 dma_has_cap(DMA_SLAVE, cap)) { 1187 dma_has_cap(DMA_SLAVE, cap)) {
1195 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 1188 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy;
1196 } else { 1189 } else {
1197 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", 1190 dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n",
1198 __func__); 1191 __func__);
1199 return -EINVAL; 1192 return -EINVAL;
1200 } 1193 }
1201 1194
1202 return 0; 1195 return 0;
1203 } 1196 }
1204 1197
1205 1198
1206 static int d40_free_dma(struct d40_chan *d40c) 1199 static int d40_free_dma(struct d40_chan *d40c)
1207 { 1200 {
1208 1201
1209 int res = 0; 1202 int res = 0;
1210 u32 event, dir; 1203 u32 event, dir;
1211 struct d40_phy_res *phy = d40c->phy_chan; 1204 struct d40_phy_res *phy = d40c->phy_chan;
1212 bool is_src; 1205 bool is_src;
1213 struct d40_desc *d; 1206 struct d40_desc *d;
1214 struct d40_desc *_d; 1207 struct d40_desc *_d;
1215 1208
1216 1209
1217 /* Terminate all queued and active transfers */ 1210 /* Terminate all queued and active transfers */
1218 d40_term_all(d40c); 1211 d40_term_all(d40c);
1219 1212
1220 /* Release client owned descriptors */ 1213 /* Release client owned descriptors */
1221 if (!list_empty(&d40c->client)) 1214 if (!list_empty(&d40c->client))
1222 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1215 list_for_each_entry_safe(d, _d, &d40c->client, node) {
1223 d40_pool_lli_free(d); 1216 d40_pool_lli_free(d);
1224 d40_desc_remove(d); 1217 d40_desc_remove(d);
1225 /* Return desc to free-list */ 1218 /* Return desc to free-list */
1226 d40_desc_free(d40c, d); 1219 d40_desc_free(d40c, d);
1227 } 1220 }
1228 1221
1229 if (phy == NULL) { 1222 if (phy == NULL) {
1230 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", 1223 dev_err(&d40c->chan.dev->device, "[%s] phy == null\n",
1231 __func__); 1224 __func__);
1232 return -EINVAL; 1225 return -EINVAL;
1233 } 1226 }
1234 1227
1235 if (phy->allocated_src == D40_ALLOC_FREE && 1228 if (phy->allocated_src == D40_ALLOC_FREE &&
1236 phy->allocated_dst == D40_ALLOC_FREE) { 1229 phy->allocated_dst == D40_ALLOC_FREE) {
1237 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", 1230 dev_err(&d40c->chan.dev->device, "[%s] channel already free\n",
1238 __func__); 1231 __func__);
1239 return -EINVAL; 1232 return -EINVAL;
1240 } 1233 }
1241 1234
1242 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1235 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1243 if (res) { 1236 if (res) {
1244 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", 1237 dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n",
1245 __func__); 1238 __func__);
1246 return res; 1239 return res;
1247 } 1240 }
1248 1241
1249 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1242 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1250 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1243 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1251 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1244 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1252 dir = D40_CHAN_REG_SDLNK; 1245 dir = D40_CHAN_REG_SDLNK;
1253 is_src = false; 1246 is_src = false;
1254 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1247 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
1255 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1248 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1256 dir = D40_CHAN_REG_SSLNK; 1249 dir = D40_CHAN_REG_SSLNK;
1257 is_src = true; 1250 is_src = true;
1258 } else { 1251 } else {
1259 dev_err(&d40c->chan.dev->device, 1252 dev_err(&d40c->chan.dev->device,
1260 "[%s] Unknown direction\n", __func__); 1253 "[%s] Unknown direction\n", __func__);
1261 return -EINVAL; 1254 return -EINVAL;
1262 } 1255 }
1263 1256
1264 if (d40c->log_num != D40_PHY_CHAN) { 1257 if (d40c->log_num != D40_PHY_CHAN) {
1265 /* 1258 /*
1266 * Release the logical channel; deactivate the event line 1259 * Release the logical channel; deactivate the event line
1267 * while the physical resource is suspended. 1260 * while the physical resource is suspended.
1268 */ 1261 */
1269 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) & 1262 writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) &
1270 D40_EVENTLINE_MASK(event), 1263 D40_EVENTLINE_MASK(event),
1271 d40c->base->virtbase + D40_DREG_PCBASE + 1264 d40c->base->virtbase + D40_DREG_PCBASE +
1272 phy->num * D40_DREG_PCDELTA + dir); 1265 phy->num * D40_DREG_PCDELTA + dir);
1273 1266
1274 d40c->base->lookup_log_chans[d40c->log_num] = NULL; 1267 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1275 1268
1276 /* 1269 /*
1277 * Check if there are more logical allocations 1270 * Check if there are more logical allocations
1278 * on this phy channel. 1271 * on this phy channel.
1279 */ 1272 */
1280 if (!d40_alloc_mask_free(phy, is_src, event)) { 1273 if (!d40_alloc_mask_free(phy, is_src, event)) {
1281 /* Resume the other logical channels if any */ 1274 /* Resume the other logical channels if any */
1282 if (d40_chan_has_events(d40c)) { 1275 if (d40_chan_has_events(d40c)) {
1283 res = d40_channel_execute_command(d40c, 1276 res = d40_channel_execute_command(d40c,
1284 D40_DMA_RUN); 1277 D40_DMA_RUN);
1285 if (res) { 1278 if (res) {
1286 dev_err(&d40c->chan.dev->device, 1279 dev_err(&d40c->chan.dev->device,
1287 "[%s] Failed to execute RUN command\n", 1280 "[%s] Failed to execute RUN command\n",
1288 __func__); 1281 __func__);
1289 return res; 1282 return res;
1290 } 1283 }
1291 } 1284 }
1292 return 0; 1285 return 0;
1293 } 1286 }
1294 } else 1287 } else
1295 d40_alloc_mask_free(phy, is_src, 0); 1288 d40_alloc_mask_free(phy, is_src, 0);
1296 1289
1297 /* Release physical channel */ 1290 /* Release physical channel */
1298 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 1291 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
1299 if (res) { 1292 if (res) {
1300 dev_err(&d40c->chan.dev->device, 1293 dev_err(&d40c->chan.dev->device,
1301 "[%s] Failed to stop channel\n", __func__); 1294 "[%s] Failed to stop channel\n", __func__);
1302 return res; 1295 return res;
1303 } 1296 }
1304 d40c->phy_chan = NULL; 1297 d40c->phy_chan = NULL;
1305 /* Invalidate channel type */ 1298 /* Invalidate channel type */
1306 d40c->dma_cfg.channel_type = 0; 1299 d40c->dma_cfg.channel_type = 0;
1307 d40c->base->lookup_phy_chans[phy->num] = NULL; 1300 d40c->base->lookup_phy_chans[phy->num] = NULL;
1308 1301
1309 return 0; 1302 return 0;
1310 } 1303 }
1311 1304
1312 static int d40_pause(struct dma_chan *chan) 1305 static int d40_pause(struct dma_chan *chan)
1313 { 1306 {
1314 struct d40_chan *d40c = 1307 struct d40_chan *d40c =
1315 container_of(chan, struct d40_chan, chan); 1308 container_of(chan, struct d40_chan, chan);
1316 int res; 1309 int res;
1317 unsigned long flags; 1310 unsigned long flags;
1318 1311
1319 spin_lock_irqsave(&d40c->lock, flags); 1312 spin_lock_irqsave(&d40c->lock, flags);
1320 1313
1321 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1314 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1322 if (res == 0) { 1315 if (res == 0) {
1323 if (d40c->log_num != D40_PHY_CHAN) { 1316 if (d40c->log_num != D40_PHY_CHAN) {
1324 d40_config_set_event(d40c, false); 1317 d40_config_set_event(d40c, false);
1325 /* Resume the other logical channels if any */ 1318 /* Resume the other logical channels if any */
1326 if (d40_chan_has_events(d40c)) 1319 if (d40_chan_has_events(d40c))
1327 res = d40_channel_execute_command(d40c, 1320 res = d40_channel_execute_command(d40c,
1328 D40_DMA_RUN); 1321 D40_DMA_RUN);
1329 } 1322 }
1330 } 1323 }
1331 1324
1332 spin_unlock_irqrestore(&d40c->lock, flags); 1325 spin_unlock_irqrestore(&d40c->lock, flags);
1333 return res; 1326 return res;
1334 } 1327 }
1335 1328
1336 static bool d40_is_paused(struct d40_chan *d40c) 1329 static bool d40_is_paused(struct d40_chan *d40c)
1337 { 1330 {
1338 bool is_paused = false; 1331 bool is_paused = false;
1339 unsigned long flags; 1332 unsigned long flags;
1340 void __iomem *active_reg; 1333 void __iomem *active_reg;
1341 u32 status; 1334 u32 status;
1342 u32 event; 1335 u32 event;
1343 int res;
1344 1336
1345 spin_lock_irqsave(&d40c->lock, flags); 1337 spin_lock_irqsave(&d40c->lock, flags);
1346 1338
1347 if (d40c->log_num == D40_PHY_CHAN) { 1339 if (d40c->log_num == D40_PHY_CHAN) {
1348 if (d40c->phy_chan->num % 2 == 0) 1340 if (d40c->phy_chan->num % 2 == 0)
1349 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1341 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
1350 else 1342 else
1351 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1343 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
1352 1344
1353 status = (readl(active_reg) & 1345 status = (readl(active_reg) &
1354 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1346 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
1355 D40_CHAN_POS(d40c->phy_chan->num); 1347 D40_CHAN_POS(d40c->phy_chan->num);
1356 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 1348 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
1357 is_paused = true; 1349 is_paused = true;
1358 1350
1359 goto _exit; 1351 goto _exit;
1360 } 1352 }
1361 1353
1362 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
1363 if (res != 0)
1364 goto _exit;
1365
1366 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1354 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1367 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) 1355 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
1368 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1356 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type);
1369 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 1357 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1370 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1358 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type);
1371 else { 1359 else {
1372 dev_err(&d40c->chan.dev->device, 1360 dev_err(&d40c->chan.dev->device,
1373 "[%s] Unknown direction\n", __func__); 1361 "[%s] Unknown direction\n", __func__);
1374 goto _exit; 1362 goto _exit;
1375 } 1363 }
1376 status = d40_chan_has_events(d40c); 1364 status = d40_chan_has_events(d40c);
1377 status = (status & D40_EVENTLINE_MASK(event)) >> 1365 status = (status & D40_EVENTLINE_MASK(event)) >>
1378 D40_EVENTLINE_POS(event); 1366 D40_EVENTLINE_POS(event);
1379 1367
1380 if (status != D40_DMA_RUN) 1368 if (status != D40_DMA_RUN)
1381 is_paused = true; 1369 is_paused = true;
1382
1383 /* Resume the other logical channels if any */
1384 if (d40_chan_has_events(d40c))
1385 res = d40_channel_execute_command(d40c,
1386 D40_DMA_RUN);
1387
1388 _exit: 1370 _exit:
1389 spin_unlock_irqrestore(&d40c->lock, flags); 1371 spin_unlock_irqrestore(&d40c->lock, flags);
1390 return is_paused; 1372 return is_paused;
1391 1373
1392 } 1374 }
1393 1375
1394 1376
1395 static bool d40_tx_is_linked(struct d40_chan *d40c) 1377 static bool d40_tx_is_linked(struct d40_chan *d40c)
1396 { 1378 {
1397 bool is_link; 1379 bool is_link;
1398 1380
1399 if (d40c->log_num != D40_PHY_CHAN) 1381 if (d40c->log_num != D40_PHY_CHAN)
1400 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 1382 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
1401 else 1383 else
1402 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1384 is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE +
1403 d40c->phy_chan->num * D40_DREG_PCDELTA + 1385 d40c->phy_chan->num * D40_DREG_PCDELTA +
1404 D40_CHAN_REG_SDLNK) & 1386 D40_CHAN_REG_SDLNK) &
1405 D40_SREG_LNK_PHYS_LNK_MASK; 1387 D40_SREG_LNK_PHYS_LNK_MASK;
1406 return is_link; 1388 return is_link;
1407 } 1389 }
1408 1390
1409 static u32 d40_residue(struct d40_chan *d40c) 1391 static u32 d40_residue(struct d40_chan *d40c)
1410 { 1392 {
1411 u32 num_elt; 1393 u32 num_elt;
1412 1394
1413 if (d40c->log_num != D40_PHY_CHAN) 1395 if (d40c->log_num != D40_PHY_CHAN)
1414 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 1396 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
1415 >> D40_MEM_LCSP2_ECNT_POS; 1397 >> D40_MEM_LCSP2_ECNT_POS;
1416 else 1398 else
1417 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + 1399 num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE +
1418 d40c->phy_chan->num * D40_DREG_PCDELTA + 1400 d40c->phy_chan->num * D40_DREG_PCDELTA +
1419 D40_CHAN_REG_SDELT) & 1401 D40_CHAN_REG_SDELT) &
1420 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; 1402 D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS;
1421 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 1403 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
1422 } 1404 }
1423 1405
1424 static int d40_resume(struct dma_chan *chan) 1406 static int d40_resume(struct dma_chan *chan)
1425 { 1407 {
1426 struct d40_chan *d40c = 1408 struct d40_chan *d40c =
1427 container_of(chan, struct d40_chan, chan); 1409 container_of(chan, struct d40_chan, chan);
1428 int res = 0; 1410 int res = 0;
1429 unsigned long flags; 1411 unsigned long flags;
1430 1412
1431 spin_lock_irqsave(&d40c->lock, flags); 1413 spin_lock_irqsave(&d40c->lock, flags);
1432 1414
1433 if (d40c->log_num != D40_PHY_CHAN) { 1415 /* If bytes left to transfer or linked tx resume job */
1434 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1416 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1435 if (res) 1417 if (d40c->log_num != D40_PHY_CHAN)
1436 goto out;
1437
1438 /* If bytes left to transfer or linked tx resume job */
1439 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) {
1440 d40_config_set_event(d40c, true); 1418 d40_config_set_event(d40c, true);
1441 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1442 }
1443 } else if (d40_residue(d40c) || d40_tx_is_linked(d40c))
1444 res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1419 res = d40_channel_execute_command(d40c, D40_DMA_RUN);
1420 }
1445 1421
1446 out:
1447 spin_unlock_irqrestore(&d40c->lock, flags); 1422 spin_unlock_irqrestore(&d40c->lock, flags);
1448 return res; 1423 return res;
1449 } 1424 }
1450 1425
1451 static u32 stedma40_residue(struct dma_chan *chan) 1426 static u32 stedma40_residue(struct dma_chan *chan)
1452 { 1427 {
1453 struct d40_chan *d40c = 1428 struct d40_chan *d40c =
1454 container_of(chan, struct d40_chan, chan); 1429 container_of(chan, struct d40_chan, chan);
1455 u32 bytes_left; 1430 u32 bytes_left;
1456 unsigned long flags; 1431 unsigned long flags;
1457 1432
1458 spin_lock_irqsave(&d40c->lock, flags); 1433 spin_lock_irqsave(&d40c->lock, flags);
1459 bytes_left = d40_residue(d40c); 1434 bytes_left = d40_residue(d40c);
1460 spin_unlock_irqrestore(&d40c->lock, flags); 1435 spin_unlock_irqrestore(&d40c->lock, flags);
1461 1436
1462 return bytes_left; 1437 return bytes_left;
1463 } 1438 }
1464 1439
1465 /* Public DMA functions in addition to the DMA engine framework */ 1440 /* Public DMA functions in addition to the DMA engine framework */
1466 1441
1467 int stedma40_set_psize(struct dma_chan *chan, 1442 int stedma40_set_psize(struct dma_chan *chan,
1468 int src_psize, 1443 int src_psize,
1469 int dst_psize) 1444 int dst_psize)
1470 { 1445 {
1471 struct d40_chan *d40c = 1446 struct d40_chan *d40c =
1472 container_of(chan, struct d40_chan, chan); 1447 container_of(chan, struct d40_chan, chan);
1473 unsigned long flags; 1448 unsigned long flags;
1474 1449
1475 spin_lock_irqsave(&d40c->lock, flags); 1450 spin_lock_irqsave(&d40c->lock, flags);
1476 1451
1477 if (d40c->log_num != D40_PHY_CHAN) { 1452 if (d40c->log_num != D40_PHY_CHAN) {
1478 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; 1453 d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1479 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; 1454 d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK;
1480 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; 1455 d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1481 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; 1456 d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
1482 goto out; 1457 goto out;
1483 } 1458 }
1484 1459
1485 if (src_psize == STEDMA40_PSIZE_PHY_1) 1460 if (src_psize == STEDMA40_PSIZE_PHY_1)
1486 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); 1461 d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1487 else { 1462 else {
1488 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; 1463 d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1489 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << 1464 d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1490 D40_SREG_CFG_PSIZE_POS); 1465 D40_SREG_CFG_PSIZE_POS);
1491 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; 1466 d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS;
1492 } 1467 }
1493 1468
1494 if (dst_psize == STEDMA40_PSIZE_PHY_1) 1469 if (dst_psize == STEDMA40_PSIZE_PHY_1)
1495 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); 1470 d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS);
1496 else { 1471 else {
1497 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; 1472 d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS;
1498 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << 1473 d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 <<
1499 D40_SREG_CFG_PSIZE_POS); 1474 D40_SREG_CFG_PSIZE_POS);
1500 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; 1475 d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS;
1501 } 1476 }
1502 out: 1477 out:
1503 spin_unlock_irqrestore(&d40c->lock, flags); 1478 spin_unlock_irqrestore(&d40c->lock, flags);
1504 return 0; 1479 return 0;
1505 } 1480 }
1506 EXPORT_SYMBOL(stedma40_set_psize); 1481 EXPORT_SYMBOL(stedma40_set_psize);
1507 1482
1508 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 1483 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
1509 struct scatterlist *sgl_dst, 1484 struct scatterlist *sgl_dst,
1510 struct scatterlist *sgl_src, 1485 struct scatterlist *sgl_src,
1511 unsigned int sgl_len, 1486 unsigned int sgl_len,
1512 unsigned long dma_flags) 1487 unsigned long dma_flags)
1513 { 1488 {
1514 int res; 1489 int res;
1515 struct d40_desc *d40d; 1490 struct d40_desc *d40d;
1516 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1491 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1517 chan); 1492 chan);
1518 unsigned long flags; 1493 unsigned long flags;
1519 1494
1520 if (d40c->phy_chan == NULL) { 1495 if (d40c->phy_chan == NULL) {
1521 dev_err(&d40c->chan.dev->device, 1496 dev_err(&d40c->chan.dev->device,
1522 "[%s] Unallocated channel.\n", __func__); 1497 "[%s] Unallocated channel.\n", __func__);
1523 return ERR_PTR(-EINVAL); 1498 return ERR_PTR(-EINVAL);
1524 } 1499 }
1525 1500
1526 spin_lock_irqsave(&d40c->lock, flags); 1501 spin_lock_irqsave(&d40c->lock, flags);
1527 d40d = d40_desc_get(d40c); 1502 d40d = d40_desc_get(d40c);
1528 1503
1529 if (d40d == NULL) 1504 if (d40d == NULL)
1530 goto err; 1505 goto err;
1531 1506
1532 d40d->lli_len = sgl_len; 1507 d40d->lli_len = sgl_len;
1533 d40d->lli_tx_len = d40d->lli_len; 1508 d40d->lli_tx_len = d40d->lli_len;
1534 d40d->txd.flags = dma_flags; 1509 d40d->txd.flags = dma_flags;
1535 1510
1536 if (d40c->log_num != D40_PHY_CHAN) { 1511 if (d40c->log_num != D40_PHY_CHAN) {
1537 if (d40d->lli_len > d40c->base->plat_data->llis_per_log) 1512 if (d40d->lli_len > d40c->base->plat_data->llis_per_log)
1538 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; 1513 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1539 1514
1540 if (sgl_len > 1) 1515 if (sgl_len > 1)
1541 /* 1516 /*
1542 * Check if there is space available in lcla. If not, 1517 * Check if there is space available in lcla. If not,
1543 * split the list into one-element pieces and run only 1518 * split the list into one-element pieces and run only
1544 * in lcpa space. 1519 * in lcpa space.
1545 */ 1520 */
1546 if (d40_lcla_id_get(d40c, 1521 if (d40_lcla_id_get(d40c,
1547 &d40c->base->lcla_pool) != 0) 1522 &d40c->base->lcla_pool) != 0)
1548 d40d->lli_tx_len = 1; 1523 d40d->lli_tx_len = 1;
1549 1524
1550 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { 1525 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) {
1551 dev_err(&d40c->chan.dev->device, 1526 dev_err(&d40c->chan.dev->device,
1552 "[%s] Out of memory\n", __func__); 1527 "[%s] Out of memory\n", __func__);
1553 goto err; 1528 goto err;
1554 } 1529 }
1555 1530
1556 (void) d40_log_sg_to_lli(d40c->lcla.src_id, 1531 (void) d40_log_sg_to_lli(d40c->lcla.src_id,
1557 sgl_src, 1532 sgl_src,
1558 sgl_len, 1533 sgl_len,
1559 d40d->lli_log.src, 1534 d40d->lli_log.src,
1560 d40c->log_def.lcsp1, 1535 d40c->log_def.lcsp1,
1561 d40c->dma_cfg.src_info.data_width, 1536 d40c->dma_cfg.src_info.data_width,
1562 dma_flags & DMA_PREP_INTERRUPT, 1537 dma_flags & DMA_PREP_INTERRUPT,
1563 d40d->lli_tx_len, 1538 d40d->lli_tx_len,
1564 d40c->base->plat_data->llis_per_log); 1539 d40c->base->plat_data->llis_per_log);
1565 1540
1566 (void) d40_log_sg_to_lli(d40c->lcla.dst_id, 1541 (void) d40_log_sg_to_lli(d40c->lcla.dst_id,
1567 sgl_dst, 1542 sgl_dst,
1568 sgl_len, 1543 sgl_len,
1569 d40d->lli_log.dst, 1544 d40d->lli_log.dst,
1570 d40c->log_def.lcsp3, 1545 d40c->log_def.lcsp3,
1571 d40c->dma_cfg.dst_info.data_width, 1546 d40c->dma_cfg.dst_info.data_width,
1572 dma_flags & DMA_PREP_INTERRUPT, 1547 dma_flags & DMA_PREP_INTERRUPT,
1573 d40d->lli_tx_len, 1548 d40d->lli_tx_len,
1574 d40c->base->plat_data->llis_per_log); 1549 d40c->base->plat_data->llis_per_log);
1575 1550
1576 1551
1577 } else { 1552 } else {
1578 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1553 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1579 dev_err(&d40c->chan.dev->device, 1554 dev_err(&d40c->chan.dev->device,
1580 "[%s] Out of memory\n", __func__); 1555 "[%s] Out of memory\n", __func__);
1581 goto err; 1556 goto err;
1582 } 1557 }
1583 1558
1584 res = d40_phy_sg_to_lli(sgl_src, 1559 res = d40_phy_sg_to_lli(sgl_src,
1585 sgl_len, 1560 sgl_len,
1586 0, 1561 0,
1587 d40d->lli_phy.src, 1562 d40d->lli_phy.src,
1588 d40d->lli_phy.src_addr, 1563 d40d->lli_phy.src_addr,
1589 d40c->src_def_cfg, 1564 d40c->src_def_cfg,
1590 d40c->dma_cfg.src_info.data_width, 1565 d40c->dma_cfg.src_info.data_width,
1591 d40c->dma_cfg.src_info.psize, 1566 d40c->dma_cfg.src_info.psize,
1592 true); 1567 true);
1593 1568
1594 if (res < 0) 1569 if (res < 0)
1595 goto err; 1570 goto err;
1596 1571
1597 res = d40_phy_sg_to_lli(sgl_dst, 1572 res = d40_phy_sg_to_lli(sgl_dst,
1598 sgl_len, 1573 sgl_len,
1599 0, 1574 0,
1600 d40d->lli_phy.dst, 1575 d40d->lli_phy.dst,
1601 d40d->lli_phy.dst_addr, 1576 d40d->lli_phy.dst_addr,
1602 d40c->dst_def_cfg, 1577 d40c->dst_def_cfg,
1603 d40c->dma_cfg.dst_info.data_width, 1578 d40c->dma_cfg.dst_info.data_width,
1604 d40c->dma_cfg.dst_info.psize, 1579 d40c->dma_cfg.dst_info.psize,
1605 true); 1580 true);
1606 1581
1607 if (res < 0) 1582 if (res < 0)
1608 goto err; 1583 goto err;
1609 1584
1610 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1585 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1611 d40d->lli_pool.size, DMA_TO_DEVICE); 1586 d40d->lli_pool.size, DMA_TO_DEVICE);
1612 } 1587 }
1613 1588
1614 dma_async_tx_descriptor_init(&d40d->txd, chan); 1589 dma_async_tx_descriptor_init(&d40d->txd, chan);
1615 1590
1616 d40d->txd.tx_submit = d40_tx_submit; 1591 d40d->txd.tx_submit = d40_tx_submit;
1617 1592
1618 spin_unlock_irqrestore(&d40c->lock, flags); 1593 spin_unlock_irqrestore(&d40c->lock, flags);
1619 1594
1620 return &d40d->txd; 1595 return &d40d->txd;
1621 err: 1596 err:
1622 spin_unlock_irqrestore(&d40c->lock, flags); 1597 spin_unlock_irqrestore(&d40c->lock, flags);
1623 return NULL; 1598 return NULL;
1624 } 1599 }
1625 EXPORT_SYMBOL(stedma40_memcpy_sg); 1600 EXPORT_SYMBOL(stedma40_memcpy_sg);
1626 1601
1627 bool stedma40_filter(struct dma_chan *chan, void *data) 1602 bool stedma40_filter(struct dma_chan *chan, void *data)
1628 { 1603 {
1629 struct stedma40_chan_cfg *info = data; 1604 struct stedma40_chan_cfg *info = data;
1630 struct d40_chan *d40c = 1605 struct d40_chan *d40c =
1631 container_of(chan, struct d40_chan, chan); 1606 container_of(chan, struct d40_chan, chan);
1632 int err; 1607 int err;
1633 1608
1634 if (data) { 1609 if (data) {
1635 err = d40_validate_conf(d40c, info); 1610 err = d40_validate_conf(d40c, info);
1636 if (!err) 1611 if (!err)
1637 d40c->dma_cfg = *info; 1612 d40c->dma_cfg = *info;
1638 } else 1613 } else
1639 err = d40_config_memcpy(d40c); 1614 err = d40_config_memcpy(d40c);
1640 1615
1641 return err == 0; 1616 return err == 0;
1642 } 1617 }
1643 EXPORT_SYMBOL(stedma40_filter); 1618 EXPORT_SYMBOL(stedma40_filter);
1644 1619
1645 /* DMA ENGINE functions */ 1620 /* DMA ENGINE functions */
1646 static int d40_alloc_chan_resources(struct dma_chan *chan) 1621 static int d40_alloc_chan_resources(struct dma_chan *chan)
1647 { 1622 {
1648 int err; 1623 int err;
1649 unsigned long flags; 1624 unsigned long flags;
1650 struct d40_chan *d40c = 1625 struct d40_chan *d40c =
1651 container_of(chan, struct d40_chan, chan); 1626 container_of(chan, struct d40_chan, chan);
1652 bool is_free_phy; 1627 bool is_free_phy;
1653 spin_lock_irqsave(&d40c->lock, flags); 1628 spin_lock_irqsave(&d40c->lock, flags);
1654 1629
1655 d40c->completed = chan->cookie = 1; 1630 d40c->completed = chan->cookie = 1;
1656 1631
1657 /* 1632 /*
1658 * If no dma configuration is set (channel_type == 0), 1633 * If no dma configuration is set (channel_type == 0),
1659 * use the default configuration (memcpy). 1634 * use the default configuration (memcpy).
1660 */ 1635 */
1661 if (d40c->dma_cfg.channel_type == 0) { 1636 if (d40c->dma_cfg.channel_type == 0) {
1662 err = d40_config_memcpy(d40c); 1637 err = d40_config_memcpy(d40c);
1663 if (err) { 1638 if (err) {
1664 dev_err(&d40c->chan.dev->device, 1639 dev_err(&d40c->chan.dev->device,
1665 "[%s] Failed to configure memcpy channel\n", 1640 "[%s] Failed to configure memcpy channel\n",
1666 __func__); 1641 __func__);
1667 goto fail; 1642 goto fail;
1668 } 1643 }
1669 } 1644 }
1670 is_free_phy = (d40c->phy_chan == NULL); 1645 is_free_phy = (d40c->phy_chan == NULL);
1671 1646
1672 err = d40_allocate_channel(d40c); 1647 err = d40_allocate_channel(d40c);
1673 if (err) { 1648 if (err) {
1674 dev_err(&d40c->chan.dev->device, 1649 dev_err(&d40c->chan.dev->device,
1675 "[%s] Failed to allocate channel\n", __func__); 1650 "[%s] Failed to allocate channel\n", __func__);
1676 goto fail; 1651 goto fail;
1677 } 1652 }
1678 1653
1679 /* Fill in basic CFG register values */ 1654 /* Fill in basic CFG register values */
1680 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, 1655 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
1681 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); 1656 &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN);
1682 1657
1683 if (d40c->log_num != D40_PHY_CHAN) { 1658 if (d40c->log_num != D40_PHY_CHAN) {
1684 d40_log_cfg(&d40c->dma_cfg, 1659 d40_log_cfg(&d40c->dma_cfg,
1685 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1660 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
1686 1661
1687 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 1662 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
1688 d40c->lcpa = d40c->base->lcpa_base + 1663 d40c->lcpa = d40c->base->lcpa_base +
1689 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; 1664 d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE;
1690 else 1665 else
1691 d40c->lcpa = d40c->base->lcpa_base + 1666 d40c->lcpa = d40c->base->lcpa_base +
1692 d40c->dma_cfg.dst_dev_type * 1667 d40c->dma_cfg.dst_dev_type *
1693 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; 1668 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
1694 } 1669 }
1695 1670
1696 /* 1671 /*
1697 * Only write channel configuration to the DMA if the physical 1672 * Only write channel configuration to the DMA if the physical
1698 * resource is free. In case of multiple logical channels 1673 * resource is free. In case of multiple logical channels
1699 * on the same physical resource, only the first write is necessary. 1674 * on the same physical resource, only the first write is necessary.
1700 */ 1675 */
1701 if (is_free_phy) { 1676 if (is_free_phy) {
1702 err = d40_config_write(d40c); 1677 err = d40_config_write(d40c);
1703 if (err) { 1678 if (err) {
1704 dev_err(&d40c->chan.dev->device, 1679 dev_err(&d40c->chan.dev->device,
1705 "[%s] Failed to configure channel\n", 1680 "[%s] Failed to configure channel\n",
1706 __func__); 1681 __func__);
1707 } 1682 }
1708 } 1683 }
1709 fail: 1684 fail:
1710 spin_unlock_irqrestore(&d40c->lock, flags); 1685 spin_unlock_irqrestore(&d40c->lock, flags);
1711 return err; 1686 return err;
1712 } 1687 }
1713 1688
1714 static void d40_free_chan_resources(struct dma_chan *chan) 1689 static void d40_free_chan_resources(struct dma_chan *chan)
1715 { 1690 {
1716 struct d40_chan *d40c = 1691 struct d40_chan *d40c =
1717 container_of(chan, struct d40_chan, chan); 1692 container_of(chan, struct d40_chan, chan);
1718 int err; 1693 int err;
1719 unsigned long flags; 1694 unsigned long flags;
1720 1695
1721 if (d40c->phy_chan == NULL) { 1696 if (d40c->phy_chan == NULL) {
1722 dev_err(&d40c->chan.dev->device, 1697 dev_err(&d40c->chan.dev->device,
1723 "[%s] Cannot free unallocated channel\n", __func__); 1698 "[%s] Cannot free unallocated channel\n", __func__);
1724 return; 1699 return;
1725 } 1700 }
1726 1701
1727 1702
1728 spin_lock_irqsave(&d40c->lock, flags); 1703 spin_lock_irqsave(&d40c->lock, flags);
1729 1704
1730 err = d40_free_dma(d40c); 1705 err = d40_free_dma(d40c);
1731 1706
1732 if (err) 1707 if (err)
1733 dev_err(&d40c->chan.dev->device, 1708 dev_err(&d40c->chan.dev->device,
1734 "[%s] Failed to free channel\n", __func__); 1709 "[%s] Failed to free channel\n", __func__);
1735 spin_unlock_irqrestore(&d40c->lock, flags); 1710 spin_unlock_irqrestore(&d40c->lock, flags);
1736 } 1711 }
1737 1712
1738 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, 1713 static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
1739 dma_addr_t dst, 1714 dma_addr_t dst,
1740 dma_addr_t src, 1715 dma_addr_t src,
1741 size_t size, 1716 size_t size,
1742 unsigned long dma_flags) 1717 unsigned long dma_flags)
1743 { 1718 {
1744 struct d40_desc *d40d; 1719 struct d40_desc *d40d;
1745 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1720 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1746 chan); 1721 chan);
1747 unsigned long flags; 1722 unsigned long flags;
1748 int err = 0; 1723 int err = 0;
1749 1724
1750 if (d40c->phy_chan == NULL) { 1725 if (d40c->phy_chan == NULL) {
1751 dev_err(&d40c->chan.dev->device, 1726 dev_err(&d40c->chan.dev->device,
1752 "[%s] Channel is not allocated.\n", __func__); 1727 "[%s] Channel is not allocated.\n", __func__);
1753 return ERR_PTR(-EINVAL); 1728 return ERR_PTR(-EINVAL);
1754 } 1729 }
1755 1730
1756 spin_lock_irqsave(&d40c->lock, flags); 1731 spin_lock_irqsave(&d40c->lock, flags);
1757 d40d = d40_desc_get(d40c); 1732 d40d = d40_desc_get(d40c);
1758 1733
1759 if (d40d == NULL) { 1734 if (d40d == NULL) {
1760 dev_err(&d40c->chan.dev->device, 1735 dev_err(&d40c->chan.dev->device,
1761 "[%s] Descriptor is NULL\n", __func__); 1736 "[%s] Descriptor is NULL\n", __func__);
1762 goto err; 1737 goto err;
1763 } 1738 }
1764 1739
1765 d40d->txd.flags = dma_flags; 1740 d40d->txd.flags = dma_flags;
1766 1741
1767 dma_async_tx_descriptor_init(&d40d->txd, chan); 1742 dma_async_tx_descriptor_init(&d40d->txd, chan);
1768 1743
1769 d40d->txd.tx_submit = d40_tx_submit; 1744 d40d->txd.tx_submit = d40_tx_submit;
1770 1745
1771 if (d40c->log_num != D40_PHY_CHAN) { 1746 if (d40c->log_num != D40_PHY_CHAN) {
1772 1747
1773 if (d40_pool_lli_alloc(d40d, 1, true) < 0) { 1748 if (d40_pool_lli_alloc(d40d, 1, true) < 0) {
1774 dev_err(&d40c->chan.dev->device, 1749 dev_err(&d40c->chan.dev->device,
1775 "[%s] Out of memory\n", __func__); 1750 "[%s] Out of memory\n", __func__);
1776 goto err; 1751 goto err;
1777 } 1752 }
1778 d40d->lli_len = 1; 1753 d40d->lli_len = 1;
1779 d40d->lli_tx_len = 1; 1754 d40d->lli_tx_len = 1;
1780 1755
1781 d40_log_fill_lli(d40d->lli_log.src, 1756 d40_log_fill_lli(d40d->lli_log.src,
1782 src, 1757 src,
1783 size, 1758 size,
1784 0, 1759 0,
1785 d40c->log_def.lcsp1, 1760 d40c->log_def.lcsp1,
1786 d40c->dma_cfg.src_info.data_width, 1761 d40c->dma_cfg.src_info.data_width,
1787 true, true); 1762 true, true);
1788 1763
1789 d40_log_fill_lli(d40d->lli_log.dst, 1764 d40_log_fill_lli(d40d->lli_log.dst,
1790 dst, 1765 dst,
1791 size, 1766 size,
1792 0, 1767 0,
1793 d40c->log_def.lcsp3, 1768 d40c->log_def.lcsp3,
1794 d40c->dma_cfg.dst_info.data_width, 1769 d40c->dma_cfg.dst_info.data_width,
1795 true, true); 1770 true, true);
1796 1771
1797 } else { 1772 } else {
1798 1773
1799 if (d40_pool_lli_alloc(d40d, 1, false) < 0) { 1774 if (d40_pool_lli_alloc(d40d, 1, false) < 0) {
1800 dev_err(&d40c->chan.dev->device, 1775 dev_err(&d40c->chan.dev->device,
1801 "[%s] Out of memory\n", __func__); 1776 "[%s] Out of memory\n", __func__);
1802 goto err; 1777 goto err;
1803 } 1778 }
1804 1779
1805 err = d40_phy_fill_lli(d40d->lli_phy.src, 1780 err = d40_phy_fill_lli(d40d->lli_phy.src,
1806 src, 1781 src,
1807 size, 1782 size,
1808 d40c->dma_cfg.src_info.psize, 1783 d40c->dma_cfg.src_info.psize,
1809 0, 1784 0,
1810 d40c->src_def_cfg, 1785 d40c->src_def_cfg,
1811 true, 1786 true,
1812 d40c->dma_cfg.src_info.data_width, 1787 d40c->dma_cfg.src_info.data_width,
1813 false); 1788 false);
1814 if (err) 1789 if (err)
1815 goto err_fill_lli; 1790 goto err_fill_lli;
1816 1791
1817 err = d40_phy_fill_lli(d40d->lli_phy.dst, 1792 err = d40_phy_fill_lli(d40d->lli_phy.dst,
1818 dst, 1793 dst,
1819 size, 1794 size,
1820 d40c->dma_cfg.dst_info.psize, 1795 d40c->dma_cfg.dst_info.psize,
1821 0, 1796 0,
1822 d40c->dst_def_cfg, 1797 d40c->dst_def_cfg,
1823 true, 1798 true,
1824 d40c->dma_cfg.dst_info.data_width, 1799 d40c->dma_cfg.dst_info.data_width,
1825 false); 1800 false);
1826 1801
1827 if (err) 1802 if (err)
1828 goto err_fill_lli; 1803 goto err_fill_lli;
1829 1804
1830 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1805 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1831 d40d->lli_pool.size, DMA_TO_DEVICE); 1806 d40d->lli_pool.size, DMA_TO_DEVICE);
1832 } 1807 }
1833 1808
1834 spin_unlock_irqrestore(&d40c->lock, flags); 1809 spin_unlock_irqrestore(&d40c->lock, flags);
1835 return &d40d->txd; 1810 return &d40d->txd;
1836 1811
1837 err_fill_lli: 1812 err_fill_lli:
1838 dev_err(&d40c->chan.dev->device, 1813 dev_err(&d40c->chan.dev->device,
1839 "[%s] Failed filling in PHY LLI\n", __func__); 1814 "[%s] Failed filling in PHY LLI\n", __func__);
1840 d40_pool_lli_free(d40d); 1815 d40_pool_lli_free(d40d);
1841 err: 1816 err:
1842 spin_unlock_irqrestore(&d40c->lock, flags); 1817 spin_unlock_irqrestore(&d40c->lock, flags);
1843 return NULL; 1818 return NULL;
1844 } 1819 }
1845 1820
1846 static int d40_prep_slave_sg_log(struct d40_desc *d40d, 1821 static int d40_prep_slave_sg_log(struct d40_desc *d40d,
1847 struct d40_chan *d40c, 1822 struct d40_chan *d40c,
1848 struct scatterlist *sgl, 1823 struct scatterlist *sgl,
1849 unsigned int sg_len, 1824 unsigned int sg_len,
1850 enum dma_data_direction direction, 1825 enum dma_data_direction direction,
1851 unsigned long dma_flags) 1826 unsigned long dma_flags)
1852 { 1827 {
1853 dma_addr_t dev_addr = 0; 1828 dma_addr_t dev_addr = 0;
1854 int total_size; 1829 int total_size;
1855 1830
1856 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { 1831 if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) {
1857 dev_err(&d40c->chan.dev->device, 1832 dev_err(&d40c->chan.dev->device,
1858 "[%s] Out of memory\n", __func__); 1833 "[%s] Out of memory\n", __func__);
1859 return -ENOMEM; 1834 return -ENOMEM;
1860 } 1835 }
1861 1836
1862 d40d->lli_len = sg_len; 1837 d40d->lli_len = sg_len;
1863 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) 1838 if (d40d->lli_len <= d40c->base->plat_data->llis_per_log)
1864 d40d->lli_tx_len = d40d->lli_len; 1839 d40d->lli_tx_len = d40d->lli_len;
1865 else 1840 else
1866 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; 1841 d40d->lli_tx_len = d40c->base->plat_data->llis_per_log;
1867 1842
1868 if (sg_len > 1) 1843 if (sg_len > 1)
1869 /* 1844 /*
1870 * Check if there is space available in lcla. 1845 * Check if there is space available in lcla.
1871 * If not, split the list into one-element pieces and 1846 * If not, split the list into one-element pieces and
1872 * run only in lcpa space. 1847 * run only in lcpa space.
1873 */ 1848 */
1874 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) 1849 if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0)
1875 d40d->lli_tx_len = 1; 1850 d40d->lli_tx_len = 1;
1876 1851
1877 if (direction == DMA_FROM_DEVICE) 1852 if (direction == DMA_FROM_DEVICE)
1878 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 1853 dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1879 else if (direction == DMA_TO_DEVICE) 1854 else if (direction == DMA_TO_DEVICE)
1880 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 1855 dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1881 else 1856 else
1882 return -EINVAL; 1857 return -EINVAL;
1883 1858
1884 total_size = d40_log_sg_to_dev(&d40c->lcla, 1859 total_size = d40_log_sg_to_dev(&d40c->lcla,
1885 sgl, sg_len, 1860 sgl, sg_len,
1886 &d40d->lli_log, 1861 &d40d->lli_log,
1887 &d40c->log_def, 1862 &d40c->log_def,
1888 d40c->dma_cfg.src_info.data_width, 1863 d40c->dma_cfg.src_info.data_width,
1889 d40c->dma_cfg.dst_info.data_width, 1864 d40c->dma_cfg.dst_info.data_width,
1890 direction, 1865 direction,
1891 dma_flags & DMA_PREP_INTERRUPT, 1866 dma_flags & DMA_PREP_INTERRUPT,
1892 dev_addr, d40d->lli_tx_len, 1867 dev_addr, d40d->lli_tx_len,
1893 d40c->base->plat_data->llis_per_log); 1868 d40c->base->plat_data->llis_per_log);
1894 1869
1895 if (total_size < 0) 1870 if (total_size < 0)
1896 return -EINVAL; 1871 return -EINVAL;
1897 1872
1898 return 0; 1873 return 0;
1899 } 1874 }
1900 1875
1901 static int d40_prep_slave_sg_phy(struct d40_desc *d40d, 1876 static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
1902 struct d40_chan *d40c, 1877 struct d40_chan *d40c,
1903 struct scatterlist *sgl, 1878 struct scatterlist *sgl,
1904 unsigned int sgl_len, 1879 unsigned int sgl_len,
1905 enum dma_data_direction direction, 1880 enum dma_data_direction direction,
1906 unsigned long dma_flags) 1881 unsigned long dma_flags)
1907 { 1882 {
1908 dma_addr_t src_dev_addr; 1883 dma_addr_t src_dev_addr;
1909 dma_addr_t dst_dev_addr; 1884 dma_addr_t dst_dev_addr;
1910 int res; 1885 int res;
1911 1886
1912 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1887 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) {
1913 dev_err(&d40c->chan.dev->device, 1888 dev_err(&d40c->chan.dev->device,
1914 "[%s] Out of memory\n", __func__); 1889 "[%s] Out of memory\n", __func__);
1915 return -ENOMEM; 1890 return -ENOMEM;
1916 } 1891 }
1917 1892
1918 d40d->lli_len = sgl_len; 1893 d40d->lli_len = sgl_len;
1919 d40d->lli_tx_len = sgl_len; 1894 d40d->lli_tx_len = sgl_len;
1920 1895
1921 if (direction == DMA_FROM_DEVICE) { 1896 if (direction == DMA_FROM_DEVICE) {
1922 dst_dev_addr = 0; 1897 dst_dev_addr = 0;
1923 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 1898 src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type];
1924 } else if (direction == DMA_TO_DEVICE) { 1899 } else if (direction == DMA_TO_DEVICE) {
1925 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 1900 dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type];
1926 src_dev_addr = 0; 1901 src_dev_addr = 0;
1927 } else 1902 } else
1928 return -EINVAL; 1903 return -EINVAL;
1929 1904
1930 res = d40_phy_sg_to_lli(sgl, 1905 res = d40_phy_sg_to_lli(sgl,
1931 sgl_len, 1906 sgl_len,
1932 src_dev_addr, 1907 src_dev_addr,
1933 d40d->lli_phy.src, 1908 d40d->lli_phy.src,
1934 d40d->lli_phy.src_addr, 1909 d40d->lli_phy.src_addr,
1935 d40c->src_def_cfg, 1910 d40c->src_def_cfg,
1936 d40c->dma_cfg.src_info.data_width, 1911 d40c->dma_cfg.src_info.data_width,
1937 d40c->dma_cfg.src_info.psize, 1912 d40c->dma_cfg.src_info.psize,
1938 true); 1913 true);
1939 if (res < 0) 1914 if (res < 0)
1940 return res; 1915 return res;
1941 1916
1942 res = d40_phy_sg_to_lli(sgl, 1917 res = d40_phy_sg_to_lli(sgl,
1943 sgl_len, 1918 sgl_len,
1944 dst_dev_addr, 1919 dst_dev_addr,
1945 d40d->lli_phy.dst, 1920 d40d->lli_phy.dst,
1946 d40d->lli_phy.dst_addr, 1921 d40d->lli_phy.dst_addr,
1947 d40c->dst_def_cfg, 1922 d40c->dst_def_cfg,
1948 d40c->dma_cfg.dst_info.data_width, 1923 d40c->dma_cfg.dst_info.data_width,
1949 d40c->dma_cfg.dst_info.psize, 1924 d40c->dma_cfg.dst_info.psize,
1950 true); 1925 true);
1951 if (res < 0) 1926 if (res < 0)
1952 return res; 1927 return res;
1953 1928
1954 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1929 (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src,
1955 d40d->lli_pool.size, DMA_TO_DEVICE); 1930 d40d->lli_pool.size, DMA_TO_DEVICE);
1956 return 0; 1931 return 0;
1957 } 1932 }
1958 1933
1959 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, 1934 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
1960 struct scatterlist *sgl, 1935 struct scatterlist *sgl,
1961 unsigned int sg_len, 1936 unsigned int sg_len,
1962 enum dma_data_direction direction, 1937 enum dma_data_direction direction,
1963 unsigned long dma_flags) 1938 unsigned long dma_flags)
1964 { 1939 {
1965 struct d40_desc *d40d; 1940 struct d40_desc *d40d;
1966 struct d40_chan *d40c = container_of(chan, struct d40_chan, 1941 struct d40_chan *d40c = container_of(chan, struct d40_chan,
1967 chan); 1942 chan);
1968 unsigned long flags; 1943 unsigned long flags;
1969 int err; 1944 int err;
1970 1945
1971 if (d40c->phy_chan == NULL) { 1946 if (d40c->phy_chan == NULL) {
1972 dev_err(&d40c->chan.dev->device, 1947 dev_err(&d40c->chan.dev->device,
1973 "[%s] Cannot prepare unallocated channel\n", __func__); 1948 "[%s] Cannot prepare unallocated channel\n", __func__);
1974 return ERR_PTR(-EINVAL); 1949 return ERR_PTR(-EINVAL);
1975 } 1950 }
1976 1951
1977 if (d40c->dma_cfg.pre_transfer) 1952 if (d40c->dma_cfg.pre_transfer)
1978 d40c->dma_cfg.pre_transfer(chan, 1953 d40c->dma_cfg.pre_transfer(chan,
1979 d40c->dma_cfg.pre_transfer_data, 1954 d40c->dma_cfg.pre_transfer_data,
1980 sg_dma_len(sgl)); 1955 sg_dma_len(sgl));
1981 1956
1982 spin_lock_irqsave(&d40c->lock, flags); 1957 spin_lock_irqsave(&d40c->lock, flags);
1983 d40d = d40_desc_get(d40c); 1958 d40d = d40_desc_get(d40c);
1984 spin_unlock_irqrestore(&d40c->lock, flags); 1959 spin_unlock_irqrestore(&d40c->lock, flags);
1985 1960
1986 if (d40d == NULL) 1961 if (d40d == NULL)
1987 return NULL; 1962 return NULL;
1988 1963
1989 if (d40c->log_num != D40_PHY_CHAN) 1964 if (d40c->log_num != D40_PHY_CHAN)
1990 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, 1965 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len,
1991 direction, dma_flags); 1966 direction, dma_flags);
1992 else 1967 else
1993 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, 1968 err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len,
1994 direction, dma_flags); 1969 direction, dma_flags);
1995 if (err) { 1970 if (err) {
1996 dev_err(&d40c->chan.dev->device, 1971 dev_err(&d40c->chan.dev->device,
1997 "[%s] Failed to prepare %s slave sg job: %d\n", 1972 "[%s] Failed to prepare %s slave sg job: %d\n",
1998 __func__, 1973 __func__,
1999 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); 1974 d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err);
2000 return NULL; 1975 return NULL;
2001 } 1976 }
2002 1977
2003 d40d->txd.flags = dma_flags; 1978 d40d->txd.flags = dma_flags;
2004 1979
2005 dma_async_tx_descriptor_init(&d40d->txd, chan); 1980 dma_async_tx_descriptor_init(&d40d->txd, chan);
2006 1981
2007 d40d->txd.tx_submit = d40_tx_submit; 1982 d40d->txd.tx_submit = d40_tx_submit;
2008 1983
2009 return &d40d->txd; 1984 return &d40d->txd;
2010 } 1985 }
2011 1986
2012 static enum dma_status d40_tx_status(struct dma_chan *chan, 1987 static enum dma_status d40_tx_status(struct dma_chan *chan,
2013 dma_cookie_t cookie, 1988 dma_cookie_t cookie,
2014 struct dma_tx_state *txstate) 1989 struct dma_tx_state *txstate)
2015 { 1990 {
2016 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 1991 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2017 dma_cookie_t last_used; 1992 dma_cookie_t last_used;
2018 dma_cookie_t last_complete; 1993 dma_cookie_t last_complete;
2019 int ret; 1994 int ret;
2020 1995
2021 if (d40c->phy_chan == NULL) { 1996 if (d40c->phy_chan == NULL) {
2022 dev_err(&d40c->chan.dev->device, 1997 dev_err(&d40c->chan.dev->device,
2023 "[%s] Cannot read status of unallocated channel\n", 1998 "[%s] Cannot read status of unallocated channel\n",
2024 __func__); 1999 __func__);
2025 return -EINVAL; 2000 return -EINVAL;
2026 } 2001 }
2027 2002
2028 last_complete = d40c->completed; 2003 last_complete = d40c->completed;
2029 last_used = chan->cookie; 2004 last_used = chan->cookie;
2030 2005
2031 if (d40_is_paused(d40c)) 2006 if (d40_is_paused(d40c))
2032 ret = DMA_PAUSED; 2007 ret = DMA_PAUSED;
2033 else 2008 else
2034 ret = dma_async_is_complete(cookie, last_complete, last_used); 2009 ret = dma_async_is_complete(cookie, last_complete, last_used);
2035 2010
2036 dma_set_tx_state(txstate, last_complete, last_used, 2011 dma_set_tx_state(txstate, last_complete, last_used,
2037 stedma40_residue(chan)); 2012 stedma40_residue(chan));
2038 2013
2039 return ret; 2014 return ret;
2040 } 2015 }
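On the client side this status hook is normally reached through dma_async_is_tx_complete(); a hedged sketch of polling a cookie returned by tx_submit(), keeping in mind that d40_tx_status() can also report DMA_PAUSED:

    /* Illustrative only: poll a submitted transfer for completion. */
    static bool example_job_done(struct dma_chan *chan, dma_cookie_t cookie)
    {
            enum dma_status status;

            status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
            return status == DMA_SUCCESS;  /* DMA_IN_PROGRESS/DMA_PAUSED otherwise */
    }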
2041 2016
2042 static void d40_issue_pending(struct dma_chan *chan) 2017 static void d40_issue_pending(struct dma_chan *chan)
2043 { 2018 {
2044 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2019 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2045 unsigned long flags; 2020 unsigned long flags;
2046 2021
2047 if (d40c->phy_chan == NULL) { 2022 if (d40c->phy_chan == NULL) {
2048 dev_err(&d40c->chan.dev->device, 2023 dev_err(&d40c->chan.dev->device,
2049 "[%s] Channel is not allocated!\n", __func__); 2024 "[%s] Channel is not allocated!\n", __func__);
2050 return; 2025 return;
2051 } 2026 }
2052 2027
2053 spin_lock_irqsave(&d40c->lock, flags); 2028 spin_lock_irqsave(&d40c->lock, flags);
2054 2029
2055 /* Busy means that pending jobs are already being processed */ 2030 /* Busy means that pending jobs are already being processed */
2056 if (!d40c->busy) 2031 if (!d40c->busy)
2057 (void) d40_queue_start(d40c); 2032 (void) d40_queue_start(d40c);
2058 2033
2059 spin_unlock_irqrestore(&d40c->lock, flags); 2034 spin_unlock_irqrestore(&d40c->lock, flags);
2060 } 2035 }
2061 2036
2062 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2037 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2063 unsigned long arg) 2038 unsigned long arg)
2064 { 2039 {
2065 unsigned long flags; 2040 unsigned long flags;
2066 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2041 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2067 2042
2068 if (d40c->phy_chan == NULL) { 2043 if (d40c->phy_chan == NULL) {
2069 dev_err(&d40c->chan.dev->device, 2044 dev_err(&d40c->chan.dev->device,
2070 "[%s] Channel is not allocated!\n", __func__); 2045 "[%s] Channel is not allocated!\n", __func__);
2071 return -EINVAL; 2046 return -EINVAL;
2072 } 2047 }
2073 2048
2074 switch (cmd) { 2049 switch (cmd) {
2075 case DMA_TERMINATE_ALL: 2050 case DMA_TERMINATE_ALL:
2076 spin_lock_irqsave(&d40c->lock, flags); 2051 spin_lock_irqsave(&d40c->lock, flags);
2077 d40_term_all(d40c); 2052 d40_term_all(d40c);
2078 spin_unlock_irqrestore(&d40c->lock, flags); 2053 spin_unlock_irqrestore(&d40c->lock, flags);
2079 return 0; 2054 return 0;
2080 case DMA_PAUSE: 2055 case DMA_PAUSE:
2081 return d40_pause(chan); 2056 return d40_pause(chan);
2082 case DMA_RESUME: 2057 case DMA_RESUME:
2083 return d40_resume(chan); 2058 return d40_resume(chan);
2084 } 2059 }
2085 2060
2086 /* Other commands are unimplemented */ 2061 /* Other commands are unimplemented */
2087 return -ENXIO; 2062 return -ENXIO;
2088 } 2063 }
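Clients of this kernel generation reach d40_control() through the device_control hook; a hedged sketch of issuing the three commands the switch above accepts:

    /* Illustrative only: pause, resume and flush one channel. */
    static void example_pause_resume_flush(struct dma_chan *chan)
    {
            chan->device->device_control(chan, DMA_PAUSE, 0);          /* -> d40_pause()    */
            chan->device->device_control(chan, DMA_RESUME, 0);         /* -> d40_resume()   */
            chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);  /* -> d40_term_all() */
            /* any other command returns -ENXIO, as the switch above shows */
    }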
2089 2064
2090 /* Initialization functions */ 2065 /* Initialization functions */
2091 2066
2092 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, 2067 static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2093 struct d40_chan *chans, int offset, 2068 struct d40_chan *chans, int offset,
2094 int num_chans) 2069 int num_chans)
2095 { 2070 {
2096 int i = 0; 2071 int i = 0;
2097 struct d40_chan *d40c; 2072 struct d40_chan *d40c;
2098 2073
2099 INIT_LIST_HEAD(&dma->channels); 2074 INIT_LIST_HEAD(&dma->channels);
2100 2075
2101 for (i = offset; i < offset + num_chans; i++) { 2076 for (i = offset; i < offset + num_chans; i++) {
2102 d40c = &chans[i]; 2077 d40c = &chans[i];
2103 d40c->base = base; 2078 d40c->base = base;
2104 d40c->chan.device = dma; 2079 d40c->chan.device = dma;
2105 2080
2106 /* Invalidate lcla element */ 2081 /* Invalidate lcla element */
2107 d40c->lcla.src_id = -1; 2082 d40c->lcla.src_id = -1;
2108 d40c->lcla.dst_id = -1; 2083 d40c->lcla.dst_id = -1;
2109 2084
2110 spin_lock_init(&d40c->lock); 2085 spin_lock_init(&d40c->lock);
2111 2086
2112 d40c->log_num = D40_PHY_CHAN; 2087 d40c->log_num = D40_PHY_CHAN;
2113 2088
2114 INIT_LIST_HEAD(&d40c->active); 2089 INIT_LIST_HEAD(&d40c->active);
2115 INIT_LIST_HEAD(&d40c->queue); 2090 INIT_LIST_HEAD(&d40c->queue);
2116 INIT_LIST_HEAD(&d40c->client); 2091 INIT_LIST_HEAD(&d40c->client);
2117 2092
2118 tasklet_init(&d40c->tasklet, dma_tasklet, 2093 tasklet_init(&d40c->tasklet, dma_tasklet,
2119 (unsigned long) d40c); 2094 (unsigned long) d40c);
2120 2095
2121 list_add_tail(&d40c->chan.device_node, 2096 list_add_tail(&d40c->chan.device_node,
2122 &dma->channels); 2097 &dma->channels);
2123 } 2098 }
2124 } 2099 }
2125 2100
2126 static int __init d40_dmaengine_init(struct d40_base *base, 2101 static int __init d40_dmaengine_init(struct d40_base *base,
2127 int num_reserved_chans) 2102 int num_reserved_chans)
2128 { 2103 {
2129 	int err; 2104 	int err;

2130 2105
2131 d40_chan_init(base, &base->dma_slave, base->log_chans, 2106 d40_chan_init(base, &base->dma_slave, base->log_chans,
2132 0, base->num_log_chans); 2107 0, base->num_log_chans);
2133 2108
2134 dma_cap_zero(base->dma_slave.cap_mask); 2109 dma_cap_zero(base->dma_slave.cap_mask);
2135 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2110 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
2136 2111
2137 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; 2112 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources;
2138 base->dma_slave.device_free_chan_resources = d40_free_chan_resources; 2113 base->dma_slave.device_free_chan_resources = d40_free_chan_resources;
2139 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; 2114 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy;
2140 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; 2115 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg;
2141 base->dma_slave.device_tx_status = d40_tx_status; 2116 base->dma_slave.device_tx_status = d40_tx_status;
2142 base->dma_slave.device_issue_pending = d40_issue_pending; 2117 base->dma_slave.device_issue_pending = d40_issue_pending;
2143 base->dma_slave.device_control = d40_control; 2118 base->dma_slave.device_control = d40_control;
2144 base->dma_slave.dev = base->dev; 2119 base->dma_slave.dev = base->dev;
2145 2120
2146 err = dma_async_device_register(&base->dma_slave); 2121 err = dma_async_device_register(&base->dma_slave);
2147 2122
2148 if (err) { 2123 if (err) {
2149 dev_err(base->dev, 2124 dev_err(base->dev,
2150 "[%s] Failed to register slave channels\n", 2125 "[%s] Failed to register slave channels\n",
2151 __func__); 2126 __func__);
2152 goto failure1; 2127 goto failure1;
2153 } 2128 }
2154 2129
2155 d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2130 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
2156 base->num_log_chans, base->plat_data->memcpy_len); 2131 base->num_log_chans, base->plat_data->memcpy_len);
2157 2132
2158 dma_cap_zero(base->dma_memcpy.cap_mask); 2133 dma_cap_zero(base->dma_memcpy.cap_mask);
2159 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2134 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
2160 2135
2161 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; 2136 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources;
2162 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; 2137 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources;
2163 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; 2138 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy;
2164 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; 2139 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg;
2165 base->dma_memcpy.device_tx_status = d40_tx_status; 2140 base->dma_memcpy.device_tx_status = d40_tx_status;
2166 base->dma_memcpy.device_issue_pending = d40_issue_pending; 2141 base->dma_memcpy.device_issue_pending = d40_issue_pending;
2167 base->dma_memcpy.device_control = d40_control; 2142 base->dma_memcpy.device_control = d40_control;
2168 base->dma_memcpy.dev = base->dev; 2143 base->dma_memcpy.dev = base->dev;
2169 /* 2144 /*
2170 	 * This controller can only access addresses at even 2145 	 * This controller can only access addresses at even
2171 	 * 32-bit boundaries, i.e. a 2^2 byte alignment 2146 	 * 32-bit boundaries, i.e. a 2^2 byte alignment
2172 */ 2147 */
2173 base->dma_memcpy.copy_align = 2; 2148 base->dma_memcpy.copy_align = 2;
2174 2149
2175 err = dma_async_device_register(&base->dma_memcpy); 2150 err = dma_async_device_register(&base->dma_memcpy);
2176 2151
2177 if (err) { 2152 if (err) {
2178 dev_err(base->dev, 2153 dev_err(base->dev,
2179 "[%s] Failed to regsiter memcpy only channels\n", 2154 "[%s] Failed to regsiter memcpy only channels\n",
2180 __func__); 2155 __func__);
2181 goto failure2; 2156 goto failure2;
2182 } 2157 }
2183 2158
2184 d40_chan_init(base, &base->dma_both, base->phy_chans, 2159 d40_chan_init(base, &base->dma_both, base->phy_chans,
2185 0, num_reserved_chans); 2160 0, num_reserved_chans);
2186 2161
2187 dma_cap_zero(base->dma_both.cap_mask); 2162 dma_cap_zero(base->dma_both.cap_mask);
2188 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2163 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2189 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2164 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
2190 2165
2191 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; 2166 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources;
2192 base->dma_both.device_free_chan_resources = d40_free_chan_resources; 2167 base->dma_both.device_free_chan_resources = d40_free_chan_resources;
2193 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; 2168 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy;
2194 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; 2169 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg;
2195 base->dma_both.device_tx_status = d40_tx_status; 2170 base->dma_both.device_tx_status = d40_tx_status;
2196 base->dma_both.device_issue_pending = d40_issue_pending; 2171 base->dma_both.device_issue_pending = d40_issue_pending;
2197 base->dma_both.device_control = d40_control; 2172 base->dma_both.device_control = d40_control;
2198 base->dma_both.dev = base->dev; 2173 base->dma_both.dev = base->dev;
2199 base->dma_both.copy_align = 2; 2174 base->dma_both.copy_align = 2;
2200 err = dma_async_device_register(&base->dma_both); 2175 err = dma_async_device_register(&base->dma_both);
2201 2176
2202 if (err) { 2177 if (err) {
2203 dev_err(base->dev, 2178 dev_err(base->dev,
2204 "[%s] Failed to register logical and physical capable channels\n", 2179 "[%s] Failed to register logical and physical capable channels\n",
2205 __func__); 2180 __func__);
2206 goto failure3; 2181 goto failure3;
2207 } 2182 }
2208 return 0; 2183 return 0;
2209 failure3: 2184 failure3:
2210 dma_async_device_unregister(&base->dma_memcpy); 2185 dma_async_device_unregister(&base->dma_memcpy);
2211 failure2: 2186 failure2:
2212 dma_async_device_unregister(&base->dma_slave); 2187 dma_async_device_unregister(&base->dma_slave);
2213 failure1: 2188 failure1:
2214 return err; 2189 return err;
2215 } 2190 }
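Three dma_device instances are registered by this function: dma_slave (DMA_SLAVE only, logical channels), dma_memcpy (DMA_MEMCPY only, logical channels set aside for memcpy) and dma_both (both capabilities, on the reserved physical channels). A hedged sketch of how a client selects among them via the capability mask; the filter callback and its argument are hypothetical placeholders for the platform-specific matching normally done against stedma40 configuration data:

    /* Illustrative only: grab a DMA_SLAVE-capable channel. */
    static struct dma_chan *example_get_slave_chan(dma_filter_fn filter,
                                                   void *filter_param)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* dma_request_channel() walks the registered dma_devices and
             * returns the first free channel the filter accepts. */
            return dma_request_channel(mask, filter, filter_param);
    }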
2216 2191
2217 /* Initialization functions. */ 2192 /* Initialization functions. */
2218 2193
2219 static int __init d40_phy_res_init(struct d40_base *base) 2194 static int __init d40_phy_res_init(struct d40_base *base)
2220 { 2195 {
2221 int i; 2196 int i;
2222 int num_phy_chans_avail = 0; 2197 int num_phy_chans_avail = 0;
2223 u32 val[2]; 2198 u32 val[2];
2224 int odd_even_bit = -2; 2199 int odd_even_bit = -2;
2225 2200
2226 val[0] = readl(base->virtbase + D40_DREG_PRSME); 2201 val[0] = readl(base->virtbase + D40_DREG_PRSME);
2227 val[1] = readl(base->virtbase + D40_DREG_PRSMO); 2202 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
2228 2203
2229 for (i = 0; i < base->num_phy_chans; i++) { 2204 for (i = 0; i < base->num_phy_chans; i++) {
2230 base->phy_res[i].num = i; 2205 base->phy_res[i].num = i;
2231 odd_even_bit += 2 * ((i % 2) == 0); 2206 odd_even_bit += 2 * ((i % 2) == 0);
2232 if (((val[i % 2] >> odd_even_bit) & 3) == 1) { 2207 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
2233 /* Mark security only channels as occupied */ 2208 /* Mark security only channels as occupied */
2234 base->phy_res[i].allocated_src = D40_ALLOC_PHY; 2209 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
2235 base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 2210 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
2236 } else { 2211 } else {
2237 base->phy_res[i].allocated_src = D40_ALLOC_FREE; 2212 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
2238 base->phy_res[i].allocated_dst = D40_ALLOC_FREE; 2213 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
2239 num_phy_chans_avail++; 2214 num_phy_chans_avail++;
2240 } 2215 }
2241 spin_lock_init(&base->phy_res[i].lock); 2216 spin_lock_init(&base->phy_res[i].lock);
2242 } 2217 }
2243 dev_info(base->dev, "%d of %d physical DMA channels available\n", 2218 dev_info(base->dev, "%d of %d physical DMA channels available\n",
2244 num_phy_chans_avail, base->num_phy_chans); 2219 num_phy_chans_avail, base->num_phy_chans);
2245 2220
2246 /* Verify settings extended vs standard */ 2221 /* Verify settings extended vs standard */
2247 val[0] = readl(base->virtbase + D40_DREG_PRTYP); 2222 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
2248 2223
2249 for (i = 0; i < base->num_phy_chans; i++) { 2224 for (i = 0; i < base->num_phy_chans; i++) {
2250 2225
2251 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && 2226 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
2252 (val[0] & 0x3) != 1) 2227 (val[0] & 0x3) != 1)
2253 dev_info(base->dev, 2228 dev_info(base->dev,
2254 "[%s] INFO: channel %d is misconfigured (%d)\n", 2229 "[%s] INFO: channel %d is misconfigured (%d)\n",
2255 __func__, i, val[0] & 0x3); 2230 __func__, i, val[0] & 0x3);
2256 2231
2257 val[0] = val[0] >> 2; 2232 val[0] = val[0] >> 2;
2258 } 2233 }
2259 2234
2260 return num_phy_chans_avail; 2235 return num_phy_chans_avail;
2261 } 2236 }
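The loop above decodes two bits per channel from the secure-mode registers, even-numbered channels from PRSME and odd-numbered ones from PRSMO, stepping odd_even_bit by two for every channel pair. The same extraction, restated with the D40_CHAN_POS helpers defined at the top of the file (a sketch, not a drop-in replacement):

    /* Sketch: channel i's 2-bit security field; a value of 1 means
     * "secure use only", which the loop above marks as D40_ALLOC_PHY. */
    static u32 example_chan_secure_bits(u32 prsme, u32 prsmo, int i)
    {
            u32 val = (i % 2) ? prsmo : prsme;

            return (val & D40_CHAN_POS_MASK(i)) >> D40_CHAN_POS(i);
    }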
2262 2237
2263 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 2238 static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
2264 { 2239 {
2265 static const struct d40_reg_val dma_id_regs[] = { 2240 static const struct d40_reg_val dma_id_regs[] = {
2266 /* Peripheral Id */ 2241 /* Peripheral Id */
2267 { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, 2242 { .reg = D40_DREG_PERIPHID0, .val = 0x0040},
2268 { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, 2243 { .reg = D40_DREG_PERIPHID1, .val = 0x0000},
2269 /* 2244 /*
2270 * D40_DREG_PERIPHID2 Depends on HW revision: 2245 * D40_DREG_PERIPHID2 Depends on HW revision:
2271 * MOP500/HREF ED has 0x0008, 2246 * MOP500/HREF ED has 0x0008,
2272 * ? has 0x0018, 2247 * ? has 0x0018,
2273 * HREF V1 has 0x0028 2248 * HREF V1 has 0x0028
2274 */ 2249 */
2275 { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, 2250 { .reg = D40_DREG_PERIPHID3, .val = 0x0000},
2276 2251
2277 /* PCell Id */ 2252 /* PCell Id */
2278 { .reg = D40_DREG_CELLID0, .val = 0x000d}, 2253 { .reg = D40_DREG_CELLID0, .val = 0x000d},
2279 { .reg = D40_DREG_CELLID1, .val = 0x00f0}, 2254 { .reg = D40_DREG_CELLID1, .val = 0x00f0},
2280 { .reg = D40_DREG_CELLID2, .val = 0x0005}, 2255 { .reg = D40_DREG_CELLID2, .val = 0x0005},
2281 { .reg = D40_DREG_CELLID3, .val = 0x00b1} 2256 { .reg = D40_DREG_CELLID3, .val = 0x00b1}
2282 }; 2257 };
2283 struct stedma40_platform_data *plat_data; 2258 struct stedma40_platform_data *plat_data;
2284 struct clk *clk = NULL; 2259 struct clk *clk = NULL;
2285 void __iomem *virtbase = NULL; 2260 void __iomem *virtbase = NULL;
2286 struct resource *res = NULL; 2261 struct resource *res = NULL;
2287 struct d40_base *base = NULL; 2262 struct d40_base *base = NULL;
2288 int num_log_chans = 0; 2263 int num_log_chans = 0;
2289 int num_phy_chans; 2264 int num_phy_chans;
2290 int i; 2265 int i;
2291 2266
2292 clk = clk_get(&pdev->dev, NULL); 2267 clk = clk_get(&pdev->dev, NULL);
2293 2268
2294 if (IS_ERR(clk)) { 2269 if (IS_ERR(clk)) {
2295 dev_err(&pdev->dev, "[%s] No matching clock found\n", 2270 dev_err(&pdev->dev, "[%s] No matching clock found\n",
2296 __func__); 2271 __func__);
2297 goto failure; 2272 goto failure;
2298 } 2273 }
2299 2274
2300 clk_enable(clk); 2275 clk_enable(clk);
2301 2276
2302 /* Get IO for DMAC base address */ 2277 /* Get IO for DMAC base address */
2303 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 2278 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
2304 if (!res) 2279 if (!res)
2305 goto failure; 2280 goto failure;
2306 2281
2307 if (request_mem_region(res->start, resource_size(res), 2282 if (request_mem_region(res->start, resource_size(res),
2308 D40_NAME " I/O base") == NULL) 2283 D40_NAME " I/O base") == NULL)
2309 goto failure; 2284 goto failure;
2310 2285
2311 virtbase = ioremap(res->start, resource_size(res)); 2286 virtbase = ioremap(res->start, resource_size(res));
2312 if (!virtbase) 2287 if (!virtbase)
2313 goto failure; 2288 goto failure;
2314 2289
2315 /* HW version check */ 2290 /* HW version check */
2316 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2291 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) {
2317 if (dma_id_regs[i].val != 2292 if (dma_id_regs[i].val !=
2318 readl(virtbase + dma_id_regs[i].reg)) { 2293 readl(virtbase + dma_id_regs[i].reg)) {
2319 dev_err(&pdev->dev, 2294 dev_err(&pdev->dev,
2320 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", 2295 "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n",
2321 __func__, 2296 __func__,
2322 dma_id_regs[i].val, 2297 dma_id_regs[i].val,
2323 dma_id_regs[i].reg, 2298 dma_id_regs[i].reg,
2324 readl(virtbase + dma_id_regs[i].reg)); 2299 readl(virtbase + dma_id_regs[i].reg));
2325 goto failure; 2300 goto failure;
2326 } 2301 }
2327 } 2302 }
2328 2303
2329 i = readl(virtbase + D40_DREG_PERIPHID2); 2304 i = readl(virtbase + D40_DREG_PERIPHID2);
2330 2305
2331 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { 2306 if ((i & 0xf) != D40_PERIPHID2_DESIGNER) {
2332 dev_err(&pdev->dev, 2307 dev_err(&pdev->dev,
2333 "[%s] Unknown designer! Got %x wanted %x\n", 2308 "[%s] Unknown designer! Got %x wanted %x\n",
2334 __func__, i & 0xf, D40_PERIPHID2_DESIGNER); 2309 __func__, i & 0xf, D40_PERIPHID2_DESIGNER);
2335 goto failure; 2310 goto failure;
2336 } 2311 }
2337 2312
2338 /* The number of physical channels on this HW */ 2313 /* The number of physical channels on this HW */
2339 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 2314 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
2340 2315
2341 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", 2316 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n",
2342 (i >> 4) & 0xf, res->start); 2317 (i >> 4) & 0xf, res->start);
2343 2318
2344 plat_data = pdev->dev.platform_data; 2319 plat_data = pdev->dev.platform_data;
2345 2320
2346 /* Count the number of logical channels in use */ 2321 /* Count the number of logical channels in use */
2347 for (i = 0; i < plat_data->dev_len; i++) 2322 for (i = 0; i < plat_data->dev_len; i++)
2348 if (plat_data->dev_rx[i] != 0) 2323 if (plat_data->dev_rx[i] != 0)
2349 num_log_chans++; 2324 num_log_chans++;
2350 2325
2351 for (i = 0; i < plat_data->dev_len; i++) 2326 for (i = 0; i < plat_data->dev_len; i++)
2352 if (plat_data->dev_tx[i] != 0) 2327 if (plat_data->dev_tx[i] != 0)
2353 num_log_chans++; 2328 num_log_chans++;
2354 2329
2355 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 2330 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
2356 (num_phy_chans + num_log_chans + plat_data->memcpy_len) * 2331 (num_phy_chans + num_log_chans + plat_data->memcpy_len) *
2357 sizeof(struct d40_chan), GFP_KERNEL); 2332 sizeof(struct d40_chan), GFP_KERNEL);
2358 2333
2359 if (base == NULL) { 2334 if (base == NULL) {
2360 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); 2335 dev_err(&pdev->dev, "[%s] Out of memory\n", __func__);
2361 goto failure; 2336 goto failure;
2362 } 2337 }
2363 2338
2364 base->clk = clk; 2339 base->clk = clk;
2365 base->num_phy_chans = num_phy_chans; 2340 base->num_phy_chans = num_phy_chans;
2366 base->num_log_chans = num_log_chans; 2341 base->num_log_chans = num_log_chans;
2367 base->phy_start = res->start; 2342 base->phy_start = res->start;
2368 base->phy_size = resource_size(res); 2343 base->phy_size = resource_size(res);
2369 base->virtbase = virtbase; 2344 base->virtbase = virtbase;
2370 base->plat_data = plat_data; 2345 base->plat_data = plat_data;
2371 base->dev = &pdev->dev; 2346 base->dev = &pdev->dev;
2372 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); 2347 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
2373 base->log_chans = &base->phy_chans[num_phy_chans]; 2348 base->log_chans = &base->phy_chans[num_phy_chans];
2374 2349
2375 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), 2350 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
2376 GFP_KERNEL); 2351 GFP_KERNEL);
2377 if (!base->phy_res) 2352 if (!base->phy_res)
2378 goto failure; 2353 goto failure;
2379 2354
2380 base->lookup_phy_chans = kzalloc(num_phy_chans * 2355 base->lookup_phy_chans = kzalloc(num_phy_chans *
2381 sizeof(struct d40_chan *), 2356 sizeof(struct d40_chan *),
2382 GFP_KERNEL); 2357 GFP_KERNEL);
2383 if (!base->lookup_phy_chans) 2358 if (!base->lookup_phy_chans)
2384 goto failure; 2359 goto failure;
2385 2360
2386 if (num_log_chans + plat_data->memcpy_len) { 2361 if (num_log_chans + plat_data->memcpy_len) {
2387 /* 2362 /*
2388 		 * The max number of logical channels is the number of event 2363 		 * The max number of logical channels is the number of event
2389 		 * lines for all src devices and dst devices 2364 		 * lines for all src devices and dst devices
2390 */ 2365 */
2391 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * 2366 base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 *
2392 sizeof(struct d40_chan *), 2367 sizeof(struct d40_chan *),
2393 GFP_KERNEL); 2368 GFP_KERNEL);
2394 if (!base->lookup_log_chans) 2369 if (!base->lookup_log_chans)
2395 goto failure; 2370 goto failure;
2396 } 2371 }
2397 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), 2372 base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32),
2398 GFP_KERNEL); 2373 GFP_KERNEL);
2399 if (!base->lcla_pool.alloc_map) 2374 if (!base->lcla_pool.alloc_map)
2400 goto failure; 2375 goto failure;
2401 2376
2402 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), 2377 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
2403 0, SLAB_HWCACHE_ALIGN, 2378 0, SLAB_HWCACHE_ALIGN,
2404 NULL); 2379 NULL);
2405 if (base->desc_slab == NULL) 2380 if (base->desc_slab == NULL)
2406 goto failure; 2381 goto failure;
2407 2382
2408 return base; 2383 return base;
2409 2384
2410 failure: 2385 failure:
2411 if (clk) { 2386 if (clk) {
2412 clk_disable(clk); 2387 clk_disable(clk);
2413 clk_put(clk); 2388 clk_put(clk);
2414 } 2389 }
2415 if (virtbase) 2390 if (virtbase)
2416 iounmap(virtbase); 2391 iounmap(virtbase);
2417 if (res) 2392 if (res)
2418 release_mem_region(res->start, 2393 release_mem_region(res->start,
2419 resource_size(res)); 2394 resource_size(res));
2422 2397
2423 if (base) { 2398 if (base) {
2424 kfree(base->lcla_pool.alloc_map); 2399 kfree(base->lcla_pool.alloc_map);
2425 kfree(base->lookup_log_chans); 2400 kfree(base->lookup_log_chans);
2426 kfree(base->lookup_phy_chans); 2401 kfree(base->lookup_phy_chans);
2427 kfree(base->phy_res); 2402 kfree(base->phy_res);
2428 kfree(base); 2403 kfree(base);
2429 } 2404 }
2430 2405
2431 return NULL; 2406 return NULL;
2432 } 2407 }
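d40_hw_detect_init() makes a single kzalloc() cover both the d40_base header and every d40_chan struct: the header is padded to a 4-byte boundary, phy_chans points right after it, and log_chans (followed by the memcpy channels) continues from there. A hedged restatement of the size it requests:

    /* Sketch: the size handed to kzalloc() above; the channel structs for
     * all three dma_devices live in the same block as the base struct. */
    static size_t example_d40_alloc_size(int num_phy_chans, int num_log_chans,
                                         int memcpy_len)
    {
            return ALIGN(sizeof(struct d40_base), 4) +
                   (num_phy_chans + num_log_chans + memcpy_len) *
                   sizeof(struct d40_chan);
    }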
2433 2408
2434 static void __init d40_hw_init(struct d40_base *base) 2409 static void __init d40_hw_init(struct d40_base *base)
2435 { 2410 {
2436 2411
2437 static const struct d40_reg_val dma_init_reg[] = { 2412 static const struct d40_reg_val dma_init_reg[] = {
2438 /* Clock every part of the DMA block from start */ 2413 /* Clock every part of the DMA block from start */
2439 { .reg = D40_DREG_GCC, .val = 0x0000ff01}, 2414 { .reg = D40_DREG_GCC, .val = 0x0000ff01},
2440 2415
2441 /* Interrupts on all logical channels */ 2416 /* Interrupts on all logical channels */
2442 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, 2417 { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
2443 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, 2418 { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
2444 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, 2419 { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
2445 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, 2420 { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
2446 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, 2421 { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
2447 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, 2422 { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
2448 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, 2423 { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
2449 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, 2424 { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
2450 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, 2425 { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
2451 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, 2426 { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
2452 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, 2427 { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
2453 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} 2428 { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
2454 }; 2429 };
2455 int i; 2430 int i;
2456 u32 prmseo[2] = {0, 0}; 2431 u32 prmseo[2] = {0, 0};
2457 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; 2432 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
2458 u32 pcmis = 0; 2433 u32 pcmis = 0;
2459 u32 pcicr = 0; 2434 u32 pcicr = 0;
2460 2435
2461 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) 2436 for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++)
2462 writel(dma_init_reg[i].val, 2437 writel(dma_init_reg[i].val,
2463 base->virtbase + dma_init_reg[i].reg); 2438 base->virtbase + dma_init_reg[i].reg);
2464 2439
2465 /* Configure all our dma channels to default settings */ 2440 /* Configure all our dma channels to default settings */
2466 for (i = 0; i < base->num_phy_chans; i++) { 2441 for (i = 0; i < base->num_phy_chans; i++) {
2467 2442
2468 activeo[i % 2] = activeo[i % 2] << 2; 2443 activeo[i % 2] = activeo[i % 2] << 2;
2469 2444
2470 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src 2445 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
2471 == D40_ALLOC_PHY) { 2446 == D40_ALLOC_PHY) {
2472 activeo[i % 2] |= 3; 2447 activeo[i % 2] |= 3;
2473 continue; 2448 continue;
2474 } 2449 }
2475 2450
2476 /* Enable interrupt # */ 2451 /* Enable interrupt # */
2477 pcmis = (pcmis << 1) | 1; 2452 pcmis = (pcmis << 1) | 1;
2478 2453
2479 /* Clear interrupt # */ 2454 /* Clear interrupt # */
2480 pcicr = (pcicr << 1) | 1; 2455 pcicr = (pcicr << 1) | 1;
2481 2456
2482 /* Set channel to physical mode */ 2457 /* Set channel to physical mode */
2483 prmseo[i % 2] = prmseo[i % 2] << 2; 2458 prmseo[i % 2] = prmseo[i % 2] << 2;
2484 prmseo[i % 2] |= 1; 2459 prmseo[i % 2] |= 1;
2485 2460
2486 } 2461 }
2487 2462
2488 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); 2463 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
2489 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); 2464 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
2490 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); 2465 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
2491 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); 2466 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
2492 2467
2493 /* Write which interrupt to enable */ 2468 /* Write which interrupt to enable */
2494 writel(pcmis, base->virtbase + D40_DREG_PCMIS); 2469 writel(pcmis, base->virtbase + D40_DREG_PCMIS);
2495 2470
2496 /* Write which interrupt to clear */ 2471 /* Write which interrupt to clear */
2497 writel(pcicr, base->virtbase + D40_DREG_PCICR); 2472 writel(pcicr, base->virtbase + D40_DREG_PCICR);
2498 2473
2499 } 2474 }
2500 2475
2501 static int __init d40_probe(struct platform_device *pdev) 2476 static int __init d40_probe(struct platform_device *pdev)
2502 { 2477 {
2503 int err; 2478 int err;
2504 int ret = -ENOENT; 2479 int ret = -ENOENT;
2505 struct d40_base *base; 2480 struct d40_base *base;
2506 struct resource *res = NULL; 2481 struct resource *res = NULL;
2507 int num_reserved_chans; 2482 int num_reserved_chans;
2508 u32 val; 2483 u32 val;
2509 2484
2510 base = d40_hw_detect_init(pdev); 2485 base = d40_hw_detect_init(pdev);
2511 2486
2512 if (!base) 2487 if (!base)
2513 goto failure; 2488 goto failure;
2514 2489
2515 num_reserved_chans = d40_phy_res_init(base); 2490 num_reserved_chans = d40_phy_res_init(base);
2516 2491
2517 platform_set_drvdata(pdev, base); 2492 platform_set_drvdata(pdev, base);
2518 2493
2519 spin_lock_init(&base->interrupt_lock); 2494 spin_lock_init(&base->interrupt_lock);
2520 spin_lock_init(&base->execmd_lock); 2495 spin_lock_init(&base->execmd_lock);
2521 2496
2522 /* Get IO for logical channel parameter address */ 2497 /* Get IO for logical channel parameter address */
2523 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 2498 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
2524 if (!res) { 2499 if (!res) {
2525 ret = -ENOENT; 2500 ret = -ENOENT;
2526 dev_err(&pdev->dev, 2501 dev_err(&pdev->dev,
2527 "[%s] No \"lcpa\" memory resource\n", 2502 "[%s] No \"lcpa\" memory resource\n",
2528 __func__); 2503 __func__);
2529 goto failure; 2504 goto failure;
2530 } 2505 }
2531 base->lcpa_size = resource_size(res); 2506 base->lcpa_size = resource_size(res);
2532 base->phy_lcpa = res->start; 2507 base->phy_lcpa = res->start;
2533 2508
2534 if (request_mem_region(res->start, resource_size(res), 2509 if (request_mem_region(res->start, resource_size(res),
2535 D40_NAME " I/O lcpa") == NULL) { 2510 D40_NAME " I/O lcpa") == NULL) {
2536 ret = -EBUSY; 2511 ret = -EBUSY;
2537 dev_err(&pdev->dev, 2512 dev_err(&pdev->dev,
2538 "[%s] Failed to request LCPA region 0x%x-0x%x\n", 2513 "[%s] Failed to request LCPA region 0x%x-0x%x\n",
2539 __func__, res->start, res->end); 2514 __func__, res->start, res->end);
2540 goto failure; 2515 goto failure;
2541 } 2516 }
2542 2517
2543 /* We make use of ESRAM memory for this. */ 2518 /* We make use of ESRAM memory for this. */
2544 val = readl(base->virtbase + D40_DREG_LCPA); 2519 val = readl(base->virtbase + D40_DREG_LCPA);
2545 if (res->start != val && val != 0) { 2520 if (res->start != val && val != 0) {
2546 dev_warn(&pdev->dev, 2521 dev_warn(&pdev->dev,
2547 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", 2522 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
2548 __func__, val, res->start); 2523 __func__, val, res->start);
2549 } else 2524 } else
2550 writel(res->start, base->virtbase + D40_DREG_LCPA); 2525 writel(res->start, base->virtbase + D40_DREG_LCPA);
2551 2526
2552 base->lcpa_base = ioremap(res->start, resource_size(res)); 2527 base->lcpa_base = ioremap(res->start, resource_size(res));
2553 if (!base->lcpa_base) { 2528 if (!base->lcpa_base) {
2554 ret = -ENOMEM; 2529 ret = -ENOMEM;
2555 dev_err(&pdev->dev, 2530 dev_err(&pdev->dev,
2556 "[%s] Failed to ioremap LCPA region\n", 2531 "[%s] Failed to ioremap LCPA region\n",
2557 __func__); 2532 __func__);
2558 goto failure; 2533 goto failure;
2559 } 2534 }
2560 /* Get IO for logical channel link address */ 2535 /* Get IO for logical channel link address */
2561 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); 2536 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla");
2562 if (!res) { 2537 if (!res) {
2563 ret = -ENOENT; 2538 ret = -ENOENT;
2564 dev_err(&pdev->dev, 2539 dev_err(&pdev->dev,
2565 "[%s] No \"lcla\" resource defined\n", 2540 "[%s] No \"lcla\" resource defined\n",
2566 __func__); 2541 __func__);
2567 goto failure; 2542 goto failure;
2568 } 2543 }
2569 2544
2570 base->lcla_pool.base_size = resource_size(res); 2545 base->lcla_pool.base_size = resource_size(res);
2571 base->lcla_pool.phy = res->start; 2546 base->lcla_pool.phy = res->start;
2572 2547
2573 if (request_mem_region(res->start, resource_size(res), 2548 if (request_mem_region(res->start, resource_size(res),
2574 D40_NAME " I/O lcla") == NULL) { 2549 D40_NAME " I/O lcla") == NULL) {
2575 ret = -EBUSY; 2550 ret = -EBUSY;
2576 dev_err(&pdev->dev, 2551 dev_err(&pdev->dev,
2577 "[%s] Failed to request LCLA region 0x%x-0x%x\n", 2552 "[%s] Failed to request LCLA region 0x%x-0x%x\n",
2578 __func__, res->start, res->end); 2553 __func__, res->start, res->end);
2579 goto failure; 2554 goto failure;
2580 } 2555 }
2581 val = readl(base->virtbase + D40_DREG_LCLA); 2556 val = readl(base->virtbase + D40_DREG_LCLA);
2582 if (res->start != val && val != 0) { 2557 if (res->start != val && val != 0) {
2583 dev_warn(&pdev->dev, 2558 dev_warn(&pdev->dev,
2584 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", 2559 "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n",
2585 __func__, val, res->start); 2560 __func__, val, res->start);
2586 } else 2561 } else
2587 writel(res->start, base->virtbase + D40_DREG_LCLA); 2562 writel(res->start, base->virtbase + D40_DREG_LCLA);
2588 2563
2589 base->lcla_pool.base = ioremap(res->start, resource_size(res)); 2564 base->lcla_pool.base = ioremap(res->start, resource_size(res));
2590 if (!base->lcla_pool.base) { 2565 if (!base->lcla_pool.base) {
2591 ret = -ENOMEM; 2566 ret = -ENOMEM;
2592 dev_err(&pdev->dev, 2567 dev_err(&pdev->dev,
2593 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", 2568 "[%s] Failed to ioremap LCLA 0x%x-0x%x\n",
2594 __func__, res->start, res->end); 2569 __func__, res->start, res->end);
2595 goto failure; 2570 goto failure;
2596 } 2571 }
2597 2572
2598 spin_lock_init(&base->lcla_pool.lock); 2573 spin_lock_init(&base->lcla_pool.lock);
2599 2574
2600 base->lcla_pool.num_blocks = base->num_phy_chans; 2575 base->lcla_pool.num_blocks = base->num_phy_chans;
2601 2576
2602 base->irq = platform_get_irq(pdev, 0); 2577 base->irq = platform_get_irq(pdev, 0);
2603 2578
2604 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 2579 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
2605 2580
2606 if (ret) { 2581 if (ret) {
2607 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); 2582 dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__);
2608 goto failure; 2583 goto failure;
2609 } 2584 }
2610 2585
2611 err = d40_dmaengine_init(base, num_reserved_chans); 2586 err = d40_dmaengine_init(base, num_reserved_chans);
2612 if (err) 2587 if (err)
2613 goto failure; 2588 goto failure;
2614 2589
2615 d40_hw_init(base); 2590 d40_hw_init(base);
2616 2591
2617 dev_info(base->dev, "initialized\n"); 2592 dev_info(base->dev, "initialized\n");
2618 return 0; 2593 return 0;
2619 2594
2620 failure: 2595 failure:
2621 if (base) { 2596 if (base) {
2622 if (base->desc_slab) 2597 if (base->desc_slab)
2623 kmem_cache_destroy(base->desc_slab); 2598 kmem_cache_destroy(base->desc_slab);
2624 if (base->virtbase) 2599 if (base->virtbase)
2625 iounmap(base->virtbase); 2600 iounmap(base->virtbase);
2626 if (base->lcla_pool.phy) 2601 if (base->lcla_pool.phy)
2627 release_mem_region(base->lcla_pool.phy, 2602 release_mem_region(base->lcla_pool.phy,
2628 base->lcla_pool.base_size); 2603 base->lcla_pool.base_size);
2629 if (base->phy_lcpa) 2604 if (base->phy_lcpa)
2630 release_mem_region(base->phy_lcpa, 2605 release_mem_region(base->phy_lcpa,
2631 base->lcpa_size); 2606 base->lcpa_size);
2632 if (base->phy_start) 2607 if (base->phy_start)
2633 release_mem_region(base->phy_start, 2608 release_mem_region(base->phy_start,
2634 base->phy_size); 2609 base->phy_size);
2635 if (base->clk) { 2610 if (base->clk) {
2636 clk_disable(base->clk); 2611 clk_disable(base->clk);
2637 clk_put(base->clk); 2612 clk_put(base->clk);
2638 } 2613 }
2639 2614
2640 kfree(base->lcla_pool.alloc_map); 2615 kfree(base->lcla_pool.alloc_map);
2641 kfree(base->lookup_log_chans); 2616 kfree(base->lookup_log_chans);
2642 kfree(base->lookup_phy_chans); 2617 kfree(base->lookup_phy_chans);
2643 kfree(base->phy_res); 2618 kfree(base->phy_res);
2644 kfree(base); 2619 kfree(base);
2645 } 2620 }
2646 2621
2647 dev_err(&pdev->dev, "[%s] probe failed\n", __func__); 2622 dev_err(&pdev->dev, "[%s] probe failed\n", __func__);
2648 return ret; 2623 return ret;
2649 } 2624 }
2650 2625
2651 static struct platform_driver d40_driver = { 2626 static struct platform_driver d40_driver = {
2652 .driver = { 2627 .driver = {
2653 .owner = THIS_MODULE, 2628 .owner = THIS_MODULE,
2654 .name = D40_NAME, 2629 .name = D40_NAME,
2655 }, 2630 },
2656 }; 2631 };
2657 2632
2658 int __init stedma40_init(void) 2633 int __init stedma40_init(void)
2659 { 2634 {
2660 return platform_driver_probe(&d40_driver, d40_probe); 2635 return platform_driver_probe(&d40_driver, d40_probe);
2661 } 2636 }
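stedma40_init() uses platform_driver_probe(), so the driver binds once, at init time, to a platform device the machine code must already have registered. Judging from the lookups in d40_probe() and d40_hw_detect_init(), that device needs "base", "lcpa" and "lcla" memory resources, an IRQ and stedma40 platform data; a hedged sketch with placeholder values (the real ones live in the ux500 machine files):

    /* Illustrative only: every address, size and the IRQ number below is a
     * placeholder; dev.platform_data would point at a struct
     * stedma40_platform_data describing event lines and memcpy channels. */
    static struct resource example_dma40_resources[] = {
            {
                    .start = 0x801C0000,    /* placeholder */
                    .end   = 0x801C0FFF,
                    .flags = IORESOURCE_MEM,
                    .name  = "base",
            }, {
                    .start = 0x40010000,    /* placeholder */
                    .end   = 0x400107FF,
                    .flags = IORESOURCE_MEM,
                    .name  = "lcpa",
            }, {
                    .start = 0x40011000,    /* placeholder */
                    .end   = 0x40011FFF,
                    .flags = IORESOURCE_MEM,
                    .name  = "lcla",
            }, {
                    .start = 25,            /* placeholder IRQ */
                    .end   = 25,
                    .flags = IORESOURCE_IRQ,
            }
    };

    static struct platform_device example_dma40_device = {
            .name           = D40_NAME,     /* "dma40", matches d40_driver */
            .id             = -1,
            .num_resources  = ARRAY_SIZE(example_dma40_resources),
            .resource       = example_dma40_resources,
    };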