drivers/dma/xilinx/xilinx_dma.c
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (MM2S)
 * and write (S2MM) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
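
/*
 * Usage sketch (illustrative only, not part of this driver): clients drive
 * these engines through the generic dmaengine API. The channel name,
 * buffer and callback below are hypothetical.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_slave_single(chan, buf_phys, len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	tx->callback = rx_done;		(invoked from the tasklet below)
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);	(kicks chan->start_transfer())
 */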

#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"

/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0

/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)

#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_REG_VDMA_VERSION		0x002c
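
/*
 * Illustrative sketch: the DMACR shift/mask pairs above are used together
 * when reprogramming a field. For hypothetical frm_cnt/delay values
 * (bounded by the _MAX defines), the driver's config path does the
 * equivalent of:
 *
 *	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
 *	reg &= ~(XILINX_DMA_DMASR_FRAME_COUNT_MASK |
 *		 XILINX_DMA_DMASR_DELAY_MASK);
 *	reg |= frm_cnt << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
 *	reg |= delay << XILINX_DMA_DMACR_DELAY_SHIFT;
 *	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
 */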

/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004
#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE		0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000

/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x-1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x-1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)

/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);
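
/*
 * Illustrative arithmetic: for a hypothetical 1280x720 frame with
 * 2 bytes per pixel, one transfer would use hsize = 1280 * 2 = 2560
 * bytes, vsize = 720 lines, and stride >= 2560 bytes so consecutive
 * lines do not overlap in memory.
 */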

/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);

/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);

/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);

/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};

/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel ID
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @tdest: TDEST value for mcdma
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	u32 residue;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};
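
/*
 * Illustrative sketch: per-IP instances of this config (defined further
 * down in this file, not shown in this excerpt) take the shape:
 *
 *	static const struct xilinx_dma_config axidma_config = {
 *		.dmatype = XDMA_TYPE_AXIDMA,
 *		.clk_init = axidma_clk_init,
 *	};
 *
 * and are selected as of_device_id match data at probe time.
 */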

/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	bool has_sg;
	bool mcdma;
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 nr_channels;
	u32 chan_id;
};

/* Macros */
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
			   cond, delay_us, timeout_us)
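
/*
 * Example: xilinx_dma_halt() below uses this to wait for the HALTED
 * status bit:
 *
 *	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
 *				      (val & XILINX_DMA_DMASR_HALTED), 0,
 *				      XILINX_DMA_LOOP_COUNT);
 */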

/* IO accessors */
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}

/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver writes to register offsets that are not a multiple
 * of 64 bits (e.g. 0x5c), the value is written as two separate 32-bit
 * writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}

static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}
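
/*
 * Illustrative arithmetic: with ext_addr set, a hypothetical dma_addr_t
 * of 0x123456780 is stored by xilinx_axidma_buf() as
 * hw->buf_addr = 0x23456780 (lower_32_bits) and hw->buf_addr_msb = 0x1
 * (upper_32_bits); without ext_addr only the low 32 bits are kept.
 */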

/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}

/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				struct xilinx_axidma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
				struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}

/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}

/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
					struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}

/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
		xilinx_dma_free_tx_segment(chan, chan->seg_v);
	}

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		/*
		 * Drop the lock while invoking the callback, which may
		 * call back into this driver and take it again.
		 */
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}

/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}

/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * We need the descriptor to be aligned to 64 bytes
	 * for meeting Xilinx VDMA specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_axidma_tx_segment),
				   __alignof__(struct xilinx_axidma_tx_segment),
				   0);
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_cdma_tx_segment),
				   __alignof__(struct xilinx_cdma_tx_segment),
				   0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				     chan->dev,
				     sizeof(struct xilinx_vdma_tx_segment),
				     __alignof__(struct xilinx_vdma_tx_segment),
				     0);
	}

	if (!chan->desc_pool) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For AXI DMA case after submitting a pending_list, keep
		 * an extra segment allocated so that the "next descriptor"
		 * pointer on the tail descriptor always points to a
		 * valid descriptor, even when paused after reaching taildesc.
		 * This way, it is possible to issue additional
		 * transfers without halting and restarting the channel.
		 */
		chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);

		/*
		 * For cyclic DMA mode we need to program the tail Descriptor
		 * register with a value which is not a part of the BD chain
		 * so allocating a desc segment during channel allocation for
		 * programming tail descriptor.
		 */
		chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* For AXI DMA, resetting one channel will reset the other
		 * channel as well, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}

/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					dma_cookie_t cookie,
					struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   XILINX_DMA_MAX_TRANS_LEN;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}
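
/*
 * Usage sketch (illustrative): a client reads the residue computed above
 * through the generic API:
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status == DMA_IN_PROGRESS)
 *		pr_info("%u bytes left\n", state.residue);
 */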

/**
 * xilinx_dma_is_running - Check if DMA channel is running
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if running, '0' if not.
 */
static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
{
	return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		 XILINX_DMA_DMASR_HALTED) &&
		(dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
		 XILINX_DMA_DMACR_RUNSTOP);
}

/**
 * xilinx_dma_is_idle - Check if DMA channel is idle
 * @chan: Driver specific DMA channel
 *
 * Return: '1' if idle, '0' if not.
 */
static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
{
	return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
		XILINX_DMA_DMASR_IDLE;
}

/**
 * xilinx_dma_halt - Halt DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      (val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}
}

/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}

/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */
	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/* Configure channel to allow number of frame buffers */
	dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
		       chan->desc_pendingcount);

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	if (config->park && (config->park_frm >= 0) &&
	    (config->park_frm < chan->num_frms)) {
		if (chan->direction == DMA_MEM_TO_DEV)
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
		else
			dma_write(chan, XILINX_DMA_REG_PARK_PTR,
				  config->park_frm <<
				  XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
	}

	/* Start the hardware */
	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		if (!last)
			return;

		/* HW expects these parameters to be same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
	}

	if (!chan->has_sg) {
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	} else {
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	}
}
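
/*
 * Usage sketch (illustrative): the config fields consumed above are set
 * by clients via xilinx_vdma_channel_set_config(), declared in
 * include/linux/dma/xilinx_dma.h, e.g. to park on frame 0:
 *
 *	struct xilinx_vdma_config cfg = {
 *		.park = 1,
 *		.park_frm = 0,
 *	};
 *
 *	xilinx_vdma_channel_set_config(chan, &cfg);
 */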

/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
				XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
				hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}
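
/*
 * Usage sketch (illustrative): CDMA transfers are submitted by clients
 * as generic memcpy descriptors:
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst_phys, src_phys, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */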

/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
	u32 reg;

	if (chan->err)
		return;

	if (list_empty(&chan->pending_list))
		return;

	/* If it is SG mode and hardware is busy, cannot submit */
	if (chan->has_sg && xilinx_dma_is_running(chan) &&
	    !xilinx_dma_is_idle(chan)) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	if (chan->has_sg && !chan->xdev->mcdma) {
		old_head = list_first_entry(&head_desc->segments,
					    struct xilinx_axidma_tx_segment, node);
		new_head = chan->seg_v;
		/* Copy Buffer Descriptor fields. */
		new_head->hw = old_head->hw;

		/* Swap and save new reserve */
		list_replace_init(&old_head->node, &new_head->node);
		chan->seg_v = old_head;

		tail_segment->hw.next_desc = chan->seg_v->phys;
		head_desc->async_tx.phys = new_head->phys;
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
				  XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_CDESC(chan->tdest),
					head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	if (chan->err)
		return;

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_TDESC(chan->tdest),
					tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}

/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}
42c1a2ede dmaengine: vdma: ... |
1287 |
* xilinx_dma_complete_descriptor - Mark the active descriptor as complete |
9cd4360de dma: Add Xilinx A... |
1288 1289 1290 1291 |
* @chan: xilinx DMA channel * * CONTEXT: hardirq */
42c1a2ede dmaengine: vdma: ... |
1292 |
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) |
9cd4360de dma: Add Xilinx A... |
1293 |
{ |
42c1a2ede dmaengine: vdma: ... |
1294 |
struct xilinx_dma_tx_descriptor *desc, *next; |
9cd4360de dma: Add Xilinx A... |
1295 |
|
26c5e3693 dmaengine: xilinx... |
1296 |
/* This function was invoked with lock held */ |
7096f36e5 dmaengine: xilinx... |
1297 |
if (list_empty(&chan->active_list)) |
26c5e3693 dmaengine: xilinx... |
1298 |
return; |
9cd4360de dma: Add Xilinx A... |
1299 |
|
7096f36e5 dmaengine: xilinx... |
1300 1301 |
list_for_each_entry_safe(desc, next, &chan->active_list, node) { list_del(&desc->node); |
92d794dfb dmaengine: vdma: ... |
1302 1303 |
if (!desc->cyclic) dma_cookie_complete(&desc->async_tx); |
7096f36e5 dmaengine: xilinx... |
1304 1305 |
list_add_tail(&desc->node, &chan->done_list); } |
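/*
 * Cyclic descriptors deliberately skip dma_cookie_complete(): the BD
 * ring keeps running and re-signals the same descriptor on every
 * period, so its cookie must stay in progress until the channel is
 * terminated.
 */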
9cd4360de dma: Add Xilinx A... |
1306 1307 1308 |
} /** |
42c1a2ede dmaengine: vdma: ... |
1309 1310 |
* xilinx_dma_reset - Reset DMA channel * @chan: Driver specific DMA channel |
9cd4360de dma: Add Xilinx A... |
1311 1312 1313 |
* * Return: '0' on success and failure value on error */ |
42c1a2ede dmaengine: vdma: ... |
1314 |
static int xilinx_dma_reset(struct xilinx_dma_chan *chan) |
9cd4360de dma: Add Xilinx A... |
1315 |
{ |
694906348 dmaengine: xilinx... |
1316 |
int err; |
9cd4360de dma: Add Xilinx A... |
1317 |
u32 tmp; |
42c1a2ede dmaengine: vdma: ... |
1318 |
dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); |
9cd4360de dma: Add Xilinx A... |
1319 |
|
9cd4360de dma: Add Xilinx A... |
1320 |
/* Wait for the hardware to finish reset */ |
42c1a2ede dmaengine: vdma: ... |
1321 1322 1323 |
err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, !(tmp & XILINX_DMA_DMACR_RESET), 0, XILINX_DMA_LOOP_COUNT); |
9cd4360de dma: Add Xilinx A... |
1324 |
|
9495f2648 dmaengine: xilinx... |
1325 |
if (err) { |
9cd4360de dma: Add Xilinx A... |
1326 1327 |
dev_err(chan->dev, "reset timeout, cr %x, sr %x ", |
42c1a2ede dmaengine: vdma: ... |
1328 1329 |
dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
9cd4360de dma: Add Xilinx A... |
1330 1331 1332 1333 |
return -ETIMEDOUT; } chan->err = false; |
9495f2648 dmaengine: xilinx... |
1334 |
return err; |
9cd4360de dma: Add Xilinx A... |
1335 1336 1337 |
} /** |
42c1a2ede dmaengine: vdma: ... |
1338 1339 |
* xilinx_dma_chan_reset - Reset DMA channel and enable interrupts * @chan: Driver specific DMA channel |
9cd4360de dma: Add Xilinx A... |
1340 1341 1342 |
* * Return: '0' on success and failure value on error */ |
42c1a2ede dmaengine: vdma: ... |
1343 |
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) |
9cd4360de dma: Add Xilinx A... |
1344 1345 1346 1347 |
{ int err; /* Reset VDMA */ |
42c1a2ede dmaengine: vdma: ... |
1348 |
err = xilinx_dma_reset(chan); |
9cd4360de dma: Add Xilinx A... |
1349 1350 1351 1352 |
if (err) return err; /* Enable interrupts */ |
42c1a2ede dmaengine: vdma: ... |
1353 1354 |
dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
9cd4360de dma: Add Xilinx A... |
1355 1356 1357 1358 1359 |
return 0; } /** |
42c1a2ede dmaengine: vdma: ... |
1360 |
* xilinx_dma_irq_handler - DMA Interrupt handler |
9cd4360de dma: Add Xilinx A... |
1361 |
* @irq: IRQ number |
42c1a2ede dmaengine: vdma: ... |
1362 |
* @data: Pointer to the Xilinx DMA channel structure |
9cd4360de dma: Add Xilinx A... |
1363 1364 1365 |
* * Return: IRQ_HANDLED/IRQ_NONE */ |
42c1a2ede dmaengine: vdma: ... |
1366 |
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) |
9cd4360de dma: Add Xilinx A... |
1367 |
{ |
42c1a2ede dmaengine: vdma: ... |
1368 |
struct xilinx_dma_chan *chan = data; |
9cd4360de dma: Add Xilinx A... |
1369 1370 1371 |
u32 status; /* Read the status and ack the interrupts. */ |
42c1a2ede dmaengine: vdma: ... |
1372 1373 |
status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) |
9cd4360de dma: Add Xilinx A... |
1374 |
return IRQ_NONE; |
42c1a2ede dmaengine: vdma: ... |
1375 1376 |
dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
9cd4360de dma: Add Xilinx A... |
1377 |
|
42c1a2ede dmaengine: vdma: ... |
1378 |
if (status & XILINX_DMA_DMASR_ERR_IRQ) { |
9cd4360de dma: Add Xilinx A... |
1379 1380 1381 1382 1383 1384 1385 |
/* * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the * error is recoverable, ignore it. Otherwise flag the error. * * Only recoverable errors can be cleared in the DMASR register, * so take care not to write 1 to any other error bit. */
42c1a2ede dmaengine: vdma: ... |
1386 |
u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; |
48a59edc6 dmaengine: vdma: ... |
1387 |
|
42c1a2ede dmaengine: vdma: ... |
1388 1389 |
dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); |
9cd4360de dma: Add Xilinx A... |
1390 1391 |
if (!chan->flush_on_fsync || |
42c1a2ede dmaengine: vdma: ... |
1392 |
(errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { |
9cd4360de dma: Add Xilinx A... |
1393 1394 1395 1396 |
dev_err(chan->dev, "Channel %p has errors %x, cdr %x tdr %x\n", chan, errors,
42c1a2ede dmaengine: vdma: ... |
1397 1398 |
dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); |
9cd4360de dma: Add Xilinx A... |
1399 1400 1401 |
chan->err = true; } } |
42c1a2ede dmaengine: vdma: ... |
1402 |
if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { |
9cd4360de dma: Add Xilinx A... |
1403 1404 1405 1406 1407 1408 1409 |
/* * The device is taking too long to complete the transfer while * the user requires responsiveness. */ dev_dbg(chan->dev, "Inter-packet latency too long\n"); }
42c1a2ede dmaengine: vdma: ... |
1410 |
if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { |
26c5e3693 dmaengine: xilinx... |
1411 |
spin_lock(&chan->lock); |
42c1a2ede dmaengine: vdma: ... |
1412 |
xilinx_dma_complete_descriptor(chan); |
c0bba3a99 dmaengine: vdma: ... |
1413 |
chan->start_transfer(chan); |
26c5e3693 dmaengine: xilinx... |
1414 |
spin_unlock(&chan->lock); |
9cd4360de dma: Add Xilinx A... |
1415 1416 1417 1418 1419 1420 1421 |
} tasklet_schedule(&chan->tasklet); return IRQ_HANDLED; } /** |
7096f36e5 dmaengine: xilinx... |
1422 1423 1424 1425 |
* append_desc_queue - Queuing descriptor * @chan: Driver specific dma channel * @desc: dma transaction descriptor */ |
42c1a2ede dmaengine: vdma: ... |
1426 1427 |
static void append_desc_queue(struct xilinx_dma_chan *chan, struct xilinx_dma_tx_descriptor *desc) |
7096f36e5 dmaengine: xilinx... |
1428 1429 |
{ struct xilinx_vdma_tx_segment *tail_segment; |
42c1a2ede dmaengine: vdma: ... |
1430 |
struct xilinx_dma_tx_descriptor *tail_desc; |
c0bba3a99 dmaengine: vdma: ... |
1431 |
struct xilinx_axidma_tx_segment *axidma_tail_segment; |
07b0e7d49 dmaengine: vdma: ... |
1432 |
struct xilinx_cdma_tx_segment *cdma_tail_segment; |
7096f36e5 dmaengine: xilinx... |
1433 1434 1435 1436 1437 1438 1439 1440 1441 |
if (list_empty(&chan->pending_list)) goto append; /* * Add the hardware descriptor to the chain of hardware descriptors * that already exists in memory. */ tail_desc = list_last_entry(&chan->pending_list, |
42c1a2ede dmaengine: vdma: ... |
1442 |
struct xilinx_dma_tx_descriptor, node); |
fb2366675 dmaengine: vdma: ... |
1443 |
if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
c0bba3a99 dmaengine: vdma: ... |
1444 1445 1446 1447 |
tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_vdma_tx_segment, node); tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
fb2366675 dmaengine: vdma: ... |
1448 |
} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { |
07b0e7d49 dmaengine: vdma: ... |
1449 1450 1451 1452 |
cdma_tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_cdma_tx_segment, node); cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
c0bba3a99 dmaengine: vdma: ... |
1453 1454 1455 1456 1457 1458 |
} else { axidma_tail_segment = list_last_entry(&tail_desc->segments, struct xilinx_axidma_tx_segment, node); axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys; } |
7096f36e5 dmaengine: xilinx... |
1459 1460 1461 1462 1463 1464 1465 1466 |
/* * Add the software descriptor and all children to the list * of pending transactions */ append: list_add_tail(&desc->node, &chan->pending_list); chan->desc_pendingcount++; |
fb2366675 dmaengine: vdma: ... |
1467 1468 |
if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) && unlikely(chan->desc_pendingcount > chan->num_frms)) { |
7096f36e5 dmaengine: xilinx... |
1469 1470 1471 1472 1473 1474 1475 |
dev_dbg(chan->dev, "desc pendingcount is too high\n"); chan->desc_pendingcount = chan->num_frms; } } /**
42c1a2ede dmaengine: vdma: ... |
1476 |
* xilinx_dma_tx_submit - Submit DMA transaction |
9cd4360de dma: Add Xilinx A... |
1477 1478 1479 1480 |
* @tx: Async transaction descriptor * * Return: cookie value on success and failure value on error */ |
42c1a2ede dmaengine: vdma: ... |
1481 |
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
9cd4360de dma: Add Xilinx A... |
1482 |
{ |
42c1a2ede dmaengine: vdma: ... |
1483 1484 |
struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); |
9cd4360de dma: Add Xilinx A... |
1485 1486 1487 |
dma_cookie_t cookie; unsigned long flags; int err; |
92d794dfb dmaengine: vdma: ... |
1488 1489 1490 1491 |
if (chan->cyclic) { xilinx_dma_free_tx_descriptor(chan, desc); return -EBUSY; } |
9cd4360de dma: Add Xilinx A... |
1492 1493 1494 1495 1496 |
if (chan->err) { /* * If reset fails, need to hard reset the system. * Channel is no longer functional */ |
42c1a2ede dmaengine: vdma: ... |
1497 |
err = xilinx_dma_chan_reset(chan); |
9cd4360de dma: Add Xilinx A... |
1498 1499 1500 1501 1502 1503 1504 |
if (err < 0) return err; } spin_lock_irqsave(&chan->lock, flags); cookie = dma_cookie_assign(tx); |
7096f36e5 dmaengine: xilinx... |
1505 1506 |
/* Put this transaction onto the tail of the pending queue */ append_desc_queue(chan, desc); |
9cd4360de dma: Add Xilinx A... |
1507 |
|
92d794dfb dmaengine: vdma: ... |
1508 1509 |
if (desc->cyclic) chan->cyclic = true; |
9cd4360de dma: Add Xilinx A... |
1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 |
spin_unlock_irqrestore(&chan->lock, flags); return cookie; } /** * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a * DMA_SLAVE transaction * @dchan: DMA channel * @xt: Interleaved template pointer * @flags: transfer ack flags * * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor * xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, struct dma_interleaved_template *xt, unsigned long flags) { |
42c1a2ede dmaengine: vdma: ... |
1529 1530 |
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; |
9cd4360de dma: Add Xilinx A... |
1531 1532 1533 1534 1535 1536 1537 1538 |
struct xilinx_vdma_tx_segment *segment, *prev = NULL; struct xilinx_vdma_desc_hw *hw; if (!is_slave_direction(xt->dir)) return NULL; if (!xt->numf || !xt->sgl[0].size) return NULL; |
a5e48e243 dmaengine: xilinx... |
1539 1540 |
if (xt->frame_size != 1) return NULL; |
9cd4360de dma: Add Xilinx A... |
1541 |
/* Allocate a transaction descriptor. */ |
42c1a2ede dmaengine: vdma: ... |
1542 |
desc = xilinx_dma_alloc_tx_descriptor(chan); |
9cd4360de dma: Add Xilinx A... |
1543 1544 1545 1546 |
if (!desc) return NULL; dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
42c1a2ede dmaengine: vdma: ... |
1547 |
desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
9cd4360de dma: Add Xilinx A... |
1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 |
async_tx_ack(&desc->async_tx); /* Allocate the link descriptor from DMA pool */ segment = xilinx_vdma_alloc_tx_segment(chan); if (!segment) goto error; /* Fill in the hardware descriptor */ hw = &segment->hw; hw->vsize = xt->numf; hw->hsize = xt->sgl[0].size; |
6d80f45f5 dmaengine: xilinx... |
1559 |
hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << |
42c1a2ede dmaengine: vdma: ... |
1560 |
XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; |
9cd4360de dma: Add Xilinx A... |
1561 |
hw->stride |= chan->config.frm_dly << |
42c1a2ede dmaengine: vdma: ... |
1562 |
XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; |
9cd4360de dma: Add Xilinx A... |
1563 |
|
b72db4005 dmaengine: vdma: ... |
1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 |
if (xt->dir != DMA_MEM_TO_DEV) { if (chan->ext_addr) { hw->buf_addr = lower_32_bits(xt->dst_start); hw->buf_addr_msb = upper_32_bits(xt->dst_start); } else { hw->buf_addr = xt->dst_start; } } else { if (chan->ext_addr) { hw->buf_addr = lower_32_bits(xt->src_start); hw->buf_addr_msb = upper_32_bits(xt->src_start); } else { hw->buf_addr = xt->src_start; } } |
9cd4360de dma: Add Xilinx A... |
1579 |
|
9cd4360de dma: Add Xilinx A... |
1580 1581 1582 1583 1584 1585 1586 1587 |
/* Insert the segment into the descriptor segments list. */ list_add_tail(&segment->node, &desc->segments); prev = segment; /* Link the last hardware descriptor with the first. */ segment = list_first_entry(&desc->segments, struct xilinx_vdma_tx_segment, node); |
7096f36e5 dmaengine: xilinx... |
1588 |
desc->async_tx.phys = segment->phys; |
9cd4360de dma: Add Xilinx A... |
1589 1590 1591 1592 |
return &desc->async_tx; error: |
42c1a2ede dmaengine: vdma: ... |
1593 |
xilinx_dma_free_tx_descriptor(chan, desc); |
9cd4360de dma: Add Xilinx A... |
1594 1595 1596 1597 |
return NULL; } /** |
07b0e7d49 dmaengine: vdma: ... |
1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 |
* xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction * @dchan: DMA channel * @dma_dst: destination address * @dma_src: source address * @len: transfer length * @flags: transfer ack flags * * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor * xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, unsigned long flags) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; struct xilinx_cdma_tx_segment *segment, *prev; struct xilinx_cdma_desc_hw *hw; if (!len || len > XILINX_DMA_MAX_TRANS_LEN) return NULL; desc = xilinx_dma_alloc_tx_descriptor(chan); if (!desc) return NULL; dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); desc->async_tx.tx_submit = xilinx_dma_tx_submit; /* Allocate the link descriptor from DMA pool */ segment = xilinx_cdma_alloc_tx_segment(chan); if (!segment) goto error; hw = &segment->hw; hw->control = len; hw->src_addr = dma_src; hw->dest_addr = dma_dst; |
9791e71a0 dmaengine: vdma: ... |
1635 1636 1637 1638 |
if (chan->ext_addr) { hw->src_addr_msb = upper_32_bits(dma_src); hw->dest_addr_msb = upper_32_bits(dma_dst); } |
07b0e7d49 dmaengine: vdma: ... |
1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 |
/* Fill the previous next descriptor with current */ prev = list_last_entry(&desc->segments, struct xilinx_cdma_tx_segment, node); prev->hw.next_desc = segment->phys; /* Insert the segment into the descriptor segments list. */ list_add_tail(&segment->node, &desc->segments); prev = segment; /* Link the last hardware descriptor with the first. */ segment = list_first_entry(&desc->segments, struct xilinx_cdma_tx_segment, node); desc->async_tx.phys = segment->phys; prev->hw.next_desc = segment->phys; return &desc->async_tx; error: xilinx_dma_free_tx_descriptor(chan, desc); return NULL; } /** |
c0bba3a99 dmaengine: vdma: ... |
1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 |
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction * @dchan: DMA channel * @sgl: scatterlist to transfer to/from * @sg_len: number of entries in @scatterlist * @direction: DMA direction * @flags: transfer ack flags * @context: APP words of the descriptor * * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg( struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL; u32 *app_w = (u32 *)context; struct scatterlist *sg; size_t copy; size_t sg_used; unsigned int i; if (!is_slave_direction(direction)) return NULL; /* Allocate a transaction descriptor. */ desc = xilinx_dma_alloc_tx_descriptor(chan); if (!desc) return NULL; dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); desc->async_tx.tx_submit = xilinx_dma_tx_submit; /* Build transactions using information in the scatter gather list */ for_each_sg(sgl, sg, sg_len, i) { sg_used = 0; /* Loop until the entire scatterlist entry is used */ while (sg_used < sg_dma_len(sg)) { struct xilinx_axidma_desc_hw *hw; /* Get a free segment */ segment = xilinx_axidma_alloc_tx_segment(chan); if (!segment) goto error; /* * Calculate the maximum number of bytes to transfer, * making sure it is less than the hw limit */ copy = min_t(size_t, sg_dma_len(sg) - sg_used, XILINX_DMA_MAX_TRANS_LEN); hw = &segment->hw; /* Fill in the descriptor */ |
f0cba685c dmaengine: vdma: ... |
1721 1722 |
xilinx_axidma_buf(chan, hw, sg_dma_address(sg), sg_used, 0); |
c0bba3a99 dmaengine: vdma: ... |
1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 |
hw->control = copy; if (chan->direction == DMA_MEM_TO_DEV) { if (app_w) memcpy(hw->app, app_w, sizeof(u32) * XILINX_DMA_NUM_APP_WORDS); } if (prev) prev->hw.next_desc = segment->phys; prev = segment; sg_used += copy; /* * Insert the segment into the descriptor segments * list. */ list_add_tail(&segment->node, &desc->segments); } } segment = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); desc->async_tx.phys = segment->phys; prev->hw.next_desc = segment->phys; /* For the last DMA_MEM_TO_DEV transfer, set EOP */ if (chan->direction == DMA_MEM_TO_DEV) { segment->hw.control |= XILINX_DMA_BD_SOP; segment = list_last_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); segment->hw.control |= XILINX_DMA_BD_EOP; } return &desc->async_tx; error: xilinx_dma_free_tx_descriptor(chan, desc); |
9cd4360de dma: Add Xilinx A... |
1764 1765 1766 1767 |
return NULL; } /** |
92d794dfb dmaengine: vdma: ... |
1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 |
* xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA cyclic transaction * @dchan: DMA channel * @buf_addr: Physical address of the buffer * @buf_len: Total length of the cyclic buffers * @period_len: length of individual cyclic buffer * @direction: DMA direction * @flags: transfer ack flags * * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic( struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len, size_t period_len, enum dma_transfer_direction direction, unsigned long flags) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL; size_t copy, sg_used; unsigned int num_periods; int i; u32 reg;
f67c3bdab dmaengine: xilinx... |
1787 1788 |
if (!period_len) return NULL; |
92d794dfb dmaengine: vdma: ... |
1789 |
num_periods = buf_len / period_len; |
f67c3bdab dmaengine: xilinx... |
1790 1791 |
if (!num_periods) return NULL; |
92d794dfb dmaengine: vdma: ... |
1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 |
if (!is_slave_direction(direction)) return NULL; /* Allocate a transaction descriptor. */ desc = xilinx_dma_alloc_tx_descriptor(chan); if (!desc) return NULL; chan->direction = direction; dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); desc->async_tx.tx_submit = xilinx_dma_tx_submit; for (i = 0; i < num_periods; ++i) { sg_used = 0; while (sg_used < period_len) { struct xilinx_axidma_desc_hw *hw; /* Get a free segment */ segment = xilinx_axidma_alloc_tx_segment(chan); if (!segment) goto error; /* * Calculate the maximum number of bytes to transfer, * making sure it is less than the hw limit */ copy = min_t(size_t, period_len - sg_used, XILINX_DMA_MAX_TRANS_LEN); hw = &segment->hw; |
f0cba685c dmaengine: vdma: ... |
1822 1823 |
xilinx_axidma_buf(chan, hw, buf_addr, sg_used, period_len * i); |
92d794dfb dmaengine: vdma: ... |
1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 |
hw->control = copy; if (prev) prev->hw.next_desc = segment->phys; prev = segment; sg_used += copy; /* * Insert the segment into the descriptor segments * list. */ list_add_tail(&segment->node, &desc->segments); } } head_segment = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); desc->async_tx.phys = head_segment->phys; desc->cyclic = true; reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK; dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
e598e6eb4 dmaengine: xilinx... |
1848 1849 1850 1851 |
segment = list_last_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); segment->hw.next_desc = (u32) head_segment->phys; |
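/*
 * Pointing the last BD back at the first closes the ring: with the
 * CYCLIC_BD_EN bit set above, the engine walks the chain endlessly,
 * raising one interrupt per period, until a terminate_all resets
 * the channel.
 */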
92d794dfb dmaengine: vdma: ... |
1852 1853 |
/* For the last DMA_MEM_TO_DEV transfer, set EOP */ if (direction == DMA_MEM_TO_DEV) { |
e167a0b6d dmaengine: vdma: ... |
1854 |
head_segment->hw.control |= XILINX_DMA_BD_SOP; |
92d794dfb dmaengine: vdma: ... |
1855 |
segment->hw.control |= XILINX_DMA_BD_EOP; |
92d794dfb dmaengine: vdma: ... |
1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 |
} return &desc->async_tx; error: xilinx_dma_free_tx_descriptor(chan, desc); return NULL; } /** |
1a9e7a03c dmaengine: vdma: ... |
1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 |
* xilinx_dma_prep_interleaved - prepare a descriptor for a * DMA_SLAVE transaction * @dchan: DMA channel * @xt: Interleaved template pointer * @flags: transfer ack flags * * Return: Async transaction descriptor on success and NULL on failure */ static struct dma_async_tx_descriptor * xilinx_dma_prep_interleaved(struct dma_chan *dchan, struct dma_interleaved_template *xt, unsigned long flags) { struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); struct xilinx_dma_tx_descriptor *desc; struct xilinx_axidma_tx_segment *segment; struct xilinx_axidma_desc_hw *hw; if (!is_slave_direction(xt->dir)) return NULL; if (!xt->numf || !xt->sgl[0].size) return NULL; if (xt->frame_size != 1) return NULL; /* Allocate a transaction descriptor. */ desc = xilinx_dma_alloc_tx_descriptor(chan); if (!desc) return NULL; chan->direction = xt->dir; dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); desc->async_tx.tx_submit = xilinx_dma_tx_submit; /* Get a free segment */ segment = xilinx_axidma_alloc_tx_segment(chan); if (!segment) goto error; hw = &segment->hw; /* Fill in the descriptor */ if (xt->dir != DMA_MEM_TO_DEV) hw->buf_addr = xt->dst_start; else hw->buf_addr = xt->src_start; hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK; hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) & XILINX_DMA_BD_VSIZE_MASK; hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) & XILINX_DMA_BD_STRIDE_MASK; hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK; /* * Insert the segment into the descriptor segments * list. */ list_add_tail(&segment->node, &desc->segments); segment = list_first_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); desc->async_tx.phys = segment->phys; /* For the last DMA_MEM_TO_DEV transfer, set EOP */ if (xt->dir == DMA_MEM_TO_DEV) { segment->hw.control |= XILINX_DMA_BD_SOP; segment = list_last_entry(&desc->segments, struct xilinx_axidma_tx_segment, node); segment->hw.control |= XILINX_DMA_BD_EOP; } return &desc->async_tx; error: xilinx_dma_free_tx_descriptor(chan, desc); return NULL; } /** |
42c1a2ede dmaengine: vdma: ... |
1950 1951 |
* xilinx_dma_terminate_all - Halt the channel and free descriptors * @dchan: Driver specific DMA Channel pointer * * Return: '0' always.
9cd4360de dma: Add Xilinx A... |
1952 |
*/ |
42c1a2ede dmaengine: vdma: ... |
1953 |
static int xilinx_dma_terminate_all(struct dma_chan *dchan) |
9cd4360de dma: Add Xilinx A... |
1954 |
{ |
42c1a2ede dmaengine: vdma: ... |
1955 |
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
92d794dfb dmaengine: vdma: ... |
1956 1957 1958 1959 |
u32 reg; if (chan->cyclic) xilinx_dma_chan_reset(chan); |
ba7140462 dmaengine: xilinx... |
1960 |
|
9cd4360de dma: Add Xilinx A... |
1961 |
/* Halt the DMA engine */ |
42c1a2ede dmaengine: vdma: ... |
1962 |
xilinx_dma_halt(chan); |
9cd4360de dma: Add Xilinx A... |
1963 1964 |
/* Remove and free all of the descriptors in the lists */ |
42c1a2ede dmaengine: vdma: ... |
1965 |
xilinx_dma_free_descriptors(chan); |
ba7140462 dmaengine: xilinx... |
1966 |
|
92d794dfb dmaengine: vdma: ... |
1967 1968 1969 1970 1971 1972 |
if (chan->cyclic) { reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK; dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); chan->cyclic = false; } |
ba7140462 dmaengine: xilinx... |
1973 |
return 0; |
9cd4360de dma: Add Xilinx A... |
1974 1975 1976 |
} /** |
42c1a2ede dmaengine: vdma: ... |
1977 |
* xilinx_vdma_channel_set_config - Configure VDMA channel
9cd4360de dma: Add Xilinx A... |
1978 1979 1980 1981 1982 1983 1984 1985 1986 1987 1988 1989 1990 1991 |
* Run-time configuration for Axi VDMA, supports: * . halt the channel * . configure interrupt coalescing and inter-packet delay threshold * . start/stop parking * . enable genlock * * @dchan: DMA channel * @cfg: VDMA device configuration pointer * * Return: '0' on success and failure value on error */ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, struct xilinx_vdma_config *cfg) { |
42c1a2ede dmaengine: vdma: ... |
1992 |
struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
9cd4360de dma: Add Xilinx A... |
1993 1994 1995 |
u32 dmacr; if (cfg->reset) |
42c1a2ede dmaengine: vdma: ... |
1996 |
return xilinx_dma_chan_reset(chan); |
9cd4360de dma: Add Xilinx A... |
1997 |
|
42c1a2ede dmaengine: vdma: ... |
1998 |
dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
9cd4360de dma: Add Xilinx A... |
1999 2000 2001 2002 2003 2004 2005 2006 2007 |
chan->config.frm_dly = cfg->frm_dly; chan->config.park = cfg->park; /* genlock settings */ chan->config.gen_lock = cfg->gen_lock; chan->config.master = cfg->master; if (cfg->gen_lock && chan->genlock) { |
42c1a2ede dmaengine: vdma: ... |
2008 2009 |
dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; |
9cd4360de dma: Add Xilinx A... |
2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 |
} chan->config.frm_cnt_en = cfg->frm_cnt_en; if (cfg->park) chan->config.park_frm = cfg->park_frm; else chan->config.park_frm = -1; chan->config.coalesc = cfg->coalesc; chan->config.delay = cfg->delay; |
42c1a2ede dmaengine: vdma: ... |
2020 2021 |
if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; |
9cd4360de dma: Add Xilinx A... |
2022 2023 |
chan->config.coalesc = cfg->coalesc; } |
42c1a2ede dmaengine: vdma: ... |
2024 2025 |
if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; |
9cd4360de dma: Add Xilinx A... |
2026 2027 2028 2029 |
chan->config.delay = cfg->delay; } /* FSync Source selection */ |
42c1a2ede dmaengine: vdma: ... |
2030 2031 |
dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; |
9cd4360de dma: Add Xilinx A... |
2032 |
|
42c1a2ede dmaengine: vdma: ... |
2033 |
dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); |
9cd4360de dma: Add Xilinx A... |
2034 2035 2036 2037 |
return 0; } EXPORT_SYMBOL(xilinx_vdma_channel_set_config); |
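/*
 * Illustrative sketch only (not part of this driver): how a client
 * might use the helper exported above to set up a VDMA channel with
 * genlock and interrupt coalescing. The function name is
 * hypothetical; the fields are those of struct xilinx_vdma_config
 * from <linux/dma/xilinx_dma.h> consumed above.
 */
#if 0
static int example_configure_vdma(struct dma_chan *chan)
{
	struct xilinx_vdma_config cfg = {
		.frm_dly = 0,		/* no frame delay */
		.gen_lock = 1,		/* slave to the genlock master ... */
		.master = 0,		/* ... on master interface 0 */
		.frm_cnt_en = 1,	/* enable frame-count interrupts */
		.coalesc = 4,		/* one IRQ per four frames */
		.delay = 0,		/* no delay-count interrupt */
		.park = 0,		/* circular mode, no parking */
		.ext_fsync = 0,		/* fsync source select */
	};

	return xilinx_vdma_channel_set_config(chan, &cfg);
}
#endif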
9cd4360de dma: Add Xilinx A... |
2038 2039 2040 2041 2042 |
/* ----------------------------------------------------------------------------- * Probe and remove */ /** |
42c1a2ede dmaengine: vdma: ... |
2043 2044 |
* xilinx_dma_chan_remove - Per Channel remove function * @chan: Driver specific DMA channel |
9cd4360de dma: Add Xilinx A... |
2045 |
*/ |
42c1a2ede dmaengine: vdma: ... |
2046 |
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) |
9cd4360de dma: Add Xilinx A... |
2047 2048 |
{ /* Disable all interrupts */ |
42c1a2ede dmaengine: vdma: ... |
2049 2050 |
dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
9cd4360de dma: Add Xilinx A... |
2051 2052 2053 2054 2055 2056 2057 2058 |
if (chan->irq > 0) free_irq(chan->irq, chan); tasklet_kill(&chan->tasklet); list_del(&chan->common.device_node); } |
ba16db36b dmaengine: vdma: ... |
2059 2060 2061 2062 2063 2064 2065 2066 2067 2068 2069 2070 2071 2072 2073 2074 2075 2076 2077 2078 2079 2080 2081 2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2108 2109 2110 2111 2112 2113 2114 2115 2116 2117 2118 2119 2120 2121 2122 2123 2124 2125 2126 2127 2128 2129 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2142 2143 2144 2145 2146 2147 2148 2149 2150 2151 2152 2153 2154 2155 2156 2157 2158 2159 2160 2161 2162 2163 2164 2165 2166 2167 2168 2169 2170 2171 2172 2173 2174 2175 2176 2177 2178 2179 2180 2181 2182 2183 2184 2185 2186 2187 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2204 2205 2206 2207 2208 2209 2210 2211 2212 2213 2214 2215 2216 2217 2218 2219 2220 2221 2222 2223 2224 2225 2226 2227 2228 2229 2230 2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255 2256 2257 2258 2259 2260 2261 |
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk, struct clk **tx_clk, struct clk **rx_clk, struct clk **sg_clk, struct clk **tmp_clk) { int err; *tmp_clk = NULL; *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); return err; } *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); if (IS_ERR(*tx_clk)) *tx_clk = NULL; *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); if (IS_ERR(*rx_clk)) *rx_clk = NULL; *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk"); if (IS_ERR(*sg_clk)) *sg_clk = NULL; err = clk_prepare_enable(*axi_clk); if (err) { dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); return err; } err = clk_prepare_enable(*tx_clk); if (err) { dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); goto err_disable_axiclk; } err = clk_prepare_enable(*rx_clk); if (err) { dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); goto err_disable_txclk; } err = clk_prepare_enable(*sg_clk); if (err) { dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err); goto err_disable_rxclk; } return 0; err_disable_rxclk: clk_disable_unprepare(*rx_clk); err_disable_txclk: clk_disable_unprepare(*tx_clk); err_disable_axiclk: clk_disable_unprepare(*axi_clk); return err; }
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, struct clk **dev_clk, struct clk **tmp_clk, struct clk **tmp1_clk, struct clk **tmp2_clk) { int err; *tmp_clk = NULL; *tmp1_clk = NULL; *tmp2_clk = NULL; *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err); return err; } *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk"); if (IS_ERR(*dev_clk)) { err = PTR_ERR(*dev_clk); dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err); return err; } err = clk_prepare_enable(*axi_clk); if (err) { dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); return err; } err = clk_prepare_enable(*dev_clk); if (err) { dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err); goto err_disable_axiclk; } return 0; err_disable_axiclk: clk_disable_unprepare(*axi_clk); return err; }
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk, struct clk **tx_clk, struct clk **txs_clk, struct clk **rx_clk, struct clk **rxs_clk) { int err; *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk"); if (IS_ERR(*axi_clk)) { err = PTR_ERR(*axi_clk); dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err); return err; } *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk"); if (IS_ERR(*tx_clk)) *tx_clk = NULL; *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk"); if (IS_ERR(*txs_clk)) *txs_clk = NULL; *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk"); if (IS_ERR(*rx_clk)) *rx_clk = NULL; *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk"); if (IS_ERR(*rxs_clk)) *rxs_clk = NULL; err = clk_prepare_enable(*axi_clk); if (err) { dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err); return err; } err = clk_prepare_enable(*tx_clk); if (err) { dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); goto err_disable_axiclk; } err = clk_prepare_enable(*txs_clk); if (err) { dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err); goto err_disable_txclk; } err = clk_prepare_enable(*rx_clk); if (err) { dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); goto err_disable_txsclk; } err = clk_prepare_enable(*rxs_clk); if (err) { dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err); goto err_disable_rxclk; } return 0; err_disable_rxclk: clk_disable_unprepare(*rx_clk); err_disable_txsclk: clk_disable_unprepare(*txs_clk); err_disable_txclk: clk_disable_unprepare(*tx_clk); err_disable_axiclk: clk_disable_unprepare(*axi_clk); return err; }
static void xdma_disable_allclks(struct xilinx_dma_device *xdev) { clk_disable_unprepare(xdev->rxs_clk); clk_disable_unprepare(xdev->rx_clk); clk_disable_unprepare(xdev->txs_clk); clk_disable_unprepare(xdev->tx_clk); clk_disable_unprepare(xdev->axi_clk); }
9cd4360de dma: Add Xilinx A... |
2262 |
/** |
42c1a2ede dmaengine: vdma: ... |
2263 |
* xilinx_dma_chan_probe - Per Channel Probing |
9cd4360de dma: Add Xilinx A... |
2264 2265 2266 2267 2268 2269 2270 2271 |
* It gets channel features from the device tree entry and * initializes special channel handling routines * * @xdev: Driver specific device structure * @node: Device node * * Return: '0' on success and failure value on error */
42c1a2ede dmaengine: vdma: ... |
2272 |
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, |
1a9e7a03c dmaengine: vdma: ... |
2273 |
struct device_node *node, int chan_id) |
9cd4360de dma: Add Xilinx A... |
2274 |
{ |
42c1a2ede dmaengine: vdma: ... |
2275 |
struct xilinx_dma_chan *chan; |
9cd4360de dma: Add Xilinx A... |
2276 2277 2278 2279 2280 2281 2282 2283 2284 2285 2286 2287 |
bool has_dre = false; u32 value, width; int err; /* Allocate and initialize the channel structure */ chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; chan->dev = xdev->dev; chan->xdev = xdev; chan->has_sg = xdev->has_sg; |
7096f36e5 dmaengine: xilinx... |
2288 |
chan->desc_pendingcount = 0x0; |
b72db4005 dmaengine: vdma: ... |
2289 |
chan->ext_addr = xdev->ext_addr; |
9cd4360de dma: Add Xilinx A... |
2290 2291 2292 2293 |
spin_lock_init(&chan->lock); INIT_LIST_HEAD(&chan->pending_list); INIT_LIST_HEAD(&chan->done_list); |
7096f36e5 dmaengine: xilinx... |
2294 |
INIT_LIST_HEAD(&chan->active_list); |
9cd4360de dma: Add Xilinx A... |
2295 2296 2297 2298 2299 2300 2301 2302 2303 2304 2305 2306 2307 2308 2309 2310 2311 2312 2313 2314 |
/* Retrieve the channel properties from the device tree */ has_dre = of_property_read_bool(node, "xlnx,include-dre"); chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); err = of_property_read_u32(node, "xlnx,datawidth", &value); if (err) { dev_err(xdev->dev, "missing xlnx,datawidth property\n"); return err; } width = value >> 3; /* Convert bits to bytes */ /* If data width is greater than 8 bytes, DRE is not in hw */ if (width > 8) has_dre = false; if (!has_dre) xdev->common.copy_align = fls(width - 1);
e131f1ba6 dmaengine: xilinx... |
2315 2316 2317 |
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") || of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") || of_device_is_compatible(node, "xlnx,axi-cdma-channel")) { |
9cd4360de dma: Add Xilinx A... |
2318 |
chan->direction = DMA_MEM_TO_DEV; |
1a9e7a03c dmaengine: vdma: ... |
2319 2320 |
chan->id = chan_id; chan->tdest = chan_id; |
9cd4360de dma: Add Xilinx A... |
2321 |
|
42c1a2ede dmaengine: vdma: ... |
2322 |
chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; |
fb2366675 dmaengine: vdma: ... |
2323 |
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
c0bba3a99 dmaengine: vdma: ... |
2324 |
chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; |
9cd4360de dma: Add Xilinx A... |
2325 |
|
c0bba3a99 dmaengine: vdma: ... |
2326 2327 2328 2329 |
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) chan->flush_on_fsync = true; } |
9cd4360de dma: Add Xilinx A... |
2330 |
} else if (of_device_is_compatible(node, |
e131f1ba6 dmaengine: xilinx... |
2331 2332 2333 |
"xlnx,axi-vdma-s2mm-channel") || of_device_is_compatible(node, "xlnx,axi-dma-s2mm-channel")) { |
9cd4360de dma: Add Xilinx A... |
2334 |
chan->direction = DMA_DEV_TO_MEM; |
1a9e7a03c dmaengine: vdma: ... |
2335 2336 |
chan->id = chan_id; chan->tdest = chan_id - xdev->nr_channels; |
9cd4360de dma: Add Xilinx A... |
2337 |
|
42c1a2ede dmaengine: vdma: ... |
2338 |
chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; |
fb2366675 dmaengine: vdma: ... |
2339 |
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
c0bba3a99 dmaengine: vdma: ... |
2340 |
chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; |
9cd4360de dma: Add Xilinx A... |
2341 |
|
c0bba3a99 dmaengine: vdma: ... |
2342 2343 2344 2345 |
if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) chan->flush_on_fsync = true; } |
9cd4360de dma: Add Xilinx A... |
2346 2347 2348 2349 2350 2351 2352 2353 |
} else { dev_err(xdev->dev, "Invalid channel compatible node\n"); return -EINVAL; } /* Request the interrupt */ chan->irq = irq_of_parse_and_map(node, 0);
42c1a2ede dmaengine: vdma: ... |
2354 2355 |
err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, "xilinx-dma-controller", chan); |
9cd4360de dma: Add Xilinx A... |
2356 2357 2358 2359 2360 |
if (err) { dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); return err; }
fb2366675 dmaengine: vdma: ... |
2361 |
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) |
c0bba3a99 dmaengine: vdma: ... |
2362 |
chan->start_transfer = xilinx_dma_start_transfer; |
fb2366675 dmaengine: vdma: ... |
2363 |
else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) |
07b0e7d49 dmaengine: vdma: ... |
2364 |
chan->start_transfer = xilinx_cdma_start_transfer; |
c0bba3a99 dmaengine: vdma: ... |
2365 2366 |
else chan->start_transfer = xilinx_vdma_start_transfer; |
9cd4360de dma: Add Xilinx A... |
2367 |
/* Initialize the tasklet */ |
42c1a2ede dmaengine: vdma: ... |
2368 |
tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, |
9cd4360de dma: Add Xilinx A... |
2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 |
(unsigned long)chan); /* * Initialize the DMA channel and add it to the DMA engine channels * list. */ chan->common.device = &xdev->common; list_add_tail(&chan->common.device_node, &xdev->common.channels); xdev->chan[chan->id] = chan; /* Reset the channel */ |
42c1a2ede dmaengine: vdma: ... |
2381 |
err = xilinx_dma_chan_reset(chan); |
9cd4360de dma: Add Xilinx A... |
2382 2383 2384 2385 2386 2387 2388 2389 2390 2391 |
if (err < 0) { dev_err(xdev->dev, "Reset channel failed\n"); return err; } return 0; } /**
1a9e7a03c dmaengine: vdma: ... |
2392 2393 2394 2395 2396 2397 2398 2399 2400 2401 2402 2403 2404 2405 2406 2407 2408 2409 2410 2411 2412 2413 2414 2415 2416 2417 2418 |
* xilinx_dma_child_probe - Per child node probe * It gets the number of dma-channels per child node from * the device-tree and initializes all the channels. * * @xdev: Driver specific device structure * @node: Device node * * Return: 0 always. */ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev, struct device_node *node) { int ret, i, nr_channels = 1; ret = of_property_read_u32(node, "dma-channels", &nr_channels); if ((ret < 0) && xdev->mcdma) dev_warn(xdev->dev, "missing dma-channels property\n"); for (i = 0; i < nr_channels; i++) xilinx_dma_chan_probe(xdev, node, xdev->chan_id++); xdev->nr_channels += nr_channels; return 0; } /**
9cd4360de dma: Add Xilinx A... |
2419 2420 2421 2422 2423 2424 2425 2426 2427 |
* of_dma_xilinx_xlate - Translation function * @dma_spec: Pointer to DMA specifier as found in the device tree * @ofdma: Pointer to DMA controller data * * Return: DMA channel pointer on success and NULL on error */ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { |
42c1a2ede dmaengine: vdma: ... |
2428 |
struct xilinx_dma_device *xdev = ofdma->of_dma_data; |
9cd4360de dma: Add Xilinx A... |
2429 |
int chan_id = dma_spec->args[0]; |
1a9e7a03c dmaengine: vdma: ... |
2430 |
if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id]) |
9cd4360de dma: Add Xilinx A... |
2431 2432 2433 2434 |
return NULL; return dma_get_slave_channel(&xdev->chan[chan_id]->common); } |
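/*
 * The translation above consumes a single specifier cell: args[0] is
 * the flat channel index assigned in xilinx_dma_chan_probe(). An
 * illustrative (hypothetical) consumer binding and request sketch:
 *
 *	dmas = <&axi_dma 0>, <&axi_dma 1>;
 *	dma-names = "tx", "rx";
 */
#if 0
static int example_get_channel(struct device *dev)
{
	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... prep/submit transfers on chan ... */
	dma_release_channel(chan);
	return 0;
}
#endif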
fb2366675 dmaengine: vdma: ... |
2435 2436 |
static const struct xilinx_dma_config axidma_config = { .dmatype = XDMA_TYPE_AXIDMA, |
ba16db36b dmaengine: vdma: ... |
2437 |
.clk_init = axidma_clk_init, |
fb2366675 dmaengine: vdma: ... |
2438 2439 2440 2441 |
}; static const struct xilinx_dma_config axicdma_config = { .dmatype = XDMA_TYPE_CDMA, |
ba16db36b dmaengine: vdma: ... |
2442 |
.clk_init = axicdma_clk_init, |
fb2366675 dmaengine: vdma: ... |
2443 2444 2445 2446 |
}; static const struct xilinx_dma_config axivdma_config = { .dmatype = XDMA_TYPE_VDMA, |
ba16db36b dmaengine: vdma: ... |
2447 |
.clk_init = axivdma_clk_init, |
fb2366675 dmaengine: vdma: ... |
2448 |
}; |
c0bba3a99 dmaengine: vdma: ... |
2449 |
static const struct of_device_id xilinx_dma_of_ids[] = { |
fb2366675 dmaengine: vdma: ... |
2450 2451 2452 |
{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config }, { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config }, { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config }, |
c0bba3a99 dmaengine: vdma: ... |
2453 2454 2455 |
{} }; MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); |
9cd4360de dma: Add Xilinx A... |
2456 |
/** |
42c1a2ede dmaengine: vdma: ... |
2457 |
* xilinx_dma_probe - Driver probe function |
9cd4360de dma: Add Xilinx A... |
2458 2459 2460 2461 |
* @pdev: Pointer to the platform_device structure * * Return: '0' on success and failure value on error */ |
42c1a2ede dmaengine: vdma: ... |
2462 |
static int xilinx_dma_probe(struct platform_device *pdev) |
9cd4360de dma: Add Xilinx A... |
2463 |
{ |
ba16db36b dmaengine: vdma: ... |
2464 2465 2466 |
int (*clk_init)(struct platform_device *, struct clk **, struct clk **, struct clk **, struct clk **, struct clk **) = axivdma_clk_init; |
9cd4360de dma: Add Xilinx A... |
2467 |
struct device_node *node = pdev->dev.of_node; |
42c1a2ede dmaengine: vdma: ... |
2468 |
struct xilinx_dma_device *xdev; |
fb2366675 dmaengine: vdma: ... |
2469 |
struct device_node *child, *np = pdev->dev.of_node; |
9cd4360de dma: Add Xilinx A... |
2470 |
struct resource *io; |
b72db4005 dmaengine: vdma: ... |
2471 |
u32 num_frames, addr_width; |
9cd4360de dma: Add Xilinx A... |
2472 2473 2474 2475 2476 2477 2478 2479 |
int i, err; /* Allocate and initialize the DMA engine structure */ xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); if (!xdev) return -ENOMEM; xdev->dev = &pdev->dev; |
fb2366675 dmaengine: vdma: ... |
2480 2481 2482 2483 |
if (np) { const struct of_device_id *match; match = of_match_node(xilinx_dma_of_ids, np); |
ba16db36b dmaengine: vdma: ... |
2484 |
if (match && match->data) { |
fb2366675 dmaengine: vdma: ... |
2485 |
xdev->dma_config = match->data; |
ba16db36b dmaengine: vdma: ... |
2486 2487 |
clk_init = xdev->dma_config->clk_init; } |
fb2366675 dmaengine: vdma: ... |
2488 |
} |
9cd4360de dma: Add Xilinx A... |
2489 |
|
ba16db36b dmaengine: vdma: ... |
2490 2491 2492 2493 |
err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk, &xdev->rx_clk, &xdev->rxs_clk); if (err) return err; |
9cd4360de dma: Add Xilinx A... |
2494 2495 2496 2497 2498 2499 2500 2501 2502 |
/* Request and map I/O memory */ io = platform_get_resource(pdev, IORESOURCE_MEM, 0); xdev->regs = devm_ioremap_resource(&pdev->dev, io); if (IS_ERR(xdev->regs)) { err = PTR_ERR(xdev->regs); goto disable_clks; } /* Retrieve the DMA engine properties from the device tree */ xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
1a9e7a03c dmaengine: vdma: ... |
2503 2504 |
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma"); |
9cd4360de dma: Add Xilinx A... |
2505 |
|
fb2366675 dmaengine: vdma: ... |
2506 |
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
c0bba3a99 dmaengine: vdma: ... |
2507 2508 2509 2510 2511 2512 2513 2514 |
err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames); if (err < 0) { dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); goto disable_clks; }
9cd4360de dma: Add Xilinx A... |
2515 |
|
c0bba3a99 dmaengine: vdma: ... |
2516 2517 2518 2519 2520 2521 |
err = of_property_read_u32(node, "xlnx,flush-fsync", &xdev->flush_on_fsync); if (err < 0) dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n");
9cd4360de dma: Add Xilinx A... |
2522 |
} |
b72db4005 dmaengine: vdma: ... |
2523 |
err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width); |
9cd4360de dma: Add Xilinx A... |
2524 |
if (err < 0) |
b72db4005 dmaengine: vdma: ... |
2525 2526 2527 2528 2529 2530 2531 2532 2533 2534 |
dev_warn(xdev->dev, "missing xlnx,addrwidth property\n"); if (addr_width > 32) xdev->ext_addr = true; else xdev->ext_addr = false; /* Set the dma mask bits */ dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
9cd4360de dma: Add Xilinx A... |
2535 2536 2537 2538 2539 |
/* Initialize the DMA engine */ xdev->common.dev = &pdev->dev; INIT_LIST_HEAD(&xdev->common.channels); |
fb2366675 dmaengine: vdma: ... |
2540 |
if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) { |
07b0e7d49 dmaengine: vdma: ... |
2541 2542 2543 |
dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); } |
9cd4360de dma: Add Xilinx A... |
2544 2545 |
xdev->common.device_alloc_chan_resources = |
42c1a2ede dmaengine: vdma: ... |
2546 |
xilinx_dma_alloc_chan_resources; |
9cd4360de dma: Add Xilinx A... |
2547 |
xdev->common.device_free_chan_resources = |
42c1a2ede dmaengine: vdma: ... |
2548 |
xilinx_dma_free_chan_resources; |
42c1a2ede dmaengine: vdma: ... |
2549 2550 2551 |
xdev->common.device_terminate_all = xilinx_dma_terminate_all; xdev->common.device_tx_status = xilinx_dma_tx_status; xdev->common.device_issue_pending = xilinx_dma_issue_pending; |
fb2366675 dmaengine: vdma: ... |
2552 |
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { |
92d794dfb dmaengine: vdma: ... |
2553 |
dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask); |
c0bba3a99 dmaengine: vdma: ... |
2554 |
xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; |
92d794dfb dmaengine: vdma: ... |
2555 2556 |
xdev->common.device_prep_dma_cyclic = xilinx_dma_prep_dma_cyclic; |
1a9e7a03c dmaengine: vdma: ... |
2557 2558 |
xdev->common.device_prep_interleaved_dma = xilinx_dma_prep_interleaved; |
c0bba3a99 dmaengine: vdma: ... |
2559 2560 2561 |
/* Residue calculation is supported by only AXI DMA */ xdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; |
fb2366675 dmaengine: vdma: ... |
2562 |
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) { |
07b0e7d49 dmaengine: vdma: ... |
2563 2564 |
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask); xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy; |
c0bba3a99 dmaengine: vdma: ... |
2565 2566 |
} else { xdev->common.device_prep_interleaved_dma = |
9cd4360de dma: Add Xilinx A... |
2567 |
xilinx_vdma_dma_prep_interleaved; |
c0bba3a99 dmaengine: vdma: ... |
2568 |
} |
9cd4360de dma: Add Xilinx A... |
2569 2570 2571 2572 2573 |
platform_set_drvdata(pdev, xdev); /* Initialize the channels */ for_each_child_of_node(node, child) { |
1a9e7a03c dmaengine: vdma: ... |
2574 |
err = xilinx_dma_child_probe(xdev, child); |
9cd4360de dma: Add Xilinx A... |
2575 |
if (err < 0) |
ba16db36b dmaengine: vdma: ... |
2576 |
goto disable_clks; |
9cd4360de dma: Add Xilinx A... |
2577 |
} |
fb2366675 dmaengine: vdma: ... |
2578 |
if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { |
1a9e7a03c dmaengine: vdma: ... |
2579 |
for (i = 0; i < xdev->nr_channels; i++) |
c0bba3a99 dmaengine: vdma: ... |
2580 2581 2582 |
if (xdev->chan[i]) xdev->chan[i]->num_frms = num_frames; } |
9cd4360de dma: Add Xilinx A... |
2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596 2597 2598 2599 |
/* Register the DMA engine with the core */ dma_async_device_register(&xdev->common); err = of_dma_controller_register(node, of_dma_xilinx_xlate, xdev); if (err < 0) { dev_err(&pdev->dev, "Unable to register DMA to DT\n"); dma_async_device_unregister(&xdev->common); goto error; } dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); return 0;
ba16db36b dmaengine: vdma: ... |
2600 2601 |
disable_clks: xdma_disable_allclks(xdev); |
9cd4360de dma: Add Xilinx A... |
2602 |
error: |
1a9e7a03c dmaengine: vdma: ... |
2603 |
for (i = 0; i < xdev->nr_channels; i++) |
9cd4360de dma: Add Xilinx A... |
2604 |
if (xdev->chan[i]) |
42c1a2ede dmaengine: vdma: ... |
2605 |
xilinx_dma_chan_remove(xdev->chan[i]); |
9cd4360de dma: Add Xilinx A... |
2606 2607 2608 2609 2610 |
return err; } /** |
42c1a2ede dmaengine: vdma: ... |
2611 |
* xilinx_dma_remove - Driver remove function |
9cd4360de dma: Add Xilinx A... |
2612 2613 2614 2615 |
* @pdev: Pointer to the platform_device structure * * Return: Always '0' */ |
42c1a2ede dmaengine: vdma: ... |
2616 |
static int xilinx_dma_remove(struct platform_device *pdev) |
9cd4360de dma: Add Xilinx A... |
2617 |
{ |
42c1a2ede dmaengine: vdma: ... |
2618 |
struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); |
9cd4360de dma: Add Xilinx A... |
2619 2620 2621 2622 2623 |
int i; of_dma_controller_free(pdev->dev.of_node); dma_async_device_unregister(&xdev->common); |
1a9e7a03c dmaengine: vdma: ... |
2624 |
for (i = 0; i < xdev->nr_channels; i++) |
9cd4360de dma: Add Xilinx A... |
2625 |
if (xdev->chan[i]) |
42c1a2ede dmaengine: vdma: ... |
2626 |
xilinx_dma_chan_remove(xdev->chan[i]); |
9cd4360de dma: Add Xilinx A... |
2627 |
|
ba16db36b dmaengine: vdma: ... |
2628 |
xdma_disable_allclks(xdev); |
9cd4360de dma: Add Xilinx A... |
2629 2630 2631 |
return 0; } |
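/*
 * Teardown above mirrors probe in reverse: the DT translation is
 * unpublished first so no new channels can be handed out, the engine
 * is unregistered from the dmaengine core, each channel's IRQ and
 * tasklet are torn down, and only then are the clocks gated, since
 * xilinx_dma_chan_remove() still performs register writes.
 */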
9cd4360de dma: Add Xilinx A... |
2632 2633 2634 |
static struct platform_driver xilinx_vdma_driver = { .driver = { .name = "xilinx-vdma", |
42c1a2ede dmaengine: vdma: ... |
2635 |
.of_match_table = xilinx_dma_of_ids, |
9cd4360de dma: Add Xilinx A... |
2636 |
}, |
42c1a2ede dmaengine: vdma: ... |
2637 2638 |
.probe = xilinx_dma_probe, .remove = xilinx_dma_remove, |
9cd4360de dma: Add Xilinx A... |
2639 2640 2641 2642 2643 2644 2645 |
}; module_platform_driver(xilinx_vdma_driver); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("Xilinx VDMA driver"); MODULE_LICENSE("GPL v2"); |
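/*
 * Illustrative sketch only: exercising the CDMA memcpy path through
 * the generic dmaengine client API (assuming a kernel that provides
 * the dmaengine_prep_dma_memcpy() wrapper). src/dst are assumed to
 * be DMA-mapped by the caller; the function name is hypothetical.
 */
#if 0
static int example_cdma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);	/* calls xilinx_dma_tx_submit() */
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);	/* calls xilinx_dma_issue_pending() */

	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}
#endif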