Commit bbea0b6e0d214ef1511b9c6ccf3af26b38f0af7d

Authored by Ira Snyder
Committed by Dan Williams
1 parent e6c7ecb64e

fsldma: Add DMA_SLAVE support

Use the DMA_SLAVE capability of the DMAEngine API to copy between a
scatterlist and an arbitrary list of hardware address/length pairs.

This allows a single DMA transaction to copy data from several different
devices into a scatterlist at the same time.

This also adds support for some controller-specific features, such as
external start and external pause for a DMA transaction.

[dan.j.williams@intel.com: rebased on tx_list movement]
Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Acked-by: Li Yang <leoli@freescale.com>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
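
For illustration (not part of the commit itself): a minimal, hedged client-side sketch of the flow this enables, using the helpers added below in arch/powerpc/include/asm/fsldma.h. It assumes a DMA channel has already been acquired (e.g. via dma_request_channel()) and that the scatterlist has been DMA-mapped; the function name, device bus addresses and lengths are placeholders.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/fsldma.h>

/*
 * Illustrative sketch only: copy from two (placeholder) device buffer
 * addresses into an already DMA-mapped scatterlist using the DMA_SLAVE
 * support added by this patch.  Channel acquisition, scatterlist mapping
 * and completion handling are assumed to happen elsewhere.
 */
static int example_fsl_slave_copy(struct dma_chan *chan,
				  struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *tx;
	struct fsl_dma_slave *slave;
	dma_cookie_t cookie;
	int ret;

	slave = fsl_dma_slave_alloc(GFP_KERNEL);
	if (!slave)
		return -ENOMEM;

	/* Hardware address/length pairs; both addresses are placeholders */
	ret = fsl_dma_slave_append(slave, 0xf0000000, 512);
	if (!ret)
		ret = fsl_dma_slave_append(slave, 0xf0010000, 512);
	if (ret)
		goto out_free;

	/* fsl_dma_prep_slave_sg() reads the transfer description from here */
	chan->private = slave;

	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						DMA_FROM_DEVICE, 0);
	if (!tx) {
		ret = -ENOMEM;
		goto out_free;
	}

	cookie = tx->tx_submit(tx);
	chan->device->device_issue_pending(chan);

	/*
	 * The cookie can later be polled with dma_async_is_tx_complete();
	 * the fsl_dma_slave is freed once the transfer completes (not shown).
	 */
	return cookie < 0 ? cookie : 0;

out_free:
	chan->private = NULL;
	fsl_dma_slave_free(slave);
	return ret;
}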

Showing 2 changed files with 363 additions and 0 deletions

arch/powerpc/include/asm/fsldma.h
File was created 1 /*
2 * Freescale MPC83XX / MPC85XX DMA Controller
3 *
4 * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11 #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
12 #define __ARCH_POWERPC_ASM_FSLDMA_H__
13
14 #include <linux/dmaengine.h>
15
16 /*
17 * Definitions for the Freescale DMA controller's DMA_SLAVE implementation
18 *
19 * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
20 * transfers. An example usage would be an accelerated copy between two
21 * scatterlists. Another example use would be an accelerated copy from
22 * multiple non-contiguous device buffers into a single scatterlist.
23 *
24 * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
25 * structure contains a list of hardware addresses that should be copied
26 * to/from the scatterlist passed into device_prep_slave_sg(). The structure
27 * also has some fields to enable hardware-specific features.
28 */
29
30 /**
31 * struct fsl_dma_hw_addr
32 * @entry: linked list entry
33 * @address: the hardware address
34 * @length: length to transfer
35 *
36 * Holds a single physical hardware address / length pair for use
37 * with the DMAEngine DMA_SLAVE API.
38 */
39 struct fsl_dma_hw_addr {
40 struct list_head entry;
41
42 dma_addr_t address;
43 size_t length;
44 };
45
46 /**
47 * struct fsl_dma_slave
48 * @addresses: a linked list of struct fsl_dma_hw_addr structures
49 * @request_count: value for DMA request count
50 * @src_loop_size: set up and enable constant source-address DMA transfers
51 * @dst_loop_size: set up and enable constant destination-address DMA transfers
52 * @external_start: enable externally started DMA transfers
53 * @external_pause: enable externally paused DMA transfers
54 *
55 * Holds a list of address / length pairs for use with the DMAEngine
56 * DMA_SLAVE API implementation for the Freescale DMA controller.
57 */
58 struct fsl_dma_slave {
59
60 /* List of hardware address/length pairs */
61 struct list_head addresses;
62
63 /* Support for extra controller features */
64 unsigned int request_count;
65 unsigned int src_loop_size;
66 unsigned int dst_loop_size;
67 bool external_start;
68 bool external_pause;
69 };
70
71 /**
72 * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
73 * @slave: the &struct fsl_dma_slave to add to
74 * @address: the hardware address to add
75 * @length: the number of bytes to transfer from @address
76 *
77 * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
78 * success, -ERRNO otherwise.
79 */
80 static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
81 dma_addr_t address, size_t length)
82 {
83 struct fsl_dma_hw_addr *addr;
84
85 addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
86 if (!addr)
87 return -ENOMEM;
88
89 INIT_LIST_HEAD(&addr->entry);
90 addr->address = address;
91 addr->length = length;
92
93 list_add_tail(&addr->entry, &slave->addresses);
94 return 0;
95 }
96
97 /**
98 * fsl_dma_slave_free - free a struct fsl_dma_slave
99 * @slave: the struct fsl_dma_slave to free
100 *
101 * Free a struct fsl_dma_slave and all associated address/length pairs
102 */
103 static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
104 {
105 struct fsl_dma_hw_addr *addr, *tmp;
106
107 if (slave) {
108 list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
109 list_del(&addr->entry);
110 kfree(addr);
111 }
112
113 kfree(slave);
114 }
115 }
116
117 /**
118 * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
119 * @gfp: the flags to pass to kzalloc when allocating this structure
120 *
121 * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
122 * struct fsl_dma_slave on success, or NULL on failure.
123 */
124 static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
125 {
126 struct fsl_dma_slave *slave;
127
128 slave = kzalloc(sizeof(*slave), gfp);
129 if (!slave)
130 return NULL;
131
132 INIT_LIST_HEAD(&slave->addresses);
133 return slave;
134 }
135
136 #endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
137
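
Before moving on to the driver changes, a second hedged sketch illustrating the loop-size and external-pause fields documented above: writing an already-mapped scatterlist into a device FIFO that sits at a single fixed bus address. The function name, FIFO address, loop window and request count are illustrative assumptions, not values mandated by this header.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <asm/fsldma.h>

/*
 * Illustrative sketch only: push an already-mapped scatterlist into a
 * device FIFO at a fixed bus address.  dst_loop_size keeps the DMA
 * writing into a small constant-address window, while external_pause
 * and request_count let the device pace the transfer via DREQ#.
 */
static struct dma_async_tx_descriptor *
example_prep_fifo_write(struct dma_chan *chan, struct scatterlist *sgl,
			unsigned int sg_len, dma_addr_t fifo_addr,
			size_t total_len)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct fsl_dma_slave *slave;

	slave = fsl_dma_slave_alloc(GFP_KERNEL);
	if (!slave)
		return NULL;

	/* A single hardware address/length pair covering the whole transfer */
	if (fsl_dma_slave_append(slave, fifo_addr, total_len))
		goto out_free;

	slave->dst_loop_size = 8;	/* loop over an 8-byte destination window */
	slave->external_pause = true;	/* let DREQ# pause/resume the channel */
	slave->request_count = 64;	/* bytes transferred per DREQ# assertion */

	chan->private = slave;
	tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						DMA_TO_DEVICE, 0);

out_free:
	if (!tx) {
		chan->private = NULL;
		fsl_dma_slave_free(slave);
	}
	/* On success the caller frees the fsl_dma_slave after completion */
	return tx;
}

Both sketches rely on chan->private, which is how this DMA_SLAVE implementation receives its controller-specific configuration.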
drivers/dma/fsldma.c
1 /* 1 /*
2 * Freescale MPC85xx, MPC83xx DMA Engine support 2 * Freescale MPC85xx, MPC83xx DMA Engine support
3 * 3 *
4 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 4 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
5 * 5 *
6 * Author: 6 * Author:
7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 7 * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
8 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 8 * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
9 * 9 *
10 * Description: 10 * Description:
11 * DMA engine driver for Freescale MPC8540 DMA controller, which is 11 * DMA engine driver for Freescale MPC8540 DMA controller, which is
12 * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. 12 * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc.
13 * The support for MPC8349 DMA contorller is also added. 13 * The support for MPC8349 DMA contorller is also added.
14 * 14 *
15 * This driver instructs the DMA controller to issue the PCI Read Multiple 15 * This driver instructs the DMA controller to issue the PCI Read Multiple
16 * command for PCI read operations, instead of using the default PCI Read Line 16 * command for PCI read operations, instead of using the default PCI Read Line
17 * command. Please be aware that this setting may result in read pre-fetching 17 * command. Please be aware that this setting may result in read pre-fetching
18 * on some platforms. 18 * on some platforms.
19 * 19 *
20 * This is free software; you can redistribute it and/or modify 20 * This is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by 21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or 22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version. 23 * (at your option) any later version.
24 * 24 *
25 */ 25 */
26 26
27 #include <linux/init.h> 27 #include <linux/init.h>
28 #include <linux/module.h> 28 #include <linux/module.h>
29 #include <linux/pci.h> 29 #include <linux/pci.h>
30 #include <linux/interrupt.h> 30 #include <linux/interrupt.h>
31 #include <linux/dmaengine.h> 31 #include <linux/dmaengine.h>
32 #include <linux/delay.h> 32 #include <linux/delay.h>
33 #include <linux/dma-mapping.h> 33 #include <linux/dma-mapping.h>
34 #include <linux/dmapool.h> 34 #include <linux/dmapool.h>
35 #include <linux/of_platform.h> 35 #include <linux/of_platform.h>
36 36
37 #include <asm/fsldma.h>
37 #include "fsldma.h" 38 #include "fsldma.h"
38 39
39 static void dma_init(struct fsl_dma_chan *fsl_chan) 40 static void dma_init(struct fsl_dma_chan *fsl_chan)
40 { 41 {
41 /* Reset the channel */ 42 /* Reset the channel */
42 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32); 43 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
43 44
44 switch (fsl_chan->feature & FSL_DMA_IP_MASK) { 45 switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
45 case FSL_DMA_IP_85XX: 46 case FSL_DMA_IP_85XX:
46 /* Set the channel to below modes: 47 /* Set the channel to below modes:
47 * EIE - Error interrupt enable 48 * EIE - Error interrupt enable
48 * EOSIE - End of segments interrupt enable (basic mode) 49 * EOSIE - End of segments interrupt enable (basic mode)
49 * EOLNIE - End of links interrupt enable 50 * EOLNIE - End of links interrupt enable
50 */ 51 */
51 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE 52 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
52 | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); 53 | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
53 break; 54 break;
54 case FSL_DMA_IP_83XX: 55 case FSL_DMA_IP_83XX:
55 /* Set the channel to below modes: 56 /* Set the channel to below modes:
56 * EOTIE - End-of-transfer interrupt enable 57 * EOTIE - End-of-transfer interrupt enable
57 * PRC_RM - PCI read multiple 58 * PRC_RM - PCI read multiple
58 */ 59 */
59 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE 60 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
60 | FSL_DMA_MR_PRC_RM, 32); 61 | FSL_DMA_MR_PRC_RM, 32);
61 break; 62 break;
62 } 63 }
63 64
64 } 65 }
65 66
66 static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val) 67 static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
67 { 68 {
68 DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32); 69 DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
69 } 70 }
70 71
71 static u32 get_sr(struct fsl_dma_chan *fsl_chan) 72 static u32 get_sr(struct fsl_dma_chan *fsl_chan)
72 { 73 {
73 return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32); 74 return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
74 } 75 }
75 76
76 static void set_desc_cnt(struct fsl_dma_chan *fsl_chan, 77 static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
77 struct fsl_dma_ld_hw *hw, u32 count) 78 struct fsl_dma_ld_hw *hw, u32 count)
78 { 79 {
79 hw->count = CPU_TO_DMA(fsl_chan, count, 32); 80 hw->count = CPU_TO_DMA(fsl_chan, count, 32);
80 } 81 }
81 82
82 static void set_desc_src(struct fsl_dma_chan *fsl_chan, 83 static void set_desc_src(struct fsl_dma_chan *fsl_chan,
83 struct fsl_dma_ld_hw *hw, dma_addr_t src) 84 struct fsl_dma_ld_hw *hw, dma_addr_t src)
84 { 85 {
85 u64 snoop_bits; 86 u64 snoop_bits;
86 87
87 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 88 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
88 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; 89 ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
89 hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64); 90 hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
90 } 91 }
91 92
92 static void set_desc_dest(struct fsl_dma_chan *fsl_chan, 93 static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
93 struct fsl_dma_ld_hw *hw, dma_addr_t dest) 94 struct fsl_dma_ld_hw *hw, dma_addr_t dest)
94 { 95 {
95 u64 snoop_bits; 96 u64 snoop_bits;
96 97
97 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 98 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
98 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; 99 ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
99 hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64); 100 hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
100 } 101 }
101 102
102 static void set_desc_next(struct fsl_dma_chan *fsl_chan, 103 static void set_desc_next(struct fsl_dma_chan *fsl_chan,
103 struct fsl_dma_ld_hw *hw, dma_addr_t next) 104 struct fsl_dma_ld_hw *hw, dma_addr_t next)
104 { 105 {
105 u64 snoop_bits; 106 u64 snoop_bits;
106 107
107 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 108 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
108 ? FSL_DMA_SNEN : 0; 109 ? FSL_DMA_SNEN : 0;
109 hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64); 110 hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
110 } 111 }
111 112
112 static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) 113 static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
113 { 114 {
114 DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64); 115 DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
115 } 116 }
116 117
117 static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan) 118 static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
118 { 119 {
119 return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN; 120 return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
120 } 121 }
121 122
122 static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr) 123 static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
123 { 124 {
124 DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64); 125 DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
125 } 126 }
126 127
127 static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan) 128 static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
128 { 129 {
129 return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64); 130 return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
130 } 131 }
131 132
132 static u32 get_bcr(struct fsl_dma_chan *fsl_chan) 133 static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
133 { 134 {
134 return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32); 135 return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
135 } 136 }
136 137
137 static int dma_is_idle(struct fsl_dma_chan *fsl_chan) 138 static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
138 { 139 {
139 u32 sr = get_sr(fsl_chan); 140 u32 sr = get_sr(fsl_chan);
140 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); 141 return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
141 } 142 }
142 143
143 static void dma_start(struct fsl_dma_chan *fsl_chan) 144 static void dma_start(struct fsl_dma_chan *fsl_chan)
144 { 145 {
145 u32 mr_set = 0; 146 u32 mr_set = 0;
146 147
147 if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { 148 if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
148 DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32); 149 DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
149 mr_set |= FSL_DMA_MR_EMP_EN; 150 mr_set |= FSL_DMA_MR_EMP_EN;
150 } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 151 } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
151 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 152 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
152 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) 153 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
153 & ~FSL_DMA_MR_EMP_EN, 32); 154 & ~FSL_DMA_MR_EMP_EN, 32);
154 } 155 }
155 156
156 if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT) 157 if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
157 mr_set |= FSL_DMA_MR_EMS_EN; 158 mr_set |= FSL_DMA_MR_EMS_EN;
158 else 159 else
159 mr_set |= FSL_DMA_MR_CS; 160 mr_set |= FSL_DMA_MR_CS;
160 161
161 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 162 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
162 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) 163 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
163 | mr_set, 32); 164 | mr_set, 32);
164 } 165 }
165 166
166 static void dma_halt(struct fsl_dma_chan *fsl_chan) 167 static void dma_halt(struct fsl_dma_chan *fsl_chan)
167 { 168 {
168 int i; 169 int i;
169 170
170 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 171 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
171 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA, 172 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
172 32); 173 32);
173 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 174 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
174 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS 175 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
175 | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32); 176 | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
176 177
177 for (i = 0; i < 100; i++) { 178 for (i = 0; i < 100; i++) {
178 if (dma_is_idle(fsl_chan)) 179 if (dma_is_idle(fsl_chan))
179 break; 180 break;
180 udelay(10); 181 udelay(10);
181 } 182 }
182 if (i >= 100 && !dma_is_idle(fsl_chan)) 183 if (i >= 100 && !dma_is_idle(fsl_chan))
183 dev_err(fsl_chan->dev, "DMA halt timeout!\n"); 184 dev_err(fsl_chan->dev, "DMA halt timeout!\n");
184 } 185 }
185 186
186 static void set_ld_eol(struct fsl_dma_chan *fsl_chan, 187 static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
187 struct fsl_desc_sw *desc) 188 struct fsl_desc_sw *desc)
188 { 189 {
189 u64 snoop_bits; 190 u64 snoop_bits;
190 191
191 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 192 snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
192 ? FSL_DMA_SNEN : 0; 193 ? FSL_DMA_SNEN : 0;
193 194
194 desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, 195 desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
195 DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL 196 DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
196 | snoop_bits, 64); 197 | snoop_bits, 64);
197 } 198 }
198 199
199 static void append_ld_queue(struct fsl_dma_chan *fsl_chan, 200 static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
200 struct fsl_desc_sw *new_desc) 201 struct fsl_desc_sw *new_desc)
201 { 202 {
202 struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev); 203 struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
203 204
204 if (list_empty(&fsl_chan->ld_queue)) 205 if (list_empty(&fsl_chan->ld_queue))
205 return; 206 return;
206 207
207 /* Link to the new descriptor physical address and 208 /* Link to the new descriptor physical address and
208 * Enable End-of-segment interrupt for 209 * Enable End-of-segment interrupt for
209 * the last link descriptor. 210 * the last link descriptor.
210 * (the previous node's next link descriptor) 211 * (the previous node's next link descriptor)
211 * 212 *
212 * For FSL_DMA_IP_83xx, the snoop enable bit need be set. 213 * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
213 */ 214 */
214 queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan, 215 queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
215 new_desc->async_tx.phys | FSL_DMA_EOSIE | 216 new_desc->async_tx.phys | FSL_DMA_EOSIE |
216 (((fsl_chan->feature & FSL_DMA_IP_MASK) 217 (((fsl_chan->feature & FSL_DMA_IP_MASK)
217 == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64); 218 == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
218 } 219 }
219 220
220 /** 221 /**
221 * fsl_chan_set_src_loop_size - Set source address hold transfer size 222 * fsl_chan_set_src_loop_size - Set source address hold transfer size
222 * @fsl_chan : Freescale DMA channel 223 * @fsl_chan : Freescale DMA channel
223 * @size : Address loop size, 0 for disable loop 224 * @size : Address loop size, 0 for disable loop
224 * 225 *
225 * The set source address hold transfer size. The source 226 * The set source address hold transfer size. The source
226 * address hold or loop transfer size is when the DMA transfer 227 * address hold or loop transfer size is when the DMA transfer
227 * data from source address (SA), if the loop size is 4, the DMA will 228 * data from source address (SA), if the loop size is 4, the DMA will
228 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, 229 * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
229 * SA + 1 ... and so on. 230 * SA + 1 ... and so on.
230 */ 231 */
231 static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size) 232 static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
232 { 233 {
233 switch (size) { 234 switch (size) {
234 case 0: 235 case 0:
235 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 236 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
236 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & 237 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
237 (~FSL_DMA_MR_SAHE), 32); 238 (~FSL_DMA_MR_SAHE), 32);
238 break; 239 break;
239 case 1: 240 case 1:
240 case 2: 241 case 2:
241 case 4: 242 case 4:
242 case 8: 243 case 8:
243 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 244 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
244 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | 245 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
245 FSL_DMA_MR_SAHE | (__ilog2(size) << 14), 246 FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
246 32); 247 32);
247 break; 248 break;
248 } 249 }
249 } 250 }
250 251
251 /** 252 /**
252 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size 253 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
253 * @fsl_chan : Freescale DMA channel 254 * @fsl_chan : Freescale DMA channel
254 * @size : Address loop size, 0 for disable loop 255 * @size : Address loop size, 0 for disable loop
255 * 256 *
256 * The set destination address hold transfer size. The destination 257 * The set destination address hold transfer size. The destination
257 * address hold or loop transfer size is when the DMA transfer 258 * address hold or loop transfer size is when the DMA transfer
258 * data to destination address (TA), if the loop size is 4, the DMA will 259 * data to destination address (TA), if the loop size is 4, the DMA will
259 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, 260 * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
260 * TA + 1 ... and so on. 261 * TA + 1 ... and so on.
261 */ 262 */
262 static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) 263 static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
263 { 264 {
264 switch (size) { 265 switch (size) {
265 case 0: 266 case 0:
266 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 267 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
267 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & 268 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
268 (~FSL_DMA_MR_DAHE), 32); 269 (~FSL_DMA_MR_DAHE), 32);
269 break; 270 break;
270 case 1: 271 case 1:
271 case 2: 272 case 2:
272 case 4: 273 case 4:
273 case 8: 274 case 8:
274 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 275 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
275 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | 276 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
276 FSL_DMA_MR_DAHE | (__ilog2(size) << 16), 277 FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
277 32); 278 32);
278 break; 279 break;
279 } 280 }
280 } 281 }
281 282
282 /** 283 /**
283 * fsl_chan_set_request_count - Set DMA Request Count for external control 284 * fsl_chan_set_request_count - Set DMA Request Count for external control
284 * @fsl_chan : Freescale DMA channel 285 * @fsl_chan : Freescale DMA channel
285 * @size : Number of bytes to transfer in a single request 286 * @size : Number of bytes to transfer in a single request
286 * 287 *
287 * The Freescale DMA channel can be controlled by the external signal DREQ#. 288 * The Freescale DMA channel can be controlled by the external signal DREQ#.
288 * The DMA request count is how many bytes are allowed to transfer before 289 * The DMA request count is how many bytes are allowed to transfer before
289 * pausing the channel, after which a new assertion of DREQ# resumes channel 290 * pausing the channel, after which a new assertion of DREQ# resumes channel
290 * operation. 291 * operation.
291 * 292 *
292 * A size of 0 disables external pause control. The maximum size is 1024. 293 * A size of 0 disables external pause control. The maximum size is 1024.
293 */ 294 */
294 static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) 295 static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
295 { 296 {
296 BUG_ON(size > 1024); 297 BUG_ON(size > 1024);
297 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 298 DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
298 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) 299 DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
299 | ((__ilog2(size) << 24) & 0x0f000000), 300 | ((__ilog2(size) << 24) & 0x0f000000),
300 32); 301 32);
301 } 302 }
302 303
303 /** 304 /**
304 * fsl_chan_toggle_ext_pause - Toggle channel external pause status 305 * fsl_chan_toggle_ext_pause - Toggle channel external pause status
305 * @fsl_chan : Freescale DMA channel 306 * @fsl_chan : Freescale DMA channel
306 * @enable : 0 is disabled, 1 is enabled. 307 * @enable : 0 is disabled, 1 is enabled.
307 * 308 *
308 * The Freescale DMA channel can be controlled by the external signal DREQ#. 309 * The Freescale DMA channel can be controlled by the external signal DREQ#.
309 * The DMA Request Count feature should be used in addition to this feature 310 * The DMA Request Count feature should be used in addition to this feature
310 * to set the number of bytes to transfer before pausing the channel. 311 * to set the number of bytes to transfer before pausing the channel.
311 */ 312 */
312 static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) 313 static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
313 { 314 {
314 if (enable) 315 if (enable)
315 fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; 316 fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
316 else 317 else
317 fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; 318 fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
318 } 319 }
319 320
320 /** 321 /**
321 * fsl_chan_toggle_ext_start - Toggle channel external start status 322 * fsl_chan_toggle_ext_start - Toggle channel external start status
322 * @fsl_chan : Freescale DMA channel 323 * @fsl_chan : Freescale DMA channel
323 * @enable : 0 is disabled, 1 is enabled. 324 * @enable : 0 is disabled, 1 is enabled.
324 * 325 *
325 * If enable the external start, the channel can be started by an 326 * If enable the external start, the channel can be started by an
326 * external DMA start pin. So the dma_start() does not start the 327 * external DMA start pin. So the dma_start() does not start the
327 * transfer immediately. The DMA channel will wait for the 328 * transfer immediately. The DMA channel will wait for the
328 * control pin asserted. 329 * control pin asserted.
329 */ 330 */
330 static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) 331 static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
331 { 332 {
332 if (enable) 333 if (enable)
333 fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; 334 fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
334 else 335 else
335 fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; 336 fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
336 } 337 }
337 338
338 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) 339 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
339 { 340 {
340 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); 341 struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
341 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); 342 struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
342 struct fsl_desc_sw *child; 343 struct fsl_desc_sw *child;
343 unsigned long flags; 344 unsigned long flags;
344 dma_cookie_t cookie; 345 dma_cookie_t cookie;
345 346
346 /* cookie increment and adding to ld_queue must be atomic */ 347 /* cookie increment and adding to ld_queue must be atomic */
347 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 348 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
348 349
349 cookie = fsl_chan->common.cookie; 350 cookie = fsl_chan->common.cookie;
350 list_for_each_entry(child, &desc->tx_list, node) { 351 list_for_each_entry(child, &desc->tx_list, node) {
351 cookie++; 352 cookie++;
352 if (cookie < 0) 353 if (cookie < 0)
353 cookie = 1; 354 cookie = 1;
354 355
355 desc->async_tx.cookie = cookie; 356 desc->async_tx.cookie = cookie;
356 } 357 }
357 358
358 fsl_chan->common.cookie = cookie; 359 fsl_chan->common.cookie = cookie;
359 append_ld_queue(fsl_chan, desc); 360 append_ld_queue(fsl_chan, desc);
360 list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); 361 list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
361 362
362 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 363 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
363 364
364 return cookie; 365 return cookie;
365 } 366 }
366 367
367 /** 368 /**
368 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. 369 * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
369 * @fsl_chan : Freescale DMA channel 370 * @fsl_chan : Freescale DMA channel
370 * 371 *
371 * Return - The descriptor allocated. NULL for failed. 372 * Return - The descriptor allocated. NULL for failed.
372 */ 373 */
373 static struct fsl_desc_sw *fsl_dma_alloc_descriptor( 374 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
374 struct fsl_dma_chan *fsl_chan) 375 struct fsl_dma_chan *fsl_chan)
375 { 376 {
376 dma_addr_t pdesc; 377 dma_addr_t pdesc;
377 struct fsl_desc_sw *desc_sw; 378 struct fsl_desc_sw *desc_sw;
378 379
379 desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); 380 desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
380 if (desc_sw) { 381 if (desc_sw) {
381 memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); 382 memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
382 INIT_LIST_HEAD(&desc_sw->tx_list); 383 INIT_LIST_HEAD(&desc_sw->tx_list);
383 dma_async_tx_descriptor_init(&desc_sw->async_tx, 384 dma_async_tx_descriptor_init(&desc_sw->async_tx,
384 &fsl_chan->common); 385 &fsl_chan->common);
385 desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; 386 desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
386 desc_sw->async_tx.phys = pdesc; 387 desc_sw->async_tx.phys = pdesc;
387 } 388 }
388 389
389 return desc_sw; 390 return desc_sw;
390 } 391 }
391 392
392 393
393 /** 394 /**
394 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. 395 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
395 * @fsl_chan : Freescale DMA channel 396 * @fsl_chan : Freescale DMA channel
396 * 397 *
397 * This function will create a dma pool for descriptor allocation. 398 * This function will create a dma pool for descriptor allocation.
398 * 399 *
399 * Return - The number of descriptors allocated. 400 * Return - The number of descriptors allocated.
400 */ 401 */
401 static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) 402 static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
402 { 403 {
403 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 404 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
404 405
405 /* Has this channel already been allocated? */ 406 /* Has this channel already been allocated? */
406 if (fsl_chan->desc_pool) 407 if (fsl_chan->desc_pool)
407 return 1; 408 return 1;
408 409
409 /* We need the descriptor to be aligned to 32bytes 410 /* We need the descriptor to be aligned to 32bytes
410 * for meeting FSL DMA specification requirement. 411 * for meeting FSL DMA specification requirement.
411 */ 412 */
412 fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", 413 fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
413 fsl_chan->dev, sizeof(struct fsl_desc_sw), 414 fsl_chan->dev, sizeof(struct fsl_desc_sw),
414 32, 0); 415 32, 0);
415 if (!fsl_chan->desc_pool) { 416 if (!fsl_chan->desc_pool) {
416 dev_err(fsl_chan->dev, "No memory for channel %d " 417 dev_err(fsl_chan->dev, "No memory for channel %d "
417 "descriptor dma pool.\n", fsl_chan->id); 418 "descriptor dma pool.\n", fsl_chan->id);
418 return 0; 419 return 0;
419 } 420 }
420 421
421 return 1; 422 return 1;
422 } 423 }
423 424
424 /** 425 /**
425 * fsl_dma_free_chan_resources - Free all resources of the channel. 426 * fsl_dma_free_chan_resources - Free all resources of the channel.
426 * @fsl_chan : Freescale DMA channel 427 * @fsl_chan : Freescale DMA channel
427 */ 428 */
428 static void fsl_dma_free_chan_resources(struct dma_chan *chan) 429 static void fsl_dma_free_chan_resources(struct dma_chan *chan)
429 { 430 {
430 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 431 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
431 struct fsl_desc_sw *desc, *_desc; 432 struct fsl_desc_sw *desc, *_desc;
432 unsigned long flags; 433 unsigned long flags;
433 434
434 dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); 435 dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
435 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 436 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
436 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { 437 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
437 #ifdef FSL_DMA_LD_DEBUG 438 #ifdef FSL_DMA_LD_DEBUG
438 dev_dbg(fsl_chan->dev, 439 dev_dbg(fsl_chan->dev,
439 "LD %p will be released.\n", desc); 440 "LD %p will be released.\n", desc);
440 #endif 441 #endif
441 list_del(&desc->node); 442 list_del(&desc->node);
442 /* free link descriptor */ 443 /* free link descriptor */
443 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); 444 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
444 } 445 }
445 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 446 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
446 dma_pool_destroy(fsl_chan->desc_pool); 447 dma_pool_destroy(fsl_chan->desc_pool);
447 448
448 fsl_chan->desc_pool = NULL; 449 fsl_chan->desc_pool = NULL;
449 } 450 }
450 451
451 static struct dma_async_tx_descriptor * 452 static struct dma_async_tx_descriptor *
452 fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) 453 fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
453 { 454 {
454 struct fsl_dma_chan *fsl_chan; 455 struct fsl_dma_chan *fsl_chan;
455 struct fsl_desc_sw *new; 456 struct fsl_desc_sw *new;
456 457
457 if (!chan) 458 if (!chan)
458 return NULL; 459 return NULL;
459 460
460 fsl_chan = to_fsl_chan(chan); 461 fsl_chan = to_fsl_chan(chan);
461 462
462 new = fsl_dma_alloc_descriptor(fsl_chan); 463 new = fsl_dma_alloc_descriptor(fsl_chan);
463 if (!new) { 464 if (!new) {
464 dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); 465 dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
465 return NULL; 466 return NULL;
466 } 467 }
467 468
468 new->async_tx.cookie = -EBUSY; 469 new->async_tx.cookie = -EBUSY;
469 new->async_tx.flags = flags; 470 new->async_tx.flags = flags;
470 471
471 /* Insert the link descriptor to the LD ring */ 472 /* Insert the link descriptor to the LD ring */
472 list_add_tail(&new->node, &new->tx_list); 473 list_add_tail(&new->node, &new->tx_list);
473 474
474 /* Set End-of-link to the last link descriptor of new list*/ 475 /* Set End-of-link to the last link descriptor of new list*/
475 set_ld_eol(fsl_chan, new); 476 set_ld_eol(fsl_chan, new);
476 477
477 return &new->async_tx; 478 return &new->async_tx;
478 } 479 }
479 480
480 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( 481 static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
481 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 482 struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
482 size_t len, unsigned long flags) 483 size_t len, unsigned long flags)
483 { 484 {
484 struct fsl_dma_chan *fsl_chan; 485 struct fsl_dma_chan *fsl_chan;
485 struct fsl_desc_sw *first = NULL, *prev = NULL, *new; 486 struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
486 struct list_head *list; 487 struct list_head *list;
487 size_t copy; 488 size_t copy;
488 489
489 if (!chan) 490 if (!chan)
490 return NULL; 491 return NULL;
491 492
492 if (!len) 493 if (!len)
493 return NULL; 494 return NULL;
494 495
495 fsl_chan = to_fsl_chan(chan); 496 fsl_chan = to_fsl_chan(chan);
496 497
497 do { 498 do {
498 499
499 /* Allocate the link descriptor from DMA pool */ 500 /* Allocate the link descriptor from DMA pool */
500 new = fsl_dma_alloc_descriptor(fsl_chan); 501 new = fsl_dma_alloc_descriptor(fsl_chan);
501 if (!new) { 502 if (!new) {
502 dev_err(fsl_chan->dev, 503 dev_err(fsl_chan->dev,
503 "No free memory for link descriptor\n"); 504 "No free memory for link descriptor\n");
504 goto fail; 505 goto fail;
505 } 506 }
506 #ifdef FSL_DMA_LD_DEBUG 507 #ifdef FSL_DMA_LD_DEBUG
507 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); 508 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
508 #endif 509 #endif
509 510
510 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 511 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
511 512
512 set_desc_cnt(fsl_chan, &new->hw, copy); 513 set_desc_cnt(fsl_chan, &new->hw, copy);
513 set_desc_src(fsl_chan, &new->hw, dma_src); 514 set_desc_src(fsl_chan, &new->hw, dma_src);
514 set_desc_dest(fsl_chan, &new->hw, dma_dest); 515 set_desc_dest(fsl_chan, &new->hw, dma_dest);
515 516
516 if (!first) 517 if (!first)
517 first = new; 518 first = new;
518 else 519 else
519 set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys); 520 set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
520 521
521 new->async_tx.cookie = 0; 522 new->async_tx.cookie = 0;
522 async_tx_ack(&new->async_tx); 523 async_tx_ack(&new->async_tx);
523 524
524 prev = new; 525 prev = new;
525 len -= copy; 526 len -= copy;
526 dma_src += copy; 527 dma_src += copy;
527 dma_dest += copy; 528 dma_dest += copy;
528 529
529 /* Insert the link descriptor to the LD ring */ 530 /* Insert the link descriptor to the LD ring */
530 list_add_tail(&new->node, &first->tx_list); 531 list_add_tail(&new->node, &first->tx_list);
531 } while (len); 532 } while (len);
532 533
533 new->async_tx.flags = flags; /* client is in control of this ack */ 534 new->async_tx.flags = flags; /* client is in control of this ack */
534 new->async_tx.cookie = -EBUSY; 535 new->async_tx.cookie = -EBUSY;
535 536
536 /* Set End-of-link to the last link descriptor of new list*/ 537 /* Set End-of-link to the last link descriptor of new list*/
537 set_ld_eol(fsl_chan, new); 538 set_ld_eol(fsl_chan, new);
538 539
539 return &first->async_tx; 540 return &first->async_tx;
540 541
541 fail: 542 fail:
542 if (!first) 543 if (!first)
543 return NULL; 544 return NULL;
544 545
545 list = &first->tx_list; 546 list = &first->tx_list;
546 list_for_each_entry_safe_reverse(new, prev, list, node) { 547 list_for_each_entry_safe_reverse(new, prev, list, node) {
547 list_del(&new->node); 548 list_del(&new->node);
548 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); 549 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
549 } 550 }
550 551
551 return NULL; 552 return NULL;
552 } 553 }
553 554
554 /** 555 /**
556 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
557 * @chan: DMA channel
558 * @sgl: scatterlist to transfer to/from
559 * @sg_len: number of entries in @sgl
560 * @direction: DMA direction
561 * @flags: DMAEngine flags
562 *
563 * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
564 * DMA_SLAVE API, this gets the device-specific information from the
565 * chan->private variable.
566 */
567 static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
568 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
569 enum dma_data_direction direction, unsigned long flags)
570 {
571 struct fsl_dma_chan *fsl_chan;
572 struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
573 struct fsl_dma_slave *slave;
574 struct list_head *tx_list;
575 size_t copy;
576
577 int i;
578 struct scatterlist *sg;
579 size_t sg_used;
580 size_t hw_used;
581 struct fsl_dma_hw_addr *hw;
582 dma_addr_t dma_dst, dma_src;
583
584 if (!chan)
585 return NULL;
586
587 if (!chan->private)
588 return NULL;
589
590 fsl_chan = to_fsl_chan(chan);
591 slave = chan->private;
592
593 if (list_empty(&slave->addresses))
594 return NULL;
595
596 hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry);
597 hw_used = 0;
598
599 /*
600 * Build the hardware transaction to copy from the scatterlist to
601 * the hardware, or from the hardware to the scatterlist
602 *
603 * If you are copying from the hardware to the scatterlist and it
604 * takes two hardware entries to fill an entire page, then both
605 * hardware entries will be coalesced into the same page
606 *
607 * If you are copying from the scatterlist to the hardware and a
608 * single page can fill two hardware entries, then the data will
609 * be read out of the page into the first hardware entry, and so on
610 */
611 for_each_sg(sgl, sg, sg_len, i) {
612 sg_used = 0;
613
614 /* Loop until the entire scatterlist entry is used */
615 while (sg_used < sg_dma_len(sg)) {
616
617 /*
618 * If we've used up the current hardware address/length
619 * pair, we need to load a new one
620 *
621 * This is done in a while loop so that descriptors with
622 * length == 0 will be skipped
623 */
624 while (hw_used >= hw->length) {
625
626 /*
627 * If the current hardware entry is the last
628 * entry in the list, we're finished
629 */
630 if (list_is_last(&hw->entry, &slave->addresses))
631 goto finished;
632
633 /* Get the next hardware address/length pair */
634 hw = list_entry(hw->entry.next,
635 struct fsl_dma_hw_addr, entry);
636 hw_used = 0;
637 }
638
639 /* Allocate the link descriptor from DMA pool */
640 new = fsl_dma_alloc_descriptor(fsl_chan);
641 if (!new) {
642 dev_err(fsl_chan->dev, "No free memory for "
643 "link descriptor\n");
644 goto fail;
645 }
646 #ifdef FSL_DMA_LD_DEBUG
647 dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
648 #endif
649
650 /*
651 * Calculate the maximum number of bytes to transfer,
652 * making sure it is less than the DMA controller limit
653 */
654 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
655 hw->length - hw_used);
656 copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT);
657
658 /*
659 * DMA_FROM_DEVICE
660 * from the hardware to the scatterlist
661 *
662 * DMA_TO_DEVICE
663 * from the scatterlist to the hardware
664 */
665 if (direction == DMA_FROM_DEVICE) {
666 dma_src = hw->address + hw_used;
667 dma_dst = sg_dma_address(sg) + sg_used;
668 } else {
669 dma_src = sg_dma_address(sg) + sg_used;
670 dma_dst = hw->address + hw_used;
671 }
672
673 /* Fill in the descriptor */
674 set_desc_cnt(fsl_chan, &new->hw, copy);
675 set_desc_src(fsl_chan, &new->hw, dma_src);
676 set_desc_dest(fsl_chan, &new->hw, dma_dst);
677
678 /*
679 * If this is not the first descriptor, chain the
680 * current descriptor after the previous descriptor
681 */
682 if (!first) {
683 first = new;
684 } else {
685 set_desc_next(fsl_chan, &prev->hw,
686 new->async_tx.phys);
687 }
688
689 new->async_tx.cookie = 0;
690 async_tx_ack(&new->async_tx);
691
692 prev = new;
693 sg_used += copy;
694 hw_used += copy;
695
696 /* Insert the link descriptor into the LD ring */
697 list_add_tail(&new->node, &first->tx_list);
698 }
699 }
700
701 finished:
702
703 /* All of the hardware address/length pairs had length == 0 */
704 if (!first || !new)
705 return NULL;
706
707 new->async_tx.flags = flags;
708 new->async_tx.cookie = -EBUSY;
709
710 /* Set End-of-link to the last link descriptor of new list */
711 set_ld_eol(fsl_chan, new);
712
713 /* Enable extra controller features */
714 if (fsl_chan->set_src_loop_size)
715 fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
716
717 if (fsl_chan->set_dest_loop_size)
718 fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
719
720 if (fsl_chan->toggle_ext_start)
721 fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
722
723 if (fsl_chan->toggle_ext_pause)
724 fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
725
726 if (fsl_chan->set_request_count)
727 fsl_chan->set_request_count(fsl_chan, slave->request_count);
728
729 return &first->async_tx;
730
731 fail:
732 /* If first was not set, then we failed to allocate the very first
733 * descriptor, and we're done */
734 if (!first)
735 return NULL;
736
737 /*
738 * First is set, so all of the descriptors we allocated have been added
739 * to first->tx_list, INCLUDING "first" itself. Therefore we
740 * must traverse the list backwards freeing each descriptor in turn
741 *
742 * We're re-using variables for the loop, oh well
743 */
744 tx_list = &first->tx_list;
745 list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
746 list_del_init(&new->node);
747 dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
748 }
749
750 return NULL;
751 }
752
753 static void fsl_dma_device_terminate_all(struct dma_chan *chan)
754 {
755 struct fsl_dma_chan *fsl_chan;
756 struct fsl_desc_sw *desc, *tmp;
757 unsigned long flags;
758
759 if (!chan)
760 return;
761
762 fsl_chan = to_fsl_chan(chan);
763
764 /* Halt the DMA engine */
765 dma_halt(fsl_chan);
766
767 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
768
769 /* Remove and free all of the descriptors in the LD queue */
770 list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
771 list_del(&desc->node);
772 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
773 }
774
775 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
776 }
777
778 /**
555 * fsl_dma_update_completed_cookie - Update the completed cookie. 779 * fsl_dma_update_completed_cookie - Update the completed cookie.
556 * @fsl_chan : Freescale DMA channel 780 * @fsl_chan : Freescale DMA channel
557 */ 781 */
558 static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan) 782 static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
559 { 783 {
560 struct fsl_desc_sw *cur_desc, *desc; 784 struct fsl_desc_sw *cur_desc, *desc;
561 dma_addr_t ld_phy; 785 dma_addr_t ld_phy;
562 786
563 ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK; 787 ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
564 788
565 if (ld_phy) { 789 if (ld_phy) {
566 cur_desc = NULL; 790 cur_desc = NULL;
567 list_for_each_entry(desc, &fsl_chan->ld_queue, node) 791 list_for_each_entry(desc, &fsl_chan->ld_queue, node)
568 if (desc->async_tx.phys == ld_phy) { 792 if (desc->async_tx.phys == ld_phy) {
569 cur_desc = desc; 793 cur_desc = desc;
570 break; 794 break;
571 } 795 }
572 796
573 if (cur_desc && cur_desc->async_tx.cookie) { 797 if (cur_desc && cur_desc->async_tx.cookie) {
574 if (dma_is_idle(fsl_chan)) 798 if (dma_is_idle(fsl_chan))
575 fsl_chan->completed_cookie = 799 fsl_chan->completed_cookie =
576 cur_desc->async_tx.cookie; 800 cur_desc->async_tx.cookie;
577 else 801 else
578 fsl_chan->completed_cookie = 802 fsl_chan->completed_cookie =
579 cur_desc->async_tx.cookie - 1; 803 cur_desc->async_tx.cookie - 1;
580 } 804 }
581 } 805 }
582 } 806 }
583 807
584 /** 808 /**
585 * fsl_chan_ld_cleanup - Clean up link descriptors 809 * fsl_chan_ld_cleanup - Clean up link descriptors
586 * @fsl_chan : Freescale DMA channel 810 * @fsl_chan : Freescale DMA channel
587 * 811 *
588 * This function clean up the ld_queue of DMA channel. 812 * This function clean up the ld_queue of DMA channel.
589 * If 'in_intr' is set, the function will move the link descriptor to 813 * If 'in_intr' is set, the function will move the link descriptor to
590 * the recycle list. Otherwise, free it directly. 814 * the recycle list. Otherwise, free it directly.
591 */ 815 */
592 static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) 816 static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
593 { 817 {
594 struct fsl_desc_sw *desc, *_desc; 818 struct fsl_desc_sw *desc, *_desc;
595 unsigned long flags; 819 unsigned long flags;
596 820
597 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 821 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
598 822
599 dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", 823 dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
600 fsl_chan->completed_cookie); 824 fsl_chan->completed_cookie);
601 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { 825 list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
602 dma_async_tx_callback callback; 826 dma_async_tx_callback callback;
603 void *callback_param; 827 void *callback_param;
604 828
605 if (dma_async_is_complete(desc->async_tx.cookie, 829 if (dma_async_is_complete(desc->async_tx.cookie,
606 fsl_chan->completed_cookie, fsl_chan->common.cookie) 830 fsl_chan->completed_cookie, fsl_chan->common.cookie)
607 == DMA_IN_PROGRESS) 831 == DMA_IN_PROGRESS)
608 break; 832 break;
609 833
610 callback = desc->async_tx.callback; 834 callback = desc->async_tx.callback;
611 callback_param = desc->async_tx.callback_param; 835 callback_param = desc->async_tx.callback_param;
612 836
613 /* Remove from ld_queue list */ 837 /* Remove from ld_queue list */
614 list_del(&desc->node); 838 list_del(&desc->node);
615 839
616 dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n", 840 dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
617 desc); 841 desc);
618 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); 842 dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
619 843
620 /* Run the link descriptor callback function */ 844 /* Run the link descriptor callback function */
621 if (callback) { 845 if (callback) {
622 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 846 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
623 dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", 847 dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
624 desc); 848 desc);
625 callback(callback_param); 849 callback(callback_param);
626 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 850 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
627 } 851 }
628 } 852 }
629 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 853 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
630 } 854 }
631 855
632 /** 856 /**
633 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. 857 * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
634 * @fsl_chan : Freescale DMA channel 858 * @fsl_chan : Freescale DMA channel
635 */ 859 */
636 static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) 860 static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
637 { 861 {
638 struct list_head *ld_node; 862 struct list_head *ld_node;
639 dma_addr_t next_dest_addr; 863 dma_addr_t next_dest_addr;
640 unsigned long flags; 864 unsigned long flags;
641 865
642 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 866 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
643 867
644 if (!dma_is_idle(fsl_chan)) 868 if (!dma_is_idle(fsl_chan))
645 goto out_unlock; 869 goto out_unlock;
646 870
647 dma_halt(fsl_chan); 871 dma_halt(fsl_chan);
648 872
649 /* If there are some link descriptors 873 /* If there are some link descriptors
650 * not transfered in queue. We need to start it. 874 * not transfered in queue. We need to start it.
651 */ 875 */
652 876
653 /* Find the first un-transfer desciptor */ 877 /* Find the first un-transfer desciptor */
654 for (ld_node = fsl_chan->ld_queue.next; 878 for (ld_node = fsl_chan->ld_queue.next;
655 (ld_node != &fsl_chan->ld_queue) 879 (ld_node != &fsl_chan->ld_queue)
656 && (dma_async_is_complete( 880 && (dma_async_is_complete(
657 to_fsl_desc(ld_node)->async_tx.cookie, 881 to_fsl_desc(ld_node)->async_tx.cookie,
658 fsl_chan->completed_cookie, 882 fsl_chan->completed_cookie,
659 fsl_chan->common.cookie) == DMA_SUCCESS); 883 fsl_chan->common.cookie) == DMA_SUCCESS);
660 ld_node = ld_node->next); 884 ld_node = ld_node->next);
661 885
662 if (ld_node != &fsl_chan->ld_queue) { 886 if (ld_node != &fsl_chan->ld_queue) {
663 /* Get the ld start address from ld_queue */ 887 /* Get the ld start address from ld_queue */
664 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; 888 next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
665 dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n", 889 dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
666 (unsigned long long)next_dest_addr); 890 (unsigned long long)next_dest_addr);
667 set_cdar(fsl_chan, next_dest_addr); 891 set_cdar(fsl_chan, next_dest_addr);
668 dma_start(fsl_chan); 892 dma_start(fsl_chan);
669 } else { 893 } else {
670 set_cdar(fsl_chan, 0); 894 set_cdar(fsl_chan, 0);
671 set_ndar(fsl_chan, 0); 895 set_ndar(fsl_chan, 0);
672 } 896 }
673 897
674 out_unlock: 898 out_unlock:
675 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 899 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
676 } 900 }
677 901
678 /** 902 /**
679 * fsl_dma_memcpy_issue_pending - Issue the DMA start command 903 * fsl_dma_memcpy_issue_pending - Issue the DMA start command
680 * @fsl_chan : Freescale DMA channel 904 * @fsl_chan : Freescale DMA channel
681 */ 905 */
682 static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) 906 static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
683 { 907 {
684 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); 908 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
685 909
686 #ifdef FSL_DMA_LD_DEBUG 910 #ifdef FSL_DMA_LD_DEBUG
687 struct fsl_desc_sw *ld; 911 struct fsl_desc_sw *ld;
688 unsigned long flags; 912 unsigned long flags;
689 913
690 spin_lock_irqsave(&fsl_chan->desc_lock, flags); 914 spin_lock_irqsave(&fsl_chan->desc_lock, flags);
691 if (list_empty(&fsl_chan->ld_queue)) { 915 if (list_empty(&fsl_chan->ld_queue)) {
692 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); 916 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
693 return; 917 return;
694 } 918 }
695 919
696 920 dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
697 921 list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
698 922 int i;
699 923 dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
700 924 fsl_chan->id, ld->async_tx.phys);
701 925 for (i = 0; i < 8; i++)
702 926 dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
703 927 i, *(((u32 *)&ld->hw) + i));
704 928 }
705 929 dev_dbg(fsl_chan->dev, "----------------\n");
706 930 spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
707 931 #endif
708 932
709 933 fsl_chan_xfer_ld_queue(fsl_chan);
710 934 }
711 935
712 936 /**
713 937 * fsl_dma_is_complete - Determine the DMA status
714 938 * @fsl_chan : Freescale DMA channel
715 939 */
716 940 static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
717 941 dma_cookie_t cookie,
718 942 dma_cookie_t *done,
719 943 dma_cookie_t *used)
720 944 {
721 945 struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
722 946 dma_cookie_t last_used;
723 947 dma_cookie_t last_complete;
724 948
725 949 fsl_chan_ld_cleanup(fsl_chan);
726 950
727 951 last_used = chan->cookie;
728 952 last_complete = fsl_chan->completed_cookie;
729 953
730 954 if (done)
731 955 *done = last_complete;
732 956
733 957 if (used)
734 958 *used = last_used;
735 959
736 960 return dma_async_is_complete(cookie, last_complete, last_used);
737 961 }
738 962
739 963 static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
740 964 {
741 965 struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
742 966 u32 stat;
743 967 int update_cookie = 0;
744 968 int xfer_ld_q = 0;
745 969
746 970 stat = get_sr(fsl_chan);
747 971 dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
748 972 fsl_chan->id, stat);
749 973 set_sr(fsl_chan, stat); /* Clear the event register */
750 974
751 975 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
752 976 if (!stat)
753 977 return IRQ_NONE;
754 978
755 979 if (stat & FSL_DMA_SR_TE)
756 980 dev_err(fsl_chan->dev, "Transfer Error!\n");
757 981
758 982 /* Programming Error
759 983 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
760 984 * triger a PE interrupt.
761 985 */
762 986 if (stat & FSL_DMA_SR_PE) {
763 987 dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
764 988 if (get_bcr(fsl_chan) == 0) {
765 989 /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
766 990 * Now, update the completed cookie, and continue the
767 991 * next uncompleted transfer.
768 992 */
769 993 update_cookie = 1;
770 994 xfer_ld_q = 1;
771 995 }
772 996 stat &= ~FSL_DMA_SR_PE;
773 997 }
774 998
775 999 /* If the link descriptor segment transfer finishes,
776 1000 * we will recycle the used descriptor.
777 1001 */
778 1002 if (stat & FSL_DMA_SR_EOSI) {
779 1003 dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
780 1004 dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
781 1005 (unsigned long long)get_cdar(fsl_chan),
782 1006 (unsigned long long)get_ndar(fsl_chan));
783 1007 stat &= ~FSL_DMA_SR_EOSI;
784 1008 update_cookie = 1;
785 1009 }
786 1010
787 1011 /* For MPC8349, EOCDI event need to update cookie
788 1012 * and start the next transfer if it exist.
789 1013 */
790 1014 if (stat & FSL_DMA_SR_EOCDI) {
791 1015 dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
792 1016 stat &= ~FSL_DMA_SR_EOCDI;
793 1017 update_cookie = 1;
794 1018 xfer_ld_q = 1;
795 1019 }
796 1020
797 1021 /* If it current transfer is the end-of-transfer,
798 1022 * we should clear the Channel Start bit for
799 1023 * prepare next transfer.
800 1024 */
801 1025 if (stat & FSL_DMA_SR_EOLNI) {
802 1026 dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
803 1027 stat &= ~FSL_DMA_SR_EOLNI;
804 1028 xfer_ld_q = 1;
805 1029 }
806 1030
807 1031 if (update_cookie)
808 1032 fsl_dma_update_completed_cookie(fsl_chan);
809 1033 if (xfer_ld_q)
810 1034 fsl_chan_xfer_ld_queue(fsl_chan);
811 1035 if (stat)
812 1036 dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
813 1037 stat);
814 1038
815 1039 dev_dbg(fsl_chan->dev, "event: Exit\n");
816 1040 tasklet_schedule(&fsl_chan->tasklet);
817 1041 return IRQ_HANDLED;
818 1042 }
819 1043
820 1044 static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
821 1045 {
822 1046 struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
823 1047 u32 gsr;
824 1048 int ch_nr;
825 1049
826 1050 gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
827 1051 : in_le32(fdev->reg_base);
828 1052 ch_nr = (32 - ffs(gsr)) / 8;
829 1053
830 1054 return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
831 1055 fdev->chan[ch_nr]) : IRQ_NONE;
832 1056 }
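The channel dispatch in fsl_dma_do_interrupt() depends on the controller packing one 8-bit status field per channel into the 32-bit general status word, with channel 0 in the most significant byte; that layout is assumed here only for a quick worked example. If channel 2 alone has events pending, its byte occupies bits 8 through 15, so ffs(gsr) returns a value between 9 and 16, and ch_nr = (32 - ffs(gsr)) / 8 evaluates to (32 - 16) / 8 = 2 at one extreme and (32 - 9) / 8 = 2 at the other, selecting fdev->chan[2] in either case.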
833 1057
834 1058 static void dma_do_tasklet(unsigned long data)
835 1059 {
836 1060 struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
837 1061 fsl_chan_ld_cleanup(fsl_chan);
838 1062 }
839 1063
840 1064 static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
841 1065 struct device_node *node, u32 feature, const char *compatible)
842 1066 {
843 1067 struct fsl_dma_chan *new_fsl_chan;
844 1068 int err;
845 1069
846 1070 /* alloc channel */
847 1071 new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
848 1072 if (!new_fsl_chan) {
849 1073 dev_err(fdev->dev, "No free memory for allocating "
850 1074 "dma channels!\n");
851 1075 return -ENOMEM;
852 1076 }
853 1077
854 1078 /* get dma channel register base */
855 1079 err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
856 1080 if (err) {
857 1081 dev_err(fdev->dev, "Can't get %s property 'reg'\n",
858 1082 node->full_name);
859 1083 goto err_no_reg;
860 1084 }
861 1085
862 1086 new_fsl_chan->feature = feature;
863 1087
864 1088 if (!fdev->feature)
865 1089 fdev->feature = new_fsl_chan->feature;
866 1090
867 1091 /* If the DMA device's feature is different than its channels',
868 1092 * report the bug.
869 1093 */
870 1094 WARN_ON(fdev->feature != new_fsl_chan->feature);
871 1095
872 1096 new_fsl_chan->dev = fdev->dev;
873 1097 new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
874 1098 new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
875 1099
876 1100 new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
877 1101 if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
878 1102 dev_err(fdev->dev, "There is no %d channel!\n",
879 1103 new_fsl_chan->id);
880 1104 err = -EINVAL;
881 1105 goto err_no_chan;
882 1106 }
883 1107 fdev->chan[new_fsl_chan->id] = new_fsl_chan;
884 1108 tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
885 1109 (unsigned long)new_fsl_chan);
886 1110
887 1111 /* Init the channel */
888 1112 dma_init(new_fsl_chan);
889 1113
890 1114 /* Clear cdar registers */
891 1115 set_cdar(new_fsl_chan, 0);
892 1116
893 1117 switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
894 1118 case FSL_DMA_IP_85XX:
895 1119 new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
896 1120 case FSL_DMA_IP_83XX:
897 1121 new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
898 1122 new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
899 1123 new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
900 1124 new_fsl_chan->set_request_count = fsl_chan_set_request_count;
901 1125 }
902 1126
903 1127 spin_lock_init(&new_fsl_chan->desc_lock);
904 1128 INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
905 1129
906 1130 new_fsl_chan->common.device = &fdev->common;
907 1131
908 1132 /* Add the channel to DMA device channel list */
909 1133 list_add_tail(&new_fsl_chan->common.device_node,
910 1134 &fdev->common.channels);
911 1135 fdev->common.chancnt++;
912 1136
913 1137 new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
914 1138 if (new_fsl_chan->irq != NO_IRQ) {
915 1139 err = request_irq(new_fsl_chan->irq,
916 1140 &fsl_dma_chan_do_interrupt, IRQF_SHARED,
917 1141 "fsldma-channel", new_fsl_chan);
918 1142 if (err) {
919 1143 dev_err(fdev->dev, "DMA channel %s request_irq error "
920 1144 "with return %d\n", node->full_name, err);
921 1145 goto err_no_irq;
922 1146 }
923 1147 }
924 1148
925 1149 dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
926 1150 compatible,
927 1151 new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
928 1152
929 1153 return 0;
930 1154
931 1155 err_no_irq:
932 1156 list_del(&new_fsl_chan->common.device_node);
933 1157 err_no_chan:
934 1158 iounmap(new_fsl_chan->reg_base);
935 1159 err_no_reg:
936 1160 kfree(new_fsl_chan);
937 1161 return err;
938 1162 }
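The id calculation in fsl_dma_chan_probe() assumes the Elo register layout, in which the per-channel register blocks start at offset 0x100 from the controller base and are spaced 0x80 bytes apart. As a worked example with a purely hypothetical resource address, reg.start = 0xe0021180 gives ((0xe0021180 - 0x100) & 0xfff) >> 7 = (0xe0021080 & 0xfff) >> 7 = 0x80 >> 7 = 1, i.e. the second channel of the controller.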
939 1163
940 1164 static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
941 1165 {
942 1166 if (fchan->irq != NO_IRQ)
943 1167 free_irq(fchan->irq, fchan);
944 1168 list_del(&fchan->common.device_node);
945 1169 iounmap(fchan->reg_base);
946 1170 kfree(fchan);
947 1171 }
948 1172
949 1173 static int __devinit of_fsl_dma_probe(struct of_device *dev,
950 1174 const struct of_device_id *match)
951 1175 {
952 1176 int err;
953 1177 struct fsl_dma_device *fdev;
954 1178 struct device_node *child;
955 1179
956 1180 fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
957 1181 if (!fdev) {
958 1182 dev_err(&dev->dev, "No enough memory for 'priv'\n");
959 1183 return -ENOMEM;
960 1184 }
961 1185 fdev->dev = &dev->dev;
962 1186 INIT_LIST_HEAD(&fdev->common.channels);
963 1187
964 1188 /* get DMA controller register base */
965 1189 err = of_address_to_resource(dev->node, 0, &fdev->reg);
966 1190 if (err) {
967 1191 dev_err(&dev->dev, "Can't get %s property 'reg'\n",
968 1192 dev->node->full_name);
969 1193 goto err_no_reg;
970 1194 }
971 1195
972 1196 dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
973 1197 "controller at 0x%llx...\n",
974 1198 match->compatible, (unsigned long long)fdev->reg.start);
975 1199 fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
976 1200 - fdev->reg.start + 1);
977 1201
978 1202 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
979 1203 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
1204 dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
980 1205 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
981 1206 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
982 1207 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
983 1208 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
984 1209 fdev->common.device_is_tx_complete = fsl_dma_is_complete;
985 1210 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1211 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
1212 fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
986 1213 fdev->common.dev = &dev->dev;
987 1214
988 1215 fdev->irq = irq_of_parse_and_map(dev->node, 0);
989 1216 if (fdev->irq != NO_IRQ) {
990 1217 err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
991 1218 "fsldma-device", fdev);
992 1219 if (err) {
993 1220 dev_err(&dev->dev, "DMA device request_irq error "
994 1221 "with return %d\n", err);
995 1222 goto err;
996 1223 }
997 1224 }
998 1225
999 1226 dev_set_drvdata(&(dev->dev), fdev);
1000 1227
1001 1228 /* We cannot use of_platform_bus_probe() because there is no
1002 1229 * of_platform_bus_remove. Instead, we manually instantiate every DMA
1003 1230 * channel object.
1004 1231 */
1005 1232 for_each_child_of_node(dev->node, child) {
1006 1233 if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
1007 1234 fsl_dma_chan_probe(fdev, child,
1008 1235 FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
1009 1236 "fsl,eloplus-dma-channel");
1010 1237 if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
1011 1238 fsl_dma_chan_probe(fdev, child,
1012 1239 FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
1013 1240 "fsl,elo-dma-channel");
1014 1241 }
1015 1242
1016 1243 dma_async_device_register(&fdev->common);
1017 1244 return 0;
1018 1245
1019 1246 err:
1020 1247 iounmap(fdev->reg_base);
1021 1248 err_no_reg:
1022 1249 kfree(fdev);
1023 1250 return err;
1024 1251 }
1025 1252
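With DMA_SLAVE added to the capability mask and the device_prep_slave_sg / device_terminate_all hooks wired up in of_fsl_dma_probe() above, the controller can be driven through the generic dmaengine client API. The sketch below is illustrative only and not part of this commit: the accept-anything filter, the caller-supplied scatterlist and slave configuration, and the assumption that the driver picks its slave parameters up from chan->private are all hypothetical.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical filter: accept any channel offered for the DMA_SLAVE mask. */
static bool fsldma_any_chan_filter(struct dma_chan *chan, void *param)
{
        return true;
}

/* Illustrative only: copy from a device into @sgl with one slave transaction. */
static int fsldma_slave_copy_example(struct scatterlist *sgl,
                                     unsigned int sg_len, void *slave_config)
{
        struct dma_async_tx_descriptor *tx;
        struct dma_chan *chan;
        dma_cap_mask_t mask;
        dma_cookie_t cookie;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        chan = dma_request_channel(mask, fsldma_any_chan_filter, NULL);
        if (!chan)
                return -ENODEV;

        /* Assumption: the driver reads its slave parameters from chan->private. */
        chan->private = slave_config;

        tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                DMA_FROM_DEVICE, 0);
        if (!tx) {
                dma_release_channel(chan);
                return -EINVAL;
        }

        cookie = tx->tx_submit(tx);
        dma_async_issue_pending(chan);

        /* Later: check progress, or abort an unfinished transfer. */
        if (dma_async_is_tx_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS)
                chan->device->device_terminate_all(chan);

        dma_release_channel(chan);
        return 0;
}

A real client would normally match a specific channel in the filter callback and rely on the descriptor's completion callback rather than checking the cookie inline; the inline check is kept only to show fsl_dma_is_complete() and device_terminate_all() being reached through the public dmaengine entry points.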
1026 1253 static int of_fsl_dma_remove(struct of_device *of_dev)
1027 1254 {
1028 1255 struct fsl_dma_device *fdev;
1029 1256 unsigned int i;
1030 1257
1031 1258 fdev = dev_get_drvdata(&of_dev->dev);
1032 1259
1033 1260 dma_async_device_unregister(&fdev->common);
1034 1261
1035 1262 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
1036 1263 if (fdev->chan[i])
1037 1264 fsl_dma_chan_remove(fdev->chan[i]);
1038 1265
1039 1266 if (fdev->irq != NO_IRQ)
1040 1267 free_irq(fdev->irq, fdev);
1041 1268
1042 1269 iounmap(fdev->reg_base);
1043 1270
1044 1271 kfree(fdev);
1045 1272 dev_set_drvdata(&of_dev->dev, NULL);
1046 1273
1047 1274 return 0;
1048 1275 }
1049 1276
1050 1277 static struct of_device_id of_fsl_dma_ids[] = {
1051 1278 { .compatible = "fsl,eloplus-dma", },
1052 1279 { .compatible = "fsl,elo-dma", },
1053 1280 {}
1054 1281 };
1055 1282
1056 1283 static struct of_platform_driver of_fsl_dma_driver = {
1057 1284 .name = "fsl-elo-dma",
1058 1285 .match_table = of_fsl_dma_ids,
1059 1286 .probe = of_fsl_dma_probe,
1060 1287 .remove = of_fsl_dma_remove,
1061 1288 };
1062 1289
1063 1290 static __init int of_fsl_dma_init(void)
1064 1291 {
1065 1292 int ret;
1066 1293
1067 1294 pr_info("Freescale Elo / Elo Plus DMA driver\n");
1068 1295
1069 1296 ret = of_register_platform_driver(&of_fsl_dma_driver);
1070 1297 if (ret)
1071 1298 pr_err("fsldma: failed to register platform driver\n");
1072 1299
1073 1300 return ret;
1074 1301 }
1075 1302
1076 1303 static void __exit of_fsl_dma_exit(void)
1077 1304 {
1078 1305 of_unregister_platform_driver(&of_fsl_dma_driver);
1079 1306 }
1080 1307
1081 1308 subsys_initcall(of_fsl_dma_init);
1082 1309 module_exit(of_fsl_dma_exit);
1083 1310
1084 1311 MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
1085 1312 MODULE_LICENSE("GPL");
1086 1313