Commit 058276303dbc4ed089c1f7dad0871810b1f5ddf1
Committed by: Dan Williams
1 parent: 4aed79b281
Exists in: master and in 7 other branches
DMAENGINE: extend the control command to include an arg
This adds an argument to the DMAengine control function, so that we can
later provide control commands that need some external data passed in
through an argument, akin to the ioctl() operation prototype.

[dan.j.williams@intel.com: fix up some missed conversions]
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
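For reference, the shape of the change to the device_control callback in
include/linux/dmaengine.h, reconstructed here from the driver conversions
shown below (a sketch, not the verbatim hunk):

        /* before: a command opcode only */
        int (*device_control)(struct dma_chan *chan,
                              enum dma_ctrl_cmd cmd);

        /* after: an extra ioctl()-style argument, so future commands can
         * pass external data; existing callers simply pass 0 */
        int (*device_control)(struct dma_chan *chan,
                              enum dma_ctrl_cmd cmd,
                              unsigned long arg);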
Showing 14 changed files with 32 additions and 21 deletions (inline diff):
- drivers/dma/at_hdmac.c
- drivers/dma/coh901318.c
- drivers/dma/dw_dmac.c
- drivers/dma/fsldma.c
- drivers/dma/ipu/ipu_idmac.c
- drivers/dma/shdma.c
- drivers/dma/ste_dma40.c
- drivers/dma/timb_dma.c
- drivers/dma/txx9dmac.c
- drivers/mmc/host/atmel-mci.c
- drivers/serial/sh-sci.c
- drivers/video/mx3fb.c
- include/linux/dmaengine.h
- sound/soc/txx9/txx9aclc.c
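To illustrate the calling convention this enables, a hypothetical caller
(not part of this commit): commands that carry no payload pass 0 for the
new argument, while later commands can pass a pointer cast to unsigned
long, akin to ioctl():

        /* hypothetical usage sketch, assuming a channel was already
         * obtained via dma_request_channel() */
        static int stop_all_transfers(struct dma_chan *chan)
        {
                /* DMA_TERMINATE_ALL needs no external data: arg is 0 */
                return chan->device->device_control(chan,
                                                    DMA_TERMINATE_ALL, 0);
        }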
drivers/dma/at_hdmac.c
/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller,
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include "at_hdmac_regs.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac         : Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma  : Atmel DMA controller entity related
 * atc_ / atchan    : Atmel DMA Channel entity related
 */

#define ATC_DEFAULT_CFG   (ATC_FIFOCFG_HALFFIFO)
#define ATC_DEFAULT_CTRLA (0)
#define ATC_DEFAULT_CTRLB (ATC_SIF(0) \
                          |ATC_DIF(1))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
                 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->active_list,
                                struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
        return list_first_entry(&atchan->queue,
                                struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
                                            gfp_t gfp_flags)
{
        struct at_desc *desc = NULL;
        struct at_dma *atdma = to_at_dma(chan->device);
        dma_addr_t phys;

        desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
        if (desc) {
                memset(desc, 0, sizeof(struct at_desc));
                INIT_LIST_HEAD(&desc->tx_list);
                dma_async_tx_descriptor_init(&desc->txd, chan);
                /* txd.flags will be overwritten in prep functions */
                desc->txd.flags = DMA_CTRL_ACK;
                desc->txd.tx_submit = atc_tx_submit;
                desc->txd.phys = phys;
        }

        return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        struct at_desc *ret = NULL;
        unsigned int i = 0;
        LIST_HEAD(tmp_list);

        spin_lock_bh(&atchan->lock);
        list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
                i++;
                if (async_tx_test_ack(&desc->txd)) {
                        list_del(&desc->desc_node);
                        ret = desc;
                        break;
                }
                dev_dbg(chan2dev(&atchan->chan_common),
                        "desc %p not ACKed\n", desc);
        }
        spin_unlock_bh(&atchan->lock);
        dev_vdbg(chan2dev(&atchan->chan_common),
                 "scanned %u descriptors on freelist\n", i);

        /* no more descriptors available in initial pool: create one more */
        if (!ret) {
                ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
                if (ret) {
                        spin_lock_bh(&atchan->lock);
                        atchan->descs_allocated++;
                        spin_unlock_bh(&atchan->lock);
                } else {
                        dev_err(chan2dev(&atchan->chan_common),
                                "not enough descriptors available\n");
                }
        }

        return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
        if (desc) {
                struct at_desc *child;

                spin_lock_bh(&atchan->lock);
                list_for_each_entry(child, &desc->tx_list, desc_node)
                        dev_vdbg(chan2dev(&atchan->chan_common),
                                 "moving child desc %p to freelist\n",
                                 child);
                list_splice_init(&desc->tx_list, &atchan->free_list);
                dev_vdbg(chan2dev(&atchan->chan_common),
                         "moving desc %p to freelist\n", desc);
                list_add(&desc->desc_node, &atchan->free_list);
                spin_unlock_bh(&atchan->lock);
        }
}

/**
 * atc_assign_cookie - compute and assign new cookie
 * @atchan: channel we work on
 * @desc: descriptor to assign cookie for
 *
 * Called with atchan->lock held and bh disabled
 */
static dma_cookie_t
atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
{
        dma_cookie_t cookie = atchan->chan_common.cookie;

        if (++cookie < 0)
                cookie = 1;

        atchan->chan_common.cookie = cookie;
        desc->txd.cookie = cookie;

        return cookie;
}

/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
        struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

        /* ASSERT: channel is idle */
        if (atc_chan_is_enabled(atchan)) {
                dev_err(chan2dev(&atchan->chan_common),
                        "BUG: Attempted to start non-idle channel\n");
                dev_err(chan2dev(&atchan->chan_common),
                        "  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
                        channel_readl(atchan, SADDR),
                        channel_readl(atchan, DADDR),
                        channel_readl(atchan, CTRLA),
                        channel_readl(atchan, CTRLB),
                        channel_readl(atchan, DSCR));

                /* The tasklet will hopefully advance the queue... */
                return;
        }

        vdbg_dump_regs(atchan);

        /* clear any pending interrupt */
        while (dma_readl(atdma, EBCISR))
                cpu_relax();

        channel_writel(atchan, SADDR, 0);
        channel_writel(atchan, DADDR, 0);
        channel_writel(atchan, CTRLA, 0);
        channel_writel(atchan, CTRLB, 0);
        channel_writel(atchan, DSCR, first->txd.phys);
        dma_writel(atdma, CHER, atchan->mask);

        vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
        dma_async_tx_callback callback;
        void *param;
        struct dma_async_tx_descriptor *txd = &desc->txd;

        dev_vdbg(chan2dev(&atchan->chan_common),
                 "descriptor %u complete\n", txd->cookie);

        atchan->completed_cookie = txd->cookie;
        callback = txd->callback;
        param = txd->callback_param;

        /* move children to free_list */
        list_splice_init(&desc->tx_list, &atchan->free_list);
        /* move myself to free_list */
        list_move(&desc->desc_node, &atchan->free_list);

        /* unmap dma addresses */
        if (!atchan->chan_common.private) {
                struct device *parent = chan2parent(&atchan->chan_common);
                if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
                        if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
                                dma_unmap_single(parent,
                                                 desc->lli.daddr,
                                                 desc->len, DMA_FROM_DEVICE);
                        else
                                dma_unmap_page(parent,
                                               desc->lli.daddr,
                                               desc->len, DMA_FROM_DEVICE);
                }
                if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
                        if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
                                dma_unmap_single(parent,
                                                 desc->lli.saddr,
                                                 desc->len, DMA_TO_DEVICE);
                        else
                                dma_unmap_page(parent,
                                               desc->lli.saddr,
                                               desc->len, DMA_TO_DEVICE);
                }
        }

        /*
         * The API requires that no submissions are done from a
         * callback, so we don't need to drop the lock here
         */
        if (callback)
                callback(param);

        dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        LIST_HEAD(list);

        dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

        BUG_ON(atc_chan_is_enabled(atchan));

        /*
         * Submit queued descriptors ASAP, i.e. before we go through
         * the completed ones.
         */
        if (!list_empty(&atchan->queue))
                atc_dostart(atchan, atc_first_queued(atchan));
        /* empty active_list now it is completed */
        list_splice_init(&atchan->active_list, &list);
        /* empty queue list by moving descriptors (if any) to active_list */
        list_splice_init(&atchan->queue, &atchan->active_list);

        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
        struct at_desc *desc, *_desc;
        struct at_desc *child;

        dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

        list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
                if (!(desc->lli.ctrla & ATC_DONE))
                        /* This one is currently in progress */
                        return;

                list_for_each_entry(child, &desc->tx_list, desc_node)
                        if (!(child->lli.ctrla & ATC_DONE))
                                /* Currently in progress */
                                return;

                /*
                 * No descriptors so far seem to be in progress, i.e.
                 * this chain must be done.
                 */
                atc_chain_complete(atchan, desc);
        }
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
        dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

        if (list_empty(&atchan->active_list) ||
            list_is_singular(&atchan->active_list)) {
                atc_complete_all(atchan);
        } else {
                atc_chain_complete(atchan, atc_first_active(atchan));
                /* advance work */
                atc_dostart(atchan, atc_first_active(atchan));
        }
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
        struct at_desc *bad_desc;
        struct at_desc *child;

        /*
         * The descriptor currently at the head of the active list is
         * broken. Since we don't have any way to report errors, we'll
         * just have to scream loudly and try to carry on.
         */
        bad_desc = atc_first_active(atchan);
        list_del_init(&bad_desc->desc_node);

        /* As we are stopped, take advantage to push queued descriptors
         * in active_list */
        list_splice_init(&atchan->queue, atchan->active_list.prev);

        /* Try to restart the controller */
        if (!list_empty(&atchan->active_list))
                atc_dostart(atchan, atc_first_active(atchan));

        /*
         * KERN_CRITICAL may seem harsh, but since this only happens
         * when someone submits a bad physical address in a
         * descriptor, we should consider ourselves lucky that the
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
        dev_crit(chan2dev(&atchan->chan_common),
                 "Bad descriptor submitted for DMA!\n");
        dev_crit(chan2dev(&atchan->chan_common),
                 "  cookie: %d\n", bad_desc->txd.cookie);
        atc_dump_lli(atchan, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                atc_dump_lli(atchan, &child->lli);

        /* Pretend the descriptor completed successfully */
        atc_chain_complete(atchan, bad_desc);
}


/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
        struct at_dma_chan *atchan = (struct at_dma_chan *)data;

        /* Channel cannot be enabled here */
        if (atc_chan_is_enabled(atchan)) {
                dev_err(chan2dev(&atchan->chan_common),
                        "BUG: channel enabled in tasklet\n");
                return;
        }

        spin_lock(&atchan->lock);
        if (test_and_clear_bit(0, &atchan->error_status))
                atc_handle_error(atchan);
        else
                atc_advance_work(atchan);

        spin_unlock(&atchan->lock);
}

static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
        struct at_dma *atdma = (struct at_dma *)dev_id;
        struct at_dma_chan *atchan;
        int i;
        u32 status, pending, imr;
        int ret = IRQ_NONE;

        do {
                imr = dma_readl(atdma, EBCIMR);
                status = dma_readl(atdma, EBCISR);
                pending = status & imr;

                if (!pending)
                        break;

                dev_vdbg(atdma->dma_common.dev,
                         "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
                         status, imr, pending);

                for (i = 0; i < atdma->dma_common.chancnt; i++) {
                        atchan = &atdma->chan[i];
                        if (pending & (AT_DMA_CBTC(i) | AT_DMA_ERR(i))) {
                                if (pending & AT_DMA_ERR(i)) {
                                        /* Disable channel on AHB error */
                                        dma_writel(atdma, CHDR, atchan->mask);
                                        /* Give information to tasklet */
                                        set_bit(0, &atchan->error_status);
                                }
                                tasklet_schedule(&atchan->tasklet);
                                ret = IRQ_HANDLED;
                        }
                }

        } while (pending);

        return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @desc: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct at_desc *desc = txd_to_at_desc(tx);
        struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
        dma_cookie_t cookie;

        spin_lock_bh(&atchan->lock);
        cookie = atc_assign_cookie(atchan, desc);

        if (list_empty(&atchan->active_list)) {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
                         desc->txd.cookie);
                atc_dostart(atchan, desc);
                list_add_tail(&desc->desc_node, &atchan->active_list);
        } else {
                dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
                         desc->txd.cookie);
                list_add_tail(&desc->desc_node, &atchan->queue);
        }

        spin_unlock_bh(&atchan->lock);

        return cookie;
}

/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                size_t len, unsigned long flags)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);
        struct at_desc *desc = NULL;
        struct at_desc *first = NULL;
        struct at_desc *prev = NULL;
        size_t xfer_count;
        size_t offset;
        unsigned int src_width;
        unsigned int dst_width;
        u32 ctrla;
        u32 ctrlb;

        dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
                 dest, src, len, flags);

        if (unlikely(!len)) {
                dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
                return NULL;
        }

        ctrla = ATC_DEFAULT_CTRLA;
        ctrlb = ATC_DEFAULT_CTRLB
                | ATC_SRC_ADDR_MODE_INCR
                | ATC_DST_ADDR_MODE_INCR
                | ATC_FC_MEM2MEM;

        /*
         * We can be a lot more clever here, but this should take care
         * of the most common optimization.
         */
        if (!((src | dest | len) & 3)) {
                ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
                src_width = dst_width = 2;
        } else if (!((src | dest | len) & 1)) {
                ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
                src_width = dst_width = 1;
        } else {
                ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
                src_width = dst_width = 0;
        }

        for (offset = 0; offset < len; offset += xfer_count << src_width) {
                xfer_count = min_t(size_t, (len - offset) >> src_width,
                                   ATC_BTSIZE_MAX);

                desc = atc_desc_get(atchan);
                if (!desc)
                        goto err_desc_get;

                desc->lli.saddr = src + offset;
                desc->lli.daddr = dest + offset;
                desc->lli.ctrla = ctrla | xfer_count;
                desc->lli.ctrlb = ctrlb;

                desc->txd.cookie = 0;
                async_tx_ack(&desc->txd);

                if (!first) {
                        first = desc;
                } else {
                        /* inform the HW lli about chaining */
                        prev->lli.dscr = desc->txd.phys;
                        /* insert the link descriptor to the LD ring */
                        list_add_tail(&desc->desc_node,
                                      &first->tx_list);
                }
                prev = desc;
        }

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = len;

        /* set end-of-link to the last link descriptor of list */
        set_desc_eol(desc);

        desc->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        atc_desc_put(atchan, first);
        return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                unsigned int sg_len, enum dma_data_direction direction,
                unsigned long flags)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);
        struct at_dma_slave *atslave = chan->private;
        struct at_desc *first = NULL;
        struct at_desc *prev = NULL;
        u32 ctrla;
        u32 ctrlb;
        dma_addr_t reg;
        unsigned int reg_width;
        unsigned int mem_width;
        unsigned int i;
        struct scatterlist *sg;
        size_t total_len = 0;

        dev_vdbg(chan2dev(chan), "prep_slave_sg: %s f0x%lx\n",
                 direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
                 flags);

        if (unlikely(!atslave || !sg_len)) {
                dev_dbg(chan2dev(chan), "prep_slave_sg: length is zero!\n");
                return NULL;
        }

        reg_width = atslave->reg_width;

        ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
        ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN;

        switch (direction) {
        case DMA_TO_DEVICE:
                ctrla |= ATC_DST_WIDTH(reg_width);
                ctrlb |= ATC_DST_ADDR_MODE_FIXED
                        | ATC_SRC_ADDR_MODE_INCR
                        | ATC_FC_MEM2PER;
                reg = atslave->tx_reg;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct at_desc *desc;
                        u32 len;
                        u32 mem;

                        desc = atc_desc_get(atchan);
                        if (!desc)
                                goto err_desc_get;

                        mem = sg_phys(sg);
                        len = sg_dma_len(sg);
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

                        desc->lli.saddr = mem;
                        desc->lli.daddr = reg;
                        desc->lli.ctrla = ctrla
                                        | ATC_SRC_WIDTH(mem_width)
                                        | len >> mem_width;
                        desc->lli.ctrlb = ctrlb;

                        if (!first) {
                                first = desc;
                        } else {
                                /* inform the HW lli about chaining */
                                prev->lli.dscr = desc->txd.phys;
                                /* insert the link descriptor to the LD ring */
                                list_add_tail(&desc->desc_node,
                                              &first->tx_list);
                        }
                        prev = desc;
                        total_len += len;
                }
                break;
        case DMA_FROM_DEVICE:
                ctrla |= ATC_SRC_WIDTH(reg_width);
                ctrlb |= ATC_DST_ADDR_MODE_INCR
                        | ATC_SRC_ADDR_MODE_FIXED
                        | ATC_FC_PER2MEM;

                reg = atslave->rx_reg;
                for_each_sg(sgl, sg, sg_len, i) {
                        struct at_desc *desc;
                        u32 len;
                        u32 mem;

                        desc = atc_desc_get(atchan);
                        if (!desc)
                                goto err_desc_get;

                        mem = sg_phys(sg);
                        len = sg_dma_len(sg);
                        mem_width = 2;
                        if (unlikely(mem & 3 || len & 3))
                                mem_width = 0;

                        desc->lli.saddr = reg;
                        desc->lli.daddr = mem;
                        desc->lli.ctrla = ctrla
                                        | ATC_DST_WIDTH(mem_width)
                                        | len >> mem_width;
                        desc->lli.ctrlb = ctrlb;

                        if (!first) {
                                first = desc;
                        } else {
                                /* inform the HW lli about chaining */
                                prev->lli.dscr = desc->txd.phys;
                                /* insert the link descriptor to the LD ring */
                                list_add_tail(&desc->desc_node,
                                              &first->tx_list);
                        }
                        prev = desc;
                        total_len += len;
                }
                break;
        default:
                return NULL;
        }

        /* set end-of-link to the last link descriptor of list */
        set_desc_eol(prev);

        /* First descriptor of the chain embeds additional information */
        first->txd.cookie = -EBUSY;
        first->len = total_len;

        /* last link descriptor of list is responsible for flags */
        prev->txd.flags = flags; /* client is in control of this ack */

        return &first->txd;

err_desc_get:
        dev_err(chan2dev(chan), "not enough descriptors available\n");
        atc_desc_put(atchan, first);
        return NULL;
}

-static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+                       unsigned long arg)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);
        struct at_dma *atdma = to_at_dma(chan->device);
        struct at_desc *desc, *_desc;
        LIST_HEAD(list);

        /* Only supports DMA_TERMINATE_ALL */
        if (cmd != DMA_TERMINATE_ALL)
                return -ENXIO;

        /*
         * This is only called when something went wrong elsewhere, so
         * we don't really care about the data. Just disable the
         * channel. We still have to poll the channel enable bit due
         * to AHB/HSB limitations.
         */
        spin_lock_bh(&atchan->lock);

        dma_writel(atdma, CHDR, atchan->mask);

        /* confirm that this channel is disabled */
        while (dma_readl(atdma, CHSR) & atchan->mask)
                cpu_relax();

        /* active_list entries will end up before queued entries */
        list_splice_init(&atchan->queue, &list);
        list_splice_init(&atchan->active_list, &list);

        spin_unlock_bh(&atchan->lock);

        /* Flush all pending and queued descriptors */
        list_for_each_entry_safe(desc, _desc, &list, desc_node)
                atc_chain_complete(atchan, desc);

        return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
                dma_cookie_t cookie,
                struct dma_tx_state *txstate)
{
        struct at_dma_chan *atchan = to_at_dma_chan(chan);
        dma_cookie_t last_used;
        dma_cookie_t last_complete;
        enum dma_status ret;

        spin_lock_bh(&atchan->lock);

        last_complete = atchan->completed_cookie;
        last_used = chan->cookie;

        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret != DMA_SUCCESS) {
                atc_cleanup_descriptors(atchan);

                last_complete = atchan->completed_cookie;
                last_used = chan->cookie;

                ret = dma_async_is_complete(cookie, last_complete, last_used);
        }

        spin_unlock_bh(&atchan->lock);

        dma_set_tx_state(txstate, last_complete, last_used, 0);
        dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n",
                 cookie, last_complete ? last_complete : 0,
                 last_used ? last_used : 0);

        return ret;
}
844 | 845 | ||
845 | /** | 846 | /** |
846 | * atc_issue_pending - try to finish work | 847 | * atc_issue_pending - try to finish work |
847 | * @chan: target DMA channel | 848 | * @chan: target DMA channel |
848 | */ | 849 | */ |
849 | static void atc_issue_pending(struct dma_chan *chan) | 850 | static void atc_issue_pending(struct dma_chan *chan) |
850 | { | 851 | { |
851 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 852 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
852 | 853 | ||
853 | dev_vdbg(chan2dev(chan), "issue_pending\n"); | 854 | dev_vdbg(chan2dev(chan), "issue_pending\n"); |
854 | 855 | ||
855 | if (!atc_chan_is_enabled(atchan)) { | 856 | if (!atc_chan_is_enabled(atchan)) { |
856 | spin_lock_bh(&atchan->lock); | 857 | spin_lock_bh(&atchan->lock); |
857 | atc_advance_work(atchan); | 858 | atc_advance_work(atchan); |
858 | spin_unlock_bh(&atchan->lock); | 859 | spin_unlock_bh(&atchan->lock); |
859 | } | 860 | } |
860 | } | 861 | } |
861 | 862 | ||
862 | /** | 863 | /** |
863 | * atc_alloc_chan_resources - allocate resources for DMA channel | 864 | * atc_alloc_chan_resources - allocate resources for DMA channel |
864 | * @chan: allocate descriptor resources for this channel | 865 | * @chan: allocate descriptor resources for this channel |
866 | * | 867 | * |
867 | * return - the number of allocated descriptors | 868 | * return - the number of allocated descriptors |
868 | */ | 869 | */ |
869 | static int atc_alloc_chan_resources(struct dma_chan *chan) | 870 | static int atc_alloc_chan_resources(struct dma_chan *chan) |
870 | { | 871 | { |
871 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 872 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
872 | struct at_dma *atdma = to_at_dma(chan->device); | 873 | struct at_dma *atdma = to_at_dma(chan->device); |
873 | struct at_desc *desc; | 874 | struct at_desc *desc; |
874 | struct at_dma_slave *atslave; | 875 | struct at_dma_slave *atslave; |
875 | int i; | 876 | int i; |
876 | u32 cfg; | 877 | u32 cfg; |
877 | LIST_HEAD(tmp_list); | 878 | LIST_HEAD(tmp_list); |
878 | 879 | ||
879 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); | 880 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); |
880 | 881 | ||
881 | /* ASSERT: channel is idle */ | 882 | /* ASSERT: channel is idle */ |
882 | if (atc_chan_is_enabled(atchan)) { | 883 | if (atc_chan_is_enabled(atchan)) { |
883 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); | 884 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
884 | return -EIO; | 885 | return -EIO; |
885 | } | 886 | } |
886 | 887 | ||
887 | cfg = ATC_DEFAULT_CFG; | 888 | cfg = ATC_DEFAULT_CFG; |
888 | 889 | ||
889 | atslave = chan->private; | 890 | atslave = chan->private; |
890 | if (atslave) { | 891 | if (atslave) { |
891 | /* | 892 | /* |
892 | * We need controller-specific data to set up slave | 893 | * We need controller-specific data to set up slave |
893 | * transfers. | 894 | * transfers. |
894 | */ | 895 | */ |
895 | BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); | 896 | BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev); |
896 | 897 | ||
897 | /* if a cfg value was specified, take it instead of the default */ | 898 | /* if a cfg value was specified, take it instead of the default */ |
898 | if (atslave->cfg) | 899 | if (atslave->cfg) |
899 | cfg = atslave->cfg; | 900 | cfg = atslave->cfg; |
900 | } | 901 | } |
901 | 902 | ||
902 | /* have we already been set up? | 903 | /* have we already been set up? |
903 | * if so, reconfigure the channel but don't reallocate descriptors */ | 904 | * if so, reconfigure the channel but don't reallocate descriptors */ |
904 | if (!list_empty(&atchan->free_list)) | 905 | if (!list_empty(&atchan->free_list)) |
905 | return atchan->descs_allocated; | 906 | return atchan->descs_allocated; |
906 | 907 | ||
907 | /* Allocate initial pool of descriptors */ | 908 | /* Allocate initial pool of descriptors */ |
908 | for (i = 0; i < init_nr_desc_per_channel; i++) { | 909 | for (i = 0; i < init_nr_desc_per_channel; i++) { |
909 | desc = atc_alloc_descriptor(chan, GFP_KERNEL); | 910 | desc = atc_alloc_descriptor(chan, GFP_KERNEL); |
910 | if (!desc) { | 911 | if (!desc) { |
911 | dev_err(atdma->dma_common.dev, | 912 | dev_err(atdma->dma_common.dev, |
912 | "Only %d initial descriptors\n", i); | 913 | "Only %d initial descriptors\n", i); |
913 | break; | 914 | break; |
914 | } | 915 | } |
915 | list_add_tail(&desc->desc_node, &tmp_list); | 916 | list_add_tail(&desc->desc_node, &tmp_list); |
916 | } | 917 | } |
917 | 918 | ||
918 | spin_lock_bh(&atchan->lock); | 919 | spin_lock_bh(&atchan->lock); |
919 | atchan->descs_allocated = i; | 920 | atchan->descs_allocated = i; |
920 | list_splice(&tmp_list, &atchan->free_list); | 921 | list_splice(&tmp_list, &atchan->free_list); |
921 | atchan->completed_cookie = chan->cookie = 1; | 922 | atchan->completed_cookie = chan->cookie = 1; |
922 | spin_unlock_bh(&atchan->lock); | 923 | spin_unlock_bh(&atchan->lock); |
923 | 924 | ||
924 | /* channel parameters */ | 925 | /* channel parameters */ |
925 | channel_writel(atchan, CFG, cfg); | 926 | channel_writel(atchan, CFG, cfg); |
926 | 927 | ||
927 | dev_dbg(chan2dev(chan), | 928 | dev_dbg(chan2dev(chan), |
928 | "alloc_chan_resources: allocated %d descriptors\n", | 929 | "alloc_chan_resources: allocated %d descriptors\n", |
929 | atchan->descs_allocated); | 930 | atchan->descs_allocated); |
930 | 931 | ||
931 | return atchan->descs_allocated; | 932 | return atchan->descs_allocated; |
932 | } | 933 | } |
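
A hedged sketch of how a client reaches this allocation path: the filter below is hypothetical and simply attaches board-specific slave data, which atc_alloc_chan_resources() then reads from chan->private (struct at_dma_slave is assumed to come from <mach/at_hdmac.h>).

#include <linux/dmaengine.h>
#include <mach/at_hdmac.h>

/* Hypothetical filter: accept the channel and hand over slave data. */
static bool at_example_filter(struct dma_chan *chan, void *slave)
{
        chan->private = slave;  /* consumed by atc_alloc_chan_resources() */
        return true;
}

static struct dma_chan *at_example_request(struct at_dma_slave *atslave)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        return dma_request_channel(mask, at_example_filter, atslave);
}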
933 | 934 | ||
934 | /** | 935 | /** |
935 | * atc_free_chan_resources - free all channel resources | 936 | * atc_free_chan_resources - free all channel resources |
936 | * @chan: DMA channel | 937 | * @chan: DMA channel |
937 | */ | 938 | */ |
938 | static void atc_free_chan_resources(struct dma_chan *chan) | 939 | static void atc_free_chan_resources(struct dma_chan *chan) |
939 | { | 940 | { |
940 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 941 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
941 | struct at_dma *atdma = to_at_dma(chan->device); | 942 | struct at_dma *atdma = to_at_dma(chan->device); |
942 | struct at_desc *desc, *_desc; | 943 | struct at_desc *desc, *_desc; |
943 | LIST_HEAD(list); | 944 | LIST_HEAD(list); |
944 | 945 | ||
945 | dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", | 946 | dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n", |
946 | atchan->descs_allocated); | 947 | atchan->descs_allocated); |
947 | 948 | ||
948 | /* ASSERT: channel is idle */ | 949 | /* ASSERT: channel is idle */ |
949 | BUG_ON(!list_empty(&atchan->active_list)); | 950 | BUG_ON(!list_empty(&atchan->active_list)); |
950 | BUG_ON(!list_empty(&atchan->queue)); | 951 | BUG_ON(!list_empty(&atchan->queue)); |
951 | BUG_ON(atc_chan_is_enabled(atchan)); | 952 | BUG_ON(atc_chan_is_enabled(atchan)); |
952 | 953 | ||
953 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { | 954 | list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { |
954 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | 955 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
955 | list_del(&desc->desc_node); | 956 | list_del(&desc->desc_node); |
956 | /* free link descriptor */ | 957 | /* free link descriptor */ |
957 | dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); | 958 | dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); |
958 | } | 959 | } |
959 | list_splice_init(&atchan->free_list, &list); | 960 | list_splice_init(&atchan->free_list, &list); |
960 | atchan->descs_allocated = 0; | 961 | atchan->descs_allocated = 0; |
961 | 962 | ||
962 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); | 963 | dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); |
963 | } | 964 | } |
964 | 965 | ||
965 | 966 | ||
966 | /*-- Module Management -----------------------------------------------*/ | 967 | /*-- Module Management -----------------------------------------------*/ |
967 | 968 | ||
968 | /** | 969 | /** |
969 | * at_dma_off - disable DMA controller | 970 | * at_dma_off - disable DMA controller |
970 | * @atdma: the Atmel HDMAC device | 971 | * @atdma: the Atmel HDMAC device |
971 | */ | 972 | */ |
972 | static void at_dma_off(struct at_dma *atdma) | 973 | static void at_dma_off(struct at_dma *atdma) |
973 | { | 974 | { |
974 | dma_writel(atdma, EN, 0); | 975 | dma_writel(atdma, EN, 0); |
975 | 976 | ||
976 | /* disable all interrupts */ | 977 | /* disable all interrupts */ |
977 | dma_writel(atdma, EBCIDR, -1L); | 978 | dma_writel(atdma, EBCIDR, -1L); |
978 | 979 | ||
979 | /* confirm that all channels are disabled */ | 980 | /* confirm that all channels are disabled */ |
980 | while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) | 981 | while (dma_readl(atdma, CHSR) & atdma->all_chan_mask) |
981 | cpu_relax(); | 982 | cpu_relax(); |
982 | } | 983 | } |
983 | 984 | ||
984 | static int __init at_dma_probe(struct platform_device *pdev) | 985 | static int __init at_dma_probe(struct platform_device *pdev) |
985 | { | 986 | { |
986 | struct at_dma_platform_data *pdata; | 987 | struct at_dma_platform_data *pdata; |
987 | struct resource *io; | 988 | struct resource *io; |
988 | struct at_dma *atdma; | 989 | struct at_dma *atdma; |
989 | size_t size; | 990 | size_t size; |
990 | int irq; | 991 | int irq; |
991 | int err; | 992 | int err; |
992 | int i; | 993 | int i; |
993 | 994 | ||
994 | /* get DMA Controller parameters from platform */ | 995 | /* get DMA Controller parameters from platform */ |
995 | pdata = pdev->dev.platform_data; | 996 | pdata = pdev->dev.platform_data; |
996 | if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS) | 997 | if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS) |
997 | return -EINVAL; | 998 | return -EINVAL; |
998 | 999 | ||
999 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1000 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1000 | if (!io) | 1001 | if (!io) |
1001 | return -EINVAL; | 1002 | return -EINVAL; |
1002 | 1003 | ||
1003 | irq = platform_get_irq(pdev, 0); | 1004 | irq = platform_get_irq(pdev, 0); |
1004 | if (irq < 0) | 1005 | if (irq < 0) |
1005 | return irq; | 1006 | return irq; |
1006 | 1007 | ||
1007 | size = sizeof(struct at_dma); | 1008 | size = sizeof(struct at_dma); |
1008 | size += pdata->nr_channels * sizeof(struct at_dma_chan); | 1009 | size += pdata->nr_channels * sizeof(struct at_dma_chan); |
1009 | atdma = kzalloc(size, GFP_KERNEL); | 1010 | atdma = kzalloc(size, GFP_KERNEL); |
1010 | if (!atdma) | 1011 | if (!atdma) |
1011 | return -ENOMEM; | 1012 | return -ENOMEM; |
1012 | 1013 | ||
1013 | /* discover transaction capabilities from the platform data */ | 1014 | /* discover transaction capabilities from the platform data */ |
1014 | atdma->dma_common.cap_mask = pdata->cap_mask; | 1015 | atdma->dma_common.cap_mask = pdata->cap_mask; |
1015 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1016 | atdma->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1016 | 1017 | ||
1017 | size = io->end - io->start + 1; | 1018 | size = io->end - io->start + 1; |
1018 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { | 1019 | if (!request_mem_region(io->start, size, pdev->dev.driver->name)) { |
1019 | err = -EBUSY; | 1020 | err = -EBUSY; |
1020 | goto err_kfree; | 1021 | goto err_kfree; |
1021 | } | 1022 | } |
1022 | 1023 | ||
1023 | atdma->regs = ioremap(io->start, size); | 1024 | atdma->regs = ioremap(io->start, size); |
1024 | if (!atdma->regs) { | 1025 | if (!atdma->regs) { |
1025 | err = -ENOMEM; | 1026 | err = -ENOMEM; |
1026 | goto err_release_r; | 1027 | goto err_release_r; |
1027 | } | 1028 | } |
1028 | 1029 | ||
1029 | atdma->clk = clk_get(&pdev->dev, "dma_clk"); | 1030 | atdma->clk = clk_get(&pdev->dev, "dma_clk"); |
1030 | if (IS_ERR(atdma->clk)) { | 1031 | if (IS_ERR(atdma->clk)) { |
1031 | err = PTR_ERR(atdma->clk); | 1032 | err = PTR_ERR(atdma->clk); |
1032 | goto err_clk; | 1033 | goto err_clk; |
1033 | } | 1034 | } |
1034 | clk_enable(atdma->clk); | 1035 | clk_enable(atdma->clk); |
1035 | 1036 | ||
1036 | /* force dma off, just in case */ | 1037 | /* force dma off, just in case */ |
1037 | at_dma_off(atdma); | 1038 | at_dma_off(atdma); |
1038 | 1039 | ||
1039 | err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); | 1040 | err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma); |
1040 | if (err) | 1041 | if (err) |
1041 | goto err_irq; | 1042 | goto err_irq; |
1042 | 1043 | ||
1043 | platform_set_drvdata(pdev, atdma); | 1044 | platform_set_drvdata(pdev, atdma); |
1044 | 1045 | ||
1045 | /* create a pool of consistent memory blocks for hardware descriptors */ | 1046 | /* create a pool of consistent memory blocks for hardware descriptors */ |
1046 | atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", | 1047 | atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool", |
1047 | &pdev->dev, sizeof(struct at_desc), | 1048 | &pdev->dev, sizeof(struct at_desc), |
1048 | 4 /* word alignment */, 0); | 1049 | 4 /* word alignment */, 0); |
1049 | if (!atdma->dma_desc_pool) { | 1050 | if (!atdma->dma_desc_pool) { |
1050 | dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); | 1051 | dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); |
1051 | err = -ENOMEM; | 1052 | err = -ENOMEM; |
1052 | goto err_pool_create; | 1053 | goto err_pool_create; |
1053 | } | 1054 | } |
1054 | 1055 | ||
1055 | /* clear any pending interrupt */ | 1056 | /* clear any pending interrupt */ |
1056 | while (dma_readl(atdma, EBCISR)) | 1057 | while (dma_readl(atdma, EBCISR)) |
1057 | cpu_relax(); | 1058 | cpu_relax(); |
1058 | 1059 | ||
1059 | /* initialize channels related values */ | 1060 | /* initialize channels related values */ |
1060 | INIT_LIST_HEAD(&atdma->dma_common.channels); | 1061 | INIT_LIST_HEAD(&atdma->dma_common.channels); |
1061 | for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { | 1062 | for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { |
1062 | struct at_dma_chan *atchan = &atdma->chan[i]; | 1063 | struct at_dma_chan *atchan = &atdma->chan[i]; |
1063 | 1064 | ||
1064 | atchan->chan_common.device = &atdma->dma_common; | 1065 | atchan->chan_common.device = &atdma->dma_common; |
1065 | atchan->chan_common.cookie = atchan->completed_cookie = 1; | 1066 | atchan->chan_common.cookie = atchan->completed_cookie = 1; |
1066 | atchan->chan_common.chan_id = i; | 1067 | atchan->chan_common.chan_id = i; |
1067 | list_add_tail(&atchan->chan_common.device_node, | 1068 | list_add_tail(&atchan->chan_common.device_node, |
1068 | &atdma->dma_common.channels); | 1069 | &atdma->dma_common.channels); |
1069 | 1070 | ||
1070 | atchan->ch_regs = atdma->regs + ch_regs(i); | 1071 | atchan->ch_regs = atdma->regs + ch_regs(i); |
1071 | spin_lock_init(&atchan->lock); | 1072 | spin_lock_init(&atchan->lock); |
1072 | atchan->mask = 1 << i; | 1073 | atchan->mask = 1 << i; |
1073 | 1074 | ||
1074 | INIT_LIST_HEAD(&atchan->active_list); | 1075 | INIT_LIST_HEAD(&atchan->active_list); |
1075 | INIT_LIST_HEAD(&atchan->queue); | 1076 | INIT_LIST_HEAD(&atchan->queue); |
1076 | INIT_LIST_HEAD(&atchan->free_list); | 1077 | INIT_LIST_HEAD(&atchan->free_list); |
1077 | 1078 | ||
1078 | tasklet_init(&atchan->tasklet, atc_tasklet, | 1079 | tasklet_init(&atchan->tasklet, atc_tasklet, |
1079 | (unsigned long)atchan); | 1080 | (unsigned long)atchan); |
1080 | atc_enable_irq(atchan); | 1081 | atc_enable_irq(atchan); |
1081 | } | 1082 | } |
1082 | 1083 | ||
1083 | /* set base routines */ | 1084 | /* set base routines */ |
1084 | atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; | 1085 | atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; |
1085 | atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; | 1086 | atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; |
1086 | atdma->dma_common.device_tx_status = atc_tx_status; | 1087 | atdma->dma_common.device_tx_status = atc_tx_status; |
1087 | atdma->dma_common.device_issue_pending = atc_issue_pending; | 1088 | atdma->dma_common.device_issue_pending = atc_issue_pending; |
1088 | atdma->dma_common.dev = &pdev->dev; | 1089 | atdma->dma_common.dev = &pdev->dev; |
1089 | 1090 | ||
1090 | /* set prep routines based on capability */ | 1091 | /* set prep routines based on capability */ |
1091 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) | 1092 | if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) |
1092 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; | 1093 | atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; |
1093 | 1094 | ||
1094 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { | 1095 | if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { |
1095 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; | 1096 | atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; |
1096 | atdma->dma_common.device_control = atc_control; | 1097 | atdma->dma_common.device_control = atc_control; |
1097 | } | 1098 | } |
1098 | 1099 | ||
1099 | dma_writel(atdma, EN, AT_DMA_ENABLE); | 1100 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
1100 | 1101 | ||
1101 | dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", | 1102 | dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", |
1102 | dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", | 1103 | dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", |
1103 | dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", | 1104 | dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", |
1104 | atdma->dma_common.chancnt); | 1105 | atdma->dma_common.chancnt); |
1105 | 1106 | ||
1106 | dma_async_device_register(&atdma->dma_common); | 1107 | dma_async_device_register(&atdma->dma_common); |
1107 | 1108 | ||
1108 | return 0; | 1109 | return 0; |
1109 | 1110 | ||
1110 | err_pool_create: | 1111 | err_pool_create: |
1111 | platform_set_drvdata(pdev, NULL); | 1112 | platform_set_drvdata(pdev, NULL); |
1112 | free_irq(platform_get_irq(pdev, 0), atdma); | 1113 | free_irq(platform_get_irq(pdev, 0), atdma); |
1113 | err_irq: | 1114 | err_irq: |
1114 | clk_disable(atdma->clk); | 1115 | clk_disable(atdma->clk); |
1115 | clk_put(atdma->clk); | 1116 | clk_put(atdma->clk); |
1116 | err_clk: | 1117 | err_clk: |
1117 | iounmap(atdma->regs); | 1118 | iounmap(atdma->regs); |
1118 | atdma->regs = NULL; | 1119 | atdma->regs = NULL; |
1119 | err_release_r: | 1120 | err_release_r: |
1120 | release_mem_region(io->start, size); | 1121 | release_mem_region(io->start, size); |
1121 | err_kfree: | 1122 | err_kfree: |
1122 | kfree(atdma); | 1123 | kfree(atdma); |
1123 | return err; | 1124 | return err; |
1124 | } | 1125 | } |
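
For completeness, a hedged sketch of the board-side code that feeds this probe; the register window, IRQ number, and channel count below are made up, and only the field and device names follow the driver above.

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <mach/at_hdmac.h>

static struct at_dma_platform_data at_hdmac_pdata = {
        .nr_channels = 2,       /* must not exceed AT_DMA_MAX_NR_CHANNELS */
};

static struct resource at_hdmac_resources[] = {
        { .start = 0xffffec00, .end = 0xffffefff, .flags = IORESOURCE_MEM },
        { .start = 21,         .end = 21,         .flags = IORESOURCE_IRQ },
};

static struct platform_device at_hdmac_device = {
        .name           = "at_hdmac",   /* matches at_dma_driver above */
        .id             = -1,
        .dev            = { .platform_data = &at_hdmac_pdata },
        .resource       = at_hdmac_resources,
        .num_resources  = ARRAY_SIZE(at_hdmac_resources),
};

static int __init at_example_board_init(void)
{
        /* at_dma_probe() copies this cap_mask into the dma_device */
        dma_cap_set(DMA_MEMCPY, at_hdmac_pdata.cap_mask);
        return platform_device_register(&at_hdmac_device);
}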
1125 | 1126 | ||
1126 | static int __exit at_dma_remove(struct platform_device *pdev) | 1127 | static int __exit at_dma_remove(struct platform_device *pdev) |
1127 | { | 1128 | { |
1128 | struct at_dma *atdma = platform_get_drvdata(pdev); | 1129 | struct at_dma *atdma = platform_get_drvdata(pdev); |
1129 | struct dma_chan *chan, *_chan; | 1130 | struct dma_chan *chan, *_chan; |
1130 | struct resource *io; | 1131 | struct resource *io; |
1131 | 1132 | ||
1132 | at_dma_off(atdma); | 1133 | at_dma_off(atdma); |
1133 | dma_async_device_unregister(&atdma->dma_common); | 1134 | dma_async_device_unregister(&atdma->dma_common); |
1134 | 1135 | ||
1135 | dma_pool_destroy(atdma->dma_desc_pool); | 1136 | dma_pool_destroy(atdma->dma_desc_pool); |
1136 | platform_set_drvdata(pdev, NULL); | 1137 | platform_set_drvdata(pdev, NULL); |
1137 | free_irq(platform_get_irq(pdev, 0), atdma); | 1138 | free_irq(platform_get_irq(pdev, 0), atdma); |
1138 | 1139 | ||
1139 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, | 1140 | list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, |
1140 | device_node) { | 1141 | device_node) { |
1141 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 1142 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
1142 | 1143 | ||
1143 | /* Disable interrupts */ | 1144 | /* Disable interrupts */ |
1144 | atc_disable_irq(atchan); | 1145 | atc_disable_irq(atchan); |
1145 | tasklet_disable(&atchan->tasklet); | 1146 | tasklet_disable(&atchan->tasklet); |
1146 | 1147 | ||
1147 | tasklet_kill(&atchan->tasklet); | 1148 | tasklet_kill(&atchan->tasklet); |
1148 | list_del(&chan->device_node); | 1149 | list_del(&chan->device_node); |
1149 | } | 1150 | } |
1150 | 1151 | ||
1151 | clk_disable(atdma->clk); | 1152 | clk_disable(atdma->clk); |
1152 | clk_put(atdma->clk); | 1153 | clk_put(atdma->clk); |
1153 | 1154 | ||
1154 | iounmap(atdma->regs); | 1155 | iounmap(atdma->regs); |
1155 | atdma->regs = NULL; | 1156 | atdma->regs = NULL; |
1156 | 1157 | ||
1157 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1158 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1158 | release_mem_region(io->start, io->end - io->start + 1); | 1159 | release_mem_region(io->start, io->end - io->start + 1); |
1159 | 1160 | ||
1160 | kfree(atdma); | 1161 | kfree(atdma); |
1161 | 1162 | ||
1162 | return 0; | 1163 | return 0; |
1163 | } | 1164 | } |
1164 | 1165 | ||
1165 | static void at_dma_shutdown(struct platform_device *pdev) | 1166 | static void at_dma_shutdown(struct platform_device *pdev) |
1166 | { | 1167 | { |
1167 | struct at_dma *atdma = platform_get_drvdata(pdev); | 1168 | struct at_dma *atdma = platform_get_drvdata(pdev); |
1168 | 1169 | ||
1169 | at_dma_off(atdma); | 1170 | at_dma_off(atdma); |
1170 | clk_disable(atdma->clk); | 1171 | clk_disable(atdma->clk); |
1171 | } | 1172 | } |
1172 | 1173 | ||
1173 | static int at_dma_suspend_noirq(struct device *dev) | 1174 | static int at_dma_suspend_noirq(struct device *dev) |
1174 | { | 1175 | { |
1175 | struct platform_device *pdev = to_platform_device(dev); | 1176 | struct platform_device *pdev = to_platform_device(dev); |
1176 | struct at_dma *atdma = platform_get_drvdata(pdev); | 1177 | struct at_dma *atdma = platform_get_drvdata(pdev); |
1177 | 1178 | ||
1178 | at_dma_off(atdma); | 1179 | at_dma_off(atdma); |
1179 | clk_disable(atdma->clk); | 1180 | clk_disable(atdma->clk); |
1180 | return 0; | 1181 | return 0; |
1181 | } | 1182 | } |
1182 | 1183 | ||
1183 | static int at_dma_resume_noirq(struct device *dev) | 1184 | static int at_dma_resume_noirq(struct device *dev) |
1184 | { | 1185 | { |
1185 | struct platform_device *pdev = to_platform_device(dev); | 1186 | struct platform_device *pdev = to_platform_device(dev); |
1186 | struct at_dma *atdma = platform_get_drvdata(pdev); | 1187 | struct at_dma *atdma = platform_get_drvdata(pdev); |
1187 | 1188 | ||
1188 | clk_enable(atdma->clk); | 1189 | clk_enable(atdma->clk); |
1189 | dma_writel(atdma, EN, AT_DMA_ENABLE); | 1190 | dma_writel(atdma, EN, AT_DMA_ENABLE); |
1190 | return 0; | 1191 | return 0; |
1191 | } | 1192 | } |
1192 | 1193 | ||
1193 | static const struct dev_pm_ops at_dma_dev_pm_ops = { | 1194 | static const struct dev_pm_ops at_dma_dev_pm_ops = { |
1194 | .suspend_noirq = at_dma_suspend_noirq, | 1195 | .suspend_noirq = at_dma_suspend_noirq, |
1195 | .resume_noirq = at_dma_resume_noirq, | 1196 | .resume_noirq = at_dma_resume_noirq, |
1196 | }; | 1197 | }; |
1197 | 1198 | ||
1198 | static struct platform_driver at_dma_driver = { | 1199 | static struct platform_driver at_dma_driver = { |
1199 | .remove = __exit_p(at_dma_remove), | 1200 | .remove = __exit_p(at_dma_remove), |
1200 | .shutdown = at_dma_shutdown, | 1201 | .shutdown = at_dma_shutdown, |
1201 | .driver = { | 1202 | .driver = { |
1202 | .name = "at_hdmac", | 1203 | .name = "at_hdmac", |
1203 | .pm = &at_dma_dev_pm_ops, | 1204 | .pm = &at_dma_dev_pm_ops, |
1204 | }, | 1205 | }, |
1205 | }; | 1206 | }; |
1206 | 1207 | ||
1207 | static int __init at_dma_init(void) | 1208 | static int __init at_dma_init(void) |
1208 | { | 1209 | { |
1209 | return platform_driver_probe(&at_dma_driver, at_dma_probe); | 1210 | return platform_driver_probe(&at_dma_driver, at_dma_probe); |
1210 | } | 1211 | } |
1211 | module_init(at_dma_init); | 1212 | module_init(at_dma_init); |
1212 | 1213 | ||
1213 | static void __exit at_dma_exit(void) | 1214 | static void __exit at_dma_exit(void) |
1214 | { | 1215 | { |
1215 | platform_driver_unregister(&at_dma_driver); | 1216 | platform_driver_unregister(&at_dma_driver); |
1216 | } | 1217 | } |
1217 | module_exit(at_dma_exit); | 1218 | module_exit(at_dma_exit); |
1218 | 1219 | ||
1219 | MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); | 1220 | MODULE_DESCRIPTION("Atmel AHB DMA Controller driver"); |
1220 | MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); | 1221 | MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>"); |
1221 | MODULE_LICENSE("GPL"); | 1222 | MODULE_LICENSE("GPL"); |
1222 | MODULE_ALIAS("platform:at_hdmac"); | 1223 | MODULE_ALIAS("platform:at_hdmac"); |
1223 | 1224 |
drivers/dma/coh901318.c
1 | /* | 1 | /* |
2 | * drivers/dma/coh901318.c | 2 | * drivers/dma/coh901318.c |
3 | * | 3 | * |
4 | * Copyright (C) 2007-2009 ST-Ericsson | 4 | * Copyright (C) 2007-2009 ST-Ericsson |
5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
6 | * DMA driver for COH 901 318 | 6 | * DMA driver for COH 901 318 |
7 | * Author: Per Friden <per.friden@stericsson.com> | 7 | * Author: Per Friden <per.friden@stericsson.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/kernel.h> /* printk() */ | 12 | #include <linux/kernel.h> /* printk() */ |
13 | #include <linux/fs.h> /* everything... */ | 13 | #include <linux/fs.h> /* everything... */ |
14 | #include <linux/slab.h> /* kmalloc() */ | 14 | #include <linux/slab.h> /* kmalloc() */ |
15 | #include <linux/dmaengine.h> | 15 | #include <linux/dmaengine.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/irqreturn.h> | 18 | #include <linux/irqreturn.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <linux/debugfs.h> | 22 | #include <linux/debugfs.h> |
23 | #include <mach/coh901318.h> | 23 | #include <mach/coh901318.h> |
24 | 24 | ||
25 | #include "coh901318_lli.h" | 25 | #include "coh901318_lli.h" |
26 | 26 | ||
27 | #define COHC_2_DEV(cohc) (&cohc->chan.dev->device) | 27 | #define COHC_2_DEV(cohc) (&cohc->chan.dev->device) |
28 | 28 | ||
29 | #ifdef VERBOSE_DEBUG | 29 | #ifdef VERBOSE_DEBUG |
30 | #define COH_DBG(x) ({ if (1) x; 0; }) | 30 | #define COH_DBG(x) ({ if (1) x; 0; }) |
31 | #else | 31 | #else |
32 | #define COH_DBG(x) ({ if (0) x; 0; }) | 32 | #define COH_DBG(x) ({ if (0) x; 0; }) |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | struct coh901318_desc { | 35 | struct coh901318_desc { |
36 | struct dma_async_tx_descriptor desc; | 36 | struct dma_async_tx_descriptor desc; |
37 | struct list_head node; | 37 | struct list_head node; |
38 | struct scatterlist *sg; | 38 | struct scatterlist *sg; |
39 | unsigned int sg_len; | 39 | unsigned int sg_len; |
40 | struct coh901318_lli *lli; | 40 | struct coh901318_lli *lli; |
41 | enum dma_data_direction dir; | 41 | enum dma_data_direction dir; |
42 | unsigned long flags; | 42 | unsigned long flags; |
43 | }; | 43 | }; |
44 | 44 | ||
45 | struct coh901318_base { | 45 | struct coh901318_base { |
46 | struct device *dev; | 46 | struct device *dev; |
47 | void __iomem *virtbase; | 47 | void __iomem *virtbase; |
48 | struct coh901318_pool pool; | 48 | struct coh901318_pool pool; |
49 | struct powersave pm; | 49 | struct powersave pm; |
50 | struct dma_device dma_slave; | 50 | struct dma_device dma_slave; |
51 | struct dma_device dma_memcpy; | 51 | struct dma_device dma_memcpy; |
52 | struct coh901318_chan *chans; | 52 | struct coh901318_chan *chans; |
53 | struct coh901318_platform *platform; | 53 | struct coh901318_platform *platform; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct coh901318_chan { | 56 | struct coh901318_chan { |
57 | spinlock_t lock; | 57 | spinlock_t lock; |
58 | int allocated; | 58 | int allocated; |
59 | int completed; | 59 | int completed; |
60 | int id; | 60 | int id; |
61 | int stopped; | 61 | int stopped; |
62 | 62 | ||
63 | struct work_struct free_work; | 63 | struct work_struct free_work; |
64 | struct dma_chan chan; | 64 | struct dma_chan chan; |
65 | 65 | ||
66 | struct tasklet_struct tasklet; | 66 | struct tasklet_struct tasklet; |
67 | 67 | ||
68 | struct list_head active; | 68 | struct list_head active; |
69 | struct list_head queue; | 69 | struct list_head queue; |
70 | struct list_head free; | 70 | struct list_head free; |
71 | 71 | ||
72 | unsigned long nbr_active_done; | 72 | unsigned long nbr_active_done; |
73 | unsigned long busy; | 73 | unsigned long busy; |
74 | 74 | ||
75 | struct coh901318_base *base; | 75 | struct coh901318_base *base; |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static void coh901318_list_print(struct coh901318_chan *cohc, | 78 | static void coh901318_list_print(struct coh901318_chan *cohc, |
79 | struct coh901318_lli *lli) | 79 | struct coh901318_lli *lli) |
80 | { | 80 | { |
81 | struct coh901318_lli *l = lli; | 81 | struct coh901318_lli *l = lli; |
82 | int i = 0; | 82 | int i = 0; |
83 | 83 | ||
84 | while (l) { | 84 | while (l) { |
85 | dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" | 85 | dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x" |
86 | ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n", | 86 | ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n", |
87 | i, l, l->control, l->src_addr, l->dst_addr, | 87 | i, l, l->control, l->src_addr, l->dst_addr, |
88 | l->link_addr, l->virt_link_addr); | 88 | l->link_addr, l->virt_link_addr); |
89 | i++; | 89 | i++; |
90 | l = l->virt_link_addr; | 90 | l = l->virt_link_addr; |
91 | } | 91 | } |
92 | } | 92 | } |
93 | 93 | ||
94 | #ifdef CONFIG_DEBUG_FS | 94 | #ifdef CONFIG_DEBUG_FS |
95 | 95 | ||
96 | #define COH901318_DEBUGFS_ASSIGN(x, y) (x = y) | 96 | #define COH901318_DEBUGFS_ASSIGN(x, y) (x = y) |
97 | 97 | ||
98 | static struct coh901318_base *debugfs_dma_base; | 98 | static struct coh901318_base *debugfs_dma_base; |
99 | static struct dentry *dma_dentry; | 99 | static struct dentry *dma_dentry; |
100 | 100 | ||
101 | static int coh901318_debugfs_open(struct inode *inode, struct file *file) | 101 | static int coh901318_debugfs_open(struct inode *inode, struct file *file) |
102 | { | 102 | { |
103 | 103 | ||
104 | file->private_data = inode->i_private; | 104 | file->private_data = inode->i_private; |
105 | return 0; | 105 | return 0; |
106 | } | 106 | } |
107 | 107 | ||
108 | static int coh901318_debugfs_read(struct file *file, char __user *buf, | 108 | static int coh901318_debugfs_read(struct file *file, char __user *buf, |
109 | size_t count, loff_t *f_pos) | 109 | size_t count, loff_t *f_pos) |
110 | { | 110 | { |
111 | u64 started_channels = debugfs_dma_base->pm.started_channels; | 111 | u64 started_channels = debugfs_dma_base->pm.started_channels; |
112 | int pool_count = debugfs_dma_base->pool.debugfs_pool_counter; | 112 | int pool_count = debugfs_dma_base->pool.debugfs_pool_counter; |
113 | int i; | 113 | int i; |
114 | int ret = 0; | 114 | int ret = 0; |
115 | char *dev_buf; | 115 | char *dev_buf; |
116 | char *tmp; | 116 | char *tmp; |
117 | int dev_size; | 117 | int dev_size; |
118 | 118 | ||
119 | dev_buf = kmalloc(4*1024, GFP_KERNEL); | 119 | dev_buf = kmalloc(4*1024, GFP_KERNEL); |
120 | if (dev_buf == NULL) | 120 | if (dev_buf == NULL) |
121 | goto err_kmalloc; | 121 | goto err_kmalloc; |
122 | tmp = dev_buf; | 122 | tmp = dev_buf; |
123 | 123 | ||
124 | tmp += sprintf(tmp, "DMA -- enabled dma channels\n"); | 124 | tmp += sprintf(tmp, "DMA -- enabled dma channels\n"); |
125 | 125 | ||
126 | for (i = 0; i < debugfs_dma_base->platform->max_channels; i++) | 126 | for (i = 0; i < debugfs_dma_base->platform->max_channels; i++) |
127 | if (started_channels & (1 << i)) | 127 | if (started_channels & (1 << i)) |
128 | tmp += sprintf(tmp, "channel %d\n", i); | 128 | tmp += sprintf(tmp, "channel %d\n", i); |
129 | 129 | ||
130 | tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count); | 130 | tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count); |
131 | dev_size = tmp - dev_buf; | 131 | dev_size = tmp - dev_buf; |
132 | 132 | ||
133 | /* No more to read if offset != 0 */ | 133 | /* No more to read if offset != 0 */ |
134 | if (*f_pos > dev_size) | 134 | if (*f_pos > dev_size) |
135 | goto out; | 135 | goto out; |
136 | 136 | ||
137 | if (count > dev_size - *f_pos) | 137 | if (count > dev_size - *f_pos) |
138 | count = dev_size - *f_pos; | 138 | count = dev_size - *f_pos; |
139 | 139 | ||
140 | if (copy_to_user(buf, dev_buf + *f_pos, count)) { | 140 | if (copy_to_user(buf, dev_buf + *f_pos, count)) { |
141 | ret = -EINVAL; | 141 | ret = -EINVAL; |
142 | goto out; | 142 | goto out; |
143 | } | 143 | } |
144 | ret = count; | 144 | ret = count; |
145 | *f_pos += count; | 145 | *f_pos += count; |
144 | 144 | ||
145 | out: | 145 | out: |
146 | kfree(dev_buf); | 146 | kfree(dev_buf); |
147 | return ret; | 147 | return ret; |
148 | 148 | ||
149 | err_kmalloc: | 149 | err_kmalloc: |
150 | return -ENOMEM; | 150 | return -ENOMEM; |
151 | } | 151 | } |
152 | 152 | ||
153 | static const struct file_operations coh901318_debugfs_status_operations = { | 153 | static const struct file_operations coh901318_debugfs_status_operations = { |
154 | .owner = THIS_MODULE, | 154 | .owner = THIS_MODULE, |
155 | .open = coh901318_debugfs_open, | 155 | .open = coh901318_debugfs_open, |
156 | .read = coh901318_debugfs_read, | 156 | .read = coh901318_debugfs_read, |
157 | }; | 157 | }; |
158 | 158 | ||
159 | 159 | ||
160 | static int __init init_coh901318_debugfs(void) | 160 | static int __init init_coh901318_debugfs(void) |
161 | { | 161 | { |
162 | 162 | ||
163 | dma_dentry = debugfs_create_dir("dma", NULL); | 163 | dma_dentry = debugfs_create_dir("dma", NULL); |
164 | 164 | ||
165 | (void) debugfs_create_file("status", | 165 | (void) debugfs_create_file("status", |
166 | S_IFREG | S_IRUGO, | 166 | S_IFREG | S_IRUGO, |
167 | dma_dentry, NULL, | 167 | dma_dentry, NULL, |
168 | &coh901318_debugfs_status_operations); | 168 | &coh901318_debugfs_status_operations); |
169 | return 0; | 169 | return 0; |
170 | } | 170 | } |
171 | 171 | ||
172 | static void __exit exit_coh901318_debugfs(void) | 172 | static void __exit exit_coh901318_debugfs(void) |
173 | { | 173 | { |
174 | debugfs_remove_recursive(dma_dentry); | 174 | debugfs_remove_recursive(dma_dentry); |
175 | } | 175 | } |
176 | 176 | ||
177 | module_init(init_coh901318_debugfs); | 177 | module_init(init_coh901318_debugfs); |
178 | module_exit(exit_coh901318_debugfs); | 178 | module_exit(exit_coh901318_debugfs); |
179 | #else | 179 | #else |
180 | 180 | ||
181 | #define COH901318_DEBUGFS_ASSIGN(x, y) | 181 | #define COH901318_DEBUGFS_ASSIGN(x, y) |
182 | 182 | ||
183 | #endif /* CONFIG_DEBUG_FS */ | 183 | #endif /* CONFIG_DEBUG_FS */ |
184 | 184 | ||
185 | static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan) | 185 | static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan) |
186 | { | 186 | { |
187 | return container_of(chan, struct coh901318_chan, chan); | 187 | return container_of(chan, struct coh901318_chan, chan); |
188 | } | 188 | } |
189 | 189 | ||
190 | static inline dma_addr_t | 190 | static inline dma_addr_t |
191 | cohc_dev_addr(struct coh901318_chan *cohc) | 191 | cohc_dev_addr(struct coh901318_chan *cohc) |
192 | { | 192 | { |
193 | return cohc->base->platform->chan_conf[cohc->id].dev_addr; | 193 | return cohc->base->platform->chan_conf[cohc->id].dev_addr; |
194 | } | 194 | } |
195 | 195 | ||
196 | static inline const struct coh901318_params * | 196 | static inline const struct coh901318_params * |
197 | cohc_chan_param(struct coh901318_chan *cohc) | 197 | cohc_chan_param(struct coh901318_chan *cohc) |
198 | { | 198 | { |
199 | return &cohc->base->platform->chan_conf[cohc->id].param; | 199 | return &cohc->base->platform->chan_conf[cohc->id].param; |
200 | } | 200 | } |
201 | 201 | ||
202 | static inline const struct coh_dma_channel * | 202 | static inline const struct coh_dma_channel * |
203 | cohc_chan_conf(struct coh901318_chan *cohc) | 203 | cohc_chan_conf(struct coh901318_chan *cohc) |
204 | { | 204 | { |
205 | return &cohc->base->platform->chan_conf[cohc->id]; | 205 | return &cohc->base->platform->chan_conf[cohc->id]; |
206 | } | 206 | } |
207 | 207 | ||
208 | static void enable_powersave(struct coh901318_chan *cohc) | 208 | static void enable_powersave(struct coh901318_chan *cohc) |
209 | { | 209 | { |
210 | unsigned long flags; | 210 | unsigned long flags; |
211 | struct powersave *pm = &cohc->base->pm; | 211 | struct powersave *pm = &cohc->base->pm; |
212 | 212 | ||
213 | spin_lock_irqsave(&pm->lock, flags); | 213 | spin_lock_irqsave(&pm->lock, flags); |
214 | 214 | ||
215 | pm->started_channels &= ~(1ULL << cohc->id); | 215 | pm->started_channels &= ~(1ULL << cohc->id); |
216 | 216 | ||
217 | if (!pm->started_channels) { | 217 | if (!pm->started_channels) { |
218 | /* DMA no longer intends to access memory */ | 218 | /* DMA no longer intends to access memory */ |
219 | cohc->base->platform->access_memory_state(cohc->base->dev, | 219 | cohc->base->platform->access_memory_state(cohc->base->dev, |
220 | false); | 220 | false); |
221 | } | 221 | } |
222 | 222 | ||
223 | spin_unlock_irqrestore(&pm->lock, flags); | 223 | spin_unlock_irqrestore(&pm->lock, flags); |
224 | } | 224 | } |
225 | static void disable_powersave(struct coh901318_chan *cohc) | 225 | static void disable_powersave(struct coh901318_chan *cohc) |
226 | { | 226 | { |
227 | unsigned long flags; | 227 | unsigned long flags; |
228 | struct powersave *pm = &cohc->base->pm; | 228 | struct powersave *pm = &cohc->base->pm; |
229 | 229 | ||
230 | spin_lock_irqsave(&pm->lock, flags); | 230 | spin_lock_irqsave(&pm->lock, flags); |
231 | 231 | ||
232 | if (!pm->started_channels) { | 232 | if (!pm->started_channels) { |
233 | /* DMA intends to access memory */ | 233 | /* DMA intends to access memory */ |
234 | cohc->base->platform->access_memory_state(cohc->base->dev, | 234 | cohc->base->platform->access_memory_state(cohc->base->dev, |
235 | true); | 235 | true); |
236 | } | 236 | } |
237 | 237 | ||
238 | pm->started_channels |= (1ULL << cohc->id); | 238 | pm->started_channels |= (1ULL << cohc->id); |
239 | 239 | ||
240 | spin_unlock_irqrestore(&pm->lock, flags); | 240 | spin_unlock_irqrestore(&pm->lock, flags); |
241 | } | 241 | } |
242 | 242 | ||
243 | static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control) | 243 | static inline int coh901318_set_ctrl(struct coh901318_chan *cohc, u32 control) |
244 | { | 244 | { |
245 | int channel = cohc->id; | 245 | int channel = cohc->id; |
246 | void __iomem *virtbase = cohc->base->virtbase; | 246 | void __iomem *virtbase = cohc->base->virtbase; |
247 | 247 | ||
248 | writel(control, | 248 | writel(control, |
249 | virtbase + COH901318_CX_CTRL + | 249 | virtbase + COH901318_CX_CTRL + |
250 | COH901318_CX_CTRL_SPACING * channel); | 250 | COH901318_CX_CTRL_SPACING * channel); |
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf) | 254 | static inline int coh901318_set_conf(struct coh901318_chan *cohc, u32 conf) |
255 | { | 255 | { |
256 | int channel = cohc->id; | 256 | int channel = cohc->id; |
257 | void __iomem *virtbase = cohc->base->virtbase; | 257 | void __iomem *virtbase = cohc->base->virtbase; |
258 | 258 | ||
259 | writel(conf, | 259 | writel(conf, |
260 | virtbase + COH901318_CX_CFG + | 260 | virtbase + COH901318_CX_CFG + |
261 | COH901318_CX_CFG_SPACING*channel); | 261 | COH901318_CX_CFG_SPACING*channel); |
262 | return 0; | 262 | return 0; |
263 | } | 263 | } |
264 | 264 | ||
265 | 265 | ||
266 | static int coh901318_start(struct coh901318_chan *cohc) | 266 | static int coh901318_start(struct coh901318_chan *cohc) |
267 | { | 267 | { |
268 | u32 val; | 268 | u32 val; |
269 | int channel = cohc->id; | 269 | int channel = cohc->id; |
270 | void __iomem *virtbase = cohc->base->virtbase; | 270 | void __iomem *virtbase = cohc->base->virtbase; |
271 | 271 | ||
272 | disable_powersave(cohc); | 272 | disable_powersave(cohc); |
273 | 273 | ||
274 | val = readl(virtbase + COH901318_CX_CFG + | 274 | val = readl(virtbase + COH901318_CX_CFG + |
275 | COH901318_CX_CFG_SPACING * channel); | 275 | COH901318_CX_CFG_SPACING * channel); |
276 | 276 | ||
277 | /* Enable channel */ | 277 | /* Enable channel */ |
278 | val |= COH901318_CX_CFG_CH_ENABLE; | 278 | val |= COH901318_CX_CFG_CH_ENABLE; |
279 | writel(val, virtbase + COH901318_CX_CFG + | 279 | writel(val, virtbase + COH901318_CX_CFG + |
280 | COH901318_CX_CFG_SPACING * channel); | 280 | COH901318_CX_CFG_SPACING * channel); |
281 | 281 | ||
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | static int coh901318_prep_linked_list(struct coh901318_chan *cohc, | 285 | static int coh901318_prep_linked_list(struct coh901318_chan *cohc, |
286 | struct coh901318_lli *lli) | 286 | struct coh901318_lli *lli) |
287 | { | 287 | { |
288 | int channel = cohc->id; | 288 | int channel = cohc->id; |
289 | void __iomem *virtbase = cohc->base->virtbase; | 289 | void __iomem *virtbase = cohc->base->virtbase; |
290 | 290 | ||
291 | BUG_ON(readl(virtbase + COH901318_CX_STAT + | 291 | BUG_ON(readl(virtbase + COH901318_CX_STAT + |
292 | COH901318_CX_STAT_SPACING*channel) & | 292 | COH901318_CX_STAT_SPACING*channel) & |
293 | COH901318_CX_STAT_ACTIVE); | 293 | COH901318_CX_STAT_ACTIVE); |
294 | 294 | ||
295 | writel(lli->src_addr, | 295 | writel(lli->src_addr, |
296 | virtbase + COH901318_CX_SRC_ADDR + | 296 | virtbase + COH901318_CX_SRC_ADDR + |
297 | COH901318_CX_SRC_ADDR_SPACING * channel); | 297 | COH901318_CX_SRC_ADDR_SPACING * channel); |
298 | 298 | ||
299 | writel(lli->dst_addr, virtbase + | 299 | writel(lli->dst_addr, virtbase + |
300 | COH901318_CX_DST_ADDR + | 300 | COH901318_CX_DST_ADDR + |
301 | COH901318_CX_DST_ADDR_SPACING * channel); | 301 | COH901318_CX_DST_ADDR_SPACING * channel); |
302 | 302 | ||
303 | writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR + | 303 | writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR + |
304 | COH901318_CX_LNK_ADDR_SPACING * channel); | 304 | COH901318_CX_LNK_ADDR_SPACING * channel); |
305 | 305 | ||
306 | writel(lli->control, virtbase + COH901318_CX_CTRL + | 306 | writel(lli->control, virtbase + COH901318_CX_CTRL + |
307 | COH901318_CX_CTRL_SPACING * channel); | 307 | COH901318_CX_CTRL_SPACING * channel); |
308 | 308 | ||
309 | return 0; | 309 | return 0; |
310 | } | 310 | } |
311 | static dma_cookie_t | 311 | static dma_cookie_t |
312 | coh901318_assign_cookie(struct coh901318_chan *cohc, | 312 | coh901318_assign_cookie(struct coh901318_chan *cohc, |
313 | struct coh901318_desc *cohd) | 313 | struct coh901318_desc *cohd) |
314 | { | 314 | { |
315 | dma_cookie_t cookie = cohc->chan.cookie; | 315 | dma_cookie_t cookie = cohc->chan.cookie; |
316 | 316 | ||
317 | if (++cookie < 0) | 317 | if (++cookie < 0) |
318 | cookie = 1; | 318 | cookie = 1; |
319 | 319 | ||
320 | cohc->chan.cookie = cookie; | 320 | cohc->chan.cookie = cookie; |
321 | cohd->desc.cookie = cookie; | 321 | cohd->desc.cookie = cookie; |
322 | 322 | ||
323 | return cookie; | 323 | return cookie; |
324 | } | 324 | } |
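
The increment above deliberately wraps back to 1: dma_cookie_t is signed, negative cookies encode errors from tx_submit(), and 0 is skipped so it never names a real transaction. A self-contained restatement of the rule:

#include <linux/dmaengine.h>

/* Keep cookies in 1..INT_MAX; 0 and negative values are reserved. */
static dma_cookie_t example_advance_cookie(dma_cookie_t cookie)
{
        if (++cookie < 0)
                cookie = 1;
        return cookie;
}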
325 | 325 | ||
326 | static struct coh901318_desc * | 326 | static struct coh901318_desc * |
327 | coh901318_desc_get(struct coh901318_chan *cohc) | 327 | coh901318_desc_get(struct coh901318_chan *cohc) |
328 | { | 328 | { |
329 | struct coh901318_desc *desc; | 329 | struct coh901318_desc *desc; |
330 | 330 | ||
331 | if (list_empty(&cohc->free)) { | 331 | if (list_empty(&cohc->free)) { |
332 | /* alloc a new desc because we're out of free ones | 332 | /* alloc a new desc because we're out of free ones |
333 | * TODO: alloc a pile of descs instead of just one, | 333 | * TODO: alloc a pile of descs instead of just one, |
334 | * avoid many small allocations. | 334 | * avoid many small allocations. |
335 | */ | 335 | */ |
336 | desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); | 336 | desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT); |
337 | if (desc == NULL) | 337 | if (desc == NULL) |
338 | goto out; | 338 | goto out; |
339 | INIT_LIST_HEAD(&desc->node); | 339 | INIT_LIST_HEAD(&desc->node); |
340 | dma_async_tx_descriptor_init(&desc->desc, &cohc->chan); | 340 | dma_async_tx_descriptor_init(&desc->desc, &cohc->chan); |
341 | } else { | 341 | } else { |
342 | /* Reuse an old desc. */ | 342 | /* Reuse an old desc. */ |
343 | desc = list_first_entry(&cohc->free, | 343 | desc = list_first_entry(&cohc->free, |
344 | struct coh901318_desc, | 344 | struct coh901318_desc, |
345 | node); | 345 | node); |
346 | list_del(&desc->node); | 346 | list_del(&desc->node); |
347 | /* Initialize it a bit so it's not insane */ | 347 | /* Initialize it a bit so it's not insane */ |
348 | desc->sg = NULL; | 348 | desc->sg = NULL; |
349 | desc->sg_len = 0; | 349 | desc->sg_len = 0; |
350 | desc->desc.callback = NULL; | 350 | desc->desc.callback = NULL; |
351 | desc->desc.callback_param = NULL; | 351 | desc->desc.callback_param = NULL; |
352 | } | 352 | } |
353 | 353 | ||
354 | out: | 354 | out: |
355 | return desc; | 355 | return desc; |
356 | } | 356 | } |
357 | 357 | ||
358 | static void | 358 | static void |
359 | coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd) | 359 | coh901318_desc_free(struct coh901318_chan *cohc, struct coh901318_desc *cohd) |
360 | { | 360 | { |
361 | list_add_tail(&cohd->node, &cohc->free); | 361 | list_add_tail(&cohd->node, &cohc->free); |
362 | } | 362 | } |
363 | 363 | ||
364 | /* call with irq lock held */ | 364 | /* call with irq lock held */ |
365 | static void | 365 | static void |
366 | coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc) | 366 | coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc) |
367 | { | 367 | { |
368 | list_add_tail(&desc->node, &cohc->active); | 368 | list_add_tail(&desc->node, &cohc->active); |
369 | } | 369 | } |
370 | 370 | ||
371 | static struct coh901318_desc * | 371 | static struct coh901318_desc * |
372 | coh901318_first_active_get(struct coh901318_chan *cohc) | 372 | coh901318_first_active_get(struct coh901318_chan *cohc) |
373 | { | 373 | { |
374 | struct coh901318_desc *d; | 374 | struct coh901318_desc *d; |
375 | 375 | ||
376 | if (list_empty(&cohc->active)) | 376 | if (list_empty(&cohc->active)) |
377 | return NULL; | 377 | return NULL; |
378 | 378 | ||
379 | d = list_first_entry(&cohc->active, | 379 | d = list_first_entry(&cohc->active, |
380 | struct coh901318_desc, | 380 | struct coh901318_desc, |
381 | node); | 381 | node); |
382 | return d; | 382 | return d; |
383 | } | 383 | } |
384 | 384 | ||
385 | static void | 385 | static void |
386 | coh901318_desc_remove(struct coh901318_desc *cohd) | 386 | coh901318_desc_remove(struct coh901318_desc *cohd) |
387 | { | 387 | { |
388 | list_del(&cohd->node); | 388 | list_del(&cohd->node); |
389 | } | 389 | } |
390 | 390 | ||
391 | static void | 391 | static void |
392 | coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc) | 392 | coh901318_desc_queue(struct coh901318_chan *cohc, struct coh901318_desc *desc) |
393 | { | 393 | { |
394 | list_add_tail(&desc->node, &cohc->queue); | 394 | list_add_tail(&desc->node, &cohc->queue); |
395 | } | 395 | } |
396 | 396 | ||
397 | static struct coh901318_desc * | 397 | static struct coh901318_desc * |
398 | coh901318_first_queued(struct coh901318_chan *cohc) | 398 | coh901318_first_queued(struct coh901318_chan *cohc) |
399 | { | 399 | { |
400 | struct coh901318_desc *d; | 400 | struct coh901318_desc *d; |
401 | 401 | ||
402 | if (list_empty(&cohc->queue)) | 402 | if (list_empty(&cohc->queue)) |
403 | return NULL; | 403 | return NULL; |
404 | 404 | ||
405 | d = list_first_entry(&cohc->queue, | 405 | d = list_first_entry(&cohc->queue, |
406 | struct coh901318_desc, | 406 | struct coh901318_desc, |
407 | node); | 407 | node); |
408 | return d; | 408 | return d; |
409 | } | 409 | } |
410 | 410 | ||
411 | static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli) | 411 | static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli) |
412 | { | 412 | { |
413 | struct coh901318_lli *lli = in_lli; | 413 | struct coh901318_lli *lli = in_lli; |
414 | u32 bytes = 0; | 414 | u32 bytes = 0; |
415 | 415 | ||
416 | while (lli) { | 416 | while (lli) { |
417 | bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK; | 417 | bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK; |
418 | lli = lli->virt_link_addr; | 418 | lli = lli->virt_link_addr; |
419 | } | 419 | } |
420 | return bytes; | 420 | return bytes; |
421 | } | 421 | } |
422 | 422 | ||
423 | /* | 423 | /* |
424 | * Get the number of bytes left to transfer on this channel. | 424 | * Get the number of bytes left to transfer on this channel. |
425 | * For an exact figure it is unwise to call this before stopping | 425 | * For an exact figure it is unwise to call this before stopping |
426 | * the channel, but it can still be called for a rough estimate. | 426 | * the channel, but it can still be called for a rough estimate. |
428 | */ | 428 | */ |
429 | static u32 coh901318_get_bytes_left(struct dma_chan *chan) | 429 | static u32 coh901318_get_bytes_left(struct dma_chan *chan) |
430 | { | 430 | { |
431 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 431 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
432 | struct coh901318_desc *cohd; | 432 | struct coh901318_desc *cohd; |
433 | struct list_head *pos; | 433 | struct list_head *pos; |
434 | unsigned long flags; | 434 | unsigned long flags; |
435 | u32 left = 0; | 435 | u32 left = 0; |
436 | int i = 0; | 436 | int i = 0; |
437 | 437 | ||
438 | spin_lock_irqsave(&cohc->lock, flags); | 438 | spin_lock_irqsave(&cohc->lock, flags); |
439 | 439 | ||
440 | /* | 440 | /* |
441 | * If there are many queued jobs, we iterate and add the | 441 | * If there are many queued jobs, we iterate and add the |
442 | * size of them all. We take a special look on the first | 442 | * size of them all. We take a special look on the first |
443 | * job though, since it is probably active. | 443 | * job though, since it is probably active. |
444 | */ | 444 | */ |
445 | list_for_each(pos, &cohc->active) { | 445 | list_for_each(pos, &cohc->active) { |
446 | /* | 446 | /* |
447 | * The first job in the list will be working on the | 447 | * The first job in the list will be working on the |
448 | * hardware. The job can be stopped but still active, | 448 | * hardware. The job can be stopped but still active, |
449 | * so that the transfer counter is somewhere inside | 449 | * so that the transfer counter is somewhere inside |
450 | * the buffer. | 450 | * the buffer. |
451 | */ | 451 | */ |
452 | cohd = list_entry(pos, struct coh901318_desc, node); | 452 | cohd = list_entry(pos, struct coh901318_desc, node); |
453 | 453 | ||
454 | if (i == 0) { | 454 | if (i == 0) { |
455 | struct coh901318_lli *lli; | 455 | struct coh901318_lli *lli; |
456 | dma_addr_t ladd; | 456 | dma_addr_t ladd; |
457 | 457 | ||
458 | /* Read current transfer count value */ | 458 | /* Read current transfer count value */ |
459 | left = readl(cohc->base->virtbase + | 459 | left = readl(cohc->base->virtbase + |
460 | COH901318_CX_CTRL + | 460 | COH901318_CX_CTRL + |
461 | COH901318_CX_CTRL_SPACING * cohc->id) & | 461 | COH901318_CX_CTRL_SPACING * cohc->id) & |
462 | COH901318_CX_CTRL_TC_VALUE_MASK; | 462 | COH901318_CX_CTRL_TC_VALUE_MASK; |
463 | 463 | ||
464 | /* See if the transfer is linked... */ | 464 | /* See if the transfer is linked... */ |
465 | ladd = readl(cohc->base->virtbase + | 465 | ladd = readl(cohc->base->virtbase + |
466 | COH901318_CX_LNK_ADDR + | 466 | COH901318_CX_LNK_ADDR + |
467 | COH901318_CX_LNK_ADDR_SPACING * | 467 | COH901318_CX_LNK_ADDR_SPACING * |
468 | cohc->id) & | 468 | cohc->id) & |
469 | ~COH901318_CX_LNK_LINK_IMMEDIATE; | 469 | ~COH901318_CX_LNK_LINK_IMMEDIATE; |
470 | /* Single transaction */ | 470 | /* Single transaction */ |
471 | if (!ladd) | 471 | if (!ladd) |
472 | continue; | 472 | continue; |
473 | 473 | ||
474 | /* | 474 | /* |
475 | * Linked transaction, follow the lli, find the | 475 | * Linked transaction, follow the lli, find the |
476 | * currently processing lli, and proceed to the next | 476 | * currently processing lli, and proceed to the next |
477 | */ | 477 | */ |
478 | lli = cohd->lli; | 478 | lli = cohd->lli; |
479 | while (lli && lli->link_addr != ladd) | 479 | while (lli && lli->link_addr != ladd) |
480 | lli = lli->virt_link_addr; | 480 | lli = lli->virt_link_addr; |
481 | 481 | ||
482 | if (lli) | 482 | if (lli) |
483 | lli = lli->virt_link_addr; | 483 | lli = lli->virt_link_addr; |
484 | 484 | ||
485 | /* | 485 | /* |
486 | * Follow remaining lli links around to count the total | 486 | * Follow remaining lli links around to count the total |
487 | * number of bytes left | 487 | * number of bytes left |
488 | */ | 488 | */ |
489 | left += coh901318_get_bytes_in_lli(lli); | 489 | left += coh901318_get_bytes_in_lli(lli); |
490 | } else { | 490 | } else { |
491 | left += coh901318_get_bytes_in_lli(cohd->lli); | 491 | left += coh901318_get_bytes_in_lli(cohd->lli); |
492 | } | 492 | } |
493 | i++; | 493 | i++; |
494 | } | 494 | } |
495 | 495 | ||
496 | /* Also count bytes in the queued jobs */ | 496 | /* Also count bytes in the queued jobs */ |
497 | list_for_each(pos, &cohc->queue) { | 497 | list_for_each(pos, &cohc->queue) { |
498 | cohd = list_entry(pos, struct coh901318_desc, node); | 498 | cohd = list_entry(pos, struct coh901318_desc, node); |
499 | left += coh901318_get_bytes_in_lli(cohd->lli); | 499 | left += coh901318_get_bytes_in_lli(cohd->lli); |
500 | } | 500 | } |
501 | 501 | ||
502 | spin_unlock_irqrestore(&cohc->lock, flags); | 502 | spin_unlock_irqrestore(&cohc->lock, flags); |
503 | 503 | ||
504 | return left; | 504 | return left; |
505 | } | 505 | } |
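
This byte count is what the driver can report as the transfer residue through dma_set_tx_state() in its tx_status hook elsewhere in this file. A hedged sketch of a consumer reading it, calling the device hook directly since no dedicated wrapper exists at this point in the API:

#include <linux/dmaengine.h>

/* Hedged sketch: how many bytes of @cookie's transfer remain. */
static u32 example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
        struct dma_tx_state state;

        chan->device->device_tx_status(chan, cookie, &state);
        return state.residue;
}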
506 | 506 | ||
507 | /* | 507 | /* |
508 | * Pauses a transfer without losing data. Enables power save. | 508 | * Pauses a transfer without losing data. Enables power save. |
509 | * Use this function in conjunction with coh901318_resume. | 509 | * Use this function in conjunction with coh901318_resume. |
510 | */ | 510 | */ |
511 | static void coh901318_pause(struct dma_chan *chan) | 511 | static void coh901318_pause(struct dma_chan *chan) |
512 | { | 512 | { |
513 | u32 val; | 513 | u32 val; |
514 | unsigned long flags; | 514 | unsigned long flags; |
515 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 515 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
516 | int channel = cohc->id; | 516 | int channel = cohc->id; |
517 | void __iomem *virtbase = cohc->base->virtbase; | 517 | void __iomem *virtbase = cohc->base->virtbase; |
518 | 518 | ||
519 | spin_lock_irqsave(&cohc->lock, flags); | 519 | spin_lock_irqsave(&cohc->lock, flags); |
520 | 520 | ||
521 | /* Disable channel in HW */ | 521 | /* Disable channel in HW */ |
522 | val = readl(virtbase + COH901318_CX_CFG + | 522 | val = readl(virtbase + COH901318_CX_CFG + |
523 | COH901318_CX_CFG_SPACING * channel); | 523 | COH901318_CX_CFG_SPACING * channel); |
524 | 524 | ||
525 | /* Stopping an infinite transfer */ | 525 | /* Stopping an infinite transfer */ |
526 | if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 && | 526 | if ((val & COH901318_CX_CTRL_TC_ENABLE) == 0 && |
527 | (val & COH901318_CX_CFG_CH_ENABLE)) | 527 | (val & COH901318_CX_CFG_CH_ENABLE)) |
528 | cohc->stopped = 1; | 528 | cohc->stopped = 1; |
529 | 529 | ||
530 | 530 | ||
531 | val &= ~COH901318_CX_CFG_CH_ENABLE; | 531 | val &= ~COH901318_CX_CFG_CH_ENABLE; |
532 | /* Enable twice, HW bug work around */ | 532 | /* Enable twice, HW bug work around */ |
533 | writel(val, virtbase + COH901318_CX_CFG + | 533 | writel(val, virtbase + COH901318_CX_CFG + |
534 | COH901318_CX_CFG_SPACING * channel); | 534 | COH901318_CX_CFG_SPACING * channel); |
535 | writel(val, virtbase + COH901318_CX_CFG + | 535 | writel(val, virtbase + COH901318_CX_CFG + |
536 | COH901318_CX_CFG_SPACING * channel); | 536 | COH901318_CX_CFG_SPACING * channel); |
537 | 537 | ||
538 | /* Spin-wait for it to actually go inactive */ | 538 | /* Spin-wait for it to actually go inactive */ |
539 | while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING * | 539 | while (readl(virtbase + COH901318_CX_STAT+COH901318_CX_STAT_SPACING * |
540 | channel) & COH901318_CX_STAT_ACTIVE) | 540 | channel) & COH901318_CX_STAT_ACTIVE) |
541 | cpu_relax(); | 541 | cpu_relax(); |
542 | 542 | ||
543 | /* Check if we stopped an active job */ | 543 | /* Check if we stopped an active job */ |
544 | if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING * | 544 | if ((readl(virtbase + COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING * |
545 | channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0) | 545 | channel) & COH901318_CX_CTRL_TC_VALUE_MASK) > 0) |
546 | cohc->stopped = 1; | 546 | cohc->stopped = 1; |
547 | 547 | ||
548 | enable_powersave(cohc); | 548 | enable_powersave(cohc); |
549 | 549 | ||
550 | spin_unlock_irqrestore(&cohc->lock, flags); | 550 | spin_unlock_irqrestore(&cohc->lock, flags); |
551 | } | 551 | } |
552 | 552 | ||
553 | /* Resumes a transfer that has been paused via coh901318_pause(). | 553 | /* Resumes a transfer that has been paused via coh901318_pause(). |
554 | Power save is handled. | 554 | Power save is handled. |
555 | */ | 555 | */ |
556 | static void coh901318_resume(struct dma_chan *chan) | 556 | static void coh901318_resume(struct dma_chan *chan) |
557 | { | 557 | { |
558 | u32 val; | 558 | u32 val; |
559 | unsigned long flags; | 559 | unsigned long flags; |
560 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 560 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
561 | int channel = cohc->id; | 561 | int channel = cohc->id; |
562 | 562 | ||
563 | spin_lock_irqsave(&cohc->lock, flags); | 563 | spin_lock_irqsave(&cohc->lock, flags); |
564 | 564 | ||
565 | disable_powersave(cohc); | 565 | disable_powersave(cohc); |
566 | 566 | ||
567 | if (cohc->stopped) { | 567 | if (cohc->stopped) { |
568 | /* Enable channel in HW */ | 568 | /* Enable channel in HW */ |
569 | val = readl(cohc->base->virtbase + COH901318_CX_CFG + | 569 | val = readl(cohc->base->virtbase + COH901318_CX_CFG + |
570 | COH901318_CX_CFG_SPACING * channel); | 570 | COH901318_CX_CFG_SPACING * channel); |
571 | 571 | ||
572 | val |= COH901318_CX_CFG_CH_ENABLE; | 572 | val |= COH901318_CX_CFG_CH_ENABLE; |
573 | 573 | ||
574 | writel(val, cohc->base->virtbase + COH901318_CX_CFG + | 574 | writel(val, cohc->base->virtbase + COH901318_CX_CFG + |
575 | COH901318_CX_CFG_SPACING*channel); | 575 | COH901318_CX_CFG_SPACING*channel); |
576 | 576 | ||
577 | cohc->stopped = 0; | 577 | cohc->stopped = 0; |
578 | } | 578 | } |
579 | 579 | ||
580 | spin_unlock_irqrestore(&cohc->lock, flags); | 580 | spin_unlock_irqrestore(&cohc->lock, flags); |
581 | } | 581 | } |
582 | 582 | ||
583 | bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) | 583 | bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) |
584 | { | 584 | { |
585 | unsigned int ch_nr = (unsigned int) chan_id; | 585 | unsigned int ch_nr = (unsigned int) chan_id; |
586 | 586 | ||
587 | if (ch_nr == to_coh901318_chan(chan)->id) | 587 | if (ch_nr == to_coh901318_chan(chan)->id) |
588 | return true; | 588 | return true; |
589 | 589 | ||
590 | return false; | 590 | return false; |
591 | } | 591 | } |
592 | EXPORT_SYMBOL(coh901318_filter_id); | 592 | EXPORT_SYMBOL(coh901318_filter_id); |
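coh901318_filter_id() lets a client pin one specific physical channel through the generic dmaengine allocator. A minimal client-side sketch, assuming a DMA_SLAVE channel is wanted; the channel id 14 is invented for illustration, real ids come from the U300 platform data:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* The channel number travels as the void * cookie and is
	 * compared against cohc->id inside the filter. */
	chan = dma_request_channel(mask, coh901318_filter_id, (void *) 14);
	if (!chan)
		pr_err("channel 14 not available\n");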
593 | 593 | ||
594 | /* | 594 | /* |
595 | * DMA channel configuration and allocation | 595 | * DMA channel configuration and allocation |
596 | */ | 596 | */ |
597 | static int coh901318_config(struct coh901318_chan *cohc, | 597 | static int coh901318_config(struct coh901318_chan *cohc, |
598 | struct coh901318_params *param) | 598 | struct coh901318_params *param) |
599 | { | 599 | { |
600 | unsigned long flags; | 600 | unsigned long flags; |
601 | const struct coh901318_params *p; | 601 | const struct coh901318_params *p; |
602 | int channel = cohc->id; | 602 | int channel = cohc->id; |
603 | void __iomem *virtbase = cohc->base->virtbase; | 603 | void __iomem *virtbase = cohc->base->virtbase; |
604 | 604 | ||
605 | spin_lock_irqsave(&cohc->lock, flags); | 605 | spin_lock_irqsave(&cohc->lock, flags); |
606 | 606 | ||
607 | if (param) | 607 | if (param) |
608 | p = param; | 608 | p = param; |
609 | else | 609 | else |
610 | p = &cohc->base->platform->chan_conf[channel].param; | 610 | p = &cohc->base->platform->chan_conf[channel].param; |
611 | 611 | ||
612 | /* Clear any pending BE or TC interrupt */ | 612 | /* Clear any pending BE or TC interrupt */ |
613 | if (channel < 32) { | 613 | if (channel < 32) { |
614 | writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1); | 614 | writel(1 << channel, virtbase + COH901318_BE_INT_CLEAR1); |
615 | writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1); | 615 | writel(1 << channel, virtbase + COH901318_TC_INT_CLEAR1); |
616 | } else { | 616 | } else { |
617 | writel(1 << (channel - 32), virtbase + | 617 | writel(1 << (channel - 32), virtbase + |
618 | COH901318_BE_INT_CLEAR2); | 618 | COH901318_BE_INT_CLEAR2); |
619 | writel(1 << (channel - 32), virtbase + | 619 | writel(1 << (channel - 32), virtbase + |
620 | COH901318_TC_INT_CLEAR2); | 620 | COH901318_TC_INT_CLEAR2); |
621 | } | 621 | } |
622 | 622 | ||
623 | coh901318_set_conf(cohc, p->config); | 623 | coh901318_set_conf(cohc, p->config); |
624 | coh901318_set_ctrl(cohc, p->ctrl_lli_last); | 624 | coh901318_set_ctrl(cohc, p->ctrl_lli_last); |
625 | 625 | ||
626 | spin_unlock_irqrestore(&cohc->lock, flags); | 626 | spin_unlock_irqrestore(&cohc->lock, flags); |
627 | 627 | ||
628 | return 0; | 628 | return 0; |
629 | } | 629 | } |
630 | 630 | ||
631 | /* the caller must hold the channel lock when calling this function | 631 | /* the caller must hold the channel lock when calling this function |
632 | * starts the first queued job, if any | 632 | * starts the first queued job, if any |
633 | * TODO: start all queued jobs in one go | 633 | * TODO: start all queued jobs in one go |
634 | * | 634 | * |
635 | * Returns the descriptor if a queued job was started, otherwise | 635 | * Returns the descriptor if a queued job was started, otherwise |
636 | * NULL (i.e. when the queue is empty). | 636 | * NULL (i.e. when the queue is empty). |
637 | */ | 637 | */ |
638 | static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) | 638 | static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) |
639 | { | 639 | { |
640 | struct coh901318_desc *cohd; | 640 | struct coh901318_desc *cohd; |
641 | 641 | ||
642 | /* | 642 | /* |
643 | * start queued jobs, if any | 643 | * start queued jobs, if any |
644 | * TODO: transmit all queued jobs in one go | 644 | * TODO: transmit all queued jobs in one go |
645 | */ | 645 | */ |
646 | cohd = coh901318_first_queued(cohc); | 646 | cohd = coh901318_first_queued(cohc); |
647 | 647 | ||
648 | if (cohd != NULL) { | 648 | if (cohd != NULL) { |
649 | /* Remove from queue */ | 649 | /* Remove from queue */ |
650 | coh901318_desc_remove(cohd); | 650 | coh901318_desc_remove(cohd); |
651 | /* initiate DMA job */ | 651 | /* initiate DMA job */ |
652 | cohc->busy = 1; | 652 | cohc->busy = 1; |
653 | 653 | ||
654 | coh901318_desc_submit(cohc, cohd); | 654 | coh901318_desc_submit(cohc, cohd); |
655 | 655 | ||
656 | coh901318_prep_linked_list(cohc, cohd->lli); | 656 | coh901318_prep_linked_list(cohc, cohd->lli); |
657 | 657 | ||
658 | /* start dma job on this channel */ | 658 | /* start dma job on this channel */ |
659 | coh901318_start(cohc); | 659 | coh901318_start(cohc); |
660 | 660 | ||
661 | } | 661 | } |
662 | 662 | ||
663 | return cohd; | 663 | return cohd; |
664 | } | 664 | } |
665 | 665 | ||
666 | /* | 666 | /* |
667 | * This tasklet is called from the interrupt handler to | 667 | * This tasklet is called from the interrupt handler to |
668 | * handle each descriptor (DMA job) that is sent to a channel. | 668 | * handle each descriptor (DMA job) that is sent to a channel. |
669 | */ | 669 | */ |
670 | static void dma_tasklet(unsigned long data) | 670 | static void dma_tasklet(unsigned long data) |
671 | { | 671 | { |
672 | struct coh901318_chan *cohc = (struct coh901318_chan *) data; | 672 | struct coh901318_chan *cohc = (struct coh901318_chan *) data; |
673 | struct coh901318_desc *cohd_fin; | 673 | struct coh901318_desc *cohd_fin; |
674 | unsigned long flags; | 674 | unsigned long flags; |
675 | dma_async_tx_callback callback; | 675 | dma_async_tx_callback callback; |
676 | void *callback_param; | 676 | void *callback_param; |
677 | 677 | ||
678 | dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" | 678 | dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d" |
679 | " nbr_active_done %ld\n", __func__, | 679 | " nbr_active_done %ld\n", __func__, |
680 | cohc->id, cohc->nbr_active_done); | 680 | cohc->id, cohc->nbr_active_done); |
681 | 681 | ||
682 | spin_lock_irqsave(&cohc->lock, flags); | 682 | spin_lock_irqsave(&cohc->lock, flags); |
683 | 683 | ||
684 | /* get first active descriptor entry from list */ | 684 | /* get first active descriptor entry from list */ |
685 | cohd_fin = coh901318_first_active_get(cohc); | 685 | cohd_fin = coh901318_first_active_get(cohc); |
686 | 686 | ||
687 | if (cohd_fin == NULL) | 687 | if (cohd_fin == NULL) |
688 | goto err; | 688 | goto err; |
689 | 689 | ||
690 | /* locate callback to client */ | 690 | /* locate callback to client */ |
691 | callback = cohd_fin->desc.callback; | 691 | callback = cohd_fin->desc.callback; |
692 | callback_param = cohd_fin->desc.callback_param; | 692 | callback_param = cohd_fin->desc.callback_param; |
693 | 693 | ||
694 | /* sign this job as completed on the channel */ | 694 | /* sign this job as completed on the channel */ |
695 | cohc->completed = cohd_fin->desc.cookie; | 695 | cohc->completed = cohd_fin->desc.cookie; |
696 | 696 | ||
697 | /* release the lli allocation and remove the descriptor */ | 697 | /* release the lli allocation and remove the descriptor */ |
698 | coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); | 698 | coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); |
699 | 699 | ||
700 | /* return desc to free-list */ | 700 | /* return desc to free-list */ |
701 | coh901318_desc_remove(cohd_fin); | 701 | coh901318_desc_remove(cohd_fin); |
702 | coh901318_desc_free(cohc, cohd_fin); | 702 | coh901318_desc_free(cohc, cohd_fin); |
703 | 703 | ||
704 | spin_unlock_irqrestore(&cohc->lock, flags); | 704 | spin_unlock_irqrestore(&cohc->lock, flags); |
705 | 705 | ||
706 | /* Call the callback when we're done */ | 706 | /* Call the callback when we're done */ |
707 | if (callback) | 707 | if (callback) |
708 | callback(callback_param); | 708 | callback(callback_param); |
709 | 709 | ||
710 | spin_lock_irqsave(&cohc->lock, flags); | 710 | spin_lock_irqsave(&cohc->lock, flags); |
711 | 711 | ||
712 | /* | 712 | /* |
713 | * If another interrupt fired while this tasklet was being scheduled, | 713 | * If another interrupt fired while this tasklet was being scheduled, |
714 | * the tasklet is not called twice. The nbr_active_done counter | 714 | * the tasklet is not called twice. The nbr_active_done counter |
715 | * therefore keeps track of the number of IRQs expected to be | 715 | * therefore keeps track of the number of IRQs expected to be |
716 | * handled for this channel. If more than one IRQ remains to be | 716 | * handled for this channel. If more than one IRQ remains to be |
717 | * acked, we simply schedule this tasklet again. | 717 | * acked, we simply schedule this tasklet again. |
718 | */ | 718 | */ |
719 | cohc->nbr_active_done--; | 719 | cohc->nbr_active_done--; |
720 | if (cohc->nbr_active_done) { | 720 | if (cohc->nbr_active_done) { |
721 | dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs " | 721 | dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs " |
722 | "came in while we were scheduling this tasklet\n"); | 722 | "came in while we were scheduling this tasklet\n"); |
723 | if (cohc_chan_conf(cohc)->priority_high) | 723 | if (cohc_chan_conf(cohc)->priority_high) |
724 | tasklet_hi_schedule(&cohc->tasklet); | 724 | tasklet_hi_schedule(&cohc->tasklet); |
725 | else | 725 | else |
726 | tasklet_schedule(&cohc->tasklet); | 726 | tasklet_schedule(&cohc->tasklet); |
727 | } | 727 | } |
728 | 728 | ||
729 | spin_unlock_irqrestore(&cohc->lock, flags); | 729 | spin_unlock_irqrestore(&cohc->lock, flags); |
730 | 730 | ||
731 | return; | 731 | return; |
732 | 732 | ||
733 | err: | 733 | err: |
734 | spin_unlock_irqrestore(&cohc->lock, flags); | 734 | spin_unlock_irqrestore(&cohc->lock, flags); |
735 | dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__); | 735 | dev_err(COHC_2_DEV(cohc), "[%s] No active dma desc\n", __func__); |
736 | } | 736 | } |
737 | 737 | ||
738 | 738 | ||
739 | /* called from interrupt context */ | 739 | /* called from interrupt context */ |
740 | static void dma_tc_handle(struct coh901318_chan *cohc) | 740 | static void dma_tc_handle(struct coh901318_chan *cohc) |
741 | { | 741 | { |
742 | /* | 742 | /* |
743 | * If the channel is not allocated, then we shouldn't have | 743 | * If the channel is not allocated, then we shouldn't have |
744 | * any TC interrupts on it. | 744 | * any TC interrupts on it. |
745 | */ | 745 | */ |
746 | if (!cohc->allocated) { | 746 | if (!cohc->allocated) { |
747 | dev_err(COHC_2_DEV(cohc), "spurious interrupt from " | 747 | dev_err(COHC_2_DEV(cohc), "spurious interrupt from " |
748 | "unallocated channel\n"); | 748 | "unallocated channel\n"); |
749 | return; | 749 | return; |
750 | } | 750 | } |
751 | 751 | ||
752 | spin_lock(&cohc->lock); | 752 | spin_lock(&cohc->lock); |
753 | 753 | ||
754 | /* | 754 | /* |
755 | * When we reach this point, at least one queue item | 755 | * When we reach this point, at least one queue item |
756 | * should have been moved over from cohc->queue to | 756 | * should have been moved over from cohc->queue to |
757 | * cohc->active and run to completion; that, after all, is why | 757 | * cohc->active and run to completion; that, after all, is why |
758 | * we are getting a terminal count interrupt. | 758 | * we are getting a terminal count interrupt. |
759 | * If this BUG() triggers, the most probable cause is that the | 759 | * If this BUG() triggers, the most probable cause is that the |
760 | * individual nodes in the lli chain have their IRQ enabled, so | 760 | * individual nodes in the lli chain have their IRQ enabled, so |
761 | * check your platform config for the lli chain ctrl settings. | 761 | * check your platform config for the lli chain ctrl settings. |
762 | */ | 762 | */ |
763 | BUG_ON(list_empty(&cohc->active)); | 763 | BUG_ON(list_empty(&cohc->active)); |
764 | 764 | ||
765 | cohc->nbr_active_done++; | 765 | cohc->nbr_active_done++; |
766 | 766 | ||
767 | /* | 767 | /* |
768 | * This attempts to take a job from cohc->queue, put it | 768 | * This attempts to take a job from cohc->queue, put it |
769 | * into cohc->active and start it. | 769 | * into cohc->active and start it. |
770 | */ | 770 | */ |
771 | if (coh901318_queue_start(cohc) == NULL) | 771 | if (coh901318_queue_start(cohc) == NULL) |
772 | cohc->busy = 0; | 772 | cohc->busy = 0; |
773 | 773 | ||
774 | spin_unlock(&cohc->lock); | 774 | spin_unlock(&cohc->lock); |
775 | 775 | ||
776 | /* | 776 | /* |
777 | * This tasklet will remove items from cohc->active | 777 | * This tasklet will remove items from cohc->active |
778 | * and thus terminate them. | 778 | * and thus terminate them. |
779 | */ | 779 | */ |
780 | if (cohc_chan_conf(cohc)->priority_high) | 780 | if (cohc_chan_conf(cohc)->priority_high) |
781 | tasklet_hi_schedule(&cohc->tasklet); | 781 | tasklet_hi_schedule(&cohc->tasklet); |
782 | else | 782 | else |
783 | tasklet_schedule(&cohc->tasklet); | 783 | tasklet_schedule(&cohc->tasklet); |
784 | } | 784 | } |
785 | 785 | ||
786 | 786 | ||
787 | static irqreturn_t dma_irq_handler(int irq, void *dev_id) | 787 | static irqreturn_t dma_irq_handler(int irq, void *dev_id) |
788 | { | 788 | { |
789 | u32 status1; | 789 | u32 status1; |
790 | u32 status2; | 790 | u32 status2; |
791 | int i; | 791 | int i; |
792 | int ch; | 792 | int ch; |
793 | struct coh901318_base *base = dev_id; | 793 | struct coh901318_base *base = dev_id; |
794 | struct coh901318_chan *cohc; | 794 | struct coh901318_chan *cohc; |
795 | void __iomem *virtbase = base->virtbase; | 795 | void __iomem *virtbase = base->virtbase; |
796 | 796 | ||
797 | status1 = readl(virtbase + COH901318_INT_STATUS1); | 797 | status1 = readl(virtbase + COH901318_INT_STATUS1); |
798 | status2 = readl(virtbase + COH901318_INT_STATUS2); | 798 | status2 = readl(virtbase + COH901318_INT_STATUS2); |
799 | 799 | ||
800 | if (unlikely(status1 == 0 && status2 == 0)) { | 800 | if (unlikely(status1 == 0 && status2 == 0)) { |
801 | dev_warn(base->dev, "spurious DMA IRQ from no channel!\n"); | 801 | dev_warn(base->dev, "spurious DMA IRQ from no channel!\n"); |
802 | return IRQ_HANDLED; | 802 | return IRQ_HANDLED; |
803 | } | 803 | } |
804 | 804 | ||
805 | /* TODO: consider handling the IRQ in a tasklet here to | 805 | /* TODO: consider handling the IRQ in a tasklet here to |
806 | * minimize interrupt latency */ | 806 | * minimize interrupt latency */ |
807 | 807 | ||
808 | /* Check the first 32 DMA channels for IRQ */ | 808 | /* Check the first 32 DMA channels for IRQ */ |
809 | while (status1) { | 809 | while (status1) { |
810 | /* Find first bit set, return as a number. */ | 810 | /* Find first bit set, return as a number. */ |
811 | i = ffs(status1) - 1; | 811 | i = ffs(status1) - 1; |
812 | ch = i; | 812 | ch = i; |
813 | 813 | ||
814 | cohc = &base->chans[ch]; | 814 | cohc = &base->chans[ch]; |
815 | spin_lock(&cohc->lock); | 815 | spin_lock(&cohc->lock); |
816 | 816 | ||
817 | /* Mask off this bit */ | 817 | /* Mask off this bit */ |
818 | status1 &= ~(1 << i); | 818 | status1 &= ~(1 << i); |
819 | /* Check the individual channel bits */ | 819 | /* Check the individual channel bits */ |
820 | if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) { | 820 | if (test_bit(i, virtbase + COH901318_BE_INT_STATUS1)) { |
821 | dev_crit(COHC_2_DEV(cohc), | 821 | dev_crit(COHC_2_DEV(cohc), |
822 | "DMA bus error on channel %d!\n", ch); | 822 | "DMA bus error on channel %d!\n", ch); |
823 | BUG_ON(1); | 823 | BUG_ON(1); |
824 | /* Clear BE interrupt */ | 824 | /* Clear BE interrupt */ |
825 | __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1); | 825 | __set_bit(i, virtbase + COH901318_BE_INT_CLEAR1); |
826 | } else { | 826 | } else { |
827 | /* Caused by TC, really? */ | 827 | /* Caused by TC, really? */ |
828 | if (unlikely(!test_bit(i, virtbase + | 828 | if (unlikely(!test_bit(i, virtbase + |
829 | COH901318_TC_INT_STATUS1))) { | 829 | COH901318_TC_INT_STATUS1))) { |
830 | dev_warn(COHC_2_DEV(cohc), | 830 | dev_warn(COHC_2_DEV(cohc), |
831 | "ignoring interrupt not caused by terminal count on channel %d\n", ch); | 831 | "ignoring interrupt not caused by terminal count on channel %d\n", ch); |
832 | /* Clear TC interrupt */ | 832 | /* Clear TC interrupt */ |
833 | BUG_ON(1); | 833 | BUG_ON(1); |
834 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); | 834 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); |
835 | } else { | 835 | } else { |
836 | /* Enable powersave if transfer has finished */ | 836 | /* Enable powersave if transfer has finished */ |
837 | if (!(readl(virtbase + COH901318_CX_STAT + | 837 | if (!(readl(virtbase + COH901318_CX_STAT + |
838 | COH901318_CX_STAT_SPACING*ch) & | 838 | COH901318_CX_STAT_SPACING*ch) & |
839 | COH901318_CX_STAT_ENABLED)) { | 839 | COH901318_CX_STAT_ENABLED)) { |
840 | enable_powersave(cohc); | 840 | enable_powersave(cohc); |
841 | } | 841 | } |
842 | 842 | ||
843 | /* Must clear TC interrupt before calling | 843 | /* Must clear TC interrupt before calling |
844 | * dma_tc_handle | 844 | * dma_tc_handle |
845 | * in case dma_tc_handle initiates a new dma job | 845 | * in case dma_tc_handle initiates a new dma job |
846 | */ | 846 | */ |
847 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); | 847 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR1); |
848 | 848 | ||
849 | dma_tc_handle(cohc); | 849 | dma_tc_handle(cohc); |
850 | } | 850 | } |
851 | } | 851 | } |
852 | spin_unlock(&cohc->lock); | 852 | spin_unlock(&cohc->lock); |
853 | } | 853 | } |
854 | 854 | ||
855 | /* Check the remaining 32 DMA channels for IRQ */ | 855 | /* Check the remaining 32 DMA channels for IRQ */ |
856 | while (status2) { | 856 | while (status2) { |
857 | /* Find first bit set, return as a number. */ | 857 | /* Find first bit set, return as a number. */ |
858 | i = ffs(status2) - 1; | 858 | i = ffs(status2) - 1; |
859 | ch = i + 32; | 859 | ch = i + 32; |
860 | cohc = &base->chans[ch]; | 860 | cohc = &base->chans[ch]; |
861 | spin_lock(&cohc->lock); | 861 | spin_lock(&cohc->lock); |
862 | 862 | ||
863 | /* Mask off this bit */ | 863 | /* Mask off this bit */ |
864 | status2 &= ~(1 << i); | 864 | status2 &= ~(1 << i); |
865 | /* Check the individual channel bits */ | 865 | /* Check the individual channel bits */ |
866 | if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) { | 866 | if (test_bit(i, virtbase + COH901318_BE_INT_STATUS2)) { |
867 | dev_crit(COHC_2_DEV(cohc), | 867 | dev_crit(COHC_2_DEV(cohc), |
868 | "DMA bus error on channel %d!\n", ch); | 868 | "DMA bus error on channel %d!\n", ch); |
869 | /* Clear BE interrupt */ | 869 | /* Clear BE interrupt */ |
870 | BUG_ON(1); | 870 | BUG_ON(1); |
871 | __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2); | 871 | __set_bit(i, virtbase + COH901318_BE_INT_CLEAR2); |
872 | } else { | 872 | } else { |
873 | /* Caused by TC, really? */ | 873 | /* Caused by TC, really? */ |
874 | if (unlikely(!test_bit(i, virtbase + | 874 | if (unlikely(!test_bit(i, virtbase + |
875 | COH901318_TC_INT_STATUS2))) { | 875 | COH901318_TC_INT_STATUS2))) { |
876 | dev_warn(COHC_2_DEV(cohc), | 876 | dev_warn(COHC_2_DEV(cohc), |
877 | "ignoring interrupt not caused by terminal count on channel %d\n", ch); | 877 | "ignoring interrupt not caused by terminal count on channel %d\n", ch); |
878 | /* Clear TC interrupt */ | 878 | /* Clear TC interrupt */ |
879 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); | 879 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); |
880 | BUG_ON(1); | 880 | BUG_ON(1); |
881 | } else { | 881 | } else { |
882 | /* Enable powersave if transfer has finished */ | 882 | /* Enable powersave if transfer has finished */ |
883 | if (!(readl(virtbase + COH901318_CX_STAT + | 883 | if (!(readl(virtbase + COH901318_CX_STAT + |
884 | COH901318_CX_STAT_SPACING*ch) & | 884 | COH901318_CX_STAT_SPACING*ch) & |
885 | COH901318_CX_STAT_ENABLED)) { | 885 | COH901318_CX_STAT_ENABLED)) { |
886 | enable_powersave(cohc); | 886 | enable_powersave(cohc); |
887 | } | 887 | } |
888 | /* Must clear TC interrupt before calling | 888 | /* Must clear TC interrupt before calling |
889 | * dma_tc_handle | 889 | * dma_tc_handle |
890 | * in case dma_tc_handle initiates a new dma job | 890 | * in case dma_tc_handle initiates a new dma job |
891 | */ | 891 | */ |
892 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); | 892 | __set_bit(i, virtbase + COH901318_TC_INT_CLEAR2); |
893 | 893 | ||
894 | dma_tc_handle(cohc); | 894 | dma_tc_handle(cohc); |
895 | } | 895 | } |
896 | } | 896 | } |
897 | spin_unlock(&cohc->lock); | 897 | spin_unlock(&cohc->lock); |
898 | } | 898 | } |
899 | 899 | ||
900 | return IRQ_HANDLED; | 900 | return IRQ_HANDLED; |
901 | } | 901 | } |
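Both scan loops above use the same idiom: ffs() returns the 1-based index of the lowest set bit, so each pass peels one pending channel off the local copy of the status word. Reduced to its core, with handle_channel() as a made-up stand-in for the per-channel work:

	while (status) {
		int bit = ffs(status) - 1;	/* lowest pending channel */

		status &= ~(1 << bit);		/* drop it from the local copy */
		handle_channel(bit);		/* hypothetical per-channel work */
	}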
902 | 902 | ||
903 | static int coh901318_alloc_chan_resources(struct dma_chan *chan) | 903 | static int coh901318_alloc_chan_resources(struct dma_chan *chan) |
904 | { | 904 | { |
905 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 905 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
906 | unsigned long flags; | 906 | unsigned long flags; |
907 | 907 | ||
908 | dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n", | 908 | dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n", |
909 | __func__, cohc->id); | 909 | __func__, cohc->id); |
910 | 910 | ||
911 | if (chan->client_count > 1) | 911 | if (chan->client_count > 1) |
912 | return -EBUSY; | 912 | return -EBUSY; |
913 | 913 | ||
914 | spin_lock_irqsave(&cohc->lock, flags); | 914 | spin_lock_irqsave(&cohc->lock, flags); |
915 | 915 | ||
916 | coh901318_config(cohc, NULL); | 916 | coh901318_config(cohc, NULL); |
917 | 917 | ||
918 | cohc->allocated = 1; | 918 | cohc->allocated = 1; |
919 | cohc->completed = chan->cookie = 1; | 919 | cohc->completed = chan->cookie = 1; |
920 | 920 | ||
921 | spin_unlock_irqrestore(&cohc->lock, flags); | 921 | spin_unlock_irqrestore(&cohc->lock, flags); |
922 | 922 | ||
923 | return 1; | 923 | return 1; |
924 | } | 924 | } |
925 | 925 | ||
926 | static void | 926 | static void |
927 | coh901318_free_chan_resources(struct dma_chan *chan) | 927 | coh901318_free_chan_resources(struct dma_chan *chan) |
928 | { | 928 | { |
929 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 929 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
930 | int channel = cohc->id; | 930 | int channel = cohc->id; |
931 | unsigned long flags; | 931 | unsigned long flags; |
932 | 932 | ||
933 | spin_lock_irqsave(&cohc->lock, flags); | 933 | spin_lock_irqsave(&cohc->lock, flags); |
934 | 934 | ||
935 | /* Disable HW */ | 935 | /* Disable HW */ |
936 | writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG + | 936 | writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CFG + |
937 | COH901318_CX_CFG_SPACING*channel); | 937 | COH901318_CX_CFG_SPACING*channel); |
938 | writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL + | 938 | writel(0x00000000U, cohc->base->virtbase + COH901318_CX_CTRL + |
939 | COH901318_CX_CTRL_SPACING*channel); | 939 | COH901318_CX_CTRL_SPACING*channel); |
940 | 940 | ||
941 | cohc->allocated = 0; | 941 | cohc->allocated = 0; |
942 | 942 | ||
943 | spin_unlock_irqrestore(&cohc->lock, flags); | 943 | spin_unlock_irqrestore(&cohc->lock, flags); |
944 | 944 | ||
945 | chan->device->device_control(chan, DMA_TERMINATE_ALL); | 945 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); |
946 | } | 946 | } |
947 | 947 | ||
948 | 948 | ||
949 | static dma_cookie_t | 949 | static dma_cookie_t |
950 | coh901318_tx_submit(struct dma_async_tx_descriptor *tx) | 950 | coh901318_tx_submit(struct dma_async_tx_descriptor *tx) |
951 | { | 951 | { |
952 | struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc, | 952 | struct coh901318_desc *cohd = container_of(tx, struct coh901318_desc, |
953 | desc); | 953 | desc); |
954 | struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); | 954 | struct coh901318_chan *cohc = to_coh901318_chan(tx->chan); |
955 | unsigned long flags; | 955 | unsigned long flags; |
956 | 956 | ||
957 | spin_lock_irqsave(&cohc->lock, flags); | 957 | spin_lock_irqsave(&cohc->lock, flags); |
958 | 958 | ||
959 | tx->cookie = coh901318_assign_cookie(cohc, cohd); | 959 | tx->cookie = coh901318_assign_cookie(cohc, cohd); |
960 | 960 | ||
961 | coh901318_desc_queue(cohc, cohd); | 961 | coh901318_desc_queue(cohc, cohd); |
962 | 962 | ||
963 | spin_unlock_irqrestore(&cohc->lock, flags); | 963 | spin_unlock_irqrestore(&cohc->lock, flags); |
964 | 964 | ||
965 | return tx->cookie; | 965 | return tx->cookie; |
966 | } | 966 | } |
967 | 967 | ||
968 | static struct dma_async_tx_descriptor * | 968 | static struct dma_async_tx_descriptor * |
969 | coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 969 | coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
970 | size_t size, unsigned long flags) | 970 | size_t size, unsigned long flags) |
971 | { | 971 | { |
972 | struct coh901318_lli *lli; | 972 | struct coh901318_lli *lli; |
973 | struct coh901318_desc *cohd; | 973 | struct coh901318_desc *cohd; |
974 | unsigned long flg; | 974 | unsigned long flg; |
975 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 975 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
976 | int lli_len; | 976 | int lli_len; |
977 | u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; | 977 | u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; |
978 | int ret; | 978 | int ret; |
979 | 979 | ||
980 | spin_lock_irqsave(&cohc->lock, flg); | 980 | spin_lock_irqsave(&cohc->lock, flg); |
981 | 981 | ||
982 | dev_vdbg(COHC_2_DEV(cohc), | 982 | dev_vdbg(COHC_2_DEV(cohc), |
983 | "[%s] channel %d src 0x%x dest 0x%x size %d\n", | 983 | "[%s] channel %d src 0x%x dest 0x%x size %d\n", |
984 | __func__, cohc->id, src, dest, size); | 984 | __func__, cohc->id, src, dest, size); |
985 | 985 | ||
986 | if (flags & DMA_PREP_INTERRUPT) | 986 | if (flags & DMA_PREP_INTERRUPT) |
987 | /* Trigger interrupt after last lli */ | 987 | /* Trigger interrupt after last lli */ |
988 | ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; | 988 | ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; |
989 | 989 | ||
990 | lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT; | 990 | lli_len = size >> MAX_DMA_PACKET_SIZE_SHIFT; |
991 | if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size) | 991 | if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size) |
992 | lli_len++; | 992 | lli_len++; |
993 | 993 | ||
994 | lli = coh901318_lli_alloc(&cohc->base->pool, lli_len); | 994 | lli = coh901318_lli_alloc(&cohc->base->pool, lli_len); |
995 | 995 | ||
996 | if (lli == NULL) | 996 | if (lli == NULL) |
997 | goto err; | 997 | goto err; |
998 | 998 | ||
999 | ret = coh901318_lli_fill_memcpy( | 999 | ret = coh901318_lli_fill_memcpy( |
1000 | &cohc->base->pool, lli, src, size, dest, | 1000 | &cohc->base->pool, lli, src, size, dest, |
1001 | cohc_chan_param(cohc)->ctrl_lli_chained, | 1001 | cohc_chan_param(cohc)->ctrl_lli_chained, |
1002 | ctrl_last); | 1002 | ctrl_last); |
1003 | if (ret) | 1003 | if (ret) |
1004 | goto err; | 1004 | goto err; |
1005 | 1005 | ||
1006 | COH_DBG(coh901318_list_print(cohc, lli)); | 1006 | COH_DBG(coh901318_list_print(cohc, lli)); |
1007 | 1007 | ||
1008 | /* Pick a descriptor to handle this transfer */ | 1008 | /* Pick a descriptor to handle this transfer */ |
1009 | cohd = coh901318_desc_get(cohc); | 1009 | cohd = coh901318_desc_get(cohc); |
1010 | cohd->lli = lli; | 1010 | cohd->lli = lli; |
1011 | cohd->flags = flags; | 1011 | cohd->flags = flags; |
1012 | cohd->desc.tx_submit = coh901318_tx_submit; | 1012 | cohd->desc.tx_submit = coh901318_tx_submit; |
1013 | 1013 | ||
1014 | spin_unlock_irqrestore(&cohc->lock, flg); | 1014 | spin_unlock_irqrestore(&cohc->lock, flg); |
1015 | 1015 | ||
1016 | return &cohd->desc; | 1016 | return &cohd->desc; |
1017 | err: | 1017 | err: |
1018 | spin_unlock_irqrestore(&cohc->lock, flg); | 1018 | spin_unlock_irqrestore(&cohc->lock, flg); |
1019 | return NULL; | 1019 | return NULL; |
1020 | } | 1020 | } |
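The lli_len computation above is a ceiling division, yielding ceil(size / 2^MAX_DMA_PACKET_SIZE_SHIFT); with the kernel's DIV_ROUND_UP helper the same two lines could be written as:

	lli_len = DIV_ROUND_UP(size, 1 << MAX_DMA_PACKET_SIZE_SHIFT);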
1021 | 1021 | ||
1022 | static struct dma_async_tx_descriptor * | 1022 | static struct dma_async_tx_descriptor * |
1023 | coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 1023 | coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
1024 | unsigned int sg_len, enum dma_data_direction direction, | 1024 | unsigned int sg_len, enum dma_data_direction direction, |
1025 | unsigned long flags) | 1025 | unsigned long flags) |
1026 | { | 1026 | { |
1027 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 1027 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1028 | struct coh901318_lli *lli; | 1028 | struct coh901318_lli *lli; |
1029 | struct coh901318_desc *cohd; | 1029 | struct coh901318_desc *cohd; |
1030 | const struct coh901318_params *params; | 1030 | const struct coh901318_params *params; |
1031 | struct scatterlist *sg; | 1031 | struct scatterlist *sg; |
1032 | int len = 0; | 1032 | int len = 0; |
1033 | int size; | 1033 | int size; |
1034 | int i; | 1034 | int i; |
1035 | u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; | 1035 | u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained; |
1036 | u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; | 1036 | u32 ctrl = cohc_chan_param(cohc)->ctrl_lli; |
1037 | u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; | 1037 | u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last; |
1038 | u32 config; | 1038 | u32 config; |
1039 | unsigned long flg; | 1039 | unsigned long flg; |
1040 | int ret; | 1040 | int ret; |
1041 | 1041 | ||
1042 | if (!sgl) | 1042 | if (!sgl) |
1043 | goto out; | 1043 | goto out; |
1044 | if (sgl->length == 0) | 1044 | if (sgl->length == 0) |
1045 | goto out; | 1045 | goto out; |
1046 | 1046 | ||
1047 | spin_lock_irqsave(&cohc->lock, flg); | 1047 | spin_lock_irqsave(&cohc->lock, flg); |
1048 | 1048 | ||
1049 | dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n", | 1049 | dev_vdbg(COHC_2_DEV(cohc), "[%s] sg_len %d dir %d\n", |
1050 | __func__, sg_len, direction); | 1050 | __func__, sg_len, direction); |
1051 | 1051 | ||
1052 | if (flags & DMA_PREP_INTERRUPT) | 1052 | if (flags & DMA_PREP_INTERRUPT) |
1053 | /* Trigger interrupt after last lli */ | 1053 | /* Trigger interrupt after last lli */ |
1054 | ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; | 1054 | ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE; |
1055 | 1055 | ||
1056 | params = cohc_chan_param(cohc); | 1056 | params = cohc_chan_param(cohc); |
1057 | config = params->config; | 1057 | config = params->config; |
1058 | 1058 | ||
1059 | if (direction == DMA_TO_DEVICE) { | 1059 | if (direction == DMA_TO_DEVICE) { |
1060 | u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | | 1060 | u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE | |
1061 | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; | 1061 | COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE; |
1062 | 1062 | ||
1063 | config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY; | 1063 | config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY; |
1064 | ctrl_chained |= tx_flags; | 1064 | ctrl_chained |= tx_flags; |
1065 | ctrl_last |= tx_flags; | 1065 | ctrl_last |= tx_flags; |
1066 | ctrl |= tx_flags; | 1066 | ctrl |= tx_flags; |
1067 | } else if (direction == DMA_FROM_DEVICE) { | 1067 | } else if (direction == DMA_FROM_DEVICE) { |
1068 | u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | | 1068 | u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST | |
1069 | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; | 1069 | COH901318_CX_CTRL_DST_ADDR_INC_ENABLE; |
1070 | 1070 | ||
1071 | config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY; | 1071 | config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY; |
1072 | ctrl_chained |= rx_flags; | 1072 | ctrl_chained |= rx_flags; |
1073 | ctrl_last |= rx_flags; | 1073 | ctrl_last |= rx_flags; |
1074 | ctrl |= rx_flags; | 1074 | ctrl |= rx_flags; |
1075 | } else | 1075 | } else |
1076 | goto err_direction; | 1076 | goto err_direction; |
1077 | 1077 | ||
1078 | coh901318_set_conf(cohc, config); | 1078 | coh901318_set_conf(cohc, config); |
1079 | 1079 | ||
1080 | /* The DMA controller only supports transfers up to | 1080 | /* The DMA controller only supports transfers up to |
1081 | * MAX_DMA_PACKET_SIZE. Calculate the total number of | 1081 | * MAX_DMA_PACKET_SIZE. Calculate the total number of |
1082 | * dma elements required to send the entire sg list | 1082 | * dma elements required to send the entire sg list |
1083 | */ | 1083 | */ |
1084 | for_each_sg(sgl, sg, sg_len, i) { | 1084 | for_each_sg(sgl, sg, sg_len, i) { |
1085 | unsigned int factor; | 1085 | unsigned int factor; |
1086 | size = sg_dma_len(sg); | 1086 | size = sg_dma_len(sg); |
1087 | 1087 | ||
1088 | if (size <= MAX_DMA_PACKET_SIZE) { | 1088 | if (size <= MAX_DMA_PACKET_SIZE) { |
1089 | len++; | 1089 | len++; |
1090 | continue; | 1090 | continue; |
1091 | } | 1091 | } |
1092 | 1092 | ||
1093 | factor = size >> MAX_DMA_PACKET_SIZE_SHIFT; | 1093 | factor = size >> MAX_DMA_PACKET_SIZE_SHIFT; |
1094 | if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size) | 1094 | if ((factor << MAX_DMA_PACKET_SIZE_SHIFT) < size) |
1095 | factor++; | 1095 | factor++; |
1096 | 1096 | ||
1097 | len += factor; | 1097 | len += factor; |
1098 | } | 1098 | } |
1099 | 1099 | ||
1100 | pr_debug("Allocate %d lli:s for this transfer\n", len); | 1100 | pr_debug("Allocate %d lli:s for this transfer\n", len); |
1101 | lli = coh901318_lli_alloc(&cohc->base->pool, len); | 1101 | lli = coh901318_lli_alloc(&cohc->base->pool, len); |
1102 | 1102 | ||
1103 | if (lli == NULL) | 1103 | if (lli == NULL) |
1104 | goto err_dma_alloc; | 1104 | goto err_dma_alloc; |
1105 | 1105 | ||
1106 | /* initiate allocated lli list */ | 1106 | /* initiate allocated lli list */ |
1107 | ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len, | 1107 | ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len, |
1108 | cohc_dev_addr(cohc), | 1108 | cohc_dev_addr(cohc), |
1109 | ctrl_chained, | 1109 | ctrl_chained, |
1110 | ctrl, | 1110 | ctrl, |
1111 | ctrl_last, | 1111 | ctrl_last, |
1112 | direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); | 1112 | direction, COH901318_CX_CTRL_TC_IRQ_ENABLE); |
1113 | if (ret) | 1113 | if (ret) |
1114 | goto err_lli_fill; | 1114 | goto err_lli_fill; |
1115 | 1115 | ||
1116 | COH_DBG(coh901318_list_print(cohc, lli)); | 1116 | COH_DBG(coh901318_list_print(cohc, lli)); |
1117 | 1117 | ||
1118 | /* Pick a descriptor to handle this transfer */ | 1118 | /* Pick a descriptor to handle this transfer */ |
1119 | cohd = coh901318_desc_get(cohc); | 1119 | cohd = coh901318_desc_get(cohc); |
1120 | cohd->dir = direction; | 1120 | cohd->dir = direction; |
1121 | cohd->flags = flags; | 1121 | cohd->flags = flags; |
1122 | cohd->desc.tx_submit = coh901318_tx_submit; | 1122 | cohd->desc.tx_submit = coh901318_tx_submit; |
1123 | cohd->lli = lli; | 1123 | cohd->lli = lli; |
1124 | 1124 | ||
1125 | spin_unlock_irqrestore(&cohc->lock, flg); | 1125 | spin_unlock_irqrestore(&cohc->lock, flg); |
1126 | 1126 | ||
1127 | return &cohd->desc; | 1127 | return &cohd->desc; |
1128 | err_lli_fill: | 1128 | err_lli_fill: |
1129 | err_dma_alloc: | 1129 | err_dma_alloc: |
1130 | err_direction: | 1130 | err_direction: |
1131 | spin_unlock_irqrestore(&cohc->lock, flg); | 1131 | spin_unlock_irqrestore(&cohc->lock, flg); |
1132 | out: | 1132 | out: |
1133 | return NULL; | 1133 | return NULL; |
1134 | } | 1134 | } |
1135 | 1135 | ||
1136 | static enum dma_status | 1136 | static enum dma_status |
1137 | coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 1137 | coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
1138 | struct dma_tx_state *txstate) | 1138 | struct dma_tx_state *txstate) |
1139 | { | 1139 | { |
1140 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 1140 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1141 | dma_cookie_t last_used; | 1141 | dma_cookie_t last_used; |
1142 | dma_cookie_t last_complete; | 1142 | dma_cookie_t last_complete; |
1143 | int ret; | 1143 | int ret; |
1144 | 1144 | ||
1145 | last_complete = cohc->completed; | 1145 | last_complete = cohc->completed; |
1146 | last_used = chan->cookie; | 1146 | last_used = chan->cookie; |
1147 | 1147 | ||
1148 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 1148 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
1149 | 1149 | ||
1150 | dma_set_tx_state(txstate, last_complete, last_used, | 1150 | dma_set_tx_state(txstate, last_complete, last_used, |
1151 | coh901318_get_bytes_left(chan)); | 1151 | coh901318_get_bytes_left(chan)); |
1152 | if (ret == DMA_IN_PROGRESS && cohc->stopped) | 1152 | if (ret == DMA_IN_PROGRESS && cohc->stopped) |
1153 | ret = DMA_PAUSED; | 1153 | ret = DMA_PAUSED; |
1154 | 1154 | ||
1155 | return ret; | 1155 | return ret; |
1156 | } | 1156 | } |
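The residue passed to dma_set_tx_state() above is what a client sees when it polls the channel. A sketch of such a poll through the tx_status hook, assuming cookie came from an earlier tx_submit on this channel:

	struct dma_tx_state state;
	enum dma_status status;

	status = chan->device->device_tx_status(chan, cookie, &state);
	if (status == DMA_PAUSED)
		pr_info("paused, %u bytes still to transfer\n", state.residue);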
1157 | 1157 | ||
1158 | static void | 1158 | static void |
1159 | coh901318_issue_pending(struct dma_chan *chan) | 1159 | coh901318_issue_pending(struct dma_chan *chan) |
1160 | { | 1160 | { |
1161 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 1161 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1162 | unsigned long flags; | 1162 | unsigned long flags; |
1163 | 1163 | ||
1164 | spin_lock_irqsave(&cohc->lock, flags); | 1164 | spin_lock_irqsave(&cohc->lock, flags); |
1165 | 1165 | ||
1166 | /* | 1166 | /* |
1167 | * Busy means that pending jobs are already being processed, | 1167 | * Busy means that pending jobs are already being processed, |
1168 | * and then there is no point in starting the queue: the | 1168 | * and then there is no point in starting the queue: the |
1169 | * terminal count interrupt on the channel will take the next | 1169 | * terminal count interrupt on the channel will take the next |
1170 | * job on the queue and execute it anyway. | 1170 | * job on the queue and execute it anyway. |
1171 | */ | 1171 | */ |
1172 | if (!cohc->busy) | 1172 | if (!cohc->busy) |
1173 | coh901318_queue_start(cohc); | 1173 | coh901318_queue_start(cohc); |
1174 | 1174 | ||
1175 | spin_unlock_irqrestore(&cohc->lock, flags); | 1175 | spin_unlock_irqrestore(&cohc->lock, flags); |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | static int | 1178 | static int |
1179 | coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) | 1179 | coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
1180 | unsigned long arg) | ||
1180 | { | 1181 | { |
1181 | unsigned long flags; | 1182 | unsigned long flags; |
1182 | struct coh901318_chan *cohc = to_coh901318_chan(chan); | 1183 | struct coh901318_chan *cohc = to_coh901318_chan(chan); |
1183 | struct coh901318_desc *cohd; | 1184 | struct coh901318_desc *cohd; |
1184 | void __iomem *virtbase = cohc->base->virtbase; | 1185 | void __iomem *virtbase = cohc->base->virtbase; |
1185 | 1186 | ||
1186 | if (cmd == DMA_PAUSE) { | 1187 | if (cmd == DMA_PAUSE) { |
1187 | coh901318_pause(chan); | 1188 | coh901318_pause(chan); |
1188 | return 0; | 1189 | return 0; |
1189 | } | 1190 | } |
1190 | 1191 | ||
1191 | if (cmd == DMA_RESUME) { | 1192 | if (cmd == DMA_RESUME) { |
1192 | coh901318_resume(chan); | 1193 | coh901318_resume(chan); |
1193 | return 0; | 1194 | return 0; |
1194 | } | 1195 | } |
1195 | 1196 | ||
1196 | if (cmd != DMA_TERMINATE_ALL) | 1197 | if (cmd != DMA_TERMINATE_ALL) |
1197 | return -ENXIO; | 1198 | return -ENXIO; |
1198 | 1199 | ||
1199 | /* The remainder of this function terminates the transfer */ | 1200 | /* The remainder of this function terminates the transfer */ |
1200 | coh901318_pause(chan); | 1201 | coh901318_pause(chan); |
1201 | spin_lock_irqsave(&cohc->lock, flags); | 1202 | spin_lock_irqsave(&cohc->lock, flags); |
1202 | 1203 | ||
1203 | /* Clear any pending BE or TC interrupt */ | 1204 | /* Clear any pending BE or TC interrupt */ |
1204 | if (cohc->id < 32) { | 1205 | if (cohc->id < 32) { |
1205 | writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1); | 1206 | writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1); |
1206 | writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1); | 1207 | writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1); |
1207 | } else { | 1208 | } else { |
1208 | writel(1 << (cohc->id - 32), virtbase + | 1209 | writel(1 << (cohc->id - 32), virtbase + |
1209 | COH901318_BE_INT_CLEAR2); | 1210 | COH901318_BE_INT_CLEAR2); |
1210 | writel(1 << (cohc->id - 32), virtbase + | 1211 | writel(1 << (cohc->id - 32), virtbase + |
1211 | COH901318_TC_INT_CLEAR2); | 1212 | COH901318_TC_INT_CLEAR2); |
1212 | } | 1213 | } |
1213 | 1214 | ||
1214 | enable_powersave(cohc); | 1215 | enable_powersave(cohc); |
1215 | 1216 | ||
1216 | while ((cohd = coh901318_first_active_get(cohc))) { | 1217 | while ((cohd = coh901318_first_active_get(cohc))) { |
1217 | /* release the lli allocation*/ | 1218 | /* release the lli allocation*/ |
1218 | coh901318_lli_free(&cohc->base->pool, &cohd->lli); | 1219 | coh901318_lli_free(&cohc->base->pool, &cohd->lli); |
1219 | 1220 | ||
1220 | /* return desc to free-list */ | 1221 | /* return desc to free-list */ |
1221 | coh901318_desc_remove(cohd); | 1222 | coh901318_desc_remove(cohd); |
1222 | coh901318_desc_free(cohc, cohd); | 1223 | coh901318_desc_free(cohc, cohd); |
1223 | } | 1224 | } |
1224 | 1225 | ||
1225 | while ((cohd = coh901318_first_queued(cohc))) { | 1226 | while ((cohd = coh901318_first_queued(cohc))) { |
1226 | /* release the lli allocation*/ | 1227 | /* release the lli allocation*/ |
1227 | coh901318_lli_free(&cohc->base->pool, &cohd->lli); | 1228 | coh901318_lli_free(&cohc->base->pool, &cohd->lli); |
1228 | 1229 | ||
1229 | /* return desc to free-list */ | 1230 | /* return desc to free-list */ |
1230 | coh901318_desc_remove(cohd); | 1231 | coh901318_desc_remove(cohd); |
1231 | coh901318_desc_free(cohc, cohd); | 1232 | coh901318_desc_free(cohc, cohd); |
1232 | } | 1233 | } |
1233 | 1234 | ||
1234 | 1235 | ||
1235 | cohc->nbr_active_done = 0; | 1236 | cohc->nbr_active_done = 0; |
1236 | cohc->busy = 0; | 1237 | cohc->busy = 0; |
1237 | 1238 | ||
1238 | spin_unlock_irqrestore(&cohc->lock, flags); | 1239 | spin_unlock_irqrestore(&cohc->lock, flags); |
1239 | 1240 | ||
1240 | return 0; | 1241 | return 0; |
1241 | } | 1242 | } |
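coh901318_control() is the driver's device_control hook, and its signature is where the new ioctl()-style argument added by this commit lands; commands that carry no payload simply pass 0, as the free_chan_resources conversion above already shows. A sketch of the post-conversion call sites:

	/* After this patch the control hook takes three arguments. */
	chan->device->device_control(chan, DMA_PAUSE, 0);
	chan->device->device_control(chan, DMA_RESUME, 0);
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	/* Any other command makes this driver return -ENXIO. */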
1242 | void coh901318_base_init(struct dma_device *dma, const int *pick_chans, | 1243 | void coh901318_base_init(struct dma_device *dma, const int *pick_chans, |
1243 | struct coh901318_base *base) | 1244 | struct coh901318_base *base) |
1244 | { | 1245 | { |
1245 | int chans_i; | 1246 | int chans_i; |
1246 | int i = 0; | 1247 | int i = 0; |
1247 | struct coh901318_chan *cohc; | 1248 | struct coh901318_chan *cohc; |
1248 | 1249 | ||
1249 | INIT_LIST_HEAD(&dma->channels); | 1250 | INIT_LIST_HEAD(&dma->channels); |
1250 | 1251 | ||
1251 | for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { | 1252 | for (chans_i = 0; pick_chans[chans_i] != -1; chans_i += 2) { |
1252 | for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { | 1253 | for (i = pick_chans[chans_i]; i <= pick_chans[chans_i+1]; i++) { |
1253 | cohc = &base->chans[i]; | 1254 | cohc = &base->chans[i]; |
1254 | 1255 | ||
1255 | cohc->base = base; | 1256 | cohc->base = base; |
1256 | cohc->chan.device = dma; | 1257 | cohc->chan.device = dma; |
1257 | cohc->id = i; | 1258 | cohc->id = i; |
1258 | 1259 | ||
1259 | /* TODO: do we really need this lock if only one | 1260 | /* TODO: do we really need this lock if only one |
1260 | * client is connected to each channel? | 1261 | * client is connected to each channel? |
1261 | */ | 1262 | */ |
1262 | 1263 | ||
1263 | spin_lock_init(&cohc->lock); | 1264 | spin_lock_init(&cohc->lock); |
1264 | 1265 | ||
1265 | cohc->nbr_active_done = 0; | 1266 | cohc->nbr_active_done = 0; |
1266 | cohc->busy = 0; | 1267 | cohc->busy = 0; |
1267 | INIT_LIST_HEAD(&cohc->free); | 1268 | INIT_LIST_HEAD(&cohc->free); |
1268 | INIT_LIST_HEAD(&cohc->active); | 1269 | INIT_LIST_HEAD(&cohc->active); |
1269 | INIT_LIST_HEAD(&cohc->queue); | 1270 | INIT_LIST_HEAD(&cohc->queue); |
1270 | 1271 | ||
1271 | tasklet_init(&cohc->tasklet, dma_tasklet, | 1272 | tasklet_init(&cohc->tasklet, dma_tasklet, |
1272 | (unsigned long) cohc); | 1273 | (unsigned long) cohc); |
1273 | 1274 | ||
1274 | list_add_tail(&cohc->chan.device_node, | 1275 | list_add_tail(&cohc->chan.device_node, |
1275 | &dma->channels); | 1276 | &dma->channels); |
1276 | } | 1277 | } |
1277 | } | 1278 | } |
1278 | } | 1279 | } |
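The pick_chans array walked by coh901318_base_init() encodes channel ranges as (first, last) pairs, inclusive at both ends and terminated by -1. A hypothetical platform array, with numbers invented for illustration:

	static const int example_pick_chans[] = {
		0, 7,	/* channels 0..7 */
		12, 12,	/* a single channel, 12 */
		-1	/* terminator */
	};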
1279 | 1280 | ||
1280 | static int __init coh901318_probe(struct platform_device *pdev) | 1281 | static int __init coh901318_probe(struct platform_device *pdev) |
1281 | { | 1282 | { |
1282 | int err = 0; | 1283 | int err = 0; |
1283 | struct coh901318_platform *pdata; | 1284 | struct coh901318_platform *pdata; |
1284 | struct coh901318_base *base; | 1285 | struct coh901318_base *base; |
1285 | int irq; | 1286 | int irq; |
1286 | struct resource *io; | 1287 | struct resource *io; |
1287 | 1288 | ||
1288 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1289 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1289 | if (!io) | 1290 | if (!io) |
1290 | goto err_get_resource; | 1291 | goto err_get_resource; |
1291 | 1292 | ||
1292 | /* Map DMA controller registers to virtual memory */ | 1293 | /* Map DMA controller registers to virtual memory */ |
1293 | if (request_mem_region(io->start, | 1294 | if (request_mem_region(io->start, |
1294 | resource_size(io), | 1295 | resource_size(io), |
1295 | pdev->dev.driver->name) == NULL) { | 1296 | pdev->dev.driver->name) == NULL) { |
1296 | err = -EBUSY; | 1297 | err = -EBUSY; |
1297 | goto err_request_mem; | 1298 | goto err_request_mem; |
1298 | } | 1299 | } |
1299 | 1300 | ||
1300 | pdata = pdev->dev.platform_data; | 1301 | pdata = pdev->dev.platform_data; |
1301 | if (!pdata) | 1302 | if (!pdata) |
1302 | goto err_no_platformdata; | 1303 | goto err_no_platformdata; |
1303 | 1304 | ||
1304 | base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) + | 1305 | base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) + |
1305 | pdata->max_channels * | 1306 | pdata->max_channels * |
1306 | sizeof(struct coh901318_chan), | 1307 | sizeof(struct coh901318_chan), |
1307 | GFP_KERNEL); | 1308 | GFP_KERNEL); |
1308 | if (!base) | 1309 | if (!base) |
1309 | goto err_alloc_coh_dma_channels; | 1310 | goto err_alloc_coh_dma_channels; |
1310 | 1311 | ||
1311 | base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); | 1312 | base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); |
1312 | 1313 | ||
1313 | base->virtbase = ioremap(io->start, resource_size(io)); | 1314 | base->virtbase = ioremap(io->start, resource_size(io)); |
1314 | if (!base->virtbase) { | 1315 | if (!base->virtbase) { |
1315 | err = -ENOMEM; | 1316 | err = -ENOMEM; |
1316 | goto err_no_ioremap; | 1317 | goto err_no_ioremap; |
1317 | } | 1318 | } |
1318 | 1319 | ||
1319 | base->dev = &pdev->dev; | 1320 | base->dev = &pdev->dev; |
1320 | base->platform = pdata; | 1321 | base->platform = pdata; |
1321 | spin_lock_init(&base->pm.lock); | 1322 | spin_lock_init(&base->pm.lock); |
1322 | base->pm.started_channels = 0; | 1323 | base->pm.started_channels = 0; |
1323 | 1324 | ||
1324 | COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); | 1325 | COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); |
1325 | 1326 | ||
1326 | platform_set_drvdata(pdev, base); | 1327 | platform_set_drvdata(pdev, base); |
1327 | 1328 | ||
1328 | irq = platform_get_irq(pdev, 0); | 1329 | irq = platform_get_irq(pdev, 0); |
1329 | if (irq < 0) | 1330 | if (irq < 0) |
1330 | goto err_no_irq; | 1331 | goto err_no_irq; |
1331 | 1332 | ||
1332 | err = request_irq(irq, dma_irq_handler, IRQF_DISABLED, | 1333 | err = request_irq(irq, dma_irq_handler, IRQF_DISABLED, |
1333 | "coh901318", base); | 1334 | "coh901318", base); |
1334 | if (err) { | 1335 | if (err) { |
1335 | dev_crit(&pdev->dev, | 1336 | dev_crit(&pdev->dev, |
1336 | "Cannot allocate IRQ for DMA controller!\n"); | 1337 | "Cannot allocate IRQ for DMA controller!\n"); |
1337 | goto err_request_irq; | 1338 | goto err_request_irq; |
1338 | } | 1339 | } |
1339 | 1340 | ||
1340 | err = coh901318_pool_create(&base->pool, &pdev->dev, | 1341 | err = coh901318_pool_create(&base->pool, &pdev->dev, |
1341 | sizeof(struct coh901318_lli), | 1342 | sizeof(struct coh901318_lli), |
1342 | 32); | 1343 | 32); |
1343 | if (err) | 1344 | if (err) |
1344 | goto err_pool_create; | 1345 | goto err_pool_create; |
1345 | 1346 | ||
1346 | /* init channels for device transfers */ | 1347 | /* init channels for device transfers */ |
1347 | coh901318_base_init(&base->dma_slave, base->platform->chans_slave, | 1348 | coh901318_base_init(&base->dma_slave, base->platform->chans_slave, |
1348 | base); | 1349 | base); |
1349 | 1350 | ||
1350 | dma_cap_zero(base->dma_slave.cap_mask); | 1351 | dma_cap_zero(base->dma_slave.cap_mask); |
1351 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | 1352 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); |
1352 | 1353 | ||
1353 | base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; | 1354 | base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; |
1354 | base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; | 1355 | base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; |
1355 | base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; | 1356 | base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; |
1356 | base->dma_slave.device_tx_status = coh901318_tx_status; | 1357 | base->dma_slave.device_tx_status = coh901318_tx_status; |
1357 | base->dma_slave.device_issue_pending = coh901318_issue_pending; | 1358 | base->dma_slave.device_issue_pending = coh901318_issue_pending; |
1358 | base->dma_slave.device_control = coh901318_control; | 1359 | base->dma_slave.device_control = coh901318_control; |
1359 | base->dma_slave.dev = &pdev->dev; | 1360 | base->dma_slave.dev = &pdev->dev; |
1360 | 1361 | ||
1361 | err = dma_async_device_register(&base->dma_slave); | 1362 | err = dma_async_device_register(&base->dma_slave); |
1362 | 1363 | ||
1363 | if (err) | 1364 | if (err) |
1364 | goto err_register_slave; | 1365 | goto err_register_slave; |
1365 | 1366 | ||
1366 | /* init channels for memcpy */ | 1367 | /* init channels for memcpy */ |
1367 | coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy, | 1368 | coh901318_base_init(&base->dma_memcpy, base->platform->chans_memcpy, |
1368 | base); | 1369 | base); |
1369 | 1370 | ||
1370 | dma_cap_zero(base->dma_memcpy.cap_mask); | 1371 | dma_cap_zero(base->dma_memcpy.cap_mask); |
1371 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 1372 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
1372 | 1373 | ||
1373 | base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; | 1374 | base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; |
1374 | base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; | 1375 | base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; |
1375 | base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; | 1376 | base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; |
1376 | base->dma_memcpy.device_tx_status = coh901318_tx_status; | 1377 | base->dma_memcpy.device_tx_status = coh901318_tx_status; |
1377 | base->dma_memcpy.device_issue_pending = coh901318_issue_pending; | 1378 | base->dma_memcpy.device_issue_pending = coh901318_issue_pending; |
1378 | base->dma_memcpy.device_control = coh901318_control; | 1379 | base->dma_memcpy.device_control = coh901318_control; |
1379 | base->dma_memcpy.dev = &pdev->dev; | 1380 | base->dma_memcpy.dev = &pdev->dev; |
1380 | /* | 1381 | /* |
1381 | * This controller can only access addresses on even 32-bit | 1382 | * This controller can only access addresses on even 32-bit |
1382 | * boundaries, i.e. aligned to 2^2 bytes | 1383 | * boundaries, i.e. aligned to 2^2 bytes |
1383 | */ | 1384 | */ |
1384 | base->dma_memcpy.copy_align = 2; | 1385 | base->dma_memcpy.copy_align = 2; |
1385 | err = dma_async_device_register(&base->dma_memcpy); | 1386 | err = dma_async_device_register(&base->dma_memcpy); |
1386 | 1387 | ||
1387 | if (err) | 1388 | if (err) |
1388 | goto err_register_memcpy; | 1389 | goto err_register_memcpy; |
1389 | 1390 | ||
1390 | dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", | 1391 | dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", |
1391 | (u32) base->virtbase); | 1392 | (u32) base->virtbase); |
1392 | 1393 | ||
1393 | return err; | 1394 | return err; |
1394 | 1395 | ||
1395 | err_register_memcpy: | 1396 | err_register_memcpy: |
1396 | dma_async_device_unregister(&base->dma_slave); | 1397 | dma_async_device_unregister(&base->dma_slave); |
1397 | err_register_slave: | 1398 | err_register_slave: |
1398 | coh901318_pool_destroy(&base->pool); | 1399 | coh901318_pool_destroy(&base->pool); |
1399 | err_pool_create: | 1400 | err_pool_create: |
1400 | free_irq(platform_get_irq(pdev, 0), base); | 1401 | free_irq(platform_get_irq(pdev, 0), base); |
1401 | err_request_irq: | 1402 | err_request_irq: |
1402 | err_no_irq: | 1403 | err_no_irq: |
1403 | iounmap(base->virtbase); | 1404 | iounmap(base->virtbase); |
1404 | err_no_ioremap: | 1405 | err_no_ioremap: |
1405 | kfree(base); | 1406 | kfree(base); |
1406 | err_alloc_coh_dma_channels: | 1407 | err_alloc_coh_dma_channels: |
1407 | err_no_platformdata: | 1408 | err_no_platformdata: |
1408 | release_mem_region(pdev->resource->start, | 1409 | release_mem_region(pdev->resource->start, |
1409 | resource_size(pdev->resource)); | 1410 | resource_size(pdev->resource)); |
1410 | err_request_mem: | 1411 | err_request_mem: |
1411 | err_get_resource: | 1412 | err_get_resource: |
1412 | return err; | 1413 | return err; |
1413 | } | 1414 | } |
1414 | 1415 | ||
1415 | static int __exit coh901318_remove(struct platform_device *pdev) | 1416 | static int __exit coh901318_remove(struct platform_device *pdev) |
1416 | { | 1417 | { |
1417 | struct coh901318_base *base = platform_get_drvdata(pdev); | 1418 | struct coh901318_base *base = platform_get_drvdata(pdev); |
1418 | 1419 | ||
1419 | dma_async_device_unregister(&base->dma_memcpy); | 1420 | dma_async_device_unregister(&base->dma_memcpy); |
1420 | dma_async_device_unregister(&base->dma_slave); | 1421 | dma_async_device_unregister(&base->dma_slave); |
1421 | coh901318_pool_destroy(&base->pool); | 1422 | coh901318_pool_destroy(&base->pool); |
1422 | free_irq(platform_get_irq(pdev, 0), base); | 1423 | free_irq(platform_get_irq(pdev, 0), base); |
1423 | iounmap(base->virtbase); | 1424 | iounmap(base->virtbase); |
1424 | kfree(base); | 1425 | kfree(base); |
1425 | release_mem_region(pdev->resource->start, | 1426 | release_mem_region(pdev->resource->start, |
1426 | resource_size(pdev->resource)); | 1427 | resource_size(pdev->resource)); |
1427 | return 0; | 1428 | return 0; |
1428 | } | 1429 | } |
1429 | 1430 | ||
1430 | 1431 | ||
1431 | static struct platform_driver coh901318_driver = { | 1432 | static struct platform_driver coh901318_driver = { |
1432 | .remove = __exit_p(coh901318_remove), | 1433 | .remove = __exit_p(coh901318_remove), |
1433 | .driver = { | 1434 | .driver = { |
1434 | .name = "coh901318", | 1435 | .name = "coh901318", |
1435 | }, | 1436 | }, |
1436 | }; | 1437 | }; |
1437 | 1438 | ||
1438 | int __init coh901318_init(void) | 1439 | int __init coh901318_init(void) |
1439 | { | 1440 | { |
1440 | return platform_driver_probe(&coh901318_driver, coh901318_probe); | 1441 | return platform_driver_probe(&coh901318_driver, coh901318_probe); |
1441 | } | 1442 | } |
1442 | subsys_initcall(coh901318_init); | 1443 | subsys_initcall(coh901318_init); |
1443 | 1444 | ||
1444 | void __exit coh901318_exit(void) | 1445 | void __exit coh901318_exit(void) |
1445 | { | 1446 | { |
1446 | platform_driver_unregister(&coh901318_driver); | 1447 | platform_driver_unregister(&coh901318_driver); |
1447 | } | 1448 | } |
1448 | module_exit(coh901318_exit); | 1449 | module_exit(coh901318_exit); |
1449 | 1450 | ||
1450 | MODULE_LICENSE("GPL"); | 1451 | MODULE_LICENSE("GPL"); |
1451 | MODULE_AUTHOR("Per Friden"); | 1452 | MODULE_AUTHOR("Per Friden"); |
1452 | 1453 |
drivers/dma/dw_dmac.c
1 | /* | 1 | /* |
2 | * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on | 2 | * Driver for the Synopsys DesignWare DMA Controller (aka DMACA on |
3 | * AVR32 systems.) | 3 | * AVR32 systems.) |
4 | * | 4 | * |
5 | * Copyright (C) 2007-2008 Atmel Corporation | 5 | * Copyright (C) 2007-2008 Atmel Corporation |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
12 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
13 | #include <linux/dmaengine.h> | 13 | #include <linux/dmaengine.h> |
14 | #include <linux/dma-mapping.h> | 14 | #include <linux/dma-mapping.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | 22 | ||
23 | #include "dw_dmac_regs.h" | 23 | #include "dw_dmac_regs.h" |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", | 26 | * This supports the Synopsys "DesignWare AHB Central DMA Controller", |
27 | * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all | 27 | * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all |
28 | * of which use ARM any more). See the "Databook" from Synopsys for | 28 | * of which use ARM any more). See the "Databook" from Synopsys for |
29 | * information beyond what licensees probably provide. | 29 | * information beyond what licensees probably provide. |
30 | * | 30 | * |
31 | * The driver has currently been tested only with the Atmel AT32AP7000, | 31 | * The driver has currently been tested only with the Atmel AT32AP7000, |
32 | * which does not support descriptor writeback. | 32 | * which does not support descriptor writeback. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | /* NOTE: DMS+SMS is system-specific. We should get this information | 35 | /* NOTE: DMS+SMS is system-specific. We should get this information |
36 | * from the platform code somehow. | 36 | * from the platform code somehow. |
37 | */ | 37 | */ |
38 | #define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ | 38 | #define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ |
39 | | DWC_CTLL_SRC_MSIZE(0) \ | 39 | | DWC_CTLL_SRC_MSIZE(0) \ |
40 | | DWC_CTLL_DMS(0) \ | 40 | | DWC_CTLL_DMS(0) \ |
41 | | DWC_CTLL_SMS(1) \ | 41 | | DWC_CTLL_SMS(1) \ |
42 | | DWC_CTLL_LLP_D_EN \ | 42 | | DWC_CTLL_LLP_D_EN \ |
43 | | DWC_CTLL_LLP_S_EN) | 43 | | DWC_CTLL_LLP_S_EN) |
44 | 44 | ||
45 | /* | 45 | /* |
46 | * This is configuration-dependent and usually a funny size like 4095. | 46 | * This is configuration-dependent and usually a funny size like 4095. |
47 | * Let's round it down to the nearest power of two. | 47 | * Let's round it down to the nearest power of two. |
48 | * | 48 | * |
49 | * Note that this is a transfer count, i.e. if we transfer 32-bit | 49 | * Note that this is a transfer count, i.e. if we transfer 32-bit |
50 | * words, we can do 8192 bytes per descriptor. | 50 | * words, we can do 8192 bytes per descriptor. |
51 | * | 51 | * |
52 | * This parameter is also system-specific. | 52 | * This parameter is also system-specific. |
53 | */ | 53 | */ |
54 | #define DWC_MAX_COUNT 2048U | 54 | #define DWC_MAX_COUNT 2048U |
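Since CTL_HI holds an element count rather than a byte count, the 2048 limit translates to a different byte ceiling for each transfer width; the comment's 8192 bytes is 2048 words times 4 bytes. A throwaway user-space check of that arithmetic (illustrative only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int max_count = 2048;	/* DWC_MAX_COUNT */
	unsigned int width = 2;		/* log2(element size): 2 -> 32-bit words */

	/* bytes per descriptor = element count << width */
	printf("%u bytes per descriptor\n", max_count << width);	/* 8192 */
	return 0;
}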
55 | 55 | ||
56 | /* | 56 | /* |
57 | * Number of descriptors to allocate for each channel. This should be | 57 | * Number of descriptors to allocate for each channel. This should be |
58 | * made configurable somehow; preferably, the clients (at least the | 58 | * made configurable somehow; preferably, the clients (at least the |
59 | * ones using slave transfers) should be able to give us a hint. | 59 | * ones using slave transfers) should be able to give us a hint. |
60 | */ | 60 | */ |
61 | #define NR_DESCS_PER_CHANNEL 64 | 61 | #define NR_DESCS_PER_CHANNEL 64 |
62 | 62 | ||
63 | /*----------------------------------------------------------------------*/ | 63 | /*----------------------------------------------------------------------*/ |
64 | 64 | ||
65 | /* | 65 | /* |
66 | * Because we're not relying on writeback from the controller (it may not | 66 | * Because we're not relying on writeback from the controller (it may not |
67 | * even be configured into the core!) we don't need to use dma_pool. These | 67 | * even be configured into the core!) we don't need to use dma_pool. These |
68 | * descriptors -- and associated data -- are cacheable. We do need to make | 68 | * descriptors -- and associated data -- are cacheable. We do need to make |
69 | * sure their dcache entries are written back before handing them off to | 69 | * sure their dcache entries are written back before handing them off to |
70 | * the controller, though. | 70 | * the controller, though. |
71 | */ | 71 | */ |
72 | 72 | ||
73 | static struct device *chan2dev(struct dma_chan *chan) | 73 | static struct device *chan2dev(struct dma_chan *chan) |
74 | { | 74 | { |
75 | return &chan->dev->device; | 75 | return &chan->dev->device; |
76 | } | 76 | } |
77 | static struct device *chan2parent(struct dma_chan *chan) | 77 | static struct device *chan2parent(struct dma_chan *chan) |
78 | { | 78 | { |
79 | return chan->dev->device.parent; | 79 | return chan->dev->device.parent; |
80 | } | 80 | } |
81 | 81 | ||
82 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) | 82 | static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) |
83 | { | 83 | { |
84 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); | 84 | return list_entry(dwc->active_list.next, struct dw_desc, desc_node); |
85 | } | 85 | } |
86 | 86 | ||
87 | static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) | 87 | static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) |
88 | { | 88 | { |
89 | return list_entry(dwc->queue.next, struct dw_desc, desc_node); | 89 | return list_entry(dwc->queue.next, struct dw_desc, desc_node); |
90 | } | 90 | } |
91 | 91 | ||
92 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) | 92 | static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) |
93 | { | 93 | { |
94 | struct dw_desc *desc, *_desc; | 94 | struct dw_desc *desc, *_desc; |
95 | struct dw_desc *ret = NULL; | 95 | struct dw_desc *ret = NULL; |
96 | unsigned int i = 0; | 96 | unsigned int i = 0; |
97 | 97 | ||
98 | spin_lock_bh(&dwc->lock); | 98 | spin_lock_bh(&dwc->lock); |
99 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { | 99 | list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { |
100 | if (async_tx_test_ack(&desc->txd)) { | 100 | if (async_tx_test_ack(&desc->txd)) { |
101 | list_del(&desc->desc_node); | 101 | list_del(&desc->desc_node); |
102 | ret = desc; | 102 | ret = desc; |
103 | break; | 103 | break; |
104 | } | 104 | } |
105 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); | 105 | dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); |
106 | i++; | 106 | i++; |
107 | } | 107 | } |
108 | spin_unlock_bh(&dwc->lock); | 108 | spin_unlock_bh(&dwc->lock); |
109 | 109 | ||
110 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); | 110 | dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i); |
111 | 111 | ||
112 | return ret; | 112 | return ret; |
113 | } | 113 | } |
114 | 114 | ||
115 | static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) | 115 | static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) |
116 | { | 116 | { |
117 | struct dw_desc *child; | 117 | struct dw_desc *child; |
118 | 118 | ||
119 | list_for_each_entry(child, &desc->tx_list, desc_node) | 119 | list_for_each_entry(child, &desc->tx_list, desc_node) |
120 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), | 120 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
121 | child->txd.phys, sizeof(child->lli), | 121 | child->txd.phys, sizeof(child->lli), |
122 | DMA_TO_DEVICE); | 122 | DMA_TO_DEVICE); |
123 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), | 123 | dma_sync_single_for_cpu(chan2parent(&dwc->chan), |
124 | desc->txd.phys, sizeof(desc->lli), | 124 | desc->txd.phys, sizeof(desc->lli), |
125 | DMA_TO_DEVICE); | 125 | DMA_TO_DEVICE); |
126 | } | 126 | } |
127 | 127 | ||
128 | /* | 128 | /* |
129 | * Move a descriptor, including any children, to the free list. | 129 | * Move a descriptor, including any children, to the free list. |
130 | * `desc' must not be on any lists. | 130 | * `desc' must not be on any lists. |
131 | */ | 131 | */ |
132 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) | 132 | static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) |
133 | { | 133 | { |
134 | if (desc) { | 134 | if (desc) { |
135 | struct dw_desc *child; | 135 | struct dw_desc *child; |
136 | 136 | ||
137 | dwc_sync_desc_for_cpu(dwc, desc); | 137 | dwc_sync_desc_for_cpu(dwc, desc); |
138 | 138 | ||
139 | spin_lock_bh(&dwc->lock); | 139 | spin_lock_bh(&dwc->lock); |
140 | list_for_each_entry(child, &desc->tx_list, desc_node) | 140 | list_for_each_entry(child, &desc->tx_list, desc_node) |
141 | dev_vdbg(chan2dev(&dwc->chan), | 141 | dev_vdbg(chan2dev(&dwc->chan), |
142 | "moving child desc %p to freelist\n", | 142 | "moving child desc %p to freelist\n", |
143 | child); | 143 | child); |
144 | list_splice_init(&desc->tx_list, &dwc->free_list); | 144 | list_splice_init(&desc->tx_list, &dwc->free_list); |
145 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); | 145 | dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); |
146 | list_add(&desc->desc_node, &dwc->free_list); | 146 | list_add(&desc->desc_node, &dwc->free_list); |
147 | spin_unlock_bh(&dwc->lock); | 147 | spin_unlock_bh(&dwc->lock); |
148 | } | 148 | } |
149 | } | 149 | } |
150 | 150 | ||
151 | /* Called with dwc->lock held and bh disabled */ | 151 | /* Called with dwc->lock held and bh disabled */ |
152 | static dma_cookie_t | 152 | static dma_cookie_t |
153 | dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) | 153 | dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc) |
154 | { | 154 | { |
155 | dma_cookie_t cookie = dwc->chan.cookie; | 155 | dma_cookie_t cookie = dwc->chan.cookie; |
156 | 156 | ||
157 | if (++cookie < 0) | 157 | if (++cookie < 0) |
158 | cookie = 1; | 158 | cookie = 1; |
159 | 159 | ||
160 | dwc->chan.cookie = cookie; | 160 | dwc->chan.cookie = cookie; |
161 | desc->txd.cookie = cookie; | 161 | desc->txd.cookie = cookie; |
162 | 162 | ||
163 | return cookie; | 163 | return cookie; |
164 | } | 164 | } |
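Cookies are small positive integers handed back to clients for completion tracking; zero and negative values are reserved for error/unset states, so the pre-increment deliberately skips them on wrap. A standalone sketch of just that rule (the kernel builds with -fno-strict-overflow, so the signed wrap the real code relies on is well defined there):

#include <assert.h>

typedef int dma_cookie_t;

static dma_cookie_t next_cookie(dma_cookie_t cookie)
{
	if (++cookie < 0)	/* wrapped past INT_MAX: restart at 1, never 0 */
		cookie = 1;
	return cookie;
}

int main(void)
{
	assert(next_cookie(41) == 42);
	assert(next_cookie(-2) == 1);	/* any negative value restarts at 1 */
	return 0;
}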
165 | 165 | ||
166 | /*----------------------------------------------------------------------*/ | 166 | /*----------------------------------------------------------------------*/ |
167 | 167 | ||
168 | /* Called with dwc->lock held and bh disabled */ | 168 | /* Called with dwc->lock held and bh disabled */ |
169 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) | 169 | static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) |
170 | { | 170 | { |
171 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 171 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
172 | 172 | ||
173 | /* ASSERT: channel is idle */ | 173 | /* ASSERT: channel is idle */ |
174 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 174 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
175 | dev_err(chan2dev(&dwc->chan), | 175 | dev_err(chan2dev(&dwc->chan), |
176 | "BUG: Attempted to start non-idle channel\n"); | 176 | "BUG: Attempted to start non-idle channel\n"); |
177 | dev_err(chan2dev(&dwc->chan), | 177 | dev_err(chan2dev(&dwc->chan), |
178 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 178 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
179 | channel_readl(dwc, SAR), | 179 | channel_readl(dwc, SAR), |
180 | channel_readl(dwc, DAR), | 180 | channel_readl(dwc, DAR), |
181 | channel_readl(dwc, LLP), | 181 | channel_readl(dwc, LLP), |
182 | channel_readl(dwc, CTL_HI), | 182 | channel_readl(dwc, CTL_HI), |
183 | channel_readl(dwc, CTL_LO)); | 183 | channel_readl(dwc, CTL_LO)); |
184 | 184 | ||
185 | /* The tasklet will hopefully advance the queue... */ | 185 | /* The tasklet will hopefully advance the queue... */ |
186 | return; | 186 | return; |
187 | } | 187 | } |
188 | 188 | ||
189 | channel_writel(dwc, LLP, first->txd.phys); | 189 | channel_writel(dwc, LLP, first->txd.phys); |
190 | channel_writel(dwc, CTL_LO, | 190 | channel_writel(dwc, CTL_LO, |
191 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | 191 | DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
192 | channel_writel(dwc, CTL_HI, 0); | 192 | channel_writel(dwc, CTL_HI, 0); |
193 | channel_set_bit(dw, CH_EN, dwc->mask); | 193 | channel_set_bit(dw, CH_EN, dwc->mask); |
194 | } | 194 | } |
195 | 195 | ||
196 | /*----------------------------------------------------------------------*/ | 196 | /*----------------------------------------------------------------------*/ |
197 | 197 | ||
198 | static void | 198 | static void |
199 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) | 199 | dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) |
200 | { | 200 | { |
201 | dma_async_tx_callback callback; | 201 | dma_async_tx_callback callback; |
202 | void *param; | 202 | void *param; |
203 | struct dma_async_tx_descriptor *txd = &desc->txd; | 203 | struct dma_async_tx_descriptor *txd = &desc->txd; |
204 | 204 | ||
205 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); | 205 | dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); |
206 | 206 | ||
207 | dwc->completed = txd->cookie; | 207 | dwc->completed = txd->cookie; |
208 | callback = txd->callback; | 208 | callback = txd->callback; |
209 | param = txd->callback_param; | 209 | param = txd->callback_param; |
210 | 210 | ||
211 | dwc_sync_desc_for_cpu(dwc, desc); | 211 | dwc_sync_desc_for_cpu(dwc, desc); |
212 | list_splice_init(&desc->tx_list, &dwc->free_list); | 212 | list_splice_init(&desc->tx_list, &dwc->free_list); |
213 | list_move(&desc->desc_node, &dwc->free_list); | 213 | list_move(&desc->desc_node, &dwc->free_list); |
214 | 214 | ||
215 | if (!dwc->chan.private) { | 215 | if (!dwc->chan.private) { |
216 | struct device *parent = chan2parent(&dwc->chan); | 216 | struct device *parent = chan2parent(&dwc->chan); |
217 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 217 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
218 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | 218 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
219 | dma_unmap_single(parent, desc->lli.dar, | 219 | dma_unmap_single(parent, desc->lli.dar, |
220 | desc->len, DMA_FROM_DEVICE); | 220 | desc->len, DMA_FROM_DEVICE); |
221 | else | 221 | else |
222 | dma_unmap_page(parent, desc->lli.dar, | 222 | dma_unmap_page(parent, desc->lli.dar, |
223 | desc->len, DMA_FROM_DEVICE); | 223 | desc->len, DMA_FROM_DEVICE); |
224 | } | 224 | } |
225 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | 225 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
226 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | 226 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
227 | dma_unmap_single(parent, desc->lli.sar, | 227 | dma_unmap_single(parent, desc->lli.sar, |
228 | desc->len, DMA_TO_DEVICE); | 228 | desc->len, DMA_TO_DEVICE); |
229 | else | 229 | else |
230 | dma_unmap_page(parent, desc->lli.sar, | 230 | dma_unmap_page(parent, desc->lli.sar, |
231 | desc->len, DMA_TO_DEVICE); | 231 | desc->len, DMA_TO_DEVICE); |
232 | } | 232 | } |
233 | } | 233 | } |
234 | 234 | ||
235 | /* | 235 | /* |
236 | * The API requires that no submissions are done from a | 236 | * The API requires that no submissions are done from a |
237 | * callback, so we don't need to drop the lock here | 237 | * callback, so we don't need to drop the lock here |
238 | */ | 238 | */ |
239 | if (callback) | 239 | if (callback) |
240 | callback(param); | 240 | callback(param); |
241 | } | 241 | } |
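Note that the unmap logic only runs for channels without slave data (chan->private unset), and each direction can be skipped independently through the descriptor flags. A hedged client-side sketch of opting out of both unmaps on a memcpy channel; dchan, dst, src, len, my_done and my_ctx are all assumed to exist in the caller:

struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;

tx = dchan->device->device_prep_dma_memcpy(dchan, dst, src, len,
		DMA_PREP_INTERRUPT |
		DMA_COMPL_SKIP_SRC_UNMAP |	/* caller keeps src mapped */
		DMA_COMPL_SKIP_DEST_UNMAP);	/* caller keeps dst mapped */
if (tx) {
	tx->callback = my_done;		/* hypothetical completion hook */
	tx->callback_param = my_ctx;
	cookie = tx->tx_submit(tx);
}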
242 | 242 | ||
243 | static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) | 243 | static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) |
244 | { | 244 | { |
245 | struct dw_desc *desc, *_desc; | 245 | struct dw_desc *desc, *_desc; |
246 | LIST_HEAD(list); | 246 | LIST_HEAD(list); |
247 | 247 | ||
248 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 248 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
249 | dev_err(chan2dev(&dwc->chan), | 249 | dev_err(chan2dev(&dwc->chan), |
250 | "BUG: XFER bit set, but channel not idle!\n"); | 250 | "BUG: XFER bit set, but channel not idle!\n"); |
251 | 251 | ||
252 | /* Try to continue after resetting the channel... */ | 252 | /* Try to continue after resetting the channel... */ |
253 | channel_clear_bit(dw, CH_EN, dwc->mask); | 253 | channel_clear_bit(dw, CH_EN, dwc->mask); |
254 | while (dma_readl(dw, CH_EN) & dwc->mask) | 254 | while (dma_readl(dw, CH_EN) & dwc->mask) |
255 | cpu_relax(); | 255 | cpu_relax(); |
256 | } | 256 | } |
257 | 257 | ||
258 | /* | 258 | /* |
259 | * Submit queued descriptors ASAP, i.e. before we go through | 259 | * Submit queued descriptors ASAP, i.e. before we go through |
260 | * the completed ones. | 260 | * the completed ones. |
261 | */ | 261 | */ |
262 | if (!list_empty(&dwc->queue)) | 262 | if (!list_empty(&dwc->queue)) |
263 | dwc_dostart(dwc, dwc_first_queued(dwc)); | 263 | dwc_dostart(dwc, dwc_first_queued(dwc)); |
264 | list_splice_init(&dwc->active_list, &list); | 264 | list_splice_init(&dwc->active_list, &list); |
265 | list_splice_init(&dwc->queue, &dwc->active_list); | 265 | list_splice_init(&dwc->queue, &dwc->active_list); |
266 | 266 | ||
267 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 267 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
268 | dwc_descriptor_complete(dwc, desc); | 268 | dwc_descriptor_complete(dwc, desc); |
269 | } | 269 | } |
270 | 270 | ||
271 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) | 271 | static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) |
272 | { | 272 | { |
273 | dma_addr_t llp; | 273 | dma_addr_t llp; |
274 | struct dw_desc *desc, *_desc; | 274 | struct dw_desc *desc, *_desc; |
275 | struct dw_desc *child; | 275 | struct dw_desc *child; |
276 | u32 status_xfer; | 276 | u32 status_xfer; |
277 | 277 | ||
278 | /* | 278 | /* |
279 | * Clear block interrupt flag before scanning so that we don't | 279 | * Clear block interrupt flag before scanning so that we don't |
280 | * miss any, and read LLP before RAW_XFER to ensure it is | 280 | * miss any, and read LLP before RAW_XFER to ensure it is |
281 | * valid if we decide to scan the list. | 281 | * valid if we decide to scan the list. |
282 | */ | 282 | */ |
283 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | 283 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
284 | llp = channel_readl(dwc, LLP); | 284 | llp = channel_readl(dwc, LLP); |
285 | status_xfer = dma_readl(dw, RAW.XFER); | 285 | status_xfer = dma_readl(dw, RAW.XFER); |
286 | 286 | ||
287 | if (status_xfer & dwc->mask) { | 287 | if (status_xfer & dwc->mask) { |
288 | /* Everything we've submitted is done */ | 288 | /* Everything we've submitted is done */ |
289 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 289 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
290 | dwc_complete_all(dw, dwc); | 290 | dwc_complete_all(dw, dwc); |
291 | return; | 291 | return; |
292 | } | 292 | } |
293 | 293 | ||
294 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); | 294 | dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); |
295 | 295 | ||
296 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { | 296 | list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { |
297 | if (desc->lli.llp == llp) | 297 | if (desc->lli.llp == llp) |
298 | /* This one is currently in progress */ | 298 | /* This one is currently in progress */ |
299 | return; | 299 | return; |
300 | 300 | ||
301 | list_for_each_entry(child, &desc->tx_list, desc_node) | 301 | list_for_each_entry(child, &desc->tx_list, desc_node) |
302 | if (child->lli.llp == llp) | 302 | if (child->lli.llp == llp) |
303 | /* Currently in progress */ | 303 | /* Currently in progress */ |
304 | return; | 304 | return; |
305 | 305 | ||
306 | /* | 306 | /* |
307 | * No descriptors so far seem to be in progress, i.e. | 307 | * No descriptors so far seem to be in progress, i.e. |
308 | * this one must be done. | 308 | * this one must be done. |
309 | */ | 309 | */ |
310 | dwc_descriptor_complete(dwc, desc); | 310 | dwc_descriptor_complete(dwc, desc); |
311 | } | 311 | } |
312 | 312 | ||
313 | dev_err(chan2dev(&dwc->chan), | 313 | dev_err(chan2dev(&dwc->chan), |
314 | "BUG: All descriptors done, but channel not idle!\n"); | 314 | "BUG: All descriptors done, but channel not idle!\n"); |
315 | 315 | ||
316 | /* Try to continue after resetting the channel... */ | 316 | /* Try to continue after resetting the channel... */ |
317 | channel_clear_bit(dw, CH_EN, dwc->mask); | 317 | channel_clear_bit(dw, CH_EN, dwc->mask); |
318 | while (dma_readl(dw, CH_EN) & dwc->mask) | 318 | while (dma_readl(dw, CH_EN) & dwc->mask) |
319 | cpu_relax(); | 319 | cpu_relax(); |
320 | 320 | ||
321 | if (!list_empty(&dwc->queue)) { | 321 | if (!list_empty(&dwc->queue)) { |
322 | dwc_dostart(dwc, dwc_first_queued(dwc)); | 322 | dwc_dostart(dwc, dwc_first_queued(dwc)); |
323 | list_splice_init(&dwc->queue, &dwc->active_list); | 323 | list_splice_init(&dwc->queue, &dwc->active_list); |
324 | } | 324 | } |
325 | } | 325 | } |
326 | 326 | ||
327 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) | 327 | static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) |
328 | { | 328 | { |
329 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | 329 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
330 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", | 330 | " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", |
331 | lli->sar, lli->dar, lli->llp, | 331 | lli->sar, lli->dar, lli->llp, |
332 | lli->ctlhi, lli->ctllo); | 332 | lli->ctlhi, lli->ctllo); |
333 | } | 333 | } |
334 | 334 | ||
335 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) | 335 | static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) |
336 | { | 336 | { |
337 | struct dw_desc *bad_desc; | 337 | struct dw_desc *bad_desc; |
338 | struct dw_desc *child; | 338 | struct dw_desc *child; |
339 | 339 | ||
340 | dwc_scan_descriptors(dw, dwc); | 340 | dwc_scan_descriptors(dw, dwc); |
341 | 341 | ||
342 | /* | 342 | /* |
343 | * The descriptor currently at the head of the active list is | 343 | * The descriptor currently at the head of the active list is |
344 | * borked. Since we don't have any way to report errors, we'll | 344 | * borked. Since we don't have any way to report errors, we'll |
345 | * just have to scream loudly and try to carry on. | 345 | * just have to scream loudly and try to carry on. |
346 | */ | 346 | */ |
347 | bad_desc = dwc_first_active(dwc); | 347 | bad_desc = dwc_first_active(dwc); |
348 | list_del_init(&bad_desc->desc_node); | 348 | list_del_init(&bad_desc->desc_node); |
349 | list_splice_init(&dwc->queue, dwc->active_list.prev); | 349 | list_splice_init(&dwc->queue, dwc->active_list.prev); |
350 | 350 | ||
351 | /* Clear the error flag and try to restart the controller */ | 351 | /* Clear the error flag and try to restart the controller */ |
352 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 352 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
353 | if (!list_empty(&dwc->active_list)) | 353 | if (!list_empty(&dwc->active_list)) |
354 | dwc_dostart(dwc, dwc_first_active(dwc)); | 354 | dwc_dostart(dwc, dwc_first_active(dwc)); |
355 | 355 | ||
356 | /* | 356 | /* |
357 | * KERN_CRIT may seem harsh, but since this only happens | 357 | * KERN_CRIT may seem harsh, but since this only happens |
358 | * when someone submits a bad physical address in a | 358 | * when someone submits a bad physical address in a |
359 | * descriptor, we should consider ourselves lucky that the | 359 | * descriptor, we should consider ourselves lucky that the |
360 | * controller flagged an error instead of scribbling over | 360 | * controller flagged an error instead of scribbling over |
361 | * random memory locations. | 361 | * random memory locations. |
362 | */ | 362 | */ |
363 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | 363 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
364 | "Bad descriptor submitted for DMA!\n"); | 364 | "Bad descriptor submitted for DMA!\n"); |
365 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), | 365 | dev_printk(KERN_CRIT, chan2dev(&dwc->chan), |
366 | " cookie: %d\n", bad_desc->txd.cookie); | 366 | " cookie: %d\n", bad_desc->txd.cookie); |
367 | dwc_dump_lli(dwc, &bad_desc->lli); | 367 | dwc_dump_lli(dwc, &bad_desc->lli); |
368 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) | 368 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
369 | dwc_dump_lli(dwc, &child->lli); | 369 | dwc_dump_lli(dwc, &child->lli); |
370 | 370 | ||
371 | /* Pretend the descriptor completed successfully */ | 371 | /* Pretend the descriptor completed successfully */ |
372 | dwc_descriptor_complete(dwc, bad_desc); | 372 | dwc_descriptor_complete(dwc, bad_desc); |
373 | } | 373 | } |
374 | 374 | ||
375 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 375 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
376 | 376 | ||
377 | inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) | 377 | inline dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan) |
378 | { | 378 | { |
379 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 379 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
380 | return channel_readl(dwc, SAR); | 380 | return channel_readl(dwc, SAR); |
381 | } | 381 | } |
382 | EXPORT_SYMBOL(dw_dma_get_src_addr); | 382 | EXPORT_SYMBOL(dw_dma_get_src_addr); |
383 | 383 | ||
384 | inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) | 384 | inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) |
385 | { | 385 | { |
386 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 386 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
387 | return channel_readl(dwc, DAR); | 387 | return channel_readl(dwc, DAR); |
388 | } | 388 | } |
389 | EXPORT_SYMBOL(dw_dma_get_dst_addr); | 389 | EXPORT_SYMBOL(dw_dma_get_dst_addr); |
390 | 390 | ||
391 | /* called with dwc->lock held and all DMAC interrupts disabled */ | 391 | /* called with dwc->lock held and all DMAC interrupts disabled */ |
392 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, | 392 | static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, |
393 | u32 status_block, u32 status_err, u32 status_xfer) | 393 | u32 status_block, u32 status_err, u32 status_xfer) |
394 | { | 394 | { |
395 | if (status_block & dwc->mask) { | 395 | if (status_block & dwc->mask) { |
396 | void (*callback)(void *param); | 396 | void (*callback)(void *param); |
397 | void *callback_param; | 397 | void *callback_param; |
398 | 398 | ||
399 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", | 399 | dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n", |
400 | channel_readl(dwc, LLP)); | 400 | channel_readl(dwc, LLP)); |
401 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | 401 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
402 | 402 | ||
403 | callback = dwc->cdesc->period_callback; | 403 | callback = dwc->cdesc->period_callback; |
404 | callback_param = dwc->cdesc->period_callback_param; | 404 | callback_param = dwc->cdesc->period_callback_param; |
405 | if (callback) { | 405 | if (callback) { |
406 | spin_unlock(&dwc->lock); | 406 | spin_unlock(&dwc->lock); |
407 | callback(callback_param); | 407 | callback(callback_param); |
408 | spin_lock(&dwc->lock); | 408 | spin_lock(&dwc->lock); |
409 | } | 409 | } |
410 | } | 410 | } |
411 | 411 | ||
412 | /* | 412 | /* |
413 | * Error and transfer complete are highly unlikely, and will most | 413 | * Error and transfer complete are highly unlikely, and will most |
414 | * likely be due to a configuration error by the user. | 414 | * likely be due to a configuration error by the user. |
415 | */ | 415 | */ |
416 | if (unlikely(status_err & dwc->mask) || | 416 | if (unlikely(status_err & dwc->mask) || |
417 | unlikely(status_xfer & dwc->mask)) { | 417 | unlikely(status_xfer & dwc->mask)) { |
418 | int i; | 418 | int i; |
419 | 419 | ||
420 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " | 420 | dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s " |
421 | "interrupt, stopping DMA transfer\n", | 421 | "interrupt, stopping DMA transfer\n", |
422 | status_xfer ? "xfer" : "error"); | 422 | status_xfer ? "xfer" : "error"); |
423 | dev_err(chan2dev(&dwc->chan), | 423 | dev_err(chan2dev(&dwc->chan), |
424 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 424 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
425 | channel_readl(dwc, SAR), | 425 | channel_readl(dwc, SAR), |
426 | channel_readl(dwc, DAR), | 426 | channel_readl(dwc, DAR), |
427 | channel_readl(dwc, LLP), | 427 | channel_readl(dwc, LLP), |
428 | channel_readl(dwc, CTL_HI), | 428 | channel_readl(dwc, CTL_HI), |
429 | channel_readl(dwc, CTL_LO)); | 429 | channel_readl(dwc, CTL_LO)); |
430 | 430 | ||
431 | channel_clear_bit(dw, CH_EN, dwc->mask); | 431 | channel_clear_bit(dw, CH_EN, dwc->mask); |
432 | while (dma_readl(dw, CH_EN) & dwc->mask) | 432 | while (dma_readl(dw, CH_EN) & dwc->mask) |
433 | cpu_relax(); | 433 | cpu_relax(); |
434 | 434 | ||
435 | /* make sure DMA does not restart by loading a new list */ | 435 | /* make sure DMA does not restart by loading a new list */ |
436 | channel_writel(dwc, LLP, 0); | 436 | channel_writel(dwc, LLP, 0); |
437 | channel_writel(dwc, CTL_LO, 0); | 437 | channel_writel(dwc, CTL_LO, 0); |
438 | channel_writel(dwc, CTL_HI, 0); | 438 | channel_writel(dwc, CTL_HI, 0); |
439 | 439 | ||
440 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | 440 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
441 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 441 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
442 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 442 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
443 | 443 | ||
444 | for (i = 0; i < dwc->cdesc->periods; i++) | 444 | for (i = 0; i < dwc->cdesc->periods; i++) |
445 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); | 445 | dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); |
446 | } | 446 | } |
447 | } | 447 | } |
448 | 448 | ||
449 | /* ------------------------------------------------------------------------- */ | 449 | /* ------------------------------------------------------------------------- */ |
450 | 450 | ||
451 | static void dw_dma_tasklet(unsigned long data) | 451 | static void dw_dma_tasklet(unsigned long data) |
452 | { | 452 | { |
453 | struct dw_dma *dw = (struct dw_dma *)data; | 453 | struct dw_dma *dw = (struct dw_dma *)data; |
454 | struct dw_dma_chan *dwc; | 454 | struct dw_dma_chan *dwc; |
455 | u32 status_block; | 455 | u32 status_block; |
456 | u32 status_xfer; | 456 | u32 status_xfer; |
457 | u32 status_err; | 457 | u32 status_err; |
458 | int i; | 458 | int i; |
459 | 459 | ||
460 | status_block = dma_readl(dw, RAW.BLOCK); | 460 | status_block = dma_readl(dw, RAW.BLOCK); |
461 | status_xfer = dma_readl(dw, RAW.XFER); | 461 | status_xfer = dma_readl(dw, RAW.XFER); |
462 | status_err = dma_readl(dw, RAW.ERROR); | 462 | status_err = dma_readl(dw, RAW.ERROR); |
463 | 463 | ||
464 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", | 464 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", |
465 | status_block, status_err); | 465 | status_block, status_err); |
466 | 466 | ||
467 | for (i = 0; i < dw->dma.chancnt; i++) { | 467 | for (i = 0; i < dw->dma.chancnt; i++) { |
468 | dwc = &dw->chan[i]; | 468 | dwc = &dw->chan[i]; |
469 | spin_lock(&dwc->lock); | 469 | spin_lock(&dwc->lock); |
470 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) | 470 | if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) |
471 | dwc_handle_cyclic(dw, dwc, status_block, status_err, | 471 | dwc_handle_cyclic(dw, dwc, status_block, status_err, |
472 | status_xfer); | 472 | status_xfer); |
473 | else if (status_err & (1 << i)) | 473 | else if (status_err & (1 << i)) |
474 | dwc_handle_error(dw, dwc); | 474 | dwc_handle_error(dw, dwc); |
475 | else if ((status_block | status_xfer) & (1 << i)) | 475 | else if ((status_block | status_xfer) & (1 << i)) |
476 | dwc_scan_descriptors(dw, dwc); | 476 | dwc_scan_descriptors(dw, dwc); |
477 | spin_unlock(&dwc->lock); | 477 | spin_unlock(&dwc->lock); |
478 | } | 478 | } |
479 | 479 | ||
480 | /* | 480 | /* |
481 | * Re-enable interrupts. Block Complete interrupts are only | 481 | * Re-enable interrupts. Block Complete interrupts are only |
482 | * enabled if the INT_EN bit in the descriptor is set. This | 482 | * enabled if the INT_EN bit in the descriptor is set. This |
483 | * will trigger a scan before the whole list is done. | 483 | * will trigger a scan before the whole list is done. |
484 | */ | 484 | */ |
485 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); | 485 | channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); |
486 | channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 486 | channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
487 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); | 487 | channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); |
488 | } | 488 | } |
489 | 489 | ||
490 | static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) | 490 | static irqreturn_t dw_dma_interrupt(int irq, void *dev_id) |
491 | { | 491 | { |
492 | struct dw_dma *dw = dev_id; | 492 | struct dw_dma *dw = dev_id; |
493 | u32 status; | 493 | u32 status; |
494 | 494 | ||
495 | dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", | 495 | dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", |
496 | dma_readl(dw, STATUS_INT)); | 496 | dma_readl(dw, STATUS_INT)); |
497 | 497 | ||
498 | /* | 498 | /* |
499 | * Just disable the interrupts. We'll turn them back on in the | 499 | * Just disable the interrupts. We'll turn them back on in the |
500 | * softirq handler. | 500 | * softirq handler. |
501 | */ | 501 | */ |
502 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 502 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
503 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 503 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
504 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 504 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
505 | 505 | ||
506 | status = dma_readl(dw, STATUS_INT); | 506 | status = dma_readl(dw, STATUS_INT); |
507 | if (status) { | 507 | if (status) { |
508 | dev_err(dw->dma.dev, | 508 | dev_err(dw->dma.dev, |
509 | "BUG: Unexpected interrupts pending: 0x%x\n", | 509 | "BUG: Unexpected interrupts pending: 0x%x\n", |
510 | status); | 510 | status); |
511 | 511 | ||
512 | /* Try to recover */ | 512 | /* Try to recover */ |
513 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); | 513 | channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1); |
514 | channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); | 514 | channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1); |
515 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); | 515 | channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1); |
516 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); | 516 | channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1); |
517 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); | 517 | channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1); |
518 | } | 518 | } |
519 | 519 | ||
520 | tasklet_schedule(&dw->tasklet); | 520 | tasklet_schedule(&dw->tasklet); |
521 | 521 | ||
522 | return IRQ_HANDLED; | 522 | return IRQ_HANDLED; |
523 | } | 523 | } |
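The hard IRQ handler does no list walking at all: it masks the per-channel interrupt sources and leaves everything else to dw_dma_tasklet(), which re-enables them when it finishes. The same top-half/bottom-half split in miniature, with generic foo_* helpers that are not part of this driver:

static irqreturn_t foo_irq(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_mask_irqs(foo);			/* quiet the device in the top half */
	tasklet_schedule(&foo->tasklet);	/* defer the real work */
	return IRQ_HANDLED;
}

static void foo_tasklet(unsigned long data)
{
	struct foo_dev *foo = (struct foo_dev *)data;

	foo_process_events(foo);	/* softirq context: no sleeping */
	foo_unmask_irqs(foo);		/* re-arm for the next interrupt */
}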
524 | 524 | ||
525 | /*----------------------------------------------------------------------*/ | 525 | /*----------------------------------------------------------------------*/ |
526 | 526 | ||
527 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) | 527 | static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) |
528 | { | 528 | { |
529 | struct dw_desc *desc = txd_to_dw_desc(tx); | 529 | struct dw_desc *desc = txd_to_dw_desc(tx); |
530 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); | 530 | struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan); |
531 | dma_cookie_t cookie; | 531 | dma_cookie_t cookie; |
532 | 532 | ||
533 | spin_lock_bh(&dwc->lock); | 533 | spin_lock_bh(&dwc->lock); |
534 | cookie = dwc_assign_cookie(dwc, desc); | 534 | cookie = dwc_assign_cookie(dwc, desc); |
535 | 535 | ||
536 | /* | 536 | /* |
537 | * REVISIT: We should attempt to chain as many descriptors as | 537 | * REVISIT: We should attempt to chain as many descriptors as |
538 | * possible, perhaps even appending to those already submitted | 538 | * possible, perhaps even appending to those already submitted |
539 | * for DMA. But this is hard to do in a race-free manner. | 539 | * for DMA. But this is hard to do in a race-free manner. |
540 | */ | 540 | */ |
541 | if (list_empty(&dwc->active_list)) { | 541 | if (list_empty(&dwc->active_list)) { |
542 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", | 542 | dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", |
543 | desc->txd.cookie); | 543 | desc->txd.cookie); |
544 | dwc_dostart(dwc, desc); | 544 | dwc_dostart(dwc, desc); |
545 | list_add_tail(&desc->desc_node, &dwc->active_list); | 545 | list_add_tail(&desc->desc_node, &dwc->active_list); |
546 | } else { | 546 | } else { |
547 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", | 547 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", |
548 | desc->txd.cookie); | 548 | desc->txd.cookie); |
549 | 549 | ||
550 | list_add_tail(&desc->desc_node, &dwc->queue); | 550 | list_add_tail(&desc->desc_node, &dwc->queue); |
551 | } | 551 | } |
552 | 552 | ||
553 | spin_unlock_bh(&dwc->lock); | 553 | spin_unlock_bh(&dwc->lock); |
554 | 554 | ||
555 | return cookie; | 555 | return cookie; |
556 | } | 556 | } |
557 | 557 | ||
558 | static struct dma_async_tx_descriptor * | 558 | static struct dma_async_tx_descriptor * |
559 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 559 | dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
560 | size_t len, unsigned long flags) | 560 | size_t len, unsigned long flags) |
561 | { | 561 | { |
562 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 562 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
563 | struct dw_desc *desc; | 563 | struct dw_desc *desc; |
564 | struct dw_desc *first; | 564 | struct dw_desc *first; |
565 | struct dw_desc *prev; | 565 | struct dw_desc *prev; |
566 | size_t xfer_count; | 566 | size_t xfer_count; |
567 | size_t offset; | 567 | size_t offset; |
568 | unsigned int src_width; | 568 | unsigned int src_width; |
569 | unsigned int dst_width; | 569 | unsigned int dst_width; |
570 | u32 ctllo; | 570 | u32 ctllo; |
571 | 571 | ||
572 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", | 572 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", |
573 | dest, src, len, flags); | 573 | dest, src, len, flags); |
574 | 574 | ||
575 | if (unlikely(!len)) { | 575 | if (unlikely(!len)) { |
576 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); | 576 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); |
577 | return NULL; | 577 | return NULL; |
578 | } | 578 | } |
579 | 579 | ||
580 | /* | 580 | /* |
581 | * We can be a lot more clever here, but this should take care | 581 | * We can be a lot more clever here, but this should take care |
582 | * of the most common optimization. | 582 | * of the most common optimization. |
583 | */ | 583 | */ |
584 | if (!((src | dest | len) & 3)) | 584 | if (!((src | dest | len) & 3)) |
585 | src_width = dst_width = 2; | 585 | src_width = dst_width = 2; |
586 | else if (!((src | dest | len) & 1)) | 586 | else if (!((src | dest | len) & 1)) |
587 | src_width = dst_width = 1; | 587 | src_width = dst_width = 1; |
588 | else | 588 | else |
589 | src_width = dst_width = 0; | 589 | src_width = dst_width = 0; |
590 | 590 | ||
591 | ctllo = DWC_DEFAULT_CTLLO | 591 | ctllo = DWC_DEFAULT_CTLLO |
592 | | DWC_CTLL_DST_WIDTH(dst_width) | 592 | | DWC_CTLL_DST_WIDTH(dst_width) |
593 | | DWC_CTLL_SRC_WIDTH(src_width) | 593 | | DWC_CTLL_SRC_WIDTH(src_width) |
594 | | DWC_CTLL_DST_INC | 594 | | DWC_CTLL_DST_INC |
595 | | DWC_CTLL_SRC_INC | 595 | | DWC_CTLL_SRC_INC |
596 | | DWC_CTLL_FC_M2M; | 596 | | DWC_CTLL_FC_M2M; |
597 | prev = first = NULL; | 597 | prev = first = NULL; |
598 | 598 | ||
599 | for (offset = 0; offset < len; offset += xfer_count << src_width) { | 599 | for (offset = 0; offset < len; offset += xfer_count << src_width) { |
600 | xfer_count = min_t(size_t, (len - offset) >> src_width, | 600 | xfer_count = min_t(size_t, (len - offset) >> src_width, |
601 | DWC_MAX_COUNT); | 601 | DWC_MAX_COUNT); |
602 | 602 | ||
603 | desc = dwc_desc_get(dwc); | 603 | desc = dwc_desc_get(dwc); |
604 | if (!desc) | 604 | if (!desc) |
605 | goto err_desc_get; | 605 | goto err_desc_get; |
606 | 606 | ||
607 | desc->lli.sar = src + offset; | 607 | desc->lli.sar = src + offset; |
608 | desc->lli.dar = dest + offset; | 608 | desc->lli.dar = dest + offset; |
609 | desc->lli.ctllo = ctllo; | 609 | desc->lli.ctllo = ctllo; |
610 | desc->lli.ctlhi = xfer_count; | 610 | desc->lli.ctlhi = xfer_count; |
611 | 611 | ||
612 | if (!first) { | 612 | if (!first) { |
613 | first = desc; | 613 | first = desc; |
614 | } else { | 614 | } else { |
615 | prev->lli.llp = desc->txd.phys; | 615 | prev->lli.llp = desc->txd.phys; |
616 | dma_sync_single_for_device(chan2parent(chan), | 616 | dma_sync_single_for_device(chan2parent(chan), |
617 | prev->txd.phys, sizeof(prev->lli), | 617 | prev->txd.phys, sizeof(prev->lli), |
618 | DMA_TO_DEVICE); | 618 | DMA_TO_DEVICE); |
619 | list_add_tail(&desc->desc_node, | 619 | list_add_tail(&desc->desc_node, |
620 | &first->tx_list); | 620 | &first->tx_list); |
621 | } | 621 | } |
622 | prev = desc; | 622 | prev = desc; |
623 | } | 623 | } |
624 | 624 | ||
625 | 625 | ||
626 | if (flags & DMA_PREP_INTERRUPT) | 626 | if (flags & DMA_PREP_INTERRUPT) |
627 | /* Trigger interrupt after last block */ | 627 | /* Trigger interrupt after last block */ |
628 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 628 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
629 | 629 | ||
630 | prev->lli.llp = 0; | 630 | prev->lli.llp = 0; |
631 | dma_sync_single_for_device(chan2parent(chan), | 631 | dma_sync_single_for_device(chan2parent(chan), |
632 | prev->txd.phys, sizeof(prev->lli), | 632 | prev->txd.phys, sizeof(prev->lli), |
633 | DMA_TO_DEVICE); | 633 | DMA_TO_DEVICE); |
634 | 634 | ||
635 | first->txd.flags = flags; | 635 | first->txd.flags = flags; |
636 | first->len = len; | 636 | first->len = len; |
637 | 637 | ||
638 | return &first->txd; | 638 | return &first->txd; |
639 | 639 | ||
640 | err_desc_get: | 640 | err_desc_get: |
641 | dwc_desc_put(dwc, first); | 641 | dwc_desc_put(dwc, first); |
642 | return NULL; | 642 | return NULL; |
643 | } | 643 | } |
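The (src | dest | len) & 3 test works because OR-ing the three values preserves every low bit set in any of them: the result has both low bits clear only if all three are multiples of 4, so one odd address or length degrades the whole transfer to byte accesses. A quick user-space check of the width selection (the helper name is made up):

#include <stdio.h>

static unsigned int pick_width(unsigned long src, unsigned long dest,
			       unsigned long len)
{
	if (!((src | dest | len) & 3))
		return 2;	/* all 4-byte aligned: 32-bit transfers */
	if (!((src | dest | len) & 1))
		return 1;	/* all 2-byte aligned: 16-bit transfers */
	return 0;		/* anything odd: byte transfers */
}

int main(void)
{
	printf("%u\n", pick_width(0x1000, 0x2000, 256));	/* 2 */
	printf("%u\n", pick_width(0x1000, 0x2001, 256));	/* 0 */
	return 0;
}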
644 | 644 | ||
645 | static struct dma_async_tx_descriptor * | 645 | static struct dma_async_tx_descriptor * |
646 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 646 | dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
647 | unsigned int sg_len, enum dma_data_direction direction, | 647 | unsigned int sg_len, enum dma_data_direction direction, |
648 | unsigned long flags) | 648 | unsigned long flags) |
649 | { | 649 | { |
650 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 650 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
651 | struct dw_dma_slave *dws = chan->private; | 651 | struct dw_dma_slave *dws = chan->private; |
652 | struct dw_desc *prev; | 652 | struct dw_desc *prev; |
653 | struct dw_desc *first; | 653 | struct dw_desc *first; |
654 | u32 ctllo; | 654 | u32 ctllo; |
655 | dma_addr_t reg; | 655 | dma_addr_t reg; |
656 | unsigned int reg_width; | 656 | unsigned int reg_width; |
657 | unsigned int mem_width; | 657 | unsigned int mem_width; |
658 | unsigned int i; | 658 | unsigned int i; |
659 | struct scatterlist *sg; | 659 | struct scatterlist *sg; |
660 | size_t total_len = 0; | 660 | size_t total_len = 0; |
661 | 661 | ||
662 | dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); | 662 | dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); |
663 | 663 | ||
664 | if (unlikely(!dws || !sg_len)) | 664 | if (unlikely(!dws || !sg_len)) |
665 | return NULL; | 665 | return NULL; |
666 | 666 | ||
667 | reg_width = dws->reg_width; | 667 | reg_width = dws->reg_width; |
668 | prev = first = NULL; | 668 | prev = first = NULL; |
669 | 669 | ||
670 | switch (direction) { | 670 | switch (direction) { |
671 | case DMA_TO_DEVICE: | 671 | case DMA_TO_DEVICE: |
672 | ctllo = (DWC_DEFAULT_CTLLO | 672 | ctllo = (DWC_DEFAULT_CTLLO |
673 | | DWC_CTLL_DST_WIDTH(reg_width) | 673 | | DWC_CTLL_DST_WIDTH(reg_width) |
674 | | DWC_CTLL_DST_FIX | 674 | | DWC_CTLL_DST_FIX |
675 | | DWC_CTLL_SRC_INC | 675 | | DWC_CTLL_SRC_INC |
676 | | DWC_CTLL_FC_M2P); | 676 | | DWC_CTLL_FC_M2P); |
677 | reg = dws->tx_reg; | 677 | reg = dws->tx_reg; |
678 | for_each_sg(sgl, sg, sg_len, i) { | 678 | for_each_sg(sgl, sg, sg_len, i) { |
679 | struct dw_desc *desc; | 679 | struct dw_desc *desc; |
680 | u32 len; | 680 | u32 len; |
681 | u32 mem; | 681 | u32 mem; |
682 | 682 | ||
683 | desc = dwc_desc_get(dwc); | 683 | desc = dwc_desc_get(dwc); |
684 | if (!desc) { | 684 | if (!desc) { |
685 | dev_err(chan2dev(chan), | 685 | dev_err(chan2dev(chan), |
686 | "not enough descriptors available\n"); | 686 | "not enough descriptors available\n"); |
687 | goto err_desc_get; | 687 | goto err_desc_get; |
688 | } | 688 | } |
689 | 689 | ||
690 | mem = sg_phys(sg); | 690 | mem = sg_phys(sg); |
691 | len = sg_dma_len(sg); | 691 | len = sg_dma_len(sg); |
692 | mem_width = 2; | 692 | mem_width = 2; |
693 | if (unlikely(mem & 3 || len & 3)) | 693 | if (unlikely(mem & 3 || len & 3)) |
694 | mem_width = 0; | 694 | mem_width = 0; |
695 | 695 | ||
696 | desc->lli.sar = mem; | 696 | desc->lli.sar = mem; |
697 | desc->lli.dar = reg; | 697 | desc->lli.dar = reg; |
698 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); | 698 | desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); |
699 | desc->lli.ctlhi = len >> mem_width; | 699 | desc->lli.ctlhi = len >> mem_width; |
700 | 700 | ||
701 | if (!first) { | 701 | if (!first) { |
702 | first = desc; | 702 | first = desc; |
703 | } else { | 703 | } else { |
704 | prev->lli.llp = desc->txd.phys; | 704 | prev->lli.llp = desc->txd.phys; |
705 | dma_sync_single_for_device(chan2parent(chan), | 705 | dma_sync_single_for_device(chan2parent(chan), |
706 | prev->txd.phys, | 706 | prev->txd.phys, |
707 | sizeof(prev->lli), | 707 | sizeof(prev->lli), |
708 | DMA_TO_DEVICE); | 708 | DMA_TO_DEVICE); |
709 | list_add_tail(&desc->desc_node, | 709 | list_add_tail(&desc->desc_node, |
710 | &first->tx_list); | 710 | &first->tx_list); |
711 | } | 711 | } |
712 | prev = desc; | 712 | prev = desc; |
713 | total_len += len; | 713 | total_len += len; |
714 | } | 714 | } |
715 | break; | 715 | break; |
716 | case DMA_FROM_DEVICE: | 716 | case DMA_FROM_DEVICE: |
717 | ctllo = (DWC_DEFAULT_CTLLO | 717 | ctllo = (DWC_DEFAULT_CTLLO |
718 | | DWC_CTLL_SRC_WIDTH(reg_width) | 718 | | DWC_CTLL_SRC_WIDTH(reg_width) |
719 | | DWC_CTLL_DST_INC | 719 | | DWC_CTLL_DST_INC |
720 | | DWC_CTLL_SRC_FIX | 720 | | DWC_CTLL_SRC_FIX |
721 | | DWC_CTLL_FC_P2M); | 721 | | DWC_CTLL_FC_P2M); |
722 | 722 | ||
723 | reg = dws->rx_reg; | 723 | reg = dws->rx_reg; |
724 | for_each_sg(sgl, sg, sg_len, i) { | 724 | for_each_sg(sgl, sg, sg_len, i) { |
725 | struct dw_desc *desc; | 725 | struct dw_desc *desc; |
726 | u32 len; | 726 | u32 len; |
727 | u32 mem; | 727 | u32 mem; |
728 | 728 | ||
729 | desc = dwc_desc_get(dwc); | 729 | desc = dwc_desc_get(dwc); |
730 | if (!desc) { | 730 | if (!desc) { |
731 | dev_err(chan2dev(chan), | 731 | dev_err(chan2dev(chan), |
732 | "not enough descriptors available\n"); | 732 | "not enough descriptors available\n"); |
733 | goto err_desc_get; | 733 | goto err_desc_get; |
734 | } | 734 | } |
735 | 735 | ||
736 | mem = sg_phys(sg); | 736 | mem = sg_phys(sg); |
737 | len = sg_dma_len(sg); | 737 | len = sg_dma_len(sg); |
738 | mem_width = 2; | 738 | mem_width = 2; |
739 | if (unlikely(mem & 3 || len & 3)) | 739 | if (unlikely(mem & 3 || len & 3)) |
740 | mem_width = 0; | 740 | mem_width = 0; |
741 | 741 | ||
742 | desc->lli.sar = reg; | 742 | desc->lli.sar = reg; |
743 | desc->lli.dar = mem; | 743 | desc->lli.dar = mem; |
744 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); | 744 | desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); |
745 | desc->lli.ctlhi = len >> reg_width; | 745 | desc->lli.ctlhi = len >> reg_width; |
746 | 746 | ||
747 | if (!first) { | 747 | if (!first) { |
748 | first = desc; | 748 | first = desc; |
749 | } else { | 749 | } else { |
750 | prev->lli.llp = desc->txd.phys; | 750 | prev->lli.llp = desc->txd.phys; |
751 | dma_sync_single_for_device(chan2parent(chan), | 751 | dma_sync_single_for_device(chan2parent(chan), |
752 | prev->txd.phys, | 752 | prev->txd.phys, |
753 | sizeof(prev->lli), | 753 | sizeof(prev->lli), |
754 | DMA_TO_DEVICE); | 754 | DMA_TO_DEVICE); |
755 | list_add_tail(&desc->desc_node, | 755 | list_add_tail(&desc->desc_node, |
756 | &first->tx_list); | 756 | &first->tx_list); |
757 | } | 757 | } |
758 | prev = desc; | 758 | prev = desc; |
759 | total_len += len; | 759 | total_len += len; |
760 | } | 760 | } |
761 | break; | 761 | break; |
762 | default: | 762 | default: |
763 | return NULL; | 763 | return NULL; |
764 | } | 764 | } |
765 | 765 | ||
766 | if (flags & DMA_PREP_INTERRUPT) | 766 | if (flags & DMA_PREP_INTERRUPT) |
767 | /* Trigger interrupt after last block */ | 767 | /* Trigger interrupt after last block */ |
768 | prev->lli.ctllo |= DWC_CTLL_INT_EN; | 768 | prev->lli.ctllo |= DWC_CTLL_INT_EN; |
769 | 769 | ||
770 | prev->lli.llp = 0; | 770 | prev->lli.llp = 0; |
771 | dma_sync_single_for_device(chan2parent(chan), | 771 | dma_sync_single_for_device(chan2parent(chan), |
772 | prev->txd.phys, sizeof(prev->lli), | 772 | prev->txd.phys, sizeof(prev->lli), |
773 | DMA_TO_DEVICE); | 773 | DMA_TO_DEVICE); |
774 | 774 | ||
775 | first->len = total_len; | 775 | first->len = total_len; |
776 | 776 | ||
777 | return &first->txd; | 777 | return &first->txd; |
778 | 778 | ||
779 | err_desc_get: | 779 | err_desc_get: |
780 | dwc_desc_put(dwc, first); | 780 | dwc_desc_put(dwc, first); |
781 | return NULL; | 781 | return NULL; |
782 | } | 782 | } |
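Slave transfers depend on controller-specific parameters (FIFO register address, register width, cfg bits) arriving through chan->private as a struct dw_dma_slave; with a NULL private pointer the function bails out immediately. A rough client-side sketch; in real code the private pointer is normally attached in the dma_request_channel() filter function, and pdata, sgl and sg_len are assumed to come from platform setup:

struct dw_dma_slave *dws = &pdata->dma_slave;	/* platform-provided */
struct dma_async_tx_descriptor *tx;

chan->private = dws;	/* consulted by dwc_prep_slave_sg() */
tx = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
					DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
if (!tx)
	return -ENOMEM;		/* e.g. descriptor pool exhausted */

tx->tx_submit(tx);
chan->device->device_issue_pending(chan);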
783 | 783 | ||
784 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) | 784 | static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
785 | unsigned long arg) | ||
785 | { | 786 | { |
786 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 787 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
787 | struct dw_dma *dw = to_dw_dma(chan->device); | 788 | struct dw_dma *dw = to_dw_dma(chan->device); |
788 | struct dw_desc *desc, *_desc; | 789 | struct dw_desc *desc, *_desc; |
789 | LIST_HEAD(list); | 790 | LIST_HEAD(list); |
790 | 791 | ||
791 | /* Only supports DMA_TERMINATE_ALL */ | 792 | /* Only supports DMA_TERMINATE_ALL */ |
792 | if (cmd != DMA_TERMINATE_ALL) | 793 | if (cmd != DMA_TERMINATE_ALL) |
793 | return -ENXIO; | 794 | return -ENXIO; |
794 | 795 | ||
795 | /* | 796 | /* |
796 | * This is only called when something went wrong elsewhere, so | 797 | * This is only called when something went wrong elsewhere, so |
797 | * we don't really care about the data. Just disable the | 798 | * we don't really care about the data. Just disable the |
798 | * channel. We still have to poll the channel enable bit due | 799 | * channel. We still have to poll the channel enable bit due |
799 | * to AHB/HSB limitations. | 800 | * to AHB/HSB limitations. |
800 | */ | 801 | */ |
801 | spin_lock_bh(&dwc->lock); | 802 | spin_lock_bh(&dwc->lock); |
802 | 803 | ||
803 | channel_clear_bit(dw, CH_EN, dwc->mask); | 804 | channel_clear_bit(dw, CH_EN, dwc->mask); |
804 | 805 | ||
805 | while (dma_readl(dw, CH_EN) & dwc->mask) | 806 | while (dma_readl(dw, CH_EN) & dwc->mask) |
806 | cpu_relax(); | 807 | cpu_relax(); |
807 | 808 | ||
808 | /* active_list entries will end up before queued entries */ | 809 | /* active_list entries will end up before queued entries */ |
809 | list_splice_init(&dwc->queue, &list); | 810 | list_splice_init(&dwc->queue, &list); |
810 | list_splice_init(&dwc->active_list, &list); | 811 | list_splice_init(&dwc->active_list, &list); |
811 | 812 | ||
812 | spin_unlock_bh(&dwc->lock); | 813 | spin_unlock_bh(&dwc->lock); |
813 | 814 | ||
814 | /* Flush all pending and queued descriptors */ | 815 | /* Flush all pending and queued descriptors */ |
815 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 816 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
816 | dwc_descriptor_complete(dwc, desc); | 817 | dwc_descriptor_complete(dwc, desc); |
817 | 818 | ||
818 | return 0; | 819 | return 0; |
819 | } | 820 | } |
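This hunk is the whole point of the commit for dw_dmac: device_control() now takes a third, ioctl-style unsigned long argument. DMA_TERMINATE_ALL ignores it, so existing callers simply pass 0, but a future command can reinterpret arg, for instance as a pointer to a command-specific configuration struct. A hedged sketch of both sides under the new prototype (foo_control is illustrative, not a real driver):

/* provider side: the prototype every driver now implements */
static int foo_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	switch (cmd) {
	case DMA_TERMINATE_ALL:
		/* arg is unused for this command */
		return 0;
	default:
		return -ENXIO;	/* unsupported command */
	}
}

/* client side: pass 0 until commands that need data exist */
ret = chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);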
820 | 821 | ||
821 | static enum dma_status | 822 | static enum dma_status |
822 | dwc_tx_status(struct dma_chan *chan, | 823 | dwc_tx_status(struct dma_chan *chan, |
823 | dma_cookie_t cookie, | 824 | dma_cookie_t cookie, |
824 | struct dma_tx_state *txstate) | 825 | struct dma_tx_state *txstate) |
825 | { | 826 | { |
826 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 827 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
827 | dma_cookie_t last_used; | 828 | dma_cookie_t last_used; |
828 | dma_cookie_t last_complete; | 829 | dma_cookie_t last_complete; |
829 | int ret; | 830 | int ret; |
830 | 831 | ||
831 | last_complete = dwc->completed; | 832 | last_complete = dwc->completed; |
832 | last_used = chan->cookie; | 833 | last_used = chan->cookie; |
833 | 834 | ||
834 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 835 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
835 | if (ret != DMA_SUCCESS) { | 836 | if (ret != DMA_SUCCESS) { |
836 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 837 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
837 | 838 | ||
838 | last_complete = dwc->completed; | 839 | last_complete = dwc->completed; |
839 | last_used = chan->cookie; | 840 | last_used = chan->cookie; |
840 | 841 | ||
841 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 842 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
842 | } | 843 | } |
843 | 844 | ||
844 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 845 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
845 | 846 | ||
846 | return ret; | 847 | return ret; |
847 | } | 848 | } |
848 | 849 | ||
849 | static void dwc_issue_pending(struct dma_chan *chan) | 850 | static void dwc_issue_pending(struct dma_chan *chan) |
850 | { | 851 | { |
851 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 852 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
852 | 853 | ||
853 | spin_lock_bh(&dwc->lock); | 854 | spin_lock_bh(&dwc->lock); |
854 | if (!list_empty(&dwc->queue)) | 855 | if (!list_empty(&dwc->queue)) |
855 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); | 856 | dwc_scan_descriptors(to_dw_dma(chan->device), dwc); |
856 | spin_unlock_bh(&dwc->lock); | 857 | spin_unlock_bh(&dwc->lock); |
857 | } | 858 | } |
858 | 859 | ||
859 | static int dwc_alloc_chan_resources(struct dma_chan *chan) | 860 | static int dwc_alloc_chan_resources(struct dma_chan *chan) |
860 | { | 861 | { |
861 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 862 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
862 | struct dw_dma *dw = to_dw_dma(chan->device); | 863 | struct dw_dma *dw = to_dw_dma(chan->device); |
863 | struct dw_desc *desc; | 864 | struct dw_desc *desc; |
864 | struct dw_dma_slave *dws; | 865 | struct dw_dma_slave *dws; |
865 | int i; | 866 | int i; |
866 | u32 cfghi; | 867 | u32 cfghi; |
867 | u32 cfglo; | 868 | u32 cfglo; |
868 | 869 | ||
869 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); | 870 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); |
870 | 871 | ||
871 | /* ASSERT: channel is idle */ | 872 | /* ASSERT: channel is idle */ |
872 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 873 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
873 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); | 874 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
874 | return -EIO; | 875 | return -EIO; |
875 | } | 876 | } |
876 | 877 | ||
877 | dwc->completed = chan->cookie = 1; | 878 | dwc->completed = chan->cookie = 1; |
878 | 879 | ||
879 | cfghi = DWC_CFGH_FIFO_MODE; | 880 | cfghi = DWC_CFGH_FIFO_MODE; |
880 | cfglo = 0; | 881 | cfglo = 0; |
881 | 882 | ||
882 | dws = chan->private; | 883 | dws = chan->private; |
883 | if (dws) { | 884 | if (dws) { |
884 | /* | 885 | /* |
885 | * We need controller-specific data to set up slave | 886 | * We need controller-specific data to set up slave |
886 | * transfers. | 887 | * transfers. |
887 | */ | 888 | */ |
888 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); | 889 | BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); |
889 | 890 | ||
890 | cfghi = dws->cfg_hi; | 891 | cfghi = dws->cfg_hi; |
891 | cfglo = dws->cfg_lo; | 892 | cfglo = dws->cfg_lo; |
892 | } | 893 | } |
893 | channel_writel(dwc, CFG_LO, cfglo); | 894 | channel_writel(dwc, CFG_LO, cfglo); |
894 | channel_writel(dwc, CFG_HI, cfghi); | 895 | channel_writel(dwc, CFG_HI, cfghi); |
895 | 896 | ||
896 | /* | 897 | /* |
897 | * NOTE: some controllers may have additional features that we | 898 | * NOTE: some controllers may have additional features that we |
898 | * need to initialize here, like "scatter-gather" (which | 899 | * need to initialize here, like "scatter-gather" (which |
899 | * doesn't mean what you think it means), and status writeback. | 900 | * doesn't mean what you think it means), and status writeback. |
900 | */ | 901 | */ |
901 | 902 | ||
902 | spin_lock_bh(&dwc->lock); | 903 | spin_lock_bh(&dwc->lock); |
903 | i = dwc->descs_allocated; | 904 | i = dwc->descs_allocated; |
904 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { | 905 | while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { |
905 | spin_unlock_bh(&dwc->lock); | 906 | spin_unlock_bh(&dwc->lock); |
906 | 907 | ||
907 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); | 908 | desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL); |
908 | if (!desc) { | 909 | if (!desc) { |
909 | dev_info(chan2dev(chan), | 910 | dev_info(chan2dev(chan), |
910 | "only allocated %d descriptors\n", i); | 911 | "only allocated %d descriptors\n", i); |
911 | spin_lock_bh(&dwc->lock); | 912 | spin_lock_bh(&dwc->lock); |
912 | break; | 913 | break; |
913 | } | 914 | } |
914 | 915 | ||
915 | INIT_LIST_HEAD(&desc->tx_list); | 916 | INIT_LIST_HEAD(&desc->tx_list); |
916 | dma_async_tx_descriptor_init(&desc->txd, chan); | 917 | dma_async_tx_descriptor_init(&desc->txd, chan); |
917 | desc->txd.tx_submit = dwc_tx_submit; | 918 | desc->txd.tx_submit = dwc_tx_submit; |
918 | desc->txd.flags = DMA_CTRL_ACK; | 919 | desc->txd.flags = DMA_CTRL_ACK; |
919 | desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, | 920 | desc->txd.phys = dma_map_single(chan2parent(chan), &desc->lli, |
920 | sizeof(desc->lli), DMA_TO_DEVICE); | 921 | sizeof(desc->lli), DMA_TO_DEVICE); |
921 | dwc_desc_put(dwc, desc); | 922 | dwc_desc_put(dwc, desc); |
922 | 923 | ||
923 | spin_lock_bh(&dwc->lock); | 924 | spin_lock_bh(&dwc->lock); |
924 | i = ++dwc->descs_allocated; | 925 | i = ++dwc->descs_allocated; |
925 | } | 926 | } |
926 | 927 | ||
927 | /* Enable interrupts */ | 928 | /* Enable interrupts */ |
928 | channel_set_bit(dw, MASK.XFER, dwc->mask); | 929 | channel_set_bit(dw, MASK.XFER, dwc->mask); |
929 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); | 930 | channel_set_bit(dw, MASK.BLOCK, dwc->mask); |
930 | channel_set_bit(dw, MASK.ERROR, dwc->mask); | 931 | channel_set_bit(dw, MASK.ERROR, dwc->mask); |
931 | 932 | ||
932 | spin_unlock_bh(&dwc->lock); | 933 | spin_unlock_bh(&dwc->lock); |
933 | 934 | ||
934 | dev_dbg(chan2dev(chan), | 935 | dev_dbg(chan2dev(chan), |
935 | "alloc_chan_resources allocated %d descriptors\n", i); | 936 | "alloc_chan_resources allocated %d descriptors\n", i); |
936 | 937 | ||
937 | return i; | 938 | return i; |
938 | } | 939 | } |
939 | 940 | ||
940 | static void dwc_free_chan_resources(struct dma_chan *chan) | 941 | static void dwc_free_chan_resources(struct dma_chan *chan) |
941 | { | 942 | { |
942 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 943 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
943 | struct dw_dma *dw = to_dw_dma(chan->device); | 944 | struct dw_dma *dw = to_dw_dma(chan->device); |
944 | struct dw_desc *desc, *_desc; | 945 | struct dw_desc *desc, *_desc; |
945 | LIST_HEAD(list); | 946 | LIST_HEAD(list); |
946 | 947 | ||
947 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", | 948 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", |
948 | dwc->descs_allocated); | 949 | dwc->descs_allocated); |
949 | 950 | ||
950 | /* ASSERT: channel is idle */ | 951 | /* ASSERT: channel is idle */ |
951 | BUG_ON(!list_empty(&dwc->active_list)); | 952 | BUG_ON(!list_empty(&dwc->active_list)); |
952 | BUG_ON(!list_empty(&dwc->queue)); | 953 | BUG_ON(!list_empty(&dwc->queue)); |
953 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); | 954 | BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask); |
954 | 955 | ||
955 | spin_lock_bh(&dwc->lock); | 956 | spin_lock_bh(&dwc->lock); |
956 | list_splice_init(&dwc->free_list, &list); | 957 | list_splice_init(&dwc->free_list, &list); |
957 | dwc->descs_allocated = 0; | 958 | dwc->descs_allocated = 0; |
958 | 959 | ||
959 | /* Disable interrupts */ | 960 | /* Disable interrupts */ |
960 | channel_clear_bit(dw, MASK.XFER, dwc->mask); | 961 | channel_clear_bit(dw, MASK.XFER, dwc->mask); |
961 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); | 962 | channel_clear_bit(dw, MASK.BLOCK, dwc->mask); |
962 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); | 963 | channel_clear_bit(dw, MASK.ERROR, dwc->mask); |
963 | 964 | ||
964 | spin_unlock_bh(&dwc->lock); | 965 | spin_unlock_bh(&dwc->lock); |
965 | 966 | ||
966 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 967 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
967 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | 968 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
968 | dma_unmap_single(chan2parent(chan), desc->txd.phys, | 969 | dma_unmap_single(chan2parent(chan), desc->txd.phys, |
969 | sizeof(desc->lli), DMA_TO_DEVICE); | 970 | sizeof(desc->lli), DMA_TO_DEVICE); |
970 | kfree(desc); | 971 | kfree(desc); |
971 | } | 972 | } |
972 | 973 | ||
973 | dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); | 974 | dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); |
974 | } | 975 | } |
975 | 976 | ||
976 | /* --------------------- Cyclic DMA API extensions -------------------- */ | 977 | /* --------------------- Cyclic DMA API extensions -------------------- */ |
977 | 978 | ||
978 | /** | 979 | /** |
979 | * dw_dma_cyclic_start - start the cyclic DMA transfer | 980 | * dw_dma_cyclic_start - start the cyclic DMA transfer |
980 | * @chan: the DMA channel to start | 981 | * @chan: the DMA channel to start |
981 | * | 982 | * |
982 | * Must be called with soft interrupts disabled. Returns zero on success or | 983 | * Must be called with soft interrupts disabled. Returns zero on success or |
983 | * -errno on failure. | 984 | * -errno on failure. |
984 | */ | 985 | */ |
985 | int dw_dma_cyclic_start(struct dma_chan *chan) | 986 | int dw_dma_cyclic_start(struct dma_chan *chan) |
986 | { | 987 | { |
987 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 988 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
988 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 989 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
989 | 990 | ||
990 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { | 991 | if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { |
991 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); | 992 | dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n"); |
992 | return -ENODEV; | 993 | return -ENODEV; |
993 | } | 994 | } |
994 | 995 | ||
995 | spin_lock(&dwc->lock); | 996 | spin_lock(&dwc->lock); |
996 | 997 | ||
997 | /* assert channel is idle */ | 998 | /* assert channel is idle */ |
998 | if (dma_readl(dw, CH_EN) & dwc->mask) { | 999 | if (dma_readl(dw, CH_EN) & dwc->mask) { |
999 | dev_err(chan2dev(&dwc->chan), | 1000 | dev_err(chan2dev(&dwc->chan), |
1000 | "BUG: Attempted to start non-idle channel\n"); | 1001 | "BUG: Attempted to start non-idle channel\n"); |
1001 | dev_err(chan2dev(&dwc->chan), | 1002 | dev_err(chan2dev(&dwc->chan), |
1002 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", | 1003 | " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", |
1003 | channel_readl(dwc, SAR), | 1004 | channel_readl(dwc, SAR), |
1004 | channel_readl(dwc, DAR), | 1005 | channel_readl(dwc, DAR), |
1005 | channel_readl(dwc, LLP), | 1006 | channel_readl(dwc, LLP), |
1006 | channel_readl(dwc, CTL_HI), | 1007 | channel_readl(dwc, CTL_HI), |
1007 | channel_readl(dwc, CTL_LO)); | 1008 | channel_readl(dwc, CTL_LO)); |
1008 | spin_unlock(&dwc->lock); | 1009 | spin_unlock(&dwc->lock); |
1009 | return -EBUSY; | 1010 | return -EBUSY; |
1010 | } | 1011 | } |
1011 | 1012 | ||
1012 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | 1013 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
1013 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1014 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1014 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1015 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1015 | 1016 | ||
1016 | /* setup DMAC channel registers */ | 1017 | /* setup DMAC channel registers */ |
1017 | channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); | 1018 | channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); |
1018 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); | 1019 | channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); |
1019 | channel_writel(dwc, CTL_HI, 0); | 1020 | channel_writel(dwc, CTL_HI, 0); |
1020 | 1021 | ||
1021 | channel_set_bit(dw, CH_EN, dwc->mask); | 1022 | channel_set_bit(dw, CH_EN, dwc->mask); |
1022 | 1023 | ||
1023 | spin_unlock(&dwc->lock); | 1024 | spin_unlock(&dwc->lock); |
1024 | 1025 | ||
1025 | return 0; | 1026 | return 0; |
1026 | } | 1027 | } |
1027 | EXPORT_SYMBOL(dw_dma_cyclic_start); | 1028 | EXPORT_SYMBOL(dw_dma_cyclic_start); |
1028 | 1029 | ||
1029 | /** | 1030 | /** |
1030 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer | 1031 | * dw_dma_cyclic_stop - stop the cyclic DMA transfer |
1031 | * @chan: the DMA channel to stop | 1032 | * @chan: the DMA channel to stop |
1032 | * | 1033 | * |
1033 | * Must be called with soft interrupts disabled. | 1034 | * Must be called with soft interrupts disabled. |
1034 | */ | 1035 | */ |
1035 | void dw_dma_cyclic_stop(struct dma_chan *chan) | 1036 | void dw_dma_cyclic_stop(struct dma_chan *chan) |
1036 | { | 1037 | { |
1037 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1038 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1038 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1039 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1039 | 1040 | ||
1040 | spin_lock(&dwc->lock); | 1041 | spin_lock(&dwc->lock); |
1041 | 1042 | ||
1042 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1043 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1043 | while (dma_readl(dw, CH_EN) & dwc->mask) | 1044 | while (dma_readl(dw, CH_EN) & dwc->mask) |
1044 | cpu_relax(); | 1045 | cpu_relax(); |
1045 | 1046 | ||
1046 | spin_unlock(&dwc->lock); | 1047 | spin_unlock(&dwc->lock); |
1047 | } | 1048 | } |
1048 | EXPORT_SYMBOL(dw_dma_cyclic_stop); | 1049 | EXPORT_SYMBOL(dw_dma_cyclic_stop); |
1049 | 1050 | ||
1050 | /** | 1051 | /** |
1051 | * dw_dma_cyclic_prep - prepare the cyclic DMA transfer | 1052 | * dw_dma_cyclic_prep - prepare the cyclic DMA transfer |
1052 | * @chan: the DMA channel to prepare | 1053 | * @chan: the DMA channel to prepare |
1053 | * @buf_addr: physical DMA address where the buffer starts | 1054 | * @buf_addr: physical DMA address where the buffer starts |
1054 | * @buf_len: total number of bytes for the entire buffer | 1055 | * @buf_len: total number of bytes for the entire buffer |
1055 | * @period_len: number of bytes for each period | 1056 | * @period_len: number of bytes for each period |
1056 | * @direction: transfer direction, to or from device | 1057 | * @direction: transfer direction, to or from device |
1057 | * | 1058 | * |
1058 | * Must be called before trying to start the transfer. Returns a valid struct | 1059 | * Must be called before trying to start the transfer. Returns a valid struct |
1059 | * dw_cyclic_desc on success or an ERR_PTR(-errno) on failure. | 1060 | * dw_cyclic_desc on success or an ERR_PTR(-errno) on failure. |
1060 | */ | 1061 | */ |
1061 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, | 1062 | struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, |
1062 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, | 1063 | dma_addr_t buf_addr, size_t buf_len, size_t period_len, |
1063 | enum dma_data_direction direction) | 1064 | enum dma_data_direction direction) |
1064 | { | 1065 | { |
1065 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1066 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1066 | struct dw_cyclic_desc *cdesc; | 1067 | struct dw_cyclic_desc *cdesc; |
1067 | struct dw_cyclic_desc *retval = NULL; | 1068 | struct dw_cyclic_desc *retval = NULL; |
1068 | struct dw_desc *desc; | 1069 | struct dw_desc *desc; |
1069 | struct dw_desc *last = NULL; | 1070 | struct dw_desc *last = NULL; |
1070 | struct dw_dma_slave *dws = chan->private; | 1071 | struct dw_dma_slave *dws = chan->private; |
1071 | unsigned long was_cyclic; | 1072 | unsigned long was_cyclic; |
1072 | unsigned int reg_width; | 1073 | unsigned int reg_width; |
1073 | unsigned int periods; | 1074 | unsigned int periods; |
1074 | unsigned int i; | 1075 | unsigned int i; |
1075 | 1076 | ||
1076 | spin_lock_bh(&dwc->lock); | 1077 | spin_lock_bh(&dwc->lock); |
1077 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { | 1078 | if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { |
1078 | spin_unlock_bh(&dwc->lock); | 1079 | spin_unlock_bh(&dwc->lock); |
1079 | dev_dbg(chan2dev(&dwc->chan), | 1080 | dev_dbg(chan2dev(&dwc->chan), |
1080 | "queue and/or active list are not empty\n"); | 1081 | "queue and/or active list are not empty\n"); |
1081 | return ERR_PTR(-EBUSY); | 1082 | return ERR_PTR(-EBUSY); |
1082 | } | 1083 | } |
1083 | 1084 | ||
1084 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 1085 | was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1085 | spin_unlock_bh(&dwc->lock); | 1086 | spin_unlock_bh(&dwc->lock); |
1086 | if (was_cyclic) { | 1087 | if (was_cyclic) { |
1087 | dev_dbg(chan2dev(&dwc->chan), | 1088 | dev_dbg(chan2dev(&dwc->chan), |
1088 | "channel already prepared for cyclic DMA\n"); | 1089 | "channel already prepared for cyclic DMA\n"); |
1089 | return ERR_PTR(-EBUSY); | 1090 | return ERR_PTR(-EBUSY); |
1090 | } | 1091 | } |
1091 | 1092 | ||
1092 | retval = ERR_PTR(-EINVAL); | 1093 | retval = ERR_PTR(-EINVAL); |
1093 | reg_width = dws->reg_width; | 1094 | reg_width = dws->reg_width; |
1094 | periods = buf_len / period_len; | 1095 | periods = buf_len / period_len; |
1095 | 1096 | ||
1096 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ | 1097 | /* Check for too big/unaligned periods and unaligned DMA buffer. */ |
1097 | if (period_len > (DWC_MAX_COUNT << reg_width)) | 1098 | if (period_len > (DWC_MAX_COUNT << reg_width)) |
1098 | goto out_err; | 1099 | goto out_err; |
1099 | if (unlikely(period_len & ((1 << reg_width) - 1))) | 1100 | if (unlikely(period_len & ((1 << reg_width) - 1))) |
1100 | goto out_err; | 1101 | goto out_err; |
1101 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) | 1102 | if (unlikely(buf_addr & ((1 << reg_width) - 1))) |
1102 | goto out_err; | 1103 | goto out_err; |
1103 | if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) | 1104 | if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE)))) |
1104 | goto out_err; | 1105 | goto out_err; |
1105 | 1106 | ||
1106 | retval = ERR_PTR(-ENOMEM); | 1107 | retval = ERR_PTR(-ENOMEM); |
1107 | 1108 | ||
1108 | if (periods > NR_DESCS_PER_CHANNEL) | 1109 | if (periods > NR_DESCS_PER_CHANNEL) |
1109 | goto out_err; | 1110 | goto out_err; |
1110 | 1111 | ||
1111 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); | 1112 | cdesc = kzalloc(sizeof(struct dw_cyclic_desc), GFP_KERNEL); |
1112 | if (!cdesc) | 1113 | if (!cdesc) |
1113 | goto out_err; | 1114 | goto out_err; |
1114 | 1115 | ||
1115 | cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); | 1116 | cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); |
1116 | if (!cdesc->desc) | 1117 | if (!cdesc->desc) |
1117 | goto out_err_alloc; | 1118 | goto out_err_alloc; |
1118 | 1119 | ||
1119 | for (i = 0; i < periods; i++) { | 1120 | for (i = 0; i < periods; i++) { |
1120 | desc = dwc_desc_get(dwc); | 1121 | desc = dwc_desc_get(dwc); |
1121 | if (!desc) | 1122 | if (!desc) |
1122 | goto out_err_desc_get; | 1123 | goto out_err_desc_get; |
1123 | 1124 | ||
1124 | switch (direction) { | 1125 | switch (direction) { |
1125 | case DMA_TO_DEVICE: | 1126 | case DMA_TO_DEVICE: |
1126 | desc->lli.dar = dws->tx_reg; | 1127 | desc->lli.dar = dws->tx_reg; |
1127 | desc->lli.sar = buf_addr + (period_len * i); | 1128 | desc->lli.sar = buf_addr + (period_len * i); |
1128 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO | 1129 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO |
1129 | | DWC_CTLL_DST_WIDTH(reg_width) | 1130 | | DWC_CTLL_DST_WIDTH(reg_width) |
1130 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1131 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1131 | | DWC_CTLL_DST_FIX | 1132 | | DWC_CTLL_DST_FIX |
1132 | | DWC_CTLL_SRC_INC | 1133 | | DWC_CTLL_SRC_INC |
1133 | | DWC_CTLL_FC_M2P | 1134 | | DWC_CTLL_FC_M2P |
1134 | | DWC_CTLL_INT_EN); | 1135 | | DWC_CTLL_INT_EN); |
1135 | break; | 1136 | break; |
1136 | case DMA_FROM_DEVICE: | 1137 | case DMA_FROM_DEVICE: |
1137 | desc->lli.dar = buf_addr + (period_len * i); | 1138 | desc->lli.dar = buf_addr + (period_len * i); |
1138 | desc->lli.sar = dws->rx_reg; | 1139 | desc->lli.sar = dws->rx_reg; |
1139 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO | 1140 | desc->lli.ctllo = (DWC_DEFAULT_CTLLO |
1140 | | DWC_CTLL_SRC_WIDTH(reg_width) | 1141 | | DWC_CTLL_SRC_WIDTH(reg_width) |
1141 | | DWC_CTLL_DST_WIDTH(reg_width) | 1142 | | DWC_CTLL_DST_WIDTH(reg_width) |
1142 | | DWC_CTLL_DST_INC | 1143 | | DWC_CTLL_DST_INC |
1143 | | DWC_CTLL_SRC_FIX | 1144 | | DWC_CTLL_SRC_FIX |
1144 | | DWC_CTLL_FC_P2M | 1145 | | DWC_CTLL_FC_P2M |
1145 | | DWC_CTLL_INT_EN); | 1146 | | DWC_CTLL_INT_EN); |
1146 | break; | 1147 | break; |
1147 | default: | 1148 | default: |
1148 | break; | 1149 | break; |
1149 | } | 1150 | } |
1150 | 1151 | ||
1151 | desc->lli.ctlhi = (period_len >> reg_width); | 1152 | desc->lli.ctlhi = (period_len >> reg_width); |
1152 | cdesc->desc[i] = desc; | 1153 | cdesc->desc[i] = desc; |
1153 | 1154 | ||
1154 | if (last) { | 1155 | if (last) { |
1155 | last->lli.llp = desc->txd.phys; | 1156 | last->lli.llp = desc->txd.phys; |
1156 | dma_sync_single_for_device(chan2parent(chan), | 1157 | dma_sync_single_for_device(chan2parent(chan), |
1157 | last->txd.phys, sizeof(last->lli), | 1158 | last->txd.phys, sizeof(last->lli), |
1158 | DMA_TO_DEVICE); | 1159 | DMA_TO_DEVICE); |
1159 | } | 1160 | } |
1160 | 1161 | ||
1161 | last = desc; | 1162 | last = desc; |
1162 | } | 1163 | } |
1163 | 1164 | ||
1164 | /* let's make a cyclic list */ | 1165 | /* let's make a cyclic list */ |
1165 | last->lli.llp = cdesc->desc[0]->txd.phys; | 1166 | last->lli.llp = cdesc->desc[0]->txd.phys; |
1166 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, | 1167 | dma_sync_single_for_device(chan2parent(chan), last->txd.phys, |
1167 | sizeof(last->lli), DMA_TO_DEVICE); | 1168 | sizeof(last->lli), DMA_TO_DEVICE); |
1168 | 1169 | ||
1169 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " | 1170 | dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " |
1170 | "period %zu periods %d\n", buf_addr, buf_len, | 1171 | "period %zu periods %d\n", buf_addr, buf_len, |
1171 | period_len, periods); | 1172 | period_len, periods); |
1172 | 1173 | ||
1173 | cdesc->periods = periods; | 1174 | cdesc->periods = periods; |
1174 | dwc->cdesc = cdesc; | 1175 | dwc->cdesc = cdesc; |
1175 | 1176 | ||
1176 | return cdesc; | 1177 | return cdesc; |
1177 | 1178 | ||
1178 | out_err_desc_get: | 1179 | out_err_desc_get: |
1179 | while (i--) | 1180 | while (i--) |
1180 | dwc_desc_put(dwc, cdesc->desc[i]); | 1181 | dwc_desc_put(dwc, cdesc->desc[i]); |
1181 | out_err_alloc: | 1182 | out_err_alloc: |
1182 | kfree(cdesc); | 1183 | kfree(cdesc); |
1183 | out_err: | 1184 | out_err: |
1184 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 1185 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1185 | return (struct dw_cyclic_desc *)retval; | 1186 | return (struct dw_cyclic_desc *)retval; |
1186 | } | 1187 | } |
1187 | EXPORT_SYMBOL(dw_dma_cyclic_prep); | 1188 | EXPORT_SYMBOL(dw_dma_cyclic_prep); |
1188 | 1189 | ||
1189 | /** | 1190 | /** |
1190 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer | 1191 | * dw_dma_cyclic_free - free a prepared cyclic DMA transfer |
1191 | * @chan: the DMA channel to free | 1192 | * @chan: the DMA channel to free |
1192 | */ | 1193 | */ |
1193 | void dw_dma_cyclic_free(struct dma_chan *chan) | 1194 | void dw_dma_cyclic_free(struct dma_chan *chan) |
1194 | { | 1195 | { |
1195 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); | 1196 | struct dw_dma_chan *dwc = to_dw_dma_chan(chan); |
1196 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); | 1197 | struct dw_dma *dw = to_dw_dma(dwc->chan.device); |
1197 | struct dw_cyclic_desc *cdesc = dwc->cdesc; | 1198 | struct dw_cyclic_desc *cdesc = dwc->cdesc; |
1198 | int i; | 1199 | int i; |
1199 | 1200 | ||
1200 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); | 1201 | dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); |
1201 | 1202 | ||
1202 | if (!cdesc) | 1203 | if (!cdesc) |
1203 | return; | 1204 | return; |
1204 | 1205 | ||
1205 | spin_lock_bh(&dwc->lock); | 1206 | spin_lock_bh(&dwc->lock); |
1206 | 1207 | ||
1207 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1208 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1208 | while (dma_readl(dw, CH_EN) & dwc->mask) | 1209 | while (dma_readl(dw, CH_EN) & dwc->mask) |
1209 | cpu_relax(); | 1210 | cpu_relax(); |
1210 | 1211 | ||
1211 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); | 1212 | dma_writel(dw, CLEAR.BLOCK, dwc->mask); |
1212 | dma_writel(dw, CLEAR.ERROR, dwc->mask); | 1213 | dma_writel(dw, CLEAR.ERROR, dwc->mask); |
1213 | dma_writel(dw, CLEAR.XFER, dwc->mask); | 1214 | dma_writel(dw, CLEAR.XFER, dwc->mask); |
1214 | 1215 | ||
1215 | spin_unlock_bh(&dwc->lock); | 1216 | spin_unlock_bh(&dwc->lock); |
1216 | 1217 | ||
1217 | for (i = 0; i < cdesc->periods; i++) | 1218 | for (i = 0; i < cdesc->periods; i++) |
1218 | dwc_desc_put(dwc, cdesc->desc[i]); | 1219 | dwc_desc_put(dwc, cdesc->desc[i]); |
1219 | 1220 | ||
1220 | kfree(cdesc->desc); | 1221 | kfree(cdesc->desc); |
1221 | kfree(cdesc); | 1222 | kfree(cdesc); |
1222 | 1223 | ||
1223 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); | 1224 | clear_bit(DW_DMA_IS_CYCLIC, &dwc->flags); |
1224 | } | 1225 | } |
1225 | EXPORT_SYMBOL(dw_dma_cyclic_free); | 1226 | EXPORT_SYMBOL(dw_dma_cyclic_free); |
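Taken together, the four exported helpers above form the dw_dmac-specific cyclic API. A minimal usage sketch follows, assuming the channel was already set up for slave transfers via chan->private; the buffer, lengths, and the start_cyclic_rx() name are illustrative, not part of the driver.

    #include <linux/dmaengine.h>
    #include <linux/dw_dmac.h>
    #include <linux/err.h>
    #include <linux/interrupt.h>

    static int start_cyclic_rx(struct dma_chan *chan, dma_addr_t buf,
    			   size_t buf_len, size_t period_len)
    {
    	struct dw_cyclic_desc *cdesc;
    	int ret;

    	/* Build the circular descriptor ring covering the whole buffer. */
    	cdesc = dw_dma_cyclic_prep(chan, buf, buf_len, period_len,
    				   DMA_FROM_DEVICE);
    	if (IS_ERR(cdesc))
    		return PTR_ERR(cdesc);

    	/* dw_dma_cyclic_start() must run with soft interrupts disabled. */
    	local_bh_disable();
    	ret = dw_dma_cyclic_start(chan);
    	local_bh_enable();
    	if (ret)
    		dw_dma_cyclic_free(chan);

    	return ret;
    }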
1226 | 1227 | ||
1227 | /*----------------------------------------------------------------------*/ | 1228 | /*----------------------------------------------------------------------*/ |
1228 | 1229 | ||
1229 | static void dw_dma_off(struct dw_dma *dw) | 1230 | static void dw_dma_off(struct dw_dma *dw) |
1230 | { | 1231 | { |
1231 | dma_writel(dw, CFG, 0); | 1232 | dma_writel(dw, CFG, 0); |
1232 | 1233 | ||
1233 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 1234 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
1234 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 1235 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
1235 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | 1236 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1236 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | 1237 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); |
1237 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 1238 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
1238 | 1239 | ||
1239 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) | 1240 | while (dma_readl(dw, CFG) & DW_CFG_DMA_EN) |
1240 | cpu_relax(); | 1241 | cpu_relax(); |
1241 | } | 1242 | } |
1242 | 1243 | ||
1243 | static int __init dw_probe(struct platform_device *pdev) | 1244 | static int __init dw_probe(struct platform_device *pdev) |
1244 | { | 1245 | { |
1245 | struct dw_dma_platform_data *pdata; | 1246 | struct dw_dma_platform_data *pdata; |
1246 | struct resource *io; | 1247 | struct resource *io; |
1247 | struct dw_dma *dw; | 1248 | struct dw_dma *dw; |
1248 | size_t size; | 1249 | size_t size; |
1249 | int irq; | 1250 | int irq; |
1250 | int err; | 1251 | int err; |
1251 | int i; | 1252 | int i; |
1252 | 1253 | ||
1253 | pdata = pdev->dev.platform_data; | 1254 | pdata = pdev->dev.platform_data; |
1254 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) | 1255 | if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) |
1255 | return -EINVAL; | 1256 | return -EINVAL; |
1256 | 1257 | ||
1257 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1258 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1258 | if (!io) | 1259 | if (!io) |
1259 | return -EINVAL; | 1260 | return -EINVAL; |
1260 | 1261 | ||
1261 | irq = platform_get_irq(pdev, 0); | 1262 | irq = platform_get_irq(pdev, 0); |
1262 | if (irq < 0) | 1263 | if (irq < 0) |
1263 | return irq; | 1264 | return irq; |
1264 | 1265 | ||
1265 | size = sizeof(struct dw_dma); | 1266 | size = sizeof(struct dw_dma); |
1266 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); | 1267 | size += pdata->nr_channels * sizeof(struct dw_dma_chan); |
1267 | dw = kzalloc(size, GFP_KERNEL); | 1268 | dw = kzalloc(size, GFP_KERNEL); |
1268 | if (!dw) | 1269 | if (!dw) |
1269 | return -ENOMEM; | 1270 | return -ENOMEM; |
1270 | 1271 | ||
1271 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { | 1272 | if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { |
1272 | err = -EBUSY; | 1273 | err = -EBUSY; |
1273 | goto err_kfree; | 1274 | goto err_kfree; |
1274 | } | 1275 | } |
1275 | 1276 | ||
1276 | dw->regs = ioremap(io->start, DW_REGLEN); | 1277 | dw->regs = ioremap(io->start, DW_REGLEN); |
1277 | if (!dw->regs) { | 1278 | if (!dw->regs) { |
1278 | err = -ENOMEM; | 1279 | err = -ENOMEM; |
1279 | goto err_release_r; | 1280 | goto err_release_r; |
1280 | } | 1281 | } |
1281 | 1282 | ||
1282 | dw->clk = clk_get(&pdev->dev, "hclk"); | 1283 | dw->clk = clk_get(&pdev->dev, "hclk"); |
1283 | if (IS_ERR(dw->clk)) { | 1284 | if (IS_ERR(dw->clk)) { |
1284 | err = PTR_ERR(dw->clk); | 1285 | err = PTR_ERR(dw->clk); |
1285 | goto err_clk; | 1286 | goto err_clk; |
1286 | } | 1287 | } |
1287 | clk_enable(dw->clk); | 1288 | clk_enable(dw->clk); |
1288 | 1289 | ||
1289 | /* force dma off, just in case */ | 1290 | /* force dma off, just in case */ |
1290 | dw_dma_off(dw); | 1291 | dw_dma_off(dw); |
1291 | 1292 | ||
1292 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); | 1293 | err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); |
1293 | if (err) | 1294 | if (err) |
1294 | goto err_irq; | 1295 | goto err_irq; |
1295 | 1296 | ||
1296 | platform_set_drvdata(pdev, dw); | 1297 | platform_set_drvdata(pdev, dw); |
1297 | 1298 | ||
1298 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); | 1299 | tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); |
1299 | 1300 | ||
1300 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; | 1301 | dw->all_chan_mask = (1 << pdata->nr_channels) - 1; |
1301 | 1302 | ||
1302 | INIT_LIST_HEAD(&dw->dma.channels); | 1303 | INIT_LIST_HEAD(&dw->dma.channels); |
1303 | for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { | 1304 | for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { |
1304 | struct dw_dma_chan *dwc = &dw->chan[i]; | 1305 | struct dw_dma_chan *dwc = &dw->chan[i]; |
1305 | 1306 | ||
1306 | dwc->chan.device = &dw->dma; | 1307 | dwc->chan.device = &dw->dma; |
1307 | dwc->chan.cookie = dwc->completed = 1; | 1308 | dwc->chan.cookie = dwc->completed = 1; |
1308 | dwc->chan.chan_id = i; | 1309 | dwc->chan.chan_id = i; |
1309 | list_add_tail(&dwc->chan.device_node, &dw->dma.channels); | 1310 | list_add_tail(&dwc->chan.device_node, &dw->dma.channels); |
1310 | 1311 | ||
1311 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; | 1312 | dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; |
1312 | spin_lock_init(&dwc->lock); | 1313 | spin_lock_init(&dwc->lock); |
1313 | dwc->mask = 1 << i; | 1314 | dwc->mask = 1 << i; |
1314 | 1315 | ||
1315 | INIT_LIST_HEAD(&dwc->active_list); | 1316 | INIT_LIST_HEAD(&dwc->active_list); |
1316 | INIT_LIST_HEAD(&dwc->queue); | 1317 | INIT_LIST_HEAD(&dwc->queue); |
1317 | INIT_LIST_HEAD(&dwc->free_list); | 1318 | INIT_LIST_HEAD(&dwc->free_list); |
1318 | 1319 | ||
1319 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1320 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1320 | } | 1321 | } |
1321 | 1322 | ||
1322 | /* Clear/disable all interrupts on all channels. */ | 1323 | /* Clear/disable all interrupts on all channels. */ |
1323 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); | 1324 | dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); |
1324 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); | 1325 | dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); |
1325 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); | 1326 | dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); |
1326 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); | 1327 | dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); |
1327 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); | 1328 | dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); |
1328 | 1329 | ||
1329 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); | 1330 | channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); |
1330 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); | 1331 | channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); |
1331 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); | 1332 | channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); |
1332 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); | 1333 | channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); |
1333 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); | 1334 | channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); |
1334 | 1335 | ||
1335 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); | 1336 | dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); |
1336 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); | 1337 | dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); |
1337 | dw->dma.dev = &pdev->dev; | 1338 | dw->dma.dev = &pdev->dev; |
1338 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; | 1339 | dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; |
1339 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; | 1340 | dw->dma.device_free_chan_resources = dwc_free_chan_resources; |
1340 | 1341 | ||
1341 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; | 1342 | dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; |
1342 | 1343 | ||
1343 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; | 1344 | dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; |
1344 | dw->dma.device_control = dwc_control; | 1345 | dw->dma.device_control = dwc_control; |
1345 | 1346 | ||
1346 | dw->dma.device_tx_status = dwc_tx_status; | 1347 | dw->dma.device_tx_status = dwc_tx_status; |
1347 | dw->dma.device_issue_pending = dwc_issue_pending; | 1348 | dw->dma.device_issue_pending = dwc_issue_pending; |
1348 | 1349 | ||
1349 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1350 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1350 | 1351 | ||
1351 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", | 1352 | printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", |
1352 | dev_name(&pdev->dev), dw->dma.chancnt); | 1353 | dev_name(&pdev->dev), dw->dma.chancnt); |
1353 | 1354 | ||
1354 | dma_async_device_register(&dw->dma); | 1355 | dma_async_device_register(&dw->dma); |
1355 | 1356 | ||
1356 | return 0; | 1357 | return 0; |
1357 | 1358 | ||
1358 | err_irq: | 1359 | err_irq: |
1359 | clk_disable(dw->clk); | 1360 | clk_disable(dw->clk); |
1360 | clk_put(dw->clk); | 1361 | clk_put(dw->clk); |
1361 | err_clk: | 1362 | err_clk: |
1362 | iounmap(dw->regs); | 1363 | iounmap(dw->regs); |
1363 | dw->regs = NULL; | 1364 | dw->regs = NULL; |
1364 | err_release_r: | 1365 | err_release_r: |
1365 | release_resource(io); | 1366 | release_resource(io); |
1366 | err_kfree: | 1367 | err_kfree: |
1367 | kfree(dw); | 1368 | kfree(dw); |
1368 | return err; | 1369 | return err; |
1369 | } | 1370 | } |
1370 | 1371 | ||
1371 | static int __exit dw_remove(struct platform_device *pdev) | 1372 | static int __exit dw_remove(struct platform_device *pdev) |
1372 | { | 1373 | { |
1373 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1374 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1374 | struct dw_dma_chan *dwc, *_dwc; | 1375 | struct dw_dma_chan *dwc, *_dwc; |
1375 | struct resource *io; | 1376 | struct resource *io; |
1376 | 1377 | ||
1377 | dw_dma_off(dw); | 1378 | dw_dma_off(dw); |
1378 | dma_async_device_unregister(&dw->dma); | 1379 | dma_async_device_unregister(&dw->dma); |
1379 | 1380 | ||
1380 | free_irq(platform_get_irq(pdev, 0), dw); | 1381 | free_irq(platform_get_irq(pdev, 0), dw); |
1381 | tasklet_kill(&dw->tasklet); | 1382 | tasklet_kill(&dw->tasklet); |
1382 | 1383 | ||
1383 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, | 1384 | list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, |
1384 | chan.device_node) { | 1385 | chan.device_node) { |
1385 | list_del(&dwc->chan.device_node); | 1386 | list_del(&dwc->chan.device_node); |
1386 | channel_clear_bit(dw, CH_EN, dwc->mask); | 1387 | channel_clear_bit(dw, CH_EN, dwc->mask); |
1387 | } | 1388 | } |
1388 | 1389 | ||
1389 | clk_disable(dw->clk); | 1390 | clk_disable(dw->clk); |
1390 | clk_put(dw->clk); | 1391 | clk_put(dw->clk); |
1391 | 1392 | ||
1392 | iounmap(dw->regs); | 1393 | iounmap(dw->regs); |
1393 | dw->regs = NULL; | 1394 | dw->regs = NULL; |
1394 | 1395 | ||
1395 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1396 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1396 | release_mem_region(io->start, DW_REGLEN); | 1397 | release_mem_region(io->start, DW_REGLEN); |
1397 | 1398 | ||
1398 | kfree(dw); | 1399 | kfree(dw); |
1399 | 1400 | ||
1400 | return 0; | 1401 | return 0; |
1401 | } | 1402 | } |
1402 | 1403 | ||
1403 | static void dw_shutdown(struct platform_device *pdev) | 1404 | static void dw_shutdown(struct platform_device *pdev) |
1404 | { | 1405 | { |
1405 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1406 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1406 | 1407 | ||
1407 | dw_dma_off(platform_get_drvdata(pdev)); | 1408 | dw_dma_off(platform_get_drvdata(pdev)); |
1408 | clk_disable(dw->clk); | 1409 | clk_disable(dw->clk); |
1409 | } | 1410 | } |
1410 | 1411 | ||
1411 | static int dw_suspend_noirq(struct device *dev) | 1412 | static int dw_suspend_noirq(struct device *dev) |
1412 | { | 1413 | { |
1413 | struct platform_device *pdev = to_platform_device(dev); | 1414 | struct platform_device *pdev = to_platform_device(dev); |
1414 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1415 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1415 | 1416 | ||
1416 | dw_dma_off(platform_get_drvdata(pdev)); | 1417 | dw_dma_off(platform_get_drvdata(pdev)); |
1417 | clk_disable(dw->clk); | 1418 | clk_disable(dw->clk); |
1418 | return 0; | 1419 | return 0; |
1419 | } | 1420 | } |
1420 | 1421 | ||
1421 | static int dw_resume_noirq(struct device *dev) | 1422 | static int dw_resume_noirq(struct device *dev) |
1422 | { | 1423 | { |
1423 | struct platform_device *pdev = to_platform_device(dev); | 1424 | struct platform_device *pdev = to_platform_device(dev); |
1424 | struct dw_dma *dw = platform_get_drvdata(pdev); | 1425 | struct dw_dma *dw = platform_get_drvdata(pdev); |
1425 | 1426 | ||
1426 | clk_enable(dw->clk); | 1427 | clk_enable(dw->clk); |
1427 | dma_writel(dw, CFG, DW_CFG_DMA_EN); | 1428 | dma_writel(dw, CFG, DW_CFG_DMA_EN); |
1428 | return 0; | 1429 | return 0; |
1429 | } | 1430 | } |
1430 | 1431 | ||
1431 | static const struct dev_pm_ops dw_dev_pm_ops = { | 1432 | static const struct dev_pm_ops dw_dev_pm_ops = { |
1432 | .suspend_noirq = dw_suspend_noirq, | 1433 | .suspend_noirq = dw_suspend_noirq, |
1433 | .resume_noirq = dw_resume_noirq, | 1434 | .resume_noirq = dw_resume_noirq, |
1434 | }; | 1435 | }; |
1435 | 1436 | ||
1436 | static struct platform_driver dw_driver = { | 1437 | static struct platform_driver dw_driver = { |
1437 | .remove = __exit_p(dw_remove), | 1438 | .remove = __exit_p(dw_remove), |
1438 | .shutdown = dw_shutdown, | 1439 | .shutdown = dw_shutdown, |
1439 | .driver = { | 1440 | .driver = { |
1440 | .name = "dw_dmac", | 1441 | .name = "dw_dmac", |
1441 | .pm = &dw_dev_pm_ops, | 1442 | .pm = &dw_dev_pm_ops, |
1442 | }, | 1443 | }, |
1443 | }; | 1444 | }; |
1444 | 1445 | ||
1445 | static int __init dw_init(void) | 1446 | static int __init dw_init(void) |
1446 | { | 1447 | { |
1447 | return platform_driver_probe(&dw_driver, dw_probe); | 1448 | return platform_driver_probe(&dw_driver, dw_probe); |
1448 | } | 1449 | } |
1449 | module_init(dw_init); | 1450 | module_init(dw_init); |
1450 | 1451 | ||
1451 | static void __exit dw_exit(void) | 1452 | static void __exit dw_exit(void) |
1452 | { | 1453 | { |
1453 | platform_driver_unregister(&dw_driver); | 1454 | platform_driver_unregister(&dw_driver); |
1454 | } | 1455 | } |
1455 | module_exit(dw_exit); | 1456 | module_exit(dw_exit); |
1456 | 1457 | ||
1457 | MODULE_LICENSE("GPL v2"); | 1458 | MODULE_LICENSE("GPL v2"); |
1458 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); | 1459 | MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); |
1459 | MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); | 1460 | MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); |
1460 | 1461 |
drivers/dma/fsldma.c
1 | /* | 1 | /* |
2 | * Freescale MPC85xx, MPC83xx DMA Engine support | 2 | * Freescale MPC85xx, MPC83xx DMA Engine support |
3 | * | 3 | * |
4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 4 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. |
5 | * | 5 | * |
6 | * Author: | 6 | * Author: |
7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 | 7 | * Zhang Wei <wei.zhang@freescale.com>, Jul 2007 |
8 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 | 8 | * Ebony Zhu <ebony.zhu@freescale.com>, May 2007 |
9 | * | 9 | * |
10 | * Description: | 10 | * Description: |
11 | * DMA engine driver for Freescale MPC8540 DMA controller, which is | 11 | * DMA engine driver for Freescale MPC8540 DMA controller, which is |
12 | * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. | 12 | * also fit for MPC8560, MPC8555, MPC8548, MPC8641, and etc. |
13 | * The support for MPC8349 DMA contorller is also added. | 13 | * The support for MPC8349 DMA contorller is also added. |
14 | * | 14 | * |
15 | * This driver instructs the DMA controller to issue the PCI Read Multiple | 15 | * This driver instructs the DMA controller to issue the PCI Read Multiple |
16 | * command for PCI read operations, instead of using the default PCI Read Line | 16 | * command for PCI read operations, instead of using the default PCI Read Line |
17 | * command. Please be aware that this setting may result in read pre-fetching | 17 | * command. Please be aware that this setting may result in read pre-fetching |
18 | * on some platforms. | 18 | * on some platforms. |
19 | * | 19 | * |
20 | * This is free software; you can redistribute it and/or modify | 20 | * This is free software; you can redistribute it and/or modify |
21 | * it under the terms of the GNU General Public License as published by | 21 | * it under the terms of the GNU General Public License as published by |
22 | * the Free Software Foundation; either version 2 of the License, or | 22 | * the Free Software Foundation; either version 2 of the License, or |
23 | * (at your option) any later version. | 23 | * (at your option) any later version. |
24 | * | 24 | * |
25 | */ | 25 | */ |
26 | 26 | ||
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
30 | #include <linux/interrupt.h> | 30 | #include <linux/interrupt.h> |
31 | #include <linux/dmaengine.h> | 31 | #include <linux/dmaengine.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
34 | #include <linux/dmapool.h> | 34 | #include <linux/dmapool.h> |
35 | #include <linux/of_platform.h> | 35 | #include <linux/of_platform.h> |
36 | 36 | ||
37 | #include <asm/fsldma.h> | 37 | #include <asm/fsldma.h> |
38 | #include "fsldma.h" | 38 | #include "fsldma.h" |
39 | 39 | ||
40 | static void dma_init(struct fsldma_chan *chan) | 40 | static void dma_init(struct fsldma_chan *chan) |
41 | { | 41 | { |
42 | /* Reset the channel */ | 42 | /* Reset the channel */ |
43 | DMA_OUT(chan, &chan->regs->mr, 0, 32); | 43 | DMA_OUT(chan, &chan->regs->mr, 0, 32); |
44 | 44 | ||
45 | switch (chan->feature & FSL_DMA_IP_MASK) { | 45 | switch (chan->feature & FSL_DMA_IP_MASK) { |
46 | case FSL_DMA_IP_85XX: | 46 | case FSL_DMA_IP_85XX: |
47 | /* Set the channel to the following modes: | 47 | /* Set the channel to the following modes: |
48 | * EIE - Error interrupt enable | 48 | * EIE - Error interrupt enable |
49 | * EOSIE - End of segments interrupt enable (basic mode) | 49 | * EOSIE - End of segments interrupt enable (basic mode) |
50 | * EOLNIE - End of links interrupt enable | 50 | * EOLNIE - End of links interrupt enable |
51 | */ | 51 | */ |
52 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE | 52 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE |
53 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); | 53 | | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32); |
54 | break; | 54 | break; |
55 | case FSL_DMA_IP_83XX: | 55 | case FSL_DMA_IP_83XX: |
56 | /* Set the channel to the following modes: | 56 | /* Set the channel to the following modes: |
57 | * EOTIE - End-of-transfer interrupt enable | 57 | * EOTIE - End-of-transfer interrupt enable |
58 | * PRC_RM - PCI read multiple | 58 | * PRC_RM - PCI read multiple |
59 | */ | 59 | */ |
60 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE | 60 | DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE |
61 | | FSL_DMA_MR_PRC_RM, 32); | 61 | | FSL_DMA_MR_PRC_RM, 32); |
62 | break; | 62 | break; |
63 | } | 63 | } |
64 | } | 64 | } |
65 | 65 | ||
66 | static void set_sr(struct fsldma_chan *chan, u32 val) | 66 | static void set_sr(struct fsldma_chan *chan, u32 val) |
67 | { | 67 | { |
68 | DMA_OUT(chan, &chan->regs->sr, val, 32); | 68 | DMA_OUT(chan, &chan->regs->sr, val, 32); |
69 | } | 69 | } |
70 | 70 | ||
71 | static u32 get_sr(struct fsldma_chan *chan) | 71 | static u32 get_sr(struct fsldma_chan *chan) |
72 | { | 72 | { |
73 | return DMA_IN(chan, &chan->regs->sr, 32); | 73 | return DMA_IN(chan, &chan->regs->sr, 32); |
74 | } | 74 | } |
75 | 75 | ||
76 | static void set_desc_cnt(struct fsldma_chan *chan, | 76 | static void set_desc_cnt(struct fsldma_chan *chan, |
77 | struct fsl_dma_ld_hw *hw, u32 count) | 77 | struct fsl_dma_ld_hw *hw, u32 count) |
78 | { | 78 | { |
79 | hw->count = CPU_TO_DMA(chan, count, 32); | 79 | hw->count = CPU_TO_DMA(chan, count, 32); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void set_desc_src(struct fsldma_chan *chan, | 82 | static void set_desc_src(struct fsldma_chan *chan, |
83 | struct fsl_dma_ld_hw *hw, dma_addr_t src) | 83 | struct fsl_dma_ld_hw *hw, dma_addr_t src) |
84 | { | 84 | { |
85 | u64 snoop_bits; | 85 | u64 snoop_bits; |
86 | 86 | ||
87 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | 87 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) |
88 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; | 88 | ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; |
89 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); | 89 | hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); |
90 | } | 90 | } |
91 | 91 | ||
92 | static void set_desc_dst(struct fsldma_chan *chan, | 92 | static void set_desc_dst(struct fsldma_chan *chan, |
93 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) | 93 | struct fsl_dma_ld_hw *hw, dma_addr_t dst) |
94 | { | 94 | { |
95 | u64 snoop_bits; | 95 | u64 snoop_bits; |
96 | 96 | ||
97 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) | 97 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) |
98 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; | 98 | ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; |
99 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); | 99 | hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); |
100 | } | 100 | } |
101 | 101 | ||
102 | static void set_desc_next(struct fsldma_chan *chan, | 102 | static void set_desc_next(struct fsldma_chan *chan, |
103 | struct fsl_dma_ld_hw *hw, dma_addr_t next) | 103 | struct fsl_dma_ld_hw *hw, dma_addr_t next) |
104 | { | 104 | { |
105 | u64 snoop_bits; | 105 | u64 snoop_bits; |
106 | 106 | ||
107 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | 107 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) |
108 | ? FSL_DMA_SNEN : 0; | 108 | ? FSL_DMA_SNEN : 0; |
109 | hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); | 109 | hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); |
110 | } | 110 | } |
111 | 111 | ||
112 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) | 112 | static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) |
113 | { | 113 | { |
114 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); | 114 | DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64); |
115 | } | 115 | } |
116 | 116 | ||
117 | static dma_addr_t get_cdar(struct fsldma_chan *chan) | 117 | static dma_addr_t get_cdar(struct fsldma_chan *chan) |
118 | { | 118 | { |
119 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; | 119 | return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; |
120 | } | 120 | } |
121 | 121 | ||
122 | static dma_addr_t get_ndar(struct fsldma_chan *chan) | 122 | static dma_addr_t get_ndar(struct fsldma_chan *chan) |
123 | { | 123 | { |
124 | return DMA_IN(chan, &chan->regs->ndar, 64); | 124 | return DMA_IN(chan, &chan->regs->ndar, 64); |
125 | } | 125 | } |
126 | 126 | ||
127 | static u32 get_bcr(struct fsldma_chan *chan) | 127 | static u32 get_bcr(struct fsldma_chan *chan) |
128 | { | 128 | { |
129 | return DMA_IN(chan, &chan->regs->bcr, 32); | 129 | return DMA_IN(chan, &chan->regs->bcr, 32); |
130 | } | 130 | } |
131 | 131 | ||
132 | static int dma_is_idle(struct fsldma_chan *chan) | 132 | static int dma_is_idle(struct fsldma_chan *chan) |
133 | { | 133 | { |
134 | u32 sr = get_sr(chan); | 134 | u32 sr = get_sr(chan); |
135 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); | 135 | return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); |
136 | } | 136 | } |
137 | 137 | ||
138 | static void dma_start(struct fsldma_chan *chan) | 138 | static void dma_start(struct fsldma_chan *chan) |
139 | { | 139 | { |
140 | u32 mode; | 140 | u32 mode; |
141 | 141 | ||
142 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 142 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
143 | 143 | ||
144 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { | 144 | if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { |
145 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { | 145 | if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { |
146 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); | 146 | DMA_OUT(chan, &chan->regs->bcr, 0, 32); |
147 | mode |= FSL_DMA_MR_EMP_EN; | 147 | mode |= FSL_DMA_MR_EMP_EN; |
148 | } else { | 148 | } else { |
149 | mode &= ~FSL_DMA_MR_EMP_EN; | 149 | mode &= ~FSL_DMA_MR_EMP_EN; |
150 | } | 150 | } |
151 | } | 151 | } |
152 | 152 | ||
153 | if (chan->feature & FSL_DMA_CHAN_START_EXT) | 153 | if (chan->feature & FSL_DMA_CHAN_START_EXT) |
154 | mode |= FSL_DMA_MR_EMS_EN; | 154 | mode |= FSL_DMA_MR_EMS_EN; |
155 | else | 155 | else |
156 | mode |= FSL_DMA_MR_CS; | 156 | mode |= FSL_DMA_MR_CS; |
157 | 157 | ||
158 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 158 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
159 | } | 159 | } |
160 | 160 | ||
161 | static void dma_halt(struct fsldma_chan *chan) | 161 | static void dma_halt(struct fsldma_chan *chan) |
162 | { | 162 | { |
163 | u32 mode; | 163 | u32 mode; |
164 | int i; | 164 | int i; |
165 | 165 | ||
166 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 166 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
167 | mode |= FSL_DMA_MR_CA; | 167 | mode |= FSL_DMA_MR_CA; |
168 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 168 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
169 | 169 | ||
170 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); | 170 | mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); |
171 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 171 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
172 | 172 | ||
173 | for (i = 0; i < 100; i++) { | 173 | for (i = 0; i < 100; i++) { |
174 | if (dma_is_idle(chan)) | 174 | if (dma_is_idle(chan)) |
175 | return; | 175 | return; |
176 | 176 | ||
177 | udelay(10); | 177 | udelay(10); |
178 | } | 178 | } |
179 | 179 | ||
180 | if (!dma_is_idle(chan)) | 180 | if (!dma_is_idle(chan)) |
181 | dev_err(chan->dev, "DMA halt timeout!\n"); | 181 | dev_err(chan->dev, "DMA halt timeout!\n"); |
182 | } | 182 | } |
183 | 183 | ||
184 | static void set_ld_eol(struct fsldma_chan *chan, | 184 | static void set_ld_eol(struct fsldma_chan *chan, |
185 | struct fsl_desc_sw *desc) | 185 | struct fsl_desc_sw *desc) |
186 | { | 186 | { |
187 | u64 snoop_bits; | 187 | u64 snoop_bits; |
188 | 188 | ||
189 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) | 189 | snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) |
190 | ? FSL_DMA_SNEN : 0; | 190 | ? FSL_DMA_SNEN : 0; |
191 | 191 | ||
192 | desc->hw.next_ln_addr = CPU_TO_DMA(chan, | 192 | desc->hw.next_ln_addr = CPU_TO_DMA(chan, |
193 | DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL | 193 | DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL |
194 | | snoop_bits, 64); | 194 | | snoop_bits, 64); |
195 | } | 195 | } |
196 | 196 | ||
197 | /** | 197 | /** |
198 | * fsl_chan_set_src_loop_size - Set source address hold transfer size | 198 | * fsl_chan_set_src_loop_size - Set source address hold transfer size |
199 | * @chan : Freescale DMA channel | 199 | * @chan : Freescale DMA channel |
200 | * @size : Address loop size, 0 to disable the loop | 200 | * @size : Address loop size, 0 to disable the loop |
201 | * | 201 | * |
202 | * Set the source address hold transfer size. The source | 202 | * Set the source address hold transfer size. The source |
203 | * address hold or loop transfer size controls how the DMA reads | 203 | * address hold or loop transfer size controls how the DMA reads |
204 | * from the source address (SA): if the loop size is 4, the DMA will | 204 | * from the source address (SA): if the loop size is 4, the DMA will |
205 | * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, | 205 | * read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA, |
206 | * SA + 1, and so on. | 206 | * SA + 1, and so on. |
207 | */ | 207 | */ |
208 | static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) | 208 | static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size) |
209 | { | 209 | { |
210 | u32 mode; | 210 | u32 mode; |
211 | 211 | ||
212 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 212 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
213 | 213 | ||
214 | switch (size) { | 214 | switch (size) { |
215 | case 0: | 215 | case 0: |
216 | mode &= ~FSL_DMA_MR_SAHE; | 216 | mode &= ~FSL_DMA_MR_SAHE; |
217 | break; | 217 | break; |
218 | case 1: | 218 | case 1: |
219 | case 2: | 219 | case 2: |
220 | case 4: | 220 | case 4: |
221 | case 8: | 221 | case 8: |
222 | mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14); | 222 | mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14); |
223 | break; | 223 | break; |
224 | } | 224 | } |
225 | 225 | ||
226 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 226 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
227 | } | 227 | } |
228 | 228 | ||
229 | /** | 229 | /** |
230 | * fsl_chan_set_dst_loop_size - Set destination address hold transfer size | 230 | * fsl_chan_set_dst_loop_size - Set destination address hold transfer size |
231 | * @chan : Freescale DMA channel | 231 | * @chan : Freescale DMA channel |
232 | * @size : Address loop size, 0 to disable the loop | 232 | * @size : Address loop size, 0 to disable the loop |
233 | * | 233 | * |
234 | * Set the destination address hold transfer size. The destination | 234 | * Set the destination address hold transfer size. The destination |
235 | * address hold or loop transfer size controls how the DMA writes | 235 | * address hold or loop transfer size controls how the DMA writes |
236 | * to the destination address (TA): if the loop size is 4, the DMA will | 236 | * to the destination address (TA): if the loop size is 4, the DMA will |
237 | * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, | 237 | * write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA, |
238 | * TA + 1, and so on. | 238 | * TA + 1, and so on. |
239 | */ | 239 | */ |
240 | static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) | 240 | static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size) |
241 | { | 241 | { |
242 | u32 mode; | 242 | u32 mode; |
243 | 243 | ||
244 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 244 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
245 | 245 | ||
246 | switch (size) { | 246 | switch (size) { |
247 | case 0: | 247 | case 0: |
248 | mode &= ~FSL_DMA_MR_DAHE; | 248 | mode &= ~FSL_DMA_MR_DAHE; |
249 | break; | 249 | break; |
250 | case 1: | 250 | case 1: |
251 | case 2: | 251 | case 2: |
252 | case 4: | 252 | case 4: |
253 | case 8: | 253 | case 8: |
254 | mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16); | 254 | mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16); |
255 | break; | 255 | break; |
256 | } | 256 | } |
257 | 257 | ||
258 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 258 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
259 | } | 259 | } |
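Both helpers above encode the loop size the same way: an enable bit plus __ilog2(size) written into a mode-register field (bit 14 for the source, bit 16 for the destination). A minimal worked sketch for a destination loop of 4 bytes, using only the definitions visible above:

	u32 mode;

	mode = DMA_IN(chan, &chan->regs->mr, 32);
	/* __ilog2(4) == 2, so the DAHTS field receives 2 at bit 16 */
	mode |= FSL_DMA_MR_DAHE | (2 << 16);
	DMA_OUT(chan, &chan->regs->mr, mode, 32);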
260 | 260 | ||
261 | /** | 261 | /** |
262 | * fsl_chan_set_request_count - Set DMA Request Count for external control | 262 | * fsl_chan_set_request_count - Set DMA Request Count for external control |
263 | * @chan : Freescale DMA channel | 263 | * @chan : Freescale DMA channel |
264 | * @size : Number of bytes to transfer in a single request | 264 | * @size : Number of bytes to transfer in a single request |
265 | * | 265 | * |
266 | * The Freescale DMA channel can be controlled by the external signal DREQ#. | 266 | * The Freescale DMA channel can be controlled by the external signal DREQ#. |
267 | * The DMA request count is the number of bytes that may be | 267 | * The DMA request count is the number of bytes that may be |
268 | * transferred before pausing the channel, after which a new | 268 | * transferred before pausing the channel, after which a new |
269 | * assertion of DREQ# resumes channel operation. | 269 | * assertion of DREQ# resumes channel operation. |
270 | * | 270 | * |
271 | * A size of 0 disables external pause control. The maximum size is 1024. | 271 | * A size of 0 disables external pause control. The maximum size is 1024. |
272 | */ | 272 | */ |
273 | static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) | 273 | static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size) |
274 | { | 274 | { |
275 | u32 mode; | 275 | u32 mode; |
276 | 276 | ||
277 | BUG_ON(size > 1024); | 277 | BUG_ON(size > 1024); |
278 | 278 | ||
279 | mode = DMA_IN(chan, &chan->regs->mr, 32); | 279 | mode = DMA_IN(chan, &chan->regs->mr, 32); |
280 | mode |= (__ilog2(size) << 24) & 0x0f000000; | 280 | mode |= (__ilog2(size) << 24) & 0x0f000000; |
281 | 281 | ||
282 | DMA_OUT(chan, &chan->regs->mr, mode, 32); | 282 | DMA_OUT(chan, &chan->regs->mr, mode, 32); |
283 | } | 283 | } |
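Note that the count is stored as __ilog2(size) in a four-bit field at bit 24 (hence the 0x0f000000 mask), so only power-of-two sizes up to the stated 1024-byte maximum encode exactly. For example, a 64-byte request window:

	/* __ilog2(64) == 6; pause after every 64 bytes until DREQ# */
	mode |= (6 << 24) & 0x0f000000;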
284 | 284 | ||
285 | /** | 285 | /** |
286 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status | 286 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status |
287 | * @chan : Freescale DMA channel | 287 | * @chan : Freescale DMA channel |
288 | * @enable : 0 is disabled, 1 is enabled. | 288 | * @enable : 0 is disabled, 1 is enabled. |
289 | * | 289 | * |
290 | * The Freescale DMA channel can be controlled by the external signal DREQ#. | 290 | * The Freescale DMA channel can be controlled by the external signal DREQ#. |
291 | * The DMA Request Count feature should be used in addition to this feature | 291 | * The DMA Request Count feature should be used in addition to this feature |
292 | * to set the number of bytes to transfer before pausing the channel. | 292 | * to set the number of bytes to transfer before pausing the channel. |
293 | */ | 293 | */ |
294 | static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) | 294 | static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable) |
295 | { | 295 | { |
296 | if (enable) | 296 | if (enable) |
297 | chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | 297 | chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; |
298 | else | 298 | else |
299 | chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | 299 | chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; |
300 | } | 300 | } |
301 | 301 | ||
302 | /** | 302 | /** |
303 | * fsl_chan_toggle_ext_start - Toggle channel external start status | 303 | * fsl_chan_toggle_ext_start - Toggle channel external start status |
304 | * @chan : Freescale DMA channel | 304 | * @chan : Freescale DMA channel |
305 | * @enable : 0 is disabled, 1 is enabled. | 305 | * @enable : 0 is disabled, 1 is enabled. |
306 | * | 306 | * |
307 | * If external start is enabled, the channel can be started by an | 307 | * If external start is enabled, the channel can be started by an |
308 | * external DMA start pin, so dma_start() does not start the | 308 | * external DMA start pin, so dma_start() does not start the |
309 | * transfer immediately. The DMA channel will wait for the | 309 | * transfer immediately. The DMA channel will wait for the |
310 | * control pin to be asserted. | 310 | * control pin to be asserted. |
311 | */ | 311 | */ |
312 | static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) | 312 | static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable) |
313 | { | 313 | { |
314 | if (enable) | 314 | if (enable) |
315 | chan->feature |= FSL_DMA_CHAN_START_EXT; | 315 | chan->feature |= FSL_DMA_CHAN_START_EXT; |
316 | else | 316 | else |
317 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; | 317 | chan->feature &= ~FSL_DMA_CHAN_START_EXT; |
318 | } | 318 | } |
319 | 319 | ||
320 | static void append_ld_queue(struct fsldma_chan *chan, | 320 | static void append_ld_queue(struct fsldma_chan *chan, |
321 | struct fsl_desc_sw *desc) | 321 | struct fsl_desc_sw *desc) |
322 | { | 322 | { |
323 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); | 323 | struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); |
324 | 324 | ||
325 | if (list_empty(&chan->ld_pending)) | 325 | if (list_empty(&chan->ld_pending)) |
326 | goto out_splice; | 326 | goto out_splice; |
327 | 327 | ||
328 | /* | 328 | /* |
329 | * Add the hardware descriptor to the chain of hardware descriptors | 329 | * Add the hardware descriptor to the chain of hardware descriptors |
330 | * that already exists in memory. | 330 | * that already exists in memory. |
331 | * | 331 | * |
332 | * This will un-set the EOL bit of the existing transaction, and the | 332 | * This will un-set the EOL bit of the existing transaction, and the |
333 | * last link in this transaction will become the EOL descriptor. | 333 | * last link in this transaction will become the EOL descriptor. |
334 | */ | 334 | */ |
335 | set_desc_next(chan, &tail->hw, desc->async_tx.phys); | 335 | set_desc_next(chan, &tail->hw, desc->async_tx.phys); |
336 | 336 | ||
337 | /* | 337 | /* |
338 | * Add the software descriptor and all children to the list | 338 | * Add the software descriptor and all children to the list |
339 | * of pending transactions | 339 | * of pending transactions |
340 | */ | 340 | */ |
341 | out_splice: | 341 | out_splice: |
342 | list_splice_tail_init(&desc->tx_list, &chan->ld_pending); | 342 | list_splice_tail_init(&desc->tx_list, &chan->ld_pending); |
343 | } | 343 | } |
344 | 344 | ||
345 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | 345 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
346 | { | 346 | { |
347 | struct fsldma_chan *chan = to_fsl_chan(tx->chan); | 347 | struct fsldma_chan *chan = to_fsl_chan(tx->chan); |
348 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | 348 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); |
349 | struct fsl_desc_sw *child; | 349 | struct fsl_desc_sw *child; |
350 | unsigned long flags; | 350 | unsigned long flags; |
351 | dma_cookie_t cookie; | 351 | dma_cookie_t cookie; |
352 | 352 | ||
353 | spin_lock_irqsave(&chan->desc_lock, flags); | 353 | spin_lock_irqsave(&chan->desc_lock, flags); |
354 | 354 | ||
355 | /* | 355 | /* |
356 | * assign cookies to all of the software descriptors | 356 | * assign cookies to all of the software descriptors |
357 | * that make up this transaction | 357 | * that make up this transaction |
358 | */ | 358 | */ |
359 | cookie = chan->common.cookie; | 359 | cookie = chan->common.cookie; |
360 | list_for_each_entry(child, &desc->tx_list, node) { | 360 | list_for_each_entry(child, &desc->tx_list, node) { |
361 | cookie++; | 361 | cookie++; |
362 | if (cookie < 0) | 362 | if (cookie < 0) |
363 | cookie = 1; | 363 | cookie = 1; |
364 | 364 | ||
365 | child->async_tx.cookie = cookie; | 365 | child->async_tx.cookie = cookie; |
366 | } | 366 | } |
367 | 367 | ||
368 | chan->common.cookie = cookie; | 368 | chan->common.cookie = cookie; |
369 | 369 | ||
370 | /* put this transaction onto the tail of the pending queue */ | 370 | /* put this transaction onto the tail of the pending queue */ |
371 | append_ld_queue(chan, desc); | 371 | append_ld_queue(chan, desc); |
372 | 372 | ||
373 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 373 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
374 | 374 | ||
375 | return cookie; | 375 | return cookie; |
376 | } | 376 | } |
377 | 377 | ||
378 | /** | 378 | /** |
379 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | 379 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. |
380 | * @chan : Freescale DMA channel | 380 | * @chan : Freescale DMA channel |
381 | * | 381 | * |
382 | * Return - The allocated descriptor, or NULL on failure. | 382 | * Return - The allocated descriptor, or NULL on failure. |
383 | */ | 383 | */ |
384 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | 384 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( |
385 | struct fsldma_chan *chan) | 385 | struct fsldma_chan *chan) |
386 | { | 386 | { |
387 | struct fsl_desc_sw *desc; | 387 | struct fsl_desc_sw *desc; |
388 | dma_addr_t pdesc; | 388 | dma_addr_t pdesc; |
389 | 389 | ||
390 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); | 390 | desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); |
391 | if (!desc) { | 391 | if (!desc) { |
392 | dev_dbg(chan->dev, "out of memory for link desc\n"); | 392 | dev_dbg(chan->dev, "out of memory for link desc\n"); |
393 | return NULL; | 393 | return NULL; |
394 | } | 394 | } |
395 | 395 | ||
396 | memset(desc, 0, sizeof(*desc)); | 396 | memset(desc, 0, sizeof(*desc)); |
397 | INIT_LIST_HEAD(&desc->tx_list); | 397 | INIT_LIST_HEAD(&desc->tx_list); |
398 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | 398 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
399 | desc->async_tx.tx_submit = fsl_dma_tx_submit; | 399 | desc->async_tx.tx_submit = fsl_dma_tx_submit; |
400 | desc->async_tx.phys = pdesc; | 400 | desc->async_tx.phys = pdesc; |
401 | 401 | ||
402 | return desc; | 402 | return desc; |
403 | } | 403 | } |
404 | 404 | ||
405 | 405 | ||
406 | /** | 406 | /** |
407 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | 407 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. |
408 | * @chan : Freescale DMA channel | 408 | * @chan : Freescale DMA channel |
409 | * | 409 | * |
410 | * This function will create a dma pool for descriptor allocation. | 410 | * This function will create a dma pool for descriptor allocation. |
411 | * | 411 | * |
412 | * Return - The number of descriptors allocated. | 412 | * Return - The number of descriptors allocated. |
413 | */ | 413 | */ |
414 | static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) | 414 | static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan) |
415 | { | 415 | { |
416 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 416 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
417 | 417 | ||
418 | /* Has this channel already been allocated? */ | 418 | /* Has this channel already been allocated? */ |
419 | if (chan->desc_pool) | 419 | if (chan->desc_pool) |
420 | return 1; | 420 | return 1; |
421 | 421 | ||
422 | /* | 422 | /* |
423 | * The descriptor must be 32-byte aligned to meet the | 423 | * The descriptor must be 32-byte aligned to meet the |
424 | * FSL DMA specification requirement. | 424 | * FSL DMA specification requirement. |
425 | */ | 425 | */ |
426 | chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | 426 | chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", |
427 | chan->dev, | 427 | chan->dev, |
428 | sizeof(struct fsl_desc_sw), | 428 | sizeof(struct fsl_desc_sw), |
429 | __alignof__(struct fsl_desc_sw), 0); | 429 | __alignof__(struct fsl_desc_sw), 0); |
430 | if (!chan->desc_pool) { | 430 | if (!chan->desc_pool) { |
431 | dev_err(chan->dev, "unable to allocate channel %d " | 431 | dev_err(chan->dev, "unable to allocate channel %d " |
432 | "descriptor pool\n", chan->id); | 432 | "descriptor pool\n", chan->id); |
433 | return -ENOMEM; | 433 | return -ENOMEM; |
434 | } | 434 | } |
435 | 435 | ||
436 | /* there is at least one descriptor free to be allocated */ | 436 | /* there is at least one descriptor free to be allocated */ |
437 | return 1; | 437 | return 1; |
438 | } | 438 | } |
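Clients never call this hook directly; the dmaengine core invokes it when handing out a channel. A minimal client-side sketch, where fsl_filter() is a hypothetical dma_filter_fn supplied by the client:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	/* the core calls device_alloc_chan_resources() on success */
	chan = dma_request_channel(mask, fsl_filter, NULL);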
439 | 439 | ||
440 | /** | 440 | /** |
441 | * fsldma_free_desc_list - Free all descriptors in a queue | 441 | * fsldma_free_desc_list - Free all descriptors in a queue |
442 | * @chan: Freescale DMA channel | 442 | * @chan: Freescale DMA channel |
443 | * @list: the list to free | 443 | * @list: the list to free |
444 | * | 444 | * |
445 | * LOCKING: must hold chan->desc_lock | 445 | * LOCKING: must hold chan->desc_lock |
446 | */ | 446 | */ |
447 | static void fsldma_free_desc_list(struct fsldma_chan *chan, | 447 | static void fsldma_free_desc_list(struct fsldma_chan *chan, |
448 | struct list_head *list) | 448 | struct list_head *list) |
449 | { | 449 | { |
450 | struct fsl_desc_sw *desc, *_desc; | 450 | struct fsl_desc_sw *desc, *_desc; |
451 | 451 | ||
452 | list_for_each_entry_safe(desc, _desc, list, node) { | 452 | list_for_each_entry_safe(desc, _desc, list, node) { |
453 | list_del(&desc->node); | 453 | list_del(&desc->node); |
454 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 454 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
455 | } | 455 | } |
456 | } | 456 | } |
457 | 457 | ||
458 | static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, | 458 | static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, |
459 | struct list_head *list) | 459 | struct list_head *list) |
460 | { | 460 | { |
461 | struct fsl_desc_sw *desc, *_desc; | 461 | struct fsl_desc_sw *desc, *_desc; |
462 | 462 | ||
463 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { | 463 | list_for_each_entry_safe_reverse(desc, _desc, list, node) { |
464 | list_del(&desc->node); | 464 | list_del(&desc->node); |
465 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 465 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
466 | } | 466 | } |
467 | } | 467 | } |
468 | 468 | ||
469 | /** | 469 | /** |
470 | * fsl_dma_free_chan_resources - Free all resources of the channel. | 470 | * fsl_dma_free_chan_resources - Free all resources of the channel. |
471 | * @chan : Freescale DMA channel | 471 | * @chan : Freescale DMA channel |
472 | */ | 472 | */ |
473 | static void fsl_dma_free_chan_resources(struct dma_chan *dchan) | 473 | static void fsl_dma_free_chan_resources(struct dma_chan *dchan) |
474 | { | 474 | { |
475 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 475 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
476 | unsigned long flags; | 476 | unsigned long flags; |
477 | 477 | ||
478 | dev_dbg(chan->dev, "Free all channel resources.\n"); | 478 | dev_dbg(chan->dev, "Free all channel resources.\n"); |
479 | spin_lock_irqsave(&chan->desc_lock, flags); | 479 | spin_lock_irqsave(&chan->desc_lock, flags); |
480 | fsldma_free_desc_list(chan, &chan->ld_pending); | 480 | fsldma_free_desc_list(chan, &chan->ld_pending); |
481 | fsldma_free_desc_list(chan, &chan->ld_running); | 481 | fsldma_free_desc_list(chan, &chan->ld_running); |
482 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 482 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
483 | 483 | ||
484 | dma_pool_destroy(chan->desc_pool); | 484 | dma_pool_destroy(chan->desc_pool); |
485 | chan->desc_pool = NULL; | 485 | chan->desc_pool = NULL; |
486 | } | 486 | } |
487 | 487 | ||
488 | static struct dma_async_tx_descriptor * | 488 | static struct dma_async_tx_descriptor * |
489 | fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) | 489 | fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags) |
490 | { | 490 | { |
491 | struct fsldma_chan *chan; | 491 | struct fsldma_chan *chan; |
492 | struct fsl_desc_sw *new; | 492 | struct fsl_desc_sw *new; |
493 | 493 | ||
494 | if (!dchan) | 494 | if (!dchan) |
495 | return NULL; | 495 | return NULL; |
496 | 496 | ||
497 | chan = to_fsl_chan(dchan); | 497 | chan = to_fsl_chan(dchan); |
498 | 498 | ||
499 | new = fsl_dma_alloc_descriptor(chan); | 499 | new = fsl_dma_alloc_descriptor(chan); |
500 | if (!new) { | 500 | if (!new) { |
501 | dev_err(chan->dev, "No free memory for link descriptor\n"); | 501 | dev_err(chan->dev, "No free memory for link descriptor\n"); |
502 | return NULL; | 502 | return NULL; |
503 | } | 503 | } |
504 | 504 | ||
505 | new->async_tx.cookie = -EBUSY; | 505 | new->async_tx.cookie = -EBUSY; |
506 | new->async_tx.flags = flags; | 506 | new->async_tx.flags = flags; |
507 | 507 | ||
508 | /* Insert the link descriptor into the LD ring */ | 508 | /* Insert the link descriptor into the LD ring */ |
509 | list_add_tail(&new->node, &new->tx_list); | 509 | list_add_tail(&new->node, &new->tx_list); |
510 | 510 | ||
511 | /* Set End-of-link to the last link descriptor of the new list */ | 511 | /* Set End-of-link to the last link descriptor of the new list */ |
512 | set_ld_eol(chan, new); | 512 | set_ld_eol(chan, new); |
513 | 513 | ||
514 | return &new->async_tx; | 514 | return &new->async_tx; |
515 | } | 515 | } |
516 | 516 | ||
517 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( | 517 | static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( |
518 | struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, | 518 | struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, |
519 | size_t len, unsigned long flags) | 519 | size_t len, unsigned long flags) |
520 | { | 520 | { |
521 | struct fsldma_chan *chan; | 521 | struct fsldma_chan *chan; |
522 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; | 522 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new; |
523 | size_t copy; | 523 | size_t copy; |
524 | 524 | ||
525 | if (!dchan) | 525 | if (!dchan) |
526 | return NULL; | 526 | return NULL; |
527 | 527 | ||
528 | if (!len) | 528 | if (!len) |
529 | return NULL; | 529 | return NULL; |
530 | 530 | ||
531 | chan = to_fsl_chan(dchan); | 531 | chan = to_fsl_chan(dchan); |
532 | 532 | ||
533 | do { | 533 | do { |
534 | 534 | ||
535 | /* Allocate the link descriptor from DMA pool */ | 535 | /* Allocate the link descriptor from DMA pool */ |
536 | new = fsl_dma_alloc_descriptor(chan); | 536 | new = fsl_dma_alloc_descriptor(chan); |
537 | if (!new) { | 537 | if (!new) { |
538 | dev_err(chan->dev, | 538 | dev_err(chan->dev, |
539 | "No free memory for link descriptor\n"); | 539 | "No free memory for link descriptor\n"); |
540 | goto fail; | 540 | goto fail; |
541 | } | 541 | } |
542 | #ifdef FSL_DMA_LD_DEBUG | 542 | #ifdef FSL_DMA_LD_DEBUG |
543 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | 543 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); |
544 | #endif | 544 | #endif |
545 | 545 | ||
546 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); | 546 | copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); |
547 | 547 | ||
548 | set_desc_cnt(chan, &new->hw, copy); | 548 | set_desc_cnt(chan, &new->hw, copy); |
549 | set_desc_src(chan, &new->hw, dma_src); | 549 | set_desc_src(chan, &new->hw, dma_src); |
550 | set_desc_dst(chan, &new->hw, dma_dst); | 550 | set_desc_dst(chan, &new->hw, dma_dst); |
551 | 551 | ||
552 | if (!first) | 552 | if (!first) |
553 | first = new; | 553 | first = new; |
554 | else | 554 | else |
555 | set_desc_next(chan, &prev->hw, new->async_tx.phys); | 555 | set_desc_next(chan, &prev->hw, new->async_tx.phys); |
556 | 556 | ||
557 | new->async_tx.cookie = 0; | 557 | new->async_tx.cookie = 0; |
558 | async_tx_ack(&new->async_tx); | 558 | async_tx_ack(&new->async_tx); |
559 | 559 | ||
560 | prev = new; | 560 | prev = new; |
561 | len -= copy; | 561 | len -= copy; |
562 | dma_src += copy; | 562 | dma_src += copy; |
563 | dma_dst += copy; | 563 | dma_dst += copy; |
564 | 564 | ||
565 | /* Insert the link descriptor into the LD ring */ | 565 | /* Insert the link descriptor into the LD ring */ |
566 | list_add_tail(&new->node, &first->tx_list); | 566 | list_add_tail(&new->node, &first->tx_list); |
567 | } while (len); | 567 | } while (len); |
568 | 568 | ||
569 | new->async_tx.flags = flags; /* client is in control of this ack */ | 569 | new->async_tx.flags = flags; /* client is in control of this ack */ |
570 | new->async_tx.cookie = -EBUSY; | 570 | new->async_tx.cookie = -EBUSY; |
571 | 571 | ||
572 | /* Set End-of-link to the last link descriptor of the new list */ | 572 | /* Set End-of-link to the last link descriptor of the new list */ |
573 | set_ld_eol(chan, new); | 573 | set_ld_eol(chan, new); |
574 | 574 | ||
575 | return &first->async_tx; | 575 | return &first->async_tx; |
576 | 576 | ||
577 | fail: | 577 | fail: |
578 | if (!first) | 578 | if (!first) |
579 | return NULL; | 579 | return NULL; |
580 | 580 | ||
581 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | 581 | fsldma_free_desc_list_reverse(chan, &first->tx_list); |
582 | return NULL; | 582 | return NULL; |
583 | } | 583 | } |
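For reference, a hedged client-side sketch of driving this prep routine through the generic dmaengine API; dst, src and len are assumed to be DMA-mapped bus addresses and a byte count obtained elsewhere:

	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_CTRL_ACK);
	if (!tx)
		return -ENOMEM;

	cookie = tx->tx_submit(tx);	/* lands in fsl_dma_tx_submit() */
	chan->device->device_issue_pending(chan);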
584 | 584 | ||
585 | /** | 585 | /** |
586 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction | 586 | * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction |
587 | * @chan: DMA channel | 587 | * @chan: DMA channel |
588 | * @sgl: scatterlist to transfer to/from | 588 | * @sgl: scatterlist to transfer to/from |
589 | * @sg_len: number of entries in @sgl | 589 | * @sg_len: number of entries in @sgl |
590 | * @direction: DMA direction | 590 | * @direction: DMA direction |
591 | * @flags: DMAEngine flags | 591 | * @flags: DMAEngine flags |
592 | * | 592 | * |
593 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the | 593 | * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the |
594 | * DMA_SLAVE API, this gets the device-specific information from the | 594 | * DMA_SLAVE API, this gets the device-specific information from the |
595 | * chan->private variable. | 595 | * chan->private variable. |
596 | */ | 596 | */ |
597 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( | 597 | static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( |
598 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, | 598 | struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, |
599 | enum dma_data_direction direction, unsigned long flags) | 599 | enum dma_data_direction direction, unsigned long flags) |
600 | { | 600 | { |
601 | struct fsldma_chan *chan; | 601 | struct fsldma_chan *chan; |
602 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; | 602 | struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; |
603 | struct fsl_dma_slave *slave; | 603 | struct fsl_dma_slave *slave; |
604 | size_t copy; | 604 | size_t copy; |
605 | 605 | ||
606 | int i; | 606 | int i; |
607 | struct scatterlist *sg; | 607 | struct scatterlist *sg; |
608 | size_t sg_used; | 608 | size_t sg_used; |
609 | size_t hw_used; | 609 | size_t hw_used; |
610 | struct fsl_dma_hw_addr *hw; | 610 | struct fsl_dma_hw_addr *hw; |
611 | dma_addr_t dma_dst, dma_src; | 611 | dma_addr_t dma_dst, dma_src; |
612 | 612 | ||
613 | if (!dchan) | 613 | if (!dchan) |
614 | return NULL; | 614 | return NULL; |
615 | 615 | ||
616 | if (!dchan->private) | 616 | if (!dchan->private) |
617 | return NULL; | 617 | return NULL; |
618 | 618 | ||
619 | chan = to_fsl_chan(dchan); | 619 | chan = to_fsl_chan(dchan); |
620 | slave = dchan->private; | 620 | slave = dchan->private; |
621 | 621 | ||
622 | if (list_empty(&slave->addresses)) | 622 | if (list_empty(&slave->addresses)) |
623 | return NULL; | 623 | return NULL; |
624 | 624 | ||
625 | hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); | 625 | hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); |
626 | hw_used = 0; | 626 | hw_used = 0; |
627 | 627 | ||
628 | /* | 628 | /* |
629 | * Build the hardware transaction to copy from the scatterlist to | 629 | * Build the hardware transaction to copy from the scatterlist to |
630 | * the hardware, or from the hardware to the scatterlist | 630 | * the hardware, or from the hardware to the scatterlist |
631 | * | 631 | * |
632 | * If you are copying from the hardware to the scatterlist and it | 632 | * If you are copying from the hardware to the scatterlist and it |
633 | * takes two hardware entries to fill an entire page, then both | 633 | * takes two hardware entries to fill an entire page, then both |
634 | * hardware entries will be coalesced into the same page | 634 | * hardware entries will be coalesced into the same page |
635 | * | 635 | * |
636 | * If you are copying from the scatterlist to the hardware and a | 636 | * If you are copying from the scatterlist to the hardware and a |
637 | * single page can fill two hardware entries, then the data will | 637 | * single page can fill two hardware entries, then the data will |
638 | * be read out of the page into the first hardware entry, and so on | 638 | * be read out of the page into the first hardware entry, and so on |
639 | */ | 639 | */ |
640 | for_each_sg(sgl, sg, sg_len, i) { | 640 | for_each_sg(sgl, sg, sg_len, i) { |
641 | sg_used = 0; | 641 | sg_used = 0; |
642 | 642 | ||
643 | /* Loop until the entire scatterlist entry is used */ | 643 | /* Loop until the entire scatterlist entry is used */ |
644 | while (sg_used < sg_dma_len(sg)) { | 644 | while (sg_used < sg_dma_len(sg)) { |
645 | 645 | ||
646 | /* | 646 | /* |
647 | * If we've used up the current hardware address/length | 647 | * If we've used up the current hardware address/length |
648 | * pair, we need to load a new one | 648 | * pair, we need to load a new one |
649 | * | 649 | * |
650 | * This is done in a while loop so that descriptors with | 650 | * This is done in a while loop so that descriptors with |
651 | * length == 0 will be skipped | 651 | * length == 0 will be skipped |
652 | */ | 652 | */ |
653 | while (hw_used >= hw->length) { | 653 | while (hw_used >= hw->length) { |
654 | 654 | ||
655 | /* | 655 | /* |
656 | * If the current hardware entry is the last | 656 | * If the current hardware entry is the last |
657 | * entry in the list, we're finished | 657 | * entry in the list, we're finished |
658 | */ | 658 | */ |
659 | if (list_is_last(&hw->entry, &slave->addresses)) | 659 | if (list_is_last(&hw->entry, &slave->addresses)) |
660 | goto finished; | 660 | goto finished; |
661 | 661 | ||
662 | /* Get the next hardware address/length pair */ | 662 | /* Get the next hardware address/length pair */ |
663 | hw = list_entry(hw->entry.next, | 663 | hw = list_entry(hw->entry.next, |
664 | struct fsl_dma_hw_addr, entry); | 664 | struct fsl_dma_hw_addr, entry); |
665 | hw_used = 0; | 665 | hw_used = 0; |
666 | } | 666 | } |
667 | 667 | ||
668 | /* Allocate the link descriptor from DMA pool */ | 668 | /* Allocate the link descriptor from DMA pool */ |
669 | new = fsl_dma_alloc_descriptor(chan); | 669 | new = fsl_dma_alloc_descriptor(chan); |
670 | if (!new) { | 670 | if (!new) { |
671 | dev_err(chan->dev, "No free memory for " | 671 | dev_err(chan->dev, "No free memory for " |
672 | "link descriptor\n"); | 672 | "link descriptor\n"); |
673 | goto fail; | 673 | goto fail; |
674 | } | 674 | } |
675 | #ifdef FSL_DMA_LD_DEBUG | 675 | #ifdef FSL_DMA_LD_DEBUG |
676 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); | 676 | dev_dbg(chan->dev, "new link desc alloc %p\n", new); |
677 | #endif | 677 | #endif |
678 | 678 | ||
679 | /* | 679 | /* |
680 | * Calculate the maximum number of bytes to transfer, | 680 | * Calculate the maximum number of bytes to transfer, |
681 | * making sure it is less than the DMA controller limit | 681 | * making sure it is less than the DMA controller limit |
682 | */ | 682 | */ |
683 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, | 683 | copy = min_t(size_t, sg_dma_len(sg) - sg_used, |
684 | hw->length - hw_used); | 684 | hw->length - hw_used); |
685 | copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT); | 685 | copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT); |
686 | 686 | ||
687 | /* | 687 | /* |
688 | * DMA_FROM_DEVICE | 688 | * DMA_FROM_DEVICE |
689 | * from the hardware to the scatterlist | 689 | * from the hardware to the scatterlist |
690 | * | 690 | * |
691 | * DMA_TO_DEVICE | 691 | * DMA_TO_DEVICE |
692 | * from the scatterlist to the hardware | 692 | * from the scatterlist to the hardware |
693 | */ | 693 | */ |
694 | if (direction == DMA_FROM_DEVICE) { | 694 | if (direction == DMA_FROM_DEVICE) { |
695 | dma_src = hw->address + hw_used; | 695 | dma_src = hw->address + hw_used; |
696 | dma_dst = sg_dma_address(sg) + sg_used; | 696 | dma_dst = sg_dma_address(sg) + sg_used; |
697 | } else { | 697 | } else { |
698 | dma_src = sg_dma_address(sg) + sg_used; | 698 | dma_src = sg_dma_address(sg) + sg_used; |
699 | dma_dst = hw->address + hw_used; | 699 | dma_dst = hw->address + hw_used; |
700 | } | 700 | } |
701 | 701 | ||
702 | /* Fill in the descriptor */ | 702 | /* Fill in the descriptor */ |
703 | set_desc_cnt(chan, &new->hw, copy); | 703 | set_desc_cnt(chan, &new->hw, copy); |
704 | set_desc_src(chan, &new->hw, dma_src); | 704 | set_desc_src(chan, &new->hw, dma_src); |
705 | set_desc_dst(chan, &new->hw, dma_dst); | 705 | set_desc_dst(chan, &new->hw, dma_dst); |
706 | 706 | ||
707 | /* | 707 | /* |
708 | * If this is not the first descriptor, chain the | 708 | * If this is not the first descriptor, chain the |
709 | * current descriptor after the previous descriptor | 709 | * current descriptor after the previous descriptor |
710 | */ | 710 | */ |
711 | if (!first) { | 711 | if (!first) { |
712 | first = new; | 712 | first = new; |
713 | } else { | 713 | } else { |
714 | set_desc_next(chan, &prev->hw, | 714 | set_desc_next(chan, &prev->hw, |
715 | new->async_tx.phys); | 715 | new->async_tx.phys); |
716 | } | 716 | } |
717 | 717 | ||
718 | new->async_tx.cookie = 0; | 718 | new->async_tx.cookie = 0; |
719 | async_tx_ack(&new->async_tx); | 719 | async_tx_ack(&new->async_tx); |
720 | 720 | ||
721 | prev = new; | 721 | prev = new; |
722 | sg_used += copy; | 722 | sg_used += copy; |
723 | hw_used += copy; | 723 | hw_used += copy; |
724 | 724 | ||
725 | /* Insert the link descriptor into the LD ring */ | 725 | /* Insert the link descriptor into the LD ring */ |
726 | list_add_tail(&new->node, &first->tx_list); | 726 | list_add_tail(&new->node, &first->tx_list); |
727 | } | 727 | } |
728 | } | 728 | } |
729 | 729 | ||
730 | finished: | 730 | finished: |
731 | 731 | ||
732 | /* All of the hardware address/length pairs had length == 0 */ | 732 | /* All of the hardware address/length pairs had length == 0 */ |
733 | if (!first || !new) | 733 | if (!first || !new) |
734 | return NULL; | 734 | return NULL; |
735 | 735 | ||
736 | new->async_tx.flags = flags; | 736 | new->async_tx.flags = flags; |
737 | new->async_tx.cookie = -EBUSY; | 737 | new->async_tx.cookie = -EBUSY; |
738 | 738 | ||
739 | /* Set End-of-link to the last link descriptor of the new list */ | 739 | /* Set End-of-link to the last link descriptor of the new list */ |
740 | set_ld_eol(chan, new); | 740 | set_ld_eol(chan, new); |
741 | 741 | ||
742 | /* Enable extra controller features */ | 742 | /* Enable extra controller features */ |
743 | if (chan->set_src_loop_size) | 743 | if (chan->set_src_loop_size) |
744 | chan->set_src_loop_size(chan, slave->src_loop_size); | 744 | chan->set_src_loop_size(chan, slave->src_loop_size); |
745 | 745 | ||
746 | if (chan->set_dst_loop_size) | 746 | if (chan->set_dst_loop_size) |
747 | chan->set_dst_loop_size(chan, slave->dst_loop_size); | 747 | chan->set_dst_loop_size(chan, slave->dst_loop_size); |
748 | 748 | ||
749 | if (chan->toggle_ext_start) | 749 | if (chan->toggle_ext_start) |
750 | chan->toggle_ext_start(chan, slave->external_start); | 750 | chan->toggle_ext_start(chan, slave->external_start); |
751 | 751 | ||
752 | if (chan->toggle_ext_pause) | 752 | if (chan->toggle_ext_pause) |
753 | chan->toggle_ext_pause(chan, slave->external_pause); | 753 | chan->toggle_ext_pause(chan, slave->external_pause); |
754 | 754 | ||
755 | if (chan->set_request_count) | 755 | if (chan->set_request_count) |
756 | chan->set_request_count(chan, slave->request_count); | 756 | chan->set_request_count(chan, slave->request_count); |
757 | 757 | ||
758 | return &first->async_tx; | 758 | return &first->async_tx; |
759 | 759 | ||
760 | fail: | 760 | fail: |
761 | /* If first was not set, then we failed to allocate the very first | 761 | /* If first was not set, then we failed to allocate the very first |
762 | * descriptor, and we're done */ | 762 | * descriptor, and we're done */ |
763 | if (!first) | 763 | if (!first) |
764 | return NULL; | 764 | return NULL; |
765 | 765 | ||
766 | /* | 766 | /* |
767 | * First is set, so all of the descriptors we allocated have been added | 767 | * First is set, so all of the descriptors we allocated have been added |
768 | * to first->tx_list, INCLUDING "first" itself. Therefore we | 768 | * to first->tx_list, INCLUDING "first" itself. Therefore we |
769 | * must traverse the list backwards freeing each descriptor in turn | 769 | * must traverse the list backwards freeing each descriptor in turn |
770 | * | 770 | * |
771 | * We're re-using variables for the loop, oh well | 771 | * We're re-using variables for the loop, oh well |
772 | */ | 772 | */ |
773 | fsldma_free_desc_list_reverse(chan, &first->tx_list); | 773 | fsldma_free_desc_list_reverse(chan, &first->tx_list); |
774 | return NULL; | 774 | return NULL; |
775 | } | 775 | } |
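A sketch of how a client might fill in chan->private before calling this routine, using only the struct fsl_dma_slave fields consumed above; fifo_phys and fifo_len are assumed, device-specific values:

	struct fsl_dma_slave slave;
	struct fsl_dma_hw_addr hw;

	INIT_LIST_HEAD(&slave.addresses);
	hw.address = fifo_phys;			/* device FIFO bus address */
	hw.length = fifo_len;			/* bytes at that address */
	list_add_tail(&hw.entry, &slave.addresses);

	slave.src_loop_size = 0;
	slave.dst_loop_size = 4;		/* hold TA over 4 bytes */
	slave.external_start = 0;
	slave.external_pause = 1;
	slave.request_count = 64;

	dchan->private = &slave;
	tx = dchan->device->device_prep_slave_sg(dchan, sgl, sg_len,
						 DMA_TO_DEVICE, DMA_CTRL_ACK);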
776 | 776 | ||
777 | static int fsl_dma_device_control(struct dma_chan *dchan, | 777 | static int fsl_dma_device_control(struct dma_chan *dchan, |
778 | enum dma_ctrl_cmd cmd) | 778 | enum dma_ctrl_cmd cmd, unsigned long arg) |
779 | { | 779 | { |
780 | struct fsldma_chan *chan; | 780 | struct fsldma_chan *chan; |
781 | unsigned long flags; | 781 | unsigned long flags; |
782 | 782 | ||
783 | /* Only supports DMA_TERMINATE_ALL */ | 783 | /* Only supports DMA_TERMINATE_ALL */ |
784 | if (cmd != DMA_TERMINATE_ALL) | 784 | if (cmd != DMA_TERMINATE_ALL) |
785 | return -ENXIO; | 785 | return -ENXIO; |
786 | 786 | ||
787 | if (!dchan) | 787 | if (!dchan) |
788 | return -EINVAL; | 788 | return -EINVAL; |
789 | 789 | ||
790 | chan = to_fsl_chan(dchan); | 790 | chan = to_fsl_chan(dchan); |
791 | 791 | ||
792 | /* Halt the DMA engine */ | 792 | /* Halt the DMA engine */ |
793 | dma_halt(chan); | 793 | dma_halt(chan); |
794 | 794 | ||
795 | spin_lock_irqsave(&chan->desc_lock, flags); | 795 | spin_lock_irqsave(&chan->desc_lock, flags); |
796 | 796 | ||
797 | /* Remove and free all of the descriptors in the LD queue */ | 797 | /* Remove and free all of the descriptors in the LD queue */ |
798 | fsldma_free_desc_list(chan, &chan->ld_pending); | 798 | fsldma_free_desc_list(chan, &chan->ld_pending); |
799 | fsldma_free_desc_list(chan, &chan->ld_running); | 799 | fsldma_free_desc_list(chan, &chan->ld_running); |
800 | 800 | ||
801 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 801 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
802 | 802 | ||
803 | return 0; | 803 | return 0; |
804 | } | 804 | } |
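This is the hook whose prototype the present commit changes: device_control() now takes an ioctl()-style argument. DMA_TERMINATE_ALL carries no data, so a caller simply passes 0:

	/* new three-argument form introduced by this commit */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);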
805 | 805 | ||
806 | /** | 806 | /** |
807 | * fsl_dma_update_completed_cookie - Update the completed cookie. | 807 | * fsl_dma_update_completed_cookie - Update the completed cookie. |
808 | * @chan : Freescale DMA channel | 808 | * @chan : Freescale DMA channel |
809 | * | 809 | * |
810 | * CONTEXT: hardirq | 810 | * CONTEXT: hardirq |
811 | */ | 811 | */ |
812 | static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) | 812 | static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) |
813 | { | 813 | { |
814 | struct fsl_desc_sw *desc; | 814 | struct fsl_desc_sw *desc; |
815 | unsigned long flags; | 815 | unsigned long flags; |
816 | dma_cookie_t cookie; | 816 | dma_cookie_t cookie; |
817 | 817 | ||
818 | spin_lock_irqsave(&chan->desc_lock, flags); | 818 | spin_lock_irqsave(&chan->desc_lock, flags); |
819 | 819 | ||
820 | if (list_empty(&chan->ld_running)) { | 820 | if (list_empty(&chan->ld_running)) { |
821 | dev_dbg(chan->dev, "no running descriptors\n"); | 821 | dev_dbg(chan->dev, "no running descriptors\n"); |
822 | goto out_unlock; | 822 | goto out_unlock; |
823 | } | 823 | } |
824 | 824 | ||
825 | /* Get the last descriptor, update the cookie to that */ | 825 | /* Get the last descriptor, update the cookie to that */ |
826 | desc = to_fsl_desc(chan->ld_running.prev); | 826 | desc = to_fsl_desc(chan->ld_running.prev); |
827 | if (dma_is_idle(chan)) | 827 | if (dma_is_idle(chan)) |
828 | cookie = desc->async_tx.cookie; | 828 | cookie = desc->async_tx.cookie; |
829 | else { | 829 | else { |
830 | cookie = desc->async_tx.cookie - 1; | 830 | cookie = desc->async_tx.cookie - 1; |
831 | if (unlikely(cookie < DMA_MIN_COOKIE)) | 831 | if (unlikely(cookie < DMA_MIN_COOKIE)) |
832 | cookie = DMA_MAX_COOKIE; | 832 | cookie = DMA_MAX_COOKIE; |
833 | } | 833 | } |
834 | 834 | ||
835 | chan->completed_cookie = cookie; | 835 | chan->completed_cookie = cookie; |
836 | 836 | ||
837 | out_unlock: | 837 | out_unlock: |
838 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 838 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
839 | } | 839 | } |
840 | 840 | ||
841 | /** | 841 | /** |
842 | * fsldma_desc_status - Check the status of a descriptor | 842 | * fsldma_desc_status - Check the status of a descriptor |
843 | * @chan: Freescale DMA channel | 843 | * @chan: Freescale DMA channel |
844 | * @desc: DMA SW descriptor | 844 | * @desc: DMA SW descriptor |
845 | * | 845 | * |
846 | * This function will return the status of the given descriptor | 846 | * This function will return the status of the given descriptor |
847 | */ | 847 | */ |
848 | static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, | 848 | static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, |
849 | struct fsl_desc_sw *desc) | 849 | struct fsl_desc_sw *desc) |
850 | { | 850 | { |
851 | return dma_async_is_complete(desc->async_tx.cookie, | 851 | return dma_async_is_complete(desc->async_tx.cookie, |
852 | chan->completed_cookie, | 852 | chan->completed_cookie, |
853 | chan->common.cookie); | 853 | chan->common.cookie); |
854 | } | 854 | } |
855 | 855 | ||
856 | /** | 856 | /** |
857 | * fsl_chan_ld_cleanup - Clean up link descriptors | 857 | * fsl_chan_ld_cleanup - Clean up link descriptors |
858 | * @chan : Freescale DMA channel | 858 | * @chan : Freescale DMA channel |
859 | * | 859 | * |
860 | * This function cleans up the ld_queue of the DMA channel. | 860 | * This function cleans up the ld_queue of the DMA channel. |
861 | */ | 861 | */ |
862 | static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) | 862 | static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) |
863 | { | 863 | { |
864 | struct fsl_desc_sw *desc, *_desc; | 864 | struct fsl_desc_sw *desc, *_desc; |
865 | unsigned long flags; | 865 | unsigned long flags; |
866 | 866 | ||
867 | spin_lock_irqsave(&chan->desc_lock, flags); | 867 | spin_lock_irqsave(&chan->desc_lock, flags); |
868 | 868 | ||
869 | dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); | 869 | dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); |
870 | list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { | 870 | list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { |
871 | dma_async_tx_callback callback; | 871 | dma_async_tx_callback callback; |
872 | void *callback_param; | 872 | void *callback_param; |
873 | 873 | ||
874 | if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) | 874 | if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) |
875 | break; | 875 | break; |
876 | 876 | ||
877 | /* Remove from the list of running transactions */ | 877 | /* Remove from the list of running transactions */ |
878 | list_del(&desc->node); | 878 | list_del(&desc->node); |
879 | 879 | ||
880 | /* Run the link descriptor callback function */ | 880 | /* Run the link descriptor callback function */ |
881 | callback = desc->async_tx.callback; | 881 | callback = desc->async_tx.callback; |
882 | callback_param = desc->async_tx.callback_param; | 882 | callback_param = desc->async_tx.callback_param; |
883 | if (callback) { | 883 | if (callback) { |
884 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 884 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
885 | dev_dbg(chan->dev, "LD %p callback\n", desc); | 885 | dev_dbg(chan->dev, "LD %p callback\n", desc); |
886 | callback(callback_param); | 886 | callback(callback_param); |
887 | spin_lock_irqsave(&chan->desc_lock, flags); | 887 | spin_lock_irqsave(&chan->desc_lock, flags); |
888 | } | 888 | } |
889 | 889 | ||
890 | /* Run any dependencies, then free the descriptor */ | 890 | /* Run any dependencies, then free the descriptor */ |
891 | dma_run_dependencies(&desc->async_tx); | 891 | dma_run_dependencies(&desc->async_tx); |
892 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); | 892 | dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); |
893 | } | 893 | } |
894 | 894 | ||
895 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 895 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
896 | } | 896 | } |
897 | 897 | ||
898 | /** | 898 | /** |
899 | * fsl_chan_xfer_ld_queue - transfer any pending transactions | 899 | * fsl_chan_xfer_ld_queue - transfer any pending transactions |
900 | * @chan : Freescale DMA channel | 900 | * @chan : Freescale DMA channel |
901 | * | 901 | * |
902 | * This will make sure that any pending transactions will be run. | 902 | * This will make sure that any pending transactions will be run. |
903 | * If the DMA controller is idle, it will be started. Otherwise, | 903 | * If the DMA controller is idle, it will be started. Otherwise, |
904 | * the DMA controller's interrupt handler will start any pending | 904 | * the DMA controller's interrupt handler will start any pending |
905 | * transactions when it becomes idle. | 905 | * transactions when it becomes idle. |
906 | */ | 906 | */ |
907 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) | 907 | static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) |
908 | { | 908 | { |
909 | struct fsl_desc_sw *desc; | 909 | struct fsl_desc_sw *desc; |
910 | unsigned long flags; | 910 | unsigned long flags; |
911 | 911 | ||
912 | spin_lock_irqsave(&chan->desc_lock, flags); | 912 | spin_lock_irqsave(&chan->desc_lock, flags); |
913 | 913 | ||
914 | /* | 914 | /* |
915 | * If the list of pending descriptors is empty, then we | 915 | * If the list of pending descriptors is empty, then we |
916 | * don't need to do any work at all | 916 | * don't need to do any work at all |
917 | */ | 917 | */ |
918 | if (list_empty(&chan->ld_pending)) { | 918 | if (list_empty(&chan->ld_pending)) { |
919 | dev_dbg(chan->dev, "no pending LDs\n"); | 919 | dev_dbg(chan->dev, "no pending LDs\n"); |
920 | goto out_unlock; | 920 | goto out_unlock; |
921 | } | 921 | } |
922 | 922 | ||
923 | /* | 923 | /* |
924 | * The DMA controller is not idle, which means the interrupt | 924 | * The DMA controller is not idle, which means the interrupt |
925 | * handler will start any queued transactions when it runs | 925 | * handler will start any queued transactions when it runs |
926 | * at the end of the current transaction | 926 | * at the end of the current transaction |
927 | */ | 927 | */ |
928 | if (!dma_is_idle(chan)) { | 928 | if (!dma_is_idle(chan)) { |
929 | dev_dbg(chan->dev, "DMA controller still busy\n"); | 929 | dev_dbg(chan->dev, "DMA controller still busy\n"); |
930 | goto out_unlock; | 930 | goto out_unlock; |
931 | } | 931 | } |
932 | 932 | ||
933 | /* | 933 | /* |
934 | * TODO: | 934 | * TODO: |
935 | * make sure the dma_halt() function really un-wedges the | 935 | * make sure the dma_halt() function really un-wedges the |
936 | * controller as much as possible | 936 | * controller as much as possible |
937 | */ | 937 | */ |
938 | dma_halt(chan); | 938 | dma_halt(chan); |
939 | 939 | ||
940 | /* | 940 | /* |
941 | * If there are some link descriptors which have not been | 941 | * If there are some link descriptors which have not been |
942 | * transferred, we need to start the controller | 942 | * transferred, we need to start the controller |
943 | */ | 943 | */ |
944 | 944 | ||
945 | /* | 945 | /* |
946 | * Move all elements from the queue of pending transactions | 946 | * Move all elements from the queue of pending transactions |
947 | * onto the list of running transactions | 947 | * onto the list of running transactions |
948 | */ | 948 | */ |
949 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); | 949 | desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); |
950 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); | 950 | list_splice_tail_init(&chan->ld_pending, &chan->ld_running); |
951 | 951 | ||
952 | /* | 952 | /* |
953 | * Program the descriptor's address into the DMA controller, | 953 | * Program the descriptor's address into the DMA controller, |
954 | * then start the DMA transaction | 954 | * then start the DMA transaction |
955 | */ | 955 | */ |
956 | set_cdar(chan, desc->async_tx.phys); | 956 | set_cdar(chan, desc->async_tx.phys); |
957 | dma_start(chan); | 957 | dma_start(chan); |
958 | 958 | ||
959 | out_unlock: | 959 | out_unlock: |
960 | spin_unlock_irqrestore(&chan->desc_lock, flags); | 960 | spin_unlock_irqrestore(&chan->desc_lock, flags); |
961 | } | 961 | } |
962 | 962 | ||
963 | /** | 963 | /** |
964 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | 964 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command |
965 | * @chan : Freescale DMA channel | 965 | * @chan : Freescale DMA channel |
966 | */ | 966 | */ |
967 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) | 967 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) |
968 | { | 968 | { |
969 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 969 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
970 | fsl_chan_xfer_ld_queue(chan); | 970 | fsl_chan_xfer_ld_queue(chan); |
971 | } | 971 | } |
972 | 972 | ||
973 | /** | 973 | /** |
974 | * fsl_tx_status - Determine the DMA status | 974 | * fsl_tx_status - Determine the DMA status |
975 | * @chan : Freescale DMA channel | 975 | * @chan : Freescale DMA channel |
976 | */ | 976 | */ |
977 | static enum dma_status fsl_tx_status(struct dma_chan *dchan, | 977 | static enum dma_status fsl_tx_status(struct dma_chan *dchan, |
978 | dma_cookie_t cookie, | 978 | dma_cookie_t cookie, |
979 | struct dma_tx_state *txstate) | 979 | struct dma_tx_state *txstate) |
980 | { | 980 | { |
981 | struct fsldma_chan *chan = to_fsl_chan(dchan); | 981 | struct fsldma_chan *chan = to_fsl_chan(dchan); |
982 | dma_cookie_t last_used; | 982 | dma_cookie_t last_used; |
983 | dma_cookie_t last_complete; | 983 | dma_cookie_t last_complete; |
984 | 984 | ||
985 | fsl_chan_ld_cleanup(chan); | 985 | fsl_chan_ld_cleanup(chan); |
986 | 986 | ||
987 | last_used = dchan->cookie; | 987 | last_used = dchan->cookie; |
988 | last_complete = chan->completed_cookie; | 988 | last_complete = chan->completed_cookie; |
989 | 989 | ||
990 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 990 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
991 | 991 | ||
992 | return dma_async_is_complete(cookie, last_complete, last_used); | 992 | return dma_async_is_complete(cookie, last_complete, last_used); |
993 | } | 993 | } |
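Clients normally reach this hook through the dma_async_is_tx_complete() wrapper rather than calling it directly; a brief polling sketch:

	dma_cookie_t last, used;
	enum dma_status status;

	do {
		status = dma_async_is_tx_complete(chan, cookie, &last, &used);
	} while (status == DMA_IN_PROGRESS);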
994 | 994 | ||
995 | /*----------------------------------------------------------------------------*/ | 995 | /*----------------------------------------------------------------------------*/ |
996 | /* Interrupt Handling */ | 996 | /* Interrupt Handling */ |
997 | /*----------------------------------------------------------------------------*/ | 997 | /*----------------------------------------------------------------------------*/ |
998 | 998 | ||
999 | static irqreturn_t fsldma_chan_irq(int irq, void *data) | 999 | static irqreturn_t fsldma_chan_irq(int irq, void *data) |
1000 | { | 1000 | { |
1001 | struct fsldma_chan *chan = data; | 1001 | struct fsldma_chan *chan = data; |
1002 | int update_cookie = 0; | 1002 | int update_cookie = 0; |
1003 | int xfer_ld_q = 0; | 1003 | int xfer_ld_q = 0; |
1004 | u32 stat; | 1004 | u32 stat; |
1005 | 1005 | ||
1006 | /* save and clear the status register */ | 1006 | /* save and clear the status register */ |
1007 | stat = get_sr(chan); | 1007 | stat = get_sr(chan); |
1008 | set_sr(chan, stat); | 1008 | set_sr(chan, stat); |
1009 | dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); | 1009 | dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); |
1010 | 1010 | ||
1011 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); | 1011 | stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); |
1012 | if (!stat) | 1012 | if (!stat) |
1013 | return IRQ_NONE; | 1013 | return IRQ_NONE; |
1014 | 1014 | ||
1015 | if (stat & FSL_DMA_SR_TE) | 1015 | if (stat & FSL_DMA_SR_TE) |
1016 | dev_err(chan->dev, "Transfer Error!\n"); | 1016 | dev_err(chan->dev, "Transfer Error!\n"); |
1017 | 1017 | ||
1018 | /* | 1018 | /* |
1019 | * Programming Error | 1019 | * Programming Error |
1020 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will | 1020 | * The DMA_INTERRUPT async_tx is a NULL transfer, which will |
1021 | * trigger a PE interrupt. | 1021 | * trigger a PE interrupt. |
1022 | */ | 1022 | */ |
1023 | if (stat & FSL_DMA_SR_PE) { | 1023 | if (stat & FSL_DMA_SR_PE) { |
1024 | dev_dbg(chan->dev, "irq: Programming Error INT\n"); | 1024 | dev_dbg(chan->dev, "irq: Programming Error INT\n"); |
1025 | if (get_bcr(chan) == 0) { | 1025 | if (get_bcr(chan) == 0) { |
1026 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. | 1026 | /* BCR register is 0, this is a DMA_INTERRUPT async_tx. |
1027 | * Now, update the completed cookie, and continue with | 1027 | * Now, update the completed cookie, and continue with |
1028 | * the next uncompleted transfer. | 1028 | * the next uncompleted transfer. |
1029 | */ | 1029 | */ |
1030 | update_cookie = 1; | 1030 | update_cookie = 1; |
1031 | xfer_ld_q = 1; | 1031 | xfer_ld_q = 1; |
1032 | } | 1032 | } |
1033 | stat &= ~FSL_DMA_SR_PE; | 1033 | stat &= ~FSL_DMA_SR_PE; |
1034 | } | 1034 | } |
1035 | 1035 | ||
1036 | /* | 1036 | /* |
1037 | * If the link descriptor segment transfer finishes, | 1037 | * If the link descriptor segment transfer finishes, |
1038 | * we will recycle the used descriptor. | 1038 | * we will recycle the used descriptor. |
1039 | */ | 1039 | */ |
1040 | if (stat & FSL_DMA_SR_EOSI) { | 1040 | if (stat & FSL_DMA_SR_EOSI) { |
1041 | dev_dbg(chan->dev, "irq: End-of-segments INT\n"); | 1041 | dev_dbg(chan->dev, "irq: End-of-segments INT\n"); |
1042 | dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", | 1042 | dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", |
1043 | (unsigned long long)get_cdar(chan), | 1043 | (unsigned long long)get_cdar(chan), |
1044 | (unsigned long long)get_ndar(chan)); | 1044 | (unsigned long long)get_ndar(chan)); |
1045 | stat &= ~FSL_DMA_SR_EOSI; | 1045 | stat &= ~FSL_DMA_SR_EOSI; |
1046 | update_cookie = 1; | 1046 | update_cookie = 1; |
1047 | } | 1047 | } |
1048 | 1048 | ||
1049 | /* | 1049 | /* |
1050 | * For MPC8349, the EOCDI event needs to update the cookie | 1050 | * For MPC8349, the EOCDI event needs to update the cookie |
1051 | * and start the next transfer if one exists. | 1051 | * and start the next transfer if one exists. |
1052 | */ | 1052 | */ |
1053 | if (stat & FSL_DMA_SR_EOCDI) { | 1053 | if (stat & FSL_DMA_SR_EOCDI) { |
1054 | dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); | 1054 | dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); |
1055 | stat &= ~FSL_DMA_SR_EOCDI; | 1055 | stat &= ~FSL_DMA_SR_EOCDI; |
1056 | update_cookie = 1; | 1056 | update_cookie = 1; |
1057 | xfer_ld_q = 1; | 1057 | xfer_ld_q = 1; |
1058 | } | 1058 | } |
1059 | 1059 | ||
1060 | /* | 1060 | /* |
1061 | * If the current transfer is the end-of-transfer, | 1061 | * If the current transfer is the end-of-transfer, |
1062 | * we should clear the Channel Start bit to | 1062 | * we should clear the Channel Start bit to |
1063 | * prepare for the next transfer. | 1063 | * prepare for the next transfer. |
1064 | */ | 1064 | */ |
1065 | if (stat & FSL_DMA_SR_EOLNI) { | 1065 | if (stat & FSL_DMA_SR_EOLNI) { |
1066 | dev_dbg(chan->dev, "irq: End-of-link INT\n"); | 1066 | dev_dbg(chan->dev, "irq: End-of-link INT\n"); |
1067 | stat &= ~FSL_DMA_SR_EOLNI; | 1067 | stat &= ~FSL_DMA_SR_EOLNI; |
1068 | xfer_ld_q = 1; | 1068 | xfer_ld_q = 1; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | if (update_cookie) | 1071 | if (update_cookie) |
1072 | fsl_dma_update_completed_cookie(chan); | 1072 | fsl_dma_update_completed_cookie(chan); |
1073 | if (xfer_ld_q) | 1073 | if (xfer_ld_q) |
1074 | fsl_chan_xfer_ld_queue(chan); | 1074 | fsl_chan_xfer_ld_queue(chan); |
1075 | if (stat) | 1075 | if (stat) |
1076 | dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); | 1076 | dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); |
1077 | 1077 | ||
1078 | dev_dbg(chan->dev, "irq: Exit\n"); | 1078 | dev_dbg(chan->dev, "irq: Exit\n"); |
1079 | tasklet_schedule(&chan->tasklet); | 1079 | tasklet_schedule(&chan->tasklet); |
1080 | return IRQ_HANDLED; | 1080 | return IRQ_HANDLED; |
1081 | } | 1081 | } |
1082 | 1082 | ||
1083 | static void dma_do_tasklet(unsigned long data) | 1083 | static void dma_do_tasklet(unsigned long data) |
1084 | { | 1084 | { |
1085 | struct fsldma_chan *chan = (struct fsldma_chan *)data; | 1085 | struct fsldma_chan *chan = (struct fsldma_chan *)data; |
1086 | fsl_chan_ld_cleanup(chan); | 1086 | fsl_chan_ld_cleanup(chan); |
1087 | } | 1087 | } |
1088 | 1088 | ||
1089 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) | 1089 | static irqreturn_t fsldma_ctrl_irq(int irq, void *data) |
1090 | { | 1090 | { |
1091 | struct fsldma_device *fdev = data; | 1091 | struct fsldma_device *fdev = data; |
1092 | struct fsldma_chan *chan; | 1092 | struct fsldma_chan *chan; |
1093 | unsigned int handled = 0; | 1093 | unsigned int handled = 0; |
1094 | u32 gsr, mask; | 1094 | u32 gsr, mask; |
1095 | int i; | 1095 | int i; |
1096 | 1096 | ||
1097 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) | 1097 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs) |
1098 | : in_le32(fdev->regs); | 1098 | : in_le32(fdev->regs); |
1099 | mask = 0xff000000; | 1099 | mask = 0xff000000; |
1100 | dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); | 1100 | dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr); |
1101 | 1101 | ||
1102 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | 1102 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { |
1103 | chan = fdev->chan[i]; | 1103 | chan = fdev->chan[i]; |
1104 | if (!chan) | 1104 | if (!chan) |
1105 | continue; | 1105 | continue; |
1106 | 1106 | ||
1107 | if (gsr & mask) { | 1107 | if (gsr & mask) { |
1108 | dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); | 1108 | dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id); |
1109 | fsldma_chan_irq(irq, chan); | 1109 | fsldma_chan_irq(irq, chan); |
1110 | handled++; | 1110 | handled++; |
1111 | } | 1111 | } |
1112 | 1112 | ||
1113 | gsr &= ~mask; | 1113 | gsr &= ~mask; |
1114 | mask >>= 8; | 1114 | mask >>= 8; |
1115 | } | 1115 | } |
1116 | 1116 | ||
1117 | return IRQ_RETVAL(handled); | 1117 | return IRQ_RETVAL(handled); |
1118 | } | 1118 | } |
1119 | 1119 | ||
1120 | static void fsldma_free_irqs(struct fsldma_device *fdev) | 1120 | static void fsldma_free_irqs(struct fsldma_device *fdev) |
1121 | { | 1121 | { |
1122 | struct fsldma_chan *chan; | 1122 | struct fsldma_chan *chan; |
1123 | int i; | 1123 | int i; |
1124 | 1124 | ||
1125 | if (fdev->irq != NO_IRQ) { | 1125 | if (fdev->irq != NO_IRQ) { |
1126 | dev_dbg(fdev->dev, "free per-controller IRQ\n"); | 1126 | dev_dbg(fdev->dev, "free per-controller IRQ\n"); |
1127 | free_irq(fdev->irq, fdev); | 1127 | free_irq(fdev->irq, fdev); |
1128 | return; | 1128 | return; |
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | 1131 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { |
1132 | chan = fdev->chan[i]; | 1132 | chan = fdev->chan[i]; |
1133 | if (chan && chan->irq != NO_IRQ) { | 1133 | if (chan && chan->irq != NO_IRQ) { |
1134 | dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); | 1134 | dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); |
1135 | free_irq(chan->irq, chan); | 1135 | free_irq(chan->irq, chan); |
1136 | } | 1136 | } |
1137 | } | 1137 | } |
1138 | } | 1138 | } |
1139 | 1139 | ||
1140 | static int fsldma_request_irqs(struct fsldma_device *fdev) | 1140 | static int fsldma_request_irqs(struct fsldma_device *fdev) |
1141 | { | 1141 | { |
1142 | struct fsldma_chan *chan; | 1142 | struct fsldma_chan *chan; |
1143 | int ret; | 1143 | int ret; |
1144 | int i; | 1144 | int i; |
1145 | 1145 | ||
1146 | /* if we have a per-controller IRQ, use that */ | 1146 | /* if we have a per-controller IRQ, use that */ |
1147 | if (fdev->irq != NO_IRQ) { | 1147 | if (fdev->irq != NO_IRQ) { |
1148 | dev_dbg(fdev->dev, "request per-controller IRQ\n"); | 1148 | dev_dbg(fdev->dev, "request per-controller IRQ\n"); |
1149 | ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, | 1149 | ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED, |
1150 | "fsldma-controller", fdev); | 1150 | "fsldma-controller", fdev); |
1151 | return ret; | 1151 | return ret; |
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | /* no per-controller IRQ, use the per-channel IRQs */ | 1154 | /* no per-controller IRQ, use the per-channel IRQs */ |
1155 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | 1155 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { |
1156 | chan = fdev->chan[i]; | 1156 | chan = fdev->chan[i]; |
1157 | if (!chan) | 1157 | if (!chan) |
1158 | continue; | 1158 | continue; |
1159 | 1159 | ||
1160 | if (chan->irq == NO_IRQ) { | 1160 | if (chan->irq == NO_IRQ) { |
1161 | dev_err(fdev->dev, "no interrupts property defined for " | 1161 | dev_err(fdev->dev, "no interrupts property defined for " |
1162 | "DMA channel %d. Please fix your " | 1162 | "DMA channel %d. Please fix your " |
1163 | "device tree\n", chan->id); | 1163 | "device tree\n", chan->id); |
1164 | ret = -ENODEV; | 1164 | ret = -ENODEV; |
1165 | goto out_unwind; | 1165 | goto out_unwind; |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); | 1168 | dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); |
1169 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, | 1169 | ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, |
1170 | "fsldma-chan", chan); | 1170 | "fsldma-chan", chan); |
1171 | if (ret) { | 1171 | if (ret) { |
1172 | dev_err(fdev->dev, "unable to request IRQ for DMA " | 1172 | dev_err(fdev->dev, "unable to request IRQ for DMA " |
1173 | "channel %d\n", chan->id); | 1173 | "channel %d\n", chan->id); |
1174 | goto out_unwind; | 1174 | goto out_unwind; |
1175 | } | 1175 | } |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | return 0; | 1178 | return 0; |
1179 | 1179 | ||
1180 | out_unwind: | 1180 | out_unwind: |
1181 | for (i--; i >= 0; i--) { /* skip the index that failed */ | 1181 | for (i--; i >= 0; i--) { /* skip the index that failed */ |
1182 | chan = fdev->chan[i]; | 1182 | chan = fdev->chan[i]; |
1183 | if (!chan) | 1183 | if (!chan) |
1184 | continue; | 1184 | continue; |
1185 | 1185 | ||
1186 | if (chan->irq == NO_IRQ) | 1186 | if (chan->irq == NO_IRQ) |
1187 | continue; | 1187 | continue; |
1188 | 1188 | ||
1189 | free_irq(chan->irq, chan); | 1189 | free_irq(chan->irq, chan); |
1190 | } | 1190 | } |
1191 | 1191 | ||
1192 | return ret; | 1192 | return ret; |
1193 | } | 1193 | } |
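The unwind above relies on only the indices below the failing one having been requested; here is a minimal, self-contained sketch of the idiom (request_one, free_one and struct resource_set are illustrative stand-ins, not driver code):

    static int request_all(struct resource_set *set, int n)
    {
            int i, ret;

            for (i = 0; i < n; i++) {
                    ret = request_one(set, i);
                    if (ret)
                            goto out_unwind;
            }
            return 0;

    out_unwind:
            while (--i >= 0)        /* free only what was actually requested */
                    free_one(set, i);
            return ret;
    }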
1194 | 1194 | ||
1195 | /*----------------------------------------------------------------------------*/ | 1195 | /*----------------------------------------------------------------------------*/ |
1196 | /* OpenFirmware Subsystem */ | 1196 | /* OpenFirmware Subsystem */ |
1197 | /*----------------------------------------------------------------------------*/ | 1197 | /*----------------------------------------------------------------------------*/ |
1198 | 1198 | ||
1199 | static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, | 1199 | static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev, |
1200 | struct device_node *node, u32 feature, const char *compatible) | 1200 | struct device_node *node, u32 feature, const char *compatible) |
1201 | { | 1201 | { |
1202 | struct fsldma_chan *chan; | 1202 | struct fsldma_chan *chan; |
1203 | struct resource res; | 1203 | struct resource res; |
1204 | int err; | 1204 | int err; |
1205 | 1205 | ||
1206 | /* alloc channel */ | 1206 | /* alloc channel */ |
1207 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); | 1207 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); |
1208 | if (!chan) { | 1208 | if (!chan) { |
1209 | dev_err(fdev->dev, "no free memory for DMA channels!\n"); | 1209 | dev_err(fdev->dev, "no free memory for DMA channels!\n"); |
1210 | err = -ENOMEM; | 1210 | err = -ENOMEM; |
1211 | goto out_return; | 1211 | goto out_return; |
1212 | } | 1212 | } |
1213 | 1213 | ||
1214 | /* ioremap registers for use */ | 1214 | /* ioremap registers for use */ |
1215 | chan->regs = of_iomap(node, 0); | 1215 | chan->regs = of_iomap(node, 0); |
1216 | if (!chan->regs) { | 1216 | if (!chan->regs) { |
1217 | dev_err(fdev->dev, "unable to ioremap registers\n"); | 1217 | dev_err(fdev->dev, "unable to ioremap registers\n"); |
1218 | err = -ENOMEM; | 1218 | err = -ENOMEM; |
1219 | goto out_free_chan; | 1219 | goto out_free_chan; |
1220 | } | 1220 | } |
1221 | 1221 | ||
1222 | err = of_address_to_resource(node, 0, &res); | 1222 | err = of_address_to_resource(node, 0, &res); |
1223 | if (err) { | 1223 | if (err) { |
1224 | dev_err(fdev->dev, "unable to find 'reg' property\n"); | 1224 | dev_err(fdev->dev, "unable to find 'reg' property\n"); |
1225 | goto out_iounmap_regs; | 1225 | goto out_iounmap_regs; |
1226 | } | 1226 | } |
1227 | 1227 | ||
1228 | chan->feature = feature; | 1228 | chan->feature = feature; |
1229 | if (!fdev->feature) | 1229 | if (!fdev->feature) |
1230 | fdev->feature = chan->feature; | 1230 | fdev->feature = chan->feature; |
1231 | 1231 | ||
1232 | /* | 1232 | /* |
1233 | * If the DMA device's feature is different from the feature | 1233 | * If the DMA device's feature is different from the feature |
1234 | * of its channels, report the bug | 1234 | * of its channels, report the bug |
1235 | */ | 1235 | */ |
1236 | WARN_ON(fdev->feature != chan->feature); | 1236 | WARN_ON(fdev->feature != chan->feature); |
1237 | 1237 | ||
1238 | chan->dev = fdev->dev; | 1238 | chan->dev = fdev->dev; |
1239 | chan->id = ((res.start - 0x100) & 0xfff) >> 7; | 1239 | chan->id = ((res.start - 0x100) & 0xfff) >> 7; |
1240 | if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { | 1240 | if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) { |
1241 | dev_err(fdev->dev, "too many channels for device\n"); | 1241 | dev_err(fdev->dev, "too many channels for device\n"); |
1242 | err = -EINVAL; | 1242 | err = -EINVAL; |
1243 | goto out_iounmap_regs; | 1243 | goto out_iounmap_regs; |
1244 | } | 1244 | } |
1245 | 1245 | ||
1246 | fdev->chan[chan->id] = chan; | 1246 | fdev->chan[chan->id] = chan; |
1247 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); | 1247 | tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); |
1248 | 1248 | ||
1249 | /* Initialize the channel */ | 1249 | /* Initialize the channel */ |
1250 | dma_init(chan); | 1250 | dma_init(chan); |
1251 | 1251 | ||
1252 | /* Clear cdar registers */ | 1252 | /* Clear cdar registers */ |
1253 | set_cdar(chan, 0); | 1253 | set_cdar(chan, 0); |
1254 | 1254 | ||
1255 | switch (chan->feature & FSL_DMA_IP_MASK) { | 1255 | switch (chan->feature & FSL_DMA_IP_MASK) { |
1256 | case FSL_DMA_IP_85XX: | 1256 | case FSL_DMA_IP_85XX: |
1257 | chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; /* fall through */ | 1257 | chan->toggle_ext_pause = fsl_chan_toggle_ext_pause; /* fall through */ |
1258 | case FSL_DMA_IP_83XX: | 1258 | case FSL_DMA_IP_83XX: |
1259 | chan->toggle_ext_start = fsl_chan_toggle_ext_start; | 1259 | chan->toggle_ext_start = fsl_chan_toggle_ext_start; |
1260 | chan->set_src_loop_size = fsl_chan_set_src_loop_size; | 1260 | chan->set_src_loop_size = fsl_chan_set_src_loop_size; |
1261 | chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; | 1261 | chan->set_dst_loop_size = fsl_chan_set_dst_loop_size; |
1262 | chan->set_request_count = fsl_chan_set_request_count; | 1262 | chan->set_request_count = fsl_chan_set_request_count; |
1263 | } | 1263 | } |
1264 | 1264 | ||
1265 | spin_lock_init(&chan->desc_lock); | 1265 | spin_lock_init(&chan->desc_lock); |
1266 | INIT_LIST_HEAD(&chan->ld_pending); | 1266 | INIT_LIST_HEAD(&chan->ld_pending); |
1267 | INIT_LIST_HEAD(&chan->ld_running); | 1267 | INIT_LIST_HEAD(&chan->ld_running); |
1268 | 1268 | ||
1269 | chan->common.device = &fdev->common; | 1269 | chan->common.device = &fdev->common; |
1270 | 1270 | ||
1271 | /* find the IRQ line, if it exists in the device tree */ | 1271 | /* find the IRQ line, if it exists in the device tree */ |
1272 | chan->irq = irq_of_parse_and_map(node, 0); | 1272 | chan->irq = irq_of_parse_and_map(node, 0); |
1273 | 1273 | ||
1274 | /* Add the channel to DMA device channel list */ | 1274 | /* Add the channel to DMA device channel list */ |
1275 | list_add_tail(&chan->common.device_node, &fdev->common.channels); | 1275 | list_add_tail(&chan->common.device_node, &fdev->common.channels); |
1276 | fdev->common.chancnt++; | 1276 | fdev->common.chancnt++; |
1277 | 1277 | ||
1278 | dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, | 1278 | dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible, |
1279 | chan->irq != NO_IRQ ? chan->irq : fdev->irq); | 1279 | chan->irq != NO_IRQ ? chan->irq : fdev->irq); |
1280 | 1280 | ||
1281 | return 0; | 1281 | return 0; |
1282 | 1282 | ||
1283 | out_iounmap_regs: | 1283 | out_iounmap_regs: |
1284 | iounmap(chan->regs); | 1284 | iounmap(chan->regs); |
1285 | out_free_chan: | 1285 | out_free_chan: |
1286 | kfree(chan); | 1286 | kfree(chan); |
1287 | out_return: | 1287 | out_return: |
1288 | return err; | 1288 | return err; |
1289 | } | 1289 | } |
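A worked example of the channel-id computation above, assuming the register map the expression implies (channel register blocks 0x80 bytes apart, starting 0x100 past the controller base):

    /* chan->id = ((res.start - 0x100) & 0xfff) >> 7
     *   offset 0x100: (0x000 & 0xfff) >> 7 = 0  -> channel 0
     *   offset 0x180: (0x080 & 0xfff) >> 7 = 1  -> channel 1
     *   offset 0x200: (0x100 & 0xfff) >> 7 = 2  -> channel 2
     */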
1290 | 1290 | ||
1291 | static void fsl_dma_chan_remove(struct fsldma_chan *chan) | 1291 | static void fsl_dma_chan_remove(struct fsldma_chan *chan) |
1292 | { | 1292 | { |
1293 | irq_dispose_mapping(chan->irq); | 1293 | irq_dispose_mapping(chan->irq); |
1294 | list_del(&chan->common.device_node); | 1294 | list_del(&chan->common.device_node); |
1295 | iounmap(chan->regs); | 1295 | iounmap(chan->regs); |
1296 | kfree(chan); | 1296 | kfree(chan); |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | static int __devinit fsldma_of_probe(struct of_device *op, | 1299 | static int __devinit fsldma_of_probe(struct of_device *op, |
1300 | const struct of_device_id *match) | 1300 | const struct of_device_id *match) |
1301 | { | 1301 | { |
1302 | struct fsldma_device *fdev; | 1302 | struct fsldma_device *fdev; |
1303 | struct device_node *child; | 1303 | struct device_node *child; |
1304 | int err; | 1304 | int err; |
1305 | 1305 | ||
1306 | fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); | 1306 | fdev = kzalloc(sizeof(*fdev), GFP_KERNEL); |
1307 | if (!fdev) { | 1307 | if (!fdev) { |
1308 | dev_err(&op->dev, "Not enough memory for 'priv'\n"); | 1308 | dev_err(&op->dev, "Not enough memory for 'priv'\n"); |
1309 | err = -ENOMEM; | 1309 | err = -ENOMEM; |
1310 | goto out_return; | 1310 | goto out_return; |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | fdev->dev = &op->dev; | 1313 | fdev->dev = &op->dev; |
1314 | INIT_LIST_HEAD(&fdev->common.channels); | 1314 | INIT_LIST_HEAD(&fdev->common.channels); |
1315 | 1315 | ||
1316 | /* ioremap the registers for use */ | 1316 | /* ioremap the registers for use */ |
1317 | fdev->regs = of_iomap(op->node, 0); | 1317 | fdev->regs = of_iomap(op->node, 0); |
1318 | if (!fdev->regs) { | 1318 | if (!fdev->regs) { |
1319 | dev_err(&op->dev, "unable to ioremap registers\n"); | 1319 | dev_err(&op->dev, "unable to ioremap registers\n"); |
1320 | err = -ENOMEM; | 1320 | err = -ENOMEM; |
1321 | goto out_free_fdev; | 1321 | goto out_free_fdev; |
1322 | } | 1322 | } |
1323 | 1323 | ||
1324 | /* map the channel IRQ if it exists, but don't hook up the handler yet */ | 1324 | /* map the channel IRQ if it exists, but don't hook up the handler yet */ |
1325 | fdev->irq = irq_of_parse_and_map(op->node, 0); | 1325 | fdev->irq = irq_of_parse_and_map(op->node, 0); |
1326 | 1326 | ||
1327 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | 1327 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); |
1328 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | 1328 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); |
1329 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); | 1329 | dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); |
1330 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | 1330 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; |
1331 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | 1331 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; |
1332 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; | 1332 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; |
1333 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; | 1333 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; |
1334 | fdev->common.device_tx_status = fsl_tx_status; | 1334 | fdev->common.device_tx_status = fsl_tx_status; |
1335 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | 1335 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; |
1336 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; | 1336 | fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; |
1337 | fdev->common.device_control = fsl_dma_device_control; | 1337 | fdev->common.device_control = fsl_dma_device_control; |
1338 | fdev->common.dev = &op->dev; | 1338 | fdev->common.dev = &op->dev; |
1339 | 1339 | ||
1340 | dev_set_drvdata(&op->dev, fdev); | 1340 | dev_set_drvdata(&op->dev, fdev); |
1341 | 1341 | ||
1342 | /* | 1342 | /* |
1343 | * We cannot use of_platform_bus_probe() because there is no | 1343 | * We cannot use of_platform_bus_probe() because there is no |
1344 | * of_platform_bus_remove(). Instead, we manually instantiate every DMA | 1344 | * of_platform_bus_remove(). Instead, we manually instantiate every DMA |
1345 | * channel object. | 1345 | * channel object. |
1346 | */ | 1346 | */ |
1347 | for_each_child_of_node(op->node, child) { | 1347 | for_each_child_of_node(op->node, child) { |
1348 | if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { | 1348 | if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) { |
1349 | fsl_dma_chan_probe(fdev, child, | 1349 | fsl_dma_chan_probe(fdev, child, |
1350 | FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, | 1350 | FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, |
1351 | "fsl,eloplus-dma-channel"); | 1351 | "fsl,eloplus-dma-channel"); |
1352 | } | 1352 | } |
1353 | 1353 | ||
1354 | if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { | 1354 | if (of_device_is_compatible(child, "fsl,elo-dma-channel")) { |
1355 | fsl_dma_chan_probe(fdev, child, | 1355 | fsl_dma_chan_probe(fdev, child, |
1356 | FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, | 1356 | FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, |
1357 | "fsl,elo-dma-channel"); | 1357 | "fsl,elo-dma-channel"); |
1358 | } | 1358 | } |
1359 | } | 1359 | } |
1360 | 1360 | ||
1361 | /* | 1361 | /* |
1362 | * Hook up the IRQ handler(s) | 1362 | * Hook up the IRQ handler(s) |
1363 | * | 1363 | * |
1364 | * If we have a per-controller interrupt, we prefer that to the | 1364 | * If we have a per-controller interrupt, we prefer that to the |
1365 | * per-channel interrupts to reduce the number of shared interrupt | 1365 | * per-channel interrupts to reduce the number of shared interrupt |
1366 | * handlers on the same IRQ line | 1366 | * handlers on the same IRQ line |
1367 | */ | 1367 | */ |
1368 | err = fsldma_request_irqs(fdev); | 1368 | err = fsldma_request_irqs(fdev); |
1369 | if (err) { | 1369 | if (err) { |
1370 | dev_err(fdev->dev, "unable to request IRQs\n"); | 1370 | dev_err(fdev->dev, "unable to request IRQs\n"); |
1371 | goto out_free_fdev; | 1371 | goto out_free_fdev; |
1372 | } | 1372 | } |
1373 | 1373 | ||
1374 | dma_async_device_register(&fdev->common); | 1374 | dma_async_device_register(&fdev->common); |
1375 | return 0; | 1375 | return 0; |
1376 | 1376 | ||
1377 | out_free_fdev: | 1377 | out_free_fdev: |
1378 | irq_dispose_mapping(fdev->irq); | 1378 | irq_dispose_mapping(fdev->irq); |
1379 | kfree(fdev); | 1379 | kfree(fdev); |
1380 | out_return: | 1380 | out_return: |
1381 | return err; | 1381 | return err; |
1382 | } | 1382 | } |
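The device_control hook wired up in this probe is the operation this commit extends; after the change its prototype in include/linux/dmaengine.h carries the extra ioctl()-style argument described in the commit message:

    int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                    unsigned long arg);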
1383 | 1383 | ||
1384 | static int fsldma_of_remove(struct of_device *op) | 1384 | static int fsldma_of_remove(struct of_device *op) |
1385 | { | 1385 | { |
1386 | struct fsldma_device *fdev; | 1386 | struct fsldma_device *fdev; |
1387 | unsigned int i; | 1387 | unsigned int i; |
1388 | 1388 | ||
1389 | fdev = dev_get_drvdata(&op->dev); | 1389 | fdev = dev_get_drvdata(&op->dev); |
1390 | dma_async_device_unregister(&fdev->common); | 1390 | dma_async_device_unregister(&fdev->common); |
1391 | 1391 | ||
1392 | fsldma_free_irqs(fdev); | 1392 | fsldma_free_irqs(fdev); |
1393 | 1393 | ||
1394 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { | 1394 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { |
1395 | if (fdev->chan[i]) | 1395 | if (fdev->chan[i]) |
1396 | fsl_dma_chan_remove(fdev->chan[i]); | 1396 | fsl_dma_chan_remove(fdev->chan[i]); |
1397 | } | 1397 | } |
1398 | 1398 | ||
1399 | iounmap(fdev->regs); | 1399 | iounmap(fdev->regs); |
1400 | dev_set_drvdata(&op->dev, NULL); | 1400 | dev_set_drvdata(&op->dev, NULL); |
1401 | kfree(fdev); | 1401 | kfree(fdev); |
1402 | 1402 | ||
1403 | return 0; | 1403 | return 0; |
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | static const struct of_device_id fsldma_of_ids[] = { | 1406 | static const struct of_device_id fsldma_of_ids[] = { |
1407 | { .compatible = "fsl,eloplus-dma", }, | 1407 | { .compatible = "fsl,eloplus-dma", }, |
1408 | { .compatible = "fsl,elo-dma", }, | 1408 | { .compatible = "fsl,elo-dma", }, |
1409 | {} | 1409 | {} |
1410 | }; | 1410 | }; |
1411 | 1411 | ||
1412 | static struct of_platform_driver fsldma_of_driver = { | 1412 | static struct of_platform_driver fsldma_of_driver = { |
1413 | .name = "fsl-elo-dma", | 1413 | .name = "fsl-elo-dma", |
1414 | .match_table = fsldma_of_ids, | 1414 | .match_table = fsldma_of_ids, |
1415 | .probe = fsldma_of_probe, | 1415 | .probe = fsldma_of_probe, |
1416 | .remove = fsldma_of_remove, | 1416 | .remove = fsldma_of_remove, |
1417 | }; | 1417 | }; |
1418 | 1418 | ||
1419 | /*----------------------------------------------------------------------------*/ | 1419 | /*----------------------------------------------------------------------------*/ |
1420 | /* Module Init / Exit */ | 1420 | /* Module Init / Exit */ |
1421 | /*----------------------------------------------------------------------------*/ | 1421 | /*----------------------------------------------------------------------------*/ |
1422 | 1422 | ||
1423 | static __init int fsldma_init(void) | 1423 | static __init int fsldma_init(void) |
1424 | { | 1424 | { |
1425 | int ret; | 1425 | int ret; |
1426 | 1426 | ||
1427 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); | 1427 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); |
1428 | 1428 | ||
1429 | ret = of_register_platform_driver(&fsldma_of_driver); | 1429 | ret = of_register_platform_driver(&fsldma_of_driver); |
1430 | if (ret) | 1430 | if (ret) |
1431 | pr_err("fsldma: failed to register platform driver\n"); | 1431 | pr_err("fsldma: failed to register platform driver\n"); |
1432 | 1432 | ||
1433 | return ret; | 1433 | return ret; |
1434 | } | 1434 | } |
1435 | 1435 | ||
1436 | static void __exit fsldma_exit(void) | 1436 | static void __exit fsldma_exit(void) |
1437 | { | 1437 | { |
1438 | of_unregister_platform_driver(&fsldma_of_driver); | 1438 | of_unregister_platform_driver(&fsldma_of_driver); |
1439 | } | 1439 | } |
1440 | 1440 | ||
1441 | subsys_initcall(fsldma_init); | 1441 | subsys_initcall(fsldma_init); |
1442 | module_exit(fsldma_exit); | 1442 | module_exit(fsldma_exit); |
1443 | 1443 | ||
1444 | MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); | 1444 | MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); |
1445 | MODULE_LICENSE("GPL"); | 1445 | MODULE_LICENSE("GPL"); |
1446 | 1446 |
drivers/dma/ipu/ipu_idmac.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 | 2 | * Copyright (C) 2008 |
3 | * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> | 3 | * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> |
4 | * | 4 | * |
5 | * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved. | 5 | * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/platform_device.h> | 13 | #include <linux/platform_device.h> |
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/spinlock.h> | 15 | #include <linux/spinlock.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/vmalloc.h> | 19 | #include <linux/vmalloc.h> |
20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
21 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | 23 | ||
24 | #include <mach/ipu.h> | 24 | #include <mach/ipu.h> |
25 | 25 | ||
26 | #include "ipu_intern.h" | 26 | #include "ipu_intern.h" |
27 | 27 | ||
28 | #define FS_VF_IN_VALID 0x00000002 | 28 | #define FS_VF_IN_VALID 0x00000002 |
29 | #define FS_ENC_IN_VALID 0x00000001 | 29 | #define FS_ENC_IN_VALID 0x00000001 |
30 | 30 | ||
31 | static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, | 31 | static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, |
32 | bool wait_for_stop); | 32 | bool wait_for_stop); |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * There can be only one; we could allocate it dynamically, but then we'd have | 35 | * There can be only one; we could allocate it dynamically, but then we'd have |
36 | * to add an extra parameter to some functions, and use something as ugly as | 36 | * to add an extra parameter to some functions, and use something as ugly as |
37 | * struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device)); | 37 | * struct ipu *ipu = to_ipu(to_idmac(ichan->dma_chan.device)); |
38 | * in the ISR | 38 | * in the ISR |
39 | */ | 39 | */ |
40 | static struct ipu ipu_data; | 40 | static struct ipu ipu_data; |
41 | 41 | ||
42 | #define to_ipu(id) container_of(id, struct ipu, idmac) | 42 | #define to_ipu(id) container_of(id, struct ipu, idmac) |
43 | 43 | ||
44 | static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg) | 44 | static u32 __idmac_read_icreg(struct ipu *ipu, unsigned long reg) |
45 | { | 45 | { |
46 | return __raw_readl(ipu->reg_ic + reg); | 46 | return __raw_readl(ipu->reg_ic + reg); |
47 | } | 47 | } |
48 | 48 | ||
49 | #define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF) | 49 | #define idmac_read_icreg(ipu, reg) __idmac_read_icreg(ipu, reg - IC_CONF) |
50 | 50 | ||
51 | static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg) | 51 | static void __idmac_write_icreg(struct ipu *ipu, u32 value, unsigned long reg) |
52 | { | 52 | { |
53 | __raw_writel(value, ipu->reg_ic + reg); | 53 | __raw_writel(value, ipu->reg_ic + reg); |
54 | } | 54 | } |
55 | 55 | ||
56 | #define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF) | 56 | #define idmac_write_icreg(ipu, v, reg) __idmac_write_icreg(ipu, v, reg - IC_CONF) |
57 | 57 | ||
58 | static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg) | 58 | static u32 idmac_read_ipureg(struct ipu *ipu, unsigned long reg) |
59 | { | 59 | { |
60 | return __raw_readl(ipu->reg_ipu + reg); | 60 | return __raw_readl(ipu->reg_ipu + reg); |
61 | } | 61 | } |
62 | 62 | ||
63 | static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg) | 63 | static void idmac_write_ipureg(struct ipu *ipu, u32 value, unsigned long reg) |
64 | { | 64 | { |
65 | __raw_writel(value, ipu->reg_ipu + reg); | 65 | __raw_writel(value, ipu->reg_ipu + reg); |
66 | } | 66 | } |
67 | 67 | ||
68 | /***************************************************************************** | 68 | /***************************************************************************** |
69 | * IPU / IC common functions | 69 | * IPU / IC common functions |
70 | */ | 70 | */ |
71 | static void dump_idmac_reg(struct ipu *ipu) | 71 | static void dump_idmac_reg(struct ipu *ipu) |
72 | { | 72 | { |
73 | dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, " | 73 | dev_dbg(ipu->dev, "IDMAC_CONF 0x%x, IC_CONF 0x%x, IDMAC_CHA_EN 0x%x, " |
74 | "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n", | 74 | "IDMAC_CHA_PRI 0x%x, IDMAC_CHA_BUSY 0x%x\n", |
75 | idmac_read_icreg(ipu, IDMAC_CONF), | 75 | idmac_read_icreg(ipu, IDMAC_CONF), |
76 | idmac_read_icreg(ipu, IC_CONF), | 76 | idmac_read_icreg(ipu, IC_CONF), |
77 | idmac_read_icreg(ipu, IDMAC_CHA_EN), | 77 | idmac_read_icreg(ipu, IDMAC_CHA_EN), |
78 | idmac_read_icreg(ipu, IDMAC_CHA_PRI), | 78 | idmac_read_icreg(ipu, IDMAC_CHA_PRI), |
79 | idmac_read_icreg(ipu, IDMAC_CHA_BUSY)); | 79 | idmac_read_icreg(ipu, IDMAC_CHA_BUSY)); |
80 | dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, " | 80 | dev_dbg(ipu->dev, "BUF0_RDY 0x%x, BUF1_RDY 0x%x, CUR_BUF 0x%x, " |
81 | "DB_MODE 0x%x, TASKS_STAT 0x%x\n", | 81 | "DB_MODE 0x%x, TASKS_STAT 0x%x\n", |
82 | idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY), | 82 | idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY), |
83 | idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY), | 83 | idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY), |
84 | idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF), | 84 | idmac_read_ipureg(ipu, IPU_CHA_CUR_BUF), |
85 | idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL), | 85 | idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL), |
86 | idmac_read_ipureg(ipu, IPU_TASKS_STAT)); | 86 | idmac_read_ipureg(ipu, IPU_TASKS_STAT)); |
87 | } | 87 | } |
88 | 88 | ||
89 | static uint32_t bytes_per_pixel(enum pixel_fmt fmt) | 89 | static uint32_t bytes_per_pixel(enum pixel_fmt fmt) |
90 | { | 90 | { |
91 | switch (fmt) { | 91 | switch (fmt) { |
92 | case IPU_PIX_FMT_GENERIC: /* generic data */ | 92 | case IPU_PIX_FMT_GENERIC: /* generic data */ |
93 | case IPU_PIX_FMT_RGB332: | 93 | case IPU_PIX_FMT_RGB332: |
94 | case IPU_PIX_FMT_YUV420P: | 94 | case IPU_PIX_FMT_YUV420P: |
95 | case IPU_PIX_FMT_YUV422P: | 95 | case IPU_PIX_FMT_YUV422P: |
96 | default: | 96 | default: |
97 | return 1; | 97 | return 1; |
98 | case IPU_PIX_FMT_RGB565: | 98 | case IPU_PIX_FMT_RGB565: |
99 | case IPU_PIX_FMT_YUYV: | 99 | case IPU_PIX_FMT_YUYV: |
100 | case IPU_PIX_FMT_UYVY: | 100 | case IPU_PIX_FMT_UYVY: |
101 | return 2; | 101 | return 2; |
102 | case IPU_PIX_FMT_BGR24: | 102 | case IPU_PIX_FMT_BGR24: |
103 | case IPU_PIX_FMT_RGB24: | 103 | case IPU_PIX_FMT_RGB24: |
104 | return 3; | 104 | return 3; |
105 | case IPU_PIX_FMT_GENERIC_32: /* generic data */ | 105 | case IPU_PIX_FMT_GENERIC_32: /* generic data */ |
106 | case IPU_PIX_FMT_BGR32: | 106 | case IPU_PIX_FMT_BGR32: |
107 | case IPU_PIX_FMT_RGB32: | 107 | case IPU_PIX_FMT_RGB32: |
108 | case IPU_PIX_FMT_ABGR32: | 108 | case IPU_PIX_FMT_ABGR32: |
109 | return 4; | 109 | return 4; |
110 | } | 110 | } |
111 | } | 111 | } |
112 | 112 | ||
113 | /* Enable direct write to memory by the Camera Sensor Interface */ | 113 | /* Enable direct write to memory by the Camera Sensor Interface */ |
114 | static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) | 114 | static void ipu_ic_enable_task(struct ipu *ipu, enum ipu_channel channel) |
115 | { | 115 | { |
116 | uint32_t ic_conf, mask; | 116 | uint32_t ic_conf, mask; |
117 | 117 | ||
118 | switch (channel) { | 118 | switch (channel) { |
119 | case IDMAC_IC_0: | 119 | case IDMAC_IC_0: |
120 | mask = IC_CONF_PRPENC_EN; | 120 | mask = IC_CONF_PRPENC_EN; |
121 | break; | 121 | break; |
122 | case IDMAC_IC_7: | 122 | case IDMAC_IC_7: |
123 | mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN; | 123 | mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN; |
124 | break; | 124 | break; |
125 | default: | 125 | default: |
126 | return; | 126 | return; |
127 | } | 127 | } |
128 | ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask; | 128 | ic_conf = idmac_read_icreg(ipu, IC_CONF) | mask; |
129 | idmac_write_icreg(ipu, ic_conf, IC_CONF); | 129 | idmac_write_icreg(ipu, ic_conf, IC_CONF); |
130 | } | 130 | } |
131 | 131 | ||
132 | /* Called under spin_lock_irqsave(&ipu_data.lock) */ | 132 | /* Called under spin_lock_irqsave(&ipu_data.lock) */ |
133 | static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel) | 133 | static void ipu_ic_disable_task(struct ipu *ipu, enum ipu_channel channel) |
134 | { | 134 | { |
135 | uint32_t ic_conf, mask; | 135 | uint32_t ic_conf, mask; |
136 | 136 | ||
137 | switch (channel) { | 137 | switch (channel) { |
138 | case IDMAC_IC_0: | 138 | case IDMAC_IC_0: |
139 | mask = IC_CONF_PRPENC_EN; | 139 | mask = IC_CONF_PRPENC_EN; |
140 | break; | 140 | break; |
141 | case IDMAC_IC_7: | 141 | case IDMAC_IC_7: |
142 | mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN; | 142 | mask = IC_CONF_RWS_EN | IC_CONF_PRPENC_EN; |
143 | break; | 143 | break; |
144 | default: | 144 | default: |
145 | return; | 145 | return; |
146 | } | 146 | } |
147 | ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask; | 147 | ic_conf = idmac_read_icreg(ipu, IC_CONF) & ~mask; |
148 | idmac_write_icreg(ipu, ic_conf, IC_CONF); | 148 | idmac_write_icreg(ipu, ic_conf, IC_CONF); |
149 | } | 149 | } |
150 | 150 | ||
151 | static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel) | 151 | static uint32_t ipu_channel_status(struct ipu *ipu, enum ipu_channel channel) |
152 | { | 152 | { |
153 | uint32_t stat = TASK_STAT_IDLE; | 153 | uint32_t stat = TASK_STAT_IDLE; |
154 | uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT); | 154 | uint32_t task_stat_reg = idmac_read_ipureg(ipu, IPU_TASKS_STAT); |
155 | 155 | ||
156 | switch (channel) { | 156 | switch (channel) { |
157 | case IDMAC_IC_7: | 157 | case IDMAC_IC_7: |
158 | stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >> | 158 | stat = (task_stat_reg & TSTAT_CSI2MEM_MASK) >> |
159 | TSTAT_CSI2MEM_OFFSET; | 159 | TSTAT_CSI2MEM_OFFSET; |
160 | break; | 160 | break; |
161 | case IDMAC_IC_0: | 161 | case IDMAC_IC_0: |
162 | case IDMAC_SDC_0: | 162 | case IDMAC_SDC_0: |
163 | case IDMAC_SDC_1: | 163 | case IDMAC_SDC_1: |
164 | default: | 164 | default: |
165 | break; | 165 | break; |
166 | } | 166 | } |
167 | return stat; | 167 | return stat; |
168 | } | 168 | } |
169 | 169 | ||
170 | struct chan_param_mem_planar { | 170 | struct chan_param_mem_planar { |
171 | /* Word 0 */ | 171 | /* Word 0 */ |
172 | u32 xv:10; | 172 | u32 xv:10; |
173 | u32 yv:10; | 173 | u32 yv:10; |
174 | u32 xb:12; | 174 | u32 xb:12; |
175 | 175 | ||
176 | u32 yb:12; | 176 | u32 yb:12; |
177 | u32 res1:2; | 177 | u32 res1:2; |
178 | u32 nsb:1; | 178 | u32 nsb:1; |
179 | u32 lnpb:6; | 179 | u32 lnpb:6; |
180 | u32 ubo_l:11; | 180 | u32 ubo_l:11; |
181 | 181 | ||
182 | u32 ubo_h:15; | 182 | u32 ubo_h:15; |
183 | u32 vbo_l:17; | 183 | u32 vbo_l:17; |
184 | 184 | ||
185 | u32 vbo_h:9; | 185 | u32 vbo_h:9; |
186 | u32 res2:3; | 186 | u32 res2:3; |
187 | u32 fw:12; | 187 | u32 fw:12; |
188 | u32 fh_l:8; | 188 | u32 fh_l:8; |
189 | 189 | ||
190 | u32 fh_h:4; | 190 | u32 fh_h:4; |
191 | u32 res3:28; | 191 | u32 res3:28; |
192 | 192 | ||
193 | /* Word 1 */ | 193 | /* Word 1 */ |
194 | u32 eba0; | 194 | u32 eba0; |
195 | 195 | ||
196 | u32 eba1; | 196 | u32 eba1; |
197 | 197 | ||
198 | u32 bpp:3; | 198 | u32 bpp:3; |
199 | u32 sl:14; | 199 | u32 sl:14; |
200 | u32 pfs:3; | 200 | u32 pfs:3; |
201 | u32 bam:3; | 201 | u32 bam:3; |
202 | u32 res4:2; | 202 | u32 res4:2; |
203 | u32 npb:6; | 203 | u32 npb:6; |
204 | u32 res5:1; | 204 | u32 res5:1; |
205 | 205 | ||
206 | u32 sat:2; | 206 | u32 sat:2; |
207 | u32 res6:30; | 207 | u32 res6:30; |
208 | } __attribute__ ((packed)); | 208 | } __attribute__ ((packed)); |
209 | 209 | ||
210 | struct chan_param_mem_interleaved { | 210 | struct chan_param_mem_interleaved { |
211 | /* Word 0 */ | 211 | /* Word 0 */ |
212 | u32 xv:10; | 212 | u32 xv:10; |
213 | u32 yv:10; | 213 | u32 yv:10; |
214 | u32 xb:12; | 214 | u32 xb:12; |
215 | 215 | ||
216 | u32 yb:12; | 216 | u32 yb:12; |
217 | u32 sce:1; | 217 | u32 sce:1; |
218 | u32 res1:1; | 218 | u32 res1:1; |
219 | u32 nsb:1; | 219 | u32 nsb:1; |
220 | u32 lnpb:6; | 220 | u32 lnpb:6; |
221 | u32 sx:10; | 221 | u32 sx:10; |
222 | u32 sy_l:1; | 222 | u32 sy_l:1; |
223 | 223 | ||
224 | u32 sy_h:9; | 224 | u32 sy_h:9; |
225 | u32 ns:10; | 225 | u32 ns:10; |
226 | u32 sm:10; | 226 | u32 sm:10; |
227 | u32 sdx_l:3; | 227 | u32 sdx_l:3; |
228 | 228 | ||
229 | u32 sdx_h:2; | 229 | u32 sdx_h:2; |
230 | u32 sdy:5; | 230 | u32 sdy:5; |
231 | u32 sdrx:1; | 231 | u32 sdrx:1; |
232 | u32 sdry:1; | 232 | u32 sdry:1; |
233 | u32 sdr1:1; | 233 | u32 sdr1:1; |
234 | u32 res2:2; | 234 | u32 res2:2; |
235 | u32 fw:12; | 235 | u32 fw:12; |
236 | u32 fh_l:8; | 236 | u32 fh_l:8; |
237 | 237 | ||
238 | u32 fh_h:4; | 238 | u32 fh_h:4; |
239 | u32 res3:28; | 239 | u32 res3:28; |
240 | 240 | ||
241 | /* Word 1 */ | 241 | /* Word 1 */ |
242 | u32 eba0; | 242 | u32 eba0; |
243 | 243 | ||
244 | u32 eba1; | 244 | u32 eba1; |
245 | 245 | ||
246 | u32 bpp:3; | 246 | u32 bpp:3; |
247 | u32 sl:14; | 247 | u32 sl:14; |
248 | u32 pfs:3; | 248 | u32 pfs:3; |
249 | u32 bam:3; | 249 | u32 bam:3; |
250 | u32 res4:2; | 250 | u32 res4:2; |
251 | u32 npb:6; | 251 | u32 npb:6; |
252 | u32 res5:1; | 252 | u32 res5:1; |
253 | 253 | ||
254 | u32 sat:2; | 254 | u32 sat:2; |
255 | u32 scc:1; | 255 | u32 scc:1; |
256 | u32 ofs0:5; | 256 | u32 ofs0:5; |
257 | u32 ofs1:5; | 257 | u32 ofs1:5; |
258 | u32 ofs2:5; | 258 | u32 ofs2:5; |
259 | u32 ofs3:5; | 259 | u32 ofs3:5; |
260 | u32 wid0:3; | 260 | u32 wid0:3; |
261 | u32 wid1:3; | 261 | u32 wid1:3; |
262 | u32 wid2:3; | 262 | u32 wid2:3; |
263 | 263 | ||
264 | u32 wid3:3; | 264 | u32 wid3:3; |
265 | u32 dec_sel:1; | 265 | u32 dec_sel:1; |
266 | u32 res6:28; | 266 | u32 res6:28; |
267 | } __attribute__ ((packed)); | 267 | } __attribute__ ((packed)); |
268 | 268 | ||
269 | union chan_param_mem { | 269 | union chan_param_mem { |
270 | struct chan_param_mem_planar pp; | 270 | struct chan_param_mem_planar pp; |
271 | struct chan_param_mem_interleaved ip; | 271 | struct chan_param_mem_interleaved ip; |
272 | }; | 272 | }; |
273 | 273 | ||
274 | static void ipu_ch_param_set_plane_offset(union chan_param_mem *params, | 274 | static void ipu_ch_param_set_plane_offset(union chan_param_mem *params, |
275 | u32 u_offset, u32 v_offset) | 275 | u32 u_offset, u32 v_offset) |
276 | { | 276 | { |
277 | params->pp.ubo_l = u_offset & 0x7ff; | 277 | params->pp.ubo_l = u_offset & 0x7ff; |
278 | params->pp.ubo_h = u_offset >> 11; | 278 | params->pp.ubo_h = u_offset >> 11; |
279 | params->pp.vbo_l = v_offset & 0x1ffff; | 279 | params->pp.vbo_l = v_offset & 0x1ffff; |
280 | params->pp.vbo_h = v_offset >> 17; | 280 | params->pp.vbo_h = v_offset >> 17; |
281 | } | 281 | } |
282 | 282 | ||
283 | static void ipu_ch_param_set_size(union chan_param_mem *params, | 283 | static void ipu_ch_param_set_size(union chan_param_mem *params, |
284 | uint32_t pixel_fmt, uint16_t width, | 284 | uint32_t pixel_fmt, uint16_t width, |
285 | uint16_t height, uint16_t stride) | 285 | uint16_t height, uint16_t stride) |
286 | { | 286 | { |
287 | u32 u_offset; | 287 | u32 u_offset; |
288 | u32 v_offset; | 288 | u32 v_offset; |
289 | 289 | ||
290 | params->pp.fw = width - 1; | 290 | params->pp.fw = width - 1; |
291 | params->pp.fh_l = height - 1; | 291 | params->pp.fh_l = height - 1; |
292 | params->pp.fh_h = (height - 1) >> 8; | 292 | params->pp.fh_h = (height - 1) >> 8; |
293 | params->pp.sl = stride - 1; | 293 | params->pp.sl = stride - 1; |
294 | 294 | ||
295 | switch (pixel_fmt) { | 295 | switch (pixel_fmt) { |
296 | case IPU_PIX_FMT_GENERIC: | 296 | case IPU_PIX_FMT_GENERIC: |
297 | /* Represents 8-bit Generic data */ | 297 | /* Represents 8-bit Generic data */ |
298 | params->pp.bpp = 3; | 298 | params->pp.bpp = 3; |
299 | params->pp.pfs = 7; | 299 | params->pp.pfs = 7; |
300 | params->pp.npb = 31; | 300 | params->pp.npb = 31; |
301 | params->pp.sat = 2; /* SAT = use 32-bit access */ | 301 | params->pp.sat = 2; /* SAT = use 32-bit access */ |
302 | break; | 302 | break; |
303 | case IPU_PIX_FMT_GENERIC_32: | 303 | case IPU_PIX_FMT_GENERIC_32: |
304 | /* Represents 32-bit Generic data */ | 304 | /* Represents 32-bit Generic data */ |
305 | params->pp.bpp = 0; | 305 | params->pp.bpp = 0; |
306 | params->pp.pfs = 7; | 306 | params->pp.pfs = 7; |
307 | params->pp.npb = 7; | 307 | params->pp.npb = 7; |
308 | params->pp.sat = 2; /* SAT = use 32-bit access */ | 308 | params->pp.sat = 2; /* SAT = use 32-bit access */ |
309 | break; | 309 | break; |
310 | case IPU_PIX_FMT_RGB565: | 310 | case IPU_PIX_FMT_RGB565: |
311 | params->ip.bpp = 2; | 311 | params->ip.bpp = 2; |
312 | params->ip.pfs = 4; | 312 | params->ip.pfs = 4; |
313 | params->ip.npb = 7; | 313 | params->ip.npb = 7; |
314 | params->ip.sat = 2; /* SAT = 32-bit access */ | 314 | params->ip.sat = 2; /* SAT = 32-bit access */ |
315 | params->ip.ofs0 = 0; /* Red bit offset */ | 315 | params->ip.ofs0 = 0; /* Red bit offset */ |
316 | params->ip.ofs1 = 5; /* Green bit offset */ | 316 | params->ip.ofs1 = 5; /* Green bit offset */ |
317 | params->ip.ofs2 = 11; /* Blue bit offset */ | 317 | params->ip.ofs2 = 11; /* Blue bit offset */ |
318 | params->ip.ofs3 = 16; /* Alpha bit offset */ | 318 | params->ip.ofs3 = 16; /* Alpha bit offset */ |
319 | params->ip.wid0 = 4; /* Red bit width - 1 */ | 319 | params->ip.wid0 = 4; /* Red bit width - 1 */ |
320 | params->ip.wid1 = 5; /* Green bit width - 1 */ | 320 | params->ip.wid1 = 5; /* Green bit width - 1 */ |
321 | params->ip.wid2 = 4; /* Blue bit width - 1 */ | 321 | params->ip.wid2 = 4; /* Blue bit width - 1 */ |
322 | break; | 322 | break; |
323 | case IPU_PIX_FMT_BGR24: | 323 | case IPU_PIX_FMT_BGR24: |
324 | params->ip.bpp = 1; /* 24 BPP & RGB PFS */ | 324 | params->ip.bpp = 1; /* 24 BPP & RGB PFS */ |
325 | params->ip.pfs = 4; | 325 | params->ip.pfs = 4; |
326 | params->ip.npb = 7; | 326 | params->ip.npb = 7; |
327 | params->ip.sat = 2; /* SAT = 32-bit access */ | 327 | params->ip.sat = 2; /* SAT = 32-bit access */ |
328 | params->ip.ofs0 = 0; /* Red bit offset */ | 328 | params->ip.ofs0 = 0; /* Red bit offset */ |
329 | params->ip.ofs1 = 8; /* Green bit offset */ | 329 | params->ip.ofs1 = 8; /* Green bit offset */ |
330 | params->ip.ofs2 = 16; /* Blue bit offset */ | 330 | params->ip.ofs2 = 16; /* Blue bit offset */ |
331 | params->ip.ofs3 = 24; /* Alpha bit offset */ | 331 | params->ip.ofs3 = 24; /* Alpha bit offset */ |
332 | params->ip.wid0 = 7; /* Red bit width - 1 */ | 332 | params->ip.wid0 = 7; /* Red bit width - 1 */ |
333 | params->ip.wid1 = 7; /* Green bit width - 1 */ | 333 | params->ip.wid1 = 7; /* Green bit width - 1 */ |
334 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | 334 | params->ip.wid2 = 7; /* Blue bit width - 1 */ |
335 | break; | 335 | break; |
336 | case IPU_PIX_FMT_RGB24: | 336 | case IPU_PIX_FMT_RGB24: |
337 | params->ip.bpp = 1; /* 24 BPP & RGB PFS */ | 337 | params->ip.bpp = 1; /* 24 BPP & RGB PFS */ |
338 | params->ip.pfs = 4; | 338 | params->ip.pfs = 4; |
339 | params->ip.npb = 7; | 339 | params->ip.npb = 7; |
340 | params->ip.sat = 2; /* SAT = 32-bit access */ | 340 | params->ip.sat = 2; /* SAT = 32-bit access */ |
341 | params->ip.ofs0 = 16; /* Red bit offset */ | 341 | params->ip.ofs0 = 16; /* Red bit offset */ |
342 | params->ip.ofs1 = 8; /* Green bit offset */ | 342 | params->ip.ofs1 = 8; /* Green bit offset */ |
343 | params->ip.ofs2 = 0; /* Blue bit offset */ | 343 | params->ip.ofs2 = 0; /* Blue bit offset */ |
344 | params->ip.ofs3 = 24; /* Alpha bit offset */ | 344 | params->ip.ofs3 = 24; /* Alpha bit offset */ |
345 | params->ip.wid0 = 7; /* Red bit width - 1 */ | 345 | params->ip.wid0 = 7; /* Red bit width - 1 */ |
346 | params->ip.wid1 = 7; /* Green bit width - 1 */ | 346 | params->ip.wid1 = 7; /* Green bit width - 1 */ |
347 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | 347 | params->ip.wid2 = 7; /* Blue bit width - 1 */ |
348 | break; | 348 | break; |
349 | case IPU_PIX_FMT_BGRA32: | 349 | case IPU_PIX_FMT_BGRA32: |
350 | case IPU_PIX_FMT_BGR32: | 350 | case IPU_PIX_FMT_BGR32: |
351 | case IPU_PIX_FMT_ABGR32: | 351 | case IPU_PIX_FMT_ABGR32: |
352 | params->ip.bpp = 0; | 352 | params->ip.bpp = 0; |
353 | params->ip.pfs = 4; | 353 | params->ip.pfs = 4; |
354 | params->ip.npb = 7; | 354 | params->ip.npb = 7; |
355 | params->ip.sat = 2; /* SAT = 32-bit access */ | 355 | params->ip.sat = 2; /* SAT = 32-bit access */ |
356 | params->ip.ofs0 = 8; /* Red bit offset */ | 356 | params->ip.ofs0 = 8; /* Red bit offset */ |
357 | params->ip.ofs1 = 16; /* Green bit offset */ | 357 | params->ip.ofs1 = 16; /* Green bit offset */ |
358 | params->ip.ofs2 = 24; /* Blue bit offset */ | 358 | params->ip.ofs2 = 24; /* Blue bit offset */ |
359 | params->ip.ofs3 = 0; /* Alpha bit offset */ | 359 | params->ip.ofs3 = 0; /* Alpha bit offset */ |
360 | params->ip.wid0 = 7; /* Red bit width - 1 */ | 360 | params->ip.wid0 = 7; /* Red bit width - 1 */ |
361 | params->ip.wid1 = 7; /* Green bit width - 1 */ | 361 | params->ip.wid1 = 7; /* Green bit width - 1 */ |
362 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | 362 | params->ip.wid2 = 7; /* Blue bit width - 1 */ |
363 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ | 363 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ |
364 | break; | 364 | break; |
365 | case IPU_PIX_FMT_RGBA32: | 365 | case IPU_PIX_FMT_RGBA32: |
366 | case IPU_PIX_FMT_RGB32: | 366 | case IPU_PIX_FMT_RGB32: |
367 | params->ip.bpp = 0; | 367 | params->ip.bpp = 0; |
368 | params->ip.pfs = 4; | 368 | params->ip.pfs = 4; |
369 | params->ip.npb = 7; | 369 | params->ip.npb = 7; |
370 | params->ip.sat = 2; /* SAT = 32-bit access */ | 370 | params->ip.sat = 2; /* SAT = 32-bit access */ |
371 | params->ip.ofs0 = 24; /* Red bit offset */ | 371 | params->ip.ofs0 = 24; /* Red bit offset */ |
372 | params->ip.ofs1 = 16; /* Green bit offset */ | 372 | params->ip.ofs1 = 16; /* Green bit offset */ |
373 | params->ip.ofs2 = 8; /* Blue bit offset */ | 373 | params->ip.ofs2 = 8; /* Blue bit offset */ |
374 | params->ip.ofs3 = 0; /* Alpha bit offset */ | 374 | params->ip.ofs3 = 0; /* Alpha bit offset */ |
375 | params->ip.wid0 = 7; /* Red bit width - 1 */ | 375 | params->ip.wid0 = 7; /* Red bit width - 1 */ |
376 | params->ip.wid1 = 7; /* Green bit width - 1 */ | 376 | params->ip.wid1 = 7; /* Green bit width - 1 */ |
377 | params->ip.wid2 = 7; /* Blue bit width - 1 */ | 377 | params->ip.wid2 = 7; /* Blue bit width - 1 */ |
378 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ | 378 | params->ip.wid3 = 7; /* Alpha bit width - 1 */ |
379 | break; | 379 | break; |
380 | case IPU_PIX_FMT_UYVY: | 380 | case IPU_PIX_FMT_UYVY: |
381 | params->ip.bpp = 2; | 381 | params->ip.bpp = 2; |
382 | params->ip.pfs = 6; | 382 | params->ip.pfs = 6; |
383 | params->ip.npb = 7; | 383 | params->ip.npb = 7; |
384 | params->ip.sat = 2; /* SAT = 32-bit access */ | 384 | params->ip.sat = 2; /* SAT = 32-bit access */ |
385 | break; | 385 | break; |
386 | case IPU_PIX_FMT_YUV420P2: | 386 | case IPU_PIX_FMT_YUV420P2: |
387 | case IPU_PIX_FMT_YUV420P: | 387 | case IPU_PIX_FMT_YUV420P: |
388 | params->ip.bpp = 3; | 388 | params->ip.bpp = 3; |
389 | params->ip.pfs = 3; | 389 | params->ip.pfs = 3; |
390 | params->ip.npb = 7; | 390 | params->ip.npb = 7; |
391 | params->ip.sat = 2; /* SAT = 32-bit access */ | 391 | params->ip.sat = 2; /* SAT = 32-bit access */ |
392 | u_offset = stride * height; | 392 | u_offset = stride * height; |
393 | v_offset = u_offset + u_offset / 4; | 393 | v_offset = u_offset + u_offset / 4; |
394 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); | 394 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); |
395 | break; | 395 | break; |
396 | case IPU_PIX_FMT_YVU422P: | 396 | case IPU_PIX_FMT_YVU422P: |
397 | params->ip.bpp = 3; | 397 | params->ip.bpp = 3; |
398 | params->ip.pfs = 2; | 398 | params->ip.pfs = 2; |
399 | params->ip.npb = 7; | 399 | params->ip.npb = 7; |
400 | params->ip.sat = 2; /* SAT = 32-bit access */ | 400 | params->ip.sat = 2; /* SAT = 32-bit access */ |
401 | v_offset = stride * height; | 401 | v_offset = stride * height; |
402 | u_offset = v_offset + v_offset / 2; | 402 | u_offset = v_offset + v_offset / 2; |
403 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); | 403 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); |
404 | break; | 404 | break; |
405 | case IPU_PIX_FMT_YUV422P: | 405 | case IPU_PIX_FMT_YUV422P: |
406 | params->ip.bpp = 3; | 406 | params->ip.bpp = 3; |
407 | params->ip.pfs = 2; | 407 | params->ip.pfs = 2; |
408 | params->ip.npb = 7; | 408 | params->ip.npb = 7; |
409 | params->ip.sat = 2; /* SAT = 32-bit access */ | 409 | params->ip.sat = 2; /* SAT = 32-bit access */ |
410 | u_offset = stride * height; | 410 | u_offset = stride * height; |
411 | v_offset = u_offset + u_offset / 2; | 411 | v_offset = u_offset + u_offset / 2; |
412 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); | 412 | ipu_ch_param_set_plane_offset(params, u_offset, v_offset); |
413 | break; | 413 | break; |
414 | default: | 414 | default: |
415 | dev_err(ipu_data.dev, | 415 | dev_err(ipu_data.dev, |
416 | "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt); | 416 | "mx3 ipu: unimplemented pixel format %d\n", pixel_fmt); |
417 | break; | 417 | break; |
418 | } | 418 | } |
419 | 419 | ||
420 | params->pp.nsb = 1; | 420 | params->pp.nsb = 1; |
421 | } | 421 | } |
422 | 422 | ||
423 | static void ipu_ch_param_set_burst_size(union chan_param_mem *params, | 423 | static void ipu_ch_param_set_burst_size(union chan_param_mem *params, |
424 | uint16_t burst_pixels) | 424 | uint16_t burst_pixels) |
425 | { | 425 | { |
426 | params->pp.npb = burst_pixels - 1; | 426 | params->pp.npb = burst_pixels - 1; |
427 | } | 427 | } |
428 | 428 | ||
429 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, | 429 | static void ipu_ch_param_set_buffer(union chan_param_mem *params, |
430 | dma_addr_t buf0, dma_addr_t buf1) | 430 | dma_addr_t buf0, dma_addr_t buf1) |
431 | { | 431 | { |
432 | params->pp.eba0 = buf0; | 432 | params->pp.eba0 = buf0; |
433 | params->pp.eba1 = buf1; | 433 | params->pp.eba1 = buf1; |
434 | } | 434 | } |
435 | 435 | ||
436 | static void ipu_ch_param_set_rotation(union chan_param_mem *params, | 436 | static void ipu_ch_param_set_rotation(union chan_param_mem *params, |
437 | enum ipu_rotate_mode rotate) | 437 | enum ipu_rotate_mode rotate) |
438 | { | 438 | { |
439 | params->pp.bam = rotate; | 439 | params->pp.bam = rotate; |
440 | } | 440 | } |
441 | 441 | ||
442 | static void ipu_write_param_mem(uint32_t addr, uint32_t *data, | 442 | static void ipu_write_param_mem(uint32_t addr, uint32_t *data, |
443 | uint32_t num_words) | 443 | uint32_t num_words) |
444 | { | 444 | { |
445 | for (; num_words > 0; num_words--) { | 445 | for (; num_words > 0; num_words--) { |
446 | dev_dbg(ipu_data.dev, | 446 | dev_dbg(ipu_data.dev, |
447 | "write param mem - addr = 0x%08X, data = 0x%08X\n", | 447 | "write param mem - addr = 0x%08X, data = 0x%08X\n", |
448 | addr, *data); | 448 | addr, *data); |
449 | idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR); | 449 | idmac_write_ipureg(&ipu_data, addr, IPU_IMA_ADDR); |
450 | idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA); | 450 | idmac_write_ipureg(&ipu_data, *data++, IPU_IMA_DATA); |
451 | addr++; | 451 | addr++; |
452 | if ((addr & 0x7) == 5) { | 452 | if ((addr & 0x7) == 5) { |
453 | addr &= ~0x7; /* set to word 0 */ | 453 | addr &= ~0x7; /* set to word 0 */ |
454 | addr += 8; /* increment to next row */ | 454 | addr += 8; /* increment to next row */ |
455 | } | 455 | } |
456 | } | 456 | } |
457 | } | 457 | } |
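A short trace of the address walk above: parameter memory rows are 8 words wide, each entry occupies word slots 0-4 of a row, and after slot 4 the address snaps to the start of the next row:

    /* num_words = 7, starting at a row-aligned addr R:
     *   writes R+0 .. R+4, addr becomes R+5
     *   (R+5 & 0x7) == 5  -> addr = R+8 (next row)
     *   writes R+8, R+9, ...
     */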
458 | 458 | ||
459 | static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size, | 459 | static int calc_resize_coeffs(uint32_t in_size, uint32_t out_size, |
460 | uint32_t *resize_coeff, | 460 | uint32_t *resize_coeff, |
461 | uint32_t *downsize_coeff) | 461 | uint32_t *downsize_coeff) |
462 | { | 462 | { |
463 | uint32_t temp_size; | 463 | uint32_t temp_size; |
464 | uint32_t temp_downsize; | 464 | uint32_t temp_downsize; |
465 | 465 | ||
466 | *resize_coeff = 1 << 13; | 466 | *resize_coeff = 1 << 13; |
467 | *downsize_coeff = 1 << 13; | 467 | *downsize_coeff = 1 << 13; |
468 | 468 | ||
469 | /* Cannot downsize more than 8:1 */ | 469 | /* Cannot downsize more than 8:1 */ |
470 | if (out_size << 3 < in_size) | 470 | if (out_size << 3 < in_size) |
471 | return -EINVAL; | 471 | return -EINVAL; |
472 | 472 | ||
473 | /* compute downsizing coefficient */ | 473 | /* compute downsizing coefficient */ |
474 | temp_downsize = 0; | 474 | temp_downsize = 0; |
475 | temp_size = in_size; | 475 | temp_size = in_size; |
476 | while (temp_size >= out_size * 2 && temp_downsize < 2) { | 476 | while (temp_size >= out_size * 2 && temp_downsize < 2) { |
477 | temp_size >>= 1; | 477 | temp_size >>= 1; |
478 | temp_downsize++; | 478 | temp_downsize++; |
479 | } | 479 | } |
480 | *downsize_coeff = temp_downsize; | 480 | *downsize_coeff = temp_downsize; |
481 | 481 | ||
482 | /* | 482 | /* |
483 | * compute resizing coefficient using the following formula: | 483 | * compute resizing coefficient using the following formula: |
484 | * resize_coeff = M * (SI - 1) / (SO - 1) | 484 | * resize_coeff = M * (SI - 1) / (SO - 1) |
485 | * where M = 2^13, SI = input size (after downsizing), SO = output size | 485 | * where M = 2^13, SI = input size (after downsizing), SO = output size |
486 | */ | 486 | */ |
487 | *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1); | 487 | *resize_coeff = (8192L * (temp_size - 1)) / (out_size - 1); |
488 | if (*resize_coeff >= 16384L) { | 488 | if (*resize_coeff >= 16384L) { |
489 | dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n"); | 489 | dev_err(ipu_data.dev, "Warning! Overflow on resize coeff.\n"); |
490 | *resize_coeff = 0x3FFF; | 490 | *resize_coeff = 0x3FFF; |
491 | } | 491 | } |
492 | 492 | ||
493 | dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, " | 493 | dev_dbg(ipu_data.dev, "resizing from %u -> %u pixels, " |
494 | "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size, | 494 | "downsize=%u, resize=%u.%lu (reg=%u)\n", in_size, out_size, |
495 | *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0, | 495 | *downsize_coeff, *resize_coeff >= 8192L ? 1 : 0, |
496 | ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff); | 496 | ((*resize_coeff & 0x1FFF) * 10000L) / 8192L, *resize_coeff); |
497 | 497 | ||
498 | return 0; | 498 | return 0; |
499 | } | 499 | } |
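A worked example of the two-stage scaling above: the loop downsizes by powers of two (at most 4:1, since temp_downsize is capped at 2), and the remaining ratio is expressed as a fine resize coefficient in units of 2^-13:

    /* in_size = 1280, out_size = 320:
     *   downsize: 1280 -> 640 -> 320, downsize_coeff = 2 (4:1)
     *   resize_coeff = 8192 * (320 - 1) / (320 - 1) = 8192, i.e. 1.0
     * Together the two stages cover the requested 4:1 ratio;
     * anything beyond 8:1 is rejected by the initial check.
     */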
500 | 500 | ||
501 | static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt) | 501 | static enum ipu_color_space format_to_colorspace(enum pixel_fmt fmt) |
502 | { | 502 | { |
503 | switch (fmt) { | 503 | switch (fmt) { |
504 | case IPU_PIX_FMT_RGB565: | 504 | case IPU_PIX_FMT_RGB565: |
505 | case IPU_PIX_FMT_BGR24: | 505 | case IPU_PIX_FMT_BGR24: |
506 | case IPU_PIX_FMT_RGB24: | 506 | case IPU_PIX_FMT_RGB24: |
507 | case IPU_PIX_FMT_BGR32: | 507 | case IPU_PIX_FMT_BGR32: |
508 | case IPU_PIX_FMT_RGB32: | 508 | case IPU_PIX_FMT_RGB32: |
509 | return IPU_COLORSPACE_RGB; | 509 | return IPU_COLORSPACE_RGB; |
510 | default: | 510 | default: |
511 | return IPU_COLORSPACE_YCBCR; | 511 | return IPU_COLORSPACE_YCBCR; |
512 | } | 512 | } |
513 | } | 513 | } |
514 | 514 | ||
515 | static int ipu_ic_init_prpenc(struct ipu *ipu, | 515 | static int ipu_ic_init_prpenc(struct ipu *ipu, |
516 | union ipu_channel_param *params, bool src_is_csi) | 516 | union ipu_channel_param *params, bool src_is_csi) |
517 | { | 517 | { |
518 | uint32_t reg, ic_conf; | 518 | uint32_t reg, ic_conf; |
519 | uint32_t downsize_coeff, resize_coeff; | 519 | uint32_t downsize_coeff, resize_coeff; |
520 | enum ipu_color_space in_fmt, out_fmt; | 520 | enum ipu_color_space in_fmt, out_fmt; |
521 | 521 | ||
522 | /* Setup vertical resizing */ | 522 | /* Setup vertical resizing */ |
523 | calc_resize_coeffs(params->video.in_height, | 523 | calc_resize_coeffs(params->video.in_height, |
524 | params->video.out_height, | 524 | params->video.out_height, |
525 | &resize_coeff, &downsize_coeff); | 525 | &resize_coeff, &downsize_coeff); |
526 | reg = (downsize_coeff << 30) | (resize_coeff << 16); | 526 | reg = (downsize_coeff << 30) | (resize_coeff << 16); |
527 | 527 | ||
528 | /* Setup horizontal resizing */ | 528 | /* Setup horizontal resizing */ |
529 | calc_resize_coeffs(params->video.in_width, | 529 | calc_resize_coeffs(params->video.in_width, |
530 | params->video.out_width, | 530 | params->video.out_width, |
531 | &resize_coeff, &downsize_coeff); | 531 | &resize_coeff, &downsize_coeff); |
532 | reg |= (downsize_coeff << 14) | resize_coeff; | 532 | reg |= (downsize_coeff << 14) | resize_coeff; |
533 | 533 | ||
534 | /* Setup color space conversion */ | 534 | /* Setup color space conversion */ |
535 | in_fmt = format_to_colorspace(params->video.in_pixel_fmt); | 535 | in_fmt = format_to_colorspace(params->video.in_pixel_fmt); |
536 | out_fmt = format_to_colorspace(params->video.out_pixel_fmt); | 536 | out_fmt = format_to_colorspace(params->video.out_pixel_fmt); |
537 | 537 | ||
538 | /* | 538 | /* |
539 | * Colourspace conversion is not supported yet - see _init_csc() in | 539 | * Colourspace conversion is not supported yet - see _init_csc() in |
540 | * Freescale sources | 540 | * Freescale sources |
541 | */ | 541 | */ |
542 | if (in_fmt != out_fmt) { | 542 | if (in_fmt != out_fmt) { |
543 | dev_err(ipu->dev, "Colourspace conversion unsupported!\n"); | 543 | dev_err(ipu->dev, "Colourspace conversion unsupported!\n"); |
544 | return -EOPNOTSUPP; | 544 | return -EOPNOTSUPP; |
545 | } | 545 | } |
546 | 546 | ||
547 | idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC); | 547 | idmac_write_icreg(ipu, reg, IC_PRP_ENC_RSC); |
548 | 548 | ||
549 | ic_conf = idmac_read_icreg(ipu, IC_CONF); | 549 | ic_conf = idmac_read_icreg(ipu, IC_CONF); |
550 | 550 | ||
551 | if (src_is_csi) | 551 | if (src_is_csi) |
552 | ic_conf &= ~IC_CONF_RWS_EN; | 552 | ic_conf &= ~IC_CONF_RWS_EN; |
553 | else | 553 | else |
554 | ic_conf |= IC_CONF_RWS_EN; | 554 | ic_conf |= IC_CONF_RWS_EN; |
555 | 555 | ||
556 | idmac_write_icreg(ipu, ic_conf, IC_CONF); | 556 | idmac_write_icreg(ipu, ic_conf, IC_CONF); |
557 | 557 | ||
558 | return 0; | 558 | return 0; |
559 | } | 559 | } |
560 | 560 | ||
561 | static uint32_t dma_param_addr(uint32_t dma_ch) | 561 | static uint32_t dma_param_addr(uint32_t dma_ch) |
562 | { | 562 | { |
563 | /* Channel Parameter Memory */ | 563 | /* Channel Parameter Memory */ |
564 | return 0x10000 | (dma_ch << 4); | 564 | return 0x10000 | (dma_ch << 4); |
565 | } | 565 | } |
566 | 566 | ||
567 | static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel, | 567 | static void ipu_channel_set_priority(struct ipu *ipu, enum ipu_channel channel, |
568 | bool prio) | 568 | bool prio) |
569 | { | 569 | { |
570 | u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI); | 570 | u32 reg = idmac_read_icreg(ipu, IDMAC_CHA_PRI); |
571 | 571 | ||
572 | if (prio) | 572 | if (prio) |
573 | reg |= 1UL << channel; | 573 | reg |= 1UL << channel; |
574 | else | 574 | else |
575 | reg &= ~(1UL << channel); | 575 | reg &= ~(1UL << channel); |
576 | 576 | ||
577 | idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI); | 577 | idmac_write_icreg(ipu, reg, IDMAC_CHA_PRI); |
578 | 578 | ||
579 | dump_idmac_reg(ipu); | 579 | dump_idmac_reg(ipu); |
580 | } | 580 | } |
581 | 581 | ||
582 | static uint32_t ipu_channel_conf_mask(enum ipu_channel channel) | 582 | static uint32_t ipu_channel_conf_mask(enum ipu_channel channel) |
583 | { | 583 | { |
584 | uint32_t mask; | 584 | uint32_t mask; |
585 | 585 | ||
586 | switch (channel) { | 586 | switch (channel) { |
587 | case IDMAC_IC_0: | 587 | case IDMAC_IC_0: |
588 | case IDMAC_IC_7: | 588 | case IDMAC_IC_7: |
589 | mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN; | 589 | mask = IPU_CONF_CSI_EN | IPU_CONF_IC_EN; |
590 | break; | 590 | break; |
591 | case IDMAC_SDC_0: | 591 | case IDMAC_SDC_0: |
592 | case IDMAC_SDC_1: | 592 | case IDMAC_SDC_1: |
593 | mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN; | 593 | mask = IPU_CONF_SDC_EN | IPU_CONF_DI_EN; |
594 | break; | 594 | break; |
595 | default: | 595 | default: |
596 | mask = 0; | 596 | mask = 0; |
597 | break; | 597 | break; |
598 | } | 598 | } |
599 | 599 | ||
600 | return mask; | 600 | return mask; |
601 | } | 601 | } |
602 | 602 | ||
603 | /** | 603 | /** |
604 | * ipu_enable_channel() - enable an IPU channel. | 604 | * ipu_enable_channel() - enable an IPU channel. |
605 | * @idmac: IPU DMAC context. | 605 | * @idmac: IPU DMAC context. |
606 | * @ichan: IDMAC channel. | 606 | * @ichan: IDMAC channel. |
607 | * @return: 0 on success or negative error code on failure. | 607 | * @return: 0 on success or negative error code on failure. |
608 | */ | 608 | */ |
609 | static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) | 609 | static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) |
610 | { | 610 | { |
611 | struct ipu *ipu = to_ipu(idmac); | 611 | struct ipu *ipu = to_ipu(idmac); |
612 | enum ipu_channel channel = ichan->dma_chan.chan_id; | 612 | enum ipu_channel channel = ichan->dma_chan.chan_id; |
613 | uint32_t reg; | 613 | uint32_t reg; |
614 | unsigned long flags; | 614 | unsigned long flags; |
615 | 615 | ||
616 | spin_lock_irqsave(&ipu->lock, flags); | 616 | spin_lock_irqsave(&ipu->lock, flags); |
617 | 617 | ||
618 | /* Reset to buffer 0 */ | 618 | /* Reset to buffer 0 */ |
619 | idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF); | 619 | idmac_write_ipureg(ipu, 1UL << channel, IPU_CHA_CUR_BUF); |
620 | ichan->active_buffer = 0; | 620 | ichan->active_buffer = 0; |
621 | ichan->status = IPU_CHANNEL_ENABLED; | 621 | ichan->status = IPU_CHANNEL_ENABLED; |
622 | 622 | ||
623 | switch (channel) { | 623 | switch (channel) { |
624 | case IDMAC_SDC_0: | 624 | case IDMAC_SDC_0: |
625 | case IDMAC_SDC_1: | 625 | case IDMAC_SDC_1: |
626 | case IDMAC_IC_7: | 626 | case IDMAC_IC_7: |
627 | ipu_channel_set_priority(ipu, channel, true); | 627 | ipu_channel_set_priority(ipu, channel, true); |
628 | default: | 628 | default: |
629 | break; | 629 | break; |
630 | } | 630 | } |
631 | 631 | ||
632 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); | 632 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); |
633 | 633 | ||
634 | idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN); | 634 | idmac_write_icreg(ipu, reg | (1UL << channel), IDMAC_CHA_EN); |
635 | 635 | ||
636 | ipu_ic_enable_task(ipu, channel); | 636 | ipu_ic_enable_task(ipu, channel); |
637 | 637 | ||
638 | spin_unlock_irqrestore(&ipu->lock, flags); | 638 | spin_unlock_irqrestore(&ipu->lock, flags); |
639 | return 0; | 639 | return 0; |
640 | } | 640 | } |
641 | 641 | ||
642 | /** | 642 | /** |
643 | * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel. | 643 | * ipu_init_channel_buffer() - initialize a buffer for logical IPU channel. |
644 | * @ichan: IDMAC channel. | 644 | * @ichan: IDMAC channel. |
645 | * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code. | 645 | * @pixel_fmt: pixel format of buffer. Pixel format is a FOURCC ASCII code. |
646 | * @width: width of buffer in pixels. | 646 | * @width: width of buffer in pixels. |
647 | * @height: height of buffer in pixels. | 647 | * @height: height of buffer in pixels. |
648 | * @stride: stride length of buffer in pixels. | 648 | * @stride: stride length of buffer in pixels. |
649 | * @rot_mode: rotation mode of buffer. A rotation setting other than | 649 | * @rot_mode: rotation mode of buffer. A rotation setting other than |
650 | * IPU_ROTATE_VERT_FLIP should only be used for input buffers of | 650 | * IPU_ROTATE_VERT_FLIP should only be used for input buffers of |
651 | * rotation channels. | 651 | * rotation channels. |
652 | * @phyaddr_0: buffer 0 physical address. | 652 | * @phyaddr_0: buffer 0 physical address. |
653 | * @phyaddr_1: buffer 1 physical address. Setting this to a non-zero | 653 | * @phyaddr_1: buffer 1 physical address. Setting this to a non-zero |
654 | * value enables double buffering mode. | 654 | * value enables double buffering mode. |
655 | * @return: 0 on success or negative error code on failure. | 655 | * @return: 0 on success or negative error code on failure. |
656 | */ | 656 | */ |
657 | static int ipu_init_channel_buffer(struct idmac_channel *ichan, | 657 | static int ipu_init_channel_buffer(struct idmac_channel *ichan, |
658 | enum pixel_fmt pixel_fmt, | 658 | enum pixel_fmt pixel_fmt, |
659 | uint16_t width, uint16_t height, | 659 | uint16_t width, uint16_t height, |
660 | uint32_t stride, | 660 | uint32_t stride, |
661 | enum ipu_rotate_mode rot_mode, | 661 | enum ipu_rotate_mode rot_mode, |
662 | dma_addr_t phyaddr_0, dma_addr_t phyaddr_1) | 662 | dma_addr_t phyaddr_0, dma_addr_t phyaddr_1) |
663 | { | 663 | { |
664 | enum ipu_channel channel = ichan->dma_chan.chan_id; | 664 | enum ipu_channel channel = ichan->dma_chan.chan_id; |
665 | struct idmac *idmac = to_idmac(ichan->dma_chan.device); | 665 | struct idmac *idmac = to_idmac(ichan->dma_chan.device); |
666 | struct ipu *ipu = to_ipu(idmac); | 666 | struct ipu *ipu = to_ipu(idmac); |
667 | union chan_param_mem params = {}; | 667 | union chan_param_mem params = {}; |
668 | unsigned long flags; | 668 | unsigned long flags; |
669 | uint32_t reg; | 669 | uint32_t reg; |
670 | uint32_t stride_bytes; | 670 | uint32_t stride_bytes; |
671 | 671 | ||
672 | stride_bytes = stride * bytes_per_pixel(pixel_fmt); | 672 | stride_bytes = stride * bytes_per_pixel(pixel_fmt); |
673 | 673 | ||
674 | if (stride_bytes % 4) { | 674 | if (stride_bytes % 4) { |
675 | dev_err(ipu->dev, | 675 | dev_err(ipu->dev, |
676 | "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n", | 676 | "Stride length must be 32-bit aligned, stride = %d, bytes = %d\n", |
677 | stride, stride_bytes); | 677 | stride, stride_bytes); |
678 | return -EINVAL; | 678 | return -EINVAL; |
679 | } | 679 | } |
680 | 680 | ||
681 | /* IC channel's stride must be a multiple of 8 pixels */ | 681 | /* IC channel's stride must be a multiple of 8 pixels */ |
682 | if ((channel <= IDMAC_IC_13) && (stride % 8)) { | 682 | if ((channel <= IDMAC_IC_13) && (stride % 8)) { |
683 | dev_err(ipu->dev, "Stride must be 8 pixel multiple\n"); | 683 | dev_err(ipu->dev, "Stride must be 8 pixel multiple\n"); |
684 | return -EINVAL; | 684 | return -EINVAL; |
685 | } | 685 | } |
686 | 686 | ||
687 | /* Build parameter memory data for DMA channel */ | 687 | /* Build parameter memory data for DMA channel */ |
688 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); | 688 | ipu_ch_param_set_size(¶ms, pixel_fmt, width, height, stride_bytes); |
689 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); | 689 | ipu_ch_param_set_buffer(¶ms, phyaddr_0, phyaddr_1); |
690 | ipu_ch_param_set_rotation(¶ms, rot_mode); | 690 | ipu_ch_param_set_rotation(¶ms, rot_mode); |
691 | /* Some channels (rotation) have restrictions on burst length */ | 691 | /* Some channels (rotation) have restrictions on burst length */ |
692 | switch (channel) { | 692 | switch (channel) { |
693 | case IDMAC_IC_7: /* Hangs with burst 8, 16, other values | 693 | case IDMAC_IC_7: /* Hangs with burst 8, 16, other values |
694 | invalid - Table 44-30 */ | 694 | invalid - Table 44-30 */ |
695 | /* | 695 | /* |
696 | ipu_ch_param_set_burst_size(¶ms, 8); | 696 | ipu_ch_param_set_burst_size(¶ms, 8); |
697 | */ | 697 | */ |
698 | break; | 698 | break; |
699 | case IDMAC_SDC_0: | 699 | case IDMAC_SDC_0: |
700 | case IDMAC_SDC_1: | 700 | case IDMAC_SDC_1: |
701 | /* In the original code only IPU_PIX_FMT_RGB565 set the burst size */ | 701 | /* In the original code only IPU_PIX_FMT_RGB565 set the burst size */ |
702 | ipu_ch_param_set_burst_size(¶ms, 16); | 702 | ipu_ch_param_set_burst_size(¶ms, 16); |
703 | break; | 703 | break; |
704 | case IDMAC_IC_0: | 704 | case IDMAC_IC_0: |
705 | default: | 705 | default: |
706 | break; | 706 | break; |
707 | } | 707 | } |
708 | 708 | ||
709 | spin_lock_irqsave(&ipu->lock, flags); | 709 | spin_lock_irqsave(&ipu->lock, flags); |
710 | 710 | ||
711 | ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)¶ms, 10); | 711 | ipu_write_param_mem(dma_param_addr(channel), (uint32_t *)¶ms, 10); |
712 | 712 | ||
713 | reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL); | 713 | reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL); |
714 | 714 | ||
715 | if (phyaddr_1) | 715 | if (phyaddr_1) |
716 | reg |= 1UL << channel; | 716 | reg |= 1UL << channel; |
717 | else | 717 | else |
718 | reg &= ~(1UL << channel); | 718 | reg &= ~(1UL << channel); |
719 | 719 | ||
720 | idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL); | 720 | idmac_write_ipureg(ipu, reg, IPU_CHA_DB_MODE_SEL); |
721 | 721 | ||
722 | ichan->status = IPU_CHANNEL_READY; | 722 | ichan->status = IPU_CHANNEL_READY; |
723 | 723 | ||
724 | spin_unlock_irqrestore(&ipu->lock, flags); | 724 | spin_unlock_irqrestore(&ipu->lock, flags); |
725 | 725 | ||
726 | return 0; | 726 | return 0; |
727 | } | 727 | } |
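
A hypothetical call matching the kernel-doc above, for a double-buffered 640x480 RGB565 frame (buf0_dma, buf1_dma and ret are invented for the example, not driver fields):

	dma_addr_t buf0_dma, buf1_dma;	/* assumed DMA-mapped frame buffers */
	int ret;

	ret = ipu_init_channel_buffer(ichan, IPU_PIX_FMT_RGB565,
				      640, 480,		/* width, height in pixels */
				      640,		/* stride in pixels */
				      IPU_ROTATE_NONE,
				      buf0_dma,
				      buf1_dma);	/* non-zero: double buffering */

A 640-pixel RGB565 stride is 1280 bytes, which satisfies both the 32-bit alignment check and the 8-pixel-multiple check above.
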
728 | 728 | ||
729 | /** | 729 | /** |
730 | * ipu_select_buffer() - mark a channel's buffer as ready. | 730 | * ipu_select_buffer() - mark a channel's buffer as ready. |
731 | * @channel: channel ID. | 731 | * @channel: channel ID. |
732 | * @buffer_n: buffer number to mark ready. | 732 | * @buffer_n: buffer number to mark ready. |
733 | */ | 733 | */ |
734 | static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) | 734 | static void ipu_select_buffer(enum ipu_channel channel, int buffer_n) |
735 | { | 735 | { |
736 | /* No locking - this is a write-one-to-set register, cleared by IPU */ | 736 | /* No locking - this is a write-one-to-set register, cleared by IPU */ |
737 | if (buffer_n == 0) | 737 | if (buffer_n == 0) |
738 | /* Mark buffer 0 as ready. */ | 738 | /* Mark buffer 0 as ready. */ |
739 | idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY); | 739 | idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF0_RDY); |
740 | else | 740 | else |
741 | /* Mark buffer 1 as ready. */ | 741 | /* Mark buffer 1 as ready. */ |
742 | idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY); | 742 | idmac_write_ipureg(&ipu_data, 1UL << channel, IPU_CHA_BUF1_RDY); |
743 | } | 743 | } |
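
Because the register is write-one-to-set, a write can only turn bits on, and the IPU itself turns them off as it consumes buffers, so two contexts marking different channels ready cannot clobber each other. A toy model of those semantics (illustrative only, not driver code):

	/* Toy write-one-to-set register: CPU writes can only set bits;
	 * clearing happens in hardware when the buffer is consumed. */
	static void w1s_write(u32 *reg, u32 val)
	{
		*reg |= val;		/* 0-bits in val are ignored */
	}
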
744 | 744 | ||
745 | /** | 745 | /** |
746 | * ipu_update_channel_buffer() - update physical address of a channel buffer. | 746 | * ipu_update_channel_buffer() - update physical address of a channel buffer. |
747 | * @ichan: IDMAC channel. | 747 | * @ichan: IDMAC channel. |
748 | * @buffer_n: buffer number to update. | 748 | * @buffer_n: buffer number to update. |
749 | * 0 or 1 are the only valid values. | 749 | * 0 or 1 are the only valid values. |
750 | * @phyaddr: buffer physical address. | 750 | * @phyaddr: buffer physical address. |
751 | */ | 751 | */ |
752 | /* Called under spin_lock(_irqsave)(&ichan->lock) */ | 752 | /* Called under spin_lock(_irqsave)(&ichan->lock) */ |
753 | static void ipu_update_channel_buffer(struct idmac_channel *ichan, | 753 | static void ipu_update_channel_buffer(struct idmac_channel *ichan, |
754 | int buffer_n, dma_addr_t phyaddr) | 754 | int buffer_n, dma_addr_t phyaddr) |
755 | { | 755 | { |
756 | enum ipu_channel channel = ichan->dma_chan.chan_id; | 756 | enum ipu_channel channel = ichan->dma_chan.chan_id; |
757 | uint32_t reg; | 757 | uint32_t reg; |
758 | unsigned long flags; | 758 | unsigned long flags; |
759 | 759 | ||
760 | spin_lock_irqsave(&ipu_data.lock, flags); | 760 | spin_lock_irqsave(&ipu_data.lock, flags); |
761 | 761 | ||
762 | if (buffer_n == 0) { | 762 | if (buffer_n == 0) { |
763 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); | 763 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); |
764 | if (reg & (1UL << channel)) { | 764 | if (reg & (1UL << channel)) { |
765 | ipu_ic_disable_task(&ipu_data, channel); | 765 | ipu_ic_disable_task(&ipu_data, channel); |
766 | ichan->status = IPU_CHANNEL_READY; | 766 | ichan->status = IPU_CHANNEL_READY; |
767 | } | 767 | } |
768 | 768 | ||
769 | /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */ | 769 | /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 0) */ |
770 | idmac_write_ipureg(&ipu_data, dma_param_addr(channel) + | 770 | idmac_write_ipureg(&ipu_data, dma_param_addr(channel) + |
771 | 0x0008UL, IPU_IMA_ADDR); | 771 | 0x0008UL, IPU_IMA_ADDR); |
772 | idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA); | 772 | idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA); |
773 | } else { | 773 | } else { |
774 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); | 774 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); |
775 | if (reg & (1UL << channel)) { | 775 | if (reg & (1UL << channel)) { |
776 | ipu_ic_disable_task(&ipu_data, channel); | 776 | ipu_ic_disable_task(&ipu_data, channel); |
777 | ichan->status = IPU_CHANNEL_READY; | 777 | ichan->status = IPU_CHANNEL_READY; |
778 | } | 778 | } |
779 | 779 | ||
780 | /* Check if double-buffering is already enabled */ | 780 | /* Check if double-buffering is already enabled */ |
781 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL); | 781 | reg = idmac_read_ipureg(&ipu_data, IPU_CHA_DB_MODE_SEL); |
782 | 782 | ||
783 | if (!(reg & (1UL << channel))) | 783 | if (!(reg & (1UL << channel))) |
784 | idmac_write_ipureg(&ipu_data, reg | (1UL << channel), | 784 | idmac_write_ipureg(&ipu_data, reg | (1UL << channel), |
785 | IPU_CHA_DB_MODE_SEL); | 785 | IPU_CHA_DB_MODE_SEL); |
786 | 786 | ||
787 | /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */ | 787 | /* 44.3.3.1.9 - Row Number 1 (WORD1, offset 1) */ |
788 | idmac_write_ipureg(&ipu_data, dma_param_addr(channel) + | 788 | idmac_write_ipureg(&ipu_data, dma_param_addr(channel) + |
789 | 0x0009UL, IPU_IMA_ADDR); | 789 | 0x0009UL, IPU_IMA_ADDR); |
790 | idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA); | 790 | idmac_write_ipureg(&ipu_data, phyaddr, IPU_IMA_DATA); |
791 | } | 791 | } |
792 | 792 | ||
793 | spin_unlock_irqrestore(&ipu_data.lock, flags); | 793 | spin_unlock_irqrestore(&ipu_data.lock, flags); |
794 | } | 794 | } |
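
IPU_IMA_ADDR/IPU_IMA_DATA form an indirect window into parameter memory: the first write selects a word inside the channel's slot (words 0x8 and 0x9 of WORD1 hold the buffer 0 and buffer 1 addresses), and the second stores through it. A condensed restatement of that pattern as a helper (a sketch, not an existing function):

	static void ima_poke(struct ipu *ipu, enum ipu_channel ch,
			     uint32_t word, uint32_t val)
	{
		/* select the word within the channel's parameter slot... */
		idmac_write_ipureg(ipu, dma_param_addr(ch) + word, IPU_IMA_ADDR);
		/* ...then store through the window */
		idmac_write_ipureg(ipu, val, IPU_IMA_DATA);
	}
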
795 | 795 | ||
796 | /* Called under spin_lock_irqsave(&ichan->lock) */ | 796 | /* Called under spin_lock_irqsave(&ichan->lock) */ |
797 | static int ipu_submit_buffer(struct idmac_channel *ichan, | 797 | static int ipu_submit_buffer(struct idmac_channel *ichan, |
798 | struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx) | 798 | struct idmac_tx_desc *desc, struct scatterlist *sg, int buf_idx) |
799 | { | 799 | { |
800 | unsigned int chan_id = ichan->dma_chan.chan_id; | 800 | unsigned int chan_id = ichan->dma_chan.chan_id; |
801 | struct device *dev = &ichan->dma_chan.dev->device; | 801 | struct device *dev = &ichan->dma_chan.dev->device; |
802 | 802 | ||
803 | if (async_tx_test_ack(&desc->txd)) | 803 | if (async_tx_test_ack(&desc->txd)) |
804 | return -EINTR; | 804 | return -EINTR; |
805 | 805 | ||
806 | /* | 806 | /* |
807 | * On first invocation this shouldn't be necessary; the call to | 807 | * On first invocation this shouldn't be necessary; the call to |
808 | * ipu_init_channel_buffer() above will set addresses for us, so we | 808 | * ipu_init_channel_buffer() above will set addresses for us, so we |
809 | * could make it conditional on status >= IPU_CHANNEL_ENABLED, but | 809 | * could make it conditional on status >= IPU_CHANNEL_ENABLED, but |
810 | * doing it again shouldn't hurt either. | 810 | * doing it again shouldn't hurt either. |
811 | */ | 811 | */ |
812 | ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); | 812 | ipu_update_channel_buffer(ichan, buf_idx, sg_dma_address(sg)); |
813 | 813 | ||
814 | ipu_select_buffer(chan_id, buf_idx); | 814 | ipu_select_buffer(chan_id, buf_idx); |
815 | dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", | 815 | dev_dbg(dev, "Updated sg %p on channel 0x%x buffer %d\n", |
816 | sg, chan_id, buf_idx); | 816 | sg, chan_id, buf_idx); |
817 | 817 | ||
818 | return 0; | 818 | return 0; |
819 | } | 819 | } |
820 | 820 | ||
821 | /* Called under spin_lock_irqsave(&ichan->lock) */ | 821 | /* Called under spin_lock_irqsave(&ichan->lock) */ |
822 | static int ipu_submit_channel_buffers(struct idmac_channel *ichan, | 822 | static int ipu_submit_channel_buffers(struct idmac_channel *ichan, |
823 | struct idmac_tx_desc *desc) | 823 | struct idmac_tx_desc *desc) |
824 | { | 824 | { |
825 | struct scatterlist *sg; | 825 | struct scatterlist *sg; |
826 | int i, ret = 0; | 826 | int i, ret = 0; |
827 | 827 | ||
828 | for (i = 0, sg = desc->sg; i < 2 && sg; i++) { | 828 | for (i = 0, sg = desc->sg; i < 2 && sg; i++) { |
829 | if (!ichan->sg[i]) { | 829 | if (!ichan->sg[i]) { |
830 | ichan->sg[i] = sg; | 830 | ichan->sg[i] = sg; |
831 | 831 | ||
832 | ret = ipu_submit_buffer(ichan, desc, sg, i); | 832 | ret = ipu_submit_buffer(ichan, desc, sg, i); |
833 | if (ret < 0) | 833 | if (ret < 0) |
834 | return ret; | 834 | return ret; |
835 | 835 | ||
836 | sg = sg_next(sg); | 836 | sg = sg_next(sg); |
837 | } | 837 | } |
838 | } | 838 | } |
839 | 839 | ||
840 | return ret; | 840 | return ret; |
841 | } | 841 | } |
842 | 842 | ||
843 | static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) | 843 | static dma_cookie_t idmac_tx_submit(struct dma_async_tx_descriptor *tx) |
844 | { | 844 | { |
845 | struct idmac_tx_desc *desc = to_tx_desc(tx); | 845 | struct idmac_tx_desc *desc = to_tx_desc(tx); |
846 | struct idmac_channel *ichan = to_idmac_chan(tx->chan); | 846 | struct idmac_channel *ichan = to_idmac_chan(tx->chan); |
847 | struct idmac *idmac = to_idmac(tx->chan->device); | 847 | struct idmac *idmac = to_idmac(tx->chan->device); |
848 | struct ipu *ipu = to_ipu(idmac); | 848 | struct ipu *ipu = to_ipu(idmac); |
849 | struct device *dev = &ichan->dma_chan.dev->device; | 849 | struct device *dev = &ichan->dma_chan.dev->device; |
850 | dma_cookie_t cookie; | 850 | dma_cookie_t cookie; |
851 | unsigned long flags; | 851 | unsigned long flags; |
852 | int ret; | 852 | int ret; |
853 | 853 | ||
854 | /* Sanity check */ | 854 | /* Sanity check */ |
855 | if (!list_empty(&desc->list)) { | 855 | if (!list_empty(&desc->list)) { |
856 | /* The descriptor doesn't belong to the client */ | 856 | /* The descriptor doesn't belong to the client */ |
857 | dev_err(dev, "Descriptor %p not prepared!\n", tx); | 857 | dev_err(dev, "Descriptor %p not prepared!\n", tx); |
858 | return -EBUSY; | 858 | return -EBUSY; |
859 | } | 859 | } |
860 | 860 | ||
861 | mutex_lock(&ichan->chan_mutex); | 861 | mutex_lock(&ichan->chan_mutex); |
862 | 862 | ||
863 | async_tx_clear_ack(tx); | 863 | async_tx_clear_ack(tx); |
864 | 864 | ||
865 | if (ichan->status < IPU_CHANNEL_READY) { | 865 | if (ichan->status < IPU_CHANNEL_READY) { |
866 | struct idmac_video_param *video = &ichan->params.video; | 866 | struct idmac_video_param *video = &ichan->params.video; |
867 | /* | 867 | /* |
868 | * Initial buffer assignment - the first two sg-entries from | 868 | * Initial buffer assignment - the first two sg-entries from |
869 | * the descriptor will end up in the IDMAC buffers | 869 | * the descriptor will end up in the IDMAC buffers |
870 | */ | 870 | */ |
871 | dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 : | 871 | dma_addr_t dma_1 = sg_is_last(desc->sg) ? 0 : |
872 | sg_dma_address(&desc->sg[1]); | 872 | sg_dma_address(&desc->sg[1]); |
873 | 873 | ||
874 | WARN_ON(ichan->sg[0] || ichan->sg[1]); | 874 | WARN_ON(ichan->sg[0] || ichan->sg[1]); |
875 | 875 | ||
876 | cookie = ipu_init_channel_buffer(ichan, | 876 | cookie = ipu_init_channel_buffer(ichan, |
877 | video->out_pixel_fmt, | 877 | video->out_pixel_fmt, |
878 | video->out_width, | 878 | video->out_width, |
879 | video->out_height, | 879 | video->out_height, |
880 | video->out_stride, | 880 | video->out_stride, |
881 | IPU_ROTATE_NONE, | 881 | IPU_ROTATE_NONE, |
882 | sg_dma_address(&desc->sg[0]), | 882 | sg_dma_address(&desc->sg[0]), |
883 | dma_1); | 883 | dma_1); |
884 | if (cookie < 0) | 884 | if (cookie < 0) |
885 | goto out; | 885 | goto out; |
886 | } | 886 | } |
887 | 887 | ||
888 | dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); | 888 | dev_dbg(dev, "Submitting sg %p\n", &desc->sg[0]); |
889 | 889 | ||
890 | cookie = ichan->dma_chan.cookie; | 890 | cookie = ichan->dma_chan.cookie; |
891 | 891 | ||
892 | if (++cookie < 0) | 892 | if (++cookie < 0) |
893 | cookie = 1; | 893 | cookie = 1; |
894 | 894 | ||
895 | /* from dmaengine.h: "last cookie value returned to client" */ | 895 | /* from dmaengine.h: "last cookie value returned to client" */ |
896 | ichan->dma_chan.cookie = cookie; | 896 | ichan->dma_chan.cookie = cookie; |
897 | tx->cookie = cookie; | 897 | tx->cookie = cookie; |
898 | 898 | ||
899 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ | 899 | /* ipu->lock can be taken under ichan->lock, but not v.v. */ |
900 | spin_lock_irqsave(&ichan->lock, flags); | 900 | spin_lock_irqsave(&ichan->lock, flags); |
901 | 901 | ||
902 | list_add_tail(&desc->list, &ichan->queue); | 902 | list_add_tail(&desc->list, &ichan->queue); |
903 | /* submit_buffers() atomically verifies and fills empty sg slots */ | 903 | /* submit_buffers() atomically verifies and fills empty sg slots */ |
904 | ret = ipu_submit_channel_buffers(ichan, desc); | 904 | ret = ipu_submit_channel_buffers(ichan, desc); |
905 | 905 | ||
906 | spin_unlock_irqrestore(&ichan->lock, flags); | 906 | spin_unlock_irqrestore(&ichan->lock, flags); |
907 | 907 | ||
908 | if (ret < 0) { | 908 | if (ret < 0) { |
909 | cookie = ret; | 909 | cookie = ret; |
910 | goto dequeue; | 910 | goto dequeue; |
911 | } | 911 | } |
912 | 912 | ||
913 | if (ichan->status < IPU_CHANNEL_ENABLED) { | 913 | if (ichan->status < IPU_CHANNEL_ENABLED) { |
914 | ret = ipu_enable_channel(idmac, ichan); | 914 | ret = ipu_enable_channel(idmac, ichan); |
915 | if (ret < 0) { | 915 | if (ret < 0) { |
916 | cookie = ret; | 916 | cookie = ret; |
917 | goto dequeue; | 917 | goto dequeue; |
918 | } | 918 | } |
919 | } | 919 | } |
920 | 920 | ||
921 | dump_idmac_reg(ipu); | 921 | dump_idmac_reg(ipu); |
922 | 922 | ||
923 | dequeue: | 923 | dequeue: |
924 | if (cookie < 0) { | 924 | if (cookie < 0) { |
925 | spin_lock_irqsave(&ichan->lock, flags); | 925 | spin_lock_irqsave(&ichan->lock, flags); |
926 | list_del_init(&desc->list); | 926 | list_del_init(&desc->list); |
927 | spin_unlock_irqrestore(&ichan->lock, flags); | 927 | spin_unlock_irqrestore(&ichan->lock, flags); |
928 | tx->cookie = cookie; | 928 | tx->cookie = cookie; |
929 | ichan->dma_chan.cookie = cookie; | 929 | ichan->dma_chan.cookie = cookie; |
930 | } | 930 | } |
931 | 931 | ||
932 | out: | 932 | out: |
933 | mutex_unlock(&ichan->chan_mutex); | 933 | mutex_unlock(&ichan->chan_mutex); |
934 | 934 | ||
935 | return cookie; | 935 | return cookie; |
936 | } | 936 | } |
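
The cookie arithmetic above encodes the dmaengine convention: cookies are positive and monotonically increasing, negative values are error codes. A sketch of just that rule (the kernel builds with wrapping signed arithmetic, so the increment past INT_MAX goes negative rather than being undefined):

	static dma_cookie_t next_cookie(dma_cookie_t last)
	{
		if (++last < 0)		/* wrapped past INT_MAX */
			last = 1;	/* 0 and negatives are reserved */
		return last;
	}
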
937 | 937 | ||
938 | /* Called with ichan->chan_mutex held */ | 938 | /* Called with ichan->chan_mutex held */ |
939 | static int idmac_desc_alloc(struct idmac_channel *ichan, int n) | 939 | static int idmac_desc_alloc(struct idmac_channel *ichan, int n) |
940 | { | 940 | { |
941 | struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc)); | 941 | struct idmac_tx_desc *desc = vmalloc(n * sizeof(struct idmac_tx_desc)); |
942 | struct idmac *idmac = to_idmac(ichan->dma_chan.device); | 942 | struct idmac *idmac = to_idmac(ichan->dma_chan.device); |
943 | 943 | ||
944 | if (!desc) | 944 | if (!desc) |
945 | return -ENOMEM; | 945 | return -ENOMEM; |
946 | 946 | ||
947 | /* No interrupts, just disable the tasklet for a moment */ | 947 | /* No interrupts, just disable the tasklet for a moment */ |
948 | tasklet_disable(&to_ipu(idmac)->tasklet); | 948 | tasklet_disable(&to_ipu(idmac)->tasklet); |
949 | 949 | ||
950 | ichan->n_tx_desc = n; | 950 | ichan->n_tx_desc = n; |
951 | ichan->desc = desc; | 951 | ichan->desc = desc; |
952 | INIT_LIST_HEAD(&ichan->queue); | 952 | INIT_LIST_HEAD(&ichan->queue); |
953 | INIT_LIST_HEAD(&ichan->free_list); | 953 | INIT_LIST_HEAD(&ichan->free_list); |
954 | 954 | ||
955 | while (n--) { | 955 | while (n--) { |
956 | struct dma_async_tx_descriptor *txd = &desc->txd; | 956 | struct dma_async_tx_descriptor *txd = &desc->txd; |
957 | 957 | ||
958 | memset(txd, 0, sizeof(*txd)); | 958 | memset(txd, 0, sizeof(*txd)); |
959 | dma_async_tx_descriptor_init(txd, &ichan->dma_chan); | 959 | dma_async_tx_descriptor_init(txd, &ichan->dma_chan); |
960 | txd->tx_submit = idmac_tx_submit; | 960 | txd->tx_submit = idmac_tx_submit; |
961 | 961 | ||
962 | list_add(&desc->list, &ichan->free_list); | 962 | list_add(&desc->list, &ichan->free_list); |
963 | 963 | ||
964 | desc++; | 964 | desc++; |
965 | } | 965 | } |
966 | 966 | ||
967 | tasklet_enable(&to_ipu(idmac)->tasklet); | 967 | tasklet_enable(&to_ipu(idmac)->tasklet); |
968 | 968 | ||
969 | return 0; | 969 | return 0; |
970 | } | 970 | } |
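
tasklet_disable()/tasklet_enable() act as the critical section here: tasklet_disable() spins until the tasklet is no longer running, and the tasklet is the only other context walking these lists, so no irq-safe lock is needed. The idiom in isolation (a sketch):

	tasklet_disable(&ipu->tasklet);	/* waits out a running tasklet */
	/* ... safely rebuild ichan->desc, queue and free_list ... */
	tasklet_enable(&ipu->tasklet);
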
971 | 971 | ||
972 | /** | 972 | /** |
973 | * ipu_init_channel() - initialize an IPU channel. | 973 | * ipu_init_channel() - initialize an IPU channel. |
974 | * @idmac: IPU DMAC context. | 974 | * @idmac: IPU DMAC context. |
975 | * @ichan: pointer to the channel object. | 975 | * @ichan: pointer to the channel object. |
976 | * @return 0 on success or negative error code on failure. | 976 | * @return 0 on success or negative error code on failure. |
977 | */ | 977 | */ |
978 | static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan) | 978 | static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan) |
979 | { | 979 | { |
980 | union ipu_channel_param *params = &ichan->params; | 980 | union ipu_channel_param *params = &ichan->params; |
981 | uint32_t ipu_conf; | 981 | uint32_t ipu_conf; |
982 | enum ipu_channel channel = ichan->dma_chan.chan_id; | 982 | enum ipu_channel channel = ichan->dma_chan.chan_id; |
983 | unsigned long flags; | 983 | unsigned long flags; |
984 | uint32_t reg; | 984 | uint32_t reg; |
985 | struct ipu *ipu = to_ipu(idmac); | 985 | struct ipu *ipu = to_ipu(idmac); |
986 | int ret = 0, n_desc = 0; | 986 | int ret = 0, n_desc = 0; |
987 | 987 | ||
988 | dev_dbg(ipu->dev, "init channel = %d\n", channel); | 988 | dev_dbg(ipu->dev, "init channel = %d\n", channel); |
989 | 989 | ||
990 | if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 && | 990 | if (channel != IDMAC_SDC_0 && channel != IDMAC_SDC_1 && |
991 | channel != IDMAC_IC_7) | 991 | channel != IDMAC_IC_7) |
992 | return -EINVAL; | 992 | return -EINVAL; |
993 | 993 | ||
994 | spin_lock_irqsave(&ipu->lock, flags); | 994 | spin_lock_irqsave(&ipu->lock, flags); |
995 | 995 | ||
996 | switch (channel) { | 996 | switch (channel) { |
997 | case IDMAC_IC_7: | 997 | case IDMAC_IC_7: |
998 | n_desc = 16; | 998 | n_desc = 16; |
999 | reg = idmac_read_icreg(ipu, IC_CONF); | 999 | reg = idmac_read_icreg(ipu, IC_CONF); |
1000 | idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF); | 1000 | idmac_write_icreg(ipu, reg & ~IC_CONF_CSI_MEM_WR_EN, IC_CONF); |
1001 | break; | 1001 | break; |
1002 | case IDMAC_IC_0: | 1002 | case IDMAC_IC_0: |
1003 | n_desc = 16; | 1003 | n_desc = 16; |
1004 | reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW); | 1004 | reg = idmac_read_ipureg(ipu, IPU_FS_PROC_FLOW); |
1005 | idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW); | 1005 | idmac_write_ipureg(ipu, reg & ~FS_ENC_IN_VALID, IPU_FS_PROC_FLOW); |
1006 | ret = ipu_ic_init_prpenc(ipu, params, true); | 1006 | ret = ipu_ic_init_prpenc(ipu, params, true); |
1007 | break; | 1007 | break; |
1008 | case IDMAC_SDC_0: | 1008 | case IDMAC_SDC_0: |
1009 | case IDMAC_SDC_1: | 1009 | case IDMAC_SDC_1: |
1010 | n_desc = 4; | 1010 | n_desc = 4; |
1011 | default: | 1011 | default: |
1012 | break; | 1012 | break; |
1013 | } | 1013 | } |
1014 | 1014 | ||
1015 | ipu->channel_init_mask |= 1L << channel; | 1015 | ipu->channel_init_mask |= 1L << channel; |
1016 | 1016 | ||
1017 | /* Enable IPU sub module */ | 1017 | /* Enable IPU sub module */ |
1018 | ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) | | 1018 | ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) | |
1019 | ipu_channel_conf_mask(channel); | 1019 | ipu_channel_conf_mask(channel); |
1020 | idmac_write_ipureg(ipu, ipu_conf, IPU_CONF); | 1020 | idmac_write_ipureg(ipu, ipu_conf, IPU_CONF); |
1021 | 1021 | ||
1022 | spin_unlock_irqrestore(&ipu->lock, flags); | 1022 | spin_unlock_irqrestore(&ipu->lock, flags); |
1023 | 1023 | ||
1024 | if (n_desc && !ichan->desc) | 1024 | if (n_desc && !ichan->desc) |
1025 | ret = idmac_desc_alloc(ichan, n_desc); | 1025 | ret = idmac_desc_alloc(ichan, n_desc); |
1026 | 1026 | ||
1027 | dump_idmac_reg(ipu); | 1027 | dump_idmac_reg(ipu); |
1028 | 1028 | ||
1029 | return ret; | 1029 | return ret; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | /** | 1032 | /** |
1033 | * ipu_uninit_channel() - uninitialize an IPU channel. | 1033 | * ipu_uninit_channel() - uninitialize an IPU channel. |
1034 | * @idmac: IPU DMAC context. | 1034 | * @idmac: IPU DMAC context. |
1035 | * @ichan: pointer to the channel object. | 1035 | * @ichan: pointer to the channel object. |
1036 | */ | 1036 | */ |
1037 | static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan) | 1037 | static void ipu_uninit_channel(struct idmac *idmac, struct idmac_channel *ichan) |
1038 | { | 1038 | { |
1039 | enum ipu_channel channel = ichan->dma_chan.chan_id; | 1039 | enum ipu_channel channel = ichan->dma_chan.chan_id; |
1040 | unsigned long flags; | 1040 | unsigned long flags; |
1041 | uint32_t reg; | 1041 | uint32_t reg; |
1042 | unsigned long chan_mask = 1UL << channel; | 1042 | unsigned long chan_mask = 1UL << channel; |
1043 | uint32_t ipu_conf; | 1043 | uint32_t ipu_conf; |
1044 | struct ipu *ipu = to_ipu(idmac); | 1044 | struct ipu *ipu = to_ipu(idmac); |
1045 | 1045 | ||
1046 | spin_lock_irqsave(&ipu->lock, flags); | 1046 | spin_lock_irqsave(&ipu->lock, flags); |
1047 | 1047 | ||
1048 | if (!(ipu->channel_init_mask & chan_mask)) { | 1048 | if (!(ipu->channel_init_mask & chan_mask)) { |
1049 | dev_err(ipu->dev, "Channel already uninitialized %d\n", | 1049 | dev_err(ipu->dev, "Channel already uninitialized %d\n", |
1050 | channel); | 1050 | channel); |
1051 | spin_unlock_irqrestore(&ipu->lock, flags); | 1051 | spin_unlock_irqrestore(&ipu->lock, flags); |
1052 | return; | 1052 | return; |
1053 | } | 1053 | } |
1054 | 1054 | ||
1055 | /* Reset the double buffer */ | 1055 | /* Reset the double buffer */ |
1056 | reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL); | 1056 | reg = idmac_read_ipureg(ipu, IPU_CHA_DB_MODE_SEL); |
1057 | idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL); | 1057 | idmac_write_ipureg(ipu, reg & ~chan_mask, IPU_CHA_DB_MODE_SEL); |
1058 | 1058 | ||
1059 | ichan->sec_chan_en = false; | 1059 | ichan->sec_chan_en = false; |
1060 | 1060 | ||
1061 | switch (channel) { | 1061 | switch (channel) { |
1062 | case IDMAC_IC_7: | 1062 | case IDMAC_IC_7: |
1063 | reg = idmac_read_icreg(ipu, IC_CONF); | 1063 | reg = idmac_read_icreg(ipu, IC_CONF); |
1064 | idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN), | 1064 | idmac_write_icreg(ipu, reg & ~(IC_CONF_RWS_EN | IC_CONF_PRPENC_EN), |
1065 | IC_CONF); | 1065 | IC_CONF); |
1066 | break; | 1066 | break; |
1067 | case IDMAC_IC_0: | 1067 | case IDMAC_IC_0: |
1068 | reg = idmac_read_icreg(ipu, IC_CONF); | 1068 | reg = idmac_read_icreg(ipu, IC_CONF); |
1069 | idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1), | 1069 | idmac_write_icreg(ipu, reg & ~(IC_CONF_PRPENC_EN | IC_CONF_PRPENC_CSC1), |
1070 | IC_CONF); | 1070 | IC_CONF); |
1071 | break; | 1071 | break; |
1072 | case IDMAC_SDC_0: | 1072 | case IDMAC_SDC_0: |
1073 | case IDMAC_SDC_1: | 1073 | case IDMAC_SDC_1: |
1074 | default: | 1074 | default: |
1075 | break; | 1075 | break; |
1076 | } | 1076 | } |
1077 | 1077 | ||
1078 | ipu->channel_init_mask &= ~(1L << channel); | 1078 | ipu->channel_init_mask &= ~(1L << channel); |
1079 | 1079 | ||
1080 | ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) & | 1080 | ipu_conf = idmac_read_ipureg(ipu, IPU_CONF) & |
1081 | ~ipu_channel_conf_mask(channel); | 1081 | ~ipu_channel_conf_mask(channel); |
1082 | idmac_write_ipureg(ipu, ipu_conf, IPU_CONF); | 1082 | idmac_write_ipureg(ipu, ipu_conf, IPU_CONF); |
1083 | 1083 | ||
1084 | spin_unlock_irqrestore(&ipu->lock, flags); | 1084 | spin_unlock_irqrestore(&ipu->lock, flags); |
1085 | 1085 | ||
1086 | ichan->n_tx_desc = 0; | 1086 | ichan->n_tx_desc = 0; |
1087 | vfree(ichan->desc); | 1087 | vfree(ichan->desc); |
1088 | ichan->desc = NULL; | 1088 | ichan->desc = NULL; |
1089 | } | 1089 | } |
1090 | 1090 | ||
1091 | /** | 1091 | /** |
1092 | * ipu_disable_channel() - disable an IPU channel. | 1092 | * ipu_disable_channel() - disable an IPU channel. |
1093 | * @idmac: IPU DMAC context. | 1093 | * @idmac: IPU DMAC context. |
1094 | * @ichan: channel object pointer. | 1094 | * @ichan: channel object pointer. |
1095 | * @wait_for_stop: flag to set whether to wait for channel end of frame or | 1095 | * @wait_for_stop: flag to set whether to wait for channel end of frame or |
1096 | * return immediately. | 1096 | * return immediately. |
1097 | * @return: 0 on success or negative error code on failure. | 1097 | * @return: 0 on success or negative error code on failure. |
1098 | */ | 1098 | */ |
1099 | static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, | 1099 | static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan, |
1100 | bool wait_for_stop) | 1100 | bool wait_for_stop) |
1101 | { | 1101 | { |
1102 | enum ipu_channel channel = ichan->dma_chan.chan_id; | 1102 | enum ipu_channel channel = ichan->dma_chan.chan_id; |
1103 | struct ipu *ipu = to_ipu(idmac); | 1103 | struct ipu *ipu = to_ipu(idmac); |
1104 | uint32_t reg; | 1104 | uint32_t reg; |
1105 | unsigned long flags; | 1105 | unsigned long flags; |
1106 | unsigned long chan_mask = 1UL << channel; | 1106 | unsigned long chan_mask = 1UL << channel; |
1107 | unsigned int timeout; | 1107 | unsigned int timeout; |
1108 | 1108 | ||
1109 | if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) { | 1109 | if (wait_for_stop && channel != IDMAC_SDC_1 && channel != IDMAC_SDC_0) { |
1110 | timeout = 40; | 1110 | timeout = 40; |
1111 | /* This waiting always fails. Related to spurious irq problem */ | 1111 | /* This waiting always fails. Related to spurious irq problem */ |
1112 | while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) || | 1112 | while ((idmac_read_icreg(ipu, IDMAC_CHA_BUSY) & chan_mask) || |
1113 | (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) { | 1113 | (ipu_channel_status(ipu, channel) == TASK_STAT_ACTIVE)) { |
1114 | timeout--; | 1114 | timeout--; |
1115 | msleep(10); | 1115 | msleep(10); |
1116 | 1116 | ||
1117 | if (!timeout) { | 1117 | if (!timeout) { |
1118 | dev_dbg(ipu->dev, | 1118 | dev_dbg(ipu->dev, |
1119 | "Warning: timeout waiting for channel %u to " | 1119 | "Warning: timeout waiting for channel %u to " |
1120 | "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, " | 1120 | "stop: buf0_rdy = 0x%08X, buf1_rdy = 0x%08X, " |
1121 | "busy = 0x%08X, tstat = 0x%08X\n", channel, | 1121 | "busy = 0x%08X, tstat = 0x%08X\n", channel, |
1122 | idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY), | 1122 | idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY), |
1123 | idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY), | 1123 | idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY), |
1124 | idmac_read_icreg(ipu, IDMAC_CHA_BUSY), | 1124 | idmac_read_icreg(ipu, IDMAC_CHA_BUSY), |
1125 | idmac_read_ipureg(ipu, IPU_TASKS_STAT)); | 1125 | idmac_read_ipureg(ipu, IPU_TASKS_STAT)); |
1126 | break; | 1126 | break; |
1127 | } | 1127 | } |
1128 | } | 1128 | } |
1129 | dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout); | 1129 | dev_dbg(ipu->dev, "timeout = %d * 10ms\n", 40 - timeout); |
1130 | } | 1130 | } |
1131 | /* SDC BG and FG must be disabled before DMA is disabled */ | 1131 | /* SDC BG and FG must be disabled before DMA is disabled */ |
1132 | if (wait_for_stop && (channel == IDMAC_SDC_0 || | 1132 | if (wait_for_stop && (channel == IDMAC_SDC_0 || |
1133 | channel == IDMAC_SDC_1)) { | 1133 | channel == IDMAC_SDC_1)) { |
1134 | for (timeout = 5; | 1134 | for (timeout = 5; |
1135 | timeout && !ipu_irq_status(ichan->eof_irq); timeout--) | 1135 | timeout && !ipu_irq_status(ichan->eof_irq); timeout--) |
1136 | msleep(5); | 1136 | msleep(5); |
1137 | } | 1137 | } |
1138 | 1138 | ||
1139 | spin_lock_irqsave(&ipu->lock, flags); | 1139 | spin_lock_irqsave(&ipu->lock, flags); |
1140 | 1140 | ||
1141 | /* Disable IC task */ | 1141 | /* Disable IC task */ |
1142 | ipu_ic_disable_task(ipu, channel); | 1142 | ipu_ic_disable_task(ipu, channel); |
1143 | 1143 | ||
1144 | /* Disable DMA channel(s) */ | 1144 | /* Disable DMA channel(s) */ |
1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); | 1145 | reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); |
1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); | 1146 | idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); |
1147 | 1147 | ||
1148 | /* | 1148 | /* |
1149 | * Problem (observed with channel DMAIC_7): after enabling the channel | 1149 | * Problem (observed with channel DMAIC_7): after enabling the channel |
1150 | * and initialising buffers, there comes an interrupt with current still | 1150 | * and initialising buffers, there comes an interrupt with current still |
1151 | * pointing at buffer 0, whereas it should use buffer 0 first and only | 1151 | * pointing at buffer 0, whereas it should use buffer 0 first and only |
1152 | * generate an interrupt when it is done, then current should already | 1152 | * generate an interrupt when it is done, then current should already |
1153 | * point to buffer 1. This spurious interrupt also comes on channel | 1153 | * point to buffer 1. This spurious interrupt also comes on channel |
1154 | * DMASDC_0. With DMAIC_7 normally, if we just leave the ISR after the | 1154 | * DMASDC_0. With DMAIC_7 normally, if we just leave the ISR after the |
1155 | * first interrupt, there comes the second with current correctly | 1155 | * first interrupt, there comes the second with current correctly |
1156 | * pointing to buffer 1 this time. But sometimes this second interrupt | 1156 | * pointing to buffer 1 this time. But sometimes this second interrupt |
1157 | * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling | 1157 | * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling |
1158 | * the channel seems to prevent the channel from hanging, but it doesn't | 1158 | * the channel seems to prevent the channel from hanging, but it doesn't |
1159 | * prevent the spurious interrupt. This might also be unsafe. Think | 1159 | * prevent the spurious interrupt. This might also be unsafe. Think |
1160 | * about the IDMAC controller trying to switch to a buffer, when we | 1160 | * about the IDMAC controller trying to switch to a buffer, when we |
1161 | * clear the ready bit, and re-enable it a moment later. | 1161 | * clear the ready bit, and re-enable it a moment later. |
1162 | */ | 1162 | */ |
1163 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY); | 1163 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY); |
1164 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY); | 1164 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY); |
1165 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY); | 1165 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY); |
1166 | 1166 | ||
1167 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY); | 1167 | reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY); |
1168 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY); | 1168 | idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY); |
1169 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY); | 1169 | idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY); |
1170 | 1170 | ||
1171 | spin_unlock_irqrestore(&ipu->lock, flags); | 1171 | spin_unlock_irqrestore(&ipu->lock, flags); |
1172 | 1172 | ||
1173 | return 0; | 1173 | return 0; |
1174 | } | 1174 | } |
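
The BUF0_RDY/BUF1_RDY clearing above has no single-bit clear operation, hence the three-step dance: snapshot the register, blank it, then write back every bit except the target channel's. As a helper it would look like this (a sketch; reg_ofs stands for either ready register):

	static void clear_ready(struct ipu *ipu, unsigned long reg_ofs,
				enum ipu_channel ch)
	{
		u32 reg = idmac_read_ipureg(ipu, reg_ofs);

		idmac_write_ipureg(ipu, 0, reg_ofs);
		idmac_write_ipureg(ipu, reg & ~(1UL << ch), reg_ofs);
	}
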
1175 | 1175 | ||
1176 | static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan, | 1176 | static struct scatterlist *idmac_sg_next(struct idmac_channel *ichan, |
1177 | struct idmac_tx_desc **desc, struct scatterlist *sg) | 1177 | struct idmac_tx_desc **desc, struct scatterlist *sg) |
1178 | { | 1178 | { |
1179 | struct scatterlist *sgnew = sg ? sg_next(sg) : NULL; | 1179 | struct scatterlist *sgnew = sg ? sg_next(sg) : NULL; |
1180 | 1180 | ||
1181 | if (sgnew) | 1181 | if (sgnew) |
1182 | /* next sg-element in this list */ | 1182 | /* next sg-element in this list */ |
1183 | return sgnew; | 1183 | return sgnew; |
1184 | 1184 | ||
1185 | if ((*desc)->list.next == &ichan->queue) | 1185 | if ((*desc)->list.next == &ichan->queue) |
1186 | /* No more descriptors on the queue */ | 1186 | /* No more descriptors on the queue */ |
1187 | return NULL; | 1187 | return NULL; |
1188 | 1188 | ||
1189 | /* Fetch next descriptor */ | 1189 | /* Fetch next descriptor */ |
1190 | *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list); | 1190 | *desc = list_entry((*desc)->list.next, struct idmac_tx_desc, list); |
1191 | return (*desc)->sg; | 1191 | return (*desc)->sg; |
1192 | } | 1192 | } |
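
The helper flattens the per-descriptor scatterlists queued on the channel into one logical stream. A hypothetical walk with descriptor A = {sgA0, sgA1} queued ahead of B = {sgB0}:

	desc = A;
	sg = idmac_sg_next(ichan, &desc, sgA0); /* -> sgA1, desc still A  */
	sg = idmac_sg_next(ichan, &desc, sgA1); /* -> sgB0, desc now B    */
	sg = idmac_sg_next(ichan, &desc, sgB0); /* -> NULL, queue drained */
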
1193 | 1193 | ||
1194 | /* | 1194 | /* |
1195 | * We have several possibilities here: | 1195 | * We have several possibilities here: |
1196 | * current BUF next BUF | 1196 | * current BUF next BUF |
1197 | * | 1197 | * |
1198 | * not last sg next not last sg | 1198 | * not last sg next not last sg |
1199 | * not last sg next last sg | 1199 | * not last sg next last sg |
1200 | * last sg first sg from next descriptor | 1200 | * last sg first sg from next descriptor |
1201 | * last sg NULL | 1201 | * last sg NULL |
1202 | * | 1202 | * |
1203 | * Besides, the descriptor queue might be empty or not. We process all these | 1203 | * Besides, the descriptor queue might be empty or not. We process all these |
1204 | * cases carefully. | 1204 | * cases carefully. |
1205 | */ | 1205 | */ |
1206 | static irqreturn_t idmac_interrupt(int irq, void *dev_id) | 1206 | static irqreturn_t idmac_interrupt(int irq, void *dev_id) |
1207 | { | 1207 | { |
1208 | struct idmac_channel *ichan = dev_id; | 1208 | struct idmac_channel *ichan = dev_id; |
1209 | struct device *dev = &ichan->dma_chan.dev->device; | 1209 | struct device *dev = &ichan->dma_chan.dev->device; |
1210 | unsigned int chan_id = ichan->dma_chan.chan_id; | 1210 | unsigned int chan_id = ichan->dma_chan.chan_id; |
1211 | struct scatterlist **sg, *sgnext, *sgnew = NULL; | 1211 | struct scatterlist **sg, *sgnext, *sgnew = NULL; |
1212 | /* Next transfer descriptor */ | 1212 | /* Next transfer descriptor */ |
1213 | struct idmac_tx_desc *desc, *descnew; | 1213 | struct idmac_tx_desc *desc, *descnew; |
1214 | dma_async_tx_callback callback; | 1214 | dma_async_tx_callback callback; |
1215 | void *callback_param; | 1215 | void *callback_param; |
1216 | bool done = false; | 1216 | bool done = false; |
1217 | u32 ready0, ready1, curbuf, err; | 1217 | u32 ready0, ready1, curbuf, err; |
1218 | unsigned long flags; | 1218 | unsigned long flags; |
1219 | 1219 | ||
1220 | /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ | 1220 | /* IDMAC has cleared the respective BUFx_RDY bit, we manage the buffer */ |
1221 | 1221 | ||
1222 | dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer); | 1222 | dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer); |
1223 | 1223 | ||
1224 | spin_lock_irqsave(&ipu_data.lock, flags); | 1224 | spin_lock_irqsave(&ipu_data.lock, flags); |
1225 | 1225 | ||
1226 | ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); | 1226 | ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY); |
1227 | ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); | 1227 | ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY); |
1228 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); | 1228 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); |
1229 | err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4); | 1229 | err = idmac_read_ipureg(&ipu_data, IPU_INT_STAT_4); |
1230 | 1230 | ||
1231 | if (err & (1 << chan_id)) { | 1231 | if (err & (1 << chan_id)) { |
1232 | idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4); | 1232 | idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4); |
1233 | spin_unlock_irqrestore(&ipu_data.lock, flags); | 1233 | spin_unlock_irqrestore(&ipu_data.lock, flags); |
1234 | /* | 1234 | /* |
1235 | * Doing this | 1235 | * Doing this |
1236 | * ichan->sg[0] = ichan->sg[1] = NULL; | 1236 | * ichan->sg[0] = ichan->sg[1] = NULL; |
1237 | * you can force channel re-enable on the next tx_submit(), but | 1237 | * you can force channel re-enable on the next tx_submit(), but |
1238 | * this is dirty - think about descriptors with multiple | 1238 | * this is dirty - think about descriptors with multiple |
1239 | * sg elements. | 1239 | * sg elements. |
1240 | */ | 1240 | */ |
1241 | dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n", | 1241 | dev_warn(dev, "NFB4EOF on channel %d, ready %x, %x, cur %x\n", |
1242 | chan_id, ready0, ready1, curbuf); | 1242 | chan_id, ready0, ready1, curbuf); |
1243 | return IRQ_HANDLED; | 1243 | return IRQ_HANDLED; |
1244 | } | 1244 | } |
1245 | spin_unlock_irqrestore(&ipu_data.lock, flags); | 1245 | spin_unlock_irqrestore(&ipu_data.lock, flags); |
1246 | 1246 | ||
1247 | /* Other interrupts do not interfere with this channel */ | 1247 | /* Other interrupts do not interfere with this channel */ |
1248 | spin_lock(&ichan->lock); | 1248 | spin_lock(&ichan->lock); |
1249 | if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && | 1249 | if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 && |
1250 | ((curbuf >> chan_id) & 1) == ichan->active_buffer && | 1250 | ((curbuf >> chan_id) & 1) == ichan->active_buffer && |
1251 | !list_is_last(ichan->queue.next, &ichan->queue))) { | 1251 | !list_is_last(ichan->queue.next, &ichan->queue))) { |
1252 | int i = 100; | 1252 | int i = 100; |
1253 | 1253 | ||
1254 | /* This doesn't help. See comment in ipu_disable_channel() */ | 1254 | /* This doesn't help. See comment in ipu_disable_channel() */ |
1255 | while (--i) { | 1255 | while (--i) { |
1256 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); | 1256 | curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF); |
1257 | if (((curbuf >> chan_id) & 1) != ichan->active_buffer) | 1257 | if (((curbuf >> chan_id) & 1) != ichan->active_buffer) |
1258 | break; | 1258 | break; |
1259 | cpu_relax(); | 1259 | cpu_relax(); |
1260 | } | 1260 | } |
1261 | 1261 | ||
1262 | if (!i) { | 1262 | if (!i) { |
1263 | spin_unlock(&ichan->lock); | 1263 | spin_unlock(&ichan->lock); |
1264 | dev_dbg(dev, | 1264 | dev_dbg(dev, |
1265 | "IRQ on active buffer on channel %x, active " | 1265 | "IRQ on active buffer on channel %x, active " |
1266 | "%d, ready %x, %x, current %x!\n", chan_id, | 1266 | "%d, ready %x, %x, current %x!\n", chan_id, |
1267 | ichan->active_buffer, ready0, ready1, curbuf); | 1267 | ichan->active_buffer, ready0, ready1, curbuf); |
1268 | return IRQ_NONE; | 1268 | return IRQ_NONE; |
1269 | } else | 1269 | } else |
1270 | dev_dbg(dev, | 1270 | dev_dbg(dev, |
1271 | "Buffer deactivated on channel %x, active " | 1271 | "Buffer deactivated on channel %x, active " |
1272 | "%d, ready %x, %x, current %x, rest %d!\n", chan_id, | 1272 | "%d, ready %x, %x, current %x, rest %d!\n", chan_id, |
1273 | ichan->active_buffer, ready0, ready1, curbuf, i); | 1273 | ichan->active_buffer, ready0, ready1, curbuf, i); |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || | 1276 | if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || |
1277 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) | 1277 | (!ichan->active_buffer && (ready0 >> chan_id) & 1) |
1278 | )) { | 1278 | )) { |
1279 | spin_unlock(&ichan->lock); | 1279 | spin_unlock(&ichan->lock); |
1280 | dev_dbg(dev, | 1280 | dev_dbg(dev, |
1281 | "IRQ with active buffer still ready on channel %x, " | 1281 | "IRQ with active buffer still ready on channel %x, " |
1282 | "active %d, ready %x, %x!\n", chan_id, | 1282 | "active %d, ready %x, %x!\n", chan_id, |
1283 | ichan->active_buffer, ready0, ready1); | 1283 | ichan->active_buffer, ready0, ready1); |
1284 | return IRQ_NONE; | 1284 | return IRQ_NONE; |
1285 | } | 1285 | } |
1286 | 1286 | ||
1287 | if (unlikely(list_empty(&ichan->queue))) { | 1287 | if (unlikely(list_empty(&ichan->queue))) { |
1288 | ichan->sg[ichan->active_buffer] = NULL; | 1288 | ichan->sg[ichan->active_buffer] = NULL; |
1289 | spin_unlock(&ichan->lock); | 1289 | spin_unlock(&ichan->lock); |
1290 | dev_err(dev, | 1290 | dev_err(dev, |
1291 | "IRQ without queued buffers on channel %x, active %d, " | 1291 | "IRQ without queued buffers on channel %x, active %d, " |
1292 | "ready %x, %x!\n", chan_id, | 1292 | "ready %x, %x!\n", chan_id, |
1293 | ichan->active_buffer, ready0, ready1); | 1293 | ichan->active_buffer, ready0, ready1); |
1294 | return IRQ_NONE; | 1294 | return IRQ_NONE; |
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | /* | 1297 | /* |
1298 | * active_buffer is a software flag, it shows which buffer we are | 1298 | * active_buffer is a software flag, it shows which buffer we are |
1299 | * currently expecting back from the hardware, IDMAC should be | 1299 | * currently expecting back from the hardware, IDMAC should be |
1300 | * processing the other buffer already | 1300 | * processing the other buffer already |
1301 | */ | 1301 | */ |
1302 | sg = &ichan->sg[ichan->active_buffer]; | 1302 | sg = &ichan->sg[ichan->active_buffer]; |
1303 | sgnext = ichan->sg[!ichan->active_buffer]; | 1303 | sgnext = ichan->sg[!ichan->active_buffer]; |
1304 | 1304 | ||
1305 | if (!*sg) { | 1305 | if (!*sg) { |
1306 | spin_unlock(&ichan->lock); | 1306 | spin_unlock(&ichan->lock); |
1307 | return IRQ_HANDLED; | 1307 | return IRQ_HANDLED; |
1308 | } | 1308 | } |
1309 | 1309 | ||
1310 | desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); | 1310 | desc = list_entry(ichan->queue.next, struct idmac_tx_desc, list); |
1311 | descnew = desc; | 1311 | descnew = desc; |
1312 | 1312 | ||
1313 | dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", | 1313 | dev_dbg(dev, "IDMAC irq %d, dma 0x%08x, next dma 0x%08x, current %d, curbuf 0x%08x\n", |
1314 | irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); | 1314 | irq, sg_dma_address(*sg), sgnext ? sg_dma_address(sgnext) : 0, ichan->active_buffer, curbuf); |
1315 | 1315 | ||
1316 | /* Find the descriptor of sgnext */ | 1316 | /* Find the descriptor of sgnext */ |
1317 | sgnew = idmac_sg_next(ichan, &descnew, *sg); | 1317 | sgnew = idmac_sg_next(ichan, &descnew, *sg); |
1318 | if (sgnext != sgnew) | 1318 | if (sgnext != sgnew) |
1319 | dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew); | 1319 | dev_err(dev, "Submitted buffer %p, next buffer %p\n", sgnext, sgnew); |
1320 | 1320 | ||
1321 | /* | 1321 | /* |
1322 | * if sgnext == NULL, sg must be the last element in a scatterlist and | 1322 | * if sgnext == NULL, sg must be the last element in a scatterlist and |
1323 | * the queue must be empty | 1323 | * the queue must be empty |
1324 | */ | 1324 | */ |
1325 | if (unlikely(!sgnext)) { | 1325 | if (unlikely(!sgnext)) { |
1326 | if (!WARN_ON(sg_next(*sg))) | 1326 | if (!WARN_ON(sg_next(*sg))) |
1327 | dev_dbg(dev, "Underrun on channel %x\n", chan_id); | 1327 | dev_dbg(dev, "Underrun on channel %x\n", chan_id); |
1328 | ichan->sg[!ichan->active_buffer] = sgnew; | 1328 | ichan->sg[!ichan->active_buffer] = sgnew; |
1329 | 1329 | ||
1330 | if (unlikely(sgnew)) { | 1330 | if (unlikely(sgnew)) { |
1331 | ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer); | 1331 | ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer); |
1332 | } else { | 1332 | } else { |
1333 | spin_lock_irqsave(&ipu_data.lock, flags); | 1333 | spin_lock_irqsave(&ipu_data.lock, flags); |
1334 | ipu_ic_disable_task(&ipu_data, chan_id); | 1334 | ipu_ic_disable_task(&ipu_data, chan_id); |
1335 | spin_unlock_irqrestore(&ipu_data.lock, flags); | 1335 | spin_unlock_irqrestore(&ipu_data.lock, flags); |
1336 | ichan->status = IPU_CHANNEL_READY; | 1336 | ichan->status = IPU_CHANNEL_READY; |
1337 | /* Continue to check for complete descriptor */ | 1337 | /* Continue to check for complete descriptor */ |
1338 | } | 1338 | } |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | /* Calculate and submit the next sg element */ | 1341 | /* Calculate and submit the next sg element */ |
1342 | sgnew = idmac_sg_next(ichan, &descnew, sgnew); | 1342 | sgnew = idmac_sg_next(ichan, &descnew, sgnew); |
1343 | 1343 | ||
1344 | if (unlikely(!sg_next(*sg)) || !sgnext) { | 1344 | if (unlikely(!sg_next(*sg)) || !sgnext) { |
1345 | /* | 1345 | /* |
1346 | * Last element in scatterlist done, remove from the queue, | 1346 | * Last element in scatterlist done, remove from the queue, |
1347 | * _init for debugging | 1347 | * _init for debugging |
1348 | */ | 1348 | */ |
1349 | list_del_init(&desc->list); | 1349 | list_del_init(&desc->list); |
1350 | done = true; | 1350 | done = true; |
1351 | } | 1351 | } |
1352 | 1352 | ||
1353 | *sg = sgnew; | 1353 | *sg = sgnew; |
1354 | 1354 | ||
1355 | if (likely(sgnew) && | 1355 | if (likely(sgnew) && |
1356 | ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { | 1356 | ipu_submit_buffer(ichan, descnew, sgnew, ichan->active_buffer) < 0) { |
1357 | callback = descnew->txd.callback; | 1357 | callback = descnew->txd.callback; |
1358 | callback_param = descnew->txd.callback_param; | 1358 | callback_param = descnew->txd.callback_param; |
1359 | spin_unlock(&ichan->lock); | 1359 | spin_unlock(&ichan->lock); |
1360 | if (callback) | 1360 | if (callback) |
1361 | callback(callback_param); | 1361 | callback(callback_param); |
1362 | spin_lock(&ichan->lock); | 1362 | spin_lock(&ichan->lock); |
1363 | } | 1363 | } |
1364 | 1364 | ||
1365 | /* Flip the active buffer - even if update above failed */ | 1365 | /* Flip the active buffer - even if update above failed */ |
1366 | ichan->active_buffer = !ichan->active_buffer; | 1366 | ichan->active_buffer = !ichan->active_buffer; |
1367 | if (done) | 1367 | if (done) |
1368 | ichan->completed = desc->txd.cookie; | 1368 | ichan->completed = desc->txd.cookie; |
1369 | 1369 | ||
1370 | callback = desc->txd.callback; | 1370 | callback = desc->txd.callback; |
1371 | callback_param = desc->txd.callback_param; | 1371 | callback_param = desc->txd.callback_param; |
1372 | 1372 | ||
1373 | spin_unlock(&ichan->lock); | 1373 | spin_unlock(&ichan->lock); |
1374 | 1374 | ||
1375 | if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback) | 1375 | if (done && (desc->txd.flags & DMA_PREP_INTERRUPT) && callback) |
1376 | callback(callback_param); | 1376 | callback(callback_param); |
1377 | 1377 | ||
1378 | return IRQ_HANDLED; | 1378 | return IRQ_HANDLED; |
1379 | } | 1379 | } |
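
Stripped of the error paths, the steady state of the handler is a plain ping-pong, roughly (a compressed sketch of the logic above, not separate code):

	done = ichan->active_buffer;			/* buffer this IRQ reports */
	ipu_submit_buffer(ichan, descnew, sgnew, done);	/* re-arm it with new sg   */
	ichan->active_buffer = !done;			/* hardware runs the other */
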
1380 | 1380 | ||
1381 | static void ipu_gc_tasklet(unsigned long arg) | 1381 | static void ipu_gc_tasklet(unsigned long arg) |
1382 | { | 1382 | { |
1383 | struct ipu *ipu = (struct ipu *)arg; | 1383 | struct ipu *ipu = (struct ipu *)arg; |
1384 | int i; | 1384 | int i; |
1385 | 1385 | ||
1386 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { | 1386 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { |
1387 | struct idmac_channel *ichan = ipu->channel + i; | 1387 | struct idmac_channel *ichan = ipu->channel + i; |
1388 | struct idmac_tx_desc *desc; | 1388 | struct idmac_tx_desc *desc; |
1389 | unsigned long flags; | 1389 | unsigned long flags; |
1390 | struct scatterlist *sg; | 1390 | struct scatterlist *sg; |
1391 | int j, k; | 1391 | int j, k; |
1392 | 1392 | ||
1393 | for (j = 0; j < ichan->n_tx_desc; j++) { | 1393 | for (j = 0; j < ichan->n_tx_desc; j++) { |
1394 | desc = ichan->desc + j; | 1394 | desc = ichan->desc + j; |
1395 | spin_lock_irqsave(&ichan->lock, flags); | 1395 | spin_lock_irqsave(&ichan->lock, flags); |
1396 | if (async_tx_test_ack(&desc->txd)) { | 1396 | if (async_tx_test_ack(&desc->txd)) { |
1397 | list_move(&desc->list, &ichan->free_list); | 1397 | list_move(&desc->list, &ichan->free_list); |
1398 | for_each_sg(desc->sg, sg, desc->sg_len, k) { | 1398 | for_each_sg(desc->sg, sg, desc->sg_len, k) { |
1399 | if (ichan->sg[0] == sg) | 1399 | if (ichan->sg[0] == sg) |
1400 | ichan->sg[0] = NULL; | 1400 | ichan->sg[0] = NULL; |
1401 | else if (ichan->sg[1] == sg) | 1401 | else if (ichan->sg[1] == sg) |
1402 | ichan->sg[1] = NULL; | 1402 | ichan->sg[1] = NULL; |
1403 | } | 1403 | } |
1404 | async_tx_clear_ack(&desc->txd); | 1404 | async_tx_clear_ack(&desc->txd); |
1405 | } | 1405 | } |
1406 | spin_unlock_irqrestore(&ichan->lock, flags); | 1406 | spin_unlock_irqrestore(&ichan->lock, flags); |
1407 | } | 1407 | } |
1408 | } | 1408 | } |
1409 | } | 1409 | } |
1410 | 1410 | ||
1411 | /* Allocate and initialise a transfer descriptor. */ | 1411 | /* Allocate and initialise a transfer descriptor. */ |
1412 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, | 1412 | static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan, |
1413 | struct scatterlist *sgl, unsigned int sg_len, | 1413 | struct scatterlist *sgl, unsigned int sg_len, |
1414 | enum dma_data_direction direction, unsigned long tx_flags) | 1414 | enum dma_data_direction direction, unsigned long tx_flags) |
1415 | { | 1415 | { |
1416 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1416 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1417 | struct idmac_tx_desc *desc = NULL; | 1417 | struct idmac_tx_desc *desc = NULL; |
1418 | struct dma_async_tx_descriptor *txd = NULL; | 1418 | struct dma_async_tx_descriptor *txd = NULL; |
1419 | unsigned long flags; | 1419 | unsigned long flags; |
1420 | 1420 | ||
1421 | /* We can only handle these three channels so far */ | 1421 | /* We can only handle these three channels so far */ |
1422 | if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 && | 1422 | if (chan->chan_id != IDMAC_SDC_0 && chan->chan_id != IDMAC_SDC_1 && |
1423 | chan->chan_id != IDMAC_IC_7) | 1423 | chan->chan_id != IDMAC_IC_7) |
1424 | return NULL; | 1424 | return NULL; |
1425 | 1425 | ||
1426 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { | 1426 | if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) { |
1427 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); | 1427 | dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction); |
1428 | return NULL; | 1428 | return NULL; |
1429 | } | 1429 | } |
1430 | 1430 | ||
1431 | mutex_lock(&ichan->chan_mutex); | 1431 | mutex_lock(&ichan->chan_mutex); |
1432 | 1432 | ||
1433 | spin_lock_irqsave(&ichan->lock, flags); | 1433 | spin_lock_irqsave(&ichan->lock, flags); |
1434 | if (!list_empty(&ichan->free_list)) { | 1434 | if (!list_empty(&ichan->free_list)) { |
1435 | desc = list_entry(ichan->free_list.next, | 1435 | desc = list_entry(ichan->free_list.next, |
1436 | struct idmac_tx_desc, list); | 1436 | struct idmac_tx_desc, list); |
1437 | 1437 | ||
1438 | list_del_init(&desc->list); | 1438 | list_del_init(&desc->list); |
1439 | 1439 | ||
1440 | desc->sg_len = sg_len; | 1440 | desc->sg_len = sg_len; |
1441 | desc->sg = sgl; | 1441 | desc->sg = sgl; |
1442 | txd = &desc->txd; | 1442 | txd = &desc->txd; |
1443 | txd->flags = tx_flags; | 1443 | txd->flags = tx_flags; |
1444 | } | 1444 | } |
1445 | spin_unlock_irqrestore(&ichan->lock, flags); | 1445 | spin_unlock_irqrestore(&ichan->lock, flags); |
1446 | 1446 | ||
1447 | mutex_unlock(&ichan->chan_mutex); | 1447 | mutex_unlock(&ichan->chan_mutex); |
1448 | 1448 | ||
1449 | tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet); | 1449 | tasklet_schedule(&to_ipu(to_idmac(chan->device))->tasklet); |
1450 | 1450 | ||
1451 | return txd; | 1451 | return txd; |
1452 | } | 1452 | } |
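
Annotation: the descriptor pool here is fixed at n_tx_desc entries, so idmac_prep_slave_sg() simply returns NULL when the free list is empty and the client must retry later. A hedged client-side sketch, assuming "buf" and "len" describe a buffer and the scatterlist outlives the transfer (the driver keeps a pointer to it in desc->sg):

	static dma_cookie_t example_submit(struct dma_chan *chan,
			struct scatterlist *sg, void *buf, size_t len)
	{
		struct dma_async_tx_descriptor *txd;

		sg_init_one(sg, buf, len);
		dma_map_sg(chan->device->dev, sg, 1, DMA_TO_DEVICE);

		txd = chan->device->device_prep_slave_sg(chan, sg, 1,
				DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
		if (!txd)
			return -ENOMEM;	/* free list exhausted - retry later */

		return txd->tx_submit(txd);
	}
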
1453 | 1453 | ||
1454 | /* Re-select the current buffer and re-activate the channel */ | 1454 | /* Re-select the current buffer and re-activate the channel */ |
1455 | static void idmac_issue_pending(struct dma_chan *chan) | 1455 | static void idmac_issue_pending(struct dma_chan *chan) |
1456 | { | 1456 | { |
1457 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1457 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1458 | struct idmac *idmac = to_idmac(chan->device); | 1458 | struct idmac *idmac = to_idmac(chan->device); |
1459 | struct ipu *ipu = to_ipu(idmac); | 1459 | struct ipu *ipu = to_ipu(idmac); |
1460 | unsigned long flags; | 1460 | unsigned long flags; |
1461 | 1461 | ||
1462 | /* This is not always needed, but doesn't hurt either */ | 1462 | /* This is not always needed, but doesn't hurt either */ |
1463 | spin_lock_irqsave(&ipu->lock, flags); | 1463 | spin_lock_irqsave(&ipu->lock, flags); |
1464 | ipu_select_buffer(chan->chan_id, ichan->active_buffer); | 1464 | ipu_select_buffer(chan->chan_id, ichan->active_buffer); |
1465 | spin_unlock_irqrestore(&ipu->lock, flags); | 1465 | spin_unlock_irqrestore(&ipu->lock, flags); |
1466 | 1466 | ||
1467 | /* | 1467 | /* |
1468 | * We might need to perform some parts of the initialisation from | 1468 | * We might need to perform some parts of the initialisation from |
1469 | * ipu_enable_channel(), but not all of it: we do not want to reset to | 1469 | * ipu_enable_channel(), but not all of it: we do not want to reset to |
1470 | * buffer 0 or set the priority again, but re-enabling the task and | 1470 | * buffer 0 or set the priority again, but re-enabling the task and |
1471 | * the channel might be a good idea. | 1471 | * the channel might be a good idea. |
1472 | */ | 1472 | */ |
1473 | } | 1473 | } |
1474 | 1474 | ||
1475 | static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) | 1475 | static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
1476 | unsigned long arg) | ||
1476 | { | 1477 | { |
1477 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1478 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1478 | struct idmac *idmac = to_idmac(chan->device); | 1479 | struct idmac *idmac = to_idmac(chan->device); |
1479 | unsigned long flags; | 1480 | unsigned long flags; |
1480 | int i; | 1481 | int i; |
1481 | 1482 | ||
1482 | /* Only supports DMA_TERMINATE_ALL */ | 1483 | /* Only supports DMA_TERMINATE_ALL */ |
1483 | if (cmd != DMA_TERMINATE_ALL) | 1484 | if (cmd != DMA_TERMINATE_ALL) |
1484 | return -ENXIO; | 1485 | return -ENXIO; |
1485 | 1486 | ||
1486 | ipu_disable_channel(idmac, ichan, | 1487 | ipu_disable_channel(idmac, ichan, |
1487 | ichan->status >= IPU_CHANNEL_ENABLED); | 1488 | ichan->status >= IPU_CHANNEL_ENABLED); |
1488 | 1489 | ||
1489 | tasklet_disable(&to_ipu(idmac)->tasklet); | 1490 | tasklet_disable(&to_ipu(idmac)->tasklet); |
1490 | 1491 | ||
1491 | /* ichan->queue is modified in ISR, have to spinlock */ | 1492 | /* ichan->queue is modified in ISR, have to spinlock */ |
1492 | spin_lock_irqsave(&ichan->lock, flags); | 1493 | spin_lock_irqsave(&ichan->lock, flags); |
1493 | list_splice_init(&ichan->queue, &ichan->free_list); | 1494 | list_splice_init(&ichan->queue, &ichan->free_list); |
1494 | 1495 | ||
1495 | if (ichan->desc) | 1496 | if (ichan->desc) |
1496 | for (i = 0; i < ichan->n_tx_desc; i++) { | 1497 | for (i = 0; i < ichan->n_tx_desc; i++) { |
1497 | struct idmac_tx_desc *desc = ichan->desc + i; | 1498 | struct idmac_tx_desc *desc = ichan->desc + i; |
1498 | if (list_empty(&desc->list)) | 1499 | if (list_empty(&desc->list)) |
1499 | /* Descriptor was prepared, but not submitted */ | 1500 | /* Descriptor was prepared, but not submitted */ |
1500 | list_add(&desc->list, &ichan->free_list); | 1501 | list_add(&desc->list, &ichan->free_list); |
1501 | 1502 | ||
1502 | async_tx_clear_ack(&desc->txd); | 1503 | async_tx_clear_ack(&desc->txd); |
1503 | } | 1504 | } |
1504 | 1505 | ||
1505 | ichan->sg[0] = NULL; | 1506 | ichan->sg[0] = NULL; |
1506 | ichan->sg[1] = NULL; | 1507 | ichan->sg[1] = NULL; |
1507 | spin_unlock_irqrestore(&ichan->lock, flags); | 1508 | spin_unlock_irqrestore(&ichan->lock, flags); |
1508 | 1509 | ||
1509 | tasklet_enable(&to_ipu(idmac)->tasklet); | 1510 | tasklet_enable(&to_ipu(idmac)->tasklet); |
1510 | 1511 | ||
1511 | ichan->status = IPU_CHANNEL_INITIALIZED; | 1512 | ichan->status = IPU_CHANNEL_INITIALIZED; |
1512 | 1513 | ||
1513 | return 0; | 1514 | return 0; |
1514 | } | 1515 | } |
1515 | 1516 | ||
1516 | static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) | 1517 | static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
1518 | unsigned long arg) | ||
1517 | { | 1519 | { |
1518 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1520 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1519 | int ret; | 1521 | int ret; |
1520 | 1522 | ||
1521 | mutex_lock(&ichan->chan_mutex); | 1523 | mutex_lock(&ichan->chan_mutex); |
1522 | 1524 | ||
1523 | ret = __idmac_control(chan, cmd); | 1525 | ret = __idmac_control(chan, cmd, arg); |
1524 | 1526 | ||
1525 | mutex_unlock(&ichan->chan_mutex); | 1527 | mutex_unlock(&ichan->chan_mutex); |
1526 | 1528 | ||
1527 | return ret; | 1529 | return ret; |
1528 | } | 1530 | } |
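
Annotation: this pair of functions is where the interface change lands in this driver: the control hook gains an unsigned long argument, which idmac simply ignores because DMA_TERMINATE_ALL carries no data. A sketch of the calling convention after the change:

	static int example_terminate(struct dma_chan *chan)
	{
		/* DMA_TERMINATE_ALL carries no data, so callers pass 0 for
		 * the new argument; a later command could instead pass a
		 * pointer cast to unsigned long, ioctl()-style. */
		return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	}
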
1529 | 1531 | ||
1530 | #ifdef DEBUG | 1532 | #ifdef DEBUG |
1531 | static irqreturn_t ic_sof_irq(int irq, void *dev_id) | 1533 | static irqreturn_t ic_sof_irq(int irq, void *dev_id) |
1532 | { | 1534 | { |
1533 | struct idmac_channel *ichan = dev_id; | 1535 | struct idmac_channel *ichan = dev_id; |
1534 | printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n", | 1536 | printk(KERN_DEBUG "Got SOF IRQ %d on Channel %d\n", |
1535 | irq, ichan->dma_chan.chan_id); | 1537 | irq, ichan->dma_chan.chan_id); |
1536 | disable_irq_nosync(irq); | 1538 | disable_irq_nosync(irq); |
1537 | return IRQ_HANDLED; | 1539 | return IRQ_HANDLED; |
1538 | } | 1540 | } |
1539 | 1541 | ||
1540 | static irqreturn_t ic_eof_irq(int irq, void *dev_id) | 1542 | static irqreturn_t ic_eof_irq(int irq, void *dev_id) |
1541 | { | 1543 | { |
1542 | struct idmac_channel *ichan = dev_id; | 1544 | struct idmac_channel *ichan = dev_id; |
1543 | printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n", | 1545 | printk(KERN_DEBUG "Got EOF IRQ %d on Channel %d\n", |
1544 | irq, ichan->dma_chan.chan_id); | 1546 | irq, ichan->dma_chan.chan_id); |
1545 | disable_irq_nosync(irq); | 1547 | disable_irq_nosync(irq); |
1546 | return IRQ_HANDLED; | 1548 | return IRQ_HANDLED; |
1547 | } | 1549 | } |
1548 | 1550 | ||
1549 | static int ic_sof = -EINVAL, ic_eof = -EINVAL; | 1551 | static int ic_sof = -EINVAL, ic_eof = -EINVAL; |
1550 | #endif | 1552 | #endif |
1551 | 1553 | ||
1552 | static int idmac_alloc_chan_resources(struct dma_chan *chan) | 1554 | static int idmac_alloc_chan_resources(struct dma_chan *chan) |
1553 | { | 1555 | { |
1554 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1556 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1555 | struct idmac *idmac = to_idmac(chan->device); | 1557 | struct idmac *idmac = to_idmac(chan->device); |
1556 | int ret; | 1558 | int ret; |
1557 | 1559 | ||
1558 | /* dmaengine.c now guarantees to only offer free channels */ | 1560 | /* dmaengine.c now guarantees to only offer free channels */ |
1559 | BUG_ON(chan->client_count > 1); | 1561 | BUG_ON(chan->client_count > 1); |
1560 | WARN_ON(ichan->status != IPU_CHANNEL_FREE); | 1562 | WARN_ON(ichan->status != IPU_CHANNEL_FREE); |
1561 | 1563 | ||
1562 | chan->cookie = 1; | 1564 | chan->cookie = 1; |
1563 | ichan->completed = -ENXIO; | 1565 | ichan->completed = -ENXIO; |
1564 | 1566 | ||
1565 | ret = ipu_irq_map(chan->chan_id); | 1567 | ret = ipu_irq_map(chan->chan_id); |
1566 | if (ret < 0) | 1568 | if (ret < 0) |
1567 | goto eimap; | 1569 | goto eimap; |
1568 | 1570 | ||
1569 | ichan->eof_irq = ret; | 1571 | ichan->eof_irq = ret; |
1570 | 1572 | ||
1571 | /* | 1573 | /* |
1572 | * Important to first disable the channel, because maybe someone | 1574 | * Important to first disable the channel, because maybe someone |
1573 | * used it before us, e.g., the bootloader | 1575 | * used it before us, e.g., the bootloader |
1574 | */ | 1576 | */ |
1575 | ipu_disable_channel(idmac, ichan, true); | 1577 | ipu_disable_channel(idmac, ichan, true); |
1576 | 1578 | ||
1577 | ret = ipu_init_channel(idmac, ichan); | 1579 | ret = ipu_init_channel(idmac, ichan); |
1578 | if (ret < 0) | 1580 | if (ret < 0) |
1579 | goto eichan; | 1581 | goto eichan; |
1580 | 1582 | ||
1581 | ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, | 1583 | ret = request_irq(ichan->eof_irq, idmac_interrupt, 0, |
1582 | ichan->eof_name, ichan); | 1584 | ichan->eof_name, ichan); |
1583 | if (ret < 0) | 1585 | if (ret < 0) |
1584 | goto erirq; | 1586 | goto erirq; |
1585 | 1587 | ||
1586 | #ifdef DEBUG | 1588 | #ifdef DEBUG |
1587 | if (chan->chan_id == IDMAC_IC_7) { | 1589 | if (chan->chan_id == IDMAC_IC_7) { |
1588 | ic_sof = ipu_irq_map(69); | 1590 | ic_sof = ipu_irq_map(69); |
1589 | if (ic_sof > 0) | 1591 | if (ic_sof > 0) |
1590 | request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); | 1592 | request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); |
1591 | ic_eof = ipu_irq_map(70); | 1593 | ic_eof = ipu_irq_map(70); |
1592 | if (ic_eof > 0) | 1594 | if (ic_eof > 0) |
1593 | request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); | 1595 | request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); |
1594 | } | 1596 | } |
1595 | #endif | 1597 | #endif |
1596 | 1598 | ||
1597 | ichan->status = IPU_CHANNEL_INITIALIZED; | 1599 | ichan->status = IPU_CHANNEL_INITIALIZED; |
1598 | 1600 | ||
1599 | dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n", | 1601 | dev_dbg(&chan->dev->device, "Found channel 0x%x, irq %d\n", |
1600 | chan->chan_id, ichan->eof_irq); | 1602 | chan->chan_id, ichan->eof_irq); |
1601 | 1603 | ||
1602 | return ret; | 1604 | return ret; |
1603 | 1605 | ||
1604 | erirq: | 1606 | erirq: |
1605 | ipu_uninit_channel(idmac, ichan); | 1607 | ipu_uninit_channel(idmac, ichan); |
1606 | eichan: | 1608 | eichan: |
1607 | ipu_irq_unmap(chan->chan_id); | 1609 | ipu_irq_unmap(chan->chan_id); |
1608 | eimap: | 1610 | eimap: |
1609 | return ret; | 1611 | return ret; |
1610 | } | 1612 | } |
1611 | 1613 | ||
1612 | static void idmac_free_chan_resources(struct dma_chan *chan) | 1614 | static void idmac_free_chan_resources(struct dma_chan *chan) |
1613 | { | 1615 | { |
1614 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1616 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1615 | struct idmac *idmac = to_idmac(chan->device); | 1617 | struct idmac *idmac = to_idmac(chan->device); |
1616 | 1618 | ||
1617 | mutex_lock(&ichan->chan_mutex); | 1619 | mutex_lock(&ichan->chan_mutex); |
1618 | 1620 | ||
1619 | __idmac_control(chan, DMA_TERMINATE_ALL); | 1621 | __idmac_control(chan, DMA_TERMINATE_ALL, 0); |
1620 | 1622 | ||
1621 | if (ichan->status > IPU_CHANNEL_FREE) { | 1623 | if (ichan->status > IPU_CHANNEL_FREE) { |
1622 | #ifdef DEBUG | 1624 | #ifdef DEBUG |
1623 | if (chan->chan_id == IDMAC_IC_7) { | 1625 | if (chan->chan_id == IDMAC_IC_7) { |
1624 | if (ic_sof > 0) { | 1626 | if (ic_sof > 0) { |
1625 | free_irq(ic_sof, ichan); | 1627 | free_irq(ic_sof, ichan); |
1626 | ipu_irq_unmap(69); | 1628 | ipu_irq_unmap(69); |
1627 | ic_sof = -EINVAL; | 1629 | ic_sof = -EINVAL; |
1628 | } | 1630 | } |
1629 | if (ic_eof > 0) { | 1631 | if (ic_eof > 0) { |
1630 | free_irq(ic_eof, ichan); | 1632 | free_irq(ic_eof, ichan); |
1631 | ipu_irq_unmap(70); | 1633 | ipu_irq_unmap(70); |
1632 | ic_eof = -EINVAL; | 1634 | ic_eof = -EINVAL; |
1633 | } | 1635 | } |
1634 | } | 1636 | } |
1635 | #endif | 1637 | #endif |
1636 | free_irq(ichan->eof_irq, ichan); | 1638 | free_irq(ichan->eof_irq, ichan); |
1637 | ipu_irq_unmap(chan->chan_id); | 1639 | ipu_irq_unmap(chan->chan_id); |
1638 | } | 1640 | } |
1639 | 1641 | ||
1640 | ichan->status = IPU_CHANNEL_FREE; | 1642 | ichan->status = IPU_CHANNEL_FREE; |
1641 | 1643 | ||
1642 | ipu_uninit_channel(idmac, ichan); | 1644 | ipu_uninit_channel(idmac, ichan); |
1643 | 1645 | ||
1644 | mutex_unlock(&ichan->chan_mutex); | 1646 | mutex_unlock(&ichan->chan_mutex); |
1645 | 1647 | ||
1646 | tasklet_schedule(&to_ipu(idmac)->tasklet); | 1648 | tasklet_schedule(&to_ipu(idmac)->tasklet); |
1647 | } | 1649 | } |
1648 | 1650 | ||
1649 | static enum dma_status idmac_tx_status(struct dma_chan *chan, | 1651 | static enum dma_status idmac_tx_status(struct dma_chan *chan, |
1650 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 1652 | dma_cookie_t cookie, struct dma_tx_state *txstate) |
1651 | { | 1653 | { |
1652 | struct idmac_channel *ichan = to_idmac_chan(chan); | 1654 | struct idmac_channel *ichan = to_idmac_chan(chan); |
1653 | 1655 | ||
1654 | dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0); | 1656 | dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0); |
1655 | if (cookie != chan->cookie) | 1657 | if (cookie != chan->cookie) |
1656 | return DMA_ERROR; | 1658 | return DMA_ERROR; |
1657 | return DMA_SUCCESS; | 1659 | return DMA_SUCCESS; |
1658 | } | 1660 | } |
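
Annotation: completion here is reported purely by cookie comparison. A client-side polling sketch, where "cookie" is assumed to be the value returned by tx_submit():

	static bool example_done(struct dma_chan *chan, dma_cookie_t cookie)
	{
		struct dma_tx_state state;

		/* state.last reports the last completed cookie */
		return chan->device->device_tx_status(chan, cookie, &state)
			== DMA_SUCCESS;
	}
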
1659 | 1661 | ||
1660 | static int __init ipu_idmac_init(struct ipu *ipu) | 1662 | static int __init ipu_idmac_init(struct ipu *ipu) |
1661 | { | 1663 | { |
1662 | struct idmac *idmac = &ipu->idmac; | 1664 | struct idmac *idmac = &ipu->idmac; |
1663 | struct dma_device *dma = &idmac->dma; | 1665 | struct dma_device *dma = &idmac->dma; |
1664 | int i; | 1666 | int i; |
1665 | 1667 | ||
1666 | dma_cap_set(DMA_SLAVE, dma->cap_mask); | 1668 | dma_cap_set(DMA_SLAVE, dma->cap_mask); |
1667 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); | 1669 | dma_cap_set(DMA_PRIVATE, dma->cap_mask); |
1668 | 1670 | ||
1669 | /* Compulsory common fields */ | 1671 | /* Compulsory common fields */ |
1670 | dma->dev = ipu->dev; | 1672 | dma->dev = ipu->dev; |
1671 | dma->device_alloc_chan_resources = idmac_alloc_chan_resources; | 1673 | dma->device_alloc_chan_resources = idmac_alloc_chan_resources; |
1672 | dma->device_free_chan_resources = idmac_free_chan_resources; | 1674 | dma->device_free_chan_resources = idmac_free_chan_resources; |
1673 | dma->device_tx_status = idmac_tx_status; | 1675 | dma->device_tx_status = idmac_tx_status; |
1674 | dma->device_issue_pending = idmac_issue_pending; | 1676 | dma->device_issue_pending = idmac_issue_pending; |
1675 | 1677 | ||
1676 | /* Compulsory for DMA_SLAVE fields */ | 1678 | /* Compulsory for DMA_SLAVE fields */ |
1677 | dma->device_prep_slave_sg = idmac_prep_slave_sg; | 1679 | dma->device_prep_slave_sg = idmac_prep_slave_sg; |
1678 | dma->device_control = idmac_control; | 1680 | dma->device_control = idmac_control; |
1679 | 1681 | ||
1680 | INIT_LIST_HEAD(&dma->channels); | 1682 | INIT_LIST_HEAD(&dma->channels); |
1681 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { | 1683 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { |
1682 | struct idmac_channel *ichan = ipu->channel + i; | 1684 | struct idmac_channel *ichan = ipu->channel + i; |
1683 | struct dma_chan *dma_chan = &ichan->dma_chan; | 1685 | struct dma_chan *dma_chan = &ichan->dma_chan; |
1684 | 1686 | ||
1685 | spin_lock_init(&ichan->lock); | 1687 | spin_lock_init(&ichan->lock); |
1686 | mutex_init(&ichan->chan_mutex); | 1688 | mutex_init(&ichan->chan_mutex); |
1687 | 1689 | ||
1688 | ichan->status = IPU_CHANNEL_FREE; | 1690 | ichan->status = IPU_CHANNEL_FREE; |
1689 | ichan->sec_chan_en = false; | 1691 | ichan->sec_chan_en = false; |
1690 | ichan->completed = -ENXIO; | 1692 | ichan->completed = -ENXIO; |
1691 | snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); | 1693 | snprintf(ichan->eof_name, sizeof(ichan->eof_name), "IDMAC EOF %d", i); |
1692 | 1694 | ||
1693 | dma_chan->device = &idmac->dma; | 1695 | dma_chan->device = &idmac->dma; |
1694 | dma_chan->cookie = 1; | 1696 | dma_chan->cookie = 1; |
1695 | dma_chan->chan_id = i; | 1697 | dma_chan->chan_id = i; |
1696 | list_add_tail(&dma_chan->device_node, &dma->channels); | 1698 | list_add_tail(&dma_chan->device_node, &dma->channels); |
1697 | } | 1699 | } |
1698 | 1700 | ||
1699 | idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF); | 1701 | idmac_write_icreg(ipu, 0x00000070, IDMAC_CONF); |
1700 | 1702 | ||
1701 | return dma_async_device_register(&idmac->dma); | 1703 | return dma_async_device_register(&idmac->dma); |
1702 | } | 1704 | } |
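
Annotation: since the device sets DMA_PRIVATE, these channels are only handed out through dma_request_channel(). A minimal client-side sketch:

	static struct dma_chan *example_request(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, NULL, NULL);	/* filter fn optional */
	}
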
1703 | 1705 | ||
1704 | static void __exit ipu_idmac_exit(struct ipu *ipu) | 1706 | static void __exit ipu_idmac_exit(struct ipu *ipu) |
1705 | { | 1707 | { |
1706 | int i; | 1708 | int i; |
1707 | struct idmac *idmac = &ipu->idmac; | 1709 | struct idmac *idmac = &ipu->idmac; |
1708 | 1710 | ||
1709 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { | 1711 | for (i = 0; i < IPU_CHANNELS_NUM; i++) { |
1710 | struct idmac_channel *ichan = ipu->channel + i; | 1712 | struct idmac_channel *ichan = ipu->channel + i; |
1711 | 1713 | ||
1712 | idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL); | 1714 | idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); |
1713 | idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0); | 1715 | idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0); |
1714 | } | 1716 | } |
1715 | 1717 | ||
1716 | dma_async_device_unregister(&idmac->dma); | 1718 | dma_async_device_unregister(&idmac->dma); |
1717 | } | 1719 | } |
1718 | 1720 | ||
1719 | /***************************************************************************** | 1721 | /***************************************************************************** |
1720 | * IPU common probe / remove | 1722 | * IPU common probe / remove |
1721 | */ | 1723 | */ |
1722 | 1724 | ||
1723 | static int __init ipu_probe(struct platform_device *pdev) | 1725 | static int __init ipu_probe(struct platform_device *pdev) |
1724 | { | 1726 | { |
1725 | struct ipu_platform_data *pdata = pdev->dev.platform_data; | 1727 | struct ipu_platform_data *pdata = pdev->dev.platform_data; |
1726 | struct resource *mem_ipu, *mem_ic; | 1728 | struct resource *mem_ipu, *mem_ic; |
1727 | int ret; | 1729 | int ret; |
1728 | 1730 | ||
1729 | spin_lock_init(&ipu_data.lock); | 1731 | spin_lock_init(&ipu_data.lock); |
1730 | 1732 | ||
1731 | mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1733 | mem_ipu = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1732 | mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 1734 | mem_ic = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
1733 | if (!pdata || !mem_ipu || !mem_ic) | 1735 | if (!pdata || !mem_ipu || !mem_ic) |
1734 | return -EINVAL; | 1736 | return -EINVAL; |
1735 | 1737 | ||
1736 | ipu_data.dev = &pdev->dev; | 1738 | ipu_data.dev = &pdev->dev; |
1737 | 1739 | ||
1738 | platform_set_drvdata(pdev, &ipu_data); | 1740 | platform_set_drvdata(pdev, &ipu_data); |
1739 | 1741 | ||
1740 | ret = platform_get_irq(pdev, 0); | 1742 | ret = platform_get_irq(pdev, 0); |
1741 | if (ret < 0) | 1743 | if (ret < 0) |
1742 | goto err_noirq; | 1744 | goto err_noirq; |
1743 | 1745 | ||
1744 | ipu_data.irq_fn = ret; | 1746 | ipu_data.irq_fn = ret; |
1745 | ret = platform_get_irq(pdev, 1); | 1747 | ret = platform_get_irq(pdev, 1); |
1746 | if (ret < 0) | 1748 | if (ret < 0) |
1747 | goto err_noirq; | 1749 | goto err_noirq; |
1748 | 1750 | ||
1749 | ipu_data.irq_err = ret; | 1751 | ipu_data.irq_err = ret; |
1750 | ipu_data.irq_base = pdata->irq_base; | 1752 | ipu_data.irq_base = pdata->irq_base; |
1751 | 1753 | ||
1752 | dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n", | 1754 | dev_dbg(&pdev->dev, "fn irq %u, err irq %u, irq-base %u\n", |
1753 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); | 1755 | ipu_data.irq_fn, ipu_data.irq_err, ipu_data.irq_base); |
1754 | 1756 | ||
1755 | /* Remap IPU common registers */ | 1757 | /* Remap IPU common registers */ |
1756 | ipu_data.reg_ipu = ioremap(mem_ipu->start, | 1758 | ipu_data.reg_ipu = ioremap(mem_ipu->start, |
1757 | mem_ipu->end - mem_ipu->start + 1); | 1759 | mem_ipu->end - mem_ipu->start + 1); |
1758 | if (!ipu_data.reg_ipu) { | 1760 | if (!ipu_data.reg_ipu) { |
1759 | ret = -ENOMEM; | 1761 | ret = -ENOMEM; |
1760 | goto err_ioremap_ipu; | 1762 | goto err_ioremap_ipu; |
1761 | } | 1763 | } |
1762 | 1764 | ||
1763 | /* Remap Image Converter and Image DMA Controller registers */ | 1765 | /* Remap Image Converter and Image DMA Controller registers */ |
1764 | ipu_data.reg_ic = ioremap(mem_ic->start, | 1766 | ipu_data.reg_ic = ioremap(mem_ic->start, |
1765 | mem_ic->end - mem_ic->start + 1); | 1767 | mem_ic->end - mem_ic->start + 1); |
1766 | if (!ipu_data.reg_ic) { | 1768 | if (!ipu_data.reg_ic) { |
1767 | ret = -ENOMEM; | 1769 | ret = -ENOMEM; |
1768 | goto err_ioremap_ic; | 1770 | goto err_ioremap_ic; |
1769 | } | 1771 | } |
1770 | 1772 | ||
1771 | /* Get IPU clock */ | 1773 | /* Get IPU clock */ |
1772 | ipu_data.ipu_clk = clk_get(&pdev->dev, NULL); | 1774 | ipu_data.ipu_clk = clk_get(&pdev->dev, NULL); |
1773 | if (IS_ERR(ipu_data.ipu_clk)) { | 1775 | if (IS_ERR(ipu_data.ipu_clk)) { |
1774 | ret = PTR_ERR(ipu_data.ipu_clk); | 1776 | ret = PTR_ERR(ipu_data.ipu_clk); |
1775 | goto err_clk_get; | 1777 | goto err_clk_get; |
1776 | } | 1778 | } |
1777 | 1779 | ||
1778 | /* Make sure IPU HSP clock is running */ | 1780 | /* Make sure IPU HSP clock is running */ |
1779 | clk_enable(ipu_data.ipu_clk); | 1781 | clk_enable(ipu_data.ipu_clk); |
1780 | 1782 | ||
1781 | /* Disable all interrupts */ | 1783 | /* Disable all interrupts */ |
1782 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1); | 1784 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1); |
1783 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2); | 1785 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_2); |
1784 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3); | 1786 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_3); |
1785 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4); | 1787 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_4); |
1786 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5); | 1788 | idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_5); |
1787 | 1789 | ||
1788 | dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name, | 1790 | dev_dbg(&pdev->dev, "%s @ 0x%08lx, fn irq %u, err irq %u\n", pdev->name, |
1789 | (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err); | 1791 | (unsigned long)mem_ipu->start, ipu_data.irq_fn, ipu_data.irq_err); |
1790 | 1792 | ||
1791 | ret = ipu_irq_attach_irq(&ipu_data, pdev); | 1793 | ret = ipu_irq_attach_irq(&ipu_data, pdev); |
1792 | if (ret < 0) | 1794 | if (ret < 0) |
1793 | goto err_attach_irq; | 1795 | goto err_attach_irq; |
1794 | 1796 | ||
1795 | /* Initialize DMA engine */ | 1797 | /* Initialize DMA engine */ |
1796 | ret = ipu_idmac_init(&ipu_data); | 1798 | ret = ipu_idmac_init(&ipu_data); |
1797 | if (ret < 0) | 1799 | if (ret < 0) |
1798 | goto err_idmac_init; | 1800 | goto err_idmac_init; |
1799 | 1801 | ||
1800 | tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data); | 1802 | tasklet_init(&ipu_data.tasklet, ipu_gc_tasklet, (unsigned long)&ipu_data); |
1801 | 1803 | ||
1802 | ipu_data.dev = &pdev->dev; | 1804 | ipu_data.dev = &pdev->dev; |
1803 | 1805 | ||
1804 | dev_dbg(ipu_data.dev, "IPU initialized\n"); | 1806 | dev_dbg(ipu_data.dev, "IPU initialized\n"); |
1805 | 1807 | ||
1806 | return 0; | 1808 | return 0; |
1807 | 1809 | ||
1808 | err_idmac_init: | 1810 | err_idmac_init: |
1809 | err_attach_irq: | 1811 | err_attach_irq: |
1810 | ipu_irq_detach_irq(&ipu_data, pdev); | 1812 | ipu_irq_detach_irq(&ipu_data, pdev); |
1811 | clk_disable(ipu_data.ipu_clk); | 1813 | clk_disable(ipu_data.ipu_clk); |
1812 | clk_put(ipu_data.ipu_clk); | 1814 | clk_put(ipu_data.ipu_clk); |
1813 | err_clk_get: | 1815 | err_clk_get: |
1814 | iounmap(ipu_data.reg_ic); | 1816 | iounmap(ipu_data.reg_ic); |
1815 | err_ioremap_ic: | 1817 | err_ioremap_ic: |
1816 | iounmap(ipu_data.reg_ipu); | 1818 | iounmap(ipu_data.reg_ipu); |
1817 | err_ioremap_ipu: | 1819 | err_ioremap_ipu: |
1818 | err_noirq: | 1820 | err_noirq: |
1819 | dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret); | 1821 | dev_err(&pdev->dev, "Failed to probe IPU: %d\n", ret); |
1820 | return ret; | 1822 | return ret; |
1821 | } | 1823 | } |
1822 | 1824 | ||
1823 | static int __exit ipu_remove(struct platform_device *pdev) | 1825 | static int __exit ipu_remove(struct platform_device *pdev) |
1824 | { | 1826 | { |
1825 | struct ipu *ipu = platform_get_drvdata(pdev); | 1827 | struct ipu *ipu = platform_get_drvdata(pdev); |
1826 | 1828 | ||
1827 | ipu_idmac_exit(ipu); | 1829 | ipu_idmac_exit(ipu); |
1828 | ipu_irq_detach_irq(ipu, pdev); | 1830 | ipu_irq_detach_irq(ipu, pdev); |
1829 | clk_disable(ipu->ipu_clk); | 1831 | clk_disable(ipu->ipu_clk); |
1830 | clk_put(ipu->ipu_clk); | 1832 | clk_put(ipu->ipu_clk); |
1831 | iounmap(ipu->reg_ic); | 1833 | iounmap(ipu->reg_ic); |
1832 | iounmap(ipu->reg_ipu); | 1834 | iounmap(ipu->reg_ipu); |
1833 | tasklet_kill(&ipu->tasklet); | 1835 | tasklet_kill(&ipu->tasklet); |
1834 | platform_set_drvdata(pdev, NULL); | 1836 | platform_set_drvdata(pdev, NULL); |
1835 | 1837 | ||
1836 | return 0; | 1838 | return 0; |
1837 | } | 1839 | } |
1838 | 1840 | ||
1839 | /* | 1841 | /* |
1840 | * We need two MEM resources - with IPU-common and Image Converter registers, | 1842 | * We need two MEM resources - with IPU-common and Image Converter registers, |
1841 | * including PF_CONF and IDMAC_* registers, and two IRQs - function and error | 1843 | * including PF_CONF and IDMAC_* registers, and two IRQs - function and error |
1842 | */ | 1844 | */ |
1843 | static struct platform_driver ipu_platform_driver = { | 1845 | static struct platform_driver ipu_platform_driver = { |
1844 | .driver = { | 1846 | .driver = { |
1845 | .name = "ipu-core", | 1847 | .name = "ipu-core", |
1846 | .owner = THIS_MODULE, | 1848 | .owner = THIS_MODULE, |
1847 | }, | 1849 | }, |
1848 | .remove = __exit_p(ipu_remove), | 1850 | .remove = __exit_p(ipu_remove), |
1849 | }; | 1851 | }; |
1850 | 1852 | ||
1851 | static int __init ipu_init(void) | 1853 | static int __init ipu_init(void) |
1852 | { | 1854 | { |
1853 | return platform_driver_probe(&ipu_platform_driver, ipu_probe); | 1855 | return platform_driver_probe(&ipu_platform_driver, ipu_probe); |
1854 | } | 1856 | } |
1855 | subsys_initcall(ipu_init); | 1857 | subsys_initcall(ipu_init); |
1856 | 1858 | ||
1857 | MODULE_DESCRIPTION("IPU core driver"); | 1859 | MODULE_DESCRIPTION("IPU core driver"); |
1858 | MODULE_LICENSE("GPL v2"); | 1860 | MODULE_LICENSE("GPL v2"); |
1859 | MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>"); | 1861 | MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>"); |
1860 | MODULE_ALIAS("platform:ipu-core"); | 1862 | MODULE_ALIAS("platform:ipu-core"); |
1861 | 1863 |
drivers/dma/shdma.c
1 | /* | 1 | /* |
2 | * Renesas SuperH DMA Engine support | 2 | * Renesas SuperH DMA Engine support |
3 | * | 3 | * |
4 | * based on drivers/dma/fsldma.c | 4 | * based on drivers/dma/fsldma.c |
5 | * | 5 | * |
6 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> | 6 | * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> |
7 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. | 7 | * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. |
8 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. | 8 | * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. |
9 | * | 9 | * |
10 | * This is free software; you can redistribute it and/or modify | 10 | * This is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
12 | * the Free Software Foundation; either version 2 of the License, or | 12 | * the Free Software Foundation; either version 2 of the License, or |
13 | * (at your option) any later version. | 13 | * (at your option) any later version. |
14 | * | 14 | * |
15 | * - The SuperH DMAC does not have a hardware DMA chain mode. | 15 | * - The SuperH DMAC does not have a hardware DMA chain mode. |
16 | * - The maximum DMA transfer size is 16MB. | 16 | * - The maximum DMA transfer size is 16MB. |
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
26 | #include <linux/platform_device.h> | 26 | #include <linux/platform_device.h> |
27 | #include <linux/pm_runtime.h> | 27 | #include <linux/pm_runtime.h> |
28 | 28 | ||
29 | #include <asm/dmaengine.h> | 29 | #include <asm/dmaengine.h> |
30 | 30 | ||
31 | #include "shdma.h" | 31 | #include "shdma.h" |
32 | 32 | ||
33 | /* DMA descriptor control */ | 33 | /* DMA descriptor control */ |
34 | enum sh_dmae_desc_status { | 34 | enum sh_dmae_desc_status { |
35 | DESC_IDLE, | 35 | DESC_IDLE, |
36 | DESC_PREPARED, | 36 | DESC_PREPARED, |
37 | DESC_SUBMITTED, | 37 | DESC_SUBMITTED, |
38 | DESC_COMPLETED, /* completed, have to call callback */ | 38 | DESC_COMPLETED, /* completed, have to call callback */ |
39 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ | 39 | DESC_WAITING, /* callback called, waiting for ack / re-submit */ |
40 | }; | 40 | }; |
41 | 41 | ||
42 | #define NR_DESCS_PER_CHANNEL 32 | 42 | #define NR_DESCS_PER_CHANNEL 32 |
43 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ | 43 | /* Default MEMCPY transfer size = 2^2 = 4 bytes */ |
44 | #define LOG2_DEFAULT_XFER_SIZE 2 | 44 | #define LOG2_DEFAULT_XFER_SIZE 2 |
45 | 45 | ||
46 | /* A bitmask with enough bits for enum sh_dmae_slave_chan_id */ | 46 | /* A bitmask with enough bits for enum sh_dmae_slave_chan_id */ |
47 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; | 47 | static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; |
48 | 48 | ||
49 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); | 49 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); |
50 | 50 | ||
51 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) | 51 | static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) |
52 | { | 52 | { |
53 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); | 53 | __raw_writel(data, sh_dc->base + reg / sizeof(u32)); |
54 | } | 54 | } |
55 | 55 | ||
56 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) | 56 | static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) |
57 | { | 57 | { |
58 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); | 58 | return __raw_readl(sh_dc->base + reg / sizeof(u32)); |
59 | } | 59 | } |
60 | 60 | ||
61 | static u16 dmaor_read(struct sh_dmae_device *shdev) | 61 | static u16 dmaor_read(struct sh_dmae_device *shdev) |
62 | { | 62 | { |
63 | return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); | 63 | return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); |
64 | } | 64 | } |
65 | 65 | ||
66 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) | 66 | static void dmaor_write(struct sh_dmae_device *shdev, u16 data) |
67 | { | 67 | { |
68 | __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); | 68 | __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); |
69 | } | 69 | } |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Reset DMA controller | 72 | * Reset DMA controller |
73 | * | 73 | * |
74 | * SH7780 has two DMAOR registers | 74 | * SH7780 has two DMAOR registers |
75 | */ | 75 | */ |
76 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) | 76 | static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) |
77 | { | 77 | { |
78 | unsigned short dmaor = dmaor_read(shdev); | 78 | unsigned short dmaor = dmaor_read(shdev); |
79 | 79 | ||
80 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); | 80 | dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); |
81 | } | 81 | } |
82 | 82 | ||
83 | static int sh_dmae_rst(struct sh_dmae_device *shdev) | 83 | static int sh_dmae_rst(struct sh_dmae_device *shdev) |
84 | { | 84 | { |
85 | unsigned short dmaor; | 85 | unsigned short dmaor; |
86 | 86 | ||
87 | sh_dmae_ctl_stop(shdev); | 87 | sh_dmae_ctl_stop(shdev); |
88 | dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init; | 88 | dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init; |
89 | 89 | ||
90 | dmaor_write(shdev, dmaor); | 90 | dmaor_write(shdev, dmaor); |
91 | if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { | 91 | if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { |
92 | pr_warning("dma-sh: Can't initialize DMAOR.\n"); | 92 | pr_warning("dma-sh: Can't initialize DMAOR.\n"); |
93 | return -EINVAL; | 93 | return -EINVAL; |
94 | } | 94 | } |
95 | return 0; | 95 | return 0; |
96 | } | 96 | } |
97 | 97 | ||
98 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) | 98 | static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) |
99 | { | 99 | { |
100 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 100 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
101 | 101 | ||
102 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) | 102 | if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) |
103 | return true; /* working */ | 103 | return true; /* working */ |
104 | 104 | ||
105 | return false; /* waiting */ | 105 | return false; /* waiting */ |
106 | } | 106 | } |
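
Annotation: the busy test keys off two CHCR bits, DE (enable) and TE (transfer end). Spelled out, assuming the usual SH DMAC register semantics:

	/*
	 *   DE TE  busy?
	 *    1  0  yes - channel enabled, transfer still in flight
	 *    1  1  no  - hardware has set TE, transfer ended
	 *    0  x  no  - channel disabled
	 */
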
107 | 107 | ||
108 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) | 108 | static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) |
109 | { | 109 | { |
110 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | 110 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
111 | struct sh_dmae_device, common); | 111 | struct sh_dmae_device, common); |
112 | struct sh_dmae_pdata *pdata = shdev->pdata; | 112 | struct sh_dmae_pdata *pdata = shdev->pdata; |
113 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | | 113 | int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | |
114 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); | 114 | ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); |
115 | 115 | ||
116 | if (cnt >= pdata->ts_shift_num) | 116 | if (cnt >= pdata->ts_shift_num) |
117 | cnt = 0; | 117 | cnt = 0; |
118 | 118 | ||
119 | return pdata->ts_shift[cnt]; | 119 | return pdata->ts_shift[cnt]; |
120 | } | 120 | } |
121 | 121 | ||
122 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) | 122 | static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) |
123 | { | 123 | { |
124 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | 124 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
125 | struct sh_dmae_device, common); | 125 | struct sh_dmae_device, common); |
126 | struct sh_dmae_pdata *pdata = shdev->pdata; | 126 | struct sh_dmae_pdata *pdata = shdev->pdata; |
127 | int i; | 127 | int i; |
128 | 128 | ||
129 | for (i = 0; i < pdata->ts_shift_num; i++) | 129 | for (i = 0; i < pdata->ts_shift_num; i++) |
130 | if (pdata->ts_shift[i] == l2size) | 130 | if (pdata->ts_shift[i] == l2size) |
131 | break; | 131 | break; |
132 | 132 | ||
133 | if (i == pdata->ts_shift_num) | 133 | if (i == pdata->ts_shift_num) |
134 | i = 0; | 134 | i = 0; |
135 | 135 | ||
136 | return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | | 136 | return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | |
137 | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); | 137 | ((i << pdata->ts_high_shift) & pdata->ts_high_mask); |
138 | } | 138 | } |
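
Annotation: calc_xmit_shift() and log2size_to_chcr() are inverses over the per-SoC ts_shift[] table: the TS bits in CHCR encode a table index, and the table entry is log2 of the transfer-unit size. TCR then counts units rather than bytes, which is why dmae_set_reg() shifts hw->tcr down. A round-trip sketch, assuming this SoC's table includes 4-byte units and "nbytes" is a transfer length:

	static u32 example_tcr(struct sh_dmae_chan *sh_chan, size_t nbytes)
	{
		u32 chcr = log2size_to_chcr(sh_chan, 2);	/* encode log2(4) */
		unsigned int shift = calc_xmit_shift(sh_chan, chcr); /* back to 2 */

		return nbytes >> shift;	/* unit count, as dmae_set_reg() programs */
	}
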
139 | 139 | ||
140 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) | 140 | static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) |
141 | { | 141 | { |
142 | sh_dmae_writel(sh_chan, hw->sar, SAR); | 142 | sh_dmae_writel(sh_chan, hw->sar, SAR); |
143 | sh_dmae_writel(sh_chan, hw->dar, DAR); | 143 | sh_dmae_writel(sh_chan, hw->dar, DAR); |
144 | sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); | 144 | sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); |
145 | } | 145 | } |
146 | 146 | ||
147 | static void dmae_start(struct sh_dmae_chan *sh_chan) | 147 | static void dmae_start(struct sh_dmae_chan *sh_chan) |
148 | { | 148 | { |
149 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 149 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
150 | 150 | ||
151 | chcr |= CHCR_DE | CHCR_IE; | 151 | chcr |= CHCR_DE | CHCR_IE; |
152 | sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); | 152 | sh_dmae_writel(sh_chan, chcr & ~CHCR_TE, CHCR); |
153 | } | 153 | } |
154 | 154 | ||
155 | static void dmae_halt(struct sh_dmae_chan *sh_chan) | 155 | static void dmae_halt(struct sh_dmae_chan *sh_chan) |
156 | { | 156 | { |
157 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 157 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
158 | 158 | ||
159 | chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); | 159 | chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); |
160 | sh_dmae_writel(sh_chan, chcr, CHCR); | 160 | sh_dmae_writel(sh_chan, chcr, CHCR); |
161 | } | 161 | } |
162 | 162 | ||
163 | static void dmae_init(struct sh_dmae_chan *sh_chan) | 163 | static void dmae_init(struct sh_dmae_chan *sh_chan) |
164 | { | 164 | { |
165 | /* | 165 | /* |
166 | * Default configuration for dual address memory-memory transfer. | 166 | * Default configuration for dual address memory-memory transfer. |
167 | * 0x400 represents auto-request. | 167 | * 0x400 represents auto-request. |
168 | */ | 168 | */ |
169 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, | 169 | u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, |
170 | LOG2_DEFAULT_XFER_SIZE); | 170 | LOG2_DEFAULT_XFER_SIZE); |
171 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); | 171 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); |
172 | sh_dmae_writel(sh_chan, chcr, CHCR); | 172 | sh_dmae_writel(sh_chan, chcr, CHCR); |
173 | } | 173 | } |
174 | 174 | ||
175 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) | 175 | static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) |
176 | { | 176 | { |
177 | /* CHCR cannot be written to while the DMA channel is working */ | 177 | /* CHCR cannot be written to while the DMA channel is working */ |
178 | if (dmae_is_busy(sh_chan)) | 178 | if (dmae_is_busy(sh_chan)) |
179 | return -EBUSY; | 179 | return -EBUSY; |
180 | 180 | ||
181 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); | 181 | sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); |
182 | sh_dmae_writel(sh_chan, val, CHCR); | 182 | sh_dmae_writel(sh_chan, val, CHCR); |
183 | 183 | ||
184 | return 0; | 184 | return 0; |
185 | } | 185 | } |
186 | 186 | ||
187 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) | 187 | static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) |
188 | { | 188 | { |
189 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, | 189 | struct sh_dmae_device *shdev = container_of(sh_chan->common.device, |
190 | struct sh_dmae_device, common); | 190 | struct sh_dmae_device, common); |
191 | struct sh_dmae_pdata *pdata = shdev->pdata; | 191 | struct sh_dmae_pdata *pdata = shdev->pdata; |
192 | struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; | 192 | struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; |
193 | u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); | 193 | u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); |
194 | int shift = chan_pdata->dmars_bit; | 194 | int shift = chan_pdata->dmars_bit; |
195 | 195 | ||
196 | if (dmae_is_busy(sh_chan)) | 196 | if (dmae_is_busy(sh_chan)) |
197 | return -EBUSY; | 197 | return -EBUSY; |
198 | 198 | ||
199 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), | 199 | __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), |
200 | addr); | 200 | addr); |
201 | 201 | ||
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) | 205 | static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) |
206 | { | 206 | { |
207 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; | 207 | struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; |
208 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); | 208 | struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); |
209 | dma_async_tx_callback callback = tx->callback; | 209 | dma_async_tx_callback callback = tx->callback; |
210 | dma_cookie_t cookie; | 210 | dma_cookie_t cookie; |
211 | 211 | ||
212 | spin_lock_bh(&sh_chan->desc_lock); | 212 | spin_lock_bh(&sh_chan->desc_lock); |
213 | 213 | ||
214 | cookie = sh_chan->common.cookie; | 214 | cookie = sh_chan->common.cookie; |
215 | cookie++; | 215 | cookie++; |
216 | if (cookie < 0) | 216 | if (cookie < 0) |
217 | cookie = 1; | 217 | cookie = 1; |
218 | 218 | ||
219 | sh_chan->common.cookie = cookie; | 219 | sh_chan->common.cookie = cookie; |
220 | tx->cookie = cookie; | 220 | tx->cookie = cookie; |
221 | 221 | ||
222 | /* Mark all chunks of this descriptor as submitted, move to the queue */ | 222 | /* Mark all chunks of this descriptor as submitted, move to the queue */ |
223 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { | 223 | list_for_each_entry_safe(chunk, c, desc->node.prev, node) { |
224 | /* | 224 | /* |
225 | * All chunks are on the global ld_free, so, we have to find | 225 | * All chunks are on the global ld_free, so, we have to find |
226 | * the end of the chain ourselves | 226 | * the end of the chain ourselves |
227 | */ | 227 | */ |
228 | if (chunk != desc && (chunk->mark == DESC_IDLE || | 228 | if (chunk != desc && (chunk->mark == DESC_IDLE || |
229 | chunk->async_tx.cookie > 0 || | 229 | chunk->async_tx.cookie > 0 || |
230 | chunk->async_tx.cookie == -EBUSY || | 230 | chunk->async_tx.cookie == -EBUSY || |
231 | &chunk->node == &sh_chan->ld_free)) | 231 | &chunk->node == &sh_chan->ld_free)) |
232 | break; | 232 | break; |
233 | chunk->mark = DESC_SUBMITTED; | 233 | chunk->mark = DESC_SUBMITTED; |
234 | /* Callback goes to the last chunk */ | 234 | /* Callback goes to the last chunk */ |
235 | chunk->async_tx.callback = NULL; | 235 | chunk->async_tx.callback = NULL; |
236 | chunk->cookie = cookie; | 236 | chunk->cookie = cookie; |
237 | list_move_tail(&chunk->node, &sh_chan->ld_queue); | 237 | list_move_tail(&chunk->node, &sh_chan->ld_queue); |
238 | last = chunk; | 238 | last = chunk; |
239 | } | 239 | } |
240 | 240 | ||
241 | last->async_tx.callback = callback; | 241 | last->async_tx.callback = callback; |
242 | last->async_tx.callback_param = tx->callback_param; | 242 | last->async_tx.callback_param = tx->callback_param; |
243 | 243 | ||
244 | dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n", | 244 | dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n", |
245 | tx->cookie, &last->async_tx, sh_chan->id, | 245 | tx->cookie, &last->async_tx, sh_chan->id, |
246 | desc->hw.sar, desc->hw.tcr, desc->hw.dar); | 246 | desc->hw.sar, desc->hw.tcr, desc->hw.dar); |
247 | 247 | ||
248 | spin_unlock_bh(&sh_chan->desc_lock); | 248 | spin_unlock_bh(&sh_chan->desc_lock); |
249 | 249 | ||
250 | return cookie; | 250 | return cookie; |
251 | } | 251 | } |
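
Annotation: dma_cookie_t is signed and negative values double as error codes (note the -EBUSY head-of-chain marker above), so the counter wraps back to 1 instead of going negative. The idiom in isolation, as a sketch:

	static dma_cookie_t next_cookie(struct dma_chan *c)
	{
		dma_cookie_t cookie = c->cookie + 1;

		if (cookie < 0)
			cookie = 1;	/* skip 0 and the negative error range */
		return cookie;
	}
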
252 | 252 | ||
253 | /* Called with desc_lock held */ | 253 | /* Called with desc_lock held */ |
254 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) | 254 | static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) |
255 | { | 255 | { |
256 | struct sh_desc *desc; | 256 | struct sh_desc *desc; |
257 | 257 | ||
258 | list_for_each_entry(desc, &sh_chan->ld_free, node) | 258 | list_for_each_entry(desc, &sh_chan->ld_free, node) |
259 | if (desc->mark != DESC_PREPARED) { | 259 | if (desc->mark != DESC_PREPARED) { |
260 | BUG_ON(desc->mark != DESC_IDLE); | 260 | BUG_ON(desc->mark != DESC_IDLE); |
261 | list_del(&desc->node); | 261 | list_del(&desc->node); |
262 | return desc; | 262 | return desc; |
263 | } | 263 | } |
264 | 264 | ||
265 | return NULL; | 265 | return NULL; |
266 | } | 266 | } |
267 | 267 | ||
268 | static struct sh_dmae_slave_config *sh_dmae_find_slave( | 268 | static struct sh_dmae_slave_config *sh_dmae_find_slave( |
269 | struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) | 269 | struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id) |
270 | { | 270 | { |
271 | struct dma_device *dma_dev = sh_chan->common.device; | 271 | struct dma_device *dma_dev = sh_chan->common.device; |
272 | struct sh_dmae_device *shdev = container_of(dma_dev, | 272 | struct sh_dmae_device *shdev = container_of(dma_dev, |
273 | struct sh_dmae_device, common); | 273 | struct sh_dmae_device, common); |
274 | struct sh_dmae_pdata *pdata = shdev->pdata; | 274 | struct sh_dmae_pdata *pdata = shdev->pdata; |
275 | int i; | 275 | int i; |
276 | 276 | ||
277 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) | 277 | if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) |
278 | return NULL; | 278 | return NULL; |
279 | 279 | ||
280 | for (i = 0; i < pdata->slave_num; i++) | 280 | for (i = 0; i < pdata->slave_num; i++) |
281 | if (pdata->slave[i].slave_id == slave_id) | 281 | if (pdata->slave[i].slave_id == slave_id) |
282 | return pdata->slave + i; | 282 | return pdata->slave + i; |
283 | 283 | ||
284 | return NULL; | 284 | return NULL; |
285 | } | 285 | } |
286 | 286 | ||
287 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) | 287 | static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) |
288 | { | 288 | { |
289 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 289 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
290 | struct sh_desc *desc; | 290 | struct sh_desc *desc; |
291 | struct sh_dmae_slave *param = chan->private; | 291 | struct sh_dmae_slave *param = chan->private; |
292 | 292 | ||
293 | pm_runtime_get_sync(sh_chan->dev); | 293 | pm_runtime_get_sync(sh_chan->dev); |
294 | 294 | ||
295 | /* | 295 | /* |
296 | * This relies on the guarantee from dmaengine that alloc_chan_resources | 296 | * This relies on the guarantee from dmaengine that alloc_chan_resources |
297 | * never runs concurrently with itself or free_chan_resources. | 297 | * never runs concurrently with itself or free_chan_resources. |
298 | */ | 298 | */ |
299 | if (param) { | 299 | if (param) { |
300 | struct sh_dmae_slave_config *cfg; | 300 | struct sh_dmae_slave_config *cfg; |
301 | 301 | ||
302 | cfg = sh_dmae_find_slave(sh_chan, param->slave_id); | 302 | cfg = sh_dmae_find_slave(sh_chan, param->slave_id); |
303 | if (!cfg) | 303 | if (!cfg) |
304 | return -EINVAL; | 304 | return -EINVAL; |
305 | 305 | ||
306 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) | 306 | if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) |
307 | return -EBUSY; | 307 | return -EBUSY; |
308 | 308 | ||
309 | param->config = cfg; | 309 | param->config = cfg; |
310 | 310 | ||
311 | dmae_set_dmars(sh_chan, cfg->mid_rid); | 311 | dmae_set_dmars(sh_chan, cfg->mid_rid); |
312 | dmae_set_chcr(sh_chan, cfg->chcr); | 312 | dmae_set_chcr(sh_chan, cfg->chcr); |
313 | } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { | 313 | } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { |
314 | dmae_init(sh_chan); | 314 | dmae_init(sh_chan); |
315 | } | 315 | } |
316 | 316 | ||
317 | spin_lock_bh(&sh_chan->desc_lock); | 317 | spin_lock_bh(&sh_chan->desc_lock); |
318 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { | 318 | while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { |
319 | spin_unlock_bh(&sh_chan->desc_lock); | 319 | spin_unlock_bh(&sh_chan->desc_lock); |
320 | desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); | 320 | desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); |
321 | if (!desc) { | 321 | if (!desc) { |
322 | spin_lock_bh(&sh_chan->desc_lock); | 322 | spin_lock_bh(&sh_chan->desc_lock); |
323 | break; | 323 | break; |
324 | } | 324 | } |
325 | dma_async_tx_descriptor_init(&desc->async_tx, | 325 | dma_async_tx_descriptor_init(&desc->async_tx, |
326 | &sh_chan->common); | 326 | &sh_chan->common); |
327 | desc->async_tx.tx_submit = sh_dmae_tx_submit; | 327 | desc->async_tx.tx_submit = sh_dmae_tx_submit; |
328 | desc->mark = DESC_IDLE; | 328 | desc->mark = DESC_IDLE; |
329 | 329 | ||
330 | spin_lock_bh(&sh_chan->desc_lock); | 330 | spin_lock_bh(&sh_chan->desc_lock); |
331 | list_add(&desc->node, &sh_chan->ld_free); | 331 | list_add(&desc->node, &sh_chan->ld_free); |
332 | sh_chan->descs_allocated++; | 332 | sh_chan->descs_allocated++; |
333 | } | 333 | } |
334 | spin_unlock_bh(&sh_chan->desc_lock); | 334 | spin_unlock_bh(&sh_chan->desc_lock); |
335 | 335 | ||
336 | if (!sh_chan->descs_allocated) | 336 | if (!sh_chan->descs_allocated) |
337 | pm_runtime_put(sh_chan->dev); | 337 | pm_runtime_put(sh_chan->dev); |
338 | 338 | ||
339 | return sh_chan->descs_allocated; | 339 | return sh_chan->descs_allocated; |
340 | } | 340 | } |
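
Annotation: slave channels are claimed exclusively through the sh_dmae_slave_used bitmap; the client hands in a struct sh_dmae_slave via chan->private, typically from a dma_request_channel() filter. A hedged sketch, with "SHDMA_SLAVE_SCIF0_TX" standing in for a real slave ID:

	static bool shdma_filter(struct dma_chan *chan, void *arg)
	{
		chan->private = arg;	/* read back in alloc_chan_resources */
		return true;
	}

	static struct dma_chan *request_tx_chan(void)
	{
		static struct sh_dmae_slave param = {
			.slave_id = SHDMA_SLAVE_SCIF0_TX,
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);
		return dma_request_channel(mask, shdma_filter, &param);
	}
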
341 | 341 | ||
342 | /* | 342 | /* |
343 | * sh_dma_free_chan_resources - Free all resources of the channel. | 343 | * sh_dma_free_chan_resources - Free all resources of the channel. |
344 | */ | 344 | */ |
345 | static void sh_dmae_free_chan_resources(struct dma_chan *chan) | 345 | static void sh_dmae_free_chan_resources(struct dma_chan *chan) |
346 | { | 346 | { |
347 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 347 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
348 | struct sh_desc *desc, *_desc; | 348 | struct sh_desc *desc, *_desc; |
349 | LIST_HEAD(list); | 349 | LIST_HEAD(list); |
350 | int descs = sh_chan->descs_allocated; | 350 | int descs = sh_chan->descs_allocated; |
351 | 351 | ||
352 | dmae_halt(sh_chan); | 352 | dmae_halt(sh_chan); |
353 | 353 | ||
354 | /* Prepared and not submitted descriptors can still be on the queue */ | 354 | /* Prepared and not submitted descriptors can still be on the queue */ |
355 | if (!list_empty(&sh_chan->ld_queue)) | 355 | if (!list_empty(&sh_chan->ld_queue)) |
356 | sh_dmae_chan_ld_cleanup(sh_chan, true); | 356 | sh_dmae_chan_ld_cleanup(sh_chan, true); |
357 | 357 | ||
358 | if (chan->private) { | 358 | if (chan->private) { |
359 | /* The caller is holding dma_list_mutex */ | 359 | /* The caller is holding dma_list_mutex */ |
360 | struct sh_dmae_slave *param = chan->private; | 360 | struct sh_dmae_slave *param = chan->private; |
361 | clear_bit(param->slave_id, sh_dmae_slave_used); | 361 | clear_bit(param->slave_id, sh_dmae_slave_used); |
362 | } | 362 | } |
363 | 363 | ||
364 | spin_lock_bh(&sh_chan->desc_lock); | 364 | spin_lock_bh(&sh_chan->desc_lock); |
365 | 365 | ||
366 | list_splice_init(&sh_chan->ld_free, &list); | 366 | list_splice_init(&sh_chan->ld_free, &list); |
367 | sh_chan->descs_allocated = 0; | 367 | sh_chan->descs_allocated = 0; |
368 | 368 | ||
369 | spin_unlock_bh(&sh_chan->desc_lock); | 369 | spin_unlock_bh(&sh_chan->desc_lock); |
370 | 370 | ||
371 | if (descs > 0) | 371 | if (descs > 0) |
372 | pm_runtime_put(sh_chan->dev); | 372 | pm_runtime_put(sh_chan->dev); |
373 | 373 | ||
374 | list_for_each_entry_safe(desc, _desc, &list, node) | 374 | list_for_each_entry_safe(desc, _desc, &list, node) |
375 | kfree(desc); | 375 | kfree(desc); |
376 | } | 376 | } |
377 | 377 | ||
378 | /** | 378 | /** |
379 | * sh_dmae_add_desc - get, set up and return one transfer descriptor | 379 | * sh_dmae_add_desc - get, set up and return one transfer descriptor |
380 | * @sh_chan: DMA channel | 380 | * @sh_chan: DMA channel |
381 | * @flags: DMA transfer flags | 381 | * @flags: DMA transfer flags |
382 | * @dest: destination DMA address, incremented when direction equals | 382 | * @dest: destination DMA address, incremented when direction equals |
383 | * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL | 383 | * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL |
384 | * @src: source DMA address, incremented when direction equals | 384 | * @src: source DMA address, incremented when direction equals |
385 | * DMA_TO_DEVICE or DMA_BIDIRECTIONAL | 385 | * DMA_TO_DEVICE or DMA_BIDIRECTIONAL |
386 | * @len: DMA transfer length | 386 | * @len: DMA transfer length |
387 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY | 387 | * @first: if NULL, set to the current descriptor and cookie set to -EBUSY |
388 | * @direction: needed for slave DMA to decide which address to keep constant, | 388 | * @direction: needed for slave DMA to decide which address to keep constant, |
389 | * equals DMA_BIDIRECTIONAL for MEMCPY | 389 | * equals DMA_BIDIRECTIONAL for MEMCPY |
390 | * Returns 0 or an error | 390 | * Returns 0 or an error |
391 | * Locks: called with desc_lock held | 391 | * Locks: called with desc_lock held |
392 | */ | 392 | */ |
393 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, | 393 | static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, |
394 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, | 394 | unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, |
395 | struct sh_desc **first, enum dma_data_direction direction) | 395 | struct sh_desc **first, enum dma_data_direction direction) |
396 | { | 396 | { |
397 | struct sh_desc *new; | 397 | struct sh_desc *new; |
398 | size_t copy_size; | 398 | size_t copy_size; |
399 | 399 | ||
400 | if (!*len) | 400 | if (!*len) |
401 | return NULL; | 401 | return NULL; |
402 | 402 | ||
403 | /* Allocate the link descriptor from the free list */ | 403 | /* Allocate the link descriptor from the free list */ |
404 | new = sh_dmae_get_desc(sh_chan); | 404 | new = sh_dmae_get_desc(sh_chan); |
405 | if (!new) { | 405 | if (!new) { |
406 | dev_err(sh_chan->dev, "No free link descriptor available\n"); | 406 | dev_err(sh_chan->dev, "No free link descriptor available\n"); |
407 | return NULL; | 407 | return NULL; |
408 | } | 408 | } |
409 | 409 | ||
410 | copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); | 410 | copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); |
411 | 411 | ||
412 | new->hw.sar = *src; | 412 | new->hw.sar = *src; |
413 | new->hw.dar = *dest; | 413 | new->hw.dar = *dest; |
414 | new->hw.tcr = copy_size; | 414 | new->hw.tcr = copy_size; |
415 | 415 | ||
416 | if (!*first) { | 416 | if (!*first) { |
417 | /* First desc */ | 417 | /* First desc */ |
418 | new->async_tx.cookie = -EBUSY; | 418 | new->async_tx.cookie = -EBUSY; |
419 | *first = new; | 419 | *first = new; |
420 | } else { | 420 | } else { |
421 | /* Other desc - invisible to the user */ | 421 | /* Other desc - invisible to the user */ |
422 | new->async_tx.cookie = -EINVAL; | 422 | new->async_tx.cookie = -EINVAL; |
423 | } | 423 | } |
424 | 424 | ||
425 | dev_dbg(sh_chan->dev, | 425 | dev_dbg(sh_chan->dev, |
426 | "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", | 426 | "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", |
427 | copy_size, *len, *src, *dest, &new->async_tx, | 427 | copy_size, *len, *src, *dest, &new->async_tx, |
428 | new->async_tx.cookie, sh_chan->xmit_shift); | 428 | new->async_tx.cookie, sh_chan->xmit_shift); |
429 | 429 | ||
430 | new->mark = DESC_PREPARED; | 430 | new->mark = DESC_PREPARED; |
431 | new->async_tx.flags = flags; | 431 | new->async_tx.flags = flags; |
432 | new->direction = direction; | 432 | new->direction = direction; |
433 | 433 | ||
434 | *len -= copy_size; | 434 | *len -= copy_size; |
435 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) | 435 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE) |
436 | *src += copy_size; | 436 | *src += copy_size; |
437 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) | 437 | if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE) |
438 | *dest += copy_size; | 438 | *dest += copy_size; |
439 | 439 | ||
440 | return new; | 440 | return new; |
441 | } | 441 | } |
442 | 442 | ||
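Taken with the kerneldoc above, the per-chunk bookkeeping is easiest to see from the caller's side. A minimal, hedged sketch follows; the channel, flags, addresses and length are assumptions for illustration only (they are not taken from this diff), and per the Locks note desc_lock is already held:

	/* Hedged usage sketch: consume *len in TCR-sized chunks.
	 * Assumes sh_chan is a valid channel and desc_lock is held. */
	struct sh_desc *first = NULL, *new;
	dma_addr_t dst = 0x48000000;	/* assumed destination address */
	dma_addr_t src = 0x40000000;	/* assumed source address */
	size_t len = 1 << 20;		/* assumed 1 MiB transfer */

	do {
		/* both addresses advance for DMA_BIDIRECTIONAL (MEMCPY) */
		new = sh_dmae_add_desc(sh_chan, DMA_PREP_INTERRUPT,
				       &dst, &src, &len, &first,
				       DMA_BIDIRECTIONAL);
	} while (new && len);
	/* On success, first->async_tx.cookie == -EBUSY and later chunks
	 * carry -EINVAL, exactly as described above. */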
443 | /* | 443 | /* |
444 | * sh_dmae_prep_sg - prepare transfer descriptors from an SG list | 444 | * sh_dmae_prep_sg - prepare transfer descriptors from an SG list |
445 | * | 445 | * |
446 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also | 446 | * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also |
447 | * converted to scatter-gather to guarantee consistent locking and a correct | 447 | * converted to scatter-gather to guarantee consistent locking and a correct |
448 | * list manipulation. For slave DMA, direction carries its usual meaning, and, | 448 | * list manipulation. For slave DMA, direction carries its usual meaning, and, |
449 | * logically, the SG list is RAM and the addr variable contains the slave | 449 | * logically, the SG list is RAM and the addr variable contains the slave |
450 | * address, e.g., the FIFO I/O register. For MEMCPY, direction equals | 450 | * address, e.g., the FIFO I/O register. For MEMCPY, direction equals |
451 | * DMA_BIDIRECTIONAL and the SG list contains a single element pointing at the source buffer. | 451 | * DMA_BIDIRECTIONAL and the SG list contains a single element pointing at the source buffer. |
452 | */ | 452 | */ |
453 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, | 453 | static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, |
454 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, | 454 | struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, |
455 | enum dma_data_direction direction, unsigned long flags) | 455 | enum dma_data_direction direction, unsigned long flags) |
456 | { | 456 | { |
457 | struct scatterlist *sg; | 457 | struct scatterlist *sg; |
458 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; | 458 | struct sh_desc *first = NULL, *new = NULL /* compiler... */; |
459 | LIST_HEAD(tx_list); | 459 | LIST_HEAD(tx_list); |
460 | int chunks = 0; | 460 | int chunks = 0; |
461 | int i; | 461 | int i; |
462 | 462 | ||
463 | if (!sg_len) | 463 | if (!sg_len) |
464 | return NULL; | 464 | return NULL; |
465 | 465 | ||
466 | for_each_sg(sgl, sg, sg_len, i) | 466 | for_each_sg(sgl, sg, sg_len, i) |
467 | chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / | 467 | chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / |
468 | (SH_DMA_TCR_MAX + 1); | 468 | (SH_DMA_TCR_MAX + 1); |
469 | 469 | ||
470 | /* Have to lock the whole loop to protect against concurrent release */ | 470 | /* Have to lock the whole loop to protect against concurrent release */ |
471 | spin_lock_bh(&sh_chan->desc_lock); | 471 | spin_lock_bh(&sh_chan->desc_lock); |
472 | 472 | ||
473 | /* | 473 | /* |
474 | * Chaining: | 474 | * Chaining: |
475 | * the first descriptor is what the user deals with in all API calls; its | 475 | * the first descriptor is what the user deals with in all API calls; its |
476 | * cookie is initially set to -EBUSY, and at tx-submit to a positive | 476 | * cookie is initially set to -EBUSY, and at tx-submit to a positive |
477 | * number; | 477 | * number; |
478 | * if more than one chunk is needed, further chunks have cookie = -EINVAL; | 478 | * if more than one chunk is needed, further chunks have cookie = -EINVAL; |
479 | * the last chunk, if not equal to the first, has cookie = -ENOSPC; | 479 | * the last chunk, if not equal to the first, has cookie = -ENOSPC; |
480 | * all chunks are linked onto the tx_list head with their .node heads | 480 | * all chunks are linked onto the tx_list head with their .node heads |
481 | * only during this function, then they are immediately spliced | 481 | * only during this function, then they are immediately spliced |
482 | * back onto the free list in the form of a chain | 482 | * back onto the free list in the form of a chain |
483 | */ | 483 | */ |
484 | for_each_sg(sgl, sg, sg_len, i) { | 484 | for_each_sg(sgl, sg, sg_len, i) { |
485 | dma_addr_t sg_addr = sg_dma_address(sg); | 485 | dma_addr_t sg_addr = sg_dma_address(sg); |
486 | size_t len = sg_dma_len(sg); | 486 | size_t len = sg_dma_len(sg); |
487 | 487 | ||
488 | if (!len) | 488 | if (!len) |
489 | goto err_get_desc; | 489 | goto err_get_desc; |
490 | 490 | ||
491 | do { | 491 | do { |
492 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", | 492 | dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", |
493 | i, sg, len, (unsigned long long)sg_addr); | 493 | i, sg, len, (unsigned long long)sg_addr); |
494 | 494 | ||
495 | if (direction == DMA_FROM_DEVICE) | 495 | if (direction == DMA_FROM_DEVICE) |
496 | new = sh_dmae_add_desc(sh_chan, flags, | 496 | new = sh_dmae_add_desc(sh_chan, flags, |
497 | &sg_addr, addr, &len, &first, | 497 | &sg_addr, addr, &len, &first, |
498 | direction); | 498 | direction); |
499 | else | 499 | else |
500 | new = sh_dmae_add_desc(sh_chan, flags, | 500 | new = sh_dmae_add_desc(sh_chan, flags, |
501 | addr, &sg_addr, &len, &first, | 501 | addr, &sg_addr, &len, &first, |
502 | direction); | 502 | direction); |
503 | if (!new) | 503 | if (!new) |
504 | goto err_get_desc; | 504 | goto err_get_desc; |
505 | 505 | ||
506 | new->chunks = chunks--; | 506 | new->chunks = chunks--; |
507 | list_add_tail(&new->node, &tx_list); | 507 | list_add_tail(&new->node, &tx_list); |
508 | } while (len); | 508 | } while (len); |
509 | } | 509 | } |
510 | 510 | ||
511 | if (new != first) | 511 | if (new != first) |
512 | new->async_tx.cookie = -ENOSPC; | 512 | new->async_tx.cookie = -ENOSPC; |
513 | 513 | ||
514 | /* Put them back on the free list so they don't get lost */ | 514 | /* Put them back on the free list so they don't get lost */ |
515 | list_splice_tail(&tx_list, &sh_chan->ld_free); | 515 | list_splice_tail(&tx_list, &sh_chan->ld_free); |
516 | 516 | ||
517 | spin_unlock_bh(&sh_chan->desc_lock); | 517 | spin_unlock_bh(&sh_chan->desc_lock); |
518 | 518 | ||
519 | return &first->async_tx; | 519 | return &first->async_tx; |
520 | 520 | ||
521 | err_get_desc: | 521 | err_get_desc: |
522 | list_for_each_entry(new, &tx_list, node) | 522 | list_for_each_entry(new, &tx_list, node) |
523 | new->mark = DESC_IDLE; | 523 | new->mark = DESC_IDLE; |
524 | list_splice(&tx_list, &sh_chan->ld_free); | 524 | list_splice(&tx_list, &sh_chan->ld_free); |
525 | 525 | ||
526 | spin_unlock_bh(&sh_chan->desc_lock); | 526 | spin_unlock_bh(&sh_chan->desc_lock); |
527 | 527 | ||
528 | return NULL; | 528 | return NULL; |
529 | } | 529 | } |
530 | 530 | ||
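The pre-count in the for_each_sg() loop above is a ceiling division against the hardware transfer-count limit. A self-contained sketch of just that arithmetic (the SH_DMA_TCR_MAX value shown here is an assumption for illustration; the driver's header defines the real one):

	#define SH_DMA_TCR_MAX 0x00FFFFFF	/* assumed value; see shdma.h */

	/* Same ceiling division as the chunk pre-count in sh_dmae_prep_sg():
	 * each descriptor moves at most SH_DMA_TCR_MAX + 1 bytes. */
	static unsigned int sg_chunks(size_t dma_len)
	{
		return (dma_len + SH_DMA_TCR_MAX) / (SH_DMA_TCR_MAX + 1);
	}

For a three-chunk job, the cookie progression described in the chaining comment is then -EBUSY (first, user-visible), -EINVAL (middle), -ENOSPC (last), with the first cookie turning positive at tx-submit.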
531 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( | 531 | static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( |
532 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, | 532 | struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, |
533 | size_t len, unsigned long flags) | 533 | size_t len, unsigned long flags) |
534 | { | 534 | { |
535 | struct sh_dmae_chan *sh_chan; | 535 | struct sh_dmae_chan *sh_chan; |
536 | struct scatterlist sg; | 536 | struct scatterlist sg; |
537 | 537 | ||
538 | if (!chan || !len) | 538 | if (!chan || !len) |
539 | return NULL; | 539 | return NULL; |
540 | 540 | ||
541 | chan->private = NULL; | 541 | chan->private = NULL; |
542 | 542 | ||
543 | sh_chan = to_sh_chan(chan); | 543 | sh_chan = to_sh_chan(chan); |
544 | 544 | ||
545 | sg_init_table(&sg, 1); | 545 | sg_init_table(&sg, 1); |
546 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, | 546 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, |
547 | offset_in_page(dma_src)); | 547 | offset_in_page(dma_src)); |
548 | sg_dma_address(&sg) = dma_src; | 548 | sg_dma_address(&sg) = dma_src; |
549 | sg_dma_len(&sg) = len; | 549 | sg_dma_len(&sg) = len; |
550 | 550 | ||
551 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, | 551 | return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL, |
552 | flags); | 552 | flags); |
553 | } | 553 | } |
554 | 554 | ||
555 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( | 555 | static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( |
556 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, | 556 | struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, |
557 | enum dma_data_direction direction, unsigned long flags) | 557 | enum dma_data_direction direction, unsigned long flags) |
558 | { | 558 | { |
559 | struct sh_dmae_slave *param; | 559 | struct sh_dmae_slave *param; |
560 | struct sh_dmae_chan *sh_chan; | 560 | struct sh_dmae_chan *sh_chan; |
561 | 561 | ||
562 | if (!chan) | 562 | if (!chan) |
563 | return NULL; | 563 | return NULL; |
564 | 564 | ||
565 | sh_chan = to_sh_chan(chan); | 565 | sh_chan = to_sh_chan(chan); |
566 | param = chan->private; | 566 | param = chan->private; |
567 | 567 | ||
568 | /* Someone calling slave DMA on a public channel? */ | 568 | /* Someone calling slave DMA on a public channel? */ |
569 | if (!param || !sg_len) { | 569 | if (!param || !sg_len) { |
570 | dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", | 570 | dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", |
571 | __func__, param, sg_len, param ? param->slave_id : -1); | 571 | __func__, param, sg_len, param ? param->slave_id : -1); |
572 | return NULL; | 572 | return NULL; |
573 | } | 573 | } |
574 | 574 | ||
575 | /* | 575 | /* |
576 | * if (param != NULL), this is a successfully requested slave channel, | 576 | * if (param != NULL), this is a successfully requested slave channel, |
577 | * therefore param->config != NULL too. | 577 | * therefore param->config != NULL too. |
578 | */ | 578 | */ |
579 | return sh_dmae_prep_sg(sh_chan, sgl, sg_len, ¶m->config->addr, | 579 | return sh_dmae_prep_sg(sh_chan, sgl, sg_len, ¶m->config->addr, |
580 | direction, flags); | 580 | direction, flags); |
581 | } | 581 | } |
582 | 582 | ||
583 | static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) | 583 | static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
| | 584 | unsigned long arg) |
584 | { | 585 | { |
585 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 586 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
586 | 587 | ||
587 | /* Only supports DMA_TERMINATE_ALL */ | 588 | /* Only supports DMA_TERMINATE_ALL */ |
588 | if (cmd != DMA_TERMINATE_ALL) | 589 | if (cmd != DMA_TERMINATE_ALL) |
589 | return -ENXIO; | 590 | return -ENXIO; |
590 | 591 | ||
591 | if (!chan) | 592 | if (!chan) |
592 | return -EINVAL; | 593 | return -EINVAL; |
593 | 594 | ||
594 | dmae_halt(sh_chan); | 595 | dmae_halt(sh_chan); |
595 | 596 | ||
596 | spin_lock_bh(&sh_chan->desc_lock); | 597 | spin_lock_bh(&sh_chan->desc_lock); |
597 | if (!list_empty(&sh_chan->ld_queue)) { | 598 | if (!list_empty(&sh_chan->ld_queue)) { |
598 | /* Record partial transfer */ | 599 | /* Record partial transfer */ |
599 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, | 600 | struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, |
600 | struct sh_desc, node); | 601 | struct sh_desc, node); |
601 | desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | 602 | desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << |
602 | sh_chan->xmit_shift; | 603 | sh_chan->xmit_shift; |
603 | 604 | ||
604 | } | 605 | } |
605 | spin_unlock_bh(&sh_chan->desc_lock); | 606 | spin_unlock_bh(&sh_chan->desc_lock); |
606 | 607 | ||
607 | sh_dmae_chan_ld_cleanup(sh_chan, true); | 608 | sh_dmae_chan_ld_cleanup(sh_chan, true); |
608 | 609 | ||
609 | return 0; | 610 | return 0; |
610 | } | 611 | } |
611 | 612 | ||
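This function is the driver-side half of the change: sh_dmae_control() now matches the extended device_control prototype installed below via shdev->common.device_control. A hedged caller-side sketch (assumed client code, not part of this diff) of how the new argument is passed:

	#include <linux/dmaengine.h>

	/* DMA_TERMINATE_ALL takes no payload, so the new arg is simply 0;
	 * commands added later can pass a pointer cast to unsigned long,
	 * ioctl-style. */
	static int stop_all_transfers(struct dma_chan *chan)
	{
		return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	}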
612 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | 613 | static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) |
613 | { | 614 | { |
614 | struct sh_desc *desc, *_desc; | 615 | struct sh_desc *desc, *_desc; |
615 | /* Is the "exposed" head of a chain acked? */ | 616 | /* Is the "exposed" head of a chain acked? */ |
616 | bool head_acked = false; | 617 | bool head_acked = false; |
617 | dma_cookie_t cookie = 0; | 618 | dma_cookie_t cookie = 0; |
618 | dma_async_tx_callback callback = NULL; | 619 | dma_async_tx_callback callback = NULL; |
619 | void *param = NULL; | 620 | void *param = NULL; |
620 | 621 | ||
621 | spin_lock_bh(&sh_chan->desc_lock); | 622 | spin_lock_bh(&sh_chan->desc_lock); |
622 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { | 623 | list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { |
623 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | 624 | struct dma_async_tx_descriptor *tx = &desc->async_tx; |
624 | 625 | ||
625 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); | 626 | BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); |
626 | BUG_ON(desc->mark != DESC_SUBMITTED && | 627 | BUG_ON(desc->mark != DESC_SUBMITTED && |
627 | desc->mark != DESC_COMPLETED && | 628 | desc->mark != DESC_COMPLETED && |
628 | desc->mark != DESC_WAITING); | 629 | desc->mark != DESC_WAITING); |
629 | 630 | ||
630 | /* | 631 | /* |
631 | * queue is ordered, and we use this loop to (1) clean up all | 632 | * queue is ordered, and we use this loop to (1) clean up all |
632 | * completed descriptors, and to (2) update descriptor flags of | 633 | * completed descriptors, and to (2) update descriptor flags of |
633 | * any chunks in a (partially) completed chain | 634 | * any chunks in a (partially) completed chain |
634 | */ | 635 | */ |
635 | if (!all && desc->mark == DESC_SUBMITTED && | 636 | if (!all && desc->mark == DESC_SUBMITTED && |
636 | desc->cookie != cookie) | 637 | desc->cookie != cookie) |
637 | break; | 638 | break; |
638 | 639 | ||
639 | if (tx->cookie > 0) | 640 | if (tx->cookie > 0) |
640 | cookie = tx->cookie; | 641 | cookie = tx->cookie; |
641 | 642 | ||
642 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { | 643 | if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { |
643 | if (sh_chan->completed_cookie != desc->cookie - 1) | 644 | if (sh_chan->completed_cookie != desc->cookie - 1) |
644 | dev_dbg(sh_chan->dev, | 645 | dev_dbg(sh_chan->dev, |
645 | "Completing cookie %d, expected %d\n", | 646 | "Completing cookie %d, expected %d\n", |
646 | desc->cookie, | 647 | desc->cookie, |
647 | sh_chan->completed_cookie + 1); | 648 | sh_chan->completed_cookie + 1); |
648 | sh_chan->completed_cookie = desc->cookie; | 649 | sh_chan->completed_cookie = desc->cookie; |
649 | } | 650 | } |
650 | 651 | ||
651 | /* Call callback on the last chunk */ | 652 | /* Call callback on the last chunk */ |
652 | if (desc->mark == DESC_COMPLETED && tx->callback) { | 653 | if (desc->mark == DESC_COMPLETED && tx->callback) { |
653 | desc->mark = DESC_WAITING; | 654 | desc->mark = DESC_WAITING; |
654 | callback = tx->callback; | 655 | callback = tx->callback; |
655 | param = tx->callback_param; | 656 | param = tx->callback_param; |
656 | dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n", | 657 | dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n", |
657 | tx->cookie, tx, sh_chan->id); | 658 | tx->cookie, tx, sh_chan->id); |
658 | BUG_ON(desc->chunks != 1); | 659 | BUG_ON(desc->chunks != 1); |
659 | break; | 660 | break; |
660 | } | 661 | } |
661 | 662 | ||
662 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { | 663 | if (tx->cookie > 0 || tx->cookie == -EBUSY) { |
663 | if (desc->mark == DESC_COMPLETED) { | 664 | if (desc->mark == DESC_COMPLETED) { |
664 | BUG_ON(tx->cookie < 0); | 665 | BUG_ON(tx->cookie < 0); |
665 | desc->mark = DESC_WAITING; | 666 | desc->mark = DESC_WAITING; |
666 | } | 667 | } |
667 | head_acked = async_tx_test_ack(tx); | 668 | head_acked = async_tx_test_ack(tx); |
668 | } else { | 669 | } else { |
669 | switch (desc->mark) { | 670 | switch (desc->mark) { |
670 | case DESC_COMPLETED: | 671 | case DESC_COMPLETED: |
671 | desc->mark = DESC_WAITING; | 672 | desc->mark = DESC_WAITING; |
672 | /* Fall through */ | 673 | /* Fall through */ |
673 | case DESC_WAITING: | 674 | case DESC_WAITING: |
674 | if (head_acked) | 675 | if (head_acked) |
675 | async_tx_ack(&desc->async_tx); | 676 | async_tx_ack(&desc->async_tx); |
676 | } | 677 | } |
677 | } | 678 | } |
678 | 679 | ||
679 | dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n", | 680 | dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n", |
680 | tx, tx->cookie); | 681 | tx, tx->cookie); |
681 | 682 | ||
682 | if (((desc->mark == DESC_COMPLETED || | 683 | if (((desc->mark == DESC_COMPLETED || |
683 | desc->mark == DESC_WAITING) && | 684 | desc->mark == DESC_WAITING) && |
684 | async_tx_test_ack(&desc->async_tx)) || all) { | 685 | async_tx_test_ack(&desc->async_tx)) || all) { |
685 | /* Remove from ld_queue list */ | 686 | /* Remove from ld_queue list */ |
686 | desc->mark = DESC_IDLE; | 687 | desc->mark = DESC_IDLE; |
687 | list_move(&desc->node, &sh_chan->ld_free); | 688 | list_move(&desc->node, &sh_chan->ld_free); |
688 | } | 689 | } |
689 | } | 690 | } |
690 | spin_unlock_bh(&sh_chan->desc_lock); | 691 | spin_unlock_bh(&sh_chan->desc_lock); |
691 | 692 | ||
692 | if (callback) | 693 | if (callback) |
693 | callback(param); | 694 | callback(param); |
694 | 695 | ||
695 | return callback; | 696 | return callback; |
696 | } | 697 | } |
697 | 698 | ||
698 | /* | 699 | /* |
699 | * sh_dmae_chan_ld_cleanup - Clean up link descriptors | 700 | * sh_dmae_chan_ld_cleanup - Clean up link descriptors |
700 | * | 701 | * |
701 | * This function cleans up the ld_queue of the DMA channel. | 702 | * This function cleans up the ld_queue of the DMA channel. |
702 | */ | 703 | */ |
703 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) | 704 | static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) |
704 | { | 705 | { |
705 | while (__ld_cleanup(sh_chan, all)) | 706 | while (__ld_cleanup(sh_chan, all)) |
706 | ; | 707 | ; |
707 | } | 708 | } |
708 | 709 | ||
709 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) | 710 | static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) |
710 | { | 711 | { |
711 | struct sh_desc *desc; | 712 | struct sh_desc *desc; |
712 | 713 | ||
713 | spin_lock_bh(&sh_chan->desc_lock); | 714 | spin_lock_bh(&sh_chan->desc_lock); |
714 | /* DMA work check */ | 715 | /* DMA work check */ |
715 | if (dmae_is_busy(sh_chan)) { | 716 | if (dmae_is_busy(sh_chan)) { |
716 | spin_unlock_bh(&sh_chan->desc_lock); | 717 | spin_unlock_bh(&sh_chan->desc_lock); |
717 | return; | 718 | return; |
718 | } | 719 | } |
719 | 720 | ||
720 | /* Find the first not-yet-transferred descriptor */ | 721 | /* Find the first not-yet-transferred descriptor */ |
721 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | 722 | list_for_each_entry(desc, &sh_chan->ld_queue, node) |
722 | if (desc->mark == DESC_SUBMITTED) { | 723 | if (desc->mark == DESC_SUBMITTED) { |
723 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", | 724 | dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", |
724 | desc->async_tx.cookie, sh_chan->id, | 725 | desc->async_tx.cookie, sh_chan->id, |
725 | desc->hw.tcr, desc->hw.sar, desc->hw.dar); | 726 | desc->hw.tcr, desc->hw.sar, desc->hw.dar); |
726 | /* Get the ld start address from ld_queue */ | 727 | /* Get the ld start address from ld_queue */ |
727 | dmae_set_reg(sh_chan, &desc->hw); | 728 | dmae_set_reg(sh_chan, &desc->hw); |
728 | dmae_start(sh_chan); | 729 | dmae_start(sh_chan); |
729 | break; | 730 | break; |
730 | } | 731 | } |
731 | 732 | ||
732 | spin_unlock_bh(&sh_chan->desc_lock); | 733 | spin_unlock_bh(&sh_chan->desc_lock); |
733 | } | 734 | } |
734 | 735 | ||
735 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) | 736 | static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) |
736 | { | 737 | { |
737 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 738 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
738 | sh_chan_xfer_ld_queue(sh_chan); | 739 | sh_chan_xfer_ld_queue(sh_chan); |
739 | } | 740 | } |
740 | 741 | ||
741 | static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, | 742 | static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, |
742 | dma_cookie_t cookie, | 743 | dma_cookie_t cookie, |
743 | struct dma_tx_state *txstate) | 744 | struct dma_tx_state *txstate) |
744 | { | 745 | { |
745 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); | 746 | struct sh_dmae_chan *sh_chan = to_sh_chan(chan); |
746 | dma_cookie_t last_used; | 747 | dma_cookie_t last_used; |
747 | dma_cookie_t last_complete; | 748 | dma_cookie_t last_complete; |
748 | enum dma_status status; | 749 | enum dma_status status; |
749 | 750 | ||
750 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 751 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
751 | 752 | ||
752 | last_used = chan->cookie; | 753 | last_used = chan->cookie; |
753 | last_complete = sh_chan->completed_cookie; | 754 | last_complete = sh_chan->completed_cookie; |
754 | BUG_ON(last_complete < 0); | 755 | BUG_ON(last_complete < 0); |
755 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 756 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
756 | 757 | ||
757 | spin_lock_bh(&sh_chan->desc_lock); | 758 | spin_lock_bh(&sh_chan->desc_lock); |
758 | 759 | ||
759 | status = dma_async_is_complete(cookie, last_complete, last_used); | 760 | status = dma_async_is_complete(cookie, last_complete, last_used); |
760 | 761 | ||
761 | /* | 762 | /* |
762 | * If we don't find cookie on the queue, it has been aborted and we have | 763 | * If we don't find cookie on the queue, it has been aborted and we have |
763 | * to report error | 764 | * to report error |
764 | */ | 765 | */ |
765 | if (status != DMA_SUCCESS) { | 766 | if (status != DMA_SUCCESS) { |
766 | struct sh_desc *desc; | 767 | struct sh_desc *desc; |
767 | status = DMA_ERROR; | 768 | status = DMA_ERROR; |
768 | list_for_each_entry(desc, &sh_chan->ld_queue, node) | 769 | list_for_each_entry(desc, &sh_chan->ld_queue, node) |
769 | if (desc->cookie == cookie) { | 770 | if (desc->cookie == cookie) { |
770 | status = DMA_IN_PROGRESS; | 771 | status = DMA_IN_PROGRESS; |
771 | break; | 772 | break; |
772 | } | 773 | } |
773 | } | 774 | } |
774 | 775 | ||
775 | spin_unlock_bh(&sh_chan->desc_lock); | 776 | spin_unlock_bh(&sh_chan->desc_lock); |
776 | 777 | ||
777 | return status; | 778 | return status; |
778 | } | 779 | } |
779 | 780 | ||
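Because an aborted cookie is reported as DMA_ERROR here, a client polling loop can distinguish completion from termination. A hedged sketch (assumed client code, not part of this diff; passing a NULL dma_tx_state is tolerated by dma_set_tx_state()):

	static bool transfer_completed(struct dma_chan *chan, dma_cookie_t cookie)
	{
		enum dma_status s;

		do {
			s = chan->device->device_tx_status(chan, cookie, NULL);
		} while (s == DMA_IN_PROGRESS);

		/* DMA_ERROR means the cookie was aborted off the queue */
		return s == DMA_SUCCESS;
	}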
780 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) | 781 | static irqreturn_t sh_dmae_interrupt(int irq, void *data) |
781 | { | 782 | { |
782 | irqreturn_t ret = IRQ_NONE; | 783 | irqreturn_t ret = IRQ_NONE; |
783 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 784 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; |
784 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); | 785 | u32 chcr = sh_dmae_readl(sh_chan, CHCR); |
785 | 786 | ||
786 | if (chcr & CHCR_TE) { | 787 | if (chcr & CHCR_TE) { |
787 | /* DMA stop */ | 788 | /* DMA stop */ |
788 | dmae_halt(sh_chan); | 789 | dmae_halt(sh_chan); |
789 | 790 | ||
790 | ret = IRQ_HANDLED; | 791 | ret = IRQ_HANDLED; |
791 | tasklet_schedule(&sh_chan->tasklet); | 792 | tasklet_schedule(&sh_chan->tasklet); |
792 | } | 793 | } |
793 | 794 | ||
794 | return ret; | 795 | return ret; |
795 | } | 796 | } |
796 | 797 | ||
797 | #if defined(CONFIG_CPU_SH4) | 798 | #if defined(CONFIG_CPU_SH4) |
798 | static irqreturn_t sh_dmae_err(int irq, void *data) | 799 | static irqreturn_t sh_dmae_err(int irq, void *data) |
799 | { | 800 | { |
800 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; | 801 | struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; |
801 | int i; | 802 | int i; |
802 | 803 | ||
803 | /* halt the dma controller */ | 804 | /* halt the dma controller */ |
804 | sh_dmae_ctl_stop(shdev); | 805 | sh_dmae_ctl_stop(shdev); |
805 | 806 | ||
806 | /* We cannot detect which channel caused the error, so we have to reset them all */ | 807 | /* We cannot detect which channel caused the error, so we have to reset them all */ |
807 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { | 808 | for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { |
808 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 809 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
809 | if (sh_chan) { | 810 | if (sh_chan) { |
810 | struct sh_desc *desc; | 811 | struct sh_desc *desc; |
811 | /* Stop the channel */ | 812 | /* Stop the channel */ |
812 | dmae_halt(sh_chan); | 813 | dmae_halt(sh_chan); |
813 | /* Complete all */ | 814 | /* Complete all */ |
814 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 815 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
815 | struct dma_async_tx_descriptor *tx = &desc->async_tx; | 816 | struct dma_async_tx_descriptor *tx = &desc->async_tx; |
816 | desc->mark = DESC_IDLE; | 817 | desc->mark = DESC_IDLE; |
817 | if (tx->callback) | 818 | if (tx->callback) |
818 | tx->callback(tx->callback_param); | 819 | tx->callback(tx->callback_param); |
819 | } | 820 | } |
820 | list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); | 821 | list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); |
821 | } | 822 | } |
822 | } | 823 | } |
823 | sh_dmae_rst(shdev); | 824 | sh_dmae_rst(shdev); |
824 | 825 | ||
825 | return IRQ_HANDLED; | 826 | return IRQ_HANDLED; |
826 | } | 827 | } |
827 | #endif | 828 | #endif |
828 | 829 | ||
829 | static void dmae_do_tasklet(unsigned long data) | 830 | static void dmae_do_tasklet(unsigned long data) |
830 | { | 831 | { |
831 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; | 832 | struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; |
832 | struct sh_desc *desc; | 833 | struct sh_desc *desc; |
833 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); | 834 | u32 sar_buf = sh_dmae_readl(sh_chan, SAR); |
834 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); | 835 | u32 dar_buf = sh_dmae_readl(sh_chan, DAR); |
835 | 836 | ||
836 | spin_lock(&sh_chan->desc_lock); | 837 | spin_lock(&sh_chan->desc_lock); |
837 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { | 838 | list_for_each_entry(desc, &sh_chan->ld_queue, node) { |
838 | if (desc->mark == DESC_SUBMITTED && | 839 | if (desc->mark == DESC_SUBMITTED && |
839 | ((desc->direction == DMA_FROM_DEVICE && | 840 | ((desc->direction == DMA_FROM_DEVICE && |
840 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || | 841 | (desc->hw.dar + desc->hw.tcr) == dar_buf) || |
841 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { | 842 | (desc->hw.sar + desc->hw.tcr) == sar_buf)) { |
842 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", | 843 | dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", |
843 | desc->async_tx.cookie, &desc->async_tx, | 844 | desc->async_tx.cookie, &desc->async_tx, |
844 | desc->hw.dar); | 845 | desc->hw.dar); |
845 | desc->mark = DESC_COMPLETED; | 846 | desc->mark = DESC_COMPLETED; |
846 | break; | 847 | break; |
847 | } | 848 | } |
848 | } | 849 | } |
849 | spin_unlock(&sh_chan->desc_lock); | 850 | spin_unlock(&sh_chan->desc_lock); |
850 | 851 | ||
851 | /* Next desc */ | 852 | /* Next desc */ |
852 | sh_chan_xfer_ld_queue(sh_chan); | 853 | sh_chan_xfer_ld_queue(sh_chan); |
853 | sh_dmae_chan_ld_cleanup(sh_chan, false); | 854 | sh_dmae_chan_ld_cleanup(sh_chan, false); |
854 | } | 855 | } |
855 | 856 | ||
856 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, | 857 | static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, |
857 | int irq, unsigned long flags) | 858 | int irq, unsigned long flags) |
858 | { | 859 | { |
859 | int err; | 860 | int err; |
860 | struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; | 861 | struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; |
861 | struct platform_device *pdev = to_platform_device(shdev->common.dev); | 862 | struct platform_device *pdev = to_platform_device(shdev->common.dev); |
862 | struct sh_dmae_chan *new_sh_chan; | 863 | struct sh_dmae_chan *new_sh_chan; |
863 | 864 | ||
864 | /* alloc channel */ | 865 | /* alloc channel */ |
865 | new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); | 866 | new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); |
866 | if (!new_sh_chan) { | 867 | if (!new_sh_chan) { |
867 | dev_err(shdev->common.dev, | 868 | dev_err(shdev->common.dev, |
868 | "No free memory for allocating dma channels!\n"); | 869 | "No free memory for allocating dma channels!\n"); |
869 | return -ENOMEM; | 870 | return -ENOMEM; |
870 | } | 871 | } |
871 | 872 | ||
872 | /* copy struct dma_device */ | 873 | /* copy struct dma_device */ |
873 | new_sh_chan->common.device = &shdev->common; | 874 | new_sh_chan->common.device = &shdev->common; |
874 | 875 | ||
875 | new_sh_chan->dev = shdev->common.dev; | 876 | new_sh_chan->dev = shdev->common.dev; |
876 | new_sh_chan->id = id; | 877 | new_sh_chan->id = id; |
877 | new_sh_chan->irq = irq; | 878 | new_sh_chan->irq = irq; |
878 | new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); | 879 | new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); |
879 | 880 | ||
880 | /* Init DMA tasklet */ | 881 | /* Init DMA tasklet */ |
881 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, | 882 | tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, |
882 | (unsigned long)new_sh_chan); | 883 | (unsigned long)new_sh_chan); |
883 | 884 | ||
884 | /* Init the channel */ | 885 | /* Init the channel */ |
885 | dmae_init(new_sh_chan); | 886 | dmae_init(new_sh_chan); |
886 | 887 | ||
887 | spin_lock_init(&new_sh_chan->desc_lock); | 888 | spin_lock_init(&new_sh_chan->desc_lock); |
888 | 889 | ||
889 | /* Init descriptor management lists */ | 890 | /* Init descriptor management lists */ |
890 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); | 891 | INIT_LIST_HEAD(&new_sh_chan->ld_queue); |
891 | INIT_LIST_HEAD(&new_sh_chan->ld_free); | 892 | INIT_LIST_HEAD(&new_sh_chan->ld_free); |
892 | 893 | ||
893 | /* Add the channel to DMA device channel list */ | 894 | /* Add the channel to DMA device channel list */ |
894 | list_add_tail(&new_sh_chan->common.device_node, | 895 | list_add_tail(&new_sh_chan->common.device_node, |
895 | &shdev->common.channels); | 896 | &shdev->common.channels); |
896 | shdev->common.chancnt++; | 897 | shdev->common.chancnt++; |
897 | 898 | ||
898 | if (pdev->id >= 0) | 899 | if (pdev->id >= 0) |
899 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | 900 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
900 | "sh-dmae%d.%d", pdev->id, new_sh_chan->id); | 901 | "sh-dmae%d.%d", pdev->id, new_sh_chan->id); |
901 | else | 902 | else |
902 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), | 903 | snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), |
903 | "sh-dma%d", new_sh_chan->id); | 904 | "sh-dma%d", new_sh_chan->id); |
904 | 905 | ||
905 | /* set up channel irq */ | 906 | /* set up channel irq */ |
906 | err = request_irq(irq, &sh_dmae_interrupt, flags, | 907 | err = request_irq(irq, &sh_dmae_interrupt, flags, |
907 | new_sh_chan->dev_id, new_sh_chan); | 908 | new_sh_chan->dev_id, new_sh_chan); |
908 | if (err) { | 909 | if (err) { |
909 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " | 910 | dev_err(shdev->common.dev, "DMA channel %d request_irq error " |
910 | "with return %d\n", id, err); | 911 | "with return %d\n", id, err); |
911 | goto err_no_irq; | 912 | goto err_no_irq; |
912 | } | 913 | } |
913 | 914 | ||
914 | shdev->chan[id] = new_sh_chan; | 915 | shdev->chan[id] = new_sh_chan; |
915 | return 0; | 916 | return 0; |
916 | 917 | ||
917 | err_no_irq: | 918 | err_no_irq: |
918 | /* remove from dmaengine device node */ | 919 | /* remove from dmaengine device node */ |
919 | list_del(&new_sh_chan->common.device_node); | 920 | list_del(&new_sh_chan->common.device_node); |
920 | kfree(new_sh_chan); | 921 | kfree(new_sh_chan); |
921 | return err; | 922 | return err; |
922 | } | 923 | } |
923 | 924 | ||
924 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) | 925 | static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) |
925 | { | 926 | { |
926 | int i; | 927 | int i; |
927 | 928 | ||
928 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { | 929 | for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { |
929 | if (shdev->chan[i]) { | 930 | if (shdev->chan[i]) { |
930 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; | 931 | struct sh_dmae_chan *sh_chan = shdev->chan[i]; |
931 | 932 | ||
932 | free_irq(sh_chan->irq, sh_chan); | 933 | free_irq(sh_chan->irq, sh_chan); |
933 | 934 | ||
934 | list_del(&sh_chan->common.device_node); | 935 | list_del(&sh_chan->common.device_node); |
935 | kfree(sh_chan); | 936 | kfree(sh_chan); |
936 | shdev->chan[i] = NULL; | 937 | shdev->chan[i] = NULL; |
937 | } | 938 | } |
938 | } | 939 | } |
939 | shdev->common.chancnt = 0; | 940 | shdev->common.chancnt = 0; |
940 | } | 941 | } |
941 | 942 | ||
942 | static int __init sh_dmae_probe(struct platform_device *pdev) | 943 | static int __init sh_dmae_probe(struct platform_device *pdev) |
943 | { | 944 | { |
944 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; | 945 | struct sh_dmae_pdata *pdata = pdev->dev.platform_data; |
945 | unsigned long irqflags = IRQF_DISABLED, | 946 | unsigned long irqflags = IRQF_DISABLED, |
946 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; | 947 | chan_flag[SH_DMAC_MAX_CHANNELS] = {}; |
947 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; | 948 | int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; |
948 | int err, i, irq_cnt = 0, irqres = 0; | 949 | int err, i, irq_cnt = 0, irqres = 0; |
949 | struct sh_dmae_device *shdev; | 950 | struct sh_dmae_device *shdev; |
950 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; | 951 | struct resource *chan, *dmars, *errirq_res, *chanirq_res; |
951 | 952 | ||
952 | /* get platform data */ | 953 | /* get platform data */ |
953 | if (!pdata || !pdata->channel_num) | 954 | if (!pdata || !pdata->channel_num) |
954 | return -ENODEV; | 955 | return -ENODEV; |
955 | 956 | ||
956 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 957 | chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
957 | /* DMARS area is optional; if absent, this controller cannot do slave DMA */ | 958 | /* DMARS area is optional; if absent, this controller cannot do slave DMA */ |
958 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 959 | dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
959 | /* | 960 | /* |
960 | * IRQ resources: | 961 | * IRQ resources: |
961 | * 1. there must always be at least one IRQ IO resource. On SH4 it is | 962 | * 1. there must always be at least one IRQ IO resource. On SH4 it is |
962 | * the error IRQ, in which case it is the only IRQ in this resource: | 963 | * the error IRQ, in which case it is the only IRQ in this resource: |
963 | * start == end. If it is the only IRQ resource, all channels also | 964 | * start == end. If it is the only IRQ resource, all channels also |
964 | * use the same IRQ. | 965 | * use the same IRQ. |
965 | * 2. DMA channel IRQ resources can be specified one per resource or in | 966 | * 2. DMA channel IRQ resources can be specified one per resource or in |
966 | * ranges (start != end) | 967 | * ranges (start != end) |
967 | * 3. iff all events (channels and, optionally, error) on this | 968 | * 3. iff all events (channels and, optionally, error) on this |
968 | * controller use the same IRQ, only one IRQ resource can be | 969 | * controller use the same IRQ, only one IRQ resource can be |
969 | * specified, otherwise there must be one IRQ per channel, even if | 970 | * specified, otherwise there must be one IRQ per channel, even if |
970 | * some of them are equal | 971 | * some of them are equal |
971 | * 4. if all IRQs on this controller are equal or if some specific IRQs | 972 | * 4. if all IRQs on this controller are equal or if some specific IRQs |
972 | * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be | 973 | * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be |
973 | * requested with the IRQF_SHARED flag | 974 | * requested with the IRQF_SHARED flag |
974 | */ | 975 | */ |
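As an illustration of case 1 in the comment above, a board where every channel shares one multiplexed IRQ would declare a single IRQ resource with start == end. A hedged sketch with assumed addresses and IRQ number (not taken from this diff):

	static struct resource sh_dmae_resources[] = {
		{
			/* channel register region (assumed addresses) */
			.start	= 0xfe008020,
			.end	= 0xfe00808f,
			.flags	= IORESOURCE_MEM,
		}, {
			/* one IRQ shared by the error event and all channels */
			.start	= 34,	/* assumed IRQ number */
			.end	= 34,
			.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
		},
	};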
975 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 976 | errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
976 | if (!chan || !errirq_res) | 977 | if (!chan || !errirq_res) |
977 | return -ENODEV; | 978 | return -ENODEV; |
978 | 979 | ||
979 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { | 980 | if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { |
980 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); | 981 | dev_err(&pdev->dev, "DMAC register region already claimed\n"); |
981 | return -EBUSY; | 982 | return -EBUSY; |
982 | } | 983 | } |
983 | 984 | ||
984 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { | 985 | if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { |
985 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); | 986 | dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); |
986 | err = -EBUSY; | 987 | err = -EBUSY; |
987 | goto ermrdmars; | 988 | goto ermrdmars; |
988 | } | 989 | } |
989 | 990 | ||
990 | err = -ENOMEM; | 991 | err = -ENOMEM; |
991 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); | 992 | shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); |
992 | if (!shdev) { | 993 | if (!shdev) { |
993 | dev_err(&pdev->dev, "Not enough memory\n"); | 994 | dev_err(&pdev->dev, "Not enough memory\n"); |
994 | goto ealloc; | 995 | goto ealloc; |
995 | } | 996 | } |
996 | 997 | ||
997 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); | 998 | shdev->chan_reg = ioremap(chan->start, resource_size(chan)); |
998 | if (!shdev->chan_reg) | 999 | if (!shdev->chan_reg) |
999 | goto emapchan; | 1000 | goto emapchan; |
1000 | if (dmars) { | 1001 | if (dmars) { |
1001 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); | 1002 | shdev->dmars = ioremap(dmars->start, resource_size(dmars)); |
1002 | if (!shdev->dmars) | 1003 | if (!shdev->dmars) |
1003 | goto emapdmars; | 1004 | goto emapdmars; |
1004 | } | 1005 | } |
1005 | 1006 | ||
1006 | /* platform data */ | 1007 | /* platform data */ |
1007 | shdev->pdata = pdata; | 1008 | shdev->pdata = pdata; |
1008 | 1009 | ||
1009 | pm_runtime_enable(&pdev->dev); | 1010 | pm_runtime_enable(&pdev->dev); |
1010 | pm_runtime_get_sync(&pdev->dev); | 1011 | pm_runtime_get_sync(&pdev->dev); |
1011 | 1012 | ||
1012 | /* reset dma controller */ | 1013 | /* reset dma controller */ |
1013 | err = sh_dmae_rst(shdev); | 1014 | err = sh_dmae_rst(shdev); |
1014 | if (err) | 1015 | if (err) |
1015 | goto rst_err; | 1016 | goto rst_err; |
1016 | 1017 | ||
1017 | INIT_LIST_HEAD(&shdev->common.channels); | 1018 | INIT_LIST_HEAD(&shdev->common.channels); |
1018 | 1019 | ||
1019 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); | 1020 | dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); |
1020 | if (dmars) | 1021 | if (dmars) |
1021 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); | 1022 | dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); |
1022 | 1023 | ||
1023 | shdev->common.device_alloc_chan_resources | 1024 | shdev->common.device_alloc_chan_resources |
1024 | = sh_dmae_alloc_chan_resources; | 1025 | = sh_dmae_alloc_chan_resources; |
1025 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; | 1026 | shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; |
1026 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; | 1027 | shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; |
1027 | shdev->common.device_tx_status = sh_dmae_tx_status; | 1028 | shdev->common.device_tx_status = sh_dmae_tx_status; |
1028 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; | 1029 | shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; |
1029 | 1030 | ||
1030 | /* Compulsory for DMA_SLAVE fields */ | 1031 | /* Compulsory for DMA_SLAVE fields */ |
1031 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; | 1032 | shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; |
1032 | shdev->common.device_control = sh_dmae_control; | 1033 | shdev->common.device_control = sh_dmae_control; |
1033 | 1034 | ||
1034 | shdev->common.dev = &pdev->dev; | 1035 | shdev->common.dev = &pdev->dev; |
1035 | /* Default transfer size of 32 bytes requires 32-byte alignment */ | 1036 | /* Default transfer size of 32 bytes requires 32-byte alignment */ |
1036 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; | 1037 | shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; |
1037 | 1038 | ||
1038 | #if defined(CONFIG_CPU_SH4) | 1039 | #if defined(CONFIG_CPU_SH4) |
1039 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); | 1040 | chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); |
1040 | 1041 | ||
1041 | if (!chanirq_res) | 1042 | if (!chanirq_res) |
1042 | chanirq_res = errirq_res; | 1043 | chanirq_res = errirq_res; |
1043 | else | 1044 | else |
1044 | irqres++; | 1045 | irqres++; |
1045 | 1046 | ||
1046 | if (chanirq_res == errirq_res || | 1047 | if (chanirq_res == errirq_res || |
1047 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) | 1048 | (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) |
1048 | irqflags = IRQF_SHARED; | 1049 | irqflags = IRQF_SHARED; |
1049 | 1050 | ||
1050 | errirq = errirq_res->start; | 1051 | errirq = errirq_res->start; |
1051 | 1052 | ||
1052 | err = request_irq(errirq, sh_dmae_err, irqflags, | 1053 | err = request_irq(errirq, sh_dmae_err, irqflags, |
1053 | "DMAC Address Error", shdev); | 1054 | "DMAC Address Error", shdev); |
1054 | if (err) { | 1055 | if (err) { |
1055 | dev_err(&pdev->dev, | 1056 | dev_err(&pdev->dev, |
1056 | "DMA failed requesting irq #%d, error %d\n", | 1057 | "DMA failed requesting irq #%d, error %d\n", |
1057 | errirq, err); | 1058 | errirq, err); |
1058 | goto eirq_err; | 1059 | goto eirq_err; |
1059 | } | 1060 | } |
1060 | 1061 | ||
1061 | #else | 1062 | #else |
1062 | chanirq_res = errirq_res; | 1063 | chanirq_res = errirq_res; |
1063 | #endif /* CONFIG_CPU_SH4 */ | 1064 | #endif /* CONFIG_CPU_SH4 */ |
1064 | 1065 | ||
1065 | if (chanirq_res->start == chanirq_res->end && | 1066 | if (chanirq_res->start == chanirq_res->end && |
1066 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { | 1067 | !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { |
1067 | /* Special case - all multiplexed */ | 1068 | /* Special case - all multiplexed */ |
1068 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { | 1069 | for (; irq_cnt < pdata->channel_num; irq_cnt++) { |
1069 | chan_irq[irq_cnt] = chanirq_res->start; | 1070 | chan_irq[irq_cnt] = chanirq_res->start; |
1070 | chan_flag[irq_cnt] = IRQF_SHARED; | 1071 | chan_flag[irq_cnt] = IRQF_SHARED; |
1071 | } | 1072 | } |
1072 | } else { | 1073 | } else { |
1073 | do { | 1074 | do { |
1074 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { | 1075 | for (i = chanirq_res->start; i <= chanirq_res->end; i++) { |
1075 | if ((errirq_res->flags & IORESOURCE_BITS) == | 1076 | if ((errirq_res->flags & IORESOURCE_BITS) == |
1076 | IORESOURCE_IRQ_SHAREABLE) | 1077 | IORESOURCE_IRQ_SHAREABLE) |
1077 | chan_flag[irq_cnt] = IRQF_SHARED; | 1078 | chan_flag[irq_cnt] = IRQF_SHARED; |
1078 | else | 1079 | else |
1079 | chan_flag[irq_cnt] = IRQF_DISABLED; | 1080 | chan_flag[irq_cnt] = IRQF_DISABLED; |
1080 | dev_dbg(&pdev->dev, | 1081 | dev_dbg(&pdev->dev, |
1081 | "Found IRQ %d for channel %d\n", | 1082 | "Found IRQ %d for channel %d\n", |
1082 | i, irq_cnt); | 1083 | i, irq_cnt); |
1083 | chan_irq[irq_cnt++] = i; | 1084 | chan_irq[irq_cnt++] = i; |
1084 | } | 1085 | } |
1085 | chanirq_res = platform_get_resource(pdev, | 1086 | chanirq_res = platform_get_resource(pdev, |
1086 | IORESOURCE_IRQ, ++irqres); | 1087 | IORESOURCE_IRQ, ++irqres); |
1087 | } while (irq_cnt < pdata->channel_num && chanirq_res); | 1088 | } while (irq_cnt < pdata->channel_num && chanirq_res); |
1088 | } | 1089 | } |
1089 | 1090 | ||
1090 | if (irq_cnt < pdata->channel_num) | 1091 | if (irq_cnt < pdata->channel_num) |
1091 | goto eirqres; | 1092 | goto eirqres; |
1092 | 1093 | ||
1093 | /* Create DMA Channel */ | 1094 | /* Create DMA Channel */ |
1094 | for (i = 0; i < pdata->channel_num; i++) { | 1095 | for (i = 0; i < pdata->channel_num; i++) { |
1095 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); | 1096 | err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); |
1096 | if (err) | 1097 | if (err) |
1097 | goto chan_probe_err; | 1098 | goto chan_probe_err; |
1098 | } | 1099 | } |
1099 | 1100 | ||
1100 | pm_runtime_put(&pdev->dev); | 1101 | pm_runtime_put(&pdev->dev); |
1101 | 1102 | ||
1102 | platform_set_drvdata(pdev, shdev); | 1103 | platform_set_drvdata(pdev, shdev); |
1103 | dma_async_device_register(&shdev->common); | 1104 | dma_async_device_register(&shdev->common); |
1104 | 1105 | ||
1105 | return err; | 1106 | return err; |
1106 | 1107 | ||
1107 | chan_probe_err: | 1108 | chan_probe_err: |
1108 | sh_dmae_chan_remove(shdev); | 1109 | sh_dmae_chan_remove(shdev); |
1109 | eirqres: | 1110 | eirqres: |
1110 | #if defined(CONFIG_CPU_SH4) | 1111 | #if defined(CONFIG_CPU_SH4) |
1111 | free_irq(errirq, shdev); | 1112 | free_irq(errirq, shdev); |
1112 | eirq_err: | 1113 | eirq_err: |
1113 | #endif | 1114 | #endif |
1114 | rst_err: | 1115 | rst_err: |
1115 | pm_runtime_put(&pdev->dev); | 1116 | pm_runtime_put(&pdev->dev); |
1116 | if (dmars) | 1117 | if (dmars) |
1117 | iounmap(shdev->dmars); | 1118 | iounmap(shdev->dmars); |
1118 | emapdmars: | 1119 | emapdmars: |
1119 | iounmap(shdev->chan_reg); | 1120 | iounmap(shdev->chan_reg); |
1120 | emapchan: | 1121 | emapchan: |
1121 | kfree(shdev); | 1122 | kfree(shdev); |
1122 | ealloc: | 1123 | ealloc: |
1123 | if (dmars) | 1124 | if (dmars) |
1124 | release_mem_region(dmars->start, resource_size(dmars)); | 1125 | release_mem_region(dmars->start, resource_size(dmars)); |
1125 | ermrdmars: | 1126 | ermrdmars: |
1126 | release_mem_region(chan->start, resource_size(chan)); | 1127 | release_mem_region(chan->start, resource_size(chan)); |
1127 | 1128 | ||
1128 | return err; | 1129 | return err; |
1129 | } | 1130 | } |
1130 | 1131 | ||
1131 | static int __exit sh_dmae_remove(struct platform_device *pdev) | 1132 | static int __exit sh_dmae_remove(struct platform_device *pdev) |
1132 | { | 1133 | { |
1133 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1134 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
1134 | struct resource *res; | 1135 | struct resource *res; |
1135 | int errirq = platform_get_irq(pdev, 0); | 1136 | int errirq = platform_get_irq(pdev, 0); |
1136 | 1137 | ||
1137 | dma_async_device_unregister(&shdev->common); | 1138 | dma_async_device_unregister(&shdev->common); |
1138 | 1139 | ||
1139 | if (errirq > 0) | 1140 | if (errirq > 0) |
1140 | free_irq(errirq, shdev); | 1141 | free_irq(errirq, shdev); |
1141 | 1142 | ||
1142 | /* channel data remove */ | 1143 | /* channel data remove */ |
1143 | sh_dmae_chan_remove(shdev); | 1144 | sh_dmae_chan_remove(shdev); |
1144 | 1145 | ||
1145 | pm_runtime_disable(&pdev->dev); | 1146 | pm_runtime_disable(&pdev->dev); |
1146 | 1147 | ||
1147 | if (shdev->dmars) | 1148 | if (shdev->dmars) |
1148 | iounmap(shdev->dmars); | 1149 | iounmap(shdev->dmars); |
1149 | iounmap(shdev->chan_reg); | 1150 | iounmap(shdev->chan_reg); |
1150 | 1151 | ||
1151 | kfree(shdev); | 1152 | kfree(shdev); |
1152 | 1153 | ||
1153 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1154 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1154 | if (res) | 1155 | if (res) |
1155 | release_mem_region(res->start, resource_size(res)); | 1156 | release_mem_region(res->start, resource_size(res)); |
1156 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 1157 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
1157 | if (res) | 1158 | if (res) |
1158 | release_mem_region(res->start, resource_size(res)); | 1159 | release_mem_region(res->start, resource_size(res)); |
1159 | 1160 | ||
1160 | return 0; | 1161 | return 0; |
1161 | } | 1162 | } |
1162 | 1163 | ||
1163 | static void sh_dmae_shutdown(struct platform_device *pdev) | 1164 | static void sh_dmae_shutdown(struct platform_device *pdev) |
1164 | { | 1165 | { |
1165 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); | 1166 | struct sh_dmae_device *shdev = platform_get_drvdata(pdev); |
1166 | sh_dmae_ctl_stop(shdev); | 1167 | sh_dmae_ctl_stop(shdev); |
1167 | } | 1168 | } |
1168 | 1169 | ||
1169 | static struct platform_driver sh_dmae_driver = { | 1170 | static struct platform_driver sh_dmae_driver = { |
1170 | .remove = __exit_p(sh_dmae_remove), | 1171 | .remove = __exit_p(sh_dmae_remove), |
1171 | .shutdown = sh_dmae_shutdown, | 1172 | .shutdown = sh_dmae_shutdown, |
1172 | .driver = { | 1173 | .driver = { |
1173 | .name = "sh-dma-engine", | 1174 | .name = "sh-dma-engine", |
1174 | }, | 1175 | }, |
1175 | }; | 1176 | }; |
1176 | 1177 | ||
1177 | static int __init sh_dmae_init(void) | 1178 | static int __init sh_dmae_init(void) |
1178 | { | 1179 | { |
1179 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); | 1180 | return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); |
1180 | } | 1181 | } |
1181 | module_init(sh_dmae_init); | 1182 | module_init(sh_dmae_init); |
1182 | 1183 | ||
1183 | static void __exit sh_dmae_exit(void) | 1184 | static void __exit sh_dmae_exit(void) |
1184 | { | 1185 | { |
1185 | platform_driver_unregister(&sh_dmae_driver); | 1186 | platform_driver_unregister(&sh_dmae_driver); |
1186 | } | 1187 | } |
1187 | module_exit(sh_dmae_exit); | 1188 | module_exit(sh_dmae_exit); |
1188 | 1189 | ||
1189 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); | 1190 | MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); |
1190 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); | 1191 | MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); |
1191 | MODULE_LICENSE("GPL"); | 1192 | MODULE_LICENSE("GPL"); |
1192 | 1193 |
drivers/dma/ste_dma40.c
1 | /* | 1 | /* |
2 | * drivers/dma/ste_dma40.c | 2 | * drivers/dma/ste_dma40.c |
3 | * | 3 | * |
4 | * Copyright (C) ST-Ericsson 2007-2010 | 4 | * Copyright (C) ST-Ericsson 2007-2010 |
5 | * License terms: GNU General Public License (GPL) version 2 | 5 | * License terms: GNU General Public License (GPL) version 2 |
6 | * Author: Per Friden <per.friden@stericsson.com> | 6 | * Author: Per Friden <per.friden@stericsson.com> |
7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | 7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/slab.h> | 12 | #include <linux/slab.h> |
13 | #include <linux/dmaengine.h> | 13 | #include <linux/dmaengine.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | 17 | ||
18 | #include <plat/ste_dma40.h> | 18 | #include <plat/ste_dma40.h> |
19 | 19 | ||
20 | #include "ste_dma40_ll.h" | 20 | #include "ste_dma40_ll.h" |
21 | 21 | ||
22 | #define D40_NAME "dma40" | 22 | #define D40_NAME "dma40" |
23 | 23 | ||
24 | #define D40_PHY_CHAN -1 | 24 | #define D40_PHY_CHAN -1 |
25 | 25 | ||
26 | /* For masking out/in 2 bit channel positions */ | 26 | /* For masking out/in 2 bit channel positions */ |
27 | #define D40_CHAN_POS(chan) (2 * (chan / 2)) | 27 | #define D40_CHAN_POS(chan) (2 * (chan / 2)) |
28 | #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) | 28 | #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) |
29 | 29 | ||
30 | /* Maximum iterations taken before giving up suspending a channel */ | 30 | /* Maximum iterations taken before giving up suspending a channel */ |
31 | #define D40_SUSPEND_MAX_IT 500 | 31 | #define D40_SUSPEND_MAX_IT 500 |
32 | 32 | ||
33 | #define D40_ALLOC_FREE (1 << 31) | 33 | #define D40_ALLOC_FREE (1 << 31) |
34 | #define D40_ALLOC_PHY (1 << 30) | 34 | #define D40_ALLOC_PHY (1 << 30) |
35 | #define D40_ALLOC_LOG_FREE 0 | 35 | #define D40_ALLOC_LOG_FREE 0 |
36 | 36 | ||
37 | /* The number of free d40_desc to keep in memory before starting | 37 | /* The number of free d40_desc to keep in memory before starting |
38 | * to kfree() them */ | 38 | * to kfree() them */ |
39 | #define D40_DESC_CACHE_SIZE 50 | 39 | #define D40_DESC_CACHE_SIZE 50 |
40 | 40 | ||
41 | /* Hardware designer of the block */ | 41 | /* Hardware designer of the block */ |
42 | #define D40_PERIPHID2_DESIGNER 0x8 | 42 | #define D40_PERIPHID2_DESIGNER 0x8 |
43 | 43 | ||
44 | /** | 44 | /** |
45 | * enum d40_command - The different commands and/or statuses. | 45 | * enum d40_command - The different commands and/or statuses. |
46 | * | 46 | * |
47 | * @D40_DMA_STOP: DMA channel command STOP or status STOPPED. | 47 | * @D40_DMA_STOP: DMA channel command STOP or status STOPPED. |
48 | * @D40_DMA_RUN: DMA channel command RUN or status RUNNING. | 48 | * @D40_DMA_RUN: DMA channel command RUN or status RUNNING. |
49 | * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. | 49 | * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. |
50 | * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. | 50 | * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. |
51 | */ | 51 | */ |
52 | enum d40_command { | 52 | enum d40_command { |
53 | D40_DMA_STOP = 0, | 53 | D40_DMA_STOP = 0, |
54 | D40_DMA_RUN = 1, | 54 | D40_DMA_RUN = 1, |
55 | D40_DMA_SUSPEND_REQ = 2, | 55 | D40_DMA_SUSPEND_REQ = 2, |
56 | D40_DMA_SUSPENDED = 3 | 56 | D40_DMA_SUSPENDED = 3 |
57 | }; | 57 | }; |
58 | 58 | ||
59 | /** | 59 | /** |
60 | * struct d40_lli_pool - Structure for keeping LLIs in memory | 60 | * struct d40_lli_pool - Structure for keeping LLIs in memory |
61 | * | 61 | * |
62 | * @base: Pointer to memory area when the pre_alloc_lli's are not large | 62 | * @base: Pointer to memory area when the pre_alloc_lli's are not large |
63 | * enough, i.e., bigger than the most common case, 1 dst and 1 src. NULL if | 63 | * enough, i.e., bigger than the most common case, 1 dst and 1 src. NULL if |
64 | * pre_alloc_lli is used. | 64 | * pre_alloc_lli is used. |
65 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. | 65 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. |
66 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, | 66 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, |
67 | * one buffer to one buffer. | 67 | * one buffer to one buffer. |
68 | */ | 68 | */ |
69 | struct d40_lli_pool { | 69 | struct d40_lli_pool { |
70 | void *base; | 70 | void *base; |
71 | int size; | 71 | int size; |
72 | /* Space for dst and src, plus an extra for padding */ | 72 | /* Space for dst and src, plus an extra for padding */ |
73 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; | 73 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; |
74 | }; | 74 | }; |
75 | 75 | ||
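The pre-allocated area covers the common one-source, one-destination job; larger jobs spill to a separate allocation. A hedged sketch of how such a pool is typically consumed (assumed helper for illustration, not from this file):

	/* Return LLI storage for 'need' bytes: the embedded pre_alloc_lli
	 * buffer when it fits, otherwise a freshly allocated base. */
	static void *d40_lli_area(struct d40_lli_pool *pool, size_t need)
	{
		if (need <= sizeof(pool->pre_alloc_lli)) {
			pool->size = sizeof(pool->pre_alloc_lli);
			return pool->pre_alloc_lli;
		}
		pool->base = kmalloc(need, GFP_NOWAIT);
		if (pool->base)
			pool->size = need;
		return pool->base;
	}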
76 | /** | 76 | /** |
77 | * struct d40_desc - A descriptor is one DMA job. | 77 | * struct d40_desc - A descriptor is one DMA job. |
78 | * | 78 | * |
79 | * @lli_phy: LLI settings for physical channel. Both src and dst | 79 | * @lli_phy: LLI settings for physical channel. Both src and dst |
80 | * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if | 80 | * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if |
81 | * lli_len equals one. | 81 | * lli_len equals one. |
82 | * @lli_log: Same as above but for logical channels. | 82 | * @lli_log: Same as above but for logical channels. |
83 | * @lli_pool: The pool with two entries pre-allocated. | 83 | * @lli_pool: The pool with two entries pre-allocated. |
84 | * @lli_len: Number of LLIs in lli_pool. | 84 | * @lli_len: Number of LLIs in lli_pool. |
85 | * @lli_tcount: Number of LLIs processed in the transfer. When it equals | 85 | * @lli_tcount: Number of LLIs processed in the transfer. When it equals |
86 | * lli_len, this transfer job is done. | 86 | * lli_len, this transfer job is done. |
87 | * @txd: DMA engine struct. Used, among other things, for communication | 87 | * @txd: DMA engine struct. Used, among other things, for communication |
88 | * during a transfer. | 88 | * during a transfer. |
89 | * @node: List entry. | 89 | * @node: List entry. |
90 | * @dir: The transfer direction of this job. | 90 | * @dir: The transfer direction of this job. |
91 | * @is_in_client_list: true if the client owns this descriptor. | 91 | * @is_in_client_list: true if the client owns this descriptor. |
92 | * | 92 | * |
93 | * This descriptor is used for both logical and physical transfers. | 93 | * This descriptor is used for both logical and physical transfers. |
94 | */ | 94 | */ |
95 | 95 | ||
96 | struct d40_desc { | 96 | struct d40_desc { |
97 | /* LLI physical */ | 97 | /* LLI physical */ |
98 | struct d40_phy_lli_bidir lli_phy; | 98 | struct d40_phy_lli_bidir lli_phy; |
99 | /* LLI logical */ | 99 | /* LLI logical */ |
100 | struct d40_log_lli_bidir lli_log; | 100 | struct d40_log_lli_bidir lli_log; |
101 | 101 | ||
102 | struct d40_lli_pool lli_pool; | 102 | struct d40_lli_pool lli_pool; |
103 | u32 lli_len; | 103 | u32 lli_len; |
104 | u32 lli_tcount; | 104 | u32 lli_tcount; |
105 | 105 | ||
106 | struct dma_async_tx_descriptor txd; | 106 | struct dma_async_tx_descriptor txd; |
107 | struct list_head node; | 107 | struct list_head node; |
108 | 108 | ||
109 | enum dma_data_direction dir; | 109 | enum dma_data_direction dir; |
110 | bool is_in_client_list; | 110 | bool is_in_client_list; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | /** | 113 | /** |
114 | * struct d40_lcla_pool - LCLA pool settings and data. | 114 | * struct d40_lcla_pool - LCLA pool settings and data. |
115 | * | 115 | * |
116 | * @base: The virtual address of LCLA. | 116 | * @base: The virtual address of LCLA. |
117 | * @phy: Physical base address of LCLA. | 117 | * @phy: Physical base address of LCLA. |
118 | * @base_size: size of lcla. | 118 | * @base_size: size of lcla. |
119 | * @lock: Lock to protect the content in this struct. | 119 | * @lock: Lock to protect the content in this struct. |
120 | * @alloc_map: Mapping between physical channel and LCLA entries. | 120 | * @alloc_map: Mapping between physical channel and LCLA entries. |
121 | * @num_blocks: The number of entries in alloc_map. Equals the | 121 | * @num_blocks: The number of entries in alloc_map. Equals the |
122 | * number of physical channels. | 122 | * number of physical channels. |
123 | */ | 123 | */ |
124 | struct d40_lcla_pool { | 124 | struct d40_lcla_pool { |
125 | void *base; | 125 | void *base; |
126 | dma_addr_t phy; | 126 | dma_addr_t phy; |
127 | resource_size_t base_size; | 127 | resource_size_t base_size; |
128 | spinlock_t lock; | 128 | spinlock_t lock; |
129 | u32 *alloc_map; | 129 | u32 *alloc_map; |
130 | int num_blocks; | 130 | int num_blocks; |
131 | }; | 131 | }; |
132 | 132 | ||
133 | /** | 133 | /** |
134 | * struct d40_phy_res - struct for handling eventlines mapped to physical | 134 | * struct d40_phy_res - struct for handling eventlines mapped to physical |
135 | * channels. | 135 | * channels. |
136 | * | 136 | * |
137 | * @lock: A lock protecting this entity. | 137 | * @lock: A lock protecting this entity. |
138 | * @num: The physical channel number of this entity. | 138 | * @num: The physical channel number of this entity. |
139 | * @allocated_src: Bit map showing which src event lines are mapped to | 139 | * @allocated_src: Bit map showing which src event lines are mapped to |
140 | * this physical channel. Can also be free or physically allocated. | 140 | * this physical channel. Can also be free or physically allocated. |
141 | * @allocated_dst: Same as for src but for dst. | 141 | * @allocated_dst: Same as for src but for dst. |
142 | * allocated_dst and allocated_src use the D40_ALLOC* defines as well as | 142 | * allocated_dst and allocated_src use the D40_ALLOC* defines as well as |
143 | * the event line number. allocated_src and allocated_dst cannot both be | 143 | * the event line number. allocated_src and allocated_dst cannot both be |
144 | * allocated to the same physical channel, since the interrupt handler | 144 | * allocated to the same physical channel, since the interrupt handler |
145 | * would then have no way of figuring out which one the interrupt belongs to. | 145 | * would then have no way of figuring out which one the interrupt belongs to. |
146 | */ | 146 | */ |
147 | struct d40_phy_res { | 147 | struct d40_phy_res { |
148 | spinlock_t lock; | 148 | spinlock_t lock; |
149 | int num; | 149 | int num; |
150 | u32 allocated_src; | 150 | u32 allocated_src; |
151 | u32 allocated_dst; | 151 | u32 allocated_dst; |
152 | }; | 152 | }; |
153 | 153 | ||
154 | struct d40_base; | 154 | struct d40_base; |
155 | 155 | ||
156 | /** | 156 | /** |
157 | * struct d40_chan - Struct that describes a channel. | 157 | * struct d40_chan - Struct that describes a channel. |
158 | * | 158 | * |
159 | * @lock: A spinlock to protect this struct. | 159 | * @lock: A spinlock to protect this struct. |
160 | * @log_num: The logical number, if any, of this channel. | 160 | * @log_num: The logical number, if any, of this channel. |
161 | * @completed: Starts at 1; after the first interrupt it is set to the DMA | 161 | * @completed: Starts at 1; after the first interrupt it is set to the DMA |
162 | * engine's current cookie. | 162 | * engine's current cookie. |
163 | * @pending_tx: The number of pending transfers. Used between interrupt handler | 163 | * @pending_tx: The number of pending transfers. Used between interrupt handler |
164 | * and tasklet. | 164 | * and tasklet. |
165 | * @busy: Set to true when transfer is ongoing on this channel. | 165 | * @busy: Set to true when transfer is ongoing on this channel. |
166 | * @phy_chan: Pointer to physical channel which this instance runs on. | 166 | * @phy_chan: Pointer to physical channel which this instance runs on. |
167 | * @chan: DMA engine handle. | 167 | * @chan: DMA engine handle. |
168 | * @tasklet: Tasklet that gets scheduled from interrupt context to complete a | 168 | * @tasklet: Tasklet that gets scheduled from interrupt context to complete a |
169 | * transfer and call client callback. | 169 | * transfer and call client callback. |
170 | * @client: Client owned descriptor list. | 170 | * @client: Client owned descriptor list. |
171 | * @active: Active descriptor. | 171 | * @active: Active descriptor. |
172 | * @queue: Queued jobs. | 172 | * @queue: Queued jobs. |
173 | * @free: List of free descriptors, ready to be reused. | 173 | * @free: List of free descriptors, ready to be reused. |
174 | * @free_len: Number of descriptors in the free list. | 174 | * @free_len: Number of descriptors in the free list. |
175 | * @dma_cfg: The client configuration of this dma channel. | 175 | * @dma_cfg: The client configuration of this dma channel. |
176 | * @base: Pointer to the device instance struct. | 176 | * @base: Pointer to the device instance struct. |
177 | * @src_def_cfg: Default cfg register setting for src. | 177 | * @src_def_cfg: Default cfg register setting for src. |
178 | * @dst_def_cfg: Default cfg register setting for dst. | 178 | * @dst_def_cfg: Default cfg register setting for dst. |
179 | * @log_def: Default logical channel settings. | 179 | * @log_def: Default logical channel settings. |
180 | * @lcla: Space for one dst src pair for logical channel transfers. | 180 | * @lcla: Space for one dst src pair for logical channel transfers. |
181 | * @lcpa: Pointer to dst and src lcpa settings. | 181 | * @lcpa: Pointer to dst and src lcpa settings. |
182 | * | 182 | * |
183 | * This struct can either "be" a logical or a physical channel. | 183 | * This struct can either "be" a logical or a physical channel. |
184 | */ | 184 | */ |
185 | struct d40_chan { | 185 | struct d40_chan { |
186 | spinlock_t lock; | 186 | spinlock_t lock; |
187 | int log_num; | 187 | int log_num; |
188 | /* ID of the most recent completed transfer */ | 188 | /* ID of the most recent completed transfer */ |
189 | int completed; | 189 | int completed; |
190 | int pending_tx; | 190 | int pending_tx; |
191 | bool busy; | 191 | bool busy; |
192 | struct d40_phy_res *phy_chan; | 192 | struct d40_phy_res *phy_chan; |
193 | struct dma_chan chan; | 193 | struct dma_chan chan; |
194 | struct tasklet_struct tasklet; | 194 | struct tasklet_struct tasklet; |
195 | struct list_head client; | 195 | struct list_head client; |
196 | struct list_head active; | 196 | struct list_head active; |
197 | struct list_head queue; | 197 | struct list_head queue; |
198 | struct list_head free; | 198 | struct list_head free; |
199 | int free_len; | 199 | int free_len; |
200 | struct stedma40_chan_cfg dma_cfg; | 200 | struct stedma40_chan_cfg dma_cfg; |
201 | struct d40_base *base; | 201 | struct d40_base *base; |
202 | /* Default register configurations */ | 202 | /* Default register configurations */ |
203 | u32 src_def_cfg; | 203 | u32 src_def_cfg; |
204 | u32 dst_def_cfg; | 204 | u32 dst_def_cfg; |
205 | struct d40_def_lcsp log_def; | 205 | struct d40_def_lcsp log_def; |
206 | struct d40_lcla_elem lcla; | 206 | struct d40_lcla_elem lcla; |
207 | struct d40_log_lli_full *lcpa; | 207 | struct d40_log_lli_full *lcpa; |
208 | }; | 208 | }; |
209 | 209 | ||
210 | /** | 210 | /** |
211 | * struct d40_base - The big global struct, one for each probed instance. | 211 | * struct d40_base - The big global struct, one for each probed instance. |
212 | * | 212 | * |
213 | * @interrupt_lock: Lock used to make sure one interrupt is handled at a time. | 213 | * @interrupt_lock: Lock used to make sure one interrupt is handled at a time. |
214 | * @execmd_lock: Lock for execute command usage since several channels share | 214 | * @execmd_lock: Lock for execute command usage since several channels share |
215 | * the same physical register. | 215 | * the same physical register. |
216 | * @dev: The device structure. | 216 | * @dev: The device structure. |
217 | * @virtbase: The virtual base address of the DMA controller's registers. | 217 | * @virtbase: The virtual base address of the DMA controller's registers. |
218 | * @clk: Pointer to the DMA clock structure. | 218 | * @clk: Pointer to the DMA clock structure. |
219 | * @phy_start: Physical memory start of the DMA registers. | 219 | * @phy_start: Physical memory start of the DMA registers. |
220 | * @phy_size: Size of the DMA register map. | 220 | * @phy_size: Size of the DMA register map. |
221 | * @irq: The IRQ number. | 221 | * @irq: The IRQ number. |
222 | * @num_phy_chans: The number of physical channels. Read from HW. This | 222 | * @num_phy_chans: The number of physical channels. Read from HW. This |
223 | * is the number of available channels for this driver, not counting "Secure | 223 | * is the number of available channels for this driver, not counting "Secure |
224 | * mode" allocated physical channels. | 224 | * mode" allocated physical channels. |
225 | * @num_log_chans: The number of logical channels. Calculated from | 225 | * @num_log_chans: The number of logical channels. Calculated from |
226 | * num_phy_chans. | 226 | * num_phy_chans. |
227 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. | 227 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. |
228 | * @dma_slave: dma_device channels that can only do slave transfers. | 228 | * @dma_slave: dma_device channels that can only do slave transfers. |
229 | * @dma_memcpy: dma_device channels that can only do memcpy transfers. | 229 | * @dma_memcpy: dma_device channels that can only do memcpy transfers. |
230 | * @phy_chans: Room for all possible physical channels in system. | 230 | * @phy_chans: Room for all possible physical channels in system. |
231 | * @log_chans: Room for all possible logical channels in system. | 231 | * @log_chans: Room for all possible logical channels in system. |
232 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points | 232 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points |
233 | * to log_chans entries. | 233 | * to log_chans entries. |
234 | * @lookup_phy_chans: Used to map interrupt number to physical channel. Points | 234 | * @lookup_phy_chans: Used to map interrupt number to physical channel. Points |
235 | * to phy_chans entries. | 235 | * to phy_chans entries. |
236 | * @plat_data: Pointer to provided platform_data which is the driver | 236 | * @plat_data: Pointer to provided platform_data which is the driver |
237 | * configuration. | 237 | * configuration. |
238 | * @phy_res: Vector containing all physical channels. | 238 | * @phy_res: Vector containing all physical channels. |
239 | * @lcla_pool: lcla pool settings and data. | 239 | * @lcla_pool: lcla pool settings and data. |
240 | * @lcpa_base: The virtual mapped address of LCPA. | 240 | * @lcpa_base: The virtual mapped address of LCPA. |
241 | * @phy_lcpa: The physical address of the LCPA. | 241 | * @phy_lcpa: The physical address of the LCPA. |
242 | * @lcpa_size: The size of the LCPA area. | 242 | * @lcpa_size: The size of the LCPA area. |
243 | */ | 243 | */ |
244 | struct d40_base { | 244 | struct d40_base { |
245 | spinlock_t interrupt_lock; | 245 | spinlock_t interrupt_lock; |
246 | spinlock_t execmd_lock; | 246 | spinlock_t execmd_lock; |
247 | struct device *dev; | 247 | struct device *dev; |
248 | void __iomem *virtbase; | 248 | void __iomem *virtbase; |
249 | struct clk *clk; | 249 | struct clk *clk; |
250 | phys_addr_t phy_start; | 250 | phys_addr_t phy_start; |
251 | resource_size_t phy_size; | 251 | resource_size_t phy_size; |
252 | int irq; | 252 | int irq; |
253 | int num_phy_chans; | 253 | int num_phy_chans; |
254 | int num_log_chans; | 254 | int num_log_chans; |
255 | struct dma_device dma_both; | 255 | struct dma_device dma_both; |
256 | struct dma_device dma_slave; | 256 | struct dma_device dma_slave; |
257 | struct dma_device dma_memcpy; | 257 | struct dma_device dma_memcpy; |
258 | struct d40_chan *phy_chans; | 258 | struct d40_chan *phy_chans; |
259 | struct d40_chan *log_chans; | 259 | struct d40_chan *log_chans; |
260 | struct d40_chan **lookup_log_chans; | 260 | struct d40_chan **lookup_log_chans; |
261 | struct d40_chan **lookup_phy_chans; | 261 | struct d40_chan **lookup_phy_chans; |
262 | struct stedma40_platform_data *plat_data; | 262 | struct stedma40_platform_data *plat_data; |
263 | /* Physical half channels */ | 263 | /* Physical half channels */ |
264 | struct d40_phy_res *phy_res; | 264 | struct d40_phy_res *phy_res; |
265 | struct d40_lcla_pool lcla_pool; | 265 | struct d40_lcla_pool lcla_pool; |
266 | void *lcpa_base; | 266 | void *lcpa_base; |
267 | dma_addr_t phy_lcpa; | 267 | dma_addr_t phy_lcpa; |
268 | resource_size_t lcpa_size; | 268 | resource_size_t lcpa_size; |
269 | }; | 269 | }; |
270 | 270 | ||
271 | /** | 271 | /** |
272 | * struct d40_interrupt_lookup - lookup table for interrupt handler | 272 | * struct d40_interrupt_lookup - lookup table for interrupt handler |
273 | * | 273 | * |
274 | * @src: Interrupt mask register. | 274 | * @src: Interrupt mask register. |
275 | * @clr: Interrupt clear register. | 275 | * @clr: Interrupt clear register. |
276 | * @is_error: true if this is an error interrupt. | 276 | * @is_error: true if this is an error interrupt. |
277 | * @offset: Start delta into lookup_log_chans in d40_base. If equal to | 277 | * @offset: Start delta into lookup_log_chans in d40_base. If equal to |
278 | * D40_PHY_CHAN, lookup_phy_chans shall be used instead. | 278 | * D40_PHY_CHAN, lookup_phy_chans shall be used instead. |
279 | */ | 279 | */ |
280 | struct d40_interrupt_lookup { | 280 | struct d40_interrupt_lookup { |
281 | u32 src; | 281 | u32 src; |
282 | u32 clr; | 282 | u32 clr; |
283 | bool is_error; | 283 | bool is_error; |
284 | int offset; | 284 | int offset; |
285 | }; | 285 | }; |
286 | 286 | ||
287 | /** | 287 | /** |
288 | * struct d40_reg_val - simple lookup struct | 288 | * struct d40_reg_val - simple lookup struct |
289 | * | 289 | * |
290 | * @reg: The register. | 290 | * @reg: The register. |
291 | * @val: The value that belongs to the register in reg. | 291 | * @val: The value that belongs to the register in reg. |
292 | */ | 292 | */ |
293 | struct d40_reg_val { | 293 | struct d40_reg_val { |
294 | unsigned int reg; | 294 | unsigned int reg; |
295 | unsigned int val; | 295 | unsigned int val; |
296 | }; | 296 | }; |
297 | 297 | ||
298 | static int d40_pool_lli_alloc(struct d40_desc *d40d, | 298 | static int d40_pool_lli_alloc(struct d40_desc *d40d, |
299 | int lli_len, bool is_log) | 299 | int lli_len, bool is_log) |
300 | { | 300 | { |
301 | u32 align; | 301 | u32 align; |
302 | void *base; | 302 | void *base; |
303 | 303 | ||
304 | if (is_log) | 304 | if (is_log) |
305 | align = sizeof(struct d40_log_lli); | 305 | align = sizeof(struct d40_log_lli); |
306 | else | 306 | else |
307 | align = sizeof(struct d40_phy_lli); | 307 | align = sizeof(struct d40_phy_lli); |
308 | 308 | ||
309 | if (lli_len == 1) { | 309 | if (lli_len == 1) { |
310 | base = d40d->lli_pool.pre_alloc_lli; | 310 | base = d40d->lli_pool.pre_alloc_lli; |
311 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); | 311 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); |
312 | d40d->lli_pool.base = NULL; | 312 | d40d->lli_pool.base = NULL; |
313 | } else { | 313 | } else { |
314 | d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); | 314 | d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); |
315 | 315 | ||
316 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); | 316 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); |
317 | d40d->lli_pool.base = base; | 317 | d40d->lli_pool.base = base; |
318 | 318 | ||
319 | if (d40d->lli_pool.base == NULL) | 319 | if (d40d->lli_pool.base == NULL) |
320 | return -ENOMEM; | 320 | return -ENOMEM; |
321 | } | 321 | } |
322 | 322 | ||
323 | if (is_log) { | 323 | if (is_log) { |
324 | d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, | 324 | d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, |
325 | align); | 325 | align); |
326 | d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, | 326 | d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, |
327 | align); | 327 | align); |
328 | } else { | 328 | } else { |
329 | d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, | 329 | d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, |
330 | align); | 330 | align); |
331 | d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, | 331 | d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, |
332 | align); | 332 | align); |
333 | 333 | ||
334 | d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src); | 334 | d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src); |
335 | d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst); | 335 | d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst); |
336 | } | 336 | } |
337 | 337 | ||
338 | return 0; | 338 | return 0; |
339 | } | 339 | } |
340 | 340 | ||
341 | static void d40_pool_lli_free(struct d40_desc *d40d) | 341 | static void d40_pool_lli_free(struct d40_desc *d40d) |
342 | { | 342 | { |
343 | kfree(d40d->lli_pool.base); | 343 | kfree(d40d->lli_pool.base); |
344 | d40d->lli_pool.base = NULL; | 344 | d40d->lli_pool.base = NULL; |
345 | d40d->lli_pool.size = 0; | 345 | d40d->lli_pool.size = 0; |
346 | d40d->lli_log.src = NULL; | 346 | d40d->lli_log.src = NULL; |
347 | d40d->lli_log.dst = NULL; | 347 | d40d->lli_log.dst = NULL; |
348 | d40d->lli_phy.src = NULL; | 348 | d40d->lli_phy.src = NULL; |
349 | d40d->lli_phy.dst = NULL; | 349 | d40d->lli_phy.dst = NULL; |
350 | d40d->lli_phy.src_addr = 0; | 350 | d40d->lli_phy.src_addr = 0; |
351 | d40d->lli_phy.dst_addr = 0; | 351 | d40d->lli_phy.dst_addr = 0; |
352 | } | 352 | } |
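
A hypothetical usage pairing for the two pool helpers above, assuming a one-LLI logical transfer so the descriptor's pre-allocated area is used and no kmalloc() takes place:

	/* Sketch only: the lli_len == 1 fast path of the LLI pool. */
	struct d40_desc desc = { };

	if (d40_pool_lli_alloc(&desc, 1, true) == 0) {
		/* lli_log.src and lli_log.dst now point into
		 * desc.lli_pool.pre_alloc_lli and lli_pool.base is NULL. */
		d40_pool_lli_free(&desc);	/* kfree(NULL) is a no-op */
	}
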
353 | 353 | ||
354 | static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, | 354 | static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, |
355 | struct d40_desc *desc) | 355 | struct d40_desc *desc) |
356 | { | 356 | { |
357 | dma_cookie_t cookie = d40c->chan.cookie; | 357 | dma_cookie_t cookie = d40c->chan.cookie; |
358 | 358 | ||
359 | if (++cookie < 0) | 359 | if (++cookie < 0) |
360 | cookie = 1; | 360 | cookie = 1; |
361 | 361 | ||
362 | d40c->chan.cookie = cookie; | 362 | d40c->chan.cookie = cookie; |
363 | desc->txd.cookie = cookie; | 363 | desc->txd.cookie = cookie; |
364 | 364 | ||
365 | return cookie; | 365 | return cookie; |
366 | } | 366 | } |
367 | 367 | ||
368 | static void d40_desc_reset(struct d40_desc *d40d) | 368 | static void d40_desc_reset(struct d40_desc *d40d) |
369 | { | 369 | { |
370 | d40d->lli_tcount = 0; | 370 | d40d->lli_tcount = 0; |
371 | } | 371 | } |
372 | 372 | ||
373 | static void d40_desc_remove(struct d40_desc *d40d) | 373 | static void d40_desc_remove(struct d40_desc *d40d) |
374 | { | 374 | { |
375 | list_del(&d40d->node); | 375 | list_del(&d40d->node); |
376 | } | 376 | } |
377 | 377 | ||
378 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | 378 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) |
379 | { | 379 | { |
380 | struct d40_desc *desc; | 380 | struct d40_desc *desc; |
381 | struct d40_desc *d; | 381 | struct d40_desc *d; |
382 | struct d40_desc *_d; | 382 | struct d40_desc *_d; |
383 | 383 | ||
384 | if (!list_empty(&d40c->client)) { | 384 | if (!list_empty(&d40c->client)) { |
385 | list_for_each_entry_safe(d, _d, &d40c->client, node) | 385 | list_for_each_entry_safe(d, _d, &d40c->client, node) |
386 | if (async_tx_test_ack(&d->txd)) { | 386 | if (async_tx_test_ack(&d->txd)) { |
387 | d40_pool_lli_free(d); | 387 | d40_pool_lli_free(d); |
388 | d40_desc_remove(d); | 388 | d40_desc_remove(d); |
389 | desc = d; | 389 | desc = d; |
390 | goto out; | 390 | goto out; |
391 | } | 391 | } |
392 | } | 392 | } |
393 | 393 | ||
394 | if (list_empty(&d40c->free)) { | 394 | if (list_empty(&d40c->free)) { |
395 | /* Alloc new desc because we're out of used ones */ | 395 | /* Alloc new desc because we're out of used ones */ |
396 | desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT); | 396 | desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT); |
397 | if (desc == NULL) | 397 | if (desc == NULL) |
398 | goto out; | 398 | goto out; |
399 | INIT_LIST_HEAD(&desc->node); | 399 | INIT_LIST_HEAD(&desc->node); |
400 | } else { | 400 | } else { |
401 | /* Reuse an old desc. */ | 401 | /* Reuse an old desc. */ |
402 | desc = list_first_entry(&d40c->free, | 402 | desc = list_first_entry(&d40c->free, |
403 | struct d40_desc, | 403 | struct d40_desc, |
404 | node); | 404 | node); |
405 | list_del(&desc->node); | 405 | list_del(&desc->node); |
406 | d40c->free_len--; | 406 | d40c->free_len--; |
407 | } | 407 | } |
408 | out: | 408 | out: |
409 | return desc; | 409 | return desc; |
410 | } | 410 | } |
411 | 411 | ||
412 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) | 412 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) |
413 | { | 413 | { |
414 | if (d40c->free_len < D40_DESC_CACHE_SIZE) { | 414 | if (d40c->free_len < D40_DESC_CACHE_SIZE) { |
415 | list_add_tail(&d40d->node, &d40c->free); | 415 | list_add_tail(&d40d->node, &d40c->free); |
416 | d40c->free_len++; | 416 | d40c->free_len++; |
417 | } else | 417 | } else |
418 | kfree(d40d); | 418 | kfree(d40d); |
419 | } | 419 | } |
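
Taken together, d40_desc_get() and d40_desc_free() above form a per-channel descriptor cache capped at D40_DESC_CACHE_SIZE entries. A hedged sketch of the recycling round-trip, assuming "d40c" is an already initialized channel:

	/* Sketch only: a descriptor is reused from d40c->free (or the
	 * acked client list) when possible; on release it goes back to
	 * d40c->free if there is room, otherwise it is kfree()d. */
	struct d40_desc *d = d40_desc_get(d40c);

	if (d) {
		/* ... fill in LLIs, submit, wait for completion ... */
		d40_desc_free(d40c, d);
	}
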
420 | 420 | ||
421 | static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) | 421 | static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) |
422 | { | 422 | { |
423 | list_add_tail(&desc->node, &d40c->active); | 423 | list_add_tail(&desc->node, &d40c->active); |
424 | } | 424 | } |
425 | 425 | ||
426 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | 426 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) |
427 | { | 427 | { |
428 | struct d40_desc *d; | 428 | struct d40_desc *d; |
429 | 429 | ||
430 | if (list_empty(&d40c->active)) | 430 | if (list_empty(&d40c->active)) |
431 | return NULL; | 431 | return NULL; |
432 | 432 | ||
433 | d = list_first_entry(&d40c->active, | 433 | d = list_first_entry(&d40c->active, |
434 | struct d40_desc, | 434 | struct d40_desc, |
435 | node); | 435 | node); |
436 | return d; | 436 | return d; |
437 | } | 437 | } |
438 | 438 | ||
439 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) | 439 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) |
440 | { | 440 | { |
441 | list_add_tail(&desc->node, &d40c->queue); | 441 | list_add_tail(&desc->node, &d40c->queue); |
442 | } | 442 | } |
443 | 443 | ||
444 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | 444 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) |
445 | { | 445 | { |
446 | struct d40_desc *d; | 446 | struct d40_desc *d; |
447 | 447 | ||
448 | if (list_empty(&d40c->queue)) | 448 | if (list_empty(&d40c->queue)) |
449 | return NULL; | 449 | return NULL; |
450 | 450 | ||
451 | d = list_first_entry(&d40c->queue, | 451 | d = list_first_entry(&d40c->queue, |
452 | struct d40_desc, | 452 | struct d40_desc, |
453 | node); | 453 | node); |
454 | return d; | 454 | return d; |
455 | } | 455 | } |
456 | 456 | ||
457 | /* Support functions for logical channels */ | 457 | /* Support functions for logical channels */ |
458 | 458 | ||
459 | static int d40_lcla_id_get(struct d40_chan *d40c, | 459 | static int d40_lcla_id_get(struct d40_chan *d40c, |
460 | struct d40_lcla_pool *pool) | 460 | struct d40_lcla_pool *pool) |
461 | { | 461 | { |
462 | int src_id = 0; | 462 | int src_id = 0; |
463 | int dst_id = 0; | 463 | int dst_id = 0; |
464 | struct d40_log_lli *lcla_lidx_base = | 464 | struct d40_log_lli *lcla_lidx_base = |
465 | pool->base + d40c->phy_chan->num * 1024; | 465 | pool->base + d40c->phy_chan->num * 1024; |
466 | int i; | 466 | int i; |
467 | int lli_per_log = d40c->base->plat_data->llis_per_log; | 467 | int lli_per_log = d40c->base->plat_data->llis_per_log; |
468 | 468 | ||
469 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) | 469 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) |
470 | return 0; | 470 | return 0; |
471 | 471 | ||
472 | if (pool->num_blocks > 32) | 472 | if (pool->num_blocks > 32) |
473 | return -EINVAL; | 473 | return -EINVAL; |
474 | 474 | ||
475 | spin_lock(&pool->lock); | 475 | spin_lock(&pool->lock); |
476 | 476 | ||
477 | for (i = 0; i < pool->num_blocks; i++) { | 477 | for (i = 0; i < pool->num_blocks; i++) { |
478 | if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { | 478 | if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { |
479 | pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); | 479 | pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); |
480 | break; | 480 | break; |
481 | } | 481 | } |
482 | } | 482 | } |
483 | src_id = i; | 483 | src_id = i; |
484 | if (src_id >= pool->num_blocks) | 484 | if (src_id >= pool->num_blocks) |
485 | goto err; | 485 | goto err; |
486 | 486 | ||
487 | for (; i < pool->num_blocks; i++) { | 487 | for (; i < pool->num_blocks; i++) { |
488 | if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { | 488 | if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { |
489 | pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); | 489 | pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); |
490 | break; | 490 | break; |
491 | } | 491 | } |
492 | } | 492 | } |
493 | 493 | ||
494 | dst_id = i; | 494 | dst_id = i; |
495 | if (dst_id == src_id) | 495 | if (dst_id == src_id) |
496 | goto err; | 496 | goto err; |
497 | 497 | ||
498 | d40c->lcla.src_id = src_id; | 498 | d40c->lcla.src_id = src_id; |
499 | d40c->lcla.dst_id = dst_id; | 499 | d40c->lcla.dst_id = dst_id; |
500 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; | 500 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; |
501 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; | 501 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; |
502 | 502 | ||
503 | 503 | ||
504 | spin_unlock(&pool->lock); | 504 | spin_unlock(&pool->lock); |
505 | return 0; | 505 | return 0; |
506 | err: | 506 | err: |
507 | spin_unlock(&pool->lock); | 507 | spin_unlock(&pool->lock); |
508 | return -EINVAL; | 508 | return -EINVAL; |
509 | } | 509 | } |
510 | 510 | ||
511 | static void d40_lcla_id_put(struct d40_chan *d40c, | 511 | static void d40_lcla_id_put(struct d40_chan *d40c, |
512 | struct d40_lcla_pool *pool, | 512 | struct d40_lcla_pool *pool, |
513 | int id) | 513 | int id) |
514 | { | 514 | { |
515 | if (id < 0) | 515 | if (id < 0) |
516 | return; | 516 | return; |
517 | 517 | ||
518 | d40c->lcla.src_id = -1; | 518 | d40c->lcla.src_id = -1; |
519 | d40c->lcla.dst_id = -1; | 519 | d40c->lcla.dst_id = -1; |
520 | 520 | ||
521 | spin_lock(&pool->lock); | 521 | spin_lock(&pool->lock); |
522 | pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id)); | 522 | pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id)); |
523 | spin_unlock(&pool->lock); | 523 | spin_unlock(&pool->lock); |
524 | } | 524 | } |
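
pool->alloc_map holds one bitmap word per physical channel, and d40_lcla_id_get() above simply claims the first two clear bits as src_id and dst_id. A stand-alone model of that scan (plain C, illustration only; the 0x09 starting map is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int map = 0x09;	/* entries 0 and 3 already taken */
		int i, src_id = -1, dst_id = -1;

		for (i = 0; i < 8; i++) {
			if (map & (1u << i))
				continue;
			map |= 1u << i;	/* claim the entry */
			if (src_id < 0) {
				src_id = i;
			} else {
				dst_id = i;
				break;
			}
		}
		printf("src_id=%d dst_id=%d\n", src_id, dst_id);	/* 1 and 2 */
		return 0;
	}
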
525 | 525 | ||
526 | static int d40_channel_execute_command(struct d40_chan *d40c, | 526 | static int d40_channel_execute_command(struct d40_chan *d40c, |
527 | enum d40_command command) | 527 | enum d40_command command) |
528 | { | 528 | { |
529 | int status, i; | 529 | int status, i; |
530 | void __iomem *active_reg; | 530 | void __iomem *active_reg; |
531 | int ret = 0; | 531 | int ret = 0; |
532 | unsigned long flags; | 532 | unsigned long flags; |
533 | 533 | ||
534 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); | 534 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); |
535 | 535 | ||
536 | if (d40c->phy_chan->num % 2 == 0) | 536 | if (d40c->phy_chan->num % 2 == 0) |
537 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 537 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
538 | else | 538 | else |
539 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | 539 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; |
540 | 540 | ||
541 | if (command == D40_DMA_SUSPEND_REQ) { | 541 | if (command == D40_DMA_SUSPEND_REQ) { |
542 | status = (readl(active_reg) & | 542 | status = (readl(active_reg) & |
543 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | 543 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> |
544 | D40_CHAN_POS(d40c->phy_chan->num); | 544 | D40_CHAN_POS(d40c->phy_chan->num); |
545 | 545 | ||
546 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | 546 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) |
547 | goto done; | 547 | goto done; |
548 | } | 548 | } |
549 | 549 | ||
550 | writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg); | 550 | writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg); |
551 | 551 | ||
552 | if (command == D40_DMA_SUSPEND_REQ) { | 552 | if (command == D40_DMA_SUSPEND_REQ) { |
553 | 553 | ||
554 | for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { | 554 | for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { |
555 | status = (readl(active_reg) & | 555 | status = (readl(active_reg) & |
556 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | 556 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> |
557 | D40_CHAN_POS(d40c->phy_chan->num); | 557 | D40_CHAN_POS(d40c->phy_chan->num); |
558 | 558 | ||
559 | cpu_relax(); | 559 | cpu_relax(); |
560 | /* | 560 | /* |
561 | * Reduce the number of bus accesses while | 561 | * Reduce the number of bus accesses while |
562 | * waiting for the DMA to suspend. | 562 | * waiting for the DMA to suspend. |
563 | */ | 563 | */ |
564 | udelay(3); | 564 | udelay(3); |
565 | 565 | ||
566 | if (status == D40_DMA_STOP || | 566 | if (status == D40_DMA_STOP || |
567 | status == D40_DMA_SUSPENDED) | 567 | status == D40_DMA_SUSPENDED) |
568 | break; | 568 | break; |
569 | } | 569 | } |
570 | 570 | ||
571 | if (i == D40_SUSPEND_MAX_IT) { | 571 | if (i == D40_SUSPEND_MAX_IT) { |
572 | dev_err(&d40c->chan.dev->device, | 572 | dev_err(&d40c->chan.dev->device, |
573 | "[%s]: unable to suspend the chl %d (log: %d) status %x\n", | 573 | "[%s]: unable to suspend the chl %d (log: %d) status %x\n", |
574 | __func__, d40c->phy_chan->num, d40c->log_num, | 574 | __func__, d40c->phy_chan->num, d40c->log_num, |
575 | status); | 575 | status); |
576 | dump_stack(); | 576 | dump_stack(); |
577 | ret = -EBUSY; | 577 | ret = -EBUSY; |
578 | } | 578 | } |
579 | 579 | ||
580 | } | 580 | } |
581 | done: | 581 | done: |
582 | spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); | 582 | spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); |
583 | return ret; | 583 | return ret; |
584 | } | 584 | } |
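
A hedged call-site sketch, mirroring how d40_config_write() below uses this helper: request a suspend and propagate the error if the channel never reaches SUSPENDED or STOP within the bounded poll:

	/* Sketch only: quiesce a channel before rewriting its registers. */
	int res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	if (res)
		return res;	/* -EBUSY after D40_SUSPEND_MAX_IT polls */
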
585 | 585 | ||
586 | static void d40_term_all(struct d40_chan *d40c) | 586 | static void d40_term_all(struct d40_chan *d40c) |
587 | { | 587 | { |
588 | struct d40_desc *d40d; | 588 | struct d40_desc *d40d; |
589 | struct d40_desc *d; | 589 | struct d40_desc *d; |
590 | struct d40_desc *_d; | 590 | struct d40_desc *_d; |
591 | 591 | ||
592 | /* Release active descriptors */ | 592 | /* Release active descriptors */ |
593 | while ((d40d = d40_first_active_get(d40c))) { | 593 | while ((d40d = d40_first_active_get(d40c))) { |
594 | d40_desc_remove(d40d); | 594 | d40_desc_remove(d40d); |
595 | 595 | ||
596 | /* Return desc to free-list */ | 596 | /* Return desc to free-list */ |
597 | d40_desc_free(d40c, d40d); | 597 | d40_desc_free(d40c, d40d); |
598 | } | 598 | } |
599 | 599 | ||
600 | /* Release queued descriptors waiting for transfer */ | 600 | /* Release queued descriptors waiting for transfer */ |
601 | while ((d40d = d40_first_queued(d40c))) { | 601 | while ((d40d = d40_first_queued(d40c))) { |
602 | d40_desc_remove(d40d); | 602 | d40_desc_remove(d40d); |
603 | 603 | ||
604 | /* Return desc to free-list */ | 604 | /* Return desc to free-list */ |
605 | d40_desc_free(d40c, d40d); | 605 | d40_desc_free(d40c, d40d); |
606 | } | 606 | } |
607 | 607 | ||
608 | /* Release client owned descriptors */ | 608 | /* Release client owned descriptors */ |
609 | if (!list_empty(&d40c->client)) | 609 | if (!list_empty(&d40c->client)) |
610 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | 610 | list_for_each_entry_safe(d, _d, &d40c->client, node) { |
611 | d40_pool_lli_free(d); | 611 | d40_pool_lli_free(d); |
612 | d40_desc_remove(d); | 612 | d40_desc_remove(d); |
613 | /* Return desc to free-list */ | 613 | /* Return desc to free-list */ |
614 | d40_desc_free(d40c, d); | 614 | d40_desc_free(d40c, d); |
615 | } | 615 | } |
616 | 616 | ||
617 | d40_lcla_id_put(d40c, &d40c->base->lcla_pool, | 617 | d40_lcla_id_put(d40c, &d40c->base->lcla_pool, |
618 | d40c->lcla.src_id); | 618 | d40c->lcla.src_id); |
619 | d40_lcla_id_put(d40c, &d40c->base->lcla_pool, | 619 | d40_lcla_id_put(d40c, &d40c->base->lcla_pool, |
620 | d40c->lcla.dst_id); | 620 | d40c->lcla.dst_id); |
621 | 621 | ||
622 | d40c->pending_tx = 0; | 622 | d40c->pending_tx = 0; |
623 | d40c->busy = false; | 623 | d40c->busy = false; |
624 | } | 624 | } |
625 | 625 | ||
626 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | 626 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) |
627 | { | 627 | { |
628 | u32 val; | 628 | u32 val; |
629 | unsigned long flags; | 629 | unsigned long flags; |
630 | 630 | ||
631 | if (do_enable) | 631 | if (do_enable) |
632 | val = D40_ACTIVATE_EVENTLINE; | 632 | val = D40_ACTIVATE_EVENTLINE; |
633 | else | 633 | else |
634 | val = D40_DEACTIVATE_EVENTLINE; | 634 | val = D40_DEACTIVATE_EVENTLINE; |
635 | 635 | ||
636 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | 636 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); |
637 | 637 | ||
638 | /* Enable event line connected to device (or memcpy) */ | 638 | /* Enable event line connected to device (or memcpy) */ |
639 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | 639 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || |
640 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | 640 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { |
641 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 641 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
642 | 642 | ||
643 | writel((val << D40_EVENTLINE_POS(event)) | | 643 | writel((val << D40_EVENTLINE_POS(event)) | |
644 | ~D40_EVENTLINE_MASK(event), | 644 | ~D40_EVENTLINE_MASK(event), |
645 | d40c->base->virtbase + D40_DREG_PCBASE + | 645 | d40c->base->virtbase + D40_DREG_PCBASE + |
646 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 646 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
647 | D40_CHAN_REG_SSLNK); | 647 | D40_CHAN_REG_SSLNK); |
648 | } | 648 | } |
649 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { | 649 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { |
650 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 650 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
651 | 651 | ||
652 | writel((val << D40_EVENTLINE_POS(event)) | | 652 | writel((val << D40_EVENTLINE_POS(event)) | |
653 | ~D40_EVENTLINE_MASK(event), | 653 | ~D40_EVENTLINE_MASK(event), |
654 | d40c->base->virtbase + D40_DREG_PCBASE + | 654 | d40c->base->virtbase + D40_DREG_PCBASE + |
655 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 655 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
656 | D40_CHAN_REG_SDLNK); | 656 | D40_CHAN_REG_SDLNK); |
657 | } | 657 | } |
658 | 658 | ||
659 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | 659 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); |
660 | } | 660 | } |
661 | 661 | ||
662 | static u32 d40_chan_has_events(struct d40_chan *d40c) | 662 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
663 | { | 663 | { |
664 | u32 val = 0; | 664 | u32 val = 0; |
665 | 665 | ||
666 | /* If SSLNK or SDLNK is zero all events are disabled */ | 666 | /* If SSLNK or SDLNK is zero all events are disabled */ |
667 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | 667 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || |
668 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | 668 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) |
669 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 669 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + |
670 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 670 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
671 | D40_CHAN_REG_SSLNK); | 671 | D40_CHAN_REG_SSLNK); |
672 | 672 | ||
673 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) | 673 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) |
674 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 674 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + |
675 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 675 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
676 | D40_CHAN_REG_SDLNK); | 676 | D40_CHAN_REG_SDLNK); |
677 | return val; | 677 | return val; |
678 | } | 678 | } |
679 | 679 | ||
680 | static void d40_config_enable_lidx(struct d40_chan *d40c) | 680 | static void d40_config_enable_lidx(struct d40_chan *d40c) |
681 | { | 681 | { |
682 | /* Set LIDX for lcla */ | 682 | /* Set LIDX for lcla */ |
683 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | 683 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & |
684 | D40_SREG_ELEM_LOG_LIDX_MASK, | 684 | D40_SREG_ELEM_LOG_LIDX_MASK, |
685 | d40c->base->virtbase + D40_DREG_PCBASE + | 685 | d40c->base->virtbase + D40_DREG_PCBASE + |
686 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); | 686 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); |
687 | 687 | ||
688 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | 688 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & |
689 | D40_SREG_ELEM_LOG_LIDX_MASK, | 689 | D40_SREG_ELEM_LOG_LIDX_MASK, |
690 | d40c->base->virtbase + D40_DREG_PCBASE + | 690 | d40c->base->virtbase + D40_DREG_PCBASE + |
691 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); | 691 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); |
692 | } | 692 | } |
693 | 693 | ||
694 | static int d40_config_write(struct d40_chan *d40c) | 694 | static int d40_config_write(struct d40_chan *d40c) |
695 | { | 695 | { |
696 | u32 addr_base; | 696 | u32 addr_base; |
697 | u32 var; | 697 | u32 var; |
698 | int res; | 698 | int res; |
699 | 699 | ||
700 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 700 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
701 | if (res) | 701 | if (res) |
702 | return res; | 702 | return res; |
703 | 703 | ||
704 | /* Odd addresses are even addresses + 4 */ | 704 | /* Odd addresses are even addresses + 4 */ |
705 | addr_base = (d40c->phy_chan->num % 2) * 4; | 705 | addr_base = (d40c->phy_chan->num % 2) * 4; |
706 | /* Setup channel mode to logical or physical */ | 706 | /* Setup channel mode to logical or physical */ |
707 | var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << | 707 | var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << |
708 | D40_CHAN_POS(d40c->phy_chan->num); | 708 | D40_CHAN_POS(d40c->phy_chan->num); |
709 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); | 709 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); |
710 | 710 | ||
711 | /* Setup operational mode option register */ | 711 | /* Setup operational mode option register */ |
712 | var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & | 712 | var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & |
713 | 0x3) << D40_CHAN_POS(d40c->phy_chan->num); | 713 | 0x3) << D40_CHAN_POS(d40c->phy_chan->num); |
714 | 714 | ||
715 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); | 715 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); |
716 | 716 | ||
717 | if (d40c->log_num != D40_PHY_CHAN) { | 717 | if (d40c->log_num != D40_PHY_CHAN) { |
718 | /* Set default config for CFG reg */ | 718 | /* Set default config for CFG reg */ |
719 | writel(d40c->src_def_cfg, | 719 | writel(d40c->src_def_cfg, |
720 | d40c->base->virtbase + D40_DREG_PCBASE + | 720 | d40c->base->virtbase + D40_DREG_PCBASE + |
721 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 721 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
722 | D40_CHAN_REG_SSCFG); | 722 | D40_CHAN_REG_SSCFG); |
723 | writel(d40c->dst_def_cfg, | 723 | writel(d40c->dst_def_cfg, |
724 | d40c->base->virtbase + D40_DREG_PCBASE + | 724 | d40c->base->virtbase + D40_DREG_PCBASE + |
725 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 725 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
726 | D40_CHAN_REG_SDCFG); | 726 | D40_CHAN_REG_SDCFG); |
727 | 727 | ||
728 | d40_config_enable_lidx(d40c); | 728 | d40_config_enable_lidx(d40c); |
729 | } | 729 | } |
730 | return res; | 730 | return res; |
731 | } | 731 | } |
732 | 732 | ||
733 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | 733 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) |
734 | { | 734 | { |
735 | 735 | ||
736 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { | 736 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { |
737 | d40_phy_lli_write(d40c->base->virtbase, | 737 | d40_phy_lli_write(d40c->base->virtbase, |
738 | d40c->phy_chan->num, | 738 | d40c->phy_chan->num, |
739 | d40d->lli_phy.dst, | 739 | d40d->lli_phy.dst, |
740 | d40d->lli_phy.src); | 740 | d40d->lli_phy.src); |
741 | d40d->lli_tcount = d40d->lli_len; | 741 | d40d->lli_tcount = d40d->lli_len; |
742 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { | 742 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { |
743 | u32 lli_len; | 743 | u32 lli_len; |
744 | struct d40_log_lli *src = d40d->lli_log.src; | 744 | struct d40_log_lli *src = d40d->lli_log.src; |
745 | struct d40_log_lli *dst = d40d->lli_log.dst; | 745 | struct d40_log_lli *dst = d40d->lli_log.dst; |
746 | 746 | ||
747 | src += d40d->lli_tcount; | 747 | src += d40d->lli_tcount; |
748 | dst += d40d->lli_tcount; | 748 | dst += d40d->lli_tcount; |
749 | 749 | ||
750 | if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) | 750 | if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) |
751 | lli_len = d40d->lli_len; | 751 | lli_len = d40d->lli_len; |
752 | else | 752 | else |
753 | lli_len = d40c->base->plat_data->llis_per_log; | 753 | lli_len = d40c->base->plat_data->llis_per_log; |
754 | d40d->lli_tcount += lli_len; | 754 | d40d->lli_tcount += lli_len; |
755 | d40_log_lli_write(d40c->lcpa, d40c->lcla.src, | 755 | d40_log_lli_write(d40c->lcpa, d40c->lcla.src, |
756 | d40c->lcla.dst, | 756 | d40c->lcla.dst, |
757 | dst, src, | 757 | dst, src, |
758 | d40c->base->plat_data->llis_per_log); | 758 | d40c->base->plat_data->llis_per_log); |
759 | } | 759 | } |
760 | } | 760 | } |
761 | 761 | ||
762 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | 762 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) |
763 | { | 763 | { |
764 | struct d40_chan *d40c = container_of(tx->chan, | 764 | struct d40_chan *d40c = container_of(tx->chan, |
765 | struct d40_chan, | 765 | struct d40_chan, |
766 | chan); | 766 | chan); |
767 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | 767 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); |
768 | unsigned long flags; | 768 | unsigned long flags; |
769 | 769 | ||
770 | spin_lock_irqsave(&d40c->lock, flags); | 770 | spin_lock_irqsave(&d40c->lock, flags); |
771 | 771 | ||
772 | tx->cookie = d40_assign_cookie(d40c, d40d); | 772 | tx->cookie = d40_assign_cookie(d40c, d40d); |
773 | 773 | ||
774 | d40_desc_queue(d40c, d40d); | 774 | d40_desc_queue(d40c, d40d); |
775 | 775 | ||
776 | spin_unlock_irqrestore(&d40c->lock, flags); | 776 | spin_unlock_irqrestore(&d40c->lock, flags); |
777 | 777 | ||
778 | return tx->cookie; | 778 | return tx->cookie; |
779 | } | 779 | } |
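
For context, a hypothetical client-side flow through the generic dmaengine API (none of these lines are added by this patch): the descriptor handed out by a prep call carries a tx_submit hook, which for this driver lands in d40_tx_submit() above.

	/* Sketch only: standard dmaengine submission of a memcpy job. */
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						  DMA_PREP_INTERRUPT);
	if (tx) {
		tx->callback = my_done_callback;	/* hypothetical */
		tx->callback_param = my_ctx;		/* hypothetical */
		cookie = tx->tx_submit(tx);	/* d40_tx_submit() runs here */
	}
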
780 | 780 | ||
781 | static int d40_start(struct d40_chan *d40c) | 781 | static int d40_start(struct d40_chan *d40c) |
782 | { | 782 | { |
783 | int err; | 783 | int err; |
784 | 784 | ||
785 | if (d40c->log_num != D40_PHY_CHAN) { | 785 | if (d40c->log_num != D40_PHY_CHAN) { |
786 | err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 786 | err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
787 | if (err) | 787 | if (err) |
788 | return err; | 788 | return err; |
789 | d40_config_set_event(d40c, true); | 789 | d40_config_set_event(d40c, true); |
790 | } | 790 | } |
791 | 791 | ||
792 | err = d40_channel_execute_command(d40c, D40_DMA_RUN); | 792 | err = d40_channel_execute_command(d40c, D40_DMA_RUN); |
793 | 793 | ||
794 | return err; | 794 | return err; |
795 | } | 795 | } |
796 | 796 | ||
797 | static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | 797 | static struct d40_desc *d40_queue_start(struct d40_chan *d40c) |
798 | { | 798 | { |
799 | struct d40_desc *d40d; | 799 | struct d40_desc *d40d; |
800 | int err; | 800 | int err; |
801 | 801 | ||
802 | /* Start queued jobs, if any */ | 802 | /* Start queued jobs, if any */ |
803 | d40d = d40_first_queued(d40c); | 803 | d40d = d40_first_queued(d40c); |
804 | 804 | ||
805 | if (d40d != NULL) { | 805 | if (d40d != NULL) { |
806 | d40c->busy = true; | 806 | d40c->busy = true; |
807 | 807 | ||
808 | /* Remove from queue */ | 808 | /* Remove from queue */ |
809 | d40_desc_remove(d40d); | 809 | d40_desc_remove(d40d); |
810 | 810 | ||
811 | /* Add to active queue */ | 811 | /* Add to active queue */ |
812 | d40_desc_submit(d40c, d40d); | 812 | d40_desc_submit(d40c, d40d); |
813 | 813 | ||
814 | /* Initiate DMA job */ | 814 | /* Initiate DMA job */ |
815 | d40_desc_load(d40c, d40d); | 815 | d40_desc_load(d40c, d40d); |
816 | 816 | ||
817 | /* Start dma job */ | 817 | /* Start dma job */ |
818 | err = d40_start(d40c); | 818 | err = d40_start(d40c); |
819 | 819 | ||
820 | if (err) | 820 | if (err) |
821 | return NULL; | 821 | return NULL; |
822 | } | 822 | } |
823 | 823 | ||
824 | return d40d; | 824 | return d40d; |
825 | } | 825 | } |
826 | 826 | ||
827 | /* called from interrupt context */ | 827 | /* called from interrupt context */ |
828 | static void dma_tc_handle(struct d40_chan *d40c) | 828 | static void dma_tc_handle(struct d40_chan *d40c) |
829 | { | 829 | { |
830 | struct d40_desc *d40d; | 830 | struct d40_desc *d40d; |
831 | 831 | ||
832 | if (!d40c->phy_chan) | 832 | if (!d40c->phy_chan) |
833 | return; | 833 | return; |
834 | 834 | ||
835 | /* Get first active entry from list */ | 835 | /* Get first active entry from list */ |
836 | d40d = d40_first_active_get(d40c); | 836 | d40d = d40_first_active_get(d40c); |
837 | 837 | ||
838 | if (d40d == NULL) | 838 | if (d40d == NULL) |
839 | return; | 839 | return; |
840 | 840 | ||
841 | if (d40d->lli_tcount < d40d->lli_len) { | 841 | if (d40d->lli_tcount < d40d->lli_len) { |
842 | 842 | ||
843 | d40_desc_load(d40c, d40d); | 843 | d40_desc_load(d40c, d40d); |
844 | /* Start dma job */ | 844 | /* Start dma job */ |
845 | (void) d40_start(d40c); | 845 | (void) d40_start(d40c); |
846 | return; | 846 | return; |
847 | } | 847 | } |
848 | 848 | ||
849 | if (d40_queue_start(d40c) == NULL) | 849 | if (d40_queue_start(d40c) == NULL) |
850 | d40c->busy = false; | 850 | d40c->busy = false; |
851 | 851 | ||
852 | d40c->pending_tx++; | 852 | d40c->pending_tx++; |
853 | tasklet_schedule(&d40c->tasklet); | 853 | tasklet_schedule(&d40c->tasklet); |
854 | 854 | ||
855 | } | 855 | } |
856 | 856 | ||
857 | static void dma_tasklet(unsigned long data) | 857 | static void dma_tasklet(unsigned long data) |
858 | { | 858 | { |
859 | struct d40_chan *d40c = (struct d40_chan *) data; | 859 | struct d40_chan *d40c = (struct d40_chan *) data; |
860 | struct d40_desc *d40d_fin; | 860 | struct d40_desc *d40d_fin; |
861 | unsigned long flags; | 861 | unsigned long flags; |
862 | dma_async_tx_callback callback; | 862 | dma_async_tx_callback callback; |
863 | void *callback_param; | 863 | void *callback_param; |
864 | 864 | ||
865 | spin_lock_irqsave(&d40c->lock, flags); | 865 | spin_lock_irqsave(&d40c->lock, flags); |
866 | 866 | ||
867 | /* Get first active entry from list */ | 867 | /* Get first active entry from list */ |
868 | d40d_fin = d40_first_active_get(d40c); | 868 | d40d_fin = d40_first_active_get(d40c); |
869 | 869 | ||
870 | if (d40d_fin == NULL) | 870 | if (d40d_fin == NULL) |
871 | goto err; | 871 | goto err; |
872 | 872 | ||
873 | d40c->completed = d40d_fin->txd.cookie; | 873 | d40c->completed = d40d_fin->txd.cookie; |
874 | 874 | ||
875 | /* | 875 | /* |
876 | * If a channel is terminated, pending_tx is set to zero. | 876 | * If a channel is terminated, pending_tx is set to zero. |
877 | * This prevents any finished active jobs from returning to the client. | 877 | * This prevents any finished active jobs from returning to the client. |
878 | */ | 878 | */ |
879 | if (d40c->pending_tx == 0) { | 879 | if (d40c->pending_tx == 0) { |
880 | spin_unlock_irqrestore(&d40c->lock, flags); | 880 | spin_unlock_irqrestore(&d40c->lock, flags); |
881 | return; | 881 | return; |
882 | } | 882 | } |
883 | 883 | ||
884 | /* Callback to client */ | 884 | /* Callback to client */ |
885 | callback = d40d_fin->txd.callback; | 885 | callback = d40d_fin->txd.callback; |
886 | callback_param = d40d_fin->txd.callback_param; | 886 | callback_param = d40d_fin->txd.callback_param; |
887 | 887 | ||
888 | if (async_tx_test_ack(&d40d_fin->txd)) { | 888 | if (async_tx_test_ack(&d40d_fin->txd)) { |
889 | d40_pool_lli_free(d40d_fin); | 889 | d40_pool_lli_free(d40d_fin); |
890 | d40_desc_remove(d40d_fin); | 890 | d40_desc_remove(d40d_fin); |
891 | /* Return desc to free-list */ | 891 | /* Return desc to free-list */ |
892 | d40_desc_free(d40c, d40d_fin); | 892 | d40_desc_free(d40c, d40d_fin); |
893 | } else { | 893 | } else { |
894 | d40_desc_reset(d40d_fin); | 894 | d40_desc_reset(d40d_fin); |
895 | if (!d40d_fin->is_in_client_list) { | 895 | if (!d40d_fin->is_in_client_list) { |
896 | d40_desc_remove(d40d_fin); | 896 | d40_desc_remove(d40d_fin); |
897 | list_add_tail(&d40d_fin->node, &d40c->client); | 897 | list_add_tail(&d40d_fin->node, &d40c->client); |
898 | d40d_fin->is_in_client_list = true; | 898 | d40d_fin->is_in_client_list = true; |
899 | } | 899 | } |
900 | } | 900 | } |
901 | 901 | ||
902 | d40c->pending_tx--; | 902 | d40c->pending_tx--; |
903 | 903 | ||
904 | if (d40c->pending_tx) | 904 | if (d40c->pending_tx) |
905 | tasklet_schedule(&d40c->tasklet); | 905 | tasklet_schedule(&d40c->tasklet); |
906 | 906 | ||
907 | spin_unlock_irqrestore(&d40c->lock, flags); | 907 | spin_unlock_irqrestore(&d40c->lock, flags); |
908 | 908 | ||
909 | if (callback) | 909 | if (callback) |
910 | callback(callback_param); | 910 | callback(callback_param); |
911 | 911 | ||
912 | return; | 912 | return; |
913 | 913 | ||
914 | err: | 914 | err: |
915 | /* Rescue manoeuvre if receiving double interrupts */ | 915 | /* Rescue manoeuvre if receiving double interrupts */ |
916 | if (d40c->pending_tx > 0) | 916 | if (d40c->pending_tx > 0) |
917 | d40c->pending_tx--; | 917 | d40c->pending_tx--; |
918 | spin_unlock_irqrestore(&d40c->lock, flags); | 918 | spin_unlock_irqrestore(&d40c->lock, flags); |
919 | } | 919 | } |
920 | 920 | ||
921 | static irqreturn_t d40_handle_interrupt(int irq, void *data) | 921 | static irqreturn_t d40_handle_interrupt(int irq, void *data) |
922 | { | 922 | { |
923 | static const struct d40_interrupt_lookup il[] = { | 923 | static const struct d40_interrupt_lookup il[] = { |
924 | {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, | 924 | {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, |
925 | {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, | 925 | {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, |
926 | {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, | 926 | {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, |
927 | {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, | 927 | {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, |
928 | {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, | 928 | {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, |
929 | {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, | 929 | {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, |
930 | {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, | 930 | {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, |
931 | {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, | 931 | {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, |
932 | {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, | 932 | {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, |
933 | {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, | 933 | {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, |
934 | }; | 934 | }; |
935 | 935 | ||
936 | int i; | 936 | int i; |
937 | u32 regs[ARRAY_SIZE(il)]; | 937 | u32 regs[ARRAY_SIZE(il)]; |
938 | u32 tmp; | 938 | u32 tmp; |
939 | u32 idx; | 939 | u32 idx; |
940 | u32 row; | 940 | u32 row; |
941 | long chan = -1; | 941 | long chan = -1; |
942 | struct d40_chan *d40c; | 942 | struct d40_chan *d40c; |
943 | unsigned long flags; | 943 | unsigned long flags; |
944 | struct d40_base *base = data; | 944 | struct d40_base *base = data; |
945 | 945 | ||
946 | spin_lock_irqsave(&base->interrupt_lock, flags); | 946 | spin_lock_irqsave(&base->interrupt_lock, flags); |
947 | 947 | ||
948 | /* Read interrupt status of both logical and physical channels */ | 948 | /* Read interrupt status of both logical and physical channels */ |
949 | for (i = 0; i < ARRAY_SIZE(il); i++) | 949 | for (i = 0; i < ARRAY_SIZE(il); i++) |
950 | regs[i] = readl(base->virtbase + il[i].src); | 950 | regs[i] = readl(base->virtbase + il[i].src); |
951 | 951 | ||
952 | for (;;) { | 952 | for (;;) { |
953 | 953 | ||
954 | chan = find_next_bit((unsigned long *)regs, | 954 | chan = find_next_bit((unsigned long *)regs, |
955 | BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); | 955 | BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); |
956 | 956 | ||
957 | /* No more set bits found? */ | 957 | /* No more set bits found? */ |
958 | if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) | 958 | if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) |
959 | break; | 959 | break; |
960 | 960 | ||
961 | row = chan / BITS_PER_LONG; | 961 | row = chan / BITS_PER_LONG; |
962 | idx = chan & (BITS_PER_LONG - 1); | 962 | idx = chan & (BITS_PER_LONG - 1); |
963 | 963 | ||
964 | /* ACK interrupt */ | 964 | /* ACK interrupt */ |
965 | tmp = readl(base->virtbase + il[row].clr); | 965 | tmp = readl(base->virtbase + il[row].clr); |
966 | tmp |= 1 << idx; | 966 | tmp |= 1 << idx; |
967 | writel(tmp, base->virtbase + il[row].clr); | 967 | writel(tmp, base->virtbase + il[row].clr); |
968 | 968 | ||
969 | if (il[row].offset == D40_PHY_CHAN) | 969 | if (il[row].offset == D40_PHY_CHAN) |
970 | d40c = base->lookup_phy_chans[idx]; | 970 | d40c = base->lookup_phy_chans[idx]; |
971 | else | 971 | else |
972 | d40c = base->lookup_log_chans[il[row].offset + idx]; | 972 | d40c = base->lookup_log_chans[il[row].offset + idx]; |
973 | spin_lock(&d40c->lock); | 973 | spin_lock(&d40c->lock); |
974 | 974 | ||
975 | if (!il[row].is_error) | 975 | if (!il[row].is_error) |
976 | dma_tc_handle(d40c); | 976 | dma_tc_handle(d40c); |
977 | else | 977 | else |
978 | dev_err(base->dev, "[%s] Error IRQ chan: %ld offset %d idx %d\n", | 978 | dev_err(base->dev, "[%s] Error IRQ chan: %ld offset %d idx %d\n", |
979 | __func__, chan, il[row].offset, idx); | 979 | __func__, chan, il[row].offset, idx); |
980 | 980 | ||
981 | spin_unlock(&d40c->lock); | 981 | spin_unlock(&d40c->lock); |
982 | } | 982 | } |
983 | 983 | ||
984 | spin_unlock_irqrestore(&base->interrupt_lock, flags); | 984 | spin_unlock_irqrestore(&base->interrupt_lock, flags); |
985 | 985 | ||
986 | return IRQ_HANDLED; | 986 | return IRQ_HANDLED; |
987 | } | 987 | } |
988 | 988 | ||
989 | 989 | ||
990 | static int d40_validate_conf(struct d40_chan *d40c, | 990 | static int d40_validate_conf(struct d40_chan *d40c, |
991 | struct stedma40_chan_cfg *conf) | 991 | struct stedma40_chan_cfg *conf) |
992 | { | 992 | { |
993 | int res = 0; | 993 | int res = 0; |
994 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); | 994 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); |
995 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); | 995 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); |
996 | bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) | 996 | bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) |
997 | == STEDMA40_CHANNEL_IN_LOG_MODE; | 997 | == STEDMA40_CHANNEL_IN_LOG_MODE; |
998 | 998 | ||
999 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && | 999 | if (conf->dir == STEDMA40_MEM_TO_PERIPH && |
1000 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { | 1000 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { |
1001 | dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", | 1001 | dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", |
1002 | __func__); | 1002 | __func__); |
1003 | res = -EINVAL; | 1003 | res = -EINVAL; |
1004 | } | 1004 | } |
1005 | 1005 | ||
1006 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && | 1006 | if (conf->dir == STEDMA40_PERIPH_TO_MEM && |
1007 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { | 1007 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { |
1008 | dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", | 1008 | dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", |
1009 | __func__); | 1009 | __func__); |
1010 | res = -EINVAL; | 1010 | res = -EINVAL; |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && | 1013 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && |
1014 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { | 1014 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { |
1015 | dev_err(&d40c->chan.dev->device, | 1015 | dev_err(&d40c->chan.dev->device, |
1016 | "[%s] No event line\n", __func__); | 1016 | "[%s] No event line\n", __func__); |
1017 | res = -EINVAL; | 1017 | res = -EINVAL; |
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && | 1020 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && |
1021 | (src_event_group != dst_event_group)) { | 1021 | (src_event_group != dst_event_group)) { |
1022 | dev_err(&d40c->chan.dev->device, | 1022 | dev_err(&d40c->chan.dev->device, |
1023 | "[%s] Invalid event group\n", __func__); | 1023 | "[%s] Invalid event group\n", __func__); |
1024 | res = -EINVAL; | 1024 | res = -EINVAL; |
1025 | } | 1025 | } |
1026 | 1026 | ||
1027 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { | 1027 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { |
1028 | /* | 1028 | /* |
1029 | * The DMAC hardware supports it; support will be added to | 1029 | * The DMAC hardware supports it; support will be added to |
1030 | * this driver if any dma client requires it. | 1030 | * this driver if any dma client requires it. |
1031 | */ | 1031 | */ |
1032 | dev_err(&d40c->chan.dev->device, | 1032 | dev_err(&d40c->chan.dev->device, |
1033 | "[%s] periph to periph not supported\n", | 1033 | "[%s] periph to periph not supported\n", |
1034 | __func__); | 1034 | __func__); |
1035 | res = -EINVAL; | 1035 | res = -EINVAL; |
1036 | } | 1036 | } |
1037 | 1037 | ||
1038 | return res; | 1038 | return res; |
1039 | } | 1039 | } |
1040 | 1040 | ||
1041 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, | 1041 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, |
1042 | int log_event_line, bool is_log) | 1042 | int log_event_line, bool is_log) |
1043 | { | 1043 | { |
1044 | unsigned long flags; | 1044 | unsigned long flags; |
1045 | spin_lock_irqsave(&phy->lock, flags); | 1045 | spin_lock_irqsave(&phy->lock, flags); |
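| /* allocated_src/allocated_dst hold D40_ALLOC_FREE, D40_ALLOC_PHY | | /* allocated_src/allocated_dst hold D40_ALLOC_FREE, D40_ALLOC_PHY |
| * for an exclusive physical user, or a bitmask of logical event | | * for an exclusive physical user, or a bitmask of logical event |
| * lines (D40_ALLOC_LOG_FREE when the mask is empty). */ | | * lines (D40_ALLOC_LOG_FREE when the mask is empty). */ |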
1046 | if (!is_log) { | 1046 | if (!is_log) { |
1047 | /* Physical interrupts are masked per physical full channel */ | 1047 | /* Physical interrupts are masked per physical full channel */ |
1048 | if (phy->allocated_src == D40_ALLOC_FREE && | 1048 | if (phy->allocated_src == D40_ALLOC_FREE && |
1049 | phy->allocated_dst == D40_ALLOC_FREE) { | 1049 | phy->allocated_dst == D40_ALLOC_FREE) { |
1050 | phy->allocated_dst = D40_ALLOC_PHY; | 1050 | phy->allocated_dst = D40_ALLOC_PHY; |
1051 | phy->allocated_src = D40_ALLOC_PHY; | 1051 | phy->allocated_src = D40_ALLOC_PHY; |
1052 | goto found; | 1052 | goto found; |
1053 | } else | 1053 | } else |
1054 | goto not_found; | 1054 | goto not_found; |
1055 | } | 1055 | } |
1056 | 1056 | ||
1057 | /* Logical channel */ | 1057 | /* Logical channel */ |
1058 | if (is_src) { | 1058 | if (is_src) { |
1059 | if (phy->allocated_src == D40_ALLOC_PHY) | 1059 | if (phy->allocated_src == D40_ALLOC_PHY) |
1060 | goto not_found; | 1060 | goto not_found; |
1061 | 1061 | ||
1062 | if (phy->allocated_src == D40_ALLOC_FREE) | 1062 | if (phy->allocated_src == D40_ALLOC_FREE) |
1063 | phy->allocated_src = D40_ALLOC_LOG_FREE; | 1063 | phy->allocated_src = D40_ALLOC_LOG_FREE; |
1064 | 1064 | ||
1065 | if (!(phy->allocated_src & (1 << log_event_line))) { | 1065 | if (!(phy->allocated_src & (1 << log_event_line))) { |
1066 | phy->allocated_src |= 1 << log_event_line; | 1066 | phy->allocated_src |= 1 << log_event_line; |
1067 | goto found; | 1067 | goto found; |
1068 | } else | 1068 | } else |
1069 | goto not_found; | 1069 | goto not_found; |
1070 | } else { | 1070 | } else { |
1071 | if (phy->allocated_dst == D40_ALLOC_PHY) | 1071 | if (phy->allocated_dst == D40_ALLOC_PHY) |
1072 | goto not_found; | 1072 | goto not_found; |
1073 | 1073 | ||
1074 | if (phy->allocated_dst == D40_ALLOC_FREE) | 1074 | if (phy->allocated_dst == D40_ALLOC_FREE) |
1075 | phy->allocated_dst = D40_ALLOC_LOG_FREE; | 1075 | phy->allocated_dst = D40_ALLOC_LOG_FREE; |
1076 | 1076 | ||
1077 | if (!(phy->allocated_dst & (1 << log_event_line))) { | 1077 | if (!(phy->allocated_dst & (1 << log_event_line))) { |
1078 | phy->allocated_dst |= 1 << log_event_line; | 1078 | phy->allocated_dst |= 1 << log_event_line; |
1079 | goto found; | 1079 | goto found; |
1080 | } else | 1080 | } else |
1081 | goto not_found; | 1081 | goto not_found; |
1082 | } | 1082 | } |
1083 | 1083 | ||
1084 | not_found: | 1084 | not_found: |
1085 | spin_unlock_irqrestore(&phy->lock, flags); | 1085 | spin_unlock_irqrestore(&phy->lock, flags); |
1086 | return false; | 1086 | return false; |
1087 | found: | 1087 | found: |
1088 | spin_unlock_irqrestore(&phy->lock, flags); | 1088 | spin_unlock_irqrestore(&phy->lock, flags); |
1089 | return true; | 1089 | return true; |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, | 1092 | static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, |
1093 | int log_event_line) | 1093 | int log_event_line) |
1094 | { | 1094 | { |
1095 | unsigned long flags; | 1095 | unsigned long flags; |
1096 | bool is_free = false; | 1096 | bool is_free = false; |
1097 | 1097 | ||
1098 | spin_lock_irqsave(&phy->lock, flags); | 1098 | spin_lock_irqsave(&phy->lock, flags); |
1099 | if (!log_event_line) { | 1099 | if (!log_event_line) { |
1100 | /* Physical interrupts are masked per physical full channel */ | 1100 | /* Physical interrupts are masked per physical full channel */ |
1101 | phy->allocated_dst = D40_ALLOC_FREE; | 1101 | phy->allocated_dst = D40_ALLOC_FREE; |
1102 | phy->allocated_src = D40_ALLOC_FREE; | 1102 | phy->allocated_src = D40_ALLOC_FREE; |
1103 | is_free = true; | 1103 | is_free = true; |
1104 | goto out; | 1104 | goto out; |
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | /* Logical channel */ | 1107 | /* Logical channel */ |
1108 | if (is_src) { | 1108 | if (is_src) { |
1109 | phy->allocated_src &= ~(1 << log_event_line); | 1109 | phy->allocated_src &= ~(1 << log_event_line); |
1110 | if (phy->allocated_src == D40_ALLOC_LOG_FREE) | 1110 | if (phy->allocated_src == D40_ALLOC_LOG_FREE) |
1111 | phy->allocated_src = D40_ALLOC_FREE; | 1111 | phy->allocated_src = D40_ALLOC_FREE; |
1112 | } else { | 1112 | } else { |
1113 | phy->allocated_dst &= ~(1 << log_event_line); | 1113 | phy->allocated_dst &= ~(1 << log_event_line); |
1114 | if (phy->allocated_dst == D40_ALLOC_LOG_FREE) | 1114 | if (phy->allocated_dst == D40_ALLOC_LOG_FREE) |
1115 | phy->allocated_dst = D40_ALLOC_FREE; | 1115 | phy->allocated_dst = D40_ALLOC_FREE; |
1116 | } | 1116 | } |
1117 | 1117 | ||
1118 | is_free = ((phy->allocated_src | phy->allocated_dst) == | 1118 | is_free = ((phy->allocated_src | phy->allocated_dst) == |
1119 | D40_ALLOC_FREE); | 1119 | D40_ALLOC_FREE); |
1120 | 1120 | ||
1121 | out: | 1121 | out: |
1122 | spin_unlock_irqrestore(&phy->lock, flags); | 1122 | spin_unlock_irqrestore(&phy->lock, flags); |
1123 | 1123 | ||
1124 | return is_free; | 1124 | return is_free; |
1125 | } | 1125 | } |
1126 | 1126 | ||
1127 | static int d40_allocate_channel(struct d40_chan *d40c) | 1127 | static int d40_allocate_channel(struct d40_chan *d40c) |
1128 | { | 1128 | { |
1129 | int dev_type; | 1129 | int dev_type; |
1130 | int event_group; | 1130 | int event_group; |
1131 | int event_line; | 1131 | int event_line; |
1132 | struct d40_phy_res *phys; | 1132 | struct d40_phy_res *phys; |
1133 | int i; | 1133 | int i; |
1134 | int j; | 1134 | int j; |
1135 | int log_num; | 1135 | int log_num; |
1136 | bool is_src; | 1136 | bool is_src; |
1137 | bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) | 1137 | bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) |
1138 | == STEDMA40_CHANNEL_IN_LOG_MODE; | 1138 | == STEDMA40_CHANNEL_IN_LOG_MODE; |
1139 | 1139 | ||
1140 | 1140 | ||
1141 | phys = d40c->base->phy_res; | 1141 | phys = d40c->base->phy_res; |
1142 | 1142 | ||
1143 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 1143 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
1144 | dev_type = d40c->dma_cfg.src_dev_type; | 1144 | dev_type = d40c->dma_cfg.src_dev_type; |
1145 | log_num = 2 * dev_type; | 1145 | log_num = 2 * dev_type; |
1146 | is_src = true; | 1146 | is_src = true; |
1147 | } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1147 | } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1148 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1148 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1149 | /* dst event lines are used for logical memcpy */ | 1149 | /* dst event lines are used for logical memcpy */ |
1150 | dev_type = d40c->dma_cfg.dst_dev_type; | 1150 | dev_type = d40c->dma_cfg.dst_dev_type; |
1151 | log_num = 2 * dev_type + 1; | 1151 | log_num = 2 * dev_type + 1; |
1152 | is_src = false; | 1152 | is_src = false; |
1153 | } else | 1153 | } else |
1154 | return -EINVAL; | 1154 | return -EINVAL; |
1155 | 1155 | ||
1156 | event_group = D40_TYPE_TO_GROUP(dev_type); | 1156 | event_group = D40_TYPE_TO_GROUP(dev_type); |
1157 | event_line = D40_TYPE_TO_EVENT(dev_type); | 1157 | event_line = D40_TYPE_TO_EVENT(dev_type); |
1158 | 1158 | ||
1159 | if (!is_log) { | 1159 | if (!is_log) { |
1160 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1160 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1161 | /* Find physical half channel */ | 1161 | /* Find physical half channel */ |
1162 | for (i = 0; i < d40c->base->num_phy_chans; i++) { | 1162 | for (i = 0; i < d40c->base->num_phy_chans; i++) { |
1163 | 1163 | ||
1164 | if (d40_alloc_mask_set(&phys[i], is_src, | 1164 | if (d40_alloc_mask_set(&phys[i], is_src, |
1165 | 0, is_log)) | 1165 | 0, is_log)) |
1166 | goto found_phy; | 1166 | goto found_phy; |
1167 | } | 1167 | } |
1168 | } else | 1168 | } else |
1169 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1169 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1170 | int phy_num = j + event_group * 2; | 1170 | int phy_num = j + event_group * 2; |
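| /* Each pass covers a bank of eight phy channels; the event | | /* Each pass covers a bank of eight phy channels; the event |
| * group selects a pair of candidate channels within it. */ | | * group selects a pair of candidate channels within it. */ |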
1171 | for (i = phy_num; i < phy_num + 2; i++) { | 1171 | for (i = phy_num; i < phy_num + 2; i++) { |
1172 | if (d40_alloc_mask_set(&phys[i], is_src, | 1172 | if (d40_alloc_mask_set(&phys[i], is_src, |
1173 | 0, is_log)) | 1173 | 0, is_log)) |
1174 | goto found_phy; | 1174 | goto found_phy; |
1175 | } | 1175 | } |
1176 | } | 1176 | } |
1177 | return -EINVAL; | 1177 | return -EINVAL; |
1178 | found_phy: | 1178 | found_phy: |
1179 | d40c->phy_chan = &phys[i]; | 1179 | d40c->phy_chan = &phys[i]; |
1180 | d40c->log_num = D40_PHY_CHAN; | 1180 | d40c->log_num = D40_PHY_CHAN; |
1181 | goto out; | 1181 | goto out; |
1182 | } | 1182 | } |
1183 | if (dev_type == -1) | 1183 | if (dev_type == -1) |
1184 | return -EINVAL; | 1184 | return -EINVAL; |
1185 | 1185 | ||
1186 | /* Find logical channel */ | 1186 | /* Find logical channel */ |
1187 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | 1187 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { |
1188 | int phy_num = j + event_group * 2; | 1188 | int phy_num = j + event_group * 2; |
1189 | /* | 1189 | /* |
1190 | * Spread logical channels across all available physical channels | 1190 | * Spread logical channels across all available physical channels |
1191 | * rather than packing every logical channel into the first | 1191 | * rather than packing every logical channel into the first |
1192 | * available one. | 1192 | * available one. |
1193 | */ | 1193 | */ |
1194 | if (is_src) { | 1194 | if (is_src) { |
1195 | for (i = phy_num; i < phy_num + 2; i++) { | 1195 | for (i = phy_num; i < phy_num + 2; i++) { |
1196 | if (d40_alloc_mask_set(&phys[i], is_src, | 1196 | if (d40_alloc_mask_set(&phys[i], is_src, |
1197 | event_line, is_log)) | 1197 | event_line, is_log)) |
1198 | goto found_log; | 1198 | goto found_log; |
1199 | } | 1199 | } |
1200 | } else { | 1200 | } else { |
1201 | for (i = phy_num + 1; i >= phy_num; i--) { | 1201 | for (i = phy_num + 1; i >= phy_num; i--) { |
1202 | if (d40_alloc_mask_set(&phys[i], is_src, | 1202 | if (d40_alloc_mask_set(&phys[i], is_src, |
1203 | event_line, is_log)) | 1203 | event_line, is_log)) |
1204 | goto found_log; | 1204 | goto found_log; |
1205 | } | 1205 | } |
1206 | } | 1206 | } |
1207 | } | 1207 | } |
1208 | return -EINVAL; | 1208 | return -EINVAL; |
1209 | 1209 | ||
1210 | found_log: | 1210 | found_log: |
1211 | d40c->phy_chan = &phys[i]; | 1211 | d40c->phy_chan = &phys[i]; |
1212 | d40c->log_num = log_num; | 1212 | d40c->log_num = log_num; |
1213 | out: | 1213 | out: |
1214 | 1214 | ||
1215 | if (is_log) | 1215 | if (is_log) |
1216 | d40c->base->lookup_log_chans[d40c->log_num] = d40c; | 1216 | d40c->base->lookup_log_chans[d40c->log_num] = d40c; |
1217 | else | 1217 | else |
1218 | d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; | 1218 | d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; |
1219 | 1219 | ||
1220 | return 0; | 1220 | return 0; |
1221 | 1221 | ||
1222 | } | 1222 | } |
1223 | 1223 | ||
1224 | static int d40_config_chan(struct d40_chan *d40c, | 1224 | static int d40_config_chan(struct d40_chan *d40c, |
1225 | struct stedma40_chan_cfg *info) | 1225 | struct stedma40_chan_cfg *info) |
1226 | { | 1226 | { |
1227 | 1227 | ||
1228 | /* Fill in basic CFG register values */ | 1228 | /* Fill in basic CFG register values */ |
1229 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | 1229 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, |
1230 | &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); | 1230 | &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); |
1231 | 1231 | ||
1232 | if (d40c->log_num != D40_PHY_CHAN) { | 1232 | if (d40c->log_num != D40_PHY_CHAN) { |
1233 | d40_log_cfg(&d40c->dma_cfg, | 1233 | d40_log_cfg(&d40c->dma_cfg, |
1234 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | 1234 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); |
1235 | 1235 | ||
1236 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 1236 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) |
1237 | d40c->lcpa = d40c->base->lcpa_base + | 1237 | d40c->lcpa = d40c->base->lcpa_base + |
1238 | d40c->dma_cfg.src_dev_type * 32; | 1238 | d40c->dma_cfg.src_dev_type * 32; |
1239 | else | 1239 | else |
1240 | d40c->lcpa = d40c->base->lcpa_base + | 1240 | d40c->lcpa = d40c->base->lcpa_base + |
1241 | d40c->dma_cfg.dst_dev_type * 32 + 16; | 1241 | d40c->dma_cfg.dst_dev_type * 32 + 16; |
1242 | } | 1242 | } |
1243 | 1243 | ||
1244 | /* Write channel configuration to the DMA */ | 1244 | /* Write channel configuration to the DMA */ |
1245 | return d40_config_write(d40c); | 1245 | return d40_config_write(d40c); |
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | static int d40_config_memcpy(struct d40_chan *d40c) | 1248 | static int d40_config_memcpy(struct d40_chan *d40c) |
1249 | { | 1249 | { |
1250 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; | 1250 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; |
1251 | 1251 | ||
1252 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { | 1252 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { |
1253 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; | 1253 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; |
1254 | d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; | 1254 | d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; |
1255 | d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> | 1255 | d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> |
1256 | memcpy[d40c->chan.chan_id]; | 1256 | memcpy[d40c->chan.chan_id]; |
1257 | 1257 | ||
1258 | } else if (dma_has_cap(DMA_MEMCPY, cap) && | 1258 | } else if (dma_has_cap(DMA_MEMCPY, cap) && |
1259 | dma_has_cap(DMA_SLAVE, cap)) { | 1259 | dma_has_cap(DMA_SLAVE, cap)) { |
1260 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; | 1260 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; |
1261 | } else { | 1261 | } else { |
1262 | dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", | 1262 | dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", |
1263 | __func__); | 1263 | __func__); |
1264 | return -EINVAL; | 1264 | return -EINVAL; |
1265 | } | 1265 | } |
1266 | 1266 | ||
1267 | return 0; | 1267 | return 0; |
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | 1270 | ||
1271 | static int d40_free_dma(struct d40_chan *d40c) | 1271 | static int d40_free_dma(struct d40_chan *d40c) |
1272 | { | 1272 | { |
1273 | 1273 | ||
1274 | int res = 0; | 1274 | int res = 0; |
1275 | u32 event, dir; | 1275 | u32 event, dir; |
1276 | struct d40_phy_res *phy = d40c->phy_chan; | 1276 | struct d40_phy_res *phy = d40c->phy_chan; |
1277 | bool is_src; | 1277 | bool is_src; |
1278 | 1278 | ||
1279 | /* Terminate all queued and active transfers */ | 1279 | /* Terminate all queued and active transfers */ |
1280 | d40_term_all(d40c); | 1280 | d40_term_all(d40c); |
1281 | 1281 | ||
1282 | if (phy == NULL) { | 1282 | if (phy == NULL) { |
1283 | dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", | 1283 | dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", |
1284 | __func__); | 1284 | __func__); |
1285 | return -EINVAL; | 1285 | return -EINVAL; |
1286 | } | 1286 | } |
1287 | 1287 | ||
1288 | if (phy->allocated_src == D40_ALLOC_FREE && | 1288 | if (phy->allocated_src == D40_ALLOC_FREE && |
1289 | phy->allocated_dst == D40_ALLOC_FREE) { | 1289 | phy->allocated_dst == D40_ALLOC_FREE) { |
1290 | dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", | 1290 | dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", |
1291 | __func__); | 1291 | __func__); |
1292 | return -EINVAL; | 1292 | return -EINVAL; |
1293 | } | 1293 | } |
1294 | 1294 | ||
1295 | 1295 | ||
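| /* Suspend the channel before touching its event links; it is | | /* Suspend the channel before touching its event links; it is |
| * resumed or fully stopped further down as appropriate. */ | | * resumed or fully stopped further down as appropriate. */ |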
1296 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1296 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1297 | if (res) { | 1297 | if (res) { |
1298 | dev_err(&d40c->chan.dev->device, "[%s] suspend\n", | 1298 | dev_err(&d40c->chan.dev->device, "[%s] suspend\n", |
1299 | __func__); | 1299 | __func__); |
1300 | return res; | 1300 | return res; |
1301 | } | 1301 | } |
1302 | 1302 | ||
1303 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1303 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1304 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | 1304 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { |
1305 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1305 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
1306 | dir = D40_CHAN_REG_SDLNK; | 1306 | dir = D40_CHAN_REG_SDLNK; |
1307 | is_src = false; | 1307 | is_src = false; |
1308 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | 1308 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { |
1309 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1309 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1310 | dir = D40_CHAN_REG_SSLNK; | 1310 | dir = D40_CHAN_REG_SSLNK; |
1311 | is_src = true; | 1311 | is_src = true; |
1312 | } else { | 1312 | } else { |
1313 | dev_err(&d40c->chan.dev->device, | 1313 | dev_err(&d40c->chan.dev->device, |
1314 | "[%s] Unknown direction\n", __func__); | 1314 | "[%s] Unknown direction\n", __func__); |
1315 | return -EINVAL; | 1315 | return -EINVAL; |
1316 | } | 1316 | } |
1317 | 1317 | ||
1318 | if (d40c->log_num != D40_PHY_CHAN) { | 1318 | if (d40c->log_num != D40_PHY_CHAN) { |
1319 | /* | 1319 | /* |
1320 | * Release the logical channel and deactivate the event line | 1320 | * Release the logical channel and deactivate the event line |
1321 | * while the physical resource is suspended. | 1321 | * while the physical resource is suspended. |
1322 | */ | 1322 | */ |
1323 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) & | 1323 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) & |
1324 | D40_EVENTLINE_MASK(event), | 1324 | D40_EVENTLINE_MASK(event), |
1325 | d40c->base->virtbase + D40_DREG_PCBASE + | 1325 | d40c->base->virtbase + D40_DREG_PCBASE + |
1326 | phy->num * D40_DREG_PCDELTA + dir); | 1326 | phy->num * D40_DREG_PCDELTA + dir); |
1327 | 1327 | ||
1328 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; | 1328 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; |
1329 | 1329 | ||
1330 | /* | 1330 | /* |
1331 | * Check if there are more logical allocations | 1331 | * Check if there are more logical allocations |
1332 | * on this phy channel. | 1332 | * on this phy channel. |
1333 | */ | 1333 | */ |
1334 | if (!d40_alloc_mask_free(phy, is_src, event)) { | 1334 | if (!d40_alloc_mask_free(phy, is_src, event)) { |
1335 | /* Resume the other logical channels if any */ | 1335 | /* Resume the other logical channels if any */ |
1336 | if (d40_chan_has_events(d40c)) { | 1336 | if (d40_chan_has_events(d40c)) { |
1337 | res = d40_channel_execute_command(d40c, | 1337 | res = d40_channel_execute_command(d40c, |
1338 | D40_DMA_RUN); | 1338 | D40_DMA_RUN); |
1339 | if (res) { | 1339 | if (res) { |
1340 | dev_err(&d40c->chan.dev->device, | 1340 | dev_err(&d40c->chan.dev->device, |
1341 | "[%s] Executing RUN command\n", | 1341 | "[%s] Executing RUN command\n", |
1342 | __func__); | 1342 | __func__); |
1343 | return res; | 1343 | return res; |
1344 | } | 1344 | } |
1345 | } | 1345 | } |
1346 | return 0; | 1346 | return 0; |
1347 | } | 1347 | } |
1348 | } else | 1348 | } else |
1349 | d40_alloc_mask_free(phy, is_src, 0); | 1349 | d40_alloc_mask_free(phy, is_src, 0); |
1350 | 1350 | ||
1351 | /* Release physical channel */ | 1351 | /* Release physical channel */ |
1352 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | 1352 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); |
1353 | if (res) { | 1353 | if (res) { |
1354 | dev_err(&d40c->chan.dev->device, | 1354 | dev_err(&d40c->chan.dev->device, |
1355 | "[%s] Failed to stop channel\n", __func__); | 1355 | "[%s] Failed to stop channel\n", __func__); |
1356 | return res; | 1356 | return res; |
1357 | } | 1357 | } |
1358 | d40c->phy_chan = NULL; | 1358 | d40c->phy_chan = NULL; |
1359 | /* Invalidate channel type */ | 1359 | /* Invalidate channel type */ |
1360 | d40c->dma_cfg.channel_type = 0; | 1360 | d40c->dma_cfg.channel_type = 0; |
1361 | d40c->base->lookup_phy_chans[phy->num] = NULL; | 1361 | d40c->base->lookup_phy_chans[phy->num] = NULL; |
1362 | 1362 | ||
1363 | return 0; | 1363 | return 0; |
1364 | 1364 | ||
1365 | 1365 | ||
1366 | } | 1366 | } |
1367 | 1367 | ||
1368 | static int d40_pause(struct dma_chan *chan) | 1368 | static int d40_pause(struct dma_chan *chan) |
1369 | { | 1369 | { |
1370 | struct d40_chan *d40c = | 1370 | struct d40_chan *d40c = |
1371 | container_of(chan, struct d40_chan, chan); | 1371 | container_of(chan, struct d40_chan, chan); |
1372 | int res; | 1372 | int res; |
1373 | 1373 | ||
1374 | unsigned long flags; | 1374 | unsigned long flags; |
1375 | 1375 | ||
1376 | spin_lock_irqsave(&d40c->lock, flags); | 1376 | spin_lock_irqsave(&d40c->lock, flags); |
1377 | 1377 | ||
1378 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1378 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1379 | if (res == 0) { | 1379 | if (res == 0) { |
1380 | if (d40c->log_num != D40_PHY_CHAN) { | 1380 | if (d40c->log_num != D40_PHY_CHAN) { |
1381 | d40_config_set_event(d40c, false); | 1381 | d40_config_set_event(d40c, false); |
1382 | /* Resume the other logical channels if any */ | 1382 | /* Resume the other logical channels if any */ |
1383 | if (d40_chan_has_events(d40c)) | 1383 | if (d40_chan_has_events(d40c)) |
1384 | res = d40_channel_execute_command(d40c, | 1384 | res = d40_channel_execute_command(d40c, |
1385 | D40_DMA_RUN); | 1385 | D40_DMA_RUN); |
1386 | } | 1386 | } |
1387 | } | 1387 | } |
1388 | 1388 | ||
1389 | spin_unlock_irqrestore(&d40c->lock, flags); | 1389 | spin_unlock_irqrestore(&d40c->lock, flags); |
1390 | return res; | 1390 | return res; |
1391 | } | 1391 | } |
1392 | 1392 | ||
1393 | static bool d40_is_paused(struct d40_chan *d40c) | 1393 | static bool d40_is_paused(struct d40_chan *d40c) |
1394 | { | 1394 | { |
1395 | bool is_paused = false; | 1395 | bool is_paused = false; |
1396 | unsigned long flags; | 1396 | unsigned long flags; |
1397 | void __iomem *active_reg; | 1397 | void __iomem *active_reg; |
1398 | u32 status; | 1398 | u32 status; |
1399 | u32 event; | 1399 | u32 event; |
1400 | int res; | 1400 | int res; |
1401 | 1401 | ||
1402 | spin_lock_irqsave(&d40c->lock, flags); | 1402 | spin_lock_irqsave(&d40c->lock, flags); |
1403 | 1403 | ||
1404 | if (d40c->log_num == D40_PHY_CHAN) { | 1404 | if (d40c->log_num == D40_PHY_CHAN) { |
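| /* Channel status is split over two registers: even-numbered | | /* Channel status is split over two registers: even-numbered |
| * phy channels report in ACTIVE, odd ones in ACTIVO. */ | | * phy channels report in ACTIVE, odd ones in ACTIVO. */ |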
1405 | if (d40c->phy_chan->num % 2 == 0) | 1405 | if (d40c->phy_chan->num % 2 == 0) |
1406 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | 1406 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; |
1407 | else | 1407 | else |
1408 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | 1408 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; |
1409 | 1409 | ||
1410 | status = (readl(active_reg) & | 1410 | status = (readl(active_reg) & |
1411 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | 1411 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> |
1412 | D40_CHAN_POS(d40c->phy_chan->num); | 1412 | D40_CHAN_POS(d40c->phy_chan->num); |
1413 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | 1413 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) |
1414 | is_paused = true; | 1414 | is_paused = true; |
1415 | 1415 | ||
1416 | goto _exit; | 1416 | goto _exit; |
1417 | } | 1417 | } |
1418 | 1418 | ||
1419 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1419 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1420 | if (res != 0) | 1420 | if (res != 0) |
1421 | goto _exit; | 1421 | goto _exit; |
1422 | 1422 | ||
1423 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | 1423 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1424 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) | 1424 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) |
1425 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | 1425 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); |
1426 | else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | 1426 | else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) |
1427 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | 1427 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); |
1428 | else { | 1428 | else { |
1429 | dev_err(&d40c->chan.dev->device, | 1429 | dev_err(&d40c->chan.dev->device, |
1430 | "[%s] Unknown direction\n", __func__); | 1430 | "[%s] Unknown direction\n", __func__); |
1431 | goto _exit; | 1431 | goto _exit; |
1432 | } | 1432 | } |
1433 | status = d40_chan_has_events(d40c); | 1433 | status = d40_chan_has_events(d40c); |
1434 | status = (status & D40_EVENTLINE_MASK(event)) >> | 1434 | status = (status & D40_EVENTLINE_MASK(event)) >> |
1435 | D40_EVENTLINE_POS(event); | 1435 | D40_EVENTLINE_POS(event); |
1436 | 1436 | ||
1437 | if (status != D40_DMA_RUN) | 1437 | if (status != D40_DMA_RUN) |
1438 | is_paused = true; | 1438 | is_paused = true; |
1439 | 1439 | ||
1440 | /* Resume the other logical channels if any */ | 1440 | /* Resume the other logical channels if any */ |
1441 | if (d40_chan_has_events(d40c)) | 1441 | if (d40_chan_has_events(d40c)) |
1442 | res = d40_channel_execute_command(d40c, | 1442 | res = d40_channel_execute_command(d40c, |
1443 | D40_DMA_RUN); | 1443 | D40_DMA_RUN); |
1444 | 1444 | ||
1445 | _exit: | 1445 | _exit: |
1446 | spin_unlock_irqrestore(&d40c->lock, flags); | 1446 | spin_unlock_irqrestore(&d40c->lock, flags); |
1447 | return is_paused; | 1447 | return is_paused; |
1448 | 1448 | ||
1449 | } | 1449 | } |
1450 | 1450 | ||
1451 | 1451 | ||
1452 | static bool d40_tx_is_linked(struct d40_chan *d40c) | 1452 | static bool d40_tx_is_linked(struct d40_chan *d40c) |
1453 | { | 1453 | { |
1454 | bool is_link; | 1454 | bool is_link; |
1455 | 1455 | ||
1456 | if (d40c->log_num != D40_PHY_CHAN) | 1456 | if (d40c->log_num != D40_PHY_CHAN) |
1457 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | 1457 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; |
1458 | else | 1458 | else |
1459 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + | 1459 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + |
1460 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 1460 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
1461 | D40_CHAN_REG_SDLNK) & | 1461 | D40_CHAN_REG_SDLNK) & |
1462 | D40_SREG_LNK_PHYS_LNK_MASK; | 1462 | D40_SREG_LNK_PHYS_LNK_MASK; |
1463 | return is_link; | 1463 | return is_link; |
1464 | } | 1464 | } |
1465 | 1465 | ||
1466 | static u32 d40_residue(struct d40_chan *d40c) | 1466 | static u32 d40_residue(struct d40_chan *d40c) |
1467 | { | 1467 | { |
1468 | u32 num_elt; | 1468 | u32 num_elt; |
1469 | 1469 | ||
1470 | if (d40c->log_num != D40_PHY_CHAN) | 1470 | if (d40c->log_num != D40_PHY_CHAN) |
1471 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) | 1471 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) |
1472 | >> D40_MEM_LCSP2_ECNT_POS; | 1472 | >> D40_MEM_LCSP2_ECNT_POS; |
1473 | else | 1473 | else |
1474 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | 1474 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + |
1475 | d40c->phy_chan->num * D40_DREG_PCDELTA + | 1475 | d40c->phy_chan->num * D40_DREG_PCDELTA + |
1476 | D40_CHAN_REG_SDELT) & | 1476 | D40_CHAN_REG_SDELT) & |
1477 | D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; | 1477 | D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; |
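| /* Residue in bytes: remaining elements times element size; | | /* Residue in bytes: remaining elements times element size; |
| * data_width encodes log2 of the element size in bytes. */ | | * data_width encodes log2 of the element size in bytes. */ |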
1478 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); | 1478 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); |
1479 | } | 1479 | } |
1480 | 1480 | ||
1481 | static int d40_resume(struct dma_chan *chan) | 1481 | static int d40_resume(struct dma_chan *chan) |
1482 | { | 1482 | { |
1483 | struct d40_chan *d40c = | 1483 | struct d40_chan *d40c = |
1484 | container_of(chan, struct d40_chan, chan); | 1484 | container_of(chan, struct d40_chan, chan); |
1485 | int res = 0; | 1485 | int res = 0; |
1486 | unsigned long flags; | 1486 | unsigned long flags; |
1487 | 1487 | ||
1488 | spin_lock_irqsave(&d40c->lock, flags); | 1488 | spin_lock_irqsave(&d40c->lock, flags); |
1489 | 1489 | ||
1490 | if (d40c->log_num != D40_PHY_CHAN) { | 1490 | if (d40c->log_num != D40_PHY_CHAN) { |
1491 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | 1491 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1492 | if (res) | 1492 | if (res) |
1493 | goto out; | 1493 | goto out; |
1494 | 1494 | ||
1495 | /* If there are bytes left to transfer or a linked tx, resume the job */ | 1495 | /* If there are bytes left to transfer or a linked tx, resume the job */ |
1496 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | 1496 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { |
1497 | d40_config_set_event(d40c, true); | 1497 | d40_config_set_event(d40c, true); |
1498 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 1498 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
1499 | } | 1499 | } |
1500 | } else if (d40_residue(d40c) || d40_tx_is_linked(d40c)) | 1500 | } else if (d40_residue(d40c) || d40_tx_is_linked(d40c)) |
1501 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); | 1501 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
1502 | 1502 | ||
1503 | out: | 1503 | out: |
1504 | spin_unlock_irqrestore(&d40c->lock, flags); | 1504 | spin_unlock_irqrestore(&d40c->lock, flags); |
1505 | return res; | 1505 | return res; |
1506 | } | 1506 | } |
1507 | 1507 | ||
1508 | static u32 stedma40_residue(struct dma_chan *chan) | 1508 | static u32 stedma40_residue(struct dma_chan *chan) |
1509 | { | 1509 | { |
1510 | struct d40_chan *d40c = | 1510 | struct d40_chan *d40c = |
1511 | container_of(chan, struct d40_chan, chan); | 1511 | container_of(chan, struct d40_chan, chan); |
1512 | u32 bytes_left; | 1512 | u32 bytes_left; |
1513 | unsigned long flags; | 1513 | unsigned long flags; |
1514 | 1514 | ||
1515 | spin_lock_irqsave(&d40c->lock, flags); | 1515 | spin_lock_irqsave(&d40c->lock, flags); |
1516 | bytes_left = d40_residue(d40c); | 1516 | bytes_left = d40_residue(d40c); |
1517 | spin_unlock_irqrestore(&d40c->lock, flags); | 1517 | spin_unlock_irqrestore(&d40c->lock, flags); |
1518 | 1518 | ||
1519 | return bytes_left; | 1519 | return bytes_left; |
1520 | } | 1520 | } |
1521 | 1521 | ||
1522 | /* Public DMA functions in addition to the DMA engine framework */ | 1522 | /* Public DMA functions in addition to the DMA engine framework */ |
1523 | 1523 | ||
1524 | int stedma40_set_psize(struct dma_chan *chan, | 1524 | int stedma40_set_psize(struct dma_chan *chan, |
1525 | int src_psize, | 1525 | int src_psize, |
1526 | int dst_psize) | 1526 | int dst_psize) |
1527 | { | 1527 | { |
1528 | struct d40_chan *d40c = | 1528 | struct d40_chan *d40c = |
1529 | container_of(chan, struct d40_chan, chan); | 1529 | container_of(chan, struct d40_chan, chan); |
1530 | unsigned long flags; | 1530 | unsigned long flags; |
1531 | 1531 | ||
1532 | spin_lock_irqsave(&d40c->lock, flags); | 1532 | spin_lock_irqsave(&d40c->lock, flags); |
1533 | 1533 | ||
1534 | if (d40c->log_num != D40_PHY_CHAN) { | 1534 | if (d40c->log_num != D40_PHY_CHAN) { |
1535 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 1535 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; |
1536 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | 1536 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; |
1537 | d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 1537 | d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; |
1538 | d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; | 1538 | d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; |
1539 | goto out; | 1539 | goto out; |
1540 | } | 1540 | } |
1541 | 1541 | ||
1542 | if (src_psize == STEDMA40_PSIZE_PHY_1) | 1542 | if (src_psize == STEDMA40_PSIZE_PHY_1) |
1543 | d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | 1543 | d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); |
1544 | else { | 1544 | else { |
1545 | d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | 1545 | d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; |
1546 | d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | 1546 | d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << |
1547 | D40_SREG_CFG_PSIZE_POS); | 1547 | D40_SREG_CFG_PSIZE_POS); |
1548 | d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; | 1548 | d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; |
1549 | } | 1549 | } |
1550 | 1550 | ||
1551 | if (dst_psize == STEDMA40_PSIZE_PHY_1) | 1551 | if (dst_psize == STEDMA40_PSIZE_PHY_1) |
1552 | d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | 1552 | d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); |
1553 | else { | 1553 | else { |
1554 | d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | 1554 | d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; |
1555 | d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | 1555 | d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << |
1556 | D40_SREG_CFG_PSIZE_POS); | 1556 | D40_SREG_CFG_PSIZE_POS); |
1557 | d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; | 1557 | d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; |
1558 | } | 1558 | } |
1559 | out: | 1559 | out: |
1560 | spin_unlock_irqrestore(&d40c->lock, flags); | 1560 | spin_unlock_irqrestore(&d40c->lock, flags); |
1561 | return 0; | 1561 | return 0; |
1562 | } | 1562 | } |
1563 | EXPORT_SYMBOL(stedma40_set_psize); | 1563 | EXPORT_SYMBOL(stedma40_set_psize); |
1564 | 1564 | ||
1565 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | 1565 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, |
1566 | struct scatterlist *sgl_dst, | 1566 | struct scatterlist *sgl_dst, |
1567 | struct scatterlist *sgl_src, | 1567 | struct scatterlist *sgl_src, |
1568 | unsigned int sgl_len, | 1568 | unsigned int sgl_len, |
1569 | unsigned long flags) | 1569 | unsigned long flags) |
1570 | { | 1570 | { |
1571 | int res; | 1571 | int res; |
1572 | struct d40_desc *d40d; | 1572 | struct d40_desc *d40d; |
1573 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1573 | struct d40_chan *d40c = container_of(chan, struct d40_chan, |
1574 | chan); | 1574 | chan); |
1575 | unsigned long flg; | 1575 | unsigned long flg; |
1576 | int lli_max = d40c->base->plat_data->llis_per_log; | 1576 | int lli_max = d40c->base->plat_data->llis_per_log; |
1577 | 1577 | ||
1578 | 1578 | ||
1579 | spin_lock_irqsave(&d40c->lock, flg); | 1579 | spin_lock_irqsave(&d40c->lock, flg); |
1580 | d40d = d40_desc_get(d40c); | 1580 | d40d = d40_desc_get(d40c); |
1581 | 1581 | ||
1582 | if (d40d == NULL) | 1582 | if (d40d == NULL) |
1583 | goto err; | 1583 | goto err; |
1584 | 1584 | ||
1585 | memset(d40d, 0, sizeof(struct d40_desc)); | 1585 | memset(d40d, 0, sizeof(struct d40_desc)); |
1586 | d40d->lli_len = sgl_len; | 1586 | d40d->lli_len = sgl_len; |
1587 | 1587 | ||
1588 | d40d->txd.flags = flags; | 1588 | d40d->txd.flags = flags; |
1589 | 1589 | ||
1590 | if (d40c->log_num != D40_PHY_CHAN) { | 1590 | if (d40c->log_num != D40_PHY_CHAN) { |
1591 | if (sgl_len > 1) | 1591 | if (sgl_len > 1) |
1592 | /* | 1592 | /* |
1593 | * Check if there is space available in lcla. If not, | 1593 | * Check if there is space available in lcla. If not, |
1594 | * split the list into one-element transfers and run | 1594 | * split the list into one-element transfers and run |
1595 | * only in lcpa space. | 1595 | * only in lcpa space. |
1596 | */ | 1596 | */ |
1597 | if (d40_lcla_id_get(d40c, | 1597 | if (d40_lcla_id_get(d40c, |
1598 | &d40c->base->lcla_pool) != 0) | 1598 | &d40c->base->lcla_pool) != 0) |
1599 | lli_max = 1; | 1599 | lli_max = 1; |
1600 | 1600 | ||
1601 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | 1601 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { |
1602 | dev_err(&d40c->chan.dev->device, | 1602 | dev_err(&d40c->chan.dev->device, |
1603 | "[%s] Out of memory\n", __func__); | 1603 | "[%s] Out of memory\n", __func__); |
1604 | goto err; | 1604 | goto err; |
1605 | } | 1605 | } |
1606 | 1606 | ||
1607 | (void) d40_log_sg_to_lli(d40c->lcla.src_id, | 1607 | (void) d40_log_sg_to_lli(d40c->lcla.src_id, |
1608 | sgl_src, | 1608 | sgl_src, |
1609 | sgl_len, | 1609 | sgl_len, |
1610 | d40d->lli_log.src, | 1610 | d40d->lli_log.src, |
1611 | d40c->log_def.lcsp1, | 1611 | d40c->log_def.lcsp1, |
1612 | d40c->dma_cfg.src_info.data_width, | 1612 | d40c->dma_cfg.src_info.data_width, |
1613 | flags & DMA_PREP_INTERRUPT, lli_max, | 1613 | flags & DMA_PREP_INTERRUPT, lli_max, |
1614 | d40c->base->plat_data->llis_per_log); | 1614 | d40c->base->plat_data->llis_per_log); |
1615 | 1615 | ||
1616 | (void) d40_log_sg_to_lli(d40c->lcla.dst_id, | 1616 | (void) d40_log_sg_to_lli(d40c->lcla.dst_id, |
1617 | sgl_dst, | 1617 | sgl_dst, |
1618 | sgl_len, | 1618 | sgl_len, |
1619 | d40d->lli_log.dst, | 1619 | d40d->lli_log.dst, |
1620 | d40c->log_def.lcsp3, | 1620 | d40c->log_def.lcsp3, |
1621 | d40c->dma_cfg.dst_info.data_width, | 1621 | d40c->dma_cfg.dst_info.data_width, |
1622 | flags & DMA_PREP_INTERRUPT, lli_max, | 1622 | flags & DMA_PREP_INTERRUPT, lli_max, |
1623 | d40c->base->plat_data->llis_per_log); | 1623 | d40c->base->plat_data->llis_per_log); |
1624 | 1624 | ||
1625 | 1625 | ||
1626 | } else { | 1626 | } else { |
1627 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | 1627 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { |
1628 | dev_err(&d40c->chan.dev->device, | 1628 | dev_err(&d40c->chan.dev->device, |
1629 | "[%s] Out of memory\n", __func__); | 1629 | "[%s] Out of memory\n", __func__); |
1630 | goto err; | 1630 | goto err; |
1631 | } | 1631 | } |
1632 | 1632 | ||
1633 | res = d40_phy_sg_to_lli(sgl_src, | 1633 | res = d40_phy_sg_to_lli(sgl_src, |
1634 | sgl_len, | 1634 | sgl_len, |
1635 | 0, | 1635 | 0, |
1636 | d40d->lli_phy.src, | 1636 | d40d->lli_phy.src, |
1637 | d40d->lli_phy.src_addr, | 1637 | d40d->lli_phy.src_addr, |
1638 | d40c->src_def_cfg, | 1638 | d40c->src_def_cfg, |
1639 | d40c->dma_cfg.src_info.data_width, | 1639 | d40c->dma_cfg.src_info.data_width, |
1640 | d40c->dma_cfg.src_info.psize, | 1640 | d40c->dma_cfg.src_info.psize, |
1641 | true); | 1641 | true); |
1642 | 1642 | ||
1643 | if (res < 0) | 1643 | if (res < 0) |
1644 | goto err; | 1644 | goto err; |
1645 | 1645 | ||
1646 | res = d40_phy_sg_to_lli(sgl_dst, | 1646 | res = d40_phy_sg_to_lli(sgl_dst, |
1647 | sgl_len, | 1647 | sgl_len, |
1648 | 0, | 1648 | 0, |
1649 | d40d->lli_phy.dst, | 1649 | d40d->lli_phy.dst, |
1650 | d40d->lli_phy.dst_addr, | 1650 | d40d->lli_phy.dst_addr, |
1651 | d40c->dst_def_cfg, | 1651 | d40c->dst_def_cfg, |
1652 | d40c->dma_cfg.dst_info.data_width, | 1652 | d40c->dma_cfg.dst_info.data_width, |
1653 | d40c->dma_cfg.dst_info.psize, | 1653 | d40c->dma_cfg.dst_info.psize, |
1654 | true); | 1654 | true); |
1655 | 1655 | ||
1656 | if (res < 0) | 1656 | if (res < 0) |
1657 | goto err; | 1657 | goto err; |
1658 | 1658 | ||
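| /* Map (and flush) the CPU-written LLI list so the DMAC reads | | /* Map (and flush) the CPU-written LLI list so the DMAC reads |
| * up-to-date descriptors from memory. */ | | * up-to-date descriptors from memory. */ |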
1659 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 1659 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, |
1660 | d40d->lli_pool.size, DMA_TO_DEVICE); | 1660 | d40d->lli_pool.size, DMA_TO_DEVICE); |
1661 | } | 1661 | } |
1662 | 1662 | ||
1663 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 1663 | dma_async_tx_descriptor_init(&d40d->txd, chan); |
1664 | 1664 | ||
1665 | d40d->txd.tx_submit = d40_tx_submit; | 1665 | d40d->txd.tx_submit = d40_tx_submit; |
1666 | 1666 | ||
1667 | spin_unlock_irqrestore(&d40c->lock, flg); | 1667 | spin_unlock_irqrestore(&d40c->lock, flg); |
1668 | 1668 | ||
1669 | return &d40d->txd; | 1669 | return &d40d->txd; |
1670 | err: | 1670 | err: |
1671 | spin_unlock_irqrestore(&d40c->lock, flg); | 1671 | spin_unlock_irqrestore(&d40c->lock, flg); |
1672 | return NULL; | 1672 | return NULL; |
1673 | } | 1673 | } |
1674 | EXPORT_SYMBOL(stedma40_memcpy_sg); | 1674 | EXPORT_SYMBOL(stedma40_memcpy_sg); |
1675 | 1675 | ||
1676 | bool stedma40_filter(struct dma_chan *chan, void *data) | 1676 | bool stedma40_filter(struct dma_chan *chan, void *data) |
1677 | { | 1677 | { |
1678 | struct stedma40_chan_cfg *info = data; | 1678 | struct stedma40_chan_cfg *info = data; |
1679 | struct d40_chan *d40c = | 1679 | struct d40_chan *d40c = |
1680 | container_of(chan, struct d40_chan, chan); | 1680 | container_of(chan, struct d40_chan, chan); |
1681 | int err; | 1681 | int err; |
1682 | 1682 | ||
1683 | if (data) { | 1683 | if (data) { |
1684 | err = d40_validate_conf(d40c, info); | 1684 | err = d40_validate_conf(d40c, info); |
1685 | if (!err) | 1685 | if (!err) |
1686 | d40c->dma_cfg = *info; | 1686 | d40c->dma_cfg = *info; |
1687 | } else | 1687 | } else |
1688 | err = d40_config_memcpy(d40c); | 1688 | err = d40_config_memcpy(d40c); |
1689 | 1689 | ||
1690 | return err == 0; | 1690 | return err == 0; |
1691 | } | 1691 | } |
1692 | EXPORT_SYMBOL(stedma40_filter); | 1692 | EXPORT_SYMBOL(stedma40_filter); |
1693 | 1693 | ||
1694 | /* DMA ENGINE functions */ | 1694 | /* DMA ENGINE functions */ |
1695 | static int d40_alloc_chan_resources(struct dma_chan *chan) | 1695 | static int d40_alloc_chan_resources(struct dma_chan *chan) |
1696 | { | 1696 | { |
1697 | int err; | 1697 | int err; |
1698 | unsigned long flags; | 1698 | unsigned long flags; |
1699 | struct d40_chan *d40c = | 1699 | struct d40_chan *d40c = |
1700 | container_of(chan, struct d40_chan, chan); | 1700 | container_of(chan, struct d40_chan, chan); |
1701 | 1701 | ||
1702 | spin_lock_irqsave(&d40c->lock, flags); | 1702 | spin_lock_irqsave(&d40c->lock, flags); |
1703 | 1703 | ||
1704 | d40c->completed = chan->cookie = 1; | 1704 | d40c->completed = chan->cookie = 1; |
1705 | 1705 | ||
1706 | /* | 1706 | /* |
1707 | * If no dma configuration is set (channel_type == 0), | 1707 | * If no dma configuration is set (channel_type == 0), |
1708 | * use the default configuration. | 1708 | * use the default configuration. |
1709 | */ | 1709 | */ |
1710 | if (d40c->dma_cfg.channel_type == 0) { | 1710 | if (d40c->dma_cfg.channel_type == 0) { |
1711 | err = d40_config_memcpy(d40c); | 1711 | err = d40_config_memcpy(d40c); |
1712 | if (err) | 1712 | if (err) |
1713 | goto err_alloc; | 1713 | goto err_alloc; |
1714 | } | 1714 | } |
1715 | 1715 | ||
1716 | err = d40_allocate_channel(d40c); | 1716 | err = d40_allocate_channel(d40c); |
1717 | if (err) { | 1717 | if (err) { |
1718 | dev_err(&d40c->chan.dev->device, | 1718 | dev_err(&d40c->chan.dev->device, |
1719 | "[%s] Failed to allocate channel\n", __func__); | 1719 | "[%s] Failed to allocate channel\n", __func__); |
1720 | goto err_alloc; | 1720 | goto err_alloc; |
1721 | } | 1721 | } |
1722 | 1722 | ||
1723 | err = d40_config_chan(d40c, &d40c->dma_cfg); | 1723 | err = d40_config_chan(d40c, &d40c->dma_cfg); |
1724 | if (err) { | 1724 | if (err) { |
1725 | dev_err(&d40c->chan.dev->device, | 1725 | dev_err(&d40c->chan.dev->device, |
1726 | "[%s] Failed to configure channel\n", | 1726 | "[%s] Failed to configure channel\n", |
1727 | __func__); | 1727 | __func__); |
1728 | goto err_config; | 1728 | goto err_config; |
1729 | } | 1729 | } |
1730 | 1730 | ||
1731 | spin_unlock_irqrestore(&d40c->lock, flags); | 1731 | spin_unlock_irqrestore(&d40c->lock, flags); |
1732 | return 0; | 1732 | return 0; |
1733 | 1733 | ||
1734 | err_config: | 1734 | err_config: |
1735 | (void) d40_free_dma(d40c); | 1735 | (void) d40_free_dma(d40c); |
1736 | err_alloc: | 1736 | err_alloc: |
1737 | spin_unlock_irqrestore(&d40c->lock, flags); | 1737 | spin_unlock_irqrestore(&d40c->lock, flags); |
1738 | dev_err(&d40c->chan.dev->device, | 1738 | dev_err(&d40c->chan.dev->device, |
1739 | "[%s] Channel allocation failed\n", __func__); | 1739 | "[%s] Channel allocation failed\n", __func__); |
1740 | return -EINVAL; | 1740 | return -EINVAL; |
1741 | } | 1741 | } |
1742 | 1742 | ||
1743 | static void d40_free_chan_resources(struct dma_chan *chan) | 1743 | static void d40_free_chan_resources(struct dma_chan *chan) |
1744 | { | 1744 | { |
1745 | struct d40_chan *d40c = | 1745 | struct d40_chan *d40c = |
1746 | container_of(chan, struct d40_chan, chan); | 1746 | container_of(chan, struct d40_chan, chan); |
1747 | int err; | 1747 | int err; |
1748 | unsigned long flags; | 1748 | unsigned long flags; |
1749 | 1749 | ||
1750 | spin_lock_irqsave(&d40c->lock, flags); | 1750 | spin_lock_irqsave(&d40c->lock, flags); |
1751 | 1751 | ||
1752 | err = d40_free_dma(d40c); | 1752 | err = d40_free_dma(d40c); |
1753 | 1753 | ||
1754 | if (err) | 1754 | if (err) |
1755 | dev_err(&d40c->chan.dev->device, | 1755 | dev_err(&d40c->chan.dev->device, |
1756 | "[%s] Failed to free channel\n", __func__); | 1756 | "[%s] Failed to free channel\n", __func__); |
1757 | spin_unlock_irqrestore(&d40c->lock, flags); | 1757 | spin_unlock_irqrestore(&d40c->lock, flags); |
1758 | } | 1758 | } |
1759 | 1759 | ||
1760 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | 1760 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, |
1761 | dma_addr_t dst, | 1761 | dma_addr_t dst, |
1762 | dma_addr_t src, | 1762 | dma_addr_t src, |
1763 | size_t size, | 1763 | size_t size, |
1764 | unsigned long flags) | 1764 | unsigned long flags) |
1765 | { | 1765 | { |
1766 | struct d40_desc *d40d; | 1766 | struct d40_desc *d40d; |
1767 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1767 | struct d40_chan *d40c = container_of(chan, struct d40_chan, |
1768 | chan); | 1768 | chan); |
1769 | unsigned long flg; | 1769 | unsigned long flg; |
1770 | int err = 0; | 1770 | int err = 0; |
1771 | 1771 | ||
1772 | spin_lock_irqsave(&d40c->lock, flg); | 1772 | spin_lock_irqsave(&d40c->lock, flg); |
1773 | d40d = d40_desc_get(d40c); | 1773 | d40d = d40_desc_get(d40c); |
1774 | 1774 | ||
1775 | if (d40d == NULL) { | 1775 | if (d40d == NULL) { |
1776 | dev_err(&d40c->chan.dev->device, | 1776 | dev_err(&d40c->chan.dev->device, |
1777 | "[%s] Descriptor is NULL\n", __func__); | 1777 | "[%s] Descriptor is NULL\n", __func__); |
1778 | goto err; | 1778 | goto err; |
1779 | } | 1779 | } |
1780 | 1780 | ||
1781 | memset(d40d, 0, sizeof(struct d40_desc)); | 1781 | memset(d40d, 0, sizeof(struct d40_desc)); |
1782 | 1782 | ||
1783 | d40d->txd.flags = flags; | 1783 | d40d->txd.flags = flags; |
1784 | 1784 | ||
1785 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 1785 | dma_async_tx_descriptor_init(&d40d->txd, chan); |
1786 | 1786 | ||
1787 | d40d->txd.tx_submit = d40_tx_submit; | 1787 | d40d->txd.tx_submit = d40_tx_submit; |
1788 | 1788 | ||
1789 | if (d40c->log_num != D40_PHY_CHAN) { | 1789 | if (d40c->log_num != D40_PHY_CHAN) { |
1790 | 1790 | ||
1791 | if (d40_pool_lli_alloc(d40d, 1, true) < 0) { | 1791 | if (d40_pool_lli_alloc(d40d, 1, true) < 0) { |
1792 | dev_err(&d40c->chan.dev->device, | 1792 | dev_err(&d40c->chan.dev->device, |
1793 | "[%s] Out of memory\n", __func__); | 1793 | "[%s] Out of memory\n", __func__); |
1794 | goto err; | 1794 | goto err; |
1795 | } | 1795 | } |
1796 | d40d->lli_len = 1; | 1796 | d40d->lli_len = 1; |
1797 | 1797 | ||
1798 | d40_log_fill_lli(d40d->lli_log.src, | 1798 | d40_log_fill_lli(d40d->lli_log.src, |
1799 | src, | 1799 | src, |
1800 | size, | 1800 | size, |
1801 | 0, | 1801 | 0, |
1802 | d40c->log_def.lcsp1, | 1802 | d40c->log_def.lcsp1, |
1803 | d40c->dma_cfg.src_info.data_width, | 1803 | d40c->dma_cfg.src_info.data_width, |
1804 | true, true); | 1804 | true, true); |
1805 | 1805 | ||
1806 | d40_log_fill_lli(d40d->lli_log.dst, | 1806 | d40_log_fill_lli(d40d->lli_log.dst, |
1807 | dst, | 1807 | dst, |
1808 | size, | 1808 | size, |
1809 | 0, | 1809 | 0, |
1810 | d40c->log_def.lcsp3, | 1810 | d40c->log_def.lcsp3, |
1811 | d40c->dma_cfg.dst_info.data_width, | 1811 | d40c->dma_cfg.dst_info.data_width, |
1812 | true, true); | 1812 | true, true); |
1813 | 1813 | ||
1814 | } else { | 1814 | } else { |
1815 | 1815 | ||
1816 | if (d40_pool_lli_alloc(d40d, 1, false) < 0) { | 1816 | if (d40_pool_lli_alloc(d40d, 1, false) < 0) { |
1817 | dev_err(&d40c->chan.dev->device, | 1817 | dev_err(&d40c->chan.dev->device, |
1818 | "[%s] Out of memory\n", __func__); | 1818 | "[%s] Out of memory\n", __func__); |
1819 | goto err; | 1819 | goto err; |
1820 | } | 1820 | } |
1821 | 1821 | ||
1822 | err = d40_phy_fill_lli(d40d->lli_phy.src, | 1822 | err = d40_phy_fill_lli(d40d->lli_phy.src, |
1823 | src, | 1823 | src, |
1824 | size, | 1824 | size, |
1825 | d40c->dma_cfg.src_info.psize, | 1825 | d40c->dma_cfg.src_info.psize, |
1826 | 0, | 1826 | 0, |
1827 | d40c->src_def_cfg, | 1827 | d40c->src_def_cfg, |
1828 | true, | 1828 | true, |
1829 | d40c->dma_cfg.src_info.data_width, | 1829 | d40c->dma_cfg.src_info.data_width, |
1830 | false); | 1830 | false); |
1831 | if (err) | 1831 | if (err) |
1832 | goto err_fill_lli; | 1832 | goto err_fill_lli; |
1833 | 1833 | ||
1834 | err = d40_phy_fill_lli(d40d->lli_phy.dst, | 1834 | err = d40_phy_fill_lli(d40d->lli_phy.dst, |
1835 | dst, | 1835 | dst, |
1836 | size, | 1836 | size, |
1837 | d40c->dma_cfg.dst_info.psize, | 1837 | d40c->dma_cfg.dst_info.psize, |
1838 | 0, | 1838 | 0, |
1839 | d40c->dst_def_cfg, | 1839 | d40c->dst_def_cfg, |
1840 | true, | 1840 | true, |
1841 | d40c->dma_cfg.dst_info.data_width, | 1841 | d40c->dma_cfg.dst_info.data_width, |
1842 | false); | 1842 | false); |
1843 | 1843 | ||
1844 | if (err) | 1844 | if (err) |
1845 | goto err_fill_lli; | 1845 | goto err_fill_lli; |
1846 | 1846 | ||
1847 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 1847 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, |
1848 | d40d->lli_pool.size, DMA_TO_DEVICE); | 1848 | d40d->lli_pool.size, DMA_TO_DEVICE); |
1849 | } | 1849 | } |
1850 | 1850 | ||
1851 | spin_unlock_irqrestore(&d40c->lock, flg); | 1851 | spin_unlock_irqrestore(&d40c->lock, flg); |
1852 | return &d40d->txd; | 1852 | return &d40d->txd; |
1853 | 1853 | ||
1854 | err_fill_lli: | 1854 | err_fill_lli: |
1855 | dev_err(&d40c->chan.dev->device, | 1855 | dev_err(&d40c->chan.dev->device, |
1856 | "[%s] Failed filling in PHY LLI\n", __func__); | 1856 | "[%s] Failed filling in PHY LLI\n", __func__); |
1857 | d40_pool_lli_free(d40d); | 1857 | d40_pool_lli_free(d40d); |
1858 | err: | 1858 | err: |
1859 | spin_unlock_irqrestore(&d40c->lock, flg); | 1859 | spin_unlock_irqrestore(&d40c->lock, flg); |
1860 | return NULL; | 1860 | return NULL; |
1861 | } | 1861 | } |
1862 | 1862 | ||
1863 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, | 1863 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, |
1864 | struct d40_chan *d40c, | 1864 | struct d40_chan *d40c, |
1865 | struct scatterlist *sgl, | 1865 | struct scatterlist *sgl, |
1866 | unsigned int sg_len, | 1866 | unsigned int sg_len, |
1867 | enum dma_data_direction direction, | 1867 | enum dma_data_direction direction, |
1868 | unsigned long flags) | 1868 | unsigned long flags) |
1869 | { | 1869 | { |
1870 | dma_addr_t dev_addr = 0; | 1870 | dma_addr_t dev_addr = 0; |
1871 | int total_size; | 1871 | int total_size; |
1872 | int lli_max = d40c->base->plat_data->llis_per_log; | 1872 | int lli_max = d40c->base->plat_data->llis_per_log; |
1873 | 1873 | ||
1874 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { | 1874 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { |
1875 | dev_err(&d40c->chan.dev->device, | 1875 | dev_err(&d40c->chan.dev->device, |
1876 | "[%s] Out of memory\n", __func__); | 1876 | "[%s] Out of memory\n", __func__); |
1877 | return -ENOMEM; | 1877 | return -ENOMEM; |
1878 | } | 1878 | } |
1879 | 1879 | ||
1880 | d40d->lli_len = sg_len; | 1880 | d40d->lli_len = sg_len; |
1881 | d40d->lli_tcount = 0; | 1881 | d40d->lli_tcount = 0; |
1882 | 1882 | ||
1883 | if (sg_len > 1) | 1883 | if (sg_len > 1) |
1884 | /* | 1884 | /* |
1885 | * Check if there is space available in lcla. | 1885 | * Check if there is space available in lcla. |
1886 | * If not, split list into 1-length and run only | 1886 | * If not, split list into 1-length and run only |
1887 | * in lcpa space. | 1887 | * in lcpa space. |
1888 | */ | 1888 | */ |
1889 | if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) | 1889 | if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) |
1890 | lli_max = 1; | 1890 | lli_max = 1; |
1891 | 1891 | ||
1892 | if (direction == DMA_FROM_DEVICE) { | 1892 | if (direction == DMA_FROM_DEVICE) { |
1893 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | 1893 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; |
1894 | total_size = d40_log_sg_to_dev(&d40c->lcla, | 1894 | total_size = d40_log_sg_to_dev(&d40c->lcla, |
1895 | sgl, sg_len, | 1895 | sgl, sg_len, |
1896 | &d40d->lli_log, | 1896 | &d40d->lli_log, |
1897 | &d40c->log_def, | 1897 | &d40c->log_def, |
1898 | d40c->dma_cfg.src_info.data_width, | 1898 | d40c->dma_cfg.src_info.data_width, |
1899 | d40c->dma_cfg.dst_info.data_width, | 1899 | d40c->dma_cfg.dst_info.data_width, |
1900 | direction, | 1900 | direction, |
1901 | flags & DMA_PREP_INTERRUPT, | 1901 | flags & DMA_PREP_INTERRUPT, |
1902 | dev_addr, lli_max, | 1902 | dev_addr, lli_max, |
1903 | d40c->base->plat_data->llis_per_log); | 1903 | d40c->base->plat_data->llis_per_log); |
1904 | } else if (direction == DMA_TO_DEVICE) { | 1904 | } else if (direction == DMA_TO_DEVICE) { |
1905 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | 1905 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; |
1906 | total_size = d40_log_sg_to_dev(&d40c->lcla, | 1906 | total_size = d40_log_sg_to_dev(&d40c->lcla, |
1907 | sgl, sg_len, | 1907 | sgl, sg_len, |
1908 | &d40d->lli_log, | 1908 | &d40d->lli_log, |
1909 | &d40c->log_def, | 1909 | &d40c->log_def, |
1910 | d40c->dma_cfg.src_info.data_width, | 1910 | d40c->dma_cfg.src_info.data_width, |
1911 | d40c->dma_cfg.dst_info.data_width, | 1911 | d40c->dma_cfg.dst_info.data_width, |
1912 | direction, | 1912 | direction, |
1913 | flags & DMA_PREP_INTERRUPT, | 1913 | flags & DMA_PREP_INTERRUPT, |
1914 | dev_addr, lli_max, | 1914 | dev_addr, lli_max, |
1915 | d40c->base->plat_data->llis_per_log); | 1915 | d40c->base->plat_data->llis_per_log); |
1916 | } else | 1916 | } else |
1917 | return -EINVAL; | 1917 | return -EINVAL; |
1918 | if (total_size < 0) | 1918 | if (total_size < 0) |
1919 | return -EINVAL; | 1919 | return -EINVAL; |
1920 | 1920 | ||
1921 | return 0; | 1921 | return 0; |
1922 | } | 1922 | } |
1923 | 1923 | ||
1924 | static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | 1924 | static int d40_prep_slave_sg_phy(struct d40_desc *d40d, |
1925 | struct d40_chan *d40c, | 1925 | struct d40_chan *d40c, |
1926 | struct scatterlist *sgl, | 1926 | struct scatterlist *sgl, |
1927 | unsigned int sgl_len, | 1927 | unsigned int sgl_len, |
1928 | enum dma_data_direction direction, | 1928 | enum dma_data_direction direction, |
1929 | unsigned long flags) | 1929 | unsigned long flags) |
1930 | { | 1930 | { |
1931 | dma_addr_t src_dev_addr; | 1931 | dma_addr_t src_dev_addr; |
1932 | dma_addr_t dst_dev_addr; | 1932 | dma_addr_t dst_dev_addr; |
1933 | int res; | 1933 | int res; |
1934 | 1934 | ||
1935 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | 1935 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { |
1936 | dev_err(&d40c->chan.dev->device, | 1936 | dev_err(&d40c->chan.dev->device, |
1937 | "[%s] Out of memory\n", __func__); | 1937 | "[%s] Out of memory\n", __func__); |
1938 | return -ENOMEM; | 1938 | return -ENOMEM; |
1939 | } | 1939 | } |
1940 | 1940 | ||
1941 | d40d->lli_len = sgl_len; | 1941 | d40d->lli_len = sgl_len; |
1942 | d40d->lli_tcount = 0; | 1942 | d40d->lli_tcount = 0; |
1943 | 1943 | ||
1944 | if (direction == DMA_FROM_DEVICE) { | 1944 | if (direction == DMA_FROM_DEVICE) { |
1945 | dst_dev_addr = 0; | 1945 | dst_dev_addr = 0; |
1946 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | 1946 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; |
1947 | } else if (direction == DMA_TO_DEVICE) { | 1947 | } else if (direction == DMA_TO_DEVICE) { |
1948 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | 1948 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; |
1949 | src_dev_addr = 0; | 1949 | src_dev_addr = 0; |
1950 | } else | 1950 | } else |
1951 | return -EINVAL; | 1951 | return -EINVAL; |
1952 | 1952 | ||
1953 | res = d40_phy_sg_to_lli(sgl, | 1953 | res = d40_phy_sg_to_lli(sgl, |
1954 | sgl_len, | 1954 | sgl_len, |
1955 | src_dev_addr, | 1955 | src_dev_addr, |
1956 | d40d->lli_phy.src, | 1956 | d40d->lli_phy.src, |
1957 | d40d->lli_phy.src_addr, | 1957 | d40d->lli_phy.src_addr, |
1958 | d40c->src_def_cfg, | 1958 | d40c->src_def_cfg, |
1959 | d40c->dma_cfg.src_info.data_width, | 1959 | d40c->dma_cfg.src_info.data_width, |
1960 | d40c->dma_cfg.src_info.psize, | 1960 | d40c->dma_cfg.src_info.psize, |
1961 | true); | 1961 | true); |
1962 | if (res < 0) | 1962 | if (res < 0) |
1963 | return res; | 1963 | return res; |
1964 | 1964 | ||
1965 | res = d40_phy_sg_to_lli(sgl, | 1965 | res = d40_phy_sg_to_lli(sgl, |
1966 | sgl_len, | 1966 | sgl_len, |
1967 | dst_dev_addr, | 1967 | dst_dev_addr, |
1968 | d40d->lli_phy.dst, | 1968 | d40d->lli_phy.dst, |
1969 | d40d->lli_phy.dst_addr, | 1969 | d40d->lli_phy.dst_addr, |
1970 | d40c->dst_def_cfg, | 1970 | d40c->dst_def_cfg, |
1971 | d40c->dma_cfg.dst_info.data_width, | 1971 | d40c->dma_cfg.dst_info.data_width, |
1972 | d40c->dma_cfg.dst_info.psize, | 1972 | d40c->dma_cfg.dst_info.psize, |
1973 | true); | 1973 | true); |
1974 | if (res < 0) | 1974 | if (res < 0) |
1975 | return res; | 1975 | return res; |
1976 | 1976 | ||
1977 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | 1977 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, |
1978 | d40d->lli_pool.size, DMA_TO_DEVICE); | 1978 | d40d->lli_pool.size, DMA_TO_DEVICE); |
1979 | return 0; | 1979 | return 0; |
1980 | } | 1980 | } |
1981 | 1981 | ||
1982 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | 1982 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, |
1983 | struct scatterlist *sgl, | 1983 | struct scatterlist *sgl, |
1984 | unsigned int sg_len, | 1984 | unsigned int sg_len, |
1985 | enum dma_data_direction direction, | 1985 | enum dma_data_direction direction, |
1986 | unsigned long flags) | 1986 | unsigned long flags) |
1987 | { | 1987 | { |
1988 | struct d40_desc *d40d; | 1988 | struct d40_desc *d40d; |
1989 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | 1989 | struct d40_chan *d40c = container_of(chan, struct d40_chan, |
1990 | chan); | 1990 | chan); |
1991 | unsigned long flg; | 1991 | unsigned long flg; |
1992 | int err; | 1992 | int err; |
1993 | 1993 | ||
1994 | if (d40c->dma_cfg.pre_transfer) | 1994 | if (d40c->dma_cfg.pre_transfer) |
1995 | d40c->dma_cfg.pre_transfer(chan, | 1995 | d40c->dma_cfg.pre_transfer(chan, |
1996 | d40c->dma_cfg.pre_transfer_data, | 1996 | d40c->dma_cfg.pre_transfer_data, |
1997 | sg_dma_len(sgl)); | 1997 | sg_dma_len(sgl)); |
1998 | 1998 | ||
1999 | spin_lock_irqsave(&d40c->lock, flg); | 1999 | spin_lock_irqsave(&d40c->lock, flg); |
2000 | d40d = d40_desc_get(d40c); | 2000 | d40d = d40_desc_get(d40c); |
2001 | spin_unlock_irqrestore(&d40c->lock, flg); | 2001 | spin_unlock_irqrestore(&d40c->lock, flg); |
2002 | 2002 | ||
2003 | if (d40d == NULL) | 2003 | if (d40d == NULL) |
2004 | return NULL; | 2004 | return NULL; |
2005 | 2005 | ||
2006 | memset(d40d, 0, sizeof(struct d40_desc)); | 2006 | memset(d40d, 0, sizeof(struct d40_desc)); |
2007 | 2007 | ||
2008 | if (d40c->log_num != D40_PHY_CHAN) | 2008 | if (d40c->log_num != D40_PHY_CHAN) |
2009 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, | 2009 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, |
2010 | direction, flags); | 2010 | direction, flags); |
2011 | else | 2011 | else |
2012 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, | 2012 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, |
2013 | direction, flags); | 2013 | direction, flags); |
2014 | if (err) { | 2014 | if (err) { |
2015 | dev_err(&d40c->chan.dev->device, | 2015 | dev_err(&d40c->chan.dev->device, |
2016 | "[%s] Failed to prepare %s slave sg job: %d\n", | 2016 | "[%s] Failed to prepare %s slave sg job: %d\n", |
2017 | __func__, | 2017 | __func__, |
2018 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); | 2018 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); |
2019 | return NULL; | 2019 | return NULL; |
2020 | } | 2020 | } |
2021 | 2021 | ||
2022 | d40d->txd.flags = flags; | 2022 | d40d->txd.flags = flags; |
2023 | 2023 | ||
2024 | dma_async_tx_descriptor_init(&d40d->txd, chan); | 2024 | dma_async_tx_descriptor_init(&d40d->txd, chan); |
2025 | 2025 | ||
2026 | d40d->txd.tx_submit = d40_tx_submit; | 2026 | d40d->txd.tx_submit = d40_tx_submit; |
2027 | 2027 | ||
2028 | return &d40d->txd; | 2028 | return &d40d->txd; |
2029 | } | 2029 | } |
2030 | 2030 | ||
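For context, dmaengine clients reach d40_prep_slave_sg() through the device_prep_slave_sg method installed further down rather than by calling it directly. A minimal client-side sketch (assuming a channel and an already-mapped scatterlist; names are illustrative):

        struct dma_async_tx_descriptor *desc;

        desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
                                                  DMA_TO_DEVICE,
                                                  DMA_PREP_INTERRUPT);
        if (!desc)
                return -EBUSY;          /* no descriptor available */
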
2031 | static enum dma_status d40_tx_status(struct dma_chan *chan, | 2031 | static enum dma_status d40_tx_status(struct dma_chan *chan, |
2032 | dma_cookie_t cookie, | 2032 | dma_cookie_t cookie, |
2033 | struct dma_tx_state *txstate) | 2033 | struct dma_tx_state *txstate) |
2034 | { | 2034 | { |
2035 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2035 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2036 | dma_cookie_t last_used; | 2036 | dma_cookie_t last_used; |
2037 | dma_cookie_t last_complete; | 2037 | dma_cookie_t last_complete; |
2038 | int ret; | 2038 | int ret; |
2039 | 2039 | ||
2040 | last_complete = d40c->completed; | 2040 | last_complete = d40c->completed; |
2041 | last_used = chan->cookie; | 2041 | last_used = chan->cookie; |
2042 | 2042 | ||
2043 | if (d40_is_paused(d40c)) | 2043 | if (d40_is_paused(d40c)) |
2044 | ret = DMA_PAUSED; | 2044 | ret = DMA_PAUSED; |
2045 | else | 2045 | else |
2046 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 2046 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
2047 | 2047 | ||
2048 | dma_set_tx_state(txstate, last_complete, last_used, | 2048 | dma_set_tx_state(txstate, last_complete, last_used, |
2049 | stedma40_residue(chan)); | 2049 | stedma40_residue(chan)); |
2050 | 2050 | ||
2051 | return ret; | 2051 | return ret; |
2052 | } | 2052 | } |
2053 | 2053 | ||
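d40_tx_status() is likewise reached through the generic device_tx_status method; a caller that wants the residue reads it back out of the dma_tx_state the method fills in. A hedged sketch (chan, cookie and dev are assumed to exist):

        struct dma_tx_state state;
        enum dma_status status;

        status = chan->device->device_tx_status(chan, cookie, &state);
        if (status == DMA_PAUSED)
                dev_dbg(dev, "paused, %u bytes left\n", state.residue);
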
2054 | static void d40_issue_pending(struct dma_chan *chan) | 2054 | static void d40_issue_pending(struct dma_chan *chan) |
2055 | { | 2055 | { |
2056 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2056 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2057 | unsigned long flags; | 2057 | unsigned long flags; |
2058 | 2058 | ||
2059 | spin_lock_irqsave(&d40c->lock, flags); | 2059 | spin_lock_irqsave(&d40c->lock, flags); |
2060 | 2060 | ||
2061 | /* Busy means that pending jobs are already being processed */ | 2061 | /* Busy means that pending jobs are already being processed */ |
2062 | if (!d40c->busy) | 2062 | if (!d40c->busy) |
2063 | (void) d40_queue_start(d40c); | 2063 | (void) d40_queue_start(d40c); |
2064 | 2064 | ||
2065 | spin_unlock_irqrestore(&d40c->lock, flags); | 2065 | spin_unlock_irqrestore(&d40c->lock, flags); |
2066 | } | 2066 | } |
2067 | 2067 | ||
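Nothing moves on the hardware until the client submits the prepared descriptor and kicks the channel; d40_issue_pending() then starts the queue unless a job is already running. The usual client sequence (sketch):

        dma_cookie_t cookie;

        cookie = desc->tx_submit(desc);         /* queue the job */
        if (dma_submit_error(cookie))
                return -EINVAL;
        dma_async_issue_pending(chan);          /* start the hardware */
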
2068 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) | 2068 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
| | 2069 | unsigned long arg) |
2069 | { | 2070 | { |
2070 | unsigned long flags; | 2071 | unsigned long flags; |
2071 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | 2072 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); |
2072 | 2073 | ||
2073 | switch (cmd) { | 2074 | switch (cmd) { |
2074 | case DMA_TERMINATE_ALL: | 2075 | case DMA_TERMINATE_ALL: |
2075 | spin_lock_irqsave(&d40c->lock, flags); | 2076 | spin_lock_irqsave(&d40c->lock, flags); |
2076 | d40_term_all(d40c); | 2077 | d40_term_all(d40c); |
2077 | spin_unlock_irqrestore(&d40c->lock, flags); | 2078 | spin_unlock_irqrestore(&d40c->lock, flags); |
2078 | return 0; | 2079 | return 0; |
2079 | case DMA_PAUSE: | 2080 | case DMA_PAUSE: |
2080 | return d40_pause(chan); | 2081 | return d40_pause(chan); |
2081 | case DMA_RESUME: | 2082 | case DMA_RESUME: |
2082 | return d40_resume(chan); | 2083 | return d40_resume(chan); |
2083 | } | 2084 | } |
2084 | 2085 | ||
2085 | /* Other commands are unimplemented */ | 2086 | /* Other commands are unimplemented */ |
2086 | return -ENXIO; | 2087 | return -ENXIO; |
2087 | } | 2088 | } |
2088 | 2089 | ||
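This is the function the commit actually touches: device_control now takes a third, ioctl-style unsigned long argument. d40_control() ignores it for the three commands it implements, but every caller must now supply one, conventionally 0 when the command carries no payload. A sketch of an updated call site (illustrative, not from the tree):

static int example_pause(struct dma_chan *chan)
{
        /* DMA_PAUSE carries no payload, so the new argument is 0;
         * commands defined later can pass a pointer cast to unsigned long */
        return chan->device->device_control(chan, DMA_PAUSE, 0);
}
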
2089 | /* Initialization functions */ | 2090 | /* Initialization functions */ |
2090 | 2091 | ||
2091 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | 2092 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, |
2092 | struct d40_chan *chans, int offset, | 2093 | struct d40_chan *chans, int offset, |
2093 | int num_chans) | 2094 | int num_chans) |
2094 | { | 2095 | { |
2095 | int i = 0; | 2096 | int i = 0; |
2096 | struct d40_chan *d40c; | 2097 | struct d40_chan *d40c; |
2097 | 2098 | ||
2098 | INIT_LIST_HEAD(&dma->channels); | 2099 | INIT_LIST_HEAD(&dma->channels); |
2099 | 2100 | ||
2100 | for (i = offset; i < offset + num_chans; i++) { | 2101 | for (i = offset; i < offset + num_chans; i++) { |
2101 | d40c = &chans[i]; | 2102 | d40c = &chans[i]; |
2102 | d40c->base = base; | 2103 | d40c->base = base; |
2103 | d40c->chan.device = dma; | 2104 | d40c->chan.device = dma; |
2104 | 2105 | ||
2105 | /* Invalidate lcla element */ | 2106 | /* Invalidate lcla element */ |
2106 | d40c->lcla.src_id = -1; | 2107 | d40c->lcla.src_id = -1; |
2107 | d40c->lcla.dst_id = -1; | 2108 | d40c->lcla.dst_id = -1; |
2108 | 2109 | ||
2109 | spin_lock_init(&d40c->lock); | 2110 | spin_lock_init(&d40c->lock); |
2110 | 2111 | ||
2111 | d40c->log_num = D40_PHY_CHAN; | 2112 | d40c->log_num = D40_PHY_CHAN; |
2112 | 2113 | ||
2113 | INIT_LIST_HEAD(&d40c->free); | 2114 | INIT_LIST_HEAD(&d40c->free); |
2114 | INIT_LIST_HEAD(&d40c->active); | 2115 | INIT_LIST_HEAD(&d40c->active); |
2115 | INIT_LIST_HEAD(&d40c->queue); | 2116 | INIT_LIST_HEAD(&d40c->queue); |
2116 | INIT_LIST_HEAD(&d40c->client); | 2117 | INIT_LIST_HEAD(&d40c->client); |
2117 | 2118 | ||
2118 | d40c->free_len = 0; | 2119 | d40c->free_len = 0; |
2119 | 2120 | ||
2120 | tasklet_init(&d40c->tasklet, dma_tasklet, | 2121 | tasklet_init(&d40c->tasklet, dma_tasklet, |
2121 | (unsigned long) d40c); | 2122 | (unsigned long) d40c); |
2122 | 2123 | ||
2123 | list_add_tail(&d40c->chan.device_node, | 2124 | list_add_tail(&d40c->chan.device_node, |
2124 | &dma->channels); | 2125 | &dma->channels); |
2125 | } | 2126 | } |
2126 | } | 2127 | } |
2127 | 2128 | ||
2128 | static int __init d40_dmaengine_init(struct d40_base *base, | 2129 | static int __init d40_dmaengine_init(struct d40_base *base, |
2129 | int num_reserved_chans) | 2130 | int num_reserved_chans) |
2130 | { | 2131 | { |
2131 | int err; | 2132 | int err; |
2132 | 2133 | ||
2133 | d40_chan_init(base, &base->dma_slave, base->log_chans, | 2134 | d40_chan_init(base, &base->dma_slave, base->log_chans, |
2134 | 0, base->num_log_chans); | 2135 | 0, base->num_log_chans); |
2135 | 2136 | ||
2136 | dma_cap_zero(base->dma_slave.cap_mask); | 2137 | dma_cap_zero(base->dma_slave.cap_mask); |
2137 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | 2138 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); |
2138 | 2139 | ||
2139 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; | 2140 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; |
2140 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; | 2141 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; |
2141 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; | 2142 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; |
2142 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; | 2143 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; |
2143 | base->dma_slave.device_tx_status = d40_tx_status; | 2144 | base->dma_slave.device_tx_status = d40_tx_status; |
2144 | base->dma_slave.device_issue_pending = d40_issue_pending; | 2145 | base->dma_slave.device_issue_pending = d40_issue_pending; |
2145 | base->dma_slave.device_control = d40_control; | 2146 | base->dma_slave.device_control = d40_control; |
2146 | base->dma_slave.dev = base->dev; | 2147 | base->dma_slave.dev = base->dev; |
2147 | 2148 | ||
2148 | err = dma_async_device_register(&base->dma_slave); | 2149 | err = dma_async_device_register(&base->dma_slave); |
2149 | 2150 | ||
2150 | if (err) { | 2151 | if (err) { |
2151 | dev_err(base->dev, | 2152 | dev_err(base->dev, |
2152 | "[%s] Failed to register slave channels\n", | 2153 | "[%s] Failed to register slave channels\n", |
2153 | __func__); | 2154 | __func__); |
2154 | goto failure1; | 2155 | goto failure1; |
2155 | } | 2156 | } |
2156 | 2157 | ||
2157 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, | 2158 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, |
2158 | base->num_log_chans, base->plat_data->memcpy_len); | 2159 | base->num_log_chans, base->plat_data->memcpy_len); |
2159 | 2160 | ||
2160 | dma_cap_zero(base->dma_memcpy.cap_mask); | 2161 | dma_cap_zero(base->dma_memcpy.cap_mask); |
2161 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | 2162 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); |
2162 | 2163 | ||
2163 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; | 2164 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; |
2164 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; | 2165 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; |
2165 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; | 2166 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; |
2166 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; | 2167 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; |
2167 | base->dma_memcpy.device_tx_status = d40_tx_status; | 2168 | base->dma_memcpy.device_tx_status = d40_tx_status; |
2168 | base->dma_memcpy.device_issue_pending = d40_issue_pending; | 2169 | base->dma_memcpy.device_issue_pending = d40_issue_pending; |
2169 | base->dma_memcpy.device_control = d40_control; | 2170 | base->dma_memcpy.device_control = d40_control; |
2170 | base->dma_memcpy.dev = base->dev; | 2171 | base->dma_memcpy.dev = base->dev; |
2171 | /* | 2172 | /* |
2172 | * This controller can only access addresses at even | 2173 | * This controller can only access addresses at even |
2173 | * 32bit boundaries, i.e. 2^2 | 2174 | * 32bit boundaries, i.e. 2^2 |
2174 | */ | 2175 | */ |
2175 | base->dma_memcpy.copy_align = 2; | 2176 | base->dma_memcpy.copy_align = 2; |
2176 | 2177 | ||
2177 | err = dma_async_device_register(&base->dma_memcpy); | 2178 | err = dma_async_device_register(&base->dma_memcpy); |
2178 | 2179 | ||
2179 | if (err) { | 2180 | if (err) { |
2180 | dev_err(base->dev, | 2181 | dev_err(base->dev, |
2181 | "[%s] Failed to regsiter memcpy only channels\n", | 2182 | "[%s] Failed to regsiter memcpy only channels\n", |
2182 | __func__); | 2183 | __func__); |
2183 | goto failure2; | 2184 | goto failure2; |
2184 | } | 2185 | } |
2185 | 2186 | ||
2186 | d40_chan_init(base, &base->dma_both, base->phy_chans, | 2187 | d40_chan_init(base, &base->dma_both, base->phy_chans, |
2187 | 0, num_reserved_chans); | 2188 | 0, num_reserved_chans); |
2188 | 2189 | ||
2189 | dma_cap_zero(base->dma_both.cap_mask); | 2190 | dma_cap_zero(base->dma_both.cap_mask); |
2190 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | 2191 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); |
2191 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | 2192 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); |
2192 | 2193 | ||
2193 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; | 2194 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; |
2194 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; | 2195 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; |
2195 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; | 2196 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; |
2196 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; | 2197 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; |
2197 | base->dma_both.device_tx_status = d40_tx_status; | 2198 | base->dma_both.device_tx_status = d40_tx_status; |
2198 | base->dma_both.device_issue_pending = d40_issue_pending; | 2199 | base->dma_both.device_issue_pending = d40_issue_pending; |
2199 | base->dma_both.device_control = d40_control; | 2200 | base->dma_both.device_control = d40_control; |
2200 | base->dma_both.dev = base->dev; | 2201 | base->dma_both.dev = base->dev; |
2201 | base->dma_both.copy_align = 2; | 2202 | base->dma_both.copy_align = 2; |
2202 | err = dma_async_device_register(&base->dma_both); | 2203 | err = dma_async_device_register(&base->dma_both); |
2203 | 2204 | ||
2204 | if (err) { | 2205 | if (err) { |
2205 | dev_err(base->dev, | 2206 | dev_err(base->dev, |
2206 | "[%s] Failed to register logical and physical capable channels\n", | 2207 | "[%s] Failed to register logical and physical capable channels\n", |
2207 | __func__); | 2208 | __func__); |
2208 | goto failure3; | 2209 | goto failure3; |
2209 | } | 2210 | } |
2210 | return 0; | 2211 | return 0; |
2211 | failure3: | 2212 | failure3: |
2212 | dma_async_device_unregister(&base->dma_memcpy); | 2213 | dma_async_device_unregister(&base->dma_memcpy); |
2213 | failure2: | 2214 | failure2: |
2214 | dma_async_device_unregister(&base->dma_slave); | 2215 | dma_async_device_unregister(&base->dma_slave); |
2215 | failure1: | 2216 | failure1: |
2216 | return err; | 2217 | return err; |
2217 | } | 2218 | } |
2218 | 2219 | ||
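Registering three dma_device instances gives clients three pools with distinct capability masks: slave-only logical channels, memcpy-only logical channels, and reserved physical channels that advertise both. A client selects among them through the mask it hands to dma_request_channel(); a sketch (the filter function and its parameter are illustrative):

        dma_cap_mask_t mask;
        struct dma_chan *chan;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);   /* want a slave-capable channel */
        chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
        if (!chan)
                return -ENODEV;
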
2219 | /* Initialization functions. */ | 2220 | /* Initialization functions. */ |
2220 | 2221 | ||
2221 | static int __init d40_phy_res_init(struct d40_base *base) | 2222 | static int __init d40_phy_res_init(struct d40_base *base) |
2222 | { | 2223 | { |
2223 | int i; | 2224 | int i; |
2224 | int num_phy_chans_avail = 0; | 2225 | int num_phy_chans_avail = 0; |
2225 | u32 val[2]; | 2226 | u32 val[2]; |
2226 | int odd_even_bit = -2; | 2227 | int odd_even_bit = -2; |
2227 | 2228 | ||
2228 | val[0] = readl(base->virtbase + D40_DREG_PRSME); | 2229 | val[0] = readl(base->virtbase + D40_DREG_PRSME); |
2229 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); | 2230 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); |
2230 | 2231 | ||
2231 | for (i = 0; i < base->num_phy_chans; i++) { | 2232 | for (i = 0; i < base->num_phy_chans; i++) { |
2232 | base->phy_res[i].num = i; | 2233 | base->phy_res[i].num = i; |
2233 | odd_even_bit += 2 * ((i % 2) == 0); | 2234 | odd_even_bit += 2 * ((i % 2) == 0); |
2234 | if (((val[i % 2] >> odd_even_bit) & 3) == 1) { | 2235 | if (((val[i % 2] >> odd_even_bit) & 3) == 1) { |
2235 | /* Mark security only channels as occupied */ | 2236 | /* Mark security only channels as occupied */ |
2236 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | 2237 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; |
2237 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | 2238 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; |
2238 | } else { | 2239 | } else { |
2239 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; | 2240 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; |
2240 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | 2241 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; |
2241 | num_phy_chans_avail++; | 2242 | num_phy_chans_avail++; |
2242 | } | 2243 | } |
2243 | spin_lock_init(&base->phy_res[i].lock); | 2244 | spin_lock_init(&base->phy_res[i].lock); |
2244 | } | 2245 | } |
2245 | dev_info(base->dev, "%d of %d physical DMA channels available\n", | 2246 | dev_info(base->dev, "%d of %d physical DMA channels available\n", |
2246 | num_phy_chans_avail, base->num_phy_chans); | 2247 | num_phy_chans_avail, base->num_phy_chans); |
2247 | 2248 | ||
2248 | /* Verify settings extended vs standard */ | 2249 | /* Verify settings extended vs standard */ |
2249 | val[0] = readl(base->virtbase + D40_DREG_PRTYP); | 2250 | val[0] = readl(base->virtbase + D40_DREG_PRTYP); |
2250 | 2251 | ||
2251 | for (i = 0; i < base->num_phy_chans; i++) { | 2252 | for (i = 0; i < base->num_phy_chans; i++) { |
2252 | 2253 | ||
2253 | if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && | 2254 | if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && |
2254 | (val[0] & 0x3) != 1) | 2255 | (val[0] & 0x3) != 1) |
2255 | dev_info(base->dev, | 2256 | dev_info(base->dev, |
2256 | "[%s] INFO: channel %d is misconfigured (%d)\n", | 2257 | "[%s] INFO: channel %d is misconfigured (%d)\n", |
2257 | __func__, i, val[0] & 0x3); | 2258 | __func__, i, val[0] & 0x3); |
2258 | 2259 | ||
2259 | val[0] = val[0] >> 2; | 2260 | val[0] = val[0] >> 2; |
2260 | } | 2261 | } |
2261 | 2262 | ||
2262 | return num_phy_chans_avail; | 2263 | return num_phy_chans_avail; |
2263 | } | 2264 | } |
2264 | 2265 | ||
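The odd_even_bit bookkeeping above walks the PRSME/PRSMO pair two bits at a time: even-numbered channels live in PRSME, odd-numbered ones in PRSMO, and channel i occupies bits 2*(i/2) and 2*(i/2)+1 of its register. An equivalent, more direct formulation (a sketch, not in the driver):

static inline u32 d40_chan_mode_bits(u32 prsme, u32 prsmo, int i)
{
        u32 reg = (i & 1) ? prsmo : prsme;      /* even in PRSME, odd in PRSMO */

        return (reg >> ((i / 2) * 2)) & 3;      /* 2 bits per channel; 1 = secure */
}
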
2265 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | 2266 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) |
2266 | { | 2267 | { |
2267 | static const struct d40_reg_val dma_id_regs[] = { | 2268 | static const struct d40_reg_val dma_id_regs[] = { |
2268 | /* Peripheral Id */ | 2269 | /* Peripheral Id */ |
2269 | { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, | 2270 | { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, |
2270 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | 2271 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, |
2271 | /* | 2272 | /* |
2272 | * D40_DREG_PERIPHID2 Depends on HW revision: | 2273 | * D40_DREG_PERIPHID2 Depends on HW revision: |
2273 | * MOP500/HREF ED has 0x0008, | 2274 | * MOP500/HREF ED has 0x0008, |
2274 | * ? has 0x0018, | 2275 | * ? has 0x0018, |
2275 | * HREF V1 has 0x0028 | 2276 | * HREF V1 has 0x0028 |
2276 | */ | 2277 | */ |
2277 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | 2278 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, |
2278 | 2279 | ||
2279 | /* PCell Id */ | 2280 | /* PCell Id */ |
2280 | { .reg = D40_DREG_CELLID0, .val = 0x000d}, | 2281 | { .reg = D40_DREG_CELLID0, .val = 0x000d}, |
2281 | { .reg = D40_DREG_CELLID1, .val = 0x00f0}, | 2282 | { .reg = D40_DREG_CELLID1, .val = 0x00f0}, |
2282 | { .reg = D40_DREG_CELLID2, .val = 0x0005}, | 2283 | { .reg = D40_DREG_CELLID2, .val = 0x0005}, |
2283 | { .reg = D40_DREG_CELLID3, .val = 0x00b1} | 2284 | { .reg = D40_DREG_CELLID3, .val = 0x00b1} |
2284 | }; | 2285 | }; |
2285 | struct stedma40_platform_data *plat_data; | 2286 | struct stedma40_platform_data *plat_data; |
2286 | struct clk *clk = NULL; | 2287 | struct clk *clk = NULL; |
2287 | void __iomem *virtbase = NULL; | 2288 | void __iomem *virtbase = NULL; |
2288 | struct resource *res = NULL; | 2289 | struct resource *res = NULL; |
2289 | struct d40_base *base = NULL; | 2290 | struct d40_base *base = NULL; |
2290 | int num_log_chans = 0; | 2291 | int num_log_chans = 0; |
2291 | int num_phy_chans; | 2292 | int num_phy_chans; |
2292 | int i; | 2293 | int i; |
2293 | 2294 | ||
2294 | clk = clk_get(&pdev->dev, NULL); | 2295 | clk = clk_get(&pdev->dev, NULL); |
2295 | 2296 | ||
2296 | if (IS_ERR(clk)) { | 2297 | if (IS_ERR(clk)) { |
2297 | dev_err(&pdev->dev, "[%s] No matching clock found\n", | 2298 | dev_err(&pdev->dev, "[%s] No matching clock found\n", |
2298 | __func__); | 2299 | __func__); |
2299 | goto failure; | 2300 | goto failure; |
2300 | } | 2301 | } |
2301 | 2302 | ||
2302 | clk_enable(clk); | 2303 | clk_enable(clk); |
2303 | 2304 | ||
2304 | /* Get IO for DMAC base address */ | 2305 | /* Get IO for DMAC base address */ |
2305 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | 2306 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); |
2306 | if (!res) | 2307 | if (!res) |
2307 | goto failure; | 2308 | goto failure; |
2308 | 2309 | ||
2309 | if (request_mem_region(res->start, resource_size(res), | 2310 | if (request_mem_region(res->start, resource_size(res), |
2310 | D40_NAME " I/O base") == NULL) | 2311 | D40_NAME " I/O base") == NULL) |
2311 | goto failure; | 2312 | goto failure; |
2312 | 2313 | ||
2313 | virtbase = ioremap(res->start, resource_size(res)); | 2314 | virtbase = ioremap(res->start, resource_size(res)); |
2314 | if (!virtbase) | 2315 | if (!virtbase) |
2315 | goto failure; | 2316 | goto failure; |
2316 | 2317 | ||
2317 | /* HW version check */ | 2318 | /* HW version check */ |
2318 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | 2319 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { |
2319 | if (dma_id_regs[i].val != | 2320 | if (dma_id_regs[i].val != |
2320 | readl(virtbase + dma_id_regs[i].reg)) { | 2321 | readl(virtbase + dma_id_regs[i].reg)) { |
2321 | dev_err(&pdev->dev, | 2322 | dev_err(&pdev->dev, |
2322 | "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | 2323 | "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", |
2323 | __func__, | 2324 | __func__, |
2324 | dma_id_regs[i].val, | 2325 | dma_id_regs[i].val, |
2325 | dma_id_regs[i].reg, | 2326 | dma_id_regs[i].reg, |
2326 | readl(virtbase + dma_id_regs[i].reg)); | 2327 | readl(virtbase + dma_id_regs[i].reg)); |
2327 | goto failure; | 2328 | goto failure; |
2328 | } | 2329 | } |
2329 | } | 2330 | } |
2330 | 2331 | ||
2331 | i = readl(virtbase + D40_DREG_PERIPHID2); | 2332 | i = readl(virtbase + D40_DREG_PERIPHID2); |
2332 | 2333 | ||
2333 | if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { | 2334 | if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { |
2334 | dev_err(&pdev->dev, | 2335 | dev_err(&pdev->dev, |
2335 | "[%s] Unknown designer! Got %x wanted %x\n", | 2336 | "[%s] Unknown designer! Got %x wanted %x\n", |
2336 | __func__, i & 0xf, D40_PERIPHID2_DESIGNER); | 2337 | __func__, i & 0xf, D40_PERIPHID2_DESIGNER); |
2337 | goto failure; | 2338 | goto failure; |
2338 | } | 2339 | } |
2339 | 2340 | ||
2340 | /* The number of physical channels on this HW */ | 2341 | /* The number of physical channels on this HW */ |
2341 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | 2342 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; |
2342 | 2343 | ||
2343 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | 2344 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", |
2344 | (i >> 4) & 0xf, res->start); | 2345 | (i >> 4) & 0xf, res->start); |
2345 | 2346 | ||
2346 | plat_data = pdev->dev.platform_data; | 2347 | plat_data = pdev->dev.platform_data; |
2347 | 2348 | ||
2348 | /* Count the number of logical channels in use */ | 2349 | /* Count the number of logical channels in use */ |
2349 | for (i = 0; i < plat_data->dev_len; i++) | 2350 | for (i = 0; i < plat_data->dev_len; i++) |
2350 | if (plat_data->dev_rx[i] != 0) | 2351 | if (plat_data->dev_rx[i] != 0) |
2351 | num_log_chans++; | 2352 | num_log_chans++; |
2352 | 2353 | ||
2353 | for (i = 0; i < plat_data->dev_len; i++) | 2354 | for (i = 0; i < plat_data->dev_len; i++) |
2354 | if (plat_data->dev_tx[i] != 0) | 2355 | if (plat_data->dev_tx[i] != 0) |
2355 | num_log_chans++; | 2356 | num_log_chans++; |
2356 | 2357 | ||
2357 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + | 2358 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + |
2358 | (num_phy_chans + num_log_chans + plat_data->memcpy_len) * | 2359 | (num_phy_chans + num_log_chans + plat_data->memcpy_len) * |
2359 | sizeof(struct d40_chan), GFP_KERNEL); | 2360 | sizeof(struct d40_chan), GFP_KERNEL); |
2360 | 2361 | ||
2361 | if (base == NULL) { | 2362 | if (base == NULL) { |
2362 | dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); | 2363 | dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); |
2363 | goto failure; | 2364 | goto failure; |
2364 | } | 2365 | } |
2365 | 2366 | ||
2366 | base->clk = clk; | 2367 | base->clk = clk; |
2367 | base->num_phy_chans = num_phy_chans; | 2368 | base->num_phy_chans = num_phy_chans; |
2368 | base->num_log_chans = num_log_chans; | 2369 | base->num_log_chans = num_log_chans; |
2369 | base->phy_start = res->start; | 2370 | base->phy_start = res->start; |
2370 | base->phy_size = resource_size(res); | 2371 | base->phy_size = resource_size(res); |
2371 | base->virtbase = virtbase; | 2372 | base->virtbase = virtbase; |
2372 | base->plat_data = plat_data; | 2373 | base->plat_data = plat_data; |
2373 | base->dev = &pdev->dev; | 2374 | base->dev = &pdev->dev; |
2374 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); | 2375 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); |
2375 | base->log_chans = &base->phy_chans[num_phy_chans]; | 2376 | base->log_chans = &base->phy_chans[num_phy_chans]; |
2376 | 2377 | ||
2377 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), | 2378 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), |
2378 | GFP_KERNEL); | 2379 | GFP_KERNEL); |
2379 | if (!base->phy_res) | 2380 | if (!base->phy_res) |
2380 | goto failure; | 2381 | goto failure; |
2381 | 2382 | ||
2382 | base->lookup_phy_chans = kzalloc(num_phy_chans * | 2383 | base->lookup_phy_chans = kzalloc(num_phy_chans * |
2383 | sizeof(struct d40_chan *), | 2384 | sizeof(struct d40_chan *), |
2384 | GFP_KERNEL); | 2385 | GFP_KERNEL); |
2385 | if (!base->lookup_phy_chans) | 2386 | if (!base->lookup_phy_chans) |
2386 | goto failure; | 2387 | goto failure; |
2387 | 2388 | ||
2388 | if (num_log_chans + plat_data->memcpy_len) { | 2389 | if (num_log_chans + plat_data->memcpy_len) { |
2389 | /* | 2390 | /* |
2390 | * The max number of logical channels is the number of event lines for all | 2391 | * The max number of logical channels is the number of event lines for all |
2391 | * src devices and dst devices | 2392 | * src devices and dst devices |
2392 | */ | 2393 | */ |
2393 | base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * | 2394 | base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * |
2394 | sizeof(struct d40_chan *), | 2395 | sizeof(struct d40_chan *), |
2395 | GFP_KERNEL); | 2396 | GFP_KERNEL); |
2396 | if (!base->lookup_log_chans) | 2397 | if (!base->lookup_log_chans) |
2397 | goto failure; | 2398 | goto failure; |
2398 | } | 2399 | } |
2399 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), | 2400 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), |
2400 | GFP_KERNEL); | 2401 | GFP_KERNEL); |
2401 | if (!base->lcla_pool.alloc_map) | 2402 | if (!base->lcla_pool.alloc_map) |
2402 | goto failure; | 2403 | goto failure; |
2403 | 2404 | ||
2404 | return base; | 2405 | return base; |
2405 | 2406 | ||
2406 | failure: | 2407 | failure: |
2407 | if (clk) { | 2408 | if (clk) { |
2408 | clk_disable(clk); | 2409 | clk_disable(clk); |
2409 | clk_put(clk); | 2410 | clk_put(clk); |
2410 | } | 2411 | } |
2411 | if (virtbase) | 2412 | if (virtbase) |
2412 | iounmap(virtbase); | 2413 | iounmap(virtbase); |
2413 | if (res) | 2414 | if (res) |
2414 | release_mem_region(res->start, | 2415 | release_mem_region(res->start, |
2415 | resource_size(res)); | 2416 | resource_size(res)); |
2416 | if (virtbase) | 2417 | if (virtbase) |
2417 | iounmap(virtbase); | 2418 | iounmap(virtbase); |
2418 | 2419 | ||
2419 | if (base) { | 2420 | if (base) { |
2420 | kfree(base->lcla_pool.alloc_map); | 2421 | kfree(base->lcla_pool.alloc_map); |
2421 | kfree(base->lookup_log_chans); | 2422 | kfree(base->lookup_log_chans); |
2422 | kfree(base->lookup_phy_chans); | 2423 | kfree(base->lookup_phy_chans); |
2423 | kfree(base->phy_res); | 2424 | kfree(base->phy_res); |
2424 | kfree(base); | 2425 | kfree(base); |
2425 | } | 2426 | } |
2426 | 2427 | ||
2427 | return NULL; | 2428 | return NULL; |
2428 | } | 2429 | } |
2429 | 2430 | ||
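One thing worth flagging in the failure path above: virtbase is unmapped twice, once before and once after release_mem_region(), so a failed probe ends up calling iounmap() on an already-unmapped pointer. A corrected sketch of that cleanup tail (illustrative only):

failure:
        if (clk) {
                clk_disable(clk);
                clk_put(clk);
        }
        if (virtbase)
                iounmap(virtbase);      /* unmap exactly once */
        if (res)
                release_mem_region(res->start, resource_size(res));
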
2430 | static void __init d40_hw_init(struct d40_base *base) | 2431 | static void __init d40_hw_init(struct d40_base *base) |
2431 | { | 2432 | { |
2432 | 2433 | ||
2433 | static const struct d40_reg_val dma_init_reg[] = { | 2434 | static const struct d40_reg_val dma_init_reg[] = { |
2434 | /* Clock every part of the DMA block from start */ | 2435 | /* Clock every part of the DMA block from start */ |
2435 | { .reg = D40_DREG_GCC, .val = 0x0000ff01}, | 2436 | { .reg = D40_DREG_GCC, .val = 0x0000ff01}, |
2436 | 2437 | ||
2437 | /* Interrupts on all logical channels */ | 2438 | /* Interrupts on all logical channels */ |
2438 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | 2439 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, |
2439 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | 2440 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, |
2440 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | 2441 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, |
2441 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | 2442 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, |
2442 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | 2443 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, |
2443 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | 2444 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, |
2444 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | 2445 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, |
2445 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | 2446 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, |
2446 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | 2447 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, |
2447 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | 2448 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, |
2448 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | 2449 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, |
2449 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | 2450 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} |
2450 | }; | 2451 | }; |
2451 | int i; | 2452 | int i; |
2452 | u32 prmseo[2] = {0, 0}; | 2453 | u32 prmseo[2] = {0, 0}; |
2453 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; | 2454 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; |
2454 | u32 pcmis = 0; | 2455 | u32 pcmis = 0; |
2455 | u32 pcicr = 0; | 2456 | u32 pcicr = 0; |
2456 | 2457 | ||
2457 | for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) | 2458 | for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) |
2458 | writel(dma_init_reg[i].val, | 2459 | writel(dma_init_reg[i].val, |
2459 | base->virtbase + dma_init_reg[i].reg); | 2460 | base->virtbase + dma_init_reg[i].reg); |
2460 | 2461 | ||
2461 | /* Configure all our dma channels to default settings */ | 2462 | /* Configure all our dma channels to default settings */ |
2462 | for (i = 0; i < base->num_phy_chans; i++) { | 2463 | for (i = 0; i < base->num_phy_chans; i++) { |
2463 | 2464 | ||
2464 | activeo[i % 2] = activeo[i % 2] << 2; | 2465 | activeo[i % 2] = activeo[i % 2] << 2; |
2465 | 2466 | ||
2466 | if (base->phy_res[base->num_phy_chans - i - 1].allocated_src | 2467 | if (base->phy_res[base->num_phy_chans - i - 1].allocated_src |
2467 | == D40_ALLOC_PHY) { | 2468 | == D40_ALLOC_PHY) { |
2468 | activeo[i % 2] |= 3; | 2469 | activeo[i % 2] |= 3; |
2469 | continue; | 2470 | continue; |
2470 | } | 2471 | } |
2471 | 2472 | ||
2472 | /* Enable interrupt # */ | 2473 | /* Enable interrupt # */ |
2473 | pcmis = (pcmis << 1) | 1; | 2474 | pcmis = (pcmis << 1) | 1; |
2474 | 2475 | ||
2475 | /* Clear interrupt # */ | 2476 | /* Clear interrupt # */ |
2476 | pcicr = (pcicr << 1) | 1; | 2477 | pcicr = (pcicr << 1) | 1; |
2477 | 2478 | ||
2478 | /* Set channel to physical mode */ | 2479 | /* Set channel to physical mode */ |
2479 | prmseo[i % 2] = prmseo[i % 2] << 2; | 2480 | prmseo[i % 2] = prmseo[i % 2] << 2; |
2480 | prmseo[i % 2] |= 1; | 2481 | prmseo[i % 2] |= 1; |
2481 | 2482 | ||
2482 | } | 2483 | } |
2483 | 2484 | ||
2484 | writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); | 2485 | writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); |
2485 | writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); | 2486 | writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); |
2486 | writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); | 2487 | writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); |
2487 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); | 2488 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); |
2488 | 2489 | ||
2489 | /* Write which interrupt to enable */ | 2490 | /* Write which interrupt to enable */ |
2490 | writel(pcmis, base->virtbase + D40_DREG_PCMIS); | 2491 | writel(pcmis, base->virtbase + D40_DREG_PCMIS); |
2491 | 2492 | ||
2492 | /* Write which interrupt to clear */ | 2493 | /* Write which interrupt to clear */ |
2493 | writel(pcicr, base->virtbase + D40_DREG_PCICR); | 2494 | writel(pcicr, base->virtbase + D40_DREG_PCICR); |
2494 | 2495 | ||
2495 | } | 2496 | } |
2496 | 2497 | ||
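d40_hw_init() assembles prmseo[] and activeo[] by shifting two bits per loop pass while walking the channels in reverse, so channel 0 ends up in the least significant bits: channel c lands at bit offset 2*(c/2) of PRMSE when c is even and of PRMSO when c is odd, matching how d40_phy_res_init() reads the same layout back. Spelled out directly, with the even register at index 0 (a sketch):

static inline void d40_mark_phy_mode(u32 prmseo[2], int c)
{
        /* channel c occupies bits 2*(c/2)..2*(c/2)+1 of its register */
        prmseo[c & 1] |= 1 << ((c / 2) * 2);    /* 1 = physical mode */
}
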
2497 | static int __init d40_probe(struct platform_device *pdev) | 2498 | static int __init d40_probe(struct platform_device *pdev) |
2498 | { | 2499 | { |
2499 | int err; | 2500 | int err; |
2500 | int ret = -ENOENT; | 2501 | int ret = -ENOENT; |
2501 | struct d40_base *base; | 2502 | struct d40_base *base; |
2502 | struct resource *res = NULL; | 2503 | struct resource *res = NULL; |
2503 | int num_reserved_chans; | 2504 | int num_reserved_chans; |
2504 | u32 val; | 2505 | u32 val; |
2505 | 2506 | ||
2506 | base = d40_hw_detect_init(pdev); | 2507 | base = d40_hw_detect_init(pdev); |
2507 | 2508 | ||
2508 | if (!base) | 2509 | if (!base) |
2509 | goto failure; | 2510 | goto failure; |
2510 | 2511 | ||
2511 | num_reserved_chans = d40_phy_res_init(base); | 2512 | num_reserved_chans = d40_phy_res_init(base); |
2512 | 2513 | ||
2513 | platform_set_drvdata(pdev, base); | 2514 | platform_set_drvdata(pdev, base); |
2514 | 2515 | ||
2515 | spin_lock_init(&base->interrupt_lock); | 2516 | spin_lock_init(&base->interrupt_lock); |
2516 | spin_lock_init(&base->execmd_lock); | 2517 | spin_lock_init(&base->execmd_lock); |
2517 | 2518 | ||
2518 | /* Get IO for logical channel parameter address */ | 2519 | /* Get IO for logical channel parameter address */ |
2519 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); | 2520 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); |
2520 | if (!res) { | 2521 | if (!res) { |
2521 | ret = -ENOENT; | 2522 | ret = -ENOENT; |
2522 | dev_err(&pdev->dev, | 2523 | dev_err(&pdev->dev, |
2523 | "[%s] No \"lcpa\" memory resource\n", | 2524 | "[%s] No \"lcpa\" memory resource\n", |
2524 | __func__); | 2525 | __func__); |
2525 | goto failure; | 2526 | goto failure; |
2526 | } | 2527 | } |
2527 | base->lcpa_size = resource_size(res); | 2528 | base->lcpa_size = resource_size(res); |
2528 | base->phy_lcpa = res->start; | 2529 | base->phy_lcpa = res->start; |
2529 | 2530 | ||
2530 | if (request_mem_region(res->start, resource_size(res), | 2531 | if (request_mem_region(res->start, resource_size(res), |
2531 | D40_NAME " I/O lcpa") == NULL) { | 2532 | D40_NAME " I/O lcpa") == NULL) { |
2532 | ret = -EBUSY; | 2533 | ret = -EBUSY; |
2533 | dev_err(&pdev->dev, | 2534 | dev_err(&pdev->dev, |
2534 | "[%s] Failed to request LCPA region 0x%x-0x%x\n", | 2535 | "[%s] Failed to request LCPA region 0x%x-0x%x\n", |
2535 | __func__, res->start, res->end); | 2536 | __func__, res->start, res->end); |
2536 | goto failure; | 2537 | goto failure; |
2537 | } | 2538 | } |
2538 | 2539 | ||
2539 | /* We make use of ESRAM memory for this. */ | 2540 | /* We make use of ESRAM memory for this. */ |
2540 | val = readl(base->virtbase + D40_DREG_LCPA); | 2541 | val = readl(base->virtbase + D40_DREG_LCPA); |
2541 | if (res->start != val && val != 0) { | 2542 | if (res->start != val && val != 0) { |
2542 | dev_warn(&pdev->dev, | 2543 | dev_warn(&pdev->dev, |
2543 | "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", | 2544 | "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", |
2544 | __func__, val, res->start); | 2545 | __func__, val, res->start); |
2545 | } else | 2546 | } else |
2546 | writel(res->start, base->virtbase + D40_DREG_LCPA); | 2547 | writel(res->start, base->virtbase + D40_DREG_LCPA); |
2547 | 2548 | ||
2548 | base->lcpa_base = ioremap(res->start, resource_size(res)); | 2549 | base->lcpa_base = ioremap(res->start, resource_size(res)); |
2549 | if (!base->lcpa_base) { | 2550 | if (!base->lcpa_base) { |
2550 | ret = -ENOMEM; | 2551 | ret = -ENOMEM; |
2551 | dev_err(&pdev->dev, | 2552 | dev_err(&pdev->dev, |
2552 | "[%s] Failed to ioremap LCPA region\n", | 2553 | "[%s] Failed to ioremap LCPA region\n", |
2553 | __func__); | 2554 | __func__); |
2554 | goto failure; | 2555 | goto failure; |
2555 | } | 2556 | } |
2556 | /* Get IO for logical channel link address */ | 2557 | /* Get IO for logical channel link address */ |
2557 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); | 2558 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); |
2558 | if (!res) { | 2559 | if (!res) { |
2559 | ret = -ENOENT; | 2560 | ret = -ENOENT; |
2560 | dev_err(&pdev->dev, | 2561 | dev_err(&pdev->dev, |
2561 | "[%s] No \"lcla\" resource defined\n", | 2562 | "[%s] No \"lcla\" resource defined\n", |
2562 | __func__); | 2563 | __func__); |
2563 | goto failure; | 2564 | goto failure; |
2564 | } | 2565 | } |
2565 | 2566 | ||
2566 | base->lcla_pool.base_size = resource_size(res); | 2567 | base->lcla_pool.base_size = resource_size(res); |
2567 | base->lcla_pool.phy = res->start; | 2568 | base->lcla_pool.phy = res->start; |
2568 | 2569 | ||
2569 | if (request_mem_region(res->start, resource_size(res), | 2570 | if (request_mem_region(res->start, resource_size(res), |
2570 | D40_NAME " I/O lcla") == NULL) { | 2571 | D40_NAME " I/O lcla") == NULL) { |
2571 | ret = -EBUSY; | 2572 | ret = -EBUSY; |
2572 | dev_err(&pdev->dev, | 2573 | dev_err(&pdev->dev, |
2573 | "[%s] Failed to request LCLA region 0x%x-0x%x\n", | 2574 | "[%s] Failed to request LCLA region 0x%x-0x%x\n", |
2574 | __func__, res->start, res->end); | 2575 | __func__, res->start, res->end); |
2575 | goto failure; | 2576 | goto failure; |
2576 | } | 2577 | } |
2577 | val = readl(base->virtbase + D40_DREG_LCLA); | 2578 | val = readl(base->virtbase + D40_DREG_LCLA); |
2578 | if (res->start != val && val != 0) { | 2579 | if (res->start != val && val != 0) { |
2579 | dev_warn(&pdev->dev, | 2580 | dev_warn(&pdev->dev, |
2580 | "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", | 2581 | "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", |
2581 | __func__, val, res->start); | 2582 | __func__, val, res->start); |
2582 | } else | 2583 | } else |
2583 | writel(res->start, base->virtbase + D40_DREG_LCLA); | 2584 | writel(res->start, base->virtbase + D40_DREG_LCLA); |
2584 | 2585 | ||
2585 | base->lcla_pool.base = ioremap(res->start, resource_size(res)); | 2586 | base->lcla_pool.base = ioremap(res->start, resource_size(res)); |
2586 | if (!base->lcla_pool.base) { | 2587 | if (!base->lcla_pool.base) { |
2587 | ret = -ENOMEM; | 2588 | ret = -ENOMEM; |
2588 | dev_err(&pdev->dev, | 2589 | dev_err(&pdev->dev, |
2589 | "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", | 2590 | "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", |
2590 | __func__, res->start, res->end); | 2591 | __func__, res->start, res->end); |
2591 | goto failure; | 2592 | goto failure; |
2592 | } | 2593 | } |
2593 | 2594 | ||
2594 | spin_lock_init(&base->lcla_pool.lock); | 2595 | spin_lock_init(&base->lcla_pool.lock); |
2595 | 2596 | ||
2596 | base->lcla_pool.num_blocks = base->num_phy_chans; | 2597 | base->lcla_pool.num_blocks = base->num_phy_chans; |
2597 | 2598 | ||
2598 | base->irq = platform_get_irq(pdev, 0); | 2599 | base->irq = platform_get_irq(pdev, 0); |
2599 | 2600 | ||
2600 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | 2601 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); |
2601 | 2602 | ||
2602 | if (ret) { | 2603 | if (ret) { |
2603 | dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); | 2604 | dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); |
2604 | goto failure; | 2605 | goto failure; |
2605 | } | 2606 | } |
2606 | 2607 | ||
2607 | err = d40_dmaengine_init(base, num_reserved_chans); | 2608 | err = d40_dmaengine_init(base, num_reserved_chans); |
2608 | if (err) | 2609 | if (err) |
2609 | goto failure; | 2610 | goto failure; |
2610 | 2611 | ||
2611 | d40_hw_init(base); | 2612 | d40_hw_init(base); |
2612 | 2613 | ||
2613 | dev_info(base->dev, "initialized\n"); | 2614 | dev_info(base->dev, "initialized\n"); |
2614 | return 0; | 2615 | return 0; |
2615 | 2616 | ||
2616 | failure: | 2617 | failure: |
2617 | if (base) { | 2618 | if (base) { |
2618 | if (base->virtbase) | 2619 | if (base->virtbase) |
2619 | iounmap(base->virtbase); | 2620 | iounmap(base->virtbase); |
2620 | if (base->lcla_pool.phy) | 2621 | if (base->lcla_pool.phy) |
2621 | release_mem_region(base->lcla_pool.phy, | 2622 | release_mem_region(base->lcla_pool.phy, |
2622 | base->lcla_pool.base_size); | 2623 | base->lcla_pool.base_size); |
2623 | if (base->phy_lcpa) | 2624 | if (base->phy_lcpa) |
2624 | release_mem_region(base->phy_lcpa, | 2625 | release_mem_region(base->phy_lcpa, |
2625 | base->lcpa_size); | 2626 | base->lcpa_size); |
2626 | if (base->phy_start) | 2627 | if (base->phy_start) |
2627 | release_mem_region(base->phy_start, | 2628 | release_mem_region(base->phy_start, |
2628 | base->phy_size); | 2629 | base->phy_size); |
2629 | if (base->clk) { | 2630 | if (base->clk) { |
2630 | clk_disable(base->clk); | 2631 | clk_disable(base->clk); |
2631 | clk_put(base->clk); | 2632 | clk_put(base->clk); |
2632 | } | 2633 | } |
2633 | 2634 | ||
2634 | kfree(base->lcla_pool.alloc_map); | 2635 | kfree(base->lcla_pool.alloc_map); |
2635 | kfree(base->lookup_log_chans); | 2636 | kfree(base->lookup_log_chans); |
2636 | kfree(base->lookup_phy_chans); | 2637 | kfree(base->lookup_phy_chans); |
2637 | kfree(base->phy_res); | 2638 | kfree(base->phy_res); |
2638 | kfree(base); | 2639 | kfree(base); |
2639 | } | 2640 | } |
2640 | 2641 | ||
2641 | dev_err(&pdev->dev, "[%s] probe failed\n", __func__); | 2642 | dev_err(&pdev->dev, "[%s] probe failed\n", __func__); |
2642 | return ret; | 2643 | return ret; |
2643 | } | 2644 | } |
2644 | 2645 | ||
2645 | static struct platform_driver d40_driver = { | 2646 | static struct platform_driver d40_driver = { |
2646 | .driver = { | 2647 | .driver = { |
2647 | .owner = THIS_MODULE, | 2648 | .owner = THIS_MODULE, |
2648 | .name = D40_NAME, | 2649 | .name = D40_NAME, |
2649 | }, | 2650 | }, |
2650 | }; | 2651 | }; |
2651 | 2652 | ||
2652 | int __init stedma40_init(void) | 2653 | int __init stedma40_init(void) |
2653 | { | 2654 | { |
2654 | return platform_driver_probe(&d40_driver, d40_probe); | 2655 | return platform_driver_probe(&d40_driver, d40_probe); |
2655 | } | 2656 | } |
2656 | arch_initcall(stedma40_init); | 2657 | arch_initcall(stedma40_init); |
2657 | 2658 |
drivers/dma/timb_dma.c
1 | /* | 1 | /* |
2 | * timb_dma.c timberdale FPGA DMA driver | 2 | * timb_dma.c timberdale FPGA DMA driver |
3 | * Copyright (c) 2010 Intel Corporation | 3 | * Copyright (c) 2010 Intel Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License version 2 as | 6 | * it under the terms of the GNU General Public License version 2 as |
7 | * published by the Free Software Foundation. | 7 | * published by the Free Software Foundation. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, | 9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. | 12 | * GNU General Public License for more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License | 14 | * You should have received a copy of the GNU General Public License |
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | /* Supports: | 19 | /* Supports: |
20 | * Timberdale FPGA DMA engine | 20 | * Timberdale FPGA DMA engine |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #include <linux/dmaengine.h> | 23 | #include <linux/dmaengine.h> |
24 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
25 | #include <linux/init.h> | 25 | #include <linux/init.h> |
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/platform_device.h> | 29 | #include <linux/platform_device.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | 31 | ||
32 | #include <linux/timb_dma.h> | 32 | #include <linux/timb_dma.h> |
33 | 33 | ||
34 | #define DRIVER_NAME "timb-dma" | 34 | #define DRIVER_NAME "timb-dma" |
35 | 35 | ||
36 | /* Global DMA registers */ | 36 | /* Global DMA registers */ |
37 | #define TIMBDMA_ACR 0x34 | 37 | #define TIMBDMA_ACR 0x34 |
38 | #define TIMBDMA_32BIT_ADDR 0x01 | 38 | #define TIMBDMA_32BIT_ADDR 0x01 |
39 | 39 | ||
40 | #define TIMBDMA_ISR 0x080000 | 40 | #define TIMBDMA_ISR 0x080000 |
41 | #define TIMBDMA_IPR 0x080004 | 41 | #define TIMBDMA_IPR 0x080004 |
42 | #define TIMBDMA_IER 0x080008 | 42 | #define TIMBDMA_IER 0x080008 |
43 | 43 | ||
44 | /* Channel specific registers */ | 44 | /* Channel specific registers */ |
45 | /* RX instances base addresses are 0x00, 0x40, 0x80 ... | 45 | /* RX instances base addresses are 0x00, 0x40, 0x80 ... |
46 | * TX instances base addresses are 0x18, 0x58, 0x98 ... | 46 | * TX instances base addresses are 0x18, 0x58, 0x98 ... |
47 | */ | 47 | */ |
48 | #define TIMBDMA_INSTANCE_OFFSET 0x40 | 48 | #define TIMBDMA_INSTANCE_OFFSET 0x40 |
49 | #define TIMBDMA_INSTANCE_TX_OFFSET 0x18 | 49 | #define TIMBDMA_INSTANCE_TX_OFFSET 0x18 |
50 | 50 | ||
51 | /* RX registers, relative to the instance base */ | 51 | /* RX registers, relative to the instance base */ |
52 | #define TIMBDMA_OFFS_RX_DHAR 0x00 | 52 | #define TIMBDMA_OFFS_RX_DHAR 0x00 |
53 | #define TIMBDMA_OFFS_RX_DLAR 0x04 | 53 | #define TIMBDMA_OFFS_RX_DLAR 0x04 |
54 | #define TIMBDMA_OFFS_RX_LR 0x0C | 54 | #define TIMBDMA_OFFS_RX_LR 0x0C |
55 | #define TIMBDMA_OFFS_RX_BLR 0x10 | 55 | #define TIMBDMA_OFFS_RX_BLR 0x10 |
56 | #define TIMBDMA_OFFS_RX_ER 0x14 | 56 | #define TIMBDMA_OFFS_RX_ER 0x14 |
57 | #define TIMBDMA_RX_EN 0x01 | 57 | #define TIMBDMA_RX_EN 0x01 |
58 | /* bytes per Row, video specific register | 58 | /* bytes per Row, video specific register |
59 | * which is placed after the TX registers... | 59 | * which is placed after the TX registers... |
60 | */ | 60 | */ |
61 | #define TIMBDMA_OFFS_RX_BPRR 0x30 | 61 | #define TIMBDMA_OFFS_RX_BPRR 0x30 |
62 | 62 | ||
63 | /* TX registers, relative to the instance base */ | 63 | /* TX registers, relative to the instance base */ |
64 | #define TIMBDMA_OFFS_TX_DHAR 0x00 | 64 | #define TIMBDMA_OFFS_TX_DHAR 0x00 |
65 | #define TIMBDMA_OFFS_TX_DLAR 0x04 | 65 | #define TIMBDMA_OFFS_TX_DLAR 0x04 |
66 | #define TIMBDMA_OFFS_TX_BLR 0x0C | 66 | #define TIMBDMA_OFFS_TX_BLR 0x0C |
67 | #define TIMBDMA_OFFS_TX_LR 0x14 | 67 | #define TIMBDMA_OFFS_TX_LR 0x14 |
68 | 68 | ||
69 | 69 | ||
70 | #define TIMB_DMA_DESC_SIZE 8 | 70 | #define TIMB_DMA_DESC_SIZE 8 |
71 | 71 | ||
72 | struct timb_dma_desc { | 72 | struct timb_dma_desc { |
73 | struct list_head desc_node; | 73 | struct list_head desc_node; |
74 | struct dma_async_tx_descriptor txd; | 74 | struct dma_async_tx_descriptor txd; |
75 | u8 *desc_list; | 75 | u8 *desc_list; |
76 | unsigned int desc_list_len; | 76 | unsigned int desc_list_len; |
77 | bool interrupt; | 77 | bool interrupt; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | struct timb_dma_chan { | 80 | struct timb_dma_chan { |
81 | struct dma_chan chan; | 81 | struct dma_chan chan; |
82 | void __iomem *membase; | 82 | void __iomem *membase; |
83 | spinlock_t lock; /* Used to protect data structures, | 83 | spinlock_t lock; /* Used to protect data structures, |
84 | especially the lists and descriptors, | 84 | especially the lists and descriptors, |
85 | from races between the tasklet and calls | 85 | from races between the tasklet and calls |
86 | from above */ | 86 | from above */ |
87 | dma_cookie_t last_completed_cookie; | 87 | dma_cookie_t last_completed_cookie; |
88 | bool ongoing; | 88 | bool ongoing; |
89 | struct list_head active_list; | 89 | struct list_head active_list; |
90 | struct list_head queue; | 90 | struct list_head queue; |
91 | struct list_head free_list; | 91 | struct list_head free_list; |
92 | unsigned int bytes_per_line; | 92 | unsigned int bytes_per_line; |
93 | enum dma_data_direction direction; | 93 | enum dma_data_direction direction; |
94 | unsigned int descs; /* Descriptors to allocate */ | 94 | unsigned int descs; /* Descriptors to allocate */ |
95 | unsigned int desc_elems; /* number of elems per descriptor */ | 95 | unsigned int desc_elems; /* number of elems per descriptor */ |
96 | }; | 96 | }; |
97 | 97 | ||
98 | struct timb_dma { | 98 | struct timb_dma { |
99 | struct dma_device dma; | 99 | struct dma_device dma; |
100 | void __iomem *membase; | 100 | void __iomem *membase; |
101 | struct tasklet_struct tasklet; | 101 | struct tasklet_struct tasklet; |
102 | struct timb_dma_chan channels[0]; | 102 | struct timb_dma_chan channels[0]; |
103 | }; | 103 | }; |
104 | 104 | ||
105 | static struct device *chan2dev(struct dma_chan *chan) | 105 | static struct device *chan2dev(struct dma_chan *chan) |
106 | { | 106 | { |
107 | return &chan->dev->device; | 107 | return &chan->dev->device; |
108 | } | 108 | } |
109 | static struct device *chan2dmadev(struct dma_chan *chan) | 109 | static struct device *chan2dmadev(struct dma_chan *chan) |
110 | { | 110 | { |
111 | return chan2dev(chan)->parent->parent; | 111 | return chan2dev(chan)->parent->parent; |
112 | } | 112 | } |
113 | 113 | ||
114 | static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan) | 114 | static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan) |
115 | { | 115 | { |
116 | int id = td_chan->chan.chan_id; | 116 | int id = td_chan->chan.chan_id; |
117 | return (struct timb_dma *)((u8 *)td_chan - | 117 | return (struct timb_dma *)((u8 *)td_chan - |
118 | id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma)); | 118 | id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma)); |
119 | } | 119 | } |
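
tdchantotd() recovers the parent struct timb_dma by hand: channels[0] is a zero-length array placed directly after the struct, so channel i lives at base + sizeof(struct timb_dma) + i * sizeof(struct timb_dma_chan), and subtracting those two terms walks back to the container. A standalone userspace sketch of the same arithmetic (struct dev, struct chan and chan_to_dev() are made-up names, and it assumes the array really starts sizeof(struct dev) bytes in, as the zero-length-array layout here guarantees):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for struct timb_dma / struct timb_dma_chan. */
struct chan { int id; };
struct dev {
	int nchan;
	struct chan channels[];	/* placed directly after the header */
};

/* Same arithmetic as tdchantotd(): step back over the preceding
 * channel slots, then over the device header itself. */
static struct dev *chan_to_dev(struct chan *c)
{
	return (struct dev *)((char *)c -
		(size_t)c->id * sizeof(struct chan) - sizeof(struct dev));
}

int main(void)
{
	struct dev *d = malloc(sizeof(*d) + 4 * sizeof(struct chan));
	int i;

	for (i = 0; i < 4; i++)
		d->channels[i].id = i;
	printf("%s\n", chan_to_dev(&d->channels[2]) == d ? "ok" : "bad");
	free(d);
	return 0;
}
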
120 | 120 | ||
121 | /* Must be called with the spinlock held */ | 121 | /* Must be called with the spinlock held */ |
122 | static void __td_enable_chan_irq(struct timb_dma_chan *td_chan) | 122 | static void __td_enable_chan_irq(struct timb_dma_chan *td_chan) |
123 | { | 123 | { |
124 | int id = td_chan->chan.chan_id; | 124 | int id = td_chan->chan.chan_id; |
125 | struct timb_dma *td = tdchantotd(td_chan); | 125 | struct timb_dma *td = tdchantotd(td_chan); |
126 | u32 ier; | 126 | u32 ier; |
127 | 127 | ||
128 | /* enable interrupt for this channel */ | 128 | /* enable interrupt for this channel */ |
129 | ier = ioread32(td->membase + TIMBDMA_IER); | 129 | ier = ioread32(td->membase + TIMBDMA_IER); |
130 | ier |= 1 << id; | 130 | ier |= 1 << id; |
131 | dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id, | 131 | dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id, |
132 | ier); | 132 | ier); |
133 | iowrite32(ier, td->membase + TIMBDMA_IER); | 133 | iowrite32(ier, td->membase + TIMBDMA_IER); |
134 | } | 134 | } |
135 | 135 | ||
136 | /* Should be called with the spinlock held */ | 136 | /* Should be called with the spinlock held */ |
137 | static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) | 137 | static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) |
138 | { | 138 | { |
139 | int id = td_chan->chan.chan_id; | 139 | int id = td_chan->chan.chan_id; |
140 | struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan - | 140 | struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan - |
141 | id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma)); | 141 | id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma)); |
142 | u32 isr; | 142 | u32 isr; |
143 | bool done = false; | 143 | bool done = false; |
144 | 144 | ||
145 | dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td); | 145 | dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td); |
146 | 146 | ||
147 | isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id); | 147 | isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id); |
148 | if (isr) { | 148 | if (isr) { |
149 | iowrite32(isr, td->membase + TIMBDMA_ISR); | 149 | iowrite32(isr, td->membase + TIMBDMA_ISR); |
150 | done = true; | 150 | done = true; |
151 | } | 151 | } |
152 | 152 | ||
153 | return done; | 153 | return done; |
154 | } | 154 | } |
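
The read-and-write-back in __td_dma_done_ack() is the usual write-one-to-clear convention for status registers: writing the set bit back to TIMBDMA_ISR acknowledges exactly that channel and leaves the others pending. A standalone sketch of the handshake, with a plain variable standing in for the MMIO register (isr_reg and done_ack() are made-up names):

#include <stdint.h>
#include <stdio.h>

static uint32_t isr_reg = 0x05;	/* channels 0 and 2 pending */

/* Mirror of __td_dma_done_ack(): test the channel's status bit and,
 * if set, write it back, which clears (acks) just that bit. */
static int done_ack(int id)
{
	uint32_t bit = isr_reg & (1u << id);

	if (bit)
		isr_reg &= ~bit;	/* effect of iowrite32(bit, ISR) */
	return bit != 0;
}

int main(void)
{
	int first = done_ack(2);
	int again = done_ack(2);	/* already acked */

	printf("%d %d, isr now 0x%02x\n", first, again, isr_reg);
	return 0;	/* prints: 1 0, isr now 0x01 */
}
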
155 | 155 | ||
156 | static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, | 156 | static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, |
157 | bool single) | 157 | bool single) |
158 | { | 158 | { |
159 | dma_addr_t addr; | 159 | dma_addr_t addr; |
160 | int len; | 160 | int len; |
161 | 161 | ||
162 | addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | | 162 | addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | |
163 | dma_desc[4]; | 163 | dma_desc[4]; |
164 | 164 | ||
165 | len = (dma_desc[3] << 8) | dma_desc[2]; | 165 | len = (dma_desc[3] << 8) | dma_desc[2]; |
166 | 166 | ||
167 | if (single) | 167 | if (single) |
168 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, | 168 | dma_unmap_single(chan2dev(&td_chan->chan), addr, len, |
169 | td_chan->direction); | 169 | td_chan->direction); |
170 | else | 170 | else |
171 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, | 171 | dma_unmap_page(chan2dev(&td_chan->chan), addr, len, |
172 | td_chan->direction); | 172 | td_chan->direction); |
173 | } | 173 | } |
174 | 174 | ||
175 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) | 175 | static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) |
176 | { | 176 | { |
177 | struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, | 177 | struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, |
178 | struct timb_dma_chan, chan); | 178 | struct timb_dma_chan, chan); |
179 | u8 *descs; | 179 | u8 *descs; |
180 | 180 | ||
181 | for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { | 181 | for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { |
182 | __td_unmap_desc(td_chan, descs, single); | 182 | __td_unmap_desc(td_chan, descs, single); |
183 | if (descs[0] & 0x02) | 183 | if (descs[0] & 0x02) |
184 | break; | 184 | break; |
185 | } | 185 | } |
186 | } | 186 | } |
187 | 187 | ||
188 | static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, | 188 | static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, |
189 | struct scatterlist *sg, bool last) | 189 | struct scatterlist *sg, bool last) |
190 | { | 190 | { |
191 | if (sg_dma_len(sg) > USHORT_MAX) { | 191 | if (sg_dma_len(sg) > USHORT_MAX) { |
192 | dev_err(chan2dev(&td_chan->chan), "Too big sg element\n"); | 192 | dev_err(chan2dev(&td_chan->chan), "Too big sg element\n"); |
193 | return -EINVAL; | 193 | return -EINVAL; |
194 | } | 194 | } |
195 | 195 | ||
196 | /* length must be word aligned */ | 196 | /* length must be word aligned */ |
197 | if (sg_dma_len(sg) % sizeof(u32)) { | 197 | if (sg_dma_len(sg) % sizeof(u32)) { |
198 | dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n", | 198 | dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n", |
199 | sg_dma_len(sg)); | 199 | sg_dma_len(sg)); |
200 | return -EINVAL; | 200 | return -EINVAL; |
201 | } | 201 | } |
202 | 202 | ||
203 | dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n", | 203 | dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n", |
204 | dma_desc, (void *)sg_dma_address(sg)); | 204 | dma_desc, (void *)sg_dma_address(sg)); |
205 | 205 | ||
206 | dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff; | 206 | dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff; |
207 | dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff; | 207 | dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff; |
208 | dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff; | 208 | dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff; |
209 | dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff; | 209 | dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff; |
210 | 210 | ||
211 | dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff; | 211 | dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff; |
212 | dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff; | 212 | dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff; |
213 | 213 | ||
214 | dma_desc[1] = 0x00; | 214 | dma_desc[1] = 0x00; |
215 | dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */ | 215 | dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */ |
216 | 216 | ||
217 | return 0; | 217 | return 0; |
218 | } | 218 | } |
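
Each scatterlist entry therefore becomes one 8-byte hardware descriptor: the bus address in bytes 4-7 (least significant byte lowest), the length in bytes 2-3, byte 1 zeroed, and the control flags in byte 0 (0x21 = transfer + valid, with 0x02 set on the final element). A standalone check of the packing with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t d[8];
	uint32_t addr = 0x12345678;	/* example bus address */
	uint16_t len = 0x0200;		/* example length, word aligned */
	int last = 1;

	/* Same byte packing as td_fill_desc() */
	d[7] = addr >> 24; d[6] = addr >> 16;
	d[5] = addr >> 8;  d[4] = addr;
	d[3] = len >> 8;   d[2] = len;
	d[1] = 0x00;
	d[0] = 0x21 | (last ? 0x02 : 0);	/* tran, valid (+ last) */

	for (int i = 7; i >= 0; i--)
		printf("d[%d] = 0x%02x\n", i, d[i]);
	return 0;	/* d[7]=0x12 ... d[4]=0x78, d[3]=0x02, d[0]=0x23 */
}
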
219 | 219 | ||
220 | /* Must be called with the spinlock held */ | 220 | /* Must be called with the spinlock held */ |
221 | static void __td_start_dma(struct timb_dma_chan *td_chan) | 221 | static void __td_start_dma(struct timb_dma_chan *td_chan) |
222 | { | 222 | { |
223 | struct timb_dma_desc *td_desc; | 223 | struct timb_dma_desc *td_desc; |
224 | 224 | ||
225 | if (td_chan->ongoing) { | 225 | if (td_chan->ongoing) { |
226 | dev_err(chan2dev(&td_chan->chan), | 226 | dev_err(chan2dev(&td_chan->chan), |
227 | "Transfer already ongoing\n"); | 227 | "Transfer already ongoing\n"); |
228 | return; | 228 | return; |
229 | } | 229 | } |
230 | 230 | ||
231 | td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, | 231 | td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, |
232 | desc_node); | 232 | desc_node); |
233 | 233 | ||
234 | dev_dbg(chan2dev(&td_chan->chan), | 234 | dev_dbg(chan2dev(&td_chan->chan), |
235 | "td_chan: %p, chan: %d, membase: %p\n", | 235 | "td_chan: %p, chan: %d, membase: %p\n", |
236 | td_chan, td_chan->chan.chan_id, td_chan->membase); | 236 | td_chan, td_chan->chan.chan_id, td_chan->membase); |
237 | 237 | ||
238 | if (td_chan->direction == DMA_FROM_DEVICE) { | 238 | if (td_chan->direction == DMA_FROM_DEVICE) { |
239 | 239 | ||
240 | /* descriptor address */ | 240 | /* descriptor address */ |
241 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); | 241 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); |
242 | iowrite32(td_desc->txd.phys, td_chan->membase + | 242 | iowrite32(td_desc->txd.phys, td_chan->membase + |
243 | TIMBDMA_OFFS_RX_DLAR); | 243 | TIMBDMA_OFFS_RX_DLAR); |
244 | /* Bytes per line */ | 244 | /* Bytes per line */ |
245 | iowrite32(td_chan->bytes_per_line, td_chan->membase + | 245 | iowrite32(td_chan->bytes_per_line, td_chan->membase + |
246 | TIMBDMA_OFFS_RX_BPRR); | 246 | TIMBDMA_OFFS_RX_BPRR); |
247 | /* enable RX */ | 247 | /* enable RX */ |
248 | iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER); | 248 | iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER); |
249 | } else { | 249 | } else { |
250 | /* address high */ | 250 | /* address high */ |
251 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR); | 251 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR); |
252 | iowrite32(td_desc->txd.phys, td_chan->membase + | 252 | iowrite32(td_desc->txd.phys, td_chan->membase + |
253 | TIMBDMA_OFFS_TX_DLAR); | 253 | TIMBDMA_OFFS_TX_DLAR); |
254 | } | 254 | } |
255 | 255 | ||
256 | td_chan->ongoing = true; | 256 | td_chan->ongoing = true; |
257 | 257 | ||
258 | if (td_desc->interrupt) | 258 | if (td_desc->interrupt) |
259 | __td_enable_chan_irq(td_chan); | 259 | __td_enable_chan_irq(td_chan); |
260 | } | 260 | } |
261 | 261 | ||
262 | static void __td_finish(struct timb_dma_chan *td_chan) | 262 | static void __td_finish(struct timb_dma_chan *td_chan) |
263 | { | 263 | { |
264 | dma_async_tx_callback callback; | 264 | dma_async_tx_callback callback; |
265 | void *param; | 265 | void *param; |
266 | struct dma_async_tx_descriptor *txd; | 266 | struct dma_async_tx_descriptor *txd; |
267 | struct timb_dma_desc *td_desc; | 267 | struct timb_dma_desc *td_desc; |
268 | 268 | ||
269 | /* can happen if the descriptor is canceled */ | 269 | /* can happen if the descriptor is canceled */ |
270 | if (list_empty(&td_chan->active_list)) | 270 | if (list_empty(&td_chan->active_list)) |
271 | return; | 271 | return; |
272 | 272 | ||
273 | td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, | 273 | td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, |
274 | desc_node); | 274 | desc_node); |
275 | txd = &td_desc->txd; | 275 | txd = &td_desc->txd; |
276 | 276 | ||
277 | dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n", | 277 | dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n", |
278 | txd->cookie); | 278 | txd->cookie); |
279 | 279 | ||
280 | /* make sure to stop the transfer */ | 280 | /* make sure to stop the transfer */ |
281 | if (td_chan->direction == DMA_FROM_DEVICE) | 281 | if (td_chan->direction == DMA_FROM_DEVICE) |
282 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); | 282 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); |
283 | /* Currently no support for stopping DMA transfers | 283 | /* Currently no support for stopping DMA transfers |
284 | else | 284 | else |
285 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); | 285 | iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); |
286 | */ | 286 | */ |
287 | td_chan->last_completed_cookie = txd->cookie; | 287 | td_chan->last_completed_cookie = txd->cookie; |
288 | td_chan->ongoing = false; | 288 | td_chan->ongoing = false; |
289 | 289 | ||
290 | callback = txd->callback; | 290 | callback = txd->callback; |
291 | param = txd->callback_param; | 291 | param = txd->callback_param; |
292 | 292 | ||
293 | list_move(&td_desc->desc_node, &td_chan->free_list); | 293 | list_move(&td_desc->desc_node, &td_chan->free_list); |
294 | 294 | ||
295 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) | 295 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) |
296 | __td_unmap_descs(td_desc, | 296 | __td_unmap_descs(td_desc, |
297 | txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); | 297 | txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); |
298 | 298 | ||
299 | /* | 299 | /* |
300 | * The API requires that no submissions are done from a | 300 | * The API requires that no submissions are done from a |
301 | * callback, so we don't need to drop the lock here | 301 | * callback, so we don't need to drop the lock here |
302 | */ | 302 | */ |
303 | if (callback) | 303 | if (callback) |
304 | callback(param); | 304 | callback(param); |
305 | } | 305 | } |
306 | 306 | ||
307 | static u32 __td_ier_mask(struct timb_dma *td) | 307 | static u32 __td_ier_mask(struct timb_dma *td) |
308 | { | 308 | { |
309 | int i; | 309 | int i; |
310 | u32 ret = 0; | 310 | u32 ret = 0; |
311 | 311 | ||
312 | for (i = 0; i < td->dma.chancnt; i++) { | 312 | for (i = 0; i < td->dma.chancnt; i++) { |
313 | struct timb_dma_chan *td_chan = td->channels + i; | 313 | struct timb_dma_chan *td_chan = td->channels + i; |
314 | if (td_chan->ongoing) { | 314 | if (td_chan->ongoing) { |
315 | struct timb_dma_desc *td_desc = | 315 | struct timb_dma_desc *td_desc = |
316 | list_entry(td_chan->active_list.next, | 316 | list_entry(td_chan->active_list.next, |
317 | struct timb_dma_desc, desc_node); | 317 | struct timb_dma_desc, desc_node); |
318 | if (td_desc->interrupt) | 318 | if (td_desc->interrupt) |
319 | ret |= 1 << i; | 319 | ret |= 1 << i; |
320 | } | 320 | } |
321 | } | 321 | } |
322 | 322 | ||
323 | return ret; | 323 | return ret; |
324 | } | 324 | } |
325 | 325 | ||
326 | static void __td_start_next(struct timb_dma_chan *td_chan) | 326 | static void __td_start_next(struct timb_dma_chan *td_chan) |
327 | { | 327 | { |
328 | struct timb_dma_desc *td_desc; | 328 | struct timb_dma_desc *td_desc; |
329 | 329 | ||
330 | BUG_ON(list_empty(&td_chan->queue)); | 330 | BUG_ON(list_empty(&td_chan->queue)); |
331 | BUG_ON(td_chan->ongoing); | 331 | BUG_ON(td_chan->ongoing); |
332 | 332 | ||
333 | td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc, | 333 | td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc, |
334 | desc_node); | 334 | desc_node); |
335 | 335 | ||
336 | dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n", | 336 | dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n", |
337 | __func__, td_desc->txd.cookie); | 337 | __func__, td_desc->txd.cookie); |
338 | 338 | ||
339 | list_move(&td_desc->desc_node, &td_chan->active_list); | 339 | list_move(&td_desc->desc_node, &td_chan->active_list); |
340 | __td_start_dma(td_chan); | 340 | __td_start_dma(td_chan); |
341 | } | 341 | } |
342 | 342 | ||
343 | static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd) | 343 | static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd) |
344 | { | 344 | { |
345 | struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc, | 345 | struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc, |
346 | txd); | 346 | txd); |
347 | struct timb_dma_chan *td_chan = container_of(txd->chan, | 347 | struct timb_dma_chan *td_chan = container_of(txd->chan, |
348 | struct timb_dma_chan, chan); | 348 | struct timb_dma_chan, chan); |
349 | dma_cookie_t cookie; | 349 | dma_cookie_t cookie; |
350 | 350 | ||
351 | spin_lock_bh(&td_chan->lock); | 351 | spin_lock_bh(&td_chan->lock); |
352 | 352 | ||
353 | cookie = txd->chan->cookie; | 353 | cookie = txd->chan->cookie; |
354 | if (++cookie < 0) | 354 | if (++cookie < 0) |
355 | cookie = 1; | 355 | cookie = 1; |
356 | txd->chan->cookie = cookie; | 356 | txd->chan->cookie = cookie; |
357 | txd->cookie = cookie; | 357 | txd->cookie = cookie; |
358 | 358 | ||
359 | if (list_empty(&td_chan->active_list)) { | 359 | if (list_empty(&td_chan->active_list)) { |
360 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, | 360 | dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, |
361 | txd->cookie); | 361 | txd->cookie); |
362 | list_add_tail(&td_desc->desc_node, &td_chan->active_list); | 362 | list_add_tail(&td_desc->desc_node, &td_chan->active_list); |
363 | __td_start_dma(td_chan); | 363 | __td_start_dma(td_chan); |
364 | } else { | 364 | } else { |
365 | dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n", | 365 | dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n", |
366 | txd->cookie); | 366 | txd->cookie); |
367 | 367 | ||
368 | list_add_tail(&td_desc->desc_node, &td_chan->queue); | 368 | list_add_tail(&td_desc->desc_node, &td_chan->queue); |
369 | } | 369 | } |
370 | 370 | ||
371 | spin_unlock_bh(&td_chan->lock); | 371 | spin_unlock_bh(&td_chan->lock); |
372 | 372 | ||
373 | return cookie; | 373 | return cookie; |
374 | } | 374 | } |
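
The cookie handling in td_tx_submit() is open-coded per driver at this point in the dmaengine API (later kernels moved it into shared helpers): cookies are strictly positive, increase monotonically per channel, and wrap back to 1 rather than going negative, since negative values report errors. The same invariant as a standalone helper (next_cookie() is a made-up name, written without the signed overflow the in-driver increment relies on):

#include <limits.h>
#include <stdio.h>

typedef int dma_cookie_t;

/* Successor under td_tx_submit()'s rule: stay strictly positive,
 * wrap to 1 instead of overflowing into the negative error range. */
static dma_cookie_t next_cookie(dma_cookie_t last)
{
	return (last < 1 || last == INT_MAX) ? 1 : last + 1;
}

int main(void)
{
	printf("%d\n", next_cookie(41));	/* 42 */
	printf("%d\n", next_cookie(INT_MAX));	/* wraps to 1 */
	return 0;
}
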
375 | 375 | ||
376 | static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) | 376 | static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) |
377 | { | 377 | { |
378 | struct dma_chan *chan = &td_chan->chan; | 378 | struct dma_chan *chan = &td_chan->chan; |
379 | struct timb_dma_desc *td_desc; | 379 | struct timb_dma_desc *td_desc; |
380 | int err; | 380 | int err; |
381 | 381 | ||
382 | td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL); | 382 | td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL); |
383 | if (!td_desc) { | 383 | if (!td_desc) { |
384 | dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); | 384 | dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); |
385 | goto err; | 385 | goto err; |
386 | } | 386 | } |
387 | 387 | ||
388 | td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE; | 388 | td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE; |
389 | 389 | ||
390 | td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL); | 390 | td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL); |
391 | if (!td_desc->desc_list) { | 391 | if (!td_desc->desc_list) { |
392 | dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); | 392 | dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); |
393 | goto err; | 393 | goto err; |
394 | } | 394 | } |
395 | 395 | ||
396 | dma_async_tx_descriptor_init(&td_desc->txd, chan); | 396 | dma_async_tx_descriptor_init(&td_desc->txd, chan); |
397 | td_desc->txd.tx_submit = td_tx_submit; | 397 | td_desc->txd.tx_submit = td_tx_submit; |
398 | td_desc->txd.flags = DMA_CTRL_ACK; | 398 | td_desc->txd.flags = DMA_CTRL_ACK; |
399 | 399 | ||
400 | td_desc->txd.phys = dma_map_single(chan2dmadev(chan), | 400 | td_desc->txd.phys = dma_map_single(chan2dmadev(chan), |
401 | td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE); | 401 | td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE); |
402 | 402 | ||
403 | err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys); | 403 | err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys); |
404 | if (err) { | 404 | if (err) { |
405 | dev_err(chan2dev(chan), "DMA mapping error: %d\n", err); | 405 | dev_err(chan2dev(chan), "DMA mapping error: %d\n", err); |
406 | goto err; | 406 | goto err; |
407 | } | 407 | } |
408 | 408 | ||
409 | return td_desc; | 409 | return td_desc; |
410 | err: | 410 | err: |
411 | kfree(td_desc->desc_list); | 411 | kfree(td_desc->desc_list); |
412 | kfree(td_desc); | 412 | kfree(td_desc); |
413 | 413 | ||
414 | return NULL; | 414 | return NULL; |
415 | 415 | ||
416 | } | 416 | } |
417 | 417 | ||
418 | static void td_free_desc(struct timb_dma_desc *td_desc) | 418 | static void td_free_desc(struct timb_dma_desc *td_desc) |
419 | { | 419 | { |
420 | dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc); | 420 | dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc); |
421 | dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys, | 421 | dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys, |
422 | td_desc->desc_list_len, DMA_TO_DEVICE); | 422 | td_desc->desc_list_len, DMA_TO_DEVICE); |
423 | 423 | ||
424 | kfree(td_desc->desc_list); | 424 | kfree(td_desc->desc_list); |
425 | kfree(td_desc); | 425 | kfree(td_desc); |
426 | } | 426 | } |
427 | 427 | ||
428 | static void td_desc_put(struct timb_dma_chan *td_chan, | 428 | static void td_desc_put(struct timb_dma_chan *td_chan, |
429 | struct timb_dma_desc *td_desc) | 429 | struct timb_dma_desc *td_desc) |
430 | { | 430 | { |
431 | dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc); | 431 | dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc); |
432 | 432 | ||
433 | spin_lock_bh(&td_chan->lock); | 433 | spin_lock_bh(&td_chan->lock); |
434 | list_add(&td_desc->desc_node, &td_chan->free_list); | 434 | list_add(&td_desc->desc_node, &td_chan->free_list); |
435 | spin_unlock_bh(&td_chan->lock); | 435 | spin_unlock_bh(&td_chan->lock); |
436 | } | 436 | } |
437 | 437 | ||
438 | static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan) | 438 | static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan) |
439 | { | 439 | { |
440 | struct timb_dma_desc *td_desc, *_td_desc; | 440 | struct timb_dma_desc *td_desc, *_td_desc; |
441 | struct timb_dma_desc *ret = NULL; | 441 | struct timb_dma_desc *ret = NULL; |
442 | 442 | ||
443 | spin_lock_bh(&td_chan->lock); | 443 | spin_lock_bh(&td_chan->lock); |
444 | list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list, | 444 | list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list, |
445 | desc_node) { | 445 | desc_node) { |
446 | if (async_tx_test_ack(&td_desc->txd)) { | 446 | if (async_tx_test_ack(&td_desc->txd)) { |
447 | list_del(&td_desc->desc_node); | 447 | list_del(&td_desc->desc_node); |
448 | ret = td_desc; | 448 | ret = td_desc; |
449 | break; | 449 | break; |
450 | } | 450 | } |
451 | dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n", | 451 | dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n", |
452 | td_desc); | 452 | td_desc); |
453 | } | 453 | } |
454 | spin_unlock_bh(&td_chan->lock); | 454 | spin_unlock_bh(&td_chan->lock); |
455 | 455 | ||
456 | return ret; | 456 | return ret; |
457 | } | 457 | } |
458 | 458 | ||
459 | static int td_alloc_chan_resources(struct dma_chan *chan) | 459 | static int td_alloc_chan_resources(struct dma_chan *chan) |
460 | { | 460 | { |
461 | struct timb_dma_chan *td_chan = | 461 | struct timb_dma_chan *td_chan = |
462 | container_of(chan, struct timb_dma_chan, chan); | 462 | container_of(chan, struct timb_dma_chan, chan); |
463 | int i; | 463 | int i; |
464 | 464 | ||
465 | dev_dbg(chan2dev(chan), "%s: entry\n", __func__); | 465 | dev_dbg(chan2dev(chan), "%s: entry\n", __func__); |
466 | 466 | ||
467 | BUG_ON(!list_empty(&td_chan->free_list)); | 467 | BUG_ON(!list_empty(&td_chan->free_list)); |
468 | for (i = 0; i < td_chan->descs; i++) { | 468 | for (i = 0; i < td_chan->descs; i++) { |
469 | struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan); | 469 | struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan); |
470 | if (!td_desc) { | 470 | if (!td_desc) { |
471 | if (i) | 471 | if (i) |
472 | break; | 472 | break; |
473 | else { | 473 | else { |
474 | dev_err(chan2dev(chan), | 474 | dev_err(chan2dev(chan), |
475 | "Couldnt allocate any descriptors\n"); | 475 | "Couldnt allocate any descriptors\n"); |
476 | return -ENOMEM; | 476 | return -ENOMEM; |
477 | } | 477 | } |
478 | } | 478 | } |
479 | 479 | ||
480 | td_desc_put(td_chan, td_desc); | 480 | td_desc_put(td_chan, td_desc); |
481 | } | 481 | } |
482 | 482 | ||
483 | spin_lock_bh(&td_chan->lock); | 483 | spin_lock_bh(&td_chan->lock); |
484 | td_chan->last_completed_cookie = 1; | 484 | td_chan->last_completed_cookie = 1; |
485 | chan->cookie = 1; | 485 | chan->cookie = 1; |
486 | spin_unlock_bh(&td_chan->lock); | 486 | spin_unlock_bh(&td_chan->lock); |
487 | 487 | ||
488 | return 0; | 488 | return 0; |
489 | } | 489 | } |
490 | 490 | ||
491 | static void td_free_chan_resources(struct dma_chan *chan) | 491 | static void td_free_chan_resources(struct dma_chan *chan) |
492 | { | 492 | { |
493 | struct timb_dma_chan *td_chan = | 493 | struct timb_dma_chan *td_chan = |
494 | container_of(chan, struct timb_dma_chan, chan); | 494 | container_of(chan, struct timb_dma_chan, chan); |
495 | struct timb_dma_desc *td_desc, *_td_desc; | 495 | struct timb_dma_desc *td_desc, *_td_desc; |
496 | LIST_HEAD(list); | 496 | LIST_HEAD(list); |
497 | 497 | ||
498 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); | 498 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); |
499 | 499 | ||
500 | /* check that all descriptors are free */ | 500 | /* check that all descriptors are free */ |
501 | BUG_ON(!list_empty(&td_chan->active_list)); | 501 | BUG_ON(!list_empty(&td_chan->active_list)); |
502 | BUG_ON(!list_empty(&td_chan->queue)); | 502 | BUG_ON(!list_empty(&td_chan->queue)); |
503 | 503 | ||
504 | spin_lock_bh(&td_chan->lock); | 504 | spin_lock_bh(&td_chan->lock); |
505 | list_splice_init(&td_chan->free_list, &list); | 505 | list_splice_init(&td_chan->free_list, &list); |
506 | spin_unlock_bh(&td_chan->lock); | 506 | spin_unlock_bh(&td_chan->lock); |
507 | 507 | ||
508 | list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) { | 508 | list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) { |
509 | dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__, | 509 | dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__, |
510 | td_desc); | 510 | td_desc); |
511 | td_free_desc(td_desc); | 511 | td_free_desc(td_desc); |
512 | } | 512 | } |
513 | } | 513 | } |
514 | 514 | ||
515 | static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 515 | static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
516 | struct dma_tx_state *txstate) | 516 | struct dma_tx_state *txstate) |
517 | { | 517 | { |
518 | struct timb_dma_chan *td_chan = | 518 | struct timb_dma_chan *td_chan = |
519 | container_of(chan, struct timb_dma_chan, chan); | 519 | container_of(chan, struct timb_dma_chan, chan); |
520 | dma_cookie_t last_used; | 520 | dma_cookie_t last_used; |
521 | dma_cookie_t last_complete; | 521 | dma_cookie_t last_complete; |
522 | int ret; | 522 | int ret; |
523 | 523 | ||
524 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); | 524 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); |
525 | 525 | ||
526 | last_complete = td_chan->last_completed_cookie; | 526 | last_complete = td_chan->last_completed_cookie; |
527 | last_used = chan->cookie; | 527 | last_used = chan->cookie; |
528 | 528 | ||
529 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 529 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
530 | 530 | ||
531 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 531 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
532 | 532 | ||
533 | dev_dbg(chan2dev(chan), | 533 | dev_dbg(chan2dev(chan), |
534 | "%s: exit, ret: %d, last_complete: %d, last_used: %d\n", | 534 | "%s: exit, ret: %d, last_complete: %d, last_used: %d\n", |
535 | __func__, ret, last_complete, last_used); | 535 | __func__, ret, last_complete, last_used); |
536 | 536 | ||
537 | return ret; | 537 | return ret; |
538 | } | 538 | } |
539 | 539 | ||
540 | static void td_issue_pending(struct dma_chan *chan) | 540 | static void td_issue_pending(struct dma_chan *chan) |
541 | { | 541 | { |
542 | struct timb_dma_chan *td_chan = | 542 | struct timb_dma_chan *td_chan = |
543 | container_of(chan, struct timb_dma_chan, chan); | 543 | container_of(chan, struct timb_dma_chan, chan); |
544 | 544 | ||
545 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); | 545 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); |
546 | spin_lock_bh(&td_chan->lock); | 546 | spin_lock_bh(&td_chan->lock); |
547 | 547 | ||
548 | if (!list_empty(&td_chan->active_list)) | 548 | if (!list_empty(&td_chan->active_list)) |
549 | /* transfer ongoing */ | 549 | /* transfer ongoing */ |
550 | if (__td_dma_done_ack(td_chan)) | 550 | if (__td_dma_done_ack(td_chan)) |
551 | __td_finish(td_chan); | 551 | __td_finish(td_chan); |
552 | 552 | ||
553 | if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue)) | 553 | if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue)) |
554 | __td_start_next(td_chan); | 554 | __td_start_next(td_chan); |
555 | 555 | ||
556 | spin_unlock_bh(&td_chan->lock); | 556 | spin_unlock_bh(&td_chan->lock); |
557 | } | 557 | } |
558 | 558 | ||
559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, | 559 | static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, |
560 | struct scatterlist *sgl, unsigned int sg_len, | 560 | struct scatterlist *sgl, unsigned int sg_len, |
561 | enum dma_data_direction direction, unsigned long flags) | 561 | enum dma_data_direction direction, unsigned long flags) |
562 | { | 562 | { |
563 | struct timb_dma_chan *td_chan = | 563 | struct timb_dma_chan *td_chan = |
564 | container_of(chan, struct timb_dma_chan, chan); | 564 | container_of(chan, struct timb_dma_chan, chan); |
565 | struct timb_dma_desc *td_desc; | 565 | struct timb_dma_desc *td_desc; |
566 | struct scatterlist *sg; | 566 | struct scatterlist *sg; |
567 | unsigned int i; | 567 | unsigned int i; |
568 | unsigned int desc_usage = 0; | 568 | unsigned int desc_usage = 0; |
569 | 569 | ||
570 | if (!sgl || !sg_len) { | 570 | if (!sgl || !sg_len) { |
571 | dev_err(chan2dev(chan), "%s: No SG list\n", __func__); | 571 | dev_err(chan2dev(chan), "%s: No SG list\n", __func__); |
572 | return NULL; | 572 | return NULL; |
573 | } | 573 | } |
574 | 574 | ||
575 | /* even channels are for RX, odd for TX */ | 575 | /* even channels are for RX, odd for TX */ |
576 | if (td_chan->direction != direction) { | 576 | if (td_chan->direction != direction) { |
577 | dev_err(chan2dev(chan), | 577 | dev_err(chan2dev(chan), |
578 | "Requesting channel in wrong direction\n"); | 578 | "Requesting channel in wrong direction\n"); |
579 | return NULL; | 579 | return NULL; |
580 | } | 580 | } |
581 | 581 | ||
582 | td_desc = td_desc_get(td_chan); | 582 | td_desc = td_desc_get(td_chan); |
583 | if (!td_desc) { | 583 | if (!td_desc) { |
584 | dev_err(chan2dev(chan), "Not enough descriptors available\n"); | 584 | dev_err(chan2dev(chan), "Not enough descriptors available\n"); |
585 | return NULL; | 585 | return NULL; |
586 | } | 586 | } |
587 | 587 | ||
588 | td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0; | 588 | td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0; |
589 | 589 | ||
590 | for_each_sg(sgl, sg, sg_len, i) { | 590 | for_each_sg(sgl, sg, sg_len, i) { |
591 | int err; | 591 | int err; |
592 | if (desc_usage > td_desc->desc_list_len) { | 592 | if (desc_usage > td_desc->desc_list_len) { |
593 | dev_err(chan2dev(chan), "No descriptor space\n"); | 593 | dev_err(chan2dev(chan), "No descriptor space\n"); |
594 | return NULL; | 594 | return NULL; |
595 | } | 595 | } |
596 | 596 | ||
597 | err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg, | 597 | err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg, |
598 | i == (sg_len - 1)); | 598 | i == (sg_len - 1)); |
599 | if (err) { | 599 | if (err) { |
600 | dev_err(chan2dev(chan), "Failed to update desc: %d\n", | 600 | dev_err(chan2dev(chan), "Failed to update desc: %d\n", |
601 | err); | 601 | err); |
602 | td_desc_put(td_chan, td_desc); | 602 | td_desc_put(td_chan, td_desc); |
603 | return NULL; | 603 | return NULL; |
604 | } | 604 | } |
605 | desc_usage += TIMB_DMA_DESC_SIZE; | 605 | desc_usage += TIMB_DMA_DESC_SIZE; |
606 | } | 606 | } |
607 | 607 | ||
608 | dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, | 608 | dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, |
609 | td_desc->desc_list_len, DMA_TO_DEVICE); | 609 | td_desc->desc_list_len, DMA_TO_DEVICE); |
610 | 610 | ||
611 | return &td_desc->txd; | 611 | return &td_desc->txd; |
612 | } | 612 | } |
613 | 613 | ||
614 | static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) | 614 | static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
615 | unsigned long arg) | ||
615 | { | 616 | { |
616 | struct timb_dma_chan *td_chan = | 617 | struct timb_dma_chan *td_chan = |
617 | container_of(chan, struct timb_dma_chan, chan); | 618 | container_of(chan, struct timb_dma_chan, chan); |
618 | struct timb_dma_desc *td_desc, *_td_desc; | 619 | struct timb_dma_desc *td_desc, *_td_desc; |
619 | 620 | ||
620 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); | 621 | dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); |
621 | 622 | ||
622 | if (cmd != DMA_TERMINATE_ALL) | 623 | if (cmd != DMA_TERMINATE_ALL) |
623 | return -ENXIO; | 624 | return -ENXIO; |
624 | 625 | ||
625 | /* first the easy part, put the queue into the free list */ | 626 | /* first the easy part, put the queue into the free list */ |
626 | spin_lock_bh(&td_chan->lock); | 627 | spin_lock_bh(&td_chan->lock); |
627 | list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue, | 628 | list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue, |
628 | desc_node) | 629 | desc_node) |
629 | list_move(&td_desc->desc_node, &td_chan->free_list); | 630 | list_move(&td_desc->desc_node, &td_chan->free_list); |
630 | 631 | ||
631 | /* now tear down the running */ | 632 | /* now tear down the running */ |
632 | __td_finish(td_chan); | 633 | __td_finish(td_chan); |
633 | spin_unlock_bh(&td_chan->lock); | 634 | spin_unlock_bh(&td_chan->lock); |
634 | 635 | ||
635 | return 0; | 636 | return 0; |
636 | } | 637 | } |
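
This hunk is the driver's whole share of the interface change: device_control() now carries a third, unsigned long argument next to the command. timb_dma can ignore it, because DMA_TERMINATE_ALL takes no operand, but every implementation and caller must match the widened prototype. A minimal sketch of a client under the new signature (stop_channel() is a made-up wrapper; the channel comes from wherever the client allocated it):

#include <linux/dmaengine.h>

/* Commands without an operand simply pass 0 for the new argument. */
static int stop_channel(struct dma_chan *chan)
{
	return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}
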
637 | 638 | ||
638 | static void td_tasklet(unsigned long data) | 639 | static void td_tasklet(unsigned long data) |
639 | { | 640 | { |
640 | struct timb_dma *td = (struct timb_dma *)data; | 641 | struct timb_dma *td = (struct timb_dma *)data; |
641 | u32 isr; | 642 | u32 isr; |
642 | u32 ipr; | 643 | u32 ipr; |
643 | u32 ier; | 644 | u32 ier; |
644 | int i; | 645 | int i; |
645 | 646 | ||
646 | isr = ioread32(td->membase + TIMBDMA_ISR); | 647 | isr = ioread32(td->membase + TIMBDMA_ISR); |
647 | ipr = isr & __td_ier_mask(td); | 648 | ipr = isr & __td_ier_mask(td); |
648 | 649 | ||
649 | /* ack the interrupts */ | 650 | /* ack the interrupts */ |
650 | iowrite32(ipr, td->membase + TIMBDMA_ISR); | 651 | iowrite32(ipr, td->membase + TIMBDMA_ISR); |
651 | 652 | ||
652 | for (i = 0; i < td->dma.chancnt; i++) | 653 | for (i = 0; i < td->dma.chancnt; i++) |
653 | if (ipr & (1 << i)) { | 654 | if (ipr & (1 << i)) { |
654 | struct timb_dma_chan *td_chan = td->channels + i; | 655 | struct timb_dma_chan *td_chan = td->channels + i; |
655 | spin_lock(&td_chan->lock); | 656 | spin_lock(&td_chan->lock); |
656 | __td_finish(td_chan); | 657 | __td_finish(td_chan); |
657 | if (!list_empty(&td_chan->queue)) | 658 | if (!list_empty(&td_chan->queue)) |
658 | __td_start_next(td_chan); | 659 | __td_start_next(td_chan); |
659 | spin_unlock(&td_chan->lock); | 660 | spin_unlock(&td_chan->lock); |
660 | } | 661 | } |
661 | 662 | ||
662 | ier = __td_ier_mask(td); | 663 | ier = __td_ier_mask(td); |
663 | iowrite32(ier, td->membase + TIMBDMA_IER); | 664 | iowrite32(ier, td->membase + TIMBDMA_IER); |
664 | } | 665 | } |
665 | 666 | ||
666 | 667 | ||
667 | static irqreturn_t td_irq(int irq, void *devid) | 668 | static irqreturn_t td_irq(int irq, void *devid) |
668 | { | 669 | { |
669 | struct timb_dma *td = devid; | 670 | struct timb_dma *td = devid; |
670 | u32 ipr = ioread32(td->membase + TIMBDMA_IPR); | 671 | u32 ipr = ioread32(td->membase + TIMBDMA_IPR); |
671 | 672 | ||
672 | if (ipr) { | 673 | if (ipr) { |
673 | /* disable interrupts, will be re-enabled in tasklet */ | 674 | /* disable interrupts, will be re-enabled in tasklet */ |
674 | iowrite32(0, td->membase + TIMBDMA_IER); | 675 | iowrite32(0, td->membase + TIMBDMA_IER); |
675 | 676 | ||
676 | tasklet_schedule(&td->tasklet); | 677 | tasklet_schedule(&td->tasklet); |
677 | 678 | ||
678 | return IRQ_HANDLED; | 679 | return IRQ_HANDLED; |
679 | } else | 680 | } else |
680 | return IRQ_NONE; | 681 | return IRQ_NONE; |
681 | } | 682 | } |
682 | 683 | ||
683 | 684 | ||
684 | static int __devinit td_probe(struct platform_device *pdev) | 685 | static int __devinit td_probe(struct platform_device *pdev) |
685 | { | 686 | { |
686 | struct timb_dma_platform_data *pdata = pdev->dev.platform_data; | 687 | struct timb_dma_platform_data *pdata = pdev->dev.platform_data; |
687 | struct timb_dma *td; | 688 | struct timb_dma *td; |
688 | struct resource *iomem; | 689 | struct resource *iomem; |
689 | int irq; | 690 | int irq; |
690 | int err; | 691 | int err; |
691 | int i; | 692 | int i; |
692 | 693 | ||
693 | if (!pdata) { | 694 | if (!pdata) { |
694 | dev_err(&pdev->dev, "No platform data\n"); | 695 | dev_err(&pdev->dev, "No platform data\n"); |
695 | return -EINVAL; | 696 | return -EINVAL; |
696 | } | 697 | } |
697 | 698 | ||
698 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 699 | iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
699 | if (!iomem) | 700 | if (!iomem) |
700 | return -EINVAL; | 701 | return -EINVAL; |
701 | 702 | ||
702 | irq = platform_get_irq(pdev, 0); | 703 | irq = platform_get_irq(pdev, 0); |
703 | if (irq < 0) | 704 | if (irq < 0) |
704 | return irq; | 705 | return irq; |
705 | 706 | ||
706 | if (!request_mem_region(iomem->start, resource_size(iomem), | 707 | if (!request_mem_region(iomem->start, resource_size(iomem), |
707 | DRIVER_NAME)) | 708 | DRIVER_NAME)) |
708 | return -EBUSY; | 709 | return -EBUSY; |
709 | 710 | ||
710 | td = kzalloc(sizeof(struct timb_dma) + | 711 | td = kzalloc(sizeof(struct timb_dma) + |
711 | sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL); | 712 | sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL); |
712 | if (!td) { | 713 | if (!td) { |
713 | err = -ENOMEM; | 714 | err = -ENOMEM; |
714 | goto err_release_region; | 715 | goto err_release_region; |
715 | } | 716 | } |
716 | 717 | ||
717 | dev_dbg(&pdev->dev, "Allocated TD: %p\n", td); | 718 | dev_dbg(&pdev->dev, "Allocated TD: %p\n", td); |
718 | 719 | ||
719 | td->membase = ioremap(iomem->start, resource_size(iomem)); | 720 | td->membase = ioremap(iomem->start, resource_size(iomem)); |
720 | if (!td->membase) { | 721 | if (!td->membase) { |
721 | dev_err(&pdev->dev, "Failed to remap I/O memory\n"); | 722 | dev_err(&pdev->dev, "Failed to remap I/O memory\n"); |
722 | err = -ENOMEM; | 723 | err = -ENOMEM; |
723 | goto err_free_mem; | 724 | goto err_free_mem; |
724 | } | 725 | } |
725 | 726 | ||
726 | /* 32bit addressing */ | 727 | /* 32bit addressing */ |
727 | iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR); | 728 | iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR); |
728 | 729 | ||
729 | /* disable and clear any interrupts */ | 730 | /* disable and clear any interrupts */ |
730 | iowrite32(0x0, td->membase + TIMBDMA_IER); | 731 | iowrite32(0x0, td->membase + TIMBDMA_IER); |
731 | iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR); | 732 | iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR); |
732 | 733 | ||
733 | tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td); | 734 | tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td); |
734 | 735 | ||
735 | err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td); | 736 | err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td); |
736 | if (err) { | 737 | if (err) { |
737 | dev_err(&pdev->dev, "Failed to request IRQ\n"); | 738 | dev_err(&pdev->dev, "Failed to request IRQ\n"); |
738 | goto err_tasklet_kill; | 739 | goto err_tasklet_kill; |
739 | } | 740 | } |
740 | 741 | ||
741 | td->dma.device_alloc_chan_resources = td_alloc_chan_resources; | 742 | td->dma.device_alloc_chan_resources = td_alloc_chan_resources; |
742 | td->dma.device_free_chan_resources = td_free_chan_resources; | 743 | td->dma.device_free_chan_resources = td_free_chan_resources; |
743 | td->dma.device_tx_status = td_tx_status; | 744 | td->dma.device_tx_status = td_tx_status; |
744 | td->dma.device_issue_pending = td_issue_pending; | 745 | td->dma.device_issue_pending = td_issue_pending; |
745 | 746 | ||
746 | dma_cap_set(DMA_SLAVE, td->dma.cap_mask); | 747 | dma_cap_set(DMA_SLAVE, td->dma.cap_mask); |
747 | dma_cap_set(DMA_PRIVATE, td->dma.cap_mask); | 748 | dma_cap_set(DMA_PRIVATE, td->dma.cap_mask); |
748 | td->dma.device_prep_slave_sg = td_prep_slave_sg; | 749 | td->dma.device_prep_slave_sg = td_prep_slave_sg; |
749 | td->dma.device_control = td_control; | 750 | td->dma.device_control = td_control; |
750 | 751 | ||
751 | td->dma.dev = &pdev->dev; | 752 | td->dma.dev = &pdev->dev; |
752 | 753 | ||
753 | INIT_LIST_HEAD(&td->dma.channels); | 754 | INIT_LIST_HEAD(&td->dma.channels); |
754 | 755 | ||
755 | for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) { | 756 | for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) { |
756 | struct timb_dma_chan *td_chan = &td->channels[i]; | 757 | struct timb_dma_chan *td_chan = &td->channels[i]; |
757 | struct timb_dma_platform_data_channel *pchan = | 758 | struct timb_dma_platform_data_channel *pchan = |
758 | pdata->channels + i; | 759 | pdata->channels + i; |
759 | 760 | ||
760 | /* even channels are RX, odd are TX */ | 761 | /* even channels are RX, odd are TX */ |
761 | if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) { | 762 | if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) { |
762 | dev_err(&pdev->dev, "Wrong channel configuration\n"); | 763 | dev_err(&pdev->dev, "Wrong channel configuration\n"); |
763 | err = -EINVAL; | 764 | err = -EINVAL; |
764 | goto err_tasklet_kill; | 765 | goto err_tasklet_kill; |
765 | } | 766 | } |
766 | 767 | ||
767 | td_chan->chan.device = &td->dma; | 768 | td_chan->chan.device = &td->dma; |
768 | td_chan->chan.cookie = 1; | 769 | td_chan->chan.cookie = 1; |
769 | td_chan->chan.chan_id = i; | 770 | td_chan->chan.chan_id = i; |
770 | spin_lock_init(&td_chan->lock); | 771 | spin_lock_init(&td_chan->lock); |
771 | INIT_LIST_HEAD(&td_chan->active_list); | 772 | INIT_LIST_HEAD(&td_chan->active_list); |
772 | INIT_LIST_HEAD(&td_chan->queue); | 773 | INIT_LIST_HEAD(&td_chan->queue); |
773 | INIT_LIST_HEAD(&td_chan->free_list); | 774 | INIT_LIST_HEAD(&td_chan->free_list); |
774 | 775 | ||
775 | td_chan->descs = pchan->descriptors; | 776 | td_chan->descs = pchan->descriptors; |
776 | td_chan->desc_elems = pchan->descriptor_elements; | 777 | td_chan->desc_elems = pchan->descriptor_elements; |
777 | td_chan->bytes_per_line = pchan->bytes_per_line; | 778 | td_chan->bytes_per_line = pchan->bytes_per_line; |
778 | td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : | 779 | td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : |
779 | DMA_TO_DEVICE; | 780 | DMA_TO_DEVICE; |
780 | 781 | ||
781 | td_chan->membase = td->membase + | 782 | td_chan->membase = td->membase + |
782 | (i / 2) * TIMBDMA_INSTANCE_OFFSET + | 783 | (i / 2) * TIMBDMA_INSTANCE_OFFSET + |
783 | (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET); | 784 | (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET); |
784 | 785 | ||
785 | dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n", | 786 | dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n", |
786 | i, td_chan->membase); | 787 | i, td_chan->membase); |
787 | 788 | ||
788 | list_add_tail(&td_chan->chan.device_node, &td->dma.channels); | 789 | list_add_tail(&td_chan->chan.device_node, &td->dma.channels); |
789 | } | 790 | } |
790 | 791 | ||
791 | err = dma_async_device_register(&td->dma); | 792 | err = dma_async_device_register(&td->dma); |
792 | if (err) { | 793 | if (err) { |
793 | dev_err(&pdev->dev, "Failed to register async device\n"); | 794 | dev_err(&pdev->dev, "Failed to register async device\n"); |
794 | goto err_free_irq; | 795 | goto err_free_irq; |
795 | } | 796 | } |
796 | 797 | ||
797 | platform_set_drvdata(pdev, td); | 798 | platform_set_drvdata(pdev, td); |
798 | 799 | ||
799 | dev_dbg(&pdev->dev, "Probe result: %d\n", err); | 800 | dev_dbg(&pdev->dev, "Probe result: %d\n", err); |
800 | return err; | 801 | return err; |
801 | 802 | ||
802 | err_free_irq: | 803 | err_free_irq: |
803 | free_irq(irq, td); | 804 | free_irq(irq, td); |
804 | err_tasklet_kill: | 805 | err_tasklet_kill: |
805 | tasklet_kill(&td->tasklet); | 806 | tasklet_kill(&td->tasklet); |
806 | iounmap(td->membase); | 807 | iounmap(td->membase); |
807 | err_free_mem: | 808 | err_free_mem: |
808 | kfree(td); | 809 | kfree(td); |
809 | err_release_region: | 810 | err_release_region: |
810 | release_mem_region(iomem->start, resource_size(iomem)); | 811 | release_mem_region(iomem->start, resource_size(iomem)); |
811 | 812 | ||
812 | return err; | 813 | return err; |
813 | 814 | ||
814 | } | 815 | } |
815 | 816 | ||
816 | static int __devexit td_remove(struct platform_device *pdev) | 817 | static int __devexit td_remove(struct platform_device *pdev) |
817 | { | 818 | { |
818 | struct timb_dma *td = platform_get_drvdata(pdev); | 819 | struct timb_dma *td = platform_get_drvdata(pdev); |
819 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 820 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
820 | int irq = platform_get_irq(pdev, 0); | 821 | int irq = platform_get_irq(pdev, 0); |
821 | 822 | ||
822 | dma_async_device_unregister(&td->dma); | 823 | dma_async_device_unregister(&td->dma); |
823 | free_irq(irq, td); | 824 | free_irq(irq, td); |
824 | tasklet_kill(&td->tasklet); | 825 | tasklet_kill(&td->tasklet); |
825 | iounmap(td->membase); | 826 | iounmap(td->membase); |
826 | kfree(td); | 827 | kfree(td); |
827 | release_mem_region(iomem->start, resource_size(iomem)); | 828 | release_mem_region(iomem->start, resource_size(iomem)); |
828 | 829 | ||
829 | platform_set_drvdata(pdev, NULL); | 830 | platform_set_drvdata(pdev, NULL); |
830 | 831 | ||
831 | dev_dbg(&pdev->dev, "Removed...\n"); | 832 | dev_dbg(&pdev->dev, "Removed...\n"); |
832 | return 0; | 833 | return 0; |
833 | } | 834 | } |
834 | 835 | ||
835 | static struct platform_driver td_driver = { | 836 | static struct platform_driver td_driver = { |
836 | .driver = { | 837 | .driver = { |
837 | .name = DRIVER_NAME, | 838 | .name = DRIVER_NAME, |
838 | .owner = THIS_MODULE, | 839 | .owner = THIS_MODULE, |
839 | }, | 840 | }, |
840 | .probe = td_probe, | 841 | .probe = td_probe, |
841 | .remove = __exit_p(td_remove), | 842 | .remove = __exit_p(td_remove), |
842 | }; | 843 | }; |
843 | 844 | ||
844 | static int __init td_init(void) | 845 | static int __init td_init(void) |
845 | { | 846 | { |
846 | return platform_driver_register(&td_driver); | 847 | return platform_driver_register(&td_driver); |
847 | } | 848 | } |
848 | module_init(td_init); | 849 | module_init(td_init); |
849 | 850 | ||
850 | static void __exit td_exit(void) | 851 | static void __exit td_exit(void) |
851 | { | 852 | { |
852 | platform_driver_unregister(&td_driver); | 853 | platform_driver_unregister(&td_driver); |
853 | } | 854 | } |
854 | module_exit(td_exit); | 855 | module_exit(td_exit); |
855 | 856 | ||
856 | MODULE_LICENSE("GPL v2"); | 857 | MODULE_LICENSE("GPL v2"); |
857 | MODULE_DESCRIPTION("Timberdale DMA controller driver"); | 858 | MODULE_DESCRIPTION("Timberdale DMA controller driver"); |
858 | MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>"); | 859 | MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>"); |
859 | MODULE_ALIAS("platform:"DRIVER_NAME); | 860 | MODULE_ALIAS("platform:"DRIVER_NAME); |
860 | 861 |
drivers/dma/txx9dmac.c
1 | /* | 1 | /* |
2 | * Driver for the TXx9 SoC DMA Controller | 2 | * Driver for the TXx9 SoC DMA Controller |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Atsushi Nemoto | 4 | * Copyright (C) 2009 Atsushi Nemoto |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/dma-mapping.h> | 10 | #include <linux/dma-mapping.h> |
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
13 | #include <linux/io.h> | 13 | #include <linux/io.h> |
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
18 | #include "txx9dmac.h" | 18 | #include "txx9dmac.h" |
19 | 19 | ||
20 | static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) | 20 | static struct txx9dmac_chan *to_txx9dmac_chan(struct dma_chan *chan) |
21 | { | 21 | { |
22 | return container_of(chan, struct txx9dmac_chan, chan); | 22 | return container_of(chan, struct txx9dmac_chan, chan); |
23 | } | 23 | } |
24 | 24 | ||
25 | static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc) | 25 | static struct txx9dmac_cregs __iomem *__dma_regs(const struct txx9dmac_chan *dc) |
26 | { | 26 | { |
27 | return dc->ch_regs; | 27 | return dc->ch_regs; |
28 | } | 28 | } |
29 | 29 | ||
30 | static struct txx9dmac_cregs32 __iomem *__dma_regs32( | 30 | static struct txx9dmac_cregs32 __iomem *__dma_regs32( |
31 | const struct txx9dmac_chan *dc) | 31 | const struct txx9dmac_chan *dc) |
32 | { | 32 | { |
33 | return dc->ch_regs; | 33 | return dc->ch_regs; |
34 | } | 34 | } |
35 | 35 | ||
36 | #define channel64_readq(dc, name) \ | 36 | #define channel64_readq(dc, name) \ |
37 | __raw_readq(&(__dma_regs(dc)->name)) | 37 | __raw_readq(&(__dma_regs(dc)->name)) |
38 | #define channel64_writeq(dc, name, val) \ | 38 | #define channel64_writeq(dc, name, val) \ |
39 | __raw_writeq((val), &(__dma_regs(dc)->name)) | 39 | __raw_writeq((val), &(__dma_regs(dc)->name)) |
40 | #define channel64_readl(dc, name) \ | 40 | #define channel64_readl(dc, name) \ |
41 | __raw_readl(&(__dma_regs(dc)->name)) | 41 | __raw_readl(&(__dma_regs(dc)->name)) |
42 | #define channel64_writel(dc, name, val) \ | 42 | #define channel64_writel(dc, name, val) \ |
43 | __raw_writel((val), &(__dma_regs(dc)->name)) | 43 | __raw_writel((val), &(__dma_regs(dc)->name)) |
44 | 44 | ||
45 | #define channel32_readl(dc, name) \ | 45 | #define channel32_readl(dc, name) \ |
46 | __raw_readl(&(__dma_regs32(dc)->name)) | 46 | __raw_readl(&(__dma_regs32(dc)->name)) |
47 | #define channel32_writel(dc, name, val) \ | 47 | #define channel32_writel(dc, name, val) \ |
48 | __raw_writel((val), &(__dma_regs32(dc)->name)) | 48 | __raw_writel((val), &(__dma_regs32(dc)->name)) |
49 | 49 | ||
50 | #define channel_readq(dc, name) channel64_readq(dc, name) | 50 | #define channel_readq(dc, name) channel64_readq(dc, name) |
51 | #define channel_writeq(dc, name, val) channel64_writeq(dc, name, val) | 51 | #define channel_writeq(dc, name, val) channel64_writeq(dc, name, val) |
52 | #define channel_readl(dc, name) \ | 52 | #define channel_readl(dc, name) \ |
53 | (is_dmac64(dc) ? \ | 53 | (is_dmac64(dc) ? \ |
54 | channel64_readl(dc, name) : channel32_readl(dc, name)) | 54 | channel64_readl(dc, name) : channel32_readl(dc, name)) |
55 | #define channel_writel(dc, name, val) \ | 55 | #define channel_writel(dc, name, val) \ |
56 | (is_dmac64(dc) ? \ | 56 | (is_dmac64(dc) ? \ |
57 | channel64_writel(dc, name, val) : channel32_writel(dc, name, val)) | 57 | channel64_writel(dc, name, val) : channel32_writel(dc, name, val)) |
58 | 58 | ||
59 | static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc) | 59 | static dma_addr_t channel64_read_CHAR(const struct txx9dmac_chan *dc) |
60 | { | 60 | { |
61 | if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) | 61 | if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) |
62 | return channel64_readq(dc, CHAR); | 62 | return channel64_readq(dc, CHAR); |
63 | else | 63 | else |
64 | return channel64_readl(dc, CHAR); | 64 | return channel64_readl(dc, CHAR); |
65 | } | 65 | } |
66 | 66 | ||
67 | static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) | 67 | static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) |
68 | { | 68 | { |
69 | if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) | 69 | if (sizeof(__dma_regs(dc)->CHAR) == sizeof(u64)) |
70 | channel64_writeq(dc, CHAR, val); | 70 | channel64_writeq(dc, CHAR, val); |
71 | else | 71 | else |
72 | channel64_writel(dc, CHAR, val); | 72 | channel64_writel(dc, CHAR, val); |
73 | } | 73 | } |
74 | 74 | ||
75 | static void channel64_clear_CHAR(const struct txx9dmac_chan *dc) | 75 | static void channel64_clear_CHAR(const struct txx9dmac_chan *dc) |
76 | { | 76 | { |
77 | #if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) | 77 | #if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) |
78 | channel64_writel(dc, CHAR, 0); | 78 | channel64_writel(dc, CHAR, 0); |
79 | channel64_writel(dc, __pad_CHAR, 0); | 79 | channel64_writel(dc, __pad_CHAR, 0); |
80 | #else | 80 | #else |
81 | channel64_writeq(dc, CHAR, 0); | 81 | channel64_writeq(dc, CHAR, 0); |
82 | #endif | 82 | #endif |
83 | } | 83 | } |
84 | 84 | ||
85 | static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc) | 85 | static dma_addr_t channel_read_CHAR(const struct txx9dmac_chan *dc) |
86 | { | 86 | { |
87 | if (is_dmac64(dc)) | 87 | if (is_dmac64(dc)) |
88 | return channel64_read_CHAR(dc); | 88 | return channel64_read_CHAR(dc); |
89 | else | 89 | else |
90 | return channel32_readl(dc, CHAR); | 90 | return channel32_readl(dc, CHAR); |
91 | } | 91 | } |
92 | 92 | ||
93 | static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) | 93 | static void channel_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val) |
94 | { | 94 | { |
95 | if (is_dmac64(dc)) | 95 | if (is_dmac64(dc)) |
96 | channel64_write_CHAR(dc, val); | 96 | channel64_write_CHAR(dc, val); |
97 | else | 97 | else |
98 | channel32_writel(dc, CHAR, val); | 98 | channel32_writel(dc, CHAR, val); |
99 | } | 99 | } |
100 | 100 | ||
101 | static struct txx9dmac_regs __iomem *__txx9dmac_regs( | 101 | static struct txx9dmac_regs __iomem *__txx9dmac_regs( |
102 | const struct txx9dmac_dev *ddev) | 102 | const struct txx9dmac_dev *ddev) |
103 | { | 103 | { |
104 | return ddev->regs; | 104 | return ddev->regs; |
105 | } | 105 | } |
106 | 106 | ||
107 | static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32( | 107 | static struct txx9dmac_regs32 __iomem *__txx9dmac_regs32( |
108 | const struct txx9dmac_dev *ddev) | 108 | const struct txx9dmac_dev *ddev) |
109 | { | 109 | { |
110 | return ddev->regs; | 110 | return ddev->regs; |
111 | } | 111 | } |
112 | 112 | ||
113 | #define dma64_readl(ddev, name) \ | 113 | #define dma64_readl(ddev, name) \ |
114 | __raw_readl(&(__txx9dmac_regs(ddev)->name)) | 114 | __raw_readl(&(__txx9dmac_regs(ddev)->name)) |
115 | #define dma64_writel(ddev, name, val) \ | 115 | #define dma64_writel(ddev, name, val) \ |
116 | __raw_writel((val), &(__txx9dmac_regs(ddev)->name)) | 116 | __raw_writel((val), &(__txx9dmac_regs(ddev)->name)) |
117 | 117 | ||
118 | #define dma32_readl(ddev, name) \ | 118 | #define dma32_readl(ddev, name) \ |
119 | __raw_readl(&(__txx9dmac_regs32(ddev)->name)) | 119 | __raw_readl(&(__txx9dmac_regs32(ddev)->name)) |
120 | #define dma32_writel(ddev, name, val) \ | 120 | #define dma32_writel(ddev, name, val) \ |
121 | __raw_writel((val), &(__txx9dmac_regs32(ddev)->name)) | 121 | __raw_writel((val), &(__txx9dmac_regs32(ddev)->name)) |
122 | 122 | ||
123 | #define dma_readl(ddev, name) \ | 123 | #define dma_readl(ddev, name) \ |
124 | (__is_dmac64(ddev) ? \ | 124 | (__is_dmac64(ddev) ? \ |
125 | dma64_readl(ddev, name) : dma32_readl(ddev, name)) | 125 | dma64_readl(ddev, name) : dma32_readl(ddev, name)) |
126 | #define dma_writel(ddev, name, val) \ | 126 | #define dma_writel(ddev, name, val) \ |
127 | (__is_dmac64(ddev) ? \ | 127 | (__is_dmac64(ddev) ? \ |
128 | dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val)) | 128 | dma64_writel(ddev, name, val) : dma32_writel(ddev, name, val)) |
129 | 129 | ||
130 | static struct device *chan2dev(struct dma_chan *chan) | 130 | static struct device *chan2dev(struct dma_chan *chan) |
131 | { | 131 | { |
132 | return &chan->dev->device; | 132 | return &chan->dev->device; |
133 | } | 133 | } |
134 | static struct device *chan2parent(struct dma_chan *chan) | 134 | static struct device *chan2parent(struct dma_chan *chan) |
135 | { | 135 | { |
136 | return chan->dev->device.parent; | 136 | return chan->dev->device.parent; |
137 | } | 137 | } |
138 | 138 | ||
139 | static struct txx9dmac_desc * | 139 | static struct txx9dmac_desc * |
140 | txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd) | 140 | txd_to_txx9dmac_desc(struct dma_async_tx_descriptor *txd) |
141 | { | 141 | { |
142 | return container_of(txd, struct txx9dmac_desc, txd); | 142 | return container_of(txd, struct txx9dmac_desc, txd); |
143 | } | 143 | } |
144 | 144 | ||
145 | static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc, | 145 | static dma_addr_t desc_read_CHAR(const struct txx9dmac_chan *dc, |
146 | const struct txx9dmac_desc *desc) | 146 | const struct txx9dmac_desc *desc) |
147 | { | 147 | { |
148 | return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR; | 148 | return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR; |
149 | } | 149 | } |
150 | 150 | ||
151 | static void desc_write_CHAR(const struct txx9dmac_chan *dc, | 151 | static void desc_write_CHAR(const struct txx9dmac_chan *dc, |
152 | struct txx9dmac_desc *desc, dma_addr_t val) | 152 | struct txx9dmac_desc *desc, dma_addr_t val) |
153 | { | 153 | { |
154 | if (is_dmac64(dc)) | 154 | if (is_dmac64(dc)) |
155 | desc->hwdesc.CHAR = val; | 155 | desc->hwdesc.CHAR = val; |
156 | else | 156 | else |
157 | desc->hwdesc32.CHAR = val; | 157 | desc->hwdesc32.CHAR = val; |
158 | } | 158 | } |
159 | 159 | ||
160 | #define TXX9_DMA_MAX_COUNT 0x04000000 | 160 | #define TXX9_DMA_MAX_COUNT 0x04000000 |
161 | 161 | ||
162 | #define TXX9_DMA_INITIAL_DESC_COUNT 64 | 162 | #define TXX9_DMA_INITIAL_DESC_COUNT 64 |
163 | 163 | ||
164 | static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc) | 164 | static struct txx9dmac_desc *txx9dmac_first_active(struct txx9dmac_chan *dc) |
165 | { | 165 | { |
166 | return list_entry(dc->active_list.next, | 166 | return list_entry(dc->active_list.next, |
167 | struct txx9dmac_desc, desc_node); | 167 | struct txx9dmac_desc, desc_node); |
168 | } | 168 | } |
169 | 169 | ||
170 | static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc) | 170 | static struct txx9dmac_desc *txx9dmac_last_active(struct txx9dmac_chan *dc) |
171 | { | 171 | { |
172 | return list_entry(dc->active_list.prev, | 172 | return list_entry(dc->active_list.prev, |
173 | struct txx9dmac_desc, desc_node); | 173 | struct txx9dmac_desc, desc_node); |
174 | } | 174 | } |
175 | 175 | ||
176 | static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc) | 176 | static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc) |
177 | { | 177 | { |
178 | return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node); | 178 | return list_entry(dc->queue.next, struct txx9dmac_desc, desc_node); |
179 | } | 179 | } |
180 | 180 | ||
181 | static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) | 181 | static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) |
182 | { | 182 | { |
183 | if (!list_empty(&desc->tx_list)) | 183 | if (!list_empty(&desc->tx_list)) |
184 | desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node); | 184 | desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node); |
185 | return desc; | 185 | return desc; |
186 | } | 186 | } |
187 | 187 | ||
188 | static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx); | 188 | static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx); |
189 | 189 | ||
190 | static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc, | 190 | static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc, |
191 | gfp_t flags) | 191 | gfp_t flags) |
192 | { | 192 | { |
193 | struct txx9dmac_dev *ddev = dc->ddev; | 193 | struct txx9dmac_dev *ddev = dc->ddev; |
194 | struct txx9dmac_desc *desc; | 194 | struct txx9dmac_desc *desc; |
195 | 195 | ||
196 | desc = kzalloc(sizeof(*desc), flags); | 196 | desc = kzalloc(sizeof(*desc), flags); |
197 | if (!desc) | 197 | if (!desc) |
198 | return NULL; | 198 | return NULL; |
199 | INIT_LIST_HEAD(&desc->tx_list); | 199 | INIT_LIST_HEAD(&desc->tx_list); |
200 | dma_async_tx_descriptor_init(&desc->txd, &dc->chan); | 200 | dma_async_tx_descriptor_init(&desc->txd, &dc->chan); |
201 | desc->txd.tx_submit = txx9dmac_tx_submit; | 201 | desc->txd.tx_submit = txx9dmac_tx_submit; |
202 | /* txd.flags will be overwritten in prep funcs */ | 202 | /* txd.flags will be overwritten in prep funcs */ |
203 | desc->txd.flags = DMA_CTRL_ACK; | 203 | desc->txd.flags = DMA_CTRL_ACK; |
204 | desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc, | 204 | desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc, |
205 | ddev->descsize, DMA_TO_DEVICE); | 205 | ddev->descsize, DMA_TO_DEVICE); |
206 | return desc; | 206 | return desc; |
207 | } | 207 | } |
208 | 208 | ||
209 | static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc) | 209 | static struct txx9dmac_desc *txx9dmac_desc_get(struct txx9dmac_chan *dc) |
210 | { | 210 | { |
211 | struct txx9dmac_desc *desc, *_desc; | 211 | struct txx9dmac_desc *desc, *_desc; |
212 | struct txx9dmac_desc *ret = NULL; | 212 | struct txx9dmac_desc *ret = NULL; |
213 | unsigned int i = 0; | 213 | unsigned int i = 0; |
214 | 214 | ||
215 | spin_lock_bh(&dc->lock); | 215 | spin_lock_bh(&dc->lock); |
216 | list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) { | 216 | list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) { |
217 | if (async_tx_test_ack(&desc->txd)) { | 217 | if (async_tx_test_ack(&desc->txd)) { |
218 | list_del(&desc->desc_node); | 218 | list_del(&desc->desc_node); |
219 | ret = desc; | 219 | ret = desc; |
220 | break; | 220 | break; |
221 | } | 221 | } |
222 | dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc); | 222 | dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc); |
223 | i++; | 223 | i++; |
224 | } | 224 | } |
225 | spin_unlock_bh(&dc->lock); | 225 | spin_unlock_bh(&dc->lock); |
226 | 226 | ||
227 | dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n", | 227 | dev_vdbg(chan2dev(&dc->chan), "scanned %u descriptors on freelist\n", |
228 | i); | 228 | i); |
229 | if (!ret) { | 229 | if (!ret) { |
230 | ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC); | 230 | ret = txx9dmac_desc_alloc(dc, GFP_ATOMIC); |
231 | if (ret) { | 231 | if (ret) { |
232 | spin_lock_bh(&dc->lock); | 232 | spin_lock_bh(&dc->lock); |
233 | dc->descs_allocated++; | 233 | dc->descs_allocated++; |
234 | spin_unlock_bh(&dc->lock); | 234 | spin_unlock_bh(&dc->lock); |
235 | } else | 235 | } else |
236 | dev_err(chan2dev(&dc->chan), | 236 | dev_err(chan2dev(&dc->chan), |
237 | "not enough descriptors available\n"); | 237 | "not enough descriptors available\n"); |
238 | } | 238 | } |
239 | return ret; | 239 | return ret; |
240 | } | 240 | } |
241 | 241 | ||
242 | static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, | 242 | static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, |
243 | struct txx9dmac_desc *desc) | 243 | struct txx9dmac_desc *desc) |
244 | { | 244 | { |
245 | struct txx9dmac_dev *ddev = dc->ddev; | 245 | struct txx9dmac_dev *ddev = dc->ddev; |
246 | struct txx9dmac_desc *child; | 246 | struct txx9dmac_desc *child; |
247 | 247 | ||
248 | list_for_each_entry(child, &desc->tx_list, desc_node) | 248 | list_for_each_entry(child, &desc->tx_list, desc_node) |
249 | dma_sync_single_for_cpu(chan2parent(&dc->chan), | 249 | dma_sync_single_for_cpu(chan2parent(&dc->chan), |
250 | child->txd.phys, ddev->descsize, | 250 | child->txd.phys, ddev->descsize, |
251 | DMA_TO_DEVICE); | 251 | DMA_TO_DEVICE); |
252 | dma_sync_single_for_cpu(chan2parent(&dc->chan), | 252 | dma_sync_single_for_cpu(chan2parent(&dc->chan), |
253 | desc->txd.phys, ddev->descsize, | 253 | desc->txd.phys, ddev->descsize, |
254 | DMA_TO_DEVICE); | 254 | DMA_TO_DEVICE); |
255 | } | 255 | } |
256 | 256 | ||
257 | /* | 257 | /* |
258 | * Move a descriptor, including any children, to the free list. | 258 | * Move a descriptor, including any children, to the free list. |
259 | * `desc' must not be on any lists. | 259 | * `desc' must not be on any lists. |
260 | */ | 260 | */ |
261 | static void txx9dmac_desc_put(struct txx9dmac_chan *dc, | 261 | static void txx9dmac_desc_put(struct txx9dmac_chan *dc, |
262 | struct txx9dmac_desc *desc) | 262 | struct txx9dmac_desc *desc) |
263 | { | 263 | { |
264 | if (desc) { | 264 | if (desc) { |
265 | struct txx9dmac_desc *child; | 265 | struct txx9dmac_desc *child; |
266 | 266 | ||
267 | txx9dmac_sync_desc_for_cpu(dc, desc); | 267 | txx9dmac_sync_desc_for_cpu(dc, desc); |
268 | 268 | ||
269 | spin_lock_bh(&dc->lock); | 269 | spin_lock_bh(&dc->lock); |
270 | list_for_each_entry(child, &desc->tx_list, desc_node) | 270 | list_for_each_entry(child, &desc->tx_list, desc_node) |
271 | dev_vdbg(chan2dev(&dc->chan), | 271 | dev_vdbg(chan2dev(&dc->chan), |
272 | "moving child desc %p to freelist\n", | 272 | "moving child desc %p to freelist\n", |
273 | child); | 273 | child); |
274 | list_splice_init(&desc->tx_list, &dc->free_list); | 274 | list_splice_init(&desc->tx_list, &dc->free_list); |
275 | dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", | 275 | dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", |
276 | desc); | 276 | desc); |
277 | list_add(&desc->desc_node, &dc->free_list); | 277 | list_add(&desc->desc_node, &dc->free_list); |
278 | spin_unlock_bh(&dc->lock); | 278 | spin_unlock_bh(&dc->lock); |
279 | } | 279 | } |
280 | } | 280 | } |
281 | 281 | ||
282 | /* Called with dc->lock held and bh disabled */ | 282 | /* Called with dc->lock held and bh disabled */ |
283 | static dma_cookie_t | 283 | static dma_cookie_t |
284 | txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) | 284 | txx9dmac_assign_cookie(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) |
285 | { | 285 | { |
286 | dma_cookie_t cookie = dc->chan.cookie; | 286 | dma_cookie_t cookie = dc->chan.cookie; |
287 | 287 | ||
288 | if (++cookie < 0) | 288 | if (++cookie < 0) |
289 | cookie = 1; | 289 | cookie = 1; |
290 | 290 | ||
291 | dc->chan.cookie = cookie; | 291 | dc->chan.cookie = cookie; |
292 | desc->txd.cookie = cookie; | 292 | desc->txd.cookie = cookie; |
293 | 293 | ||
294 | return cookie; | 294 | return cookie; |
295 | } | 295 | } |
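Side note on txx9dmac_assign_cookie: dma_cookie_t is a signed 32-bit type, and the dmaengine API reserves cookies <= 0 (zero means "not yet submitted", negative values are error codes), so once the pre-increment wraps negative the counter restarts at 1. A minimal illustrative sketch of the same wrap rule (the kernel is built with wrapping signed arithmetic, so the overflow test is well defined there):

	/* Illustrative only: mirrors the wrap rule used above. */
	static dma_cookie_t next_cookie(dma_cookie_t last)
	{
		if (++last < 0)		/* wrapped past INT_MAX */
			last = 1;	/* cookies <= 0 are reserved */
		return last;
	}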
296 | 296 | ||
297 | /*----------------------------------------------------------------------*/ | 297 | /*----------------------------------------------------------------------*/ |
298 | 298 | ||
299 | static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) | 299 | static void txx9dmac_dump_regs(struct txx9dmac_chan *dc) |
300 | { | 300 | { |
301 | if (is_dmac64(dc)) | 301 | if (is_dmac64(dc)) |
302 | dev_err(chan2dev(&dc->chan), | 302 | dev_err(chan2dev(&dc->chan), |
303 | " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x" | 303 | " CHAR: %#llx SAR: %#llx DAR: %#llx CNTR: %#x" |
304 | " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", | 304 | " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", |
305 | (u64)channel64_read_CHAR(dc), | 305 | (u64)channel64_read_CHAR(dc), |
306 | channel64_readq(dc, SAR), | 306 | channel64_readq(dc, SAR), |
307 | channel64_readq(dc, DAR), | 307 | channel64_readq(dc, DAR), |
308 | channel64_readl(dc, CNTR), | 308 | channel64_readl(dc, CNTR), |
309 | channel64_readl(dc, SAIR), | 309 | channel64_readl(dc, SAIR), |
310 | channel64_readl(dc, DAIR), | 310 | channel64_readl(dc, DAIR), |
311 | channel64_readl(dc, CCR), | 311 | channel64_readl(dc, CCR), |
312 | channel64_readl(dc, CSR)); | 312 | channel64_readl(dc, CSR)); |
313 | else | 313 | else |
314 | dev_err(chan2dev(&dc->chan), | 314 | dev_err(chan2dev(&dc->chan), |
315 | " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x" | 315 | " CHAR: %#x SAR: %#x DAR: %#x CNTR: %#x" |
316 | " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", | 316 | " SAIR: %#x DAIR: %#x CCR: %#x CSR: %#x\n", |
317 | channel32_readl(dc, CHAR), | 317 | channel32_readl(dc, CHAR), |
318 | channel32_readl(dc, SAR), | 318 | channel32_readl(dc, SAR), |
319 | channel32_readl(dc, DAR), | 319 | channel32_readl(dc, DAR), |
320 | channel32_readl(dc, CNTR), | 320 | channel32_readl(dc, CNTR), |
321 | channel32_readl(dc, SAIR), | 321 | channel32_readl(dc, SAIR), |
322 | channel32_readl(dc, DAIR), | 322 | channel32_readl(dc, DAIR), |
323 | channel32_readl(dc, CCR), | 323 | channel32_readl(dc, CCR), |
324 | channel32_readl(dc, CSR)); | 324 | channel32_readl(dc, CSR)); |
325 | } | 325 | } |
326 | 326 | ||
327 | static void txx9dmac_reset_chan(struct txx9dmac_chan *dc) | 327 | static void txx9dmac_reset_chan(struct txx9dmac_chan *dc) |
328 | { | 328 | { |
329 | channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST); | 329 | channel_writel(dc, CCR, TXX9_DMA_CCR_CHRST); |
330 | if (is_dmac64(dc)) { | 330 | if (is_dmac64(dc)) { |
331 | channel64_clear_CHAR(dc); | 331 | channel64_clear_CHAR(dc); |
332 | channel_writeq(dc, SAR, 0); | 332 | channel_writeq(dc, SAR, 0); |
333 | channel_writeq(dc, DAR, 0); | 333 | channel_writeq(dc, DAR, 0); |
334 | } else { | 334 | } else { |
335 | channel_writel(dc, CHAR, 0); | 335 | channel_writel(dc, CHAR, 0); |
336 | channel_writel(dc, SAR, 0); | 336 | channel_writel(dc, SAR, 0); |
337 | channel_writel(dc, DAR, 0); | 337 | channel_writel(dc, DAR, 0); |
338 | } | 338 | } |
339 | channel_writel(dc, CNTR, 0); | 339 | channel_writel(dc, CNTR, 0); |
340 | channel_writel(dc, SAIR, 0); | 340 | channel_writel(dc, SAIR, 0); |
341 | channel_writel(dc, DAIR, 0); | 341 | channel_writel(dc, DAIR, 0); |
342 | channel_writel(dc, CCR, 0); | 342 | channel_writel(dc, CCR, 0); |
343 | mmiowb(); | 343 | mmiowb(); |
344 | } | 344 | } |
345 | 345 | ||
346 | /* Called with dc->lock held and bh disabled */ | 346 | /* Called with dc->lock held and bh disabled */ |
347 | static void txx9dmac_dostart(struct txx9dmac_chan *dc, | 347 | static void txx9dmac_dostart(struct txx9dmac_chan *dc, |
348 | struct txx9dmac_desc *first) | 348 | struct txx9dmac_desc *first) |
349 | { | 349 | { |
350 | struct txx9dmac_slave *ds = dc->chan.private; | 350 | struct txx9dmac_slave *ds = dc->chan.private; |
351 | u32 sai, dai; | 351 | u32 sai, dai; |
352 | 352 | ||
353 | dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n", | 353 | dev_vdbg(chan2dev(&dc->chan), "dostart %u %p\n", |
354 | first->txd.cookie, first); | 354 | first->txd.cookie, first); |
355 | /* ASSERT: channel is idle */ | 355 | /* ASSERT: channel is idle */ |
356 | if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { | 356 | if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { |
357 | dev_err(chan2dev(&dc->chan), | 357 | dev_err(chan2dev(&dc->chan), |
358 | "BUG: Attempted to start non-idle channel\n"); | 358 | "BUG: Attempted to start non-idle channel\n"); |
359 | txx9dmac_dump_regs(dc); | 359 | txx9dmac_dump_regs(dc); |
360 | /* The tasklet will hopefully advance the queue... */ | 360 | /* The tasklet will hopefully advance the queue... */ |
361 | return; | 361 | return; |
362 | } | 362 | } |
363 | 363 | ||
364 | if (is_dmac64(dc)) { | 364 | if (is_dmac64(dc)) { |
365 | channel64_writel(dc, CNTR, 0); | 365 | channel64_writel(dc, CNTR, 0); |
366 | channel64_writel(dc, CSR, 0xffffffff); | 366 | channel64_writel(dc, CSR, 0xffffffff); |
367 | if (ds) { | 367 | if (ds) { |
368 | if (ds->tx_reg) { | 368 | if (ds->tx_reg) { |
369 | sai = ds->reg_width; | 369 | sai = ds->reg_width; |
370 | dai = 0; | 370 | dai = 0; |
371 | } else { | 371 | } else { |
372 | sai = 0; | 372 | sai = 0; |
373 | dai = ds->reg_width; | 373 | dai = ds->reg_width; |
374 | } | 374 | } |
375 | } else { | 375 | } else { |
376 | sai = 8; | 376 | sai = 8; |
377 | dai = 8; | 377 | dai = 8; |
378 | } | 378 | } |
379 | channel64_writel(dc, SAIR, sai); | 379 | channel64_writel(dc, SAIR, sai); |
380 | channel64_writel(dc, DAIR, dai); | 380 | channel64_writel(dc, DAIR, dai); |
381 | 		/* All 64-bit DMACs support SMPCHN */		 | 381 | 		/* All 64-bit DMACs support SMPCHN */		 |
382 | channel64_writel(dc, CCR, dc->ccr); | 382 | channel64_writel(dc, CCR, dc->ccr); |
383 | 		/* Writing a non-zero value to CHAR will assert XFACT */ | 383 | 		/* Writing a non-zero value to CHAR will assert XFACT */ |
384 | channel64_write_CHAR(dc, first->txd.phys); | 384 | channel64_write_CHAR(dc, first->txd.phys); |
385 | } else { | 385 | } else { |
386 | channel32_writel(dc, CNTR, 0); | 386 | channel32_writel(dc, CNTR, 0); |
387 | channel32_writel(dc, CSR, 0xffffffff); | 387 | channel32_writel(dc, CSR, 0xffffffff); |
388 | if (ds) { | 388 | if (ds) { |
389 | if (ds->tx_reg) { | 389 | if (ds->tx_reg) { |
390 | sai = ds->reg_width; | 390 | sai = ds->reg_width; |
391 | dai = 0; | 391 | dai = 0; |
392 | } else { | 392 | } else { |
393 | sai = 0; | 393 | sai = 0; |
394 | dai = ds->reg_width; | 394 | dai = ds->reg_width; |
395 | } | 395 | } |
396 | } else { | 396 | } else { |
397 | sai = 4; | 397 | sai = 4; |
398 | dai = 4; | 398 | dai = 4; |
399 | } | 399 | } |
400 | channel32_writel(dc, SAIR, sai); | 400 | channel32_writel(dc, SAIR, sai); |
401 | channel32_writel(dc, DAIR, dai); | 401 | channel32_writel(dc, DAIR, dai); |
402 | if (txx9_dma_have_SMPCHN()) { | 402 | if (txx9_dma_have_SMPCHN()) { |
403 | channel32_writel(dc, CCR, dc->ccr); | 403 | channel32_writel(dc, CCR, dc->ccr); |
404 | 			/* Writing a non-zero value to CHAR will assert XFACT */ | 404 | 			/* Writing a non-zero value to CHAR will assert XFACT */ |
405 | channel32_writel(dc, CHAR, first->txd.phys); | 405 | channel32_writel(dc, CHAR, first->txd.phys); |
406 | } else { | 406 | } else { |
407 | channel32_writel(dc, CHAR, first->txd.phys); | 407 | channel32_writel(dc, CHAR, first->txd.phys); |
408 | channel32_writel(dc, CCR, dc->ccr); | 408 | channel32_writel(dc, CCR, dc->ccr); |
409 | } | 409 | } |
410 | } | 410 | } |
411 | } | 411 | } |
412 | 412 | ||
413 | /*----------------------------------------------------------------------*/ | 413 | /*----------------------------------------------------------------------*/ |
414 | 414 | ||
415 | static void | 415 | static void |
416 | txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, | 416 | txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, |
417 | struct txx9dmac_desc *desc) | 417 | struct txx9dmac_desc *desc) |
418 | { | 418 | { |
419 | dma_async_tx_callback callback; | 419 | dma_async_tx_callback callback; |
420 | void *param; | 420 | void *param; |
421 | struct dma_async_tx_descriptor *txd = &desc->txd; | 421 | struct dma_async_tx_descriptor *txd = &desc->txd; |
422 | struct txx9dmac_slave *ds = dc->chan.private; | 422 | struct txx9dmac_slave *ds = dc->chan.private; |
423 | 423 | ||
424 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", | 424 | dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n", |
425 | txd->cookie, desc); | 425 | txd->cookie, desc); |
426 | 426 | ||
427 | dc->completed = txd->cookie; | 427 | dc->completed = txd->cookie; |
428 | callback = txd->callback; | 428 | callback = txd->callback; |
429 | param = txd->callback_param; | 429 | param = txd->callback_param; |
430 | 430 | ||
431 | txx9dmac_sync_desc_for_cpu(dc, desc); | 431 | txx9dmac_sync_desc_for_cpu(dc, desc); |
432 | list_splice_init(&desc->tx_list, &dc->free_list); | 432 | list_splice_init(&desc->tx_list, &dc->free_list); |
433 | list_move(&desc->desc_node, &dc->free_list); | 433 | list_move(&desc->desc_node, &dc->free_list); |
434 | 434 | ||
435 | if (!ds) { | 435 | if (!ds) { |
436 | dma_addr_t dmaaddr; | 436 | dma_addr_t dmaaddr; |
437 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | 437 | if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { |
438 | dmaaddr = is_dmac64(dc) ? | 438 | dmaaddr = is_dmac64(dc) ? |
439 | desc->hwdesc.DAR : desc->hwdesc32.DAR; | 439 | desc->hwdesc.DAR : desc->hwdesc32.DAR; |
440 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) | 440 | if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) |
441 | dma_unmap_single(chan2parent(&dc->chan), | 441 | dma_unmap_single(chan2parent(&dc->chan), |
442 | dmaaddr, desc->len, DMA_FROM_DEVICE); | 442 | dmaaddr, desc->len, DMA_FROM_DEVICE); |
443 | else | 443 | else |
444 | dma_unmap_page(chan2parent(&dc->chan), | 444 | dma_unmap_page(chan2parent(&dc->chan), |
445 | dmaaddr, desc->len, DMA_FROM_DEVICE); | 445 | dmaaddr, desc->len, DMA_FROM_DEVICE); |
446 | } | 446 | } |
447 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | 447 | if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { |
448 | dmaaddr = is_dmac64(dc) ? | 448 | dmaaddr = is_dmac64(dc) ? |
449 | desc->hwdesc.SAR : desc->hwdesc32.SAR; | 449 | desc->hwdesc.SAR : desc->hwdesc32.SAR; |
450 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) | 450 | if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) |
451 | dma_unmap_single(chan2parent(&dc->chan), | 451 | dma_unmap_single(chan2parent(&dc->chan), |
452 | dmaaddr, desc->len, DMA_TO_DEVICE); | 452 | dmaaddr, desc->len, DMA_TO_DEVICE); |
453 | else | 453 | else |
454 | dma_unmap_page(chan2parent(&dc->chan), | 454 | dma_unmap_page(chan2parent(&dc->chan), |
455 | dmaaddr, desc->len, DMA_TO_DEVICE); | 455 | dmaaddr, desc->len, DMA_TO_DEVICE); |
456 | } | 456 | } |
457 | } | 457 | } |
458 | 458 | ||
459 | /* | 459 | /* |
460 | * The API requires that no submissions are done from a | 460 | * The API requires that no submissions are done from a |
461 | * callback, so we don't need to drop the lock here | 461 | * callback, so we don't need to drop the lock here |
462 | */ | 462 | */ |
463 | if (callback) | 463 | if (callback) |
464 | callback(param); | 464 | callback(param); |
465 | dma_run_dependencies(txd); | 465 | dma_run_dependencies(txd); |
466 | } | 466 | } |
467 | 467 | ||
468 | static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list) | 468 | static void txx9dmac_dequeue(struct txx9dmac_chan *dc, struct list_head *list) |
469 | { | 469 | { |
470 | struct txx9dmac_dev *ddev = dc->ddev; | 470 | struct txx9dmac_dev *ddev = dc->ddev; |
471 | struct txx9dmac_desc *desc; | 471 | struct txx9dmac_desc *desc; |
472 | struct txx9dmac_desc *prev = NULL; | 472 | struct txx9dmac_desc *prev = NULL; |
473 | 473 | ||
474 | BUG_ON(!list_empty(list)); | 474 | BUG_ON(!list_empty(list)); |
475 | do { | 475 | do { |
476 | desc = txx9dmac_first_queued(dc); | 476 | desc = txx9dmac_first_queued(dc); |
477 | if (prev) { | 477 | if (prev) { |
478 | desc_write_CHAR(dc, prev, desc->txd.phys); | 478 | desc_write_CHAR(dc, prev, desc->txd.phys); |
479 | dma_sync_single_for_device(chan2parent(&dc->chan), | 479 | dma_sync_single_for_device(chan2parent(&dc->chan), |
480 | prev->txd.phys, ddev->descsize, | 480 | prev->txd.phys, ddev->descsize, |
481 | DMA_TO_DEVICE); | 481 | DMA_TO_DEVICE); |
482 | } | 482 | } |
483 | prev = txx9dmac_last_child(desc); | 483 | prev = txx9dmac_last_child(desc); |
484 | list_move_tail(&desc->desc_node, list); | 484 | list_move_tail(&desc->desc_node, list); |
485 | /* Make chain-completion interrupt happen */ | 485 | /* Make chain-completion interrupt happen */ |
486 | if ((desc->txd.flags & DMA_PREP_INTERRUPT) && | 486 | if ((desc->txd.flags & DMA_PREP_INTERRUPT) && |
487 | !txx9dmac_chan_INTENT(dc)) | 487 | !txx9dmac_chan_INTENT(dc)) |
488 | break; | 488 | break; |
489 | } while (!list_empty(&dc->queue)); | 489 | } while (!list_empty(&dc->queue)); |
490 | } | 490 | } |
491 | 491 | ||
492 | static void txx9dmac_complete_all(struct txx9dmac_chan *dc) | 492 | static void txx9dmac_complete_all(struct txx9dmac_chan *dc) |
493 | { | 493 | { |
494 | struct txx9dmac_desc *desc, *_desc; | 494 | struct txx9dmac_desc *desc, *_desc; |
495 | LIST_HEAD(list); | 495 | LIST_HEAD(list); |
496 | 496 | ||
497 | /* | 497 | /* |
498 | * Submit queued descriptors ASAP, i.e. before we go through | 498 | * Submit queued descriptors ASAP, i.e. before we go through |
499 | * the completed ones. | 499 | * the completed ones. |
500 | */ | 500 | */ |
501 | list_splice_init(&dc->active_list, &list); | 501 | list_splice_init(&dc->active_list, &list); |
502 | if (!list_empty(&dc->queue)) { | 502 | if (!list_empty(&dc->queue)) { |
503 | txx9dmac_dequeue(dc, &dc->active_list); | 503 | txx9dmac_dequeue(dc, &dc->active_list); |
504 | txx9dmac_dostart(dc, txx9dmac_first_active(dc)); | 504 | txx9dmac_dostart(dc, txx9dmac_first_active(dc)); |
505 | } | 505 | } |
506 | 506 | ||
507 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 507 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
508 | txx9dmac_descriptor_complete(dc, desc); | 508 | txx9dmac_descriptor_complete(dc, desc); |
509 | } | 509 | } |
510 | 510 | ||
511 | static void txx9dmac_dump_desc(struct txx9dmac_chan *dc, | 511 | static void txx9dmac_dump_desc(struct txx9dmac_chan *dc, |
512 | struct txx9dmac_hwdesc *desc) | 512 | struct txx9dmac_hwdesc *desc) |
513 | { | 513 | { |
514 | if (is_dmac64(dc)) { | 514 | if (is_dmac64(dc)) { |
515 | #ifdef TXX9_DMA_USE_SIMPLE_CHAIN | 515 | #ifdef TXX9_DMA_USE_SIMPLE_CHAIN |
516 | dev_crit(chan2dev(&dc->chan), | 516 | dev_crit(chan2dev(&dc->chan), |
517 | " desc: ch%#llx s%#llx d%#llx c%#x\n", | 517 | " desc: ch%#llx s%#llx d%#llx c%#x\n", |
518 | (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR); | 518 | (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR); |
519 | #else | 519 | #else |
520 | dev_crit(chan2dev(&dc->chan), | 520 | dev_crit(chan2dev(&dc->chan), |
521 | " desc: ch%#llx s%#llx d%#llx c%#x" | 521 | " desc: ch%#llx s%#llx d%#llx c%#x" |
522 | " si%#x di%#x cc%#x cs%#x\n", | 522 | " si%#x di%#x cc%#x cs%#x\n", |
523 | (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR, | 523 | (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR, |
524 | desc->SAIR, desc->DAIR, desc->CCR, desc->CSR); | 524 | desc->SAIR, desc->DAIR, desc->CCR, desc->CSR); |
525 | #endif | 525 | #endif |
526 | } else { | 526 | } else { |
527 | struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc; | 527 | struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc; |
528 | #ifdef TXX9_DMA_USE_SIMPLE_CHAIN | 528 | #ifdef TXX9_DMA_USE_SIMPLE_CHAIN |
529 | dev_crit(chan2dev(&dc->chan), | 529 | dev_crit(chan2dev(&dc->chan), |
530 | " desc: ch%#x s%#x d%#x c%#x\n", | 530 | " desc: ch%#x s%#x d%#x c%#x\n", |
531 | d->CHAR, d->SAR, d->DAR, d->CNTR); | 531 | d->CHAR, d->SAR, d->DAR, d->CNTR); |
532 | #else | 532 | #else |
533 | dev_crit(chan2dev(&dc->chan), | 533 | dev_crit(chan2dev(&dc->chan), |
534 | " desc: ch%#x s%#x d%#x c%#x" | 534 | " desc: ch%#x s%#x d%#x c%#x" |
535 | " si%#x di%#x cc%#x cs%#x\n", | 535 | " si%#x di%#x cc%#x cs%#x\n", |
536 | d->CHAR, d->SAR, d->DAR, d->CNTR, | 536 | d->CHAR, d->SAR, d->DAR, d->CNTR, |
537 | d->SAIR, d->DAIR, d->CCR, d->CSR); | 537 | d->SAIR, d->DAIR, d->CCR, d->CSR); |
538 | #endif | 538 | #endif |
539 | } | 539 | } |
540 | } | 540 | } |
541 | 541 | ||
542 | static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr) | 542 | static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr) |
543 | { | 543 | { |
544 | struct txx9dmac_desc *bad_desc; | 544 | struct txx9dmac_desc *bad_desc; |
545 | struct txx9dmac_desc *child; | 545 | struct txx9dmac_desc *child; |
546 | u32 errors; | 546 | u32 errors; |
547 | 547 | ||
548 | /* | 548 | /* |
549 | * The descriptor currently at the head of the active list is | 549 | * The descriptor currently at the head of the active list is |
550 | * borked. Since we don't have any way to report errors, we'll | 550 | * borked. Since we don't have any way to report errors, we'll |
551 | * just have to scream loudly and try to carry on. | 551 | * just have to scream loudly and try to carry on. |
552 | */ | 552 | */ |
553 | dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n"); | 553 | dev_crit(chan2dev(&dc->chan), "Abnormal Chain Completion\n"); |
554 | txx9dmac_dump_regs(dc); | 554 | txx9dmac_dump_regs(dc); |
555 | 555 | ||
556 | bad_desc = txx9dmac_first_active(dc); | 556 | bad_desc = txx9dmac_first_active(dc); |
557 | list_del_init(&bad_desc->desc_node); | 557 | list_del_init(&bad_desc->desc_node); |
558 | 558 | ||
559 | /* Clear all error flags and try to restart the controller */ | 559 | /* Clear all error flags and try to restart the controller */ |
560 | errors = csr & (TXX9_DMA_CSR_ABCHC | | 560 | errors = csr & (TXX9_DMA_CSR_ABCHC | |
561 | TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR | | 561 | TXX9_DMA_CSR_CFERR | TXX9_DMA_CSR_CHERR | |
562 | TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR); | 562 | TXX9_DMA_CSR_DESERR | TXX9_DMA_CSR_SORERR); |
563 | channel_writel(dc, CSR, errors); | 563 | channel_writel(dc, CSR, errors); |
564 | 564 | ||
565 | if (list_empty(&dc->active_list) && !list_empty(&dc->queue)) | 565 | if (list_empty(&dc->active_list) && !list_empty(&dc->queue)) |
566 | txx9dmac_dequeue(dc, &dc->active_list); | 566 | txx9dmac_dequeue(dc, &dc->active_list); |
567 | if (!list_empty(&dc->active_list)) | 567 | if (!list_empty(&dc->active_list)) |
568 | txx9dmac_dostart(dc, txx9dmac_first_active(dc)); | 568 | txx9dmac_dostart(dc, txx9dmac_first_active(dc)); |
569 | 569 | ||
570 | dev_crit(chan2dev(&dc->chan), | 570 | dev_crit(chan2dev(&dc->chan), |
571 | "Bad descriptor submitted for DMA! (cookie: %d)\n", | 571 | "Bad descriptor submitted for DMA! (cookie: %d)\n", |
572 | bad_desc->txd.cookie); | 572 | bad_desc->txd.cookie); |
573 | txx9dmac_dump_desc(dc, &bad_desc->hwdesc); | 573 | txx9dmac_dump_desc(dc, &bad_desc->hwdesc); |
574 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) | 574 | list_for_each_entry(child, &bad_desc->tx_list, desc_node) |
575 | txx9dmac_dump_desc(dc, &child->hwdesc); | 575 | txx9dmac_dump_desc(dc, &child->hwdesc); |
576 | /* Pretend the descriptor completed successfully */ | 576 | /* Pretend the descriptor completed successfully */ |
577 | txx9dmac_descriptor_complete(dc, bad_desc); | 577 | txx9dmac_descriptor_complete(dc, bad_desc); |
578 | } | 578 | } |
579 | 579 | ||
580 | static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc) | 580 | static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc) |
581 | { | 581 | { |
582 | dma_addr_t chain; | 582 | dma_addr_t chain; |
583 | struct txx9dmac_desc *desc, *_desc; | 583 | struct txx9dmac_desc *desc, *_desc; |
584 | struct txx9dmac_desc *child; | 584 | struct txx9dmac_desc *child; |
585 | u32 csr; | 585 | u32 csr; |
586 | 586 | ||
587 | if (is_dmac64(dc)) { | 587 | if (is_dmac64(dc)) { |
588 | chain = channel64_read_CHAR(dc); | 588 | chain = channel64_read_CHAR(dc); |
589 | csr = channel64_readl(dc, CSR); | 589 | csr = channel64_readl(dc, CSR); |
590 | channel64_writel(dc, CSR, csr); | 590 | channel64_writel(dc, CSR, csr); |
591 | } else { | 591 | } else { |
592 | chain = channel32_readl(dc, CHAR); | 592 | chain = channel32_readl(dc, CHAR); |
593 | csr = channel32_readl(dc, CSR); | 593 | csr = channel32_readl(dc, CSR); |
594 | channel32_writel(dc, CSR, csr); | 594 | channel32_writel(dc, CSR, csr); |
595 | } | 595 | } |
596 | 	/* For dynamic chains, we should look at XFACT instead of NCHNC */ | 596 | 	/* For dynamic chains, we should look at XFACT instead of NCHNC */ |
597 | if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) { | 597 | if (!(csr & (TXX9_DMA_CSR_XFACT | TXX9_DMA_CSR_ABCHC))) { |
598 | /* Everything we've submitted is done */ | 598 | /* Everything we've submitted is done */ |
599 | txx9dmac_complete_all(dc); | 599 | txx9dmac_complete_all(dc); |
600 | return; | 600 | return; |
601 | } | 601 | } |
602 | if (!(csr & TXX9_DMA_CSR_CHNEN)) | 602 | if (!(csr & TXX9_DMA_CSR_CHNEN)) |
603 | chain = 0; /* last descriptor of this chain */ | 603 | chain = 0; /* last descriptor of this chain */ |
604 | 604 | ||
605 | dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n", | 605 | dev_vdbg(chan2dev(&dc->chan), "scan_descriptors: char=%#llx\n", |
606 | (u64)chain); | 606 | (u64)chain); |
607 | 607 | ||
608 | list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) { | 608 | list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) { |
609 | if (desc_read_CHAR(dc, desc) == chain) { | 609 | if (desc_read_CHAR(dc, desc) == chain) { |
610 | /* This one is currently in progress */ | 610 | /* This one is currently in progress */ |
611 | if (csr & TXX9_DMA_CSR_ABCHC) | 611 | if (csr & TXX9_DMA_CSR_ABCHC) |
612 | goto scan_done; | 612 | goto scan_done; |
613 | return; | 613 | return; |
614 | } | 614 | } |
615 | 615 | ||
616 | list_for_each_entry(child, &desc->tx_list, desc_node) | 616 | list_for_each_entry(child, &desc->tx_list, desc_node) |
617 | if (desc_read_CHAR(dc, child) == chain) { | 617 | if (desc_read_CHAR(dc, child) == chain) { |
618 | /* Currently in progress */ | 618 | /* Currently in progress */ |
619 | if (csr & TXX9_DMA_CSR_ABCHC) | 619 | if (csr & TXX9_DMA_CSR_ABCHC) |
620 | goto scan_done; | 620 | goto scan_done; |
621 | return; | 621 | return; |
622 | } | 622 | } |
623 | 623 | ||
624 | /* | 624 | /* |
625 | * No descriptors so far seem to be in progress, i.e. | 625 | * No descriptors so far seem to be in progress, i.e. |
626 | * this one must be done. | 626 | * this one must be done. |
627 | */ | 627 | */ |
628 | txx9dmac_descriptor_complete(dc, desc); | 628 | txx9dmac_descriptor_complete(dc, desc); |
629 | } | 629 | } |
630 | scan_done: | 630 | scan_done: |
631 | if (csr & TXX9_DMA_CSR_ABCHC) { | 631 | if (csr & TXX9_DMA_CSR_ABCHC) { |
632 | txx9dmac_handle_error(dc, csr); | 632 | txx9dmac_handle_error(dc, csr); |
633 | return; | 633 | return; |
634 | } | 634 | } |
635 | 635 | ||
636 | dev_err(chan2dev(&dc->chan), | 636 | dev_err(chan2dev(&dc->chan), |
637 | "BUG: All descriptors done, but channel not idle!\n"); | 637 | "BUG: All descriptors done, but channel not idle!\n"); |
638 | 638 | ||
639 | /* Try to continue after resetting the channel... */ | 639 | /* Try to continue after resetting the channel... */ |
640 | txx9dmac_reset_chan(dc); | 640 | txx9dmac_reset_chan(dc); |
641 | 641 | ||
642 | if (!list_empty(&dc->queue)) { | 642 | if (!list_empty(&dc->queue)) { |
643 | txx9dmac_dequeue(dc, &dc->active_list); | 643 | txx9dmac_dequeue(dc, &dc->active_list); |
644 | txx9dmac_dostart(dc, txx9dmac_first_active(dc)); | 644 | txx9dmac_dostart(dc, txx9dmac_first_active(dc)); |
645 | } | 645 | } |
646 | } | 646 | } |
647 | 647 | ||
648 | static void txx9dmac_chan_tasklet(unsigned long data) | 648 | static void txx9dmac_chan_tasklet(unsigned long data) |
649 | { | 649 | { |
650 | int irq; | 650 | int irq; |
651 | u32 csr; | 651 | u32 csr; |
652 | struct txx9dmac_chan *dc; | 652 | struct txx9dmac_chan *dc; |
653 | 653 | ||
654 | dc = (struct txx9dmac_chan *)data; | 654 | dc = (struct txx9dmac_chan *)data; |
655 | csr = channel_readl(dc, CSR); | 655 | csr = channel_readl(dc, CSR); |
656 | dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr); | 656 | dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", csr); |
657 | 657 | ||
658 | spin_lock(&dc->lock); | 658 | spin_lock(&dc->lock); |
659 | if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | | 659 | if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | |
660 | TXX9_DMA_CSR_NTRNFC)) | 660 | TXX9_DMA_CSR_NTRNFC)) |
661 | txx9dmac_scan_descriptors(dc); | 661 | txx9dmac_scan_descriptors(dc); |
662 | spin_unlock(&dc->lock); | 662 | spin_unlock(&dc->lock); |
663 | irq = dc->irq; | 663 | irq = dc->irq; |
664 | 664 | ||
665 | enable_irq(irq); | 665 | enable_irq(irq); |
666 | } | 666 | } |
667 | 667 | ||
668 | static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id) | 668 | static irqreturn_t txx9dmac_chan_interrupt(int irq, void *dev_id) |
669 | { | 669 | { |
670 | struct txx9dmac_chan *dc = dev_id; | 670 | struct txx9dmac_chan *dc = dev_id; |
671 | 671 | ||
672 | dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n", | 672 | dev_vdbg(chan2dev(&dc->chan), "interrupt: status=%#x\n", |
673 | channel_readl(dc, CSR)); | 673 | channel_readl(dc, CSR)); |
674 | 674 | ||
675 | tasklet_schedule(&dc->tasklet); | 675 | tasklet_schedule(&dc->tasklet); |
676 | /* | 676 | /* |
677 | * Just disable the interrupts. We'll turn them back on in the | 677 | * Just disable the interrupts. We'll turn them back on in the |
678 | * softirq handler. | 678 | * softirq handler. |
679 | */ | 679 | */ |
680 | disable_irq_nosync(irq); | 680 | disable_irq_nosync(irq); |
681 | 681 | ||
682 | return IRQ_HANDLED; | 682 | return IRQ_HANDLED; |
683 | } | 683 | } |
684 | 684 | ||
685 | static void txx9dmac_tasklet(unsigned long data) | 685 | static void txx9dmac_tasklet(unsigned long data) |
686 | { | 686 | { |
687 | int irq; | 687 | int irq; |
688 | u32 csr; | 688 | u32 csr; |
689 | struct txx9dmac_chan *dc; | 689 | struct txx9dmac_chan *dc; |
690 | 690 | ||
691 | struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data; | 691 | struct txx9dmac_dev *ddev = (struct txx9dmac_dev *)data; |
692 | u32 mcr; | 692 | u32 mcr; |
693 | int i; | 693 | int i; |
694 | 694 | ||
695 | mcr = dma_readl(ddev, MCR); | 695 | mcr = dma_readl(ddev, MCR); |
696 | dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr); | 696 | dev_vdbg(ddev->chan[0]->dma.dev, "tasklet: mcr=%x\n", mcr); |
697 | for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) { | 697 | for (i = 0; i < TXX9_DMA_MAX_NR_CHANNELS; i++) { |
698 | if ((mcr >> (24 + i)) & 0x11) { | 698 | if ((mcr >> (24 + i)) & 0x11) { |
699 | dc = ddev->chan[i]; | 699 | dc = ddev->chan[i]; |
700 | csr = channel_readl(dc, CSR); | 700 | csr = channel_readl(dc, CSR); |
701 | dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", | 701 | dev_vdbg(chan2dev(&dc->chan), "tasklet: status=%x\n", |
702 | csr); | 702 | csr); |
703 | spin_lock(&dc->lock); | 703 | spin_lock(&dc->lock); |
704 | if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | | 704 | if (csr & (TXX9_DMA_CSR_ABCHC | TXX9_DMA_CSR_NCHNC | |
705 | TXX9_DMA_CSR_NTRNFC)) | 705 | TXX9_DMA_CSR_NTRNFC)) |
706 | txx9dmac_scan_descriptors(dc); | 706 | txx9dmac_scan_descriptors(dc); |
707 | spin_unlock(&dc->lock); | 707 | spin_unlock(&dc->lock); |
708 | } | 708 | } |
709 | } | 709 | } |
710 | irq = ddev->irq; | 710 | irq = ddev->irq; |
711 | 711 | ||
712 | enable_irq(irq); | 712 | enable_irq(irq); |
713 | } | 713 | } |
714 | 714 | ||
715 | static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id) | 715 | static irqreturn_t txx9dmac_interrupt(int irq, void *dev_id) |
716 | { | 716 | { |
717 | struct txx9dmac_dev *ddev = dev_id; | 717 | struct txx9dmac_dev *ddev = dev_id; |
718 | 718 | ||
719 | dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n", | 719 | dev_vdbg(ddev->chan[0]->dma.dev, "interrupt: status=%#x\n", |
720 | dma_readl(ddev, MCR)); | 720 | dma_readl(ddev, MCR)); |
721 | 721 | ||
722 | tasklet_schedule(&ddev->tasklet); | 722 | tasklet_schedule(&ddev->tasklet); |
723 | /* | 723 | /* |
724 | * Just disable the interrupts. We'll turn them back on in the | 724 | * Just disable the interrupts. We'll turn them back on in the |
725 | * softirq handler. | 725 | * softirq handler. |
726 | */ | 726 | */ |
727 | disable_irq_nosync(irq); | 727 | disable_irq_nosync(irq); |
728 | 728 | ||
729 | return IRQ_HANDLED; | 729 | return IRQ_HANDLED; |
730 | } | 730 | } |
731 | 731 | ||
732 | /*----------------------------------------------------------------------*/ | 732 | /*----------------------------------------------------------------------*/ |
733 | 733 | ||
734 | static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx) | 734 | static dma_cookie_t txx9dmac_tx_submit(struct dma_async_tx_descriptor *tx) |
735 | { | 735 | { |
736 | struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx); | 736 | struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx); |
737 | struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan); | 737 | struct txx9dmac_chan *dc = to_txx9dmac_chan(tx->chan); |
738 | dma_cookie_t cookie; | 738 | dma_cookie_t cookie; |
739 | 739 | ||
740 | spin_lock_bh(&dc->lock); | 740 | spin_lock_bh(&dc->lock); |
741 | cookie = txx9dmac_assign_cookie(dc, desc); | 741 | cookie = txx9dmac_assign_cookie(dc, desc); |
742 | 742 | ||
743 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", | 743 | dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u %p\n", |
744 | desc->txd.cookie, desc); | 744 | desc->txd.cookie, desc); |
745 | 745 | ||
746 | list_add_tail(&desc->desc_node, &dc->queue); | 746 | list_add_tail(&desc->desc_node, &dc->queue); |
747 | spin_unlock_bh(&dc->lock); | 747 | spin_unlock_bh(&dc->lock); |
748 | 748 | ||
749 | return cookie; | 749 | return cookie; |
750 | } | 750 | } |
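For orientation, this hook is the middle step of the usual dmaengine client sequence: a prep call builds the descriptor chain, tx_submit assigns the cookie and parks the chain on dc->queue, and issue_pending (further below) actually starts the hardware. A hedged client-side sketch, assuming chan, dest, src and len are provided by the caller, with error handling elided:

	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = chan->device->device_prep_dma_memcpy(chan, dest, src, len,
						   DMA_PREP_INTERRUPT);
	cookie = txd->tx_submit(txd);		  /* -> txx9dmac_tx_submit */
	chan->device->device_issue_pending(chan); /* -> txx9dmac_issue_pending */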
751 | 751 | ||
752 | static struct dma_async_tx_descriptor * | 752 | static struct dma_async_tx_descriptor * |
753 | txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 753 | txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
754 | size_t len, unsigned long flags) | 754 | size_t len, unsigned long flags) |
755 | { | 755 | { |
756 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 756 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
757 | struct txx9dmac_dev *ddev = dc->ddev; | 757 | struct txx9dmac_dev *ddev = dc->ddev; |
758 | struct txx9dmac_desc *desc; | 758 | struct txx9dmac_desc *desc; |
759 | struct txx9dmac_desc *first; | 759 | struct txx9dmac_desc *first; |
760 | struct txx9dmac_desc *prev; | 760 | struct txx9dmac_desc *prev; |
761 | size_t xfer_count; | 761 | size_t xfer_count; |
762 | size_t offset; | 762 | size_t offset; |
763 | 763 | ||
764 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n", | 764 | dev_vdbg(chan2dev(chan), "prep_dma_memcpy d%#llx s%#llx l%#zx f%#lx\n", |
765 | (u64)dest, (u64)src, len, flags); | 765 | (u64)dest, (u64)src, len, flags); |
766 | 766 | ||
767 | if (unlikely(!len)) { | 767 | if (unlikely(!len)) { |
768 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); | 768 | dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); |
769 | return NULL; | 769 | return NULL; |
770 | } | 770 | } |
771 | 771 | ||
772 | prev = first = NULL; | 772 | prev = first = NULL; |
773 | 773 | ||
774 | for (offset = 0; offset < len; offset += xfer_count) { | 774 | for (offset = 0; offset < len; offset += xfer_count) { |
775 | xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT); | 775 | xfer_count = min_t(size_t, len - offset, TXX9_DMA_MAX_COUNT); |
776 | /* | 776 | /* |
777 | * Workaround for ERT-TX49H2-033, ERT-TX49H3-020, | 777 | * Workaround for ERT-TX49H2-033, ERT-TX49H3-020, |
778 | * ERT-TX49H4-016 (slightly conservative) | 778 | * ERT-TX49H4-016 (slightly conservative) |
779 | */ | 779 | */ |
780 | if (__is_dmac64(ddev)) { | 780 | if (__is_dmac64(ddev)) { |
781 | if (xfer_count > 0x100 && | 781 | if (xfer_count > 0x100 && |
782 | (xfer_count & 0xff) >= 0xfa && | 782 | (xfer_count & 0xff) >= 0xfa && |
783 | (xfer_count & 0xff) <= 0xff) | 783 | (xfer_count & 0xff) <= 0xff) |
784 | xfer_count -= 0x20; | 784 | xfer_count -= 0x20; |
785 | } else { | 785 | } else { |
786 | if (xfer_count > 0x80 && | 786 | if (xfer_count > 0x80 && |
787 | (xfer_count & 0x7f) >= 0x7e && | 787 | (xfer_count & 0x7f) >= 0x7e && |
788 | (xfer_count & 0x7f) <= 0x7f) | 788 | (xfer_count & 0x7f) <= 0x7f) |
789 | xfer_count -= 0x20; | 789 | xfer_count -= 0x20; |
790 | } | 790 | } |
791 | 791 | ||
792 | desc = txx9dmac_desc_get(dc); | 792 | desc = txx9dmac_desc_get(dc); |
793 | if (!desc) { | 793 | if (!desc) { |
794 | txx9dmac_desc_put(dc, first); | 794 | txx9dmac_desc_put(dc, first); |
795 | return NULL; | 795 | return NULL; |
796 | } | 796 | } |
797 | 797 | ||
798 | if (__is_dmac64(ddev)) { | 798 | if (__is_dmac64(ddev)) { |
799 | desc->hwdesc.SAR = src + offset; | 799 | desc->hwdesc.SAR = src + offset; |
800 | desc->hwdesc.DAR = dest + offset; | 800 | desc->hwdesc.DAR = dest + offset; |
801 | desc->hwdesc.CNTR = xfer_count; | 801 | desc->hwdesc.CNTR = xfer_count; |
802 | txx9dmac_desc_set_nosimple(ddev, desc, 8, 8, | 802 | txx9dmac_desc_set_nosimple(ddev, desc, 8, 8, |
803 | dc->ccr | TXX9_DMA_CCR_XFACT); | 803 | dc->ccr | TXX9_DMA_CCR_XFACT); |
804 | } else { | 804 | } else { |
805 | desc->hwdesc32.SAR = src + offset; | 805 | desc->hwdesc32.SAR = src + offset; |
806 | desc->hwdesc32.DAR = dest + offset; | 806 | desc->hwdesc32.DAR = dest + offset; |
807 | desc->hwdesc32.CNTR = xfer_count; | 807 | desc->hwdesc32.CNTR = xfer_count; |
808 | txx9dmac_desc_set_nosimple(ddev, desc, 4, 4, | 808 | txx9dmac_desc_set_nosimple(ddev, desc, 4, 4, |
809 | dc->ccr | TXX9_DMA_CCR_XFACT); | 809 | dc->ccr | TXX9_DMA_CCR_XFACT); |
810 | } | 810 | } |
811 | 811 | ||
812 | /* | 812 | /* |
813 | * The descriptors on tx_list are not reachable from | 813 | * The descriptors on tx_list are not reachable from |
814 | * the dc->queue list or dc->active_list after a | 814 | * the dc->queue list or dc->active_list after a |
815 | * submit. If we put all descriptors on active_list, | 815 | * submit. If we put all descriptors on active_list, |
816 | 		 * invoking the callback on completion would be more | 816 | 		 * invoking the callback on completion would be more |
817 | * complex. | 817 | * complex. |
818 | */ | 818 | */ |
819 | if (!first) { | 819 | if (!first) { |
820 | first = desc; | 820 | first = desc; |
821 | } else { | 821 | } else { |
822 | desc_write_CHAR(dc, prev, desc->txd.phys); | 822 | desc_write_CHAR(dc, prev, desc->txd.phys); |
823 | dma_sync_single_for_device(chan2parent(&dc->chan), | 823 | dma_sync_single_for_device(chan2parent(&dc->chan), |
824 | prev->txd.phys, ddev->descsize, | 824 | prev->txd.phys, ddev->descsize, |
825 | DMA_TO_DEVICE); | 825 | DMA_TO_DEVICE); |
826 | list_add_tail(&desc->desc_node, &first->tx_list); | 826 | list_add_tail(&desc->desc_node, &first->tx_list); |
827 | } | 827 | } |
828 | prev = desc; | 828 | prev = desc; |
829 | } | 829 | } |
830 | 830 | ||
831 | /* Trigger interrupt after last block */ | 831 | /* Trigger interrupt after last block */ |
832 | if (flags & DMA_PREP_INTERRUPT) | 832 | if (flags & DMA_PREP_INTERRUPT) |
833 | txx9dmac_desc_set_INTENT(ddev, prev); | 833 | txx9dmac_desc_set_INTENT(ddev, prev); |
834 | 834 | ||
835 | desc_write_CHAR(dc, prev, 0); | 835 | desc_write_CHAR(dc, prev, 0); |
836 | dma_sync_single_for_device(chan2parent(&dc->chan), | 836 | dma_sync_single_for_device(chan2parent(&dc->chan), |
837 | prev->txd.phys, ddev->descsize, | 837 | prev->txd.phys, ddev->descsize, |
838 | DMA_TO_DEVICE); | 838 | DMA_TO_DEVICE); |
839 | 839 | ||
840 | first->txd.flags = flags; | 840 | first->txd.flags = flags; |
841 | first->len = len; | 841 | first->len = len; |
842 | 842 | ||
843 | return &first->txd; | 843 | return &first->txd; |
844 | } | 844 | } |
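To make the errata workaround in the chunking loop above concrete: on the 64-bit DMAC, any chunk longer than 0x100 bytes whose low byte lands in 0xfa..0xff is shortened by 0x20, and the shortfall is simply picked up by the next iteration because the loop advances by the adjusted xfer_count. A worked trace for a hypothetical len of 0x1fa:

	iteration 1: xfer_count = 0x1fa; low byte 0xfa falls in [0xfa, 0xff],
	             so xfer_count -= 0x20, leaving 0x1da
	iteration 2: offset = 0x1da, xfer_count = 0x20; no adjustment applies,
	             and offset + xfer_count == len, so the loop ends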
845 | 845 | ||
846 | static struct dma_async_tx_descriptor * | 846 | static struct dma_async_tx_descriptor * |
847 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | 847 | txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, |
848 | unsigned int sg_len, enum dma_data_direction direction, | 848 | unsigned int sg_len, enum dma_data_direction direction, |
849 | unsigned long flags) | 849 | unsigned long flags) |
850 | { | 850 | { |
851 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 851 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
852 | struct txx9dmac_dev *ddev = dc->ddev; | 852 | struct txx9dmac_dev *ddev = dc->ddev; |
853 | struct txx9dmac_slave *ds = chan->private; | 853 | struct txx9dmac_slave *ds = chan->private; |
854 | struct txx9dmac_desc *prev; | 854 | struct txx9dmac_desc *prev; |
855 | struct txx9dmac_desc *first; | 855 | struct txx9dmac_desc *first; |
856 | unsigned int i; | 856 | unsigned int i; |
857 | struct scatterlist *sg; | 857 | struct scatterlist *sg; |
858 | 858 | ||
859 | dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); | 859 | dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); |
860 | 860 | ||
861 | BUG_ON(!ds || !ds->reg_width); | 861 | BUG_ON(!ds || !ds->reg_width); |
862 | if (ds->tx_reg) | 862 | if (ds->tx_reg) |
863 | BUG_ON(direction != DMA_TO_DEVICE); | 863 | BUG_ON(direction != DMA_TO_DEVICE); |
864 | else | 864 | else |
865 | BUG_ON(direction != DMA_FROM_DEVICE); | 865 | BUG_ON(direction != DMA_FROM_DEVICE); |
866 | if (unlikely(!sg_len)) | 866 | if (unlikely(!sg_len)) |
867 | return NULL; | 867 | return NULL; |
868 | 868 | ||
869 | prev = first = NULL; | 869 | prev = first = NULL; |
870 | 870 | ||
871 | for_each_sg(sgl, sg, sg_len, i) { | 871 | for_each_sg(sgl, sg, sg_len, i) { |
872 | struct txx9dmac_desc *desc; | 872 | struct txx9dmac_desc *desc; |
873 | dma_addr_t mem; | 873 | dma_addr_t mem; |
874 | u32 sai, dai; | 874 | u32 sai, dai; |
875 | 875 | ||
876 | desc = txx9dmac_desc_get(dc); | 876 | desc = txx9dmac_desc_get(dc); |
877 | if (!desc) { | 877 | if (!desc) { |
878 | txx9dmac_desc_put(dc, first); | 878 | txx9dmac_desc_put(dc, first); |
879 | return NULL; | 879 | return NULL; |
880 | } | 880 | } |
881 | 881 | ||
882 | mem = sg_dma_address(sg); | 882 | mem = sg_dma_address(sg); |
883 | 883 | ||
884 | if (__is_dmac64(ddev)) { | 884 | if (__is_dmac64(ddev)) { |
885 | if (direction == DMA_TO_DEVICE) { | 885 | if (direction == DMA_TO_DEVICE) { |
886 | desc->hwdesc.SAR = mem; | 886 | desc->hwdesc.SAR = mem; |
887 | desc->hwdesc.DAR = ds->tx_reg; | 887 | desc->hwdesc.DAR = ds->tx_reg; |
888 | } else { | 888 | } else { |
889 | desc->hwdesc.SAR = ds->rx_reg; | 889 | desc->hwdesc.SAR = ds->rx_reg; |
890 | desc->hwdesc.DAR = mem; | 890 | desc->hwdesc.DAR = mem; |
891 | } | 891 | } |
892 | desc->hwdesc.CNTR = sg_dma_len(sg); | 892 | desc->hwdesc.CNTR = sg_dma_len(sg); |
893 | } else { | 893 | } else { |
894 | if (direction == DMA_TO_DEVICE) { | 894 | if (direction == DMA_TO_DEVICE) { |
895 | desc->hwdesc32.SAR = mem; | 895 | desc->hwdesc32.SAR = mem; |
896 | desc->hwdesc32.DAR = ds->tx_reg; | 896 | desc->hwdesc32.DAR = ds->tx_reg; |
897 | } else { | 897 | } else { |
898 | desc->hwdesc32.SAR = ds->rx_reg; | 898 | desc->hwdesc32.SAR = ds->rx_reg; |
899 | desc->hwdesc32.DAR = mem; | 899 | desc->hwdesc32.DAR = mem; |
900 | } | 900 | } |
901 | desc->hwdesc32.CNTR = sg_dma_len(sg); | 901 | desc->hwdesc32.CNTR = sg_dma_len(sg); |
902 | } | 902 | } |
903 | if (direction == DMA_TO_DEVICE) { | 903 | if (direction == DMA_TO_DEVICE) { |
904 | sai = ds->reg_width; | 904 | sai = ds->reg_width; |
905 | dai = 0; | 905 | dai = 0; |
906 | } else { | 906 | } else { |
907 | sai = 0; | 907 | sai = 0; |
908 | dai = ds->reg_width; | 908 | dai = ds->reg_width; |
909 | } | 909 | } |
910 | txx9dmac_desc_set_nosimple(ddev, desc, sai, dai, | 910 | txx9dmac_desc_set_nosimple(ddev, desc, sai, dai, |
911 | dc->ccr | TXX9_DMA_CCR_XFACT); | 911 | dc->ccr | TXX9_DMA_CCR_XFACT); |
912 | 912 | ||
913 | if (!first) { | 913 | if (!first) { |
914 | first = desc; | 914 | first = desc; |
915 | } else { | 915 | } else { |
916 | desc_write_CHAR(dc, prev, desc->txd.phys); | 916 | desc_write_CHAR(dc, prev, desc->txd.phys); |
917 | dma_sync_single_for_device(chan2parent(&dc->chan), | 917 | dma_sync_single_for_device(chan2parent(&dc->chan), |
918 | prev->txd.phys, | 918 | prev->txd.phys, |
919 | ddev->descsize, | 919 | ddev->descsize, |
920 | DMA_TO_DEVICE); | 920 | DMA_TO_DEVICE); |
921 | list_add_tail(&desc->desc_node, &first->tx_list); | 921 | list_add_tail(&desc->desc_node, &first->tx_list); |
922 | } | 922 | } |
923 | prev = desc; | 923 | prev = desc; |
924 | } | 924 | } |
925 | 925 | ||
926 | /* Trigger interrupt after last block */ | 926 | /* Trigger interrupt after last block */ |
927 | if (flags & DMA_PREP_INTERRUPT) | 927 | if (flags & DMA_PREP_INTERRUPT) |
928 | txx9dmac_desc_set_INTENT(ddev, prev); | 928 | txx9dmac_desc_set_INTENT(ddev, prev); |
929 | 929 | ||
930 | desc_write_CHAR(dc, prev, 0); | 930 | desc_write_CHAR(dc, prev, 0); |
931 | dma_sync_single_for_device(chan2parent(&dc->chan), | 931 | dma_sync_single_for_device(chan2parent(&dc->chan), |
932 | prev->txd.phys, ddev->descsize, | 932 | prev->txd.phys, ddev->descsize, |
933 | DMA_TO_DEVICE); | 933 | DMA_TO_DEVICE); |
934 | 934 | ||
935 | first->txd.flags = flags; | 935 | first->txd.flags = flags; |
936 | first->len = 0; | 936 | first->len = 0; |
937 | 937 | ||
938 | return &first->txd; | 938 | return &first->txd; |
939 | } | 939 | } |
940 | 940 | ||
941 | static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd) | 941 | static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
942 | unsigned long arg) | ||
942 | { | 943 | { |
943 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 944 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
944 | struct txx9dmac_desc *desc, *_desc; | 945 | struct txx9dmac_desc *desc, *_desc; |
945 | LIST_HEAD(list); | 946 | LIST_HEAD(list); |
946 | 947 | ||
947 | /* Only supports DMA_TERMINATE_ALL */ | 948 | /* Only supports DMA_TERMINATE_ALL */ |
948 | if (cmd != DMA_TERMINATE_ALL) | 949 | if (cmd != DMA_TERMINATE_ALL) |
949 | return -EINVAL; | 950 | return -EINVAL; |
950 | 951 | ||
951 | dev_vdbg(chan2dev(chan), "terminate_all\n"); | 952 | dev_vdbg(chan2dev(chan), "terminate_all\n"); |
952 | spin_lock_bh(&dc->lock); | 953 | spin_lock_bh(&dc->lock); |
953 | 954 | ||
954 | txx9dmac_reset_chan(dc); | 955 | txx9dmac_reset_chan(dc); |
955 | 956 | ||
956 | /* active_list entries will end up before queued entries */ | 957 | /* active_list entries will end up before queued entries */ |
957 | list_splice_init(&dc->queue, &list); | 958 | list_splice_init(&dc->queue, &list); |
958 | list_splice_init(&dc->active_list, &list); | 959 | list_splice_init(&dc->active_list, &list); |
959 | 960 | ||
960 | spin_unlock_bh(&dc->lock); | 961 | spin_unlock_bh(&dc->lock); |
961 | 962 | ||
962 | /* Flush all pending and queued descriptors */ | 963 | /* Flush all pending and queued descriptors */ |
963 | list_for_each_entry_safe(desc, _desc, &list, desc_node) | 964 | list_for_each_entry_safe(desc, _desc, &list, desc_node) |
964 | txx9dmac_descriptor_complete(dc, desc); | 965 | txx9dmac_descriptor_complete(dc, desc); |
965 | 966 | ||
966 | return 0; | 967 | return 0; |
967 | } | 968 | } |
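This function is the txx9dmac side of the prototype change visible in the diff above: device_control gains a third, unsigned long arg, which this driver ignores because DMA_TERMINATE_ALL carries no payload. A minimal sketch of a caller under the new three-argument prototype, assuming chan was obtained elsewhere (e.g. via dma_request_channel()):

	/* arg is unused by DMA_TERMINATE_ALL, so pass 0 */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);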
968 | 969 | ||
969 | static enum dma_status | 970 | static enum dma_status |
970 | txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | 971 | txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, |
971 | struct dma_tx_state *txstate) | 972 | struct dma_tx_state *txstate) |
972 | { | 973 | { |
973 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 974 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
974 | dma_cookie_t last_used; | 975 | dma_cookie_t last_used; |
975 | dma_cookie_t last_complete; | 976 | dma_cookie_t last_complete; |
976 | int ret; | 977 | int ret; |
977 | 978 | ||
978 | last_complete = dc->completed; | 979 | last_complete = dc->completed; |
979 | last_used = chan->cookie; | 980 | last_used = chan->cookie; |
980 | 981 | ||
981 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 982 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
982 | if (ret != DMA_SUCCESS) { | 983 | if (ret != DMA_SUCCESS) { |
983 | spin_lock_bh(&dc->lock); | 984 | spin_lock_bh(&dc->lock); |
984 | txx9dmac_scan_descriptors(dc); | 985 | txx9dmac_scan_descriptors(dc); |
985 | spin_unlock_bh(&dc->lock); | 986 | spin_unlock_bh(&dc->lock); |
986 | 987 | ||
987 | last_complete = dc->completed; | 988 | last_complete = dc->completed; |
988 | last_used = chan->cookie; | 989 | last_used = chan->cookie; |
989 | 990 | ||
990 | ret = dma_async_is_complete(cookie, last_complete, last_used); | 991 | ret = dma_async_is_complete(cookie, last_complete, last_used); |
991 | } | 992 | } |
992 | 993 | ||
993 | dma_set_tx_state(txstate, last_complete, last_used, 0); | 994 | dma_set_tx_state(txstate, last_complete, last_used, 0); |
994 | 995 | ||
995 | return ret; | 996 | return ret; |
996 | } | 997 | } |
997 | 998 | ||
998 | static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc, | 999 | static void txx9dmac_chain_dynamic(struct txx9dmac_chan *dc, |
999 | struct txx9dmac_desc *prev) | 1000 | struct txx9dmac_desc *prev) |
1000 | { | 1001 | { |
1001 | struct txx9dmac_dev *ddev = dc->ddev; | 1002 | struct txx9dmac_dev *ddev = dc->ddev; |
1002 | struct txx9dmac_desc *desc; | 1003 | struct txx9dmac_desc *desc; |
1003 | LIST_HEAD(list); | 1004 | LIST_HEAD(list); |
1004 | 1005 | ||
1005 | prev = txx9dmac_last_child(prev); | 1006 | prev = txx9dmac_last_child(prev); |
1006 | txx9dmac_dequeue(dc, &list); | 1007 | txx9dmac_dequeue(dc, &list); |
1007 | desc = list_entry(list.next, struct txx9dmac_desc, desc_node); | 1008 | desc = list_entry(list.next, struct txx9dmac_desc, desc_node); |
1008 | desc_write_CHAR(dc, prev, desc->txd.phys); | 1009 | desc_write_CHAR(dc, prev, desc->txd.phys); |
1009 | dma_sync_single_for_device(chan2parent(&dc->chan), | 1010 | dma_sync_single_for_device(chan2parent(&dc->chan), |
1010 | prev->txd.phys, ddev->descsize, | 1011 | prev->txd.phys, ddev->descsize, |
1011 | DMA_TO_DEVICE); | 1012 | DMA_TO_DEVICE); |
1012 | mmiowb(); | 1013 | mmiowb(); |
1013 | if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) && | 1014 | if (!(channel_readl(dc, CSR) & TXX9_DMA_CSR_CHNEN) && |
1014 | channel_read_CHAR(dc) == prev->txd.phys) | 1015 | channel_read_CHAR(dc) == prev->txd.phys) |
1015 | /* Restart chain DMA */ | 1016 | /* Restart chain DMA */ |
1016 | channel_write_CHAR(dc, desc->txd.phys); | 1017 | channel_write_CHAR(dc, desc->txd.phys); |
1017 | list_splice_tail(&list, &dc->active_list); | 1018 | list_splice_tail(&list, &dc->active_list); |
1018 | } | 1019 | } |
1019 | 1020 | ||
1020 | static void txx9dmac_issue_pending(struct dma_chan *chan) | 1021 | static void txx9dmac_issue_pending(struct dma_chan *chan) |
1021 | { | 1022 | { |
1022 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 1023 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
1023 | 1024 | ||
1024 | spin_lock_bh(&dc->lock); | 1025 | spin_lock_bh(&dc->lock); |
1025 | 1026 | ||
1026 | if (!list_empty(&dc->active_list)) | 1027 | if (!list_empty(&dc->active_list)) |
1027 | txx9dmac_scan_descriptors(dc); | 1028 | txx9dmac_scan_descriptors(dc); |
1028 | if (!list_empty(&dc->queue)) { | 1029 | if (!list_empty(&dc->queue)) { |
1029 | if (list_empty(&dc->active_list)) { | 1030 | if (list_empty(&dc->active_list)) { |
1030 | txx9dmac_dequeue(dc, &dc->active_list); | 1031 | txx9dmac_dequeue(dc, &dc->active_list); |
1031 | txx9dmac_dostart(dc, txx9dmac_first_active(dc)); | 1032 | txx9dmac_dostart(dc, txx9dmac_first_active(dc)); |
1032 | } else if (txx9_dma_have_SMPCHN()) { | 1033 | } else if (txx9_dma_have_SMPCHN()) { |
1033 | struct txx9dmac_desc *prev = txx9dmac_last_active(dc); | 1034 | struct txx9dmac_desc *prev = txx9dmac_last_active(dc); |
1034 | 1035 | ||
1035 | if (!(prev->txd.flags & DMA_PREP_INTERRUPT) || | 1036 | if (!(prev->txd.flags & DMA_PREP_INTERRUPT) || |
1036 | txx9dmac_chan_INTENT(dc)) | 1037 | txx9dmac_chan_INTENT(dc)) |
1037 | txx9dmac_chain_dynamic(dc, prev); | 1038 | txx9dmac_chain_dynamic(dc, prev); |
1038 | } | 1039 | } |
1039 | } | 1040 | } |
1040 | 1041 | ||
1041 | spin_unlock_bh(&dc->lock); | 1042 | spin_unlock_bh(&dc->lock); |
1042 | } | 1043 | } |
1043 | 1044 | ||
1044 | static int txx9dmac_alloc_chan_resources(struct dma_chan *chan) | 1045 | static int txx9dmac_alloc_chan_resources(struct dma_chan *chan) |
1045 | { | 1046 | { |
1046 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 1047 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
1047 | struct txx9dmac_slave *ds = chan->private; | 1048 | struct txx9dmac_slave *ds = chan->private; |
1048 | struct txx9dmac_desc *desc; | 1049 | struct txx9dmac_desc *desc; |
1049 | int i; | 1050 | int i; |
1050 | 1051 | ||
1051 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); | 1052 | dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); |
1052 | 1053 | ||
1053 | /* ASSERT: channel is idle */ | 1054 | /* ASSERT: channel is idle */ |
1054 | if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { | 1055 | if (channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT) { |
1055 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); | 1056 | dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); |
1056 | return -EIO; | 1057 | return -EIO; |
1057 | } | 1058 | } |
1058 | 1059 | ||
1059 | dc->completed = chan->cookie = 1; | 1060 | dc->completed = chan->cookie = 1; |
1060 | 1061 | ||
1061 | dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; | 1062 | dc->ccr = TXX9_DMA_CCR_IMMCHN | TXX9_DMA_CCR_INTENE | CCR_LE; |
1062 | txx9dmac_chan_set_SMPCHN(dc); | 1063 | txx9dmac_chan_set_SMPCHN(dc); |
1063 | if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN)) | 1064 | if (!txx9_dma_have_SMPCHN() || (dc->ccr & TXX9_DMA_CCR_SMPCHN)) |
1064 | dc->ccr |= TXX9_DMA_CCR_INTENC; | 1065 | dc->ccr |= TXX9_DMA_CCR_INTENC; |
1065 | if (chan->device->device_prep_dma_memcpy) { | 1066 | if (chan->device->device_prep_dma_memcpy) { |
1066 | if (ds) | 1067 | if (ds) |
1067 | return -EINVAL; | 1068 | return -EINVAL; |
1068 | dc->ccr |= TXX9_DMA_CCR_XFSZ_X8; | 1069 | dc->ccr |= TXX9_DMA_CCR_XFSZ_X8; |
1069 | } else { | 1070 | } else { |
1070 | if (!ds || | 1071 | if (!ds || |
1071 | (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg)) | 1072 | (ds->tx_reg && ds->rx_reg) || (!ds->tx_reg && !ds->rx_reg)) |
1072 | return -EINVAL; | 1073 | return -EINVAL; |
1073 | dc->ccr |= TXX9_DMA_CCR_EXTRQ | | 1074 | dc->ccr |= TXX9_DMA_CCR_EXTRQ | |
1074 | TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width)); | 1075 | TXX9_DMA_CCR_XFSZ(__ffs(ds->reg_width)); |
1075 | txx9dmac_chan_set_INTENT(dc); | 1076 | txx9dmac_chan_set_INTENT(dc); |
1076 | } | 1077 | } |
1077 | 1078 | ||
1078 | spin_lock_bh(&dc->lock); | 1079 | spin_lock_bh(&dc->lock); |
1079 | i = dc->descs_allocated; | 1080 | i = dc->descs_allocated; |
1080 | while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) { | 1081 | while (dc->descs_allocated < TXX9_DMA_INITIAL_DESC_COUNT) { |
1081 | spin_unlock_bh(&dc->lock); | 1082 | spin_unlock_bh(&dc->lock); |
1082 | 1083 | ||
1083 | desc = txx9dmac_desc_alloc(dc, GFP_KERNEL); | 1084 | desc = txx9dmac_desc_alloc(dc, GFP_KERNEL); |
1084 | if (!desc) { | 1085 | if (!desc) { |
1085 | dev_info(chan2dev(chan), | 1086 | dev_info(chan2dev(chan), |
1086 | "only allocated %d descriptors\n", i); | 1087 | "only allocated %d descriptors\n", i); |
1087 | spin_lock_bh(&dc->lock); | 1088 | spin_lock_bh(&dc->lock); |
1088 | break; | 1089 | break; |
1089 | } | 1090 | } |
1090 | txx9dmac_desc_put(dc, desc); | 1091 | txx9dmac_desc_put(dc, desc); |
1091 | 1092 | ||
1092 | spin_lock_bh(&dc->lock); | 1093 | spin_lock_bh(&dc->lock); |
1093 | i = ++dc->descs_allocated; | 1094 | i = ++dc->descs_allocated; |
1094 | } | 1095 | } |
1095 | spin_unlock_bh(&dc->lock); | 1096 | spin_unlock_bh(&dc->lock); |
1096 | 1097 | ||
1097 | dev_dbg(chan2dev(chan), | 1098 | dev_dbg(chan2dev(chan), |
1098 | "alloc_chan_resources allocated %d descriptors\n", i); | 1099 | "alloc_chan_resources allocated %d descriptors\n", i); |
1099 | 1100 | ||
1100 | return i; | 1101 | return i; |
1101 | } | 1102 | } |
1102 | 1103 | ||
1103 | static void txx9dmac_free_chan_resources(struct dma_chan *chan) | 1104 | static void txx9dmac_free_chan_resources(struct dma_chan *chan) |
1104 | { | 1105 | { |
1105 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); | 1106 | struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); |
1106 | struct txx9dmac_dev *ddev = dc->ddev; | 1107 | struct txx9dmac_dev *ddev = dc->ddev; |
1107 | struct txx9dmac_desc *desc, *_desc; | 1108 | struct txx9dmac_desc *desc, *_desc; |
1108 | LIST_HEAD(list); | 1109 | LIST_HEAD(list); |
1109 | 1110 | ||
1110 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", | 1111 | dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", |
1111 | dc->descs_allocated); | 1112 | dc->descs_allocated); |
1112 | 1113 | ||
1113 | /* ASSERT: channel is idle */ | 1114 | /* ASSERT: channel is idle */ |
1114 | BUG_ON(!list_empty(&dc->active_list)); | 1115 | BUG_ON(!list_empty(&dc->active_list)); |
1115 | BUG_ON(!list_empty(&dc->queue)); | 1116 | BUG_ON(!list_empty(&dc->queue)); |
1116 | BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT); | 1117 | BUG_ON(channel_readl(dc, CSR) & TXX9_DMA_CSR_XFACT); |
1117 | 1118 | ||
1118 | spin_lock_bh(&dc->lock); | 1119 | spin_lock_bh(&dc->lock); |
1119 | list_splice_init(&dc->free_list, &list); | 1120 | list_splice_init(&dc->free_list, &list); |
1120 | dc->descs_allocated = 0; | 1121 | dc->descs_allocated = 0; |
1121 | spin_unlock_bh(&dc->lock); | 1122 | spin_unlock_bh(&dc->lock); |
1122 | 1123 | ||
1123 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { | 1124 | list_for_each_entry_safe(desc, _desc, &list, desc_node) { |
1124 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); | 1125 | dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); |
1125 | dma_unmap_single(chan2parent(chan), desc->txd.phys, | 1126 | dma_unmap_single(chan2parent(chan), desc->txd.phys, |
1126 | ddev->descsize, DMA_TO_DEVICE); | 1127 | ddev->descsize, DMA_TO_DEVICE); |
1127 | kfree(desc); | 1128 | kfree(desc); |
1128 | } | 1129 | } |
1129 | 1130 | ||
1130 | dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); | 1131 | dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); |
1131 | } | 1132 | } |
1132 | 1133 | ||
1133 | /*----------------------------------------------------------------------*/ | 1134 | /*----------------------------------------------------------------------*/ |
1134 | 1135 | ||
1135 | static void txx9dmac_off(struct txx9dmac_dev *ddev) | 1136 | static void txx9dmac_off(struct txx9dmac_dev *ddev) |
1136 | { | 1137 | { |
1137 | dma_writel(ddev, MCR, 0); | 1138 | dma_writel(ddev, MCR, 0); |
1138 | mmiowb(); | 1139 | mmiowb(); |
1139 | } | 1140 | } |
1140 | 1141 | ||
1141 | static int __init txx9dmac_chan_probe(struct platform_device *pdev) | 1142 | static int __init txx9dmac_chan_probe(struct platform_device *pdev) |
1142 | { | 1143 | { |
1143 | struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data; | 1144 | struct txx9dmac_chan_platform_data *cpdata = pdev->dev.platform_data; |
1144 | struct platform_device *dmac_dev = cpdata->dmac_dev; | 1145 | struct platform_device *dmac_dev = cpdata->dmac_dev; |
1145 | struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data; | 1146 | struct txx9dmac_platform_data *pdata = dmac_dev->dev.platform_data; |
1146 | struct txx9dmac_chan *dc; | 1147 | struct txx9dmac_chan *dc; |
1147 | int err; | 1148 | int err; |
1148 | int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS; | 1149 | int ch = pdev->id % TXX9_DMA_MAX_NR_CHANNELS; |
1149 | int irq; | 1150 | int irq; |
1150 | 1151 | ||
1151 | dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); | 1152 | dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL); |
1152 | if (!dc) | 1153 | if (!dc) |
1153 | return -ENOMEM; | 1154 | return -ENOMEM; |
1154 | 1155 | ||
1155 | dc->dma.dev = &pdev->dev; | 1156 | dc->dma.dev = &pdev->dev; |
1156 | dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; | 1157 | dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; |
1157 | dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; | 1158 | dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; |
1158 | dc->dma.device_control = txx9dmac_control; | 1159 | dc->dma.device_control = txx9dmac_control; |
1159 | dc->dma.device_tx_status = txx9dmac_tx_status; | 1160 | dc->dma.device_tx_status = txx9dmac_tx_status; |
1160 | dc->dma.device_issue_pending = txx9dmac_issue_pending; | 1161 | dc->dma.device_issue_pending = txx9dmac_issue_pending; |
1161 | if (pdata && pdata->memcpy_chan == ch) { | 1162 | if (pdata && pdata->memcpy_chan == ch) { |
1162 | dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy; | 1163 | dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy; |
1163 | dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask); | 1164 | dma_cap_set(DMA_MEMCPY, dc->dma.cap_mask); |
1164 | } else { | 1165 | } else { |
1165 | dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg; | 1166 | dc->dma.device_prep_slave_sg = txx9dmac_prep_slave_sg; |
1166 | dma_cap_set(DMA_SLAVE, dc->dma.cap_mask); | 1167 | dma_cap_set(DMA_SLAVE, dc->dma.cap_mask); |
1167 | dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask); | 1168 | dma_cap_set(DMA_PRIVATE, dc->dma.cap_mask); |
1168 | } | 1169 | } |
1169 | 1170 | ||
1170 | INIT_LIST_HEAD(&dc->dma.channels); | 1171 | INIT_LIST_HEAD(&dc->dma.channels); |
1171 | dc->ddev = platform_get_drvdata(dmac_dev); | 1172 | dc->ddev = platform_get_drvdata(dmac_dev); |
1172 | if (dc->ddev->irq < 0) { | 1173 | if (dc->ddev->irq < 0) { |
1173 | irq = platform_get_irq(pdev, 0); | 1174 | irq = platform_get_irq(pdev, 0); |
1174 | if (irq < 0) | 1175 | if (irq < 0) |
1175 | return irq; | 1176 | return irq; |
1176 | tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet, | 1177 | tasklet_init(&dc->tasklet, txx9dmac_chan_tasklet, |
1177 | (unsigned long)dc); | 1178 | (unsigned long)dc); |
1178 | dc->irq = irq; | 1179 | dc->irq = irq; |
1179 | err = devm_request_irq(&pdev->dev, dc->irq, | 1180 | err = devm_request_irq(&pdev->dev, dc->irq, |
1180 | txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc); | 1181 | txx9dmac_chan_interrupt, 0, dev_name(&pdev->dev), dc); |
1181 | if (err) | 1182 | if (err) |
1182 | return err; | 1183 | return err; |
1183 | } else | 1184 | } else |
1184 | dc->irq = -1; | 1185 | dc->irq = -1; |
1185 | dc->ddev->chan[ch] = dc; | 1186 | dc->ddev->chan[ch] = dc; |
1186 | dc->chan.device = &dc->dma; | 1187 | dc->chan.device = &dc->dma; |
1187 | list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); | 1188 | list_add_tail(&dc->chan.device_node, &dc->chan.device->channels); |
1188 | dc->chan.cookie = dc->completed = 1; | 1189 | dc->chan.cookie = dc->completed = 1; |
1189 | 1190 | ||
1190 | if (is_dmac64(dc)) | 1191 | if (is_dmac64(dc)) |
1191 | dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; | 1192 | dc->ch_regs = &__txx9dmac_regs(dc->ddev)->CHAN[ch]; |
1192 | else | 1193 | else |
1193 | dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch]; | 1194 | dc->ch_regs = &__txx9dmac_regs32(dc->ddev)->CHAN[ch]; |
1194 | spin_lock_init(&dc->lock); | 1195 | spin_lock_init(&dc->lock); |
1195 | 1196 | ||
1196 | INIT_LIST_HEAD(&dc->active_list); | 1197 | INIT_LIST_HEAD(&dc->active_list); |
1197 | INIT_LIST_HEAD(&dc->queue); | 1198 | INIT_LIST_HEAD(&dc->queue); |
1198 | INIT_LIST_HEAD(&dc->free_list); | 1199 | INIT_LIST_HEAD(&dc->free_list); |
1199 | 1200 | ||
1200 | txx9dmac_reset_chan(dc); | 1201 | txx9dmac_reset_chan(dc); |
1201 | 1202 | ||
1202 | platform_set_drvdata(pdev, dc); | 1203 | platform_set_drvdata(pdev, dc); |
1203 | 1204 | ||
1204 | err = dma_async_device_register(&dc->dma); | 1205 | err = dma_async_device_register(&dc->dma); |
1205 | if (err) | 1206 | if (err) |
1206 | return err; | 1207 | return err; |
1207 | dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n", | 1208 | dev_dbg(&pdev->dev, "TXx9 DMA Channel (dma%d%s%s)\n", |
1208 | dc->dma.dev_id, | 1209 | dc->dma.dev_id, |
1209 | dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "", | 1210 | dma_has_cap(DMA_MEMCPY, dc->dma.cap_mask) ? " memcpy" : "", |
1210 | dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : ""); | 1211 | dma_has_cap(DMA_SLAVE, dc->dma.cap_mask) ? " slave" : ""); |
1211 | 1212 | ||
1212 | return 0; | 1213 | return 0; |
1213 | } | 1214 | } |
1214 | 1215 | ||
1215 | static int __exit txx9dmac_chan_remove(struct platform_device *pdev) | 1216 | static int __exit txx9dmac_chan_remove(struct platform_device *pdev) |
1216 | { | 1217 | { |
1217 | struct txx9dmac_chan *dc = platform_get_drvdata(pdev); | 1218 | struct txx9dmac_chan *dc = platform_get_drvdata(pdev); |
1218 | 1219 | ||
1219 | dma_async_device_unregister(&dc->dma); | 1220 | dma_async_device_unregister(&dc->dma); |
1220 | if (dc->irq >= 0) | 1221 | if (dc->irq >= 0) |
1221 | tasklet_kill(&dc->tasklet); | 1222 | tasklet_kill(&dc->tasklet); |
1222 | dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL; | 1223 | dc->ddev->chan[pdev->id % TXX9_DMA_MAX_NR_CHANNELS] = NULL; |
1223 | return 0; | 1224 | return 0; |
1224 | } | 1225 | } |
1225 | 1226 | ||
1226 | static int __init txx9dmac_probe(struct platform_device *pdev) | 1227 | static int __init txx9dmac_probe(struct platform_device *pdev) |
1227 | { | 1228 | { |
1228 | struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; | 1229 | struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; |
1229 | struct resource *io; | 1230 | struct resource *io; |
1230 | struct txx9dmac_dev *ddev; | 1231 | struct txx9dmac_dev *ddev; |
1231 | u32 mcr; | 1232 | u32 mcr; |
1232 | int err; | 1233 | int err; |
1233 | 1234 | ||
1234 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1235 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1235 | if (!io) | 1236 | if (!io) |
1236 | return -EINVAL; | 1237 | return -EINVAL; |
1237 | 1238 | ||
1238 | ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL); | 1239 | ddev = devm_kzalloc(&pdev->dev, sizeof(*ddev), GFP_KERNEL); |
1239 | if (!ddev) | 1240 | if (!ddev) |
1240 | return -ENOMEM; | 1241 | return -ENOMEM; |
1241 | 1242 | ||
1242 | if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), | 1243 | if (!devm_request_mem_region(&pdev->dev, io->start, resource_size(io), |
1243 | dev_name(&pdev->dev))) | 1244 | dev_name(&pdev->dev))) |
1244 | return -EBUSY; | 1245 | return -EBUSY; |
1245 | 1246 | ||
1246 | ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io)); | 1247 | ddev->regs = devm_ioremap(&pdev->dev, io->start, resource_size(io)); |
1247 | if (!ddev->regs) | 1248 | if (!ddev->regs) |
1248 | return -ENOMEM; | 1249 | return -ENOMEM; |
1249 | ddev->have_64bit_regs = pdata->have_64bit_regs; | 1250 | ddev->have_64bit_regs = pdata->have_64bit_regs; |
1250 | if (__is_dmac64(ddev)) | 1251 | if (__is_dmac64(ddev)) |
1251 | ddev->descsize = sizeof(struct txx9dmac_hwdesc); | 1252 | ddev->descsize = sizeof(struct txx9dmac_hwdesc); |
1252 | else | 1253 | else |
1253 | ddev->descsize = sizeof(struct txx9dmac_hwdesc32); | 1254 | ddev->descsize = sizeof(struct txx9dmac_hwdesc32); |
1254 | 1255 | ||
1255 | /* force dma off, just in case */ | 1256 | /* force dma off, just in case */ |
1256 | txx9dmac_off(ddev); | 1257 | txx9dmac_off(ddev); |
1257 | 1258 | ||
1258 | ddev->irq = platform_get_irq(pdev, 0); | 1259 | ddev->irq = platform_get_irq(pdev, 0); |
1259 | if (ddev->irq >= 0) { | 1260 | if (ddev->irq >= 0) { |
1260 | tasklet_init(&ddev->tasklet, txx9dmac_tasklet, | 1261 | tasklet_init(&ddev->tasklet, txx9dmac_tasklet, |
1261 | (unsigned long)ddev); | 1262 | (unsigned long)ddev); |
1262 | err = devm_request_irq(&pdev->dev, ddev->irq, | 1263 | err = devm_request_irq(&pdev->dev, ddev->irq, |
1263 | txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev); | 1264 | txx9dmac_interrupt, 0, dev_name(&pdev->dev), ddev); |
1264 | if (err) | 1265 | if (err) |
1265 | return err; | 1266 | return err; |
1266 | } | 1267 | } |
1267 | 1268 | ||
1268 | mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; | 1269 | mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; |
1269 | if (pdata && pdata->memcpy_chan >= 0) | 1270 | if (pdata && pdata->memcpy_chan >= 0) |
1270 | mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); | 1271 | mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); |
1271 | dma_writel(ddev, MCR, mcr); | 1272 | dma_writel(ddev, MCR, mcr); |
1272 | 1273 | ||
1273 | platform_set_drvdata(pdev, ddev); | 1274 | platform_set_drvdata(pdev, ddev); |
1274 | return 0; | 1275 | return 0; |
1275 | } | 1276 | } |
1276 | 1277 | ||
1277 | static int __exit txx9dmac_remove(struct platform_device *pdev) | 1278 | static int __exit txx9dmac_remove(struct platform_device *pdev) |
1278 | { | 1279 | { |
1279 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); | 1280 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); |
1280 | 1281 | ||
1281 | txx9dmac_off(ddev); | 1282 | txx9dmac_off(ddev); |
1282 | if (ddev->irq >= 0) | 1283 | if (ddev->irq >= 0) |
1283 | tasklet_kill(&ddev->tasklet); | 1284 | tasklet_kill(&ddev->tasklet); |
1284 | return 0; | 1285 | return 0; |
1285 | } | 1286 | } |
1286 | 1287 | ||
1287 | static void txx9dmac_shutdown(struct platform_device *pdev) | 1288 | static void txx9dmac_shutdown(struct platform_device *pdev) |
1288 | { | 1289 | { |
1289 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); | 1290 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); |
1290 | 1291 | ||
1291 | txx9dmac_off(ddev); | 1292 | txx9dmac_off(ddev); |
1292 | } | 1293 | } |
1293 | 1294 | ||
1294 | static int txx9dmac_suspend_noirq(struct device *dev) | 1295 | static int txx9dmac_suspend_noirq(struct device *dev) |
1295 | { | 1296 | { |
1296 | struct platform_device *pdev = to_platform_device(dev); | 1297 | struct platform_device *pdev = to_platform_device(dev); |
1297 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); | 1298 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); |
1298 | 1299 | ||
1299 | txx9dmac_off(ddev); | 1300 | txx9dmac_off(ddev); |
1300 | return 0; | 1301 | return 0; |
1301 | } | 1302 | } |
1302 | 1303 | ||
1303 | static int txx9dmac_resume_noirq(struct device *dev) | 1304 | static int txx9dmac_resume_noirq(struct device *dev) |
1304 | { | 1305 | { |
1305 | struct platform_device *pdev = to_platform_device(dev); | 1306 | struct platform_device *pdev = to_platform_device(dev); |
1306 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); | 1307 | struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); |
1307 | struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; | 1308 | struct txx9dmac_platform_data *pdata = pdev->dev.platform_data; |
1308 | u32 mcr; | 1309 | u32 mcr; |
1309 | 1310 | ||
1310 | mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; | 1311 | mcr = TXX9_DMA_MCR_MSTEN | MCR_LE; |
1311 | if (pdata && pdata->memcpy_chan >= 0) | 1312 | if (pdata && pdata->memcpy_chan >= 0) |
1312 | mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); | 1313 | mcr |= TXX9_DMA_MCR_FIFUM(pdata->memcpy_chan); |
1313 | dma_writel(ddev, MCR, mcr); | 1314 | dma_writel(ddev, MCR, mcr); |
1314 | return 0; | 1315 | return 0; |
1315 | 1316 | ||
1316 | } | 1317 | } |
1317 | 1318 | ||
1318 | static const struct dev_pm_ops txx9dmac_dev_pm_ops = { | 1319 | static const struct dev_pm_ops txx9dmac_dev_pm_ops = { |
1319 | .suspend_noirq = txx9dmac_suspend_noirq, | 1320 | .suspend_noirq = txx9dmac_suspend_noirq, |
1320 | .resume_noirq = txx9dmac_resume_noirq, | 1321 | .resume_noirq = txx9dmac_resume_noirq, |
1321 | }; | 1322 | }; |
1322 | 1323 | ||
1323 | static struct platform_driver txx9dmac_chan_driver = { | 1324 | static struct platform_driver txx9dmac_chan_driver = { |
1324 | .remove = __exit_p(txx9dmac_chan_remove), | 1325 | .remove = __exit_p(txx9dmac_chan_remove), |
1325 | .driver = { | 1326 | .driver = { |
1326 | .name = "txx9dmac-chan", | 1327 | .name = "txx9dmac-chan", |
1327 | }, | 1328 | }, |
1328 | }; | 1329 | }; |
1329 | 1330 | ||
1330 | static struct platform_driver txx9dmac_driver = { | 1331 | static struct platform_driver txx9dmac_driver = { |
1331 | .remove = __exit_p(txx9dmac_remove), | 1332 | .remove = __exit_p(txx9dmac_remove), |
1332 | .shutdown = txx9dmac_shutdown, | 1333 | .shutdown = txx9dmac_shutdown, |
1333 | .driver = { | 1334 | .driver = { |
1334 | .name = "txx9dmac", | 1335 | .name = "txx9dmac", |
1335 | .pm = &txx9dmac_dev_pm_ops, | 1336 | .pm = &txx9dmac_dev_pm_ops, |
1336 | }, | 1337 | }, |
1337 | }; | 1338 | }; |
1338 | 1339 | ||
1339 | static int __init txx9dmac_init(void) | 1340 | static int __init txx9dmac_init(void) |
1340 | { | 1341 | { |
1341 | int rc; | 1342 | int rc; |
1342 | 1343 | ||
1343 | rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe); | 1344 | rc = platform_driver_probe(&txx9dmac_driver, txx9dmac_probe); |
1344 | if (!rc) { | 1345 | if (!rc) { |
1345 | rc = platform_driver_probe(&txx9dmac_chan_driver, | 1346 | rc = platform_driver_probe(&txx9dmac_chan_driver, |
1346 | txx9dmac_chan_probe); | 1347 | txx9dmac_chan_probe); |
1347 | if (rc) | 1348 | if (rc) |
1348 | platform_driver_unregister(&txx9dmac_driver); | 1349 | platform_driver_unregister(&txx9dmac_driver); |
1349 | } | 1350 | } |
1350 | return rc; | 1351 | return rc; |
1351 | } | 1352 | } |
1352 | module_init(txx9dmac_init); | 1353 | module_init(txx9dmac_init); |
1353 | 1354 | ||
1354 | static void __exit txx9dmac_exit(void) | 1355 | static void __exit txx9dmac_exit(void) |
1355 | { | 1356 | { |
1356 | platform_driver_unregister(&txx9dmac_chan_driver); | 1357 | platform_driver_unregister(&txx9dmac_chan_driver); |
1357 | platform_driver_unregister(&txx9dmac_driver); | 1358 | platform_driver_unregister(&txx9dmac_driver); |
1358 | } | 1359 | } |
1359 | module_exit(txx9dmac_exit); | 1360 | module_exit(txx9dmac_exit); |
1360 | 1361 | ||
1361 | MODULE_LICENSE("GPL"); | 1362 | MODULE_LICENSE("GPL"); |
1362 | MODULE_DESCRIPTION("TXx9 DMA Controller driver"); | 1363 | MODULE_DESCRIPTION("TXx9 DMA Controller driver"); |
1363 | MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); | 1364 | MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); |
1364 | 1365 |
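Note on the conversion above: txx9dmac_control() now takes the extra unsigned long argument but ignores it, since DMA_TERMINATE_ALL carries no data. A minimal sketch of client-side usage under the new prototype follows; example_terminate(), example_configure(), DMA_EXAMPLE_CONFIG and struct example_cfg are hypothetical names used for illustration only and are not part of this commit.

	#include <linux/dmaengine.h>

	/* DMA_TERMINATE_ALL ignores the new argument, so callers pass 0,
	 * as the converted callers in this commit do. */
	static int example_terminate(struct dma_chan *chan)
	{
		return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	}

	/* A later command needing external data could pass a pointer through
	 * the new argument, akin to ioctl(); the command and the config
	 * struct here are hypothetical placeholders. */
	static int example_configure(struct dma_chan *chan,
				     struct example_cfg *cfg)
	{
		return chan->device->device_control(chan, DMA_EXAMPLE_CONFIG,
						    (unsigned long)cfg);
	}
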
drivers/mmc/host/atmel-mci.c
1 | /* | 1 | /* |
2 | * Atmel MultiMedia Card Interface driver | 2 | * Atmel MultiMedia Card Interface driver |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2008 Atmel Corporation | 4 | * Copyright (C) 2004-2008 Atmel Corporation |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/blkdev.h> | 10 | #include <linux/blkdev.h> |
11 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
12 | #include <linux/debugfs.h> | 12 | #include <linux/debugfs.h> |
13 | #include <linux/device.h> | 13 | #include <linux/device.h> |
14 | #include <linux/dmaengine.h> | 14 | #include <linux/dmaengine.h> |
15 | #include <linux/dma-mapping.h> | 15 | #include <linux/dma-mapping.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/ioport.h> | 20 | #include <linux/ioport.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
24 | #include <linux/seq_file.h> | 24 | #include <linux/seq_file.h> |
25 | #include <linux/stat.h> | 25 | #include <linux/stat.h> |
26 | 26 | ||
27 | #include <linux/mmc/host.h> | 27 | #include <linux/mmc/host.h> |
28 | 28 | ||
29 | #include <mach/atmel-mci.h> | 29 | #include <mach/atmel-mci.h> |
30 | #include <linux/atmel-mci.h> | 30 | #include <linux/atmel-mci.h> |
31 | 31 | ||
32 | #include <asm/io.h> | 32 | #include <asm/io.h> |
33 | #include <asm/unaligned.h> | 33 | #include <asm/unaligned.h> |
34 | 34 | ||
35 | #include <mach/cpu.h> | 35 | #include <mach/cpu.h> |
36 | #include <mach/board.h> | 36 | #include <mach/board.h> |
37 | 37 | ||
38 | #include "atmel-mci-regs.h" | 38 | #include "atmel-mci-regs.h" |
39 | 39 | ||
40 | #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) | 40 | #define ATMCI_DATA_ERROR_FLAGS (MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) |
41 | #define ATMCI_DMA_THRESHOLD 16 | 41 | #define ATMCI_DMA_THRESHOLD 16 |
42 | 42 | ||
43 | enum { | 43 | enum { |
44 | EVENT_CMD_COMPLETE = 0, | 44 | EVENT_CMD_COMPLETE = 0, |
45 | EVENT_XFER_COMPLETE, | 45 | EVENT_XFER_COMPLETE, |
46 | EVENT_DATA_COMPLETE, | 46 | EVENT_DATA_COMPLETE, |
47 | EVENT_DATA_ERROR, | 47 | EVENT_DATA_ERROR, |
48 | }; | 48 | }; |
49 | 49 | ||
50 | enum atmel_mci_state { | 50 | enum atmel_mci_state { |
51 | STATE_IDLE = 0, | 51 | STATE_IDLE = 0, |
52 | STATE_SENDING_CMD, | 52 | STATE_SENDING_CMD, |
53 | STATE_SENDING_DATA, | 53 | STATE_SENDING_DATA, |
54 | STATE_DATA_BUSY, | 54 | STATE_DATA_BUSY, |
55 | STATE_SENDING_STOP, | 55 | STATE_SENDING_STOP, |
56 | STATE_DATA_ERROR, | 56 | STATE_DATA_ERROR, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct atmel_mci_dma { | 59 | struct atmel_mci_dma { |
60 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 60 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
61 | struct dma_chan *chan; | 61 | struct dma_chan *chan; |
62 | struct dma_async_tx_descriptor *data_desc; | 62 | struct dma_async_tx_descriptor *data_desc; |
63 | #endif | 63 | #endif |
64 | }; | 64 | }; |
65 | 65 | ||
66 | /** | 66 | /** |
67 | * struct atmel_mci - MMC controller state shared between all slots | 67 | * struct atmel_mci - MMC controller state shared between all slots |
68 | * @lock: Spinlock protecting the queue and associated data. | 68 | * @lock: Spinlock protecting the queue and associated data. |
69 | * @regs: Pointer to MMIO registers. | 69 | * @regs: Pointer to MMIO registers. |
70 | * @sg: Scatterlist entry currently being processed by PIO code, if any. | 70 | * @sg: Scatterlist entry currently being processed by PIO code, if any. |
71 | * @pio_offset: Offset into the current scatterlist entry. | 71 | * @pio_offset: Offset into the current scatterlist entry. |
72 | * @cur_slot: The slot which is currently using the controller. | 72 | * @cur_slot: The slot which is currently using the controller. |
73 | * @mrq: The request currently being processed on @cur_slot, | 73 | * @mrq: The request currently being processed on @cur_slot, |
74 | * or NULL if the controller is idle. | 74 | * or NULL if the controller is idle. |
75 | * @cmd: The command currently being sent to the card, or NULL. | 75 | * @cmd: The command currently being sent to the card, or NULL. |
76 | * @data: The data currently being transferred, or NULL if no data | 76 | * @data: The data currently being transferred, or NULL if no data |
77 | * transfer is in progress. | 77 | * transfer is in progress. |
78 | * @dma: DMA client state. | 78 | * @dma: DMA client state. |
79 | * @data_chan: DMA channel being used for the current data transfer. | 79 | * @data_chan: DMA channel being used for the current data transfer. |
80 | * @cmd_status: Snapshot of SR taken upon completion of the current | 80 | * @cmd_status: Snapshot of SR taken upon completion of the current |
81 | * command. Only valid when EVENT_CMD_COMPLETE is pending. | 81 | * command. Only valid when EVENT_CMD_COMPLETE is pending. |
82 | * @data_status: Snapshot of SR taken upon completion of the current | 82 | * @data_status: Snapshot of SR taken upon completion of the current |
83 | * data transfer. Only valid when EVENT_DATA_COMPLETE or | 83 | * data transfer. Only valid when EVENT_DATA_COMPLETE or |
84 | * EVENT_DATA_ERROR is pending. | 84 | * EVENT_DATA_ERROR is pending. |
85 | * @stop_cmdr: Value to be loaded into CMDR when the stop command is | 85 | * @stop_cmdr: Value to be loaded into CMDR when the stop command is |
86 | * to be sent. | 86 | * to be sent. |
87 | * @tasklet: Tasklet running the request state machine. | 87 | * @tasklet: Tasklet running the request state machine. |
88 | * @pending_events: Bitmask of events flagged by the interrupt handler | 88 | * @pending_events: Bitmask of events flagged by the interrupt handler |
89 | * to be processed by the tasklet. | 89 | * to be processed by the tasklet. |
90 | * @completed_events: Bitmask of events which the state machine has | 90 | * @completed_events: Bitmask of events which the state machine has |
91 | * processed. | 91 | * processed. |
92 | * @state: Tasklet state. | 92 | * @state: Tasklet state. |
93 | * @queue: List of slots waiting for access to the controller. | 93 | * @queue: List of slots waiting for access to the controller. |
94 | * @need_clock_update: Update the clock rate before the next request. | 94 | * @need_clock_update: Update the clock rate before the next request. |
95 | * @need_reset: Reset controller before next request. | 95 | * @need_reset: Reset controller before next request. |
96 | * @mode_reg: Value of the MR register. | 96 | * @mode_reg: Value of the MR register. |
97 | * @cfg_reg: Value of the CFG register. | 97 | * @cfg_reg: Value of the CFG register. |
98 | * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus | 98 | * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus |
99 | * rate and timeout calculations. | 99 | * rate and timeout calculations. |
100 | * @mapbase: Physical address of the MMIO registers. | 100 | * @mapbase: Physical address of the MMIO registers. |
101 | * @mck: The peripheral bus clock hooked up to the MMC controller. | 101 | * @mck: The peripheral bus clock hooked up to the MMC controller. |
102 | * @pdev: Platform device associated with the MMC controller. | 102 | * @pdev: Platform device associated with the MMC controller. |
103 | * @slot: Slots sharing this MMC controller. | 103 | * @slot: Slots sharing this MMC controller. |
104 | * | 104 | * |
105 | * Locking | 105 | * Locking |
106 | * ======= | 106 | * ======= |
107 | * | 107 | * |
108 | * @lock is a softirq-safe spinlock protecting @queue as well as | 108 | * @lock is a softirq-safe spinlock protecting @queue as well as |
109 | * @cur_slot, @mrq and @state. These must always be updated | 109 | * @cur_slot, @mrq and @state. These must always be updated |
110 | * at the same time while holding @lock. | 110 | * at the same time while holding @lock. |
111 | * | 111 | * |
112 | * @lock also protects mode_reg and need_clock_update since these are | 112 | * @lock also protects mode_reg and need_clock_update since these are |
113 | * used to synchronize mode register updates with the queue | 113 | * used to synchronize mode register updates with the queue |
114 | * processing. | 114 | * processing. |
115 | * | 115 | * |
116 | * The @mrq field of struct atmel_mci_slot is also protected by @lock, | 116 | * The @mrq field of struct atmel_mci_slot is also protected by @lock, |
117 | * and must always be written at the same time as the slot is added to | 117 | * and must always be written at the same time as the slot is added to |
118 | * @queue. | 118 | * @queue. |
119 | * | 119 | * |
120 | * @pending_events and @completed_events are accessed using atomic bit | 120 | * @pending_events and @completed_events are accessed using atomic bit |
121 | * operations, so they don't need any locking. | 121 | * operations, so they don't need any locking. |
122 | * | 122 | * |
123 | * None of the fields touched by the interrupt handler need any | 123 | * None of the fields touched by the interrupt handler need any |
124 | * locking. However, ordering is important: Before EVENT_DATA_ERROR or | 124 | * locking. However, ordering is important: Before EVENT_DATA_ERROR or |
125 | * EVENT_DATA_COMPLETE is set in @pending_events, all data-related | 125 | * EVENT_DATA_COMPLETE is set in @pending_events, all data-related |
126 | * interrupts must be disabled and @data_status updated with a | 126 | * interrupts must be disabled and @data_status updated with a |
127 | * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the | 127 | * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the |
128 | * CMDRDY interrupt must be disabled and @cmd_status updated with a | 128 | * CMDRDY interrupt must be disabled and @cmd_status updated with a |
129 | * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the | 129 | * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the |
130 | * bytes_xfered field of @data must be written. This is ensured by | 130 | * bytes_xfered field of @data must be written. This is ensured by |
131 | * using barriers. | 131 | * using barriers. |
132 | */ | 132 | */ |
133 | struct atmel_mci { | 133 | struct atmel_mci { |
134 | spinlock_t lock; | 134 | spinlock_t lock; |
135 | void __iomem *regs; | 135 | void __iomem *regs; |
136 | 136 | ||
137 | struct scatterlist *sg; | 137 | struct scatterlist *sg; |
138 | unsigned int pio_offset; | 138 | unsigned int pio_offset; |
139 | 139 | ||
140 | struct atmel_mci_slot *cur_slot; | 140 | struct atmel_mci_slot *cur_slot; |
141 | struct mmc_request *mrq; | 141 | struct mmc_request *mrq; |
142 | struct mmc_command *cmd; | 142 | struct mmc_command *cmd; |
143 | struct mmc_data *data; | 143 | struct mmc_data *data; |
144 | 144 | ||
145 | struct atmel_mci_dma dma; | 145 | struct atmel_mci_dma dma; |
146 | struct dma_chan *data_chan; | 146 | struct dma_chan *data_chan; |
147 | 147 | ||
148 | u32 cmd_status; | 148 | u32 cmd_status; |
149 | u32 data_status; | 149 | u32 data_status; |
150 | u32 stop_cmdr; | 150 | u32 stop_cmdr; |
151 | 151 | ||
152 | struct tasklet_struct tasklet; | 152 | struct tasklet_struct tasklet; |
153 | unsigned long pending_events; | 153 | unsigned long pending_events; |
154 | unsigned long completed_events; | 154 | unsigned long completed_events; |
155 | enum atmel_mci_state state; | 155 | enum atmel_mci_state state; |
156 | struct list_head queue; | 156 | struct list_head queue; |
157 | 157 | ||
158 | bool need_clock_update; | 158 | bool need_clock_update; |
159 | bool need_reset; | 159 | bool need_reset; |
160 | u32 mode_reg; | 160 | u32 mode_reg; |
161 | u32 cfg_reg; | 161 | u32 cfg_reg; |
162 | unsigned long bus_hz; | 162 | unsigned long bus_hz; |
163 | unsigned long mapbase; | 163 | unsigned long mapbase; |
164 | struct clk *mck; | 164 | struct clk *mck; |
165 | struct platform_device *pdev; | 165 | struct platform_device *pdev; |
166 | 166 | ||
167 | struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS]; | 167 | struct atmel_mci_slot *slot[ATMEL_MCI_MAX_NR_SLOTS]; |
168 | }; | 168 | }; |
169 | 169 | ||
170 | /** | 170 | /** |
171 | * struct atmel_mci_slot - MMC slot state | 171 | * struct atmel_mci_slot - MMC slot state |
172 | * @mmc: The mmc_host representing this slot. | 172 | * @mmc: The mmc_host representing this slot. |
173 | * @host: The MMC controller this slot is using. | 173 | * @host: The MMC controller this slot is using. |
174 | * @sdc_reg: Value of SDCR to be written before using this slot. | 174 | * @sdc_reg: Value of SDCR to be written before using this slot. |
175 | * @mrq: mmc_request currently being processed or waiting to be | 175 | * @mrq: mmc_request currently being processed or waiting to be |
176 | * processed, or NULL when the slot is idle. | 176 | * processed, or NULL when the slot is idle. |
177 | * @queue_node: List node for placing this node in the @queue list of | 177 | * @queue_node: List node for placing this node in the @queue list of |
178 | * &struct atmel_mci. | 178 | * &struct atmel_mci. |
179 | * @clock: Clock rate configured by set_ios(). Protected by host->lock. | 179 | * @clock: Clock rate configured by set_ios(). Protected by host->lock. |
180 | * @flags: Random state bits associated with the slot. | 180 | * @flags: Random state bits associated with the slot. |
181 | * @detect_pin: GPIO pin used for card detection, or negative if not | 181 | * @detect_pin: GPIO pin used for card detection, or negative if not |
182 | * available. | 182 | * available. |
183 | * @wp_pin: GPIO pin used for card write protect sensing, or negative | 183 | * @wp_pin: GPIO pin used for card write protect sensing, or negative |
184 | * if not available. | 184 | * if not available. |
185 | * @detect_is_active_high: The state of the detect pin when it is active. | 185 | * @detect_is_active_high: The state of the detect pin when it is active. |
186 | * @detect_timer: Timer used for debouncing @detect_pin interrupts. | 186 | * @detect_timer: Timer used for debouncing @detect_pin interrupts. |
187 | */ | 187 | */ |
188 | struct atmel_mci_slot { | 188 | struct atmel_mci_slot { |
189 | struct mmc_host *mmc; | 189 | struct mmc_host *mmc; |
190 | struct atmel_mci *host; | 190 | struct atmel_mci *host; |
191 | 191 | ||
192 | u32 sdc_reg; | 192 | u32 sdc_reg; |
193 | 193 | ||
194 | struct mmc_request *mrq; | 194 | struct mmc_request *mrq; |
195 | struct list_head queue_node; | 195 | struct list_head queue_node; |
196 | 196 | ||
197 | unsigned int clock; | 197 | unsigned int clock; |
198 | unsigned long flags; | 198 | unsigned long flags; |
199 | #define ATMCI_CARD_PRESENT 0 | 199 | #define ATMCI_CARD_PRESENT 0 |
200 | #define ATMCI_CARD_NEED_INIT 1 | 200 | #define ATMCI_CARD_NEED_INIT 1 |
201 | #define ATMCI_SHUTDOWN 2 | 201 | #define ATMCI_SHUTDOWN 2 |
202 | 202 | ||
203 | int detect_pin; | 203 | int detect_pin; |
204 | int wp_pin; | 204 | int wp_pin; |
205 | bool detect_is_active_high; | 205 | bool detect_is_active_high; |
206 | 206 | ||
207 | struct timer_list detect_timer; | 207 | struct timer_list detect_timer; |
208 | }; | 208 | }; |
209 | 209 | ||
210 | #define atmci_test_and_clear_pending(host, event) \ | 210 | #define atmci_test_and_clear_pending(host, event) \ |
211 | test_and_clear_bit(event, &host->pending_events) | 211 | test_and_clear_bit(event, &host->pending_events) |
212 | #define atmci_set_completed(host, event) \ | 212 | #define atmci_set_completed(host, event) \ |
213 | set_bit(event, &host->completed_events) | 213 | set_bit(event, &host->completed_events) |
214 | #define atmci_set_pending(host, event) \ | 214 | #define atmci_set_pending(host, event) \ |
215 | set_bit(event, &host->pending_events) | 215 | set_bit(event, &host->pending_events) |
216 | 216 | ||
217 | /* | 217 | /* |
218 | * Enable or disable features/registers based on | 218 | * Enable or disable features/registers based on |
219 | * whether the processor supports them | 219 | * whether the processor supports them |
220 | */ | 220 | */ |
221 | static bool mci_has_rwproof(void) | 221 | static bool mci_has_rwproof(void) |
222 | { | 222 | { |
223 | if (cpu_is_at91sam9261() || cpu_is_at91rm9200()) | 223 | if (cpu_is_at91sam9261() || cpu_is_at91rm9200()) |
224 | return false; | 224 | return false; |
225 | else | 225 | else |
226 | return true; | 226 | return true; |
227 | } | 227 | } |
228 | 228 | ||
229 | /* | 229 | /* |
230 | * The new MCI2 module isn't 100% compatible with the old MCI module, | 230 | * The new MCI2 module isn't 100% compatible with the old MCI module, |
231 | * and it has a few nice features which we want to use... | 231 | * and it has a few nice features which we want to use... |
232 | */ | 232 | */ |
233 | static inline bool atmci_is_mci2(void) | 233 | static inline bool atmci_is_mci2(void) |
234 | { | 234 | { |
235 | if (cpu_is_at91sam9g45()) | 235 | if (cpu_is_at91sam9g45()) |
236 | return true; | 236 | return true; |
237 | 237 | ||
238 | return false; | 238 | return false; |
239 | } | 239 | } |
240 | 240 | ||
241 | 241 | ||
242 | /* | 242 | /* |
243 | * The debugfs stuff below is mostly optimized away when | 243 | * The debugfs stuff below is mostly optimized away when |
244 | * CONFIG_DEBUG_FS is not set. | 244 | * CONFIG_DEBUG_FS is not set. |
245 | */ | 245 | */ |
246 | static int atmci_req_show(struct seq_file *s, void *v) | 246 | static int atmci_req_show(struct seq_file *s, void *v) |
247 | { | 247 | { |
248 | struct atmel_mci_slot *slot = s->private; | 248 | struct atmel_mci_slot *slot = s->private; |
249 | struct mmc_request *mrq; | 249 | struct mmc_request *mrq; |
250 | struct mmc_command *cmd; | 250 | struct mmc_command *cmd; |
251 | struct mmc_command *stop; | 251 | struct mmc_command *stop; |
252 | struct mmc_data *data; | 252 | struct mmc_data *data; |
253 | 253 | ||
254 | /* Make sure we get a consistent snapshot */ | 254 | /* Make sure we get a consistent snapshot */ |
255 | spin_lock_bh(&slot->host->lock); | 255 | spin_lock_bh(&slot->host->lock); |
256 | mrq = slot->mrq; | 256 | mrq = slot->mrq; |
257 | 257 | ||
258 | if (mrq) { | 258 | if (mrq) { |
259 | cmd = mrq->cmd; | 259 | cmd = mrq->cmd; |
260 | data = mrq->data; | 260 | data = mrq->data; |
261 | stop = mrq->stop; | 261 | stop = mrq->stop; |
262 | 262 | ||
263 | if (cmd) | 263 | if (cmd) |
264 | seq_printf(s, | 264 | seq_printf(s, |
265 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", | 265 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", |
266 | cmd->opcode, cmd->arg, cmd->flags, | 266 | cmd->opcode, cmd->arg, cmd->flags, |
267 | cmd->resp[0], cmd->resp[1], cmd->resp[2], | 267 | cmd->resp[0], cmd->resp[1], cmd->resp[2], |
268 | cmd->resp[3], cmd->error); | 268 | cmd->resp[3], cmd->error); |
269 | if (data) | 269 | if (data) |
270 | seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", | 270 | seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", |
271 | data->bytes_xfered, data->blocks, | 271 | data->bytes_xfered, data->blocks, |
272 | data->blksz, data->flags, data->error); | 272 | data->blksz, data->flags, data->error); |
273 | if (stop) | 273 | if (stop) |
274 | seq_printf(s, | 274 | seq_printf(s, |
275 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", | 275 | "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", |
276 | stop->opcode, stop->arg, stop->flags, | 276 | stop->opcode, stop->arg, stop->flags, |
277 | stop->resp[0], stop->resp[1], stop->resp[2], | 277 | stop->resp[0], stop->resp[1], stop->resp[2], |
278 | stop->resp[3], stop->error); | 278 | stop->resp[3], stop->error); |
279 | } | 279 | } |
280 | 280 | ||
281 | spin_unlock_bh(&slot->host->lock); | 281 | spin_unlock_bh(&slot->host->lock); |
282 | 282 | ||
283 | return 0; | 283 | return 0; |
284 | } | 284 | } |
285 | 285 | ||
286 | static int atmci_req_open(struct inode *inode, struct file *file) | 286 | static int atmci_req_open(struct inode *inode, struct file *file) |
287 | { | 287 | { |
288 | return single_open(file, atmci_req_show, inode->i_private); | 288 | return single_open(file, atmci_req_show, inode->i_private); |
289 | } | 289 | } |
290 | 290 | ||
291 | static const struct file_operations atmci_req_fops = { | 291 | static const struct file_operations atmci_req_fops = { |
292 | .owner = THIS_MODULE, | 292 | .owner = THIS_MODULE, |
293 | .open = atmci_req_open, | 293 | .open = atmci_req_open, |
294 | .read = seq_read, | 294 | .read = seq_read, |
295 | .llseek = seq_lseek, | 295 | .llseek = seq_lseek, |
296 | .release = single_release, | 296 | .release = single_release, |
297 | }; | 297 | }; |
298 | 298 | ||
299 | static void atmci_show_status_reg(struct seq_file *s, | 299 | static void atmci_show_status_reg(struct seq_file *s, |
300 | const char *regname, u32 value) | 300 | const char *regname, u32 value) |
301 | { | 301 | { |
302 | static const char *sr_bit[] = { | 302 | static const char *sr_bit[] = { |
303 | [0] = "CMDRDY", | 303 | [0] = "CMDRDY", |
304 | [1] = "RXRDY", | 304 | [1] = "RXRDY", |
305 | [2] = "TXRDY", | 305 | [2] = "TXRDY", |
306 | [3] = "BLKE", | 306 | [3] = "BLKE", |
307 | [4] = "DTIP", | 307 | [4] = "DTIP", |
308 | [5] = "NOTBUSY", | 308 | [5] = "NOTBUSY", |
309 | [6] = "ENDRX", | 309 | [6] = "ENDRX", |
310 | [7] = "ENDTX", | 310 | [7] = "ENDTX", |
311 | [8] = "SDIOIRQA", | 311 | [8] = "SDIOIRQA", |
312 | [9] = "SDIOIRQB", | 312 | [9] = "SDIOIRQB", |
313 | [12] = "SDIOWAIT", | 313 | [12] = "SDIOWAIT", |
314 | [14] = "RXBUFF", | 314 | [14] = "RXBUFF", |
315 | [15] = "TXBUFE", | 315 | [15] = "TXBUFE", |
316 | [16] = "RINDE", | 316 | [16] = "RINDE", |
317 | [17] = "RDIRE", | 317 | [17] = "RDIRE", |
318 | [18] = "RCRCE", | 318 | [18] = "RCRCE", |
319 | [19] = "RENDE", | 319 | [19] = "RENDE", |
320 | [20] = "RTOE", | 320 | [20] = "RTOE", |
321 | [21] = "DCRCE", | 321 | [21] = "DCRCE", |
322 | [22] = "DTOE", | 322 | [22] = "DTOE", |
323 | [23] = "CSTOE", | 323 | [23] = "CSTOE", |
324 | [24] = "BLKOVRE", | 324 | [24] = "BLKOVRE", |
325 | [25] = "DMADONE", | 325 | [25] = "DMADONE", |
326 | [26] = "FIFOEMPTY", | 326 | [26] = "FIFOEMPTY", |
327 | [27] = "XFRDONE", | 327 | [27] = "XFRDONE", |
328 | [30] = "OVRE", | 328 | [30] = "OVRE", |
329 | [31] = "UNRE", | 329 | [31] = "UNRE", |
330 | }; | 330 | }; |
331 | unsigned int i; | 331 | unsigned int i; |
332 | 332 | ||
333 | seq_printf(s, "%s:\t0x%08x", regname, value); | 333 | seq_printf(s, "%s:\t0x%08x", regname, value); |
334 | for (i = 0; i < ARRAY_SIZE(sr_bit); i++) { | 334 | for (i = 0; i < ARRAY_SIZE(sr_bit); i++) { |
335 | if (value & (1 << i)) { | 335 | if (value & (1 << i)) { |
336 | if (sr_bit[i]) | 336 | if (sr_bit[i]) |
337 | seq_printf(s, " %s", sr_bit[i]); | 337 | seq_printf(s, " %s", sr_bit[i]); |
338 | else | 338 | else |
339 | seq_puts(s, " UNKNOWN"); | 339 | seq_puts(s, " UNKNOWN"); |
340 | } | 340 | } |
341 | } | 341 | } |
342 | seq_putc(s, '\n'); | 342 | seq_putc(s, '\n'); |
343 | } | 343 | } |
344 | 344 | ||
345 | static int atmci_regs_show(struct seq_file *s, void *v) | 345 | static int atmci_regs_show(struct seq_file *s, void *v) |
346 | { | 346 | { |
347 | struct atmel_mci *host = s->private; | 347 | struct atmel_mci *host = s->private; |
348 | u32 *buf; | 348 | u32 *buf; |
349 | 349 | ||
350 | buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL); | 350 | buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL); |
351 | if (!buf) | 351 | if (!buf) |
352 | return -ENOMEM; | 352 | return -ENOMEM; |
353 | 353 | ||
354 | /* | 354 | /* |
355 | * Grab a more or less consistent snapshot. Note that we're | 355 | * Grab a more or less consistent snapshot. Note that we're |
356 | * not disabling interrupts, so IMR and SR may not be | 356 | * not disabling interrupts, so IMR and SR may not be |
357 | * consistent. | 357 | * consistent. |
358 | */ | 358 | */ |
359 | spin_lock_bh(&host->lock); | 359 | spin_lock_bh(&host->lock); |
360 | clk_enable(host->mck); | 360 | clk_enable(host->mck); |
361 | memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); | 361 | memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); |
362 | clk_disable(host->mck); | 362 | clk_disable(host->mck); |
363 | spin_unlock_bh(&host->lock); | 363 | spin_unlock_bh(&host->lock); |
364 | 364 | ||
365 | seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", | 365 | seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", |
366 | buf[MCI_MR / 4], | 366 | buf[MCI_MR / 4], |
367 | buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "", | 367 | buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "", |
368 | buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "", | 368 | buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "", |
369 | buf[MCI_MR / 4] & 0xff); | 369 | buf[MCI_MR / 4] & 0xff); |
370 | seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]); | 370 | seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]); |
371 | seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]); | 371 | seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]); |
372 | seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]); | 372 | seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]); |
373 | seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n", | 373 | seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n", |
374 | buf[MCI_BLKR / 4], | 374 | buf[MCI_BLKR / 4], |
375 | buf[MCI_BLKR / 4] & 0xffff, | 375 | buf[MCI_BLKR / 4] & 0xffff, |
376 | (buf[MCI_BLKR / 4] >> 16) & 0xffff); | 376 | (buf[MCI_BLKR / 4] >> 16) & 0xffff); |
377 | if (atmci_is_mci2()) | 377 | if (atmci_is_mci2()) |
378 | seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]); | 378 | seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]); |
379 | 379 | ||
380 | /* Don't read RSPR and RDR; it will consume the data there */ | 380 | /* Don't read RSPR and RDR; it will consume the data there */ |
381 | 381 | ||
382 | atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); | 382 | atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); |
383 | atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); | 383 | atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); |
384 | 384 | ||
385 | if (atmci_is_mci2()) { | 385 | if (atmci_is_mci2()) { |
386 | u32 val; | 386 | u32 val; |
387 | 387 | ||
388 | val = buf[MCI_DMA / 4]; | 388 | val = buf[MCI_DMA / 4]; |
389 | seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n", | 389 | seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n", |
390 | val, val & 3, | 390 | val, val & 3, |
391 | ((val >> 4) & 3) ? | 391 | ((val >> 4) & 3) ? |
392 | 1 << (((val >> 4) & 3) + 1) : 1, | 392 | 1 << (((val >> 4) & 3) + 1) : 1, |
393 | val & MCI_DMAEN ? " DMAEN" : ""); | 393 | val & MCI_DMAEN ? " DMAEN" : ""); |
394 | 394 | ||
395 | val = buf[MCI_CFG / 4]; | 395 | val = buf[MCI_CFG / 4]; |
396 | seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n", | 396 | seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n", |
397 | val, | 397 | val, |
398 | val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", | 398 | val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", |
399 | val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", | 399 | val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", |
400 | val & MCI_CFG_HSMODE ? " HSMODE" : "", | 400 | val & MCI_CFG_HSMODE ? " HSMODE" : "", |
401 | val & MCI_CFG_LSYNC ? " LSYNC" : ""); | 401 | val & MCI_CFG_LSYNC ? " LSYNC" : ""); |
402 | } | 402 | } |
403 | 403 | ||
404 | kfree(buf); | 404 | kfree(buf); |
405 | 405 | ||
406 | return 0; | 406 | return 0; |
407 | } | 407 | } |
408 | 408 | ||
409 | static int atmci_regs_open(struct inode *inode, struct file *file) | 409 | static int atmci_regs_open(struct inode *inode, struct file *file) |
410 | { | 410 | { |
411 | return single_open(file, atmci_regs_show, inode->i_private); | 411 | return single_open(file, atmci_regs_show, inode->i_private); |
412 | } | 412 | } |
413 | 413 | ||
414 | static const struct file_operations atmci_regs_fops = { | 414 | static const struct file_operations atmci_regs_fops = { |
415 | .owner = THIS_MODULE, | 415 | .owner = THIS_MODULE, |
416 | .open = atmci_regs_open, | 416 | .open = atmci_regs_open, |
417 | .read = seq_read, | 417 | .read = seq_read, |
418 | .llseek = seq_lseek, | 418 | .llseek = seq_lseek, |
419 | .release = single_release, | 419 | .release = single_release, |
420 | }; | 420 | }; |
421 | 421 | ||
422 | static void atmci_init_debugfs(struct atmel_mci_slot *slot) | 422 | static void atmci_init_debugfs(struct atmel_mci_slot *slot) |
423 | { | 423 | { |
424 | struct mmc_host *mmc = slot->mmc; | 424 | struct mmc_host *mmc = slot->mmc; |
425 | struct atmel_mci *host = slot->host; | 425 | struct atmel_mci *host = slot->host; |
426 | struct dentry *root; | 426 | struct dentry *root; |
427 | struct dentry *node; | 427 | struct dentry *node; |
428 | 428 | ||
429 | root = mmc->debugfs_root; | 429 | root = mmc->debugfs_root; |
430 | if (!root) | 430 | if (!root) |
431 | return; | 431 | return; |
432 | 432 | ||
433 | node = debugfs_create_file("regs", S_IRUSR, root, host, | 433 | node = debugfs_create_file("regs", S_IRUSR, root, host, |
434 | &atmci_regs_fops); | 434 | &atmci_regs_fops); |
435 | if (IS_ERR(node)) | 435 | if (IS_ERR(node)) |
436 | return; | 436 | return; |
437 | if (!node) | 437 | if (!node) |
438 | goto err; | 438 | goto err; |
439 | 439 | ||
440 | node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops); | 440 | node = debugfs_create_file("req", S_IRUSR, root, slot, &atmci_req_fops); |
441 | if (!node) | 441 | if (!node) |
442 | goto err; | 442 | goto err; |
443 | 443 | ||
444 | node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); | 444 | node = debugfs_create_u32("state", S_IRUSR, root, (u32 *)&host->state); |
445 | if (!node) | 445 | if (!node) |
446 | goto err; | 446 | goto err; |
447 | 447 | ||
448 | node = debugfs_create_x32("pending_events", S_IRUSR, root, | 448 | node = debugfs_create_x32("pending_events", S_IRUSR, root, |
449 | (u32 *)&host->pending_events); | 449 | (u32 *)&host->pending_events); |
450 | if (!node) | 450 | if (!node) |
451 | goto err; | 451 | goto err; |
452 | 452 | ||
453 | node = debugfs_create_x32("completed_events", S_IRUSR, root, | 453 | node = debugfs_create_x32("completed_events", S_IRUSR, root, |
454 | (u32 *)&host->completed_events); | 454 | (u32 *)&host->completed_events); |
455 | if (!node) | 455 | if (!node) |
456 | goto err; | 456 | goto err; |
457 | 457 | ||
458 | return; | 458 | return; |
459 | 459 | ||
460 | err: | 460 | err: |
461 | dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); | 461 | dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); |
462 | } | 462 | } |
463 | 463 | ||
464 | static inline unsigned int ns_to_clocks(struct atmel_mci *host, | 464 | static inline unsigned int ns_to_clocks(struct atmel_mci *host, |
465 | unsigned int ns) | 465 | unsigned int ns) |
466 | { | 466 | { |
467 | return (ns * (host->bus_hz / 1000000) + 999) / 1000; | 467 | return (ns * (host->bus_hz / 1000000) + 999) / 1000; |
468 | } | 468 | } |
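Annotation: ns_to_clocks() above is a ceiling conversion from nanoseconds to bus clock cycles, cycles = ceil(ns * (bus_hz / 1000000) / 1000). Worked example: with bus_hz = 50 MHz, a 100000 ns (100 us) timeout becomes (100000 * 50 + 999) / 1000 = 5000 cycles.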
469 | 469 | ||
470 | static void atmci_set_timeout(struct atmel_mci *host, | 470 | static void atmci_set_timeout(struct atmel_mci *host, |
471 | struct atmel_mci_slot *slot, struct mmc_data *data) | 471 | struct atmel_mci_slot *slot, struct mmc_data *data) |
472 | { | 472 | { |
473 | static unsigned dtomul_to_shift[] = { | 473 | static unsigned dtomul_to_shift[] = { |
474 | 0, 4, 7, 8, 10, 12, 16, 20 | 474 | 0, 4, 7, 8, 10, 12, 16, 20 |
475 | }; | 475 | }; |
476 | unsigned timeout; | 476 | unsigned timeout; |
477 | unsigned dtocyc; | 477 | unsigned dtocyc; |
478 | unsigned dtomul; | 478 | unsigned dtomul; |
479 | 479 | ||
480 | timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks; | 480 | timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks; |
481 | 481 | ||
482 | for (dtomul = 0; dtomul < 8; dtomul++) { | 482 | for (dtomul = 0; dtomul < 8; dtomul++) { |
483 | unsigned shift = dtomul_to_shift[dtomul]; | 483 | unsigned shift = dtomul_to_shift[dtomul]; |
484 | dtocyc = (timeout + (1 << shift) - 1) >> shift; | 484 | dtocyc = (timeout + (1 << shift) - 1) >> shift; |
485 | if (dtocyc < 15) | 485 | if (dtocyc < 15) |
486 | break; | 486 | break; |
487 | } | 487 | } |
488 | 488 | ||
489 | if (dtomul >= 8) { | 489 | if (dtomul >= 8) { |
490 | dtomul = 7; | 490 | dtomul = 7; |
491 | dtocyc = 15; | 491 | dtocyc = 15; |
492 | } | 492 | } |
493 | 493 | ||
494 | dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n", | 494 | dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n", |
495 | dtocyc << dtomul_to_shift[dtomul]); | 495 | dtocyc << dtomul_to_shift[dtomul]); |
496 | mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); | 496 | mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); |
497 | } | 497 | } |
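Annotation: worked example for the multiplier selection loop above, with timeout = 5000 cycles: dtomul values 0..3 (shifts 0, 4, 7, 8) give dtocyc of 5000, 313, 40 and 20, all of which are >= 15; dtomul = 4 (shift 10) gives dtocyc = ceil(5000 / 1024) = 5, so the loop stops there. The value programmed into DTOR then corresponds to 5 << 10 = 5120 cycles, at or above the requested timeout thanks to the ceiling division.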
498 | 498 | ||
499 | /* | 499 | /* |
500 | * Return mask with command flags to be enabled for this command. | 500 | * Return mask with command flags to be enabled for this command. |
501 | */ | 501 | */ |
502 | static u32 atmci_prepare_command(struct mmc_host *mmc, | 502 | static u32 atmci_prepare_command(struct mmc_host *mmc, |
503 | struct mmc_command *cmd) | 503 | struct mmc_command *cmd) |
504 | { | 504 | { |
505 | struct mmc_data *data; | 505 | struct mmc_data *data; |
506 | u32 cmdr; | 506 | u32 cmdr; |
507 | 507 | ||
508 | cmd->error = -EINPROGRESS; | 508 | cmd->error = -EINPROGRESS; |
509 | 509 | ||
510 | cmdr = MCI_CMDR_CMDNB(cmd->opcode); | 510 | cmdr = MCI_CMDR_CMDNB(cmd->opcode); |
511 | 511 | ||
512 | if (cmd->flags & MMC_RSP_PRESENT) { | 512 | if (cmd->flags & MMC_RSP_PRESENT) { |
513 | if (cmd->flags & MMC_RSP_136) | 513 | if (cmd->flags & MMC_RSP_136) |
514 | cmdr |= MCI_CMDR_RSPTYP_136BIT; | 514 | cmdr |= MCI_CMDR_RSPTYP_136BIT; |
515 | else | 515 | else |
516 | cmdr |= MCI_CMDR_RSPTYP_48BIT; | 516 | cmdr |= MCI_CMDR_RSPTYP_48BIT; |
517 | } | 517 | } |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * This should really be MAXLAT_5 for CMD2 and ACMD41, but | 520 | * This should really be MAXLAT_5 for CMD2 and ACMD41, but |
521 | * it's too difficult to determine whether this is an ACMD or | 521 | * it's too difficult to determine whether this is an ACMD or |
522 | * not. Better make it 64. | 522 | * not. Better make it 64. |
523 | */ | 523 | */ |
524 | cmdr |= MCI_CMDR_MAXLAT_64CYC; | 524 | cmdr |= MCI_CMDR_MAXLAT_64CYC; |
525 | 525 | ||
526 | if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) | 526 | if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) |
527 | cmdr |= MCI_CMDR_OPDCMD; | 527 | cmdr |= MCI_CMDR_OPDCMD; |
528 | 528 | ||
529 | data = cmd->data; | 529 | data = cmd->data; |
530 | if (data) { | 530 | if (data) { |
531 | cmdr |= MCI_CMDR_START_XFER; | 531 | cmdr |= MCI_CMDR_START_XFER; |
532 | if (data->flags & MMC_DATA_STREAM) | 532 | if (data->flags & MMC_DATA_STREAM) |
533 | cmdr |= MCI_CMDR_STREAM; | 533 | cmdr |= MCI_CMDR_STREAM; |
534 | else if (data->blocks > 1) | 534 | else if (data->blocks > 1) |
535 | cmdr |= MCI_CMDR_MULTI_BLOCK; | 535 | cmdr |= MCI_CMDR_MULTI_BLOCK; |
536 | else | 536 | else |
537 | cmdr |= MCI_CMDR_BLOCK; | 537 | cmdr |= MCI_CMDR_BLOCK; |
538 | 538 | ||
539 | if (data->flags & MMC_DATA_READ) | 539 | if (data->flags & MMC_DATA_READ) |
540 | cmdr |= MCI_CMDR_TRDIR_READ; | 540 | cmdr |= MCI_CMDR_TRDIR_READ; |
541 | } | 541 | } |
542 | 542 | ||
543 | return cmdr; | 543 | return cmdr; |
544 | } | 544 | } |
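Annotation: tracing the branches above for a concrete case, a multiple-block read such as CMD18 (READ_MULTIPLE_BLOCK: 48-bit R1 response, push-pull bus, data present, blocks > 1, read direction) yields

	cmdr = MCI_CMDR_CMDNB(18) | MCI_CMDR_RSPTYP_48BIT
		| MCI_CMDR_MAXLAT_64CYC | MCI_CMDR_START_XFER
		| MCI_CMDR_MULTI_BLOCK | MCI_CMDR_TRDIR_READ;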
545 | 545 | ||
546 | static void atmci_start_command(struct atmel_mci *host, | 546 | static void atmci_start_command(struct atmel_mci *host, |
547 | struct mmc_command *cmd, u32 cmd_flags) | 547 | struct mmc_command *cmd, u32 cmd_flags) |
548 | { | 548 | { |
549 | WARN_ON(host->cmd); | 549 | WARN_ON(host->cmd); |
550 | host->cmd = cmd; | 550 | host->cmd = cmd; |
551 | 551 | ||
552 | dev_vdbg(&host->pdev->dev, | 552 | dev_vdbg(&host->pdev->dev, |
553 | "start command: ARGR=0x%08x CMDR=0x%08x\n", | 553 | "start command: ARGR=0x%08x CMDR=0x%08x\n", |
554 | cmd->arg, cmd_flags); | 554 | cmd->arg, cmd_flags); |
555 | 555 | ||
556 | mci_writel(host, ARGR, cmd->arg); | 556 | mci_writel(host, ARGR, cmd->arg); |
557 | mci_writel(host, CMDR, cmd_flags); | 557 | mci_writel(host, CMDR, cmd_flags); |
558 | } | 558 | } |
559 | 559 | ||
560 | static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) | 560 | static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) |
561 | { | 561 | { |
562 | atmci_start_command(host, data->stop, host->stop_cmdr); | 562 | atmci_start_command(host, data->stop, host->stop_cmdr); |
563 | mci_writel(host, IER, MCI_CMDRDY); | 563 | mci_writel(host, IER, MCI_CMDRDY); |
564 | } | 564 | } |
565 | 565 | ||
566 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 566 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
567 | static void atmci_dma_cleanup(struct atmel_mci *host) | 567 | static void atmci_dma_cleanup(struct atmel_mci *host) |
568 | { | 568 | { |
569 | struct mmc_data *data = host->data; | 569 | struct mmc_data *data = host->data; |
570 | 570 | ||
571 | dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, | 571 | dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, |
572 | ((data->flags & MMC_DATA_WRITE) | 572 | ((data->flags & MMC_DATA_WRITE) |
573 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); | 573 | ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); |
574 | } | 574 | } |
575 | 575 | ||
576 | static void atmci_stop_dma(struct atmel_mci *host) | 576 | static void atmci_stop_dma(struct atmel_mci *host) |
577 | { | 577 | { |
578 | struct dma_chan *chan = host->data_chan; | 578 | struct dma_chan *chan = host->data_chan; |
579 | 579 | ||
580 | if (chan) { | 580 | if (chan) { |
581 | chan->device->device_control(chan, DMA_TERMINATE_ALL); | 581 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); |
582 | atmci_dma_cleanup(host); | 582 | atmci_dma_cleanup(host); |
583 | } else { | 583 | } else { |
584 | /* Data transfer was stopped by the interrupt handler */ | 584 | /* Data transfer was stopped by the interrupt handler */ |
585 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 585 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
586 | mci_writel(host, IER, MCI_NOTBUSY); | 586 | mci_writel(host, IER, MCI_NOTBUSY); |
587 | } | 587 | } |
588 | } | 588 | } |
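Annotation: this hunk is the point of the commit. The atmci_stop_dma() call site now passes a third argument to device_control(), matching the callback as extended in include/linux/dmaengine.h. A minimal sketch of the intended ioctl()-style use follows; DMA_EXAMPLE_CONFIG and struct example_slave_config are illustrative names only, not part of this commit (commands that carry no payload, like DMA_TERMINATE_ALL here, simply pass 0):

	/* The callback now takes an opaque argument, akin to ioctl(): */
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg);

	/* A later command could pass a configuration block through 'arg'
	 * (hypothetical command and struct, for illustration only): */
	struct example_slave_config cfg = { /* channel parameters */ };
	int ret = chan->device->device_control(chan, DMA_EXAMPLE_CONFIG,
					       (unsigned long)&cfg);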
589 | 589 | ||
590 | /* This function is called by the DMA driver from tasklet context. */ | 590 | /* This function is called by the DMA driver from tasklet context. */ |
591 | static void atmci_dma_complete(void *arg) | 591 | static void atmci_dma_complete(void *arg) |
592 | { | 592 | { |
593 | struct atmel_mci *host = arg; | 593 | struct atmel_mci *host = arg; |
594 | struct mmc_data *data = host->data; | 594 | struct mmc_data *data = host->data; |
595 | 595 | ||
596 | dev_vdbg(&host->pdev->dev, "DMA complete\n"); | 596 | dev_vdbg(&host->pdev->dev, "DMA complete\n"); |
597 | 597 | ||
598 | if (atmci_is_mci2()) | 598 | if (atmci_is_mci2()) |
599 | /* Disable DMA hardware handshaking on MCI */ | 599 | /* Disable DMA hardware handshaking on MCI */ |
600 | mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN); | 600 | mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN); |
601 | 601 | ||
602 | atmci_dma_cleanup(host); | 602 | atmci_dma_cleanup(host); |
603 | 603 | ||
604 | /* | 604 | /* |
605 | * If the card was removed, data will be NULL. No point trying | 605 | * If the card was removed, data will be NULL. No point trying |
606 | * to send the stop command or waiting for NOTBUSY in this case. | 606 | * to send the stop command or waiting for NOTBUSY in this case. |

607 | */ | 607 | */ |
608 | if (data) { | 608 | if (data) { |
609 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 609 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
610 | tasklet_schedule(&host->tasklet); | 610 | tasklet_schedule(&host->tasklet); |
611 | 611 | ||
612 | /* | 612 | /* |
613 | * Regardless of what the documentation says, we have | 613 | * Regardless of what the documentation says, we have |
614 | * to wait for NOTBUSY even after block read | 614 | * to wait for NOTBUSY even after block read |
615 | * operations. | 615 | * operations. |
616 | * | 616 | * |
617 | * When the DMA transfer is complete, the controller | 617 | * When the DMA transfer is complete, the controller |
618 | * may still be reading the CRC from the card, i.e. | 618 | * may still be reading the CRC from the card, i.e. |
619 | * the data transfer is still in progress and we | 619 | * the data transfer is still in progress and we |
620 | * haven't seen all the potential error bits yet. | 620 | * haven't seen all the potential error bits yet. |
621 | * | 621 | * |
622 | * The interrupt handler will schedule a different | 622 | * The interrupt handler will schedule a different |
623 | * tasklet to finish things up when the data transfer | 623 | * tasklet to finish things up when the data transfer |
624 | * is completely done. | 624 | * is completely done. |
625 | * | 625 | * |
626 | * We may not complete the mmc request here anyway | 626 | * We may not complete the mmc request here anyway |
627 | * because the mmc layer may call back and cause us to | 627 | * because the mmc layer may call back and cause us to |
628 | * violate the "don't submit new operations from the | 628 | * violate the "don't submit new operations from the |
629 | * completion callback" rule of the dma engine | 629 | * completion callback" rule of the dma engine |
630 | * framework. | 630 | * framework. |
631 | */ | 631 | */ |
632 | mci_writel(host, IER, MCI_NOTBUSY); | 632 | mci_writel(host, IER, MCI_NOTBUSY); |
633 | } | 633 | } |
634 | } | 634 | } |
635 | 635 | ||
636 | static int | 636 | static int |
637 | atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | 637 | atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) |
638 | { | 638 | { |
639 | struct dma_chan *chan; | 639 | struct dma_chan *chan; |
640 | struct dma_async_tx_descriptor *desc; | 640 | struct dma_async_tx_descriptor *desc; |
641 | struct scatterlist *sg; | 641 | struct scatterlist *sg; |
642 | unsigned int i; | 642 | unsigned int i; |
643 | enum dma_data_direction direction; | 643 | enum dma_data_direction direction; |
644 | unsigned int sglen; | 644 | unsigned int sglen; |
645 | 645 | ||
646 | /* | 646 | /* |
647 | * We don't do DMA on "complex" transfers, i.e. with | 647 | * We don't do DMA on "complex" transfers, i.e. with |
648 | * non-word-aligned buffers or lengths. Also, we don't bother | 648 | * non-word-aligned buffers or lengths. Also, we don't bother |
649 | * with all the DMA setup overhead for short transfers. | 649 | * with all the DMA setup overhead for short transfers. |
650 | */ | 650 | */ |
651 | if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) | 651 | if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) |
652 | return -EINVAL; | 652 | return -EINVAL; |
653 | if (data->blksz & 3) | 653 | if (data->blksz & 3) |
654 | return -EINVAL; | 654 | return -EINVAL; |
655 | 655 | ||
656 | for_each_sg(data->sg, sg, data->sg_len, i) { | 656 | for_each_sg(data->sg, sg, data->sg_len, i) { |
657 | if (sg->offset & 3 || sg->length & 3) | 657 | if (sg->offset & 3 || sg->length & 3) |
658 | return -EINVAL; | 658 | return -EINVAL; |
659 | } | 659 | } |
660 | 660 | ||
661 | /* If we don't have a channel, we can't do DMA */ | 661 | /* If we don't have a channel, we can't do DMA */ |
662 | chan = host->dma.chan; | 662 | chan = host->dma.chan; |
663 | if (chan) | 663 | if (chan) |
664 | host->data_chan = chan; | 664 | host->data_chan = chan; |
665 | 665 | ||
666 | if (!chan) | 666 | if (!chan) |
667 | return -ENODEV; | 667 | return -ENODEV; |
668 | 668 | ||
669 | if (atmci_is_mci2()) | 669 | if (atmci_is_mci2()) |
670 | mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN); | 670 | mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN); |
671 | 671 | ||
672 | if (data->flags & MMC_DATA_READ) | 672 | if (data->flags & MMC_DATA_READ) |
673 | direction = DMA_FROM_DEVICE; | 673 | direction = DMA_FROM_DEVICE; |
674 | else | 674 | else |
675 | direction = DMA_TO_DEVICE; | 675 | direction = DMA_TO_DEVICE; |
676 | 676 | ||
677 | sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); | 677 | sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); |
678 | if (sglen != data->sg_len) | 678 | if (sglen != data->sg_len) |
679 | goto unmap_exit; | 679 | goto unmap_exit; |
680 | desc = chan->device->device_prep_slave_sg(chan, | 680 | desc = chan->device->device_prep_slave_sg(chan, |
681 | data->sg, data->sg_len, direction, | 681 | data->sg, data->sg_len, direction, |
682 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 682 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
683 | if (!desc) | 683 | if (!desc) |
684 | goto unmap_exit; | 684 | goto unmap_exit; |
685 | 685 | ||
686 | host->dma.data_desc = desc; | 686 | host->dma.data_desc = desc; |
687 | desc->callback = atmci_dma_complete; | 687 | desc->callback = atmci_dma_complete; |
688 | desc->callback_param = host; | 688 | desc->callback_param = host; |
689 | 689 | ||
690 | return 0; | 690 | return 0; |
691 | unmap_exit: | 691 | unmap_exit: |
692 | dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); | 692 | dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); |
693 | return -ENOMEM; | 693 | return -ENOMEM; |
694 | } | 694 | } |
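Annotation: in practice the checks above mean, for example, that a single word-aligned 512-byte block in one scatterlist entry is handed to the DMA engine (provided 512 >= ATMCI_DMA_THRESHOLD), while a short SDIO-style 6-byte transfer, or any sg entry starting at a non-word offset such as 2, takes the PIO fallback instead.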
695 | 695 | ||
696 | static void atmci_submit_data(struct atmel_mci *host) | 696 | static void atmci_submit_data(struct atmel_mci *host) |
697 | { | 697 | { |
698 | struct dma_chan *chan = host->data_chan; | 698 | struct dma_chan *chan = host->data_chan; |
699 | struct dma_async_tx_descriptor *desc = host->dma.data_desc; | 699 | struct dma_async_tx_descriptor *desc = host->dma.data_desc; |
700 | 700 | ||
701 | if (chan) { | 701 | if (chan) { |
702 | desc->tx_submit(desc); | 702 | desc->tx_submit(desc); |
703 | chan->device->device_issue_pending(chan); | 703 | chan->device->device_issue_pending(chan); |
704 | } | 704 | } |
705 | } | 705 | } |
706 | 706 | ||
707 | #else /* CONFIG_MMC_ATMELMCI_DMA */ | 707 | #else /* CONFIG_MMC_ATMELMCI_DMA */ |
708 | 708 | ||
709 | static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) | 709 | static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) |
710 | { | 710 | { |
711 | return -ENOSYS; | 711 | return -ENOSYS; |
712 | } | 712 | } |
713 | 713 | ||
714 | static void atmci_submit_data(struct atmel_mci *host) {} | 714 | static void atmci_submit_data(struct atmel_mci *host) {} |
715 | 715 | ||
716 | static void atmci_stop_dma(struct atmel_mci *host) | 716 | static void atmci_stop_dma(struct atmel_mci *host) |
717 | { | 717 | { |
718 | /* Data transfer was stopped by the interrupt handler */ | 718 | /* Data transfer was stopped by the interrupt handler */ |
719 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 719 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
720 | mci_writel(host, IER, MCI_NOTBUSY); | 720 | mci_writel(host, IER, MCI_NOTBUSY); |
721 | } | 721 | } |
722 | 722 | ||
723 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ | 723 | #endif /* CONFIG_MMC_ATMELMCI_DMA */ |
724 | 724 | ||
725 | /* | 725 | /* |
726 | * Returns a mask of interrupt flags to be enabled after the whole | 726 | * Returns a mask of interrupt flags to be enabled after the whole |
727 | * request has been prepared. | 727 | * request has been prepared. |
728 | */ | 728 | */ |
729 | static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) | 729 | static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) |
730 | { | 730 | { |
731 | u32 iflags; | 731 | u32 iflags; |
732 | 732 | ||
733 | data->error = -EINPROGRESS; | 733 | data->error = -EINPROGRESS; |
734 | 734 | ||
735 | WARN_ON(host->data); | 735 | WARN_ON(host->data); |
736 | host->sg = NULL; | 736 | host->sg = NULL; |
737 | host->data = data; | 737 | host->data = data; |
738 | 738 | ||
739 | iflags = ATMCI_DATA_ERROR_FLAGS; | 739 | iflags = ATMCI_DATA_ERROR_FLAGS; |
740 | if (atmci_prepare_data_dma(host, data)) { | 740 | if (atmci_prepare_data_dma(host, data)) { |
741 | host->data_chan = NULL; | 741 | host->data_chan = NULL; |
742 | 742 | ||
743 | /* | 743 | /* |
744 | * Errata: MMC data write operation with less than 12 | 744 | * Errata: MMC data write operation with less than 12 |
745 | * bytes is impossible. | 745 | * bytes is impossible. |
746 | * | 746 | * |
747 | * Errata: MCI Transmit Data Register (TDR) FIFO | 747 | * Errata: MCI Transmit Data Register (TDR) FIFO |
748 | * corruption when length is not a multiple of 4. | 748 | * corruption when length is not a multiple of 4. |
749 | */ | 749 | */ |
750 | if (data->blocks * data->blksz < 12 | 750 | if (data->blocks * data->blksz < 12 |
751 | || (data->blocks * data->blksz) & 3) | 751 | || (data->blocks * data->blksz) & 3) |
752 | host->need_reset = true; | 752 | host->need_reset = true; |
753 | 753 | ||
754 | host->sg = data->sg; | 754 | host->sg = data->sg; |
755 | host->pio_offset = 0; | 755 | host->pio_offset = 0; |
756 | if (data->flags & MMC_DATA_READ) | 756 | if (data->flags & MMC_DATA_READ) |
757 | iflags |= MCI_RXRDY; | 757 | iflags |= MCI_RXRDY; |
758 | else | 758 | else |
759 | iflags |= MCI_TXRDY; | 759 | iflags |= MCI_TXRDY; |
760 | } | 760 | } |
761 | 761 | ||
762 | return iflags; | 762 | return iflags; |
763 | } | 763 | } |
764 | 764 | ||
765 | static void atmci_start_request(struct atmel_mci *host, | 765 | static void atmci_start_request(struct atmel_mci *host, |
766 | struct atmel_mci_slot *slot) | 766 | struct atmel_mci_slot *slot) |
767 | { | 767 | { |
768 | struct mmc_request *mrq; | 768 | struct mmc_request *mrq; |
769 | struct mmc_command *cmd; | 769 | struct mmc_command *cmd; |
770 | struct mmc_data *data; | 770 | struct mmc_data *data; |
771 | u32 iflags; | 771 | u32 iflags; |
772 | u32 cmdflags; | 772 | u32 cmdflags; |
773 | 773 | ||
774 | mrq = slot->mrq; | 774 | mrq = slot->mrq; |
775 | host->cur_slot = slot; | 775 | host->cur_slot = slot; |
776 | host->mrq = mrq; | 776 | host->mrq = mrq; |
777 | 777 | ||
778 | host->pending_events = 0; | 778 | host->pending_events = 0; |
779 | host->completed_events = 0; | 779 | host->completed_events = 0; |
780 | host->data_status = 0; | 780 | host->data_status = 0; |
781 | 781 | ||
782 | if (host->need_reset) { | 782 | if (host->need_reset) { |
783 | mci_writel(host, CR, MCI_CR_SWRST); | 783 | mci_writel(host, CR, MCI_CR_SWRST); |
784 | mci_writel(host, CR, MCI_CR_MCIEN); | 784 | mci_writel(host, CR, MCI_CR_MCIEN); |
785 | mci_writel(host, MR, host->mode_reg); | 785 | mci_writel(host, MR, host->mode_reg); |
786 | if (atmci_is_mci2()) | 786 | if (atmci_is_mci2()) |
787 | mci_writel(host, CFG, host->cfg_reg); | 787 | mci_writel(host, CFG, host->cfg_reg); |
788 | host->need_reset = false; | 788 | host->need_reset = false; |
789 | } | 789 | } |
790 | mci_writel(host, SDCR, slot->sdc_reg); | 790 | mci_writel(host, SDCR, slot->sdc_reg); |
791 | 791 | ||
792 | iflags = mci_readl(host, IMR); | 792 | iflags = mci_readl(host, IMR); |
793 | if (iflags) | 793 | if (iflags) |
794 | dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", | 794 | dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", |
795 | iflags); | 795 | iflags); |
796 | 796 | ||
797 | if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { | 797 | if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { |
798 | /* Send init sequence (74 clock cycles) */ | 798 | /* Send init sequence (74 clock cycles) */ |
799 | mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); | 799 | mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); |
800 | while (!(mci_readl(host, SR) & MCI_CMDRDY)) | 800 | while (!(mci_readl(host, SR) & MCI_CMDRDY)) |
801 | cpu_relax(); | 801 | cpu_relax(); |
802 | } | 802 | } |
803 | iflags = 0; | 803 | iflags = 0; |
804 | data = mrq->data; | 804 | data = mrq->data; |
805 | if (data) { | 805 | if (data) { |
806 | atmci_set_timeout(host, slot, data); | 806 | atmci_set_timeout(host, slot, data); |
807 | 807 | ||
808 | /* Must set block count/size before sending command */ | 808 | /* Must set block count/size before sending command */ |
809 | mci_writel(host, BLKR, MCI_BCNT(data->blocks) | 809 | mci_writel(host, BLKR, MCI_BCNT(data->blocks) |
810 | | MCI_BLKLEN(data->blksz)); | 810 | | MCI_BLKLEN(data->blksz)); |
811 | dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", | 811 | dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", |
812 | MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); | 812 | MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); |
813 | 813 | ||
814 | iflags |= atmci_prepare_data(host, data); | 814 | iflags |= atmci_prepare_data(host, data); |
815 | } | 815 | } |
816 | 816 | ||
817 | iflags |= MCI_CMDRDY; | 817 | iflags |= MCI_CMDRDY; |
818 | cmd = mrq->cmd; | 818 | cmd = mrq->cmd; |
819 | cmdflags = atmci_prepare_command(slot->mmc, cmd); | 819 | cmdflags = atmci_prepare_command(slot->mmc, cmd); |
820 | atmci_start_command(host, cmd, cmdflags); | 820 | atmci_start_command(host, cmd, cmdflags); |
821 | 821 | ||
822 | if (data) | 822 | if (data) |
823 | atmci_submit_data(host); | 823 | atmci_submit_data(host); |
824 | 824 | ||
825 | if (mrq->stop) { | 825 | if (mrq->stop) { |
826 | host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); | 826 | host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); |
827 | host->stop_cmdr |= MCI_CMDR_STOP_XFER; | 827 | host->stop_cmdr |= MCI_CMDR_STOP_XFER; |
828 | if (!(data->flags & MMC_DATA_WRITE)) | 828 | if (!(data->flags & MMC_DATA_WRITE)) |
829 | host->stop_cmdr |= MCI_CMDR_TRDIR_READ; | 829 | host->stop_cmdr |= MCI_CMDR_TRDIR_READ; |
830 | if (data->flags & MMC_DATA_STREAM) | 830 | if (data->flags & MMC_DATA_STREAM) |
831 | host->stop_cmdr |= MCI_CMDR_STREAM; | 831 | host->stop_cmdr |= MCI_CMDR_STREAM; |
832 | else | 832 | else |
833 | host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK; | 833 | host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK; |
834 | } | 834 | } |
835 | 835 | ||
836 | /* | 836 | /* |
837 | * We could have enabled interrupts earlier, but I suspect | 837 | * We could have enabled interrupts earlier, but I suspect |
838 | * that would open up a nice can of interesting race | 838 | * that would open up a nice can of interesting race |
839 | * conditions (e.g. command and data complete, but stop not | 839 | * conditions (e.g. command and data complete, but stop not |
840 | * prepared yet.) | 840 | * prepared yet.) |
841 | */ | 841 | */ |
842 | mci_writel(host, IER, iflags); | 842 | mci_writel(host, IER, iflags); |
843 | } | 843 | } |
844 | 844 | ||
845 | static void atmci_queue_request(struct atmel_mci *host, | 845 | static void atmci_queue_request(struct atmel_mci *host, |
846 | struct atmel_mci_slot *slot, struct mmc_request *mrq) | 846 | struct atmel_mci_slot *slot, struct mmc_request *mrq) |
847 | { | 847 | { |
848 | dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", | 848 | dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", |
849 | host->state); | 849 | host->state); |
850 | 850 | ||
851 | spin_lock_bh(&host->lock); | 851 | spin_lock_bh(&host->lock); |
852 | slot->mrq = mrq; | 852 | slot->mrq = mrq; |
853 | if (host->state == STATE_IDLE) { | 853 | if (host->state == STATE_IDLE) { |
854 | host->state = STATE_SENDING_CMD; | 854 | host->state = STATE_SENDING_CMD; |
855 | atmci_start_request(host, slot); | 855 | atmci_start_request(host, slot); |
856 | } else { | 856 | } else { |
857 | list_add_tail(&slot->queue_node, &host->queue); | 857 | list_add_tail(&slot->queue_node, &host->queue); |
858 | } | 858 | } |
859 | spin_unlock_bh(&host->lock); | 859 | spin_unlock_bh(&host->lock); |
860 | } | 860 | } |
861 | 861 | ||
862 | static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) | 862 | static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) |
863 | { | 863 | { |
864 | struct atmel_mci_slot *slot = mmc_priv(mmc); | 864 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
865 | struct atmel_mci *host = slot->host; | 865 | struct atmel_mci *host = slot->host; |
866 | struct mmc_data *data; | 866 | struct mmc_data *data; |
867 | 867 | ||
868 | WARN_ON(slot->mrq); | 868 | WARN_ON(slot->mrq); |
869 | 869 | ||
870 | /* | 870 | /* |
871 | * We may "know" the card is gone even though there's still an | 871 | * We may "know" the card is gone even though there's still an |
872 | * electrical connection. If so, we really need to communicate | 872 | * electrical connection. If so, we really need to communicate |
873 | * this to the MMC core since there won't be any more | 873 | * this to the MMC core since there won't be any more |
874 | * interrupts as the card is completely removed. Otherwise, | 874 | * interrupts as the card is completely removed. Otherwise, |
875 | * the MMC core might believe the card is still there even | 875 | * the MMC core might believe the card is still there even |
876 | * though the card was just removed very slowly. | 876 | * though the card was just removed very slowly. |
877 | */ | 877 | */ |
878 | if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) { | 878 | if (!test_bit(ATMCI_CARD_PRESENT, &slot->flags)) { |
879 | mrq->cmd->error = -ENOMEDIUM; | 879 | mrq->cmd->error = -ENOMEDIUM; |
880 | mmc_request_done(mmc, mrq); | 880 | mmc_request_done(mmc, mrq); |
881 | return; | 881 | return; |
882 | } | 882 | } |
883 | 883 | ||
884 | /* We don't support multiple blocks of weird lengths. */ | 884 | /* We don't support multiple blocks of weird lengths. */ |
885 | data = mrq->data; | 885 | data = mrq->data; |
886 | if (data && data->blocks > 1 && data->blksz & 3) { | 886 | if (data && data->blocks > 1 && data->blksz & 3) { |
887 | mrq->cmd->error = -EINVAL; | 887 | mrq->cmd->error = -EINVAL; |
888 | mmc_request_done(mmc, mrq); return; | 888 | mmc_request_done(mmc, mrq); return; |
889 | } | 889 | } |
890 | 890 | ||
891 | atmci_queue_request(host, slot, mrq); | 891 | atmci_queue_request(host, slot, mrq); |
892 | } | 892 | } |
893 | 893 | ||
894 | static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | 894 | static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) |
895 | { | 895 | { |
896 | struct atmel_mci_slot *slot = mmc_priv(mmc); | 896 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
897 | struct atmel_mci *host = slot->host; | 897 | struct atmel_mci *host = slot->host; |
898 | unsigned int i; | 898 | unsigned int i; |
899 | 899 | ||
900 | slot->sdc_reg &= ~MCI_SDCBUS_MASK; | 900 | slot->sdc_reg &= ~MCI_SDCBUS_MASK; |
901 | switch (ios->bus_width) { | 901 | switch (ios->bus_width) { |
902 | case MMC_BUS_WIDTH_1: | 902 | case MMC_BUS_WIDTH_1: |
903 | slot->sdc_reg |= MCI_SDCBUS_1BIT; | 903 | slot->sdc_reg |= MCI_SDCBUS_1BIT; |
904 | break; | 904 | break; |
905 | case MMC_BUS_WIDTH_4: | 905 | case MMC_BUS_WIDTH_4: |
906 | slot->sdc_reg |= MCI_SDCBUS_4BIT; | 906 | slot->sdc_reg |= MCI_SDCBUS_4BIT; |
907 | break; | 907 | break; |
908 | } | 908 | } |
909 | 909 | ||
910 | if (ios->clock) { | 910 | if (ios->clock) { |
911 | unsigned int clock_min = ~0U; | 911 | unsigned int clock_min = ~0U; |
912 | u32 clkdiv; | 912 | u32 clkdiv; |
913 | 913 | ||
914 | spin_lock_bh(&host->lock); | 914 | spin_lock_bh(&host->lock); |
915 | if (!host->mode_reg) { | 915 | if (!host->mode_reg) { |
916 | clk_enable(host->mck); | 916 | clk_enable(host->mck); |
917 | mci_writel(host, CR, MCI_CR_SWRST); | 917 | mci_writel(host, CR, MCI_CR_SWRST); |
918 | mci_writel(host, CR, MCI_CR_MCIEN); | 918 | mci_writel(host, CR, MCI_CR_MCIEN); |
919 | if (atmci_is_mci2()) | 919 | if (atmci_is_mci2()) |
920 | mci_writel(host, CFG, host->cfg_reg); | 920 | mci_writel(host, CFG, host->cfg_reg); |
921 | } | 921 | } |
922 | 922 | ||
923 | /* | 923 | /* |
924 | * Use mirror of ios->clock to prevent race with mmc | 924 | * Use mirror of ios->clock to prevent race with mmc |
925 | * core ios update when finding the minimum. | 925 | * core ios update when finding the minimum. |
926 | */ | 926 | */ |
927 | slot->clock = ios->clock; | 927 | slot->clock = ios->clock; |
928 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | 928 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
929 | if (host->slot[i] && host->slot[i]->clock | 929 | if (host->slot[i] && host->slot[i]->clock |
930 | && host->slot[i]->clock < clock_min) | 930 | && host->slot[i]->clock < clock_min) |
931 | clock_min = host->slot[i]->clock; | 931 | clock_min = host->slot[i]->clock; |
932 | } | 932 | } |
933 | 933 | ||
934 | /* Calculate clock divider */ | 934 | /* Calculate clock divider */ |
935 | clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; | 935 | clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; |
936 | if (clkdiv > 255) { | 936 | if (clkdiv > 255) { |
937 | dev_warn(&mmc->class_dev, | 937 | dev_warn(&mmc->class_dev, |
938 | "clock %u too slow; using %lu\n", | 938 | "clock %u too slow; using %lu\n", |
939 | clock_min, host->bus_hz / (2 * 256)); | 939 | clock_min, host->bus_hz / (2 * 256)); |
940 | clkdiv = 255; | 940 | clkdiv = 255; |
941 | } | 941 | } |
942 | 942 | ||
943 | host->mode_reg = MCI_MR_CLKDIV(clkdiv); | 943 | host->mode_reg = MCI_MR_CLKDIV(clkdiv); |
944 | 944 | ||
945 | /* | 945 | /* |
946 | * WRPROOF and RDPROOF prevent overruns/underruns by | 946 | * WRPROOF and RDPROOF prevent overruns/underruns by |
947 | * stopping the clock when the FIFO is full/empty. | 947 | * stopping the clock when the FIFO is full/empty. |
948 | * This state is not expected to last for long. | 948 | * This state is not expected to last for long. |
949 | */ | 949 | */ |
950 | if (mci_has_rwproof()) | 950 | if (mci_has_rwproof()) |
951 | host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); | 951 | host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); |
952 | 952 | ||
953 | if (list_empty(&host->queue)) | 953 | if (list_empty(&host->queue)) |
954 | mci_writel(host, MR, host->mode_reg); | 954 | mci_writel(host, MR, host->mode_reg); |
955 | else | 955 | else |
956 | host->need_clock_update = true; | 956 | host->need_clock_update = true; |
957 | 957 | ||
958 | spin_unlock_bh(&host->lock); | 958 | spin_unlock_bh(&host->lock); |
959 | } else { | 959 | } else { |
960 | bool any_slot_active = false; | 960 | bool any_slot_active = false; |
961 | 961 | ||
962 | spin_lock_bh(&host->lock); | 962 | spin_lock_bh(&host->lock); |
963 | slot->clock = 0; | 963 | slot->clock = 0; |
964 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | 964 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
965 | if (host->slot[i] && host->slot[i]->clock) { | 965 | if (host->slot[i] && host->slot[i]->clock) { |
966 | any_slot_active = true; | 966 | any_slot_active = true; |
967 | break; | 967 | break; |
968 | } | 968 | } |
969 | } | 969 | } |
970 | if (!any_slot_active) { | 970 | if (!any_slot_active) { |
971 | mci_writel(host, CR, MCI_CR_MCIDIS); | 971 | mci_writel(host, CR, MCI_CR_MCIDIS); |
972 | if (host->mode_reg) { | 972 | if (host->mode_reg) { |
973 | mci_readl(host, MR); | 973 | mci_readl(host, MR); |
974 | clk_disable(host->mck); | 974 | clk_disable(host->mck); |
975 | } | 975 | } |
976 | host->mode_reg = 0; | 976 | host->mode_reg = 0; |
977 | } | 977 | } |
978 | spin_unlock_bh(&host->lock); | 978 | spin_unlock_bh(&host->lock); |
979 | } | 979 | } |
980 | 980 | ||
981 | switch (ios->power_mode) { | 981 | switch (ios->power_mode) { |
982 | case MMC_POWER_UP: | 982 | case MMC_POWER_UP: |
983 | set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); | 983 | set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); |
984 | break; | 984 | break; |
985 | default: | 985 | default: |
986 | /* | 986 | /* |
987 | * TODO: None of the currently available AVR32-based | 987 | * TODO: None of the currently available AVR32-based |
988 | * boards allow MMC power to be turned off. Implement | 988 | * boards allow MMC power to be turned off. Implement |
989 | * power control when this can be tested properly. | 989 | * power control when this can be tested properly. |
990 | * | 990 | * |
991 | * We also need to hook this into the clock management | 991 | * We also need to hook this into the clock management |
992 | * somehow so that newly inserted cards aren't | 992 | * somehow so that newly inserted cards aren't |
993 | * subjected to a fast clock before we have a chance | 993 | * subjected to a fast clock before we have a chance |
994 | * to figure out what the maximum rate is. Currently, | 994 | * to figure out what the maximum rate is. Currently, |
995 | * there's no way to avoid this, and there never will | 995 | * there's no way to avoid this, and there never will |
996 | * be for boards that don't support power control. | 996 | * be for boards that don't support power control. |
997 | */ | 997 | */ |
998 | break; | 998 | break; |
999 | } | 999 | } |
1000 | } | 1000 | } |
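Annotation: the divider computed in atmci_set_ios() follows the MCI clock formula f = bus_hz / (2 * (CLKDIV + 1)). Worked example: with bus_hz = 60 MHz and a slowest requested slot clock of 25 MHz, clkdiv = DIV_ROUND_UP(60000000, 2 * 25000000) - 1 = 1, giving an actual card clock of 60 MHz / 4 = 15 MHz, the fastest available rate that does not exceed the request (clkdiv = 0 would give 30 MHz).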
1001 | 1001 | ||
1002 | static int atmci_get_ro(struct mmc_host *mmc) | 1002 | static int atmci_get_ro(struct mmc_host *mmc) |
1003 | { | 1003 | { |
1004 | int read_only = -ENOSYS; | 1004 | int read_only = -ENOSYS; |
1005 | struct atmel_mci_slot *slot = mmc_priv(mmc); | 1005 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
1006 | 1006 | ||
1007 | if (gpio_is_valid(slot->wp_pin)) { | 1007 | if (gpio_is_valid(slot->wp_pin)) { |
1008 | read_only = gpio_get_value(slot->wp_pin); | 1008 | read_only = gpio_get_value(slot->wp_pin); |
1009 | dev_dbg(&mmc->class_dev, "card is %s\n", | 1009 | dev_dbg(&mmc->class_dev, "card is %s\n", |
1010 | read_only ? "read-only" : "read-write"); | 1010 | read_only ? "read-only" : "read-write"); |
1011 | } | 1011 | } |
1012 | 1012 | ||
1013 | return read_only; | 1013 | return read_only; |
1014 | } | 1014 | } |
1015 | 1015 | ||
1016 | static int atmci_get_cd(struct mmc_host *mmc) | 1016 | static int atmci_get_cd(struct mmc_host *mmc) |
1017 | { | 1017 | { |
1018 | int present = -ENOSYS; | 1018 | int present = -ENOSYS; |
1019 | struct atmel_mci_slot *slot = mmc_priv(mmc); | 1019 | struct atmel_mci_slot *slot = mmc_priv(mmc); |
1020 | 1020 | ||
1021 | if (gpio_is_valid(slot->detect_pin)) { | 1021 | if (gpio_is_valid(slot->detect_pin)) { |
1022 | present = !(gpio_get_value(slot->detect_pin) ^ | 1022 | present = !(gpio_get_value(slot->detect_pin) ^ |
1023 | slot->detect_is_active_high); | 1023 | slot->detect_is_active_high); |
1024 | dev_dbg(&mmc->class_dev, "card is %spresent\n", | 1024 | dev_dbg(&mmc->class_dev, "card is %spresent\n", |
1025 | present ? "" : "not "); | 1025 | present ? "" : "not "); |
1026 | } | 1026 | } |
1027 | 1027 | ||
1028 | return present; | 1028 | return present; |
1029 | } | 1029 | } |
1030 | 1030 | ||
1031 | static const struct mmc_host_ops atmci_ops = { | 1031 | static const struct mmc_host_ops atmci_ops = { |
1032 | .request = atmci_request, | 1032 | .request = atmci_request, |
1033 | .set_ios = atmci_set_ios, | 1033 | .set_ios = atmci_set_ios, |
1034 | .get_ro = atmci_get_ro, | 1034 | .get_ro = atmci_get_ro, |
1035 | .get_cd = atmci_get_cd, | 1035 | .get_cd = atmci_get_cd, |
1036 | }; | 1036 | }; |
1037 | 1037 | ||
1038 | /* Called with host->lock held */ | 1038 | /* Called with host->lock held */ |
1039 | static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) | 1039 | static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) |
1040 | __releases(&host->lock) | 1040 | __releases(&host->lock) |
1041 | __acquires(&host->lock) | 1041 | __acquires(&host->lock) |
1042 | { | 1042 | { |
1043 | struct atmel_mci_slot *slot = NULL; | 1043 | struct atmel_mci_slot *slot = NULL; |
1044 | struct mmc_host *prev_mmc = host->cur_slot->mmc; | 1044 | struct mmc_host *prev_mmc = host->cur_slot->mmc; |
1045 | 1045 | ||
1046 | WARN_ON(host->cmd || host->data); | 1046 | WARN_ON(host->cmd || host->data); |
1047 | 1047 | ||
1048 | /* | 1048 | /* |
1049 | * Update the MMC clock rate if necessary. This may be | 1049 | * Update the MMC clock rate if necessary. This may be |
1050 | * necessary if set_ios() is called when a different slot is | 1050 | * necessary if set_ios() is called when a different slot is |
1051 | * busy transferring data. | 1051 | * busy transferring data. |
1052 | */ | 1052 | */ |
1053 | if (host->need_clock_update) | 1053 | if (host->need_clock_update) |
1054 | mci_writel(host, MR, host->mode_reg); | 1054 | mci_writel(host, MR, host->mode_reg); |
1055 | 1055 | ||
1056 | host->cur_slot->mrq = NULL; | 1056 | host->cur_slot->mrq = NULL; |
1057 | host->mrq = NULL; | 1057 | host->mrq = NULL; |
1058 | if (!list_empty(&host->queue)) { | 1058 | if (!list_empty(&host->queue)) { |
1059 | slot = list_entry(host->queue.next, | 1059 | slot = list_entry(host->queue.next, |
1060 | struct atmel_mci_slot, queue_node); | 1060 | struct atmel_mci_slot, queue_node); |
1061 | list_del(&slot->queue_node); | 1061 | list_del(&slot->queue_node); |
1062 | dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", | 1062 | dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", |
1063 | mmc_hostname(slot->mmc)); | 1063 | mmc_hostname(slot->mmc)); |
1064 | host->state = STATE_SENDING_CMD; | 1064 | host->state = STATE_SENDING_CMD; |
1065 | atmci_start_request(host, slot); | 1065 | atmci_start_request(host, slot); |
1066 | } else { | 1066 | } else { |
1067 | dev_vdbg(&host->pdev->dev, "list empty\n"); | 1067 | dev_vdbg(&host->pdev->dev, "list empty\n"); |
1068 | host->state = STATE_IDLE; | 1068 | host->state = STATE_IDLE; |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | spin_unlock(&host->lock); | 1071 | spin_unlock(&host->lock); |
1072 | mmc_request_done(prev_mmc, mrq); | 1072 | mmc_request_done(prev_mmc, mrq); |
1073 | spin_lock(&host->lock); | 1073 | spin_lock(&host->lock); |
1074 | } | 1074 | } |
1075 | 1075 | ||
1076 | static void atmci_command_complete(struct atmel_mci *host, | 1076 | static void atmci_command_complete(struct atmel_mci *host, |
1077 | struct mmc_command *cmd) | 1077 | struct mmc_command *cmd) |
1078 | { | 1078 | { |
1079 | u32 status = host->cmd_status; | 1079 | u32 status = host->cmd_status; |
1080 | 1080 | ||
1081 | /* Read the response from the card (up to 16 bytes) */ | 1081 | /* Read the response from the card (up to 16 bytes) */ |
1082 | cmd->resp[0] = mci_readl(host, RSPR); | 1082 | cmd->resp[0] = mci_readl(host, RSPR); |
1083 | cmd->resp[1] = mci_readl(host, RSPR); | 1083 | cmd->resp[1] = mci_readl(host, RSPR); |
1084 | cmd->resp[2] = mci_readl(host, RSPR); | 1084 | cmd->resp[2] = mci_readl(host, RSPR); |
1085 | cmd->resp[3] = mci_readl(host, RSPR); | 1085 | cmd->resp[3] = mci_readl(host, RSPR); |
1086 | 1086 | ||
1087 | if (status & MCI_RTOE) | 1087 | if (status & MCI_RTOE) |
1088 | cmd->error = -ETIMEDOUT; | 1088 | cmd->error = -ETIMEDOUT; |
1089 | else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) | 1089 | else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) |
1090 | cmd->error = -EILSEQ; | 1090 | cmd->error = -EILSEQ; |
1091 | else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE)) | 1091 | else if (status & (MCI_RINDE | MCI_RDIRE | MCI_RENDE)) |
1092 | cmd->error = -EIO; | 1092 | cmd->error = -EIO; |
1093 | else | 1093 | else |
1094 | cmd->error = 0; | 1094 | cmd->error = 0; |
1095 | 1095 | ||
1096 | if (cmd->error) { | 1096 | if (cmd->error) { |
1097 | dev_dbg(&host->pdev->dev, | 1097 | dev_dbg(&host->pdev->dev, |
1098 | "command error: status=0x%08x\n", status); | 1098 | "command error: status=0x%08x\n", status); |
1099 | 1099 | ||
1100 | if (cmd->data) { | 1100 | if (cmd->data) { |
1101 | host->data = NULL; | 1101 | host->data = NULL; |
1102 | atmci_stop_dma(host); | 1102 | atmci_stop_dma(host); |
1103 | mci_writel(host, IDR, MCI_NOTBUSY | 1103 | mci_writel(host, IDR, MCI_NOTBUSY |
1104 | | MCI_TXRDY | MCI_RXRDY | 1104 | | MCI_TXRDY | MCI_RXRDY |
1105 | | ATMCI_DATA_ERROR_FLAGS); | 1105 | | ATMCI_DATA_ERROR_FLAGS); |
1106 | } | 1106 | } |
1107 | } | 1107 | } |
1108 | } | 1108 | } |
1109 | 1109 | ||
1110 | static void atmci_detect_change(unsigned long data) | 1110 | static void atmci_detect_change(unsigned long data) |
1111 | { | 1111 | { |
1112 | struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data; | 1112 | struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data; |
1113 | bool present; | 1113 | bool present; |
1114 | bool present_old; | 1114 | bool present_old; |
1115 | 1115 | ||
1116 | /* | 1116 | /* |
1117 | * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before | 1117 | * atmci_cleanup_slot() sets the ATMCI_SHUTDOWN flag before |
1118 | * freeing the interrupt. We must not re-enable the interrupt | 1118 | * freeing the interrupt. We must not re-enable the interrupt |
1119 | * if it has been freed, and if we're shutting down, it | 1119 | * if it has been freed, and if we're shutting down, it |
1120 | * doesn't really matter whether the card is present or not. | 1120 | * doesn't really matter whether the card is present or not. |
1121 | */ | 1121 | */ |
1122 | smp_rmb(); | 1122 | smp_rmb(); |
1123 | if (test_bit(ATMCI_SHUTDOWN, &slot->flags)) | 1123 | if (test_bit(ATMCI_SHUTDOWN, &slot->flags)) |
1124 | return; | 1124 | return; |
1125 | 1125 | ||
1126 | enable_irq(gpio_to_irq(slot->detect_pin)); | 1126 | enable_irq(gpio_to_irq(slot->detect_pin)); |
1127 | present = !(gpio_get_value(slot->detect_pin) ^ | 1127 | present = !(gpio_get_value(slot->detect_pin) ^ |
1128 | slot->detect_is_active_high); | 1128 | slot->detect_is_active_high); |
1129 | present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); | 1129 | present_old = test_bit(ATMCI_CARD_PRESENT, &slot->flags); |
1130 | 1130 | ||
1131 | dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", | 1131 | dev_vdbg(&slot->mmc->class_dev, "detect change: %d (was %d)\n", |
1132 | present, present_old); | 1132 | present, present_old); |
1133 | 1133 | ||
1134 | if (present != present_old) { | 1134 | if (present != present_old) { |
1135 | struct atmel_mci *host = slot->host; | 1135 | struct atmel_mci *host = slot->host; |
1136 | struct mmc_request *mrq; | 1136 | struct mmc_request *mrq; |
1137 | 1137 | ||
1138 | dev_dbg(&slot->mmc->class_dev, "card %s\n", | 1138 | dev_dbg(&slot->mmc->class_dev, "card %s\n", |
1139 | present ? "inserted" : "removed"); | 1139 | present ? "inserted" : "removed"); |
1140 | 1140 | ||
1141 | spin_lock(&host->lock); | 1141 | spin_lock(&host->lock); |
1142 | 1142 | ||
1143 | if (!present) | 1143 | if (!present) |
1144 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); | 1144 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); |
1145 | else | 1145 | else |
1146 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); | 1146 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); |
1147 | 1147 | ||
1148 | /* Clean up queue if present */ | 1148 | /* Clean up queue if present */ |
1149 | mrq = slot->mrq; | 1149 | mrq = slot->mrq; |
1150 | if (mrq) { | 1150 | if (mrq) { |
1151 | if (mrq == host->mrq) { | 1151 | if (mrq == host->mrq) { |
1152 | /* | 1152 | /* |
1153 | * Reset controller to terminate any ongoing | 1153 | * Reset controller to terminate any ongoing |
1154 | * commands or data transfers. | 1154 | * commands or data transfers. |
1155 | */ | 1155 | */ |
1156 | mci_writel(host, CR, MCI_CR_SWRST); | 1156 | mci_writel(host, CR, MCI_CR_SWRST); |
1157 | mci_writel(host, CR, MCI_CR_MCIEN); | 1157 | mci_writel(host, CR, MCI_CR_MCIEN); |
1158 | mci_writel(host, MR, host->mode_reg); | 1158 | mci_writel(host, MR, host->mode_reg); |
1159 | if (atmci_is_mci2()) | 1159 | if (atmci_is_mci2()) |
1160 | mci_writel(host, CFG, host->cfg_reg); | 1160 | mci_writel(host, CFG, host->cfg_reg); |
1161 | 1161 | ||
1162 | host->data = NULL; | 1162 | host->data = NULL; |
1163 | host->cmd = NULL; | 1163 | host->cmd = NULL; |
1164 | 1164 | ||
1165 | switch (host->state) { | 1165 | switch (host->state) { |
1166 | case STATE_IDLE: | 1166 | case STATE_IDLE: |
1167 | break; | 1167 | break; |
1168 | case STATE_SENDING_CMD: | 1168 | case STATE_SENDING_CMD: |
1169 | mrq->cmd->error = -ENOMEDIUM; | 1169 | mrq->cmd->error = -ENOMEDIUM; |
1170 | if (!mrq->data) | 1170 | if (!mrq->data) |
1171 | break; | 1171 | break; |
1172 | /* fall through */ | 1172 | /* fall through */ |
1173 | case STATE_SENDING_DATA: | 1173 | case STATE_SENDING_DATA: |
1174 | mrq->data->error = -ENOMEDIUM; | 1174 | mrq->data->error = -ENOMEDIUM; |
1175 | atmci_stop_dma(host); | 1175 | atmci_stop_dma(host); |
1176 | break; | 1176 | break; |
1177 | case STATE_DATA_BUSY: | 1177 | case STATE_DATA_BUSY: |
1178 | case STATE_DATA_ERROR: | 1178 | case STATE_DATA_ERROR: |
1179 | if (mrq->data->error == -EINPROGRESS) | 1179 | if (mrq->data->error == -EINPROGRESS) |
1180 | mrq->data->error = -ENOMEDIUM; | 1180 | mrq->data->error = -ENOMEDIUM; |
1181 | if (!mrq->stop) | 1181 | if (!mrq->stop) |
1182 | break; | 1182 | break; |
1183 | /* fall through */ | 1183 | /* fall through */ |
1184 | case STATE_SENDING_STOP: | 1184 | case STATE_SENDING_STOP: |
1185 | mrq->stop->error = -ENOMEDIUM; | 1185 | mrq->stop->error = -ENOMEDIUM; |
1186 | break; | 1186 | break; |
1187 | } | 1187 | } |
1188 | 1188 | ||
1189 | atmci_request_end(host, mrq); | 1189 | atmci_request_end(host, mrq); |
1190 | } else { | 1190 | } else { |
1191 | list_del(&slot->queue_node); | 1191 | list_del(&slot->queue_node); |
1192 | mrq->cmd->error = -ENOMEDIUM; | 1192 | mrq->cmd->error = -ENOMEDIUM; |
1193 | if (mrq->data) | 1193 | if (mrq->data) |
1194 | mrq->data->error = -ENOMEDIUM; | 1194 | mrq->data->error = -ENOMEDIUM; |
1195 | if (mrq->stop) | 1195 | if (mrq->stop) |
1196 | mrq->stop->error = -ENOMEDIUM; | 1196 | mrq->stop->error = -ENOMEDIUM; |
1197 | 1197 | ||
1198 | spin_unlock(&host->lock); | 1198 | spin_unlock(&host->lock); |
1199 | mmc_request_done(slot->mmc, mrq); | 1199 | mmc_request_done(slot->mmc, mrq); |
1200 | spin_lock(&host->lock); | 1200 | spin_lock(&host->lock); |
1201 | } | 1201 | } |
1202 | } | 1202 | } |
1203 | spin_unlock(&host->lock); | 1203 | spin_unlock(&host->lock); |
1204 | 1204 | ||
1205 | mmc_detect_change(slot->mmc, 0); | 1205 | mmc_detect_change(slot->mmc, 0); |
1206 | } | 1206 | } |
1207 | } | 1207 | } |
1208 | 1208 | ||
1209 | static void atmci_tasklet_func(unsigned long priv) | 1209 | static void atmci_tasklet_func(unsigned long priv) |
1210 | { | 1210 | { |
1211 | struct atmel_mci *host = (struct atmel_mci *)priv; | 1211 | struct atmel_mci *host = (struct atmel_mci *)priv; |
1212 | struct mmc_request *mrq = host->mrq; | 1212 | struct mmc_request *mrq = host->mrq; |
1213 | struct mmc_data *data = host->data; | 1213 | struct mmc_data *data = host->data; |
1214 | struct mmc_command *cmd = host->cmd; | 1214 | struct mmc_command *cmd = host->cmd; |
1215 | enum atmel_mci_state state = host->state; | 1215 | enum atmel_mci_state state = host->state; |
1216 | enum atmel_mci_state prev_state; | 1216 | enum atmel_mci_state prev_state; |
1217 | u32 status; | 1217 | u32 status; |
1218 | 1218 | ||
1219 | spin_lock(&host->lock); | 1219 | spin_lock(&host->lock); |
1220 | 1220 | ||
1221 | state = host->state; | 1221 | state = host->state; |
1222 | 1222 | ||
1223 | dev_vdbg(&host->pdev->dev, | 1223 | dev_vdbg(&host->pdev->dev, |
1224 | "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", | 1224 | "tasklet: state %u pending/completed/mask %lx/%lx/%x\n", |
1225 | state, host->pending_events, host->completed_events, | 1225 | state, host->pending_events, host->completed_events, |
1226 | mci_readl(host, IMR)); | 1226 | mci_readl(host, IMR)); |
1227 | 1227 | ||
1228 | do { | 1228 | do { |
1229 | prev_state = state; | 1229 | prev_state = state; |
1230 | 1230 | ||
1231 | switch (state) { | 1231 | switch (state) { |
1232 | case STATE_IDLE: | 1232 | case STATE_IDLE: |
1233 | break; | 1233 | break; |
1234 | 1234 | ||
1235 | case STATE_SENDING_CMD: | 1235 | case STATE_SENDING_CMD: |
1236 | if (!atmci_test_and_clear_pending(host, | 1236 | if (!atmci_test_and_clear_pending(host, |
1237 | EVENT_CMD_COMPLETE)) | 1237 | EVENT_CMD_COMPLETE)) |
1238 | break; | 1238 | break; |
1239 | 1239 | ||
1240 | host->cmd = NULL; | 1240 | host->cmd = NULL; |
1241 | atmci_set_completed(host, EVENT_CMD_COMPLETE); | 1241 | atmci_set_completed(host, EVENT_CMD_COMPLETE); |
1242 | atmci_command_complete(host, mrq->cmd); | 1242 | atmci_command_complete(host, mrq->cmd); |
1243 | if (!mrq->data || cmd->error) { | 1243 | if (!mrq->data || cmd->error) { |
1244 | atmci_request_end(host, host->mrq); | 1244 | atmci_request_end(host, host->mrq); |
1245 | goto unlock; | 1245 | goto unlock; |
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | prev_state = state = STATE_SENDING_DATA; | 1248 | prev_state = state = STATE_SENDING_DATA; |
1249 | /* fall through */ | 1249 | /* fall through */ |
1250 | 1250 | ||
1251 | case STATE_SENDING_DATA: | 1251 | case STATE_SENDING_DATA: |
1252 | if (atmci_test_and_clear_pending(host, | 1252 | if (atmci_test_and_clear_pending(host, |
1253 | EVENT_DATA_ERROR)) { | 1253 | EVENT_DATA_ERROR)) { |
1254 | atmci_stop_dma(host); | 1254 | atmci_stop_dma(host); |
1255 | if (data->stop) | 1255 | if (data->stop) |
1256 | send_stop_cmd(host, data); | 1256 | send_stop_cmd(host, data); |
1257 | state = STATE_DATA_ERROR; | 1257 | state = STATE_DATA_ERROR; |
1258 | break; | 1258 | break; |
1259 | } | 1259 | } |
1260 | 1260 | ||
1261 | if (!atmci_test_and_clear_pending(host, | 1261 | if (!atmci_test_and_clear_pending(host, |
1262 | EVENT_XFER_COMPLETE)) | 1262 | EVENT_XFER_COMPLETE)) |
1263 | break; | 1263 | break; |
1264 | 1264 | ||
1265 | atmci_set_completed(host, EVENT_XFER_COMPLETE); | 1265 | atmci_set_completed(host, EVENT_XFER_COMPLETE); |
1266 | prev_state = state = STATE_DATA_BUSY; | 1266 | prev_state = state = STATE_DATA_BUSY; |
1267 | /* fall through */ | 1267 | /* fall through */ |
1268 | 1268 | ||
1269 | case STATE_DATA_BUSY: | 1269 | case STATE_DATA_BUSY: |
1270 | if (!atmci_test_and_clear_pending(host, | 1270 | if (!atmci_test_and_clear_pending(host, |
1271 | EVENT_DATA_COMPLETE)) | 1271 | EVENT_DATA_COMPLETE)) |
1272 | break; | 1272 | break; |
1273 | 1273 | ||
1274 | host->data = NULL; | 1274 | host->data = NULL; |
1275 | atmci_set_completed(host, EVENT_DATA_COMPLETE); | 1275 | atmci_set_completed(host, EVENT_DATA_COMPLETE); |
1276 | status = host->data_status; | 1276 | status = host->data_status; |
1277 | if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) { | 1277 | if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) { |
1278 | if (status & MCI_DTOE) { | 1278 | if (status & MCI_DTOE) { |
1279 | dev_dbg(&host->pdev->dev, | 1279 | dev_dbg(&host->pdev->dev, |
1280 | "data timeout error\n"); | 1280 | "data timeout error\n"); |
1281 | data->error = -ETIMEDOUT; | 1281 | data->error = -ETIMEDOUT; |
1282 | } else if (status & MCI_DCRCE) { | 1282 | } else if (status & MCI_DCRCE) { |
1283 | dev_dbg(&host->pdev->dev, | 1283 | dev_dbg(&host->pdev->dev, |
1284 | "data CRC error\n"); | 1284 | "data CRC error\n"); |
1285 | data->error = -EILSEQ; | 1285 | data->error = -EILSEQ; |
1286 | } else { | 1286 | } else { |
1287 | dev_dbg(&host->pdev->dev, | 1287 | dev_dbg(&host->pdev->dev, |
1288 | "data FIFO error (status=%08x)\n", | 1288 | "data FIFO error (status=%08x)\n", |
1289 | status); | 1289 | status); |
1290 | data->error = -EIO; | 1290 | data->error = -EIO; |
1291 | } | 1291 | } |
1292 | } else { | 1292 | } else { |
1293 | data->bytes_xfered = data->blocks * data->blksz; | 1293 | data->bytes_xfered = data->blocks * data->blksz; |
1294 | data->error = 0; | 1294 | data->error = 0; |
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | if (!data->stop) { | 1297 | if (!data->stop) { |
1298 | atmci_request_end(host, host->mrq); | 1298 | atmci_request_end(host, host->mrq); |
1299 | goto unlock; | 1299 | goto unlock; |
1300 | } | 1300 | } |
1301 | 1301 | ||
1302 | prev_state = state = STATE_SENDING_STOP; | 1302 | prev_state = state = STATE_SENDING_STOP; |
1303 | if (!data->error) | 1303 | if (!data->error) |
1304 | send_stop_cmd(host, data); | 1304 | send_stop_cmd(host, data); |
1305 | /* fall through */ | 1305 | /* fall through */ |
1306 | 1306 | ||
1307 | case STATE_SENDING_STOP: | 1307 | case STATE_SENDING_STOP: |
1308 | if (!atmci_test_and_clear_pending(host, | 1308 | if (!atmci_test_and_clear_pending(host, |
1309 | EVENT_CMD_COMPLETE)) | 1309 | EVENT_CMD_COMPLETE)) |
1310 | break; | 1310 | break; |
1311 | 1311 | ||
1312 | host->cmd = NULL; | 1312 | host->cmd = NULL; |
1313 | atmci_command_complete(host, mrq->stop); | 1313 | atmci_command_complete(host, mrq->stop); |
1314 | atmci_request_end(host, host->mrq); | 1314 | atmci_request_end(host, host->mrq); |
1315 | goto unlock; | 1315 | goto unlock; |
1316 | 1316 | ||
1317 | case STATE_DATA_ERROR: | 1317 | case STATE_DATA_ERROR: |
1318 | if (!atmci_test_and_clear_pending(host, | 1318 | if (!atmci_test_and_clear_pending(host, |
1319 | EVENT_XFER_COMPLETE)) | 1319 | EVENT_XFER_COMPLETE)) |
1320 | break; | 1320 | break; |
1321 | 1321 | ||
1322 | state = STATE_DATA_BUSY; | 1322 | state = STATE_DATA_BUSY; |
1323 | break; | 1323 | break; |
1324 | } | 1324 | } |
1325 | } while (state != prev_state); | 1325 | } while (state != prev_state); |
1326 | 1326 | ||
1327 | host->state = state; | 1327 | host->state = state; |
1328 | 1328 | ||
1329 | unlock: | 1329 | unlock: |
1330 | spin_unlock(&host->lock); | 1330 | spin_unlock(&host->lock); |
1331 | } | 1331 | } |
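Annotation: for a multi-block read with a stop command, the state machine above typically walks STATE_SENDING_CMD (on EVENT_CMD_COMPLETE) -> STATE_SENDING_DATA (on EVENT_XFER_COMPLETE from DMA or PIO) -> STATE_DATA_BUSY (on EVENT_DATA_COMPLETE once NOTBUSY fires) -> STATE_SENDING_STOP (on EVENT_CMD_COMPLETE for the stop command) -> atmci_request_end(), with STATE_DATA_ERROR as the detour taken when ATMCI_DATA_ERROR_FLAGS are raised mid-transfer.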
1332 | 1332 | ||
1333 | static void atmci_read_data_pio(struct atmel_mci *host) | 1333 | static void atmci_read_data_pio(struct atmel_mci *host) |
1334 | { | 1334 | { |
1335 | struct scatterlist *sg = host->sg; | 1335 | struct scatterlist *sg = host->sg; |
1336 | void *buf = sg_virt(sg); | 1336 | void *buf = sg_virt(sg); |
1337 | unsigned int offset = host->pio_offset; | 1337 | unsigned int offset = host->pio_offset; |
1338 | struct mmc_data *data = host->data; | 1338 | struct mmc_data *data = host->data; |
1339 | u32 value; | 1339 | u32 value; |
1340 | u32 status; | 1340 | u32 status; |
1341 | unsigned int nbytes = 0; | 1341 | unsigned int nbytes = 0; |
1342 | 1342 | ||
1343 | do { | 1343 | do { |
1344 | value = mci_readl(host, RDR); | 1344 | value = mci_readl(host, RDR); |
1345 | if (likely(offset + 4 <= sg->length)) { | 1345 | if (likely(offset + 4 <= sg->length)) { |
1346 | put_unaligned(value, (u32 *)(buf + offset)); | 1346 | put_unaligned(value, (u32 *)(buf + offset)); |
1347 | 1347 | ||
1348 | offset += 4; | 1348 | offset += 4; |
1349 | nbytes += 4; | 1349 | nbytes += 4; |
1350 | 1350 | ||
1351 | if (offset == sg->length) { | 1351 | if (offset == sg->length) { |
1352 | flush_dcache_page(sg_page(sg)); | 1352 | flush_dcache_page(sg_page(sg)); |
1353 | host->sg = sg = sg_next(sg); | 1353 | host->sg = sg = sg_next(sg); |
1354 | if (!sg) | 1354 | if (!sg) |
1355 | goto done; | 1355 | goto done; |
1356 | 1356 | ||
1357 | offset = 0; | 1357 | offset = 0; |
1358 | buf = sg_virt(sg); | 1358 | buf = sg_virt(sg); |
1359 | } | 1359 | } |
1360 | } else { | 1360 | } else { |
1361 | unsigned int remaining = sg->length - offset; | 1361 | unsigned int remaining = sg->length - offset; |
1362 | memcpy(buf + offset, &value, remaining); | 1362 | memcpy(buf + offset, &value, remaining); |
1363 | nbytes += remaining; | 1363 | nbytes += remaining; |
1364 | 1364 | ||
1365 | flush_dcache_page(sg_page(sg)); | 1365 | flush_dcache_page(sg_page(sg)); |
1366 | host->sg = sg = sg_next(sg); | 1366 | host->sg = sg = sg_next(sg); |
1367 | if (!sg) | 1367 | if (!sg) |
1368 | goto done; | 1368 | goto done; |
1369 | 1369 | ||
1370 | offset = 4 - remaining; | 1370 | offset = 4 - remaining; |
1371 | buf = sg_virt(sg); | 1371 | buf = sg_virt(sg); |
1372 | memcpy(buf, (u8 *)&value + remaining, offset); | 1372 | memcpy(buf, (u8 *)&value + remaining, offset); |
1373 | nbytes += offset; | 1373 | nbytes += offset; |
1374 | } | 1374 | } |
1375 | 1375 | ||
1376 | status = mci_readl(host, SR); | 1376 | status = mci_readl(host, SR); |
1377 | if (status & ATMCI_DATA_ERROR_FLAGS) { | 1377 | if (status & ATMCI_DATA_ERROR_FLAGS) { |
1378 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY | 1378 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY |
1379 | | ATMCI_DATA_ERROR_FLAGS)); | 1379 | | ATMCI_DATA_ERROR_FLAGS)); |
1380 | host->data_status = status; | 1380 | host->data_status = status; |
1381 | data->bytes_xfered += nbytes; | 1381 | data->bytes_xfered += nbytes; |
1382 | smp_wmb(); | 1382 | smp_wmb(); |
1383 | atmci_set_pending(host, EVENT_DATA_ERROR); | 1383 | atmci_set_pending(host, EVENT_DATA_ERROR); |
1384 | tasklet_schedule(&host->tasklet); | 1384 | tasklet_schedule(&host->tasklet); |
1385 | return; | 1385 | return; |
1386 | } | 1386 | } |
1387 | } while (status & MCI_RXRDY); | 1387 | } while (status & MCI_RXRDY); |
1388 | 1388 | ||
1389 | host->pio_offset = offset; | 1389 | host->pio_offset = offset; |
1390 | data->bytes_xfered += nbytes; | 1390 | data->bytes_xfered += nbytes; |
1391 | 1391 | ||
1392 | return; | 1392 | return; |
1393 | 1393 | ||
1394 | done: | 1394 | done: |
1395 | mci_writel(host, IDR, MCI_RXRDY); | 1395 | mci_writel(host, IDR, MCI_RXRDY); |
1396 | mci_writel(host, IER, MCI_NOTBUSY); | 1396 | mci_writel(host, IER, MCI_NOTBUSY); |
1397 | data->bytes_xfered += nbytes; | 1397 | data->bytes_xfered += nbytes; |
1398 | smp_wmb(); | 1398 | smp_wmb(); |
1399 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 1399 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
1400 | } | 1400 | } |
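
The tail of atmci_read_data_pio() above handles a 32-bit FIFO word that straddles a scatterlist segment boundary: `remaining` bytes finish the current segment and the leftover bytes seed the next one. A minimal, kernel-free sketch of that split arithmetic (buf_a/buf_b stand in for two consecutive segments and are assumptions of this example, not driver names):

#include <stdint.h>
#include <string.h>

/* Split one raw FIFO word across two buffers in memory order; offset is
 * where the word starts in buf_a and len_a is buf_a's total length. */
static void split_word(uint32_t value, uint8_t *buf_a, size_t offset,
                       size_t len_a, uint8_t *buf_b)
{
        size_t remaining = len_a - offset;      /* bytes left in buf_a, < 4 */

        memcpy(buf_a + offset, &value, remaining);
        memcpy(buf_b, (uint8_t *)&value + remaining, 4 - remaining);
}
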
1401 | 1401 | ||
1402 | static void atmci_write_data_pio(struct atmel_mci *host) | 1402 | static void atmci_write_data_pio(struct atmel_mci *host) |
1403 | { | 1403 | { |
1404 | struct scatterlist *sg = host->sg; | 1404 | struct scatterlist *sg = host->sg; |
1405 | void *buf = sg_virt(sg); | 1405 | void *buf = sg_virt(sg); |
1406 | unsigned int offset = host->pio_offset; | 1406 | unsigned int offset = host->pio_offset; |
1407 | struct mmc_data *data = host->data; | 1407 | struct mmc_data *data = host->data; |
1408 | u32 value; | 1408 | u32 value; |
1409 | u32 status; | 1409 | u32 status; |
1410 | unsigned int nbytes = 0; | 1410 | unsigned int nbytes = 0; |
1411 | 1411 | ||
1412 | do { | 1412 | do { |
1413 | if (likely(offset + 4 <= sg->length)) { | 1413 | if (likely(offset + 4 <= sg->length)) { |
1414 | value = get_unaligned((u32 *)(buf + offset)); | 1414 | value = get_unaligned((u32 *)(buf + offset)); |
1415 | mci_writel(host, TDR, value); | 1415 | mci_writel(host, TDR, value); |
1416 | 1416 | ||
1417 | offset += 4; | 1417 | offset += 4; |
1418 | nbytes += 4; | 1418 | nbytes += 4; |
1419 | if (offset == sg->length) { | 1419 | if (offset == sg->length) { |
1420 | host->sg = sg = sg_next(sg); | 1420 | host->sg = sg = sg_next(sg); |
1421 | if (!sg) | 1421 | if (!sg) |
1422 | goto done; | 1422 | goto done; |
1423 | 1423 | ||
1424 | offset = 0; | 1424 | offset = 0; |
1425 | buf = sg_virt(sg); | 1425 | buf = sg_virt(sg); |
1426 | } | 1426 | } |
1427 | } else { | 1427 | } else { |
1428 | unsigned int remaining = sg->length - offset; | 1428 | unsigned int remaining = sg->length - offset; |
1429 | 1429 | ||
1430 | value = 0; | 1430 | value = 0; |
1431 | memcpy(&value, buf + offset, remaining); | 1431 | memcpy(&value, buf + offset, remaining); |
1432 | nbytes += remaining; | 1432 | nbytes += remaining; |
1433 | 1433 | ||
1434 | host->sg = sg = sg_next(sg); | 1434 | host->sg = sg = sg_next(sg); |
1435 | if (!sg) { | 1435 | if (!sg) { |
1436 | mci_writel(host, TDR, value); | 1436 | mci_writel(host, TDR, value); |
1437 | goto done; | 1437 | goto done; |
1438 | } | 1438 | } |
1439 | 1439 | ||
1440 | offset = 4 - remaining; | 1440 | offset = 4 - remaining; |
1441 | buf = sg_virt(sg); | 1441 | buf = sg_virt(sg); |
1442 | memcpy((u8 *)&value + remaining, buf, offset); | 1442 | memcpy((u8 *)&value + remaining, buf, offset); |
1443 | mci_writel(host, TDR, value); | 1443 | mci_writel(host, TDR, value); |
1444 | nbytes += offset; | 1444 | nbytes += offset; |
1445 | } | 1445 | } |
1446 | 1446 | ||
1447 | status = mci_readl(host, SR); | 1447 | status = mci_readl(host, SR); |
1448 | if (status & ATMCI_DATA_ERROR_FLAGS) { | 1448 | if (status & ATMCI_DATA_ERROR_FLAGS) { |
1449 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY | 1449 | mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY |
1450 | | ATMCI_DATA_ERROR_FLAGS)); | 1450 | | ATMCI_DATA_ERROR_FLAGS)); |
1451 | host->data_status = status; | 1451 | host->data_status = status; |
1452 | data->bytes_xfered += nbytes; | 1452 | data->bytes_xfered += nbytes; |
1453 | smp_wmb(); | 1453 | smp_wmb(); |
1454 | atmci_set_pending(host, EVENT_DATA_ERROR); | 1454 | atmci_set_pending(host, EVENT_DATA_ERROR); |
1455 | tasklet_schedule(&host->tasklet); | 1455 | tasklet_schedule(&host->tasklet); |
1456 | return; | 1456 | return; |
1457 | } | 1457 | } |
1458 | } while (status & MCI_TXRDY); | 1458 | } while (status & MCI_TXRDY); |
1459 | 1459 | ||
1460 | host->pio_offset = offset; | 1460 | host->pio_offset = offset; |
1461 | data->bytes_xfered += nbytes; | 1461 | data->bytes_xfered += nbytes; |
1462 | 1462 | ||
1463 | return; | 1463 | return; |
1464 | 1464 | ||
1465 | done: | 1465 | done: |
1466 | mci_writel(host, IDR, MCI_TXRDY); | 1466 | mci_writel(host, IDR, MCI_TXRDY); |
1467 | mci_writel(host, IER, MCI_NOTBUSY); | 1467 | mci_writel(host, IER, MCI_NOTBUSY); |
1468 | data->bytes_xfered += nbytes; | 1468 | data->bytes_xfered += nbytes; |
1469 | smp_wmb(); | 1469 | smp_wmb(); |
1470 | atmci_set_pending(host, EVENT_XFER_COMPLETE); | 1470 | atmci_set_pending(host, EVENT_XFER_COMPLETE); |
1471 | } | 1471 | } |
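
atmci_write_data_pio() mirrors the read path, with one subtlety worth calling out: when the final partial word is assembled, `value` is zeroed before the memcpy() so the pad bytes pushed into TDR are deterministic rather than stack garbage. That assembly step in isolation (a sketch; tail/tail_len are placeholders of this example):

#include <stdint.h>
#include <string.h>

/* Build the last (partial) FIFO word from tail_len bytes, zero-padded. */
static uint32_t build_tail_word(const uint8_t *tail, size_t tail_len)
{
        uint32_t value = 0;                     /* deterministic padding */

        memcpy(&value, tail, tail_len);         /* tail_len < 4 */
        return value;
}
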
1472 | 1472 | ||
1473 | static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) | 1473 | static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) |
1474 | { | 1474 | { |
1475 | mci_writel(host, IDR, MCI_CMDRDY); | 1475 | mci_writel(host, IDR, MCI_CMDRDY); |
1476 | 1476 | ||
1477 | host->cmd_status = status; | 1477 | host->cmd_status = status; |
1478 | smp_wmb(); | 1478 | smp_wmb(); |
1479 | atmci_set_pending(host, EVENT_CMD_COMPLETE); | 1479 | atmci_set_pending(host, EVENT_CMD_COMPLETE); |
1480 | tasklet_schedule(&host->tasklet); | 1480 | tasklet_schedule(&host->tasklet); |
1481 | } | 1481 | } |
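
The store/smp_wmb()/set-pending sequence above is one half of a publish-subscribe handshake with the tasklet: the status word must be globally visible before the event flag that advertises it. The consuming side pairs roughly as follows (a sketch of the required ordering, not the driver's literal tasklet code; the smp_rmb() placement is the point):

if (atmci_test_and_clear_pending(host, EVENT_CMD_COMPLETE)) {
        smp_rmb();                      /* pairs with smp_wmb() in the IRQ */
        status = host->cmd_status;      /* now guaranteed to be current */
        /* ... act on status ... */
}
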
1482 | 1482 | ||
1483 | static irqreturn_t atmci_interrupt(int irq, void *dev_id) | 1483 | static irqreturn_t atmci_interrupt(int irq, void *dev_id) |
1484 | { | 1484 | { |
1485 | struct atmel_mci *host = dev_id; | 1485 | struct atmel_mci *host = dev_id; |
1486 | u32 status, mask, pending; | 1486 | u32 status, mask, pending; |
1487 | unsigned int pass_count = 0; | 1487 | unsigned int pass_count = 0; |
1488 | 1488 | ||
1489 | do { | 1489 | do { |
1490 | status = mci_readl(host, SR); | 1490 | status = mci_readl(host, SR); |
1491 | mask = mci_readl(host, IMR); | 1491 | mask = mci_readl(host, IMR); |
1492 | pending = status & mask; | 1492 | pending = status & mask; |
1493 | if (!pending) | 1493 | if (!pending) |
1494 | break; | 1494 | break; |
1495 | 1495 | ||
1496 | if (pending & ATMCI_DATA_ERROR_FLAGS) { | 1496 | if (pending & ATMCI_DATA_ERROR_FLAGS) { |
1497 | mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS | 1497 | mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS |
1498 | | MCI_RXRDY | MCI_TXRDY); | 1498 | | MCI_RXRDY | MCI_TXRDY); |
1499 | pending &= mci_readl(host, IMR); | 1499 | pending &= mci_readl(host, IMR); |
1500 | 1500 | ||
1501 | host->data_status = status; | 1501 | host->data_status = status; |
1502 | smp_wmb(); | 1502 | smp_wmb(); |
1503 | atmci_set_pending(host, EVENT_DATA_ERROR); | 1503 | atmci_set_pending(host, EVENT_DATA_ERROR); |
1504 | tasklet_schedule(&host->tasklet); | 1504 | tasklet_schedule(&host->tasklet); |
1505 | } | 1505 | } |
1506 | if (pending & MCI_NOTBUSY) { | 1506 | if (pending & MCI_NOTBUSY) { |
1507 | mci_writel(host, IDR, | 1507 | mci_writel(host, IDR, |
1508 | ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY); | 1508 | ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY); |
1509 | if (!host->data_status) | 1509 | if (!host->data_status) |
1510 | host->data_status = status; | 1510 | host->data_status = status; |
1511 | smp_wmb(); | 1511 | smp_wmb(); |
1512 | atmci_set_pending(host, EVENT_DATA_COMPLETE); | 1512 | atmci_set_pending(host, EVENT_DATA_COMPLETE); |
1513 | tasklet_schedule(&host->tasklet); | 1513 | tasklet_schedule(&host->tasklet); |
1514 | } | 1514 | } |
1515 | if (pending & MCI_RXRDY) | 1515 | if (pending & MCI_RXRDY) |
1516 | atmci_read_data_pio(host); | 1516 | atmci_read_data_pio(host); |
1517 | if (pending & MCI_TXRDY) | 1517 | if (pending & MCI_TXRDY) |
1518 | atmci_write_data_pio(host); | 1518 | atmci_write_data_pio(host); |
1519 | 1519 | ||
1520 | if (pending & MCI_CMDRDY) | 1520 | if (pending & MCI_CMDRDY) |
1521 | atmci_cmd_interrupt(host, status); | 1521 | atmci_cmd_interrupt(host, status); |
1522 | } while (pass_count++ < 5); | 1522 | } while (pass_count++ < 5); |
1523 | 1523 | ||
1524 | return pass_count ? IRQ_HANDLED : IRQ_NONE; | 1524 | return pass_count ? IRQ_HANDLED : IRQ_NONE; |
1525 | } | 1525 | } |
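
The return expression leans on a subtlety: `break` on an empty `pending` skips the post-increment in the `while` condition, so pass_count remains 0 only when nothing at all was pending on the very first pass, which is exactly the case where a shared interrupt line must report IRQ_NONE. A more self-documenting equivalent uses an explicit flag (read_pending() and service() are hypothetical helpers standing in for the register reads and dispatch above):

#include <linux/interrupt.h>

static irqreturn_t example_isr(int irq, void *dev_id)
{
        bool handled = false;
        unsigned int passes = 0;

        while (passes++ < 5) {
                u32 pending = read_pending(dev_id);     /* hypothetical */
                if (!pending)
                        break;
                handled = true;
                service(dev_id, pending);               /* hypothetical */
        }
        return handled ? IRQ_HANDLED : IRQ_NONE;
}
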
1526 | 1526 | ||
1527 | static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) | 1527 | static irqreturn_t atmci_detect_interrupt(int irq, void *dev_id) |
1528 | { | 1528 | { |
1529 | struct atmel_mci_slot *slot = dev_id; | 1529 | struct atmel_mci_slot *slot = dev_id; |
1530 | 1530 | ||
1531 | /* | 1531 | /* |
1532 | * Disable interrupts until the pin has stabilized, then check | 1532 | * Disable interrupts until the pin has stabilized, then check
1533 | * its state. Use mod_timer() since we may be in the | 1533 | * its state. Use mod_timer() since we may be in the
1534 | * middle of the timer routine when this interrupt triggers. | 1534 | * middle of the timer routine when this interrupt triggers.
1535 | */ | 1535 | */ |
1536 | disable_irq_nosync(irq); | 1536 | disable_irq_nosync(irq); |
1537 | mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20)); | 1537 | mod_timer(&slot->detect_timer, jiffies + msecs_to_jiffies(20)); |
1538 | 1538 | ||
1539 | return IRQ_HANDLED; | 1539 | return IRQ_HANDLED; |
1540 | } | 1540 | } |
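
This is the standard GPIO debounce split: the hard IRQ only masks itself (disable_irq_nosync() is safe from IRQ context) and defers the actual pin sampling to a timer 20 ms out. The timer callback is then responsible for reading the settled pin and re-arming the line; a generic sketch of that other half (my_slot and report_state() are assumptions of this example, not the driver's atmci_detect_change()):

#include <linux/gpio.h>
#include <linux/interrupt.h>

static void debounce_timer_fn(unsigned long data)
{
        struct my_slot *slot = (struct my_slot *)data;  /* hypothetical */
        int level = gpio_get_value(slot->pin);          /* settled value */

        report_state(slot, level);                      /* hypothetical */
        enable_irq(gpio_to_irq(slot->pin));             /* undo _nosync */
}
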
1541 | 1541 | ||
1542 | static int __init atmci_init_slot(struct atmel_mci *host, | 1542 | static int __init atmci_init_slot(struct atmel_mci *host, |
1543 | struct mci_slot_pdata *slot_data, unsigned int id, | 1543 | struct mci_slot_pdata *slot_data, unsigned int id, |
1544 | u32 sdc_reg) | 1544 | u32 sdc_reg) |
1545 | { | 1545 | { |
1546 | struct mmc_host *mmc; | 1546 | struct mmc_host *mmc; |
1547 | struct atmel_mci_slot *slot; | 1547 | struct atmel_mci_slot *slot; |
1548 | 1548 | ||
1549 | mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); | 1549 | mmc = mmc_alloc_host(sizeof(struct atmel_mci_slot), &host->pdev->dev); |
1550 | if (!mmc) | 1550 | if (!mmc) |
1551 | return -ENOMEM; | 1551 | return -ENOMEM; |
1552 | 1552 | ||
1553 | slot = mmc_priv(mmc); | 1553 | slot = mmc_priv(mmc); |
1554 | slot->mmc = mmc; | 1554 | slot->mmc = mmc; |
1555 | slot->host = host; | 1555 | slot->host = host; |
1556 | slot->detect_pin = slot_data->detect_pin; | 1556 | slot->detect_pin = slot_data->detect_pin; |
1557 | slot->wp_pin = slot_data->wp_pin; | 1557 | slot->wp_pin = slot_data->wp_pin; |
1558 | slot->detect_is_active_high = slot_data->detect_is_active_high; | 1558 | slot->detect_is_active_high = slot_data->detect_is_active_high; |
1559 | slot->sdc_reg = sdc_reg; | 1559 | slot->sdc_reg = sdc_reg; |
1560 | 1560 | ||
1561 | mmc->ops = &atmci_ops; | 1561 | mmc->ops = &atmci_ops; |
1562 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); | 1562 | mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); |
1563 | mmc->f_max = host->bus_hz / 2; | 1563 | mmc->f_max = host->bus_hz / 2; |
1564 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; | 1564 | mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; |
1565 | if (slot_data->bus_width >= 4) | 1565 | if (slot_data->bus_width >= 4) |
1566 | mmc->caps |= MMC_CAP_4_BIT_DATA; | 1566 | mmc->caps |= MMC_CAP_4_BIT_DATA; |
1567 | 1567 | ||
1568 | mmc->max_hw_segs = 64; | 1568 | mmc->max_hw_segs = 64; |
1569 | mmc->max_phys_segs = 64; | 1569 | mmc->max_phys_segs = 64; |
1570 | mmc->max_req_size = 32768 * 512; | 1570 | mmc->max_req_size = 32768 * 512; |
1571 | mmc->max_blk_size = 32768; | 1571 | mmc->max_blk_size = 32768; |
1572 | mmc->max_blk_count = 512; | 1572 | mmc->max_blk_count = 512; |
1573 | 1573 | ||
1574 | /* Assume card is present initially */ | 1574 | /* Assume card is present initially */ |
1575 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); | 1575 | set_bit(ATMCI_CARD_PRESENT, &slot->flags); |
1576 | if (gpio_is_valid(slot->detect_pin)) { | 1576 | if (gpio_is_valid(slot->detect_pin)) { |
1577 | if (gpio_request(slot->detect_pin, "mmc_detect")) { | 1577 | if (gpio_request(slot->detect_pin, "mmc_detect")) { |
1578 | dev_dbg(&mmc->class_dev, "no detect pin available\n"); | 1578 | dev_dbg(&mmc->class_dev, "no detect pin available\n"); |
1579 | slot->detect_pin = -EBUSY; | 1579 | slot->detect_pin = -EBUSY; |
1580 | } else if (gpio_get_value(slot->detect_pin) ^ | 1580 | } else if (gpio_get_value(slot->detect_pin) ^ |
1581 | slot->detect_is_active_high) { | 1581 | slot->detect_is_active_high) { |
1582 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); | 1582 | clear_bit(ATMCI_CARD_PRESENT, &slot->flags); |
1583 | } | 1583 | } |
1584 | } | 1584 | } |
1585 | 1585 | ||
1586 | if (!gpio_is_valid(slot->detect_pin)) | 1586 | if (!gpio_is_valid(slot->detect_pin)) |
1587 | mmc->caps |= MMC_CAP_NEEDS_POLL; | 1587 | mmc->caps |= MMC_CAP_NEEDS_POLL; |
1588 | 1588 | ||
1589 | if (gpio_is_valid(slot->wp_pin)) { | 1589 | if (gpio_is_valid(slot->wp_pin)) { |
1590 | if (gpio_request(slot->wp_pin, "mmc_wp")) { | 1590 | if (gpio_request(slot->wp_pin, "mmc_wp")) { |
1591 | dev_dbg(&mmc->class_dev, "no WP pin available\n"); | 1591 | dev_dbg(&mmc->class_dev, "no WP pin available\n"); |
1592 | slot->wp_pin = -EBUSY; | 1592 | slot->wp_pin = -EBUSY; |
1593 | } | 1593 | } |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | host->slot[id] = slot; | 1596 | host->slot[id] = slot; |
1597 | mmc_add_host(mmc); | 1597 | mmc_add_host(mmc); |
1598 | 1598 | ||
1599 | if (gpio_is_valid(slot->detect_pin)) { | 1599 | if (gpio_is_valid(slot->detect_pin)) { |
1600 | int ret; | 1600 | int ret; |
1601 | 1601 | ||
1602 | setup_timer(&slot->detect_timer, atmci_detect_change, | 1602 | setup_timer(&slot->detect_timer, atmci_detect_change, |
1603 | (unsigned long)slot); | 1603 | (unsigned long)slot); |
1604 | 1604 | ||
1605 | ret = request_irq(gpio_to_irq(slot->detect_pin), | 1605 | ret = request_irq(gpio_to_irq(slot->detect_pin), |
1606 | atmci_detect_interrupt, | 1606 | atmci_detect_interrupt, |
1607 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | 1607 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, |
1608 | "mmc-detect", slot); | 1608 | "mmc-detect", slot); |
1609 | if (ret) { | 1609 | if (ret) { |
1610 | dev_dbg(&mmc->class_dev, | 1610 | dev_dbg(&mmc->class_dev, |
1611 | "could not request IRQ %d for detect pin\n", | 1611 | "could not request IRQ %d for detect pin\n", |
1612 | gpio_to_irq(slot->detect_pin)); | 1612 | gpio_to_irq(slot->detect_pin)); |
1613 | gpio_free(slot->detect_pin); | 1613 | gpio_free(slot->detect_pin); |
1614 | slot->detect_pin = -EBUSY; | 1614 | slot->detect_pin = -EBUSY; |
1615 | } | 1615 | } |
1616 | } | 1616 | } |
1617 | 1617 | ||
1618 | atmci_init_debugfs(slot); | 1618 | atmci_init_debugfs(slot); |
1619 | 1619 | ||
1620 | return 0; | 1620 | return 0; |
1621 | } | 1621 | } |
1622 | 1622 | ||
1623 | static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, | 1623 | static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, |
1624 | unsigned int id) | 1624 | unsigned int id) |
1625 | { | 1625 | { |
1626 | /* Debugfs stuff is cleaned up by mmc core */ | 1626 | /* Debugfs stuff is cleaned up by mmc core */ |
1627 | 1627 | ||
1628 | set_bit(ATMCI_SHUTDOWN, &slot->flags); | 1628 | set_bit(ATMCI_SHUTDOWN, &slot->flags); |
1629 | smp_wmb(); | 1629 | smp_wmb(); |
1630 | 1630 | ||
1631 | mmc_remove_host(slot->mmc); | 1631 | mmc_remove_host(slot->mmc); |
1632 | 1632 | ||
1633 | if (gpio_is_valid(slot->detect_pin)) { | 1633 | if (gpio_is_valid(slot->detect_pin)) { |
1634 | int pin = slot->detect_pin; | 1634 | int pin = slot->detect_pin; |
1635 | 1635 | ||
1636 | free_irq(gpio_to_irq(pin), slot); | 1636 | free_irq(gpio_to_irq(pin), slot); |
1637 | del_timer_sync(&slot->detect_timer); | 1637 | del_timer_sync(&slot->detect_timer); |
1638 | gpio_free(pin); | 1638 | gpio_free(pin); |
1639 | } | 1639 | } |
1640 | if (gpio_is_valid(slot->wp_pin)) | 1640 | if (gpio_is_valid(slot->wp_pin)) |
1641 | gpio_free(slot->wp_pin); | 1641 | gpio_free(slot->wp_pin); |
1642 | 1642 | ||
1643 | slot->host->slot[id] = NULL; | 1643 | slot->host->slot[id] = NULL; |
1644 | mmc_free_host(slot->mmc); | 1644 | mmc_free_host(slot->mmc); |
1645 | } | 1645 | } |
1646 | 1646 | ||
1647 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1647 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1648 | static bool filter(struct dma_chan *chan, void *slave) | 1648 | static bool filter(struct dma_chan *chan, void *slave) |
1649 | { | 1649 | { |
1650 | struct mci_dma_data *sl = slave; | 1650 | struct mci_dma_data *sl = slave; |
1651 | 1651 | ||
1652 | if (sl && find_slave_dev(sl) == chan->device->dev) { | 1652 | if (sl && find_slave_dev(sl) == chan->device->dev) { |
1653 | chan->private = slave_data_ptr(sl); | 1653 | chan->private = slave_data_ptr(sl); |
1654 | return true; | 1654 | return true; |
1655 | } else { | 1655 | } else { |
1656 | return false; | 1656 | return false; |
1657 | } | 1657 | } |
1658 | } | 1658 | } |
1659 | 1659 | ||
1660 | static void atmci_configure_dma(struct atmel_mci *host) | 1660 | static void atmci_configure_dma(struct atmel_mci *host) |
1661 | { | 1661 | { |
1662 | struct mci_platform_data *pdata; | 1662 | struct mci_platform_data *pdata; |
1663 | 1663 | ||
1664 | if (host == NULL) | 1664 | if (host == NULL) |
1665 | return; | 1665 | return; |
1666 | 1666 | ||
1667 | pdata = host->pdev->dev.platform_data; | 1667 | pdata = host->pdev->dev.platform_data; |
1668 | 1668 | ||
1669 | if (pdata && find_slave_dev(pdata->dma_slave)) { | 1669 | if (pdata && find_slave_dev(pdata->dma_slave)) { |
1670 | dma_cap_mask_t mask; | 1670 | dma_cap_mask_t mask; |
1671 | 1671 | ||
1672 | setup_dma_addr(pdata->dma_slave, | 1672 | setup_dma_addr(pdata->dma_slave, |
1673 | host->mapbase + MCI_TDR, | 1673 | host->mapbase + MCI_TDR, |
1674 | host->mapbase + MCI_RDR); | 1674 | host->mapbase + MCI_RDR); |
1675 | 1675 | ||
1676 | /* Try to grab a DMA channel */ | 1676 | /* Try to grab a DMA channel */ |
1677 | dma_cap_zero(mask); | 1677 | dma_cap_zero(mask); |
1678 | dma_cap_set(DMA_SLAVE, mask); | 1678 | dma_cap_set(DMA_SLAVE, mask); |
1679 | host->dma.chan = | 1679 | host->dma.chan = |
1680 | dma_request_channel(mask, filter, pdata->dma_slave); | 1680 | dma_request_channel(mask, filter, pdata->dma_slave); |
1681 | } | 1681 | } |
1682 | if (!host->dma.chan) | 1682 | if (!host->dma.chan) |
1683 | dev_notice(&host->pdev->dev, "DMA not available, using PIO\n"); | 1683 | dev_notice(&host->pdev->dev, "DMA not available, using PIO\n"); |
1684 | else | 1684 | else |
1685 | dev_info(&host->pdev->dev, | 1685 | dev_info(&host->pdev->dev, |
1686 | "Using %s for DMA transfers\n", | 1686 | "Using %s for DMA transfers\n", |
1687 | dma_chan_name(host->dma.chan)); | 1687 | dma_chan_name(host->dma.chan)); |
1688 | } | 1688 | } |
1689 | #else | 1689 | #else |
1690 | static void atmci_configure_dma(struct atmel_mci *host) {} | 1690 | static void atmci_configure_dma(struct atmel_mci *host) {} |
1691 | #endif | 1691 | #endif |
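
atmci_configure_dma() shows the classic dmaengine acquisition dance: build a capability mask, then let dma_request_channel() walk the registered channels and call the filter on each candidate until one is accepted. Stripped to its essentials (the filter and its private data are passed through as opaque placeholders here):

#include <linux/dmaengine.h>

static struct dma_chan *grab_slave_chan(bool (*filter)(struct dma_chan *,
                                                       void *),
                                        void *slave)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
        return dma_request_channel(mask, filter, slave); /* NULL => PIO */
}
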
1692 | 1692 | ||
1693 | static int __init atmci_probe(struct platform_device *pdev) | 1693 | static int __init atmci_probe(struct platform_device *pdev) |
1694 | { | 1694 | { |
1695 | struct mci_platform_data *pdata; | 1695 | struct mci_platform_data *pdata; |
1696 | struct atmel_mci *host; | 1696 | struct atmel_mci *host; |
1697 | struct resource *regs; | 1697 | struct resource *regs; |
1698 | unsigned int nr_slots; | 1698 | unsigned int nr_slots; |
1699 | int irq; | 1699 | int irq; |
1700 | int ret; | 1700 | int ret; |
1701 | 1701 | ||
1702 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1702 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1703 | if (!regs) | 1703 | if (!regs) |
1704 | return -ENXIO; | 1704 | return -ENXIO; |
1705 | pdata = pdev->dev.platform_data; | 1705 | pdata = pdev->dev.platform_data; |
1706 | if (!pdata) | 1706 | if (!pdata) |
1707 | return -ENXIO; | 1707 | return -ENXIO; |
1708 | irq = platform_get_irq(pdev, 0); | 1708 | irq = platform_get_irq(pdev, 0); |
1709 | if (irq < 0) | 1709 | if (irq < 0) |
1710 | return irq; | 1710 | return irq; |
1711 | 1711 | ||
1712 | host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL); | 1712 | host = kzalloc(sizeof(struct atmel_mci), GFP_KERNEL); |
1713 | if (!host) | 1713 | if (!host) |
1714 | return -ENOMEM; | 1714 | return -ENOMEM; |
1715 | 1715 | ||
1716 | host->pdev = pdev; | 1716 | host->pdev = pdev; |
1717 | spin_lock_init(&host->lock); | 1717 | spin_lock_init(&host->lock); |
1718 | INIT_LIST_HEAD(&host->queue); | 1718 | INIT_LIST_HEAD(&host->queue); |
1719 | 1719 | ||
1720 | host->mck = clk_get(&pdev->dev, "mci_clk"); | 1720 | host->mck = clk_get(&pdev->dev, "mci_clk"); |
1721 | if (IS_ERR(host->mck)) { | 1721 | if (IS_ERR(host->mck)) { |
1722 | ret = PTR_ERR(host->mck); | 1722 | ret = PTR_ERR(host->mck); |
1723 | goto err_clk_get; | 1723 | goto err_clk_get; |
1724 | } | 1724 | } |
1725 | 1725 | ||
1726 | ret = -ENOMEM; | 1726 | ret = -ENOMEM; |
1727 | host->regs = ioremap(regs->start, regs->end - regs->start + 1); | 1727 | host->regs = ioremap(regs->start, regs->end - regs->start + 1); |
1728 | if (!host->regs) | 1728 | if (!host->regs) |
1729 | goto err_ioremap; | 1729 | goto err_ioremap; |
1730 | 1730 | ||
1731 | clk_enable(host->mck); | 1731 | clk_enable(host->mck); |
1732 | mci_writel(host, CR, MCI_CR_SWRST); | 1732 | mci_writel(host, CR, MCI_CR_SWRST); |
1733 | host->bus_hz = clk_get_rate(host->mck); | 1733 | host->bus_hz = clk_get_rate(host->mck); |
1734 | clk_disable(host->mck); | 1734 | clk_disable(host->mck); |
1735 | 1735 | ||
1736 | host->mapbase = regs->start; | 1736 | host->mapbase = regs->start; |
1737 | 1737 | ||
1738 | tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); | 1738 | tasklet_init(&host->tasklet, atmci_tasklet_func, (unsigned long)host); |
1739 | 1739 | ||
1740 | ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host); | 1740 | ret = request_irq(irq, atmci_interrupt, 0, dev_name(&pdev->dev), host); |
1741 | if (ret) | 1741 | if (ret) |
1742 | goto err_request_irq; | 1742 | goto err_request_irq; |
1743 | 1743 | ||
1744 | atmci_configure_dma(host); | 1744 | atmci_configure_dma(host); |
1745 | 1745 | ||
1746 | platform_set_drvdata(pdev, host); | 1746 | platform_set_drvdata(pdev, host); |
1747 | 1747 | ||
1748 | /* We need at least one slot to succeed */ | 1748 | /* We need at least one slot to succeed */ |
1749 | nr_slots = 0; | 1749 | nr_slots = 0; |
1750 | ret = -ENODEV; | 1750 | ret = -ENODEV; |
1751 | if (pdata->slot[0].bus_width) { | 1751 | if (pdata->slot[0].bus_width) { |
1752 | ret = atmci_init_slot(host, &pdata->slot[0], | 1752 | ret = atmci_init_slot(host, &pdata->slot[0], |
1753 | MCI_SDCSEL_SLOT_A, 0); | 1753 | MCI_SDCSEL_SLOT_A, 0); |
1754 | if (!ret) | 1754 | if (!ret) |
1755 | nr_slots++; | 1755 | nr_slots++; |
1756 | } | 1756 | } |
1757 | if (pdata->slot[1].bus_width) { | 1757 | if (pdata->slot[1].bus_width) { |
1758 | ret = atmci_init_slot(host, &pdata->slot[1], | 1758 | ret = atmci_init_slot(host, &pdata->slot[1], |
1759 | MCI_SDCSEL_SLOT_B, 1); | 1759 | MCI_SDCSEL_SLOT_B, 1); |
1760 | if (!ret) | 1760 | if (!ret) |
1761 | nr_slots++; | 1761 | nr_slots++; |
1762 | } | 1762 | } |
1763 | 1763 | ||
1764 | if (!nr_slots) { | 1764 | if (!nr_slots) { |
1765 | dev_err(&pdev->dev, "init failed: no slot defined\n"); | 1765 | dev_err(&pdev->dev, "init failed: no slot defined\n"); |
1766 | goto err_init_slot; | 1766 | goto err_init_slot; |
1767 | } | 1767 | } |
1768 | 1768 | ||
1769 | dev_info(&pdev->dev, | 1769 | dev_info(&pdev->dev, |
1770 | "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", | 1770 | "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", |
1771 | host->mapbase, irq, nr_slots); | 1771 | host->mapbase, irq, nr_slots); |
1772 | 1772 | ||
1773 | return 0; | 1773 | return 0; |
1774 | 1774 | ||
1775 | err_init_slot: | 1775 | err_init_slot: |
1776 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1776 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1777 | if (host->dma.chan) | 1777 | if (host->dma.chan) |
1778 | dma_release_channel(host->dma.chan); | 1778 | dma_release_channel(host->dma.chan); |
1779 | #endif | 1779 | #endif |
1780 | free_irq(irq, host); | 1780 | free_irq(irq, host); |
1781 | err_request_irq: | 1781 | err_request_irq: |
1782 | iounmap(host->regs); | 1782 | iounmap(host->regs); |
1783 | err_ioremap: | 1783 | err_ioremap: |
1784 | clk_put(host->mck); | 1784 | clk_put(host->mck); |
1785 | err_clk_get: | 1785 | err_clk_get: |
1786 | kfree(host); | 1786 | kfree(host); |
1787 | return ret; | 1787 | return ret; |
1788 | } | 1788 | } |
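
The error path of atmci_probe() is the canonical reverse-order goto unwind: each label undoes exactly the setup steps that had completed when the failure occurred, so resources are released once and in LIFO order. The skeleton, abstracted (step_*/undo_* are placeholders for calls like clk_get()/clk_put() above):

static int example_probe(void)
{
        int ret;

        ret = step_a();                 /* e.g. clk_get() */
        if (ret)
                return ret;             /* nothing to undo yet */
        ret = step_b();                 /* e.g. ioremap() */
        if (ret)
                goto err_a;
        ret = step_c();                 /* e.g. request_irq() */
        if (ret)
                goto err_b;
        return 0;

err_b:
        undo_b();                       /* e.g. iounmap() */
err_a:
        undo_a();                       /* e.g. clk_put() */
        return ret;
}
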
1789 | 1789 | ||
1790 | static int __exit atmci_remove(struct platform_device *pdev) | 1790 | static int __exit atmci_remove(struct platform_device *pdev) |
1791 | { | 1791 | { |
1792 | struct atmel_mci *host = platform_get_drvdata(pdev); | 1792 | struct atmel_mci *host = platform_get_drvdata(pdev); |
1793 | unsigned int i; | 1793 | unsigned int i; |
1794 | 1794 | ||
1795 | platform_set_drvdata(pdev, NULL); | 1795 | platform_set_drvdata(pdev, NULL); |
1796 | 1796 | ||
1797 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { | 1797 | for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { |
1798 | if (host->slot[i]) | 1798 | if (host->slot[i]) |
1799 | atmci_cleanup_slot(host->slot[i], i); | 1799 | atmci_cleanup_slot(host->slot[i], i); |
1800 | } | 1800 | } |
1801 | 1801 | ||
1802 | clk_enable(host->mck); | 1802 | clk_enable(host->mck); |
1803 | mci_writel(host, IDR, ~0UL); | 1803 | mci_writel(host, IDR, ~0UL); |
1804 | mci_writel(host, CR, MCI_CR_MCIDIS); | 1804 | mci_writel(host, CR, MCI_CR_MCIDIS); |
1805 | mci_readl(host, SR); | 1805 | mci_readl(host, SR); |
1806 | clk_disable(host->mck); | 1806 | clk_disable(host->mck); |
1807 | 1807 | ||
1808 | #ifdef CONFIG_MMC_ATMELMCI_DMA | 1808 | #ifdef CONFIG_MMC_ATMELMCI_DMA |
1809 | if (host->dma.chan) | 1809 | if (host->dma.chan) |
1810 | dma_release_channel(host->dma.chan); | 1810 | dma_release_channel(host->dma.chan); |
1811 | #endif | 1811 | #endif |
1812 | 1812 | ||
1813 | free_irq(platform_get_irq(pdev, 0), host); | 1813 | free_irq(platform_get_irq(pdev, 0), host); |
1814 | iounmap(host->regs); | 1814 | iounmap(host->regs); |
1815 | 1815 | ||
1816 | clk_put(host->mck); | 1816 | clk_put(host->mck); |
1817 | kfree(host); | 1817 | kfree(host); |
1818 | 1818 | ||
1819 | return 0; | 1819 | return 0; |
1820 | } | 1820 | } |
1821 | 1821 | ||
1822 | static struct platform_driver atmci_driver = { | 1822 | static struct platform_driver atmci_driver = { |
1823 | .remove = __exit_p(atmci_remove), | 1823 | .remove = __exit_p(atmci_remove), |
1824 | .driver = { | 1824 | .driver = { |
1825 | .name = "atmel_mci", | 1825 | .name = "atmel_mci", |
1826 | }, | 1826 | }, |
1827 | }; | 1827 | }; |
1828 | 1828 | ||
1829 | static int __init atmci_init(void) | 1829 | static int __init atmci_init(void) |
1830 | { | 1830 | { |
1831 | return platform_driver_probe(&atmci_driver, atmci_probe); | 1831 | return platform_driver_probe(&atmci_driver, atmci_probe); |
1832 | } | 1832 | } |
1833 | 1833 | ||
1834 | static void __exit atmci_exit(void) | 1834 | static void __exit atmci_exit(void) |
1835 | { | 1835 | { |
1836 | platform_driver_unregister(&atmci_driver); | 1836 | platform_driver_unregister(&atmci_driver); |
1837 | } | 1837 | } |
1838 | 1838 | ||
1839 | late_initcall(atmci_init); /* try to load after dma driver when built-in */ | 1839 | late_initcall(atmci_init); /* try to load after dma driver when built-in */ |
1840 | module_exit(atmci_exit); | 1840 | module_exit(atmci_exit); |
1841 | 1841 | ||
1842 | MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); | 1842 | MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); |
1843 | MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); | 1843 | MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); |
1844 | MODULE_LICENSE("GPL v2"); | 1844 | MODULE_LICENSE("GPL v2"); |
1845 | 1845 |
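
One registration detail worth noting: because atmci_probe() is marked __init, the driver registers through platform_driver_probe() instead of filling in a .probe method, and defers to late_initcall() so a built-in DMA driver has a chance to register its channels first. The trade-off is that devices appearing after boot can never bind; a driver needing late binding would keep its probe resident and register conventionally, roughly (a generic sketch, not this driver):

static struct platform_driver example_driver = {
        .probe  = example_probe,        /* must not be __init */
        .remove = __exit_p(example_remove),
        .driver = { .name = "example" },
};

/* module_init -> platform_driver_register(&example_driver), which keeps
 * the probe callback around so late-appearing devices can still bind */
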
drivers/serial/sh-sci.c
1 | /* | 1 | /* |
2 | * drivers/serial/sh-sci.c | 2 | * drivers/serial/sh-sci.c |
3 | * | 3 | * |
4 | * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) | 4 | * SuperH on-chip serial module support. (SCI with no FIFO / with FIFO) |
5 | * | 5 | * |
6 | * Copyright (C) 2002 - 2008 Paul Mundt | 6 | * Copyright (C) 2002 - 2008 Paul Mundt |
7 | * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007). | 7 | * Modified to support SH7720 SCIF. Markus Brunner, Mark Jonas (Jul 2007). |
8 | * | 8 | * |
9 | * based on the old drivers/char/sh-sci.c by: | 9 | * based on the old drivers/char/sh-sci.c by:
10 | * | 10 | * |
11 | * Copyright (C) 1999, 2000 Niibe Yutaka | 11 | * Copyright (C) 1999, 2000 Niibe Yutaka |
12 | * Copyright (C) 2000 Sugioka Toshinobu | 12 | * Copyright (C) 2000 Sugioka Toshinobu |
13 | * Modified to support multiple serial ports. Stuart Menefy (May 2000). | 13 | * Modified to support multiple serial ports. Stuart Menefy (May 2000). |
14 | * Modified to support SecureEdge. David McCullough (2002) | 14 | * Modified to support SecureEdge. David McCullough (2002) |
15 | * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003). | 15 | * Modified to support SH7300 SCIF. Takashi Kusuda (Jun 2003). |
16 | * Removed SH7300 support (Jul 2007). | 16 | * Removed SH7300 support (Jul 2007). |
17 | * | 17 | * |
18 | * This file is subject to the terms and conditions of the GNU General Public | 18 | * This file is subject to the terms and conditions of the GNU General Public |
19 | * License. See the file "COPYING" in the main directory of this archive | 19 | * License. See the file "COPYING" in the main directory of this archive |
20 | * for more details. | 20 | * for more details. |
21 | */ | 21 | */ |
22 | #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 22 | #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
23 | #define SUPPORT_SYSRQ | 23 | #define SUPPORT_SYSRQ |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #undef DEBUG | 26 | #undef DEBUG |
27 | 27 | ||
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/errno.h> | 29 | #include <linux/errno.h> |
30 | #include <linux/timer.h> | 30 | #include <linux/timer.h> |
31 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
32 | #include <linux/tty.h> | 32 | #include <linux/tty.h> |
33 | #include <linux/tty_flip.h> | 33 | #include <linux/tty_flip.h> |
34 | #include <linux/serial.h> | 34 | #include <linux/serial.h> |
35 | #include <linux/major.h> | 35 | #include <linux/major.h> |
36 | #include <linux/string.h> | 36 | #include <linux/string.h> |
37 | #include <linux/sysrq.h> | 37 | #include <linux/sysrq.h> |
38 | #include <linux/ioport.h> | 38 | #include <linux/ioport.h> |
39 | #include <linux/mm.h> | 39 | #include <linux/mm.h> |
40 | #include <linux/init.h> | 40 | #include <linux/init.h> |
41 | #include <linux/delay.h> | 41 | #include <linux/delay.h> |
42 | #include <linux/console.h> | 42 | #include <linux/console.h> |
43 | #include <linux/platform_device.h> | 43 | #include <linux/platform_device.h> |
44 | #include <linux/serial_sci.h> | 44 | #include <linux/serial_sci.h> |
45 | #include <linux/notifier.h> | 45 | #include <linux/notifier.h> |
46 | #include <linux/cpufreq.h> | 46 | #include <linux/cpufreq.h> |
47 | #include <linux/clk.h> | 47 | #include <linux/clk.h> |
48 | #include <linux/ctype.h> | 48 | #include <linux/ctype.h> |
49 | #include <linux/err.h> | 49 | #include <linux/err.h> |
50 | #include <linux/list.h> | 50 | #include <linux/list.h> |
51 | #include <linux/dmaengine.h> | 51 | #include <linux/dmaengine.h> |
52 | #include <linux/scatterlist.h> | 52 | #include <linux/scatterlist.h> |
53 | 53 | ||
54 | #ifdef CONFIG_SUPERH | 54 | #ifdef CONFIG_SUPERH |
55 | #include <asm/sh_bios.h> | 55 | #include <asm/sh_bios.h> |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #ifdef CONFIG_H8300 | 58 | #ifdef CONFIG_H8300 |
59 | #include <asm/gpio.h> | 59 | #include <asm/gpio.h> |
60 | #endif | 60 | #endif |
61 | 61 | ||
62 | #include "sh-sci.h" | 62 | #include "sh-sci.h" |
63 | 63 | ||
64 | struct sci_port { | 64 | struct sci_port { |
65 | struct uart_port port; | 65 | struct uart_port port; |
66 | 66 | ||
67 | /* Port type */ | 67 | /* Port type */ |
68 | unsigned int type; | 68 | unsigned int type; |
69 | 69 | ||
70 | /* Port IRQs: ERI, RXI, TXI, BRI (optional) */ | 70 | /* Port IRQs: ERI, RXI, TXI, BRI (optional) */ |
71 | unsigned int irqs[SCIx_NR_IRQS]; | 71 | unsigned int irqs[SCIx_NR_IRQS]; |
72 | 72 | ||
73 | /* Port enable callback */ | 73 | /* Port enable callback */ |
74 | void (*enable)(struct uart_port *port); | 74 | void (*enable)(struct uart_port *port); |
75 | 75 | ||
76 | /* Port disable callback */ | 76 | /* Port disable callback */ |
77 | void (*disable)(struct uart_port *port); | 77 | void (*disable)(struct uart_port *port); |
78 | 78 | ||
79 | /* Break timer */ | 79 | /* Break timer */ |
80 | struct timer_list break_timer; | 80 | struct timer_list break_timer; |
81 | int break_flag; | 81 | int break_flag; |
82 | 82 | ||
83 | /* Interface clock */ | 83 | /* Interface clock */ |
84 | struct clk *iclk; | 84 | struct clk *iclk; |
85 | /* Data clock */ | 85 | /* Data clock */ |
86 | struct clk *dclk; | 86 | struct clk *dclk; |
87 | 87 | ||
88 | struct list_head node; | 88 | struct list_head node; |
89 | struct dma_chan *chan_tx; | 89 | struct dma_chan *chan_tx; |
90 | struct dma_chan *chan_rx; | 90 | struct dma_chan *chan_rx; |
91 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 91 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
92 | struct device *dma_dev; | 92 | struct device *dma_dev; |
93 | enum sh_dmae_slave_chan_id slave_tx; | 93 | enum sh_dmae_slave_chan_id slave_tx; |
94 | enum sh_dmae_slave_chan_id slave_rx; | 94 | enum sh_dmae_slave_chan_id slave_rx; |
95 | struct dma_async_tx_descriptor *desc_tx; | 95 | struct dma_async_tx_descriptor *desc_tx; |
96 | struct dma_async_tx_descriptor *desc_rx[2]; | 96 | struct dma_async_tx_descriptor *desc_rx[2]; |
97 | dma_cookie_t cookie_tx; | 97 | dma_cookie_t cookie_tx; |
98 | dma_cookie_t cookie_rx[2]; | 98 | dma_cookie_t cookie_rx[2]; |
99 | dma_cookie_t active_rx; | 99 | dma_cookie_t active_rx; |
100 | struct scatterlist sg_tx; | 100 | struct scatterlist sg_tx; |
101 | unsigned int sg_len_tx; | 101 | unsigned int sg_len_tx; |
102 | struct scatterlist sg_rx[2]; | 102 | struct scatterlist sg_rx[2]; |
103 | size_t buf_len_rx; | 103 | size_t buf_len_rx; |
104 | struct sh_dmae_slave param_tx; | 104 | struct sh_dmae_slave param_tx; |
105 | struct sh_dmae_slave param_rx; | 105 | struct sh_dmae_slave param_rx; |
106 | struct work_struct work_tx; | 106 | struct work_struct work_tx; |
107 | struct work_struct work_rx; | 107 | struct work_struct work_rx; |
108 | struct timer_list rx_timer; | 108 | struct timer_list rx_timer; |
109 | #endif | 109 | #endif |
110 | }; | 110 | }; |
111 | 111 | ||
112 | struct sh_sci_priv { | 112 | struct sh_sci_priv { |
113 | spinlock_t lock; | 113 | spinlock_t lock; |
114 | struct list_head ports; | 114 | struct list_head ports; |
115 | struct notifier_block clk_nb; | 115 | struct notifier_block clk_nb; |
116 | }; | 116 | }; |
117 | 117 | ||
118 | /* Function prototypes */ | 118 | /* Function prototypes */ |
119 | static void sci_stop_tx(struct uart_port *port); | 119 | static void sci_stop_tx(struct uart_port *port); |
120 | 120 | ||
121 | #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS | 121 | #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS |
122 | 122 | ||
123 | static struct sci_port sci_ports[SCI_NPORTS]; | 123 | static struct sci_port sci_ports[SCI_NPORTS]; |
124 | static struct uart_driver sci_uart_driver; | 124 | static struct uart_driver sci_uart_driver; |
125 | 125 | ||
126 | static inline struct sci_port * | 126 | static inline struct sci_port * |
127 | to_sci_port(struct uart_port *uart) | 127 | to_sci_port(struct uart_port *uart) |
128 | { | 128 | { |
129 | return container_of(uart, struct sci_port, port); | 129 | return container_of(uart, struct sci_port, port); |
130 | } | 130 | } |
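
to_sci_port() is the usual container_of() downcast: given a pointer to the embedded uart_port, it recovers the enclosing sci_port by subtracting the member's offset. A freestanding sketch of the same idiom:

#include <stddef.h>

/* Recover a pointer to the enclosing struct from a pointer to one of
 * its members, the same trick the kernel's container_of() performs. */
#define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct outer { int x; struct inner { int y; } in; };
/* given struct outer o: my_container_of(&o.in, struct outer, in) == &o */
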
131 | 131 | ||
132 | #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) | 132 | #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) |
133 | 133 | ||
134 | #ifdef CONFIG_CONSOLE_POLL | 134 | #ifdef CONFIG_CONSOLE_POLL |
135 | static inline void handle_error(struct uart_port *port) | 135 | static inline void handle_error(struct uart_port *port) |
136 | { | 136 | { |
137 | /* Clear error flags */ | 137 | /* Clear error flags */ |
138 | sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); | 138 | sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); |
139 | } | 139 | } |
140 | 140 | ||
141 | static int sci_poll_get_char(struct uart_port *port) | 141 | static int sci_poll_get_char(struct uart_port *port) |
142 | { | 142 | { |
143 | unsigned short status; | 143 | unsigned short status; |
144 | int c; | 144 | int c; |
145 | 145 | ||
146 | do { | 146 | do { |
147 | status = sci_in(port, SCxSR); | 147 | status = sci_in(port, SCxSR); |
148 | if (status & SCxSR_ERRORS(port)) { | 148 | if (status & SCxSR_ERRORS(port)) { |
149 | handle_error(port); | 149 | handle_error(port); |
150 | continue; | 150 | continue; |
151 | } | 151 | } |
152 | } while (!(status & SCxSR_RDxF(port))); | 152 | } while (!(status & SCxSR_RDxF(port))); |
153 | 153 | ||
154 | c = sci_in(port, SCxRDR); | 154 | c = sci_in(port, SCxRDR); |
155 | 155 | ||
156 | /* Dummy read */ | 156 | /* Dummy read */ |
157 | sci_in(port, SCxSR); | 157 | sci_in(port, SCxSR); |
158 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); | 158 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); |
159 | 159 | ||
160 | return c; | 160 | return c; |
161 | } | 161 | } |
162 | #endif | 162 | #endif |
163 | 163 | ||
164 | static void sci_poll_put_char(struct uart_port *port, unsigned char c) | 164 | static void sci_poll_put_char(struct uart_port *port, unsigned char c) |
165 | { | 165 | { |
166 | unsigned short status; | 166 | unsigned short status; |
167 | 167 | ||
168 | do { | 168 | do { |
169 | status = sci_in(port, SCxSR); | 169 | status = sci_in(port, SCxSR); |
170 | } while (!(status & SCxSR_TDxE(port))); | 170 | } while (!(status & SCxSR_TDxE(port))); |
171 | 171 | ||
172 | sci_out(port, SCxTDR, c); | 172 | sci_out(port, SCxTDR, c); |
173 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port)); | 173 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port)); |
174 | } | 174 | } |
175 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ | 175 | #endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */ |
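
sci_poll_put_char() is the textbook polled-console write: spin on the transmit-data-register-empty flag (interrupts may be off entirely), emit one byte, then acknowledge. The driver's loop busy-waits bare; a sketch with the customary cpu_relax() added (the cpu_relax() is an addition of this example, not present above):

static void poll_put_char(struct uart_port *port, unsigned char c)
{
        while (!(sci_in(port, SCxSR) & SCxSR_TDxE(port)))
                cpu_relax();    /* ease SMT/bus pressure while spinning */

        sci_out(port, SCxTDR, c);
        sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
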
176 | 176 | ||
177 | #if defined(__H8300H__) || defined(__H8300S__) | 177 | #if defined(__H8300H__) || defined(__H8300S__) |
178 | static void sci_init_pins(struct uart_port *port, unsigned int cflag) | 178 | static void sci_init_pins(struct uart_port *port, unsigned int cflag) |
179 | { | 179 | { |
180 | int ch = (port->mapbase - SMR0) >> 3; | 180 | int ch = (port->mapbase - SMR0) >> 3; |
181 | 181 | ||
182 | /* set DDR regs */ | 182 | /* set DDR regs */ |
183 | H8300_GPIO_DDR(h8300_sci_pins[ch].port, | 183 | H8300_GPIO_DDR(h8300_sci_pins[ch].port, |
184 | h8300_sci_pins[ch].rx, | 184 | h8300_sci_pins[ch].rx, |
185 | H8300_GPIO_INPUT); | 185 | H8300_GPIO_INPUT); |
186 | H8300_GPIO_DDR(h8300_sci_pins[ch].port, | 186 | H8300_GPIO_DDR(h8300_sci_pins[ch].port, |
187 | h8300_sci_pins[ch].tx, | 187 | h8300_sci_pins[ch].tx, |
188 | H8300_GPIO_OUTPUT); | 188 | H8300_GPIO_OUTPUT); |
189 | 189 | ||
190 | /* tx mark output */ | 190 | /* tx mark output */
191 | H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx; | 191 | H8300_SCI_DR(ch) |= h8300_sci_pins[ch].tx; |
192 | } | 192 | } |
193 | #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) | 193 | #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712) |
194 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | 194 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
195 | { | 195 | { |
196 | if (port->mapbase == 0xA4400000) { | 196 | if (port->mapbase == 0xA4400000) { |
197 | __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); | 197 | __raw_writew(__raw_readw(PACR) & 0xffc0, PACR); |
198 | __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); | 198 | __raw_writew(__raw_readw(PBCR) & 0x0fff, PBCR); |
199 | } else if (port->mapbase == 0xA4410000) | 199 | } else if (port->mapbase == 0xA4410000) |
200 | __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); | 200 | __raw_writew(__raw_readw(PBCR) & 0xf003, PBCR); |
201 | } | 201 | } |
202 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721) | 202 | #elif defined(CONFIG_CPU_SUBTYPE_SH7720) || defined(CONFIG_CPU_SUBTYPE_SH7721) |
203 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | 203 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
204 | { | 204 | { |
205 | unsigned short data; | 205 | unsigned short data; |
206 | 206 | ||
207 | if (cflag & CRTSCTS) { | 207 | if (cflag & CRTSCTS) { |
208 | /* enable RTS/CTS */ | 208 | /* enable RTS/CTS */ |
209 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ | 209 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ |
210 | /* Clear PTCR bit 9-2; enable all scif pins but sck */ | 210 | /* Clear PTCR bit 9-2; enable all scif pins but sck */ |
211 | data = __raw_readw(PORT_PTCR); | 211 | data = __raw_readw(PORT_PTCR); |
212 | __raw_writew((data & 0xfc03), PORT_PTCR); | 212 | __raw_writew((data & 0xfc03), PORT_PTCR); |
213 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ | 213 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ |
214 | /* Clear PVCR bit 9-2 */ | 214 | /* Clear PVCR bit 9-2 */ |
215 | data = __raw_readw(PORT_PVCR); | 215 | data = __raw_readw(PORT_PVCR); |
216 | __raw_writew((data & 0xfc03), PORT_PVCR); | 216 | __raw_writew((data & 0xfc03), PORT_PVCR); |
217 | } | 217 | } |
218 | } else { | 218 | } else { |
219 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ | 219 | if (port->mapbase == 0xa4430000) { /* SCIF0 */ |
220 | /* Clear PTCR bit 5-2; enable only tx and rx */ | 220 | /* Clear PTCR bit 5-2; enable only tx and rx */ |
221 | data = __raw_readw(PORT_PTCR); | 221 | data = __raw_readw(PORT_PTCR); |
222 | __raw_writew((data & 0xffc3), PORT_PTCR); | 222 | __raw_writew((data & 0xffc3), PORT_PTCR); |
223 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ | 223 | } else if (port->mapbase == 0xa4438000) { /* SCIF1 */ |
224 | /* Clear PVCR bit 5-2 */ | 224 | /* Clear PVCR bit 5-2 */ |
225 | data = __raw_readw(PORT_PVCR); | 225 | data = __raw_readw(PORT_PVCR); |
226 | __raw_writew((data & 0xffc3), PORT_PVCR); | 226 | __raw_writew((data & 0xffc3), PORT_PVCR); |
227 | } | 227 | } |
228 | } | 228 | } |
229 | } | 229 | } |
230 | #elif defined(CONFIG_CPU_SH3) | 230 | #elif defined(CONFIG_CPU_SH3) |
231 | /* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ | 231 | /* For SH7705, SH7706, SH7707, SH7709, SH7709A, SH7729 */ |
232 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | 232 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
233 | { | 233 | { |
234 | unsigned short data; | 234 | unsigned short data; |
235 | 235 | ||
236 | /* We need to set SCPCR to enable RTS/CTS */ | 236 | /* We need to set SCPCR to enable RTS/CTS */ |
237 | data = __raw_readw(SCPCR); | 237 | data = __raw_readw(SCPCR); |
238 | /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ | 238 | /* Clear out SCP7MD1,0, SCP6MD1,0, SCP4MD1,0*/ |
239 | __raw_writew(data & 0x0fcf, SCPCR); | 239 | __raw_writew(data & 0x0fcf, SCPCR); |
240 | 240 | ||
241 | if (!(cflag & CRTSCTS)) { | 241 | if (!(cflag & CRTSCTS)) { |
242 | /* We need to set SCPCR to enable RTS/CTS */ | 242 | /* We need to set SCPCR to enable RTS/CTS */ |
243 | data = __raw_readw(SCPCR); | 243 | data = __raw_readw(SCPCR); |
244 | /* Clear out SCP7MD1,0, SCP4MD1,0, | 244 | /* Clear out SCP7MD1,0, SCP4MD1,0, |
245 | Set SCP6MD1,0 = {01} (output) */ | 245 | Set SCP6MD1,0 = {01} (output) */ |
246 | __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); | 246 | __raw_writew((data & 0x0fcf) | 0x1000, SCPCR); |
247 | 247 | ||
248 | data = __raw_readb(SCPDR); | 248 | data = __raw_readb(SCPDR); |
249 | /* Set /RTS2 (bit6) = 0 */ | 249 | /* Set /RTS2 (bit6) = 0 */ |
250 | __raw_writeb(data & 0xbf, SCPDR); | 250 | __raw_writeb(data & 0xbf, SCPDR); |
251 | } | 251 | } |
252 | } | 252 | } |
253 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) | 253 | #elif defined(CONFIG_CPU_SUBTYPE_SH7722) |
254 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | 254 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
255 | { | 255 | { |
256 | unsigned short data; | 256 | unsigned short data; |
257 | 257 | ||
258 | if (port->mapbase == 0xffe00000) { | 258 | if (port->mapbase == 0xffe00000) { |
259 | data = __raw_readw(PSCR); | 259 | data = __raw_readw(PSCR); |
260 | data &= ~0x03cf; | 260 | data &= ~0x03cf; |
261 | if (!(cflag & CRTSCTS)) | 261 | if (!(cflag & CRTSCTS)) |
262 | data |= 0x0340; | 262 | data |= 0x0340; |
263 | 263 | ||
264 | __raw_writew(data, PSCR); | 264 | __raw_writew(data, PSCR); |
265 | } | 265 | } |
266 | } | 266 | } |
267 | #elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \ | 267 | #elif defined(CONFIG_CPU_SUBTYPE_SH7757) || \ |
268 | defined(CONFIG_CPU_SUBTYPE_SH7763) || \ | 268 | defined(CONFIG_CPU_SUBTYPE_SH7763) || \ |
269 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 269 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
270 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | 270 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
271 | defined(CONFIG_CPU_SUBTYPE_SH7786) || \ | 271 | defined(CONFIG_CPU_SUBTYPE_SH7786) || \ |
272 | defined(CONFIG_CPU_SUBTYPE_SHX3) | 272 | defined(CONFIG_CPU_SUBTYPE_SHX3) |
273 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | 273 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
274 | { | 274 | { |
275 | if (!(cflag & CRTSCTS)) | 275 | if (!(cflag & CRTSCTS)) |
276 | __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */ | 276 | __raw_writew(0x0080, SCSPTR0); /* Set RTS = 1 */ |
277 | } | 277 | } |
278 | #elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A) | 278 | #elif defined(CONFIG_CPU_SH4) && !defined(CONFIG_CPU_SH4A) |
279 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | 279 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
280 | { | 280 | { |
281 | if (!(cflag & CRTSCTS)) | 281 | if (!(cflag & CRTSCTS)) |
282 | __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */ | 282 | __raw_writew(0x0080, SCSPTR2); /* Set RTS = 1 */ |
283 | } | 283 | } |
284 | #else | 284 | #else |
285 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) | 285 | static inline void sci_init_pins(struct uart_port *port, unsigned int cflag) |
286 | { | 286 | { |
287 | /* Nothing to do */ | 287 | /* Nothing to do */ |
288 | } | 288 | } |
289 | #endif | 289 | #endif |
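
Every variant in the sci_init_pins() ladder above is a read-modify-write on a pin-function register: read the current value, clear the mode bits for the pins being claimed, optionally set new mode bits, and write back. The recurring idiom, reduced to a helper (reg, clear_mask, and set_bits are placeholders of this sketch):

static void claim_pins(void __iomem *reg, u16 clear_mask, u16 set_bits)
{
        u16 v = __raw_readw(reg);

        v &= ~clear_mask;       /* release the pins' current function */
        v |=  set_bits;         /* select the new function, if any */
        __raw_writew(v, reg);
}
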
290 | 290 | ||
291 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ | 291 | #if defined(CONFIG_CPU_SUBTYPE_SH7760) || \ |
292 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ | 292 | defined(CONFIG_CPU_SUBTYPE_SH7780) || \ |
293 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ | 293 | defined(CONFIG_CPU_SUBTYPE_SH7785) || \ |
294 | defined(CONFIG_CPU_SUBTYPE_SH7786) | 294 | defined(CONFIG_CPU_SUBTYPE_SH7786) |
295 | static int scif_txfill(struct uart_port *port) | 295 | static int scif_txfill(struct uart_port *port) |
296 | { | 296 | { |
297 | return sci_in(port, SCTFDR) & 0xff; | 297 | return sci_in(port, SCTFDR) & 0xff; |
298 | } | 298 | } |
299 | 299 | ||
300 | static int scif_txroom(struct uart_port *port) | 300 | static int scif_txroom(struct uart_port *port) |
301 | { | 301 | { |
302 | return SCIF_TXROOM_MAX - scif_txfill(port); | 302 | return SCIF_TXROOM_MAX - scif_txfill(port); |
303 | } | 303 | } |
304 | 304 | ||
305 | static int scif_rxfill(struct uart_port *port) | 305 | static int scif_rxfill(struct uart_port *port) |
306 | { | 306 | { |
307 | return sci_in(port, SCRFDR) & 0xff; | 307 | return sci_in(port, SCRFDR) & 0xff; |
308 | } | 308 | } |
309 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) | 309 | #elif defined(CONFIG_CPU_SUBTYPE_SH7763) |
310 | static int scif_txfill(struct uart_port *port) | 310 | static int scif_txfill(struct uart_port *port) |
311 | { | 311 | { |
312 | if (port->mapbase == 0xffe00000 || | 312 | if (port->mapbase == 0xffe00000 || |
313 | port->mapbase == 0xffe08000) | 313 | port->mapbase == 0xffe08000) |
314 | /* SCIF0/1 */ | 314 | /* SCIF0/1 */
315 | return sci_in(port, SCTFDR) & 0xff; | 315 | return sci_in(port, SCTFDR) & 0xff; |
316 | else | 316 | else |
317 | /* SCIF2 */ | 317 | /* SCIF2 */ |
318 | return sci_in(port, SCFDR) >> 8; | 318 | return sci_in(port, SCFDR) >> 8; |
319 | } | 319 | } |
320 | 320 | ||
321 | static int scif_txroom(struct uart_port *port) | 321 | static int scif_txroom(struct uart_port *port) |
322 | { | 322 | { |
323 | if (port->mapbase == 0xffe00000 || | 323 | if (port->mapbase == 0xffe00000 || |
324 | port->mapbase == 0xffe08000) | 324 | port->mapbase == 0xffe08000) |
325 | /* SCIF0/1 */ | 325 | /* SCIF0/1 */
326 | return SCIF_TXROOM_MAX - scif_txfill(port); | 326 | return SCIF_TXROOM_MAX - scif_txfill(port); |
327 | else | 327 | else |
328 | /* SCIF2 */ | 328 | /* SCIF2 */ |
329 | return SCIF2_TXROOM_MAX - scif_txfill(port); | 329 | return SCIF2_TXROOM_MAX - scif_txfill(port); |
330 | } | 330 | } |
331 | 331 | ||
332 | static int scif_rxfill(struct uart_port *port) | 332 | static int scif_rxfill(struct uart_port *port) |
333 | { | 333 | { |
334 | if ((port->mapbase == 0xffe00000) || | 334 | if ((port->mapbase == 0xffe00000) || |
335 | (port->mapbase == 0xffe08000)) { | 335 | (port->mapbase == 0xffe08000)) { |
336 | /* SCIF0/1*/ | 336 | /* SCIF0/1*/ |
337 | return sci_in(port, SCRFDR) & 0xff; | 337 | return sci_in(port, SCRFDR) & 0xff; |
338 | } else { | 338 | } else { |
339 | /* SCIF2 */ | 339 | /* SCIF2 */ |
340 | return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; | 340 | return sci_in(port, SCFDR) & SCIF2_RFDC_MASK; |
341 | } | 341 | } |
342 | } | 342 | } |
343 | #else | 343 | #else |
344 | static int scif_txfill(struct uart_port *port) | 344 | static int scif_txfill(struct uart_port *port) |
345 | { | 345 | { |
346 | return sci_in(port, SCFDR) >> 8; | 346 | return sci_in(port, SCFDR) >> 8; |
347 | } | 347 | } |
348 | 348 | ||
349 | static int scif_txroom(struct uart_port *port) | 349 | static int scif_txroom(struct uart_port *port) |
350 | { | 350 | { |
351 | return SCIF_TXROOM_MAX - scif_txfill(port); | 351 | return SCIF_TXROOM_MAX - scif_txfill(port); |
352 | } | 352 | } |
353 | 353 | ||
354 | static int scif_rxfill(struct uart_port *port) | 354 | static int scif_rxfill(struct uart_port *port) |
355 | { | 355 | { |
356 | return sci_in(port, SCFDR) & SCIF_RFDC_MASK; | 356 | return sci_in(port, SCFDR) & SCIF_RFDC_MASK; |
357 | } | 357 | } |
358 | #endif | 358 | #endif |
359 | 359 | ||
360 | static int sci_txfill(struct uart_port *port) | 360 | static int sci_txfill(struct uart_port *port) |
361 | { | 361 | { |
362 | return !(sci_in(port, SCxSR) & SCI_TDRE); | 362 | return !(sci_in(port, SCxSR) & SCI_TDRE); |
363 | } | 363 | } |
364 | 364 | ||
365 | static int sci_txroom(struct uart_port *port) | 365 | static int sci_txroom(struct uart_port *port) |
366 | { | 366 | { |
367 | return !sci_txfill(port); | 367 | return !sci_txfill(port); |
368 | } | 368 | } |
369 | 369 | ||
370 | static int sci_rxfill(struct uart_port *port) | 370 | static int sci_rxfill(struct uart_port *port) |
371 | { | 371 | { |
372 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; | 372 | return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; |
373 | } | 373 | } |
374 | 374 | ||
375 | /* ********************************************************************** * | 375 | /* ********************************************************************** * |
376 | * the interrupt related routines * | 376 | * the interrupt related routines * |
377 | * ********************************************************************** */ | 377 | * ********************************************************************** */ |
378 | 378 | ||
379 | static void sci_transmit_chars(struct uart_port *port) | 379 | static void sci_transmit_chars(struct uart_port *port) |
380 | { | 380 | { |
381 | struct circ_buf *xmit = &port->state->xmit; | 381 | struct circ_buf *xmit = &port->state->xmit; |
382 | unsigned int stopped = uart_tx_stopped(port); | 382 | unsigned int stopped = uart_tx_stopped(port); |
383 | unsigned short status; | 383 | unsigned short status; |
384 | unsigned short ctrl; | 384 | unsigned short ctrl; |
385 | int count; | 385 | int count; |
386 | 386 | ||
387 | status = sci_in(port, SCxSR); | 387 | status = sci_in(port, SCxSR); |
388 | if (!(status & SCxSR_TDxE(port))) { | 388 | if (!(status & SCxSR_TDxE(port))) { |
389 | ctrl = sci_in(port, SCSCR); | 389 | ctrl = sci_in(port, SCSCR); |
390 | if (uart_circ_empty(xmit)) | 390 | if (uart_circ_empty(xmit)) |
391 | ctrl &= ~SCI_CTRL_FLAGS_TIE; | 391 | ctrl &= ~SCI_CTRL_FLAGS_TIE; |
392 | else | 392 | else |
393 | ctrl |= SCI_CTRL_FLAGS_TIE; | 393 | ctrl |= SCI_CTRL_FLAGS_TIE; |
394 | sci_out(port, SCSCR, ctrl); | 394 | sci_out(port, SCSCR, ctrl); |
395 | return; | 395 | return; |
396 | } | 396 | } |
397 | 397 | ||
398 | if (port->type == PORT_SCI) | 398 | if (port->type == PORT_SCI) |
399 | count = sci_txroom(port); | 399 | count = sci_txroom(port); |
400 | else | 400 | else |
401 | count = scif_txroom(port); | 401 | count = scif_txroom(port); |
402 | 402 | ||
403 | do { | 403 | do { |
404 | unsigned char c; | 404 | unsigned char c; |
405 | 405 | ||
406 | if (port->x_char) { | 406 | if (port->x_char) { |
407 | c = port->x_char; | 407 | c = port->x_char; |
408 | port->x_char = 0; | 408 | port->x_char = 0; |
409 | } else if (!uart_circ_empty(xmit) && !stopped) { | 409 | } else if (!uart_circ_empty(xmit) && !stopped) { |
410 | c = xmit->buf[xmit->tail]; | 410 | c = xmit->buf[xmit->tail]; |
411 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | 411 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
412 | } else { | 412 | } else { |
413 | break; | 413 | break; |
414 | } | 414 | } |
415 | 415 | ||
416 | sci_out(port, SCxTDR, c); | 416 | sci_out(port, SCxTDR, c); |
417 | 417 | ||
418 | port->icount.tx++; | 418 | port->icount.tx++; |
419 | } while (--count > 0); | 419 | } while (--count > 0); |
420 | 420 | ||
421 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); | 421 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); |
422 | 422 | ||
423 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 423 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
424 | uart_write_wakeup(port); | 424 | uart_write_wakeup(port); |
425 | if (uart_circ_empty(xmit)) { | 425 | if (uart_circ_empty(xmit)) { |
426 | sci_stop_tx(port); | 426 | sci_stop_tx(port); |
427 | } else { | 427 | } else { |
428 | ctrl = sci_in(port, SCSCR); | 428 | ctrl = sci_in(port, SCSCR); |
429 | 429 | ||
430 | if (port->type != PORT_SCI) { | 430 | if (port->type != PORT_SCI) { |
431 | sci_in(port, SCxSR); /* Dummy read */ | 431 | sci_in(port, SCxSR); /* Dummy read */ |
432 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); | 432 | sci_out(port, SCxSR, SCxSR_TDxE_CLEAR(port)); |
433 | } | 433 | } |
434 | 434 | ||
435 | ctrl |= SCI_CTRL_FLAGS_TIE; | 435 | ctrl |= SCI_CTRL_FLAGS_TIE; |
436 | sci_out(port, SCSCR, ctrl); | 436 | sci_out(port, SCSCR, ctrl); |
437 | } | 437 | } |
438 | } | 438 | } |
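
Two details in sci_transmit_chars() deserve a note: the x_char (XON/XOFF) byte always preempts the circular buffer, and the tail advance relies on UART_XMIT_SIZE being a power of two so a mask replaces the modulo. The index arithmetic in isolation:

#include <linux/serial_core.h>

static unsigned int ring_advance(unsigned int tail)
{
        /* UART_XMIT_SIZE is a power of two, so the mask wraps the index
         * exactly like '% UART_XMIT_SIZE' would, without a division. */
        return (tail + 1) & (UART_XMIT_SIZE - 1);
}
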
439 | 439 | ||
440 | /* On SH3, SCIF may read end-of-break as a space->mark char */ | 440 | /* On SH3, SCIF may read end-of-break as a space->mark char */ |
441 | #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); }) | 441 | #define STEPFN(c) ({int __c = (c); (((__c-1)|(__c)) == -1); }) |
442 | 442 | ||
443 | static inline void sci_receive_chars(struct uart_port *port) | 443 | static inline void sci_receive_chars(struct uart_port *port) |
444 | { | 444 | { |
445 | struct sci_port *sci_port = to_sci_port(port); | 445 | struct sci_port *sci_port = to_sci_port(port); |
446 | struct tty_struct *tty = port->state->port.tty; | 446 | struct tty_struct *tty = port->state->port.tty; |
447 | int i, count, copied = 0; | 447 | int i, count, copied = 0; |
448 | unsigned short status; | 448 | unsigned short status; |
449 | unsigned char flag; | 449 | unsigned char flag; |
450 | 450 | ||
451 | status = sci_in(port, SCxSR); | 451 | status = sci_in(port, SCxSR); |
452 | if (!(status & SCxSR_RDxF(port))) | 452 | if (!(status & SCxSR_RDxF(port))) |
453 | return; | 453 | return; |
454 | 454 | ||
455 | while (1) { | 455 | while (1) { |
456 | if (port->type == PORT_SCI) | 456 | if (port->type == PORT_SCI) |
457 | count = sci_rxfill(port); | 457 | count = sci_rxfill(port); |
458 | else | 458 | else |
459 | count = scif_rxfill(port); | 459 | count = scif_rxfill(port); |
460 | 460 | ||
461 | /* Don't copy more bytes than there is room for in the buffer */ | 461 | /* Don't copy more bytes than there is room for in the buffer */ |
462 | count = tty_buffer_request_room(tty, count); | 462 | count = tty_buffer_request_room(tty, count); |
463 | 463 | ||
464 | /* If for any reason we can't copy more data, we're done! */ | 464 | /* If for any reason we can't copy more data, we're done! */ |
465 | if (count == 0) | 465 | if (count == 0) |
466 | break; | 466 | break; |
467 | 467 | ||
468 | if (port->type == PORT_SCI) { | 468 | if (port->type == PORT_SCI) { |
469 | char c = sci_in(port, SCxRDR); | 469 | char c = sci_in(port, SCxRDR); |
470 | if (uart_handle_sysrq_char(port, c) || | 470 | if (uart_handle_sysrq_char(port, c) || |
471 | sci_port->break_flag) | 471 | sci_port->break_flag) |
472 | count = 0; | 472 | count = 0; |
473 | else | 473 | else |
474 | tty_insert_flip_char(tty, c, TTY_NORMAL); | 474 | tty_insert_flip_char(tty, c, TTY_NORMAL); |
475 | } else { | 475 | } else { |
476 | for (i = 0; i < count; i++) { | 476 | for (i = 0; i < count; i++) { |
477 | char c = sci_in(port, SCxRDR); | 477 | char c = sci_in(port, SCxRDR); |
478 | status = sci_in(port, SCxSR); | 478 | status = sci_in(port, SCxSR); |
479 | #if defined(CONFIG_CPU_SH3) | 479 | #if defined(CONFIG_CPU_SH3) |
480 | /* Skip "chars" during break */ | 480 | /* Skip "chars" during break */ |
481 | if (sci_port->break_flag) { | 481 | if (sci_port->break_flag) { |
482 | if ((c == 0) && | 482 | if ((c == 0) && |
483 | (status & SCxSR_FER(port))) { | 483 | (status & SCxSR_FER(port))) { |
484 | count--; i--; | 484 | count--; i--; |
485 | continue; | 485 | continue; |
486 | } | 486 | } |
487 | 487 | ||
488 | /* Nonzero => end-of-break */ | 488 | /* Nonzero => end-of-break */ |
489 | dev_dbg(port->dev, "debounce<%02x>\n", c); | 489 | dev_dbg(port->dev, "debounce<%02x>\n", c); |
490 | sci_port->break_flag = 0; | 490 | sci_port->break_flag = 0; |
491 | 491 | ||
492 | if (STEPFN(c)) { | 492 | if (STEPFN(c)) { |
493 | count--; i--; | 493 | count--; i--; |
494 | continue; | 494 | continue; |
495 | } | 495 | } |
496 | } | 496 | } |
497 | #endif /* CONFIG_CPU_SH3 */ | 497 | #endif /* CONFIG_CPU_SH3 */ |
498 | if (uart_handle_sysrq_char(port, c)) { | 498 | if (uart_handle_sysrq_char(port, c)) { |
499 | count--; i--; | 499 | count--; i--; |
500 | continue; | 500 | continue; |
501 | } | 501 | } |
502 | 502 | ||
503 | /* Store data and status */ | 503 | /* Store data and status */ |
504 | if (status & SCxSR_FER(port)) { | 504 | if (status & SCxSR_FER(port)) { |
505 | flag = TTY_FRAME; | 505 | flag = TTY_FRAME; |
506 | dev_notice(port->dev, "frame error\n"); | 506 | dev_notice(port->dev, "frame error\n"); |
507 | } else if (status & SCxSR_PER(port)) { | 507 | } else if (status & SCxSR_PER(port)) { |
508 | flag = TTY_PARITY; | 508 | flag = TTY_PARITY; |
509 | dev_notice(port->dev, "parity error\n"); | 509 | dev_notice(port->dev, "parity error\n"); |
510 | } else | 510 | } else |
511 | flag = TTY_NORMAL; | 511 | flag = TTY_NORMAL; |
512 | 512 | ||
513 | tty_insert_flip_char(tty, c, flag); | 513 | tty_insert_flip_char(tty, c, flag); |
514 | } | 514 | } |
515 | } | 515 | } |
516 | 516 | ||
517 | sci_in(port, SCxSR); /* dummy read */ | 517 | sci_in(port, SCxSR); /* dummy read */ |
518 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); | 518 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); |
519 | 519 | ||
520 | copied += count; | 520 | copied += count; |
521 | port->icount.rx += count; | 521 | port->icount.rx += count; |
522 | } | 522 | } |
523 | 523 | ||
524 | if (copied) { | 524 | if (copied) { |
525 | /* Tell the rest of the system the news. New characters! */ | 525 | /* Tell the rest of the system the news. New characters! */ |
526 | tty_flip_buffer_push(tty); | 526 | tty_flip_buffer_push(tty); |
527 | } else { | 527 | } else { |
528 | sci_in(port, SCxSR); /* dummy read */ | 528 | sci_in(port, SCxSR); /* dummy read */ |
529 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); | 529 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); |
530 | } | 530 | } |
531 | } | 531 | } |
532 | 532 | ||
533 | #define SCI_BREAK_JIFFIES (HZ/20) | 533 | #define SCI_BREAK_JIFFIES (HZ/20) |
534 | /* The SCI generates interrupts during the break, | 534 | /* The SCI generates interrupts during the break, |
535 | * roughly one per millisecond at 9600 baud, | 535 | * roughly one per millisecond at 9600 baud, |
536 | * so don't bother disabling interrupts. | 536 | * so don't bother disabling interrupts. |
537 | * But we don't want more than one break event. | 537 | * But we don't want more than one break event. |
538 | * Use a kernel timer to periodically poll the rx line until | 538 | * Use a kernel timer to periodically poll the rx line until |
539 | * the break is finished. | 539 | * the break is finished. |
540 | */ | 540 | */ |
541 | static void sci_schedule_break_timer(struct sci_port *port) | 541 | static void sci_schedule_break_timer(struct sci_port *port) |
542 | { | 542 | { |
543 | port->break_timer.expires = jiffies + SCI_BREAK_JIFFIES; | 543 | port->break_timer.expires = jiffies + SCI_BREAK_JIFFIES; |
544 | add_timer(&port->break_timer); | 544 | add_timer(&port->break_timer); |
545 | } | 545 | } |
546 | /* Ensure that two consecutive samples find the break over. */ | 546 | /* Ensure that two consecutive samples find the break over. */ |
547 | static void sci_break_timer(unsigned long data) | 547 | static void sci_break_timer(unsigned long data) |
548 | { | 548 | { |
549 | struct sci_port *port = (struct sci_port *)data; | 549 | struct sci_port *port = (struct sci_port *)data; |
550 | 550 | ||
551 | if (sci_rxd_in(&port->port) == 0) { | 551 | if (sci_rxd_in(&port->port) == 0) { |
552 | port->break_flag = 1; | 552 | port->break_flag = 1; |
553 | sci_schedule_break_timer(port); | 553 | sci_schedule_break_timer(port); |
554 | } else if (port->break_flag == 1) { | 554 | } else if (port->break_flag == 1) { |
555 | /* break is over. */ | 555 | /* break is over. */ |
556 | port->break_flag = 2; | 556 | port->break_flag = 2; |
557 | sci_schedule_break_timer(port); | 557 | sci_schedule_break_timer(port); |
558 | } else | 558 | } else |
559 | port->break_flag = 0; | 559 | port->break_flag = 0; |
560 | } | 560 | } |
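The timer body above is a three-state debounce: while sci_rxd_in() still samples low the flag stays 1 and the timer re-arms; the first high sample moves to state 2 and re-arms once more; only a second consecutive high sample clears the flag. A hedged user-space sketch of the same state machine (the sample sequence is invented, and the timer re-arm is modeled as simply calling the tick again):

	#include <stdio.h>

	/* 0 = no break, 1 = in break, 2 = one idle (high) sample seen. */
	static int break_flag;

	static void break_timer_tick(int rxd)	/* rxd: sampled RX level */
	{
		if (rxd == 0)
			break_flag = 1;	/* still in break: keep polling */
		else if (break_flag == 1)
			break_flag = 2;	/* first high: poll once more */
		else
			break_flag = 0;	/* second high: break is over */
	}

	int main(void)
	{
		int samples[] = { 0, 0, 1, 0, 1, 1 };	/* hypothetical levels */

		for (int i = 0; i < 6; i++) {
			break_timer_tick(samples[i]);
			printf("rxd=%d -> break_flag=%d\n", samples[i], break_flag);
		}
		return 0;	/* ends at 0: two consecutive high samples */
	}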
561 | 561 | ||
562 | static inline int sci_handle_errors(struct uart_port *port) | 562 | static inline int sci_handle_errors(struct uart_port *port) |
563 | { | 563 | { |
564 | int copied = 0; | 564 | int copied = 0; |
565 | unsigned short status = sci_in(port, SCxSR); | 565 | unsigned short status = sci_in(port, SCxSR); |
566 | struct tty_struct *tty = port->state->port.tty; | 566 | struct tty_struct *tty = port->state->port.tty; |
567 | 567 | ||
568 | if (status & SCxSR_ORER(port)) { | 568 | if (status & SCxSR_ORER(port)) { |
569 | /* overrun error */ | 569 | /* overrun error */ |
570 | if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) | 570 | if (tty_insert_flip_char(tty, 0, TTY_OVERRUN)) |
571 | copied++; | 571 | copied++; |
572 | 572 | ||
573 | dev_notice(port->dev, "overrun error\n"); | 573 | dev_notice(port->dev, "overrun error\n"); |
574 | } | 574 | } |
575 | 575 | ||
576 | if (status & SCxSR_FER(port)) { | 576 | if (status & SCxSR_FER(port)) { |
577 | if (sci_rxd_in(port) == 0) { | 577 | if (sci_rxd_in(port) == 0) { |
578 | /* Notify of BREAK */ | 578 | /* Notify of BREAK */ |
579 | struct sci_port *sci_port = to_sci_port(port); | 579 | struct sci_port *sci_port = to_sci_port(port); |
580 | 580 | ||
581 | if (!sci_port->break_flag) { | 581 | if (!sci_port->break_flag) { |
582 | sci_port->break_flag = 1; | 582 | sci_port->break_flag = 1; |
583 | sci_schedule_break_timer(sci_port); | 583 | sci_schedule_break_timer(sci_port); |
584 | 584 | ||
585 | /* Do sysrq handling. */ | 585 | /* Do sysrq handling. */ |
586 | if (uart_handle_break(port)) | 586 | if (uart_handle_break(port)) |
587 | return 0; | 587 | return 0; |
588 | 588 | ||
589 | dev_dbg(port->dev, "BREAK detected\n"); | 589 | dev_dbg(port->dev, "BREAK detected\n"); |
590 | 590 | ||
591 | if (tty_insert_flip_char(tty, 0, TTY_BREAK)) | 591 | if (tty_insert_flip_char(tty, 0, TTY_BREAK)) |
592 | copied++; | 592 | copied++; |
593 | } | 593 | } |
594 | 594 | ||
595 | } else { | 595 | } else { |
596 | /* frame error */ | 596 | /* frame error */ |
597 | if (tty_insert_flip_char(tty, 0, TTY_FRAME)) | 597 | if (tty_insert_flip_char(tty, 0, TTY_FRAME)) |
598 | copied++; | 598 | copied++; |
599 | 599 | ||
600 | dev_notice(port->dev, "frame error\n"); | 600 | dev_notice(port->dev, "frame error\n"); |
601 | } | 601 | } |
602 | } | 602 | } |
603 | 603 | ||
604 | if (status & SCxSR_PER(port)) { | 604 | if (status & SCxSR_PER(port)) { |
605 | /* parity error */ | 605 | /* parity error */ |
606 | if (tty_insert_flip_char(tty, 0, TTY_PARITY)) | 606 | if (tty_insert_flip_char(tty, 0, TTY_PARITY)) |
607 | copied++; | 607 | copied++; |
608 | 608 | ||
609 | dev_notice(port->dev, "parity error\n"); | 609 | dev_notice(port->dev, "parity error\n"); |
610 | } | 610 | } |
611 | 611 | ||
612 | if (copied) | 612 | if (copied) |
613 | tty_flip_buffer_push(tty); | 613 | tty_flip_buffer_push(tty); |
614 | 614 | ||
615 | return copied; | 615 | return copied; |
616 | } | 616 | } |
617 | 617 | ||
618 | static inline int sci_handle_fifo_overrun(struct uart_port *port) | 618 | static inline int sci_handle_fifo_overrun(struct uart_port *port) |
619 | { | 619 | { |
620 | struct tty_struct *tty = port->state->port.tty; | 620 | struct tty_struct *tty = port->state->port.tty; |
621 | int copied = 0; | 621 | int copied = 0; |
622 | 622 | ||
623 | if (port->type != PORT_SCIF) | 623 | if (port->type != PORT_SCIF) |
624 | return 0; | 624 | return 0; |
625 | 625 | ||
626 | if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) { | 626 | if ((sci_in(port, SCLSR) & SCIF_ORER) != 0) { |
627 | sci_out(port, SCLSR, 0); | 627 | sci_out(port, SCLSR, 0); |
628 | 628 | ||
629 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | 629 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); |
630 | tty_flip_buffer_push(tty); | 630 | tty_flip_buffer_push(tty); |
631 | 631 | ||
632 | dev_notice(port->dev, "overrun error\n"); | 632 | dev_notice(port->dev, "overrun error\n"); |
633 | copied++; | 633 | copied++; |
634 | } | 634 | } |
635 | 635 | ||
636 | return copied; | 636 | return copied; |
637 | } | 637 | } |
638 | 638 | ||
639 | static inline int sci_handle_breaks(struct uart_port *port) | 639 | static inline int sci_handle_breaks(struct uart_port *port) |
640 | { | 640 | { |
641 | int copied = 0; | 641 | int copied = 0; |
642 | unsigned short status = sci_in(port, SCxSR); | 642 | unsigned short status = sci_in(port, SCxSR); |
643 | struct tty_struct *tty = port->state->port.tty; | 643 | struct tty_struct *tty = port->state->port.tty; |
644 | struct sci_port *s = to_sci_port(port); | 644 | struct sci_port *s = to_sci_port(port); |
645 | 645 | ||
646 | if (uart_handle_break(port)) | 646 | if (uart_handle_break(port)) |
647 | return 0; | 647 | return 0; |
648 | 648 | ||
649 | if (!s->break_flag && status & SCxSR_BRK(port)) { | 649 | if (!s->break_flag && status & SCxSR_BRK(port)) { |
650 | #if defined(CONFIG_CPU_SH3) | 650 | #if defined(CONFIG_CPU_SH3) |
651 | /* Debounce break */ | 651 | /* Debounce break */ |
652 | s->break_flag = 1; | 652 | s->break_flag = 1; |
653 | #endif | 653 | #endif |
654 | /* Notify of BREAK */ | 654 | /* Notify of BREAK */ |
655 | if (tty_insert_flip_char(tty, 0, TTY_BREAK)) | 655 | if (tty_insert_flip_char(tty, 0, TTY_BREAK)) |
656 | copied++; | 656 | copied++; |
657 | 657 | ||
658 | dev_dbg(port->dev, "BREAK detected\n"); | 658 | dev_dbg(port->dev, "BREAK detected\n"); |
659 | } | 659 | } |
660 | 660 | ||
661 | if (copied) | 661 | if (copied) |
662 | tty_flip_buffer_push(tty); | 662 | tty_flip_buffer_push(tty); |
663 | 663 | ||
664 | copied += sci_handle_fifo_overrun(port); | 664 | copied += sci_handle_fifo_overrun(port); |
665 | 665 | ||
666 | return copied; | 666 | return copied; |
667 | } | 667 | } |
668 | 668 | ||
669 | static irqreturn_t sci_rx_interrupt(int irq, void *ptr) | 669 | static irqreturn_t sci_rx_interrupt(int irq, void *ptr) |
670 | { | 670 | { |
671 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 671 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
672 | struct uart_port *port = ptr; | 672 | struct uart_port *port = ptr; |
673 | struct sci_port *s = to_sci_port(port); | 673 | struct sci_port *s = to_sci_port(port); |
674 | 674 | ||
675 | if (s->chan_rx) { | 675 | if (s->chan_rx) { |
676 | unsigned long tout; | 676 | unsigned long tout; |
677 | u16 scr = sci_in(port, SCSCR); | 677 | u16 scr = sci_in(port, SCSCR); |
678 | u16 ssr = sci_in(port, SCxSR); | 678 | u16 ssr = sci_in(port, SCxSR); |
679 | 679 | ||
680 | /* Disable future Rx interrupts */ | 680 | /* Disable future Rx interrupts */ |
681 | sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE); | 681 | sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE); |
682 | /* Clear current interrupt */ | 682 | /* Clear current interrupt */ |
683 | sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port))); | 683 | sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port))); |
684 | /* Calculate delay for 1.5 DMA buffers */ | 684 | /* Calculate delay for 1.5 DMA buffers */ |
685 | tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 / | 685 | tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 / |
686 | port->fifosize / 2; | 686 | port->fifosize / 2; |
687 | dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n", | 687 | dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n", |
688 | tout * 1000 / HZ); | 688 | tout * 1000 / HZ); |
689 | if (tout < 2) | 689 | if (tout < 2) |
690 | tout = 2; | 690 | tout = 2; |
691 | mod_timer(&s->rx_timer, jiffies + tout); | 691 | mod_timer(&s->rx_timer, jiffies + tout); |
692 | 692 | ||
693 | return IRQ_HANDLED; | 693 | return IRQ_HANDLED; |
694 | } | 694 | } |
695 | #endif | 695 | #endif |
696 | 696 | ||
697 | /* I think sci_receive_chars has to be called irrespective | 697 | /* I think sci_receive_chars has to be called irrespective |
698 | * of whether I_IXOFF is set; otherwise, how would the interrupt | 698 | * of whether I_IXOFF is set; otherwise, how would the interrupt |
699 | * ever be disabled? | 699 | * ever be disabled? |
700 | */ | 700 | */ |
701 | sci_receive_chars(ptr); | 701 | sci_receive_chars(ptr); |
702 | 702 | ||
703 | return IRQ_HANDLED; | 703 | return IRQ_HANDLED; |
704 | } | 704 | } |
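The timeout math above arms a fallback for the case where fewer than buf_len_rx bytes arrive and the DMA completion callback therefore never fires: the driver apparently treats port->timeout as one FIFO's drain time plus a 20 ms (HZ / 50) margin, so dividing the margin-less value by fifosize gives a per-character time, and scaling by buf_len_rx * 3 / 2 waits for about 1.5 DMA buffers. A small sketch with invented numbers (HZ, fifosize and the timeout value are placeholders):

	#include <stdio.h>

	#define HZ 250			/* hypothetical jiffies per second */

	int main(void)
	{
		unsigned long port_timeout = HZ / 2;	/* invented: ~500 ms */
		unsigned int fifosize = 16;		/* invented FIFO depth */
		unsigned int buf_len_rx = 2 * 16;	/* as sci_request_dma sets it */
		unsigned long tout;

		/* Same arithmetic as the Rx IRQ handler: ~1.5 DMA buffers. */
		tout = (port_timeout - HZ / 50) * buf_len_rx * 3 / fifosize / 2;
		if (tout < 2)			/* clamp, as above */
			tout = 2;

		printf("timeout: %lu jiffies (%lu ms)\n", tout, tout * 1000 / HZ);
		return 0;
	}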
705 | 705 | ||
706 | static irqreturn_t sci_tx_interrupt(int irq, void *ptr) | 706 | static irqreturn_t sci_tx_interrupt(int irq, void *ptr) |
707 | { | 707 | { |
708 | struct uart_port *port = ptr; | 708 | struct uart_port *port = ptr; |
709 | unsigned long flags; | 709 | unsigned long flags; |
710 | 710 | ||
711 | spin_lock_irqsave(&port->lock, flags); | 711 | spin_lock_irqsave(&port->lock, flags); |
712 | sci_transmit_chars(port); | 712 | sci_transmit_chars(port); |
713 | spin_unlock_irqrestore(&port->lock, flags); | 713 | spin_unlock_irqrestore(&port->lock, flags); |
714 | 714 | ||
715 | return IRQ_HANDLED; | 715 | return IRQ_HANDLED; |
716 | } | 716 | } |
717 | 717 | ||
718 | static irqreturn_t sci_er_interrupt(int irq, void *ptr) | 718 | static irqreturn_t sci_er_interrupt(int irq, void *ptr) |
719 | { | 719 | { |
720 | struct uart_port *port = ptr; | 720 | struct uart_port *port = ptr; |
721 | 721 | ||
722 | /* Handle errors */ | 722 | /* Handle errors */ |
723 | if (port->type == PORT_SCI) { | 723 | if (port->type == PORT_SCI) { |
724 | if (sci_handle_errors(port)) { | 724 | if (sci_handle_errors(port)) { |
725 | /* discard character in rx buffer */ | 725 | /* discard character in rx buffer */ |
726 | sci_in(port, SCxSR); | 726 | sci_in(port, SCxSR); |
727 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); | 727 | sci_out(port, SCxSR, SCxSR_RDxF_CLEAR(port)); |
728 | } | 728 | } |
729 | } else { | 729 | } else { |
730 | sci_handle_fifo_overrun(port); | 730 | sci_handle_fifo_overrun(port); |
731 | sci_rx_interrupt(irq, ptr); | 731 | sci_rx_interrupt(irq, ptr); |
732 | } | 732 | } |
733 | 733 | ||
734 | sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); | 734 | sci_out(port, SCxSR, SCxSR_ERROR_CLEAR(port)); |
735 | 735 | ||
736 | /* Kick the transmission */ | 736 | /* Kick the transmission */ |
737 | sci_tx_interrupt(irq, ptr); | 737 | sci_tx_interrupt(irq, ptr); |
738 | 738 | ||
739 | return IRQ_HANDLED; | 739 | return IRQ_HANDLED; |
740 | } | 740 | } |
741 | 741 | ||
742 | static irqreturn_t sci_br_interrupt(int irq, void *ptr) | 742 | static irqreturn_t sci_br_interrupt(int irq, void *ptr) |
743 | { | 743 | { |
744 | struct uart_port *port = ptr; | 744 | struct uart_port *port = ptr; |
745 | 745 | ||
746 | /* Handle BREAKs */ | 746 | /* Handle BREAKs */ |
747 | sci_handle_breaks(port); | 747 | sci_handle_breaks(port); |
748 | sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port)); | 748 | sci_out(port, SCxSR, SCxSR_BREAK_CLEAR(port)); |
749 | 749 | ||
750 | return IRQ_HANDLED; | 750 | return IRQ_HANDLED; |
751 | } | 751 | } |
752 | 752 | ||
753 | static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) | 753 | static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr) |
754 | { | 754 | { |
755 | unsigned short ssr_status, scr_status, err_enabled; | 755 | unsigned short ssr_status, scr_status, err_enabled; |
756 | struct uart_port *port = ptr; | 756 | struct uart_port *port = ptr; |
757 | struct sci_port *s = to_sci_port(port); | 757 | struct sci_port *s = to_sci_port(port); |
758 | irqreturn_t ret = IRQ_NONE; | 758 | irqreturn_t ret = IRQ_NONE; |
759 | 759 | ||
760 | ssr_status = sci_in(port, SCxSR); | 760 | ssr_status = sci_in(port, SCxSR); |
761 | scr_status = sci_in(port, SCSCR); | 761 | scr_status = sci_in(port, SCSCR); |
762 | err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); | 762 | err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); |
763 | 763 | ||
764 | /* Tx Interrupt */ | 764 | /* Tx Interrupt */ |
765 | if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) && | 765 | if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) && |
766 | !s->chan_tx) | 766 | !s->chan_tx) |
767 | ret = sci_tx_interrupt(irq, ptr); | 767 | ret = sci_tx_interrupt(irq, ptr); |
768 | /* | 768 | /* |
769 | * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / | 769 | * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / |
770 | * DR flags | 770 | * DR flags |
771 | */ | 771 | */ |
772 | if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && | 772 | if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && |
773 | (scr_status & SCI_CTRL_FLAGS_RIE)) | 773 | (scr_status & SCI_CTRL_FLAGS_RIE)) |
774 | ret = sci_rx_interrupt(irq, ptr); | 774 | ret = sci_rx_interrupt(irq, ptr); |
775 | /* Error Interrupt */ | 775 | /* Error Interrupt */ |
776 | if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) | 776 | if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) |
777 | ret = sci_er_interrupt(irq, ptr); | 777 | ret = sci_er_interrupt(irq, ptr); |
778 | /* Break Interrupt */ | 778 | /* Break Interrupt */ |
779 | if ((ssr_status & SCxSR_BRK(port)) && err_enabled) | 779 | if ((ssr_status & SCxSR_BRK(port)) && err_enabled) |
780 | ret = sci_br_interrupt(irq, ptr); | 780 | ret = sci_br_interrupt(irq, ptr); |
781 | 781 | ||
782 | WARN_ONCE(ret == IRQ_NONE, | 782 | WARN_ONCE(ret == IRQ_NONE, |
783 | "%s: %d IRQ %d, status %x, control %x\n", __func__, | 783 | "%s: %d IRQ %d, status %x, control %x\n", __func__, |
784 | irq, port->line, ssr_status, scr_status); | 784 | irq, port->line, ssr_status, scr_status); |
785 | 785 | ||
786 | return ret; | 786 | return ret; |
787 | } | 787 | } |
788 | 788 | ||
789 | /* | 789 | /* |
790 | * Here we define a transition notifier so that we can update all of our | 790 | * Here we define a transition notifier so that we can update all of our |
791 | * ports' baud rate when the peripheral clock changes. | 791 | * ports' baud rate when the peripheral clock changes. |
792 | */ | 792 | */ |
793 | static int sci_notifier(struct notifier_block *self, | 793 | static int sci_notifier(struct notifier_block *self, |
794 | unsigned long phase, void *p) | 794 | unsigned long phase, void *p) |
795 | { | 795 | { |
796 | struct sh_sci_priv *priv = container_of(self, | 796 | struct sh_sci_priv *priv = container_of(self, |
797 | struct sh_sci_priv, clk_nb); | 797 | struct sh_sci_priv, clk_nb); |
798 | struct sci_port *sci_port; | 798 | struct sci_port *sci_port; |
799 | unsigned long flags; | 799 | unsigned long flags; |
800 | 800 | ||
801 | if ((phase == CPUFREQ_POSTCHANGE) || | 801 | if ((phase == CPUFREQ_POSTCHANGE) || |
802 | (phase == CPUFREQ_RESUMECHANGE)) { | 802 | (phase == CPUFREQ_RESUMECHANGE)) { |
803 | spin_lock_irqsave(&priv->lock, flags); | 803 | spin_lock_irqsave(&priv->lock, flags); |
804 | list_for_each_entry(sci_port, &priv->ports, node) | 804 | list_for_each_entry(sci_port, &priv->ports, node) |
805 | sci_port->port.uartclk = clk_get_rate(sci_port->dclk); | 805 | sci_port->port.uartclk = clk_get_rate(sci_port->dclk); |
806 | spin_unlock_irqrestore(&priv->lock, flags); | 806 | spin_unlock_irqrestore(&priv->lock, flags); |
807 | } | 807 | } |
808 | 808 | ||
809 | return NOTIFY_OK; | 809 | return NOTIFY_OK; |
810 | } | 810 | } |
811 | 811 | ||
812 | static void sci_clk_enable(struct uart_port *port) | 812 | static void sci_clk_enable(struct uart_port *port) |
813 | { | 813 | { |
814 | struct sci_port *sci_port = to_sci_port(port); | 814 | struct sci_port *sci_port = to_sci_port(port); |
815 | 815 | ||
816 | clk_enable(sci_port->dclk); | 816 | clk_enable(sci_port->dclk); |
817 | sci_port->port.uartclk = clk_get_rate(sci_port->dclk); | 817 | sci_port->port.uartclk = clk_get_rate(sci_port->dclk); |
818 | 818 | ||
819 | if (sci_port->iclk) | 819 | if (sci_port->iclk) |
820 | clk_enable(sci_port->iclk); | 820 | clk_enable(sci_port->iclk); |
821 | } | 821 | } |
822 | 822 | ||
823 | static void sci_clk_disable(struct uart_port *port) | 823 | static void sci_clk_disable(struct uart_port *port) |
824 | { | 824 | { |
825 | struct sci_port *sci_port = to_sci_port(port); | 825 | struct sci_port *sci_port = to_sci_port(port); |
826 | 826 | ||
827 | if (sci_port->iclk) | 827 | if (sci_port->iclk) |
828 | clk_disable(sci_port->iclk); | 828 | clk_disable(sci_port->iclk); |
829 | 829 | ||
830 | clk_disable(sci_port->dclk); | 830 | clk_disable(sci_port->dclk); |
831 | } | 831 | } |
832 | 832 | ||
833 | static int sci_request_irq(struct sci_port *port) | 833 | static int sci_request_irq(struct sci_port *port) |
834 | { | 834 | { |
835 | int i; | 835 | int i; |
836 | irqreturn_t (*handlers[4])(int irq, void *ptr) = { | 836 | irqreturn_t (*handlers[4])(int irq, void *ptr) = { |
837 | sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt, | 837 | sci_er_interrupt, sci_rx_interrupt, sci_tx_interrupt, |
838 | sci_br_interrupt, | 838 | sci_br_interrupt, |
839 | }; | 839 | }; |
840 | const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full", | 840 | const char *desc[] = { "SCI Receive Error", "SCI Receive Data Full", |
841 | "SCI Transmit Data Empty", "SCI Break" }; | 841 | "SCI Transmit Data Empty", "SCI Break" }; |
842 | 842 | ||
843 | if (port->irqs[0] == port->irqs[1]) { | 843 | if (port->irqs[0] == port->irqs[1]) { |
844 | if (unlikely(!port->irqs[0])) | 844 | if (unlikely(!port->irqs[0])) |
845 | return -ENODEV; | 845 | return -ENODEV; |
846 | 846 | ||
847 | if (request_irq(port->irqs[0], sci_mpxed_interrupt, | 847 | if (request_irq(port->irqs[0], sci_mpxed_interrupt, |
848 | IRQF_DISABLED, "sci", port)) { | 848 | IRQF_DISABLED, "sci", port)) { |
849 | dev_err(port->port.dev, "Can't allocate IRQ\n"); | 849 | dev_err(port->port.dev, "Can't allocate IRQ\n"); |
850 | return -ENODEV; | 850 | return -ENODEV; |
851 | } | 851 | } |
852 | } else { | 852 | } else { |
853 | for (i = 0; i < ARRAY_SIZE(handlers); i++) { | 853 | for (i = 0; i < ARRAY_SIZE(handlers); i++) { |
854 | if (unlikely(!port->irqs[i])) | 854 | if (unlikely(!port->irqs[i])) |
855 | continue; | 855 | continue; |
856 | 856 | ||
857 | if (request_irq(port->irqs[i], handlers[i], | 857 | if (request_irq(port->irqs[i], handlers[i], |
858 | IRQF_DISABLED, desc[i], port)) { | 858 | IRQF_DISABLED, desc[i], port)) { |
859 | dev_err(port->port.dev, "Can't allocate IRQ\n"); | 859 | dev_err(port->port.dev, "Can't allocate IRQ\n"); |
860 | return -ENODEV; | 860 | return -ENODEV; |
861 | } | 861 | } |
862 | } | 862 | } |
863 | } | 863 | } |
864 | 864 | ||
865 | return 0; | 865 | return 0; |
866 | } | 866 | } |
867 | 867 | ||
868 | static void sci_free_irq(struct sci_port *port) | 868 | static void sci_free_irq(struct sci_port *port) |
869 | { | 869 | { |
870 | int i; | 870 | int i; |
871 | 871 | ||
872 | if (port->irqs[0] == port->irqs[1]) | 872 | if (port->irqs[0] == port->irqs[1]) |
873 | free_irq(port->irqs[0], port); | 873 | free_irq(port->irqs[0], port); |
874 | else { | 874 | else { |
875 | for (i = 0; i < ARRAY_SIZE(port->irqs); i++) { | 875 | for (i = 0; i < ARRAY_SIZE(port->irqs); i++) { |
876 | if (!port->irqs[i]) | 876 | if (!port->irqs[i]) |
877 | continue; | 877 | continue; |
878 | 878 | ||
879 | free_irq(port->irqs[i], port); | 879 | free_irq(port->irqs[i], port); |
880 | } | 880 | } |
881 | } | 881 | } |
882 | } | 882 | } |
883 | 883 | ||
884 | static unsigned int sci_tx_empty(struct uart_port *port) | 884 | static unsigned int sci_tx_empty(struct uart_port *port) |
885 | { | 885 | { |
886 | unsigned short status = sci_in(port, SCxSR); | 886 | unsigned short status = sci_in(port, SCxSR); |
887 | unsigned short in_tx_fifo = scif_txfill(port); | 887 | unsigned short in_tx_fifo = scif_txfill(port); |
888 | 888 | ||
889 | return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; | 889 | return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0; |
890 | } | 890 | } |
891 | 891 | ||
892 | static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) | 892 | static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) |
893 | { | 893 | { |
894 | /* This routine is used for setting signals of: DTR, DCD, CTS/RTS */ | 894 | /* This routine is used for setting signals of: DTR, DCD, CTS/RTS */ |
895 | /* We use SCIF's hardware for CTS/RTS, so don't need any for that. */ | 895 | /* We use SCIF's hardware for CTS/RTS, so don't need any for that. */ |
896 | /* If you have signals for DTR and DCD, please implement here. */ | 896 | /* If you have signals for DTR and DCD, please implement here. */ |
897 | } | 897 | } |
898 | 898 | ||
899 | static unsigned int sci_get_mctrl(struct uart_port *port) | 899 | static unsigned int sci_get_mctrl(struct uart_port *port) |
900 | { | 900 | { |
901 | /* This routine is used for getting signals of: DTR, DCD, DSR, RI, | 901 | /* This routine is used for getting signals of: DTR, DCD, DSR, RI, |
902 | and CTS/RTS */ | 902 | and CTS/RTS */ |
903 | 903 | ||
904 | return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; | 904 | return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; |
905 | } | 905 | } |
906 | 906 | ||
907 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 907 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
908 | static void sci_dma_tx_complete(void *arg) | 908 | static void sci_dma_tx_complete(void *arg) |
909 | { | 909 | { |
910 | struct sci_port *s = arg; | 910 | struct sci_port *s = arg; |
911 | struct uart_port *port = &s->port; | 911 | struct uart_port *port = &s->port; |
912 | struct circ_buf *xmit = &port->state->xmit; | 912 | struct circ_buf *xmit = &port->state->xmit; |
913 | unsigned long flags; | 913 | unsigned long flags; |
914 | 914 | ||
915 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | 915 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); |
916 | 916 | ||
917 | spin_lock_irqsave(&port->lock, flags); | 917 | spin_lock_irqsave(&port->lock, flags); |
918 | 918 | ||
919 | xmit->tail += s->sg_tx.length; | 919 | xmit->tail += s->sg_tx.length; |
920 | xmit->tail &= UART_XMIT_SIZE - 1; | 920 | xmit->tail &= UART_XMIT_SIZE - 1; |
921 | 921 | ||
922 | port->icount.tx += s->sg_tx.length; | 922 | port->icount.tx += s->sg_tx.length; |
923 | 923 | ||
924 | async_tx_ack(s->desc_tx); | 924 | async_tx_ack(s->desc_tx); |
925 | s->cookie_tx = -EINVAL; | 925 | s->cookie_tx = -EINVAL; |
926 | s->desc_tx = NULL; | 926 | s->desc_tx = NULL; |
927 | 927 | ||
928 | spin_unlock_irqrestore(&port->lock, flags); | 928 | spin_unlock_irqrestore(&port->lock, flags); |
929 | 929 | ||
930 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 930 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
931 | uart_write_wakeup(port); | 931 | uart_write_wakeup(port); |
932 | 932 | ||
933 | if (uart_circ_chars_pending(xmit)) | 933 | if (uart_circ_chars_pending(xmit)) |
934 | schedule_work(&s->work_tx); | 934 | schedule_work(&s->work_tx); |
935 | } | 935 | } |
936 | 936 | ||
937 | /* Locking: called with port lock held */ | 937 | /* Locking: called with port lock held */ |
938 | static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty, | 938 | static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty, |
939 | size_t count) | 939 | size_t count) |
940 | { | 940 | { |
941 | struct uart_port *port = &s->port; | 941 | struct uart_port *port = &s->port; |
942 | int i, active, room; | 942 | int i, active, room; |
943 | 943 | ||
944 | room = tty_buffer_request_room(tty, count); | 944 | room = tty_buffer_request_room(tty, count); |
945 | 945 | ||
946 | if (s->active_rx == s->cookie_rx[0]) { | 946 | if (s->active_rx == s->cookie_rx[0]) { |
947 | active = 0; | 947 | active = 0; |
948 | } else if (s->active_rx == s->cookie_rx[1]) { | 948 | } else if (s->active_rx == s->cookie_rx[1]) { |
949 | active = 1; | 949 | active = 1; |
950 | } else { | 950 | } else { |
951 | dev_err(port->dev, "cookie %d not found!\n", s->active_rx); | 951 | dev_err(port->dev, "cookie %d not found!\n", s->active_rx); |
952 | return 0; | 952 | return 0; |
953 | } | 953 | } |
954 | 954 | ||
955 | if (room < count) | 955 | if (room < count) |
956 | dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", | 956 | dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", |
957 | count - room); | 957 | count - room); |
958 | if (!room) | 958 | if (!room) |
959 | return room; | 959 | return room; |
960 | 960 | ||
961 | for (i = 0; i < room; i++) | 961 | for (i = 0; i < room; i++) |
962 | tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i], | 962 | tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i], |
963 | TTY_NORMAL); | 963 | TTY_NORMAL); |
964 | 964 | ||
965 | port->icount.rx += room; | 965 | port->icount.rx += room; |
966 | 966 | ||
967 | return room; | 967 | return room; |
968 | } | 968 | } |
969 | 969 | ||
970 | static void sci_dma_rx_complete(void *arg) | 970 | static void sci_dma_rx_complete(void *arg) |
971 | { | 971 | { |
972 | struct sci_port *s = arg; | 972 | struct sci_port *s = arg; |
973 | struct uart_port *port = &s->port; | 973 | struct uart_port *port = &s->port; |
974 | struct tty_struct *tty = port->state->port.tty; | 974 | struct tty_struct *tty = port->state->port.tty; |
975 | unsigned long flags; | 975 | unsigned long flags; |
976 | int count; | 976 | int count; |
977 | 977 | ||
978 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | 978 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); |
979 | 979 | ||
980 | spin_lock_irqsave(&port->lock, flags); | 980 | spin_lock_irqsave(&port->lock, flags); |
981 | 981 | ||
982 | count = sci_dma_rx_push(s, tty, s->buf_len_rx); | 982 | count = sci_dma_rx_push(s, tty, s->buf_len_rx); |
983 | 983 | ||
984 | mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5)); | 984 | mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5)); |
985 | 985 | ||
986 | spin_unlock_irqrestore(&port->lock, flags); | 986 | spin_unlock_irqrestore(&port->lock, flags); |
987 | 987 | ||
988 | if (count) | 988 | if (count) |
989 | tty_flip_buffer_push(tty); | 989 | tty_flip_buffer_push(tty); |
990 | 990 | ||
991 | schedule_work(&s->work_rx); | 991 | schedule_work(&s->work_rx); |
992 | } | 992 | } |
993 | 993 | ||
994 | static void sci_start_rx(struct uart_port *port); | 994 | static void sci_start_rx(struct uart_port *port); |
995 | static void sci_start_tx(struct uart_port *port); | 995 | static void sci_start_tx(struct uart_port *port); |
996 | 996 | ||
997 | static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) | 997 | static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) |
998 | { | 998 | { |
999 | struct dma_chan *chan = s->chan_rx; | 999 | struct dma_chan *chan = s->chan_rx; |
1000 | struct uart_port *port = &s->port; | 1000 | struct uart_port *port = &s->port; |
1001 | 1001 | ||
1002 | s->chan_rx = NULL; | 1002 | s->chan_rx = NULL; |
1003 | s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; | 1003 | s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; |
1004 | dma_release_channel(chan); | 1004 | dma_release_channel(chan); |
1005 | dma_free_coherent(port->dev, s->buf_len_rx * 2, | 1005 | dma_free_coherent(port->dev, s->buf_len_rx * 2, |
1006 | sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); | 1006 | sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); |
1007 | if (enable_pio) | 1007 | if (enable_pio) |
1008 | sci_start_rx(port); | 1008 | sci_start_rx(port); |
1009 | } | 1009 | } |
1010 | 1010 | ||
1011 | static void sci_tx_dma_release(struct sci_port *s, bool enable_pio) | 1011 | static void sci_tx_dma_release(struct sci_port *s, bool enable_pio) |
1012 | { | 1012 | { |
1013 | struct dma_chan *chan = s->chan_tx; | 1013 | struct dma_chan *chan = s->chan_tx; |
1014 | struct uart_port *port = &s->port; | 1014 | struct uart_port *port = &s->port; |
1015 | 1015 | ||
1016 | s->chan_tx = NULL; | 1016 | s->chan_tx = NULL; |
1017 | s->cookie_tx = -EINVAL; | 1017 | s->cookie_tx = -EINVAL; |
1018 | dma_release_channel(chan); | 1018 | dma_release_channel(chan); |
1019 | if (enable_pio) | 1019 | if (enable_pio) |
1020 | sci_start_tx(port); | 1020 | sci_start_tx(port); |
1021 | } | 1021 | } |
1022 | 1022 | ||
1023 | static void sci_submit_rx(struct sci_port *s) | 1023 | static void sci_submit_rx(struct sci_port *s) |
1024 | { | 1024 | { |
1025 | struct dma_chan *chan = s->chan_rx; | 1025 | struct dma_chan *chan = s->chan_rx; |
1026 | int i; | 1026 | int i; |
1027 | 1027 | ||
1028 | for (i = 0; i < 2; i++) { | 1028 | for (i = 0; i < 2; i++) { |
1029 | struct scatterlist *sg = &s->sg_rx[i]; | 1029 | struct scatterlist *sg = &s->sg_rx[i]; |
1030 | struct dma_async_tx_descriptor *desc; | 1030 | struct dma_async_tx_descriptor *desc; |
1031 | 1031 | ||
1032 | desc = chan->device->device_prep_slave_sg(chan, | 1032 | desc = chan->device->device_prep_slave_sg(chan, |
1033 | sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT); | 1033 | sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT); |
1034 | 1034 | ||
1035 | if (desc) { | 1035 | if (desc) { |
1036 | s->desc_rx[i] = desc; | 1036 | s->desc_rx[i] = desc; |
1037 | desc->callback = sci_dma_rx_complete; | 1037 | desc->callback = sci_dma_rx_complete; |
1038 | desc->callback_param = s; | 1038 | desc->callback_param = s; |
1039 | s->cookie_rx[i] = desc->tx_submit(desc); | 1039 | s->cookie_rx[i] = desc->tx_submit(desc); |
1040 | } | 1040 | } |
1041 | 1041 | ||
1042 | if (!desc || s->cookie_rx[i] < 0) { | 1042 | if (!desc || s->cookie_rx[i] < 0) { |
1043 | if (i) { | 1043 | if (i) { |
1044 | async_tx_ack(s->desc_rx[0]); | 1044 | async_tx_ack(s->desc_rx[0]); |
1045 | s->cookie_rx[0] = -EINVAL; | 1045 | s->cookie_rx[0] = -EINVAL; |
1046 | } | 1046 | } |
1047 | if (desc) { | 1047 | if (desc) { |
1048 | async_tx_ack(desc); | 1048 | async_tx_ack(desc); |
1049 | s->cookie_rx[i] = -EINVAL; | 1049 | s->cookie_rx[i] = -EINVAL; |
1050 | } | 1050 | } |
1051 | dev_warn(s->port.dev, | 1051 | dev_warn(s->port.dev, |
1052 | "failed to re-start DMA, using PIO\n"); | 1052 | "failed to re-start DMA, using PIO\n"); |
1053 | sci_rx_dma_release(s, true); | 1053 | sci_rx_dma_release(s, true); |
1054 | return; | 1054 | return; |
1055 | } | 1055 | } |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | s->active_rx = s->cookie_rx[0]; | 1058 | s->active_rx = s->cookie_rx[0]; |
1059 | 1059 | ||
1060 | dma_async_issue_pending(chan); | 1060 | dma_async_issue_pending(chan); |
1061 | } | 1061 | } |
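sci_submit_rx() follows the standard dmaengine slave sequence: prepare a descriptor from a scatterlist, attach a completion callback, submit it for a cookie, and finally kick the engine with dma_async_issue_pending(). A condensed, hedged restatement of that idiom (kernel context; chan, sg, done and ctx are placeholders for the driver's own fields, and the PIO fallback is elided):

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>
	#include <linux/errno.h>

	static dma_cookie_t submit_one_rx(struct dma_chan *chan,
					  struct scatterlist *sg,
					  dma_async_tx_callback done, void *ctx)
	{
		struct dma_async_tx_descriptor *desc;

		/* 1. Have the DMA driver build a descriptor for one sg entry. */
		desc = chan->device->device_prep_slave_sg(chan, sg, 1,
				DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
		if (!desc)
			return -ENOMEM;		/* caller falls back to PIO */

		/* 2. Hook the completion callback; 3. submit for a cookie. */
		desc->callback = done;
		desc->callback_param = ctx;
		return desc->tx_submit(desc);	/* negative cookie = failure */
	}

The caller then issues dma_async_issue_pending(chan) once all descriptors are queued, exactly as the function above does after both buffers are submitted.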
1062 | 1062 | ||
1063 | static void work_fn_rx(struct work_struct *work) | 1063 | static void work_fn_rx(struct work_struct *work) |
1064 | { | 1064 | { |
1065 | struct sci_port *s = container_of(work, struct sci_port, work_rx); | 1065 | struct sci_port *s = container_of(work, struct sci_port, work_rx); |
1066 | struct uart_port *port = &s->port; | 1066 | struct uart_port *port = &s->port; |
1067 | struct dma_async_tx_descriptor *desc; | 1067 | struct dma_async_tx_descriptor *desc; |
1068 | int new; | 1068 | int new; |
1069 | 1069 | ||
1070 | if (s->active_rx == s->cookie_rx[0]) { | 1070 | if (s->active_rx == s->cookie_rx[0]) { |
1071 | new = 0; | 1071 | new = 0; |
1072 | } else if (s->active_rx == s->cookie_rx[1]) { | 1072 | } else if (s->active_rx == s->cookie_rx[1]) { |
1073 | new = 1; | 1073 | new = 1; |
1074 | } else { | 1074 | } else { |
1075 | dev_err(port->dev, "cookie %d not found!\n", s->active_rx); | 1075 | dev_err(port->dev, "cookie %d not found!\n", s->active_rx); |
1076 | return; | 1076 | return; |
1077 | } | 1077 | } |
1078 | desc = s->desc_rx[new]; | 1078 | desc = s->desc_rx[new]; |
1079 | 1079 | ||
1080 | if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != | 1080 | if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != |
1081 | DMA_SUCCESS) { | 1081 | DMA_SUCCESS) { |
1082 | /* Handle incomplete DMA receive */ | 1082 | /* Handle incomplete DMA receive */ |
1083 | struct tty_struct *tty = port->state->port.tty; | 1083 | struct tty_struct *tty = port->state->port.tty; |
1084 | struct dma_chan *chan = s->chan_rx; | 1084 | struct dma_chan *chan = s->chan_rx; |
1085 | struct sh_desc *sh_desc = container_of(desc, struct sh_desc, | 1085 | struct sh_desc *sh_desc = container_of(desc, struct sh_desc, |
1086 | async_tx); | 1086 | async_tx); |
1087 | unsigned long flags; | 1087 | unsigned long flags; |
1088 | int count; | 1088 | int count; |
1089 | 1089 | ||
1090 | chan->device->device_control(chan, DMA_TERMINATE_ALL); | 1090 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); |
1091 | dev_dbg(port->dev, "Read %u bytes with cookie %d\n", | 1091 | dev_dbg(port->dev, "Read %u bytes with cookie %d\n", |
1092 | sh_desc->partial, sh_desc->cookie); | 1092 | sh_desc->partial, sh_desc->cookie); |
1093 | 1093 | ||
1094 | spin_lock_irqsave(&port->lock, flags); | 1094 | spin_lock_irqsave(&port->lock, flags); |
1095 | count = sci_dma_rx_push(s, tty, sh_desc->partial); | 1095 | count = sci_dma_rx_push(s, tty, sh_desc->partial); |
1096 | spin_unlock_irqrestore(&port->lock, flags); | 1096 | spin_unlock_irqrestore(&port->lock, flags); |
1097 | 1097 | ||
1098 | if (count) | 1098 | if (count) |
1099 | tty_flip_buffer_push(tty); | 1099 | tty_flip_buffer_push(tty); |
1100 | 1100 | ||
1101 | sci_submit_rx(s); | 1101 | sci_submit_rx(s); |
1102 | 1102 | ||
1103 | return; | 1103 | return; |
1104 | } | 1104 | } |
1105 | 1105 | ||
1106 | s->cookie_rx[new] = desc->tx_submit(desc); | 1106 | s->cookie_rx[new] = desc->tx_submit(desc); |
1107 | if (s->cookie_rx[new] < 0) { | 1107 | if (s->cookie_rx[new] < 0) { |
1108 | dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); | 1108 | dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); |
1109 | sci_rx_dma_release(s, true); | 1109 | sci_rx_dma_release(s, true); |
1110 | return; | 1110 | return; |
1111 | } | 1111 | } |
1112 | 1112 | ||
1113 | dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__, | 1113 | dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__, |
1114 | s->cookie_rx[new], new); | 1114 | s->cookie_rx[new], new); |
1115 | 1115 | ||
1116 | s->active_rx = s->cookie_rx[!new]; | 1116 | s->active_rx = s->cookie_rx[!new]; |
1117 | } | 1117 | } |
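The one functional change in this hunk is the device_control() call in the incomplete-receive path above: this commit extends the control hook with a third, ioctl()-style argument, per its include/linux/dmaengine.h change:

	/* The control hook now carries an opaque argument: */
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg);

	/* Commands with no payload, like the terminate above, pass 0: */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);

All call sites in this patch are mechanical conversions of this form; the argument only becomes meaningful for later commands that need external data.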
1118 | 1118 | ||
1119 | static void work_fn_tx(struct work_struct *work) | 1119 | static void work_fn_tx(struct work_struct *work) |
1120 | { | 1120 | { |
1121 | struct sci_port *s = container_of(work, struct sci_port, work_tx); | 1121 | struct sci_port *s = container_of(work, struct sci_port, work_tx); |
1122 | struct dma_async_tx_descriptor *desc; | 1122 | struct dma_async_tx_descriptor *desc; |
1123 | struct dma_chan *chan = s->chan_tx; | 1123 | struct dma_chan *chan = s->chan_tx; |
1124 | struct uart_port *port = &s->port; | 1124 | struct uart_port *port = &s->port; |
1125 | struct circ_buf *xmit = &port->state->xmit; | 1125 | struct circ_buf *xmit = &port->state->xmit; |
1126 | struct scatterlist *sg = &s->sg_tx; | 1126 | struct scatterlist *sg = &s->sg_tx; |
1127 | 1127 | ||
1128 | /* | 1128 | /* |
1129 | * DMA is idle now. | 1129 | * DMA is idle now. |
1130 | * Port xmit buffer is already mapped, and it is one page... Just adjust | 1130 | * Port xmit buffer is already mapped, and it is one page... Just adjust |
1131 | * offsets and lengths. Since it is a circular buffer, we have to | 1131 | * offsets and lengths. Since it is a circular buffer, we have to |
1132 | * transmit till the end, and then the rest. Take the port lock to get a | 1132 | * transmit till the end, and then the rest. Take the port lock to get a |
1133 | * consistent xmit buffer state. | 1133 | * consistent xmit buffer state. |
1134 | */ | 1134 | */ |
1135 | spin_lock_irq(&port->lock); | 1135 | spin_lock_irq(&port->lock); |
1136 | sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); | 1136 | sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); |
1137 | sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + | 1137 | sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + |
1138 | sg->offset; | 1138 | sg->offset; |
1139 | sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), | 1139 | sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), |
1140 | CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); | 1140 | CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); |
1141 | sg->dma_length = sg->length; | 1141 | sg->dma_length = sg->length; |
1142 | spin_unlock_irq(&port->lock); | 1142 | spin_unlock_irq(&port->lock); |
1143 | 1143 | ||
1144 | BUG_ON(!sg->length); | 1144 | BUG_ON(!sg->length); |
1145 | 1145 | ||
1146 | desc = chan->device->device_prep_slave_sg(chan, | 1146 | desc = chan->device->device_prep_slave_sg(chan, |
1147 | sg, s->sg_len_tx, DMA_TO_DEVICE, | 1147 | sg, s->sg_len_tx, DMA_TO_DEVICE, |
1148 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 1148 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
1149 | if (!desc) { | 1149 | if (!desc) { |
1150 | /* switch to PIO */ | 1150 | /* switch to PIO */ |
1151 | sci_tx_dma_release(s, true); | 1151 | sci_tx_dma_release(s, true); |
1152 | return; | 1152 | return; |
1153 | } | 1153 | } |
1154 | 1154 | ||
1155 | dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE); | 1155 | dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE); |
1156 | 1156 | ||
1157 | spin_lock_irq(&port->lock); | 1157 | spin_lock_irq(&port->lock); |
1158 | s->desc_tx = desc; | 1158 | s->desc_tx = desc; |
1159 | desc->callback = sci_dma_tx_complete; | 1159 | desc->callback = sci_dma_tx_complete; |
1160 | desc->callback_param = s; | 1160 | desc->callback_param = s; |
1161 | spin_unlock_irq(&port->lock); | 1161 | spin_unlock_irq(&port->lock); |
1162 | s->cookie_tx = desc->tx_submit(desc); | 1162 | s->cookie_tx = desc->tx_submit(desc); |
1163 | if (s->cookie_tx < 0) { | 1163 | if (s->cookie_tx < 0) { |
1164 | dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); | 1164 | dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); |
1165 | /* switch to PIO */ | 1165 | /* switch to PIO */ |
1166 | sci_tx_dma_release(s, true); | 1166 | sci_tx_dma_release(s, true); |
1167 | return; | 1167 | return; |
1168 | } | 1168 | } |
1169 | 1169 | ||
1170 | dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__, | 1170 | dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__, |
1171 | xmit->buf, xmit->tail, xmit->head, s->cookie_tx); | 1171 | xmit->buf, xmit->tail, xmit->head, s->cookie_tx); |
1172 | 1172 | ||
1173 | dma_async_issue_pending(chan); | 1173 | dma_async_issue_pending(chan); |
1174 | } | 1174 | } |
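The sg->length computation above takes the smaller of the total pending bytes (CIRC_CNT) and the bytes up to the end of the page (CIRC_CNT_TO_END), so a single descriptor never wraps; the remainder goes out on the next pass once the completion callback advances xmit->tail. A user-space sketch with invented head/tail values (the helpers mirror include/linux/circ_buf.h):

	#include <stdio.h>

	#define UART_XMIT_SIZE	4096	/* one page, as the driver assumes */

	#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))
	#define CIRC_CNT_TO_END(head, tail, size) \
		({int end = (size) - (tail); \
		  int n = ((head) + end) & ((size) - 1); \
		  n < end ? n : end; })

	int main(void)
	{
		int head = 100, tail = 4000;	/* invented: buffer has wrapped */
		int cnt = CIRC_CNT(head, tail, UART_XMIT_SIZE);
		int to_end = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE);
		int len = cnt < to_end ? cnt : to_end;

		/* 196 bytes pending in total, but only 96 before the wrap. */
		printf("pending=%d to_end=%d -> this pass sends %d\n",
		       cnt, to_end, len);
		return 0;
	}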
1175 | #endif | 1175 | #endif |
1176 | 1176 | ||
1177 | static void sci_start_tx(struct uart_port *port) | 1177 | static void sci_start_tx(struct uart_port *port) |
1178 | { | 1178 | { |
1179 | unsigned short ctrl; | 1179 | unsigned short ctrl; |
1180 | 1180 | ||
1181 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 1181 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
1182 | struct sci_port *s = to_sci_port(port); | 1182 | struct sci_port *s = to_sci_port(port); |
1183 | 1183 | ||
1184 | if (s->chan_tx) { | 1184 | if (s->chan_tx) { |
1185 | if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0) | 1185 | if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0) |
1186 | schedule_work(&s->work_tx); | 1186 | schedule_work(&s->work_tx); |
1187 | 1187 | ||
1188 | return; | 1188 | return; |
1189 | } | 1189 | } |
1190 | #endif | 1190 | #endif |
1191 | 1191 | ||
1192 | /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ | 1192 | /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ |
1193 | ctrl = sci_in(port, SCSCR); | 1193 | ctrl = sci_in(port, SCSCR); |
1194 | ctrl |= SCI_CTRL_FLAGS_TIE; | 1194 | ctrl |= SCI_CTRL_FLAGS_TIE; |
1195 | sci_out(port, SCSCR, ctrl); | 1195 | sci_out(port, SCSCR, ctrl); |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | static void sci_stop_tx(struct uart_port *port) | 1198 | static void sci_stop_tx(struct uart_port *port) |
1199 | { | 1199 | { |
1200 | unsigned short ctrl; | 1200 | unsigned short ctrl; |
1201 | 1201 | ||
1202 | /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */ | 1202 | /* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */ |
1203 | ctrl = sci_in(port, SCSCR); | 1203 | ctrl = sci_in(port, SCSCR); |
1204 | ctrl &= ~SCI_CTRL_FLAGS_TIE; | 1204 | ctrl &= ~SCI_CTRL_FLAGS_TIE; |
1205 | sci_out(port, SCSCR, ctrl); | 1205 | sci_out(port, SCSCR, ctrl); |
1206 | } | 1206 | } |
1207 | 1207 | ||
1208 | static void sci_start_rx(struct uart_port *port) | 1208 | static void sci_start_rx(struct uart_port *port) |
1209 | { | 1209 | { |
1210 | unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; | 1210 | unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; |
1211 | 1211 | ||
1212 | /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ | 1212 | /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ |
1213 | ctrl |= sci_in(port, SCSCR); | 1213 | ctrl |= sci_in(port, SCSCR); |
1214 | sci_out(port, SCSCR, ctrl); | 1214 | sci_out(port, SCSCR, ctrl); |
1215 | } | 1215 | } |
1216 | 1216 | ||
1217 | static void sci_stop_rx(struct uart_port *port) | 1217 | static void sci_stop_rx(struct uart_port *port) |
1218 | { | 1218 | { |
1219 | unsigned short ctrl; | 1219 | unsigned short ctrl; |
1220 | 1220 | ||
1221 | /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */ | 1221 | /* Clear RIE (Receive Interrupt Enable) bit in SCSCR */ |
1222 | ctrl = sci_in(port, SCSCR); | 1222 | ctrl = sci_in(port, SCSCR); |
1223 | ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE); | 1223 | ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE); |
1224 | sci_out(port, SCSCR, ctrl); | 1224 | sci_out(port, SCSCR, ctrl); |
1225 | } | 1225 | } |
1226 | 1226 | ||
1227 | static void sci_enable_ms(struct uart_port *port) | 1227 | static void sci_enable_ms(struct uart_port *port) |
1228 | { | 1228 | { |
1229 | /* Nothing here yet .. */ | 1229 | /* Nothing here yet .. */ |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | static void sci_break_ctl(struct uart_port *port, int break_state) | 1232 | static void sci_break_ctl(struct uart_port *port, int break_state) |
1233 | { | 1233 | { |
1234 | /* Nothing here yet .. */ | 1234 | /* Nothing here yet .. */ |
1235 | } | 1235 | } |
1236 | 1236 | ||
1237 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 1237 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
1238 | static bool filter(struct dma_chan *chan, void *slave) | 1238 | static bool filter(struct dma_chan *chan, void *slave) |
1239 | { | 1239 | { |
1240 | struct sh_dmae_slave *param = slave; | 1240 | struct sh_dmae_slave *param = slave; |
1241 | 1241 | ||
1242 | dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, | 1242 | dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, |
1243 | param->slave_id); | 1243 | param->slave_id); |
1244 | 1244 | ||
1245 | if (param->dma_dev == chan->device->dev) { | 1245 | if (param->dma_dev == chan->device->dev) { |
1246 | chan->private = param; | 1246 | chan->private = param; |
1247 | return true; | 1247 | return true; |
1248 | } else { | 1248 | } else { |
1249 | return false; | 1249 | return false; |
1250 | } | 1250 | } |
1251 | } | 1251 | } |
1252 | 1252 | ||
1253 | static void rx_timer_fn(unsigned long arg) | 1253 | static void rx_timer_fn(unsigned long arg) |
1254 | { | 1254 | { |
1255 | struct sci_port *s = (struct sci_port *)arg; | 1255 | struct sci_port *s = (struct sci_port *)arg; |
1256 | struct uart_port *port = &s->port; | 1256 | struct uart_port *port = &s->port; |
1257 | 1257 | ||
1258 | u16 scr = sci_in(port, SCSCR); | 1258 | u16 scr = sci_in(port, SCSCR); |
1259 | sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE); | 1259 | sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE); |
1260 | dev_dbg(port->dev, "DMA Rx timed out\n"); | 1260 | dev_dbg(port->dev, "DMA Rx timed out\n"); |
1261 | schedule_work(&s->work_rx); | 1261 | schedule_work(&s->work_rx); |
1262 | } | 1262 | } |
1263 | 1263 | ||
1264 | static void sci_request_dma(struct uart_port *port) | 1264 | static void sci_request_dma(struct uart_port *port) |
1265 | { | 1265 | { |
1266 | struct sci_port *s = to_sci_port(port); | 1266 | struct sci_port *s = to_sci_port(port); |
1267 | struct sh_dmae_slave *param; | 1267 | struct sh_dmae_slave *param; |
1268 | struct dma_chan *chan; | 1268 | struct dma_chan *chan; |
1269 | dma_cap_mask_t mask; | 1269 | dma_cap_mask_t mask; |
1270 | int nent; | 1270 | int nent; |
1271 | 1271 | ||
1272 | dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__, | 1272 | dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__, |
1273 | port->line, s->dma_dev); | 1273 | port->line, s->dma_dev); |
1274 | 1274 | ||
1275 | if (!s->dma_dev) | 1275 | if (!s->dma_dev) |
1276 | return; | 1276 | return; |
1277 | 1277 | ||
1278 | dma_cap_zero(mask); | 1278 | dma_cap_zero(mask); |
1279 | dma_cap_set(DMA_SLAVE, mask); | 1279 | dma_cap_set(DMA_SLAVE, mask); |
1280 | 1280 | ||
1281 | param = &s->param_tx; | 1281 | param = &s->param_tx; |
1282 | 1282 | ||
1283 | /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ | 1283 | /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ |
1284 | param->slave_id = s->slave_tx; | 1284 | param->slave_id = s->slave_tx; |
1285 | param->dma_dev = s->dma_dev; | 1285 | param->dma_dev = s->dma_dev; |
1286 | 1286 | ||
1287 | s->cookie_tx = -EINVAL; | 1287 | s->cookie_tx = -EINVAL; |
1288 | chan = dma_request_channel(mask, filter, param); | 1288 | chan = dma_request_channel(mask, filter, param); |
1289 | dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); | 1289 | dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); |
1290 | if (chan) { | 1290 | if (chan) { |
1291 | s->chan_tx = chan; | 1291 | s->chan_tx = chan; |
1292 | sg_init_table(&s->sg_tx, 1); | 1292 | sg_init_table(&s->sg_tx, 1); |
1293 | /* UART circular tx buffer is an aligned page. */ | 1293 | /* UART circular tx buffer is an aligned page. */ |
1294 | BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); | 1294 | BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); |
1295 | sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf), | 1295 | sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf), |
1296 | UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK); | 1296 | UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK); |
1297 | nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE); | 1297 | nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE); |
1298 | if (!nent) | 1298 | if (!nent) |
1299 | sci_tx_dma_release(s, false); | 1299 | sci_tx_dma_release(s, false); |
1300 | else | 1300 | else |
1301 | dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, | 1301 | dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, |
1302 | sg_dma_len(&s->sg_tx), | 1302 | sg_dma_len(&s->sg_tx), |
1303 | port->state->xmit.buf, sg_dma_address(&s->sg_tx)); | 1303 | port->state->xmit.buf, sg_dma_address(&s->sg_tx)); |
1304 | 1304 | ||
1305 | s->sg_len_tx = nent; | 1305 | s->sg_len_tx = nent; |
1306 | 1306 | ||
1307 | INIT_WORK(&s->work_tx, work_fn_tx); | 1307 | INIT_WORK(&s->work_tx, work_fn_tx); |
1308 | } | 1308 | } |
1309 | 1309 | ||
1310 | param = &s->param_rx; | 1310 | param = &s->param_rx; |
1311 | 1311 | ||
1312 | /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ | 1312 | /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ |
1313 | param->slave_id = s->slave_rx; | 1313 | param->slave_id = s->slave_rx; |
1314 | param->dma_dev = s->dma_dev; | 1314 | param->dma_dev = s->dma_dev; |
1315 | 1315 | ||
1316 | chan = dma_request_channel(mask, filter, param); | 1316 | chan = dma_request_channel(mask, filter, param); |
1317 | dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); | 1317 | dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); |
1318 | if (chan) { | 1318 | if (chan) { |
1319 | dma_addr_t dma[2]; | 1319 | dma_addr_t dma[2]; |
1320 | void *buf[2]; | 1320 | void *buf[2]; |
1321 | int i; | 1321 | int i; |
1322 | 1322 | ||
1323 | s->chan_rx = chan; | 1323 | s->chan_rx = chan; |
1324 | 1324 | ||
1325 | s->buf_len_rx = 2 * max(16, (int)port->fifosize); | 1325 | s->buf_len_rx = 2 * max(16, (int)port->fifosize); |
1326 | buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2, | 1326 | buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2, |
1327 | &dma[0], GFP_KERNEL); | 1327 | &dma[0], GFP_KERNEL); |
1328 | 1328 | ||
1329 | if (!buf[0]) { | 1329 | if (!buf[0]) { |
1330 | dev_warn(port->dev, | 1330 | dev_warn(port->dev, |
1331 | "failed to allocate dma buffer, using PIO\n"); | 1331 | "failed to allocate dma buffer, using PIO\n"); |
1332 | sci_rx_dma_release(s, true); | 1332 | sci_rx_dma_release(s, true); |
1333 | return; | 1333 | return; |
1334 | } | 1334 | } |
1335 | 1335 | ||
1336 | buf[1] = buf[0] + s->buf_len_rx; | 1336 | buf[1] = buf[0] + s->buf_len_rx; |
1337 | dma[1] = dma[0] + s->buf_len_rx; | 1337 | dma[1] = dma[0] + s->buf_len_rx; |
1338 | 1338 | ||
1339 | for (i = 0; i < 2; i++) { | 1339 | for (i = 0; i < 2; i++) { |
1340 | struct scatterlist *sg = &s->sg_rx[i]; | 1340 | struct scatterlist *sg = &s->sg_rx[i]; |
1341 | 1341 | ||
1342 | sg_init_table(sg, 1); | 1342 | sg_init_table(sg, 1); |
1343 | sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx, | 1343 | sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx, |
1344 | (int)buf[i] & ~PAGE_MASK); | 1344 | (int)buf[i] & ~PAGE_MASK); |
1345 | sg->dma_address = dma[i]; | 1345 | sg->dma_address = dma[i]; |
1346 | sg->dma_length = sg->length; | 1346 | sg->dma_length = sg->length; |
1347 | } | 1347 | } |
1348 | 1348 | ||
1349 | INIT_WORK(&s->work_rx, work_fn_rx); | 1349 | INIT_WORK(&s->work_rx, work_fn_rx); |
1350 | setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s); | 1350 | setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s); |
1351 | 1351 | ||
1352 | sci_submit_rx(s); | 1352 | sci_submit_rx(s); |
1353 | } | 1353 | } |
1354 | } | 1354 | } |
1355 | 1355 | ||
1356 | static void sci_free_dma(struct uart_port *port) | 1356 | static void sci_free_dma(struct uart_port *port) |
1357 | { | 1357 | { |
1358 | struct sci_port *s = to_sci_port(port); | 1358 | struct sci_port *s = to_sci_port(port); |
1359 | 1359 | ||
1360 | if (!s->dma_dev) | 1360 | if (!s->dma_dev) |
1361 | return; | 1361 | return; |
1362 | 1362 | ||
1363 | if (s->chan_tx) | 1363 | if (s->chan_tx) |
1364 | sci_tx_dma_release(s, false); | 1364 | sci_tx_dma_release(s, false); |
1365 | if (s->chan_rx) | 1365 | if (s->chan_rx) |
1366 | sci_rx_dma_release(s, false); | 1366 | sci_rx_dma_release(s, false); |
1367 | } | 1367 | } |
1368 | #endif | 1368 | #endif |
1369 | 1369 | ||
1370 | static int sci_startup(struct uart_port *port) | 1370 | static int sci_startup(struct uart_port *port) |
1371 | { | 1371 | { |
1372 | struct sci_port *s = to_sci_port(port); | 1372 | struct sci_port *s = to_sci_port(port); |
1373 | 1373 | ||
1374 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | 1374 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); |
1375 | 1375 | ||
1376 | if (s->enable) | 1376 | if (s->enable) |
1377 | s->enable(port); | 1377 | s->enable(port); |
1378 | 1378 | ||
1379 | sci_request_irq(s); | 1379 | sci_request_irq(s); |
1380 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 1380 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
1381 | sci_request_dma(port); | 1381 | sci_request_dma(port); |
1382 | #endif | 1382 | #endif |
1383 | sci_start_tx(port); | 1383 | sci_start_tx(port); |
1384 | sci_start_rx(port); | 1384 | sci_start_rx(port); |
1385 | 1385 | ||
1386 | return 0; | 1386 | return 0; |
1387 | } | 1387 | } |
1388 | 1388 | ||
1389 | static void sci_shutdown(struct uart_port *port) | 1389 | static void sci_shutdown(struct uart_port *port) |
1390 | { | 1390 | { |
1391 | struct sci_port *s = to_sci_port(port); | 1391 | struct sci_port *s = to_sci_port(port); |
1392 | 1392 | ||
1393 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); | 1393 | dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); |
1394 | 1394 | ||
1395 | sci_stop_rx(port); | 1395 | sci_stop_rx(port); |
1396 | sci_stop_tx(port); | 1396 | sci_stop_tx(port); |
1397 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 1397 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
1398 | sci_free_dma(port); | 1398 | sci_free_dma(port); |
1399 | #endif | 1399 | #endif |
1400 | sci_free_irq(s); | 1400 | sci_free_irq(s); |
1401 | 1401 | ||
1402 | if (s->disable) | 1402 | if (s->disable) |
1403 | s->disable(port); | 1403 | s->disable(port); |
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | static void sci_set_termios(struct uart_port *port, struct ktermios *termios, | 1406 | static void sci_set_termios(struct uart_port *port, struct ktermios *termios, |
1407 | struct ktermios *old) | 1407 | struct ktermios *old) |
1408 | { | 1408 | { |
1409 | unsigned int status, baud, smr_val, max_baud; | 1409 | unsigned int status, baud, smr_val, max_baud; |
1410 | int t = -1; | 1410 | int t = -1; |
1411 | 1411 | ||
1412 | /* | 1412 | /* |
1413 | * earlyprintk comes here early on with port->uartclk set to zero. | 1413 | * earlyprintk comes here early on with port->uartclk set to zero. |
1414 | * the clock framework is not up and running at this point so here | 1414 | * the clock framework is not up and running at this point so here |
1415 | * we assume that 115200 is the maximum baud rate. please note that | 1415 | * we assume that 115200 is the maximum baud rate. please note that |
1416 | * the baud rate is not programmed during earlyprintk - it is assumed | 1416 | * the baud rate is not programmed during earlyprintk - it is assumed |
1417 | * that the previous boot loader has enabled required clocks and | 1417 | * that the previous boot loader has enabled required clocks and |
1418 | * setup the baud rate generator hardware for us already. | 1418 | * setup the baud rate generator hardware for us already. |
1419 | */ | 1419 | */ |
1420 | max_baud = port->uartclk ? port->uartclk / 16 : 115200; | 1420 | max_baud = port->uartclk ? port->uartclk / 16 : 115200; |
1421 | 1421 | ||
1422 | baud = uart_get_baud_rate(port, termios, old, 0, max_baud); | 1422 | baud = uart_get_baud_rate(port, termios, old, 0, max_baud); |
1423 | if (likely(baud && port->uartclk)) | 1423 | if (likely(baud && port->uartclk)) |
1424 | t = SCBRR_VALUE(baud, port->uartclk); | 1424 | t = SCBRR_VALUE(baud, port->uartclk); |
1425 | 1425 | ||
1426 | do { | 1426 | do { |
1427 | status = sci_in(port, SCxSR); | 1427 | status = sci_in(port, SCxSR); |
1428 | } while (!(status & SCxSR_TEND(port))); | 1428 | } while (!(status & SCxSR_TEND(port))); |
1429 | 1429 | ||
1430 | sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ | 1430 | sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */ |
1431 | 1431 | ||
1432 | if (port->type != PORT_SCI) | 1432 | if (port->type != PORT_SCI) |
1433 | sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); | 1433 | sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST); |
1434 | 1434 | ||
1435 | smr_val = sci_in(port, SCSMR) & 3; | 1435 | smr_val = sci_in(port, SCSMR) & 3; |
1436 | if ((termios->c_cflag & CSIZE) == CS7) | 1436 | if ((termios->c_cflag & CSIZE) == CS7) |
1437 | smr_val |= 0x40; | 1437 | smr_val |= 0x40; |
1438 | if (termios->c_cflag & PARENB) | 1438 | if (termios->c_cflag & PARENB) |
1439 | smr_val |= 0x20; | 1439 | smr_val |= 0x20; |
1440 | if (termios->c_cflag & PARODD) | 1440 | if (termios->c_cflag & PARODD) |
1441 | smr_val |= 0x30; | 1441 | smr_val |= 0x30; |
1442 | if (termios->c_cflag & CSTOPB) | 1442 | if (termios->c_cflag & CSTOPB) |
1443 | smr_val |= 0x08; | 1443 | smr_val |= 0x08; |
1444 | 1444 | ||
1445 | uart_update_timeout(port, termios->c_cflag, baud); | 1445 | uart_update_timeout(port, termios->c_cflag, baud); |
1446 | 1446 | ||
1447 | sci_out(port, SCSMR, smr_val); | 1447 | sci_out(port, SCSMR, smr_val); |
1448 | 1448 | ||
1449 | dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t, | 1449 | dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t, |
1450 | SCSCR_INIT(port)); | 1450 | SCSCR_INIT(port)); |
1451 | 1451 | ||
1452 | if (t > 0) { | 1452 | if (t > 0) { |
1453 | if (t >= 256) { | 1453 | if (t >= 256) { |
1454 | sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); | 1454 | sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); |
1455 | t >>= 2; | 1455 | t >>= 2; |
1456 | } else | 1456 | } else |
1457 | sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3); | 1457 | sci_out(port, SCSMR, sci_in(port, SCSMR) & ~3); |
1458 | 1458 | ||
1459 | sci_out(port, SCBRR, t); | 1459 | sci_out(port, SCBRR, t); |
1460 | udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ | 1460 | udelay((1000000+(baud-1)) / baud); /* Wait one bit interval */ |
1461 | } | 1461 | } |
1462 | 1462 | ||
1463 | sci_init_pins(port, termios->c_cflag); | 1463 | sci_init_pins(port, termios->c_cflag); |
1464 | sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0); | 1464 | sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0); |
1465 | 1465 | ||
1466 | sci_out(port, SCSCR, SCSCR_INIT(port)); | 1466 | sci_out(port, SCSCR, SCSCR_INIT(port)); |
1467 | 1467 | ||
1468 | if ((termios->c_cflag & CREAD) != 0) | 1468 | if ((termios->c_cflag & CREAD) != 0) |
1469 | sci_start_rx(port); | 1469 | sci_start_rx(port); |
1470 | } | 1470 | } |
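
In sci_set_termios() above, a bit-rate divider t of 256 or more will not fit in SCBRR, so the code bumps the SCSMR clock-select field (each step divides the sampling clock by four) and shrinks t to match. A generalized sketch of that fold, assuming the CKS field's full 0–3 range; the driver itself only takes the single step shown above:

    /* Illustrative only: fold an over-range divider into SCBRR range. */
    static unsigned int demo_fit_brr(unsigned int t, unsigned int *cks)
    {
            *cks = 0;
            while (t >= 256 && *cks < 3) {
                    t >>= 2;        /* prescaler steps in powers of four */
                    (*cks)++;
            }
            return t;               /* value programmed into SCBRR */
    }
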
1471 | 1471 | ||
1472 | static const char *sci_type(struct uart_port *port) | 1472 | static const char *sci_type(struct uart_port *port) |
1473 | { | 1473 | { |
1474 | switch (port->type) { | 1474 | switch (port->type) { |
1475 | case PORT_IRDA: | 1475 | case PORT_IRDA: |
1476 | return "irda"; | 1476 | return "irda"; |
1477 | case PORT_SCI: | 1477 | case PORT_SCI: |
1478 | return "sci"; | 1478 | return "sci"; |
1479 | case PORT_SCIF: | 1479 | case PORT_SCIF: |
1480 | return "scif"; | 1480 | return "scif"; |
1481 | case PORT_SCIFA: | 1481 | case PORT_SCIFA: |
1482 | return "scifa"; | 1482 | return "scifa"; |
1483 | } | 1483 | } |
1484 | 1484 | ||
1485 | return NULL; | 1485 | return NULL; |
1486 | } | 1486 | } |
1487 | 1487 | ||
1488 | static void sci_release_port(struct uart_port *port) | 1488 | static void sci_release_port(struct uart_port *port) |
1489 | { | 1489 | { |
1490 | /* Nothing here yet .. */ | 1490 | /* Nothing here yet .. */ |
1491 | } | 1491 | } |
1492 | 1492 | ||
1493 | static int sci_request_port(struct uart_port *port) | 1493 | static int sci_request_port(struct uart_port *port) |
1494 | { | 1494 | { |
1495 | /* Nothing here yet .. */ | 1495 | /* Nothing here yet .. */ |
1496 | return 0; | 1496 | return 0; |
1497 | } | 1497 | } |
1498 | 1498 | ||
1499 | static void sci_config_port(struct uart_port *port, int flags) | 1499 | static void sci_config_port(struct uart_port *port, int flags) |
1500 | { | 1500 | { |
1501 | struct sci_port *s = to_sci_port(port); | 1501 | struct sci_port *s = to_sci_port(port); |
1502 | 1502 | ||
1503 | port->type = s->type; | 1503 | port->type = s->type; |
1504 | 1504 | ||
1505 | if (port->membase) | 1505 | if (port->membase) |
1506 | return; | 1506 | return; |
1507 | 1507 | ||
1508 | if (port->flags & UPF_IOREMAP) { | 1508 | if (port->flags & UPF_IOREMAP) { |
1509 | port->membase = ioremap_nocache(port->mapbase, 0x40); | 1509 | port->membase = ioremap_nocache(port->mapbase, 0x40); |
1510 | 1510 | ||
1511 | if (IS_ERR(port->membase)) | 1511 | if (IS_ERR(port->membase)) |
1512 | dev_err(port->dev, "can't remap port#%d\n", port->line); | 1512 | dev_err(port->dev, "can't remap port#%d\n", port->line); |
1513 | } else { | 1513 | } else { |
1514 | /* | 1514 | /* |
1515 | * For the simple (and majority of) cases where we don't | 1515 | * For the simple (and majority of) cases where we don't |
1516 | * need to do any remapping, just cast the cookie | 1516 | * need to do any remapping, just cast the cookie |
1517 | * directly. | 1517 | * directly. |
1518 | */ | 1518 | */ |
1519 | port->membase = (void __iomem *)port->mapbase; | 1519 | port->membase = (void __iomem *)port->mapbase; |
1520 | } | 1520 | } |
1521 | } | 1521 | } |
1522 | 1522 | ||
1523 | static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) | 1523 | static int sci_verify_port(struct uart_port *port, struct serial_struct *ser) |
1524 | { | 1524 | { |
1525 | struct sci_port *s = to_sci_port(port); | 1525 | struct sci_port *s = to_sci_port(port); |
1526 | 1526 | ||
1527 | if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs) | 1527 | if (ser->irq != s->irqs[SCIx_TXI_IRQ] || ser->irq > nr_irqs) |
1528 | return -EINVAL; | 1528 | return -EINVAL; |
1529 | if (ser->baud_base < 2400) | 1529 | if (ser->baud_base < 2400) |
1530 | /* No paper tape reader for Mitch.. */ | 1530 | /* No paper tape reader for Mitch.. */ |
1531 | return -EINVAL; | 1531 | return -EINVAL; |
1532 | 1532 | ||
1533 | return 0; | 1533 | return 0; |
1534 | } | 1534 | } |
1535 | 1535 | ||
1536 | static struct uart_ops sci_uart_ops = { | 1536 | static struct uart_ops sci_uart_ops = { |
1537 | .tx_empty = sci_tx_empty, | 1537 | .tx_empty = sci_tx_empty, |
1538 | .set_mctrl = sci_set_mctrl, | 1538 | .set_mctrl = sci_set_mctrl, |
1539 | .get_mctrl = sci_get_mctrl, | 1539 | .get_mctrl = sci_get_mctrl, |
1540 | .start_tx = sci_start_tx, | 1540 | .start_tx = sci_start_tx, |
1541 | .stop_tx = sci_stop_tx, | 1541 | .stop_tx = sci_stop_tx, |
1542 | .stop_rx = sci_stop_rx, | 1542 | .stop_rx = sci_stop_rx, |
1543 | .enable_ms = sci_enable_ms, | 1543 | .enable_ms = sci_enable_ms, |
1544 | .break_ctl = sci_break_ctl, | 1544 | .break_ctl = sci_break_ctl, |
1545 | .startup = sci_startup, | 1545 | .startup = sci_startup, |
1546 | .shutdown = sci_shutdown, | 1546 | .shutdown = sci_shutdown, |
1547 | .set_termios = sci_set_termios, | 1547 | .set_termios = sci_set_termios, |
1548 | .type = sci_type, | 1548 | .type = sci_type, |
1549 | .release_port = sci_release_port, | 1549 | .release_port = sci_release_port, |
1550 | .request_port = sci_request_port, | 1550 | .request_port = sci_request_port, |
1551 | .config_port = sci_config_port, | 1551 | .config_port = sci_config_port, |
1552 | .verify_port = sci_verify_port, | 1552 | .verify_port = sci_verify_port, |
1553 | #ifdef CONFIG_CONSOLE_POLL | 1553 | #ifdef CONFIG_CONSOLE_POLL |
1554 | .poll_get_char = sci_poll_get_char, | 1554 | .poll_get_char = sci_poll_get_char, |
1555 | .poll_put_char = sci_poll_put_char, | 1555 | .poll_put_char = sci_poll_put_char, |
1556 | #endif | 1556 | #endif |
1557 | }; | 1557 | }; |
1558 | 1558 | ||
1559 | static void __devinit sci_init_single(struct platform_device *dev, | 1559 | static void __devinit sci_init_single(struct platform_device *dev, |
1560 | struct sci_port *sci_port, | 1560 | struct sci_port *sci_port, |
1561 | unsigned int index, | 1561 | unsigned int index, |
1562 | struct plat_sci_port *p) | 1562 | struct plat_sci_port *p) |
1563 | { | 1563 | { |
1564 | struct uart_port *port = &sci_port->port; | 1564 | struct uart_port *port = &sci_port->port; |
1565 | 1565 | ||
1566 | port->ops = &sci_uart_ops; | 1566 | port->ops = &sci_uart_ops; |
1567 | port->iotype = UPIO_MEM; | 1567 | port->iotype = UPIO_MEM; |
1568 | port->line = index; | 1568 | port->line = index; |
1569 | 1569 | ||
1570 | switch (p->type) { | 1570 | switch (p->type) { |
1571 | case PORT_SCIFA: | 1571 | case PORT_SCIFA: |
1572 | port->fifosize = 64; | 1572 | port->fifosize = 64; |
1573 | break; | 1573 | break; |
1574 | case PORT_SCIF: | 1574 | case PORT_SCIF: |
1575 | port->fifosize = 16; | 1575 | port->fifosize = 16; |
1576 | break; | 1576 | break; |
1577 | default: | 1577 | default: |
1578 | port->fifosize = 1; | 1578 | port->fifosize = 1; |
1579 | break; | 1579 | break; |
1580 | } | 1580 | } |
1581 | 1581 | ||
1582 | if (dev) { | 1582 | if (dev) { |
1583 | sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL; | 1583 | sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL; |
1584 | sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); | 1584 | sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); |
1585 | sci_port->enable = sci_clk_enable; | 1585 | sci_port->enable = sci_clk_enable; |
1586 | sci_port->disable = sci_clk_disable; | 1586 | sci_port->disable = sci_clk_disable; |
1587 | port->dev = &dev->dev; | 1587 | port->dev = &dev->dev; |
1588 | } | 1588 | } |
1589 | 1589 | ||
1590 | sci_port->break_timer.data = (unsigned long)sci_port; | 1590 | sci_port->break_timer.data = (unsigned long)sci_port; |
1591 | sci_port->break_timer.function = sci_break_timer; | 1591 | sci_port->break_timer.function = sci_break_timer; |
1592 | init_timer(&sci_port->break_timer); | 1592 | init_timer(&sci_port->break_timer); |
1593 | 1593 | ||
1594 | port->mapbase = p->mapbase; | 1594 | port->mapbase = p->mapbase; |
1595 | port->membase = p->membase; | 1595 | port->membase = p->membase; |
1596 | 1596 | ||
1597 | port->irq = p->irqs[SCIx_TXI_IRQ]; | 1597 | port->irq = p->irqs[SCIx_TXI_IRQ]; |
1598 | port->flags = p->flags; | 1598 | port->flags = p->flags; |
1599 | sci_port->type = port->type = p->type; | 1599 | sci_port->type = port->type = p->type; |
1600 | 1600 | ||
1601 | #ifdef CONFIG_SERIAL_SH_SCI_DMA | 1601 | #ifdef CONFIG_SERIAL_SH_SCI_DMA |
1602 | sci_port->dma_dev = p->dma_dev; | 1602 | sci_port->dma_dev = p->dma_dev; |
1603 | sci_port->slave_tx = p->dma_slave_tx; | 1603 | sci_port->slave_tx = p->dma_slave_tx; |
1604 | sci_port->slave_rx = p->dma_slave_rx; | 1604 | sci_port->slave_rx = p->dma_slave_rx; |
1605 | 1605 | ||
1606 | dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__, | 1606 | dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__, |
1607 | p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); | 1607 | p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); |
1608 | #endif | 1608 | #endif |
1609 | 1609 | ||
1610 | memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); | 1610 | memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); |
1611 | } | 1611 | } |
1612 | 1612 | ||
1613 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | 1613 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE |
1614 | static struct tty_driver *serial_console_device(struct console *co, int *index) | 1614 | static struct tty_driver *serial_console_device(struct console *co, int *index) |
1615 | { | 1615 | { |
1616 | struct uart_driver *p = &sci_uart_driver; | 1616 | struct uart_driver *p = &sci_uart_driver; |
1617 | *index = co->index; | 1617 | *index = co->index; |
1618 | return p->tty_driver; | 1618 | return p->tty_driver; |
1619 | } | 1619 | } |
1620 | 1620 | ||
1621 | static void serial_console_putchar(struct uart_port *port, int ch) | 1621 | static void serial_console_putchar(struct uart_port *port, int ch) |
1622 | { | 1622 | { |
1623 | sci_poll_put_char(port, ch); | 1623 | sci_poll_put_char(port, ch); |
1624 | } | 1624 | } |
1625 | 1625 | ||
1626 | /* | 1626 | /* |
1627 | * Print a string to the serial port trying not to disturb | 1627 | * Print a string to the serial port trying not to disturb |
1628 | * any possible real use of the port... | 1628 | * any possible real use of the port... |
1629 | */ | 1629 | */ |
1630 | static void serial_console_write(struct console *co, const char *s, | 1630 | static void serial_console_write(struct console *co, const char *s, |
1631 | unsigned count) | 1631 | unsigned count) |
1632 | { | 1632 | { |
1633 | struct uart_port *port = co->data; | 1633 | struct uart_port *port = co->data; |
1634 | struct sci_port *sci_port = to_sci_port(port); | 1634 | struct sci_port *sci_port = to_sci_port(port); |
1635 | unsigned short bits; | 1635 | unsigned short bits; |
1636 | 1636 | ||
1637 | if (sci_port->enable) | 1637 | if (sci_port->enable) |
1638 | sci_port->enable(port); | 1638 | sci_port->enable(port); |
1639 | 1639 | ||
1640 | uart_console_write(port, s, count, serial_console_putchar); | 1640 | uart_console_write(port, s, count, serial_console_putchar); |
1641 | 1641 | ||
1642 | /* wait until fifo is empty and last bit has been transmitted */ | 1642 | /* wait until fifo is empty and last bit has been transmitted */ |
1643 | bits = SCxSR_TDxE(port) | SCxSR_TEND(port); | 1643 | bits = SCxSR_TDxE(port) | SCxSR_TEND(port); |
1644 | while ((sci_in(port, SCxSR) & bits) != bits) | 1644 | while ((sci_in(port, SCxSR) & bits) != bits) |
1645 | cpu_relax(); | 1645 | cpu_relax(); |
1646 | 1646 | ||
1647 | if (sci_port->disable) | 1647 | if (sci_port->disable) |
1648 | sci_port->disable(port); | 1648 | sci_port->disable(port); |
1649 | } | 1649 | } |
1650 | 1650 | ||
1651 | static int __devinit serial_console_setup(struct console *co, char *options) | 1651 | static int __devinit serial_console_setup(struct console *co, char *options) |
1652 | { | 1652 | { |
1653 | struct sci_port *sci_port; | 1653 | struct sci_port *sci_port; |
1654 | struct uart_port *port; | 1654 | struct uart_port *port; |
1655 | int baud = 115200; | 1655 | int baud = 115200; |
1656 | int bits = 8; | 1656 | int bits = 8; |
1657 | int parity = 'n'; | 1657 | int parity = 'n'; |
1658 | int flow = 'n'; | 1658 | int flow = 'n'; |
1659 | int ret; | 1659 | int ret; |
1660 | 1660 | ||
1661 | /* | 1661 | /* |
1662 | * Check whether an invalid uart number has been specified, and | 1662 | * Check whether an invalid uart number has been specified, and |
1663 | * if so, search for the first available port that does have | 1663 | * if so, search for the first available port that does have |
1664 | * console support. | 1664 | * console support. |
1665 | */ | 1665 | */ |
1666 | if (co->index >= SCI_NPORTS) | 1666 | if (co->index >= SCI_NPORTS) |
1667 | co->index = 0; | 1667 | co->index = 0; |
1668 | 1668 | ||
1669 | if (co->data) { | 1669 | if (co->data) { |
1670 | port = co->data; | 1670 | port = co->data; |
1671 | sci_port = to_sci_port(port); | 1671 | sci_port = to_sci_port(port); |
1672 | } else { | 1672 | } else { |
1673 | sci_port = &sci_ports[co->index]; | 1673 | sci_port = &sci_ports[co->index]; |
1674 | port = &sci_port->port; | 1674 | port = &sci_port->port; |
1675 | co->data = port; | 1675 | co->data = port; |
1676 | } | 1676 | } |
1677 | 1677 | ||
1678 | /* | 1678 | /* |
1679 | * Also need to check port->type, we don't actually have any | 1679 | * Also need to check port->type, we don't actually have any |
1680 | * UPIO_PORT ports, but uart_report_port() handily misreports | 1680 | * UPIO_PORT ports, but uart_report_port() handily misreports |
1681 | * it anyways if we don't have a port available by the time this is | 1681 | * it anyways if we don't have a port available by the time this is |
1682 | * called. | 1682 | * called. |
1683 | */ | 1683 | */ |
1684 | if (!port->type) | 1684 | if (!port->type) |
1685 | return -ENODEV; | 1685 | return -ENODEV; |
1686 | 1686 | ||
1687 | sci_config_port(port, 0); | 1687 | sci_config_port(port, 0); |
1688 | 1688 | ||
1689 | if (sci_port->enable) | 1689 | if (sci_port->enable) |
1690 | sci_port->enable(port); | 1690 | sci_port->enable(port); |
1691 | 1691 | ||
1692 | if (options) | 1692 | if (options) |
1693 | uart_parse_options(options, &baud, &parity, &bits, &flow); | 1693 | uart_parse_options(options, &baud, &parity, &bits, &flow); |
1694 | 1694 | ||
1695 | ret = uart_set_options(port, co, baud, parity, bits, flow); | 1695 | ret = uart_set_options(port, co, baud, parity, bits, flow); |
1696 | #if defined(__H8300H__) || defined(__H8300S__) | 1696 | #if defined(__H8300H__) || defined(__H8300S__) |
1697 | /* disable rx interrupt */ | 1697 | /* disable rx interrupt */ |
1698 | if (ret == 0) | 1698 | if (ret == 0) |
1699 | sci_stop_rx(port); | 1699 | sci_stop_rx(port); |
1700 | #endif | 1700 | #endif |
1701 | /* TODO: disable clock */ | 1701 | /* TODO: disable clock */ |
1702 | return ret; | 1702 | return ret; |
1703 | } | 1703 | } |
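
serial_console_setup() above defaults to 115200n8 and lets a console= option string override it via uart_parse_options(). A tiny sketch of what a typical command line yields — demo_parse() is illustrative, and the option string corresponds to "console=ttySC1,115200n8":

    #include <linux/serial_core.h>

    static void demo_parse(void)
    {
            static char opts[] = "115200n8";   /* options after "ttySC1," */
            int baud = 9600, parity = 'n', bits = 8, flow = 'n';

            uart_parse_options(opts, &baud, &parity, &bits, &flow);
            /* now: baud == 115200, parity == 'n', bits == 8, flow == 'n' */
    }
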
1704 | 1704 | ||
1705 | static struct console serial_console = { | 1705 | static struct console serial_console = { |
1706 | .name = "ttySC", | 1706 | .name = "ttySC", |
1707 | .device = serial_console_device, | 1707 | .device = serial_console_device, |
1708 | .write = serial_console_write, | 1708 | .write = serial_console_write, |
1709 | .setup = serial_console_setup, | 1709 | .setup = serial_console_setup, |
1710 | .flags = CON_PRINTBUFFER, | 1710 | .flags = CON_PRINTBUFFER, |
1711 | .index = -1, | 1711 | .index = -1, |
1712 | }; | 1712 | }; |
1713 | 1713 | ||
1714 | static int __init sci_console_init(void) | 1714 | static int __init sci_console_init(void) |
1715 | { | 1715 | { |
1716 | register_console(&serial_console); | 1716 | register_console(&serial_console); |
1717 | return 0; | 1717 | return 0; |
1718 | } | 1718 | } |
1719 | console_initcall(sci_console_init); | 1719 | console_initcall(sci_console_init); |
1720 | 1720 | ||
1721 | static struct sci_port early_serial_port; | 1721 | static struct sci_port early_serial_port; |
1722 | static struct console early_serial_console = { | 1722 | static struct console early_serial_console = { |
1723 | .name = "early_ttySC", | 1723 | .name = "early_ttySC", |
1724 | .write = serial_console_write, | 1724 | .write = serial_console_write, |
1725 | .flags = CON_PRINTBUFFER, | 1725 | .flags = CON_PRINTBUFFER, |
1726 | }; | 1726 | }; |
1727 | static char early_serial_buf[32]; | 1727 | static char early_serial_buf[32]; |
1728 | 1728 | ||
1729 | #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ | 1729 | #endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */ |
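
One detail worth noting about the early-console objects above: sci_probe() (further down) marks early_serial_console with CON_BOOT unless the option buffer contains "keep", so by default the boot console hands over to the real ttySC console once it registers. A sketch of just that decision — demo_mark_boot_console() is an invented helper mirroring the test in sci_probe():

    #include <linux/console.h>
    #include <linux/string.h>

    static void demo_mark_boot_console(struct console *con, const char *options)
    {
            if (!strstr(options, "keep"))
                    con->flags |= CON_BOOT;  /* auto-unregister at handover */
    }
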
1730 | 1730 | ||
1731 | #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) | 1731 | #if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) |
1732 | #define SCI_CONSOLE (&serial_console) | 1732 | #define SCI_CONSOLE (&serial_console) |
1733 | #else | 1733 | #else |
1734 | #define SCI_CONSOLE 0 | 1734 | #define SCI_CONSOLE 0 |
1735 | #endif | 1735 | #endif |
1736 | 1736 | ||
1737 | static char banner[] __initdata = | 1737 | static char banner[] __initdata = |
1738 | KERN_INFO "SuperH SCI(F) driver initialized\n"; | 1738 | KERN_INFO "SuperH SCI(F) driver initialized\n"; |
1739 | 1739 | ||
1740 | static struct uart_driver sci_uart_driver = { | 1740 | static struct uart_driver sci_uart_driver = { |
1741 | .owner = THIS_MODULE, | 1741 | .owner = THIS_MODULE, |
1742 | .driver_name = "sci", | 1742 | .driver_name = "sci", |
1743 | .dev_name = "ttySC", | 1743 | .dev_name = "ttySC", |
1744 | .major = SCI_MAJOR, | 1744 | .major = SCI_MAJOR, |
1745 | .minor = SCI_MINOR_START, | 1745 | .minor = SCI_MINOR_START, |
1746 | .nr = SCI_NPORTS, | 1746 | .nr = SCI_NPORTS, |
1747 | .cons = SCI_CONSOLE, | 1747 | .cons = SCI_CONSOLE, |
1748 | }; | 1748 | }; |
1749 | 1749 | ||
1750 | 1750 | ||
1751 | static int sci_remove(struct platform_device *dev) | 1751 | static int sci_remove(struct platform_device *dev) |
1752 | { | 1752 | { |
1753 | struct sh_sci_priv *priv = platform_get_drvdata(dev); | 1753 | struct sh_sci_priv *priv = platform_get_drvdata(dev); |
1754 | struct sci_port *p; | 1754 | struct sci_port *p; |
1755 | unsigned long flags; | 1755 | unsigned long flags; |
1756 | 1756 | ||
1757 | cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); | 1757 | cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); |
1758 | 1758 | ||
1759 | spin_lock_irqsave(&priv->lock, flags); | 1759 | spin_lock_irqsave(&priv->lock, flags); |
1760 | list_for_each_entry(p, &priv->ports, node) | 1760 | list_for_each_entry(p, &priv->ports, node) |
1761 | uart_remove_one_port(&sci_uart_driver, &p->port); | 1761 | uart_remove_one_port(&sci_uart_driver, &p->port); |
1762 | spin_unlock_irqrestore(&priv->lock, flags); | 1762 | spin_unlock_irqrestore(&priv->lock, flags); |
1763 | 1763 | ||
1764 | kfree(priv); | 1764 | kfree(priv); |
1765 | return 0; | 1765 | return 0; |
1766 | } | 1766 | } |
1767 | 1767 | ||
1768 | static int __devinit sci_probe_single(struct platform_device *dev, | 1768 | static int __devinit sci_probe_single(struct platform_device *dev, |
1769 | unsigned int index, | 1769 | unsigned int index, |
1770 | struct plat_sci_port *p, | 1770 | struct plat_sci_port *p, |
1771 | struct sci_port *sciport) | 1771 | struct sci_port *sciport) |
1772 | { | 1772 | { |
1773 | struct sh_sci_priv *priv = platform_get_drvdata(dev); | 1773 | struct sh_sci_priv *priv = platform_get_drvdata(dev); |
1774 | unsigned long flags; | 1774 | unsigned long flags; |
1775 | int ret; | 1775 | int ret; |
1776 | 1776 | ||
1777 | /* Sanity check */ | 1777 | /* Sanity check */ |
1778 | if (unlikely(index >= SCI_NPORTS)) { | 1778 | if (unlikely(index >= SCI_NPORTS)) { |
1779 | dev_notice(&dev->dev, "Attempting to register port " | 1779 | dev_notice(&dev->dev, "Attempting to register port " |
1780 | "%d when only %d are available.\n", | 1780 | "%d when only %d are available.\n", |
1781 | index+1, SCI_NPORTS); | 1781 | index+1, SCI_NPORTS); |
1782 | dev_notice(&dev->dev, "Consider bumping " | 1782 | dev_notice(&dev->dev, "Consider bumping " |
1783 | "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); | 1783 | "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); |
1784 | return 0; | 1784 | return 0; |
1785 | } | 1785 | } |
1786 | 1786 | ||
1787 | sci_init_single(dev, sciport, index, p); | 1787 | sci_init_single(dev, sciport, index, p); |
1788 | 1788 | ||
1789 | ret = uart_add_one_port(&sci_uart_driver, &sciport->port); | 1789 | ret = uart_add_one_port(&sci_uart_driver, &sciport->port); |
1790 | if (ret) | 1790 | if (ret) |
1791 | return ret; | 1791 | return ret; |
1792 | 1792 | ||
1793 | INIT_LIST_HEAD(&sciport->node); | 1793 | INIT_LIST_HEAD(&sciport->node); |
1794 | 1794 | ||
1795 | spin_lock_irqsave(&priv->lock, flags); | 1795 | spin_lock_irqsave(&priv->lock, flags); |
1796 | list_add(&sciport->node, &priv->ports); | 1796 | list_add(&sciport->node, &priv->ports); |
1797 | spin_unlock_irqrestore(&priv->lock, flags); | 1797 | spin_unlock_irqrestore(&priv->lock, flags); |
1798 | 1798 | ||
1799 | return 0; | 1799 | return 0; |
1800 | } | 1800 | } |
1801 | 1801 | ||
1802 | /* | 1802 | /* |
1803 | * Register a set of serial devices attached to a platform device. The | 1803 | * Register a set of serial devices attached to a platform device. The |
1804 | * list is terminated with a zero flags entry, which means we expect | 1804 | * list is terminated with a zero flags entry, which means we expect |
1805 | * all entries to have at least UPF_BOOT_AUTOCONF set. Platforms that need | 1805 | * all entries to have at least UPF_BOOT_AUTOCONF set. Platforms that need |
1806 | * remapping (such as sh64) should also set UPF_IOREMAP. | 1806 | * remapping (such as sh64) should also set UPF_IOREMAP. |
1807 | */ | 1807 | */ |
1808 | static int __devinit sci_probe(struct platform_device *dev) | 1808 | static int __devinit sci_probe(struct platform_device *dev) |
1809 | { | 1809 | { |
1810 | struct plat_sci_port *p = dev->dev.platform_data; | 1810 | struct plat_sci_port *p = dev->dev.platform_data; |
1811 | struct sh_sci_priv *priv; | 1811 | struct sh_sci_priv *priv; |
1812 | int i, ret = -EINVAL; | 1812 | int i, ret = -EINVAL; |
1813 | 1813 | ||
1814 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | 1814 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE |
1815 | if (is_early_platform_device(dev)) { | 1815 | if (is_early_platform_device(dev)) { |
1816 | if (dev->id == -1) | 1816 | if (dev->id == -1) |
1817 | return -ENOTSUPP; | 1817 | return -ENOTSUPP; |
1818 | early_serial_console.index = dev->id; | 1818 | early_serial_console.index = dev->id; |
1819 | early_serial_console.data = &early_serial_port.port; | 1819 | early_serial_console.data = &early_serial_port.port; |
1820 | sci_init_single(NULL, &early_serial_port, dev->id, p); | 1820 | sci_init_single(NULL, &early_serial_port, dev->id, p); |
1821 | serial_console_setup(&early_serial_console, early_serial_buf); | 1821 | serial_console_setup(&early_serial_console, early_serial_buf); |
1822 | if (!strstr(early_serial_buf, "keep")) | 1822 | if (!strstr(early_serial_buf, "keep")) |
1823 | early_serial_console.flags |= CON_BOOT; | 1823 | early_serial_console.flags |= CON_BOOT; |
1824 | register_console(&early_serial_console); | 1824 | register_console(&early_serial_console); |
1825 | return 0; | 1825 | return 0; |
1826 | } | 1826 | } |
1827 | #endif | 1827 | #endif |
1828 | 1828 | ||
1829 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); | 1829 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
1830 | if (!priv) | 1830 | if (!priv) |
1831 | return -ENOMEM; | 1831 | return -ENOMEM; |
1832 | 1832 | ||
1833 | INIT_LIST_HEAD(&priv->ports); | 1833 | INIT_LIST_HEAD(&priv->ports); |
1834 | spin_lock_init(&priv->lock); | 1834 | spin_lock_init(&priv->lock); |
1835 | platform_set_drvdata(dev, priv); | 1835 | platform_set_drvdata(dev, priv); |
1836 | 1836 | ||
1837 | priv->clk_nb.notifier_call = sci_notifier; | 1837 | priv->clk_nb.notifier_call = sci_notifier; |
1838 | cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); | 1838 | cpufreq_register_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER); |
1839 | 1839 | ||
1840 | if (dev->id != -1) { | 1840 | if (dev->id != -1) { |
1841 | ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]); | 1841 | ret = sci_probe_single(dev, dev->id, p, &sci_ports[dev->id]); |
1842 | if (ret) | 1842 | if (ret) |
1843 | goto err_unreg; | 1843 | goto err_unreg; |
1844 | } else { | 1844 | } else { |
1845 | for (i = 0; p && p->flags != 0; p++, i++) { | 1845 | for (i = 0; p && p->flags != 0; p++, i++) { |
1846 | ret = sci_probe_single(dev, i, p, &sci_ports[i]); | 1846 | ret = sci_probe_single(dev, i, p, &sci_ports[i]); |
1847 | if (ret) | 1847 | if (ret) |
1848 | goto err_unreg; | 1848 | goto err_unreg; |
1849 | } | 1849 | } |
1850 | } | 1850 | } |
1851 | 1851 | ||
1852 | #ifdef CONFIG_SH_STANDARD_BIOS | 1852 | #ifdef CONFIG_SH_STANDARD_BIOS |
1853 | sh_bios_gdb_detach(); | 1853 | sh_bios_gdb_detach(); |
1854 | #endif | 1854 | #endif |
1855 | 1855 | ||
1856 | return 0; | 1856 | return 0; |
1857 | 1857 | ||
1858 | err_unreg: | 1858 | err_unreg: |
1859 | sci_remove(dev); | 1859 | sci_remove(dev); |
1860 | return ret; | 1860 | return ret; |
1861 | } | 1861 | } |
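
The comment above sci_probe() spells out the platform-data contract: an array of plat_sci_port entries terminated by a zero-flags sentinel, with every live entry carrying at least UPF_BOOT_AUTOCONF. A sketch of such a table — the address and IRQ numbers are made up for illustration:

    #include <linux/serial_core.h>
    #include <linux/serial_sci.h>

    static struct plat_sci_port demo_sci_platform_data[] = {
            {
                    .mapbase        = 0xffe00000,           /* assumed address */
                    .flags          = UPF_BOOT_AUTOCONF,
                    .type           = PORT_SCIF,
                    .irqs           = { 80, 80, 80, 80 },   /* assumed IRQs */
            }, {
                    /* zero-flags sentinel terminates the list */
                    .flags          = 0,
            }
    };
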
1862 | 1862 | ||
1863 | static int sci_suspend(struct device *dev) | 1863 | static int sci_suspend(struct device *dev) |
1864 | { | 1864 | { |
1865 | struct sh_sci_priv *priv = dev_get_drvdata(dev); | 1865 | struct sh_sci_priv *priv = dev_get_drvdata(dev); |
1866 | struct sci_port *p; | 1866 | struct sci_port *p; |
1867 | unsigned long flags; | 1867 | unsigned long flags; |
1868 | 1868 | ||
1869 | spin_lock_irqsave(&priv->lock, flags); | 1869 | spin_lock_irqsave(&priv->lock, flags); |
1870 | list_for_each_entry(p, &priv->ports, node) | 1870 | list_for_each_entry(p, &priv->ports, node) |
1871 | uart_suspend_port(&sci_uart_driver, &p->port); | 1871 | uart_suspend_port(&sci_uart_driver, &p->port); |
1872 | spin_unlock_irqrestore(&priv->lock, flags); | 1872 | spin_unlock_irqrestore(&priv->lock, flags); |
1873 | 1873 | ||
1874 | return 0; | 1874 | return 0; |
1875 | } | 1875 | } |
1876 | 1876 | ||
1877 | static int sci_resume(struct device *dev) | 1877 | static int sci_resume(struct device *dev) |
1878 | { | 1878 | { |
1879 | struct sh_sci_priv *priv = dev_get_drvdata(dev); | 1879 | struct sh_sci_priv *priv = dev_get_drvdata(dev); |
1880 | struct sci_port *p; | 1880 | struct sci_port *p; |
1881 | unsigned long flags; | 1881 | unsigned long flags; |
1882 | 1882 | ||
1883 | spin_lock_irqsave(&priv->lock, flags); | 1883 | spin_lock_irqsave(&priv->lock, flags); |
1884 | list_for_each_entry(p, &priv->ports, node) | 1884 | list_for_each_entry(p, &priv->ports, node) |
1885 | uart_resume_port(&sci_uart_driver, &p->port); | 1885 | uart_resume_port(&sci_uart_driver, &p->port); |
1886 | spin_unlock_irqrestore(&priv->lock, flags); | 1886 | spin_unlock_irqrestore(&priv->lock, flags); |
1887 | 1887 | ||
1888 | return 0; | 1888 | return 0; |
1889 | } | 1889 | } |
1890 | 1890 | ||
1891 | static const struct dev_pm_ops sci_dev_pm_ops = { | 1891 | static const struct dev_pm_ops sci_dev_pm_ops = { |
1892 | .suspend = sci_suspend, | 1892 | .suspend = sci_suspend, |
1893 | .resume = sci_resume, | 1893 | .resume = sci_resume, |
1894 | }; | 1894 | }; |
1895 | 1895 | ||
1896 | static struct platform_driver sci_driver = { | 1896 | static struct platform_driver sci_driver = { |
1897 | .probe = sci_probe, | 1897 | .probe = sci_probe, |
1898 | .remove = sci_remove, | 1898 | .remove = sci_remove, |
1899 | .driver = { | 1899 | .driver = { |
1900 | .name = "sh-sci", | 1900 | .name = "sh-sci", |
1901 | .owner = THIS_MODULE, | 1901 | .owner = THIS_MODULE, |
1902 | .pm = &sci_dev_pm_ops, | 1902 | .pm = &sci_dev_pm_ops, |
1903 | }, | 1903 | }, |
1904 | }; | 1904 | }; |
1905 | 1905 | ||
1906 | static int __init sci_init(void) | 1906 | static int __init sci_init(void) |
1907 | { | 1907 | { |
1908 | int ret; | 1908 | int ret; |
1909 | 1909 | ||
1910 | printk(banner); | 1910 | printk(banner); |
1911 | 1911 | ||
1912 | ret = uart_register_driver(&sci_uart_driver); | 1912 | ret = uart_register_driver(&sci_uart_driver); |
1913 | if (likely(ret == 0)) { | 1913 | if (likely(ret == 0)) { |
1914 | ret = platform_driver_register(&sci_driver); | 1914 | ret = platform_driver_register(&sci_driver); |
1915 | if (unlikely(ret)) | 1915 | if (unlikely(ret)) |
1916 | uart_unregister_driver(&sci_uart_driver); | 1916 | uart_unregister_driver(&sci_uart_driver); |
1917 | } | 1917 | } |
1918 | 1918 | ||
1919 | return ret; | 1919 | return ret; |
1920 | } | 1920 | } |
1921 | 1921 | ||
1922 | static void __exit sci_exit(void) | 1922 | static void __exit sci_exit(void) |
1923 | { | 1923 | { |
1924 | platform_driver_unregister(&sci_driver); | 1924 | platform_driver_unregister(&sci_driver); |
1925 | uart_unregister_driver(&sci_uart_driver); | 1925 | uart_unregister_driver(&sci_uart_driver); |
1926 | } | 1926 | } |
1927 | 1927 | ||
1928 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE | 1928 | #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE |
1929 | early_platform_init_buffer("earlyprintk", &sci_driver, | 1929 | early_platform_init_buffer("earlyprintk", &sci_driver, |
1930 | early_serial_buf, ARRAY_SIZE(early_serial_buf)); | 1930 | early_serial_buf, ARRAY_SIZE(early_serial_buf)); |
1931 | #endif | 1931 | #endif |
1932 | module_init(sci_init); | 1932 | module_init(sci_init); |
1933 | module_exit(sci_exit); | 1933 | module_exit(sci_exit); |
1934 | 1934 | ||
1935 | MODULE_LICENSE("GPL"); | 1935 | MODULE_LICENSE("GPL"); |
1936 | MODULE_ALIAS("platform:sh-sci"); | 1936 | MODULE_ALIAS("platform:sh-sci"); |
1937 | 1937 | ||
drivers/video/mx3fb.c
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 | 2 | * Copyright (C) 2008 |
3 | * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> | 3 | * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> |
4 | * | 4 | * |
5 | * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. | 5 | * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved. |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/platform_device.h> | 14 | #include <linux/platform_device.h> |
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/fb.h> | 20 | #include <linux/fb.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/ioport.h> | 23 | #include <linux/ioport.h> |
24 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
25 | #include <linux/dmaengine.h> | 25 | #include <linux/dmaengine.h> |
26 | #include <linux/console.h> | 26 | #include <linux/console.h> |
27 | #include <linux/clk.h> | 27 | #include <linux/clk.h> |
28 | #include <linux/mutex.h> | 28 | #include <linux/mutex.h> |
29 | 29 | ||
30 | #include <mach/hardware.h> | 30 | #include <mach/hardware.h> |
31 | #include <mach/ipu.h> | 31 | #include <mach/ipu.h> |
32 | #include <mach/mx3fb.h> | 32 | #include <mach/mx3fb.h> |
33 | 33 | ||
34 | #include <asm/io.h> | 34 | #include <asm/io.h> |
35 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
36 | 36 | ||
37 | #define MX3FB_NAME "mx3_sdc_fb" | 37 | #define MX3FB_NAME "mx3_sdc_fb" |
38 | 38 | ||
39 | #define MX3FB_REG_OFFSET 0xB4 | 39 | #define MX3FB_REG_OFFSET 0xB4 |
40 | 40 | ||
41 | /* SDC Registers */ | 41 | /* SDC Registers */ |
42 | #define SDC_COM_CONF (0xB4 - MX3FB_REG_OFFSET) | 42 | #define SDC_COM_CONF (0xB4 - MX3FB_REG_OFFSET) |
43 | #define SDC_GW_CTRL (0xB8 - MX3FB_REG_OFFSET) | 43 | #define SDC_GW_CTRL (0xB8 - MX3FB_REG_OFFSET) |
44 | #define SDC_FG_POS (0xBC - MX3FB_REG_OFFSET) | 44 | #define SDC_FG_POS (0xBC - MX3FB_REG_OFFSET) |
45 | #define SDC_BG_POS (0xC0 - MX3FB_REG_OFFSET) | 45 | #define SDC_BG_POS (0xC0 - MX3FB_REG_OFFSET) |
46 | #define SDC_CUR_POS (0xC4 - MX3FB_REG_OFFSET) | 46 | #define SDC_CUR_POS (0xC4 - MX3FB_REG_OFFSET) |
47 | #define SDC_PWM_CTRL (0xC8 - MX3FB_REG_OFFSET) | 47 | #define SDC_PWM_CTRL (0xC8 - MX3FB_REG_OFFSET) |
48 | #define SDC_CUR_MAP (0xCC - MX3FB_REG_OFFSET) | 48 | #define SDC_CUR_MAP (0xCC - MX3FB_REG_OFFSET) |
49 | #define SDC_HOR_CONF (0xD0 - MX3FB_REG_OFFSET) | 49 | #define SDC_HOR_CONF (0xD0 - MX3FB_REG_OFFSET) |
50 | #define SDC_VER_CONF (0xD4 - MX3FB_REG_OFFSET) | 50 | #define SDC_VER_CONF (0xD4 - MX3FB_REG_OFFSET) |
51 | #define SDC_SHARP_CONF_1 (0xD8 - MX3FB_REG_OFFSET) | 51 | #define SDC_SHARP_CONF_1 (0xD8 - MX3FB_REG_OFFSET) |
52 | #define SDC_SHARP_CONF_2 (0xDC - MX3FB_REG_OFFSET) | 52 | #define SDC_SHARP_CONF_2 (0xDC - MX3FB_REG_OFFSET) |
53 | 53 | ||
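
A quirk of the register map above: the macros rebase absolute IPU offsets against MX3FB_REG_OFFSET (0xB4), so SDC_COM_CONF works out to 0 and every define becomes an offset from reg_base, which maps the SDC block directly. A one-line compile-time sanity check of that arithmetic, in an invented helper:

    #include <linux/kernel.h>

    static inline void demo_check_reg_rebase(void)
    {
            /* SDC_COM_CONF == 0xB4 - MX3FB_REG_OFFSET == 0: first SDC word */
            BUILD_BUG_ON(SDC_COM_CONF != 0);
    }
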
54 | /* Register bits */ | 54 | /* Register bits */ |
55 | #define SDC_COM_TFT_COLOR 0x00000001UL | 55 | #define SDC_COM_TFT_COLOR 0x00000001UL |
56 | #define SDC_COM_FG_EN 0x00000010UL | 56 | #define SDC_COM_FG_EN 0x00000010UL |
57 | #define SDC_COM_GWSEL 0x00000020UL | 57 | #define SDC_COM_GWSEL 0x00000020UL |
58 | #define SDC_COM_GLB_A 0x00000040UL | 58 | #define SDC_COM_GLB_A 0x00000040UL |
59 | #define SDC_COM_KEY_COLOR_G 0x00000080UL | 59 | #define SDC_COM_KEY_COLOR_G 0x00000080UL |
60 | #define SDC_COM_BG_EN 0x00000200UL | 60 | #define SDC_COM_BG_EN 0x00000200UL |
61 | #define SDC_COM_SHARP 0x00001000UL | 61 | #define SDC_COM_SHARP 0x00001000UL |
62 | 62 | ||
63 | #define SDC_V_SYNC_WIDTH_L 0x00000001UL | 63 | #define SDC_V_SYNC_WIDTH_L 0x00000001UL |
64 | 64 | ||
65 | /* Display Interface registers */ | 65 | /* Display Interface registers */ |
66 | #define DI_DISP_IF_CONF (0x0124 - MX3FB_REG_OFFSET) | 66 | #define DI_DISP_IF_CONF (0x0124 - MX3FB_REG_OFFSET) |
67 | #define DI_DISP_SIG_POL (0x0128 - MX3FB_REG_OFFSET) | 67 | #define DI_DISP_SIG_POL (0x0128 - MX3FB_REG_OFFSET) |
68 | #define DI_SER_DISP1_CONF (0x012C - MX3FB_REG_OFFSET) | 68 | #define DI_SER_DISP1_CONF (0x012C - MX3FB_REG_OFFSET) |
69 | #define DI_SER_DISP2_CONF (0x0130 - MX3FB_REG_OFFSET) | 69 | #define DI_SER_DISP2_CONF (0x0130 - MX3FB_REG_OFFSET) |
70 | #define DI_HSP_CLK_PER (0x0134 - MX3FB_REG_OFFSET) | 70 | #define DI_HSP_CLK_PER (0x0134 - MX3FB_REG_OFFSET) |
71 | #define DI_DISP0_TIME_CONF_1 (0x0138 - MX3FB_REG_OFFSET) | 71 | #define DI_DISP0_TIME_CONF_1 (0x0138 - MX3FB_REG_OFFSET) |
72 | #define DI_DISP0_TIME_CONF_2 (0x013C - MX3FB_REG_OFFSET) | 72 | #define DI_DISP0_TIME_CONF_2 (0x013C - MX3FB_REG_OFFSET) |
73 | #define DI_DISP0_TIME_CONF_3 (0x0140 - MX3FB_REG_OFFSET) | 73 | #define DI_DISP0_TIME_CONF_3 (0x0140 - MX3FB_REG_OFFSET) |
74 | #define DI_DISP1_TIME_CONF_1 (0x0144 - MX3FB_REG_OFFSET) | 74 | #define DI_DISP1_TIME_CONF_1 (0x0144 - MX3FB_REG_OFFSET) |
75 | #define DI_DISP1_TIME_CONF_2 (0x0148 - MX3FB_REG_OFFSET) | 75 | #define DI_DISP1_TIME_CONF_2 (0x0148 - MX3FB_REG_OFFSET) |
76 | #define DI_DISP1_TIME_CONF_3 (0x014C - MX3FB_REG_OFFSET) | 76 | #define DI_DISP1_TIME_CONF_3 (0x014C - MX3FB_REG_OFFSET) |
77 | #define DI_DISP2_TIME_CONF_1 (0x0150 - MX3FB_REG_OFFSET) | 77 | #define DI_DISP2_TIME_CONF_1 (0x0150 - MX3FB_REG_OFFSET) |
78 | #define DI_DISP2_TIME_CONF_2 (0x0154 - MX3FB_REG_OFFSET) | 78 | #define DI_DISP2_TIME_CONF_2 (0x0154 - MX3FB_REG_OFFSET) |
79 | #define DI_DISP2_TIME_CONF_3 (0x0158 - MX3FB_REG_OFFSET) | 79 | #define DI_DISP2_TIME_CONF_3 (0x0158 - MX3FB_REG_OFFSET) |
80 | #define DI_DISP3_TIME_CONF (0x015C - MX3FB_REG_OFFSET) | 80 | #define DI_DISP3_TIME_CONF (0x015C - MX3FB_REG_OFFSET) |
81 | #define DI_DISP0_DB0_MAP (0x0160 - MX3FB_REG_OFFSET) | 81 | #define DI_DISP0_DB0_MAP (0x0160 - MX3FB_REG_OFFSET) |
82 | #define DI_DISP0_DB1_MAP (0x0164 - MX3FB_REG_OFFSET) | 82 | #define DI_DISP0_DB1_MAP (0x0164 - MX3FB_REG_OFFSET) |
83 | #define DI_DISP0_DB2_MAP (0x0168 - MX3FB_REG_OFFSET) | 83 | #define DI_DISP0_DB2_MAP (0x0168 - MX3FB_REG_OFFSET) |
84 | #define DI_DISP0_CB0_MAP (0x016C - MX3FB_REG_OFFSET) | 84 | #define DI_DISP0_CB0_MAP (0x016C - MX3FB_REG_OFFSET) |
85 | #define DI_DISP0_CB1_MAP (0x0170 - MX3FB_REG_OFFSET) | 85 | #define DI_DISP0_CB1_MAP (0x0170 - MX3FB_REG_OFFSET) |
86 | #define DI_DISP0_CB2_MAP (0x0174 - MX3FB_REG_OFFSET) | 86 | #define DI_DISP0_CB2_MAP (0x0174 - MX3FB_REG_OFFSET) |
87 | #define DI_DISP1_DB0_MAP (0x0178 - MX3FB_REG_OFFSET) | 87 | #define DI_DISP1_DB0_MAP (0x0178 - MX3FB_REG_OFFSET) |
88 | #define DI_DISP1_DB1_MAP (0x017C - MX3FB_REG_OFFSET) | 88 | #define DI_DISP1_DB1_MAP (0x017C - MX3FB_REG_OFFSET) |
89 | #define DI_DISP1_DB2_MAP (0x0180 - MX3FB_REG_OFFSET) | 89 | #define DI_DISP1_DB2_MAP (0x0180 - MX3FB_REG_OFFSET) |
90 | #define DI_DISP1_CB0_MAP (0x0184 - MX3FB_REG_OFFSET) | 90 | #define DI_DISP1_CB0_MAP (0x0184 - MX3FB_REG_OFFSET) |
91 | #define DI_DISP1_CB1_MAP (0x0188 - MX3FB_REG_OFFSET) | 91 | #define DI_DISP1_CB1_MAP (0x0188 - MX3FB_REG_OFFSET) |
92 | #define DI_DISP1_CB2_MAP (0x018C - MX3FB_REG_OFFSET) | 92 | #define DI_DISP1_CB2_MAP (0x018C - MX3FB_REG_OFFSET) |
93 | #define DI_DISP2_DB0_MAP (0x0190 - MX3FB_REG_OFFSET) | 93 | #define DI_DISP2_DB0_MAP (0x0190 - MX3FB_REG_OFFSET) |
94 | #define DI_DISP2_DB1_MAP (0x0194 - MX3FB_REG_OFFSET) | 94 | #define DI_DISP2_DB1_MAP (0x0194 - MX3FB_REG_OFFSET) |
95 | #define DI_DISP2_DB2_MAP (0x0198 - MX3FB_REG_OFFSET) | 95 | #define DI_DISP2_DB2_MAP (0x0198 - MX3FB_REG_OFFSET) |
96 | #define DI_DISP2_CB0_MAP (0x019C - MX3FB_REG_OFFSET) | 96 | #define DI_DISP2_CB0_MAP (0x019C - MX3FB_REG_OFFSET) |
97 | #define DI_DISP2_CB1_MAP (0x01A0 - MX3FB_REG_OFFSET) | 97 | #define DI_DISP2_CB1_MAP (0x01A0 - MX3FB_REG_OFFSET) |
98 | #define DI_DISP2_CB2_MAP (0x01A4 - MX3FB_REG_OFFSET) | 98 | #define DI_DISP2_CB2_MAP (0x01A4 - MX3FB_REG_OFFSET) |
99 | #define DI_DISP3_B0_MAP (0x01A8 - MX3FB_REG_OFFSET) | 99 | #define DI_DISP3_B0_MAP (0x01A8 - MX3FB_REG_OFFSET) |
100 | #define DI_DISP3_B1_MAP (0x01AC - MX3FB_REG_OFFSET) | 100 | #define DI_DISP3_B1_MAP (0x01AC - MX3FB_REG_OFFSET) |
101 | #define DI_DISP3_B2_MAP (0x01B0 - MX3FB_REG_OFFSET) | 101 | #define DI_DISP3_B2_MAP (0x01B0 - MX3FB_REG_OFFSET) |
102 | #define DI_DISP_ACC_CC (0x01B4 - MX3FB_REG_OFFSET) | 102 | #define DI_DISP_ACC_CC (0x01B4 - MX3FB_REG_OFFSET) |
103 | #define DI_DISP_LLA_CONF (0x01B8 - MX3FB_REG_OFFSET) | 103 | #define DI_DISP_LLA_CONF (0x01B8 - MX3FB_REG_OFFSET) |
104 | #define DI_DISP_LLA_DATA (0x01BC - MX3FB_REG_OFFSET) | 104 | #define DI_DISP_LLA_DATA (0x01BC - MX3FB_REG_OFFSET) |
105 | 105 | ||
106 | /* DI_DISP_SIG_POL bits */ | 106 | /* DI_DISP_SIG_POL bits */ |
107 | #define DI_D3_VSYNC_POL_SHIFT 28 | 107 | #define DI_D3_VSYNC_POL_SHIFT 28 |
108 | #define DI_D3_HSYNC_POL_SHIFT 27 | 108 | #define DI_D3_HSYNC_POL_SHIFT 27 |
109 | #define DI_D3_DRDY_SHARP_POL_SHIFT 26 | 109 | #define DI_D3_DRDY_SHARP_POL_SHIFT 26 |
110 | #define DI_D3_CLK_POL_SHIFT 25 | 110 | #define DI_D3_CLK_POL_SHIFT 25 |
111 | #define DI_D3_DATA_POL_SHIFT 24 | 111 | #define DI_D3_DATA_POL_SHIFT 24 |
112 | 112 | ||
113 | /* DI_DISP_IF_CONF bits */ | 113 | /* DI_DISP_IF_CONF bits */ |
114 | #define DI_D3_CLK_IDLE_SHIFT 26 | 114 | #define DI_D3_CLK_IDLE_SHIFT 26 |
115 | #define DI_D3_CLK_SEL_SHIFT 25 | 115 | #define DI_D3_CLK_SEL_SHIFT 25 |
116 | #define DI_D3_DATAMSK_SHIFT 24 | 116 | #define DI_D3_DATAMSK_SHIFT 24 |
117 | 117 | ||
118 | enum ipu_panel { | 118 | enum ipu_panel { |
119 | IPU_PANEL_SHARP_TFT, | 119 | IPU_PANEL_SHARP_TFT, |
120 | IPU_PANEL_TFT, | 120 | IPU_PANEL_TFT, |
121 | }; | 121 | }; |
122 | 122 | ||
123 | struct ipu_di_signal_cfg { | 123 | struct ipu_di_signal_cfg { |
124 | unsigned datamask_en:1; | 124 | unsigned datamask_en:1; |
125 | unsigned clksel_en:1; | 125 | unsigned clksel_en:1; |
126 | unsigned clkidle_en:1; | 126 | unsigned clkidle_en:1; |
127 | unsigned data_pol:1; /* true = inverted */ | 127 | unsigned data_pol:1; /* true = inverted */ |
128 | unsigned clk_pol:1; /* true = rising edge */ | 128 | unsigned clk_pol:1; /* true = rising edge */ |
129 | unsigned enable_pol:1; | 129 | unsigned enable_pol:1; |
130 | unsigned Hsync_pol:1; /* true = active high */ | 130 | unsigned Hsync_pol:1; /* true = active high */ |
131 | unsigned Vsync_pol:1; | 131 | unsigned Vsync_pol:1; |
132 | }; | 132 | }; |
133 | 133 | ||
134 | static const struct fb_videomode mx3fb_modedb[] = { | 134 | static const struct fb_videomode mx3fb_modedb[] = { |
135 | { | 135 | { |
136 | /* 240x320 @ 60 Hz */ | 136 | /* 240x320 @ 60 Hz */ |
137 | .name = "Sharp-QVGA", | 137 | .name = "Sharp-QVGA", |
138 | .refresh = 60, | 138 | .refresh = 60, |
139 | .xres = 240, | 139 | .xres = 240, |
140 | .yres = 320, | 140 | .yres = 320, |
141 | .pixclock = 185925, | 141 | .pixclock = 185925, |
142 | .left_margin = 9, | 142 | .left_margin = 9, |
143 | .right_margin = 16, | 143 | .right_margin = 16, |
144 | .upper_margin = 7, | 144 | .upper_margin = 7, |
145 | .lower_margin = 9, | 145 | .lower_margin = 9, |
146 | .hsync_len = 1, | 146 | .hsync_len = 1, |
147 | .vsync_len = 1, | 147 | .vsync_len = 1, |
148 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE | | 148 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE | |
149 | FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT | | 149 | FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT | |
150 | FB_SYNC_CLK_IDLE_EN, | 150 | FB_SYNC_CLK_IDLE_EN, |
151 | .vmode = FB_VMODE_NONINTERLACED, | 151 | .vmode = FB_VMODE_NONINTERLACED, |
152 | .flag = 0, | 152 | .flag = 0, |
153 | }, { | 153 | }, { |
154 | /* 240x33 @ 60 Hz */ | 154 | /* 240x33 @ 60 Hz */ |
155 | .name = "Sharp-CLI", | 155 | .name = "Sharp-CLI", |
156 | .refresh = 60, | 156 | .refresh = 60, |
157 | .xres = 240, | 157 | .xres = 240, |
158 | .yres = 33, | 158 | .yres = 33, |
159 | .pixclock = 185925, | 159 | .pixclock = 185925, |
160 | .left_margin = 9, | 160 | .left_margin = 9, |
161 | .right_margin = 16, | 161 | .right_margin = 16, |
162 | .upper_margin = 7, | 162 | .upper_margin = 7, |
163 | .lower_margin = 9 + 287, | 163 | .lower_margin = 9 + 287, |
164 | .hsync_len = 1, | 164 | .hsync_len = 1, |
165 | .vsync_len = 1, | 165 | .vsync_len = 1, |
166 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE | | 166 | .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_SHARP_MODE | |
167 | FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT | | 167 | FB_SYNC_CLK_INVERT | FB_SYNC_DATA_INVERT | |
168 | FB_SYNC_CLK_IDLE_EN, | 168 | FB_SYNC_CLK_IDLE_EN, |
169 | .vmode = FB_VMODE_NONINTERLACED, | 169 | .vmode = FB_VMODE_NONINTERLACED, |
170 | .flag = 0, | 170 | .flag = 0, |
171 | }, { | 171 | }, { |
172 | /* 640x480 @ 60 Hz */ | 172 | /* 640x480 @ 60 Hz */ |
173 | .name = "NEC-VGA", | 173 | .name = "NEC-VGA", |
174 | .refresh = 60, | 174 | .refresh = 60, |
175 | .xres = 640, | 175 | .xres = 640, |
176 | .yres = 480, | 176 | .yres = 480, |
177 | .pixclock = 38255, | 177 | .pixclock = 38255, |
178 | .left_margin = 144, | 178 | .left_margin = 144, |
179 | .right_margin = 0, | 179 | .right_margin = 0, |
180 | .upper_margin = 34, | 180 | .upper_margin = 34, |
181 | .lower_margin = 40, | 181 | .lower_margin = 40, |
182 | .hsync_len = 1, | 182 | .hsync_len = 1, |
183 | .vsync_len = 1, | 183 | .vsync_len = 1, |
184 | .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH, | 184 | .sync = FB_SYNC_VERT_HIGH_ACT | FB_SYNC_OE_ACT_HIGH, |
185 | .vmode = FB_VMODE_NONINTERLACED, | 185 | .vmode = FB_VMODE_NONINTERLACED, |
186 | .flag = 0, | 186 | .flag = 0, |
187 | }, { | 187 | }, { |
188 | /* NTSC TV output */ | 188 | /* NTSC TV output */ |
189 | .name = "TV-NTSC", | 189 | .name = "TV-NTSC", |
190 | .refresh = 60, | 190 | .refresh = 60, |
191 | .xres = 640, | 191 | .xres = 640, |
192 | .yres = 480, | 192 | .yres = 480, |
193 | .pixclock = 37538, | 193 | .pixclock = 37538, |
194 | .left_margin = 38, | 194 | .left_margin = 38, |
195 | .right_margin = 858 - 640 - 38 - 3, | 195 | .right_margin = 858 - 640 - 38 - 3, |
196 | .upper_margin = 36, | 196 | .upper_margin = 36, |
197 | .lower_margin = 518 - 480 - 36 - 1, | 197 | .lower_margin = 518 - 480 - 36 - 1, |
198 | .hsync_len = 3, | 198 | .hsync_len = 3, |
199 | .vsync_len = 1, | 199 | .vsync_len = 1, |
200 | .sync = 0, | 200 | .sync = 0, |
201 | .vmode = FB_VMODE_NONINTERLACED, | 201 | .vmode = FB_VMODE_NONINTERLACED, |
202 | .flag = 0, | 202 | .flag = 0, |
203 | }, { | 203 | }, { |
204 | /* PAL TV output */ | 204 | /* PAL TV output */ |
205 | .name = "TV-PAL", | 205 | .name = "TV-PAL", |
206 | .refresh = 50, | 206 | .refresh = 50, |
207 | .xres = 640, | 207 | .xres = 640, |
208 | .yres = 480, | 208 | .yres = 480, |
209 | .pixclock = 37538, | 209 | .pixclock = 37538, |
210 | .left_margin = 38, | 210 | .left_margin = 38, |
211 | .right_margin = 960 - 640 - 38 - 32, | 211 | .right_margin = 960 - 640 - 38 - 32, |
212 | .upper_margin = 32, | 212 | .upper_margin = 32, |
213 | .lower_margin = 555 - 480 - 32 - 3, | 213 | .lower_margin = 555 - 480 - 32 - 3, |
214 | .hsync_len = 32, | 214 | .hsync_len = 32, |
215 | .vsync_len = 3, | 215 | .vsync_len = 3, |
216 | .sync = 0, | 216 | .sync = 0, |
217 | .vmode = FB_VMODE_NONINTERLACED, | 217 | .vmode = FB_VMODE_NONINTERLACED, |
218 | .flag = 0, | 218 | .flag = 0, |
219 | }, { | 219 | }, { |
220 | /* TV output VGA mode, 640x480 @ 65 Hz */ | 220 | /* TV output VGA mode, 640x480 @ 65 Hz */ |
221 | .name = "TV-VGA", | 221 | .name = "TV-VGA", |
222 | .refresh = 60, | 222 | .refresh = 60, |
223 | .xres = 640, | 223 | .xres = 640, |
224 | .yres = 480, | 224 | .yres = 480, |
225 | .pixclock = 40574, | 225 | .pixclock = 40574, |
226 | .left_margin = 35, | 226 | .left_margin = 35, |
227 | .right_margin = 45, | 227 | .right_margin = 45, |
228 | .upper_margin = 9, | 228 | .upper_margin = 9, |
229 | .lower_margin = 1, | 229 | .lower_margin = 1, |
230 | .hsync_len = 46, | 230 | .hsync_len = 46, |
231 | .vsync_len = 5, | 231 | .vsync_len = 5, |
232 | .sync = 0, | 232 | .sync = 0, |
233 | .vmode = FB_VMODE_NONINTERLACED, | 233 | .vmode = FB_VMODE_NONINTERLACED, |
234 | .flag = 0, | 234 | .flag = 0, |
235 | }, | 235 | }, |
236 | }; | 236 | }; |
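
The modedb entries above can be sanity-checked with the usual fbdev timing arithmetic: pixclock is picoseconds per pixel, so refresh ~= 10^12 / (pixclock * htotal * vtotal). For "Sharp-QVGA", htotal = 240+9+16+1 = 266 and vtotal = 320+7+9+1 = 337, giving 10^12 / (185925 * 266 * 337) ~= 60 Hz, matching .refresh. A sketch of that check — demo_mode_refresh() is illustrative only:

    #include <linux/fb.h>
    #include <linux/math64.h>

    static unsigned int demo_mode_refresh(const struct fb_videomode *m)
    {
            u64 htotal = m->xres + m->left_margin + m->right_margin +
                         m->hsync_len;
            u64 vtotal = m->yres + m->upper_margin + m->lower_margin +
                         m->vsync_len;
            u64 den = (u64)m->pixclock * htotal * vtotal;

            /* pixclock is ps/pixel, so 1e12 / (ps per frame) = Hz */
            return den ? (unsigned int)div64_u64(1000000000000ULL, den) : 0;
    }
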
237 | 237 | ||
238 | struct mx3fb_data { | 238 | struct mx3fb_data { |
239 | struct fb_info *fbi; | 239 | struct fb_info *fbi; |
240 | int backlight_level; | 240 | int backlight_level; |
241 | void __iomem *reg_base; | 241 | void __iomem *reg_base; |
242 | spinlock_t lock; | 242 | spinlock_t lock; |
243 | struct device *dev; | 243 | struct device *dev; |
244 | 244 | ||
245 | uint32_t h_start_width; | 245 | uint32_t h_start_width; |
246 | uint32_t v_start_width; | 246 | uint32_t v_start_width; |
247 | }; | 247 | }; |
248 | 248 | ||
249 | struct dma_chan_request { | 249 | struct dma_chan_request { |
250 | struct mx3fb_data *mx3fb; | 250 | struct mx3fb_data *mx3fb; |
251 | enum ipu_channel id; | 251 | enum ipu_channel id; |
252 | }; | 252 | }; |
253 | 253 | ||
254 | /* MX3 specific framebuffer information. */ | 254 | /* MX3 specific framebuffer information. */ |
255 | struct mx3fb_info { | 255 | struct mx3fb_info { |
256 | int blank; | 256 | int blank; |
257 | enum ipu_channel ipu_ch; | 257 | enum ipu_channel ipu_ch; |
258 | uint32_t cur_ipu_buf; | 258 | uint32_t cur_ipu_buf; |
259 | 259 | ||
260 | u32 pseudo_palette[16]; | 260 | u32 pseudo_palette[16]; |
261 | 261 | ||
262 | struct completion flip_cmpl; | 262 | struct completion flip_cmpl; |
263 | struct mutex mutex; /* Protects fb-ops */ | 263 | struct mutex mutex; /* Protects fb-ops */ |
264 | struct mx3fb_data *mx3fb; | 264 | struct mx3fb_data *mx3fb; |
265 | struct idmac_channel *idmac_channel; | 265 | struct idmac_channel *idmac_channel; |
266 | struct dma_async_tx_descriptor *txd; | 266 | struct dma_async_tx_descriptor *txd; |
267 | dma_cookie_t cookie; | 267 | dma_cookie_t cookie; |
268 | struct scatterlist sg[2]; | 268 | struct scatterlist sg[2]; |
269 | 269 | ||
270 | u32 sync; /* preserve var->sync flags */ | 270 | u32 sync; /* preserve var->sync flags */ |
271 | }; | 271 | }; |
272 | 272 | ||
273 | static void mx3fb_dma_done(void *); | 273 | static void mx3fb_dma_done(void *); |
274 | 274 | ||
275 | /* Used fb-mode and bpp. Can be set on kernel command line, therefore file-static. */ | 275 | /* Used fb-mode and bpp. Can be set on kernel command line, therefore file-static. */ |
276 | static const char *fb_mode; | 276 | static const char *fb_mode; |
277 | static unsigned long default_bpp = 16; | 277 | static unsigned long default_bpp = 16; |
278 | 278 | ||
279 | static u32 mx3fb_read_reg(struct mx3fb_data *mx3fb, unsigned long reg) | 279 | static u32 mx3fb_read_reg(struct mx3fb_data *mx3fb, unsigned long reg) |
280 | { | 280 | { |
281 | return __raw_readl(mx3fb->reg_base + reg); | 281 | return __raw_readl(mx3fb->reg_base + reg); |
282 | } | 282 | } |
283 | 283 | ||
284 | static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long reg) | 284 | static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long reg) |
285 | { | 285 | { |
286 | __raw_writel(value, mx3fb->reg_base + reg); | 286 | __raw_writel(value, mx3fb->reg_base + reg); |
287 | } | 287 | } |
288 | 288 | ||
289 | static const uint32_t di_mappings[] = { | 289 | static const uint32_t di_mappings[] = { |
290 | 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */ | 290 | 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */ |
291 | 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */ | 291 | 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */ |
292 | 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */ | 292 | 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */ |
293 | 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */ | 293 | 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */ |
294 | }; | 294 | }; |
295 | 295 | ||
296 | static void sdc_fb_init(struct mx3fb_info *fbi) | 296 | static void sdc_fb_init(struct mx3fb_info *fbi) |
297 | { | 297 | { |
298 | struct mx3fb_data *mx3fb = fbi->mx3fb; | 298 | struct mx3fb_data *mx3fb = fbi->mx3fb; |
299 | uint32_t reg; | 299 | uint32_t reg; |
300 | 300 | ||
301 | reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF); | 301 | reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF); |
302 | 302 | ||
303 | mx3fb_write_reg(mx3fb, reg | SDC_COM_BG_EN, SDC_COM_CONF); | 303 | mx3fb_write_reg(mx3fb, reg | SDC_COM_BG_EN, SDC_COM_CONF); |
304 | } | 304 | } |
305 | 305 | ||
306 | /* Returns enabled flag before uninit */ | 306 | /* Returns enabled flag before uninit */ |
307 | static uint32_t sdc_fb_uninit(struct mx3fb_info *fbi) | 307 | static uint32_t sdc_fb_uninit(struct mx3fb_info *fbi) |
308 | { | 308 | { |
309 | struct mx3fb_data *mx3fb = fbi->mx3fb; | 309 | struct mx3fb_data *mx3fb = fbi->mx3fb; |
310 | uint32_t reg; | 310 | uint32_t reg; |
311 | 311 | ||
312 | reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF); | 312 | reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF); |
313 | 313 | ||
314 | mx3fb_write_reg(mx3fb, reg & ~SDC_COM_BG_EN, SDC_COM_CONF); | 314 | mx3fb_write_reg(mx3fb, reg & ~SDC_COM_BG_EN, SDC_COM_CONF); |
315 | 315 | ||
316 | return reg & SDC_COM_BG_EN; | 316 | return reg & SDC_COM_BG_EN; |
317 | } | 317 | } |
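
sdc_enable_channel() below drives the framebuffer through the generic dmaengine slave API: prepare a descriptor over the scatterlist, attach a completion callback, then submit. A condensed sketch of that sequence — demo_kick() is an invented name and error handling is trimmed:

    #include <linux/dmaengine.h>
    #include <linux/scatterlist.h>

    static dma_cookie_t demo_kick(struct dma_chan *chan, struct scatterlist *sg,
                                  dma_async_tx_callback cb, void *cb_param)
    {
            struct dma_async_tx_descriptor *txd;

            /* One-entry scatterlist, memory-to-device, interrupt on done */
            txd = chan->device->device_prep_slave_sg(chan, sg, 1,
                                                     DMA_TO_DEVICE,
                                                     DMA_PREP_INTERRUPT);
            if (!txd)
                    return -ENOMEM;

            txd->callback = cb;
            txd->callback_param = cb_param;
            return txd->tx_submit(txd);     /* negative cookie on failure */
    }
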
318 | 318 | ||
319 | static void sdc_enable_channel(struct mx3fb_info *mx3_fbi) | 319 | static void sdc_enable_channel(struct mx3fb_info *mx3_fbi) |
320 | { | 320 | { |
321 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; | 321 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; |
322 | struct idmac_channel *ichan = mx3_fbi->idmac_channel; | 322 | struct idmac_channel *ichan = mx3_fbi->idmac_channel; |
323 | struct dma_chan *dma_chan = &ichan->dma_chan; | 323 | struct dma_chan *dma_chan = &ichan->dma_chan; |
324 | unsigned long flags; | 324 | unsigned long flags; |
325 | dma_cookie_t cookie; | 325 | dma_cookie_t cookie; |
326 | 326 | ||
327 | if (mx3_fbi->txd) | 327 | if (mx3_fbi->txd) |
328 | dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, | 328 | dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi, |
329 | to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); | 329 | to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg); |
330 | else | 330 | else |
331 | dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi); | 331 | dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi); |
332 | 332 | ||
333 | /* This enables the channel */ | 333 | /* This enables the channel */ |
334 | if (mx3_fbi->cookie < 0) { | 334 | if (mx3_fbi->cookie < 0) { |
335 | mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan, | 335 | mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan, |
336 | &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT); | 336 | &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT); |
337 | if (!mx3_fbi->txd) { | 337 | if (!mx3_fbi->txd) { |
338 | dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n", | 338 | dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n", |
339 | dma_chan->chan_id); | 339 | dma_chan->chan_id); |
340 | return; | 340 | return; |
341 | } | 341 | } |
342 | 342 | ||
343 | mx3_fbi->txd->callback_param = mx3_fbi->txd; | 343 | mx3_fbi->txd->callback_param = mx3_fbi->txd; |
344 | mx3_fbi->txd->callback = mx3fb_dma_done; | 344 | mx3_fbi->txd->callback = mx3fb_dma_done; |
345 | 345 | ||
346 | cookie = mx3_fbi->txd->tx_submit(mx3_fbi->txd); | 346 | cookie = mx3_fbi->txd->tx_submit(mx3_fbi->txd); |
347 | dev_dbg(mx3fb->dev, "%d: Submit %p #%d [%c]\n", __LINE__, | 347 | dev_dbg(mx3fb->dev, "%d: Submit %p #%d [%c]\n", __LINE__, |
348 | mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+'); | 348 | mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+'); |
349 | } else { | 349 | } else { |
350 | if (!mx3_fbi->txd || !mx3_fbi->txd->tx_submit) { | 350 | if (!mx3_fbi->txd || !mx3_fbi->txd->tx_submit) { |
351 | dev_err(mx3fb->dev, "Cannot enable channel %d\n", | 351 | dev_err(mx3fb->dev, "Cannot enable channel %d\n", |
352 | dma_chan->chan_id); | 352 | dma_chan->chan_id); |
353 | return; | 353 | return; |
354 | } | 354 | } |
355 | 355 | ||
356 | /* Just re-activate the same buffer */ | 356 | /* Just re-activate the same buffer */ |
357 | dma_async_issue_pending(dma_chan); | 357 | dma_async_issue_pending(dma_chan); |
358 | cookie = mx3_fbi->cookie; | 358 | cookie = mx3_fbi->cookie; |
359 | dev_dbg(mx3fb->dev, "%d: Re-submit %p #%d [%c]\n", __LINE__, | 359 | dev_dbg(mx3fb->dev, "%d: Re-submit %p #%d [%c]\n", __LINE__, |
360 | mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+'); | 360 | mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+'); |
361 | } | 361 | } |
362 | 362 | ||
363 | if (cookie >= 0) { | 363 | if (cookie >= 0) { |
364 | spin_lock_irqsave(&mx3fb->lock, flags); | 364 | spin_lock_irqsave(&mx3fb->lock, flags); |
365 | sdc_fb_init(mx3_fbi); | 365 | sdc_fb_init(mx3_fbi); |
366 | mx3_fbi->cookie = cookie; | 366 | mx3_fbi->cookie = cookie; |
367 | spin_unlock_irqrestore(&mx3fb->lock, flags); | 367 | spin_unlock_irqrestore(&mx3fb->lock, flags); |
368 | } | 368 | } |
369 | 369 | ||
370 | /* | 370 | /* |
371 | * Attention! Without this msleep the channel keeps generating | 371 | * Attention! Without this msleep the channel keeps generating |
372 | * interrupts. Next sdc_set_brightness() is going to be called | 372 | * interrupts. Next sdc_set_brightness() is going to be called |
373 | * from mx3fb_blank(). | 373 | * from mx3fb_blank(). |
374 | */ | 374 | */ |
375 | msleep(2); | 375 | msleep(2); |
376 | } | 376 | } |
377 | 377 | ||
378 | static void sdc_disable_channel(struct mx3fb_info *mx3_fbi) | 378 | static void sdc_disable_channel(struct mx3fb_info *mx3_fbi) |
379 | { | 379 | { |
380 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; | 380 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; |
381 | uint32_t enabled; | 381 | uint32_t enabled; |
382 | unsigned long flags; | 382 | unsigned long flags; |
383 | 383 | ||
384 | spin_lock_irqsave(&mx3fb->lock, flags); | 384 | spin_lock_irqsave(&mx3fb->lock, flags); |
385 | 385 | ||
386 | enabled = sdc_fb_uninit(mx3_fbi); | 386 | enabled = sdc_fb_uninit(mx3_fbi); |
387 | 387 | ||
388 | spin_unlock_irqrestore(&mx3fb->lock, flags); | 388 | spin_unlock_irqrestore(&mx3fb->lock, flags); |
389 | 389 | ||
390 | mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan, | 390 | mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan, |
391 | DMA_TERMINATE_ALL); | 391 | DMA_TERMINATE_ALL, 0); |
392 | mx3_fbi->txd = NULL; | 392 | mx3_fbi->txd = NULL; |
393 | mx3_fbi->cookie = -EINVAL; | 393 | mx3_fbi->cookie = -EINVAL; |
394 | } | 394 | } |
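This hunk is the caller-side half of the conversion: DMA_TERMINATE_ALL carries no command-specific data, so the new argument is simply 0. The callback itself, updated in include/linux/dmaengine.h within this same commit, gains a trailing unsigned long through which future control commands can pass external data; roughly sketched:

    /* before */
    int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd);

    /* after: commands that need to pass data supply it via arg */
    int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                          unsigned long arg);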
395 | 395 | ||
396 | /** | 396 | /** |
397 | * sdc_set_window_pos() - set window position of the respective plane. | 397 | * sdc_set_window_pos() - set window position of the respective plane. |
398 | * @mx3fb: mx3fb context. | 398 | * @mx3fb: mx3fb context. |
399 | * @channel: IPU DMAC channel ID. | 399 | * @channel: IPU DMAC channel ID. |
400 | * @x_pos: X coordinate relative to the top left corner to place window at. | 400 | * @x_pos: X coordinate relative to the top left corner to place window at. |
401 | * @y_pos: Y coordinate relative to the top left corner to place window at. | 401 | * @y_pos: Y coordinate relative to the top left corner to place window at. |
402 | * @return: 0 on success or negative error code on failure. | 402 | * @return: 0 on success or negative error code on failure. |
403 | */ | 403 | */ |
404 | static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel, | 404 | static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel, |
405 | int16_t x_pos, int16_t y_pos) | 405 | int16_t x_pos, int16_t y_pos) |
406 | { | 406 | { |
407 | if (channel != IDMAC_SDC_0) | 407 | if (channel != IDMAC_SDC_0) |
408 | return -EINVAL; | 408 | return -EINVAL; |
409 | 409 | ||
410 | x_pos += mx3fb->h_start_width; | 410 | x_pos += mx3fb->h_start_width; |
411 | y_pos += mx3fb->v_start_width; | 411 | y_pos += mx3fb->v_start_width; |
412 | 412 | ||
413 | mx3fb_write_reg(mx3fb, (x_pos << 16) | y_pos, SDC_BG_POS); | 413 | mx3fb_write_reg(mx3fb, (x_pos << 16) | y_pos, SDC_BG_POS); |
414 | return 0; | 414 | return 0; |
415 | } | 415 | } |
416 | 416 | ||
417 | /** | 417 | /** |
418 | * sdc_init_panel() - initialize a synchronous LCD panel. | 418 | * sdc_init_panel() - initialize a synchronous LCD panel. |
419 | * @mx3fb: mx3fb context. | 419 | * @mx3fb: mx3fb context. |
420 | * @panel: panel type. | 420 | * @panel: panel type. |
421 | * @pixel_clk: desired pixel clock frequency in Hz. | 421 | * @pixel_clk: desired pixel clock frequency in Hz. |
422 | * @width: width of panel in pixels. | 422 | * @width: width of panel in pixels. |
423 | * @height: height of panel in pixels. | 423 | * @height: height of panel in pixels. |
424 | * @pixel_fmt: pixel format of buffer as FOURCC ASCII code. | 424 | * @pixel_fmt: pixel format of buffer as FOURCC ASCII code. |
425 | * @h_start_width: number of pixel clocks between the HSYNC signal pulse | 425 | * @h_start_width: number of pixel clocks between the HSYNC signal pulse |
426 | * and the start of valid data. | 426 | * and the start of valid data. |
427 | * @h_sync_width: width of the HSYNC signal in units of pixel clocks. | 427 | * @h_sync_width: width of the HSYNC signal in units of pixel clocks. |
428 | * @h_end_width: number of pixel clocks between the end of valid data | 428 | * @h_end_width: number of pixel clocks between the end of valid data |
429 | * and the HSYNC signal for next line. | 429 | * and the HSYNC signal for next line. |
430 | * @v_start_width: number of lines between the VSYNC signal pulse and the | 430 | * @v_start_width: number of lines between the VSYNC signal pulse and the |
431 | * start of valid data. | 431 | * start of valid data. |
432 | * @v_sync_width: width of the VSYNC signal in units of lines | 432 | * @v_sync_width: width of the VSYNC signal in units of lines |
433 | * @v_end_width: number of lines between the end of valid data and the | 433 | * @v_end_width: number of lines between the end of valid data and the |
434 | * VSYNC signal for next frame. | 434 | * VSYNC signal for next frame. |
435 | * @sig: bitfield of signal polarities for LCD interface. | 435 | * @sig: bitfield of signal polarities for LCD interface. |
436 | * @return: 0 on success or negative error code on failure. | 436 | * @return: 0 on success or negative error code on failure. |
437 | */ | 437 | */ |
438 | static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, | 438 | static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel, |
439 | uint32_t pixel_clk, | 439 | uint32_t pixel_clk, |
440 | uint16_t width, uint16_t height, | 440 | uint16_t width, uint16_t height, |
441 | enum pixel_fmt pixel_fmt, | 441 | enum pixel_fmt pixel_fmt, |
442 | uint16_t h_start_width, uint16_t h_sync_width, | 442 | uint16_t h_start_width, uint16_t h_sync_width, |
443 | uint16_t h_end_width, uint16_t v_start_width, | 443 | uint16_t h_end_width, uint16_t v_start_width, |
444 | uint16_t v_sync_width, uint16_t v_end_width, | 444 | uint16_t v_sync_width, uint16_t v_end_width, |
445 | struct ipu_di_signal_cfg sig) | 445 | struct ipu_di_signal_cfg sig) |
446 | { | 446 | { |
447 | unsigned long lock_flags; | 447 | unsigned long lock_flags; |
448 | uint32_t reg; | 448 | uint32_t reg; |
449 | uint32_t old_conf; | 449 | uint32_t old_conf; |
450 | uint32_t div; | 450 | uint32_t div; |
451 | struct clk *ipu_clk; | 451 | struct clk *ipu_clk; |
452 | 452 | ||
453 | dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height); | 453 | dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height); |
454 | 454 | ||
455 | if (v_sync_width == 0 || h_sync_width == 0) | 455 | if (v_sync_width == 0 || h_sync_width == 0) |
456 | return -EINVAL; | 456 | return -EINVAL; |
457 | 457 | ||
458 | /* Init panel size and blanking periods */ | 458 | /* Init panel size and blanking periods */ |
459 | reg = ((uint32_t) (h_sync_width - 1) << 26) | | 459 | reg = ((uint32_t) (h_sync_width - 1) << 26) | |
460 | ((uint32_t) (width + h_start_width + h_end_width - 1) << 16); | 460 | ((uint32_t) (width + h_start_width + h_end_width - 1) << 16); |
461 | mx3fb_write_reg(mx3fb, reg, SDC_HOR_CONF); | 461 | mx3fb_write_reg(mx3fb, reg, SDC_HOR_CONF); |
462 | 462 | ||
463 | #ifdef DEBUG | 463 | #ifdef DEBUG |
464 | printk(KERN_CONT " hor_conf %x,", reg); | 464 | printk(KERN_CONT " hor_conf %x,", reg); |
465 | #endif | 465 | #endif |
466 | 466 | ||
467 | reg = ((uint32_t) (v_sync_width - 1) << 26) | SDC_V_SYNC_WIDTH_L | | 467 | reg = ((uint32_t) (v_sync_width - 1) << 26) | SDC_V_SYNC_WIDTH_L | |
468 | ((uint32_t) (height + v_start_width + v_end_width - 1) << 16); | 468 | ((uint32_t) (height + v_start_width + v_end_width - 1) << 16); |
469 | mx3fb_write_reg(mx3fb, reg, SDC_VER_CONF); | 469 | mx3fb_write_reg(mx3fb, reg, SDC_VER_CONF); |
470 | 470 | ||
471 | #ifdef DEBUG | 471 | #ifdef DEBUG |
472 | printk(KERN_CONT " ver_conf %x\n", reg); | 472 | printk(KERN_CONT " ver_conf %x\n", reg); |
473 | #endif | 473 | #endif |
474 | 474 | ||
475 | mx3fb->h_start_width = h_start_width; | 475 | mx3fb->h_start_width = h_start_width; |
476 | mx3fb->v_start_width = v_start_width; | 476 | mx3fb->v_start_width = v_start_width; |
477 | 477 | ||
478 | switch (panel) { | 478 | switch (panel) { |
479 | case IPU_PANEL_SHARP_TFT: | 479 | case IPU_PANEL_SHARP_TFT: |
480 | mx3fb_write_reg(mx3fb, 0x00FD0102L, SDC_SHARP_CONF_1); | 480 | mx3fb_write_reg(mx3fb, 0x00FD0102L, SDC_SHARP_CONF_1); |
481 | mx3fb_write_reg(mx3fb, 0x00F500F4L, SDC_SHARP_CONF_2); | 481 | mx3fb_write_reg(mx3fb, 0x00F500F4L, SDC_SHARP_CONF_2); |
482 | mx3fb_write_reg(mx3fb, SDC_COM_SHARP | SDC_COM_TFT_COLOR, SDC_COM_CONF); | 482 | mx3fb_write_reg(mx3fb, SDC_COM_SHARP | SDC_COM_TFT_COLOR, SDC_COM_CONF); |
483 | break; | 483 | break; |
484 | case IPU_PANEL_TFT: | 484 | case IPU_PANEL_TFT: |
485 | mx3fb_write_reg(mx3fb, SDC_COM_TFT_COLOR, SDC_COM_CONF); | 485 | mx3fb_write_reg(mx3fb, SDC_COM_TFT_COLOR, SDC_COM_CONF); |
486 | break; | 486 | break; |
487 | default: | 487 | default: |
488 | return -EINVAL; | 488 | return -EINVAL; |
489 | } | 489 | } |
490 | 490 | ||
491 | /* Init clocking */ | 491 | /* Init clocking */ |
492 | 492 | ||
493 | /* | 493 | /* |
494 | * Calculate divider: fractional part is 4 bits so simply multiply by | 494 | * Calculate divider: fractional part is 4 bits so simply multiply by |
495 | * 2^4 to get fractional part, as long as we stay under ~250MHz and on | 495 | * 2^4 to get fractional part, as long as we stay under ~250MHz and on |
496 | * i.MX31 it (HSP_CLK) is <= 178MHz. Currently 128.267MHz | 496 | * i.MX31 it (HSP_CLK) is <= 178MHz. Currently 128.267MHz |
497 | */ | 497 | */ |
498 | ipu_clk = clk_get(mx3fb->dev, NULL); | 498 | ipu_clk = clk_get(mx3fb->dev, NULL); |
499 | if (!IS_ERR(ipu_clk)) { | 499 | if (!IS_ERR(ipu_clk)) { |
500 | div = clk_get_rate(ipu_clk) * 16 / pixel_clk; | 500 | div = clk_get_rate(ipu_clk) * 16 / pixel_clk; |
501 | clk_put(ipu_clk); | 501 | clk_put(ipu_clk); |
502 | } else { | 502 | } else { |
503 | div = 0; | 503 | div = 0; |
504 | } | 504 | } |
505 | 505 | ||
506 | if (div < 0x40) { /* Divider less than 4 */ | 506 | if (div < 0x40) { /* Divider less than 4 */ |
507 | dev_dbg(mx3fb->dev, | 507 | dev_dbg(mx3fb->dev, |
508 | "InitPanel() - Pixel clock divider less than 4\n"); | 508 | "InitPanel() - Pixel clock divider less than 4\n"); |
509 | div = 0x40; | 509 | div = 0x40; |
510 | } | 510 | } |
511 | 511 | ||
512 | dev_dbg(mx3fb->dev, "pixel clk = %u, divider %u.%u\n", | 512 | dev_dbg(mx3fb->dev, "pixel clk = %u, divider %u.%u\n", |
513 | pixel_clk, div >> 4, (div & 7) * 125); | 513 | pixel_clk, div >> 4, (div & 7) * 125); |
514 | 514 | ||
515 | spin_lock_irqsave(&mx3fb->lock, lock_flags); | 515 | spin_lock_irqsave(&mx3fb->lock, lock_flags); |
516 | 516 | ||
517 | /* | 517 | /* |
518 | * DISP3_IF_CLK_DOWN_WR is half the divider value and 2 fraction bits | 518 | * DISP3_IF_CLK_DOWN_WR is half the divider value and 2 fraction bits |
519 | * fewer. Subtract 1 extra from DISP3_IF_CLK_DOWN_WR based on timing | 519 | * fewer. Subtract 1 extra from DISP3_IF_CLK_DOWN_WR based on timing |
520 | * debug. DISP3_IF_CLK_UP_WR is 0 | 520 | * debug. DISP3_IF_CLK_UP_WR is 0 |
521 | */ | 521 | */ |
522 | mx3fb_write_reg(mx3fb, (((div / 8) - 1) << 22) | div, DI_DISP3_TIME_CONF); | 522 | mx3fb_write_reg(mx3fb, (((div / 8) - 1) << 22) | div, DI_DISP3_TIME_CONF); |
523 | 523 | ||
524 | /* DI settings */ | 524 | /* DI settings */ |
525 | old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF; | 525 | old_conf = mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF) & 0x78FFFFFF; |
526 | old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT | | 526 | old_conf |= sig.datamask_en << DI_D3_DATAMSK_SHIFT | |
527 | sig.clksel_en << DI_D3_CLK_SEL_SHIFT | | 527 | sig.clksel_en << DI_D3_CLK_SEL_SHIFT | |
528 | sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT; | 528 | sig.clkidle_en << DI_D3_CLK_IDLE_SHIFT; |
529 | mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF); | 529 | mx3fb_write_reg(mx3fb, old_conf, DI_DISP_IF_CONF); |
530 | 530 | ||
531 | old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF; | 531 | old_conf = mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL) & 0xE0FFFFFF; |
532 | old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT | | 532 | old_conf |= sig.data_pol << DI_D3_DATA_POL_SHIFT | |
533 | sig.clk_pol << DI_D3_CLK_POL_SHIFT | | 533 | sig.clk_pol << DI_D3_CLK_POL_SHIFT | |
534 | sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT | | 534 | sig.enable_pol << DI_D3_DRDY_SHARP_POL_SHIFT | |
535 | sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT | | 535 | sig.Hsync_pol << DI_D3_HSYNC_POL_SHIFT | |
536 | sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT; | 536 | sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT; |
537 | mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL); | 537 | mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL); |
538 | 538 | ||
539 | switch (pixel_fmt) { | 539 | switch (pixel_fmt) { |
540 | case IPU_PIX_FMT_RGB24: | 540 | case IPU_PIX_FMT_RGB24: |
541 | mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP); | 541 | mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP); |
542 | mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP); | 542 | mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP); |
543 | mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP); | 543 | mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP); |
544 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | | 544 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | |
545 | ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC); | 545 | ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC); |
546 | break; | 546 | break; |
547 | case IPU_PIX_FMT_RGB666: | 547 | case IPU_PIX_FMT_RGB666: |
548 | mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP); | 548 | mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP); |
549 | mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP); | 549 | mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP); |
550 | mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP); | 550 | mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP); |
551 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | | 551 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | |
552 | ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC); | 552 | ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC); |
553 | break; | 553 | break; |
554 | case IPU_PIX_FMT_BGR666: | 554 | case IPU_PIX_FMT_BGR666: |
555 | mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP); | 555 | mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP); |
556 | mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP); | 556 | mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP); |
557 | mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP); | 557 | mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP); |
558 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | | 558 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | |
559 | ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC); | 559 | ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC); |
560 | break; | 560 | break; |
561 | default: | 561 | default: |
562 | mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP); | 562 | mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP); |
563 | mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP); | 563 | mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP); |
564 | mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP); | 564 | mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP); |
565 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | | 565 | mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) | |
566 | ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC); | 566 | ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC); |
567 | break; | 567 | break; |
568 | } | 568 | } |
569 | 569 | ||
570 | spin_unlock_irqrestore(&mx3fb->lock, lock_flags); | 570 | spin_unlock_irqrestore(&mx3fb->lock, lock_flags); |
571 | 571 | ||
572 | dev_dbg(mx3fb->dev, "DI_DISP_IF_CONF = 0x%08X\n", | 572 | dev_dbg(mx3fb->dev, "DI_DISP_IF_CONF = 0x%08X\n", |
573 | mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF)); | 573 | mx3fb_read_reg(mx3fb, DI_DISP_IF_CONF)); |
574 | dev_dbg(mx3fb->dev, "DI_DISP_SIG_POL = 0x%08X\n", | 574 | dev_dbg(mx3fb->dev, "DI_DISP_SIG_POL = 0x%08X\n", |
575 | mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL)); | 575 | mx3fb_read_reg(mx3fb, DI_DISP_SIG_POL)); |
576 | dev_dbg(mx3fb->dev, "DI_DISP3_TIME_CONF = 0x%08X\n", | 576 | dev_dbg(mx3fb->dev, "DI_DISP3_TIME_CONF = 0x%08X\n", |
577 | mx3fb_read_reg(mx3fb, DI_DISP3_TIME_CONF)); | 577 | mx3fb_read_reg(mx3fb, DI_DISP3_TIME_CONF)); |
578 | 578 | ||
579 | return 0; | 579 | return 0; |
580 | } | 580 | } |
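A worked example of the 4.4 fixed-point divider above (the clock figures are illustrative, not taken from any particular board): with HSP_CLK at 128.267 MHz and a 25.175 MHz pixel clock, div = 128267000 * 16 / 25175000 = 81 = 0x51, an effective divide ratio of 81/16 ≈ 5.06; anything below 0x40 is clamped, so the hardware never divides by less than 4.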
581 | 581 | ||
582 | /** | 582 | /** |
583 | * sdc_set_color_key() - set the transparent color key for SDC graphic plane. | 583 | * sdc_set_color_key() - set the transparent color key for SDC graphic plane. |
584 | * @mx3fb: mx3fb context. | 584 | * @mx3fb: mx3fb context. |
585 | * @channel: IPU DMAC channel ID. | 585 | * @channel: IPU DMAC channel ID. |
586 | * @enable: boolean to enable or disable the color key. | 586 | * @enable: boolean to enable or disable the color key. |
587 | * @color_key: 24-bit RGB color to use as transparent color key. | 587 | * @color_key: 24-bit RGB color to use as transparent color key. |
588 | * @return: 0 on success or negative error code on failure. | 588 | * @return: 0 on success or negative error code on failure. |
589 | */ | 589 | */ |
590 | static int sdc_set_color_key(struct mx3fb_data *mx3fb, enum ipu_channel channel, | 590 | static int sdc_set_color_key(struct mx3fb_data *mx3fb, enum ipu_channel channel, |
591 | bool enable, uint32_t color_key) | 591 | bool enable, uint32_t color_key) |
592 | { | 592 | { |
593 | uint32_t reg, sdc_conf; | 593 | uint32_t reg, sdc_conf; |
594 | unsigned long lock_flags; | 594 | unsigned long lock_flags; |
595 | 595 | ||
596 | spin_lock_irqsave(&mx3fb->lock, lock_flags); | 596 | spin_lock_irqsave(&mx3fb->lock, lock_flags); |
597 | 597 | ||
598 | sdc_conf = mx3fb_read_reg(mx3fb, SDC_COM_CONF); | 598 | sdc_conf = mx3fb_read_reg(mx3fb, SDC_COM_CONF); |
599 | if (channel == IDMAC_SDC_0) | 599 | if (channel == IDMAC_SDC_0) |
600 | sdc_conf &= ~SDC_COM_GWSEL; | 600 | sdc_conf &= ~SDC_COM_GWSEL; |
601 | else | 601 | else |
602 | sdc_conf |= SDC_COM_GWSEL; | 602 | sdc_conf |= SDC_COM_GWSEL; |
603 | 603 | ||
604 | if (enable) { | 604 | if (enable) { |
605 | reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0xFF000000L; | 605 | reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0xFF000000L; |
606 | mx3fb_write_reg(mx3fb, reg | (color_key & 0x00FFFFFFL), | 606 | mx3fb_write_reg(mx3fb, reg | (color_key & 0x00FFFFFFL), |
607 | SDC_GW_CTRL); | 607 | SDC_GW_CTRL); |
608 | 608 | ||
609 | sdc_conf |= SDC_COM_KEY_COLOR_G; | 609 | sdc_conf |= SDC_COM_KEY_COLOR_G; |
610 | } else { | 610 | } else { |
611 | sdc_conf &= ~SDC_COM_KEY_COLOR_G; | 611 | sdc_conf &= ~SDC_COM_KEY_COLOR_G; |
612 | } | 612 | } |
613 | mx3fb_write_reg(mx3fb, sdc_conf, SDC_COM_CONF); | 613 | mx3fb_write_reg(mx3fb, sdc_conf, SDC_COM_CONF); |
614 | 614 | ||
615 | spin_unlock_irqrestore(&mx3fb->lock, lock_flags); | 615 | spin_unlock_irqrestore(&mx3fb->lock, lock_flags); |
616 | 616 | ||
617 | return 0; | 617 | return 0; |
618 | } | 618 | } |
619 | 619 | ||
620 | /** | 620 | /** |
621 | * sdc_set_global_alpha() - set global alpha blending modes. | 621 | * sdc_set_global_alpha() - set global alpha blending modes. |
622 | * @mx3fb: mx3fb context. | 622 | * @mx3fb: mx3fb context. |
623 | * @enable: boolean to enable or disable global alpha blending. If disabled, | 623 | * @enable: boolean to enable or disable global alpha blending. If disabled, |
624 | * per pixel blending is used. | 624 | * per pixel blending is used. |
625 | * @alpha: global alpha value. | 625 | * @alpha: global alpha value. |
626 | * @return: 0 on success or negative error code on failure. | 626 | * @return: 0 on success or negative error code on failure. |
627 | */ | 627 | */ |
628 | static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t alpha) | 628 | static int sdc_set_global_alpha(struct mx3fb_data *mx3fb, bool enable, uint8_t alpha) |
629 | { | 629 | { |
630 | uint32_t reg; | 630 | uint32_t reg; |
631 | unsigned long lock_flags; | 631 | unsigned long lock_flags; |
632 | 632 | ||
633 | spin_lock_irqsave(&mx3fb->lock, lock_flags); | 633 | spin_lock_irqsave(&mx3fb->lock, lock_flags); |
634 | 634 | ||
635 | if (enable) { | 635 | if (enable) { |
636 | reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0x00FFFFFFL; | 636 | reg = mx3fb_read_reg(mx3fb, SDC_GW_CTRL) & 0x00FFFFFFL; |
637 | mx3fb_write_reg(mx3fb, reg | ((uint32_t) alpha << 24), SDC_GW_CTRL); | 637 | mx3fb_write_reg(mx3fb, reg | ((uint32_t) alpha << 24), SDC_GW_CTRL); |
638 | 638 | ||
639 | reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF); | 639 | reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF); |
640 | mx3fb_write_reg(mx3fb, reg | SDC_COM_GLB_A, SDC_COM_CONF); | 640 | mx3fb_write_reg(mx3fb, reg | SDC_COM_GLB_A, SDC_COM_CONF); |
641 | } else { | 641 | } else { |
642 | reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF); | 642 | reg = mx3fb_read_reg(mx3fb, SDC_COM_CONF); |
643 | mx3fb_write_reg(mx3fb, reg & ~SDC_COM_GLB_A, SDC_COM_CONF); | 643 | mx3fb_write_reg(mx3fb, reg & ~SDC_COM_GLB_A, SDC_COM_CONF); |
644 | } | 644 | } |
645 | 645 | ||
646 | spin_unlock_irqrestore(&mx3fb->lock, lock_flags); | 646 | spin_unlock_irqrestore(&mx3fb->lock, lock_flags); |
647 | 647 | ||
648 | return 0; | 648 | return 0; |
649 | } | 649 | } |
650 | 650 | ||
651 | static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value) | 651 | static void sdc_set_brightness(struct mx3fb_data *mx3fb, uint8_t value) |
652 | { | 652 | { |
653 | dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value); | 653 | dev_dbg(mx3fb->dev, "%s: value = %d\n", __func__, value); |
654 | /* This might be board-specific */ | 654 | /* This might be board-specific */ |
655 | mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL); | 655 | mx3fb_write_reg(mx3fb, 0x03000000UL | value << 16, SDC_PWM_CTRL); |
656 | return; | 656 | return; |
657 | } | 657 | } |
658 | 658 | ||
659 | static uint32_t bpp_to_pixfmt(int bpp) | 659 | static uint32_t bpp_to_pixfmt(int bpp) |
660 | { | 660 | { |
661 | uint32_t pixfmt = 0; | 661 | uint32_t pixfmt = 0; |
662 | switch (bpp) { | 662 | switch (bpp) { |
663 | case 24: | 663 | case 24: |
664 | pixfmt = IPU_PIX_FMT_BGR24; | 664 | pixfmt = IPU_PIX_FMT_BGR24; |
665 | break; | 665 | break; |
666 | case 32: | 666 | case 32: |
667 | pixfmt = IPU_PIX_FMT_BGR32; | 667 | pixfmt = IPU_PIX_FMT_BGR32; |
668 | break; | 668 | break; |
669 | case 16: | 669 | case 16: |
670 | pixfmt = IPU_PIX_FMT_RGB565; | 670 | pixfmt = IPU_PIX_FMT_RGB565; |
671 | break; | 671 | break; |
672 | } | 672 | } |
673 | return pixfmt; | 673 | return pixfmt; |
674 | } | 674 | } |
675 | 675 | ||
676 | static int mx3fb_blank(int blank, struct fb_info *fbi); | 676 | static int mx3fb_blank(int blank, struct fb_info *fbi); |
677 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, | 677 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, |
678 | bool lock); | 678 | bool lock); |
679 | static int mx3fb_unmap_video_memory(struct fb_info *fbi); | 679 | static int mx3fb_unmap_video_memory(struct fb_info *fbi); |
680 | 680 | ||
681 | /** | 681 | /** |
682 | * mx3fb_set_fix() - set fixed framebuffer parameters from variable settings. | 682 | * mx3fb_set_fix() - set fixed framebuffer parameters from variable settings. |
683 | * @info: framebuffer information pointer | 683 | * @info: framebuffer information pointer |
684 | * @return: 0 on success or negative error code on failure. | 684 | * @return: 0 on success or negative error code on failure. |
685 | */ | 685 | */ |
686 | static int mx3fb_set_fix(struct fb_info *fbi) | 686 | static int mx3fb_set_fix(struct fb_info *fbi) |
687 | { | 687 | { |
688 | struct fb_fix_screeninfo *fix = &fbi->fix; | 688 | struct fb_fix_screeninfo *fix = &fbi->fix; |
689 | struct fb_var_screeninfo *var = &fbi->var; | 689 | struct fb_var_screeninfo *var = &fbi->var; |
690 | 690 | ||
691 | strncpy(fix->id, "DISP3 BG", 8); | 691 | strncpy(fix->id, "DISP3 BG", 8); |
692 | 692 | ||
693 | fix->line_length = var->xres_virtual * var->bits_per_pixel / 8; | 693 | fix->line_length = var->xres_virtual * var->bits_per_pixel / 8; |
694 | 694 | ||
695 | fix->type = FB_TYPE_PACKED_PIXELS; | 695 | fix->type = FB_TYPE_PACKED_PIXELS; |
696 | fix->accel = FB_ACCEL_NONE; | 696 | fix->accel = FB_ACCEL_NONE; |
697 | fix->visual = FB_VISUAL_TRUECOLOR; | 697 | fix->visual = FB_VISUAL_TRUECOLOR; |
698 | fix->xpanstep = 1; | 698 | fix->xpanstep = 1; |
699 | fix->ypanstep = 1; | 699 | fix->ypanstep = 1; |
700 | 700 | ||
701 | return 0; | 701 | return 0; |
702 | } | 702 | } |
703 | 703 | ||
704 | static void mx3fb_dma_done(void *arg) | 704 | static void mx3fb_dma_done(void *arg) |
705 | { | 705 | { |
706 | struct idmac_tx_desc *tx_desc = to_tx_desc(arg); | 706 | struct idmac_tx_desc *tx_desc = to_tx_desc(arg); |
707 | struct dma_chan *chan = tx_desc->txd.chan; | 707 | struct dma_chan *chan = tx_desc->txd.chan; |
708 | struct idmac_channel *ichannel = to_idmac_chan(chan); | 708 | struct idmac_channel *ichannel = to_idmac_chan(chan); |
709 | struct mx3fb_data *mx3fb = ichannel->client; | 709 | struct mx3fb_data *mx3fb = ichannel->client; |
710 | struct mx3fb_info *mx3_fbi = mx3fb->fbi->par; | 710 | struct mx3fb_info *mx3_fbi = mx3fb->fbi->par; |
711 | 711 | ||
712 | dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq); | 712 | dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq); |
713 | 713 | ||
714 | /* We only need one interrupt; it will be re-enabled as needed */ | 714 | /* We only need one interrupt; it will be re-enabled as needed */ |
715 | disable_irq_nosync(ichannel->eof_irq); | 715 | disable_irq_nosync(ichannel->eof_irq); |
716 | 716 | ||
717 | complete(&mx3_fbi->flip_cmpl); | 717 | complete(&mx3_fbi->flip_cmpl); |
718 | } | 718 | } |
719 | 719 | ||
720 | static int __set_par(struct fb_info *fbi, bool lock) | 720 | static int __set_par(struct fb_info *fbi, bool lock) |
721 | { | 721 | { |
722 | u32 mem_len; | 722 | u32 mem_len; |
723 | struct ipu_di_signal_cfg sig_cfg; | 723 | struct ipu_di_signal_cfg sig_cfg; |
724 | enum ipu_panel mode = IPU_PANEL_TFT; | 724 | enum ipu_panel mode = IPU_PANEL_TFT; |
725 | struct mx3fb_info *mx3_fbi = fbi->par; | 725 | struct mx3fb_info *mx3_fbi = fbi->par; |
726 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; | 726 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; |
727 | struct idmac_channel *ichan = mx3_fbi->idmac_channel; | 727 | struct idmac_channel *ichan = mx3_fbi->idmac_channel; |
728 | struct idmac_video_param *video = &ichan->params.video; | 728 | struct idmac_video_param *video = &ichan->params.video; |
729 | struct scatterlist *sg = mx3_fbi->sg; | 729 | struct scatterlist *sg = mx3_fbi->sg; |
730 | 730 | ||
731 | /* Total cleanup */ | 731 | /* Total cleanup */ |
732 | if (mx3_fbi->txd) | 732 | if (mx3_fbi->txd) |
733 | sdc_disable_channel(mx3_fbi); | 733 | sdc_disable_channel(mx3_fbi); |
734 | 734 | ||
735 | mx3fb_set_fix(fbi); | 735 | mx3fb_set_fix(fbi); |
736 | 736 | ||
737 | mem_len = fbi->var.yres_virtual * fbi->fix.line_length; | 737 | mem_len = fbi->var.yres_virtual * fbi->fix.line_length; |
738 | if (mem_len > fbi->fix.smem_len) { | 738 | if (mem_len > fbi->fix.smem_len) { |
739 | if (fbi->fix.smem_start) | 739 | if (fbi->fix.smem_start) |
740 | mx3fb_unmap_video_memory(fbi); | 740 | mx3fb_unmap_video_memory(fbi); |
741 | 741 | ||
742 | if (mx3fb_map_video_memory(fbi, mem_len, lock) < 0) | 742 | if (mx3fb_map_video_memory(fbi, mem_len, lock) < 0) |
743 | return -ENOMEM; | 743 | return -ENOMEM; |
744 | } | 744 | } |
745 | 745 | ||
746 | sg_init_table(&sg[0], 1); | 746 | sg_init_table(&sg[0], 1); |
747 | sg_init_table(&sg[1], 1); | 747 | sg_init_table(&sg[1], 1); |
748 | 748 | ||
749 | sg_dma_address(&sg[0]) = fbi->fix.smem_start; | 749 | sg_dma_address(&sg[0]) = fbi->fix.smem_start; |
750 | sg_set_page(&sg[0], virt_to_page(fbi->screen_base), | 750 | sg_set_page(&sg[0], virt_to_page(fbi->screen_base), |
751 | fbi->fix.smem_len, | 751 | fbi->fix.smem_len, |
752 | offset_in_page(fbi->screen_base)); | 752 | offset_in_page(fbi->screen_base)); |
753 | 753 | ||
754 | if (mx3_fbi->ipu_ch == IDMAC_SDC_0) { | 754 | if (mx3_fbi->ipu_ch == IDMAC_SDC_0) { |
755 | memset(&sig_cfg, 0, sizeof(sig_cfg)); | 755 | memset(&sig_cfg, 0, sizeof(sig_cfg)); |
756 | if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT) | 756 | if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT) |
757 | sig_cfg.Hsync_pol = true; | 757 | sig_cfg.Hsync_pol = true; |
758 | if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT) | 758 | if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT) |
759 | sig_cfg.Vsync_pol = true; | 759 | sig_cfg.Vsync_pol = true; |
760 | if (fbi->var.sync & FB_SYNC_CLK_INVERT) | 760 | if (fbi->var.sync & FB_SYNC_CLK_INVERT) |
761 | sig_cfg.clk_pol = true; | 761 | sig_cfg.clk_pol = true; |
762 | if (fbi->var.sync & FB_SYNC_DATA_INVERT) | 762 | if (fbi->var.sync & FB_SYNC_DATA_INVERT) |
763 | sig_cfg.data_pol = true; | 763 | sig_cfg.data_pol = true; |
764 | if (fbi->var.sync & FB_SYNC_OE_ACT_HIGH) | 764 | if (fbi->var.sync & FB_SYNC_OE_ACT_HIGH) |
765 | sig_cfg.enable_pol = true; | 765 | sig_cfg.enable_pol = true; |
766 | if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN) | 766 | if (fbi->var.sync & FB_SYNC_CLK_IDLE_EN) |
767 | sig_cfg.clkidle_en = true; | 767 | sig_cfg.clkidle_en = true; |
768 | if (fbi->var.sync & FB_SYNC_CLK_SEL_EN) | 768 | if (fbi->var.sync & FB_SYNC_CLK_SEL_EN) |
769 | sig_cfg.clksel_en = true; | 769 | sig_cfg.clksel_en = true; |
770 | if (fbi->var.sync & FB_SYNC_SHARP_MODE) | 770 | if (fbi->var.sync & FB_SYNC_SHARP_MODE) |
771 | mode = IPU_PANEL_SHARP_TFT; | 771 | mode = IPU_PANEL_SHARP_TFT; |
772 | 772 | ||
773 | dev_dbg(fbi->device, "pixclock = %u Hz\n", | 773 | dev_dbg(fbi->device, "pixclock = %u Hz\n", |
774 | (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL)); | 774 | (u32) (PICOS2KHZ(fbi->var.pixclock) * 1000UL)); |
775 | 775 | ||
776 | if (sdc_init_panel(mx3fb, mode, | 776 | if (sdc_init_panel(mx3fb, mode, |
777 | (PICOS2KHZ(fbi->var.pixclock)) * 1000UL, | 777 | (PICOS2KHZ(fbi->var.pixclock)) * 1000UL, |
778 | fbi->var.xres, fbi->var.yres, | 778 | fbi->var.xres, fbi->var.yres, |
779 | (fbi->var.sync & FB_SYNC_SWAP_RGB) ? | 779 | (fbi->var.sync & FB_SYNC_SWAP_RGB) ? |
780 | IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666, | 780 | IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666, |
781 | fbi->var.left_margin, | 781 | fbi->var.left_margin, |
782 | fbi->var.hsync_len, | 782 | fbi->var.hsync_len, |
783 | fbi->var.right_margin + | 783 | fbi->var.right_margin + |
784 | fbi->var.hsync_len, | 784 | fbi->var.hsync_len, |
785 | fbi->var.upper_margin, | 785 | fbi->var.upper_margin, |
786 | fbi->var.vsync_len, | 786 | fbi->var.vsync_len, |
787 | fbi->var.lower_margin + | 787 | fbi->var.lower_margin + |
788 | fbi->var.vsync_len, sig_cfg) != 0) { | 788 | fbi->var.vsync_len, sig_cfg) != 0) { |
789 | dev_err(fbi->device, | 789 | dev_err(fbi->device, |
790 | "mx3fb: Error initializing panel.\n"); | 790 | "mx3fb: Error initializing panel.\n"); |
791 | return -EINVAL; | 791 | return -EINVAL; |
792 | } | 792 | } |
793 | } | 793 | } |
794 | 794 | ||
795 | sdc_set_window_pos(mx3fb, mx3_fbi->ipu_ch, 0, 0); | 795 | sdc_set_window_pos(mx3fb, mx3_fbi->ipu_ch, 0, 0); |
796 | 796 | ||
797 | mx3_fbi->cur_ipu_buf = 0; | 797 | mx3_fbi->cur_ipu_buf = 0; |
798 | 798 | ||
799 | video->out_pixel_fmt = bpp_to_pixfmt(fbi->var.bits_per_pixel); | 799 | video->out_pixel_fmt = bpp_to_pixfmt(fbi->var.bits_per_pixel); |
800 | video->out_width = fbi->var.xres; | 800 | video->out_width = fbi->var.xres; |
801 | video->out_height = fbi->var.yres; | 801 | video->out_height = fbi->var.yres; |
802 | video->out_stride = fbi->var.xres_virtual; | 802 | video->out_stride = fbi->var.xres_virtual; |
803 | 803 | ||
804 | if (mx3_fbi->blank == FB_BLANK_UNBLANK) | 804 | if (mx3_fbi->blank == FB_BLANK_UNBLANK) |
805 | sdc_enable_channel(mx3_fbi); | 805 | sdc_enable_channel(mx3_fbi); |
806 | 806 | ||
807 | return 0; | 807 | return 0; |
808 | } | 808 | } |
809 | 809 | ||
810 | /** | 810 | /** |
811 | * mx3fb_set_par() - set framebuffer parameters and change the operating mode. | 811 | * mx3fb_set_par() - set framebuffer parameters and change the operating mode. |
812 | * @fbi: framebuffer information pointer. | 812 | * @fbi: framebuffer information pointer. |
813 | * @return: 0 on success or negative error code on failure. | 813 | * @return: 0 on success or negative error code on failure. |
814 | */ | 814 | */ |
815 | static int mx3fb_set_par(struct fb_info *fbi) | 815 | static int mx3fb_set_par(struct fb_info *fbi) |
816 | { | 816 | { |
817 | struct mx3fb_info *mx3_fbi = fbi->par; | 817 | struct mx3fb_info *mx3_fbi = fbi->par; |
818 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; | 818 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; |
819 | struct idmac_channel *ichan = mx3_fbi->idmac_channel; | 819 | struct idmac_channel *ichan = mx3_fbi->idmac_channel; |
820 | int ret; | 820 | int ret; |
821 | 821 | ||
822 | dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+'); | 822 | dev_dbg(mx3fb->dev, "%s [%c]\n", __func__, list_empty(&ichan->queue) ? '-' : '+'); |
823 | 823 | ||
824 | mutex_lock(&mx3_fbi->mutex); | 824 | mutex_lock(&mx3_fbi->mutex); |
825 | 825 | ||
826 | ret = __set_par(fbi, true); | 826 | ret = __set_par(fbi, true); |
827 | 827 | ||
828 | mutex_unlock(&mx3_fbi->mutex); | 828 | mutex_unlock(&mx3_fbi->mutex); |
829 | 829 | ||
830 | return ret; | 830 | return ret; |
831 | } | 831 | } |
832 | 832 | ||
833 | /** | 833 | /** |
834 | * mx3fb_check_var() - check and adjust framebuffer variable parameters. | 834 | * mx3fb_check_var() - check and adjust framebuffer variable parameters. |
835 | * @var: framebuffer variable parameters | 835 | * @var: framebuffer variable parameters |
836 | * @fbi: framebuffer information pointer | 836 | * @fbi: framebuffer information pointer |
837 | */ | 837 | */ |
838 | static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) | 838 | static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) |
839 | { | 839 | { |
840 | struct mx3fb_info *mx3_fbi = fbi->par; | 840 | struct mx3fb_info *mx3_fbi = fbi->par; |
841 | u32 vtotal; | 841 | u32 vtotal; |
842 | u32 htotal; | 842 | u32 htotal; |
843 | 843 | ||
844 | dev_dbg(fbi->device, "%s\n", __func__); | 844 | dev_dbg(fbi->device, "%s\n", __func__); |
845 | 845 | ||
846 | if (var->xres_virtual < var->xres) | 846 | if (var->xres_virtual < var->xres) |
847 | var->xres_virtual = var->xres; | 847 | var->xres_virtual = var->xres; |
848 | if (var->yres_virtual < var->yres) | 848 | if (var->yres_virtual < var->yres) |
849 | var->yres_virtual = var->yres; | 849 | var->yres_virtual = var->yres; |
850 | 850 | ||
851 | if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) && | 851 | if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) && |
852 | (var->bits_per_pixel != 16)) | 852 | (var->bits_per_pixel != 16)) |
853 | var->bits_per_pixel = default_bpp; | 853 | var->bits_per_pixel = default_bpp; |
854 | 854 | ||
855 | switch (var->bits_per_pixel) { | 855 | switch (var->bits_per_pixel) { |
856 | case 16: | 856 | case 16: |
857 | var->red.length = 5; | 857 | var->red.length = 5; |
858 | var->red.offset = 11; | 858 | var->red.offset = 11; |
859 | var->red.msb_right = 0; | 859 | var->red.msb_right = 0; |
860 | 860 | ||
861 | var->green.length = 6; | 861 | var->green.length = 6; |
862 | var->green.offset = 5; | 862 | var->green.offset = 5; |
863 | var->green.msb_right = 0; | 863 | var->green.msb_right = 0; |
864 | 864 | ||
865 | var->blue.length = 5; | 865 | var->blue.length = 5; |
866 | var->blue.offset = 0; | 866 | var->blue.offset = 0; |
867 | var->blue.msb_right = 0; | 867 | var->blue.msb_right = 0; |
868 | 868 | ||
869 | var->transp.length = 0; | 869 | var->transp.length = 0; |
870 | var->transp.offset = 0; | 870 | var->transp.offset = 0; |
871 | var->transp.msb_right = 0; | 871 | var->transp.msb_right = 0; |
872 | break; | 872 | break; |
873 | case 24: | 873 | case 24: |
874 | var->red.length = 8; | 874 | var->red.length = 8; |
875 | var->red.offset = 16; | 875 | var->red.offset = 16; |
876 | var->red.msb_right = 0; | 876 | var->red.msb_right = 0; |
877 | 877 | ||
878 | var->green.length = 8; | 878 | var->green.length = 8; |
879 | var->green.offset = 8; | 879 | var->green.offset = 8; |
880 | var->green.msb_right = 0; | 880 | var->green.msb_right = 0; |
881 | 881 | ||
882 | var->blue.length = 8; | 882 | var->blue.length = 8; |
883 | var->blue.offset = 0; | 883 | var->blue.offset = 0; |
884 | var->blue.msb_right = 0; | 884 | var->blue.msb_right = 0; |
885 | 885 | ||
886 | var->transp.length = 0; | 886 | var->transp.length = 0; |
887 | var->transp.offset = 0; | 887 | var->transp.offset = 0; |
888 | var->transp.msb_right = 0; | 888 | var->transp.msb_right = 0; |
889 | break; | 889 | break; |
890 | case 32: | 890 | case 32: |
891 | var->red.length = 8; | 891 | var->red.length = 8; |
892 | var->red.offset = 16; | 892 | var->red.offset = 16; |
893 | var->red.msb_right = 0; | 893 | var->red.msb_right = 0; |
894 | 894 | ||
895 | var->green.length = 8; | 895 | var->green.length = 8; |
896 | var->green.offset = 8; | 896 | var->green.offset = 8; |
897 | var->green.msb_right = 0; | 897 | var->green.msb_right = 0; |
898 | 898 | ||
899 | var->blue.length = 8; | 899 | var->blue.length = 8; |
900 | var->blue.offset = 0; | 900 | var->blue.offset = 0; |
901 | var->blue.msb_right = 0; | 901 | var->blue.msb_right = 0; |
902 | 902 | ||
903 | var->transp.length = 8; | 903 | var->transp.length = 8; |
904 | var->transp.offset = 24; | 904 | var->transp.offset = 24; |
905 | var->transp.msb_right = 0; | 905 | var->transp.msb_right = 0; |
906 | break; | 906 | break; |
907 | } | 907 | } |
908 | 908 | ||
909 | if (var->pixclock < 1000) { | 909 | if (var->pixclock < 1000) { |
910 | htotal = var->xres + var->right_margin + var->hsync_len + | 910 | htotal = var->xres + var->right_margin + var->hsync_len + |
911 | var->left_margin; | 911 | var->left_margin; |
912 | vtotal = var->yres + var->lower_margin + var->vsync_len + | 912 | vtotal = var->yres + var->lower_margin + var->vsync_len + |
913 | var->upper_margin; | 913 | var->upper_margin; |
914 | var->pixclock = (vtotal * htotal * 6UL) / 100UL; | 914 | var->pixclock = (vtotal * htotal * 6UL) / 100UL; |
915 | var->pixclock = KHZ2PICOS(var->pixclock); | 915 | var->pixclock = KHZ2PICOS(var->pixclock); |
916 | dev_dbg(fbi->device, "pixclock set for 60Hz refresh = %u ps\n", | 916 | dev_dbg(fbi->device, "pixclock set for 60Hz refresh = %u ps\n", |
917 | var->pixclock); | 917 | var->pixclock); |
918 | } | 918 | } |
919 | 919 | ||
920 | var->height = -1; | 920 | var->height = -1; |
921 | var->width = -1; | 921 | var->width = -1; |
922 | var->grayscale = 0; | 922 | var->grayscale = 0; |
923 | 923 | ||
924 | /* Preserve sync flags */ | 924 | /* Preserve sync flags */ |
925 | var->sync |= mx3_fbi->sync; | 925 | var->sync |= mx3_fbi->sync; |
926 | mx3_fbi->sync |= var->sync; | 926 | mx3_fbi->sync |= var->sync; |
927 | 927 | ||
928 | return 0; | 928 | return 0; |
929 | } | 929 | } |
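A worked example for the 60 Hz pixclock fallback above (illustrative 640x480 timings): with htotal = 800 and vtotal = 525, pixclock = 525 * 800 * 6 / 100 = 25200 kHz, and KHZ2PICOS(25200) ≈ 39683 ps, close to the canonical 39722 ps VGA dot clock.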
930 | 930 | ||
931 | static u32 chan_to_field(unsigned int chan, struct fb_bitfield *bf) | 931 | static u32 chan_to_field(unsigned int chan, struct fb_bitfield *bf) |
932 | { | 932 | { |
933 | chan &= 0xffff; | 933 | chan &= 0xffff; |
934 | chan >>= 16 - bf->length; | 934 | chan >>= 16 - bf->length; |
935 | return chan << bf->offset; | 935 | return chan << bf->offset; |
936 | } | 936 | } |
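A quick trace of chan_to_field() for the RGB565 red field configured in mx3fb_check_var() above (the input 0xffff is a hypothetical full-intensity value):

    /* chan = 0xffff, bf = &var->red with .length = 5, .offset = 11 */
    chan &= 0xffff;      /* 0xffff */
    chan >>= 16 - 5;     /* 0x001f: keep the 5 most significant bits */
    return chan << 11;   /* 0xf800: the red mask of an RGB565 pixel */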
937 | 937 | ||
938 | static int mx3fb_setcolreg(unsigned int regno, unsigned int red, | 938 | static int mx3fb_setcolreg(unsigned int regno, unsigned int red, |
939 | unsigned int green, unsigned int blue, | 939 | unsigned int green, unsigned int blue, |
940 | unsigned int trans, struct fb_info *fbi) | 940 | unsigned int trans, struct fb_info *fbi) |
941 | { | 941 | { |
942 | struct mx3fb_info *mx3_fbi = fbi->par; | 942 | struct mx3fb_info *mx3_fbi = fbi->par; |
943 | u32 val; | 943 | u32 val; |
944 | int ret = 1; | 944 | int ret = 1; |
945 | 945 | ||
946 | dev_dbg(fbi->device, "%s, regno = %u\n", __func__, regno); | 946 | dev_dbg(fbi->device, "%s, regno = %u\n", __func__, regno); |
947 | 947 | ||
948 | mutex_lock(&mx3_fbi->mutex); | 948 | mutex_lock(&mx3_fbi->mutex); |
949 | /* | 949 | /* |
950 | * If greyscale is true, then we convert the RGB value | 950 | * If greyscale is true, then we convert the RGB value |
951 | * to greyscale no matter what visual we are using. | 951 | * to greyscale no matter what visual we are using. |
952 | */ | 952 | */ |
953 | if (fbi->var.grayscale) | 953 | if (fbi->var.grayscale) |
954 | red = green = blue = (19595 * red + 38470 * green + | 954 | red = green = blue = (19595 * red + 38470 * green + |
955 | 7471 * blue) >> 16; | 955 | 7471 * blue) >> 16; |
956 | switch (fbi->fix.visual) { | 956 | switch (fbi->fix.visual) { |
957 | case FB_VISUAL_TRUECOLOR: | 957 | case FB_VISUAL_TRUECOLOR: |
958 | /* | 958 | /* |
959 | * 16-bit True Colour. We encode the RGB value | 959 | * 16-bit True Colour. We encode the RGB value |
960 | * according to the RGB bitfield information. | 960 | * according to the RGB bitfield information. |
961 | */ | 961 | */ |
962 | if (regno < 16) { | 962 | if (regno < 16) { |
963 | u32 *pal = fbi->pseudo_palette; | 963 | u32 *pal = fbi->pseudo_palette; |
964 | 964 | ||
965 | val = chan_to_field(red, &fbi->var.red); | 965 | val = chan_to_field(red, &fbi->var.red); |
966 | val |= chan_to_field(green, &fbi->var.green); | 966 | val |= chan_to_field(green, &fbi->var.green); |
967 | val |= chan_to_field(blue, &fbi->var.blue); | 967 | val |= chan_to_field(blue, &fbi->var.blue); |
968 | 968 | ||
969 | pal[regno] = val; | 969 | pal[regno] = val; |
970 | 970 | ||
971 | ret = 0; | 971 | ret = 0; |
972 | } | 972 | } |
973 | break; | 973 | break; |
974 | 974 | ||
975 | case FB_VISUAL_STATIC_PSEUDOCOLOR: | 975 | case FB_VISUAL_STATIC_PSEUDOCOLOR: |
976 | case FB_VISUAL_PSEUDOCOLOR: | 976 | case FB_VISUAL_PSEUDOCOLOR: |
977 | break; | 977 | break; |
978 | } | 978 | } |
979 | mutex_unlock(&mx3_fbi->mutex); | 979 | mutex_unlock(&mx3_fbi->mutex); |
980 | 980 | ||
981 | return ret; | 981 | return ret; |
982 | } | 982 | } |
983 | 983 | ||
984 | static void __blank(int blank, struct fb_info *fbi) | 984 | static void __blank(int blank, struct fb_info *fbi) |
985 | { | 985 | { |
986 | struct mx3fb_info *mx3_fbi = fbi->par; | 986 | struct mx3fb_info *mx3_fbi = fbi->par; |
987 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; | 987 | struct mx3fb_data *mx3fb = mx3_fbi->mx3fb; |
988 | 988 | ||
989 | mx3_fbi->blank = blank; | 989 | mx3_fbi->blank = blank; |
990 | 990 | ||
991 | switch (blank) { | 991 | switch (blank) { |
992 | case FB_BLANK_POWERDOWN: | 992 | case FB_BLANK_POWERDOWN: |
993 | case FB_BLANK_VSYNC_SUSPEND: | 993 | case FB_BLANK_VSYNC_SUSPEND: |
994 | case FB_BLANK_HSYNC_SUSPEND: | 994 | case FB_BLANK_HSYNC_SUSPEND: |
995 | case FB_BLANK_NORMAL: | 995 | case FB_BLANK_NORMAL: |
996 | sdc_set_brightness(mx3fb, 0); | 996 | sdc_set_brightness(mx3fb, 0); |
997 | memset((char *)fbi->screen_base, 0, fbi->fix.smem_len); | 997 | memset((char *)fbi->screen_base, 0, fbi->fix.smem_len); |
998 | /* Give LCD time to update - enough for 50 and 60 Hz */ | 998 | /* Give LCD time to update - enough for 50 and 60 Hz */ |
999 | msleep(25); | 999 | msleep(25); |
1000 | sdc_disable_channel(mx3_fbi); | 1000 | sdc_disable_channel(mx3_fbi); |
1001 | break; | 1001 | break; |
1002 | case FB_BLANK_UNBLANK: | 1002 | case FB_BLANK_UNBLANK: |
1003 | sdc_enable_channel(mx3_fbi); | 1003 | sdc_enable_channel(mx3_fbi); |
1004 | sdc_set_brightness(mx3fb, mx3fb->backlight_level); | 1004 | sdc_set_brightness(mx3fb, mx3fb->backlight_level); |
1005 | break; | 1005 | break; |
1006 | } | 1006 | } |
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | /** | 1009 | /** |
1010 | * mx3fb_blank() - blank the display. | 1010 | * mx3fb_blank() - blank the display. |
1011 | */ | 1011 | */ |
1012 | static int mx3fb_blank(int blank, struct fb_info *fbi) | 1012 | static int mx3fb_blank(int blank, struct fb_info *fbi) |
1013 | { | 1013 | { |
1014 | struct mx3fb_info *mx3_fbi = fbi->par; | 1014 | struct mx3fb_info *mx3_fbi = fbi->par; |
1015 | 1015 | ||
1016 | dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__, | 1016 | dev_dbg(fbi->device, "%s, blank = %d, base %p, len %u\n", __func__, |
1017 | blank, fbi->screen_base, fbi->fix.smem_len); | 1017 | blank, fbi->screen_base, fbi->fix.smem_len); |
1018 | 1018 | ||
1019 | if (mx3_fbi->blank == blank) | 1019 | if (mx3_fbi->blank == blank) |
1020 | return 0; | 1020 | return 0; |
1021 | 1021 | ||
1022 | mutex_lock(&mx3_fbi->mutex); | 1022 | mutex_lock(&mx3_fbi->mutex); |
1023 | __blank(blank, fbi); | 1023 | __blank(blank, fbi); |
1024 | mutex_unlock(&mx3_fbi->mutex); | 1024 | mutex_unlock(&mx3_fbi->mutex); |
1025 | 1025 | ||
1026 | return 0; | 1026 | return 0; |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | /** | 1029 | /** |
1030 | * mx3fb_pan_display() - pan or wrap the display | 1030 | * mx3fb_pan_display() - pan or wrap the display |
1031 | * @var: variable screen buffer information. | 1031 | * @var: variable screen buffer information. |
1032 | * @info: framebuffer information pointer. | 1032 | * @info: framebuffer information pointer. |
1033 | * | 1033 | * |
1034 | * We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag | 1034 | * We look only at xoffset, yoffset and the FB_VMODE_YWRAP flag |
1035 | */ | 1035 | */ |
1036 | static int mx3fb_pan_display(struct fb_var_screeninfo *var, | 1036 | static int mx3fb_pan_display(struct fb_var_screeninfo *var, |
1037 | struct fb_info *fbi) | 1037 | struct fb_info *fbi) |
1038 | { | 1038 | { |
1039 | struct mx3fb_info *mx3_fbi = fbi->par; | 1039 | struct mx3fb_info *mx3_fbi = fbi->par; |
1040 | u32 y_bottom; | 1040 | u32 y_bottom; |
1041 | unsigned long base; | 1041 | unsigned long base; |
1042 | off_t offset; | 1042 | off_t offset; |
1043 | dma_cookie_t cookie; | 1043 | dma_cookie_t cookie; |
1044 | struct scatterlist *sg = mx3_fbi->sg; | 1044 | struct scatterlist *sg = mx3_fbi->sg; |
1045 | struct dma_chan *dma_chan = &mx3_fbi->idmac_channel->dma_chan; | 1045 | struct dma_chan *dma_chan = &mx3_fbi->idmac_channel->dma_chan; |
1046 | struct dma_async_tx_descriptor *txd; | 1046 | struct dma_async_tx_descriptor *txd; |
1047 | int ret; | 1047 | int ret; |
1048 | 1048 | ||
1049 | dev_dbg(fbi->device, "%s [%c]\n", __func__, | 1049 | dev_dbg(fbi->device, "%s [%c]\n", __func__, |
1050 | list_empty(&mx3_fbi->idmac_channel->queue) ? '-' : '+'); | 1050 | list_empty(&mx3_fbi->idmac_channel->queue) ? '-' : '+'); |
1051 | 1051 | ||
1052 | if (var->xoffset > 0) { | 1052 | if (var->xoffset > 0) { |
1053 | dev_dbg(fbi->device, "x panning not supported\n"); | 1053 | dev_dbg(fbi->device, "x panning not supported\n"); |
1054 | return -EINVAL; | 1054 | return -EINVAL; |
1055 | } | 1055 | } |
1056 | 1056 | ||
1057 | if (fbi->var.xoffset == var->xoffset && | 1057 | if (fbi->var.xoffset == var->xoffset && |
1058 | fbi->var.yoffset == var->yoffset) | 1058 | fbi->var.yoffset == var->yoffset) |
1059 | return 0; /* No change, do nothing */ | 1059 | return 0; /* No change, do nothing */ |
1060 | 1060 | ||
1061 | y_bottom = var->yoffset; | 1061 | y_bottom = var->yoffset; |
1062 | 1062 | ||
1063 | if (!(var->vmode & FB_VMODE_YWRAP)) | 1063 | if (!(var->vmode & FB_VMODE_YWRAP)) |
1064 | y_bottom += var->yres; | 1064 | y_bottom += var->yres; |
1065 | 1065 | ||
1066 | if (y_bottom > fbi->var.yres_virtual) | 1066 | if (y_bottom > fbi->var.yres_virtual) |
1067 | return -EINVAL; | 1067 | return -EINVAL; |
1068 | 1068 | ||
1069 | mutex_lock(&mx3_fbi->mutex); | 1069 | mutex_lock(&mx3_fbi->mutex); |
1070 | 1070 | ||
1071 | offset = (var->yoffset * var->xres_virtual + var->xoffset) * | 1071 | offset = (var->yoffset * var->xres_virtual + var->xoffset) * |
1072 | (var->bits_per_pixel / 8); | 1072 | (var->bits_per_pixel / 8); |
1073 | base = fbi->fix.smem_start + offset; | 1073 | base = fbi->fix.smem_start + offset; |
1074 | 1074 | ||
1075 | dev_dbg(fbi->device, "Updating SDC BG buf %d address=0x%08lX\n", | 1075 | dev_dbg(fbi->device, "Updating SDC BG buf %d address=0x%08lX\n", |
1076 | mx3_fbi->cur_ipu_buf, base); | 1076 | mx3_fbi->cur_ipu_buf, base); |
1077 | 1077 | ||
1078 | /* | 1078 | /* |
1079 | * We enable the End of Frame interrupt, which will free a tx-descriptor, | 1079 | * We enable the End of Frame interrupt, which will free a tx-descriptor, |
1080 | * which we will need for the next device_prep_slave_sg(). The | 1080 | * which we will need for the next device_prep_slave_sg(). The |
1081 | * IRQ-handler will disable the IRQ again. | 1081 | * IRQ-handler will disable the IRQ again. |
1082 | */ | 1082 | */ |
1083 | init_completion(&mx3_fbi->flip_cmpl); | 1083 | init_completion(&mx3_fbi->flip_cmpl); |
1084 | enable_irq(mx3_fbi->idmac_channel->eof_irq); | 1084 | enable_irq(mx3_fbi->idmac_channel->eof_irq); |
1085 | 1085 | ||
1086 | ret = wait_for_completion_timeout(&mx3_fbi->flip_cmpl, HZ / 10); | 1086 | ret = wait_for_completion_timeout(&mx3_fbi->flip_cmpl, HZ / 10); |
1087 | if (ret <= 0) { | 1087 | if (ret <= 0) { |
1088 | mutex_unlock(&mx3_fbi->mutex); | 1088 | mutex_unlock(&mx3_fbi->mutex); |
1089 | dev_info(fbi->device, "Panning failed due to %s\n", ret < 0 ? | 1089 | dev_info(fbi->device, "Panning failed due to %s\n", ret < 0 ? |
1090 | "user interrupt" : "timeout"); | 1090 | "user interrupt" : "timeout"); |
1091 | disable_irq(mx3_fbi->idmac_channel->eof_irq); | 1091 | disable_irq(mx3_fbi->idmac_channel->eof_irq); |
1092 | return ret ? : -ETIMEDOUT; | 1092 | return ret ? : -ETIMEDOUT; |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | mx3_fbi->cur_ipu_buf = !mx3_fbi->cur_ipu_buf; | 1095 | mx3_fbi->cur_ipu_buf = !mx3_fbi->cur_ipu_buf; |
1096 | 1096 | ||
1097 | sg_dma_address(&sg[mx3_fbi->cur_ipu_buf]) = base; | 1097 | sg_dma_address(&sg[mx3_fbi->cur_ipu_buf]) = base; |
1098 | sg_set_page(&sg[mx3_fbi->cur_ipu_buf], | 1098 | sg_set_page(&sg[mx3_fbi->cur_ipu_buf], |
1099 | virt_to_page(fbi->screen_base + offset), fbi->fix.smem_len, | 1099 | virt_to_page(fbi->screen_base + offset), fbi->fix.smem_len, |
1100 | offset_in_page(fbi->screen_base + offset)); | 1100 | offset_in_page(fbi->screen_base + offset)); |
1101 | 1101 | ||
1102 | if (mx3_fbi->txd) | 1102 | if (mx3_fbi->txd) |
1103 | async_tx_ack(mx3_fbi->txd); | 1103 | async_tx_ack(mx3_fbi->txd); |
1104 | 1104 | ||
1105 | txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg + | 1105 | txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg + |
1106 | mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT); | 1106 | mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT); |
1107 | if (!txd) { | 1107 | if (!txd) { |
1108 | dev_err(fbi->device, | 1108 | dev_err(fbi->device, |
1109 | "Error preparing a DMA transaction descriptor.\n"); | 1109 | "Error preparing a DMA transaction descriptor.\n"); |
1110 | mutex_unlock(&mx3_fbi->mutex); | 1110 | mutex_unlock(&mx3_fbi->mutex); |
1111 | return -EIO; | 1111 | return -EIO; |
1112 | } | 1112 | } |
1113 | 1113 | ||
1114 | txd->callback_param = txd; | 1114 | txd->callback_param = txd; |
1115 | txd->callback = mx3fb_dma_done; | 1115 | txd->callback = mx3fb_dma_done; |
1116 | 1116 | ||
1117 | /* | 1117 | /* |
1118 | * Emulate original mx3fb behaviour: each new call to idmac_tx_submit() | 1118 | * Emulate original mx3fb behaviour: each new call to idmac_tx_submit() |
1119 | * should switch to another buffer | 1119 | * should switch to another buffer |
1120 | */ | 1120 | */ |
1121 | cookie = txd->tx_submit(txd); | 1121 | cookie = txd->tx_submit(txd); |
1122 | dev_dbg(fbi->device, "%d: Submit %p #%d\n", __LINE__, txd, cookie); | 1122 | dev_dbg(fbi->device, "%d: Submit %p #%d\n", __LINE__, txd, cookie); |
1123 | if (cookie < 0) { | 1123 | if (cookie < 0) { |
1124 | dev_err(fbi->device, | 1124 | dev_err(fbi->device, |
1125 | "Error updating SDC buf %d to address=0x%08lX\n", | 1125 | "Error updating SDC buf %d to address=0x%08lX\n", |
1126 | mx3_fbi->cur_ipu_buf, base); | 1126 | mx3_fbi->cur_ipu_buf, base); |
1127 | mutex_unlock(&mx3_fbi->mutex); | 1127 | mutex_unlock(&mx3_fbi->mutex); |
1128 | return -EIO; | 1128 | return -EIO; |
1129 | } | 1129 | } |
1130 | 1130 | ||
1131 | mx3_fbi->txd = txd; | 1131 | mx3_fbi->txd = txd; |
1132 | 1132 | ||
1133 | fbi->var.xoffset = var->xoffset; | 1133 | fbi->var.xoffset = var->xoffset; |
1134 | fbi->var.yoffset = var->yoffset; | 1134 | fbi->var.yoffset = var->yoffset; |
1135 | 1135 | ||
1136 | if (var->vmode & FB_VMODE_YWRAP) | 1136 | if (var->vmode & FB_VMODE_YWRAP) |
1137 | fbi->var.vmode |= FB_VMODE_YWRAP; | 1137 | fbi->var.vmode |= FB_VMODE_YWRAP; |
1138 | else | 1138 | else |
1139 | fbi->var.vmode &= ~FB_VMODE_YWRAP; | 1139 | fbi->var.vmode &= ~FB_VMODE_YWRAP; |
1140 | 1140 | ||
1141 | mutex_unlock(&mx3_fbi->mutex); | 1141 | mutex_unlock(&mx3_fbi->mutex); |
1142 | 1142 | ||
1143 | dev_dbg(fbi->device, "Update complete\n"); | 1143 | dev_dbg(fbi->device, "Update complete\n"); |
1144 | 1144 | ||
1145 | return 0; | 1145 | return 0; |
1146 | } | 1146 | } |
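In short, the pan path above: (1) arms the EOF interrupt and waits up to 100 ms (HZ / 10) for the current frame to complete, (2) flips cur_ipu_buf and points the idle scatterlist entry at the new base address, (3) acks the previous descriptor and submits a fresh one, emulating the original driver's buffer toggle on each idmac_tx_submit() call.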
1147 | 1147 | ||
1148 | /* | 1148 | /* |
1149 | * This structure contains the pointers to the control functions that are | 1149 | * This structure contains the pointers to the control functions that are |
1150 | * invoked by the core framebuffer driver to perform operations like | 1150 | * invoked by the core framebuffer driver to perform operations like |
1151 | * blitting, rectangle filling, copy regions and cursor definition. | 1151 | * blitting, rectangle filling, copy regions and cursor definition. |
1152 | */ | 1152 | */ |
1153 | static struct fb_ops mx3fb_ops = { | 1153 | static struct fb_ops mx3fb_ops = { |
1154 | .owner = THIS_MODULE, | 1154 | .owner = THIS_MODULE, |
1155 | .fb_set_par = mx3fb_set_par, | 1155 | .fb_set_par = mx3fb_set_par, |
1156 | .fb_check_var = mx3fb_check_var, | 1156 | .fb_check_var = mx3fb_check_var, |
1157 | .fb_setcolreg = mx3fb_setcolreg, | 1157 | .fb_setcolreg = mx3fb_setcolreg, |
1158 | .fb_pan_display = mx3fb_pan_display, | 1158 | .fb_pan_display = mx3fb_pan_display, |
1159 | .fb_fillrect = cfb_fillrect, | 1159 | .fb_fillrect = cfb_fillrect, |
1160 | .fb_copyarea = cfb_copyarea, | 1160 | .fb_copyarea = cfb_copyarea, |
1161 | .fb_imageblit = cfb_imageblit, | 1161 | .fb_imageblit = cfb_imageblit, |
1162 | .fb_blank = mx3fb_blank, | 1162 | .fb_blank = mx3fb_blank, |
1163 | }; | 1163 | }; |
1164 | 1164 | ||
1165 | #ifdef CONFIG_PM | 1165 | #ifdef CONFIG_PM |
1166 | /* | 1166 | /* |
1167 | * Power management hooks. Note that we won't be called from IRQ context, | 1167 | * Power management hooks. Note that we won't be called from IRQ context, |
1168 | * unlike the blank functions above, so we may sleep. | 1168 | * unlike the blank functions above, so we may sleep. |
1169 | */ | 1169 | */ |
1170 | 1170 | ||
1171 | /* | 1171 | /* |
1172 | * Suspends the framebuffer and blanks the screen. Power management support | 1172 | * Suspends the framebuffer and blanks the screen. Power management support |
1173 | */ | 1173 | */ |
1174 | static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state) | 1174 | static int mx3fb_suspend(struct platform_device *pdev, pm_message_t state) |
1175 | { | 1175 | { |
1176 | struct mx3fb_data *mx3fb = platform_get_drvdata(pdev); | 1176 | struct mx3fb_data *mx3fb = platform_get_drvdata(pdev); |
1177 | struct mx3fb_info *mx3_fbi = mx3fb->fbi->par; | 1177 | struct mx3fb_info *mx3_fbi = mx3fb->fbi->par; |
1178 | 1178 | ||
1179 | acquire_console_sem(); | 1179 | acquire_console_sem(); |
1180 | fb_set_suspend(mx3fb->fbi, 1); | 1180 | fb_set_suspend(mx3fb->fbi, 1); |
1181 | release_console_sem(); | 1181 | release_console_sem(); |
1182 | 1182 | ||
1183 | if (mx3_fbi->blank == FB_BLANK_UNBLANK) { | 1183 | if (mx3_fbi->blank == FB_BLANK_UNBLANK) { |
1184 | sdc_disable_channel(mx3_fbi); | 1184 | sdc_disable_channel(mx3_fbi); |
1185 | sdc_set_brightness(mx3fb, 0); | 1185 | sdc_set_brightness(mx3fb, 0); |
1186 | 1186 | ||
1187 | } | 1187 | } |
1188 | return 0; | 1188 | return 0; |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | /* | 1191 | /* |
1192 | * Resumes the framebuffer and unblanks the screen. Power management support | 1192 | * Resumes the framebuffer and unblanks the screen. Power management support |
1193 | */ | 1193 | */ |
1194 | static int mx3fb_resume(struct platform_device *pdev) | 1194 | static int mx3fb_resume(struct platform_device *pdev) |
1195 | { | 1195 | { |
1196 | struct mx3fb_data *mx3fb = platform_get_drvdata(pdev); | 1196 | struct mx3fb_data *mx3fb = platform_get_drvdata(pdev); |
1197 | struct mx3fb_info *mx3_fbi = mx3fb->fbi->par; | 1197 | struct mx3fb_info *mx3_fbi = mx3fb->fbi->par; |
1198 | 1198 | ||
1199 | if (mx3_fbi->blank == FB_BLANK_UNBLANK) { | 1199 | if (mx3_fbi->blank == FB_BLANK_UNBLANK) { |
1200 | sdc_enable_channel(mx3_fbi); | 1200 | sdc_enable_channel(mx3_fbi); |
1201 | sdc_set_brightness(mx3fb, mx3fb->backlight_level); | 1201 | sdc_set_brightness(mx3fb, mx3fb->backlight_level); |
1202 | } | 1202 | } |
1203 | 1203 | ||
1204 | acquire_console_sem(); | 1204 | acquire_console_sem(); |
1205 | fb_set_suspend(mx3fb->fbi, 0); | 1205 | fb_set_suspend(mx3fb->fbi, 0); |
1206 | release_console_sem(); | 1206 | release_console_sem(); |
1207 | 1207 | ||
1208 | return 0; | 1208 | return 0; |
1209 | } | 1209 | } |
1210 | #else | 1210 | #else |
1211 | #define mx3fb_suspend NULL | 1211 | #define mx3fb_suspend NULL |
1212 | #define mx3fb_resume NULL | 1212 | #define mx3fb_resume NULL |
1213 | #endif | 1213 | #endif |
1214 | 1214 | ||
1215 | /* | 1215 | /* |
1216 | * Main framebuffer functions | 1216 | * Main framebuffer functions |
1217 | */ | 1217 | */ |
1218 | 1218 | ||
1219 | /** | 1219 | /** |
1220 | * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. | 1220 | * mx3fb_map_video_memory() - allocates the DRAM memory for the frame buffer. |
1221 | * @fbi: framebuffer information pointer | 1221 | * @fbi: framebuffer information pointer |
1222 | * @mem_len: length of mapped memory | 1222 | * @mem_len: length of mapped memory |
1223 | * @lock: whether to take fbi->mm_lock while updating (false during initialisation) | 1223 | * @lock: whether to take fbi->mm_lock while updating (false during initialisation) |
1224 | * @return: 0 on success or a negative error code | 1224 | * @return: 0 on success or a negative error code |
1225 | * | 1225 | * |
1226 | * This buffer is remapped into a non-cached, non-buffered memory region to | 1226 | * This buffer is remapped into a non-cached, non-buffered memory region to |
1227 | * allow palette and pixel writes to occur without flushing the cache. Once this | 1227 | * allow palette and pixel writes to occur without flushing the cache. Once this |
1228 | * area is remapped, all virtual memory access to the video memory should occur | 1228 | * area is remapped, all virtual memory access to the video memory should occur |
1229 | * at the new region. | 1229 | * at the new region. |
1230 | */ | 1230 | */ |
1231 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, | 1231 | static int mx3fb_map_video_memory(struct fb_info *fbi, unsigned int mem_len, |
1232 | bool lock) | 1232 | bool lock) |
1233 | { | 1233 | { |
1234 | int retval = 0; | 1234 | int retval = 0; |
1235 | dma_addr_t addr; | 1235 | dma_addr_t addr; |
1236 | 1236 | ||
1237 | fbi->screen_base = dma_alloc_writecombine(fbi->device, | 1237 | fbi->screen_base = dma_alloc_writecombine(fbi->device, |
1238 | mem_len, | 1238 | mem_len, |
1239 | &addr, GFP_DMA); | 1239 | &addr, GFP_DMA); |
1240 | 1240 | ||
1241 | if (!fbi->screen_base) { | 1241 | if (!fbi->screen_base) { |
1242 | dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n", | 1242 | dev_err(fbi->device, "Cannot allocate %u bytes framebuffer memory\n", |
1243 | mem_len); | 1243 | mem_len); |
1244 | retval = -EBUSY; | 1244 | retval = -EBUSY; |
1245 | goto err0; | 1245 | goto err0; |
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | if (lock) | 1248 | if (lock) |
1249 | mutex_lock(&fbi->mm_lock); | 1249 | mutex_lock(&fbi->mm_lock); |
1250 | fbi->fix.smem_start = addr; | 1250 | fbi->fix.smem_start = addr; |
1251 | fbi->fix.smem_len = mem_len; | 1251 | fbi->fix.smem_len = mem_len; |
1252 | if (lock) | 1252 | if (lock) |
1253 | mutex_unlock(&fbi->mm_lock); | 1253 | mutex_unlock(&fbi->mm_lock); |
1254 | 1254 | ||
1255 | dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", | 1255 | dev_dbg(fbi->device, "allocated fb @ p=0x%08x, v=0x%p, size=%d.\n", |
1256 | (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); | 1256 | (uint32_t) fbi->fix.smem_start, fbi->screen_base, fbi->fix.smem_len); |
1257 | 1257 | ||
1258 | fbi->screen_size = fbi->fix.smem_len; | 1258 | fbi->screen_size = fbi->fix.smem_len; |
1259 | 1259 | ||
1260 | /* Clear the screen */ | 1260 | /* Clear the screen */ |
1261 | memset((char *)fbi->screen_base, 0, fbi->fix.smem_len); | 1261 | memset((char *)fbi->screen_base, 0, fbi->fix.smem_len); |
1262 | 1262 | ||
1263 | return 0; | 1263 | return 0; |
1264 | 1264 | ||
1265 | err0: | 1265 | err0: |
1266 | fbi->fix.smem_len = 0; | 1266 | fbi->fix.smem_len = 0; |
1267 | fbi->fix.smem_start = 0; | 1267 | fbi->fix.smem_start = 0; |
1268 | fbi->screen_base = NULL; | 1268 | fbi->screen_base = NULL; |
1269 | return retval; | 1269 | return retval; |
1270 | } | 1270 | } |
1271 | 1271 | ||
1272 | /** | 1272 | /** |
1273 | * mx3fb_unmap_video_memory() - de-allocate frame buffer memory. | 1273 | * mx3fb_unmap_video_memory() - de-allocate frame buffer memory. |
1274 | * @fbi: framebuffer information pointer | 1274 | * @fbi: framebuffer information pointer |
1275 | * @return: error code indicating success or failure | 1275 | * @return: error code indicating success or failure |
1276 | */ | 1276 | */ |
1277 | static int mx3fb_unmap_video_memory(struct fb_info *fbi) | 1277 | static int mx3fb_unmap_video_memory(struct fb_info *fbi) |
1278 | { | 1278 | { |
1279 | dma_free_writecombine(fbi->device, fbi->fix.smem_len, | 1279 | dma_free_writecombine(fbi->device, fbi->fix.smem_len, |
1280 | fbi->screen_base, fbi->fix.smem_start); | 1280 | fbi->screen_base, fbi->fix.smem_start); |
1281 | 1281 | ||
1282 | fbi->screen_base = 0; | 1282 | fbi->screen_base = 0; |
1283 | mutex_lock(&fbi->mm_lock); | 1283 | mutex_lock(&fbi->mm_lock); |
1284 | fbi->fix.smem_start = 0; | 1284 | fbi->fix.smem_start = 0; |
1285 | fbi->fix.smem_len = 0; | 1285 | fbi->fix.smem_len = 0; |
1286 | mutex_unlock(&fbi->mm_lock); | 1286 | mutex_unlock(&fbi->mm_lock); |
1287 | return 0; | 1287 | return 0; |
1288 | } | 1288 | } |
1289 | 1289 | ||
1290 | /** | 1290 | /** |
1291 | * mx3fb_init_fbinfo() - initialize framebuffer information object. | 1291 | * mx3fb_init_fbinfo() - initialize framebuffer information object. |
1292 | * @return: initialized framebuffer structure. | 1292 | * @return: initialized framebuffer structure. |
1293 | */ | 1293 | */ |
1294 | static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops) | 1294 | static struct fb_info *mx3fb_init_fbinfo(struct device *dev, struct fb_ops *ops) |
1295 | { | 1295 | { |
1296 | struct fb_info *fbi; | 1296 | struct fb_info *fbi; |
1297 | struct mx3fb_info *mx3fbi; | 1297 | struct mx3fb_info *mx3fbi; |
1298 | int ret; | 1298 | int ret; |
1299 | 1299 | ||
1300 | /* Allocate sufficient memory for the fb structure */ | 1300 | /* Allocate sufficient memory for the fb structure */ |
1301 | fbi = framebuffer_alloc(sizeof(struct mx3fb_info), dev); | 1301 | fbi = framebuffer_alloc(sizeof(struct mx3fb_info), dev); |
1302 | if (!fbi) | 1302 | if (!fbi) |
1303 | return NULL; | 1303 | return NULL; |
1304 | 1304 | ||
1305 | mx3fbi = fbi->par; | 1305 | mx3fbi = fbi->par; |
1306 | mx3fbi->cookie = -EINVAL; | 1306 | mx3fbi->cookie = -EINVAL; |
1307 | mx3fbi->cur_ipu_buf = 0; | 1307 | mx3fbi->cur_ipu_buf = 0; |
1308 | 1308 | ||
1309 | fbi->var.activate = FB_ACTIVATE_NOW; | 1309 | fbi->var.activate = FB_ACTIVATE_NOW; |
1310 | 1310 | ||
1311 | fbi->fbops = ops; | 1311 | fbi->fbops = ops; |
1312 | fbi->flags = FBINFO_FLAG_DEFAULT; | 1312 | fbi->flags = FBINFO_FLAG_DEFAULT; |
1313 | fbi->pseudo_palette = mx3fbi->pseudo_palette; | 1313 | fbi->pseudo_palette = mx3fbi->pseudo_palette; |
1314 | 1314 | ||
1315 | mutex_init(&mx3fbi->mutex); | 1315 | mutex_init(&mx3fbi->mutex); |
1316 | 1316 | ||
1317 | /* Allocate colormap */ | 1317 | /* Allocate colormap */ |
1318 | ret = fb_alloc_cmap(&fbi->cmap, 16, 0); | 1318 | ret = fb_alloc_cmap(&fbi->cmap, 16, 0); |
1319 | if (ret < 0) { | 1319 | if (ret < 0) { |
1320 | framebuffer_release(fbi); | 1320 | framebuffer_release(fbi); |
1321 | return NULL; | 1321 | return NULL; |
1322 | } | 1322 | } |
1323 | 1323 | ||
1324 | return fbi; | 1324 | return fbi; |
1325 | } | 1325 | } |
1326 | 1326 | ||
1327 | static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) | 1327 | static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan) |
1328 | { | 1328 | { |
1329 | struct device *dev = mx3fb->dev; | 1329 | struct device *dev = mx3fb->dev; |
1330 | struct mx3fb_platform_data *mx3fb_pdata = dev->platform_data; | 1330 | struct mx3fb_platform_data *mx3fb_pdata = dev->platform_data; |
1331 | const char *name = mx3fb_pdata->name; | 1331 | const char *name = mx3fb_pdata->name; |
1332 | unsigned int irq; | 1332 | unsigned int irq; |
1333 | struct fb_info *fbi; | 1333 | struct fb_info *fbi; |
1334 | struct mx3fb_info *mx3fbi; | 1334 | struct mx3fb_info *mx3fbi; |
1335 | const struct fb_videomode *mode; | 1335 | const struct fb_videomode *mode; |
1336 | int ret, num_modes; | 1336 | int ret, num_modes; |
1337 | 1337 | ||
1338 | ichan->client = mx3fb; | 1338 | ichan->client = mx3fb; |
1339 | irq = ichan->eof_irq; | 1339 | irq = ichan->eof_irq; |
1340 | 1340 | ||
1341 | if (ichan->dma_chan.chan_id != IDMAC_SDC_0) | 1341 | if (ichan->dma_chan.chan_id != IDMAC_SDC_0) |
1342 | return -EINVAL; | 1342 | return -EINVAL; |
1343 | 1343 | ||
1344 | fbi = mx3fb_init_fbinfo(dev, &mx3fb_ops); | 1344 | fbi = mx3fb_init_fbinfo(dev, &mx3fb_ops); |
1345 | if (!fbi) | 1345 | if (!fbi) |
1346 | return -ENOMEM; | 1346 | return -ENOMEM; |
1347 | 1347 | ||
1348 | if (!fb_mode) | 1348 | if (!fb_mode) |
1349 | fb_mode = name; | 1349 | fb_mode = name; |
1350 | 1350 | ||
1351 | if (!fb_mode) { | 1351 | if (!fb_mode) { |
1352 | ret = -EINVAL; | 1352 | ret = -EINVAL; |
1353 | goto emode; | 1353 | goto emode; |
1354 | } | 1354 | } |
1355 | 1355 | ||
1356 | if (mx3fb_pdata->mode && mx3fb_pdata->num_modes) { | 1356 | if (mx3fb_pdata->mode && mx3fb_pdata->num_modes) { |
1357 | mode = mx3fb_pdata->mode; | 1357 | mode = mx3fb_pdata->mode; |
1358 | num_modes = mx3fb_pdata->num_modes; | 1358 | num_modes = mx3fb_pdata->num_modes; |
1359 | } else { | 1359 | } else { |
1360 | mode = mx3fb_modedb; | 1360 | mode = mx3fb_modedb; |
1361 | num_modes = ARRAY_SIZE(mx3fb_modedb); | 1361 | num_modes = ARRAY_SIZE(mx3fb_modedb); |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | if (!fb_find_mode(&fbi->var, fbi, fb_mode, mode, | 1364 | if (!fb_find_mode(&fbi->var, fbi, fb_mode, mode, |
1365 | num_modes, NULL, default_bpp)) { | 1365 | num_modes, NULL, default_bpp)) { |
1366 | ret = -EBUSY; | 1366 | ret = -EBUSY; |
1367 | goto emode; | 1367 | goto emode; |
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | fb_videomode_to_modelist(mode, num_modes, &fbi->modelist); | 1370 | fb_videomode_to_modelist(mode, num_modes, &fbi->modelist); |
1371 | 1371 | ||
1372 | /* Default Y virtual size is 2x panel size */ | 1372 | /* Default Y virtual size is 2x panel size */ |
1373 | fbi->var.yres_virtual = fbi->var.yres * 2; | 1373 | fbi->var.yres_virtual = fbi->var.yres * 2; |
1374 | 1374 | ||
1375 | mx3fb->fbi = fbi; | 1375 | mx3fb->fbi = fbi; |
1376 | 1376 | ||
1377 | /* set Display Interface clock period */ | 1377 | /* set Display Interface clock period */ |
1378 | mx3fb_write_reg(mx3fb, 0x00100010L, DI_HSP_CLK_PER); | 1378 | mx3fb_write_reg(mx3fb, 0x00100010L, DI_HSP_CLK_PER); |
1379 | /* Might need to trigger HSP clock change - see 44.3.3.8.5 */ | 1379 | /* Might need to trigger HSP clock change - see 44.3.3.8.5 */ |
1380 | 1380 | ||
1381 | sdc_set_brightness(mx3fb, 255); | 1381 | sdc_set_brightness(mx3fb, 255); |
1382 | sdc_set_global_alpha(mx3fb, true, 0xFF); | 1382 | sdc_set_global_alpha(mx3fb, true, 0xFF); |
1383 | sdc_set_color_key(mx3fb, IDMAC_SDC_0, false, 0); | 1383 | sdc_set_color_key(mx3fb, IDMAC_SDC_0, false, 0); |
1384 | 1384 | ||
1385 | mx3fbi = fbi->par; | 1385 | mx3fbi = fbi->par; |
1386 | mx3fbi->idmac_channel = ichan; | 1386 | mx3fbi->idmac_channel = ichan; |
1387 | mx3fbi->ipu_ch = ichan->dma_chan.chan_id; | 1387 | mx3fbi->ipu_ch = ichan->dma_chan.chan_id; |
1388 | mx3fbi->mx3fb = mx3fb; | 1388 | mx3fbi->mx3fb = mx3fb; |
1389 | mx3fbi->blank = FB_BLANK_NORMAL; | 1389 | mx3fbi->blank = FB_BLANK_NORMAL; |
1390 | 1390 | ||
1391 | init_completion(&mx3fbi->flip_cmpl); | 1391 | init_completion(&mx3fbi->flip_cmpl); |
1392 | disable_irq(ichan->eof_irq); | 1392 | disable_irq(ichan->eof_irq); |
1393 | dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); | 1393 | dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq); |
1394 | ret = __set_par(fbi, false); | 1394 | ret = __set_par(fbi, false); |
1395 | if (ret < 0) | 1395 | if (ret < 0) |
1396 | goto esetpar; | 1396 | goto esetpar; |
1397 | 1397 | ||
1398 | __blank(FB_BLANK_UNBLANK, fbi); | 1398 | __blank(FB_BLANK_UNBLANK, fbi); |
1399 | 1399 | ||
1400 | dev_info(dev, "registered, using mode %s\n", fb_mode); | 1400 | dev_info(dev, "registered, using mode %s\n", fb_mode); |
1401 | 1401 | ||
1402 | ret = register_framebuffer(fbi); | 1402 | ret = register_framebuffer(fbi); |
1403 | if (ret < 0) | 1403 | if (ret < 0) |
1404 | goto erfb; | 1404 | goto erfb; |
1405 | 1405 | ||
1406 | return 0; | 1406 | return 0; |
1407 | 1407 | ||
1408 | erfb: | 1408 | erfb: |
1409 | esetpar: | 1409 | esetpar: |
1410 | emode: | 1410 | emode: |
1411 | fb_dealloc_cmap(&fbi->cmap); | 1411 | fb_dealloc_cmap(&fbi->cmap); |
1412 | framebuffer_release(fbi); | 1412 | framebuffer_release(fbi); |
1413 | 1413 | ||
1414 | return ret; | 1414 | return ret; |
1415 | } | 1415 | } |
1416 | 1416 | ||
1417 | static bool chan_filter(struct dma_chan *chan, void *arg) | 1417 | static bool chan_filter(struct dma_chan *chan, void *arg) |
1418 | { | 1418 | { |
1419 | struct dma_chan_request *rq = arg; | 1419 | struct dma_chan_request *rq = arg; |
1420 | struct device *dev; | 1420 | struct device *dev; |
1421 | struct mx3fb_platform_data *mx3fb_pdata; | 1421 | struct mx3fb_platform_data *mx3fb_pdata; |
1422 | 1422 | ||
1423 | if (!rq) | 1423 | if (!rq) |
1424 | return false; | 1424 | return false; |
1425 | 1425 | ||
1426 | dev = rq->mx3fb->dev; | 1426 | dev = rq->mx3fb->dev; |
1427 | mx3fb_pdata = dev->platform_data; | 1427 | mx3fb_pdata = dev->platform_data; |
1428 | 1428 | ||
1429 | return rq->id == chan->chan_id && | 1429 | return rq->id == chan->chan_id && |
1430 | mx3fb_pdata->dma_dev == chan->device->dev; | 1430 | mx3fb_pdata->dma_dev == chan->device->dev; |
1431 | } | 1431 | } |
1432 | 1432 | ||
1433 | static void release_fbi(struct fb_info *fbi) | 1433 | static void release_fbi(struct fb_info *fbi) |
1434 | { | 1434 | { |
1435 | mx3fb_unmap_video_memory(fbi); | 1435 | mx3fb_unmap_video_memory(fbi); |
1436 | 1436 | ||
1437 | fb_dealloc_cmap(&fbi->cmap); | 1437 | fb_dealloc_cmap(&fbi->cmap); |
1438 | 1438 | ||
1439 | unregister_framebuffer(fbi); | 1439 | unregister_framebuffer(fbi); |
1440 | framebuffer_release(fbi); | 1440 | framebuffer_release(fbi); |
1441 | } | 1441 | } |
1442 | 1442 | ||
1443 | static int mx3fb_probe(struct platform_device *pdev) | 1443 | static int mx3fb_probe(struct platform_device *pdev) |
1444 | { | 1444 | { |
1445 | struct device *dev = &pdev->dev; | 1445 | struct device *dev = &pdev->dev; |
1446 | int ret; | 1446 | int ret; |
1447 | struct resource *sdc_reg; | 1447 | struct resource *sdc_reg; |
1448 | struct mx3fb_data *mx3fb; | 1448 | struct mx3fb_data *mx3fb; |
1449 | dma_cap_mask_t mask; | 1449 | dma_cap_mask_t mask; |
1450 | struct dma_chan *chan; | 1450 | struct dma_chan *chan; |
1451 | struct dma_chan_request rq; | 1451 | struct dma_chan_request rq; |
1452 | 1452 | ||
1453 | /* | 1453 | /* |
1454 | * Display Interface (DI) and Synchronous Display Controller (SDC) | 1454 | * Display Interface (DI) and Synchronous Display Controller (SDC) |
1455 | * registers | 1455 | * registers |
1456 | */ | 1456 | */ |
1457 | sdc_reg = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1457 | sdc_reg = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1458 | if (!sdc_reg) | 1458 | if (!sdc_reg) |
1459 | return -EINVAL; | 1459 | return -EINVAL; |
1460 | 1460 | ||
1461 | mx3fb = kzalloc(sizeof(*mx3fb), GFP_KERNEL); | 1461 | mx3fb = kzalloc(sizeof(*mx3fb), GFP_KERNEL); |
1462 | if (!mx3fb) | 1462 | if (!mx3fb) |
1463 | return -ENOMEM; | 1463 | return -ENOMEM; |
1464 | 1464 | ||
1465 | spin_lock_init(&mx3fb->lock); | 1465 | spin_lock_init(&mx3fb->lock); |
1466 | 1466 | ||
1467 | mx3fb->reg_base = ioremap(sdc_reg->start, resource_size(sdc_reg)); | 1467 | mx3fb->reg_base = ioremap(sdc_reg->start, resource_size(sdc_reg)); |
1468 | if (!mx3fb->reg_base) { | 1468 | if (!mx3fb->reg_base) { |
1469 | ret = -ENOMEM; | 1469 | ret = -ENOMEM; |
1470 | goto eremap; | 1470 | goto eremap; |
1471 | } | 1471 | } |
1472 | 1472 | ||
1473 | pr_debug("Remapped %x to %x at %p\n", sdc_reg->start, sdc_reg->end, | 1473 | pr_debug("Remapped %x to %x at %p\n", sdc_reg->start, sdc_reg->end, |
1474 | mx3fb->reg_base); | 1474 | mx3fb->reg_base); |
1475 | 1475 | ||
1476 | /* IDMAC interface */ | 1476 | /* IDMAC interface */ |
1477 | dmaengine_get(); | 1477 | dmaengine_get(); |
1478 | 1478 | ||
1479 | mx3fb->dev = dev; | 1479 | mx3fb->dev = dev; |
1480 | platform_set_drvdata(pdev, mx3fb); | 1480 | platform_set_drvdata(pdev, mx3fb); |
1481 | 1481 | ||
1482 | rq.mx3fb = mx3fb; | 1482 | rq.mx3fb = mx3fb; |
1483 | 1483 | ||
1484 | dma_cap_zero(mask); | 1484 | dma_cap_zero(mask); |
1485 | dma_cap_set(DMA_SLAVE, mask); | 1485 | dma_cap_set(DMA_SLAVE, mask); |
1486 | dma_cap_set(DMA_PRIVATE, mask); | 1486 | dma_cap_set(DMA_PRIVATE, mask); |
1487 | rq.id = IDMAC_SDC_0; | 1487 | rq.id = IDMAC_SDC_0; |
1488 | chan = dma_request_channel(mask, chan_filter, &rq); | 1488 | chan = dma_request_channel(mask, chan_filter, &rq); |
1489 | if (!chan) { | 1489 | if (!chan) { |
1490 | ret = -EBUSY; | 1490 | ret = -EBUSY; |
1491 | goto ersdc0; | 1491 | goto ersdc0; |
1492 | } | 1492 | } |
1493 | 1493 | ||
1494 | mx3fb->backlight_level = 255; | 1494 | mx3fb->backlight_level = 255; |
1495 | 1495 | ||
1496 | ret = init_fb_chan(mx3fb, to_idmac_chan(chan)); | 1496 | ret = init_fb_chan(mx3fb, to_idmac_chan(chan)); |
1497 | if (ret < 0) | 1497 | if (ret < 0) |
1498 | goto eisdc0; | 1498 | goto eisdc0; |
1499 | 1499 | ||
1500 | return 0; | 1500 | return 0; |
1501 | 1501 | ||
1502 | eisdc0: | 1502 | eisdc0: |
1503 | dma_release_channel(chan); | 1503 | dma_release_channel(chan); |
1504 | ersdc0: | 1504 | ersdc0: |
1505 | dmaengine_put(); | 1505 | dmaengine_put(); |
1506 | iounmap(mx3fb->reg_base); | 1506 | iounmap(mx3fb->reg_base); |
1507 | eremap: | 1507 | eremap: |
1508 | kfree(mx3fb); | 1508 | kfree(mx3fb); |
1509 | dev_err(dev, "mx3fb: failed to register fb\n"); | 1509 | dev_err(dev, "mx3fb: failed to register fb\n"); |
1510 | return ret; | 1510 | return ret; |
1511 | } | 1511 | } |
1512 | 1512 | ||
1513 | static int mx3fb_remove(struct platform_device *dev) | 1513 | static int mx3fb_remove(struct platform_device *dev) |
1514 | { | 1514 | { |
1515 | struct mx3fb_data *mx3fb = platform_get_drvdata(dev); | 1515 | struct mx3fb_data *mx3fb = platform_get_drvdata(dev); |
1516 | struct fb_info *fbi = mx3fb->fbi; | 1516 | struct fb_info *fbi = mx3fb->fbi; |
1517 | struct mx3fb_info *mx3_fbi = fbi->par; | 1517 | struct mx3fb_info *mx3_fbi = fbi->par; |
1518 | struct dma_chan *chan; | 1518 | struct dma_chan *chan; |
1519 | 1519 | ||
1520 | chan = &mx3_fbi->idmac_channel->dma_chan; | 1520 | chan = &mx3_fbi->idmac_channel->dma_chan; |
1521 | release_fbi(fbi); | 1521 | release_fbi(fbi); |
1522 | 1522 | ||
1523 | dma_release_channel(chan); | 1523 | dma_release_channel(chan); |
1524 | dmaengine_put(); | 1524 | dmaengine_put(); |
1525 | 1525 | ||
1526 | iounmap(mx3fb->reg_base); | 1526 | iounmap(mx3fb->reg_base); |
1527 | kfree(mx3fb); | 1527 | kfree(mx3fb); |
1528 | return 0; | 1528 | return 0; |
1529 | } | 1529 | } |
1530 | 1530 | ||
1531 | static struct platform_driver mx3fb_driver = { | 1531 | static struct platform_driver mx3fb_driver = { |
1532 | .driver = { | 1532 | .driver = { |
1533 | .name = MX3FB_NAME, | 1533 | .name = MX3FB_NAME, |
1534 | }, | 1534 | }, |
1535 | .probe = mx3fb_probe, | 1535 | .probe = mx3fb_probe, |
1536 | .remove = mx3fb_remove, | 1536 | .remove = mx3fb_remove, |
1537 | .suspend = mx3fb_suspend, | 1537 | .suspend = mx3fb_suspend, |
1538 | .resume = mx3fb_resume, | 1538 | .resume = mx3fb_resume, |
1539 | }; | 1539 | }; |
1540 | 1540 | ||
1541 | /* | 1541 | /* |
1542 | * Parse user specified options (`video=mx3fb:') | 1542 | * Parse user specified options (`video=mx3fb:') |
1543 | * example: | 1543 | * example: |
1544 | * video=mx3fb:bpp=16 | 1544 | * video=mx3fb:bpp=16 |
1545 | */ | 1545 | */ |
1546 | static int __init mx3fb_setup(void) | 1546 | static int __init mx3fb_setup(void) |
1547 | { | 1547 | { |
1548 | #ifndef MODULE | 1548 | #ifndef MODULE |
1549 | char *opt, *options = NULL; | 1549 | char *opt, *options = NULL; |
1550 | 1550 | ||
1551 | if (fb_get_options("mx3fb", &options)) | 1551 | if (fb_get_options("mx3fb", &options)) |
1552 | return -ENODEV; | 1552 | return -ENODEV; |
1553 | 1553 | ||
1554 | if (!options || !*options) | 1554 | if (!options || !*options) |
1555 | return 0; | 1555 | return 0; |
1556 | 1556 | ||
1557 | while ((opt = strsep(&options, ",")) != NULL) { | 1557 | while ((opt = strsep(&options, ",")) != NULL) { |
1558 | if (!*opt) | 1558 | if (!*opt) |
1559 | continue; | 1559 | continue; |
1560 | if (!strncmp(opt, "bpp=", 4)) | 1560 | if (!strncmp(opt, "bpp=", 4)) |
1561 | default_bpp = simple_strtoul(opt + 4, NULL, 0); | 1561 | default_bpp = simple_strtoul(opt + 4, NULL, 0); |
1562 | else | 1562 | else |
1563 | fb_mode = opt; | 1563 | fb_mode = opt; |
1564 | } | 1564 | } |
1565 | #endif | 1565 | #endif |
1566 | 1566 | ||
1567 | return 0; | 1567 | return 0; |
1568 | } | 1568 | } |
1569 | 1569 | ||
1570 | static int __init mx3fb_init(void) | 1570 | static int __init mx3fb_init(void) |
1571 | { | 1571 | { |
1572 | int ret = mx3fb_setup(); | 1572 | int ret = mx3fb_setup(); |
1573 | 1573 | ||
1574 | if (ret < 0) | 1574 | if (ret < 0) |
1575 | return ret; | 1575 | return ret; |
1576 | 1576 | ||
1577 | ret = platform_driver_register(&mx3fb_driver); | 1577 | ret = platform_driver_register(&mx3fb_driver); |
1578 | return ret; | 1578 | return ret; |
1579 | } | 1579 | } |
1580 | 1580 | ||
1581 | static void __exit mx3fb_exit(void) | 1581 | static void __exit mx3fb_exit(void) |
1582 | { | 1582 | { |
1583 | platform_driver_unregister(&mx3fb_driver); | 1583 | platform_driver_unregister(&mx3fb_driver); |
1584 | } | 1584 | } |
1585 | 1585 | ||
1586 | module_init(mx3fb_init); | 1586 | module_init(mx3fb_init); |
1587 | module_exit(mx3fb_exit); | 1587 | module_exit(mx3fb_exit); |
1588 | 1588 | ||
1589 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); | 1589 | MODULE_AUTHOR("Freescale Semiconductor, Inc."); |
1590 | MODULE_DESCRIPTION("MX3 framebuffer driver"); | 1590 | MODULE_DESCRIPTION("MX3 framebuffer driver"); |
1591 | MODULE_ALIAS("platform:" MX3FB_NAME); | 1591 | MODULE_ALIAS("platform:" MX3FB_NAME); |
1592 | MODULE_LICENSE("GPL v2"); | 1592 | MODULE_LICENSE("GPL v2"); |
1593 | 1593 |
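
The pan-display path above is the canonical slave-DMA client sequence: prepare a descriptor with device_prep_slave_sg(), attach a completion callback, then submit via tx_submit() and check the returned cookie. A minimal sketch of that sequence, with hypothetical names (my_done, my_submit_one) and assuming a single scatterlist entry that is already DMA-mapped:

    static void my_done(void *param)
    {
            /* runs from the DMA driver's tasklet once the transfer completes */
    }

    static int my_submit_one(struct dma_chan *chan, struct scatterlist *sg)
    {
            struct dma_async_tx_descriptor *txd;
            dma_cookie_t cookie;

            /* one-entry scatterlist, memory -> device, interrupt on completion */
            txd = chan->device->device_prep_slave_sg(chan, sg, 1,
                            DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
            if (!txd)
                    return -EIO;

            txd->callback = my_done;
            txd->callback_param = txd;

            cookie = txd->tx_submit(txd);   /* hand the descriptor to the engine */
            return dma_submit_error(cookie) ? -EIO : 0;
    }
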
include/linux/dmaengine.h
1 | /* | 1 | /* |
2 | * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. | 2 | * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify it | 4 | * This program is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License as published by the Free | 5 | * under the terms of the GNU General Public License as published by the Free |
6 | * Software Foundation; either version 2 of the License, or (at your option) | 6 | * Software Foundation; either version 2 of the License, or (at your option) |
7 | * any later version. | 7 | * any later version. |
8 | * | 8 | * |
9 | * This program is distributed in the hope that it will be useful, but WITHOUT | 9 | * This program is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
12 | * more details. | 12 | * more details. |
13 | * | 13 | * |
14 | * You should have received a copy of the GNU General Public License along with | 14 | * You should have received a copy of the GNU General Public License along with |
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 | 15 | * this program; if not, write to the Free Software Foundation, Inc., 59 |
16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
17 | * | 17 | * |
18 | * The full GNU General Public License is included in this distribution in the | 18 | * The full GNU General Public License is included in this distribution in the |
19 | * file called COPYING. | 19 | * file called COPYING. |
20 | */ | 20 | */ |
21 | #ifndef DMAENGINE_H | 21 | #ifndef DMAENGINE_H |
22 | #define DMAENGINE_H | 22 | #define DMAENGINE_H |
23 | 23 | ||
24 | #include <linux/device.h> | 24 | #include <linux/device.h> |
25 | #include <linux/uio.h> | 25 | #include <linux/uio.h> |
26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
27 | 27 | ||
28 | /** | 28 | /** |
29 | * typedef dma_cookie_t - an opaque DMA cookie | 29 | * typedef dma_cookie_t - an opaque DMA cookie |
30 | * | 30 | * |
31 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code | 31 | * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code |
32 | */ | 32 | */ |
33 | typedef s32 dma_cookie_t; | 33 | typedef s32 dma_cookie_t; |
34 | #define DMA_MIN_COOKIE 1 | 34 | #define DMA_MIN_COOKIE 1 |
35 | #define DMA_MAX_COOKIE INT_MAX | 35 | #define DMA_MAX_COOKIE INT_MAX |
36 | 36 | ||
37 | #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) | 37 | #define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0) |
38 | 38 | ||
39 | /** | 39 | /** |
40 | * enum dma_status - DMA transaction status | 40 | * enum dma_status - DMA transaction status |
41 | * @DMA_SUCCESS: transaction completed successfully | 41 | * @DMA_SUCCESS: transaction completed successfully |
42 | * @DMA_IN_PROGRESS: transaction not yet processed | 42 | * @DMA_IN_PROGRESS: transaction not yet processed |
43 | * @DMA_PAUSED: transaction is paused | 43 | * @DMA_PAUSED: transaction is paused |
44 | * @DMA_ERROR: transaction failed | 44 | * @DMA_ERROR: transaction failed |
45 | */ | 45 | */ |
46 | enum dma_status { | 46 | enum dma_status { |
47 | DMA_SUCCESS, | 47 | DMA_SUCCESS, |
48 | DMA_IN_PROGRESS, | 48 | DMA_IN_PROGRESS, |
49 | DMA_PAUSED, | 49 | DMA_PAUSED, |
50 | DMA_ERROR, | 50 | DMA_ERROR, |
51 | }; | 51 | }; |
52 | 52 | ||
53 | /** | 53 | /** |
54 | * enum dma_transaction_type - DMA transaction types/indexes | 54 | * enum dma_transaction_type - DMA transaction types/indexes |
55 | * | 55 | * |
56 | * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is | 56 | * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is |
57 | * automatically set as dma devices are registered. | 57 | * automatically set as dma devices are registered. |
58 | */ | 58 | */ |
59 | enum dma_transaction_type { | 59 | enum dma_transaction_type { |
60 | DMA_MEMCPY, | 60 | DMA_MEMCPY, |
61 | DMA_XOR, | 61 | DMA_XOR, |
62 | DMA_PQ, | 62 | DMA_PQ, |
63 | DMA_XOR_VAL, | 63 | DMA_XOR_VAL, |
64 | DMA_PQ_VAL, | 64 | DMA_PQ_VAL, |
65 | DMA_MEMSET, | 65 | DMA_MEMSET, |
66 | DMA_INTERRUPT, | 66 | DMA_INTERRUPT, |
67 | DMA_PRIVATE, | 67 | DMA_PRIVATE, |
68 | DMA_ASYNC_TX, | 68 | DMA_ASYNC_TX, |
69 | DMA_SLAVE, | 69 | DMA_SLAVE, |
70 | }; | 70 | }; |
71 | 71 | ||
72 | /* last transaction type for creation of the capabilities mask */ | 72 | /* last transaction type for creation of the capabilities mask */ |
73 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) | 73 | #define DMA_TX_TYPE_END (DMA_SLAVE + 1) |
74 | 74 | ||
75 | 75 | ||
76 | /** | 76 | /** |
77 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, | 77 | * enum dma_ctrl_flags - DMA flags to augment operation preparation, |
78 | * control completion, and communicate status. | 78 | * control completion, and communicate status. |
79 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of | 79 | * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of |
80 | * this transaction | 80 | * this transaction |
81 | * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client | 81 | * @DMA_CTRL_ACK - if clear, the descriptor cannot be reused until the client |
82 | * acknowledges receipt, i.e. has a chance to establish any dependency | 82 | * acknowledges receipt, i.e. has a chance to establish any dependency |
83 | * chains | 83 | * chains |
84 | * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) | 84 | * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) |
85 | * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) | 85 | * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) |
86 | * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single | 86 | * @DMA_COMPL_SRC_UNMAP_SINGLE - set to do the source dma-unmapping as single |
87 | * (if not set, do the source dma-unmapping as page) | 87 | * (if not set, do the source dma-unmapping as page) |
88 | * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single | 88 | * @DMA_COMPL_DEST_UNMAP_SINGLE - set to do the destination dma-unmapping as single |
89 | * (if not set, do the destination dma-unmapping as page) | 89 | * (if not set, do the destination dma-unmapping as page) |
90 | * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q | 90 | * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q |
91 | * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P | 91 | * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P |
92 | * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as | 92 | * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as |
93 | * sources that were the result of a previous operation, in the case of a PQ | 93 | * sources that were the result of a previous operation, in the case of a PQ |
94 | * operation it continues the calculation with new sources | 94 | * operation it continues the calculation with new sources |
95 | * @DMA_PREP_FENCE - tell the driver that subsequent operations depend | 95 | * @DMA_PREP_FENCE - tell the driver that subsequent operations depend |
96 | * on the result of this operation | 96 | * on the result of this operation |
97 | */ | 97 | */ |
98 | enum dma_ctrl_flags { | 98 | enum dma_ctrl_flags { |
99 | DMA_PREP_INTERRUPT = (1 << 0), | 99 | DMA_PREP_INTERRUPT = (1 << 0), |
100 | DMA_CTRL_ACK = (1 << 1), | 100 | DMA_CTRL_ACK = (1 << 1), |
101 | DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), | 101 | DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), |
102 | DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), | 102 | DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), |
103 | DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), | 103 | DMA_COMPL_SRC_UNMAP_SINGLE = (1 << 4), |
104 | DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), | 104 | DMA_COMPL_DEST_UNMAP_SINGLE = (1 << 5), |
105 | DMA_PREP_PQ_DISABLE_P = (1 << 6), | 105 | DMA_PREP_PQ_DISABLE_P = (1 << 6), |
106 | DMA_PREP_PQ_DISABLE_Q = (1 << 7), | 106 | DMA_PREP_PQ_DISABLE_Q = (1 << 7), |
107 | DMA_PREP_CONTINUE = (1 << 8), | 107 | DMA_PREP_CONTINUE = (1 << 8), |
108 | DMA_PREP_FENCE = (1 << 9), | 108 | DMA_PREP_FENCE = (1 << 9), |
109 | }; | 109 | }; |
110 | 110 | ||
111 | /** | 111 | /** |
112 | * enum dma_ctrl_cmd - DMA operations that can optionally be exercised | 112 | * enum dma_ctrl_cmd - DMA operations that can optionally be exercised |
113 | * on a running channel. | 113 | * on a running channel. |
114 | * @DMA_TERMINATE_ALL: terminate all ongoing transfers | 114 | * @DMA_TERMINATE_ALL: terminate all ongoing transfers |
115 | * @DMA_PAUSE: pause ongoing transfers | 115 | * @DMA_PAUSE: pause ongoing transfers |
116 | * @DMA_RESUME: resume paused transfer | 116 | * @DMA_RESUME: resume paused transfer |
117 | */ | 117 | */ |
118 | enum dma_ctrl_cmd { | 118 | enum dma_ctrl_cmd { |
119 | DMA_TERMINATE_ALL, | 119 | DMA_TERMINATE_ALL, |
120 | DMA_PAUSE, | 120 | DMA_PAUSE, |
121 | DMA_RESUME, | 121 | DMA_RESUME, |
122 | }; | 122 | }; |
123 | 123 | ||
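
These are the commands dispatched by the device_control() hook further down. After this patch the hook takes a third, ioctl()-style argument, so a driver implementation looks roughly like the sketch below (my_dma_control is a hypothetical name; the drivers converted by this commit simply accept and ignore the argument, which is reserved for future commands that carry data):

    static int my_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                              unsigned long arg)
    {
            switch (cmd) {
            case DMA_TERMINATE_ALL:
                    /* stop the hardware and discard all queued descriptors */
                    return 0;
            case DMA_PAUSE:
            case DMA_RESUME:
                    /* not every engine can pause mid-transfer */
                    return -ENXIO;
            default:
                    return -ENXIO;
            }
    }

Callers are converted in the same way, passing 0 for the new argument until a command that needs it exists:

    chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
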
124 | /** | 124 | /** |
125 | * enum sum_check_bits - bit position of pq_check_flags | 125 | * enum sum_check_bits - bit position of pq_check_flags |
126 | */ | 126 | */ |
127 | enum sum_check_bits { | 127 | enum sum_check_bits { |
128 | SUM_CHECK_P = 0, | 128 | SUM_CHECK_P = 0, |
129 | SUM_CHECK_Q = 1, | 129 | SUM_CHECK_Q = 1, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | /** | 132 | /** |
133 | * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations | 133 | * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations |
134 | * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise | 134 | * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise |
135 | * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise | 135 | * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise |
136 | */ | 136 | */ |
137 | enum sum_check_flags { | 137 | enum sum_check_flags { |
138 | SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), | 138 | SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), |
139 | SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), | 139 | SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), |
140 | }; | 140 | }; |
141 | 141 | ||
142 | 142 | ||
143 | /** | 143 | /** |
144 | * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. | 144 | * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. |
145 | * See linux/cpumask.h | 145 | * See linux/cpumask.h |
146 | */ | 146 | */ |
147 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; | 147 | typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t; |
148 | 148 | ||
149 | /** | 149 | /** |
150 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan | 150 | * struct dma_chan_percpu - the per-CPU part of struct dma_chan |
151 | * @memcpy_count: transaction counter | 151 | * @memcpy_count: transaction counter |
152 | * @bytes_transferred: byte counter | 152 | * @bytes_transferred: byte counter |
153 | */ | 153 | */ |
154 | 154 | ||
155 | struct dma_chan_percpu { | 155 | struct dma_chan_percpu { |
156 | /* stats */ | 156 | /* stats */ |
157 | unsigned long memcpy_count; | 157 | unsigned long memcpy_count; |
158 | unsigned long bytes_transferred; | 158 | unsigned long bytes_transferred; |
159 | }; | 159 | }; |
160 | 160 | ||
161 | /** | 161 | /** |
162 | * struct dma_chan - devices supply DMA channels, clients use them | 162 | * struct dma_chan - devices supply DMA channels, clients use them |
163 | * @device: ptr to the dma device who supplies this channel, always !%NULL | 163 | * @device: ptr to the dma device who supplies this channel, always !%NULL |
164 | * @cookie: last cookie value returned to client | 164 | * @cookie: last cookie value returned to client |
165 | * @chan_id: channel ID for sysfs | 165 | * @chan_id: channel ID for sysfs |
166 | * @dev: class device for sysfs | 166 | * @dev: class device for sysfs |
167 | * @device_node: used to add this to the device chan list | 167 | * @device_node: used to add this to the device chan list |
168 | * @local: per-cpu pointer to a struct dma_chan_percpu | 168 | * @local: per-cpu pointer to a struct dma_chan_percpu |
169 | * @client_count: how many clients are using this channel | 169 | * @client_count: how many clients are using this channel |
170 | * @table_count: number of appearances in the mem-to-mem allocation table | 170 | * @table_count: number of appearances in the mem-to-mem allocation table |
171 | * @private: private data for certain client-channel associations | 171 | * @private: private data for certain client-channel associations |
172 | */ | 172 | */ |
173 | struct dma_chan { | 173 | struct dma_chan { |
174 | struct dma_device *device; | 174 | struct dma_device *device; |
175 | dma_cookie_t cookie; | 175 | dma_cookie_t cookie; |
176 | 176 | ||
177 | /* sysfs */ | 177 | /* sysfs */ |
178 | int chan_id; | 178 | int chan_id; |
179 | struct dma_chan_dev *dev; | 179 | struct dma_chan_dev *dev; |
180 | 180 | ||
181 | struct list_head device_node; | 181 | struct list_head device_node; |
182 | struct dma_chan_percpu __percpu *local; | 182 | struct dma_chan_percpu __percpu *local; |
183 | int client_count; | 183 | int client_count; |
184 | int table_count; | 184 | int table_count; |
185 | void *private; | 185 | void *private; |
186 | }; | 186 | }; |
187 | 187 | ||
188 | /** | 188 | /** |
189 | * struct dma_chan_dev - relate sysfs device node to backing channel device | 189 | * struct dma_chan_dev - relate sysfs device node to backing channel device |
190 | * @chan - driver channel device | 190 | * @chan - driver channel device |
191 | * @device - sysfs device | 191 | * @device - sysfs device |
192 | * @dev_id - parent dma_device dev_id | 192 | * @dev_id - parent dma_device dev_id |
193 | * @idr_ref - reference count to gate release of dma_device dev_id | 193 | * @idr_ref - reference count to gate release of dma_device dev_id |
194 | */ | 194 | */ |
195 | struct dma_chan_dev { | 195 | struct dma_chan_dev { |
196 | struct dma_chan *chan; | 196 | struct dma_chan *chan; |
197 | struct device device; | 197 | struct device device; |
198 | int dev_id; | 198 | int dev_id; |
199 | atomic_t *idr_ref; | 199 | atomic_t *idr_ref; |
200 | }; | 200 | }; |
201 | 201 | ||
202 | static inline const char *dma_chan_name(struct dma_chan *chan) | 202 | static inline const char *dma_chan_name(struct dma_chan *chan) |
203 | { | 203 | { |
204 | return dev_name(&chan->dev->device); | 204 | return dev_name(&chan->dev->device); |
205 | } | 205 | } |
206 | 206 | ||
207 | void dma_chan_cleanup(struct kref *kref); | 207 | void dma_chan_cleanup(struct kref *kref); |
208 | 208 | ||
209 | /** | 209 | /** |
210 | * typedef dma_filter_fn - callback filter for dma_request_channel | 210 | * typedef dma_filter_fn - callback filter for dma_request_channel |
211 | * @chan: channel to be reviewed | 211 | * @chan: channel to be reviewed |
212 | * @filter_param: opaque parameter passed through dma_request_channel | 212 | * @filter_param: opaque parameter passed through dma_request_channel |
213 | * | 213 | * |
214 | * When this optional parameter is specified in a call to dma_request_channel a | 214 | * When this optional parameter is specified in a call to dma_request_channel a |
215 | * suitable channel is passed to this routine for further dispositioning before | 215 | * suitable channel is passed to this routine for further dispositioning before |
216 | * being returned, where 'suitable' indicates a non-busy channel that | 216 | * being returned, where 'suitable' indicates a non-busy channel that |
217 | * satisfies the given capability mask. It returns 'true' to indicate that the | 217 | * satisfies the given capability mask. It returns 'true' to indicate that the |
218 | * channel is suitable. | 218 | * channel is suitable. |
219 | */ | 219 | */ |
220 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); | 220 | typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param); |
221 | 221 | ||
222 | typedef void (*dma_async_tx_callback)(void *dma_async_param); | 222 | typedef void (*dma_async_tx_callback)(void *dma_async_param); |
223 | /** | 223 | /** |
224 | * struct dma_async_tx_descriptor - async transaction descriptor | 224 | * struct dma_async_tx_descriptor - async transaction descriptor |
225 | * ---dma generic offload fields--- | 225 | * ---dma generic offload fields--- |
226 | * @cookie: tracking cookie for this transaction, set to -EBUSY if | 226 | * @cookie: tracking cookie for this transaction, set to -EBUSY if |
227 | * this tx is sitting on a dependency list | 227 | * this tx is sitting on a dependency list |
228 | * @flags: flags to augment operation preparation, control completion, and | 228 | * @flags: flags to augment operation preparation, control completion, and |
229 | * communicate status | 229 | * communicate status |
230 | * @phys: physical address of the descriptor | 230 | * @phys: physical address of the descriptor |
231 | * @chan: target channel for this operation | 231 | * @chan: target channel for this operation |
232 | * @tx_submit: set the prepared descriptor(s) to be executed by the engine | 232 | * @tx_submit: set the prepared descriptor(s) to be executed by the engine |
233 | * @callback: routine to call after this operation is complete | 233 | * @callback: routine to call after this operation is complete |
234 | * @callback_param: general parameter to pass to the callback routine | 234 | * @callback_param: general parameter to pass to the callback routine |
235 | * ---async_tx api specific fields--- | 235 | * ---async_tx api specific fields--- |
236 | * @next: at completion submit this descriptor | 236 | * @next: at completion submit this descriptor |
237 | * @parent: pointer to the next level up in the dependency chain | 237 | * @parent: pointer to the next level up in the dependency chain |
238 | * @lock: protect the parent and next pointers | 238 | * @lock: protect the parent and next pointers |
239 | */ | 239 | */ |
240 | struct dma_async_tx_descriptor { | 240 | struct dma_async_tx_descriptor { |
241 | dma_cookie_t cookie; | 241 | dma_cookie_t cookie; |
242 | enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ | 242 | enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ |
243 | dma_addr_t phys; | 243 | dma_addr_t phys; |
244 | struct dma_chan *chan; | 244 | struct dma_chan *chan; |
245 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); | 245 | dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); |
246 | dma_async_tx_callback callback; | 246 | dma_async_tx_callback callback; |
247 | void *callback_param; | 247 | void *callback_param; |
248 | struct dma_async_tx_descriptor *next; | 248 | struct dma_async_tx_descriptor *next; |
249 | struct dma_async_tx_descriptor *parent; | 249 | struct dma_async_tx_descriptor *parent; |
250 | spinlock_t lock; | 250 | spinlock_t lock; |
251 | }; | 251 | }; |
252 | 252 | ||
253 | /** | 253 | /** |
254 | * struct dma_tx_state - filled in to report the status of | 254 | * struct dma_tx_state - filled in to report the status of |
255 | * a transfer. | 255 | * a transfer. |
256 | * @last: last completed DMA cookie | 256 | * @last: last completed DMA cookie |
257 | * @used: last issued DMA cookie (i.e. the one in progress) | 257 | * @used: last issued DMA cookie (i.e. the one in progress) |
258 | * @residue: the number of bytes remaining to transmit | 258 | * @residue: the number of bytes remaining to transmit |
259 | * on the selected transfer for states DMA_IN_PROGRESS and | 259 | * on the selected transfer for states DMA_IN_PROGRESS and |
260 | * DMA_PAUSED if this is implemented in the driver, else 0 | 260 | * DMA_PAUSED if this is implemented in the driver, else 0 |
261 | */ | 261 | */ |
262 | struct dma_tx_state { | 262 | struct dma_tx_state { |
263 | dma_cookie_t last; | 263 | dma_cookie_t last; |
264 | dma_cookie_t used; | 264 | dma_cookie_t used; |
265 | u32 residue; | 265 | u32 residue; |
266 | }; | 266 | }; |
267 | 267 | ||
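
A client polls a submitted transaction through device_tx_status(); supplying a dma_tx_state pointer additionally retrieves the last/used cookies and the residue (0 unless the driver fills it in). A brief sketch, assuming chan and cookie from an earlier submission:

    struct dma_tx_state state;
    enum dma_status status;

    status = chan->device->device_tx_status(chan, cookie, &state);
    if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
            pr_debug("cookie %d: %u bytes still pending\n", cookie, state.residue);
    else if (status == DMA_SUCCESS)
            pr_debug("cookie %d: complete\n", cookie);
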
268 | /** | 268 | /** |
269 | * struct dma_device - info on the entity supplying DMA services | 269 | * struct dma_device - info on the entity supplying DMA services |
270 | * @chancnt: how many DMA channels are supported | 270 | * @chancnt: how many DMA channels are supported |
271 | * @privatecnt: how many DMA channels are requested by dma_request_channel | 271 | * @privatecnt: how many DMA channels are requested by dma_request_channel |
272 | * @channels: the list of struct dma_chan | 272 | * @channels: the list of struct dma_chan |
273 | * @global_node: list_head for global dma_device_list | 273 | * @global_node: list_head for global dma_device_list |
274 | * @cap_mask: one or more dma_capability flags | 274 | * @cap_mask: one or more dma_capability flags |
275 | * @max_xor: maximum number of xor sources, 0 if no capability | 275 | * @max_xor: maximum number of xor sources, 0 if no capability |
276 | * @max_pq: maximum number of PQ sources and PQ-continue capability | 276 | * @max_pq: maximum number of PQ sources and PQ-continue capability |
277 | * @copy_align: alignment shift for memcpy operations | 277 | * @copy_align: alignment shift for memcpy operations |
278 | * @xor_align: alignment shift for xor operations | 278 | * @xor_align: alignment shift for xor operations |
279 | * @pq_align: alignment shift for pq operations | 279 | * @pq_align: alignment shift for pq operations |
280 | * @fill_align: alignment shift for memset operations | 280 | * @fill_align: alignment shift for memset operations |
281 | * @dev_id: unique device ID | 281 | * @dev_id: unique device ID |
282 | * @dev: struct device reference for dma mapping api | 282 | * @dev: struct device reference for dma mapping api |
283 | * @device_alloc_chan_resources: allocate resources and return the | 283 | * @device_alloc_chan_resources: allocate resources and return the |
284 | * number of allocated descriptors | 284 | * number of allocated descriptors |
285 | * @device_free_chan_resources: release DMA channel's resources | 285 | * @device_free_chan_resources: release DMA channel's resources |
286 | * @device_prep_dma_memcpy: prepares a memcpy operation | 286 | * @device_prep_dma_memcpy: prepares a memcpy operation |
287 | * @device_prep_dma_xor: prepares a xor operation | 287 | * @device_prep_dma_xor: prepares a xor operation |
288 | * @device_prep_dma_xor_val: prepares a xor validation operation | 288 | * @device_prep_dma_xor_val: prepares a xor validation operation |
289 | * @device_prep_dma_pq: prepares a pq operation | 289 | * @device_prep_dma_pq: prepares a pq operation |
290 | * @device_prep_dma_pq_val: prepares a pqzero_sum operation | 290 | * @device_prep_dma_pq_val: prepares a pqzero_sum operation |
291 | * @device_prep_dma_memset: prepares a memset operation | 291 | * @device_prep_dma_memset: prepares a memset operation |
292 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation | 292 | * @device_prep_dma_interrupt: prepares an end of chain interrupt operation |
293 | * @device_prep_slave_sg: prepares a slave dma operation | 293 | * @device_prep_slave_sg: prepares a slave dma operation |
294 | * @device_control: manipulate all pending operations on a channel, returns | 294 | * @device_control: manipulate all pending operations on a channel, returns |
295 | * zero or error code | 295 | * zero or error code |
296 | * @device_tx_status: poll for transaction completion, the optional | 296 | * @device_tx_status: poll for transaction completion, the optional |
297 | * txstate parameter can be supplied with a pointer to get a | 297 | * txstate parameter can be supplied with a pointer to get a |
298 | * struct with auxiliary transfer status information, otherwise the call | 298 | * struct with auxiliary transfer status information, otherwise the call |
299 | * will just return a simple status code | 299 | * will just return a simple status code |
300 | * @device_issue_pending: push pending transactions to hardware | 300 | * @device_issue_pending: push pending transactions to hardware |
301 | */ | 301 | */ |
302 | struct dma_device { | 302 | struct dma_device { |
303 | 303 | ||
304 | unsigned int chancnt; | 304 | unsigned int chancnt; |
305 | unsigned int privatecnt; | 305 | unsigned int privatecnt; |
306 | struct list_head channels; | 306 | struct list_head channels; |
307 | struct list_head global_node; | 307 | struct list_head global_node; |
308 | dma_cap_mask_t cap_mask; | 308 | dma_cap_mask_t cap_mask; |
309 | unsigned short max_xor; | 309 | unsigned short max_xor; |
310 | unsigned short max_pq; | 310 | unsigned short max_pq; |
311 | u8 copy_align; | 311 | u8 copy_align; |
312 | u8 xor_align; | 312 | u8 xor_align; |
313 | u8 pq_align; | 313 | u8 pq_align; |
314 | u8 fill_align; | 314 | u8 fill_align; |
315 | #define DMA_HAS_PQ_CONTINUE (1 << 15) | 315 | #define DMA_HAS_PQ_CONTINUE (1 << 15) |
316 | 316 | ||
317 | int dev_id; | 317 | int dev_id; |
318 | struct device *dev; | 318 | struct device *dev; |
319 | 319 | ||
320 | int (*device_alloc_chan_resources)(struct dma_chan *chan); | 320 | int (*device_alloc_chan_resources)(struct dma_chan *chan); |
321 | void (*device_free_chan_resources)(struct dma_chan *chan); | 321 | void (*device_free_chan_resources)(struct dma_chan *chan); |
322 | 322 | ||
323 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( | 323 | struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( |
324 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | 324 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, |
325 | size_t len, unsigned long flags); | 325 | size_t len, unsigned long flags); |
326 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( | 326 | struct dma_async_tx_descriptor *(*device_prep_dma_xor)( |
327 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | 327 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, |
328 | unsigned int src_cnt, size_t len, unsigned long flags); | 328 | unsigned int src_cnt, size_t len, unsigned long flags); |
329 | struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( | 329 | struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( |
330 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, | 330 | struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, |
331 | size_t len, enum sum_check_flags *result, unsigned long flags); | 331 | size_t len, enum sum_check_flags *result, unsigned long flags); |
332 | struct dma_async_tx_descriptor *(*device_prep_dma_pq)( | 332 | struct dma_async_tx_descriptor *(*device_prep_dma_pq)( |
333 | struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, | 333 | struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, |
334 | unsigned int src_cnt, const unsigned char *scf, | 334 | unsigned int src_cnt, const unsigned char *scf, |
335 | size_t len, unsigned long flags); | 335 | size_t len, unsigned long flags); |
336 | struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( | 336 | struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( |
337 | struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, | 337 | struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, |
338 | unsigned int src_cnt, const unsigned char *scf, size_t len, | 338 | unsigned int src_cnt, const unsigned char *scf, size_t len, |
339 | enum sum_check_flags *pqres, unsigned long flags); | 339 | enum sum_check_flags *pqres, unsigned long flags); |
340 | struct dma_async_tx_descriptor *(*device_prep_dma_memset)( | 340 | struct dma_async_tx_descriptor *(*device_prep_dma_memset)( |
341 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, | 341 | struct dma_chan *chan, dma_addr_t dest, int value, size_t len, |
342 | unsigned long flags); | 342 | unsigned long flags); |
343 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( | 343 | struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( |
344 | struct dma_chan *chan, unsigned long flags); | 344 | struct dma_chan *chan, unsigned long flags); |
345 | 345 | ||
346 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( | 346 | struct dma_async_tx_descriptor *(*device_prep_slave_sg)( |
347 | struct dma_chan *chan, struct scatterlist *sgl, | 347 | struct dma_chan *chan, struct scatterlist *sgl, |
348 | unsigned int sg_len, enum dma_data_direction direction, | 348 | unsigned int sg_len, enum dma_data_direction direction, |
349 | unsigned long flags); | 349 | unsigned long flags); |
350 | int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd); | 350 | int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
| | 351 | unsigned long arg); |
351 | 352 | ||
352 | enum dma_status (*device_tx_status)(struct dma_chan *chan, | 353 | enum dma_status (*device_tx_status)(struct dma_chan *chan, |
353 | dma_cookie_t cookie, | 354 | dma_cookie_t cookie, |
354 | struct dma_tx_state *txstate); | 355 | struct dma_tx_state *txstate); |
355 | void (*device_issue_pending)(struct dma_chan *chan); | 356 | void (*device_issue_pending)(struct dma_chan *chan); |
356 | }; | 357 | }; |
357 | 358 | ||
358 | static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) | 359 | static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) |
359 | { | 360 | { |
360 | size_t mask; | 361 | size_t mask; |
361 | 362 | ||
362 | if (!align) | 363 | if (!align) |
363 | return true; | 364 | return true; |
364 | mask = (1 << align) - 1; | 365 | mask = (1 << align) - 1; |
365 | if (mask & (off1 | off2 | len)) | 366 | if (mask & (off1 | off2 | len)) |
366 | return false; | 367 | return false; |
367 | return true; | 368 | return true; |
368 | } | 369 | } |
369 | 370 | ||
370 | static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, | 371 | static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, |
371 | size_t off2, size_t len) | 372 | size_t off2, size_t len) |
372 | { | 373 | { |
373 | return dmaengine_check_align(dev->copy_align, off1, off2, len); | 374 | return dmaengine_check_align(dev->copy_align, off1, off2, len); |
374 | } | 375 | } |
375 | 376 | ||
376 | static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, | 377 | static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, |
377 | size_t off2, size_t len) | 378 | size_t off2, size_t len) |
378 | { | 379 | { |
379 | return dmaengine_check_align(dev->xor_align, off1, off2, len); | 380 | return dmaengine_check_align(dev->xor_align, off1, off2, len); |
380 | } | 381 | } |
381 | 382 | ||
382 | static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, | 383 | static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, |
383 | size_t off2, size_t len) | 384 | size_t off2, size_t len) |
384 | { | 385 | { |
385 | return dmaengine_check_align(dev->pq_align, off1, off2, len); | 386 | return dmaengine_check_align(dev->pq_align, off1, off2, len); |
386 | } | 387 | } |
387 | 388 | ||
388 | static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, | 389 | static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, |
389 | size_t off2, size_t len) | 390 | size_t off2, size_t len) |
390 | { | 391 | { |
391 | return dmaengine_check_align(dev->fill_align, off1, off2, len); | 392 | return dmaengine_check_align(dev->fill_align, off1, off2, len); |
392 | } | 393 | } |
393 | 394 | ||
394 | static inline void | 395 | static inline void |
395 | dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) | 396 | dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) |
396 | { | 397 | { |
397 | dma->max_pq = maxpq; | 398 | dma->max_pq = maxpq; |
398 | if (has_pq_continue) | 399 | if (has_pq_continue) |
399 | dma->max_pq |= DMA_HAS_PQ_CONTINUE; | 400 | dma->max_pq |= DMA_HAS_PQ_CONTINUE; |
400 | } | 401 | } |
401 | 402 | ||
402 | static inline bool dmaf_continue(enum dma_ctrl_flags flags) | 403 | static inline bool dmaf_continue(enum dma_ctrl_flags flags) |
403 | { | 404 | { |
404 | return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; | 405 | return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; |
405 | } | 406 | } |
406 | 407 | ||
407 | static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) | 408 | static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) |
408 | { | 409 | { |
409 | enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; | 410 | enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; |
410 | 411 | ||
411 | return (flags & mask) == mask; | 412 | return (flags & mask) == mask; |
412 | } | 413 | } |
413 | 414 | ||
414 | static inline bool dma_dev_has_pq_continue(struct dma_device *dma) | 415 | static inline bool dma_dev_has_pq_continue(struct dma_device *dma) |
415 | { | 416 | { |
416 | return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; | 417 | return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; |
417 | } | 418 | } |
418 | 419 | ||
419 | static unsigned short dma_dev_to_maxpq(struct dma_device *dma) | 420 | static unsigned short dma_dev_to_maxpq(struct dma_device *dma) |
420 | { | 421 | { |
421 | return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; | 422 | return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; |
422 | } | 423 | } |
423 | 424 | ||
424 | /* dma_maxpq - reduce maxpq in the face of continued operations | 425 | /* dma_maxpq - reduce maxpq in the face of continued operations |
425 | * @dma - dma device with PQ capability | 426 | * @dma - dma device with PQ capability |
426 | * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set | 427 | * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set |
427 | * | 428 | * |
428 | * When an engine does not support native continuation, we need 3 extra | 429 | * When an engine does not support native continuation, we need 3 extra |
429 | * source slots to reuse P and Q with the following coefficients: | 430 | * source slots to reuse P and Q with the following coefficients: |
430 | * 1/ {00} * P : remove P from Q', but use it as a source for P' | 431 | * 1/ {00} * P : remove P from Q', but use it as a source for P' |
431 | * 2/ {01} * Q : use Q to continue Q' calculation | 432 | * 2/ {01} * Q : use Q to continue Q' calculation |
432 | * 3/ {00} * Q : subtract Q from P' to cancel (2) | 433 | * 3/ {00} * Q : subtract Q from P' to cancel (2) |
433 | * | 434 | * |
434 | * In the case where P is disabled, we only need 1 extra source: | 435 | * In the case where P is disabled, we only need 1 extra source: |
435 | * 1/ {01} * Q : use Q to continue Q' calculation | 436 | * 1/ {01} * Q : use Q to continue Q' calculation |
436 | */ | 437 | */ |
437 | static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) | 438 | static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) |
438 | { | 439 | { |
439 | if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) | 440 | if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) |
440 | return dma_dev_to_maxpq(dma); | 441 | return dma_dev_to_maxpq(dma); |
441 | else if (dmaf_p_disabled_continue(flags)) | 442 | else if (dmaf_p_disabled_continue(flags)) |
442 | return dma_dev_to_maxpq(dma) - 1; | 443 | return dma_dev_to_maxpq(dma) - 1; |
443 | else if (dmaf_continue(flags)) | 444 | else if (dmaf_continue(flags)) |
444 | return dma_dev_to_maxpq(dma) - 3; | 445 | return dma_dev_to_maxpq(dma) - 3; |
445 | BUG(); | 446 | BUG(); |
446 | } | 447 | } |
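Concretely, for an engine registered with dma_set_maxpq(dma, 8, 0), i.e. no native continuation support, the source budget shrinks exactly as the comment above describes:

    dma_maxpq(dma, 0);                        /* 8: not a continuation */
    dma_maxpq(dma, DMA_PREP_CONTINUE);        /* 8 - 3 = 5: P and Q both reused */
    dma_maxpq(dma, DMA_PREP_CONTINUE |
                   DMA_PREP_PQ_DISABLE_P);    /* 8 - 1 = 7: only Q reused */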
447 | 448 | ||
448 | /* --- public DMA engine API --- */ | 449 | /* --- public DMA engine API --- */ |
449 | 450 | ||
450 | #ifdef CONFIG_DMA_ENGINE | 451 | #ifdef CONFIG_DMA_ENGINE |
451 | void dmaengine_get(void); | 452 | void dmaengine_get(void); |
452 | void dmaengine_put(void); | 453 | void dmaengine_put(void); |
453 | #else | 454 | #else |
454 | static inline void dmaengine_get(void) | 455 | static inline void dmaengine_get(void) |
455 | { | 456 | { |
456 | } | 457 | } |
457 | static inline void dmaengine_put(void) | 458 | static inline void dmaengine_put(void) |
458 | { | 459 | { |
459 | } | 460 | } |
460 | #endif | 461 | #endif |
461 | 462 | ||
462 | #ifdef CONFIG_NET_DMA | 463 | #ifdef CONFIG_NET_DMA |
463 | #define net_dmaengine_get() dmaengine_get() | 464 | #define net_dmaengine_get() dmaengine_get() |
464 | #define net_dmaengine_put() dmaengine_put() | 465 | #define net_dmaengine_put() dmaengine_put() |
465 | #else | 466 | #else |
466 | static inline void net_dmaengine_get(void) | 467 | static inline void net_dmaengine_get(void) |
467 | { | 468 | { |
468 | } | 469 | } |
469 | static inline void net_dmaengine_put(void) | 470 | static inline void net_dmaengine_put(void) |
470 | { | 471 | { |
471 | } | 472 | } |
472 | #endif | 473 | #endif |
473 | 474 | ||
474 | #ifdef CONFIG_ASYNC_TX_DMA | 475 | #ifdef CONFIG_ASYNC_TX_DMA |
475 | #define async_dmaengine_get() dmaengine_get() | 476 | #define async_dmaengine_get() dmaengine_get() |
476 | #define async_dmaengine_put() dmaengine_put() | 477 | #define async_dmaengine_put() dmaengine_put() |
477 | #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH | 478 | #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH |
478 | #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) | 479 | #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) |
479 | #else | 480 | #else |
480 | #define async_dma_find_channel(type) dma_find_channel(type) | 481 | #define async_dma_find_channel(type) dma_find_channel(type) |
481 | #endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */ | 482 | #endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */ |
482 | #else | 483 | #else |
483 | static inline void async_dmaengine_get(void) | 484 | static inline void async_dmaengine_get(void) |
484 | { | 485 | { |
485 | } | 486 | } |
486 | static inline void async_dmaengine_put(void) | 487 | static inline void async_dmaengine_put(void) |
487 | { | 488 | { |
488 | } | 489 | } |
489 | static inline struct dma_chan * | 490 | static inline struct dma_chan * |
490 | async_dma_find_channel(enum dma_transaction_type type) | 491 | async_dma_find_channel(enum dma_transaction_type type) |
491 | { | 492 | { |
492 | return NULL; | 493 | return NULL; |
493 | } | 494 | } |
494 | #endif /* CONFIG_ASYNC_TX_DMA */ | 495 | #endif /* CONFIG_ASYNC_TX_DMA */ |
495 | 496 | ||
496 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, | 497 | dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, |
497 | void *dest, void *src, size_t len); | 498 | void *dest, void *src, size_t len); |
498 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, | 499 | dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan, |
499 | struct page *page, unsigned int offset, void *kdata, size_t len); | 500 | struct page *page, unsigned int offset, void *kdata, size_t len); |
500 | dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, | 501 | dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan, |
501 | struct page *dest_pg, unsigned int dest_off, struct page *src_pg, | 502 | struct page *dest_pg, unsigned int dest_off, struct page *src_pg, |
502 | unsigned int src_off, size_t len); | 503 | unsigned int src_off, size_t len); |
503 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | 504 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, |
504 | struct dma_chan *chan); | 505 | struct dma_chan *chan); |
505 | 506 | ||
506 | static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) | 507 | static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) |
507 | { | 508 | { |
508 | tx->flags |= DMA_CTRL_ACK; | 509 | tx->flags |= DMA_CTRL_ACK; |
509 | } | 510 | } |
510 | 511 | ||
511 | static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) | 512 | static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) |
512 | { | 513 | { |
513 | tx->flags &= ~DMA_CTRL_ACK; | 514 | tx->flags &= ~DMA_CTRL_ACK; |
514 | } | 515 | } |
515 | 516 | ||
516 | static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) | 517 | static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) |
517 | { | 518 | { |
518 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; | 519 | return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; |
519 | } | 520 | } |
520 | 521 | ||
521 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) | 522 | #define first_dma_cap(mask) __first_dma_cap(&(mask)) |
522 | static inline int __first_dma_cap(const dma_cap_mask_t *srcp) | 523 | static inline int __first_dma_cap(const dma_cap_mask_t *srcp) |
523 | { | 524 | { |
524 | return min_t(int, DMA_TX_TYPE_END, | 525 | return min_t(int, DMA_TX_TYPE_END, |
525 | find_first_bit(srcp->bits, DMA_TX_TYPE_END)); | 526 | find_first_bit(srcp->bits, DMA_TX_TYPE_END)); |
526 | } | 527 | } |
527 | 528 | ||
528 | #define next_dma_cap(n, mask) __next_dma_cap((n), &(mask)) | 529 | #define next_dma_cap(n, mask) __next_dma_cap((n), &(mask)) |
529 | static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp) | 530 | static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp) |
530 | { | 531 | { |
531 | return min_t(int, DMA_TX_TYPE_END, | 532 | return min_t(int, DMA_TX_TYPE_END, |
532 | find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1)); | 533 | find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1)); |
533 | } | 534 | } |
534 | 535 | ||
535 | #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) | 536 | #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) |
536 | static inline void | 537 | static inline void |
537 | __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | 538 | __dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) |
538 | { | 539 | { |
539 | set_bit(tx_type, dstp->bits); | 540 | set_bit(tx_type, dstp->bits); |
540 | } | 541 | } |
541 | 542 | ||
542 | #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) | 543 | #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) |
543 | static inline void | 544 | static inline void |
544 | __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) | 545 | __dma_cap_clear(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp) |
545 | { | 546 | { |
546 | clear_bit(tx_type, dstp->bits); | 547 | clear_bit(tx_type, dstp->bits); |
547 | } | 548 | } |
548 | 549 | ||
549 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) | 550 | #define dma_cap_zero(mask) __dma_cap_zero(&(mask)) |
550 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) | 551 | static inline void __dma_cap_zero(dma_cap_mask_t *dstp) |
551 | { | 552 | { |
552 | bitmap_zero(dstp->bits, DMA_TX_TYPE_END); | 553 | bitmap_zero(dstp->bits, DMA_TX_TYPE_END); |
553 | } | 554 | } |
554 | 555 | ||
555 | #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) | 556 | #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) |
556 | static inline int | 557 | static inline int |
557 | __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) | 558 | __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp) |
558 | { | 559 | { |
559 | return test_bit(tx_type, srcp->bits); | 560 | return test_bit(tx_type, srcp->bits); |
560 | } | 561 | } |
561 | 562 | ||
562 | #define for_each_dma_cap_mask(cap, mask) \ | 563 | #define for_each_dma_cap_mask(cap, mask) \ |
563 | for ((cap) = first_dma_cap(mask); \ | 564 | for ((cap) = first_dma_cap(mask); \ |
564 | (cap) < DMA_TX_TYPE_END; \ | 565 | (cap) < DMA_TX_TYPE_END; \ |
565 | (cap) = next_dma_cap((cap), (mask))) | 566 | (cap) = next_dma_cap((cap), (mask))) |
566 | 567 | ||
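The dma_cap_* helpers above manipulate a bitmap of dma_transaction_type bits; a typical client zeroes a mask, sets the capabilities it needs, and hands it to dma_request_channel(). A minimal sketch with no filter callback, so any capable channel matches (compare the DMA_SLAVE-plus-filter variant in txx9aclc_dma_init() further down):

    dma_cap_mask_t mask;
    struct dma_chan *chan;

    dma_cap_zero(mask);
    dma_cap_set(DMA_MEMCPY, mask);
    chan = dma_request_channel(mask, NULL, NULL);
    if (chan)
            dma_release_channel(chan);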
567 | /** | 568 | /** |
568 | * dma_async_issue_pending - flush pending transactions to HW | 569 | * dma_async_issue_pending - flush pending transactions to HW |
569 | * @chan: target DMA channel | 570 | * @chan: target DMA channel |
570 | * | 571 | * |
571 | * This allows drivers to push copies to HW in batches, | 572 | * This allows drivers to push copies to HW in batches, |
572 | * reducing MMIO writes where possible. | 573 | * reducing MMIO writes where possible. |
573 | */ | 574 | */ |
574 | static inline void dma_async_issue_pending(struct dma_chan *chan) | 575 | static inline void dma_async_issue_pending(struct dma_chan *chan) |
575 | { | 576 | { |
576 | chan->device->device_issue_pending(chan); | 577 | chan->device->device_issue_pending(chan); |
577 | } | 578 | } |
578 | 579 | ||
579 | #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) | 580 | #define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan) |
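A sketch of the batching this enables: each async memcpy call only queues a descriptor, and a single issue_pending() flushes the whole batch to hardware (dst1/src1 and friends are assumed DMA-capable kernel buffers; error handling elided):

    dma_cookie_t cookie;

    cookie = dma_async_memcpy_buf_to_buf(chan, dst1, src1, len1);
    cookie = dma_async_memcpy_buf_to_buf(chan, dst2, src2, len2);
    dma_async_issue_pending(chan); /* one MMIO kick covers both copies */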
580 | 581 | ||
581 | /** | 582 | /** |
582 | * dma_async_is_tx_complete - poll for transaction completion | 583 | * dma_async_is_tx_complete - poll for transaction completion |
583 | * @chan: DMA channel | 584 | * @chan: DMA channel |
584 | * @cookie: transaction identifier to check status of | 585 | * @cookie: transaction identifier to check status of |
585 | * @last: returns last completed cookie, can be NULL | 586 | * @last: returns last completed cookie, can be NULL |
586 | * @used: returns last issued cookie, can be NULL | 587 | * @used: returns last issued cookie, can be NULL |
587 | * | 588 | * |
588 | * If @last and @used are passed in, upon return they reflect the driver's | 589 | * If @last and @used are passed in, upon return they reflect the driver's |
589 | * internal state and can be used with dma_async_is_complete() to check | 590 | * internal state and can be used with dma_async_is_complete() to check |
590 | * the status of multiple cookies without re-checking hardware state. | 591 | * the status of multiple cookies without re-checking hardware state. |
591 | */ | 592 | */ |
592 | static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, | 593 | static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, |
593 | dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) | 594 | dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) |
594 | { | 595 | { |
595 | struct dma_tx_state state; | 596 | struct dma_tx_state state; |
596 | enum dma_status status; | 597 | enum dma_status status; |
597 | 598 | ||
598 | status = chan->device->device_tx_status(chan, cookie, &state); | 599 | status = chan->device->device_tx_status(chan, cookie, &state); |
599 | if (last) | 600 | if (last) |
600 | *last = state.last; | 601 | *last = state.last; |
601 | if (used) | 602 | if (used) |
602 | *used = state.used; | 603 | *used = state.used; |
603 | return status; | 604 | return status; |
604 | } | 605 | } |
605 | 606 | ||
606 | #define dma_async_memcpy_complete(chan, cookie, last, used)\ | 607 | #define dma_async_memcpy_complete(chan, cookie, last, used)\ |
607 | dma_async_is_tx_complete(chan, cookie, last, used) | 608 | dma_async_is_tx_complete(chan, cookie, last, used) |
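Usage is a straightforward poll; @last and @used can also be captured once and re-tested cheaply with dma_async_is_complete() below. A busy-wait sketch, for illustration only:

    dma_cookie_t last, used;

    while (dma_async_is_tx_complete(chan, cookie, &last, &used)
           == DMA_IN_PROGRESS)
            cpu_relax();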
608 | 609 | ||
609 | /** | 610 | /** |
610 | * dma_async_is_complete - test a cookie against chan state | 611 | * dma_async_is_complete - test a cookie against chan state |
611 | * @cookie: transaction identifier to test status of | 612 | * @cookie: transaction identifier to test status of |
612 | * @last_complete: last known completed transaction | 613 | * @last_complete: last known completed transaction |
613 | * @last_used: last cookie value handed out | 614 | * @last_used: last cookie value handed out |
614 | * | 615 | * |
615 | * dma_async_is_complete() is used in dma_async_memcpy_complete(); | 616 | * dma_async_is_complete() is used in dma_async_memcpy_complete(); |
616 | * the test logic is separated for lightweight testing of multiple cookies. | 617 | * the test logic is separated for lightweight testing of multiple cookies. |
617 | */ | 618 | */ |
618 | static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, | 619 | static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie, |
619 | dma_cookie_t last_complete, dma_cookie_t last_used) | 620 | dma_cookie_t last_complete, dma_cookie_t last_used) |
620 | { | 621 | { |
621 | if (last_complete <= last_used) { | 622 | if (last_complete <= last_used) { |
622 | if ((cookie <= last_complete) || (cookie > last_used)) | 623 | if ((cookie <= last_complete) || (cookie > last_used)) |
623 | return DMA_SUCCESS; | 624 | return DMA_SUCCESS; |
624 | } else { | 625 | } else { |
625 | if ((cookie <= last_complete) && (cookie > last_used)) | 626 | if ((cookie <= last_complete) && (cookie > last_used)) |
626 | return DMA_SUCCESS; | 627 | return DMA_SUCCESS; |
627 | } | 628 | } |
628 | return DMA_IN_PROGRESS; | 629 | return DMA_IN_PROGRESS; |
629 | } | 630 | } |
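The two branches handle the cookie counter wrapping around: (last_complete, last_used] is the in-flight window, whether or not it straddles the wrap point. For example:

    /* unwrapped window: completed through 10, issued through 20 */
    dma_async_is_complete(5, 10, 20);  /* DMA_SUCCESS */
    dma_async_is_complete(15, 10, 20); /* DMA_IN_PROGRESS */

    /* wrapped window: completed through 30, counter wrapped to 5 */
    dma_async_is_complete(3, 30, 5);   /* DMA_IN_PROGRESS */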
630 | 631 | ||
631 | static inline void | 632 | static inline void |
632 | dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) | 633 | dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) |
633 | { | 634 | { |
634 | if (st) { | 635 | if (st) { |
635 | st->last = last; | 636 | st->last = last; |
636 | st->used = used; | 637 | st->used = used; |
637 | st->residue = residue; | 638 | st->residue = residue; |
638 | } | 639 | } |
639 | } | 640 | } |
640 | 641 | ||
641 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); | 642 | enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie); |
642 | #ifdef CONFIG_DMA_ENGINE | 643 | #ifdef CONFIG_DMA_ENGINE |
643 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); | 644 | enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); |
644 | void dma_issue_pending_all(void); | 645 | void dma_issue_pending_all(void); |
645 | #else | 646 | #else |
646 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | 647 | static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) |
647 | { | 648 | { |
648 | return DMA_SUCCESS; | 649 | return DMA_SUCCESS; |
649 | } | 650 | } |
650 | static inline void dma_issue_pending_all(void) | 651 | static inline void dma_issue_pending_all(void) |
651 | { | 652 | { |
652 | do { } while (0); | 653 | do { } while (0); |
653 | } | 654 | } |
654 | #endif | 655 | #endif |
655 | 656 | ||
656 | /* --- DMA device --- */ | 657 | /* --- DMA device --- */ |
657 | 658 | ||
658 | int dma_async_device_register(struct dma_device *device); | 659 | int dma_async_device_register(struct dma_device *device); |
659 | void dma_async_device_unregister(struct dma_device *device); | 660 | void dma_async_device_unregister(struct dma_device *device); |
660 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); | 661 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx); |
661 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); | 662 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); |
662 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) | 663 | #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) |
663 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); | 664 | struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param); |
664 | void dma_release_channel(struct dma_chan *chan); | 665 | void dma_release_channel(struct dma_chan *chan); |
665 | 666 | ||
666 | /* --- Helper iov-locking functions --- */ | 667 | /* --- Helper iov-locking functions --- */ |
667 | 668 | ||
668 | struct dma_page_list { | 669 | struct dma_page_list { |
669 | char __user *base_address; | 670 | char __user *base_address; |
670 | int nr_pages; | 671 | int nr_pages; |
671 | struct page **pages; | 672 | struct page **pages; |
672 | }; | 673 | }; |
673 | 674 | ||
674 | struct dma_pinned_list { | 675 | struct dma_pinned_list { |
675 | int nr_iovecs; | 676 | int nr_iovecs; |
676 | struct dma_page_list page_list[0]; | 677 | struct dma_page_list page_list[0]; |
677 | }; | 678 | }; |
678 | 679 | ||
679 | struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); | 680 | struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len); |
680 | void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); | 681 | void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list); |
681 | 682 | ||
682 | dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, | 683 | dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, |
683 | struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); | 684 | struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len); |
684 | dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, | 685 | dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, |
685 | struct dma_pinned_list *pinned_list, struct page *page, | 686 | struct dma_pinned_list *pinned_list, struct page *page, |
686 | unsigned int offset, size_t len); | 687 | unsigned int offset, size_t len); |
687 | 688 | ||
688 | #endif /* DMAENGINE_H */ | 689 | #endif /* DMAENGINE_H */ |
689 | 690 |
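Every driver and client conversion in this commit is the same mechanical change: commands that carry no data pass 0 for the new argument, as the three call sites in the file below show.

    /* before */
    chan->device->device_control(chan, DMA_TERMINATE_ALL);
    /* after */
    chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);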
sound/soc/txx9/txx9aclc.c
1 | /* | 1 | /* |
2 | * Generic TXx9 ACLC platform driver | 2 | * Generic TXx9 ACLC platform driver |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Atsushi Nemoto | 4 | * Copyright (C) 2009 Atsushi Nemoto |
5 | * | 5 | * |
6 | * Based on RBTX49xx patch from CELF patch archive. | 6 | * Based on RBTX49xx patch from CELF patch archive. |
7 | * (C) Copyright TOSHIBA CORPORATION 2004-2006 | 7 | * (C) Copyright TOSHIBA CORPORATION 2004-2006 |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/scatterlist.h> | 17 | #include <linux/scatterlist.h> |
18 | #include <sound/core.h> | 18 | #include <sound/core.h> |
19 | #include <sound/pcm.h> | 19 | #include <sound/pcm.h> |
20 | #include <sound/pcm_params.h> | 20 | #include <sound/pcm_params.h> |
21 | #include <sound/soc.h> | 21 | #include <sound/soc.h> |
22 | #include "txx9aclc.h" | 22 | #include "txx9aclc.h" |
23 | 23 | ||
24 | static const struct snd_pcm_hardware txx9aclc_pcm_hardware = { | 24 | static const struct snd_pcm_hardware txx9aclc_pcm_hardware = { |
25 | /* | 25 | /* |
26 | * REVISIT: SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | 26 | * REVISIT: SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
27 | * needs more work for noncoherent MIPS. | 27 | * needs more work for noncoherent MIPS. |
28 | */ | 28 | */ |
29 | .info = SNDRV_PCM_INFO_INTERLEAVED | | 29 | .info = SNDRV_PCM_INFO_INTERLEAVED | |
30 | SNDRV_PCM_INFO_BATCH | | 30 | SNDRV_PCM_INFO_BATCH | |
31 | SNDRV_PCM_INFO_PAUSE, | 31 | SNDRV_PCM_INFO_PAUSE, |
32 | #ifdef __BIG_ENDIAN | 32 | #ifdef __BIG_ENDIAN |
33 | .formats = SNDRV_PCM_FMTBIT_S16_BE, | 33 | .formats = SNDRV_PCM_FMTBIT_S16_BE, |
34 | #else | 34 | #else |
35 | .formats = SNDRV_PCM_FMTBIT_S16_LE, | 35 | .formats = SNDRV_PCM_FMTBIT_S16_LE, |
36 | #endif | 36 | #endif |
37 | .period_bytes_min = 1024, | 37 | .period_bytes_min = 1024, |
38 | .period_bytes_max = 8 * 1024, | 38 | .period_bytes_max = 8 * 1024, |
39 | .periods_min = 2, | 39 | .periods_min = 2, |
40 | .periods_max = 4096, | 40 | .periods_max = 4096, |
41 | .buffer_bytes_max = 32 * 1024, | 41 | .buffer_bytes_max = 32 * 1024, |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream, | 44 | static int txx9aclc_pcm_hw_params(struct snd_pcm_substream *substream, |
45 | struct snd_pcm_hw_params *params) | 45 | struct snd_pcm_hw_params *params) |
46 | { | 46 | { |
47 | struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); | 47 | struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream); |
48 | struct snd_soc_device *socdev = rtd->socdev; | 48 | struct snd_soc_device *socdev = rtd->socdev; |
49 | struct snd_pcm_runtime *runtime = substream->runtime; | 49 | struct snd_pcm_runtime *runtime = substream->runtime; |
50 | struct txx9aclc_dmadata *dmadata = runtime->private_data; | 50 | struct txx9aclc_dmadata *dmadata = runtime->private_data; |
51 | int ret; | 51 | int ret; |
52 | 52 | ||
53 | ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); | 53 | ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params)); |
54 | if (ret < 0) | 54 | if (ret < 0) |
55 | return ret; | 55 | return ret; |
56 | 56 | ||
57 | dev_dbg(socdev->dev, | 57 | dev_dbg(socdev->dev, |
58 | "runtime->dma_area = %#lx dma_addr = %#lx dma_bytes = %zd " | 58 | "runtime->dma_area = %#lx dma_addr = %#lx dma_bytes = %zd " |
59 | "runtime->min_align %ld\n", | 59 | "runtime->min_align %ld\n", |
60 | (unsigned long)runtime->dma_area, | 60 | (unsigned long)runtime->dma_area, |
61 | (unsigned long)runtime->dma_addr, runtime->dma_bytes, | 61 | (unsigned long)runtime->dma_addr, runtime->dma_bytes, |
62 | runtime->min_align); | 62 | runtime->min_align); |
63 | dev_dbg(socdev->dev, | 63 | dev_dbg(socdev->dev, |
64 | "periods %d period_bytes %d stream %d\n", | 64 | "periods %d period_bytes %d stream %d\n", |
65 | params_periods(params), params_period_bytes(params), | 65 | params_periods(params), params_period_bytes(params), |
66 | substream->stream); | 66 | substream->stream); |
67 | 67 | ||
68 | dmadata->substream = substream; | 68 | dmadata->substream = substream; |
69 | dmadata->pos = 0; | 69 | dmadata->pos = 0; |
70 | return 0; | 70 | return 0; |
71 | } | 71 | } |
72 | 72 | ||
73 | static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream) | 73 | static int txx9aclc_pcm_hw_free(struct snd_pcm_substream *substream) |
74 | { | 74 | { |
75 | return snd_pcm_lib_free_pages(substream); | 75 | return snd_pcm_lib_free_pages(substream); |
76 | } | 76 | } |
77 | 77 | ||
78 | static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream) | 78 | static int txx9aclc_pcm_prepare(struct snd_pcm_substream *substream) |
79 | { | 79 | { |
80 | struct snd_pcm_runtime *runtime = substream->runtime; | 80 | struct snd_pcm_runtime *runtime = substream->runtime; |
81 | struct txx9aclc_dmadata *dmadata = runtime->private_data; | 81 | struct txx9aclc_dmadata *dmadata = runtime->private_data; |
82 | 82 | ||
83 | dmadata->dma_addr = runtime->dma_addr; | 83 | dmadata->dma_addr = runtime->dma_addr; |
84 | dmadata->buffer_bytes = snd_pcm_lib_buffer_bytes(substream); | 84 | dmadata->buffer_bytes = snd_pcm_lib_buffer_bytes(substream); |
85 | dmadata->period_bytes = snd_pcm_lib_period_bytes(substream); | 85 | dmadata->period_bytes = snd_pcm_lib_period_bytes(substream); |
86 | 86 | ||
87 | if (dmadata->buffer_bytes == dmadata->period_bytes) { | 87 | if (dmadata->buffer_bytes == dmadata->period_bytes) { |
88 | dmadata->frag_bytes = dmadata->period_bytes >> 1; | 88 | dmadata->frag_bytes = dmadata->period_bytes >> 1; |
89 | dmadata->frags = 2; | 89 | dmadata->frags = 2; |
90 | } else { | 90 | } else { |
91 | dmadata->frag_bytes = dmadata->period_bytes; | 91 | dmadata->frag_bytes = dmadata->period_bytes; |
92 | dmadata->frags = dmadata->buffer_bytes / dmadata->period_bytes; | 92 | dmadata->frags = dmadata->buffer_bytes / dmadata->period_bytes; |
93 | } | 93 | } |
94 | dmadata->frag_count = 0; | 94 | dmadata->frag_count = 0; |
95 | dmadata->pos = 0; | 95 | dmadata->pos = 0; |
96 | return 0; | 96 | return 0; |
97 | } | 97 | } |
98 | 98 | ||
99 | static void txx9aclc_dma_complete(void *arg) | 99 | static void txx9aclc_dma_complete(void *arg) |
100 | { | 100 | { |
101 | struct txx9aclc_dmadata *dmadata = arg; | 101 | struct txx9aclc_dmadata *dmadata = arg; |
102 | unsigned long flags; | 102 | unsigned long flags; |
103 | 103 | ||
104 | /* dma completion handler cannot submit new operations */ | 104 | /* dma completion handler cannot submit new operations */ |
105 | spin_lock_irqsave(&dmadata->dma_lock, flags); | 105 | spin_lock_irqsave(&dmadata->dma_lock, flags); |
106 | if (dmadata->frag_count >= 0) { | 106 | if (dmadata->frag_count >= 0) { |
107 | dmadata->dmacount--; | 107 | dmadata->dmacount--; |
108 | BUG_ON(dmadata->dmacount < 0); | 108 | BUG_ON(dmadata->dmacount < 0); |
109 | tasklet_schedule(&dmadata->tasklet); | 109 | tasklet_schedule(&dmadata->tasklet); |
110 | } | 110 | } |
111 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); | 111 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); |
112 | } | 112 | } |
113 | 113 | ||
114 | static struct dma_async_tx_descriptor * | 114 | static struct dma_async_tx_descriptor * |
115 | txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr) | 115 | txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr) |
116 | { | 116 | { |
117 | struct dma_chan *chan = dmadata->dma_chan; | 117 | struct dma_chan *chan = dmadata->dma_chan; |
118 | struct dma_async_tx_descriptor *desc; | 118 | struct dma_async_tx_descriptor *desc; |
119 | struct scatterlist sg; | 119 | struct scatterlist sg; |
120 | 120 | ||
121 | sg_init_table(&sg, 1); | 121 | sg_init_table(&sg, 1); |
122 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)), | 122 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf_dma_addr)), |
123 | dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1)); | 123 | dmadata->frag_bytes, buf_dma_addr & (PAGE_SIZE - 1)); |
124 | sg_dma_address(&sg) = buf_dma_addr; | 124 | sg_dma_address(&sg) = buf_dma_addr; |
125 | desc = chan->device->device_prep_slave_sg(chan, &sg, 1, | 125 | desc = chan->device->device_prep_slave_sg(chan, &sg, 1, |
126 | dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | 126 | dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
127 | DMA_TO_DEVICE : DMA_FROM_DEVICE, | 127 | DMA_TO_DEVICE : DMA_FROM_DEVICE, |
128 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | 128 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
129 | if (!desc) { | 129 | if (!desc) { |
130 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); | 130 | dev_err(&chan->dev->device, "cannot prepare slave dma\n"); |
131 | return NULL; | 131 | return NULL; |
132 | } | 132 | } |
133 | desc->callback = txx9aclc_dma_complete; | 133 | desc->callback = txx9aclc_dma_complete; |
134 | desc->callback_param = dmadata; | 134 | desc->callback_param = dmadata; |
135 | desc->tx_submit(desc); | 135 | desc->tx_submit(desc); |
136 | return desc; | 136 | return desc; |
137 | } | 137 | } |
138 | 138 | ||
139 | #define NR_DMA_CHAIN 2 | 139 | #define NR_DMA_CHAIN 2 |
140 | 140 | ||
141 | static void txx9aclc_dma_tasklet(unsigned long data) | 141 | static void txx9aclc_dma_tasklet(unsigned long data) |
142 | { | 142 | { |
143 | struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data; | 143 | struct txx9aclc_dmadata *dmadata = (struct txx9aclc_dmadata *)data; |
144 | struct dma_chan *chan = dmadata->dma_chan; | 144 | struct dma_chan *chan = dmadata->dma_chan; |
145 | struct dma_async_tx_descriptor *desc; | 145 | struct dma_async_tx_descriptor *desc; |
146 | struct snd_pcm_substream *substream = dmadata->substream; | 146 | struct snd_pcm_substream *substream = dmadata->substream; |
147 | u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | 147 | u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
148 | ACCTL_AUDODMA : ACCTL_AUDIDMA; | 148 | ACCTL_AUDODMA : ACCTL_AUDIDMA; |
149 | int i; | 149 | int i; |
150 | unsigned long flags; | 150 | unsigned long flags; |
151 | 151 | ||
152 | spin_lock_irqsave(&dmadata->dma_lock, flags); | 152 | spin_lock_irqsave(&dmadata->dma_lock, flags); |
153 | if (dmadata->frag_count < 0) { | 153 | if (dmadata->frag_count < 0) { |
154 | struct txx9aclc_soc_device *dev = | 154 | struct txx9aclc_soc_device *dev = |
155 | container_of(dmadata, struct txx9aclc_soc_device, | 155 | container_of(dmadata, struct txx9aclc_soc_device, |
156 | dmadata[substream->stream]); | 156 | dmadata[substream->stream]); |
157 | struct txx9aclc_plat_drvdata *drvdata = | 157 | struct txx9aclc_plat_drvdata *drvdata = |
158 | txx9aclc_get_plat_drvdata(dev); | 158 | txx9aclc_get_plat_drvdata(dev); |
159 | void __iomem *base = drvdata->base; | 159 | void __iomem *base = drvdata->base; |
160 | 160 | ||
161 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); | 161 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); |
162 | chan->device->device_control(chan, DMA_TERMINATE_ALL); | 162 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); |
163 | /* first time */ | 163 | /* first time */ |
164 | for (i = 0; i < NR_DMA_CHAIN; i++) { | 164 | for (i = 0; i < NR_DMA_CHAIN; i++) { |
165 | desc = txx9aclc_dma_submit(dmadata, | 165 | desc = txx9aclc_dma_submit(dmadata, |
166 | dmadata->dma_addr + i * dmadata->frag_bytes); | 166 | dmadata->dma_addr + i * dmadata->frag_bytes); |
167 | if (!desc) | 167 | if (!desc) |
168 | return; | 168 | return; |
169 | } | 169 | } |
170 | dmadata->dmacount = NR_DMA_CHAIN; | 170 | dmadata->dmacount = NR_DMA_CHAIN; |
171 | chan->device->device_issue_pending(chan); | 171 | chan->device->device_issue_pending(chan); |
172 | spin_lock_irqsave(&dmadata->dma_lock, flags); | 172 | spin_lock_irqsave(&dmadata->dma_lock, flags); |
173 | __raw_writel(ctlbit, base + ACCTLEN); | 173 | __raw_writel(ctlbit, base + ACCTLEN); |
174 | dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags; | 174 | dmadata->frag_count = NR_DMA_CHAIN % dmadata->frags; |
175 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); | 175 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); |
176 | return; | 176 | return; |
177 | } | 177 | } |
178 | BUG_ON(dmadata->dmacount >= NR_DMA_CHAIN); | 178 | BUG_ON(dmadata->dmacount >= NR_DMA_CHAIN); |
179 | while (dmadata->dmacount < NR_DMA_CHAIN) { | 179 | while (dmadata->dmacount < NR_DMA_CHAIN) { |
180 | dmadata->dmacount++; | 180 | dmadata->dmacount++; |
181 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); | 181 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); |
182 | desc = txx9aclc_dma_submit(dmadata, | 182 | desc = txx9aclc_dma_submit(dmadata, |
183 | dmadata->dma_addr + | 183 | dmadata->dma_addr + |
184 | dmadata->frag_count * dmadata->frag_bytes); | 184 | dmadata->frag_count * dmadata->frag_bytes); |
185 | if (!desc) | 185 | if (!desc) |
186 | return; | 186 | return; |
187 | chan->device->device_issue_pending(chan); | 187 | chan->device->device_issue_pending(chan); |
188 | 188 | ||
189 | spin_lock_irqsave(&dmadata->dma_lock, flags); | 189 | spin_lock_irqsave(&dmadata->dma_lock, flags); |
190 | dmadata->frag_count++; | 190 | dmadata->frag_count++; |
191 | dmadata->frag_count %= dmadata->frags; | 191 | dmadata->frag_count %= dmadata->frags; |
192 | dmadata->pos += dmadata->frag_bytes; | 192 | dmadata->pos += dmadata->frag_bytes; |
193 | dmadata->pos %= dmadata->buffer_bytes; | 193 | dmadata->pos %= dmadata->buffer_bytes; |
194 | if ((dmadata->frag_count * dmadata->frag_bytes) % | 194 | if ((dmadata->frag_count * dmadata->frag_bytes) % |
195 | dmadata->period_bytes == 0) | 195 | dmadata->period_bytes == 0) |
196 | snd_pcm_period_elapsed(substream); | 196 | snd_pcm_period_elapsed(substream); |
197 | } | 197 | } |
198 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); | 198 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); |
199 | } | 199 | } |
200 | 200 | ||
201 | static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) | 201 | static int txx9aclc_pcm_trigger(struct snd_pcm_substream *substream, int cmd) |
202 | { | 202 | { |
203 | struct txx9aclc_dmadata *dmadata = substream->runtime->private_data; | 203 | struct txx9aclc_dmadata *dmadata = substream->runtime->private_data; |
204 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 204 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
205 | struct txx9aclc_soc_device *dev = | 205 | struct txx9aclc_soc_device *dev = |
206 | container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev); | 206 | container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev); |
207 | struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev); | 207 | struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev); |
208 | void __iomem *base = drvdata->base; | 208 | void __iomem *base = drvdata->base; |
209 | unsigned long flags; | 209 | unsigned long flags; |
210 | int ret = 0; | 210 | int ret = 0; |
211 | u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? | 211 | u32 ctlbit = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
212 | ACCTL_AUDODMA : ACCTL_AUDIDMA; | 212 | ACCTL_AUDODMA : ACCTL_AUDIDMA; |
213 | 213 | ||
214 | spin_lock_irqsave(&dmadata->dma_lock, flags); | 214 | spin_lock_irqsave(&dmadata->dma_lock, flags); |
215 | switch (cmd) { | 215 | switch (cmd) { |
216 | case SNDRV_PCM_TRIGGER_START: | 216 | case SNDRV_PCM_TRIGGER_START: |
217 | dmadata->frag_count = -1; | 217 | dmadata->frag_count = -1; |
218 | tasklet_schedule(&dmadata->tasklet); | 218 | tasklet_schedule(&dmadata->tasklet); |
219 | break; | 219 | break; |
220 | case SNDRV_PCM_TRIGGER_STOP: | 220 | case SNDRV_PCM_TRIGGER_STOP: |
221 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: | 221 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
222 | case SNDRV_PCM_TRIGGER_SUSPEND: | 222 | case SNDRV_PCM_TRIGGER_SUSPEND: |
223 | __raw_writel(ctlbit, base + ACCTLDIS); | 223 | __raw_writel(ctlbit, base + ACCTLDIS); |
224 | break; | 224 | break; |
225 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: | 225 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
226 | case SNDRV_PCM_TRIGGER_RESUME: | 226 | case SNDRV_PCM_TRIGGER_RESUME: |
227 | __raw_writel(ctlbit, base + ACCTLEN); | 227 | __raw_writel(ctlbit, base + ACCTLEN); |
228 | break; | 228 | break; |
229 | default: | 229 | default: |
230 | ret = -EINVAL; | 230 | ret = -EINVAL; |
231 | } | 231 | } |
232 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); | 232 | spin_unlock_irqrestore(&dmadata->dma_lock, flags); |
233 | return ret; | 233 | return ret; |
234 | } | 234 | } |
235 | 235 | ||
236 | static snd_pcm_uframes_t | 236 | static snd_pcm_uframes_t |
237 | txx9aclc_pcm_pointer(struct snd_pcm_substream *substream) | 237 | txx9aclc_pcm_pointer(struct snd_pcm_substream *substream) |
238 | { | 238 | { |
239 | struct txx9aclc_dmadata *dmadata = substream->runtime->private_data; | 239 | struct txx9aclc_dmadata *dmadata = substream->runtime->private_data; |
240 | 240 | ||
241 | return bytes_to_frames(substream->runtime, dmadata->pos); | 241 | return bytes_to_frames(substream->runtime, dmadata->pos); |
242 | } | 242 | } |
243 | 243 | ||
244 | static int txx9aclc_pcm_open(struct snd_pcm_substream *substream) | 244 | static int txx9aclc_pcm_open(struct snd_pcm_substream *substream) |
245 | { | 245 | { |
246 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | 246 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
247 | struct txx9aclc_soc_device *dev = | 247 | struct txx9aclc_soc_device *dev = |
248 | container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev); | 248 | container_of(rtd->socdev, struct txx9aclc_soc_device, soc_dev); |
249 | struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream]; | 249 | struct txx9aclc_dmadata *dmadata = &dev->dmadata[substream->stream]; |
250 | int ret; | 250 | int ret; |
251 | 251 | ||
252 | ret = snd_soc_set_runtime_hwparams(substream, &txx9aclc_pcm_hardware); | 252 | ret = snd_soc_set_runtime_hwparams(substream, &txx9aclc_pcm_hardware); |
253 | if (ret) | 253 | if (ret) |
254 | return ret; | 254 | return ret; |
255 | /* ensure that buffer size is a multiple of period size */ | 255 | /* ensure that buffer size is a multiple of period size */ |
256 | ret = snd_pcm_hw_constraint_integer(substream->runtime, | 256 | ret = snd_pcm_hw_constraint_integer(substream->runtime, |
257 | SNDRV_PCM_HW_PARAM_PERIODS); | 257 | SNDRV_PCM_HW_PARAM_PERIODS); |
258 | if (ret < 0) | 258 | if (ret < 0) |
259 | return ret; | 259 | return ret; |
260 | substream->runtime->private_data = dmadata; | 260 | substream->runtime->private_data = dmadata; |
261 | return 0; | 261 | return 0; |
262 | } | 262 | } |
263 | 263 | ||
264 | static int txx9aclc_pcm_close(struct snd_pcm_substream *substream) | 264 | static int txx9aclc_pcm_close(struct snd_pcm_substream *substream) |
265 | { | 265 | { |
266 | struct txx9aclc_dmadata *dmadata = substream->runtime->private_data; | 266 | struct txx9aclc_dmadata *dmadata = substream->runtime->private_data; |
267 | struct dma_chan *chan = dmadata->dma_chan; | 267 | struct dma_chan *chan = dmadata->dma_chan; |
268 | 268 | ||
269 | dmadata->frag_count = -1; | 269 | dmadata->frag_count = -1; |
270 | chan->device->device_control(chan, DMA_TERMINATE_ALL); | 270 | chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); |
271 | return 0; | 271 | return 0; |
272 | } | 272 | } |
273 | 273 | ||
274 | static struct snd_pcm_ops txx9aclc_pcm_ops = { | 274 | static struct snd_pcm_ops txx9aclc_pcm_ops = { |
275 | .open = txx9aclc_pcm_open, | 275 | .open = txx9aclc_pcm_open, |
276 | .close = txx9aclc_pcm_close, | 276 | .close = txx9aclc_pcm_close, |
277 | .ioctl = snd_pcm_lib_ioctl, | 277 | .ioctl = snd_pcm_lib_ioctl, |
278 | .hw_params = txx9aclc_pcm_hw_params, | 278 | .hw_params = txx9aclc_pcm_hw_params, |
279 | .hw_free = txx9aclc_pcm_hw_free, | 279 | .hw_free = txx9aclc_pcm_hw_free, |
280 | .prepare = txx9aclc_pcm_prepare, | 280 | .prepare = txx9aclc_pcm_prepare, |
281 | .trigger = txx9aclc_pcm_trigger, | 281 | .trigger = txx9aclc_pcm_trigger, |
282 | .pointer = txx9aclc_pcm_pointer, | 282 | .pointer = txx9aclc_pcm_pointer, |
283 | }; | 283 | }; |
284 | 284 | ||
285 | static void txx9aclc_pcm_free_dma_buffers(struct snd_pcm *pcm) | 285 | static void txx9aclc_pcm_free_dma_buffers(struct snd_pcm *pcm) |
286 | { | 286 | { |
287 | snd_pcm_lib_preallocate_free_for_all(pcm); | 287 | snd_pcm_lib_preallocate_free_for_all(pcm); |
288 | } | 288 | } |
289 | 289 | ||
290 | static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai, | 290 | static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai, |
291 | struct snd_pcm *pcm) | 291 | struct snd_pcm *pcm) |
292 | { | 292 | { |
293 | return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, | 293 | return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, |
294 | card->dev, 64 * 1024, 4 * 1024 * 1024); | 294 | card->dev, 64 * 1024, 4 * 1024 * 1024); |
295 | } | 295 | } |
296 | 296 | ||
297 | static bool filter(struct dma_chan *chan, void *param) | 297 | static bool filter(struct dma_chan *chan, void *param) |
298 | { | 298 | { |
299 | struct txx9aclc_dmadata *dmadata = param; | 299 | struct txx9aclc_dmadata *dmadata = param; |
300 | char *devname; | 300 | char *devname; |
301 | bool found = false; | 301 | bool found = false; |
302 | 302 | ||
303 | devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name, | 303 | devname = kasprintf(GFP_KERNEL, "%s.%d", dmadata->dma_res->name, |
304 | (int)dmadata->dma_res->start); | 304 | (int)dmadata->dma_res->start); |
305 | if (strcmp(dev_name(chan->device->dev), devname) == 0) { | 305 | if (strcmp(dev_name(chan->device->dev), devname) == 0) { |
306 | chan->private = &dmadata->dma_slave; | 306 | chan->private = &dmadata->dma_slave; |
307 | found = true; | 307 | found = true; |
308 | } | 308 | } |
309 | kfree(devname); | 309 | kfree(devname); |
310 | return found; | 310 | return found; |
311 | } | 311 | } |
312 | 312 | ||
313 | static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev, | 313 | static int txx9aclc_dma_init(struct txx9aclc_soc_device *dev, |
314 | struct txx9aclc_dmadata *dmadata) | 314 | struct txx9aclc_dmadata *dmadata) |
315 | { | 315 | { |
316 | struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev); | 316 | struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev); |
317 | struct txx9dmac_slave *ds = &dmadata->dma_slave; | 317 | struct txx9dmac_slave *ds = &dmadata->dma_slave; |
318 | dma_cap_mask_t mask; | 318 | dma_cap_mask_t mask; |
319 | 319 | ||
320 | spin_lock_init(&dmadata->dma_lock); | 320 | spin_lock_init(&dmadata->dma_lock); |
321 | 321 | ||
322 | ds->reg_width = sizeof(u32); | 322 | ds->reg_width = sizeof(u32); |
323 | if (dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK) { | 323 | if (dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
324 | ds->tx_reg = drvdata->physbase + ACAUDODAT; | 324 | ds->tx_reg = drvdata->physbase + ACAUDODAT; |
325 | ds->rx_reg = 0; | 325 | ds->rx_reg = 0; |
326 | } else { | 326 | } else { |
327 | ds->tx_reg = 0; | 327 | ds->tx_reg = 0; |
328 | ds->rx_reg = drvdata->physbase + ACAUDIDAT; | 328 | ds->rx_reg = drvdata->physbase + ACAUDIDAT; |
329 | } | 329 | } |
330 | 330 | ||
331 | /* Try to grab a DMA channel */ | 331 | /* Try to grab a DMA channel */ |
332 | dma_cap_zero(mask); | 332 | dma_cap_zero(mask); |
333 | dma_cap_set(DMA_SLAVE, mask); | 333 | dma_cap_set(DMA_SLAVE, mask); |
334 | dmadata->dma_chan = dma_request_channel(mask, filter, dmadata); | 334 | dmadata->dma_chan = dma_request_channel(mask, filter, dmadata); |
335 | if (!dmadata->dma_chan) { | 335 | if (!dmadata->dma_chan) { |
336 | dev_err(dev->soc_dev.dev, | 336 | dev_err(dev->soc_dev.dev, |
337 | "DMA channel for %s is not available\n", | 337 | "DMA channel for %s is not available\n", |
338 | dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK ? | 338 | dmadata->stream == SNDRV_PCM_STREAM_PLAYBACK ? |
339 | "playback" : "capture"); | 339 | "playback" : "capture"); |
340 | return -EBUSY; | 340 | return -EBUSY; |
341 | } | 341 | } |
342 | tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet, | 342 | tasklet_init(&dmadata->tasklet, txx9aclc_dma_tasklet, |
343 | (unsigned long)dmadata); | 343 | (unsigned long)dmadata); |
344 | return 0; | 344 | return 0; |
345 | } | 345 | } |
346 | 346 | ||
347 | static int txx9aclc_pcm_probe(struct platform_device *pdev) | 347 | static int txx9aclc_pcm_probe(struct platform_device *pdev) |
348 | { | 348 | { |
349 | struct snd_soc_device *socdev = platform_get_drvdata(pdev); | 349 | struct snd_soc_device *socdev = platform_get_drvdata(pdev); |
350 | struct txx9aclc_soc_device *dev = | 350 | struct txx9aclc_soc_device *dev = |
351 | container_of(socdev, struct txx9aclc_soc_device, soc_dev); | 351 | container_of(socdev, struct txx9aclc_soc_device, soc_dev); |
352 | struct resource *r; | 352 | struct resource *r; |
353 | int i; | 353 | int i; |
354 | int ret; | 354 | int ret; |
355 | 355 | ||
356 | dev->dmadata[0].stream = SNDRV_PCM_STREAM_PLAYBACK; | 356 | dev->dmadata[0].stream = SNDRV_PCM_STREAM_PLAYBACK; |
357 | dev->dmadata[1].stream = SNDRV_PCM_STREAM_CAPTURE; | 357 | dev->dmadata[1].stream = SNDRV_PCM_STREAM_CAPTURE; |
358 | for (i = 0; i < 2; i++) { | 358 | for (i = 0; i < 2; i++) { |
359 | r = platform_get_resource(dev->aclc_pdev, IORESOURCE_DMA, i); | 359 | r = platform_get_resource(dev->aclc_pdev, IORESOURCE_DMA, i); |
360 | if (!r) { | 360 | if (!r) { |
361 | ret = -EBUSY; | 361 | ret = -EBUSY; |
362 | goto exit; | 362 | goto exit; |
363 | } | 363 | } |
364 | dev->dmadata[i].dma_res = r; | 364 | dev->dmadata[i].dma_res = r; |
365 | ret = txx9aclc_dma_init(dev, &dev->dmadata[i]); | 365 | ret = txx9aclc_dma_init(dev, &dev->dmadata[i]); |
366 | if (ret) | 366 | if (ret) |
367 | goto exit; | 367 | goto exit; |
368 | } | 368 | } |
369 | return 0; | 369 | return 0; |
370 | 370 | ||
371 | exit: | 371 | exit: |
372 | for (i = 0; i < 2; i++) { | 372 | for (i = 0; i < 2; i++) { |
373 | if (dev->dmadata[i].dma_chan) | 373 | if (dev->dmadata[i].dma_chan) |
374 | dma_release_channel(dev->dmadata[i].dma_chan); | 374 | dma_release_channel(dev->dmadata[i].dma_chan); |
375 | dev->dmadata[i].dma_chan = NULL; | 375 | dev->dmadata[i].dma_chan = NULL; |
376 | } | 376 | } |
377 | return ret; | 377 | return ret; |
378 | } | 378 | } |
379 | 379 | ||
380 | static int txx9aclc_pcm_remove(struct platform_device *pdev) | 380 | static int txx9aclc_pcm_remove(struct platform_device *pdev) |
381 | { | 381 | { |
382 | struct snd_soc_device *socdev = platform_get_drvdata(pdev); | 382 | struct snd_soc_device *socdev = platform_get_drvdata(pdev); |
383 | struct txx9aclc_soc_device *dev = | 383 | struct txx9aclc_soc_device *dev = |
384 | container_of(socdev, struct txx9aclc_soc_device, soc_dev); | 384 | container_of(socdev, struct txx9aclc_soc_device, soc_dev); |
385 | struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev); | 385 | struct txx9aclc_plat_drvdata *drvdata = txx9aclc_get_plat_drvdata(dev); |
386 | void __iomem *base = drvdata->base; | 386 | void __iomem *base = drvdata->base; |
387 | int i; | 387 | int i; |
388 | 388 | ||
389 | /* disable all FIFO DMAs */ | 389 | /* disable all FIFO DMAs */ |
390 | __raw_writel(ACCTL_AUDODMA | ACCTL_AUDIDMA, base + ACCTLDIS); | 390 | __raw_writel(ACCTL_AUDODMA | ACCTL_AUDIDMA, base + ACCTLDIS); |
391 | /* dummy R/W to clear pending DMAREQ if any */ | 391 | /* dummy R/W to clear pending DMAREQ if any */ |
392 | __raw_writel(__raw_readl(base + ACAUDIDAT), base + ACAUDODAT); | 392 | __raw_writel(__raw_readl(base + ACAUDIDAT), base + ACAUDODAT); |
393 | 393 | ||
394 | for (i = 0; i < 2; i++) { | 394 | for (i = 0; i < 2; i++) { |
395 | struct txx9aclc_dmadata *dmadata = &dev->dmadata[i]; | 395 | struct txx9aclc_dmadata *dmadata = &dev->dmadata[i]; |
396 | struct dma_chan *chan = dmadata->dma_chan; | 396 | struct dma_chan *chan = dmadata->dma_chan; |
397 | if (chan) { | 397 | if (chan) { |
398 | dmadata->frag_count = -1; | 398 | dmadata->frag_count = -1; |
399 | chan->device->device_control(chan, DMA_TERMINATE_ALL); | 399 | chan->device->device_control(chan, |
400 | DMA_TERMINATE_ALL, 0); | ||
400 | dma_release_channel(chan); | 401 | dma_release_channel(chan); |
401 | } | 402 | } |
402 | dev->dmadata[i].dma_chan = NULL; | 403 | dev->dmadata[i].dma_chan = NULL; |
403 | } | 404 | } |
404 | return 0; | 405 | return 0; |
405 | } | 406 | } |
406 | 407 | ||
407 | struct snd_soc_platform txx9aclc_soc_platform = { | 408 | struct snd_soc_platform txx9aclc_soc_platform = { |
408 | .name = "txx9aclc-audio", | 409 | .name = "txx9aclc-audio", |
409 | .probe = txx9aclc_pcm_probe, | 410 | .probe = txx9aclc_pcm_probe, |
410 | .remove = txx9aclc_pcm_remove, | 411 | .remove = txx9aclc_pcm_remove, |
411 | .pcm_ops = &txx9aclc_pcm_ops, | 412 | .pcm_ops = &txx9aclc_pcm_ops, |
412 | .pcm_new = txx9aclc_pcm_new, | 413 | .pcm_new = txx9aclc_pcm_new, |
413 | .pcm_free = txx9aclc_pcm_free_dma_buffers, | 414 | .pcm_free = txx9aclc_pcm_free_dma_buffers, |
414 | }; | 415 | }; |
415 | EXPORT_SYMBOL_GPL(txx9aclc_soc_platform); | 416 | EXPORT_SYMBOL_GPL(txx9aclc_soc_platform); |
416 | 417 | ||
417 | static int __init txx9aclc_soc_platform_init(void) | 418 | static int __init txx9aclc_soc_platform_init(void) |
418 | { | 419 | { |
419 | return snd_soc_register_platform(&txx9aclc_soc_platform); | 420 | return snd_soc_register_platform(&txx9aclc_soc_platform); |
420 | } | 421 | } |
421 | 422 | ||
422 | static void __exit txx9aclc_soc_platform_exit(void) | 423 | static void __exit txx9aclc_soc_platform_exit(void) |
423 | { | 424 | { |
424 | snd_soc_unregister_platform(&txx9aclc_soc_platform); | 425 | snd_soc_unregister_platform(&txx9aclc_soc_platform); |
425 | } | 426 | } |
426 | 427 | ||
427 | module_init(txx9aclc_soc_platform_init); | 428 | module_init(txx9aclc_soc_platform_init); |
428 | module_exit(txx9aclc_soc_platform_exit); | 429 | module_exit(txx9aclc_soc_platform_exit); |
429 | 430 | ||
430 | MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); | 431 | MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>"); |
431 | MODULE_DESCRIPTION("TXx9 ACLC Audio DMA driver"); | 432 | MODULE_DESCRIPTION("TXx9 ACLC Audio DMA driver"); |
432 | MODULE_LICENSE("GPL"); | 433 | MODULE_LICENSE("GPL"); |
433 | 434 |