Commit eb8590b504caacb029dea4540e0b0dcc98da4381
Committed by Vinod Koul
1 parent c0dfc04ac9
Exists in master and in 7 other branches
pch_dma: modify pci device table definition
Signed-off-by: Tomoya MORINAGA <tomoya-linux@dsn.okisemi.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Showing 1 changed file with 1 addition and 1 deletion
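
Note: the changed hunk itself falls below the excerpt reproduced here. Going by the subject line, the edit converts the open-coded PCI device table declaration to the DEFINE_PCI_DEVICE_TABLE() helper, along these lines (a reconstruction from the subject, not copied from the hunk):

-static const struct pci_device_id pch_dma_id_table[] = {
+DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {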
drivers/dma/pch_dma.c
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#define DRV_NAME "pch-dma"

#define DMA_CTL0_DISABLE		0x0
#define DMA_CTL0_SG			0x1
#define DMA_CTL0_ONESHOT		0x2
#define DMA_CTL0_MODE_MASK_BITS		0x3
#define DMA_CTL0_DIR_SHIFT_BITS		2
#define DMA_CTL0_BITS_PER_CH		4
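
/*
 * Each channel owns a 4-bit field in CTL0 (channels 0-7) or CTL3
 * (channels 8-11): bits [1:0] hold the transfer mode (disable /
 * scatter-gather / one-shot) and bit 2 the direction, so channel n's
 * field begins at bit DMA_CTL0_BITS_PER_CH * n.  See pdc_set_dir()
 * and pdc_set_mode() below.
 */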

#define DMA_CTL2_START_SHIFT_BITS	8
#define DMA_CTL2_IRQ_ENABLE_MASK	((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE			0x0
#define DMA_STATUS_DESC_READ		0x1
#define DMA_STATUS_WAIT			0x2
#define DMA_STATUS_ACCESS		0x3
#define DMA_STATUS_BITS_PER_CH		2
#define DMA_STATUS_MASK_BITS		0x3
#define DMA_STATUS_SHIFT_BITS		16
#define DMA_STATUS_IRQ(x)		(0x1 << (x))
#define DMA_STATUS_ERR(x)		(0x1 << ((x) + 8))

#define DMA_DESC_WIDTH_SHIFT_BITS	12
#define DMA_DESC_WIDTH_1_BYTE		(0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES		(0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES		(0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE	0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES	0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES	0x7FF
#define DMA_DESC_END_WITHOUT_IRQ	0x0
#define DMA_DESC_END_WITH_IRQ		0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ	0x2
#define DMA_DESC_FOLLOW_WITH_IRQ	0x3

#define MAX_CHAN_NR			8

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};
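
/*
 * Software descriptor: regs is the hardware-visible block the
 * controller fetches from txd.phys when chaining in scatter-gather
 * mode, desc_node links the descriptor into the channel lists, and
 * tx_list holds the remaining descriptors of a multi-entry chain.
 */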
struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem		*membase;
	enum dma_data_direction	dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	dma_cookie_t		completed_cookie;
	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

#define PDC_DEV_ADDR	0x00
#define PDC_MEM_ADDR	0x04
#define PDC_SIZE	0x08
#define PDC_NEXT	0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem		*membase;
	struct pci_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0	0x00
#define PCH_DMA_CTL1	0x04
#define PCH_DMA_CTL2	0x08
#define PCH_DMA_CTL3	0x0C
#define PCH_DMA_STS0	0x10
#define PCH_DMA_STS1	0x14

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct device *chan2parent(struct dma_chan *chan)
{
	return chan->dev->device.parent;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << chan->chan_id;
	else
		val &= ~(0x1 << chan->chan_id);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		if (pd_chan->dir == DMA_TO_DEVICE)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			 (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);

		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */

		val = dma_readl(pd, CTL3);

		val &= ~(DMA_CTL0_MODE_MASK_BITS <<
			 (DMA_CTL0_BITS_PER_CH * ch));
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);

		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	if (pdc_get_status(pd_chan) == DMA_STATUS_IDLE)
		return true;
	else
		return false;
}
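
/*
 * Start a transfer on an idle channel.  A lone descriptor is written
 * straight into the channel registers and run in one-shot mode; a
 * chain is started in scatter-gather mode by pointing NEXT at the
 * first hardware descriptor and letting the controller fetch the rest.
 */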
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	if (callback)
		callback(param);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

static dma_cookie_t pdc_assign_cookie(struct pch_dma_chan *pd_chan,
				      struct pch_dma_desc *desc)
{
	dma_cookie_t cookie = pd_chan->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	pd_chan->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	return cookie;
}

static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);
	dma_cookie_t cookie;

	spin_lock(&pd_chan->lock);
	cookie = pdc_assign_cookie(pd_chan, desc);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}
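
/*
 * Descriptors are carved from a pci_pool and recycled via the
 * channel's free_list: pdc_desc_get() reuses the first ACKed entry
 * and only falls back to a fresh GFP_NOIO allocation when none is
 * found, while pdc_desc_put() returns a descriptor and its chain.
 */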
static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = pci_pool_alloc(pd->pool, flags, &addr);
	if (desc) {
		memset(desc, 0, sizeof(struct pch_dma_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_bh(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	pd_chan->completed_cookie = chan->cookie = 1;
	spin_unlock_bh(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_bh(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_bh(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		pci_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}
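
/*
 * Completion is tracked purely by cookie ordering: a transaction is
 * reported complete once its cookie is no newer than the channel's
 * completed_cookie, as judged by dma_async_is_complete().
 */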
static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_completed;
	int ret;

	spin_lock_bh(&pd_chan->lock);
	last_completed = pd_chan->completed_cookie;
	last_used = chan->cookie;
	spin_unlock_bh(&pd_chan->lock);

	ret = dma_async_is_complete(cookie, last_completed, last_used);

	dma_set_tx_state(txstate, last_completed, last_used, 0);

	return ret;
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}
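
/*
 * Build a slave scatter-gather transaction: each sg entry becomes one
 * hardware descriptor, with the transfer width and FIFO address taken
 * from the pch_dma_slave that the client stored in chan->private
 * before requesting the channel.  A minimal client sketch (the
 * peripheral register address is assumed, not part of this driver):
 *
 *	struct pch_dma_slave slave = {
 *		.tx_reg = fifo_phys_addr,	-- assumed, peripheral-specific
 *		.width  = PCH_DMA_WIDTH_1_BYTE,
 *	};
 *	chan->private = &slave;
 *	txd = chan->device->device_prep_slave_sg(chan, sgl, nents,
 *			DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
 */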
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_data_direction direction, unsigned long flags)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_FROM_DEVICE)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_TO_DEVICE)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_phys(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			     unsigned long arg)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;

	spin_lock_bh(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_bh(&pd_chan->lock);

	return 0;
}
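
/*
 * Interrupt handling is split in two: pd_irq() merely latches a
 * channel's error bit and schedules its tasklet, and pdc_tasklet()
 * then advances or unwinds the descriptor lists outside hard-IRQ
 * context.
 */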
static void pdc_tasklet(unsigned long data)
{
	struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data;
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	int i;
	int ret = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (sts0 & DMA_STATUS_IRQ(i)) {
			if (sts0 & DMA_STATUS_ERR(i))
				set_bit(0, &pd_chan->err_status);

			tasklet_schedule(&pd_chan->tasklet);
			ret = IRQ_HANDLED;
		}
	}

	/* clear interrupt bits in status register */
	dma_writel(pd, STS0, sts0);

	return ret;
}
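
/*
 * Legacy PCI power-management hooks: the four control registers and
 * every channel's descriptor registers are stashed in struct pch_dma
 * across suspend and written back on resume.
 */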
#ifdef CONFIG_PM
static void pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

static int pch_dma_resume(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	err = pci_enable_device(pdev);
	if (err) {
		dev_dbg(&pdev->dev, "failed to enable device\n");
		return err;
	}

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}
#endif
804 | static int __devinit pch_dma_probe(struct pci_dev *pdev, | 804 | static int __devinit pch_dma_probe(struct pci_dev *pdev, |
805 | const struct pci_device_id *id) | 805 | const struct pci_device_id *id) |
806 | { | 806 | { |
807 | struct pch_dma *pd; | 807 | struct pch_dma *pd; |
808 | struct pch_dma_regs *regs; | 808 | struct pch_dma_regs *regs; |
809 | unsigned int nr_channels; | 809 | unsigned int nr_channels; |
810 | int err; | 810 | int err; |
811 | int i; | 811 | int i; |
812 | 812 | ||
813 | nr_channels = id->driver_data; | 813 | nr_channels = id->driver_data; |
814 | pd = kzalloc(sizeof(struct pch_dma)+ | 814 | pd = kzalloc(sizeof(struct pch_dma)+ |
815 | sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL); | 815 | sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL); |
816 | if (!pd) | 816 | if (!pd) |
817 | return -ENOMEM; | 817 | return -ENOMEM; |
818 | 818 | ||
819 | pci_set_drvdata(pdev, pd); | 819 | pci_set_drvdata(pdev, pd); |
820 | 820 | ||
821 | err = pci_enable_device(pdev); | 821 | err = pci_enable_device(pdev); |
822 | if (err) { | 822 | if (err) { |
823 | dev_err(&pdev->dev, "Cannot enable PCI device\n"); | 823 | dev_err(&pdev->dev, "Cannot enable PCI device\n"); |
824 | goto err_free_mem; | 824 | goto err_free_mem; |
825 | } | 825 | } |
826 | 826 | ||
827 | if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { | 827 | if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { |
828 | dev_err(&pdev->dev, "Cannot find proper base address\n"); | 828 | dev_err(&pdev->dev, "Cannot find proper base address\n"); |
829 | goto err_disable_pdev; | 829 | goto err_disable_pdev; |
830 | } | 830 | } |
831 | 831 | ||
832 | err = pci_request_regions(pdev, DRV_NAME); | 832 | err = pci_request_regions(pdev, DRV_NAME); |
833 | if (err) { | 833 | if (err) { |
834 | dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); | 834 | dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); |
835 | goto err_disable_pdev; | 835 | goto err_disable_pdev; |
836 | } | 836 | } |
837 | 837 | ||
838 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 838 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
839 | if (err) { | 839 | if (err) { |
840 | dev_err(&pdev->dev, "Cannot set proper DMA config\n"); | 840 | dev_err(&pdev->dev, "Cannot set proper DMA config\n"); |
841 | goto err_free_res; | 841 | goto err_free_res; |
842 | } | 842 | } |
843 | 843 | ||
844 | regs = pd->membase = pci_iomap(pdev, 1, 0); | 844 | regs = pd->membase = pci_iomap(pdev, 1, 0); |
845 | if (!pd->membase) { | 845 | if (!pd->membase) { |
846 | dev_err(&pdev->dev, "Cannot map MMIO registers\n"); | 846 | dev_err(&pdev->dev, "Cannot map MMIO registers\n"); |
847 | err = -ENOMEM; | 847 | err = -ENOMEM; |
848 | goto err_free_res; | 848 | goto err_free_res; |
849 | } | 849 | } |
850 | 850 | ||
851 | pci_set_master(pdev); | 851 | pci_set_master(pdev); |
852 | 852 | ||
853 | err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); | 853 | err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd); |
854 | if (err) { | 854 | if (err) { |
855 | dev_err(&pdev->dev, "Failed to request IRQ\n"); | 855 | dev_err(&pdev->dev, "Failed to request IRQ\n"); |
856 | goto err_iounmap; | 856 | goto err_iounmap; |
857 | } | 857 | } |
858 | 858 | ||
859 | pd->pool = pci_pool_create("pch_dma_desc_pool", pdev, | 859 | pd->pool = pci_pool_create("pch_dma_desc_pool", pdev, |
860 | sizeof(struct pch_dma_desc), 4, 0); | 860 | sizeof(struct pch_dma_desc), 4, 0); |
861 | if (!pd->pool) { | 861 | if (!pd->pool) { |
862 | dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n"); | 862 | dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n"); |
863 | err = -ENOMEM; | 863 | err = -ENOMEM; |
864 | goto err_free_irq; | 864 | goto err_free_irq; |
865 | } | 865 | } |
866 | 866 | ||
867 | pd->dma.dev = &pdev->dev; | 867 | pd->dma.dev = &pdev->dev; |
868 | pd->dma.chancnt = nr_channels; | 868 | pd->dma.chancnt = nr_channels; |
869 | 869 | ||
870 | INIT_LIST_HEAD(&pd->dma.channels); | 870 | INIT_LIST_HEAD(&pd->dma.channels); |
871 | 871 | ||
872 | for (i = 0; i < nr_channels; i++) { | 872 | for (i = 0; i < nr_channels; i++) { |
873 | struct pch_dma_chan *pd_chan = &pd->channels[i]; | 873 | struct pch_dma_chan *pd_chan = &pd->channels[i]; |
874 | 874 | ||
875 | pd_chan->chan.device = &pd->dma; | 875 | pd_chan->chan.device = &pd->dma; |
876 | pd_chan->chan.cookie = 1; | 876 | pd_chan->chan.cookie = 1; |
877 | pd_chan->chan.chan_id = i; | 877 | pd_chan->chan.chan_id = i; |
878 | 878 | ||
879 | pd_chan->membase = ®s->desc[i]; | 879 | pd_chan->membase = ®s->desc[i]; |
880 | 880 | ||
881 | spin_lock_init(&pd_chan->lock); | 881 | spin_lock_init(&pd_chan->lock); |
882 | 882 | ||
883 | INIT_LIST_HEAD(&pd_chan->active_list); | 883 | INIT_LIST_HEAD(&pd_chan->active_list); |
884 | INIT_LIST_HEAD(&pd_chan->queue); | 884 | INIT_LIST_HEAD(&pd_chan->queue); |
885 | INIT_LIST_HEAD(&pd_chan->free_list); | 885 | INIT_LIST_HEAD(&pd_chan->free_list); |
886 | 886 | ||
887 | tasklet_init(&pd_chan->tasklet, pdc_tasklet, | 887 | tasklet_init(&pd_chan->tasklet, pdc_tasklet, |
888 | (unsigned long)pd_chan); | 888 | (unsigned long)pd_chan); |
889 | list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels); | 889 | list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels); |
890 | } | 890 | } |
891 | 891 | ||
892 | dma_cap_zero(pd->dma.cap_mask); | 892 | dma_cap_zero(pd->dma.cap_mask); |
893 | dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask); | 893 | dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask); |
894 | dma_cap_set(DMA_SLAVE, pd->dma.cap_mask); | 894 | dma_cap_set(DMA_SLAVE, pd->dma.cap_mask); |
895 | 895 | ||
896 | pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources; | 896 | pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources; |
897 | pd->dma.device_free_chan_resources = pd_free_chan_resources; | 897 | pd->dma.device_free_chan_resources = pd_free_chan_resources; |
898 | pd->dma.device_tx_status = pd_tx_status; | 898 | pd->dma.device_tx_status = pd_tx_status; |
899 | pd->dma.device_issue_pending = pd_issue_pending; | 899 | pd->dma.device_issue_pending = pd_issue_pending; |
900 | pd->dma.device_prep_slave_sg = pd_prep_slave_sg; | 900 | pd->dma.device_prep_slave_sg = pd_prep_slave_sg; |
901 | pd->dma.device_control = pd_device_control; | 901 | pd->dma.device_control = pd_device_control; |
902 | 902 | ||
903 | err = dma_async_device_register(&pd->dma); | 903 | err = dma_async_device_register(&pd->dma); |
904 | if (err) { | 904 | if (err) { |
905 | dev_err(&pdev->dev, "Failed to register DMA device\n"); | 905 | dev_err(&pdev->dev, "Failed to register DMA device\n"); |
906 | goto err_free_pool; | 906 | goto err_free_pool; |
907 | } | 907 | } |
908 | 908 | ||
909 | return 0; | 909 | return 0; |
910 | 910 | ||
911 | err_free_pool: | 911 | err_free_pool: |
912 | pci_pool_destroy(pd->pool); | 912 | pci_pool_destroy(pd->pool); |
913 | err_free_irq: | 913 | err_free_irq: |
914 | free_irq(pdev->irq, pd); | 914 | free_irq(pdev->irq, pd); |
915 | err_iounmap: | 915 | err_iounmap: |
916 | pci_iounmap(pdev, pd->membase); | 916 | pci_iounmap(pdev, pd->membase); |
917 | err_free_res: | 917 | err_free_res: |
918 | pci_release_regions(pdev); | 918 | pci_release_regions(pdev); |
919 | err_disable_pdev: | 919 | err_disable_pdev: |
920 | pci_disable_device(pdev); | 920 | pci_disable_device(pdev); |
921 | err_free_mem: | 921 | err_free_mem: |
922 | return err; | 922 | return err; |
923 | } | 923 | } |
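The error unwinding above follows the kernel's standard goto-cleanup idiom: each failure jumps to the label that releases everything acquired so far, and the labels run in exact reverse order of acquisition (descriptor pool, IRQ, MMIO mapping, PCI regions, device enable). One wrinkle: the final err_free_mem label only returns the error code, so if pd was allocated with kzalloc() earlier in probe, as the label name suggests, the early-exit paths would leak it; a kfree(pd) there would close that hole. A minimal sketch of the idiom under those assumptions, with hypothetical names (example_probe, example_irq) rather than this driver's:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* stub handler, just for the sketch */
}

static int example_probe(struct pci_dev *pdev)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;		/* nothing to unwind yet */

	err = pci_request_regions(pdev, "example");
	if (err)
		goto err_disable;	/* undo pci_enable_device() only */

	err = request_irq(pdev->irq, example_irq, IRQF_SHARED, "example", pdev);
	if (err)
		goto err_release;	/* undo regions, then device enable */

	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return err;
}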
924 | 924 | ||
925 | static void __devexit pch_dma_remove(struct pci_dev *pdev) | 925 | static void __devexit pch_dma_remove(struct pci_dev *pdev) |
926 | { | 926 | { |
927 | struct pch_dma *pd = pci_get_drvdata(pdev); | 927 | struct pch_dma *pd = pci_get_drvdata(pdev); |
928 | struct pch_dma_chan *pd_chan; | 928 | struct pch_dma_chan *pd_chan; |
929 | struct dma_chan *chan, *_c; | 929 | struct dma_chan *chan, *_c; |
930 | 930 | ||
931 | if (pd) { | 931 | if (pd) { |
932 | dma_async_device_unregister(&pd->dma); | 932 | dma_async_device_unregister(&pd->dma); |
933 | 933 | ||
934 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, | 934 | list_for_each_entry_safe(chan, _c, &pd->dma.channels, |
935 | device_node) { | 935 | device_node) { |
936 | pd_chan = to_pd_chan(chan); | 936 | pd_chan = to_pd_chan(chan); |
937 | 937 | ||
938 | tasklet_disable(&pd_chan->tasklet); | 938 | tasklet_disable(&pd_chan->tasklet); |
939 | tasklet_kill(&pd_chan->tasklet); | 939 | tasklet_kill(&pd_chan->tasklet); |
940 | } | 940 | } |
941 | 941 | ||
942 | pci_pool_destroy(pd->pool); | 942 | pci_pool_destroy(pd->pool); |
943 | free_irq(pdev->irq, pd); | 943 | free_irq(pdev->irq, pd); |
944 | pci_iounmap(pdev, pd->membase); | 944 | pci_iounmap(pdev, pd->membase); |
945 | pci_release_regions(pdev); | 945 | pci_release_regions(pdev); |
946 | pci_disable_device(pdev); | 946 | pci_disable_device(pdev); |
947 | kfree(pd); | 947 | kfree(pd); |
948 | } | 948 | } |
949 | } | 949 | } |
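Teardown mirrors probe in reverse: the device is unregistered from the dmaengine core first, so no new work can be issued, then each channel's tasklet is quiesced, and only afterwards are the pool, IRQ, mapping, regions, and PCI device released. The tasklet_disable()/tasklet_kill() pair matters because the interrupt handler schedules these tasklets: disabling waits out a currently running instance, and killing clears any still-pending schedule before the backing memory is freed. A minimal sketch of that quiescing step, with a hypothetical helper name, following the same call order as the driver:

#include <linux/interrupt.h>

/* Stop a tasklet before freeing the object its callback touches. */
static void example_quiesce_tasklet(struct tasklet_struct *t)
{
	tasklet_disable(t);	/* wait for a running instance to finish */
	tasklet_kill(t);	/* remove any pending schedule */
}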
950 | 950 | ||
951 | /* PCI device IDs of the DMA controllers */ | 951 | /* PCI device IDs of the DMA controllers */ |
952 | #define PCI_VENDOR_ID_ROHM 0x10DB | 952 | #define PCI_VENDOR_ID_ROHM 0x10DB |
953 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810 | 953 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810 |
954 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815 | 954 | #define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815 |
955 | #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 | 955 | #define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026 |
956 | #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B | 956 | #define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B |
957 | #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 | 957 | #define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034 |
958 | #define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032 | 958 | #define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032 |
959 | #define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B | 959 | #define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B |
960 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E | 960 | #define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E |
961 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 | 961 | #define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017 |
962 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B | 962 | #define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B |
963 | 963 | ||
964 | static const struct pci_device_id pch_dma_id_table[] = { | 964 | DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = { |
965 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, | 965 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 }, |
966 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, | 966 | { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 }, |
967 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ | 967 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */ |
968 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ | 968 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8}, /* PCMIF SPI */ |
969 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ | 969 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4}, /* FPGA */ |
970 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */ | 970 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12}, /* I2S */ |
971 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */ | 971 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4}, /* UART */ |
972 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ | 972 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */ |
973 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ | 973 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */ |
974 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ | 974 | { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */ |
975 | { 0, }, | 975 | { 0, }, |
976 | }; | 976 | }; |
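The table above contains the commit's single change: the open-coded "static const struct pci_device_id pch_dma_id_table[] =" declaration becomes the DEFINE_PCI_DEVICE_TABLE() wrapper, which at the time expanded to approximately "const struct pci_device_id name[] __devinitconst", i.e. the same const array, additionally annotated for the discardable init-data section. Each PCI_VDEVICE(vendor, device) entry fills the vendor and device IDs and wildcards the subsystem IDs, leaving the trailing positional initializer for .driver_data, which this driver uses to carry the channel count (4, 8 or 12). A sketch of how a probe routine consumes that field; the names here are illustrative, the real pch_dma_probe appears earlier in this file:

#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* .driver_data holds the per-device channel count from the table */
	unsigned int nr_channels = id->driver_data;

	return nr_channels ? 0 : -ENODEV;
}

The table is also not exported with MODULE_DEVICE_TABLE(pci, pch_dma_id_table) in this version, so udev cannot auto-load the module on a device match; whether that is intentional is not visible from this diff.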
977 | 977 | ||
978 | static struct pci_driver pch_dma_driver = { | 978 | static struct pci_driver pch_dma_driver = { |
979 | .name = DRV_NAME, | 979 | .name = DRV_NAME, |
980 | .id_table = pch_dma_id_table, | 980 | .id_table = pch_dma_id_table, |
981 | .probe = pch_dma_probe, | 981 | .probe = pch_dma_probe, |
982 | .remove = __devexit_p(pch_dma_remove), | 982 | .remove = __devexit_p(pch_dma_remove), |
983 | #ifdef CONFIG_PM | 983 | #ifdef CONFIG_PM |
984 | .suspend = pch_dma_suspend, | 984 | .suspend = pch_dma_suspend, |
985 | .resume = pch_dma_resume, | 985 | .resume = pch_dma_resume, |
986 | #endif | 986 | #endif |
987 | }; | 987 | }; |
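Two era-specific details in the driver struct: __devexit_p() wraps the remove pointer so that it degrades to NULL when __devexit functions are compiled out of the image, and the legacy .suspend/.resume hooks are wired up only under CONFIG_PM. The wrapper's definition in <linux/init.h> of this period was approximately:

/* the remove path is only reachable for modules or hotplug kernels */
#if defined(MODULE) || defined(CONFIG_HOTPLUG)
#define __devexit_p(x)	x
#else
#define __devexit_p(x)	NULL
#endif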
988 | 988 | ||
989 | static int __init pch_dma_init(void) | 989 | static int __init pch_dma_init(void) |
990 | { | 990 | { |
991 | return pci_register_driver(&pch_dma_driver); | 991 | return pci_register_driver(&pch_dma_driver); |
992 | } | 992 | } |
993 | 993 | ||
994 | static void __exit pch_dma_exit(void) | 994 | static void __exit pch_dma_exit(void) |
995 | { | 995 | { |
996 | pci_unregister_driver(&pch_dma_driver); | 996 | pci_unregister_driver(&pch_dma_driver); |
997 | } | 997 | } |
998 | 998 | ||
999 | module_init(pch_dma_init); | 999 | module_init(pch_dma_init); |
1000 | module_exit(pch_dma_exit); | 1000 | module_exit(pch_dma_exit); |
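The init/exit pair is pure registration boilerplate around pci_register_driver()/pci_unregister_driver(). On later kernels (v3.4 onward) it collapses to a single helper macro; shown purely for comparison, since the macro did not exist when this commit was made:

/* equivalent one-liner on v3.4+ kernels */
module_pci_driver(pch_dma_driver);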
1001 | 1001 | ||
1002 | MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " | 1002 | MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH " |
1003 | "DMA controller driver"); | 1003 | "DMA controller driver"); |
1004 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); | 1004 | MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>"); |
1005 | MODULE_LICENSE("GPL v2"); | 1005 | MODULE_LICENSE("GPL v2"); |
1006 | 1006 ||