Commit 2001f67e47ee3b0d822e2c738da5c4c33a31a7ea
Committed by
Kishon Vijay Abraham I
1 parent
0bde8afc3a
Exists in
ti-linux-3.12.y
and in
3 other branches
Revert "usb: musb: musb_cppi41: Revert the Advisory 1.0.13 workaround"
Reverting the Advisory 1.0.13 workaround leads to functional issues when multiple Mass Storage devices are connected. In particular, when more than 3 Mass Storage disks are connected, the disks fail to mount reliably. This reverts commit c424ef3e2beb89488e7e597446b4c6bc8f1852c5. Signed-off-by: George Cherian <george.cherian@ti.com> Reported-by: Roger Quadros <rogerq@ti.com>
Showing 1 changed file with 79 additions and 17 deletions Inline Diff
drivers/usb/musb/musb_cppi41.c
1 | #include <linux/device.h> | 1 | #include <linux/device.h> |
2 | #include <linux/dma-mapping.h> | 2 | #include <linux/dma-mapping.h> |
3 | #include <linux/dmaengine.h> | 3 | #include <linux/dmaengine.h> |
4 | #include <linux/sizes.h> | 4 | #include <linux/sizes.h> |
5 | #include <linux/platform_device.h> | 5 | #include <linux/platform_device.h> |
6 | #include <linux/of.h> | 6 | #include <linux/of.h> |
7 | 7 | ||
8 | #include "musb_core.h" | 8 | #include "musb_core.h" |
9 | 9 | ||
10 | #define RNDIS_REG(x) (0x80 + ((x - 1) * 4)) | 10 | #define RNDIS_REG(x) (0x80 + ((x - 1) * 4)) |
11 | 11 | ||
12 | #define EP_MODE_AUTOREG_NONE 0 | 12 | #define EP_MODE_AUTOREG_NONE 0 |
13 | #define EP_MODE_AUTOREG_ALL_NEOP 1 | 13 | #define EP_MODE_AUTOREG_ALL_NEOP 1 |
14 | #define EP_MODE_AUTOREG_ALWAYS 3 | 14 | #define EP_MODE_AUTOREG_ALWAYS 3 |
15 | 15 | ||
16 | #define EP_MODE_DMA_TRANSPARENT 0 | 16 | #define EP_MODE_DMA_TRANSPARENT 0 |
17 | #define EP_MODE_DMA_RNDIS 1 | 17 | #define EP_MODE_DMA_RNDIS 1 |
18 | #define EP_MODE_DMA_GEN_RNDIS 3 | 18 | #define EP_MODE_DMA_GEN_RNDIS 3 |
19 | 19 | ||
20 | #define USB_CTRL_TX_MODE 0x70 | 20 | #define USB_CTRL_TX_MODE 0x70 |
21 | #define USB_CTRL_RX_MODE 0x74 | 21 | #define USB_CTRL_RX_MODE 0x74 |
22 | #define USB_CTRL_AUTOREQ 0xd0 | 22 | #define USB_CTRL_AUTOREQ 0xd0 |
23 | #define USB_TDOWN 0xd8 | 23 | #define USB_TDOWN 0xd8 |
24 | 24 | ||
/* Per-endpoint CPPI 4.1 DMA channel state. */
struct cppi41_dma_channel {
        struct dma_channel channel;             /* musb-facing generic channel */
        struct cppi41_dma_controller *controller; /* owning controller */
        struct musb_hw_ep *hw_ep;               /* bound hardware endpoint */
        struct dma_chan *dc;                    /* backing dmaengine channel */
        dma_cookie_t cookie;                    /* cookie of last submitted desc */
        u8 port_num;
        u8 is_tx;                               /* 1 = TX direction, 0 = RX */
        u8 is_allocated;                        /* channel claimed by a caller */
        u8 usb_toggle;                          /* saved RX data toggle (Advisory 1.0.13) */

        dma_addr_t buf_addr;                    /* current DMA buffer position */
        u32 total_len;                          /* total bytes requested */
        u32 prog_len;                           /* bytes programmed in current desc */
        u32 transferred;                        /* bytes completed so far */
        u32 packet_sz;                          /* (possibly hb_mult-scaled) max packet */
        struct list_head tx_check;              /* link on early_tx_list */
        struct work_struct dma_completion;      /* deferred completion for isoc */
};

#define MUSB_DMA_NUM_CHANNELS 15

/* Controller-wide state: one channel pair per endpoint plus cached modes. */
struct cppi41_dma_controller {
        struct dma_controller controller;
        struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
        struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
        struct musb *musb;
        struct hrtimer early_tx;                /* polls for late TX FIFO drain */
        struct list_head early_tx_list;         /* channels awaiting FIFO-empty */
        u32 rx_mode;                            /* cached USB_CTRL_RX_MODE value */
        u32 tx_mode;                            /* cached USB_CTRL_TX_MODE value */
        u32 auto_req;                           /* cached USB_CTRL_AUTOREQ value */
};
57 | 58 | ||
/*
 * Record the current RX data toggle for a host-mode RX channel before a
 * transfer is issued, so update_rx_toggle() can later detect the toggle
 * corruption described in AM335x Advisory 1.0.13.  No-op for TX channels
 * and in peripheral mode.
 */
static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(cppi41_channel->controller->musb))
                return;

        csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        cppi41_channel->usb_toggle = toggle;
}
74 | |||
/*
 * Workaround for AM335x Advisory 1.0.13: after a transfer completes, if the
 * data toggle is still DATA0 *and* it was DATA0 before the transfer started
 * (saved by save_rx_toggle()), assume the silicon erroneously reset the
 * toggle and force it back to DATA1.  Host-mode RX only.
 */
static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
{
        u16 csr;
        u8 toggle;

        if (cppi41_channel->is_tx)
                return;
        if (!is_host_active(cppi41_channel->controller->musb))
                return;

        csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
        toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;

        /*
         * AM335x Advisory 1.0.13: Due to internal synchronisation error the
         * data toggle may reset from DATA1 to DATA0 during receiving data from
         * more than one endpoint.
         */
        if (!toggle && toggle == cppi41_channel->usb_toggle) {
                csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
                musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
                dev_dbg(cppi41_channel->controller->musb->controller,
                                "Restoring DATA1 toggle.\n");
        }

        cppi41_channel->usb_toggle = toggle;
}
102 | |||
58 | static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep) | 103 | static bool musb_is_tx_fifo_empty(struct musb_hw_ep *hw_ep) |
59 | { | 104 | { |
60 | u8 epnum = hw_ep->epnum; | 105 | u8 epnum = hw_ep->epnum; |
61 | struct musb *musb = hw_ep->musb; | 106 | struct musb *musb = hw_ep->musb; |
62 | void __iomem *epio = musb->endpoints[epnum].regs; | 107 | void __iomem *epio = musb->endpoints[epnum].regs; |
63 | u16 csr; | 108 | u16 csr; |
64 | 109 | ||
65 | csr = musb_readw(epio, MUSB_TXCSR); | 110 | csr = musb_readw(epio, MUSB_TXCSR); |
66 | if (csr & MUSB_TXCSR_TXPKTRDY) | 111 | if (csr & MUSB_TXCSR_TXPKTRDY) |
67 | return false; | 112 | return false; |
68 | return true; | 113 | return true; |
69 | } | 114 | } |
70 | 115 | ||
71 | static bool is_isoc(struct musb_hw_ep *hw_ep, bool in) | 116 | static bool is_isoc(struct musb_hw_ep *hw_ep, bool in) |
72 | { | 117 | { |
73 | if (in && hw_ep->in_qh) { | 118 | if (in && hw_ep->in_qh) { |
74 | if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC) | 119 | if (hw_ep->in_qh->type == USB_ENDPOINT_XFER_ISOC) |
75 | return true; | 120 | return true; |
76 | } else if (hw_ep->out_qh) { | 121 | } else if (hw_ep->out_qh) { |
77 | if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC) | 122 | if (hw_ep->out_qh->type == USB_ENDPOINT_XFER_ISOC) |
78 | return true; | 123 | return true; |
79 | } | 124 | } |
80 | return false; | 125 | return false; |
81 | } | 126 | } |
82 | 127 | ||
83 | static void cppi41_dma_callback(void *private_data); | 128 | static void cppi41_dma_callback(void *private_data); |
84 | 129 | ||
/*
 * Advance or finish a transfer after a DMA segment completes.  If all
 * requested bytes moved (prog_len cleared) or the channel was freed,
 * report completion to the musb core; otherwise program the next
 * packet-sized DMA segment and, for host RX, re-arm REQPKT to pull the
 * next packet.  Caller holds musb->lock.
 */
static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel)
{
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;

        if (!cppi41_channel->prog_len ||
            (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) {

                /* done, complete */
                cppi41_channel->channel.actual_len =
                        cppi41_channel->transferred;
                cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
                musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
        } else {
                /* next iteration, reload */
                struct dma_chan *dc = cppi41_channel->dc;
                struct dma_async_tx_descriptor *dma_desc;
                enum dma_transfer_direction direction;
                u16 csr;
                u32 remain_bytes;
                void __iomem *epio = cppi41_channel->hw_ep->regs;

                cppi41_channel->buf_addr += cppi41_channel->packet_sz;

                /* Next segment is at most one (scaled) packet. */
                remain_bytes = cppi41_channel->total_len;
                remain_bytes -= cppi41_channel->transferred;
                remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
                cppi41_channel->prog_len = remain_bytes;

                direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
                        : DMA_DEV_TO_MEM;
                dma_desc = dmaengine_prep_slave_single(dc,
                                cppi41_channel->buf_addr,
                                remain_bytes,
                                direction,
                                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (WARN_ON(!dma_desc))
                        return;

                dma_desc->callback = cppi41_dma_callback;
                dma_desc->callback_param = &cppi41_channel->channel;
                cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
                dma_async_issue_pending(dc);

                if (!cppi41_channel->is_tx) {
                        /* Host RX: request the next packet from the device. */
                        csr = musb_readw(epio, MUSB_RXCSR);
                        csr |= MUSB_RXCSR_H_REQPKT;
                        musb_writew(epio, MUSB_RXCSR, csr);
                }
        }
}
136 | 181 | ||
/*
 * Deferred (workqueue) completion path, used for isochronous transfers and
 * for TX whose FIFO had not drained in the DMA callback.  RX isoc completes
 * immediately; TX waits for the FIFO to empty, rescheduling itself until it
 * does.  Takes musb->lock around cppi41_trans_done().
 */
static void cppi_trans_done_work(struct work_struct *work)
{
        unsigned long flags;
        struct cppi41_dma_channel *cppi41_channel =
                container_of(work, struct cppi41_dma_channel, dma_completion);
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        struct musb *musb = controller->musb;
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        bool empty;

        if (!cppi41_channel->is_tx && is_isoc(hw_ep, 1)) {
                spin_lock_irqsave(&musb->lock, flags);
                cppi41_trans_done(cppi41_channel);
                spin_unlock_irqrestore(&musb->lock, flags);
        } else {
                empty = musb_is_tx_fifo_empty(hw_ep);
                if (empty) {
                        spin_lock_irqsave(&musb->lock, flags);
                        cppi41_trans_done(cppi41_channel);
                        spin_unlock_irqrestore(&musb->lock, flags);
                } else {
                        /* FIFO still busy: try again from the workqueue. */
                        schedule_work(&cppi41_channel->dma_completion);
                }
        }
}
162 | 207 | ||
/*
 * hrtimer callback that polls queued TX channels (early_tx_list) for an
 * empty FIFO and completes those that have drained.  Restarts itself every
 * 150us while any channel is still waiting.  See the comment in
 * cppi41_dma_callback() for why this polling is needed.
 */
static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
{
        struct cppi41_dma_controller *controller;
        struct cppi41_dma_channel *cppi41_channel, *n;
        struct musb *musb;
        unsigned long flags;
        enum hrtimer_restart ret = HRTIMER_NORESTART;

        controller = container_of(timer, struct cppi41_dma_controller,
                        early_tx);
        musb = controller->musb;

        spin_lock_irqsave(&musb->lock, flags);
        list_for_each_entry_safe(cppi41_channel, n, &controller->early_tx_list,
                        tx_check) {
                bool empty;
                struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;

                empty = musb_is_tx_fifo_empty(hw_ep);
                if (empty) {
                        list_del_init(&cppi41_channel->tx_check);
                        cppi41_trans_done(cppi41_channel);
                }
        }

        if (!list_empty(&controller->early_tx_list)) {
                /* Some FIFOs not drained yet: poll again in 150us. */
                ret = HRTIMER_RESTART;
                hrtimer_forward_now(&controller->early_tx,
                                ktime_set(0, 150 * NSEC_PER_USEC));
        }

        spin_unlock_irqrestore(&musb->lock, flags);
        return ret;
}
197 | 242 | ||
/*
 * dmaengine completion callback.  Accounts the bytes moved (via residue),
 * applies the Advisory 1.0.13 RX toggle fix-up, then either completes the
 * transfer, defers it to the workqueue (isoc), or — for TX whose interrupt
 * fired before the FIFO drained — spins briefly (HS) or arms the early_tx
 * hrtimer (FS) to wait for FIFO-empty.
 */
static void cppi41_dma_callback(void *private_data)
{
        struct dma_channel *channel = private_data;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
        struct musb *musb = hw_ep->musb;
        unsigned long flags;
        struct dma_tx_state txstate;
        u32 transferred;
        bool empty;

        spin_lock_irqsave(&musb->lock, flags);

        /* residue = bytes the engine did NOT move from the programmed len */
        dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
                        &txstate);
        transferred = cppi41_channel->prog_len - txstate.residue;
        cppi41_channel->transferred += transferred;

        dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
                hw_ep->epnum, cppi41_channel->transferred,
                cppi41_channel->total_len);

        update_rx_toggle(cppi41_channel);

        /* All bytes moved, or a short packet: transfer is finished. */
        if (cppi41_channel->transferred == cppi41_channel->total_len ||
                        transferred < cppi41_channel->packet_sz)
                cppi41_channel->prog_len = 0;

        if (!cppi41_channel->is_tx) {
                if (is_isoc(hw_ep, 1))
                        schedule_work(&cppi41_channel->dma_completion);
                else
                        cppi41_trans_done(cppi41_channel);
                goto out;
        }

        empty = musb_is_tx_fifo_empty(hw_ep);
        if (empty) {
                cppi41_trans_done(cppi41_channel);
        } else {
                struct cppi41_dma_controller *controller;
                /*
                 * On AM335x it has been observed that the TX interrupt fires
                 * too early that means the TXFIFO is not yet empty but the DMA
                 * engine says that it is done with the transfer. We don't
                 * receive a FIFO empty interrupt so the only thing we can do is
                 * to poll for the bit. On HS it usually takes 2us, on FS around
                 * 110us - 150us depending on the transfer size.
                 * We spin on HS (no longer than than 25us and setup a timer on
                 * FS to check for the bit and complete the transfer.
                 */
                controller = cppi41_channel->controller;

                if (musb->g.speed == USB_SPEED_HIGH) {
                        unsigned wait = 25;

                        do {
                                empty = musb_is_tx_fifo_empty(hw_ep);
                                if (empty)
                                        break;
                                wait--;
                                if (!wait)
                                        break;
                                udelay(1);
                        } while (1);

                        empty = musb_is_tx_fifo_empty(hw_ep);
                        if (empty) {
                                cppi41_trans_done(cppi41_channel);
                                goto out;
                        }
                }
                if (is_isoc(hw_ep, 0)) {
                        schedule_work(&cppi41_channel->dma_completion);
                        goto out;
                }
                /* FS (or HS timeout): let the hrtimer poll for FIFO-empty. */
                list_add_tail(&cppi41_channel->tx_check,
                                &controller->early_tx_list);
                if (!hrtimer_active(&controller->early_tx)) {
                        hrtimer_start_range_ns(&controller->early_tx,
                                ktime_set(0, 140 * NSEC_PER_USEC),
                                40 * NSEC_PER_USEC,
                                HRTIMER_MODE_REL);
                }
        }
out:
        spin_unlock_irqrestore(&musb->lock, flags);
}
284 | 331 | ||
/*
 * Replace endpoint @ep's 2-bit mode field inside the packed register value
 * @old with @mode and return the result.  Endpoint 1 occupies bits [1:0],
 * endpoint 2 bits [3:2], and so on.
 */
static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
{
        unsigned shift = (ep - 1) * 2;
        u32 val = old & ~(3 << shift);

        return val | (mode << shift);
}
294 | 341 | ||
/*
 * Set the DMA mode (transparent / RNDIS / generic RNDIS) for this channel's
 * endpoint in USB_CTRL_TX_MODE or USB_CTRL_RX_MODE.  The last written value
 * is cached in the controller so the register write is skipped when nothing
 * changes.
 */
static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        if (cppi41_channel->is_tx)
                old_mode = controller->tx_mode;
        else
                old_mode = controller->rx_mode;
        port = cppi41_channel->port_num;
        new_mode = update_ep_mode(port, mode, old_mode);

        if (new_mode == old_mode)
                return;
        if (cppi41_channel->is_tx) {
                controller->tx_mode = new_mode;
                musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
                                new_mode);
        } else {
                controller->rx_mode = new_mode;
                musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
                                new_mode);
        }
}
322 | 369 | ||
/*
 * Set the auto-request mode for this channel's endpoint in USB_CTRL_AUTOREQ.
 * As with cppi41_set_dma_mode(), the value is cached and the register is
 * only written on change.
 */
static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
                unsigned mode)
{
        struct cppi41_dma_controller *controller = cppi41_channel->controller;
        u32 port;
        u32 new_mode;
        u32 old_mode;

        old_mode = controller->auto_req;
        port = cppi41_channel->port_num;
        new_mode = update_ep_mode(port, mode, old_mode);

        if (new_mode == old_mode)
                return;
        controller->auto_req = new_mode;
        musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
}
340 | 387 | ||
/*
 * Program one DMA transfer on the channel.  TX may use generic RNDIS mode
 * for multi-packet transfers; RX is restricted by AM335x Advisory 1.0.13 to
 * at most one max-size packet per DMA segment (fallback mode, len clamped),
 * with cppi41_trans_done() reloading the remainder packet by packet.
 * Returns true on success, false if no descriptor could be prepared.
 */
static bool cppi41_configure_channel(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        struct dma_chan *dc = cppi41_channel->dc;
        struct dma_async_tx_descriptor *dma_desc;
        enum dma_transfer_direction direction;
        struct musb *musb = cppi41_channel->controller->musb;
        unsigned use_gen_rndis = 0;

        dev_dbg(musb->controller,
                "configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
                cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
                packet_sz, mode, (unsigned long long) dma_addr,
                len, cppi41_channel->is_tx);

        cppi41_channel->buf_addr = dma_addr;
        cppi41_channel->total_len = len;
        cppi41_channel->transferred = 0;
        cppi41_channel->packet_sz = packet_sz;

        /*
         * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more
         * than max packet size at a time.
         */
        if (cppi41_channel->is_tx)
                use_gen_rndis = 1;

        if (use_gen_rndis) {
                /* RNDIS mode */
                if (len > packet_sz) {
                        musb_writel(musb->ctrl_base,
                                RNDIS_REG(cppi41_channel->port_num), len);
                        /* gen rndis */
                        cppi41_set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_GEN_RNDIS);

                        /* auto req */
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREG_ALL_NEOP);
                } else {
                        musb_writel(musb->ctrl_base,
                                        RNDIS_REG(cppi41_channel->port_num), 0);
                        cppi41_set_dma_mode(cppi41_channel,
                                        EP_MODE_DMA_TRANSPARENT);
                        cppi41_set_autoreq_mode(cppi41_channel,
                                        EP_MODE_AUTOREG_NONE);
                }
        } else {
                /* fallback mode: one packet per DMA segment (RX errata) */
                cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
                cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
                len = min_t(u32, packet_sz, len);
        }
        cppi41_channel->prog_len = len;
        direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!dma_desc)
                return false;

        dma_desc->callback = cppi41_dma_callback;
        dma_desc->callback_param = channel;
        cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);

        /* Snapshot RX toggle before the transfer runs (Advisory 1.0.13). */
        save_rx_toggle(cppi41_channel);
        dma_async_issue_pending(dc);
        return true;
}
396 | 458 | ||
397 | static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c, | 459 | static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c, |
398 | struct musb_hw_ep *hw_ep, u8 is_tx) | 460 | struct musb_hw_ep *hw_ep, u8 is_tx) |
399 | { | 461 | { |
400 | struct cppi41_dma_controller *controller = container_of(c, | 462 | struct cppi41_dma_controller *controller = container_of(c, |
401 | struct cppi41_dma_controller, controller); | 463 | struct cppi41_dma_controller, controller); |
402 | struct cppi41_dma_channel *cppi41_channel = NULL; | 464 | struct cppi41_dma_channel *cppi41_channel = NULL; |
403 | u8 ch_num = hw_ep->epnum - 1; | 465 | u8 ch_num = hw_ep->epnum - 1; |
404 | 466 | ||
405 | if (ch_num >= MUSB_DMA_NUM_CHANNELS) | 467 | if (ch_num >= MUSB_DMA_NUM_CHANNELS) |
406 | return NULL; | 468 | return NULL; |
407 | 469 | ||
408 | if (is_tx) | 470 | if (is_tx) |
409 | cppi41_channel = &controller->tx_channel[ch_num]; | 471 | cppi41_channel = &controller->tx_channel[ch_num]; |
410 | else | 472 | else |
411 | cppi41_channel = &controller->rx_channel[ch_num]; | 473 | cppi41_channel = &controller->rx_channel[ch_num]; |
412 | 474 | ||
413 | if (!cppi41_channel->dc) | 475 | if (!cppi41_channel->dc) |
414 | return NULL; | 476 | return NULL; |
415 | 477 | ||
416 | if (cppi41_channel->is_allocated) | 478 | if (cppi41_channel->is_allocated) |
417 | return NULL; | 479 | return NULL; |
418 | 480 | ||
419 | cppi41_channel->hw_ep = hw_ep; | 481 | cppi41_channel->hw_ep = hw_ep; |
420 | cppi41_channel->is_allocated = 1; | 482 | cppi41_channel->is_allocated = 1; |
421 | 483 | ||
422 | return &cppi41_channel->channel; | 484 | return &cppi41_channel->channel; |
423 | } | 485 | } |
424 | 486 | ||
425 | static void cppi41_dma_channel_release(struct dma_channel *channel) | 487 | static void cppi41_dma_channel_release(struct dma_channel *channel) |
426 | { | 488 | { |
427 | struct cppi41_dma_channel *cppi41_channel = channel->private_data; | 489 | struct cppi41_dma_channel *cppi41_channel = channel->private_data; |
428 | 490 | ||
429 | if (cppi41_channel->is_allocated) { | 491 | if (cppi41_channel->is_allocated) { |
430 | cppi41_channel->is_allocated = 0; | 492 | cppi41_channel->is_allocated = 0; |
431 | channel->status = MUSB_DMA_STATUS_FREE; | 493 | channel->status = MUSB_DMA_STATUS_FREE; |
432 | channel->actual_len = 0; | 494 | channel->actual_len = 0; |
433 | } | 495 | } |
434 | } | 496 | } |
435 | 497 | ||
/*
 * musb dma_controller entry point: start a transfer on @channel.  In host
 * mode the packet size is scaled by the queue head's high-bandwidth
 * multiplier before configuring the channel.  Returns the result of
 * cppi41_configure_channel(); on failure the channel is marked FREE again.
 */
static int cppi41_dma_channel_program(struct dma_channel *channel,
                                u16 packet_sz, u8 mode,
                                dma_addr_t dma_addr, u32 len)
{
        int ret;
        struct cppi41_dma_channel *cppi41_channel = channel->private_data;
        int hb_mult = 0;

        BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
                channel->status == MUSB_DMA_STATUS_BUSY);

        if (is_host_active(cppi41_channel->controller->musb)) {
                if (cppi41_channel->is_tx)
                        hb_mult = cppi41_channel->hw_ep->out_qh->hb_mult;
                else
                        hb_mult = cppi41_channel->hw_ep->in_qh->hb_mult;
        }

        channel->status = MUSB_DMA_STATUS_BUSY;
        channel->actual_len = 0;

        /* wMaxPacketSize bits [10:0] scaled by the high-bandwidth mult. */
        if (hb_mult)
                packet_sz = hb_mult * (packet_sz & 0x7FF);

        ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
        if (!ret)
                channel->status = MUSB_DMA_STATUS_FREE;

        return ret;
}
466 | 528 | ||
467 | static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket, | 529 | static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket, |
468 | void *buf, u32 length) | 530 | void *buf, u32 length) |
469 | { | 531 | { |
470 | struct cppi41_dma_channel *cppi41_channel = channel->private_data; | 532 | struct cppi41_dma_channel *cppi41_channel = channel->private_data; |
471 | struct cppi41_dma_controller *controller = cppi41_channel->controller; | 533 | struct cppi41_dma_controller *controller = cppi41_channel->controller; |
472 | struct musb *musb = controller->musb; | 534 | struct musb *musb = controller->musb; |
473 | 535 | ||
474 | if (is_host_active(musb)) { | 536 | if (is_host_active(musb)) { |
475 | WARN_ON(1); | 537 | WARN_ON(1); |
476 | return 1; | 538 | return 1; |
477 | } | 539 | } |
478 | if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK) | 540 | if (cppi41_channel->hw_ep->ep_in.type != USB_ENDPOINT_XFER_BULK) |
479 | return 0; | 541 | return 0; |
480 | if (cppi41_channel->is_tx) | 542 | if (cppi41_channel->is_tx) |
481 | return 1; | 543 | return 1; |
482 | /* AM335x Advisory 1.0.13. No workaround for device RX mode */ | 544 | /* AM335x Advisory 1.0.13. No workaround for device RX mode */ |
483 | return 0; | 545 | return 0; |
484 | } | 546 | } |
485 | 547 | ||
/*
 * Abort a transfer in flight on @channel.
 *
 * Disables DMA at the endpoint, flushes any pending packet out of the
 * FIFO, then requests a CPPI 4.1 teardown, retrying until the dmaengine
 * driver stops returning -EAGAIN.  Always returns 0.
 */
static int cppi41_dma_channel_abort(struct dma_channel *channel)
{
	struct cppi41_dma_channel *cppi41_channel = channel->private_data;
	struct cppi41_dma_controller *controller = cppi41_channel->controller;
	struct musb *musb = controller->musb;
	void __iomem *epio = cppi41_channel->hw_ep->regs;
	int tdbit;
	int ret;
	unsigned is_tx;
	u16 csr;

	is_tx = cppi41_channel->is_tx;
	dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
			cppi41_channel->port_num, is_tx);

	/* Nothing in flight: nothing to tear down. */
	if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
		return 0;

	/* Drop the channel from the early-TX poll list, if queued there. */
	list_del_init(&cppi41_channel->tx_check);
	if (is_tx) {
		/* Stop further DMA requests on the TX endpoint. */
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~MUSB_TXCSR_DMAENAB;
		musb_writew(epio, MUSB_TXCSR, csr);
	} else {
		/* Stop IN token generation and DMA on the RX endpoint. */
		csr = musb_readw(epio, MUSB_RXCSR);
		csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
		musb_writew(epio, MUSB_RXCSR, csr);

		/* If a packet already arrived, flush it from the FIFO.
		 * NOTE(review): the FLUSHFIFO write is issued twice on
		 * purpose — presumably for double-buffered endpoints;
		 * confirm against the MUSB programming guide.
		 */
		csr = musb_readw(epio, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY) {
			csr |= MUSB_RXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_RXCSR, csr);
			musb_writew(epio, MUSB_RXCSR, csr);
		}
	}

	/* TX teardown bits live in the upper half of USB_TDOWN. */
	tdbit = 1 << cppi41_channel->port_num;
	if (is_tx)
		tdbit <<= 16;

	/* Keep re-issuing the teardown request until the dmaengine
	 * driver has completed it (-EAGAIN means "not done yet").
	 */
	do {
		musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
		ret = dmaengine_terminate_all(cppi41_channel->dc);
	} while (ret == -EAGAIN);

	musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);

	if (is_tx) {
		/* Discard a packet that was loaded but never sent. */
		csr = musb_readw(epio, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_TXPKTRDY) {
			csr |= MUSB_TXCSR_FLUSHFIFO;
			musb_writew(epio, MUSB_TXCSR, csr);
		}
	}

	cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
	return 0;
}
544 | 606 | ||
545 | static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl) | 607 | static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl) |
546 | { | 608 | { |
547 | struct dma_chan *dc; | 609 | struct dma_chan *dc; |
548 | int i; | 610 | int i; |
549 | 611 | ||
550 | for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) { | 612 | for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) { |
551 | dc = ctrl->tx_channel[i].dc; | 613 | dc = ctrl->tx_channel[i].dc; |
552 | if (dc) | 614 | if (dc) |
553 | dma_release_channel(dc); | 615 | dma_release_channel(dc); |
554 | dc = ctrl->rx_channel[i].dc; | 616 | dc = ctrl->rx_channel[i].dc; |
555 | if (dc) | 617 | if (dc) |
556 | dma_release_channel(dc); | 618 | dma_release_channel(dc); |
557 | } | 619 | } |
558 | } | 620 | } |
559 | 621 | ||
/* Undo cppi41_dma_controller_start(): drop all dmaengine channels. */
static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
{
	cppi41_release_all_dma_chans(controller);
}
564 | 626 | ||
565 | static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller) | 627 | static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller) |
566 | { | 628 | { |
567 | struct musb *musb = controller->musb; | 629 | struct musb *musb = controller->musb; |
568 | struct device *dev = musb->controller; | 630 | struct device *dev = musb->controller; |
569 | struct device_node *np = dev->of_node; | 631 | struct device_node *np = dev->of_node; |
570 | struct cppi41_dma_channel *cppi41_channel; | 632 | struct cppi41_dma_channel *cppi41_channel; |
571 | int count; | 633 | int count; |
572 | int i; | 634 | int i; |
573 | int ret; | 635 | int ret; |
574 | 636 | ||
575 | count = of_property_count_strings(np, "dma-names"); | 637 | count = of_property_count_strings(np, "dma-names"); |
576 | if (count < 0) | 638 | if (count < 0) |
577 | return count; | 639 | return count; |
578 | 640 | ||
579 | for (i = 0; i < count; i++) { | 641 | for (i = 0; i < count; i++) { |
580 | struct dma_chan *dc; | 642 | struct dma_chan *dc; |
581 | struct dma_channel *musb_dma; | 643 | struct dma_channel *musb_dma; |
582 | const char *str; | 644 | const char *str; |
583 | unsigned is_tx; | 645 | unsigned is_tx; |
584 | unsigned int port; | 646 | unsigned int port; |
585 | 647 | ||
586 | ret = of_property_read_string_index(np, "dma-names", i, &str); | 648 | ret = of_property_read_string_index(np, "dma-names", i, &str); |
587 | if (ret) | 649 | if (ret) |
588 | goto err; | 650 | goto err; |
589 | if (!strncmp(str, "tx", 2)) | 651 | if (!strncmp(str, "tx", 2)) |
590 | is_tx = 1; | 652 | is_tx = 1; |
591 | else if (!strncmp(str, "rx", 2)) | 653 | else if (!strncmp(str, "rx", 2)) |
592 | is_tx = 0; | 654 | is_tx = 0; |
593 | else { | 655 | else { |
594 | dev_err(dev, "Wrong dmatype %s\n", str); | 656 | dev_err(dev, "Wrong dmatype %s\n", str); |
595 | goto err; | 657 | goto err; |
596 | } | 658 | } |
597 | ret = kstrtouint(str + 2, 0, &port); | 659 | ret = kstrtouint(str + 2, 0, &port); |
598 | if (ret) | 660 | if (ret) |
599 | goto err; | 661 | goto err; |
600 | 662 | ||
601 | ret = -EINVAL; | 663 | ret = -EINVAL; |
602 | if (port > MUSB_DMA_NUM_CHANNELS || !port) | 664 | if (port > MUSB_DMA_NUM_CHANNELS || !port) |
603 | goto err; | 665 | goto err; |
604 | if (is_tx) | 666 | if (is_tx) |
605 | cppi41_channel = &controller->tx_channel[port - 1]; | 667 | cppi41_channel = &controller->tx_channel[port - 1]; |
606 | else | 668 | else |
607 | cppi41_channel = &controller->rx_channel[port - 1]; | 669 | cppi41_channel = &controller->rx_channel[port - 1]; |
608 | 670 | ||
609 | cppi41_channel->controller = controller; | 671 | cppi41_channel->controller = controller; |
610 | cppi41_channel->port_num = port; | 672 | cppi41_channel->port_num = port; |
611 | cppi41_channel->is_tx = is_tx; | 673 | cppi41_channel->is_tx = is_tx; |
612 | INIT_LIST_HEAD(&cppi41_channel->tx_check); | 674 | INIT_LIST_HEAD(&cppi41_channel->tx_check); |
613 | INIT_WORK(&cppi41_channel->dma_completion, | 675 | INIT_WORK(&cppi41_channel->dma_completion, |
614 | cppi_trans_done_work); | 676 | cppi_trans_done_work); |
615 | musb_dma = &cppi41_channel->channel; | 677 | musb_dma = &cppi41_channel->channel; |
616 | musb_dma->private_data = cppi41_channel; | 678 | musb_dma->private_data = cppi41_channel; |
617 | musb_dma->status = MUSB_DMA_STATUS_FREE; | 679 | musb_dma->status = MUSB_DMA_STATUS_FREE; |
618 | musb_dma->max_len = SZ_4M; | 680 | musb_dma->max_len = SZ_4M; |
619 | 681 | ||
620 | dc = dma_request_slave_channel(dev, str); | 682 | dc = dma_request_slave_channel(dev, str); |
621 | if (!dc) { | 683 | if (!dc) { |
622 | dev_err(dev, "Falied to request %s.\n", str); | 684 | dev_err(dev, "Falied to request %s.\n", str); |
623 | ret = -EPROBE_DEFER; | 685 | ret = -EPROBE_DEFER; |
624 | goto err; | 686 | goto err; |
625 | } | 687 | } |
626 | cppi41_channel->dc = dc; | 688 | cppi41_channel->dc = dc; |
627 | } | 689 | } |
628 | return 0; | 690 | return 0; |
629 | err: | 691 | err: |
630 | cppi41_release_all_dma_chans(controller); | 692 | cppi41_release_all_dma_chans(controller); |
631 | return ret; | 693 | return ret; |
632 | } | 694 | } |
633 | 695 | ||
634 | void dma_controller_destroy(struct dma_controller *c) | 696 | void dma_controller_destroy(struct dma_controller *c) |
635 | { | 697 | { |
636 | struct cppi41_dma_controller *controller = container_of(c, | 698 | struct cppi41_dma_controller *controller = container_of(c, |
637 | struct cppi41_dma_controller, controller); | 699 | struct cppi41_dma_controller, controller); |
638 | 700 | ||
639 | hrtimer_cancel(&controller->early_tx); | 701 | hrtimer_cancel(&controller->early_tx); |
640 | cppi41_dma_controller_stop(controller); | 702 | cppi41_dma_controller_stop(controller); |
641 | kfree(controller); | 703 | kfree(controller); |
642 | } | 704 | } |
643 | 705 | ||
644 | struct dma_controller *dma_controller_create(struct musb *musb, | 706 | struct dma_controller *dma_controller_create(struct musb *musb, |
645 | void __iomem *base) | 707 | void __iomem *base) |
646 | { | 708 | { |
647 | struct cppi41_dma_controller *controller; | 709 | struct cppi41_dma_controller *controller; |
648 | int ret = 0; | 710 | int ret = 0; |
649 | 711 | ||
650 | if (!musb->controller->of_node) { | 712 | if (!musb->controller->of_node) { |
651 | dev_err(musb->controller, "Need DT for the DMA engine.\n"); | 713 | dev_err(musb->controller, "Need DT for the DMA engine.\n"); |
652 | return NULL; | 714 | return NULL; |
653 | } | 715 | } |
654 | 716 | ||
655 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); | 717 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); |
656 | if (!controller) | 718 | if (!controller) |
657 | goto kzalloc_fail; | 719 | goto kzalloc_fail; |
658 | 720 | ||
659 | hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 721 | hrtimer_init(&controller->early_tx, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
660 | controller->early_tx.function = cppi41_recheck_tx_req; | 722 | controller->early_tx.function = cppi41_recheck_tx_req; |
661 | INIT_LIST_HEAD(&controller->early_tx_list); | 723 | INIT_LIST_HEAD(&controller->early_tx_list); |
662 | controller->musb = musb; | 724 | controller->musb = musb; |
663 | 725 | ||
664 | controller->controller.channel_alloc = cppi41_dma_channel_allocate; | 726 | controller->controller.channel_alloc = cppi41_dma_channel_allocate; |
665 | controller->controller.channel_release = cppi41_dma_channel_release; | 727 | controller->controller.channel_release = cppi41_dma_channel_release; |
666 | controller->controller.channel_program = cppi41_dma_channel_program; | 728 | controller->controller.channel_program = cppi41_dma_channel_program; |
667 | controller->controller.channel_abort = cppi41_dma_channel_abort; | 729 | controller->controller.channel_abort = cppi41_dma_channel_abort; |
668 | controller->controller.is_compatible = cppi41_is_compatible; | 730 | controller->controller.is_compatible = cppi41_is_compatible; |
669 | 731 | ||
670 | ret = cppi41_dma_controller_start(controller); | 732 | ret = cppi41_dma_controller_start(controller); |
671 | if (ret) | 733 | if (ret) |
672 | goto plat_get_fail; | 734 | goto plat_get_fail; |
673 | return &controller->controller; | 735 | return &controller->controller; |
674 | 736 | ||
675 | plat_get_fail: | 737 | plat_get_fail: |
676 | kfree(controller); | 738 | kfree(controller); |
677 | kzalloc_fail: | 739 | kzalloc_fail: |
678 | if (ret == -EPROBE_DEFER) | 740 | if (ret == -EPROBE_DEFER) |
679 | return ERR_PTR(ret); | 741 | return ERR_PTR(ret); |
680 | return NULL; | 742 | return NULL; |