Commit 9047428894660f8f46305917e519ab62f7395cac
Committed by Felipe Balbi
1 parent: 51ef74f640
Exists in smarc-imx_3.14.28_1.0.0_ga and in 1 other branch
usb: musb: only remove host/udc if it has been added
musb_shutdown() always removes the USB host and device. musb_init_controller() adds the host and device depending on port_mode. If port mode is set to HOST, then removing the UDC leads only to:

    (NULL device *): gadget not registered.

and nothing else happens. If port mode is set to DEVICE and we remove the host, then we oops in usb_remove_hcd(). This patch ensures that we only remove the host in OTG/host mode and the device only in OTG/device mode, to avoid any trouble.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Felipe Balbi <balbi@ti.com>
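As a rough illustration of the guard described above (a sketch only, not the verbatim patch hunks; the helper names musb_gadget_cleanup()/musb_host_cleanup(), the port_mode field and the MUSB_PORT_MODE_* values are assumptions about the driver, not taken from this page):

	/* Sketch: skip each removal when that role was never added, so the
	 * shutdown path no longer touches an absent host or UDC.
	 */
	void musb_gadget_cleanup(struct musb *musb)
	{
		/* host-only port: no UDC was registered, nothing to remove */
		if (musb->port_mode == MUSB_PORT_MODE_HOST)
			return;
		usb_del_gadget_udc(&musb->g);
	}

	void musb_host_cleanup(struct musb *musb)
	{
		/* gadget-only port: no HCD was registered, nothing to remove */
		if (musb->port_mode == MUSB_PORT_MODE_GADGET)
			return;
		usb_remove_hcd(musb->hcd);
	}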
Showing 2 changed files with 4 additions and 0 deletions

Inline Diff
drivers/usb/musb/musb_gadget.c
1 | /* | 1 | /* |
2 | * MUSB OTG driver peripheral support | 2 | * MUSB OTG driver peripheral support |
3 | * | 3 | * |
4 | * Copyright 2005 Mentor Graphics Corporation | 4 | * Copyright 2005 Mentor Graphics Corporation |
5 | * Copyright (C) 2005-2006 by Texas Instruments | 5 | * Copyright (C) 2005-2006 by Texas Instruments |
6 | * Copyright (C) 2006-2007 Nokia Corporation | 6 | * Copyright (C) 2006-2007 Nokia Corporation |
7 | * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> | 7 | * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
11 | * version 2 as published by the Free Software Foundation. | 11 | * version 2 as published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, but | 13 | * This program is distributed in the hope that it will be useful, but |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | * General Public License for more details. | 16 | * General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | 20 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA |
21 | * 02110-1301 USA | 21 | * 02110-1301 USA |
22 | * | 22 | * |
23 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | 23 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED |
24 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | 24 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
25 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | 25 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN |
26 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | 26 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | 27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
28 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | 28 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
29 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | 29 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON |
30 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 30 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
32 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 32 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
33 | * | 33 | * |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/kernel.h> | 36 | #include <linux/kernel.h> |
37 | #include <linux/list.h> | 37 | #include <linux/list.h> |
38 | #include <linux/timer.h> | 38 | #include <linux/timer.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/smp.h> | 40 | #include <linux/smp.h> |
41 | #include <linux/spinlock.h> | 41 | #include <linux/spinlock.h> |
42 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
43 | #include <linux/dma-mapping.h> | 43 | #include <linux/dma-mapping.h> |
44 | #include <linux/slab.h> | 44 | #include <linux/slab.h> |
45 | 45 | ||
46 | #include "musb_core.h" | 46 | #include "musb_core.h" |
47 | 47 | ||
48 | 48 | ||
49 | /* ----------------------------------------------------------------------- */ | 49 | /* ----------------------------------------------------------------------- */ |
50 | 50 | ||
51 | #define is_buffer_mapped(req) (is_dma_capable() && \ | 51 | #define is_buffer_mapped(req) (is_dma_capable() && \ |
52 | (req->map_state != UN_MAPPED)) | 52 | (req->map_state != UN_MAPPED)) |
53 | 53 | ||
54 | /* Maps the buffer to dma */ | 54 | /* Maps the buffer to dma */ |
55 | 55 | ||
56 | static inline void map_dma_buffer(struct musb_request *request, | 56 | static inline void map_dma_buffer(struct musb_request *request, |
57 | struct musb *musb, struct musb_ep *musb_ep) | 57 | struct musb *musb, struct musb_ep *musb_ep) |
58 | { | 58 | { |
59 | int compatible = true; | 59 | int compatible = true; |
60 | struct dma_controller *dma = musb->dma_controller; | 60 | struct dma_controller *dma = musb->dma_controller; |
61 | 61 | ||
62 | request->map_state = UN_MAPPED; | 62 | request->map_state = UN_MAPPED; |
63 | 63 | ||
64 | if (!is_dma_capable() || !musb_ep->dma) | 64 | if (!is_dma_capable() || !musb_ep->dma) |
65 | return; | 65 | return; |
66 | 66 | ||
67 | /* Check if DMA engine can handle this request. | 67 | /* Check if DMA engine can handle this request. |
68 | * DMA code must reject the USB request explicitly. | 68 | * DMA code must reject the USB request explicitly. |
69 | * Default behaviour is to map the request. | 69 | * Default behaviour is to map the request. |
70 | */ | 70 | */ |
71 | if (dma->is_compatible) | 71 | if (dma->is_compatible) |
72 | compatible = dma->is_compatible(musb_ep->dma, | 72 | compatible = dma->is_compatible(musb_ep->dma, |
73 | musb_ep->packet_sz, request->request.buf, | 73 | musb_ep->packet_sz, request->request.buf, |
74 | request->request.length); | 74 | request->request.length); |
75 | if (!compatible) | 75 | if (!compatible) |
76 | return; | 76 | return; |
77 | 77 | ||
78 | if (request->request.dma == DMA_ADDR_INVALID) { | 78 | if (request->request.dma == DMA_ADDR_INVALID) { |
79 | dma_addr_t dma_addr; | 79 | dma_addr_t dma_addr; |
80 | int ret; | 80 | int ret; |
81 | 81 | ||
82 | dma_addr = dma_map_single( | 82 | dma_addr = dma_map_single( |
83 | musb->controller, | 83 | musb->controller, |
84 | request->request.buf, | 84 | request->request.buf, |
85 | request->request.length, | 85 | request->request.length, |
86 | request->tx | 86 | request->tx |
87 | ? DMA_TO_DEVICE | 87 | ? DMA_TO_DEVICE |
88 | : DMA_FROM_DEVICE); | 88 | : DMA_FROM_DEVICE); |
89 | ret = dma_mapping_error(musb->controller, dma_addr); | 89 | ret = dma_mapping_error(musb->controller, dma_addr); |
90 | if (ret) | 90 | if (ret) |
91 | return; | 91 | return; |
92 | 92 | ||
93 | request->request.dma = dma_addr; | 93 | request->request.dma = dma_addr; |
94 | request->map_state = MUSB_MAPPED; | 94 | request->map_state = MUSB_MAPPED; |
95 | } else { | 95 | } else { |
96 | dma_sync_single_for_device(musb->controller, | 96 | dma_sync_single_for_device(musb->controller, |
97 | request->request.dma, | 97 | request->request.dma, |
98 | request->request.length, | 98 | request->request.length, |
99 | request->tx | 99 | request->tx |
100 | ? DMA_TO_DEVICE | 100 | ? DMA_TO_DEVICE |
101 | : DMA_FROM_DEVICE); | 101 | : DMA_FROM_DEVICE); |
102 | request->map_state = PRE_MAPPED; | 102 | request->map_state = PRE_MAPPED; |
103 | } | 103 | } |
104 | } | 104 | } |
105 | 105 | ||
106 | /* Unmap the buffer from dma and maps it back to cpu */ | 106 | /* Unmap the buffer from dma and maps it back to cpu */ |
107 | static inline void unmap_dma_buffer(struct musb_request *request, | 107 | static inline void unmap_dma_buffer(struct musb_request *request, |
108 | struct musb *musb) | 108 | struct musb *musb) |
109 | { | 109 | { |
110 | struct musb_ep *musb_ep = request->ep; | 110 | struct musb_ep *musb_ep = request->ep; |
111 | 111 | ||
112 | if (!is_buffer_mapped(request) || !musb_ep->dma) | 112 | if (!is_buffer_mapped(request) || !musb_ep->dma) |
113 | return; | 113 | return; |
114 | 114 | ||
115 | if (request->request.dma == DMA_ADDR_INVALID) { | 115 | if (request->request.dma == DMA_ADDR_INVALID) { |
116 | dev_vdbg(musb->controller, | 116 | dev_vdbg(musb->controller, |
117 | "not unmapping a never mapped buffer\n"); | 117 | "not unmapping a never mapped buffer\n"); |
118 | return; | 118 | return; |
119 | } | 119 | } |
120 | if (request->map_state == MUSB_MAPPED) { | 120 | if (request->map_state == MUSB_MAPPED) { |
121 | dma_unmap_single(musb->controller, | 121 | dma_unmap_single(musb->controller, |
122 | request->request.dma, | 122 | request->request.dma, |
123 | request->request.length, | 123 | request->request.length, |
124 | request->tx | 124 | request->tx |
125 | ? DMA_TO_DEVICE | 125 | ? DMA_TO_DEVICE |
126 | : DMA_FROM_DEVICE); | 126 | : DMA_FROM_DEVICE); |
127 | request->request.dma = DMA_ADDR_INVALID; | 127 | request->request.dma = DMA_ADDR_INVALID; |
128 | } else { /* PRE_MAPPED */ | 128 | } else { /* PRE_MAPPED */ |
129 | dma_sync_single_for_cpu(musb->controller, | 129 | dma_sync_single_for_cpu(musb->controller, |
130 | request->request.dma, | 130 | request->request.dma, |
131 | request->request.length, | 131 | request->request.length, |
132 | request->tx | 132 | request->tx |
133 | ? DMA_TO_DEVICE | 133 | ? DMA_TO_DEVICE |
134 | : DMA_FROM_DEVICE); | 134 | : DMA_FROM_DEVICE); |
135 | } | 135 | } |
136 | request->map_state = UN_MAPPED; | 136 | request->map_state = UN_MAPPED; |
137 | } | 137 | } |
138 | 138 | ||
139 | /* | 139 | /* |
140 | * Immediately complete a request. | 140 | * Immediately complete a request. |
141 | * | 141 | * |
142 | * @param request the request to complete | 142 | * @param request the request to complete |
143 | * @param status the status to complete the request with | 143 | * @param status the status to complete the request with |
144 | * Context: controller locked, IRQs blocked. | 144 | * Context: controller locked, IRQs blocked. |
145 | */ | 145 | */ |
146 | void musb_g_giveback( | 146 | void musb_g_giveback( |
147 | struct musb_ep *ep, | 147 | struct musb_ep *ep, |
148 | struct usb_request *request, | 148 | struct usb_request *request, |
149 | int status) | 149 | int status) |
150 | __releases(ep->musb->lock) | 150 | __releases(ep->musb->lock) |
151 | __acquires(ep->musb->lock) | 151 | __acquires(ep->musb->lock) |
152 | { | 152 | { |
153 | struct musb_request *req; | 153 | struct musb_request *req; |
154 | struct musb *musb; | 154 | struct musb *musb; |
155 | int busy = ep->busy; | 155 | int busy = ep->busy; |
156 | 156 | ||
157 | req = to_musb_request(request); | 157 | req = to_musb_request(request); |
158 | 158 | ||
159 | list_del(&req->list); | 159 | list_del(&req->list); |
160 | if (req->request.status == -EINPROGRESS) | 160 | if (req->request.status == -EINPROGRESS) |
161 | req->request.status = status; | 161 | req->request.status = status; |
162 | musb = req->musb; | 162 | musb = req->musb; |
163 | 163 | ||
164 | ep->busy = 1; | 164 | ep->busy = 1; |
165 | spin_unlock(&musb->lock); | 165 | spin_unlock(&musb->lock); |
166 | 166 | ||
167 | if (!dma_mapping_error(&musb->g.dev, request->dma)) | 167 | if (!dma_mapping_error(&musb->g.dev, request->dma)) |
168 | unmap_dma_buffer(req, musb); | 168 | unmap_dma_buffer(req, musb); |
169 | 169 | ||
170 | if (request->status == 0) | 170 | if (request->status == 0) |
171 | dev_dbg(musb->controller, "%s done request %p, %d/%d\n", | 171 | dev_dbg(musb->controller, "%s done request %p, %d/%d\n", |
172 | ep->end_point.name, request, | 172 | ep->end_point.name, request, |
173 | req->request.actual, req->request.length); | 173 | req->request.actual, req->request.length); |
174 | else | 174 | else |
175 | dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", | 175 | dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", |
176 | ep->end_point.name, request, | 176 | ep->end_point.name, request, |
177 | req->request.actual, req->request.length, | 177 | req->request.actual, req->request.length, |
178 | request->status); | 178 | request->status); |
179 | req->request.complete(&req->ep->end_point, &req->request); | 179 | req->request.complete(&req->ep->end_point, &req->request); |
180 | spin_lock(&musb->lock); | 180 | spin_lock(&musb->lock); |
181 | ep->busy = busy; | 181 | ep->busy = busy; |
182 | } | 182 | } |
183 | 183 | ||
184 | /* ----------------------------------------------------------------------- */ | 184 | /* ----------------------------------------------------------------------- */ |
185 | 185 | ||
186 | /* | 186 | /* |
187 | * Abort requests queued to an endpoint using the status. Synchronous. | 187 | * Abort requests queued to an endpoint using the status. Synchronous. |
188 | * caller locked controller and blocked irqs, and selected this ep. | 188 | * caller locked controller and blocked irqs, and selected this ep. |
189 | */ | 189 | */ |
190 | static void nuke(struct musb_ep *ep, const int status) | 190 | static void nuke(struct musb_ep *ep, const int status) |
191 | { | 191 | { |
192 | struct musb *musb = ep->musb; | 192 | struct musb *musb = ep->musb; |
193 | struct musb_request *req = NULL; | 193 | struct musb_request *req = NULL; |
194 | void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; | 194 | void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; |
195 | 195 | ||
196 | ep->busy = 1; | 196 | ep->busy = 1; |
197 | 197 | ||
198 | if (is_dma_capable() && ep->dma) { | 198 | if (is_dma_capable() && ep->dma) { |
199 | struct dma_controller *c = ep->musb->dma_controller; | 199 | struct dma_controller *c = ep->musb->dma_controller; |
200 | int value; | 200 | int value; |
201 | 201 | ||
202 | if (ep->is_in) { | 202 | if (ep->is_in) { |
203 | /* | 203 | /* |
204 | * The programming guide says that we must not clear | 204 | * The programming guide says that we must not clear |
205 | * the DMAMODE bit before DMAENAB, so we only | 205 | * the DMAMODE bit before DMAENAB, so we only |
206 | * clear it in the second write... | 206 | * clear it in the second write... |
207 | */ | 207 | */ |
208 | musb_writew(epio, MUSB_TXCSR, | 208 | musb_writew(epio, MUSB_TXCSR, |
209 | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); | 209 | MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); |
210 | musb_writew(epio, MUSB_TXCSR, | 210 | musb_writew(epio, MUSB_TXCSR, |
211 | 0 | MUSB_TXCSR_FLUSHFIFO); | 211 | 0 | MUSB_TXCSR_FLUSHFIFO); |
212 | } else { | 212 | } else { |
213 | musb_writew(epio, MUSB_RXCSR, | 213 | musb_writew(epio, MUSB_RXCSR, |
214 | 0 | MUSB_RXCSR_FLUSHFIFO); | 214 | 0 | MUSB_RXCSR_FLUSHFIFO); |
215 | musb_writew(epio, MUSB_RXCSR, | 215 | musb_writew(epio, MUSB_RXCSR, |
216 | 0 | MUSB_RXCSR_FLUSHFIFO); | 216 | 0 | MUSB_RXCSR_FLUSHFIFO); |
217 | } | 217 | } |
218 | 218 | ||
219 | value = c->channel_abort(ep->dma); | 219 | value = c->channel_abort(ep->dma); |
220 | dev_dbg(musb->controller, "%s: abort DMA --> %d\n", | 220 | dev_dbg(musb->controller, "%s: abort DMA --> %d\n", |
221 | ep->name, value); | 221 | ep->name, value); |
222 | c->channel_release(ep->dma); | 222 | c->channel_release(ep->dma); |
223 | ep->dma = NULL; | 223 | ep->dma = NULL; |
224 | } | 224 | } |
225 | 225 | ||
226 | while (!list_empty(&ep->req_list)) { | 226 | while (!list_empty(&ep->req_list)) { |
227 | req = list_first_entry(&ep->req_list, struct musb_request, list); | 227 | req = list_first_entry(&ep->req_list, struct musb_request, list); |
228 | musb_g_giveback(ep, &req->request, status); | 228 | musb_g_giveback(ep, &req->request, status); |
229 | } | 229 | } |
230 | } | 230 | } |
231 | 231 | ||
232 | /* ----------------------------------------------------------------------- */ | 232 | /* ----------------------------------------------------------------------- */ |
233 | 233 | ||
234 | /* Data transfers - pure PIO, pure DMA, or mixed mode */ | 234 | /* Data transfers - pure PIO, pure DMA, or mixed mode */ |
235 | 235 | ||
236 | /* | 236 | /* |
237 | * This assumes the separate CPPI engine is responding to DMA requests | 237 | * This assumes the separate CPPI engine is responding to DMA requests |
238 | * from the usb core ... sequenced a bit differently from mentor dma. | 238 | * from the usb core ... sequenced a bit differently from mentor dma. |
239 | */ | 239 | */ |
240 | 240 | ||
241 | static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) | 241 | static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) |
242 | { | 242 | { |
243 | if (can_bulk_split(musb, ep->type)) | 243 | if (can_bulk_split(musb, ep->type)) |
244 | return ep->hw_ep->max_packet_sz_tx; | 244 | return ep->hw_ep->max_packet_sz_tx; |
245 | else | 245 | else |
246 | return ep->packet_sz; | 246 | return ep->packet_sz; |
247 | } | 247 | } |
248 | 248 | ||
249 | /* | 249 | /* |
250 | * An endpoint is transmitting data. This can be called either from | 250 | * An endpoint is transmitting data. This can be called either from |
251 | * the IRQ routine or from ep.queue() to kickstart a request on an | 251 | * the IRQ routine or from ep.queue() to kickstart a request on an |
252 | * endpoint. | 252 | * endpoint. |
253 | * | 253 | * |
254 | * Context: controller locked, IRQs blocked, endpoint selected | 254 | * Context: controller locked, IRQs blocked, endpoint selected |
255 | */ | 255 | */ |
256 | static void txstate(struct musb *musb, struct musb_request *req) | 256 | static void txstate(struct musb *musb, struct musb_request *req) |
257 | { | 257 | { |
258 | u8 epnum = req->epnum; | 258 | u8 epnum = req->epnum; |
259 | struct musb_ep *musb_ep; | 259 | struct musb_ep *musb_ep; |
260 | void __iomem *epio = musb->endpoints[epnum].regs; | 260 | void __iomem *epio = musb->endpoints[epnum].regs; |
261 | struct usb_request *request; | 261 | struct usb_request *request; |
262 | u16 fifo_count = 0, csr; | 262 | u16 fifo_count = 0, csr; |
263 | int use_dma = 0; | 263 | int use_dma = 0; |
264 | 264 | ||
265 | musb_ep = req->ep; | 265 | musb_ep = req->ep; |
266 | 266 | ||
267 | /* Check if EP is disabled */ | 267 | /* Check if EP is disabled */ |
268 | if (!musb_ep->desc) { | 268 | if (!musb_ep->desc) { |
269 | dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", | 269 | dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", |
270 | musb_ep->end_point.name); | 270 | musb_ep->end_point.name); |
271 | return; | 271 | return; |
272 | } | 272 | } |
273 | 273 | ||
274 | /* we shouldn't get here while DMA is active ... but we do ... */ | 274 | /* we shouldn't get here while DMA is active ... but we do ... */ |
275 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | 275 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { |
276 | dev_dbg(musb->controller, "dma pending...\n"); | 276 | dev_dbg(musb->controller, "dma pending...\n"); |
277 | return; | 277 | return; |
278 | } | 278 | } |
279 | 279 | ||
280 | /* read TXCSR before */ | 280 | /* read TXCSR before */ |
281 | csr = musb_readw(epio, MUSB_TXCSR); | 281 | csr = musb_readw(epio, MUSB_TXCSR); |
282 | 282 | ||
283 | request = &req->request; | 283 | request = &req->request; |
284 | fifo_count = min(max_ep_writesize(musb, musb_ep), | 284 | fifo_count = min(max_ep_writesize(musb, musb_ep), |
285 | (int)(request->length - request->actual)); | 285 | (int)(request->length - request->actual)); |
286 | 286 | ||
287 | if (csr & MUSB_TXCSR_TXPKTRDY) { | 287 | if (csr & MUSB_TXCSR_TXPKTRDY) { |
288 | dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n", | 288 | dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n", |
289 | musb_ep->end_point.name, csr); | 289 | musb_ep->end_point.name, csr); |
290 | return; | 290 | return; |
291 | } | 291 | } |
292 | 292 | ||
293 | if (csr & MUSB_TXCSR_P_SENDSTALL) { | 293 | if (csr & MUSB_TXCSR_P_SENDSTALL) { |
294 | dev_dbg(musb->controller, "%s stalling, txcsr %03x\n", | 294 | dev_dbg(musb->controller, "%s stalling, txcsr %03x\n", |
295 | musb_ep->end_point.name, csr); | 295 | musb_ep->end_point.name, csr); |
296 | return; | 296 | return; |
297 | } | 297 | } |
298 | 298 | ||
299 | dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", | 299 | dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", |
300 | epnum, musb_ep->packet_sz, fifo_count, | 300 | epnum, musb_ep->packet_sz, fifo_count, |
301 | csr); | 301 | csr); |
302 | 302 | ||
303 | #ifndef CONFIG_MUSB_PIO_ONLY | 303 | #ifndef CONFIG_MUSB_PIO_ONLY |
304 | if (is_buffer_mapped(req)) { | 304 | if (is_buffer_mapped(req)) { |
305 | struct dma_controller *c = musb->dma_controller; | 305 | struct dma_controller *c = musb->dma_controller; |
306 | size_t request_size; | 306 | size_t request_size; |
307 | 307 | ||
308 | /* setup DMA, then program endpoint CSR */ | 308 | /* setup DMA, then program endpoint CSR */ |
309 | request_size = min_t(size_t, request->length - request->actual, | 309 | request_size = min_t(size_t, request->length - request->actual, |
310 | musb_ep->dma->max_len); | 310 | musb_ep->dma->max_len); |
311 | 311 | ||
312 | use_dma = (request->dma != DMA_ADDR_INVALID && request_size); | 312 | use_dma = (request->dma != DMA_ADDR_INVALID && request_size); |
313 | 313 | ||
314 | /* MUSB_TXCSR_P_ISO is still set correctly */ | 314 | /* MUSB_TXCSR_P_ISO is still set correctly */ |
315 | 315 | ||
316 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) | 316 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) |
317 | { | 317 | { |
318 | if (request_size < musb_ep->packet_sz) | 318 | if (request_size < musb_ep->packet_sz) |
319 | musb_ep->dma->desired_mode = 0; | 319 | musb_ep->dma->desired_mode = 0; |
320 | else | 320 | else |
321 | musb_ep->dma->desired_mode = 1; | 321 | musb_ep->dma->desired_mode = 1; |
322 | 322 | ||
323 | use_dma = use_dma && c->channel_program( | 323 | use_dma = use_dma && c->channel_program( |
324 | musb_ep->dma, musb_ep->packet_sz, | 324 | musb_ep->dma, musb_ep->packet_sz, |
325 | musb_ep->dma->desired_mode, | 325 | musb_ep->dma->desired_mode, |
326 | request->dma + request->actual, request_size); | 326 | request->dma + request->actual, request_size); |
327 | if (use_dma) { | 327 | if (use_dma) { |
328 | if (musb_ep->dma->desired_mode == 0) { | 328 | if (musb_ep->dma->desired_mode == 0) { |
329 | /* | 329 | /* |
330 | * We must not clear the DMAMODE bit | 330 | * We must not clear the DMAMODE bit |
331 | * before the DMAENAB bit -- and the | 331 | * before the DMAENAB bit -- and the |
332 | * latter doesn't always get cleared | 332 | * latter doesn't always get cleared |
333 | * before we get here... | 333 | * before we get here... |
334 | */ | 334 | */ |
335 | csr &= ~(MUSB_TXCSR_AUTOSET | 335 | csr &= ~(MUSB_TXCSR_AUTOSET |
336 | | MUSB_TXCSR_DMAENAB); | 336 | | MUSB_TXCSR_DMAENAB); |
337 | musb_writew(epio, MUSB_TXCSR, csr | 337 | musb_writew(epio, MUSB_TXCSR, csr |
338 | | MUSB_TXCSR_P_WZC_BITS); | 338 | | MUSB_TXCSR_P_WZC_BITS); |
339 | csr &= ~MUSB_TXCSR_DMAMODE; | 339 | csr &= ~MUSB_TXCSR_DMAMODE; |
340 | csr |= (MUSB_TXCSR_DMAENAB | | 340 | csr |= (MUSB_TXCSR_DMAENAB | |
341 | MUSB_TXCSR_MODE); | 341 | MUSB_TXCSR_MODE); |
342 | /* against programming guide */ | 342 | /* against programming guide */ |
343 | } else { | 343 | } else { |
344 | csr |= (MUSB_TXCSR_DMAENAB | 344 | csr |= (MUSB_TXCSR_DMAENAB |
345 | | MUSB_TXCSR_DMAMODE | 345 | | MUSB_TXCSR_DMAMODE |
346 | | MUSB_TXCSR_MODE); | 346 | | MUSB_TXCSR_MODE); |
347 | /* | 347 | /* |
348 | * Enable Autoset according to table | 348 | * Enable Autoset according to table |
349 | * below | 349 | * below |
350 | * bulk_split hb_mult Autoset_Enable | 350 | * bulk_split hb_mult Autoset_Enable |
351 | * 0 0 Yes(Normal) | 351 | * 0 0 Yes(Normal) |
352 | * 0 >0 No(High BW ISO) | 352 | * 0 >0 No(High BW ISO) |
353 | * 1 0 Yes(HS bulk) | 353 | * 1 0 Yes(HS bulk) |
354 | * 1 >0 Yes(FS bulk) | 354 | * 1 >0 Yes(FS bulk) |
355 | */ | 355 | */ |
356 | if (!musb_ep->hb_mult || | 356 | if (!musb_ep->hb_mult || |
357 | (musb_ep->hb_mult && | 357 | (musb_ep->hb_mult && |
358 | can_bulk_split(musb, | 358 | can_bulk_split(musb, |
359 | musb_ep->type))) | 359 | musb_ep->type))) |
360 | csr |= MUSB_TXCSR_AUTOSET; | 360 | csr |= MUSB_TXCSR_AUTOSET; |
361 | } | 361 | } |
362 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | 362 | csr &= ~MUSB_TXCSR_P_UNDERRUN; |
363 | 363 | ||
364 | musb_writew(epio, MUSB_TXCSR, csr); | 364 | musb_writew(epio, MUSB_TXCSR, csr); |
365 | } | 365 | } |
366 | } | 366 | } |
367 | 367 | ||
368 | #endif | 368 | #endif |
369 | if (is_cppi_enabled()) { | 369 | if (is_cppi_enabled()) { |
370 | /* program endpoint CSR first, then setup DMA */ | 370 | /* program endpoint CSR first, then setup DMA */ |
371 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); | 371 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); |
372 | csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | | 372 | csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | |
373 | MUSB_TXCSR_MODE; | 373 | MUSB_TXCSR_MODE; |
374 | musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS & | 374 | musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS & |
375 | ~MUSB_TXCSR_P_UNDERRUN) | csr); | 375 | ~MUSB_TXCSR_P_UNDERRUN) | csr); |
376 | 376 | ||
377 | /* ensure writebuffer is empty */ | 377 | /* ensure writebuffer is empty */ |
378 | csr = musb_readw(epio, MUSB_TXCSR); | 378 | csr = musb_readw(epio, MUSB_TXCSR); |
379 | 379 | ||
380 | /* | 380 | /* |
381 | * NOTE host side sets DMAENAB later than this; both are | 381 | * NOTE host side sets DMAENAB later than this; both are |
382 | * OK since the transfer dma glue (between CPPI and | 382 | * OK since the transfer dma glue (between CPPI and |
383 | * Mentor fifos) just tells CPPI it could start. Data | 383 | * Mentor fifos) just tells CPPI it could start. Data |
384 | * only moves to the USB TX fifo when both fifos are | 384 | * only moves to the USB TX fifo when both fifos are |
385 | * ready. | 385 | * ready. |
386 | */ | 386 | */ |
387 | /* | 387 | /* |
388 | * "mode" is irrelevant here; handle terminating ZLPs | 388 | * "mode" is irrelevant here; handle terminating ZLPs |
389 | * like PIO does, since the hardware RNDIS mode seems | 389 | * like PIO does, since the hardware RNDIS mode seems |
390 | * unreliable except for the | 390 | * unreliable except for the |
391 | * last-packet-is-already-short case. | 391 | * last-packet-is-already-short case. |
392 | */ | 392 | */ |
393 | use_dma = use_dma && c->channel_program( | 393 | use_dma = use_dma && c->channel_program( |
394 | musb_ep->dma, musb_ep->packet_sz, | 394 | musb_ep->dma, musb_ep->packet_sz, |
395 | 0, | 395 | 0, |
396 | request->dma + request->actual, | 396 | request->dma + request->actual, |
397 | request_size); | 397 | request_size); |
398 | if (!use_dma) { | 398 | if (!use_dma) { |
399 | c->channel_release(musb_ep->dma); | 399 | c->channel_release(musb_ep->dma); |
400 | musb_ep->dma = NULL; | 400 | musb_ep->dma = NULL; |
401 | csr &= ~MUSB_TXCSR_DMAENAB; | 401 | csr &= ~MUSB_TXCSR_DMAENAB; |
402 | musb_writew(epio, MUSB_TXCSR, csr); | 402 | musb_writew(epio, MUSB_TXCSR, csr); |
403 | /* invariant: prequest->buf is non-null */ | 403 | /* invariant: prequest->buf is non-null */ |
404 | } | 404 | } |
405 | } else if (tusb_dma_omap()) | 405 | } else if (tusb_dma_omap()) |
406 | use_dma = use_dma && c->channel_program( | 406 | use_dma = use_dma && c->channel_program( |
407 | musb_ep->dma, musb_ep->packet_sz, | 407 | musb_ep->dma, musb_ep->packet_sz, |
408 | request->zero, | 408 | request->zero, |
409 | request->dma + request->actual, | 409 | request->dma + request->actual, |
410 | request_size); | 410 | request_size); |
411 | } | 411 | } |
412 | #endif | 412 | #endif |
413 | 413 | ||
414 | if (!use_dma) { | 414 | if (!use_dma) { |
415 | /* | 415 | /* |
416 | * Unmap the dma buffer back to cpu if dma channel | 416 | * Unmap the dma buffer back to cpu if dma channel |
417 | * programming fails | 417 | * programming fails |
418 | */ | 418 | */ |
419 | unmap_dma_buffer(req, musb); | 419 | unmap_dma_buffer(req, musb); |
420 | 420 | ||
421 | musb_write_fifo(musb_ep->hw_ep, fifo_count, | 421 | musb_write_fifo(musb_ep->hw_ep, fifo_count, |
422 | (u8 *) (request->buf + request->actual)); | 422 | (u8 *) (request->buf + request->actual)); |
423 | request->actual += fifo_count; | 423 | request->actual += fifo_count; |
424 | csr |= MUSB_TXCSR_TXPKTRDY; | 424 | csr |= MUSB_TXCSR_TXPKTRDY; |
425 | csr &= ~MUSB_TXCSR_P_UNDERRUN; | 425 | csr &= ~MUSB_TXCSR_P_UNDERRUN; |
426 | musb_writew(epio, MUSB_TXCSR, csr); | 426 | musb_writew(epio, MUSB_TXCSR, csr); |
427 | } | 427 | } |
428 | 428 | ||
429 | /* host may already have the data when this message shows... */ | 429 | /* host may already have the data when this message shows... */ |
430 | dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", | 430 | dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", |
431 | musb_ep->end_point.name, use_dma ? "dma" : "pio", | 431 | musb_ep->end_point.name, use_dma ? "dma" : "pio", |
432 | request->actual, request->length, | 432 | request->actual, request->length, |
433 | musb_readw(epio, MUSB_TXCSR), | 433 | musb_readw(epio, MUSB_TXCSR), |
434 | fifo_count, | 434 | fifo_count, |
435 | musb_readw(epio, MUSB_TXMAXP)); | 435 | musb_readw(epio, MUSB_TXMAXP)); |
436 | } | 436 | } |
437 | 437 | ||
438 | /* | 438 | /* |
439 | * FIFO state update (e.g. data ready). | 439 | * FIFO state update (e.g. data ready). |
440 | * Called from IRQ, with controller locked. | 440 | * Called from IRQ, with controller locked. |
441 | */ | 441 | */ |
442 | void musb_g_tx(struct musb *musb, u8 epnum) | 442 | void musb_g_tx(struct musb *musb, u8 epnum) |
443 | { | 443 | { |
444 | u16 csr; | 444 | u16 csr; |
445 | struct musb_request *req; | 445 | struct musb_request *req; |
446 | struct usb_request *request; | 446 | struct usb_request *request; |
447 | u8 __iomem *mbase = musb->mregs; | 447 | u8 __iomem *mbase = musb->mregs; |
448 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; | 448 | struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; |
449 | void __iomem *epio = musb->endpoints[epnum].regs; | 449 | void __iomem *epio = musb->endpoints[epnum].regs; |
450 | struct dma_channel *dma; | 450 | struct dma_channel *dma; |
451 | 451 | ||
452 | musb_ep_select(mbase, epnum); | 452 | musb_ep_select(mbase, epnum); |
453 | req = next_request(musb_ep); | 453 | req = next_request(musb_ep); |
454 | request = &req->request; | 454 | request = &req->request; |
455 | 455 | ||
456 | csr = musb_readw(epio, MUSB_TXCSR); | 456 | csr = musb_readw(epio, MUSB_TXCSR); |
457 | dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); | 457 | dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); |
458 | 458 | ||
459 | dma = is_dma_capable() ? musb_ep->dma : NULL; | 459 | dma = is_dma_capable() ? musb_ep->dma : NULL; |
460 | 460 | ||
461 | /* | 461 | /* |
462 | * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX | 462 | * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX |
463 | * probably rates reporting as a host error. | 463 | * probably rates reporting as a host error. |
464 | */ | 464 | */ |
465 | if (csr & MUSB_TXCSR_P_SENTSTALL) { | 465 | if (csr & MUSB_TXCSR_P_SENTSTALL) { |
466 | csr |= MUSB_TXCSR_P_WZC_BITS; | 466 | csr |= MUSB_TXCSR_P_WZC_BITS; |
467 | csr &= ~MUSB_TXCSR_P_SENTSTALL; | 467 | csr &= ~MUSB_TXCSR_P_SENTSTALL; |
468 | musb_writew(epio, MUSB_TXCSR, csr); | 468 | musb_writew(epio, MUSB_TXCSR, csr); |
469 | return; | 469 | return; |
470 | } | 470 | } |
471 | 471 | ||
472 | if (csr & MUSB_TXCSR_P_UNDERRUN) { | 472 | if (csr & MUSB_TXCSR_P_UNDERRUN) { |
473 | /* We NAKed, no big deal... little reason to care. */ | 473 | /* We NAKed, no big deal... little reason to care. */ |
474 | csr |= MUSB_TXCSR_P_WZC_BITS; | 474 | csr |= MUSB_TXCSR_P_WZC_BITS; |
475 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); | 475 | csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); |
476 | musb_writew(epio, MUSB_TXCSR, csr); | 476 | musb_writew(epio, MUSB_TXCSR, csr); |
477 | dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", | 477 | dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", |
478 | epnum, request); | 478 | epnum, request); |
479 | } | 479 | } |
480 | 480 | ||
481 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 481 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
482 | /* | 482 | /* |
483 | * SHOULD NOT HAPPEN... has with CPPI though, after | 483 | * SHOULD NOT HAPPEN... has with CPPI though, after |
484 | * changing SENDSTALL (and other cases); harmless? | 484 | * changing SENDSTALL (and other cases); harmless? |
485 | */ | 485 | */ |
486 | dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name); | 486 | dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name); |
487 | return; | 487 | return; |
488 | } | 488 | } |
489 | 489 | ||
490 | if (request) { | 490 | if (request) { |
491 | u8 is_dma = 0; | 491 | u8 is_dma = 0; |
492 | 492 | ||
493 | if (dma && (csr & MUSB_TXCSR_DMAENAB)) { | 493 | if (dma && (csr & MUSB_TXCSR_DMAENAB)) { |
494 | is_dma = 1; | 494 | is_dma = 1; |
495 | csr |= MUSB_TXCSR_P_WZC_BITS; | 495 | csr |= MUSB_TXCSR_P_WZC_BITS; |
496 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | | 496 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | |
497 | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); | 497 | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); |
498 | musb_writew(epio, MUSB_TXCSR, csr); | 498 | musb_writew(epio, MUSB_TXCSR, csr); |
499 | /* Ensure writebuffer is empty. */ | 499 | /* Ensure writebuffer is empty. */ |
500 | csr = musb_readw(epio, MUSB_TXCSR); | 500 | csr = musb_readw(epio, MUSB_TXCSR); |
501 | request->actual += musb_ep->dma->actual_len; | 501 | request->actual += musb_ep->dma->actual_len; |
502 | dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n", | 502 | dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n", |
503 | epnum, csr, musb_ep->dma->actual_len, request); | 503 | epnum, csr, musb_ep->dma->actual_len, request); |
504 | } | 504 | } |
505 | 505 | ||
506 | /* | 506 | /* |
507 | * First, maybe a terminating short packet. Some DMA | 507 | * First, maybe a terminating short packet. Some DMA |
508 | * engines might handle this by themselves. | 508 | * engines might handle this by themselves. |
509 | */ | 509 | */ |
510 | if ((request->zero && request->length | 510 | if ((request->zero && request->length |
511 | && (request->length % musb_ep->packet_sz == 0) | 511 | && (request->length % musb_ep->packet_sz == 0) |
512 | && (request->actual == request->length)) | 512 | && (request->actual == request->length)) |
513 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) | 513 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) |
514 | || (is_dma && (!dma->desired_mode || | 514 | || (is_dma && (!dma->desired_mode || |
515 | (request->actual & | 515 | (request->actual & |
516 | (musb_ep->packet_sz - 1)))) | 516 | (musb_ep->packet_sz - 1)))) |
517 | #endif | 517 | #endif |
518 | ) { | 518 | ) { |
519 | /* | 519 | /* |
520 | * On DMA completion, FIFO may not be | 520 | * On DMA completion, FIFO may not be |
521 | * available yet... | 521 | * available yet... |
522 | */ | 522 | */ |
523 | if (csr & MUSB_TXCSR_TXPKTRDY) | 523 | if (csr & MUSB_TXCSR_TXPKTRDY) |
524 | return; | 524 | return; |
525 | 525 | ||
526 | dev_dbg(musb->controller, "sending zero pkt\n"); | 526 | dev_dbg(musb->controller, "sending zero pkt\n"); |
527 | musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE | 527 | musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE |
528 | | MUSB_TXCSR_TXPKTRDY); | 528 | | MUSB_TXCSR_TXPKTRDY); |
529 | request->zero = 0; | 529 | request->zero = 0; |
530 | } | 530 | } |
531 | 531 | ||
532 | if (request->actual == request->length) { | 532 | if (request->actual == request->length) { |
533 | musb_g_giveback(musb_ep, request, 0); | 533 | musb_g_giveback(musb_ep, request, 0); |
534 | /* | 534 | /* |
535 | * In the giveback function the MUSB lock is | 535 | * In the giveback function the MUSB lock is |
536 | * released and acquired after sometime. During | 536 | * released and acquired after sometime. During |
537 | * this time period the INDEX register could get | 537 | * this time period the INDEX register could get |
538 | * changed by the gadget_queue function especially | 538 | * changed by the gadget_queue function especially |
539 | * on SMP systems. Reselect the INDEX to be sure | 539 | * on SMP systems. Reselect the INDEX to be sure |
540 | * we are reading/modifying the right registers | 540 | * we are reading/modifying the right registers |
541 | */ | 541 | */ |
542 | musb_ep_select(mbase, epnum); | 542 | musb_ep_select(mbase, epnum); |
543 | req = musb_ep->desc ? next_request(musb_ep) : NULL; | 543 | req = musb_ep->desc ? next_request(musb_ep) : NULL; |
544 | if (!req) { | 544 | if (!req) { |
545 | dev_dbg(musb->controller, "%s idle now\n", | 545 | dev_dbg(musb->controller, "%s idle now\n", |
546 | musb_ep->end_point.name); | 546 | musb_ep->end_point.name); |
547 | return; | 547 | return; |
548 | } | 548 | } |
549 | } | 549 | } |
550 | 550 | ||
551 | txstate(musb, req); | 551 | txstate(musb, req); |
552 | } | 552 | } |
553 | } | 553 | } |
554 | 554 | ||
555 | /* ------------------------------------------------------------ */ | 555 | /* ------------------------------------------------------------ */ |
556 | 556 | ||
557 | /* | 557 | /* |
558 | * Context: controller locked, IRQs blocked, endpoint selected | 558 | * Context: controller locked, IRQs blocked, endpoint selected |
559 | */ | 559 | */ |
560 | static void rxstate(struct musb *musb, struct musb_request *req) | 560 | static void rxstate(struct musb *musb, struct musb_request *req) |
561 | { | 561 | { |
562 | const u8 epnum = req->epnum; | 562 | const u8 epnum = req->epnum; |
563 | struct usb_request *request = &req->request; | 563 | struct usb_request *request = &req->request; |
564 | struct musb_ep *musb_ep; | 564 | struct musb_ep *musb_ep; |
565 | void __iomem *epio = musb->endpoints[epnum].regs; | 565 | void __iomem *epio = musb->endpoints[epnum].regs; |
566 | unsigned len = 0; | 566 | unsigned len = 0; |
567 | u16 fifo_count; | 567 | u16 fifo_count; |
568 | u16 csr = musb_readw(epio, MUSB_RXCSR); | 568 | u16 csr = musb_readw(epio, MUSB_RXCSR); |
569 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | 569 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; |
570 | u8 use_mode_1; | 570 | u8 use_mode_1; |
571 | 571 | ||
572 | if (hw_ep->is_shared_fifo) | 572 | if (hw_ep->is_shared_fifo) |
573 | musb_ep = &hw_ep->ep_in; | 573 | musb_ep = &hw_ep->ep_in; |
574 | else | 574 | else |
575 | musb_ep = &hw_ep->ep_out; | 575 | musb_ep = &hw_ep->ep_out; |
576 | 576 | ||
577 | fifo_count = musb_ep->packet_sz; | 577 | fifo_count = musb_ep->packet_sz; |
578 | 578 | ||
579 | /* Check if EP is disabled */ | 579 | /* Check if EP is disabled */ |
580 | if (!musb_ep->desc) { | 580 | if (!musb_ep->desc) { |
581 | dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", | 581 | dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", |
582 | musb_ep->end_point.name); | 582 | musb_ep->end_point.name); |
583 | return; | 583 | return; |
584 | } | 584 | } |
585 | 585 | ||
586 | /* We shouldn't get here while DMA is active, but we do... */ | 586 | /* We shouldn't get here while DMA is active, but we do... */ |
587 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { | 587 | if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { |
588 | dev_dbg(musb->controller, "DMA pending...\n"); | 588 | dev_dbg(musb->controller, "DMA pending...\n"); |
589 | return; | 589 | return; |
590 | } | 590 | } |
591 | 591 | ||
592 | if (csr & MUSB_RXCSR_P_SENDSTALL) { | 592 | if (csr & MUSB_RXCSR_P_SENDSTALL) { |
593 | dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n", | 593 | dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n", |
594 | musb_ep->end_point.name, csr); | 594 | musb_ep->end_point.name, csr); |
595 | return; | 595 | return; |
596 | } | 596 | } |
597 | 597 | ||
598 | if (is_cppi_enabled() && is_buffer_mapped(req)) { | 598 | if (is_cppi_enabled() && is_buffer_mapped(req)) { |
599 | struct dma_controller *c = musb->dma_controller; | 599 | struct dma_controller *c = musb->dma_controller; |
600 | struct dma_channel *channel = musb_ep->dma; | 600 | struct dma_channel *channel = musb_ep->dma; |
601 | 601 | ||
602 | /* NOTE: CPPI won't actually stop advancing the DMA | 602 | /* NOTE: CPPI won't actually stop advancing the DMA |
603 | * queue after short packet transfers, so this is almost | 603 | * queue after short packet transfers, so this is almost |
604 | * always going to run as IRQ-per-packet DMA so that | 604 | * always going to run as IRQ-per-packet DMA so that |
605 | * faults will be handled correctly. | 605 | * faults will be handled correctly. |
606 | */ | 606 | */ |
607 | if (c->channel_program(channel, | 607 | if (c->channel_program(channel, |
608 | musb_ep->packet_sz, | 608 | musb_ep->packet_sz, |
609 | !request->short_not_ok, | 609 | !request->short_not_ok, |
610 | request->dma + request->actual, | 610 | request->dma + request->actual, |
611 | request->length - request->actual)) { | 611 | request->length - request->actual)) { |
612 | 612 | ||
613 | /* make sure that if an rxpkt arrived after the irq, | 613 | /* make sure that if an rxpkt arrived after the irq, |
614 | * the cppi engine will be ready to take it as soon | 614 | * the cppi engine will be ready to take it as soon |
615 | * as DMA is enabled | 615 | * as DMA is enabled |
616 | */ | 616 | */ |
617 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | 617 | csr &= ~(MUSB_RXCSR_AUTOCLEAR |
618 | | MUSB_RXCSR_DMAMODE); | 618 | | MUSB_RXCSR_DMAMODE); |
619 | csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; | 619 | csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; |
620 | musb_writew(epio, MUSB_RXCSR, csr); | 620 | musb_writew(epio, MUSB_RXCSR, csr); |
621 | return; | 621 | return; |
622 | } | 622 | } |
623 | } | 623 | } |
624 | 624 | ||
625 | if (csr & MUSB_RXCSR_RXPKTRDY) { | 625 | if (csr & MUSB_RXCSR_RXPKTRDY) { |
626 | fifo_count = musb_readw(epio, MUSB_RXCOUNT); | 626 | fifo_count = musb_readw(epio, MUSB_RXCOUNT); |
627 | 627 | ||
628 | /* | 628 | /* |
629 | * Enable Mode 1 on RX transfers only when short_not_ok flag | 629 | * Enable Mode 1 on RX transfers only when short_not_ok flag |
630 | * is set. Currently short_not_ok flag is set only from | 630 | * is set. Currently short_not_ok flag is set only from |
631 | * file_storage and f_mass_storage drivers | 631 | * file_storage and f_mass_storage drivers |
632 | */ | 632 | */ |
633 | 633 | ||
634 | if (request->short_not_ok && fifo_count == musb_ep->packet_sz) | 634 | if (request->short_not_ok && fifo_count == musb_ep->packet_sz) |
635 | use_mode_1 = 1; | 635 | use_mode_1 = 1; |
636 | else | 636 | else |
637 | use_mode_1 = 0; | 637 | use_mode_1 = 0; |
638 | 638 | ||
639 | if (request->actual < request->length) { | 639 | if (request->actual < request->length) { |
640 | #ifdef CONFIG_USB_INVENTRA_DMA | 640 | #ifdef CONFIG_USB_INVENTRA_DMA |
641 | if (is_buffer_mapped(req)) { | 641 | if (is_buffer_mapped(req)) { |
642 | struct dma_controller *c; | 642 | struct dma_controller *c; |
643 | struct dma_channel *channel; | 643 | struct dma_channel *channel; |
644 | int use_dma = 0; | 644 | int use_dma = 0; |
645 | unsigned int transfer_size; | 645 | unsigned int transfer_size; |
646 | 646 | ||
647 | c = musb->dma_controller; | 647 | c = musb->dma_controller; |
648 | channel = musb_ep->dma; | 648 | channel = musb_ep->dma; |
649 | 649 | ||
650 | /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in | 650 | /* We use DMA Req mode 0 in rx_csr, and DMA controller operates in |
651 | * mode 0 only. So we do not get endpoint interrupts due to DMA | 651 | * mode 0 only. So we do not get endpoint interrupts due to DMA |
652 | * completion. We only get interrupts from DMA controller. | 652 | * completion. We only get interrupts from DMA controller. |
653 | * | 653 | * |
654 | * We could operate in DMA mode 1 if we knew the size of the tranfer | 654 | * We could operate in DMA mode 1 if we knew the size of the tranfer |
655 | * in advance. For mass storage class, request->length = what the host | 655 | * in advance. For mass storage class, request->length = what the host |
656 | * sends, so that'd work. But for pretty much everything else, | 656 | * sends, so that'd work. But for pretty much everything else, |
657 | * request->length is routinely more than what the host sends. For | 657 | * request->length is routinely more than what the host sends. For |
658 | * most these gadgets, end of is signified either by a short packet, | 658 | * most these gadgets, end of is signified either by a short packet, |
659 | * or filling the last byte of the buffer. (Sending extra data in | 659 | * or filling the last byte of the buffer. (Sending extra data in |
660 | * that last pckate should trigger an overflow fault.) But in mode 1, | 660 | * that last pckate should trigger an overflow fault.) But in mode 1, |
661 | * we don't get DMA completion interrupt for short packets. | 661 | * we don't get DMA completion interrupt for short packets. |
662 | * | 662 | * |
663 | * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), | 663 | * Theoretically, we could enable DMAReq irq (MUSB_RXCSR_DMAMODE = 1), |
664 | * to get endpoint interrupt on every DMA req, but that didn't seem | 664 | * to get endpoint interrupt on every DMA req, but that didn't seem |
665 | * to work reliably. | 665 | * to work reliably. |
666 | * | 666 | * |
667 | * REVISIT an updated g_file_storage can set req->short_not_ok, which | 667 | * REVISIT an updated g_file_storage can set req->short_not_ok, which |
668 | * then becomes usable as a runtime "use mode 1" hint... | 668 | * then becomes usable as a runtime "use mode 1" hint... |
669 | */ | 669 | */ |
670 | 670 | ||
671 | /* Experimental: Mode1 works with mass storage use cases */ | 671 | /* Experimental: Mode1 works with mass storage use cases */ |
672 | if (use_mode_1) { | 672 | if (use_mode_1) { |
673 | csr |= MUSB_RXCSR_AUTOCLEAR; | 673 | csr |= MUSB_RXCSR_AUTOCLEAR; |
674 | musb_writew(epio, MUSB_RXCSR, csr); | 674 | musb_writew(epio, MUSB_RXCSR, csr); |
675 | csr |= MUSB_RXCSR_DMAENAB; | 675 | csr |= MUSB_RXCSR_DMAENAB; |
676 | musb_writew(epio, MUSB_RXCSR, csr); | 676 | musb_writew(epio, MUSB_RXCSR, csr); |
677 | 677 | ||
678 | /* | 678 | /* |
679 | * this special sequence (enabling and then | 679 | * this special sequence (enabling and then |
680 | * disabling MUSB_RXCSR_DMAMODE) is required | 680 | * disabling MUSB_RXCSR_DMAMODE) is required |
681 | * to get DMAReq to activate | 681 | * to get DMAReq to activate |
682 | */ | 682 | */ |
683 | musb_writew(epio, MUSB_RXCSR, | 683 | musb_writew(epio, MUSB_RXCSR, |
684 | csr | MUSB_RXCSR_DMAMODE); | 684 | csr | MUSB_RXCSR_DMAMODE); |
685 | musb_writew(epio, MUSB_RXCSR, csr); | 685 | musb_writew(epio, MUSB_RXCSR, csr); |
686 | 686 | ||
687 | transfer_size = min_t(unsigned int, | 687 | transfer_size = min_t(unsigned int, |
688 | request->length - | 688 | request->length - |
689 | request->actual, | 689 | request->actual, |
690 | channel->max_len); | 690 | channel->max_len); |
691 | musb_ep->dma->desired_mode = 1; | 691 | musb_ep->dma->desired_mode = 1; |
692 | } else { | 692 | } else { |
693 | if (!musb_ep->hb_mult && | 693 | if (!musb_ep->hb_mult && |
694 | musb_ep->hw_ep->rx_double_buffered) | 694 | musb_ep->hw_ep->rx_double_buffered) |
695 | csr |= MUSB_RXCSR_AUTOCLEAR; | 695 | csr |= MUSB_RXCSR_AUTOCLEAR; |
696 | csr |= MUSB_RXCSR_DMAENAB; | 696 | csr |= MUSB_RXCSR_DMAENAB; |
697 | musb_writew(epio, MUSB_RXCSR, csr); | 697 | musb_writew(epio, MUSB_RXCSR, csr); |
698 | 698 | ||
699 | transfer_size = min(request->length - request->actual, | 699 | transfer_size = min(request->length - request->actual, |
700 | (unsigned)fifo_count); | 700 | (unsigned)fifo_count); |
701 | musb_ep->dma->desired_mode = 0; | 701 | musb_ep->dma->desired_mode = 0; |
702 | } | 702 | } |
703 | 703 | ||
704 | use_dma = c->channel_program( | 704 | use_dma = c->channel_program( |
705 | channel, | 705 | channel, |
706 | musb_ep->packet_sz, | 706 | musb_ep->packet_sz, |
707 | channel->desired_mode, | 707 | channel->desired_mode, |
708 | request->dma | 708 | request->dma |
709 | + request->actual, | 709 | + request->actual, |
710 | transfer_size); | 710 | transfer_size); |
711 | 711 | ||
712 | if (use_dma) | 712 | if (use_dma) |
713 | return; | 713 | return; |
714 | } | 714 | } |
715 | #elif defined(CONFIG_USB_UX500_DMA) | 715 | #elif defined(CONFIG_USB_UX500_DMA) |
716 | if ((is_buffer_mapped(req)) && | 716 | if ((is_buffer_mapped(req)) && |
717 | (request->actual < request->length)) { | 717 | (request->actual < request->length)) { |
718 | 718 | ||
719 | struct dma_controller *c; | 719 | struct dma_controller *c; |
720 | struct dma_channel *channel; | 720 | struct dma_channel *channel; |
721 | unsigned int transfer_size = 0; | 721 | unsigned int transfer_size = 0; |
722 | 722 | ||
723 | c = musb->dma_controller; | 723 | c = musb->dma_controller; |
724 | channel = musb_ep->dma; | 724 | channel = musb_ep->dma; |
725 | 725 | ||
726 | /* In case first packet is short */ | 726 | /* In case first packet is short */ |
727 | if (fifo_count < musb_ep->packet_sz) | 727 | if (fifo_count < musb_ep->packet_sz) |
728 | transfer_size = fifo_count; | 728 | transfer_size = fifo_count; |
729 | else if (request->short_not_ok) | 729 | else if (request->short_not_ok) |
730 | transfer_size = min_t(unsigned int, | 730 | transfer_size = min_t(unsigned int, |
731 | request->length - | 731 | request->length - |
732 | request->actual, | 732 | request->actual, |
733 | channel->max_len); | 733 | channel->max_len); |
734 | else | 734 | else |
735 | transfer_size = min_t(unsigned int, | 735 | transfer_size = min_t(unsigned int, |
736 | request->length - | 736 | request->length - |
737 | request->actual, | 737 | request->actual, |
738 | (unsigned)fifo_count); | 738 | (unsigned)fifo_count); |
739 | 739 | ||
740 | csr &= ~MUSB_RXCSR_DMAMODE; | 740 | csr &= ~MUSB_RXCSR_DMAMODE; |
741 | csr |= (MUSB_RXCSR_DMAENAB | | 741 | csr |= (MUSB_RXCSR_DMAENAB | |
742 | MUSB_RXCSR_AUTOCLEAR); | 742 | MUSB_RXCSR_AUTOCLEAR); |
743 | 743 | ||
744 | musb_writew(epio, MUSB_RXCSR, csr); | 744 | musb_writew(epio, MUSB_RXCSR, csr); |
745 | 745 | ||
746 | if (transfer_size <= musb_ep->packet_sz) { | 746 | if (transfer_size <= musb_ep->packet_sz) { |
747 | musb_ep->dma->desired_mode = 0; | 747 | musb_ep->dma->desired_mode = 0; |
748 | } else { | 748 | } else { |
749 | musb_ep->dma->desired_mode = 1; | 749 | musb_ep->dma->desired_mode = 1; |
750 | /* Mode must be set after DMAENAB */ | 750 | /* Mode must be set after DMAENAB */ |
751 | csr |= MUSB_RXCSR_DMAMODE; | 751 | csr |= MUSB_RXCSR_DMAMODE; |
752 | musb_writew(epio, MUSB_RXCSR, csr); | 752 | musb_writew(epio, MUSB_RXCSR, csr); |
753 | } | 753 | } |
754 | 754 | ||
755 | if (c->channel_program(channel, | 755 | if (c->channel_program(channel, |
756 | musb_ep->packet_sz, | 756 | musb_ep->packet_sz, |
757 | channel->desired_mode, | 757 | channel->desired_mode, |
758 | request->dma | 758 | request->dma |
759 | + request->actual, | 759 | + request->actual, |
760 | transfer_size)) | 760 | transfer_size)) |
761 | 761 | ||
762 | return; | 762 | return; |
763 | } | 763 | } |
764 | #endif /* Mentor's DMA */ | 764 | #endif /* Mentor's DMA */ |
765 | 765 | ||
766 | len = request->length - request->actual; | 766 | len = request->length - request->actual; |
767 | dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", | 767 | dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", |
768 | musb_ep->end_point.name, | 768 | musb_ep->end_point.name, |
769 | fifo_count, len, | 769 | fifo_count, len, |
770 | musb_ep->packet_sz); | 770 | musb_ep->packet_sz); |
771 | 771 | ||
772 | fifo_count = min_t(unsigned, len, fifo_count); | 772 | fifo_count = min_t(unsigned, len, fifo_count); |
773 | 773 | ||
774 | #ifdef CONFIG_USB_TUSB_OMAP_DMA | 774 | #ifdef CONFIG_USB_TUSB_OMAP_DMA |
775 | if (tusb_dma_omap() && is_buffer_mapped(req)) { | 775 | if (tusb_dma_omap() && is_buffer_mapped(req)) { |
776 | struct dma_controller *c = musb->dma_controller; | 776 | struct dma_controller *c = musb->dma_controller; |
777 | struct dma_channel *channel = musb_ep->dma; | 777 | struct dma_channel *channel = musb_ep->dma; |
778 | u32 dma_addr = request->dma + request->actual; | 778 | u32 dma_addr = request->dma + request->actual; |
779 | int ret; | 779 | int ret; |
780 | 780 | ||
781 | ret = c->channel_program(channel, | 781 | ret = c->channel_program(channel, |
782 | musb_ep->packet_sz, | 782 | musb_ep->packet_sz, |
783 | channel->desired_mode, | 783 | channel->desired_mode, |
784 | dma_addr, | 784 | dma_addr, |
785 | fifo_count); | 785 | fifo_count); |
786 | if (ret) | 786 | if (ret) |
787 | return; | 787 | return; |
788 | } | 788 | } |
789 | #endif | 789 | #endif |
790 | /* | 790 | /* |
791 | * Unmap the dma buffer back to cpu if dma channel | 791 | * Unmap the dma buffer back to cpu if dma channel |
792 | * programming fails. This buffer is mapped if the | 792 | * programming fails. This buffer is mapped if the |
793 | * channel allocation is successful | 793 | * channel allocation is successful |
794 | */ | 794 | */ |
795 | if (is_buffer_mapped(req)) { | 795 | if (is_buffer_mapped(req)) { |
796 | unmap_dma_buffer(req, musb); | 796 | unmap_dma_buffer(req, musb); |
797 | 797 | ||
798 | /* | 798 | /* |
799 | * Clear DMAENAB and AUTOCLEAR for the | 799 | * Clear DMAENAB and AUTOCLEAR for the |
800 | * PIO mode transfer | 800 | * PIO mode transfer |
801 | */ | 801 | */ |
802 | csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); | 802 | csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); |
803 | musb_writew(epio, MUSB_RXCSR, csr); | 803 | musb_writew(epio, MUSB_RXCSR, csr); |
804 | } | 804 | } |
805 | 805 | ||
806 | musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) | 806 | musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) |
807 | (request->buf + request->actual)); | 807 | (request->buf + request->actual)); |
808 | request->actual += fifo_count; | 808 | request->actual += fifo_count; |
809 | 809 | ||
810 | /* REVISIT if we left anything in the fifo, flush | 810 | /* REVISIT if we left anything in the fifo, flush |
811 | * it and report -EOVERFLOW | 811 | * it and report -EOVERFLOW |
812 | */ | 812 | */ |
813 | 813 | ||
814 | /* ack the read! */ | 814 | /* ack the read! */ |
815 | csr |= MUSB_RXCSR_P_WZC_BITS; | 815 | csr |= MUSB_RXCSR_P_WZC_BITS; |
816 | csr &= ~MUSB_RXCSR_RXPKTRDY; | 816 | csr &= ~MUSB_RXCSR_RXPKTRDY; |
817 | musb_writew(epio, MUSB_RXCSR, csr); | 817 | musb_writew(epio, MUSB_RXCSR, csr); |
818 | } | 818 | } |
819 | } | 819 | } |
820 | 820 | ||
821 | /* reach the end or short packet detected */ | 821 | /* reach the end or short packet detected */ |
822 | if (request->actual == request->length || | 822 | if (request->actual == request->length || |
823 | fifo_count < musb_ep->packet_sz) | 823 | fifo_count < musb_ep->packet_sz) |
824 | musb_g_giveback(musb_ep, request, 0); | 824 | musb_g_giveback(musb_ep, request, 0); |
825 | } | 825 | } |
826 | 826 | ||
827 | /* | 827 | /* |
828 | * Data ready for a request; called from IRQ | 828 | * Data ready for a request; called from IRQ |
829 | */ | 829 | */ |
830 | void musb_g_rx(struct musb *musb, u8 epnum) | 830 | void musb_g_rx(struct musb *musb, u8 epnum) |
831 | { | 831 | { |
832 | u16 csr; | 832 | u16 csr; |
833 | struct musb_request *req; | 833 | struct musb_request *req; |
834 | struct usb_request *request; | 834 | struct usb_request *request; |
835 | void __iomem *mbase = musb->mregs; | 835 | void __iomem *mbase = musb->mregs; |
836 | struct musb_ep *musb_ep; | 836 | struct musb_ep *musb_ep; |
837 | void __iomem *epio = musb->endpoints[epnum].regs; | 837 | void __iomem *epio = musb->endpoints[epnum].regs; |
838 | struct dma_channel *dma; | 838 | struct dma_channel *dma; |
839 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; | 839 | struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; |
840 | 840 | ||
841 | if (hw_ep->is_shared_fifo) | 841 | if (hw_ep->is_shared_fifo) |
842 | musb_ep = &hw_ep->ep_in; | 842 | musb_ep = &hw_ep->ep_in; |
843 | else | 843 | else |
844 | musb_ep = &hw_ep->ep_out; | 844 | musb_ep = &hw_ep->ep_out; |
845 | 845 | ||
846 | musb_ep_select(mbase, epnum); | 846 | musb_ep_select(mbase, epnum); |
847 | 847 | ||
848 | req = next_request(musb_ep); | 848 | req = next_request(musb_ep); |
849 | if (!req) | 849 | if (!req) |
850 | return; | 850 | return; |
851 | 851 | ||
852 | request = &req->request; | 852 | request = &req->request; |
853 | 853 | ||
854 | csr = musb_readw(epio, MUSB_RXCSR); | 854 | csr = musb_readw(epio, MUSB_RXCSR); |
855 | dma = is_dma_capable() ? musb_ep->dma : NULL; | 855 | dma = is_dma_capable() ? musb_ep->dma : NULL; |
856 | 856 | ||
857 | dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, | 857 | dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, |
858 | csr, dma ? " (dma)" : "", request); | 858 | csr, dma ? " (dma)" : "", request); |
859 | 859 | ||
860 | if (csr & MUSB_RXCSR_P_SENTSTALL) { | 860 | if (csr & MUSB_RXCSR_P_SENTSTALL) { |
861 | csr |= MUSB_RXCSR_P_WZC_BITS; | 861 | csr |= MUSB_RXCSR_P_WZC_BITS; |
862 | csr &= ~MUSB_RXCSR_P_SENTSTALL; | 862 | csr &= ~MUSB_RXCSR_P_SENTSTALL; |
863 | musb_writew(epio, MUSB_RXCSR, csr); | 863 | musb_writew(epio, MUSB_RXCSR, csr); |
864 | return; | 864 | return; |
865 | } | 865 | } |
866 | 866 | ||
867 | if (csr & MUSB_RXCSR_P_OVERRUN) { | 867 | if (csr & MUSB_RXCSR_P_OVERRUN) { |
868 | /* csr |= MUSB_RXCSR_P_WZC_BITS; */ | 868 | /* csr |= MUSB_RXCSR_P_WZC_BITS; */ |
869 | csr &= ~MUSB_RXCSR_P_OVERRUN; | 869 | csr &= ~MUSB_RXCSR_P_OVERRUN; |
870 | musb_writew(epio, MUSB_RXCSR, csr); | 870 | musb_writew(epio, MUSB_RXCSR, csr); |
871 | 871 | ||
872 | dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); | 872 | dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); |
873 | if (request->status == -EINPROGRESS) | 873 | if (request->status == -EINPROGRESS) |
874 | request->status = -EOVERFLOW; | 874 | request->status = -EOVERFLOW; |
875 | } | 875 | } |
876 | if (csr & MUSB_RXCSR_INCOMPRX) { | 876 | if (csr & MUSB_RXCSR_INCOMPRX) { |
877 | /* REVISIT not necessarily an error */ | 877 | /* REVISIT not necessarily an error */ |
878 | dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); | 878 | dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); |
879 | } | 879 | } |
880 | 880 | ||
881 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 881 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
882 | /* "should not happen"; likely RXPKTRDY pending for DMA */ | 882 | /* "should not happen"; likely RXPKTRDY pending for DMA */ |
883 | dev_dbg(musb->controller, "%s busy, csr %04x\n", | 883 | dev_dbg(musb->controller, "%s busy, csr %04x\n", |
884 | musb_ep->end_point.name, csr); | 884 | musb_ep->end_point.name, csr); |
885 | return; | 885 | return; |
886 | } | 886 | } |
887 | 887 | ||
888 | if (dma && (csr & MUSB_RXCSR_DMAENAB)) { | 888 | if (dma && (csr & MUSB_RXCSR_DMAENAB)) { |
889 | csr &= ~(MUSB_RXCSR_AUTOCLEAR | 889 | csr &= ~(MUSB_RXCSR_AUTOCLEAR |
890 | | MUSB_RXCSR_DMAENAB | 890 | | MUSB_RXCSR_DMAENAB |
891 | | MUSB_RXCSR_DMAMODE); | 891 | | MUSB_RXCSR_DMAMODE); |
892 | musb_writew(epio, MUSB_RXCSR, | 892 | musb_writew(epio, MUSB_RXCSR, |
893 | MUSB_RXCSR_P_WZC_BITS | csr); | 893 | MUSB_RXCSR_P_WZC_BITS | csr); |
894 | 894 | ||
895 | request->actual += musb_ep->dma->actual_len; | 895 | request->actual += musb_ep->dma->actual_len; |
896 | 896 | ||
897 | dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", | 897 | dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", |
898 | epnum, csr, | 898 | epnum, csr, |
899 | musb_readw(epio, MUSB_RXCSR), | 899 | musb_readw(epio, MUSB_RXCSR), |
900 | musb_ep->dma->actual_len, request); | 900 | musb_ep->dma->actual_len, request); |
901 | 901 | ||
902 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ | 902 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ |
903 | defined(CONFIG_USB_UX500_DMA) | 903 | defined(CONFIG_USB_UX500_DMA) |
904 | /* Autoclear doesn't clear RxPktRdy for short packets */ | 904 | /* Autoclear doesn't clear RxPktRdy for short packets */ |
905 | if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) | 905 | if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) |
906 | || (dma->actual_len | 906 | || (dma->actual_len |
907 | & (musb_ep->packet_sz - 1))) { | 907 | & (musb_ep->packet_sz - 1))) { |
908 | /* ack the read! */ | 908 | /* ack the read! */ |
909 | csr &= ~MUSB_RXCSR_RXPKTRDY; | 909 | csr &= ~MUSB_RXCSR_RXPKTRDY; |
910 | musb_writew(epio, MUSB_RXCSR, csr); | 910 | musb_writew(epio, MUSB_RXCSR, csr); |
911 | } | 911 | } |
912 | 912 | ||
913 | /* incomplete, and not short? wait for next IN packet */ | 913 | /* incomplete, and not short? wait for next IN packet */ |
914 | if ((request->actual < request->length) | 914 | if ((request->actual < request->length) |
915 | && (musb_ep->dma->actual_len | 915 | && (musb_ep->dma->actual_len |
916 | == musb_ep->packet_sz)) { | 916 | == musb_ep->packet_sz)) { |
917 | /* In the double-buffered case, continue to unload the | 917 | /* In the double-buffered case, continue to unload the |
918 | * FIFO if there is an Rx packet in it. | 918 | * FIFO if there is an Rx packet in it. |
919 | */ | 919 | */ |
920 | csr = musb_readw(epio, MUSB_RXCSR); | 920 | csr = musb_readw(epio, MUSB_RXCSR); |
921 | if ((csr & MUSB_RXCSR_RXPKTRDY) && | 921 | if ((csr & MUSB_RXCSR_RXPKTRDY) && |
922 | hw_ep->rx_double_buffered) | 922 | hw_ep->rx_double_buffered) |
923 | goto exit; | 923 | goto exit; |
924 | return; | 924 | return; |
925 | } | 925 | } |
926 | #endif | 926 | #endif |
927 | musb_g_giveback(musb_ep, request, 0); | 927 | musb_g_giveback(musb_ep, request, 0); |
928 | /* | 928 | /* |
929 | * In the giveback function the MUSB lock is | 929 | * In the giveback function the MUSB lock is |
930 | * released and re-acquired after some time. During | 930 | * released and re-acquired after some time. During |
931 | * this time period the INDEX register could get | 931 | * this time period the INDEX register could get |
932 | * changed by the gadget_queue function, especially | 932 | * changed by the gadget_queue function, especially |
933 | * on SMP systems. Reselect the INDEX to be sure | 933 | * on SMP systems. Reselect the INDEX to be sure |
934 | * we are reading/modifying the right registers | 934 | * we are reading/modifying the right registers |
935 | */ | 935 | */ |
936 | musb_ep_select(mbase, epnum); | 936 | musb_ep_select(mbase, epnum); |
937 | 937 | ||
938 | req = next_request(musb_ep); | 938 | req = next_request(musb_ep); |
939 | if (!req) | 939 | if (!req) |
940 | return; | 940 | return; |
941 | } | 941 | } |
942 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ | 942 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ |
943 | defined(CONFIG_USB_UX500_DMA) | 943 | defined(CONFIG_USB_UX500_DMA) |
944 | exit: | 944 | exit: |
945 | #endif | 945 | #endif |
946 | /* Analyze request */ | 946 | /* Analyze request */ |
947 | rxstate(musb, req); | 947 | rxstate(musb, req); |
948 | } | 948 | } |
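
A minimal standalone sketch of the short-packet test used after DMA completion above (actual_len & (packet_sz - 1)); for bulk endpoints the packet size is a power of two, so the mask test is equivalent to a modulo check. Illustrative only, not part of this file:

    #include <stdio.h>

    /* Mirrors the test after DMA completion in musb_g_rx(): for a bulk
     * endpoint packet_sz is a power of two, so this is equivalent to
     * (actual_len % packet_sz) != 0, i.e. "the last packet was short".
     */
    static int last_packet_short(unsigned actual_len, unsigned packet_sz)
    {
            return (actual_len & (packet_sz - 1)) != 0;
    }

    int main(void)
    {
            printf("%d %d\n",
                   last_packet_short(1024, 512),   /* 0: whole packets only */
                   last_packet_short(1037, 512));  /* 1: ends in a short packet */
            return 0;
    }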
949 | 949 | ||
950 | /* ------------------------------------------------------------ */ | 950 | /* ------------------------------------------------------------ */ |
951 | 951 | ||
952 | static int musb_gadget_enable(struct usb_ep *ep, | 952 | static int musb_gadget_enable(struct usb_ep *ep, |
953 | const struct usb_endpoint_descriptor *desc) | 953 | const struct usb_endpoint_descriptor *desc) |
954 | { | 954 | { |
955 | unsigned long flags; | 955 | unsigned long flags; |
956 | struct musb_ep *musb_ep; | 956 | struct musb_ep *musb_ep; |
957 | struct musb_hw_ep *hw_ep; | 957 | struct musb_hw_ep *hw_ep; |
958 | void __iomem *regs; | 958 | void __iomem *regs; |
959 | struct musb *musb; | 959 | struct musb *musb; |
960 | void __iomem *mbase; | 960 | void __iomem *mbase; |
961 | u8 epnum; | 961 | u8 epnum; |
962 | u16 csr; | 962 | u16 csr; |
963 | unsigned tmp; | 963 | unsigned tmp; |
964 | int status = -EINVAL; | 964 | int status = -EINVAL; |
965 | 965 | ||
966 | if (!ep || !desc) | 966 | if (!ep || !desc) |
967 | return -EINVAL; | 967 | return -EINVAL; |
968 | 968 | ||
969 | musb_ep = to_musb_ep(ep); | 969 | musb_ep = to_musb_ep(ep); |
970 | hw_ep = musb_ep->hw_ep; | 970 | hw_ep = musb_ep->hw_ep; |
971 | regs = hw_ep->regs; | 971 | regs = hw_ep->regs; |
972 | musb = musb_ep->musb; | 972 | musb = musb_ep->musb; |
973 | mbase = musb->mregs; | 973 | mbase = musb->mregs; |
974 | epnum = musb_ep->current_epnum; | 974 | epnum = musb_ep->current_epnum; |
975 | 975 | ||
976 | spin_lock_irqsave(&musb->lock, flags); | 976 | spin_lock_irqsave(&musb->lock, flags); |
977 | 977 | ||
978 | if (musb_ep->desc) { | 978 | if (musb_ep->desc) { |
979 | status = -EBUSY; | 979 | status = -EBUSY; |
980 | goto fail; | 980 | goto fail; |
981 | } | 981 | } |
982 | musb_ep->type = usb_endpoint_type(desc); | 982 | musb_ep->type = usb_endpoint_type(desc); |
983 | 983 | ||
984 | /* check direction and (later) maxpacket size against endpoint */ | 984 | /* check direction and (later) maxpacket size against endpoint */ |
985 | if (usb_endpoint_num(desc) != epnum) | 985 | if (usb_endpoint_num(desc) != epnum) |
986 | goto fail; | 986 | goto fail; |
987 | 987 | ||
988 | /* REVISIT this rules out high bandwidth periodic transfers */ | 988 | /* REVISIT this rules out high bandwidth periodic transfers */ |
989 | tmp = usb_endpoint_maxp(desc); | 989 | tmp = usb_endpoint_maxp(desc); |
990 | if (tmp & ~0x07ff) { | 990 | if (tmp & ~0x07ff) { |
991 | int ok; | 991 | int ok; |
992 | 992 | ||
993 | if (usb_endpoint_dir_in(desc)) | 993 | if (usb_endpoint_dir_in(desc)) |
994 | ok = musb->hb_iso_tx; | 994 | ok = musb->hb_iso_tx; |
995 | else | 995 | else |
996 | ok = musb->hb_iso_rx; | 996 | ok = musb->hb_iso_rx; |
997 | 997 | ||
998 | if (!ok) { | 998 | if (!ok) { |
999 | dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); | 999 | dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); |
1000 | goto fail; | 1000 | goto fail; |
1001 | } | 1001 | } |
1002 | musb_ep->hb_mult = (tmp >> 11) & 3; | 1002 | musb_ep->hb_mult = (tmp >> 11) & 3; |
1003 | } else { | 1003 | } else { |
1004 | musb_ep->hb_mult = 0; | 1004 | musb_ep->hb_mult = 0; |
1005 | } | 1005 | } |
1006 | 1006 | ||
1007 | musb_ep->packet_sz = tmp & 0x7ff; | 1007 | musb_ep->packet_sz = tmp & 0x7ff; |
1008 | tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); | 1008 | tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); |
1009 | 1009 | ||
1010 | /* enable the interrupts for the endpoint, set the endpoint | 1010 | /* enable the interrupts for the endpoint, set the endpoint |
1011 | * packet size (or fail), set the mode, clear the fifo | 1011 | * packet size (or fail), set the mode, clear the fifo |
1012 | */ | 1012 | */ |
1013 | musb_ep_select(mbase, epnum); | 1013 | musb_ep_select(mbase, epnum); |
1014 | if (usb_endpoint_dir_in(desc)) { | 1014 | if (usb_endpoint_dir_in(desc)) { |
1015 | 1015 | ||
1016 | if (hw_ep->is_shared_fifo) | 1016 | if (hw_ep->is_shared_fifo) |
1017 | musb_ep->is_in = 1; | 1017 | musb_ep->is_in = 1; |
1018 | if (!musb_ep->is_in) | 1018 | if (!musb_ep->is_in) |
1019 | goto fail; | 1019 | goto fail; |
1020 | 1020 | ||
1021 | if (tmp > hw_ep->max_packet_sz_tx) { | 1021 | if (tmp > hw_ep->max_packet_sz_tx) { |
1022 | dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); | 1022 | dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); |
1023 | goto fail; | 1023 | goto fail; |
1024 | } | 1024 | } |
1025 | 1025 | ||
1026 | musb->intrtxe |= (1 << epnum); | 1026 | musb->intrtxe |= (1 << epnum); |
1027 | musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); | 1027 | musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); |
1028 | 1028 | ||
1029 | /* REVISIT if can_bulk_split(), use by updating "tmp"; | 1029 | /* REVISIT if can_bulk_split(), use by updating "tmp"; |
1030 | * likewise high bandwidth periodic tx | 1030 | * likewise high bandwidth periodic tx |
1031 | */ | 1031 | */ |
1032 | /* Set TXMAXP with the FIFO size of the endpoint | 1032 | /* Set TXMAXP with the FIFO size of the endpoint |
1033 | * to disable double buffering mode. | 1033 | * to disable double buffering mode. |
1034 | */ | 1034 | */ |
1035 | if (musb->double_buffer_not_ok) { | 1035 | if (musb->double_buffer_not_ok) { |
1036 | musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); | 1036 | musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); |
1037 | } else { | 1037 | } else { |
1038 | if (can_bulk_split(musb, musb_ep->type)) | 1038 | if (can_bulk_split(musb, musb_ep->type)) |
1039 | musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / | 1039 | musb_ep->hb_mult = (hw_ep->max_packet_sz_tx / |
1040 | musb_ep->packet_sz) - 1; | 1040 | musb_ep->packet_sz) - 1; |
1041 | musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | 1041 | musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz |
1042 | | (musb_ep->hb_mult << 11)); | 1042 | | (musb_ep->hb_mult << 11)); |
1043 | } | 1043 | } |
1044 | 1044 | ||
1045 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; | 1045 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; |
1046 | if (musb_readw(regs, MUSB_TXCSR) | 1046 | if (musb_readw(regs, MUSB_TXCSR) |
1047 | & MUSB_TXCSR_FIFONOTEMPTY) | 1047 | & MUSB_TXCSR_FIFONOTEMPTY) |
1048 | csr |= MUSB_TXCSR_FLUSHFIFO; | 1048 | csr |= MUSB_TXCSR_FLUSHFIFO; |
1049 | if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) | 1049 | if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) |
1050 | csr |= MUSB_TXCSR_P_ISO; | 1050 | csr |= MUSB_TXCSR_P_ISO; |
1051 | 1051 | ||
1052 | /* set twice in case of double buffering */ | 1052 | /* set twice in case of double buffering */ |
1053 | musb_writew(regs, MUSB_TXCSR, csr); | 1053 | musb_writew(regs, MUSB_TXCSR, csr); |
1054 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ | 1054 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ |
1055 | musb_writew(regs, MUSB_TXCSR, csr); | 1055 | musb_writew(regs, MUSB_TXCSR, csr); |
1056 | 1056 | ||
1057 | } else { | 1057 | } else { |
1058 | 1058 | ||
1059 | if (hw_ep->is_shared_fifo) | 1059 | if (hw_ep->is_shared_fifo) |
1060 | musb_ep->is_in = 0; | 1060 | musb_ep->is_in = 0; |
1061 | if (musb_ep->is_in) | 1061 | if (musb_ep->is_in) |
1062 | goto fail; | 1062 | goto fail; |
1063 | 1063 | ||
1064 | if (tmp > hw_ep->max_packet_sz_rx) { | 1064 | if (tmp > hw_ep->max_packet_sz_rx) { |
1065 | dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); | 1065 | dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); |
1066 | goto fail; | 1066 | goto fail; |
1067 | } | 1067 | } |
1068 | 1068 | ||
1069 | musb->intrrxe |= (1 << epnum); | 1069 | musb->intrrxe |= (1 << epnum); |
1070 | musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe); | 1070 | musb_writew(mbase, MUSB_INTRRXE, musb->intrrxe); |
1071 | 1071 | ||
1072 | /* REVISIT if can_bulk_combine() use by updating "tmp" | 1072 | /* REVISIT if can_bulk_combine() use by updating "tmp" |
1073 | * likewise high bandwidth periodic rx | 1073 | * likewise high bandwidth periodic rx |
1074 | */ | 1074 | */ |
1075 | /* Set RXMAXP with the FIFO size of the endpoint | 1075 | /* Set RXMAXP with the FIFO size of the endpoint |
1076 | * to disable double buffering mode. | 1076 | * to disable double buffering mode. |
1077 | */ | 1077 | */ |
1078 | if (musb->double_buffer_not_ok) | 1078 | if (musb->double_buffer_not_ok) |
1079 | musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); | 1079 | musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); |
1080 | else | 1080 | else |
1081 | musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | 1081 | musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz |
1082 | | (musb_ep->hb_mult << 11)); | 1082 | | (musb_ep->hb_mult << 11)); |
1083 | 1083 | ||
1084 | /* force shared fifo to OUT-only mode */ | 1084 | /* force shared fifo to OUT-only mode */ |
1085 | if (hw_ep->is_shared_fifo) { | 1085 | if (hw_ep->is_shared_fifo) { |
1086 | csr = musb_readw(regs, MUSB_TXCSR); | 1086 | csr = musb_readw(regs, MUSB_TXCSR); |
1087 | csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); | 1087 | csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); |
1088 | musb_writew(regs, MUSB_TXCSR, csr); | 1088 | musb_writew(regs, MUSB_TXCSR, csr); |
1089 | } | 1089 | } |
1090 | 1090 | ||
1091 | csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; | 1091 | csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; |
1092 | if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) | 1092 | if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) |
1093 | csr |= MUSB_RXCSR_P_ISO; | 1093 | csr |= MUSB_RXCSR_P_ISO; |
1094 | else if (musb_ep->type == USB_ENDPOINT_XFER_INT) | 1094 | else if (musb_ep->type == USB_ENDPOINT_XFER_INT) |
1095 | csr |= MUSB_RXCSR_DISNYET; | 1095 | csr |= MUSB_RXCSR_DISNYET; |
1096 | 1096 | ||
1097 | /* set twice in case of double buffering */ | 1097 | /* set twice in case of double buffering */ |
1098 | musb_writew(regs, MUSB_RXCSR, csr); | 1098 | musb_writew(regs, MUSB_RXCSR, csr); |
1099 | musb_writew(regs, MUSB_RXCSR, csr); | 1099 | musb_writew(regs, MUSB_RXCSR, csr); |
1100 | } | 1100 | } |
1101 | 1101 | ||
1102 | /* NOTE: all the I/O code _should_ work fine without DMA, in case | 1102 | /* NOTE: all the I/O code _should_ work fine without DMA, in case |
1103 | * for some reason you run out of channels here. | 1103 | * for some reason you run out of channels here. |
1104 | */ | 1104 | */ |
1105 | if (is_dma_capable() && musb->dma_controller) { | 1105 | if (is_dma_capable() && musb->dma_controller) { |
1106 | struct dma_controller *c = musb->dma_controller; | 1106 | struct dma_controller *c = musb->dma_controller; |
1107 | 1107 | ||
1108 | musb_ep->dma = c->channel_alloc(c, hw_ep, | 1108 | musb_ep->dma = c->channel_alloc(c, hw_ep, |
1109 | (desc->bEndpointAddress & USB_DIR_IN)); | 1109 | (desc->bEndpointAddress & USB_DIR_IN)); |
1110 | } else | 1110 | } else |
1111 | musb_ep->dma = NULL; | 1111 | musb_ep->dma = NULL; |
1112 | 1112 | ||
1113 | musb_ep->desc = desc; | 1113 | musb_ep->desc = desc; |
1114 | musb_ep->busy = 0; | 1114 | musb_ep->busy = 0; |
1115 | musb_ep->wedged = 0; | 1115 | musb_ep->wedged = 0; |
1116 | status = 0; | 1116 | status = 0; |
1117 | 1117 | ||
1118 | pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", | 1118 | pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", |
1119 | musb_driver_name, musb_ep->end_point.name, | 1119 | musb_driver_name, musb_ep->end_point.name, |
1120 | ({ char *s; switch (musb_ep->type) { | 1120 | ({ char *s; switch (musb_ep->type) { |
1121 | case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; | 1121 | case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; |
1122 | case USB_ENDPOINT_XFER_INT: s = "int"; break; | 1122 | case USB_ENDPOINT_XFER_INT: s = "int"; break; |
1123 | default: s = "iso"; break; | 1123 | default: s = "iso"; break; |
1124 | }; s; }), | 1124 | }; s; }), |
1125 | musb_ep->is_in ? "IN" : "OUT", | 1125 | musb_ep->is_in ? "IN" : "OUT", |
1126 | musb_ep->dma ? "dma, " : "", | 1126 | musb_ep->dma ? "dma, " : "", |
1127 | musb_ep->packet_sz); | 1127 | musb_ep->packet_sz); |
1128 | 1128 | ||
1129 | schedule_work(&musb->irq_work); | 1129 | schedule_work(&musb->irq_work); |
1130 | 1130 | ||
1131 | fail: | 1131 | fail: |
1132 | spin_unlock_irqrestore(&musb->lock, flags); | 1132 | spin_unlock_irqrestore(&musb->lock, flags); |
1133 | return status; | 1133 | return status; |
1134 | } | 1134 | } |
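
As a point of reference for the wMaxPacketSize handling above, the decoding of the packet size (bits 10:0) and high-bandwidth multiplier (bits 12:11) can be reproduced in a small standalone sketch; the descriptor value below is hypothetical and not taken from this driver:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical descriptor value: 1024-byte packets, 2 extra transactions */
            unsigned maxp = (2 << 11) | 1024;
            unsigned packet_sz = maxp & 0x7ff;
            unsigned hb_mult = (maxp & ~0x07ff) ? ((maxp >> 11) & 3) : 0;

            /* same FIFO-size requirement musb_gadget_enable() checks against */
            printf("packet_sz=%u hb_mult=%u fifo_needed=%u\n",
                   packet_sz, hb_mult, packet_sz * (hb_mult + 1));
            return 0;
    }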
1135 | 1135 | ||
1136 | /* | 1136 | /* |
1137 | * Disable an endpoint, flushing all queued requests. | 1137 | * Disable an endpoint, flushing all queued requests. |
1138 | */ | 1138 | */ |
1139 | static int musb_gadget_disable(struct usb_ep *ep) | 1139 | static int musb_gadget_disable(struct usb_ep *ep) |
1140 | { | 1140 | { |
1141 | unsigned long flags; | 1141 | unsigned long flags; |
1142 | struct musb *musb; | 1142 | struct musb *musb; |
1143 | u8 epnum; | 1143 | u8 epnum; |
1144 | struct musb_ep *musb_ep; | 1144 | struct musb_ep *musb_ep; |
1145 | void __iomem *epio; | 1145 | void __iomem *epio; |
1146 | int status = 0; | 1146 | int status = 0; |
1147 | 1147 | ||
1148 | musb_ep = to_musb_ep(ep); | 1148 | musb_ep = to_musb_ep(ep); |
1149 | musb = musb_ep->musb; | 1149 | musb = musb_ep->musb; |
1150 | epnum = musb_ep->current_epnum; | 1150 | epnum = musb_ep->current_epnum; |
1151 | epio = musb->endpoints[epnum].regs; | 1151 | epio = musb->endpoints[epnum].regs; |
1152 | 1152 | ||
1153 | spin_lock_irqsave(&musb->lock, flags); | 1153 | spin_lock_irqsave(&musb->lock, flags); |
1154 | musb_ep_select(musb->mregs, epnum); | 1154 | musb_ep_select(musb->mregs, epnum); |
1155 | 1155 | ||
1156 | /* zero the endpoint sizes */ | 1156 | /* zero the endpoint sizes */ |
1157 | if (musb_ep->is_in) { | 1157 | if (musb_ep->is_in) { |
1158 | musb->intrtxe &= ~(1 << epnum); | 1158 | musb->intrtxe &= ~(1 << epnum); |
1159 | musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe); | 1159 | musb_writew(musb->mregs, MUSB_INTRTXE, musb->intrtxe); |
1160 | musb_writew(epio, MUSB_TXMAXP, 0); | 1160 | musb_writew(epio, MUSB_TXMAXP, 0); |
1161 | } else { | 1161 | } else { |
1162 | musb->intrrxe &= ~(1 << epnum); | 1162 | musb->intrrxe &= ~(1 << epnum); |
1163 | musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe); | 1163 | musb_writew(musb->mregs, MUSB_INTRRXE, musb->intrrxe); |
1164 | musb_writew(epio, MUSB_RXMAXP, 0); | 1164 | musb_writew(epio, MUSB_RXMAXP, 0); |
1165 | } | 1165 | } |
1166 | 1166 | ||
1167 | musb_ep->desc = NULL; | 1167 | musb_ep->desc = NULL; |
1168 | musb_ep->end_point.desc = NULL; | 1168 | musb_ep->end_point.desc = NULL; |
1169 | 1169 | ||
1170 | /* abort all pending DMA and requests */ | 1170 | /* abort all pending DMA and requests */ |
1171 | nuke(musb_ep, -ESHUTDOWN); | 1171 | nuke(musb_ep, -ESHUTDOWN); |
1172 | 1172 | ||
1173 | schedule_work(&musb->irq_work); | 1173 | schedule_work(&musb->irq_work); |
1174 | 1174 | ||
1175 | spin_unlock_irqrestore(&(musb->lock), flags); | 1175 | spin_unlock_irqrestore(&(musb->lock), flags); |
1176 | 1176 | ||
1177 | dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name); | 1177 | dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name); |
1178 | 1178 | ||
1179 | return status; | 1179 | return status; |
1180 | } | 1180 | } |
1181 | 1181 | ||
1182 | /* | 1182 | /* |
1183 | * Allocate a request for an endpoint. | 1183 | * Allocate a request for an endpoint. |
1184 | * Reused by ep0 code. | 1184 | * Reused by ep0 code. |
1185 | */ | 1185 | */ |
1186 | struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) | 1186 | struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) |
1187 | { | 1187 | { |
1188 | struct musb_ep *musb_ep = to_musb_ep(ep); | 1188 | struct musb_ep *musb_ep = to_musb_ep(ep); |
1189 | struct musb *musb = musb_ep->musb; | 1189 | struct musb *musb = musb_ep->musb; |
1190 | struct musb_request *request = NULL; | 1190 | struct musb_request *request = NULL; |
1191 | 1191 | ||
1192 | request = kzalloc(sizeof *request, gfp_flags); | 1192 | request = kzalloc(sizeof *request, gfp_flags); |
1193 | if (!request) { | 1193 | if (!request) { |
1194 | dev_dbg(musb->controller, "not enough memory\n"); | 1194 | dev_dbg(musb->controller, "not enough memory\n"); |
1195 | return NULL; | 1195 | return NULL; |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | request->request.dma = DMA_ADDR_INVALID; | 1198 | request->request.dma = DMA_ADDR_INVALID; |
1199 | request->epnum = musb_ep->current_epnum; | 1199 | request->epnum = musb_ep->current_epnum; |
1200 | request->ep = musb_ep; | 1200 | request->ep = musb_ep; |
1201 | 1201 | ||
1202 | return &request->request; | 1202 | return &request->request; |
1203 | } | 1203 | } |
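
A minimal sketch, assuming a gadget function driver with a valid endpoint 'ep' (hypothetical, e.g. obtained from usb_ep_autoconfig()), of how requests reach musb_alloc_request() and musb_free_request() through the usb_ep_* wrappers; illustrative only, not part of this file:

    #include <linux/usb/gadget.h>
    #include <linux/slab.h>

    static int example_request_setup(struct usb_ep *ep)     /* hypothetical helper */
    {
            struct usb_request *req;

            req = usb_ep_alloc_request(ep, GFP_KERNEL);      /* -> musb_alloc_request() */
            if (!req)
                    return -ENOMEM;

            req->buf = kzalloc(512, GFP_KERNEL);
            if (!req->buf) {
                    usb_ep_free_request(ep, req);
                    return -ENOMEM;
            }
            req->length = 512;

            /* ... submit the request, wait for completion ... */

            kfree(req->buf);
            usb_ep_free_request(ep, req);                    /* -> musb_free_request() */
            return 0;
    }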
1204 | 1204 | ||
1205 | /* | 1205 | /* |
1206 | * Free a request | 1206 | * Free a request |
1207 | * Reused by ep0 code. | 1207 | * Reused by ep0 code. |
1208 | */ | 1208 | */ |
1209 | void musb_free_request(struct usb_ep *ep, struct usb_request *req) | 1209 | void musb_free_request(struct usb_ep *ep, struct usb_request *req) |
1210 | { | 1210 | { |
1211 | kfree(to_musb_request(req)); | 1211 | kfree(to_musb_request(req)); |
1212 | } | 1212 | } |
1213 | 1213 | ||
1214 | static LIST_HEAD(buffers); | 1214 | static LIST_HEAD(buffers); |
1215 | 1215 | ||
1216 | struct free_record { | 1216 | struct free_record { |
1217 | struct list_head list; | 1217 | struct list_head list; |
1218 | struct device *dev; | 1218 | struct device *dev; |
1219 | unsigned bytes; | 1219 | unsigned bytes; |
1220 | dma_addr_t dma; | 1220 | dma_addr_t dma; |
1221 | }; | 1221 | }; |
1222 | 1222 | ||
1223 | /* | 1223 | /* |
1224 | * Context: controller locked, IRQs blocked. | 1224 | * Context: controller locked, IRQs blocked. |
1225 | */ | 1225 | */ |
1226 | void musb_ep_restart(struct musb *musb, struct musb_request *req) | 1226 | void musb_ep_restart(struct musb *musb, struct musb_request *req) |
1227 | { | 1227 | { |
1228 | dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", | 1228 | dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", |
1229 | req->tx ? "TX/IN" : "RX/OUT", | 1229 | req->tx ? "TX/IN" : "RX/OUT", |
1230 | &req->request, req->request.length, req->epnum); | 1230 | &req->request, req->request.length, req->epnum); |
1231 | 1231 | ||
1232 | musb_ep_select(musb->mregs, req->epnum); | 1232 | musb_ep_select(musb->mregs, req->epnum); |
1233 | if (req->tx) | 1233 | if (req->tx) |
1234 | txstate(musb, req); | 1234 | txstate(musb, req); |
1235 | else | 1235 | else |
1236 | rxstate(musb, req); | 1236 | rxstate(musb, req); |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | 1239 | static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, |
1240 | gfp_t gfp_flags) | 1240 | gfp_t gfp_flags) |
1241 | { | 1241 | { |
1242 | struct musb_ep *musb_ep; | 1242 | struct musb_ep *musb_ep; |
1243 | struct musb_request *request; | 1243 | struct musb_request *request; |
1244 | struct musb *musb; | 1244 | struct musb *musb; |
1245 | int status = 0; | 1245 | int status = 0; |
1246 | unsigned long lockflags; | 1246 | unsigned long lockflags; |
1247 | 1247 | ||
1248 | if (!ep || !req) | 1248 | if (!ep || !req) |
1249 | return -EINVAL; | 1249 | return -EINVAL; |
1250 | if (!req->buf) | 1250 | if (!req->buf) |
1251 | return -ENODATA; | 1251 | return -ENODATA; |
1252 | 1252 | ||
1253 | musb_ep = to_musb_ep(ep); | 1253 | musb_ep = to_musb_ep(ep); |
1254 | musb = musb_ep->musb; | 1254 | musb = musb_ep->musb; |
1255 | 1255 | ||
1256 | request = to_musb_request(req); | 1256 | request = to_musb_request(req); |
1257 | request->musb = musb; | 1257 | request->musb = musb; |
1258 | 1258 | ||
1259 | if (request->ep != musb_ep) | 1259 | if (request->ep != musb_ep) |
1260 | return -EINVAL; | 1260 | return -EINVAL; |
1261 | 1261 | ||
1262 | dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); | 1262 | dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); |
1263 | 1263 | ||
1264 | /* request is mine now... */ | 1264 | /* request is mine now... */ |
1265 | request->request.actual = 0; | 1265 | request->request.actual = 0; |
1266 | request->request.status = -EINPROGRESS; | 1266 | request->request.status = -EINPROGRESS; |
1267 | request->epnum = musb_ep->current_epnum; | 1267 | request->epnum = musb_ep->current_epnum; |
1268 | request->tx = musb_ep->is_in; | 1268 | request->tx = musb_ep->is_in; |
1269 | 1269 | ||
1270 | map_dma_buffer(request, musb, musb_ep); | 1270 | map_dma_buffer(request, musb, musb_ep); |
1271 | 1271 | ||
1272 | spin_lock_irqsave(&musb->lock, lockflags); | 1272 | spin_lock_irqsave(&musb->lock, lockflags); |
1273 | 1273 | ||
1274 | /* don't queue if the ep is down */ | 1274 | /* don't queue if the ep is down */ |
1275 | if (!musb_ep->desc) { | 1275 | if (!musb_ep->desc) { |
1276 | dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", | 1276 | dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", |
1277 | req, ep->name, "disabled"); | 1277 | req, ep->name, "disabled"); |
1278 | status = -ESHUTDOWN; | 1278 | status = -ESHUTDOWN; |
1279 | unmap_dma_buffer(request, musb); | 1279 | unmap_dma_buffer(request, musb); |
1280 | goto unlock; | 1280 | goto unlock; |
1281 | } | 1281 | } |
1282 | 1282 | ||
1283 | /* add request to the list */ | 1283 | /* add request to the list */ |
1284 | list_add_tail(&request->list, &musb_ep->req_list); | 1284 | list_add_tail(&request->list, &musb_ep->req_list); |
1285 | 1285 | ||
1286 | /* if this is the head of the queue, start i/o ... */ | 1286 | /* if this is the head of the queue, start i/o ... */ |
1287 | if (!musb_ep->busy && &request->list == musb_ep->req_list.next) | 1287 | if (!musb_ep->busy && &request->list == musb_ep->req_list.next) |
1288 | musb_ep_restart(musb, request); | 1288 | musb_ep_restart(musb, request); |
1289 | 1289 | ||
1290 | unlock: | 1290 | unlock: |
1291 | spin_unlock_irqrestore(&musb->lock, lockflags); | 1291 | spin_unlock_irqrestore(&musb->lock, lockflags); |
1292 | return status; | 1292 | return status; |
1293 | } | 1293 | } |
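
A minimal sketch of how a function driver submits a request so that it lands in musb_gadget_queue(); the helper names are hypothetical and a kernel gadget context is assumed, this is not part of the file:

    #include <linux/usb/gadget.h>

    static void example_complete(struct usb_ep *ep, struct usb_request *req)
    {
            /* runs after musb_g_giveback(); req->status is -ECONNRESET if dequeued */
            if (req->status)
                    pr_debug("%s: request ended with %d\n", ep->name, req->status);
    }

    static int example_submit(struct usb_ep *ep, struct usb_request *req)
    {
            req->complete = example_complete;
            return usb_ep_queue(ep, req, GFP_ATOMIC);        /* -> musb_gadget_queue() */
    }

Cancellation takes the symmetric path: usb_ep_dequeue() ends up in musb_gadget_dequeue() below, which gives the request back with -ECONNRESET.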
1294 | 1294 | ||
1295 | static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) | 1295 | static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) |
1296 | { | 1296 | { |
1297 | struct musb_ep *musb_ep = to_musb_ep(ep); | 1297 | struct musb_ep *musb_ep = to_musb_ep(ep); |
1298 | struct musb_request *req = to_musb_request(request); | 1298 | struct musb_request *req = to_musb_request(request); |
1299 | struct musb_request *r; | 1299 | struct musb_request *r; |
1300 | unsigned long flags; | 1300 | unsigned long flags; |
1301 | int status = 0; | 1301 | int status = 0; |
1302 | struct musb *musb = musb_ep->musb; | 1302 | struct musb *musb = musb_ep->musb; |
1303 | 1303 | ||
1304 | if (!ep || !request || to_musb_request(request)->ep != musb_ep) | 1304 | if (!ep || !request || to_musb_request(request)->ep != musb_ep) |
1305 | return -EINVAL; | 1305 | return -EINVAL; |
1306 | 1306 | ||
1307 | spin_lock_irqsave(&musb->lock, flags); | 1307 | spin_lock_irqsave(&musb->lock, flags); |
1308 | 1308 | ||
1309 | list_for_each_entry(r, &musb_ep->req_list, list) { | 1309 | list_for_each_entry(r, &musb_ep->req_list, list) { |
1310 | if (r == req) | 1310 | if (r == req) |
1311 | break; | 1311 | break; |
1312 | } | 1312 | } |
1313 | if (r != req) { | 1313 | if (r != req) { |
1314 | dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); | 1314 | dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); |
1315 | status = -EINVAL; | 1315 | status = -EINVAL; |
1316 | goto done; | 1316 | goto done; |
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | /* if the hardware doesn't have the request, easy ... */ | 1319 | /* if the hardware doesn't have the request, easy ... */ |
1320 | if (musb_ep->req_list.next != &req->list || musb_ep->busy) | 1320 | if (musb_ep->req_list.next != &req->list || musb_ep->busy) |
1321 | musb_g_giveback(musb_ep, request, -ECONNRESET); | 1321 | musb_g_giveback(musb_ep, request, -ECONNRESET); |
1322 | 1322 | ||
1323 | /* ... else abort the dma transfer ... */ | 1323 | /* ... else abort the dma transfer ... */ |
1324 | else if (is_dma_capable() && musb_ep->dma) { | 1324 | else if (is_dma_capable() && musb_ep->dma) { |
1325 | struct dma_controller *c = musb->dma_controller; | 1325 | struct dma_controller *c = musb->dma_controller; |
1326 | 1326 | ||
1327 | musb_ep_select(musb->mregs, musb_ep->current_epnum); | 1327 | musb_ep_select(musb->mregs, musb_ep->current_epnum); |
1328 | if (c->channel_abort) | 1328 | if (c->channel_abort) |
1329 | status = c->channel_abort(musb_ep->dma); | 1329 | status = c->channel_abort(musb_ep->dma); |
1330 | else | 1330 | else |
1331 | status = -EBUSY; | 1331 | status = -EBUSY; |
1332 | if (status == 0) | 1332 | if (status == 0) |
1333 | musb_g_giveback(musb_ep, request, -ECONNRESET); | 1333 | musb_g_giveback(musb_ep, request, -ECONNRESET); |
1334 | } else { | 1334 | } else { |
1335 | /* NOTE: by sticking to easily tested hardware/driver states, | 1335 | /* NOTE: by sticking to easily tested hardware/driver states, |
1336 | * we leave counting of in-flight packets imprecise. | 1336 | * we leave counting of in-flight packets imprecise. |
1337 | */ | 1337 | */ |
1338 | musb_g_giveback(musb_ep, request, -ECONNRESET); | 1338 | musb_g_giveback(musb_ep, request, -ECONNRESET); |
1339 | } | 1339 | } |
1340 | 1340 | ||
1341 | done: | 1341 | done: |
1342 | spin_unlock_irqrestore(&musb->lock, flags); | 1342 | spin_unlock_irqrestore(&musb->lock, flags); |
1343 | return status; | 1343 | return status; |
1344 | } | 1344 | } |
1345 | 1345 | ||
1346 | /* | 1346 | /* |
1347 | * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any | 1347 | * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any |
1348 | * data but will queue requests. | 1348 | * data but will queue requests. |
1349 | * | 1349 | * |
1350 | * exported to ep0 code | 1350 | * exported to ep0 code |
1351 | */ | 1351 | */ |
1352 | static int musb_gadget_set_halt(struct usb_ep *ep, int value) | 1352 | static int musb_gadget_set_halt(struct usb_ep *ep, int value) |
1353 | { | 1353 | { |
1354 | struct musb_ep *musb_ep = to_musb_ep(ep); | 1354 | struct musb_ep *musb_ep = to_musb_ep(ep); |
1355 | u8 epnum = musb_ep->current_epnum; | 1355 | u8 epnum = musb_ep->current_epnum; |
1356 | struct musb *musb = musb_ep->musb; | 1356 | struct musb *musb = musb_ep->musb; |
1357 | void __iomem *epio = musb->endpoints[epnum].regs; | 1357 | void __iomem *epio = musb->endpoints[epnum].regs; |
1358 | void __iomem *mbase; | 1358 | void __iomem *mbase; |
1359 | unsigned long flags; | 1359 | unsigned long flags; |
1360 | u16 csr; | 1360 | u16 csr; |
1361 | struct musb_request *request; | 1361 | struct musb_request *request; |
1362 | int status = 0; | 1362 | int status = 0; |
1363 | 1363 | ||
1364 | if (!ep) | 1364 | if (!ep) |
1365 | return -EINVAL; | 1365 | return -EINVAL; |
1366 | mbase = musb->mregs; | 1366 | mbase = musb->mregs; |
1367 | 1367 | ||
1368 | spin_lock_irqsave(&musb->lock, flags); | 1368 | spin_lock_irqsave(&musb->lock, flags); |
1369 | 1369 | ||
1370 | if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { | 1370 | if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { |
1371 | status = -EINVAL; | 1371 | status = -EINVAL; |
1372 | goto done; | 1372 | goto done; |
1373 | } | 1373 | } |
1374 | 1374 | ||
1375 | musb_ep_select(mbase, epnum); | 1375 | musb_ep_select(mbase, epnum); |
1376 | 1376 | ||
1377 | request = next_request(musb_ep); | 1377 | request = next_request(musb_ep); |
1378 | if (value) { | 1378 | if (value) { |
1379 | if (request) { | 1379 | if (request) { |
1380 | dev_dbg(musb->controller, "request in progress, cannot halt %s\n", | 1380 | dev_dbg(musb->controller, "request in progress, cannot halt %s\n", |
1381 | ep->name); | 1381 | ep->name); |
1382 | status = -EAGAIN; | 1382 | status = -EAGAIN; |
1383 | goto done; | 1383 | goto done; |
1384 | } | 1384 | } |
1385 | /* Cannot portably stall with non-empty FIFO */ | 1385 | /* Cannot portably stall with non-empty FIFO */ |
1386 | if (musb_ep->is_in) { | 1386 | if (musb_ep->is_in) { |
1387 | csr = musb_readw(epio, MUSB_TXCSR); | 1387 | csr = musb_readw(epio, MUSB_TXCSR); |
1388 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 1388 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { |
1389 | dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name); | 1389 | dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name); |
1390 | status = -EAGAIN; | 1390 | status = -EAGAIN; |
1391 | goto done; | 1391 | goto done; |
1392 | } | 1392 | } |
1393 | } | 1393 | } |
1394 | } else | 1394 | } else |
1395 | musb_ep->wedged = 0; | 1395 | musb_ep->wedged = 0; |
1396 | 1396 | ||
1397 | /* set/clear the stall and toggle bits */ | 1397 | /* set/clear the stall and toggle bits */ |
1398 | dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear"); | 1398 | dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear"); |
1399 | if (musb_ep->is_in) { | 1399 | if (musb_ep->is_in) { |
1400 | csr = musb_readw(epio, MUSB_TXCSR); | 1400 | csr = musb_readw(epio, MUSB_TXCSR); |
1401 | csr |= MUSB_TXCSR_P_WZC_BITS | 1401 | csr |= MUSB_TXCSR_P_WZC_BITS |
1402 | | MUSB_TXCSR_CLRDATATOG; | 1402 | | MUSB_TXCSR_CLRDATATOG; |
1403 | if (value) | 1403 | if (value) |
1404 | csr |= MUSB_TXCSR_P_SENDSTALL; | 1404 | csr |= MUSB_TXCSR_P_SENDSTALL; |
1405 | else | 1405 | else |
1406 | csr &= ~(MUSB_TXCSR_P_SENDSTALL | 1406 | csr &= ~(MUSB_TXCSR_P_SENDSTALL |
1407 | | MUSB_TXCSR_P_SENTSTALL); | 1407 | | MUSB_TXCSR_P_SENTSTALL); |
1408 | csr &= ~MUSB_TXCSR_TXPKTRDY; | 1408 | csr &= ~MUSB_TXCSR_TXPKTRDY; |
1409 | musb_writew(epio, MUSB_TXCSR, csr); | 1409 | musb_writew(epio, MUSB_TXCSR, csr); |
1410 | } else { | 1410 | } else { |
1411 | csr = musb_readw(epio, MUSB_RXCSR); | 1411 | csr = musb_readw(epio, MUSB_RXCSR); |
1412 | csr |= MUSB_RXCSR_P_WZC_BITS | 1412 | csr |= MUSB_RXCSR_P_WZC_BITS |
1413 | | MUSB_RXCSR_FLUSHFIFO | 1413 | | MUSB_RXCSR_FLUSHFIFO |
1414 | | MUSB_RXCSR_CLRDATATOG; | 1414 | | MUSB_RXCSR_CLRDATATOG; |
1415 | if (value) | 1415 | if (value) |
1416 | csr |= MUSB_RXCSR_P_SENDSTALL; | 1416 | csr |= MUSB_RXCSR_P_SENDSTALL; |
1417 | else | 1417 | else |
1418 | csr &= ~(MUSB_RXCSR_P_SENDSTALL | 1418 | csr &= ~(MUSB_RXCSR_P_SENDSTALL |
1419 | | MUSB_RXCSR_P_SENTSTALL); | 1419 | | MUSB_RXCSR_P_SENTSTALL); |
1420 | musb_writew(epio, MUSB_RXCSR, csr); | 1420 | musb_writew(epio, MUSB_RXCSR, csr); |
1421 | } | 1421 | } |
1422 | 1422 | ||
1423 | /* maybe start the first request in the queue */ | 1423 | /* maybe start the first request in the queue */ |
1424 | if (!musb_ep->busy && !value && request) { | 1424 | if (!musb_ep->busy && !value && request) { |
1425 | dev_dbg(musb->controller, "restarting the request\n"); | 1425 | dev_dbg(musb->controller, "restarting the request\n"); |
1426 | musb_ep_restart(musb, request); | 1426 | musb_ep_restart(musb, request); |
1427 | } | 1427 | } |
1428 | 1428 | ||
1429 | done: | 1429 | done: |
1430 | spin_unlock_irqrestore(&musb->lock, flags); | 1430 | spin_unlock_irqrestore(&musb->lock, flags); |
1431 | return status; | 1431 | return status; |
1432 | } | 1432 | } |
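
A minimal sketch of the function-driver side of a protocol stall, using a hypothetical helper and endpoint; usb_ep_set_halt()/usb_ep_clear_halt() land in the op above, and the -EAGAIN branch mirrors its in-flight-transfer and non-empty-FIFO checks. Not part of this file:

    #include <linux/usb/gadget.h>

    static void example_stall(struct usb_ep *ep)     /* hypothetical helper */
    {
            int ret;

            ret = usb_ep_set_halt(ep);                /* -> musb_gadget_set_halt(ep, 1) */
            if (ret == -EAGAIN)
                    pr_debug("%s: transfer still in flight, cannot stall yet\n",
                             ep->name);

            /* ... later, once the condition has been handled ... */
            usb_ep_clear_halt(ep);                    /* -> musb_gadget_set_halt(ep, 0) */
    }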
1433 | 1433 | ||
1434 | /* | 1434 | /* |
1435 | * Set the halt feature, ignoring subsequent clear-halt requests | 1435 | * Set the halt feature, ignoring subsequent clear-halt requests |
1436 | */ | 1436 | */ |
1437 | static int musb_gadget_set_wedge(struct usb_ep *ep) | 1437 | static int musb_gadget_set_wedge(struct usb_ep *ep) |
1438 | { | 1438 | { |
1439 | struct musb_ep *musb_ep = to_musb_ep(ep); | 1439 | struct musb_ep *musb_ep = to_musb_ep(ep); |
1440 | 1440 | ||
1441 | if (!ep) | 1441 | if (!ep) |
1442 | return -EINVAL; | 1442 | return -EINVAL; |
1443 | 1443 | ||
1444 | musb_ep->wedged = 1; | 1444 | musb_ep->wedged = 1; |
1445 | 1445 | ||
1446 | return usb_ep_set_halt(ep); | 1446 | return usb_ep_set_halt(ep); |
1447 | } | 1447 | } |
1448 | 1448 | ||
1449 | static int musb_gadget_fifo_status(struct usb_ep *ep) | 1449 | static int musb_gadget_fifo_status(struct usb_ep *ep) |
1450 | { | 1450 | { |
1451 | struct musb_ep *musb_ep = to_musb_ep(ep); | 1451 | struct musb_ep *musb_ep = to_musb_ep(ep); |
1452 | void __iomem *epio = musb_ep->hw_ep->regs; | 1452 | void __iomem *epio = musb_ep->hw_ep->regs; |
1453 | int retval = -EINVAL; | 1453 | int retval = -EINVAL; |
1454 | 1454 | ||
1455 | if (musb_ep->desc && !musb_ep->is_in) { | 1455 | if (musb_ep->desc && !musb_ep->is_in) { |
1456 | struct musb *musb = musb_ep->musb; | 1456 | struct musb *musb = musb_ep->musb; |
1457 | int epnum = musb_ep->current_epnum; | 1457 | int epnum = musb_ep->current_epnum; |
1458 | void __iomem *mbase = musb->mregs; | 1458 | void __iomem *mbase = musb->mregs; |
1459 | unsigned long flags; | 1459 | unsigned long flags; |
1460 | 1460 | ||
1461 | spin_lock_irqsave(&musb->lock, flags); | 1461 | spin_lock_irqsave(&musb->lock, flags); |
1462 | 1462 | ||
1463 | musb_ep_select(mbase, epnum); | 1463 | musb_ep_select(mbase, epnum); |
1464 | /* FIXME return zero unless RXPKTRDY is set */ | 1464 | /* FIXME return zero unless RXPKTRDY is set */ |
1465 | retval = musb_readw(epio, MUSB_RXCOUNT); | 1465 | retval = musb_readw(epio, MUSB_RXCOUNT); |
1466 | 1466 | ||
1467 | spin_unlock_irqrestore(&musb->lock, flags); | 1467 | spin_unlock_irqrestore(&musb->lock, flags); |
1468 | } | 1468 | } |
1469 | return retval; | 1469 | return retval; |
1470 | } | 1470 | } |
1471 | 1471 | ||
1472 | static void musb_gadget_fifo_flush(struct usb_ep *ep) | 1472 | static void musb_gadget_fifo_flush(struct usb_ep *ep) |
1473 | { | 1473 | { |
1474 | struct musb_ep *musb_ep = to_musb_ep(ep); | 1474 | struct musb_ep *musb_ep = to_musb_ep(ep); |
1475 | struct musb *musb = musb_ep->musb; | 1475 | struct musb *musb = musb_ep->musb; |
1476 | u8 epnum = musb_ep->current_epnum; | 1476 | u8 epnum = musb_ep->current_epnum; |
1477 | void __iomem *epio = musb->endpoints[epnum].regs; | 1477 | void __iomem *epio = musb->endpoints[epnum].regs; |
1478 | void __iomem *mbase; | 1478 | void __iomem *mbase; |
1479 | unsigned long flags; | 1479 | unsigned long flags; |
1480 | u16 csr; | 1480 | u16 csr; |
1481 | 1481 | ||
1482 | mbase = musb->mregs; | 1482 | mbase = musb->mregs; |
1483 | 1483 | ||
1484 | spin_lock_irqsave(&musb->lock, flags); | 1484 | spin_lock_irqsave(&musb->lock, flags); |
1485 | musb_ep_select(mbase, (u8) epnum); | 1485 | musb_ep_select(mbase, (u8) epnum); |
1486 | 1486 | ||
1487 | /* disable interrupts */ | 1487 | /* disable interrupts */ |
1488 | musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum)); | 1488 | musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe & ~(1 << epnum)); |
1489 | 1489 | ||
1490 | if (musb_ep->is_in) { | 1490 | if (musb_ep->is_in) { |
1491 | csr = musb_readw(epio, MUSB_TXCSR); | 1491 | csr = musb_readw(epio, MUSB_TXCSR); |
1492 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 1492 | if (csr & MUSB_TXCSR_FIFONOTEMPTY) { |
1493 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; | 1493 | csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; |
1494 | /* | 1494 | /* |
1495 | * Setting both TXPKTRDY and FLUSHFIFO makes the controller | 1495 | * Setting both TXPKTRDY and FLUSHFIFO makes the controller |
1496 | * interrupt the current FIFO loading, but not flush the | 1496 | * interrupt the current FIFO loading, but not flush the |
1497 | * packets that are already loaded. | 1497 | * packets that are already loaded. |
1498 | */ | 1498 | */ |
1499 | csr &= ~MUSB_TXCSR_TXPKTRDY; | 1499 | csr &= ~MUSB_TXCSR_TXPKTRDY; |
1500 | musb_writew(epio, MUSB_TXCSR, csr); | 1500 | musb_writew(epio, MUSB_TXCSR, csr); |
1501 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ | 1501 | /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ |
1502 | musb_writew(epio, MUSB_TXCSR, csr); | 1502 | musb_writew(epio, MUSB_TXCSR, csr); |
1503 | } | 1503 | } |
1504 | } else { | 1504 | } else { |
1505 | csr = musb_readw(epio, MUSB_RXCSR); | 1505 | csr = musb_readw(epio, MUSB_RXCSR); |
1506 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; | 1506 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; |
1507 | musb_writew(epio, MUSB_RXCSR, csr); | 1507 | musb_writew(epio, MUSB_RXCSR, csr); |
1508 | musb_writew(epio, MUSB_RXCSR, csr); | 1508 | musb_writew(epio, MUSB_RXCSR, csr); |
1509 | } | 1509 | } |
1510 | 1510 | ||
1511 | /* re-enable interrupt */ | 1511 | /* re-enable interrupt */ |
1512 | musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); | 1512 | musb_writew(mbase, MUSB_INTRTXE, musb->intrtxe); |
1513 | spin_unlock_irqrestore(&musb->lock, flags); | 1513 | spin_unlock_irqrestore(&musb->lock, flags); |
1514 | } | 1514 | } |
1515 | 1515 | ||
1516 | static const struct usb_ep_ops musb_ep_ops = { | 1516 | static const struct usb_ep_ops musb_ep_ops = { |
1517 | .enable = musb_gadget_enable, | 1517 | .enable = musb_gadget_enable, |
1518 | .disable = musb_gadget_disable, | 1518 | .disable = musb_gadget_disable, |
1519 | .alloc_request = musb_alloc_request, | 1519 | .alloc_request = musb_alloc_request, |
1520 | .free_request = musb_free_request, | 1520 | .free_request = musb_free_request, |
1521 | .queue = musb_gadget_queue, | 1521 | .queue = musb_gadget_queue, |
1522 | .dequeue = musb_gadget_dequeue, | 1522 | .dequeue = musb_gadget_dequeue, |
1523 | .set_halt = musb_gadget_set_halt, | 1523 | .set_halt = musb_gadget_set_halt, |
1524 | .set_wedge = musb_gadget_set_wedge, | 1524 | .set_wedge = musb_gadget_set_wedge, |
1525 | .fifo_status = musb_gadget_fifo_status, | 1525 | .fifo_status = musb_gadget_fifo_status, |
1526 | .fifo_flush = musb_gadget_fifo_flush | 1526 | .fifo_flush = musb_gadget_fifo_flush |
1527 | }; | 1527 | }; |
1528 | 1528 | ||
1529 | /* ----------------------------------------------------------------------- */ | 1529 | /* ----------------------------------------------------------------------- */ |
1530 | 1530 | ||
1531 | static int musb_gadget_get_frame(struct usb_gadget *gadget) | 1531 | static int musb_gadget_get_frame(struct usb_gadget *gadget) |
1532 | { | 1532 | { |
1533 | struct musb *musb = gadget_to_musb(gadget); | 1533 | struct musb *musb = gadget_to_musb(gadget); |
1534 | 1534 | ||
1535 | return (int)musb_readw(musb->mregs, MUSB_FRAME); | 1535 | return (int)musb_readw(musb->mregs, MUSB_FRAME); |
1536 | } | 1536 | } |
1537 | 1537 | ||
1538 | static int musb_gadget_wakeup(struct usb_gadget *gadget) | 1538 | static int musb_gadget_wakeup(struct usb_gadget *gadget) |
1539 | { | 1539 | { |
1540 | struct musb *musb = gadget_to_musb(gadget); | 1540 | struct musb *musb = gadget_to_musb(gadget); |
1541 | void __iomem *mregs = musb->mregs; | 1541 | void __iomem *mregs = musb->mregs; |
1542 | unsigned long flags; | 1542 | unsigned long flags; |
1543 | int status = -EINVAL; | 1543 | int status = -EINVAL; |
1544 | u8 power, devctl; | 1544 | u8 power, devctl; |
1545 | int retries; | 1545 | int retries; |
1546 | 1546 | ||
1547 | spin_lock_irqsave(&musb->lock, flags); | 1547 | spin_lock_irqsave(&musb->lock, flags); |
1548 | 1548 | ||
1549 | switch (musb->xceiv->state) { | 1549 | switch (musb->xceiv->state) { |
1550 | case OTG_STATE_B_PERIPHERAL: | 1550 | case OTG_STATE_B_PERIPHERAL: |
1551 | /* NOTE: OTG state machine doesn't include B_SUSPENDED; | 1551 | /* NOTE: OTG state machine doesn't include B_SUSPENDED; |
1552 | * that's part of the standard usb 1.1 state machine, and | 1552 | * that's part of the standard usb 1.1 state machine, and |
1553 | * doesn't affect OTG transitions. | 1553 | * doesn't affect OTG transitions. |
1554 | */ | 1554 | */ |
1555 | if (musb->may_wakeup && musb->is_suspended) | 1555 | if (musb->may_wakeup && musb->is_suspended) |
1556 | break; | 1556 | break; |
1557 | goto done; | 1557 | goto done; |
1558 | case OTG_STATE_B_IDLE: | 1558 | case OTG_STATE_B_IDLE: |
1559 | /* Start SRP ... OTG not required. */ | 1559 | /* Start SRP ... OTG not required. */ |
1560 | devctl = musb_readb(mregs, MUSB_DEVCTL); | 1560 | devctl = musb_readb(mregs, MUSB_DEVCTL); |
1561 | dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl); | 1561 | dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl); |
1562 | devctl |= MUSB_DEVCTL_SESSION; | 1562 | devctl |= MUSB_DEVCTL_SESSION; |
1563 | musb_writeb(mregs, MUSB_DEVCTL, devctl); | 1563 | musb_writeb(mregs, MUSB_DEVCTL, devctl); |
1564 | devctl = musb_readb(mregs, MUSB_DEVCTL); | 1564 | devctl = musb_readb(mregs, MUSB_DEVCTL); |
1565 | retries = 100; | 1565 | retries = 100; |
1566 | while (!(devctl & MUSB_DEVCTL_SESSION)) { | 1566 | while (!(devctl & MUSB_DEVCTL_SESSION)) { |
1567 | devctl = musb_readb(mregs, MUSB_DEVCTL); | 1567 | devctl = musb_readb(mregs, MUSB_DEVCTL); |
1568 | if (retries-- < 1) | 1568 | if (retries-- < 1) |
1569 | break; | 1569 | break; |
1570 | } | 1570 | } |
1571 | retries = 10000; | 1571 | retries = 10000; |
1572 | while (devctl & MUSB_DEVCTL_SESSION) { | 1572 | while (devctl & MUSB_DEVCTL_SESSION) { |
1573 | devctl = musb_readb(mregs, MUSB_DEVCTL); | 1573 | devctl = musb_readb(mregs, MUSB_DEVCTL); |
1574 | if (retries-- < 1) | 1574 | if (retries-- < 1) |
1575 | break; | 1575 | break; |
1576 | } | 1576 | } |
1577 | 1577 | ||
1578 | spin_unlock_irqrestore(&musb->lock, flags); | 1578 | spin_unlock_irqrestore(&musb->lock, flags); |
1579 | otg_start_srp(musb->xceiv->otg); | 1579 | otg_start_srp(musb->xceiv->otg); |
1580 | spin_lock_irqsave(&musb->lock, flags); | 1580 | spin_lock_irqsave(&musb->lock, flags); |
1581 | 1581 | ||
1582 | /* Block idling for at least 1s */ | 1582 | /* Block idling for at least 1s */ |
1583 | musb_platform_try_idle(musb, | 1583 | musb_platform_try_idle(musb, |
1584 | jiffies + msecs_to_jiffies(1 * HZ)); | 1584 | jiffies + msecs_to_jiffies(1 * HZ)); |
1585 | 1585 | ||
1586 | status = 0; | 1586 | status = 0; |
1587 | goto done; | 1587 | goto done; |
1588 | default: | 1588 | default: |
1589 | dev_dbg(musb->controller, "Unhandled wake: %s\n", | 1589 | dev_dbg(musb->controller, "Unhandled wake: %s\n", |
1590 | usb_otg_state_string(musb->xceiv->state)); | 1590 | usb_otg_state_string(musb->xceiv->state)); |
1591 | goto done; | 1591 | goto done; |
1592 | } | 1592 | } |
1593 | 1593 | ||
1594 | status = 0; | 1594 | status = 0; |
1595 | 1595 | ||
1596 | power = musb_readb(mregs, MUSB_POWER); | 1596 | power = musb_readb(mregs, MUSB_POWER); |
1597 | power |= MUSB_POWER_RESUME; | 1597 | power |= MUSB_POWER_RESUME; |
1598 | musb_writeb(mregs, MUSB_POWER, power); | 1598 | musb_writeb(mregs, MUSB_POWER, power); |
1599 | dev_dbg(musb->controller, "issue wakeup\n"); | 1599 | dev_dbg(musb->controller, "issue wakeup\n"); |
1600 | 1600 | ||
1601 | /* FIXME do this next chunk in a timer callback, no udelay */ | 1601 | /* FIXME do this next chunk in a timer callback, no udelay */ |
1602 | mdelay(2); | 1602 | mdelay(2); |
1603 | 1603 | ||
1604 | power = musb_readb(mregs, MUSB_POWER); | 1604 | power = musb_readb(mregs, MUSB_POWER); |
1605 | power &= ~MUSB_POWER_RESUME; | 1605 | power &= ~MUSB_POWER_RESUME; |
1606 | musb_writeb(mregs, MUSB_POWER, power); | 1606 | musb_writeb(mregs, MUSB_POWER, power); |
1607 | done: | 1607 | done: |
1608 | spin_unlock_irqrestore(&musb->lock, flags); | 1608 | spin_unlock_irqrestore(&musb->lock, flags); |
1609 | return status; | 1609 | return status; |
1610 | } | 1610 | } |
1611 | 1611 | ||
1612 | static int | 1612 | static int |
1613 | musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) | 1613 | musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) |
1614 | { | 1614 | { |
1615 | struct musb *musb = gadget_to_musb(gadget); | 1615 | struct musb *musb = gadget_to_musb(gadget); |
1616 | 1616 | ||
1617 | musb->is_self_powered = !!is_selfpowered; | 1617 | musb->is_self_powered = !!is_selfpowered; |
1618 | return 0; | 1618 | return 0; |
1619 | } | 1619 | } |
1620 | 1620 | ||
1621 | static void musb_pullup(struct musb *musb, int is_on) | 1621 | static void musb_pullup(struct musb *musb, int is_on) |
1622 | { | 1622 | { |
1623 | u8 power; | 1623 | u8 power; |
1624 | 1624 | ||
1625 | power = musb_readb(musb->mregs, MUSB_POWER); | 1625 | power = musb_readb(musb->mregs, MUSB_POWER); |
1626 | if (is_on) | 1626 | if (is_on) |
1627 | power |= MUSB_POWER_SOFTCONN; | 1627 | power |= MUSB_POWER_SOFTCONN; |
1628 | else | 1628 | else |
1629 | power &= ~MUSB_POWER_SOFTCONN; | 1629 | power &= ~MUSB_POWER_SOFTCONN; |
1630 | 1630 | ||
1631 | /* FIXME if on, HdrcStart; if off, HdrcStop */ | 1631 | /* FIXME if on, HdrcStart; if off, HdrcStop */ |
1632 | 1632 | ||
1633 | dev_dbg(musb->controller, "gadget D+ pullup %s\n", | 1633 | dev_dbg(musb->controller, "gadget D+ pullup %s\n", |
1634 | is_on ? "on" : "off"); | 1634 | is_on ? "on" : "off"); |
1635 | musb_writeb(musb->mregs, MUSB_POWER, power); | 1635 | musb_writeb(musb->mregs, MUSB_POWER, power); |
1636 | } | 1636 | } |
1637 | 1637 | ||
1638 | #if 0 | 1638 | #if 0 |
1639 | static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) | 1639 | static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) |
1640 | { | 1640 | { |
1641 | dev_dbg(musb->controller, "<= %s =>\n", __func__); | 1641 | dev_dbg(musb->controller, "<= %s =>\n", __func__); |
1642 | 1642 | ||
1643 | /* | 1643 | /* |
1644 | * FIXME iff driver's softconnect flag is set (as it is during probe, | 1644 | * FIXME iff driver's softconnect flag is set (as it is during probe, |
1645 | * though that can clear it), just musb_pullup(). | 1645 | * though that can clear it), just musb_pullup(). |
1646 | */ | 1646 | */ |
1647 | 1647 | ||
1648 | return -EINVAL; | 1648 | return -EINVAL; |
1649 | } | 1649 | } |
1650 | #endif | 1650 | #endif |
1651 | 1651 | ||
1652 | static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) | 1652 | static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) |
1653 | { | 1653 | { |
1654 | struct musb *musb = gadget_to_musb(gadget); | 1654 | struct musb *musb = gadget_to_musb(gadget); |
1655 | 1655 | ||
1656 | if (!musb->xceiv->set_power) | 1656 | if (!musb->xceiv->set_power) |
1657 | return -EOPNOTSUPP; | 1657 | return -EOPNOTSUPP; |
1658 | return usb_phy_set_power(musb->xceiv, mA); | 1658 | return usb_phy_set_power(musb->xceiv, mA); |
1659 | } | 1659 | } |
1660 | 1660 | ||
1661 | static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) | 1661 | static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) |
1662 | { | 1662 | { |
1663 | struct musb *musb = gadget_to_musb(gadget); | 1663 | struct musb *musb = gadget_to_musb(gadget); |
1664 | unsigned long flags; | 1664 | unsigned long flags; |
1665 | 1665 | ||
1666 | is_on = !!is_on; | 1666 | is_on = !!is_on; |
1667 | 1667 | ||
1668 | pm_runtime_get_sync(musb->controller); | 1668 | pm_runtime_get_sync(musb->controller); |
1669 | 1669 | ||
1670 | /* NOTE: this assumes we are sensing vbus; we'd rather | 1670 | /* NOTE: this assumes we are sensing vbus; we'd rather |
1671 | * not pullup unless the B-session is active. | 1671 | * not pullup unless the B-session is active. |
1672 | */ | 1672 | */ |
1673 | spin_lock_irqsave(&musb->lock, flags); | 1673 | spin_lock_irqsave(&musb->lock, flags); |
1674 | if (is_on != musb->softconnect) { | 1674 | if (is_on != musb->softconnect) { |
1675 | musb->softconnect = is_on; | 1675 | musb->softconnect = is_on; |
1676 | musb_pullup(musb, is_on); | 1676 | musb_pullup(musb, is_on); |
1677 | } | 1677 | } |
1678 | spin_unlock_irqrestore(&musb->lock, flags); | 1678 | spin_unlock_irqrestore(&musb->lock, flags); |
1679 | 1679 | ||
1680 | pm_runtime_put(musb->controller); | 1680 | pm_runtime_put(musb->controller); |
1681 | 1681 | ||
1682 | return 0; | 1682 | return 0; |
1683 | } | 1683 | } |
1684 | 1684 | ||
1685 | static int musb_gadget_start(struct usb_gadget *g, | 1685 | static int musb_gadget_start(struct usb_gadget *g, |
1686 | struct usb_gadget_driver *driver); | 1686 | struct usb_gadget_driver *driver); |
1687 | static int musb_gadget_stop(struct usb_gadget *g, | 1687 | static int musb_gadget_stop(struct usb_gadget *g, |
1688 | struct usb_gadget_driver *driver); | 1688 | struct usb_gadget_driver *driver); |
1689 | 1689 | ||
1690 | static const struct usb_gadget_ops musb_gadget_operations = { | 1690 | static const struct usb_gadget_ops musb_gadget_operations = { |
1691 | .get_frame = musb_gadget_get_frame, | 1691 | .get_frame = musb_gadget_get_frame, |
1692 | .wakeup = musb_gadget_wakeup, | 1692 | .wakeup = musb_gadget_wakeup, |
1693 | .set_selfpowered = musb_gadget_set_self_powered, | 1693 | .set_selfpowered = musb_gadget_set_self_powered, |
1694 | /* .vbus_session = musb_gadget_vbus_session, */ | 1694 | /* .vbus_session = musb_gadget_vbus_session, */ |
1695 | .vbus_draw = musb_gadget_vbus_draw, | 1695 | .vbus_draw = musb_gadget_vbus_draw, |
1696 | .pullup = musb_gadget_pullup, | 1696 | .pullup = musb_gadget_pullup, |
1697 | .udc_start = musb_gadget_start, | 1697 | .udc_start = musb_gadget_start, |
1698 | .udc_stop = musb_gadget_stop, | 1698 | .udc_stop = musb_gadget_stop, |
1699 | }; | 1699 | }; |
1700 | 1700 | ||
1701 | /* ----------------------------------------------------------------------- */ | 1701 | /* ----------------------------------------------------------------------- */ |
1702 | 1702 | ||
1703 | /* Registration */ | 1703 | /* Registration */ |
1704 | 1704 | ||
1705 | /* Only this registration code "knows" the rule (from USB standards) | 1705 | /* Only this registration code "knows" the rule (from USB standards) |
1706 | * about there being only one external upstream port. It assumes | 1706 | * about there being only one external upstream port. It assumes |
1707 | * all peripheral ports are external... | 1707 | * all peripheral ports are external... |
1708 | */ | 1708 | */ |
1709 | 1709 | ||
1710 | static void | 1710 | static void |
1711 | init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) | 1711 | init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) |
1712 | { | 1712 | { |
1713 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | 1713 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; |
1714 | 1714 | ||
1715 | memset(ep, 0, sizeof *ep); | 1715 | memset(ep, 0, sizeof *ep); |
1716 | 1716 | ||
1717 | ep->current_epnum = epnum; | 1717 | ep->current_epnum = epnum; |
1718 | ep->musb = musb; | 1718 | ep->musb = musb; |
1719 | ep->hw_ep = hw_ep; | 1719 | ep->hw_ep = hw_ep; |
1720 | ep->is_in = is_in; | 1720 | ep->is_in = is_in; |
1721 | 1721 | ||
1722 | INIT_LIST_HEAD(&ep->req_list); | 1722 | INIT_LIST_HEAD(&ep->req_list); |
1723 | 1723 | ||
1724 | sprintf(ep->name, "ep%d%s", epnum, | 1724 | sprintf(ep->name, "ep%d%s", epnum, |
1725 | (!epnum || hw_ep->is_shared_fifo) ? "" : ( | 1725 | (!epnum || hw_ep->is_shared_fifo) ? "" : ( |
1726 | is_in ? "in" : "out")); | 1726 | is_in ? "in" : "out")); |
1727 | ep->end_point.name = ep->name; | 1727 | ep->end_point.name = ep->name; |
1728 | INIT_LIST_HEAD(&ep->end_point.ep_list); | 1728 | INIT_LIST_HEAD(&ep->end_point.ep_list); |
1729 | if (!epnum) { | 1729 | if (!epnum) { |
1730 | ep->end_point.maxpacket = 64; | 1730 | ep->end_point.maxpacket = 64; |
1731 | ep->end_point.ops = &musb_g_ep0_ops; | 1731 | ep->end_point.ops = &musb_g_ep0_ops; |
1732 | musb->g.ep0 = &ep->end_point; | 1732 | musb->g.ep0 = &ep->end_point; |
1733 | } else { | 1733 | } else { |
1734 | if (is_in) | 1734 | if (is_in) |
1735 | ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; | 1735 | ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; |
1736 | else | 1736 | else |
1737 | ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; | 1737 | ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; |
1738 | ep->end_point.ops = &musb_ep_ops; | 1738 | ep->end_point.ops = &musb_ep_ops; |
1739 | list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); | 1739 | list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); |
1740 | } | 1740 | } |
1741 | } | 1741 | } |
1742 | 1742 | ||
1743 | /* | 1743 | /* |
1744 | * Initialize the endpoints exposed to peripheral drivers, with backlinks | 1744 | * Initialize the endpoints exposed to peripheral drivers, with backlinks |
1745 | * to the rest of the driver state. | 1745 | * to the rest of the driver state. |
1746 | */ | 1746 | */ |
1747 | static inline void musb_g_init_endpoints(struct musb *musb) | 1747 | static inline void musb_g_init_endpoints(struct musb *musb) |
1748 | { | 1748 | { |
1749 | u8 epnum; | 1749 | u8 epnum; |
1750 | struct musb_hw_ep *hw_ep; | 1750 | struct musb_hw_ep *hw_ep; |
1751 | unsigned count = 0; | 1751 | unsigned count = 0; |
1752 | 1752 | ||
1753 | /* initialize endpoint list just once */ | 1753 | /* initialize endpoint list just once */ |
1754 | INIT_LIST_HEAD(&(musb->g.ep_list)); | 1754 | INIT_LIST_HEAD(&(musb->g.ep_list)); |
1755 | 1755 | ||
1756 | for (epnum = 0, hw_ep = musb->endpoints; | 1756 | for (epnum = 0, hw_ep = musb->endpoints; |
1757 | epnum < musb->nr_endpoints; | 1757 | epnum < musb->nr_endpoints; |
1758 | epnum++, hw_ep++) { | 1758 | epnum++, hw_ep++) { |
1759 | if (hw_ep->is_shared_fifo /* || !epnum */) { | 1759 | if (hw_ep->is_shared_fifo /* || !epnum */) { |
1760 | init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); | 1760 | init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); |
1761 | count++; | 1761 | count++; |
1762 | } else { | 1762 | } else { |
1763 | if (hw_ep->max_packet_sz_tx) { | 1763 | if (hw_ep->max_packet_sz_tx) { |
1764 | init_peripheral_ep(musb, &hw_ep->ep_in, | 1764 | init_peripheral_ep(musb, &hw_ep->ep_in, |
1765 | epnum, 1); | 1765 | epnum, 1); |
1766 | count++; | 1766 | count++; |
1767 | } | 1767 | } |
1768 | if (hw_ep->max_packet_sz_rx) { | 1768 | if (hw_ep->max_packet_sz_rx) { |
1769 | init_peripheral_ep(musb, &hw_ep->ep_out, | 1769 | init_peripheral_ep(musb, &hw_ep->ep_out, |
1770 | epnum, 0); | 1770 | epnum, 0); |
1771 | count++; | 1771 | count++; |
1772 | } | 1772 | } |
1773 | } | 1773 | } |
1774 | } | 1774 | } |
1775 | } | 1775 | } |
1776 | 1776 | ||
1777 | /* called once during driver setup to initialize and link into | 1777 | /* called once during driver setup to initialize and link into |
1778 | * the driver model; memory is zeroed. | 1778 | * the driver model; memory is zeroed. |
1779 | */ | 1779 | */ |
1780 | int musb_gadget_setup(struct musb *musb) | 1780 | int musb_gadget_setup(struct musb *musb) |
1781 | { | 1781 | { |
1782 | int status; | 1782 | int status; |
1783 | 1783 | ||
1784 | /* REVISIT minor race: if (erroneously) setting up two | 1784 | /* REVISIT minor race: if (erroneously) setting up two |
1785 | * musb peripherals at the same time, only the bus lock | 1785 | * musb peripherals at the same time, only the bus lock |
1786 | * is probably held. | 1786 | * is probably held. |
1787 | */ | 1787 | */ |
1788 | 1788 | ||
1789 | musb->g.ops = &musb_gadget_operations; | 1789 | musb->g.ops = &musb_gadget_operations; |
1790 | musb->g.max_speed = USB_SPEED_HIGH; | 1790 | musb->g.max_speed = USB_SPEED_HIGH; |
1791 | musb->g.speed = USB_SPEED_UNKNOWN; | 1791 | musb->g.speed = USB_SPEED_UNKNOWN; |
1792 | 1792 | ||
1793 | /* this "gadget" abstracts/virtualizes the controller */ | 1793 | /* this "gadget" abstracts/virtualizes the controller */ |
1794 | musb->g.name = musb_driver_name; | 1794 | musb->g.name = musb_driver_name; |
1795 | musb->g.is_otg = 1; | 1795 | musb->g.is_otg = 1; |
1796 | 1796 | ||
1797 | musb_g_init_endpoints(musb); | 1797 | musb_g_init_endpoints(musb); |
1798 | 1798 | ||
1799 | musb->is_active = 0; | 1799 | musb->is_active = 0; |
1800 | musb_platform_try_idle(musb, 0); | 1800 | musb_platform_try_idle(musb, 0); |
1801 | 1801 | ||
1802 | status = usb_add_gadget_udc(musb->controller, &musb->g); | 1802 | status = usb_add_gadget_udc(musb->controller, &musb->g); |
1803 | if (status) | 1803 | if (status) |
1804 | goto err; | 1804 | goto err; |
1805 | 1805 | ||
1806 | return 0; | 1806 | return 0; |
1807 | err: | 1807 | err: |
1808 | musb->g.dev.parent = NULL; | 1808 | musb->g.dev.parent = NULL; |
1809 | device_unregister(&musb->g.dev); | 1809 | device_unregister(&musb->g.dev); |
1810 | return status; | 1810 | return status; |
1811 | } | 1811 | } |
1812 | 1812 | ||
1813 | void musb_gadget_cleanup(struct musb *musb) | 1813 | void musb_gadget_cleanup(struct musb *musb) |
1814 | { | 1814 | { |
1815 | if (musb->port_mode == MUSB_PORT_MODE_HOST) | ||
1816 | return; | ||
1815 | usb_del_gadget_udc(&musb->g); | 1817 | usb_del_gadget_udc(&musb->g); |
1816 | } | 1818 | } |
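The two added lines above make musb_gadget_cleanup() a no-op when the port is configured as MUSB_PORT_MODE_HOST: in that configuration the UDC side is not brought up, so there is nothing registered via usb_add_gadget_udc() for usb_del_gadget_udc() to remove. A matching check is what the host teardown path would need in front of usb_remove_hcd(); the fragment below is only a sketch of that pattern, assuming a musb_host_cleanup() helper and a MUSB_PORT_MODE_GADGET mode value, and is not the literal musb_host.c hunk of this commit.

	/* sketch only: skip HCD removal when the port never ran as a host */
	void musb_host_cleanup(struct musb *musb)
	{
		if (musb->port_mode == MUSB_PORT_MODE_GADGET)
			return;		/* no hcd was ever added */
		usb_remove_hcd(musb->hcd);
	}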
1817 | 1819 | ||
1818 | /* | 1820 | /* |
1819 | * Register the gadget driver. Used by gadget drivers when | 1821 | * Register the gadget driver. Used by gadget drivers when |
1820 | * registering themselves with the controller. | 1822 | * registering themselves with the controller. |
1821 | * | 1823 | * |
1822 | * -EINVAL something went wrong (not driver) | 1824 | * -EINVAL something went wrong (not driver) |
1823 | * -EBUSY another gadget is already using the controller | 1825 | * -EBUSY another gadget is already using the controller |
1824 | * -ENOMEM no memory to perform the operation | 1826 | * -ENOMEM no memory to perform the operation |
1825 | * | 1827 | * |
1826 | * @param driver the gadget driver | 1828 | * @param driver the gadget driver |
1827 | * @return <0 if error, 0 if everything is fine | 1829 | * @return <0 if error, 0 if everything is fine |
1828 | */ | 1830 | */ |
1829 | static int musb_gadget_start(struct usb_gadget *g, | 1831 | static int musb_gadget_start(struct usb_gadget *g, |
1830 | struct usb_gadget_driver *driver) | 1832 | struct usb_gadget_driver *driver) |
1831 | { | 1833 | { |
1832 | struct musb *musb = gadget_to_musb(g); | 1834 | struct musb *musb = gadget_to_musb(g); |
1833 | struct usb_otg *otg = musb->xceiv->otg; | 1835 | struct usb_otg *otg = musb->xceiv->otg; |
1834 | unsigned long flags; | 1836 | unsigned long flags; |
1835 | int retval = 0; | 1837 | int retval = 0; |
1836 | 1838 | ||
1837 | if (driver->max_speed < USB_SPEED_HIGH) { | 1839 | if (driver->max_speed < USB_SPEED_HIGH) { |
1838 | retval = -EINVAL; | 1840 | retval = -EINVAL; |
1839 | goto err; | 1841 | goto err; |
1840 | } | 1842 | } |
1841 | 1843 | ||
1842 | pm_runtime_get_sync(musb->controller); | 1844 | pm_runtime_get_sync(musb->controller); |
1843 | 1845 | ||
1844 | dev_dbg(musb->controller, "registering driver %s\n", driver->function); | 1846 | dev_dbg(musb->controller, "registering driver %s\n", driver->function); |
1845 | 1847 | ||
1846 | musb->softconnect = 0; | 1848 | musb->softconnect = 0; |
1847 | musb->gadget_driver = driver; | 1849 | musb->gadget_driver = driver; |
1848 | 1850 | ||
1849 | spin_lock_irqsave(&musb->lock, flags); | 1851 | spin_lock_irqsave(&musb->lock, flags); |
1850 | musb->is_active = 1; | 1852 | musb->is_active = 1; |
1851 | 1853 | ||
1852 | otg_set_peripheral(otg, &musb->g); | 1854 | otg_set_peripheral(otg, &musb->g); |
1853 | musb->xceiv->state = OTG_STATE_B_IDLE; | 1855 | musb->xceiv->state = OTG_STATE_B_IDLE; |
1854 | spin_unlock_irqrestore(&musb->lock, flags); | 1856 | spin_unlock_irqrestore(&musb->lock, flags); |
1855 | 1857 | ||
1856 | /* REVISIT: funcall to other code, which also | 1858 | /* REVISIT: funcall to other code, which also |
1857 | * handles power budgeting ... this way also | 1859 | * handles power budgeting ... this way also |
1858 | * ensures HdrcStart is indirectly called. | 1860 | * ensures HdrcStart is indirectly called. |
1859 | */ | 1861 | */ |
1860 | if (musb->xceiv->last_event == USB_EVENT_ID) | 1862 | if (musb->xceiv->last_event == USB_EVENT_ID) |
1861 | musb_platform_set_vbus(musb, 1); | 1863 | musb_platform_set_vbus(musb, 1); |
1862 | 1864 | ||
1863 | if (musb->xceiv->last_event == USB_EVENT_NONE) | 1865 | if (musb->xceiv->last_event == USB_EVENT_NONE) |
1864 | pm_runtime_put(musb->controller); | 1866 | pm_runtime_put(musb->controller); |
1865 | 1867 | ||
1866 | return 0; | 1868 | return 0; |
1867 | 1869 | ||
1868 | err: | 1870 | err: |
1869 | return retval; | 1871 | return retval; |
1870 | } | 1872 | } |
1871 | 1873 | ||
1872 | static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) | 1874 | static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) |
1873 | { | 1875 | { |
1874 | int i; | 1876 | int i; |
1875 | struct musb_hw_ep *hw_ep; | 1877 | struct musb_hw_ep *hw_ep; |
1876 | 1878 | ||
1877 | /* don't disconnect if it's not connected */ | 1879 | /* don't disconnect if it's not connected */ |
1878 | if (musb->g.speed == USB_SPEED_UNKNOWN) | 1880 | if (musb->g.speed == USB_SPEED_UNKNOWN) |
1879 | driver = NULL; | 1881 | driver = NULL; |
1880 | else | 1882 | else |
1881 | musb->g.speed = USB_SPEED_UNKNOWN; | 1883 | musb->g.speed = USB_SPEED_UNKNOWN; |
1882 | 1884 | ||
1883 | /* deactivate the hardware */ | 1885 | /* deactivate the hardware */ |
1884 | if (musb->softconnect) { | 1886 | if (musb->softconnect) { |
1885 | musb->softconnect = 0; | 1887 | musb->softconnect = 0; |
1886 | musb_pullup(musb, 0); | 1888 | musb_pullup(musb, 0); |
1887 | } | 1889 | } |
1888 | musb_stop(musb); | 1890 | musb_stop(musb); |
1889 | 1891 | ||
1890 | /* killing any outstanding requests will quiesce the driver; | 1892 | /* killing any outstanding requests will quiesce the driver; |
1891 | * then report disconnect | 1893 | * then report disconnect |
1892 | */ | 1894 | */ |
1893 | if (driver) { | 1895 | if (driver) { |
1894 | for (i = 0, hw_ep = musb->endpoints; | 1896 | for (i = 0, hw_ep = musb->endpoints; |
1895 | i < musb->nr_endpoints; | 1897 | i < musb->nr_endpoints; |
1896 | i++, hw_ep++) { | 1898 | i++, hw_ep++) { |
1897 | musb_ep_select(musb->mregs, i); | 1899 | musb_ep_select(musb->mregs, i); |
1898 | if (hw_ep->is_shared_fifo /* || !epnum */) { | 1900 | if (hw_ep->is_shared_fifo /* || !epnum */) { |
1899 | nuke(&hw_ep->ep_in, -ESHUTDOWN); | 1901 | nuke(&hw_ep->ep_in, -ESHUTDOWN); |
1900 | } else { | 1902 | } else { |
1901 | if (hw_ep->max_packet_sz_tx) | 1903 | if (hw_ep->max_packet_sz_tx) |
1902 | nuke(&hw_ep->ep_in, -ESHUTDOWN); | 1904 | nuke(&hw_ep->ep_in, -ESHUTDOWN); |
1903 | if (hw_ep->max_packet_sz_rx) | 1905 | if (hw_ep->max_packet_sz_rx) |
1904 | nuke(&hw_ep->ep_out, -ESHUTDOWN); | 1906 | nuke(&hw_ep->ep_out, -ESHUTDOWN); |
1905 | } | 1907 | } |
1906 | } | 1908 | } |
1907 | } | 1909 | } |
1908 | } | 1910 | } |
1909 | 1911 | ||
1910 | /* | 1912 | /* |
1911 | * Unregister the gadget driver. Used by gadget drivers when | 1913 | * Unregister the gadget driver. Used by gadget drivers when |
1912 | * unregistering themselves from the controller. | 1914 | * unregistering themselves from the controller. |
1913 | * | 1915 | * |
1914 | * @param driver the gadget driver to unregister | 1916 | * @param driver the gadget driver to unregister |
1915 | */ | 1917 | */ |
1916 | static int musb_gadget_stop(struct usb_gadget *g, | 1918 | static int musb_gadget_stop(struct usb_gadget *g, |
1917 | struct usb_gadget_driver *driver) | 1919 | struct usb_gadget_driver *driver) |
1918 | { | 1920 | { |
1919 | struct musb *musb = gadget_to_musb(g); | 1921 | struct musb *musb = gadget_to_musb(g); |
1920 | unsigned long flags; | 1922 | unsigned long flags; |
1921 | 1923 | ||
1922 | if (musb->xceiv->last_event == USB_EVENT_NONE) | 1924 | if (musb->xceiv->last_event == USB_EVENT_NONE) |
1923 | pm_runtime_get_sync(musb->controller); | 1925 | pm_runtime_get_sync(musb->controller); |
1924 | 1926 | ||
1925 | /* | 1927 | /* |
1926 | * REVISIT always use otg_set_peripheral() here too; | 1928 | * REVISIT always use otg_set_peripheral() here too; |
1927 | * this needs to shut down the OTG engine. | 1929 | * this needs to shut down the OTG engine. |
1928 | */ | 1930 | */ |
1929 | 1931 | ||
1930 | spin_lock_irqsave(&musb->lock, flags); | 1932 | spin_lock_irqsave(&musb->lock, flags); |
1931 | 1933 | ||
1932 | musb_hnp_stop(musb); | 1934 | musb_hnp_stop(musb); |
1933 | 1935 | ||
1934 | (void) musb_gadget_vbus_draw(&musb->g, 0); | 1936 | (void) musb_gadget_vbus_draw(&musb->g, 0); |
1935 | 1937 | ||
1936 | musb->xceiv->state = OTG_STATE_UNDEFINED; | 1938 | musb->xceiv->state = OTG_STATE_UNDEFINED; |
1937 | stop_activity(musb, driver); | 1939 | stop_activity(musb, driver); |
1938 | otg_set_peripheral(musb->xceiv->otg, NULL); | 1940 | otg_set_peripheral(musb->xceiv->otg, NULL); |
1939 | 1941 | ||
1940 | dev_dbg(musb->controller, "unregistering driver %s\n", | 1942 | dev_dbg(musb->controller, "unregistering driver %s\n", |
1941 | driver ? driver->function : "(removed)"); | 1943 | driver ? driver->function : "(removed)"); |
1942 | 1944 | ||
1943 | musb->is_active = 0; | 1945 | musb->is_active = 0; |
1944 | musb->gadget_driver = NULL; | 1946 | musb->gadget_driver = NULL; |
1945 | musb_platform_try_idle(musb, 0); | 1947 | musb_platform_try_idle(musb, 0); |
1946 | spin_unlock_irqrestore(&musb->lock, flags); | 1948 | spin_unlock_irqrestore(&musb->lock, flags); |
1947 | 1949 | ||
1948 | /* | 1950 | /* |
1949 | * FIXME we need to be able to register another | 1951 | * FIXME we need to be able to register another |
1950 | * gadget driver here and have everything work; | 1952 | * gadget driver here and have everything work; |
1951 | * that currently misbehaves. | 1953 | * that currently misbehaves. |
1952 | */ | 1954 | */ |
1953 | 1955 | ||
1954 | pm_runtime_put(musb->controller); | 1956 | pm_runtime_put(musb->controller); |
1955 | 1957 | ||
1956 | return 0; | 1958 | return 0; |
1957 | } | 1959 | } |
1958 | 1960 | ||
1959 | /* ----------------------------------------------------------------------- */ | 1961 | /* ----------------------------------------------------------------------- */ |
1960 | 1962 | ||
1961 | /* lifecycle operations called through plat_uds.c */ | 1963 | /* lifecycle operations called through plat_uds.c */ |
1962 | 1964 | ||
1963 | void musb_g_resume(struct musb *musb) | 1965 | void musb_g_resume(struct musb *musb) |
1964 | { | 1966 | { |
1965 | musb->is_suspended = 0; | 1967 | musb->is_suspended = 0; |
1966 | switch (musb->xceiv->state) { | 1968 | switch (musb->xceiv->state) { |
1967 | case OTG_STATE_B_IDLE: | 1969 | case OTG_STATE_B_IDLE: |
1968 | break; | 1970 | break; |
1969 | case OTG_STATE_B_WAIT_ACON: | 1971 | case OTG_STATE_B_WAIT_ACON: |
1970 | case OTG_STATE_B_PERIPHERAL: | 1972 | case OTG_STATE_B_PERIPHERAL: |
1971 | musb->is_active = 1; | 1973 | musb->is_active = 1; |
1972 | if (musb->gadget_driver && musb->gadget_driver->resume) { | 1974 | if (musb->gadget_driver && musb->gadget_driver->resume) { |
1973 | spin_unlock(&musb->lock); | 1975 | spin_unlock(&musb->lock); |
1974 | musb->gadget_driver->resume(&musb->g); | 1976 | musb->gadget_driver->resume(&musb->g); |
1975 | spin_lock(&musb->lock); | 1977 | spin_lock(&musb->lock); |
1976 | } | 1978 | } |
1977 | break; | 1979 | break; |
1978 | default: | 1980 | default: |
1979 | WARNING("unhandled RESUME transition (%s)\n", | 1981 | WARNING("unhandled RESUME transition (%s)\n", |
1980 | usb_otg_state_string(musb->xceiv->state)); | 1982 | usb_otg_state_string(musb->xceiv->state)); |
1981 | } | 1983 | } |
1982 | } | 1984 | } |
1983 | 1985 | ||
1984 | /* called when SOF packets stop for 3+ msec */ | 1986 | /* called when SOF packets stop for 3+ msec */ |
1985 | void musb_g_suspend(struct musb *musb) | 1987 | void musb_g_suspend(struct musb *musb) |
1986 | { | 1988 | { |
1987 | u8 devctl; | 1989 | u8 devctl; |
1988 | 1990 | ||
1989 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | 1991 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
1990 | dev_dbg(musb->controller, "devctl %02x\n", devctl); | 1992 | dev_dbg(musb->controller, "devctl %02x\n", devctl); |
1991 | 1993 | ||
1992 | switch (musb->xceiv->state) { | 1994 | switch (musb->xceiv->state) { |
1993 | case OTG_STATE_B_IDLE: | 1995 | case OTG_STATE_B_IDLE: |
1994 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) | 1996 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) |
1995 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; | 1997 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; |
1996 | break; | 1998 | break; |
1997 | case OTG_STATE_B_PERIPHERAL: | 1999 | case OTG_STATE_B_PERIPHERAL: |
1998 | musb->is_suspended = 1; | 2000 | musb->is_suspended = 1; |
1999 | if (musb->gadget_driver && musb->gadget_driver->suspend) { | 2001 | if (musb->gadget_driver && musb->gadget_driver->suspend) { |
2000 | spin_unlock(&musb->lock); | 2002 | spin_unlock(&musb->lock); |
2001 | musb->gadget_driver->suspend(&musb->g); | 2003 | musb->gadget_driver->suspend(&musb->g); |
2002 | spin_lock(&musb->lock); | 2004 | spin_lock(&musb->lock); |
2003 | } | 2005 | } |
2004 | break; | 2006 | break; |
2005 | default: | 2007 | default: |
2006 | /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; | 2008 | /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; |
2007 | * A_PERIPHERAL may need care too | 2009 | * A_PERIPHERAL may need care too |
2008 | */ | 2010 | */ |
2009 | WARNING("unhandled SUSPEND transition (%s)\n", | 2011 | WARNING("unhandled SUSPEND transition (%s)\n", |
2010 | usb_otg_state_string(musb->xceiv->state)); | 2012 | usb_otg_state_string(musb->xceiv->state)); |
2011 | } | 2013 | } |
2012 | } | 2014 | } |
2013 | 2015 | ||
2014 | /* Called during SRP */ | 2016 | /* Called during SRP */ |
2015 | void musb_g_wakeup(struct musb *musb) | 2017 | void musb_g_wakeup(struct musb *musb) |
2016 | { | 2018 | { |
2017 | musb_gadget_wakeup(&musb->g); | 2019 | musb_gadget_wakeup(&musb->g); |
2018 | } | 2020 | } |
2019 | 2021 | ||
2020 | /* called when VBUS drops below session threshold, and in other cases */ | 2022 | /* called when VBUS drops below session threshold, and in other cases */ |
2021 | void musb_g_disconnect(struct musb *musb) | 2023 | void musb_g_disconnect(struct musb *musb) |
2022 | { | 2024 | { |
2023 | void __iomem *mregs = musb->mregs; | 2025 | void __iomem *mregs = musb->mregs; |
2024 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); | 2026 | u8 devctl = musb_readb(mregs, MUSB_DEVCTL); |
2025 | 2027 | ||
2026 | dev_dbg(musb->controller, "devctl %02x\n", devctl); | 2028 | dev_dbg(musb->controller, "devctl %02x\n", devctl); |
2027 | 2029 | ||
2028 | /* clear HR */ | 2030 | /* clear HR */ |
2029 | musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); | 2031 | musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); |
2030 | 2032 | ||
2031 | /* don't draw vbus until new b-default session */ | 2033 | /* don't draw vbus until new b-default session */ |
2032 | (void) musb_gadget_vbus_draw(&musb->g, 0); | 2034 | (void) musb_gadget_vbus_draw(&musb->g, 0); |
2033 | 2035 | ||
2034 | musb->g.speed = USB_SPEED_UNKNOWN; | 2036 | musb->g.speed = USB_SPEED_UNKNOWN; |
2035 | if (musb->gadget_driver && musb->gadget_driver->disconnect) { | 2037 | if (musb->gadget_driver && musb->gadget_driver->disconnect) { |
2036 | spin_unlock(&musb->lock); | 2038 | spin_unlock(&musb->lock); |
2037 | musb->gadget_driver->disconnect(&musb->g); | 2039 | musb->gadget_driver->disconnect(&musb->g); |
2038 | spin_lock(&musb->lock); | 2040 | spin_lock(&musb->lock); |
2039 | } | 2041 | } |
2040 | 2042 | ||
2041 | switch (musb->xceiv->state) { | 2043 | switch (musb->xceiv->state) { |
2042 | default: | 2044 | default: |
2043 | dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", | 2045 | dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", |
2044 | usb_otg_state_string(musb->xceiv->state)); | 2046 | usb_otg_state_string(musb->xceiv->state)); |
2045 | musb->xceiv->state = OTG_STATE_A_IDLE; | 2047 | musb->xceiv->state = OTG_STATE_A_IDLE; |
2046 | MUSB_HST_MODE(musb); | 2048 | MUSB_HST_MODE(musb); |
2047 | break; | 2049 | break; |
2048 | case OTG_STATE_A_PERIPHERAL: | 2050 | case OTG_STATE_A_PERIPHERAL: |
2049 | musb->xceiv->state = OTG_STATE_A_WAIT_BCON; | 2051 | musb->xceiv->state = OTG_STATE_A_WAIT_BCON; |
2050 | MUSB_HST_MODE(musb); | 2052 | MUSB_HST_MODE(musb); |
2051 | break; | 2053 | break; |
2052 | case OTG_STATE_B_WAIT_ACON: | 2054 | case OTG_STATE_B_WAIT_ACON: |
2053 | case OTG_STATE_B_HOST: | 2055 | case OTG_STATE_B_HOST: |
2054 | case OTG_STATE_B_PERIPHERAL: | 2056 | case OTG_STATE_B_PERIPHERAL: |
2055 | case OTG_STATE_B_IDLE: | 2057 | case OTG_STATE_B_IDLE: |
2056 | musb->xceiv->state = OTG_STATE_B_IDLE; | 2058 | musb->xceiv->state = OTG_STATE_B_IDLE; |
2057 | break; | 2059 | break; |
2058 | case OTG_STATE_B_SRP_INIT: | 2060 | case OTG_STATE_B_SRP_INIT: |
2059 | break; | 2061 | break; |
2060 | } | 2062 | } |
2061 | 2063 | ||
2062 | musb->is_active = 0; | 2064 | musb->is_active = 0; |
2063 | } | 2065 | } |
2064 | 2066 | ||
2065 | void musb_g_reset(struct musb *musb) | 2067 | void musb_g_reset(struct musb *musb) |
2066 | __releases(musb->lock) | 2068 | __releases(musb->lock) |
2067 | __acquires(musb->lock) | 2069 | __acquires(musb->lock) |
2068 | { | 2070 | { |
2069 | void __iomem *mbase = musb->mregs; | 2071 | void __iomem *mbase = musb->mregs; |
2070 | u8 devctl = musb_readb(mbase, MUSB_DEVCTL); | 2072 | u8 devctl = musb_readb(mbase, MUSB_DEVCTL); |
2071 | u8 power; | 2073 | u8 power; |
2072 | 2074 | ||
2073 | dev_dbg(musb->controller, "<== %s driver '%s'\n", | 2075 | dev_dbg(musb->controller, "<== %s driver '%s'\n", |
2074 | (devctl & MUSB_DEVCTL_BDEVICE) | 2076 | (devctl & MUSB_DEVCTL_BDEVICE) |
2075 | ? "B-Device" : "A-Device", | 2077 | ? "B-Device" : "A-Device", |
2076 | musb->gadget_driver | 2078 | musb->gadget_driver |
2077 | ? musb->gadget_driver->driver.name | 2079 | ? musb->gadget_driver->driver.name |
2078 | : NULL | 2080 | : NULL |
2079 | ); | 2081 | ); |
2080 | 2082 | ||
2081 | /* report disconnect, if we didn't already (flushing EP state) */ | 2083 | /* report disconnect, if we didn't already (flushing EP state) */ |
2082 | if (musb->g.speed != USB_SPEED_UNKNOWN) | 2084 | if (musb->g.speed != USB_SPEED_UNKNOWN) |
2083 | musb_g_disconnect(musb); | 2085 | musb_g_disconnect(musb); |
2084 | 2086 | ||
2085 | /* clear HR */ | 2087 | /* clear HR */ |
2086 | else if (devctl & MUSB_DEVCTL_HR) | 2088 | else if (devctl & MUSB_DEVCTL_HR) |
2087 | musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); | 2089 | musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); |
2088 | 2090 | ||
2089 | 2091 | ||
2090 | /* what speed did we negotiate? */ | 2092 | /* what speed did we negotiate? */ |
2091 | power = musb_readb(mbase, MUSB_POWER); | 2093 | power = musb_readb(mbase, MUSB_POWER); |
2092 | musb->g.speed = (power & MUSB_POWER_HSMODE) | 2094 | musb->g.speed = (power & MUSB_POWER_HSMODE) |
2093 | ? USB_SPEED_HIGH : USB_SPEED_FULL; | 2095 | ? USB_SPEED_HIGH : USB_SPEED_FULL; |
2094 | 2096 | ||
2095 | /* start in USB_STATE_DEFAULT */ | 2097 | /* start in USB_STATE_DEFAULT */ |
2096 | musb->is_active = 1; | 2098 | musb->is_active = 1; |
2097 | musb->is_suspended = 0; | 2099 | musb->is_suspended = 0; |
2098 | MUSB_DEV_MODE(musb); | 2100 | MUSB_DEV_MODE(musb); |
2099 | musb->address = 0; | 2101 | musb->address = 0; |
2100 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; | 2102 | musb->ep0_state = MUSB_EP0_STAGE_SETUP; |
2101 | 2103 | ||
2102 | musb->may_wakeup = 0; | 2104 | musb->may_wakeup = 0; |
2103 | musb->g.b_hnp_enable = 0; | 2105 | musb->g.b_hnp_enable = 0; |
2104 | musb->g.a_alt_hnp_support = 0; | 2106 | musb->g.a_alt_hnp_support = 0; |
2105 | musb->g.a_hnp_support = 0; | 2107 | musb->g.a_hnp_support = 0; |
2106 | 2108 | ||
2107 | /* Normal reset, as B-Device; | 2109 | /* Normal reset, as B-Device; |
2108 | * or else after HNP, as A-Device | 2110 | * or else after HNP, as A-Device |
2109 | */ | 2111 | */ |
2110 | if (devctl & MUSB_DEVCTL_BDEVICE) { | 2112 | if (devctl & MUSB_DEVCTL_BDEVICE) { |
2111 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; | 2113 | musb->xceiv->state = OTG_STATE_B_PERIPHERAL; |
2112 | musb->g.is_a_peripheral = 0; | 2114 | musb->g.is_a_peripheral = 0; |
2113 | } else { | 2115 | } else { |
2114 | musb->xceiv->state = OTG_STATE_A_PERIPHERAL; | 2116 | musb->xceiv->state = OTG_STATE_A_PERIPHERAL; |
2115 | musb->g.is_a_peripheral = 1; | 2117 | musb->g.is_a_peripheral = 1; |
2116 | } | 2118 | } |
2117 | 2119 | ||
2118 | /* start with default limits on VBUS power draw */ | 2120 | /* start with default limits on VBUS power draw */ |
2119 | (void) musb_gadget_vbus_draw(&musb->g, 8); | 2121 | (void) musb_gadget_vbus_draw(&musb->g, 8); |
2120 | } | 2122 | } |
2121 | 2123 |
drivers/usb/musb/musb_host.c
1 | /* | 1 | /* |
2 | * MUSB OTG driver host support | 2 | * MUSB OTG driver host support |
3 | * | 3 | * |
4 | * Copyright 2005 Mentor Graphics Corporation | 4 | * Copyright 2005 Mentor Graphics Corporation |
5 | * Copyright (C) 2005-2006 by Texas Instruments | 5 | * Copyright (C) 2005-2006 by Texas Instruments |
6 | * Copyright (C) 2006-2007 Nokia Corporation | 6 | * Copyright (C) 2006-2007 Nokia Corporation |
7 | * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> | 7 | * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
11 | * version 2 as published by the Free Software Foundation. | 11 | * version 2 as published by the Free Software Foundation. |
12 | * | 12 | * |
13 | * This program is distributed in the hope that it will be useful, but | 13 | * This program is distributed in the hope that it will be useful, but |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | 14 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
16 | * General Public License for more details. | 16 | * General Public License for more details. |
17 | * | 17 | * |
18 | * You should have received a copy of the GNU General Public License | 18 | * You should have received a copy of the GNU General Public License |
19 | * along with this program; if not, write to the Free Software | 19 | * along with this program; if not, write to the Free Software |
20 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | 20 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA |
21 | * 02110-1301 USA | 21 | * 02110-1301 USA |
22 | * | 22 | * |
23 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED | 23 | * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED |
24 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF | 24 | * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF |
25 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN | 25 | * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN |
26 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, | 26 | * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT | 27 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
28 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF | 28 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF |
29 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON | 29 | * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON |
30 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 30 | * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 31 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
32 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 32 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
33 | * | 33 | * |
34 | */ | 34 | */ |
35 | 35 | ||
36 | #include <linux/module.h> | 36 | #include <linux/module.h> |
37 | #include <linux/kernel.h> | 37 | #include <linux/kernel.h> |
38 | #include <linux/delay.h> | 38 | #include <linux/delay.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
41 | #include <linux/errno.h> | 41 | #include <linux/errno.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/list.h> | 43 | #include <linux/list.h> |
44 | #include <linux/dma-mapping.h> | 44 | #include <linux/dma-mapping.h> |
45 | 45 | ||
46 | #include "musb_core.h" | 46 | #include "musb_core.h" |
47 | #include "musb_host.h" | 47 | #include "musb_host.h" |
48 | 48 | ||
49 | /* MUSB HOST status 22-mar-2006 | 49 | /* MUSB HOST status 22-mar-2006 |
50 | * | 50 | * |
51 | * - There's still lots of partial code duplication for fault paths, so | 51 | * - There's still lots of partial code duplication for fault paths, so |
52 | * they aren't handled as consistently as they need to be. | 52 | * they aren't handled as consistently as they need to be. |
53 | * | 53 | * |
54 | * - PIO mostly behaved when last tested. | 54 | * - PIO mostly behaved when last tested. |
55 | * + including ep0, with all usbtest cases 9, 10 | 55 | * + including ep0, with all usbtest cases 9, 10 |
56 | * + usbtest 14 (ep0out) doesn't seem to run at all | 56 | * + usbtest 14 (ep0out) doesn't seem to run at all |
57 | * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest | 57 | * + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest |
58 | * configurations, but otherwise double buffering passes basic tests. | 58 | * configurations, but otherwise double buffering passes basic tests. |
59 | * + for 2.6.N, for N > ~10, needs API changes for hcd framework. | 59 | * + for 2.6.N, for N > ~10, needs API changes for hcd framework. |
60 | * | 60 | * |
61 | * - DMA (CPPI) ... partially behaves, not currently recommended | 61 | * - DMA (CPPI) ... partially behaves, not currently recommended |
62 | * + about 1/15 the speed of typical EHCI implementations (PCI) | 62 | * + about 1/15 the speed of typical EHCI implementations (PCI) |
63 | * + RX, all too often reqpkt seems to misbehave after tx | 63 | * + RX, all too often reqpkt seems to misbehave after tx |
64 | * + TX, no known issues (other than evident silicon issue) | 64 | * + TX, no known issues (other than evident silicon issue) |
65 | * | 65 | * |
66 | * - DMA (Mentor/OMAP) ...has at least toggle update problems | 66 | * - DMA (Mentor/OMAP) ...has at least toggle update problems |
67 | * | 67 | * |
68 | * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet | 68 | * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet |
69 | * starvation ... nothing yet for TX, interrupt, or bulk. | 69 | * starvation ... nothing yet for TX, interrupt, or bulk. |
70 | * | 70 | * |
71 | * - Not tested with HNP, but some SRP paths seem to behave. | 71 | * - Not tested with HNP, but some SRP paths seem to behave. |
72 | * | 72 | * |
73 | * NOTE 24-August-2006: | 73 | * NOTE 24-August-2006: |
74 | * | 74 | * |
75 | * - Bulk traffic finally uses both sides of hardware ep1, freeing up an | 75 | * - Bulk traffic finally uses both sides of hardware ep1, freeing up an |
76 | * extra endpoint for periodic use enabling hub + keybd + mouse. That | 76 | * extra endpoint for periodic use enabling hub + keybd + mouse. That |
77 | * mostly works, except that with "usbnet" it's easy to trigger cases | 77 | * mostly works, except that with "usbnet" it's easy to trigger cases |
78 | * with "ping" where RX loses. (a) ping to davinci, even "ping -f", | 78 | * with "ping" where RX loses. (a) ping to davinci, even "ping -f", |
79 | * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses | 79 | * fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses |
80 | * although ARP RX wins. (That test was done with a full speed link.) | 80 | * although ARP RX wins. (That test was done with a full speed link.) |
81 | */ | 81 | */ |
82 | 82 | ||
83 | 83 | ||
84 | /* | 84 | /* |
85 | * NOTE on endpoint usage: | 85 | * NOTE on endpoint usage: |
86 | * | 86 | * |
87 | * CONTROL transfers all go through ep0. BULK ones go through dedicated IN | 87 | * CONTROL transfers all go through ep0. BULK ones go through dedicated IN |
88 | * and OUT endpoints ... hardware is dedicated for those "async" queue(s). | 88 | * and OUT endpoints ... hardware is dedicated for those "async" queue(s). |
89 | * (Yes, bulk _could_ use more of the endpoints than that, and would even | 89 | * (Yes, bulk _could_ use more of the endpoints than that, and would even |
90 | * benefit from it.) | 90 | * benefit from it.) |
91 | * | 91 | * |
92 | * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints. | 92 | * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints. |
93 | * So far that scheduling is both dumb and optimistic: the endpoint will be | 93 | * So far that scheduling is both dumb and optimistic: the endpoint will be |
94 | * "claimed" until its software queue is no longer refilled. No multiplexing | 94 | * "claimed" until its software queue is no longer refilled. No multiplexing |
95 | * of transfers between endpoints, or anything clever. | 95 | * of transfers between endpoints, or anything clever. |
96 | */ | 96 | */ |
97 | 97 | ||
98 | struct musb *hcd_to_musb(struct usb_hcd *hcd) | 98 | struct musb *hcd_to_musb(struct usb_hcd *hcd) |
99 | { | 99 | { |
100 | return *(struct musb **) hcd->hcd_priv; | 100 | return *(struct musb **) hcd->hcd_priv; |
101 | } | 101 | } |
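hcd_to_musb() above recovers the struct musb pointer by treating the HCD's private area as a single pointer-sized slot. That only works if the hc_driver reserves exactly sizeof(struct musb *) of private space and the glue code stores the pointer when the HCD is created. A minimal sketch of that allocation side follows; musb_create_hcd() is an illustrative helper name, not a function taken from this file.

	/* sketch, assuming the usual hc_driver/usb_create_hcd() pattern */
	static const struct hc_driver musb_hc_driver = {
		.description	= "musb-hcd",
		.hcd_priv_size	= sizeof(struct musb *),
		/* ... remaining hc_driver callbacks ... */
	};

	static struct usb_hcd *musb_create_hcd(struct musb *musb, struct device *dev)
	{
		struct usb_hcd *hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));

		if (!hcd)
			return NULL;
		/* stash the musb pointer so hcd_to_musb() can find it later */
		*(struct musb **) hcd->hcd_priv = musb;
		return hcd;
	}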
102 | 102 | ||
103 | 103 | ||
104 | static void musb_ep_program(struct musb *musb, u8 epnum, | 104 | static void musb_ep_program(struct musb *musb, u8 epnum, |
105 | struct urb *urb, int is_out, | 105 | struct urb *urb, int is_out, |
106 | u8 *buf, u32 offset, u32 len); | 106 | u8 *buf, u32 offset, u32 len); |
107 | 107 | ||
108 | /* | 108 | /* |
109 | * Clear TX fifo. Needed to avoid BABBLE errors. | 109 | * Clear TX fifo. Needed to avoid BABBLE errors. |
110 | */ | 110 | */ |
111 | static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) | 111 | static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep) |
112 | { | 112 | { |
113 | struct musb *musb = ep->musb; | 113 | struct musb *musb = ep->musb; |
114 | void __iomem *epio = ep->regs; | 114 | void __iomem *epio = ep->regs; |
115 | u16 csr; | 115 | u16 csr; |
116 | u16 lastcsr = 0; | 116 | u16 lastcsr = 0; |
117 | int retries = 1000; | 117 | int retries = 1000; |
118 | 118 | ||
119 | csr = musb_readw(epio, MUSB_TXCSR); | 119 | csr = musb_readw(epio, MUSB_TXCSR); |
120 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { | 120 | while (csr & MUSB_TXCSR_FIFONOTEMPTY) { |
121 | if (csr != lastcsr) | 121 | if (csr != lastcsr) |
122 | dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); | 122 | dev_dbg(musb->controller, "Host TX FIFONOTEMPTY csr: %02x\n", csr); |
123 | lastcsr = csr; | 123 | lastcsr = csr; |
124 | csr |= MUSB_TXCSR_FLUSHFIFO; | 124 | csr |= MUSB_TXCSR_FLUSHFIFO; |
125 | musb_writew(epio, MUSB_TXCSR, csr); | 125 | musb_writew(epio, MUSB_TXCSR, csr); |
126 | csr = musb_readw(epio, MUSB_TXCSR); | 126 | csr = musb_readw(epio, MUSB_TXCSR); |
127 | if (WARN(retries-- < 1, | 127 | if (WARN(retries-- < 1, |
128 | "Could not flush host TX%d fifo: csr: %04x\n", | 128 | "Could not flush host TX%d fifo: csr: %04x\n", |
129 | ep->epnum, csr)) | 129 | ep->epnum, csr)) |
130 | return; | 130 | return; |
131 | mdelay(1); | 131 | mdelay(1); |
132 | } | 132 | } |
133 | } | 133 | } |
134 | 134 | ||
135 | static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) | 135 | static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep) |
136 | { | 136 | { |
137 | void __iomem *epio = ep->regs; | 137 | void __iomem *epio = ep->regs; |
138 | u16 csr; | 138 | u16 csr; |
139 | int retries = 5; | 139 | int retries = 5; |
140 | 140 | ||
141 | /* scrub any data left in the fifo */ | 141 | /* scrub any data left in the fifo */ |
142 | do { | 142 | do { |
143 | csr = musb_readw(epio, MUSB_TXCSR); | 143 | csr = musb_readw(epio, MUSB_TXCSR); |
144 | if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY))) | 144 | if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY))) |
145 | break; | 145 | break; |
146 | musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO); | 146 | musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO); |
147 | csr = musb_readw(epio, MUSB_TXCSR); | 147 | csr = musb_readw(epio, MUSB_TXCSR); |
148 | udelay(10); | 148 | udelay(10); |
149 | } while (--retries); | 149 | } while (--retries); |
150 | 150 | ||
151 | WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n", | 151 | WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n", |
152 | ep->epnum, csr); | 152 | ep->epnum, csr); |
153 | 153 | ||
154 | /* and reset for the next transfer */ | 154 | /* and reset for the next transfer */ |
155 | musb_writew(epio, MUSB_TXCSR, 0); | 155 | musb_writew(epio, MUSB_TXCSR, 0); |
156 | } | 156 | } |
157 | 157 | ||
158 | /* | 158 | /* |
159 | * Start transmit. Caller is responsible for locking shared resources. | 159 | * Start transmit. Caller is responsible for locking shared resources. |
160 | * musb must be locked. | 160 | * musb must be locked. |
161 | */ | 161 | */ |
162 | static inline void musb_h_tx_start(struct musb_hw_ep *ep) | 162 | static inline void musb_h_tx_start(struct musb_hw_ep *ep) |
163 | { | 163 | { |
164 | u16 txcsr; | 164 | u16 txcsr; |
165 | 165 | ||
166 | /* NOTE: no locks here; caller should lock and select EP */ | 166 | /* NOTE: no locks here; caller should lock and select EP */ |
167 | if (ep->epnum) { | 167 | if (ep->epnum) { |
168 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | 168 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); |
169 | txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; | 169 | txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS; |
170 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | 170 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); |
171 | } else { | 171 | } else { |
172 | txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; | 172 | txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY; |
173 | musb_writew(ep->regs, MUSB_CSR0, txcsr); | 173 | musb_writew(ep->regs, MUSB_CSR0, txcsr); |
174 | } | 174 | } |
175 | 175 | ||
176 | } | 176 | } |
177 | 177 | ||
178 | static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) | 178 | static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep) |
179 | { | 179 | { |
180 | u16 txcsr; | 180 | u16 txcsr; |
181 | 181 | ||
182 | /* NOTE: no locks here; caller should lock and select EP */ | 182 | /* NOTE: no locks here; caller should lock and select EP */ |
183 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); | 183 | txcsr = musb_readw(ep->regs, MUSB_TXCSR); |
184 | txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; | 184 | txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS; |
185 | if (is_cppi_enabled()) | 185 | if (is_cppi_enabled()) |
186 | txcsr |= MUSB_TXCSR_DMAMODE; | 186 | txcsr |= MUSB_TXCSR_DMAMODE; |
187 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); | 187 | musb_writew(ep->regs, MUSB_TXCSR, txcsr); |
188 | } | 188 | } |
189 | 189 | ||
190 | static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh) | 190 | static void musb_ep_set_qh(struct musb_hw_ep *ep, int is_in, struct musb_qh *qh) |
191 | { | 191 | { |
192 | if (is_in != 0 || ep->is_shared_fifo) | 192 | if (is_in != 0 || ep->is_shared_fifo) |
193 | ep->in_qh = qh; | 193 | ep->in_qh = qh; |
194 | if (is_in == 0 || ep->is_shared_fifo) | 194 | if (is_in == 0 || ep->is_shared_fifo) |
195 | ep->out_qh = qh; | 195 | ep->out_qh = qh; |
196 | } | 196 | } |
197 | 197 | ||
198 | static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in) | 198 | static struct musb_qh *musb_ep_get_qh(struct musb_hw_ep *ep, int is_in) |
199 | { | 199 | { |
200 | return is_in ? ep->in_qh : ep->out_qh; | 200 | return is_in ? ep->in_qh : ep->out_qh; |
201 | } | 201 | } |
202 | 202 | ||
203 | /* | 203 | /* |
204 | * Start the URB at the front of an endpoint's queue | 204 | * Start the URB at the front of an endpoint's queue |
205 | * end must be claimed from the caller. | 205 | * end must be claimed from the caller. |
206 | * | 206 | * |
207 | * Context: controller locked, irqs blocked | 207 | * Context: controller locked, irqs blocked |
208 | */ | 208 | */ |
209 | static void | 209 | static void |
210 | musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) | 210 | musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh) |
211 | { | 211 | { |
212 | u16 frame; | 212 | u16 frame; |
213 | u32 len; | 213 | u32 len; |
214 | void __iomem *mbase = musb->mregs; | 214 | void __iomem *mbase = musb->mregs; |
215 | struct urb *urb = next_urb(qh); | 215 | struct urb *urb = next_urb(qh); |
216 | void *buf = urb->transfer_buffer; | 216 | void *buf = urb->transfer_buffer; |
217 | u32 offset = 0; | 217 | u32 offset = 0; |
218 | struct musb_hw_ep *hw_ep = qh->hw_ep; | 218 | struct musb_hw_ep *hw_ep = qh->hw_ep; |
219 | unsigned pipe = urb->pipe; | 219 | unsigned pipe = urb->pipe; |
220 | u8 address = usb_pipedevice(pipe); | 220 | u8 address = usb_pipedevice(pipe); |
221 | int epnum = hw_ep->epnum; | 221 | int epnum = hw_ep->epnum; |
222 | 222 | ||
223 | /* initialize software qh state */ | 223 | /* initialize software qh state */ |
224 | qh->offset = 0; | 224 | qh->offset = 0; |
225 | qh->segsize = 0; | 225 | qh->segsize = 0; |
226 | 226 | ||
227 | /* gather right source of data */ | 227 | /* gather right source of data */ |
228 | switch (qh->type) { | 228 | switch (qh->type) { |
229 | case USB_ENDPOINT_XFER_CONTROL: | 229 | case USB_ENDPOINT_XFER_CONTROL: |
230 | /* control transfers always start with SETUP */ | 230 | /* control transfers always start with SETUP */ |
231 | is_in = 0; | 231 | is_in = 0; |
232 | musb->ep0_stage = MUSB_EP0_START; | 232 | musb->ep0_stage = MUSB_EP0_START; |
233 | buf = urb->setup_packet; | 233 | buf = urb->setup_packet; |
234 | len = 8; | 234 | len = 8; |
235 | break; | 235 | break; |
236 | case USB_ENDPOINT_XFER_ISOC: | 236 | case USB_ENDPOINT_XFER_ISOC: |
237 | qh->iso_idx = 0; | 237 | qh->iso_idx = 0; |
238 | qh->frame = 0; | 238 | qh->frame = 0; |
239 | offset = urb->iso_frame_desc[0].offset; | 239 | offset = urb->iso_frame_desc[0].offset; |
240 | len = urb->iso_frame_desc[0].length; | 240 | len = urb->iso_frame_desc[0].length; |
241 | break; | 241 | break; |
242 | default: /* bulk, interrupt */ | 242 | default: /* bulk, interrupt */ |
243 | /* actual_length may be nonzero on retry paths */ | 243 | /* actual_length may be nonzero on retry paths */ |
244 | buf = urb->transfer_buffer + urb->actual_length; | 244 | buf = urb->transfer_buffer + urb->actual_length; |
245 | len = urb->transfer_buffer_length - urb->actual_length; | 245 | len = urb->transfer_buffer_length - urb->actual_length; |
246 | } | 246 | } |
247 | 247 | ||
248 | dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", | 248 | dev_dbg(musb->controller, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n", |
249 | qh, urb, address, qh->epnum, | 249 | qh, urb, address, qh->epnum, |
250 | is_in ? "in" : "out", | 250 | is_in ? "in" : "out", |
251 | ({char *s; switch (qh->type) { | 251 | ({char *s; switch (qh->type) { |
252 | case USB_ENDPOINT_XFER_CONTROL: s = ""; break; | 252 | case USB_ENDPOINT_XFER_CONTROL: s = ""; break; |
253 | case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; | 253 | case USB_ENDPOINT_XFER_BULK: s = "-bulk"; break; |
254 | case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; | 254 | case USB_ENDPOINT_XFER_ISOC: s = "-iso"; break; |
255 | default: s = "-intr"; break; | 255 | default: s = "-intr"; break; |
256 | }; s; }), | 256 | }; s; }), |
257 | epnum, buf + offset, len); | 257 | epnum, buf + offset, len); |
258 | 258 | ||
259 | /* Configure endpoint */ | 259 | /* Configure endpoint */ |
260 | musb_ep_set_qh(hw_ep, is_in, qh); | 260 | musb_ep_set_qh(hw_ep, is_in, qh); |
261 | musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len); | 261 | musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len); |
262 | 262 | ||
263 | /* transmit may have more work: start it when it is time */ | 263 | /* transmit may have more work: start it when it is time */ |
264 | if (is_in) | 264 | if (is_in) |
265 | return; | 265 | return; |
266 | 266 | ||
267 | /* determine if the time is right for a periodic transfer */ | 267 | /* determine if the time is right for a periodic transfer */ |
268 | switch (qh->type) { | 268 | switch (qh->type) { |
269 | case USB_ENDPOINT_XFER_ISOC: | 269 | case USB_ENDPOINT_XFER_ISOC: |
270 | case USB_ENDPOINT_XFER_INT: | 270 | case USB_ENDPOINT_XFER_INT: |
271 | dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n"); | 271 | dev_dbg(musb->controller, "check whether there's still time for periodic Tx\n"); |
272 | frame = musb_readw(mbase, MUSB_FRAME); | 272 | frame = musb_readw(mbase, MUSB_FRAME); |
273 | /* FIXME this doesn't implement that scheduling policy ... | 273 | /* FIXME this doesn't implement that scheduling policy ... |
274 | * or handle framecounter wrapping | 274 | * or handle framecounter wrapping |
275 | */ | 275 | */ |
276 | if (1) { /* Always assume URB_ISO_ASAP */ | 276 | if (1) { /* Always assume URB_ISO_ASAP */ |
277 | /* REVISIT the SOF irq handler shouldn't duplicate | 277 | /* REVISIT the SOF irq handler shouldn't duplicate |
278 | * this code; and we don't init urb->start_frame... | 278 | * this code; and we don't init urb->start_frame... |
279 | */ | 279 | */ |
280 | qh->frame = 0; | 280 | qh->frame = 0; |
281 | goto start; | 281 | goto start; |
282 | } else { | 282 | } else { |
283 | qh->frame = urb->start_frame; | 283 | qh->frame = urb->start_frame; |
284 | /* enable SOF interrupt so we can count down */ | 284 | /* enable SOF interrupt so we can count down */ |
285 | dev_dbg(musb->controller, "SOF for %d\n", epnum); | 285 | dev_dbg(musb->controller, "SOF for %d\n", epnum); |
286 | #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ | 286 | #if 1 /* ifndef CONFIG_ARCH_DAVINCI */ |
287 | musb_writeb(mbase, MUSB_INTRUSBE, 0xff); | 287 | musb_writeb(mbase, MUSB_INTRUSBE, 0xff); |
288 | #endif | 288 | #endif |
289 | } | 289 | } |
290 | break; | 290 | break; |
291 | default: | 291 | default: |
292 | start: | 292 | start: |
293 | dev_dbg(musb->controller, "Start TX%d %s\n", epnum, | 293 | dev_dbg(musb->controller, "Start TX%d %s\n", epnum, |
294 | hw_ep->tx_channel ? "dma" : "pio"); | 294 | hw_ep->tx_channel ? "dma" : "pio"); |
295 | 295 | ||
296 | if (!hw_ep->tx_channel) | 296 | if (!hw_ep->tx_channel) |
297 | musb_h_tx_start(hw_ep); | 297 | musb_h_tx_start(hw_ep); |
298 | else if (is_cppi_enabled() || tusb_dma_omap()) | 298 | else if (is_cppi_enabled() || tusb_dma_omap()) |
299 | musb_h_tx_dma_start(hw_ep); | 299 | musb_h_tx_dma_start(hw_ep); |
300 | } | 300 | } |
301 | } | 301 | } |
302 | 302 | ||
303 | /* Context: caller owns controller lock, IRQs are blocked */ | 303 | /* Context: caller owns controller lock, IRQs are blocked */ |
304 | static void musb_giveback(struct musb *musb, struct urb *urb, int status) | 304 | static void musb_giveback(struct musb *musb, struct urb *urb, int status) |
305 | __releases(musb->lock) | 305 | __releases(musb->lock) |
306 | __acquires(musb->lock) | 306 | __acquires(musb->lock) |
307 | { | 307 | { |
308 | dev_dbg(musb->controller, | 308 | dev_dbg(musb->controller, |
309 | "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", | 309 | "complete %p %pF (%d), dev%d ep%d%s, %d/%d\n", |
310 | urb, urb->complete, status, | 310 | urb, urb->complete, status, |
311 | usb_pipedevice(urb->pipe), | 311 | usb_pipedevice(urb->pipe), |
312 | usb_pipeendpoint(urb->pipe), | 312 | usb_pipeendpoint(urb->pipe), |
313 | usb_pipein(urb->pipe) ? "in" : "out", | 313 | usb_pipein(urb->pipe) ? "in" : "out", |
314 | urb->actual_length, urb->transfer_buffer_length | 314 | urb->actual_length, urb->transfer_buffer_length |
315 | ); | 315 | ); |
316 | 316 | ||
317 | usb_hcd_unlink_urb_from_ep(musb->hcd, urb); | 317 | usb_hcd_unlink_urb_from_ep(musb->hcd, urb); |
318 | spin_unlock(&musb->lock); | 318 | spin_unlock(&musb->lock); |
319 | usb_hcd_giveback_urb(musb->hcd, urb, status); | 319 | usb_hcd_giveback_urb(musb->hcd, urb, status); |
320 | spin_lock(&musb->lock); | 320 | spin_lock(&musb->lock); |
321 | } | 321 | } |
322 | 322 | ||
323 | /* For bulk/interrupt endpoints only */ | 323 | /* For bulk/interrupt endpoints only */ |
324 | static inline void musb_save_toggle(struct musb_qh *qh, int is_in, | 324 | static inline void musb_save_toggle(struct musb_qh *qh, int is_in, |
325 | struct urb *urb) | 325 | struct urb *urb) |
326 | { | 326 | { |
327 | void __iomem *epio = qh->hw_ep->regs; | 327 | void __iomem *epio = qh->hw_ep->regs; |
328 | u16 csr; | 328 | u16 csr; |
329 | 329 | ||
330 | /* | 330 | /* |
331 | * FIXME: the current Mentor DMA code seems to have | 331 | * FIXME: the current Mentor DMA code seems to have |
332 | * problems getting toggle correct. | 332 | * problems getting toggle correct. |
333 | */ | 333 | */ |
334 | 334 | ||
335 | if (is_in) | 335 | if (is_in) |
336 | csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE; | 336 | csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE; |
337 | else | 337 | else |
338 | csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE; | 338 | csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE; |
339 | 339 | ||
340 | usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0); | 340 | usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0); |
341 | } | 341 | } |
342 | 342 | ||
343 | /* | 343 | /* |
344 | * Advance this hardware endpoint's queue, completing the specified URB and | 344 | * Advance this hardware endpoint's queue, completing the specified URB and |
345 | * advancing to either the next URB queued to that qh, or else invalidating | 345 | * advancing to either the next URB queued to that qh, or else invalidating |
346 | * that qh and advancing to the next qh scheduled after the current one. | 346 | * that qh and advancing to the next qh scheduled after the current one. |
347 | * | 347 | * |
348 | * Context: caller owns controller lock, IRQs are blocked | 348 | * Context: caller owns controller lock, IRQs are blocked |
349 | */ | 349 | */ |
350 | static void musb_advance_schedule(struct musb *musb, struct urb *urb, | 350 | static void musb_advance_schedule(struct musb *musb, struct urb *urb, |
351 | struct musb_hw_ep *hw_ep, int is_in) | 351 | struct musb_hw_ep *hw_ep, int is_in) |
352 | { | 352 | { |
353 | struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in); | 353 | struct musb_qh *qh = musb_ep_get_qh(hw_ep, is_in); |
354 | struct musb_hw_ep *ep = qh->hw_ep; | 354 | struct musb_hw_ep *ep = qh->hw_ep; |
355 | int ready = qh->is_ready; | 355 | int ready = qh->is_ready; |
356 | int status; | 356 | int status; |
357 | 357 | ||
358 | status = (urb->status == -EINPROGRESS) ? 0 : urb->status; | 358 | status = (urb->status == -EINPROGRESS) ? 0 : urb->status; |
359 | 359 | ||
360 | /* save toggle eagerly, for paranoia */ | 360 | /* save toggle eagerly, for paranoia */ |
361 | switch (qh->type) { | 361 | switch (qh->type) { |
362 | case USB_ENDPOINT_XFER_BULK: | 362 | case USB_ENDPOINT_XFER_BULK: |
363 | case USB_ENDPOINT_XFER_INT: | 363 | case USB_ENDPOINT_XFER_INT: |
364 | musb_save_toggle(qh, is_in, urb); | 364 | musb_save_toggle(qh, is_in, urb); |
365 | break; | 365 | break; |
366 | case USB_ENDPOINT_XFER_ISOC: | 366 | case USB_ENDPOINT_XFER_ISOC: |
367 | if (status == 0 && urb->error_count) | 367 | if (status == 0 && urb->error_count) |
368 | status = -EXDEV; | 368 | status = -EXDEV; |
369 | break; | 369 | break; |
370 | } | 370 | } |
371 | 371 | ||
372 | qh->is_ready = 0; | 372 | qh->is_ready = 0; |
373 | musb_giveback(musb, urb, status); | 373 | musb_giveback(musb, urb, status); |
374 | qh->is_ready = ready; | 374 | qh->is_ready = ready; |
375 | 375 | ||
376 | /* reclaim resources (and bandwidth) ASAP; deschedule it, and | 376 | /* reclaim resources (and bandwidth) ASAP; deschedule it, and |
377 | * invalidate qh as soon as list_empty(&hep->urb_list) | 377 | * invalidate qh as soon as list_empty(&hep->urb_list) |
378 | */ | 378 | */ |
379 | if (list_empty(&qh->hep->urb_list)) { | 379 | if (list_empty(&qh->hep->urb_list)) { |
380 | struct list_head *head; | 380 | struct list_head *head; |
381 | struct dma_controller *dma = musb->dma_controller; | 381 | struct dma_controller *dma = musb->dma_controller; |
382 | 382 | ||
383 | if (is_in) { | 383 | if (is_in) { |
384 | ep->rx_reinit = 1; | 384 | ep->rx_reinit = 1; |
385 | if (ep->rx_channel) { | 385 | if (ep->rx_channel) { |
386 | dma->channel_release(ep->rx_channel); | 386 | dma->channel_release(ep->rx_channel); |
387 | ep->rx_channel = NULL; | 387 | ep->rx_channel = NULL; |
388 | } | 388 | } |
389 | } else { | 389 | } else { |
390 | ep->tx_reinit = 1; | 390 | ep->tx_reinit = 1; |
391 | if (ep->tx_channel) { | 391 | if (ep->tx_channel) { |
392 | dma->channel_release(ep->tx_channel); | 392 | dma->channel_release(ep->tx_channel); |
393 | ep->tx_channel = NULL; | 393 | ep->tx_channel = NULL; |
394 | } | 394 | } |
395 | } | 395 | } |
396 | 396 | ||
397 | /* Clobber old pointers to this qh */ | 397 | /* Clobber old pointers to this qh */ |
398 | musb_ep_set_qh(ep, is_in, NULL); | 398 | musb_ep_set_qh(ep, is_in, NULL); |
399 | qh->hep->hcpriv = NULL; | 399 | qh->hep->hcpriv = NULL; |
400 | 400 | ||
401 | switch (qh->type) { | 401 | switch (qh->type) { |
402 | 402 | ||
403 | case USB_ENDPOINT_XFER_CONTROL: | 403 | case USB_ENDPOINT_XFER_CONTROL: |
404 | case USB_ENDPOINT_XFER_BULK: | 404 | case USB_ENDPOINT_XFER_BULK: |
405 | /* fifo policy for these lists, except that NAKing | 405 | /* fifo policy for these lists, except that NAKing |
406 | * should rotate a qh to the end (for fairness). | 406 | * should rotate a qh to the end (for fairness). |
407 | */ | 407 | */ |
408 | if (qh->mux == 1) { | 408 | if (qh->mux == 1) { |
409 | head = qh->ring.prev; | 409 | head = qh->ring.prev; |
410 | list_del(&qh->ring); | 410 | list_del(&qh->ring); |
411 | kfree(qh); | 411 | kfree(qh); |
412 | qh = first_qh(head); | 412 | qh = first_qh(head); |
413 | break; | 413 | break; |
414 | } | 414 | } |
415 | 415 | ||
416 | case USB_ENDPOINT_XFER_ISOC: | 416 | case USB_ENDPOINT_XFER_ISOC: |
417 | case USB_ENDPOINT_XFER_INT: | 417 | case USB_ENDPOINT_XFER_INT: |
418 | /* this is where periodic bandwidth should be | 418 | /* this is where periodic bandwidth should be |
419 | * de-allocated if it's tracked and allocated; | 419 | * de-allocated if it's tracked and allocated; |
420 | * and where we'd update the schedule tree... | 420 | * and where we'd update the schedule tree... |
421 | */ | 421 | */ |
422 | kfree(qh); | 422 | kfree(qh); |
423 | qh = NULL; | 423 | qh = NULL; |
424 | break; | 424 | break; |
425 | } | 425 | } |
426 | } | 426 | } |
427 | 427 | ||
428 | if (qh != NULL && qh->is_ready) { | 428 | if (qh != NULL && qh->is_ready) { |
429 | dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", | 429 | dev_dbg(musb->controller, "... next ep%d %cX urb %p\n", |
430 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); | 430 | hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh)); |
431 | musb_start_urb(musb, is_in, qh); | 431 | musb_start_urb(musb, is_in, qh); |
432 | } | 432 | } |
433 | } | 433 | } |
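The completion path above first folds the URB status before giving the URB back: -EINPROGRESS becomes success, and an isochronous URB that otherwise "succeeded" but had bad packets is reported as -EXDEV. A minimal stand-alone sketch of that status resolution, using only <errno.h> constants; the function name and arguments are illustrative, not driver API:

        /* Hypothetical sketch (not driver code) of the status folding above. */
        #include <errno.h>
        #include <stdio.h>

        static int resolve_status(int urb_status, int is_isoc, unsigned error_count)
        {
                int status = (urb_status == -EINPROGRESS) ? 0 : urb_status;

                /* an isoc URB that "succeeded" but had bad packets reports -EXDEV */
                if (is_isoc && status == 0 && error_count)
                        status = -EXDEV;
                return status;
        }

        int main(void)
        {
                printf("%d\n", resolve_status(-EINPROGRESS, 0, 0));  /* 0      */
                printf("%d\n", resolve_status(-EINPROGRESS, 1, 2));  /* -EXDEV */
                printf("%d\n", resolve_status(-EPIPE, 1, 2));        /* -EPIPE */
                return 0;
        }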
434 | 434 | ||
435 | static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) | 435 | static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr) |
436 | { | 436 | { |
437 | /* we don't want fifo to fill itself again; | 437 | /* we don't want fifo to fill itself again; |
438 | * ignore dma (various models), | 438 | * ignore dma (various models), |
439 | * leave toggle alone (may not have been saved yet) | 439 | * leave toggle alone (may not have been saved yet) |
440 | */ | 440 | */ |
441 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; | 441 | csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY; |
442 | csr &= ~(MUSB_RXCSR_H_REQPKT | 442 | csr &= ~(MUSB_RXCSR_H_REQPKT |
443 | | MUSB_RXCSR_H_AUTOREQ | 443 | | MUSB_RXCSR_H_AUTOREQ |
444 | | MUSB_RXCSR_AUTOCLEAR); | 444 | | MUSB_RXCSR_AUTOCLEAR); |
445 | 445 | ||
446 | /* write 2x to allow double buffering */ | 446 | /* write 2x to allow double buffering */ |
447 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | 447 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); |
448 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | 448 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); |
449 | 449 | ||
450 | /* flush writebuffer */ | 450 | /* flush writebuffer */ |
451 | return musb_readw(hw_ep->regs, MUSB_RXCSR); | 451 | return musb_readw(hw_ep->regs, MUSB_RXCSR); |
452 | } | 452 | } |
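musb_h_flush_rxfifo() above is a plain read-modify-write on RXCSR: set the flush request bits, clear everything that could trigger more RX traffic, write twice for the double-buffered case, then read back to flush posted writes. A stand-alone sketch of just the bit manipulation; the bit positions below are assumptions for illustration, not authoritative register definitions:

        /* Hypothetical sketch of the RXCSR set/clear pattern above. */
        #include <assert.h>
        #include <stdint.h>

        #define RXCSR_RXPKTRDY   (1u << 0)
        #define RXCSR_FLUSHFIFO  (1u << 4)
        #define RXCSR_REQPKT     (1u << 5)
        #define RXCSR_AUTOREQ    (1u << 14)
        #define RXCSR_AUTOCLEAR  (1u << 15)

        static uint16_t flush_csr(uint16_t csr)
        {
                csr |= RXCSR_FLUSHFIFO | RXCSR_RXPKTRDY;                   /* request the flush   */
                csr &= ~(RXCSR_REQPKT | RXCSR_AUTOREQ | RXCSR_AUTOCLEAR);  /* stop new RX traffic */
                return csr;
        }

        int main(void)
        {
                uint16_t v = flush_csr(RXCSR_AUTOCLEAR | RXCSR_REQPKT);
                assert(v & RXCSR_FLUSHFIFO);
                assert(!(v & RXCSR_AUTOCLEAR));
                return 0;
        }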
453 | 453 | ||
454 | /* | 454 | /* |
455 | * PIO RX for a packet (or part of it). | 455 | * PIO RX for a packet (or part of it). |
456 | */ | 456 | */ |
457 | static bool | 457 | static bool |
458 | musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) | 458 | musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err) |
459 | { | 459 | { |
460 | u16 rx_count; | 460 | u16 rx_count; |
461 | u8 *buf; | 461 | u8 *buf; |
462 | u16 csr; | 462 | u16 csr; |
463 | bool done = false; | 463 | bool done = false; |
464 | u32 length; | 464 | u32 length; |
465 | int do_flush = 0; | 465 | int do_flush = 0; |
466 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | 466 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; |
467 | void __iomem *epio = hw_ep->regs; | 467 | void __iomem *epio = hw_ep->regs; |
468 | struct musb_qh *qh = hw_ep->in_qh; | 468 | struct musb_qh *qh = hw_ep->in_qh; |
469 | int pipe = urb->pipe; | 469 | int pipe = urb->pipe; |
470 | void *buffer = urb->transfer_buffer; | 470 | void *buffer = urb->transfer_buffer; |
471 | 471 | ||
472 | /* musb_ep_select(mbase, epnum); */ | 472 | /* musb_ep_select(mbase, epnum); */ |
473 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | 473 | rx_count = musb_readw(epio, MUSB_RXCOUNT); |
474 | dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, | 474 | dev_dbg(musb->controller, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count, |
475 | urb->transfer_buffer, qh->offset, | 475 | urb->transfer_buffer, qh->offset, |
476 | urb->transfer_buffer_length); | 476 | urb->transfer_buffer_length); |
477 | 477 | ||
478 | /* unload FIFO */ | 478 | /* unload FIFO */ |
479 | if (usb_pipeisoc(pipe)) { | 479 | if (usb_pipeisoc(pipe)) { |
480 | int status = 0; | 480 | int status = 0; |
481 | struct usb_iso_packet_descriptor *d; | 481 | struct usb_iso_packet_descriptor *d; |
482 | 482 | ||
483 | if (iso_err) { | 483 | if (iso_err) { |
484 | status = -EILSEQ; | 484 | status = -EILSEQ; |
485 | urb->error_count++; | 485 | urb->error_count++; |
486 | } | 486 | } |
487 | 487 | ||
488 | d = urb->iso_frame_desc + qh->iso_idx; | 488 | d = urb->iso_frame_desc + qh->iso_idx; |
489 | buf = buffer + d->offset; | 489 | buf = buffer + d->offset; |
490 | length = d->length; | 490 | length = d->length; |
491 | if (rx_count > length) { | 491 | if (rx_count > length) { |
492 | if (status == 0) { | 492 | if (status == 0) { |
493 | status = -EOVERFLOW; | 493 | status = -EOVERFLOW; |
494 | urb->error_count++; | 494 | urb->error_count++; |
495 | } | 495 | } |
496 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); | 496 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); |
497 | do_flush = 1; | 497 | do_flush = 1; |
498 | } else | 498 | } else |
499 | length = rx_count; | 499 | length = rx_count; |
500 | urb->actual_length += length; | 500 | urb->actual_length += length; |
501 | d->actual_length = length; | 501 | d->actual_length = length; |
502 | 502 | ||
503 | d->status = status; | 503 | d->status = status; |
504 | 504 | ||
505 | /* see if we are done */ | 505 | /* see if we are done */ |
506 | done = (++qh->iso_idx >= urb->number_of_packets); | 506 | done = (++qh->iso_idx >= urb->number_of_packets); |
507 | } else { | 507 | } else { |
508 | /* non-isoch */ | 508 | /* non-isoch */ |
509 | buf = buffer + qh->offset; | 509 | buf = buffer + qh->offset; |
510 | length = urb->transfer_buffer_length - qh->offset; | 510 | length = urb->transfer_buffer_length - qh->offset; |
511 | if (rx_count > length) { | 511 | if (rx_count > length) { |
512 | if (urb->status == -EINPROGRESS) | 512 | if (urb->status == -EINPROGRESS) |
513 | urb->status = -EOVERFLOW; | 513 | urb->status = -EOVERFLOW; |
514 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); | 514 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n", rx_count, length); |
515 | do_flush = 1; | 515 | do_flush = 1; |
516 | } else | 516 | } else |
517 | length = rx_count; | 517 | length = rx_count; |
518 | urb->actual_length += length; | 518 | urb->actual_length += length; |
519 | qh->offset += length; | 519 | qh->offset += length; |
520 | 520 | ||
521 | /* see if we are done */ | 521 | /* see if we are done */ |
522 | done = (urb->actual_length == urb->transfer_buffer_length) | 522 | done = (urb->actual_length == urb->transfer_buffer_length) |
523 | || (rx_count < qh->maxpacket) | 523 | || (rx_count < qh->maxpacket) |
524 | || (urb->status != -EINPROGRESS); | 524 | || (urb->status != -EINPROGRESS); |
525 | if (done | 525 | if (done |
526 | && (urb->status == -EINPROGRESS) | 526 | && (urb->status == -EINPROGRESS) |
527 | && (urb->transfer_flags & URB_SHORT_NOT_OK) | 527 | && (urb->transfer_flags & URB_SHORT_NOT_OK) |
528 | && (urb->actual_length | 528 | && (urb->actual_length |
529 | < urb->transfer_buffer_length)) | 529 | < urb->transfer_buffer_length)) |
530 | urb->status = -EREMOTEIO; | 530 | urb->status = -EREMOTEIO; |
531 | } | 531 | } |
532 | 532 | ||
533 | musb_read_fifo(hw_ep, length, buf); | 533 | musb_read_fifo(hw_ep, length, buf); |
534 | 534 | ||
535 | csr = musb_readw(epio, MUSB_RXCSR); | 535 | csr = musb_readw(epio, MUSB_RXCSR); |
536 | csr |= MUSB_RXCSR_H_WZC_BITS; | 536 | csr |= MUSB_RXCSR_H_WZC_BITS; |
537 | if (unlikely(do_flush)) | 537 | if (unlikely(do_flush)) |
538 | musb_h_flush_rxfifo(hw_ep, csr); | 538 | musb_h_flush_rxfifo(hw_ep, csr); |
539 | else { | 539 | else { |
540 | /* REVISIT this assumes AUTOCLEAR is never set */ | 540 | /* REVISIT this assumes AUTOCLEAR is never set */ |
541 | csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); | 541 | csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT); |
542 | if (!done) | 542 | if (!done) |
543 | csr |= MUSB_RXCSR_H_REQPKT; | 543 | csr |= MUSB_RXCSR_H_REQPKT; |
544 | musb_writew(epio, MUSB_RXCSR, csr); | 544 | musb_writew(epio, MUSB_RXCSR, csr); |
545 | } | 545 | } |
546 | 546 | ||
547 | return done; | 547 | return done; |
548 | } | 548 | } |
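For the non-isochronous branch above, "done" means the buffer is full, the packet was short, or an error is already recorded; only then can URB_SHORT_NOT_OK turn a short transfer into -EREMOTEIO. A small stand-alone sketch of that test; the flag value and names are placeholders, not the USB core's definitions:

        /* Hypothetical sketch of the non-isoc "are we done?" test above. */
        #include <errno.h>
        #include <stdbool.h>
        #include <stdio.h>

        #define SHORT_NOT_OK 0x1        /* stand-in for URB_SHORT_NOT_OK */

        static bool rx_done(unsigned actual, unsigned expected, unsigned rx_count,
                            unsigned maxpacket, int *status, unsigned flags)
        {
                bool done = (actual == expected) ||
                            (rx_count < maxpacket) ||
                            (*status != -EINPROGRESS);

                /* a short transfer is an error only if the submitter said so */
                if (done && *status == -EINPROGRESS &&
                    (flags & SHORT_NOT_OK) && actual < expected)
                        *status = -EREMOTEIO;
                return done;
        }

        int main(void)
        {
                int st = -EINPROGRESS;
                /* short packet ends the transfer early and flags it */
                printf("%d %d\n", rx_done(512, 1024, 64, 512, &st, SHORT_NOT_OK), st);
                return 0;
        }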
549 | 549 | ||
550 | /* we don't always need to reinit a given side of an endpoint... | 550 | /* we don't always need to reinit a given side of an endpoint... |
551 | * when we do, use tx/rx reinit routine and then construct a new CSR | 551 | * when we do, use tx/rx reinit routine and then construct a new CSR |
552 | * to address data toggle, NYET, and DMA or PIO. | 552 | * to address data toggle, NYET, and DMA or PIO. |
553 | * | 553 | * |
554 | * it's possible that driver bugs (especially for DMA) or aborting a | 554 | * it's possible that driver bugs (especially for DMA) or aborting a |
555 | * transfer might have left the endpoint busier than it should be. | 555 | * transfer might have left the endpoint busier than it should be. |
556 | * the busy/not-empty tests are basically paranoia. | 556 | * the busy/not-empty tests are basically paranoia. |
557 | */ | 557 | */ |
558 | static void | 558 | static void |
559 | musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) | 559 | musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep) |
560 | { | 560 | { |
561 | u16 csr; | 561 | u16 csr; |
562 | 562 | ||
563 | /* NOTE: we know the "rx" fifo reinit never triggers for ep0. | 563 | /* NOTE: we know the "rx" fifo reinit never triggers for ep0. |
564 | * That always uses tx_reinit since ep0 repurposes TX register | 564 | * That always uses tx_reinit since ep0 repurposes TX register |
565 | * offsets; the initial SETUP packet is also a kind of OUT. | 565 | * offsets; the initial SETUP packet is also a kind of OUT. |
566 | */ | 566 | */ |
567 | 567 | ||
568 | /* if programmed for Tx, put it in RX mode */ | 568 | /* if programmed for Tx, put it in RX mode */ |
569 | if (ep->is_shared_fifo) { | 569 | if (ep->is_shared_fifo) { |
570 | csr = musb_readw(ep->regs, MUSB_TXCSR); | 570 | csr = musb_readw(ep->regs, MUSB_TXCSR); |
571 | if (csr & MUSB_TXCSR_MODE) { | 571 | if (csr & MUSB_TXCSR_MODE) { |
572 | musb_h_tx_flush_fifo(ep); | 572 | musb_h_tx_flush_fifo(ep); |
573 | csr = musb_readw(ep->regs, MUSB_TXCSR); | 573 | csr = musb_readw(ep->regs, MUSB_TXCSR); |
574 | musb_writew(ep->regs, MUSB_TXCSR, | 574 | musb_writew(ep->regs, MUSB_TXCSR, |
575 | csr | MUSB_TXCSR_FRCDATATOG); | 575 | csr | MUSB_TXCSR_FRCDATATOG); |
576 | } | 576 | } |
577 | 577 | ||
578 | /* | 578 | /* |
579 | * Clear the MODE bit (and everything else) to enable Rx. | 579 | * Clear the MODE bit (and everything else) to enable Rx. |
580 | * NOTE: we mustn't clear the DMAMODE bit before DMAENAB. | 580 | * NOTE: we mustn't clear the DMAMODE bit before DMAENAB. |
581 | */ | 581 | */ |
582 | if (csr & MUSB_TXCSR_DMAMODE) | 582 | if (csr & MUSB_TXCSR_DMAMODE) |
583 | musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE); | 583 | musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE); |
584 | musb_writew(ep->regs, MUSB_TXCSR, 0); | 584 | musb_writew(ep->regs, MUSB_TXCSR, 0); |
585 | 585 | ||
586 | /* scrub all previous state, clearing toggle */ | 586 | /* scrub all previous state, clearing toggle */ |
587 | } else { | 587 | } else { |
588 | csr = musb_readw(ep->regs, MUSB_RXCSR); | 588 | csr = musb_readw(ep->regs, MUSB_RXCSR); |
589 | if (csr & MUSB_RXCSR_RXPKTRDY) | 589 | if (csr & MUSB_RXCSR_RXPKTRDY) |
590 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, | 590 | WARNING("rx%d, packet/%d ready?\n", ep->epnum, |
591 | musb_readw(ep->regs, MUSB_RXCOUNT)); | 591 | musb_readw(ep->regs, MUSB_RXCOUNT)); |
592 | 592 | ||
593 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); | 593 | musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG); |
594 | } | 594 | } |
595 | 595 | ||
596 | /* target addr and (for multipoint) hub addr/port */ | 596 | /* target addr and (for multipoint) hub addr/port */ |
597 | if (musb->is_multipoint) { | 597 | if (musb->is_multipoint) { |
598 | musb_write_rxfunaddr(ep->target_regs, qh->addr_reg); | 598 | musb_write_rxfunaddr(ep->target_regs, qh->addr_reg); |
599 | musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg); | 599 | musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg); |
600 | musb_write_rxhubport(ep->target_regs, qh->h_port_reg); | 600 | musb_write_rxhubport(ep->target_regs, qh->h_port_reg); |
601 | 601 | ||
602 | } else | 602 | } else |
603 | musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); | 603 | musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg); |
604 | 604 | ||
605 | /* protocol/endpoint, interval/NAKlimit, i/o size */ | 605 | /* protocol/endpoint, interval/NAKlimit, i/o size */ |
606 | musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); | 606 | musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg); |
607 | musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); | 607 | musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg); |
608 | /* NOTE: bulk combining rewrites high bits of maxpacket */ | 608 | /* NOTE: bulk combining rewrites high bits of maxpacket */ |
609 | /* Set RXMAXP with the FIFO size of the endpoint | 609 | /* Set RXMAXP with the FIFO size of the endpoint |
610 | * to disable double buffer mode. | 610 | * to disable double buffer mode. |
611 | */ | 611 | */ |
612 | if (musb->double_buffer_not_ok) | 612 | if (musb->double_buffer_not_ok) |
613 | musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); | 613 | musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx); |
614 | else | 614 | else |
615 | musb_writew(ep->regs, MUSB_RXMAXP, | 615 | musb_writew(ep->regs, MUSB_RXMAXP, |
616 | qh->maxpacket | ((qh->hb_mult - 1) << 11)); | 616 | qh->maxpacket | ((qh->hb_mult - 1) << 11)); |
617 | 617 | ||
618 | ep->rx_reinit = 0; | 618 | ep->rx_reinit = 0; |
619 | } | 619 | } |
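The RXMAXP write at the end of musb_rx_reinit() packs the high-bandwidth multiplier next to the packet size. A sketch of that encoding, with the field layout taken from the expression above rather than from a datasheet:

        /* Hypothetical sketch of the MAXP encoding used above:
         * low bits carry max packet size, bits 11..12 carry (hb_mult - 1). */
        #include <assert.h>
        #include <stdint.h>

        static uint16_t encode_maxp(uint16_t maxpacket, unsigned hb_mult)
        {
                return maxpacket | (uint16_t)((hb_mult - 1) << 11);
        }

        int main(void)
        {
                assert(encode_maxp(512, 1) == 512);              /* normal bulk          */
                assert(encode_maxp(1024, 3) == (1024 | 0x1000)); /* high-bandwidth isoc  */
                return 0;
        }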
620 | 620 | ||
621 | static bool musb_tx_dma_program(struct dma_controller *dma, | 621 | static bool musb_tx_dma_program(struct dma_controller *dma, |
622 | struct musb_hw_ep *hw_ep, struct musb_qh *qh, | 622 | struct musb_hw_ep *hw_ep, struct musb_qh *qh, |
623 | struct urb *urb, u32 offset, u32 length) | 623 | struct urb *urb, u32 offset, u32 length) |
624 | { | 624 | { |
625 | struct dma_channel *channel = hw_ep->tx_channel; | 625 | struct dma_channel *channel = hw_ep->tx_channel; |
626 | void __iomem *epio = hw_ep->regs; | 626 | void __iomem *epio = hw_ep->regs; |
627 | u16 pkt_size = qh->maxpacket; | 627 | u16 pkt_size = qh->maxpacket; |
628 | u16 csr; | 628 | u16 csr; |
629 | u8 mode; | 629 | u8 mode; |
630 | 630 | ||
631 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) | 631 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) |
632 | if (length > channel->max_len) | 632 | if (length > channel->max_len) |
633 | length = channel->max_len; | 633 | length = channel->max_len; |
634 | 634 | ||
635 | csr = musb_readw(epio, MUSB_TXCSR); | 635 | csr = musb_readw(epio, MUSB_TXCSR); |
636 | if (length > pkt_size) { | 636 | if (length > pkt_size) { |
637 | mode = 1; | 637 | mode = 1; |
638 | csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB; | 638 | csr |= MUSB_TXCSR_DMAMODE | MUSB_TXCSR_DMAENAB; |
639 | /* autoset shouldn't be set in high bandwidth */ | 639 | /* autoset shouldn't be set in high bandwidth */ |
640 | /* | 640 | /* |
641 | * Enable Autoset according to table | 641 | * Enable Autoset according to table |
642 | * below | 642 | * below |
643 | * bulk_split hb_mult Autoset_Enable | 643 | * bulk_split hb_mult Autoset_Enable |
644 | * 0 1 Yes(Normal) | 644 | * 0 1 Yes(Normal) |
645 | * 0 >1 No(High BW ISO) | 645 | * 0 >1 No(High BW ISO) |
646 | * 1 1 Yes(HS bulk) | 646 | * 1 1 Yes(HS bulk) |
647 | * 1 >1 Yes(FS bulk) | 647 | * 1 >1 Yes(FS bulk) |
648 | */ | 648 | */ |
649 | if (qh->hb_mult == 1 || (qh->hb_mult > 1 && | 649 | if (qh->hb_mult == 1 || (qh->hb_mult > 1 && |
650 | can_bulk_split(hw_ep->musb, qh->type))) | 650 | can_bulk_split(hw_ep->musb, qh->type))) |
651 | csr |= MUSB_TXCSR_AUTOSET; | 651 | csr |= MUSB_TXCSR_AUTOSET; |
652 | } else { | 652 | } else { |
653 | mode = 0; | 653 | mode = 0; |
654 | csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); | 654 | csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); |
655 | csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ | 655 | csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ |
656 | } | 656 | } |
657 | channel->desired_mode = mode; | 657 | channel->desired_mode = mode; |
658 | musb_writew(epio, MUSB_TXCSR, csr); | 658 | musb_writew(epio, MUSB_TXCSR, csr); |
659 | #else | 659 | #else |
660 | if (!is_cppi_enabled() && !tusb_dma_omap()) | 660 | if (!is_cppi_enabled() && !tusb_dma_omap()) |
661 | return false; | 661 | return false; |
662 | 662 | ||
663 | channel->actual_len = 0; | 663 | channel->actual_len = 0; |
664 | 664 | ||
665 | /* | 665 | /* |
666 | * TX uses "RNDIS" mode automatically but needs help | 666 | * TX uses "RNDIS" mode automatically but needs help |
667 | * to identify the zero-length-final-packet case. | 667 | * to identify the zero-length-final-packet case. |
668 | */ | 668 | */ |
669 | mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; | 669 | mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0; |
670 | #endif | 670 | #endif |
671 | 671 | ||
672 | qh->segsize = length; | 672 | qh->segsize = length; |
673 | 673 | ||
674 | /* | 674 | /* |
675 | * Ensure the data reaches to main memory before starting | 675 | * Ensure the data reaches to main memory before starting |
676 | * DMA transfer | 676 | * DMA transfer |
677 | */ | 677 | */ |
678 | wmb(); | 678 | wmb(); |
679 | 679 | ||
680 | if (!dma->channel_program(channel, pkt_size, mode, | 680 | if (!dma->channel_program(channel, pkt_size, mode, |
681 | urb->transfer_dma + offset, length)) { | 681 | urb->transfer_dma + offset, length)) { |
682 | dma->channel_release(channel); | 682 | dma->channel_release(channel); |
683 | hw_ep->tx_channel = NULL; | 683 | hw_ep->tx_channel = NULL; |
684 | 684 | ||
685 | csr = musb_readw(epio, MUSB_TXCSR); | 685 | csr = musb_readw(epio, MUSB_TXCSR); |
686 | csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); | 686 | csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB); |
687 | musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS); | 687 | musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS); |
688 | return false; | 688 | return false; |
689 | } | 689 | } |
690 | return true; | 690 | return true; |
691 | } | 691 | } |
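The Autoset table in the comment above reduces to a single condition. A stand-alone sketch checking all four rows; can_bulk_split here is just a boolean stand-in for the driver helper of the same name:

        /* Hypothetical sketch of the Autoset decision table quoted above. */
        #include <assert.h>
        #include <stdbool.h>

        static bool autoset_enable(bool can_bulk_split, unsigned hb_mult)
        {
                return hb_mult == 1 || (hb_mult > 1 && can_bulk_split);
        }

        int main(void)
        {
                assert( autoset_enable(false, 1));   /* 0, 1  -> yes (normal)       */
                assert(!autoset_enable(false, 2));   /* 0, >1 -> no  (high-BW ISO)  */
                assert( autoset_enable(true,  1));   /* 1, 1  -> yes (HS bulk)      */
                assert( autoset_enable(true,  2));   /* 1, >1 -> yes (FS bulk)      */
                return 0;
        }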
692 | 692 | ||
693 | /* | 693 | /* |
694 | * Program an HDRC endpoint as per the given URB | 694 | * Program an HDRC endpoint as per the given URB |
695 | * Context: irqs blocked, controller lock held | 695 | * Context: irqs blocked, controller lock held |
696 | */ | 696 | */ |
697 | static void musb_ep_program(struct musb *musb, u8 epnum, | 697 | static void musb_ep_program(struct musb *musb, u8 epnum, |
698 | struct urb *urb, int is_out, | 698 | struct urb *urb, int is_out, |
699 | u8 *buf, u32 offset, u32 len) | 699 | u8 *buf, u32 offset, u32 len) |
700 | { | 700 | { |
701 | struct dma_controller *dma_controller; | 701 | struct dma_controller *dma_controller; |
702 | struct dma_channel *dma_channel; | 702 | struct dma_channel *dma_channel; |
703 | u8 dma_ok; | 703 | u8 dma_ok; |
704 | void __iomem *mbase = musb->mregs; | 704 | void __iomem *mbase = musb->mregs; |
705 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | 705 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; |
706 | void __iomem *epio = hw_ep->regs; | 706 | void __iomem *epio = hw_ep->regs; |
707 | struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out); | 707 | struct musb_qh *qh = musb_ep_get_qh(hw_ep, !is_out); |
708 | u16 packet_sz = qh->maxpacket; | 708 | u16 packet_sz = qh->maxpacket; |
709 | u8 use_dma = 1; | 709 | u8 use_dma = 1; |
710 | u16 csr; | 710 | u16 csr; |
711 | 711 | ||
712 | dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s " | 712 | dev_dbg(musb->controller, "%s hw%d urb %p spd%d dev%d ep%d%s " |
713 | "h_addr%02x h_port%02x bytes %d\n", | 713 | "h_addr%02x h_port%02x bytes %d\n", |
714 | is_out ? "-->" : "<--", | 714 | is_out ? "-->" : "<--", |
715 | epnum, urb, urb->dev->speed, | 715 | epnum, urb, urb->dev->speed, |
716 | qh->addr_reg, qh->epnum, is_out ? "out" : "in", | 716 | qh->addr_reg, qh->epnum, is_out ? "out" : "in", |
717 | qh->h_addr_reg, qh->h_port_reg, | 717 | qh->h_addr_reg, qh->h_port_reg, |
718 | len); | 718 | len); |
719 | 719 | ||
720 | musb_ep_select(mbase, epnum); | 720 | musb_ep_select(mbase, epnum); |
721 | 721 | ||
722 | if (is_out && !len) { | 722 | if (is_out && !len) { |
723 | use_dma = 0; | 723 | use_dma = 0; |
724 | csr = musb_readw(epio, MUSB_TXCSR); | 724 | csr = musb_readw(epio, MUSB_TXCSR); |
725 | csr &= ~MUSB_TXCSR_DMAENAB; | 725 | csr &= ~MUSB_TXCSR_DMAENAB; |
726 | musb_writew(epio, MUSB_TXCSR, csr); | 726 | musb_writew(epio, MUSB_TXCSR, csr); |
727 | hw_ep->tx_channel = NULL; | 727 | hw_ep->tx_channel = NULL; |
728 | } | 728 | } |
729 | 729 | ||
730 | /* candidate for DMA? */ | 730 | /* candidate for DMA? */ |
731 | dma_controller = musb->dma_controller; | 731 | dma_controller = musb->dma_controller; |
732 | if (use_dma && is_dma_capable() && epnum && dma_controller) { | 732 | if (use_dma && is_dma_capable() && epnum && dma_controller) { |
733 | dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; | 733 | dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel; |
734 | if (!dma_channel) { | 734 | if (!dma_channel) { |
735 | dma_channel = dma_controller->channel_alloc( | 735 | dma_channel = dma_controller->channel_alloc( |
736 | dma_controller, hw_ep, is_out); | 736 | dma_controller, hw_ep, is_out); |
737 | if (is_out) | 737 | if (is_out) |
738 | hw_ep->tx_channel = dma_channel; | 738 | hw_ep->tx_channel = dma_channel; |
739 | else | 739 | else |
740 | hw_ep->rx_channel = dma_channel; | 740 | hw_ep->rx_channel = dma_channel; |
741 | } | 741 | } |
742 | } else | 742 | } else |
743 | dma_channel = NULL; | 743 | dma_channel = NULL; |
744 | 744 | ||
745 | /* make sure we clear DMAEnab, autoSet bits from previous run */ | 745 | /* make sure we clear DMAEnab, autoSet bits from previous run */ |
746 | 746 | ||
747 | /* OUT/transmit/EP0 or IN/receive? */ | 747 | /* OUT/transmit/EP0 or IN/receive? */ |
748 | if (is_out) { | 748 | if (is_out) { |
749 | u16 csr; | 749 | u16 csr; |
750 | u16 int_txe; | 750 | u16 int_txe; |
751 | u16 load_count; | 751 | u16 load_count; |
752 | 752 | ||
753 | csr = musb_readw(epio, MUSB_TXCSR); | 753 | csr = musb_readw(epio, MUSB_TXCSR); |
754 | 754 | ||
755 | /* disable interrupt in case we flush */ | 755 | /* disable interrupt in case we flush */ |
756 | int_txe = musb->intrtxe; | 756 | int_txe = musb->intrtxe; |
757 | musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); | 757 | musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); |
758 | 758 | ||
759 | /* general endpoint setup */ | 759 | /* general endpoint setup */ |
760 | if (epnum) { | 760 | if (epnum) { |
761 | /* flush all old state, set default */ | 761 | /* flush all old state, set default */ |
762 | /* | 762 | /* |
763 | * We could be flushing valid | 763 | * We could be flushing valid |
764 | * packets in double buffering | 764 | * packets in double buffering |
765 | * case | 765 | * case |
766 | */ | 766 | */ |
767 | if (!hw_ep->tx_double_buffered) | 767 | if (!hw_ep->tx_double_buffered) |
768 | musb_h_tx_flush_fifo(hw_ep); | 768 | musb_h_tx_flush_fifo(hw_ep); |
769 | 769 | ||
770 | /* | 770 | /* |
771 | * We must not clear the DMAMODE bit before or in | 771 | * We must not clear the DMAMODE bit before or in |
772 | * the same cycle with the DMAENAB bit, so we clear | 772 | * the same cycle with the DMAENAB bit, so we clear |
773 | * the latter first... | 773 | * the latter first... |
774 | */ | 774 | */ |
775 | csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT | 775 | csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT |
776 | | MUSB_TXCSR_AUTOSET | 776 | | MUSB_TXCSR_AUTOSET |
777 | | MUSB_TXCSR_DMAENAB | 777 | | MUSB_TXCSR_DMAENAB |
778 | | MUSB_TXCSR_FRCDATATOG | 778 | | MUSB_TXCSR_FRCDATATOG |
779 | | MUSB_TXCSR_H_RXSTALL | 779 | | MUSB_TXCSR_H_RXSTALL |
780 | | MUSB_TXCSR_H_ERROR | 780 | | MUSB_TXCSR_H_ERROR |
781 | | MUSB_TXCSR_TXPKTRDY | 781 | | MUSB_TXCSR_TXPKTRDY |
782 | ); | 782 | ); |
783 | csr |= MUSB_TXCSR_MODE; | 783 | csr |= MUSB_TXCSR_MODE; |
784 | 784 | ||
785 | if (!hw_ep->tx_double_buffered) { | 785 | if (!hw_ep->tx_double_buffered) { |
786 | if (usb_gettoggle(urb->dev, qh->epnum, 1)) | 786 | if (usb_gettoggle(urb->dev, qh->epnum, 1)) |
787 | csr |= MUSB_TXCSR_H_WR_DATATOGGLE | 787 | csr |= MUSB_TXCSR_H_WR_DATATOGGLE |
788 | | MUSB_TXCSR_H_DATATOGGLE; | 788 | | MUSB_TXCSR_H_DATATOGGLE; |
789 | else | 789 | else |
790 | csr |= MUSB_TXCSR_CLRDATATOG; | 790 | csr |= MUSB_TXCSR_CLRDATATOG; |
791 | } | 791 | } |
792 | 792 | ||
793 | musb_writew(epio, MUSB_TXCSR, csr); | 793 | musb_writew(epio, MUSB_TXCSR, csr); |
794 | /* REVISIT may need to clear FLUSHFIFO ... */ | 794 | /* REVISIT may need to clear FLUSHFIFO ... */ |
795 | csr &= ~MUSB_TXCSR_DMAMODE; | 795 | csr &= ~MUSB_TXCSR_DMAMODE; |
796 | musb_writew(epio, MUSB_TXCSR, csr); | 796 | musb_writew(epio, MUSB_TXCSR, csr); |
797 | csr = musb_readw(epio, MUSB_TXCSR); | 797 | csr = musb_readw(epio, MUSB_TXCSR); |
798 | } else { | 798 | } else { |
799 | /* endpoint 0: just flush */ | 799 | /* endpoint 0: just flush */ |
800 | musb_h_ep0_flush_fifo(hw_ep); | 800 | musb_h_ep0_flush_fifo(hw_ep); |
801 | } | 801 | } |
802 | 802 | ||
803 | /* target addr and (for multipoint) hub addr/port */ | 803 | /* target addr and (for multipoint) hub addr/port */ |
804 | if (musb->is_multipoint) { | 804 | if (musb->is_multipoint) { |
805 | musb_write_txfunaddr(mbase, epnum, qh->addr_reg); | 805 | musb_write_txfunaddr(mbase, epnum, qh->addr_reg); |
806 | musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg); | 806 | musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg); |
807 | musb_write_txhubport(mbase, epnum, qh->h_port_reg); | 807 | musb_write_txhubport(mbase, epnum, qh->h_port_reg); |
808 | /* FIXME if !epnum, do the same for RX ... */ | 808 | /* FIXME if !epnum, do the same for RX ... */ |
809 | } else | 809 | } else |
810 | musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); | 810 | musb_writeb(mbase, MUSB_FADDR, qh->addr_reg); |
811 | 811 | ||
812 | /* protocol/endpoint/interval/NAKlimit */ | 812 | /* protocol/endpoint/interval/NAKlimit */ |
813 | if (epnum) { | 813 | if (epnum) { |
814 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); | 814 | musb_writeb(epio, MUSB_TXTYPE, qh->type_reg); |
815 | if (musb->double_buffer_not_ok) { | 815 | if (musb->double_buffer_not_ok) { |
816 | musb_writew(epio, MUSB_TXMAXP, | 816 | musb_writew(epio, MUSB_TXMAXP, |
817 | hw_ep->max_packet_sz_tx); | 817 | hw_ep->max_packet_sz_tx); |
818 | } else if (can_bulk_split(musb, qh->type)) { | 818 | } else if (can_bulk_split(musb, qh->type)) { |
819 | qh->hb_mult = hw_ep->max_packet_sz_tx | 819 | qh->hb_mult = hw_ep->max_packet_sz_tx |
820 | / packet_sz; | 820 | / packet_sz; |
821 | musb_writew(epio, MUSB_TXMAXP, packet_sz | 821 | musb_writew(epio, MUSB_TXMAXP, packet_sz |
822 | | ((qh->hb_mult) - 1) << 11); | 822 | | ((qh->hb_mult) - 1) << 11); |
823 | } else { | 823 | } else { |
824 | musb_writew(epio, MUSB_TXMAXP, | 824 | musb_writew(epio, MUSB_TXMAXP, |
825 | qh->maxpacket | | 825 | qh->maxpacket | |
826 | ((qh->hb_mult - 1) << 11)); | 826 | ((qh->hb_mult - 1) << 11)); |
827 | } | 827 | } |
828 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); | 828 | musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg); |
829 | } else { | 829 | } else { |
830 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); | 830 | musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg); |
831 | if (musb->is_multipoint) | 831 | if (musb->is_multipoint) |
832 | musb_writeb(epio, MUSB_TYPE0, | 832 | musb_writeb(epio, MUSB_TYPE0, |
833 | qh->type_reg); | 833 | qh->type_reg); |
834 | } | 834 | } |
835 | 835 | ||
836 | if (can_bulk_split(musb, qh->type)) | 836 | if (can_bulk_split(musb, qh->type)) |
837 | load_count = min((u32) hw_ep->max_packet_sz_tx, | 837 | load_count = min((u32) hw_ep->max_packet_sz_tx, |
838 | len); | 838 | len); |
839 | else | 839 | else |
840 | load_count = min((u32) packet_sz, len); | 840 | load_count = min((u32) packet_sz, len); |
841 | 841 | ||
842 | if (dma_channel && musb_tx_dma_program(dma_controller, | 842 | if (dma_channel && musb_tx_dma_program(dma_controller, |
843 | hw_ep, qh, urb, offset, len)) | 843 | hw_ep, qh, urb, offset, len)) |
844 | load_count = 0; | 844 | load_count = 0; |
845 | 845 | ||
846 | if (load_count) { | 846 | if (load_count) { |
847 | /* PIO to load FIFO */ | 847 | /* PIO to load FIFO */ |
848 | qh->segsize = load_count; | 848 | qh->segsize = load_count; |
849 | if (!buf) { | 849 | if (!buf) { |
850 | sg_miter_start(&qh->sg_miter, urb->sg, 1, | 850 | sg_miter_start(&qh->sg_miter, urb->sg, 1, |
851 | SG_MITER_ATOMIC | 851 | SG_MITER_ATOMIC |
852 | | SG_MITER_FROM_SG); | 852 | | SG_MITER_FROM_SG); |
853 | if (!sg_miter_next(&qh->sg_miter)) { | 853 | if (!sg_miter_next(&qh->sg_miter)) { |
854 | dev_err(musb->controller, | 854 | dev_err(musb->controller, |
855 | "error: sg" | 855 | "error: sg" |
856 | "list empty\n"); | 856 | "list empty\n"); |
857 | sg_miter_stop(&qh->sg_miter); | 857 | sg_miter_stop(&qh->sg_miter); |
858 | goto finish; | 858 | goto finish; |
859 | } | 859 | } |
860 | buf = qh->sg_miter.addr + urb->sg->offset + | 860 | buf = qh->sg_miter.addr + urb->sg->offset + |
861 | urb->actual_length; | 861 | urb->actual_length; |
862 | load_count = min_t(u32, load_count, | 862 | load_count = min_t(u32, load_count, |
863 | qh->sg_miter.length); | 863 | qh->sg_miter.length); |
864 | musb_write_fifo(hw_ep, load_count, buf); | 864 | musb_write_fifo(hw_ep, load_count, buf); |
865 | qh->sg_miter.consumed = load_count; | 865 | qh->sg_miter.consumed = load_count; |
866 | sg_miter_stop(&qh->sg_miter); | 866 | sg_miter_stop(&qh->sg_miter); |
867 | } else | 867 | } else |
868 | musb_write_fifo(hw_ep, load_count, buf); | 868 | musb_write_fifo(hw_ep, load_count, buf); |
869 | } | 869 | } |
870 | finish: | 870 | finish: |
871 | /* re-enable interrupt */ | 871 | /* re-enable interrupt */ |
872 | musb_writew(mbase, MUSB_INTRTXE, int_txe); | 872 | musb_writew(mbase, MUSB_INTRTXE, int_txe); |
873 | 873 | ||
874 | /* IN/receive */ | 874 | /* IN/receive */ |
875 | } else { | 875 | } else { |
876 | u16 csr; | 876 | u16 csr; |
877 | 877 | ||
878 | if (hw_ep->rx_reinit) { | 878 | if (hw_ep->rx_reinit) { |
879 | musb_rx_reinit(musb, qh, hw_ep); | 879 | musb_rx_reinit(musb, qh, hw_ep); |
880 | 880 | ||
881 | /* init new state: toggle and NYET, maybe DMA later */ | 881 | /* init new state: toggle and NYET, maybe DMA later */ |
882 | if (usb_gettoggle(urb->dev, qh->epnum, 0)) | 882 | if (usb_gettoggle(urb->dev, qh->epnum, 0)) |
883 | csr = MUSB_RXCSR_H_WR_DATATOGGLE | 883 | csr = MUSB_RXCSR_H_WR_DATATOGGLE |
884 | | MUSB_RXCSR_H_DATATOGGLE; | 884 | | MUSB_RXCSR_H_DATATOGGLE; |
885 | else | 885 | else |
886 | csr = 0; | 886 | csr = 0; |
887 | if (qh->type == USB_ENDPOINT_XFER_INT) | 887 | if (qh->type == USB_ENDPOINT_XFER_INT) |
888 | csr |= MUSB_RXCSR_DISNYET; | 888 | csr |= MUSB_RXCSR_DISNYET; |
889 | 889 | ||
890 | } else { | 890 | } else { |
891 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | 891 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); |
892 | 892 | ||
893 | if (csr & (MUSB_RXCSR_RXPKTRDY | 893 | if (csr & (MUSB_RXCSR_RXPKTRDY |
894 | | MUSB_RXCSR_DMAENAB | 894 | | MUSB_RXCSR_DMAENAB |
895 | | MUSB_RXCSR_H_REQPKT)) | 895 | | MUSB_RXCSR_H_REQPKT)) |
896 | ERR("broken !rx_reinit, ep%d csr %04x\n", | 896 | ERR("broken !rx_reinit, ep%d csr %04x\n", |
897 | hw_ep->epnum, csr); | 897 | hw_ep->epnum, csr); |
898 | 898 | ||
899 | /* scrub any stale state, leaving toggle alone */ | 899 | /* scrub any stale state, leaving toggle alone */ |
900 | csr &= MUSB_RXCSR_DISNYET; | 900 | csr &= MUSB_RXCSR_DISNYET; |
901 | } | 901 | } |
902 | 902 | ||
903 | /* kick things off */ | 903 | /* kick things off */ |
904 | 904 | ||
905 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { | 905 | if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) { |
906 | /* Candidate for DMA */ | 906 | /* Candidate for DMA */ |
907 | dma_channel->actual_len = 0L; | 907 | dma_channel->actual_len = 0L; |
908 | qh->segsize = len; | 908 | qh->segsize = len; |
909 | 909 | ||
910 | /* AUTOREQ is in a DMA register */ | 910 | /* AUTOREQ is in a DMA register */ |
911 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | 911 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); |
912 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | 912 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); |
913 | 913 | ||
914 | /* | 914 | /* |
915 | * Unless caller treats short RX transfers as | 915 | * Unless caller treats short RX transfers as |
916 | * errors, we dare not queue multiple transfers. | 916 | * errors, we dare not queue multiple transfers. |
917 | */ | 917 | */ |
918 | dma_ok = dma_controller->channel_program(dma_channel, | 918 | dma_ok = dma_controller->channel_program(dma_channel, |
919 | packet_sz, !(urb->transfer_flags & | 919 | packet_sz, !(urb->transfer_flags & |
920 | URB_SHORT_NOT_OK), | 920 | URB_SHORT_NOT_OK), |
921 | urb->transfer_dma + offset, | 921 | urb->transfer_dma + offset, |
922 | qh->segsize); | 922 | qh->segsize); |
923 | if (!dma_ok) { | 923 | if (!dma_ok) { |
924 | dma_controller->channel_release(dma_channel); | 924 | dma_controller->channel_release(dma_channel); |
925 | hw_ep->rx_channel = dma_channel = NULL; | 925 | hw_ep->rx_channel = dma_channel = NULL; |
926 | } else | 926 | } else |
927 | csr |= MUSB_RXCSR_DMAENAB; | 927 | csr |= MUSB_RXCSR_DMAENAB; |
928 | } | 928 | } |
929 | 929 | ||
930 | csr |= MUSB_RXCSR_H_REQPKT; | 930 | csr |= MUSB_RXCSR_H_REQPKT; |
931 | dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr); | 931 | dev_dbg(musb->controller, "RXCSR%d := %04x\n", epnum, csr); |
932 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); | 932 | musb_writew(hw_ep->regs, MUSB_RXCSR, csr); |
933 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); | 933 | csr = musb_readw(hw_ep->regs, MUSB_RXCSR); |
934 | } | 934 | } |
935 | } | 935 | } |
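In the TX half of musb_ep_program() above, the PIO load size is the smaller of the transfer length and either the FIFO size (when bulk splitting is allowed) or the packet size, and it drops to zero once DMA has been programmed successfully. A simplified stand-alone sketch of that calculation; all names are illustrative:

        /* Hypothetical sketch of the PIO load_count selection above. */
        #include <stdio.h>

        static unsigned pio_load_count(unsigned len, unsigned packet_sz,
                                       unsigned fifo_sz, int bulk_split, int dma_ok)
        {
                unsigned load = bulk_split ? fifo_sz : packet_sz;

                if (load > len)
                        load = len;
                if (dma_ok)             /* DMA programmed: nothing left for PIO */
                        load = 0;
                return load;
        }

        int main(void)
        {
                printf("%u\n", pio_load_count(3000, 512, 4096, 1, 0)); /* 3000 */
                printf("%u\n", pio_load_count(3000, 512, 4096, 0, 0)); /* 512  */
                printf("%u\n", pio_load_count(3000, 512, 4096, 1, 1)); /* 0    */
                return 0;
        }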
936 | 936 | ||
937 | /* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to | 937 | /* Schedule next QH from musb->in_bulk/out_bulk and move the current qh to |
938 | * the end; avoids starvation for other endpoints. | 938 | * the end; avoids starvation for other endpoints. |
939 | */ | 939 | */ |
940 | static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, | 940 | static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep, |
941 | int is_in) | 941 | int is_in) |
942 | { | 942 | { |
943 | struct dma_channel *dma; | 943 | struct dma_channel *dma; |
944 | struct urb *urb; | 944 | struct urb *urb; |
945 | void __iomem *mbase = musb->mregs; | 945 | void __iomem *mbase = musb->mregs; |
946 | void __iomem *epio = ep->regs; | 946 | void __iomem *epio = ep->regs; |
947 | struct musb_qh *cur_qh, *next_qh; | 947 | struct musb_qh *cur_qh, *next_qh; |
948 | u16 rx_csr, tx_csr; | 948 | u16 rx_csr, tx_csr; |
949 | 949 | ||
950 | musb_ep_select(mbase, ep->epnum); | 950 | musb_ep_select(mbase, ep->epnum); |
951 | if (is_in) { | 951 | if (is_in) { |
952 | dma = is_dma_capable() ? ep->rx_channel : NULL; | 952 | dma = is_dma_capable() ? ep->rx_channel : NULL; |
953 | 953 | ||
954 | /* clear nak timeout bit */ | 954 | /* clear nak timeout bit */ |
955 | rx_csr = musb_readw(epio, MUSB_RXCSR); | 955 | rx_csr = musb_readw(epio, MUSB_RXCSR); |
956 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; | 956 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; |
957 | rx_csr &= ~MUSB_RXCSR_DATAERROR; | 957 | rx_csr &= ~MUSB_RXCSR_DATAERROR; |
958 | musb_writew(epio, MUSB_RXCSR, rx_csr); | 958 | musb_writew(epio, MUSB_RXCSR, rx_csr); |
959 | 959 | ||
960 | cur_qh = first_qh(&musb->in_bulk); | 960 | cur_qh = first_qh(&musb->in_bulk); |
961 | } else { | 961 | } else { |
962 | dma = is_dma_capable() ? ep->tx_channel : NULL; | 962 | dma = is_dma_capable() ? ep->tx_channel : NULL; |
963 | 963 | ||
964 | /* clear nak timeout bit */ | 964 | /* clear nak timeout bit */ |
965 | tx_csr = musb_readw(epio, MUSB_TXCSR); | 965 | tx_csr = musb_readw(epio, MUSB_TXCSR); |
966 | tx_csr |= MUSB_TXCSR_H_WZC_BITS; | 966 | tx_csr |= MUSB_TXCSR_H_WZC_BITS; |
967 | tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT; | 967 | tx_csr &= ~MUSB_TXCSR_H_NAKTIMEOUT; |
968 | musb_writew(epio, MUSB_TXCSR, tx_csr); | 968 | musb_writew(epio, MUSB_TXCSR, tx_csr); |
969 | 969 | ||
970 | cur_qh = first_qh(&musb->out_bulk); | 970 | cur_qh = first_qh(&musb->out_bulk); |
971 | } | 971 | } |
972 | if (cur_qh) { | 972 | if (cur_qh) { |
973 | urb = next_urb(cur_qh); | 973 | urb = next_urb(cur_qh); |
974 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 974 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
975 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | 975 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; |
976 | musb->dma_controller->channel_abort(dma); | 976 | musb->dma_controller->channel_abort(dma); |
977 | urb->actual_length += dma->actual_len; | 977 | urb->actual_length += dma->actual_len; |
978 | dma->actual_len = 0L; | 978 | dma->actual_len = 0L; |
979 | } | 979 | } |
980 | musb_save_toggle(cur_qh, is_in, urb); | 980 | musb_save_toggle(cur_qh, is_in, urb); |
981 | 981 | ||
982 | if (is_in) { | 982 | if (is_in) { |
983 | /* move cur_qh to end of queue */ | 983 | /* move cur_qh to end of queue */ |
984 | list_move_tail(&cur_qh->ring, &musb->in_bulk); | 984 | list_move_tail(&cur_qh->ring, &musb->in_bulk); |
985 | 985 | ||
986 | /* get the next qh from musb->in_bulk */ | 986 | /* get the next qh from musb->in_bulk */ |
987 | next_qh = first_qh(&musb->in_bulk); | 987 | next_qh = first_qh(&musb->in_bulk); |
988 | 988 | ||
989 | /* set rx_reinit and schedule the next qh */ | 989 | /* set rx_reinit and schedule the next qh */ |
990 | ep->rx_reinit = 1; | 990 | ep->rx_reinit = 1; |
991 | } else { | 991 | } else { |
992 | /* move cur_qh to end of queue */ | 992 | /* move cur_qh to end of queue */ |
993 | list_move_tail(&cur_qh->ring, &musb->out_bulk); | 993 | list_move_tail(&cur_qh->ring, &musb->out_bulk); |
994 | 994 | ||
995 | /* get the next qh from musb->out_bulk */ | 995 | /* get the next qh from musb->out_bulk */ |
996 | next_qh = first_qh(&musb->out_bulk); | 996 | next_qh = first_qh(&musb->out_bulk); |
997 | 997 | ||
998 | /* set tx_reinit and schedule the next qh */ | 998 | /* set tx_reinit and schedule the next qh */ |
999 | ep->tx_reinit = 1; | 999 | ep->tx_reinit = 1; |
1000 | } | 1000 | } |
1001 | musb_start_urb(musb, is_in, next_qh); | 1001 | musb_start_urb(musb, is_in, next_qh); |
1002 | } | 1002 | } |
1003 | } | 1003 | } |
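The NAK-timeout handler above keeps bulk endpoints fair by moving the NAKing qh to the tail of the bulk list and starting the next one. A toy round-robin sketch of the same idea, using a plain array instead of the kernel's list_move_tail():

        /* Hypothetical round-robin sketch of "move the NAKing transfer to the back". */
        #include <stdio.h>

        static void rotate_to_tail(int q[], int n)
        {
                int head = q[0];

                for (int i = 1; i < n; i++)
                        q[i - 1] = q[i];   /* shift everyone forward   */
                q[n - 1] = head;           /* old head now waits again */
        }

        int main(void)
        {
                int queue[] = { 11, 22, 33 };   /* 11 keeps NAKing */

                rotate_to_tail(queue, 3);
                for (int i = 0; i < 3; i++)
                        printf("%d ", queue[i]);   /* 22 33 11 */
                printf("\n");
                return 0;
        }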
1004 | 1004 | ||
1005 | /* | 1005 | /* |
1006 | * Service the default endpoint (ep0) as host. | 1006 | * Service the default endpoint (ep0) as host. |
1007 | * Return true until it's time to start the status stage. | 1007 | * Return true until it's time to start the status stage. |
1008 | */ | 1008 | */ |
1009 | static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) | 1009 | static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb) |
1010 | { | 1010 | { |
1011 | bool more = false; | 1011 | bool more = false; |
1012 | u8 *fifo_dest = NULL; | 1012 | u8 *fifo_dest = NULL; |
1013 | u16 fifo_count = 0; | 1013 | u16 fifo_count = 0; |
1014 | struct musb_hw_ep *hw_ep = musb->control_ep; | 1014 | struct musb_hw_ep *hw_ep = musb->control_ep; |
1015 | struct musb_qh *qh = hw_ep->in_qh; | 1015 | struct musb_qh *qh = hw_ep->in_qh; |
1016 | struct usb_ctrlrequest *request; | 1016 | struct usb_ctrlrequest *request; |
1017 | 1017 | ||
1018 | switch (musb->ep0_stage) { | 1018 | switch (musb->ep0_stage) { |
1019 | case MUSB_EP0_IN: | 1019 | case MUSB_EP0_IN: |
1020 | fifo_dest = urb->transfer_buffer + urb->actual_length; | 1020 | fifo_dest = urb->transfer_buffer + urb->actual_length; |
1021 | fifo_count = min_t(size_t, len, urb->transfer_buffer_length - | 1021 | fifo_count = min_t(size_t, len, urb->transfer_buffer_length - |
1022 | urb->actual_length); | 1022 | urb->actual_length); |
1023 | if (fifo_count < len) | 1023 | if (fifo_count < len) |
1024 | urb->status = -EOVERFLOW; | 1024 | urb->status = -EOVERFLOW; |
1025 | 1025 | ||
1026 | musb_read_fifo(hw_ep, fifo_count, fifo_dest); | 1026 | musb_read_fifo(hw_ep, fifo_count, fifo_dest); |
1027 | 1027 | ||
1028 | urb->actual_length += fifo_count; | 1028 | urb->actual_length += fifo_count; |
1029 | if (len < qh->maxpacket) { | 1029 | if (len < qh->maxpacket) { |
1030 | /* always terminate on short read; it's | 1030 | /* always terminate on short read; it's |
1031 | * rarely reported as an error. | 1031 | * rarely reported as an error. |
1032 | */ | 1032 | */ |
1033 | } else if (urb->actual_length < | 1033 | } else if (urb->actual_length < |
1034 | urb->transfer_buffer_length) | 1034 | urb->transfer_buffer_length) |
1035 | more = true; | 1035 | more = true; |
1036 | break; | 1036 | break; |
1037 | case MUSB_EP0_START: | 1037 | case MUSB_EP0_START: |
1038 | request = (struct usb_ctrlrequest *) urb->setup_packet; | 1038 | request = (struct usb_ctrlrequest *) urb->setup_packet; |
1039 | 1039 | ||
1040 | if (!request->wLength) { | 1040 | if (!request->wLength) { |
1041 | dev_dbg(musb->controller, "start no-DATA\n"); | 1041 | dev_dbg(musb->controller, "start no-DATA\n"); |
1042 | break; | 1042 | break; |
1043 | } else if (request->bRequestType & USB_DIR_IN) { | 1043 | } else if (request->bRequestType & USB_DIR_IN) { |
1044 | dev_dbg(musb->controller, "start IN-DATA\n"); | 1044 | dev_dbg(musb->controller, "start IN-DATA\n"); |
1045 | musb->ep0_stage = MUSB_EP0_IN; | 1045 | musb->ep0_stage = MUSB_EP0_IN; |
1046 | more = true; | 1046 | more = true; |
1047 | break; | 1047 | break; |
1048 | } else { | 1048 | } else { |
1049 | dev_dbg(musb->controller, "start OUT-DATA\n"); | 1049 | dev_dbg(musb->controller, "start OUT-DATA\n"); |
1050 | musb->ep0_stage = MUSB_EP0_OUT; | 1050 | musb->ep0_stage = MUSB_EP0_OUT; |
1051 | more = true; | 1051 | more = true; |
1052 | } | 1052 | } |
1053 | /* FALLTHROUGH */ | 1053 | /* FALLTHROUGH */ |
1054 | case MUSB_EP0_OUT: | 1054 | case MUSB_EP0_OUT: |
1055 | fifo_count = min_t(size_t, qh->maxpacket, | 1055 | fifo_count = min_t(size_t, qh->maxpacket, |
1056 | urb->transfer_buffer_length - | 1056 | urb->transfer_buffer_length - |
1057 | urb->actual_length); | 1057 | urb->actual_length); |
1058 | if (fifo_count) { | 1058 | if (fifo_count) { |
1059 | fifo_dest = (u8 *) (urb->transfer_buffer | 1059 | fifo_dest = (u8 *) (urb->transfer_buffer |
1060 | + urb->actual_length); | 1060 | + urb->actual_length); |
1061 | dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n", | 1061 | dev_dbg(musb->controller, "Sending %d byte%s to ep0 fifo %p\n", |
1062 | fifo_count, | 1062 | fifo_count, |
1063 | (fifo_count == 1) ? "" : "s", | 1063 | (fifo_count == 1) ? "" : "s", |
1064 | fifo_dest); | 1064 | fifo_dest); |
1065 | musb_write_fifo(hw_ep, fifo_count, fifo_dest); | 1065 | musb_write_fifo(hw_ep, fifo_count, fifo_dest); |
1066 | 1066 | ||
1067 | urb->actual_length += fifo_count; | 1067 | urb->actual_length += fifo_count; |
1068 | more = true; | 1068 | more = true; |
1069 | } | 1069 | } |
1070 | break; | 1070 | break; |
1071 | default: | 1071 | default: |
1072 | ERR("bogus ep0 stage %d\n", musb->ep0_stage); | 1072 | ERR("bogus ep0 stage %d\n", musb->ep0_stage); |
1073 | break; | 1073 | break; |
1074 | } | 1074 | } |
1075 | 1075 | ||
1076 | return more; | 1076 | return more; |
1077 | } | 1077 | } |
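The MUSB_EP0_START case above picks the next control stage straight from the SETUP packet: no data stage when wLength is zero, an IN data stage when the direction bit is set, an OUT data stage otherwise. A stand-alone sketch of that decision; the enum is illustrative and 0x80 is the standard USB direction-IN bit:

        /* Hypothetical sketch of the ep0 stage choice made above. */
        #include <stdint.h>
        #include <stdio.h>

        enum ep0_stage { EP0_STATUS, EP0_IN, EP0_OUT };

        static enum ep0_stage next_stage(uint8_t bRequestType, uint16_t wLength)
        {
                if (!wLength)
                        return EP0_STATUS;              /* no data stage at all */
                return (bRequestType & 0x80) ? EP0_IN : EP0_OUT;
        }

        int main(void)
        {
                printf("%d\n", next_stage(0x80, 18));   /* GET_DESCRIPTOR -> EP0_IN */
                printf("%d\n", next_stage(0x00, 0));    /* SET_ADDRESS    -> status */
                return 0;
        }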
1078 | 1078 | ||
1079 | /* | 1079 | /* |
1080 | * Handle default endpoint interrupt as host. Only called in IRQ time | 1080 | * Handle default endpoint interrupt as host. Only called in IRQ time |
1081 | * from musb_interrupt(). | 1081 | * from musb_interrupt(). |
1082 | * | 1082 | * |
1083 | * called with controller irqlocked | 1083 | * called with controller irqlocked |
1084 | */ | 1084 | */ |
1085 | irqreturn_t musb_h_ep0_irq(struct musb *musb) | 1085 | irqreturn_t musb_h_ep0_irq(struct musb *musb) |
1086 | { | 1086 | { |
1087 | struct urb *urb; | 1087 | struct urb *urb; |
1088 | u16 csr, len; | 1088 | u16 csr, len; |
1089 | int status = 0; | 1089 | int status = 0; |
1090 | void __iomem *mbase = musb->mregs; | 1090 | void __iomem *mbase = musb->mregs; |
1091 | struct musb_hw_ep *hw_ep = musb->control_ep; | 1091 | struct musb_hw_ep *hw_ep = musb->control_ep; |
1092 | void __iomem *epio = hw_ep->regs; | 1092 | void __iomem *epio = hw_ep->regs; |
1093 | struct musb_qh *qh = hw_ep->in_qh; | 1093 | struct musb_qh *qh = hw_ep->in_qh; |
1094 | bool complete = false; | 1094 | bool complete = false; |
1095 | irqreturn_t retval = IRQ_NONE; | 1095 | irqreturn_t retval = IRQ_NONE; |
1096 | 1096 | ||
1097 | /* ep0 only has one queue, "in" */ | 1097 | /* ep0 only has one queue, "in" */ |
1098 | urb = next_urb(qh); | 1098 | urb = next_urb(qh); |
1099 | 1099 | ||
1100 | musb_ep_select(mbase, 0); | 1100 | musb_ep_select(mbase, 0); |
1101 | csr = musb_readw(epio, MUSB_CSR0); | 1101 | csr = musb_readw(epio, MUSB_CSR0); |
1102 | len = (csr & MUSB_CSR0_RXPKTRDY) | 1102 | len = (csr & MUSB_CSR0_RXPKTRDY) |
1103 | ? musb_readb(epio, MUSB_COUNT0) | 1103 | ? musb_readb(epio, MUSB_COUNT0) |
1104 | : 0; | 1104 | : 0; |
1105 | 1105 | ||
1106 | dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", | 1106 | dev_dbg(musb->controller, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n", |
1107 | csr, qh, len, urb, musb->ep0_stage); | 1107 | csr, qh, len, urb, musb->ep0_stage); |
1108 | 1108 | ||
1109 | /* if we just did status stage, we are done */ | 1109 | /* if we just did status stage, we are done */ |
1110 | if (MUSB_EP0_STATUS == musb->ep0_stage) { | 1110 | if (MUSB_EP0_STATUS == musb->ep0_stage) { |
1111 | retval = IRQ_HANDLED; | 1111 | retval = IRQ_HANDLED; |
1112 | complete = true; | 1112 | complete = true; |
1113 | } | 1113 | } |
1114 | 1114 | ||
1115 | /* prepare status */ | 1115 | /* prepare status */ |
1116 | if (csr & MUSB_CSR0_H_RXSTALL) { | 1116 | if (csr & MUSB_CSR0_H_RXSTALL) { |
1117 | dev_dbg(musb->controller, "STALLING ENDPOINT\n"); | 1117 | dev_dbg(musb->controller, "STALLING ENDPOINT\n"); |
1118 | status = -EPIPE; | 1118 | status = -EPIPE; |
1119 | 1119 | ||
1120 | } else if (csr & MUSB_CSR0_H_ERROR) { | 1120 | } else if (csr & MUSB_CSR0_H_ERROR) { |
1121 | dev_dbg(musb->controller, "no response, csr0 %04x\n", csr); | 1121 | dev_dbg(musb->controller, "no response, csr0 %04x\n", csr); |
1122 | status = -EPROTO; | 1122 | status = -EPROTO; |
1123 | 1123 | ||
1124 | } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { | 1124 | } else if (csr & MUSB_CSR0_H_NAKTIMEOUT) { |
1125 | dev_dbg(musb->controller, "control NAK timeout\n"); | 1125 | dev_dbg(musb->controller, "control NAK timeout\n"); |
1126 | 1126 | ||
1127 | /* NOTE: this code path would be a good place to PAUSE a | 1127 | /* NOTE: this code path would be a good place to PAUSE a |
1128 | * control transfer, if another one is queued, so that | 1128 | * control transfer, if another one is queued, so that |
1129 | * ep0 is more likely to stay busy. That's already done | 1129 | * ep0 is more likely to stay busy. That's already done |
1130 | * for bulk RX transfers. | 1130 | * for bulk RX transfers. |
1131 | * | 1131 | * |
1132 | * if (qh->ring.next != &musb->control), then | 1132 | * if (qh->ring.next != &musb->control), then |
1133 | * we have a candidate... NAKing is *NOT* an error | 1133 | * we have a candidate... NAKing is *NOT* an error |
1134 | */ | 1134 | */ |
1135 | musb_writew(epio, MUSB_CSR0, 0); | 1135 | musb_writew(epio, MUSB_CSR0, 0); |
1136 | retval = IRQ_HANDLED; | 1136 | retval = IRQ_HANDLED; |
1137 | } | 1137 | } |
1138 | 1138 | ||
1139 | if (status) { | 1139 | if (status) { |
1140 | dev_dbg(musb->controller, "aborting\n"); | 1140 | dev_dbg(musb->controller, "aborting\n"); |
1141 | retval = IRQ_HANDLED; | 1141 | retval = IRQ_HANDLED; |
1142 | if (urb) | 1142 | if (urb) |
1143 | urb->status = status; | 1143 | urb->status = status; |
1144 | complete = true; | 1144 | complete = true; |
1145 | 1145 | ||
1146 | /* use the proper sequence to abort the transfer */ | 1146 | /* use the proper sequence to abort the transfer */ |
1147 | if (csr & MUSB_CSR0_H_REQPKT) { | 1147 | if (csr & MUSB_CSR0_H_REQPKT) { |
1148 | csr &= ~MUSB_CSR0_H_REQPKT; | 1148 | csr &= ~MUSB_CSR0_H_REQPKT; |
1149 | musb_writew(epio, MUSB_CSR0, csr); | 1149 | musb_writew(epio, MUSB_CSR0, csr); |
1150 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; | 1150 | csr &= ~MUSB_CSR0_H_NAKTIMEOUT; |
1151 | musb_writew(epio, MUSB_CSR0, csr); | 1151 | musb_writew(epio, MUSB_CSR0, csr); |
1152 | } else { | 1152 | } else { |
1153 | musb_h_ep0_flush_fifo(hw_ep); | 1153 | musb_h_ep0_flush_fifo(hw_ep); |
1154 | } | 1154 | } |
1155 | 1155 | ||
1156 | musb_writeb(epio, MUSB_NAKLIMIT0, 0); | 1156 | musb_writeb(epio, MUSB_NAKLIMIT0, 0); |
1157 | 1157 | ||
1158 | /* clear it */ | 1158 | /* clear it */ |
1159 | musb_writew(epio, MUSB_CSR0, 0); | 1159 | musb_writew(epio, MUSB_CSR0, 0); |
1160 | } | 1160 | } |
1161 | 1161 | ||
1162 | if (unlikely(!urb)) { | 1162 | if (unlikely(!urb)) { |
1163 | /* stop endpoint since we have no place for its data, this | 1163 | /* stop endpoint since we have no place for its data, this |
1164 | * SHOULD NEVER HAPPEN! */ | 1164 | * SHOULD NEVER HAPPEN! */ |
1165 | ERR("no URB for end 0\n"); | 1165 | ERR("no URB for end 0\n"); |
1166 | 1166 | ||
1167 | musb_h_ep0_flush_fifo(hw_ep); | 1167 | musb_h_ep0_flush_fifo(hw_ep); |
1168 | goto done; | 1168 | goto done; |
1169 | } | 1169 | } |
1170 | 1170 | ||
1171 | if (!complete) { | 1171 | if (!complete) { |
1172 | /* call common logic and prepare response */ | 1172 | /* call common logic and prepare response */ |
1173 | if (musb_h_ep0_continue(musb, len, urb)) { | 1173 | if (musb_h_ep0_continue(musb, len, urb)) { |
1174 | /* more packets required */ | 1174 | /* more packets required */ |
1175 | csr = (MUSB_EP0_IN == musb->ep0_stage) | 1175 | csr = (MUSB_EP0_IN == musb->ep0_stage) |
1176 | ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; | 1176 | ? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY; |
1177 | } else { | 1177 | } else { |
1178 | /* data transfer complete; perform status phase */ | 1178 | /* data transfer complete; perform status phase */ |
1179 | if (usb_pipeout(urb->pipe) | 1179 | if (usb_pipeout(urb->pipe) |
1180 | || !urb->transfer_buffer_length) | 1180 | || !urb->transfer_buffer_length) |
1181 | csr = MUSB_CSR0_H_STATUSPKT | 1181 | csr = MUSB_CSR0_H_STATUSPKT |
1182 | | MUSB_CSR0_H_REQPKT; | 1182 | | MUSB_CSR0_H_REQPKT; |
1183 | else | 1183 | else |
1184 | csr = MUSB_CSR0_H_STATUSPKT | 1184 | csr = MUSB_CSR0_H_STATUSPKT |
1185 | | MUSB_CSR0_TXPKTRDY; | 1185 | | MUSB_CSR0_TXPKTRDY; |
1186 | 1186 | ||
1187 | /* flag status stage */ | 1187 | /* flag status stage */ |
1188 | musb->ep0_stage = MUSB_EP0_STATUS; | 1188 | musb->ep0_stage = MUSB_EP0_STATUS; |
1189 | 1189 | ||
1190 | dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr); | 1190 | dev_dbg(musb->controller, "ep0 STATUS, csr %04x\n", csr); |
1191 | 1191 | ||
1192 | } | 1192 | } |
1193 | musb_writew(epio, MUSB_CSR0, csr); | 1193 | musb_writew(epio, MUSB_CSR0, csr); |
1194 | retval = IRQ_HANDLED; | 1194 | retval = IRQ_HANDLED; |
1195 | } else | 1195 | } else |
1196 | musb->ep0_stage = MUSB_EP0_IDLE; | 1196 | musb->ep0_stage = MUSB_EP0_IDLE; |
1197 | 1197 | ||
1198 | /* call completion handler if done */ | 1198 | /* call completion handler if done */ |
1199 | if (complete) | 1199 | if (complete) |
1200 | musb_advance_schedule(musb, urb, hw_ep, 1); | 1200 | musb_advance_schedule(musb, urb, hw_ep, 1); |
1201 | done: | 1201 | done: |
1202 | return retval; | 1202 | return retval; |
1203 | } | 1203 | } |
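The "prepare status" block in musb_h_ep0_irq() maps CSR0 error bits onto URB status codes: a STALL becomes -EPIPE, a hard error becomes -EPROTO, and a NAK timeout is retried rather than failed. A sketch of that mapping; the bit values below are assumptions for illustration only:

        /* Hypothetical sketch of the CSR0 error-bit to URB-status mapping above. */
        #include <errno.h>
        #include <stdio.h>

        #define CSR0_RXSTALL     0x0004
        #define CSR0_ERROR       0x0010
        #define CSR0_NAKTIMEOUT  0x0080

        static int ep0_status_from_csr(unsigned csr)
        {
                if (csr & CSR0_RXSTALL)
                        return -EPIPE;      /* device STALLed the request      */
                if (csr & CSR0_ERROR)
                        return -EPROTO;     /* repeated attempts, no response  */
                return 0;                   /* NAK timeout etc. is not fatal   */
        }

        int main(void)
        {
                printf("%d\n", ep0_status_from_csr(CSR0_RXSTALL));    /* -EPIPE */
                printf("%d\n", ep0_status_from_csr(CSR0_NAKTIMEOUT)); /* 0      */
                return 0;
        }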
1204 | 1204 | ||
1205 | 1205 | ||
1206 | #ifdef CONFIG_USB_INVENTRA_DMA | 1206 | #ifdef CONFIG_USB_INVENTRA_DMA |
1207 | 1207 | ||
1208 | /* Host side TX (OUT) using Mentor DMA works as follows: | 1208 | /* Host side TX (OUT) using Mentor DMA works as follows: |
1209 | submit_urb -> | 1209 | submit_urb -> |
1210 | - if queue was empty, Program Endpoint | 1210 | - if queue was empty, Program Endpoint |
1211 | - ... which starts DMA to fifo in mode 1 or 0 | 1211 | - ... which starts DMA to fifo in mode 1 or 0 |
1212 | 1212 | ||
1213 | DMA Isr (transfer complete) -> TxAvail() | 1213 | DMA Isr (transfer complete) -> TxAvail() |
1214 | - Stop DMA (~DmaEnab) (<--- Alert ... currently happens | 1214 | - Stop DMA (~DmaEnab) (<--- Alert ... currently happens |
1215 | only in musb_cleanup_urb) | 1215 | only in musb_cleanup_urb) |
1216 | - TxPktRdy has to be set in mode 0 or for | 1216 | - TxPktRdy has to be set in mode 0 or for |
1217 | short packets in mode 1. | 1217 | short packets in mode 1. |
1218 | */ | 1218 | */ |
1219 | 1219 | ||
1220 | #endif | 1220 | #endif |
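The note above distinguishes DMA mode 0 (one packet per request) from mode 1 (multi-packet), and points out that software still has to set TxPktRdy in mode 0 and for a trailing short packet in mode 1. A stand-alone sketch of that rule under those assumptions:

        /* Hypothetical sketch of the mode-0 / mode-1 split described above. */
        #include <stdbool.h>
        #include <stdio.h>

        static int pick_dma_mode(unsigned length, unsigned maxpacket)
        {
                return length > maxpacket ? 1 : 0;
        }

        static bool need_manual_txpktrdy(int mode, unsigned residue, unsigned maxpacket)
        {
                /* mode 0 always needs it; mode 1 only for a final short packet */
                return mode == 0 || (residue && residue < maxpacket);
        }

        int main(void)
        {
                int mode = pick_dma_mode(3000, 512);
                printf("mode %d, manual TXPKTRDY %d\n",
                       mode, need_manual_txpktrdy(mode, 3000 % 512, 512));
                return 0;
        }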
1221 | 1221 | ||
1222 | /* Service a Tx-Available or dma completion irq for the endpoint */ | 1222 | /* Service a Tx-Available or dma completion irq for the endpoint */ |
1223 | void musb_host_tx(struct musb *musb, u8 epnum) | 1223 | void musb_host_tx(struct musb *musb, u8 epnum) |
1224 | { | 1224 | { |
1225 | int pipe; | 1225 | int pipe; |
1226 | bool done = false; | 1226 | bool done = false; |
1227 | u16 tx_csr; | 1227 | u16 tx_csr; |
1228 | size_t length = 0; | 1228 | size_t length = 0; |
1229 | size_t offset = 0; | 1229 | size_t offset = 0; |
1230 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | 1230 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; |
1231 | void __iomem *epio = hw_ep->regs; | 1231 | void __iomem *epio = hw_ep->regs; |
1232 | struct musb_qh *qh = hw_ep->out_qh; | 1232 | struct musb_qh *qh = hw_ep->out_qh; |
1233 | struct urb *urb = next_urb(qh); | 1233 | struct urb *urb = next_urb(qh); |
1234 | u32 status = 0; | 1234 | u32 status = 0; |
1235 | void __iomem *mbase = musb->mregs; | 1235 | void __iomem *mbase = musb->mregs; |
1236 | struct dma_channel *dma; | 1236 | struct dma_channel *dma; |
1237 | bool transfer_pending = false; | 1237 | bool transfer_pending = false; |
1238 | 1238 | ||
1239 | musb_ep_select(mbase, epnum); | 1239 | musb_ep_select(mbase, epnum); |
1240 | tx_csr = musb_readw(epio, MUSB_TXCSR); | 1240 | tx_csr = musb_readw(epio, MUSB_TXCSR); |
1241 | 1241 | ||
1242 | /* with CPPI, DMA sometimes triggers "extra" irqs */ | 1242 | /* with CPPI, DMA sometimes triggers "extra" irqs */ |
1243 | if (!urb) { | 1243 | if (!urb) { |
1244 | dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | 1244 | dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); |
1245 | return; | 1245 | return; |
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | pipe = urb->pipe; | 1248 | pipe = urb->pipe; |
1249 | dma = is_dma_capable() ? hw_ep->tx_channel : NULL; | 1249 | dma = is_dma_capable() ? hw_ep->tx_channel : NULL; |
1250 | dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, | 1250 | dev_dbg(musb->controller, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr, |
1251 | dma ? ", dma" : ""); | 1251 | dma ? ", dma" : ""); |
1252 | 1252 | ||
1253 | /* check for errors */ | 1253 | /* check for errors */ |
1254 | if (tx_csr & MUSB_TXCSR_H_RXSTALL) { | 1254 | if (tx_csr & MUSB_TXCSR_H_RXSTALL) { |
1255 | /* dma was disabled, fifo flushed */ | 1255 | /* dma was disabled, fifo flushed */ |
1256 | dev_dbg(musb->controller, "TX end %d stall\n", epnum); | 1256 | dev_dbg(musb->controller, "TX end %d stall\n", epnum); |
1257 | 1257 | ||
1258 | /* stall; record URB status */ | 1258 | /* stall; record URB status */ |
1259 | status = -EPIPE; | 1259 | status = -EPIPE; |
1260 | 1260 | ||
1261 | } else if (tx_csr & MUSB_TXCSR_H_ERROR) { | 1261 | } else if (tx_csr & MUSB_TXCSR_H_ERROR) { |
1262 | /* (NON-ISO) dma was disabled, fifo flushed */ | 1262 | /* (NON-ISO) dma was disabled, fifo flushed */ |
1263 | dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum); | 1263 | dev_dbg(musb->controller, "TX 3strikes on ep=%d\n", epnum); |
1264 | 1264 | ||
1265 | status = -ETIMEDOUT; | 1265 | status = -ETIMEDOUT; |
1266 | 1266 | ||
1267 | } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { | 1267 | } else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) { |
1268 | if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1 | 1268 | if (USB_ENDPOINT_XFER_BULK == qh->type && qh->mux == 1 |
1269 | && !list_is_singular(&musb->out_bulk)) { | 1269 | && !list_is_singular(&musb->out_bulk)) { |
1270 | dev_dbg(musb->controller, | 1270 | dev_dbg(musb->controller, |
1271 | "NAK timeout on TX%d ep\n", epnum); | 1271 | "NAK timeout on TX%d ep\n", epnum); |
1272 | musb_bulk_nak_timeout(musb, hw_ep, 0); | 1272 | musb_bulk_nak_timeout(musb, hw_ep, 0); |
1273 | } else { | 1273 | } else { |
1274 | dev_dbg(musb->controller, | 1274 | dev_dbg(musb->controller, |
1275 | "TX end=%d device not responding\n", epnum); | 1275 | "TX end=%d device not responding\n", epnum); |
1276 | /* NOTE: this code path would be a good place to PAUSE a | 1276 | /* NOTE: this code path would be a good place to PAUSE a |
1277 | * transfer, if there's some other (nonperiodic) tx urb | 1277 | * transfer, if there's some other (nonperiodic) tx urb |
1278 | * that could use this fifo. (dma complicates it...) | 1278 | * that could use this fifo. (dma complicates it...) |
1279 | * That's already done for bulk RX transfers. | 1279 | * That's already done for bulk RX transfers. |
1280 | * | 1280 | * |
1281 | * if (bulk && qh->ring.next != &musb->out_bulk), then | 1281 | * if (bulk && qh->ring.next != &musb->out_bulk), then |
1282 | * we have a candidate... NAKing is *NOT* an error | 1282 | * we have a candidate... NAKing is *NOT* an error |
1283 | */ | 1283 | */ |
1284 | musb_ep_select(mbase, epnum); | 1284 | musb_ep_select(mbase, epnum); |
1285 | musb_writew(epio, MUSB_TXCSR, | 1285 | musb_writew(epio, MUSB_TXCSR, |
1286 | MUSB_TXCSR_H_WZC_BITS | 1286 | MUSB_TXCSR_H_WZC_BITS |
1287 | | MUSB_TXCSR_TXPKTRDY); | 1287 | | MUSB_TXCSR_TXPKTRDY); |
1288 | } | 1288 | } |
1289 | return; | 1289 | return; |
1290 | } | 1290 | } |
1291 | 1291 | ||
1292 | done: | 1292 | done: |
1293 | if (status) { | 1293 | if (status) { |
1294 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 1294 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
1295 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | 1295 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; |
1296 | (void) musb->dma_controller->channel_abort(dma); | 1296 | (void) musb->dma_controller->channel_abort(dma); |
1297 | } | 1297 | } |
1298 | 1298 | ||
1299 | /* do the proper sequence to abort the transfer in the | 1299 | /* do the proper sequence to abort the transfer in the |
1300 | * usb core; the dma engine should already be stopped. | 1300 | * usb core; the dma engine should already be stopped. |
1301 | */ | 1301 | */ |
1302 | musb_h_tx_flush_fifo(hw_ep); | 1302 | musb_h_tx_flush_fifo(hw_ep); |
1303 | tx_csr &= ~(MUSB_TXCSR_AUTOSET | 1303 | tx_csr &= ~(MUSB_TXCSR_AUTOSET |
1304 | | MUSB_TXCSR_DMAENAB | 1304 | | MUSB_TXCSR_DMAENAB |
1305 | | MUSB_TXCSR_H_ERROR | 1305 | | MUSB_TXCSR_H_ERROR |
1306 | | MUSB_TXCSR_H_RXSTALL | 1306 | | MUSB_TXCSR_H_RXSTALL |
1307 | | MUSB_TXCSR_H_NAKTIMEOUT | 1307 | | MUSB_TXCSR_H_NAKTIMEOUT |
1308 | ); | 1308 | ); |
1309 | 1309 | ||
1310 | musb_ep_select(mbase, epnum); | 1310 | musb_ep_select(mbase, epnum); |
1311 | musb_writew(epio, MUSB_TXCSR, tx_csr); | 1311 | musb_writew(epio, MUSB_TXCSR, tx_csr); |
1312 | /* REVISIT may need to clear FLUSHFIFO ... */ | 1312 | /* REVISIT may need to clear FLUSHFIFO ... */ |
1313 | musb_writew(epio, MUSB_TXCSR, tx_csr); | 1313 | musb_writew(epio, MUSB_TXCSR, tx_csr); |
1314 | musb_writeb(epio, MUSB_TXINTERVAL, 0); | 1314 | musb_writeb(epio, MUSB_TXINTERVAL, 0); |
1315 | 1315 | ||
1316 | done = true; | 1316 | done = true; |
1317 | } | 1317 | } |
1318 | 1318 | ||
1319 | /* second cppi case */ | 1319 | /* second cppi case */ |
1320 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 1320 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
1321 | dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); | 1321 | dev_dbg(musb->controller, "extra TX%d ready, csr %04x\n", epnum, tx_csr); |
1322 | return; | 1322 | return; |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | if (is_dma_capable() && dma && !status) { | 1325 | if (is_dma_capable() && dma && !status) { |
1326 | /* | 1326 | /* |
1327 | * DMA has completed. But if we're using DMA mode 1 (multi | 1327 | * DMA has completed. But if we're using DMA mode 1 (multi |
1328 | * packet DMA), we need a terminal TXPKTRDY interrupt before | 1328 | * packet DMA), we need a terminal TXPKTRDY interrupt before |
1329 | * we can consider this transfer completed, lest we trash | 1329 | * we can consider this transfer completed, lest we trash |
1330 | * its last packet when writing the next URB's data. So we | 1330 | * its last packet when writing the next URB's data. So we |
1331 | * switch back to mode 0 to get that interrupt; we'll come | 1331 | * switch back to mode 0 to get that interrupt; we'll come |
1332 | * back here once it happens. | 1332 | * back here once it happens. |
1333 | */ | 1333 | */ |
1334 | if (tx_csr & MUSB_TXCSR_DMAMODE) { | 1334 | if (tx_csr & MUSB_TXCSR_DMAMODE) { |
1335 | /* | 1335 | /* |
1336 | * We shouldn't clear DMAMODE with DMAENAB set; so | 1336 | * We shouldn't clear DMAMODE with DMAENAB set; so |
1337 | * clear them in a safe order. That should be OK | 1337 | * clear them in a safe order. That should be OK |
1338 | * once TXPKTRDY has been set (and I've never seen | 1338 | * once TXPKTRDY has been set (and I've never seen |
1339 | * it being 0 at this moment -- DMA interrupt latency | 1339 | * it being 0 at this moment -- DMA interrupt latency |
1340 | * is significant) but if it hasn't been then we have | 1340 | * is significant) but if it hasn't been then we have |
1341 | * no choice but to stop being polite and ignore the | 1341 | * no choice but to stop being polite and ignore the |
1342 | * programmer's guide... :-) | 1342 | * programmer's guide... :-) |
1343 | * | 1343 | * |
1344 | * Note that we must write TXCSR with TXPKTRDY cleared | 1344 | * Note that we must write TXCSR with TXPKTRDY cleared |
1345 | * in order not to re-trigger the packet send (this bit | 1345 | * in order not to re-trigger the packet send (this bit |
1346 | * can't be cleared by CPU), and there's another caveat: | 1346 | * can't be cleared by CPU), and there's another caveat: |
1347 | * TXPKTRDY may be set shortly and then cleared in the | 1347 | * TXPKTRDY may be set shortly and then cleared in the |
1348 | * double-buffered FIFO mode, so we do an extra TXCSR | 1348 | * double-buffered FIFO mode, so we do an extra TXCSR |
1349 | * read for debouncing... | 1349 | * read for debouncing... |
1350 | */ | 1350 | */ |
1351 | tx_csr &= musb_readw(epio, MUSB_TXCSR); | 1351 | tx_csr &= musb_readw(epio, MUSB_TXCSR); |
1352 | if (tx_csr & MUSB_TXCSR_TXPKTRDY) { | 1352 | if (tx_csr & MUSB_TXCSR_TXPKTRDY) { |
1353 | tx_csr &= ~(MUSB_TXCSR_DMAENAB | | 1353 | tx_csr &= ~(MUSB_TXCSR_DMAENAB | |
1354 | MUSB_TXCSR_TXPKTRDY); | 1354 | MUSB_TXCSR_TXPKTRDY); |
1355 | musb_writew(epio, MUSB_TXCSR, | 1355 | musb_writew(epio, MUSB_TXCSR, |
1356 | tx_csr | MUSB_TXCSR_H_WZC_BITS); | 1356 | tx_csr | MUSB_TXCSR_H_WZC_BITS); |
1357 | } | 1357 | } |
1358 | tx_csr &= ~(MUSB_TXCSR_DMAMODE | | 1358 | tx_csr &= ~(MUSB_TXCSR_DMAMODE | |
1359 | MUSB_TXCSR_TXPKTRDY); | 1359 | MUSB_TXCSR_TXPKTRDY); |
1360 | musb_writew(epio, MUSB_TXCSR, | 1360 | musb_writew(epio, MUSB_TXCSR, |
1361 | tx_csr | MUSB_TXCSR_H_WZC_BITS); | 1361 | tx_csr | MUSB_TXCSR_H_WZC_BITS); |
1362 | 1362 | ||
1363 | /* | 1363 | /* |
1364 | * There is no guarantee that we'll get an interrupt | 1364 | * There is no guarantee that we'll get an interrupt |
1365 | * after clearing DMAMODE as we might have done this | 1365 | * after clearing DMAMODE as we might have done this |
1366 | * too late (after TXPKTRDY was cleared by controller). | 1366 | * too late (after TXPKTRDY was cleared by controller). |
1367 | * Re-read TXCSR as we have spoiled its previous value. | 1367 | * Re-read TXCSR as we have spoiled its previous value. |
1368 | */ | 1368 | */ |
1369 | tx_csr = musb_readw(epio, MUSB_TXCSR); | 1369 | tx_csr = musb_readw(epio, MUSB_TXCSR); |
1370 | } | 1370 | } |
1371 | 1371 | ||
1372 | /* | 1372 | /* |
1373 | * We may get here from a DMA completion or TXPKTRDY interrupt. | 1373 | * We may get here from a DMA completion or TXPKTRDY interrupt. |
1374 | * In any case, we must check the FIFO status here and bail out | 1374 | * In any case, we must check the FIFO status here and bail out |
1375 | * only if the FIFO still has data -- that should prevent the | 1375 | * only if the FIFO still has data -- that should prevent the |
1376 | * "missed" TXPKTRDY interrupts and deal with double-buffered | 1376 | * "missed" TXPKTRDY interrupts and deal with double-buffered |
1377 | * FIFO mode too... | 1377 | * FIFO mode too... |
1378 | */ | 1378 | */ |
1379 | if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) { | 1379 | if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) { |
1380 | dev_dbg(musb->controller, "DMA complete but packet still in FIFO, " | 1380 | dev_dbg(musb->controller, "DMA complete but packet still in FIFO, " |
1381 | "CSR %04x\n", tx_csr); | 1381 | "CSR %04x\n", tx_csr); |
1382 | return; | 1382 | return; |
1383 | } | 1383 | } |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | if (!status || dma || usb_pipeisoc(pipe)) { | 1386 | if (!status || dma || usb_pipeisoc(pipe)) { |
1387 | if (dma) | 1387 | if (dma) |
1388 | length = dma->actual_len; | 1388 | length = dma->actual_len; |
1389 | else | 1389 | else |
1390 | length = qh->segsize; | 1390 | length = qh->segsize; |
1391 | qh->offset += length; | 1391 | qh->offset += length; |
1392 | 1392 | ||
1393 | if (usb_pipeisoc(pipe)) { | 1393 | if (usb_pipeisoc(pipe)) { |
1394 | struct usb_iso_packet_descriptor *d; | 1394 | struct usb_iso_packet_descriptor *d; |
1395 | 1395 | ||
1396 | d = urb->iso_frame_desc + qh->iso_idx; | 1396 | d = urb->iso_frame_desc + qh->iso_idx; |
1397 | d->actual_length = length; | 1397 | d->actual_length = length; |
1398 | d->status = status; | 1398 | d->status = status; |
1399 | if (++qh->iso_idx >= urb->number_of_packets) { | 1399 | if (++qh->iso_idx >= urb->number_of_packets) { |
1400 | done = true; | 1400 | done = true; |
1401 | } else { | 1401 | } else { |
1402 | d++; | 1402 | d++; |
1403 | offset = d->offset; | 1403 | offset = d->offset; |
1404 | length = d->length; | 1404 | length = d->length; |
1405 | } | 1405 | } |
1406 | } else if (dma && urb->transfer_buffer_length == qh->offset) { | 1406 | } else if (dma && urb->transfer_buffer_length == qh->offset) { |
1407 | done = true; | 1407 | done = true; |
1408 | } else { | 1408 | } else { |
1409 | /* see if we need to send more data, or ZLP */ | 1409 | /* see if we need to send more data, or ZLP */ |
1410 | if (qh->segsize < qh->maxpacket) | 1410 | if (qh->segsize < qh->maxpacket) |
1411 | done = true; | 1411 | done = true; |
1412 | else if (qh->offset == urb->transfer_buffer_length | 1412 | else if (qh->offset == urb->transfer_buffer_length |
1413 | && !(urb->transfer_flags | 1413 | && !(urb->transfer_flags |
1414 | & URB_ZERO_PACKET)) | 1414 | & URB_ZERO_PACKET)) |
1415 | done = true; | 1415 | done = true; |
1416 | if (!done) { | 1416 | if (!done) { |
1417 | offset = qh->offset; | 1417 | offset = qh->offset; |
1418 | length = urb->transfer_buffer_length - offset; | 1418 | length = urb->transfer_buffer_length - offset; |
1419 | transfer_pending = true; | 1419 | transfer_pending = true; |
1420 | } | 1420 | } |
1421 | } | 1421 | } |
1422 | } | 1422 | } |
1423 | 1423 | ||
1424 | /* urb->status != -EINPROGRESS means request has been faulted, | 1424 | /* urb->status != -EINPROGRESS means request has been faulted, |
1425 | * so we must abort this transfer after cleanup | 1425 | * so we must abort this transfer after cleanup |
1426 | */ | 1426 | */ |
1427 | if (urb->status != -EINPROGRESS) { | 1427 | if (urb->status != -EINPROGRESS) { |
1428 | done = true; | 1428 | done = true; |
1429 | if (status == 0) | 1429 | if (status == 0) |
1430 | status = urb->status; | 1430 | status = urb->status; |
1431 | } | 1431 | } |
1432 | 1432 | ||
1433 | if (done) { | 1433 | if (done) { |
1434 | /* set status */ | 1434 | /* set status */ |
1435 | urb->status = status; | 1435 | urb->status = status; |
1436 | urb->actual_length = qh->offset; | 1436 | urb->actual_length = qh->offset; |
1437 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); | 1437 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT); |
1438 | return; | 1438 | return; |
1439 | } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) { | 1439 | } else if ((usb_pipeisoc(pipe) || transfer_pending) && dma) { |
1440 | if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, | 1440 | if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb, |
1441 | offset, length)) { | 1441 | offset, length)) { |
1442 | if (is_cppi_enabled() || tusb_dma_omap()) | 1442 | if (is_cppi_enabled() || tusb_dma_omap()) |
1443 | musb_h_tx_dma_start(hw_ep); | 1443 | musb_h_tx_dma_start(hw_ep); |
1444 | return; | 1444 | return; |
1445 | } | 1445 | } |
1446 | } else if (tx_csr & MUSB_TXCSR_DMAENAB) { | 1446 | } else if (tx_csr & MUSB_TXCSR_DMAENAB) { |
1447 | dev_dbg(musb->controller, "not complete, but DMA enabled?\n"); | 1447 | dev_dbg(musb->controller, "not complete, but DMA enabled?\n"); |
1448 | return; | 1448 | return; |
1449 | } | 1449 | } |
1450 | 1450 | ||
1451 | /* | 1451 | /* |
1452 | * PIO: start next packet in this URB. | 1452 | * PIO: start next packet in this URB. |
1453 | * | 1453 | * |
1454 | * REVISIT: some docs say that when hw_ep->tx_double_buffered, | 1454 | * REVISIT: some docs say that when hw_ep->tx_double_buffered, |
1455 | * (and presumably, FIFO is not half-full) we should write *two* | 1455 | * (and presumably, FIFO is not half-full) we should write *two* |
1456 | * packets before updating TXCSR; other docs disagree... | 1456 | * packets before updating TXCSR; other docs disagree... |
1457 | */ | 1457 | */ |
1458 | if (length > qh->maxpacket) | 1458 | if (length > qh->maxpacket) |
1459 | length = qh->maxpacket; | 1459 | length = qh->maxpacket; |
1460 | /* Unmap the buffer so that CPU can use it */ | 1460 | /* Unmap the buffer so that CPU can use it */ |
1461 | usb_hcd_unmap_urb_for_dma(musb->hcd, urb); | 1461 | usb_hcd_unmap_urb_for_dma(musb->hcd, urb); |
1462 | 1462 | ||
1463 | /* | 1463 | /* |
1464 | * We need to map sg if the transfer_buffer is | 1464 | * We need to map sg if the transfer_buffer is |
1465 | * NULL. | 1465 | * NULL. |
1466 | */ | 1466 | */ |
1467 | if (!urb->transfer_buffer) | 1467 | if (!urb->transfer_buffer) |
1468 | qh->use_sg = true; | 1468 | qh->use_sg = true; |
1469 | 1469 | ||
1470 | if (qh->use_sg) { | 1470 | if (qh->use_sg) { |
1471 | /* sg_miter_start is already done in musb_ep_program */ | 1471 | /* sg_miter_start is already done in musb_ep_program */ |
1472 | if (!sg_miter_next(&qh->sg_miter)) { | 1472 | if (!sg_miter_next(&qh->sg_miter)) { |
1473 | dev_err(musb->controller, "error: sg list empty\n"); | 1473 | dev_err(musb->controller, "error: sg list empty\n"); |
1474 | sg_miter_stop(&qh->sg_miter); | 1474 | sg_miter_stop(&qh->sg_miter); |
1475 | status = -EINVAL; | 1475 | status = -EINVAL; |
1476 | goto done; | 1476 | goto done; |
1477 | } | 1477 | } |
1478 | urb->transfer_buffer = qh->sg_miter.addr; | 1478 | urb->transfer_buffer = qh->sg_miter.addr; |
1479 | length = min_t(u32, length, qh->sg_miter.length); | 1479 | length = min_t(u32, length, qh->sg_miter.length); |
1480 | musb_write_fifo(hw_ep, length, urb->transfer_buffer); | 1480 | musb_write_fifo(hw_ep, length, urb->transfer_buffer); |
1481 | qh->sg_miter.consumed = length; | 1481 | qh->sg_miter.consumed = length; |
1482 | sg_miter_stop(&qh->sg_miter); | 1482 | sg_miter_stop(&qh->sg_miter); |
1483 | } else { | 1483 | } else { |
1484 | musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); | 1484 | musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset); |
1485 | } | 1485 | } |
1486 | 1486 | ||
1487 | qh->segsize = length; | 1487 | qh->segsize = length; |
1488 | 1488 | ||
1489 | if (qh->use_sg) { | 1489 | if (qh->use_sg) { |
1490 | if (offset + length >= urb->transfer_buffer_length) | 1490 | if (offset + length >= urb->transfer_buffer_length) |
1491 | qh->use_sg = false; | 1491 | qh->use_sg = false; |
1492 | } | 1492 | } |
1493 | 1493 | ||
1494 | musb_ep_select(mbase, epnum); | 1494 | musb_ep_select(mbase, epnum); |
1495 | musb_writew(epio, MUSB_TXCSR, | 1495 | musb_writew(epio, MUSB_TXCSR, |
1496 | MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); | 1496 | MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY); |
1497 | } | 1497 | } |
1498 | 1498 | ||
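The comments inside musb_host_tx() above describe the order in which DMAENAB and DMAMODE have to be cleared when leaving multi-packet (mode 1) TX DMA, and why TXPKTRDY must be masked out of those writes so the packet is not re-triggered. A minimal sketch of that sequence, not part of the diff, assuming the musb_readw()/musb_writew() helpers and MUSB_TXCSR_* bit names used in this file (the function name itself is made up):

static void leave_tx_dma_mode1(void __iomem *epio)
{
	u16 csr = musb_readw(epio, MUSB_TXCSR);

	/* clear DMAENAB first, while DMAMODE is still set; mask TXPKTRDY
	 * so this write cannot re-trigger the packet send
	 */
	if (csr & MUSB_TXCSR_TXPKTRDY) {
		csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
	}

	/* now it is safe to drop DMAMODE; keep the write-zero-to-clear
	 * status bits set so they are not cleared by accident
	 */
	csr &= ~(MUSB_TXCSR_DMAMODE | MUSB_TXCSR_TXPKTRDY);
	musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
}

The driver then re-reads TXCSR, since clearing DMAMODE this late may mean no further interrupt arrives for this endpoint.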
1499 | 1499 | ||
1500 | #ifdef CONFIG_USB_INVENTRA_DMA | 1500 | #ifdef CONFIG_USB_INVENTRA_DMA |
1501 | 1501 | ||
1502 | /* Host side RX (IN) using Mentor DMA works as follows: | 1502 | /* Host side RX (IN) using Mentor DMA works as follows: |
1503 | submit_urb -> | 1503 | submit_urb -> |
1504 | - if queue was empty, ProgramEndpoint | 1504 | - if queue was empty, ProgramEndpoint |
1505 | - first IN token is sent out (by setting ReqPkt) | 1505 | - first IN token is sent out (by setting ReqPkt) |
1506 | LinuxIsr -> RxReady() | 1506 | LinuxIsr -> RxReady() |
1507 | /\ => first packet is received | 1507 | /\ => first packet is received |
1508 | | - Set in mode 0 (DmaEnab, ~ReqPkt) | 1508 | | - Set in mode 0 (DmaEnab, ~ReqPkt) |
1509 | | -> DMA Isr (transfer complete) -> RxReady() | 1509 | | -> DMA Isr (transfer complete) -> RxReady() |
1510 | | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) | 1510 | | - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab) |
1511 | | - if urb not complete, send next IN token (ReqPkt) | 1511 | | - if urb not complete, send next IN token (ReqPkt) |
1512 | | | else complete urb. | 1512 | | | else complete urb. |
1513 | | | | 1513 | | | |
1514 | --------------------------- | 1514 | --------------------------- |
1515 | * | 1515 | * |
1516 | * Nuances of mode 1: | 1516 | * Nuances of mode 1: |
1517 | * For short packets, no ack (+RxPktRdy) is sent automatically | 1517 | * For short packets, no ack (+RxPktRdy) is sent automatically |
1518 | * (even if AutoClear is ON) | 1518 | * (even if AutoClear is ON) |
1519 | * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent | 1519 | * For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent |
1520 | * automatically => major problem, as collecting the next packet becomes | 1520 | * automatically => major problem, as collecting the next packet becomes |
1521 | * difficult. Hence mode 1 is not used. | 1521 | * difficult. Hence mode 1 is not used. |
1522 | * | 1522 | * |
1523 | * REVISIT | 1523 | * REVISIT |
1524 | * All we care about at this driver level is that | 1524 | * All we care about at this driver level is that |
1525 | * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; | 1525 | * (a) all URBs terminate with REQPKT cleared and fifo(s) empty; |
1526 | * (b) termination conditions are: short RX, or buffer full; | 1526 | * (b) termination conditions are: short RX, or buffer full; |
1527 | * (c) fault modes include | 1527 | * (c) fault modes include |
1528 | * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. | 1528 | * - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO. |
1529 | * (and that endpoint's dma queue stops immediately) | 1529 | * (and that endpoint's dma queue stops immediately) |
1530 | * - overflow (full, PLUS more bytes in the terminal packet) | 1530 | * - overflow (full, PLUS more bytes in the terminal packet) |
1531 | * | 1531 | * |
1532 | * So for example, usb-storage sets URB_SHORT_NOT_OK, and would | 1532 | * So for example, usb-storage sets URB_SHORT_NOT_OK, and would |
1533 | * thus be a great candidate for using mode 1 ... for all but the | 1533 | * thus be a great candidate for using mode 1 ... for all but the |
1534 | * last packet of one URB's transfer. | 1534 | * last packet of one URB's transfer. |
1535 | */ | 1535 | */ |
1536 | 1536 | ||
1537 | #endif | 1537 | #endif |
1538 | 1538 | ||
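As a reading aid for the flow diagram above, the same mode-0 RX sequence can be linearized into one hypothetical function. In the real driver each step runs from a different interrupt (endpoint irq vs. DMA completion), endpoint programming and AUTOCLEAR handling are omitted, and only the register names are taken from this file:

static void mentor_rx_mode0_walkthrough(void __iomem *epio)
{
	u16 csr;

	/* submit_urb: program the endpoint, then request the first packet */
	csr = musb_readw(epio, MUSB_RXCSR);
	musb_writew(epio, MUSB_RXCSR,
		    MUSB_RXCSR_H_WZC_BITS | csr | MUSB_RXCSR_H_REQPKT);

	/* endpoint irq (RxReady): packet received, drain it with DMA mode 0 */
	csr = musb_readw(epio, MUSB_RXCSR);
	csr &= ~MUSB_RXCSR_H_REQPKT;
	musb_writew(epio, MUSB_RXCSR,
		    MUSB_RXCSR_H_WZC_BITS | csr | MUSB_RXCSR_DMAENAB);

	/* DMA irq (RxReady again): ack the packet, turn DMA back off */
	csr = musb_readw(epio, MUSB_RXCSR);
	csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_DMAENAB);
	musb_writew(epio, MUSB_RXCSR, MUSB_RXCSR_H_WZC_BITS | csr);

	/* if the urb is not yet complete, send the next IN token (ReqPkt) */
	musb_writew(epio, MUSB_RXCSR,
		    MUSB_RXCSR_H_WZC_BITS | csr | MUSB_RXCSR_H_REQPKT);
}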
1539 | /* | 1539 | /* |
1540 | * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, | 1540 | * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso, |
1541 | * and high-bandwidth IN transfer cases. | 1541 | * and high-bandwidth IN transfer cases. |
1542 | */ | 1542 | */ |
1543 | void musb_host_rx(struct musb *musb, u8 epnum) | 1543 | void musb_host_rx(struct musb *musb, u8 epnum) |
1544 | { | 1544 | { |
1545 | struct urb *urb; | 1545 | struct urb *urb; |
1546 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; | 1546 | struct musb_hw_ep *hw_ep = musb->endpoints + epnum; |
1547 | void __iomem *epio = hw_ep->regs; | 1547 | void __iomem *epio = hw_ep->regs; |
1548 | struct musb_qh *qh = hw_ep->in_qh; | 1548 | struct musb_qh *qh = hw_ep->in_qh; |
1549 | size_t xfer_len; | 1549 | size_t xfer_len; |
1550 | void __iomem *mbase = musb->mregs; | 1550 | void __iomem *mbase = musb->mregs; |
1551 | int pipe; | 1551 | int pipe; |
1552 | u16 rx_csr, val; | 1552 | u16 rx_csr, val; |
1553 | bool iso_err = false; | 1553 | bool iso_err = false; |
1554 | bool done = false; | 1554 | bool done = false; |
1555 | u32 status; | 1555 | u32 status; |
1556 | struct dma_channel *dma; | 1556 | struct dma_channel *dma; |
1557 | unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; | 1557 | unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG; |
1558 | 1558 | ||
1559 | musb_ep_select(mbase, epnum); | 1559 | musb_ep_select(mbase, epnum); |
1560 | 1560 | ||
1561 | urb = next_urb(qh); | 1561 | urb = next_urb(qh); |
1562 | dma = is_dma_capable() ? hw_ep->rx_channel : NULL; | 1562 | dma = is_dma_capable() ? hw_ep->rx_channel : NULL; |
1563 | status = 0; | 1563 | status = 0; |
1564 | xfer_len = 0; | 1564 | xfer_len = 0; |
1565 | 1565 | ||
1566 | rx_csr = musb_readw(epio, MUSB_RXCSR); | 1566 | rx_csr = musb_readw(epio, MUSB_RXCSR); |
1567 | val = rx_csr; | 1567 | val = rx_csr; |
1568 | 1568 | ||
1569 | if (unlikely(!urb)) { | 1569 | if (unlikely(!urb)) { |
1570 | /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least | 1570 | /* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least |
1571 | * usbtest #11 (unlinks) triggers it regularly, sometimes | 1571 | * usbtest #11 (unlinks) triggers it regularly, sometimes |
1572 | * with fifo full. (Only with DMA??) | 1572 | * with fifo full. (Only with DMA??) |
1573 | */ | 1573 | */ |
1574 | dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, | 1574 | dev_dbg(musb->controller, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val, |
1575 | musb_readw(epio, MUSB_RXCOUNT)); | 1575 | musb_readw(epio, MUSB_RXCOUNT)); |
1576 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | 1576 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); |
1577 | return; | 1577 | return; |
1578 | } | 1578 | } |
1579 | 1579 | ||
1580 | pipe = urb->pipe; | 1580 | pipe = urb->pipe; |
1581 | 1581 | ||
1582 | dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", | 1582 | dev_dbg(musb->controller, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n", |
1583 | epnum, rx_csr, urb->actual_length, | 1583 | epnum, rx_csr, urb->actual_length, |
1584 | dma ? dma->actual_len : 0); | 1584 | dma ? dma->actual_len : 0); |
1585 | 1585 | ||
1586 | /* check for errors, concurrent stall & unlink is not really | 1586 | /* check for errors, concurrent stall & unlink is not really |
1587 | * handled yet! */ | 1587 | * handled yet! */ |
1588 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { | 1588 | if (rx_csr & MUSB_RXCSR_H_RXSTALL) { |
1589 | dev_dbg(musb->controller, "RX end %d STALL\n", epnum); | 1589 | dev_dbg(musb->controller, "RX end %d STALL\n", epnum); |
1590 | 1590 | ||
1591 | /* stall; record URB status */ | 1591 | /* stall; record URB status */ |
1592 | status = -EPIPE; | 1592 | status = -EPIPE; |
1593 | 1593 | ||
1594 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { | 1594 | } else if (rx_csr & MUSB_RXCSR_H_ERROR) { |
1595 | dev_dbg(musb->controller, "end %d RX proto error\n", epnum); | 1595 | dev_dbg(musb->controller, "end %d RX proto error\n", epnum); |
1596 | 1596 | ||
1597 | status = -EPROTO; | 1597 | status = -EPROTO; |
1598 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | 1598 | musb_writeb(epio, MUSB_RXINTERVAL, 0); |
1599 | 1599 | ||
1600 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { | 1600 | } else if (rx_csr & MUSB_RXCSR_DATAERROR) { |
1601 | 1601 | ||
1602 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { | 1602 | if (USB_ENDPOINT_XFER_ISOC != qh->type) { |
1603 | dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum); | 1603 | dev_dbg(musb->controller, "RX end %d NAK timeout\n", epnum); |
1604 | 1604 | ||
1605 | /* NOTE: NAKing is *NOT* an error, so we want to | 1605 | /* NOTE: NAKing is *NOT* an error, so we want to |
1606 | * continue. Except ... if there's a request for | 1606 | * continue. Except ... if there's a request for |
1607 | * another QH, use that instead of starving it. | 1607 | * another QH, use that instead of starving it. |
1608 | * | 1608 | * |
1609 | * Devices like Ethernet and serial adapters keep | 1609 | * Devices like Ethernet and serial adapters keep |
1610 | * reads posted at all times, which will starve | 1610 | * reads posted at all times, which will starve |
1611 | * other devices without this logic. | 1611 | * other devices without this logic. |
1612 | */ | 1612 | */ |
1613 | if (usb_pipebulk(urb->pipe) | 1613 | if (usb_pipebulk(urb->pipe) |
1614 | && qh->mux == 1 | 1614 | && qh->mux == 1 |
1615 | && !list_is_singular(&musb->in_bulk)) { | 1615 | && !list_is_singular(&musb->in_bulk)) { |
1616 | musb_bulk_nak_timeout(musb, hw_ep, 1); | 1616 | musb_bulk_nak_timeout(musb, hw_ep, 1); |
1617 | return; | 1617 | return; |
1618 | } | 1618 | } |
1619 | musb_ep_select(mbase, epnum); | 1619 | musb_ep_select(mbase, epnum); |
1620 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; | 1620 | rx_csr |= MUSB_RXCSR_H_WZC_BITS; |
1621 | rx_csr &= ~MUSB_RXCSR_DATAERROR; | 1621 | rx_csr &= ~MUSB_RXCSR_DATAERROR; |
1622 | musb_writew(epio, MUSB_RXCSR, rx_csr); | 1622 | musb_writew(epio, MUSB_RXCSR, rx_csr); |
1623 | 1623 | ||
1624 | goto finish; | 1624 | goto finish; |
1625 | } else { | 1625 | } else { |
1626 | dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum); | 1626 | dev_dbg(musb->controller, "RX end %d ISO data error\n", epnum); |
1627 | /* packet error reported later */ | 1627 | /* packet error reported later */ |
1628 | iso_err = true; | 1628 | iso_err = true; |
1629 | } | 1629 | } |
1630 | } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { | 1630 | } else if (rx_csr & MUSB_RXCSR_INCOMPRX) { |
1631 | dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n", | 1631 | dev_dbg(musb->controller, "end %d high bandwidth incomplete ISO packet RX\n", |
1632 | epnum); | 1632 | epnum); |
1633 | status = -EPROTO; | 1633 | status = -EPROTO; |
1634 | } | 1634 | } |
1635 | 1635 | ||
1636 | /* faults abort the transfer */ | 1636 | /* faults abort the transfer */ |
1637 | if (status) { | 1637 | if (status) { |
1638 | /* clean up dma and collect transfer count */ | 1638 | /* clean up dma and collect transfer count */ |
1639 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 1639 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
1640 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | 1640 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; |
1641 | (void) musb->dma_controller->channel_abort(dma); | 1641 | (void) musb->dma_controller->channel_abort(dma); |
1642 | xfer_len = dma->actual_len; | 1642 | xfer_len = dma->actual_len; |
1643 | } | 1643 | } |
1644 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); | 1644 | musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG); |
1645 | musb_writeb(epio, MUSB_RXINTERVAL, 0); | 1645 | musb_writeb(epio, MUSB_RXINTERVAL, 0); |
1646 | done = true; | 1646 | done = true; |
1647 | goto finish; | 1647 | goto finish; |
1648 | } | 1648 | } |
1649 | 1649 | ||
1650 | if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { | 1650 | if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) { |
1651 | /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ | 1651 | /* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */ |
1652 | ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); | 1652 | ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr); |
1653 | goto finish; | 1653 | goto finish; |
1654 | } | 1654 | } |
1655 | 1655 | ||
1656 | /* thorough shutdown for now ... given more precise fault handling | 1656 | /* thorough shutdown for now ... given more precise fault handling |
1657 | * and better queueing support, we might keep a DMA pipeline going | 1657 | * and better queueing support, we might keep a DMA pipeline going |
1658 | * while processing this irq for earlier completions. | 1658 | * while processing this irq for earlier completions. |
1659 | */ | 1659 | */ |
1660 | 1660 | ||
1661 | /* FIXME this is _way_ too much in-line logic for Mentor DMA */ | 1661 | /* FIXME this is _way_ too much in-line logic for Mentor DMA */ |
1662 | 1662 | ||
1663 | #if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA) | 1663 | #if !defined(CONFIG_USB_INVENTRA_DMA) && !defined(CONFIG_USB_UX500_DMA) |
1664 | if (rx_csr & MUSB_RXCSR_H_REQPKT) { | 1664 | if (rx_csr & MUSB_RXCSR_H_REQPKT) { |
1665 | /* REVISIT this happened for a while on some short reads... | 1665 | /* REVISIT this happened for a while on some short reads... |
1666 | * the cleanup still needs investigation... looks bad... | 1666 | * the cleanup still needs investigation... looks bad... |
1667 | * and also duplicates dma cleanup code above ... plus, | 1667 | * and also duplicates dma cleanup code above ... plus, |
1668 | * shouldn't this be the "half full" double buffer case? | 1668 | * shouldn't this be the "half full" double buffer case? |
1669 | */ | 1669 | */ |
1670 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { | 1670 | if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { |
1671 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; | 1671 | dma->status = MUSB_DMA_STATUS_CORE_ABORT; |
1672 | (void) musb->dma_controller->channel_abort(dma); | 1672 | (void) musb->dma_controller->channel_abort(dma); |
1673 | xfer_len = dma->actual_len; | 1673 | xfer_len = dma->actual_len; |
1674 | done = true; | 1674 | done = true; |
1675 | } | 1675 | } |
1676 | 1676 | ||
1677 | dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, | 1677 | dev_dbg(musb->controller, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr, |
1678 | xfer_len, dma ? ", dma" : ""); | 1678 | xfer_len, dma ? ", dma" : ""); |
1679 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; | 1679 | rx_csr &= ~MUSB_RXCSR_H_REQPKT; |
1680 | 1680 | ||
1681 | musb_ep_select(mbase, epnum); | 1681 | musb_ep_select(mbase, epnum); |
1682 | musb_writew(epio, MUSB_RXCSR, | 1682 | musb_writew(epio, MUSB_RXCSR, |
1683 | MUSB_RXCSR_H_WZC_BITS | rx_csr); | 1683 | MUSB_RXCSR_H_WZC_BITS | rx_csr); |
1684 | } | 1684 | } |
1685 | #endif | 1685 | #endif |
1686 | if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { | 1686 | if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) { |
1687 | xfer_len = dma->actual_len; | 1687 | xfer_len = dma->actual_len; |
1688 | 1688 | ||
1689 | val &= ~(MUSB_RXCSR_DMAENAB | 1689 | val &= ~(MUSB_RXCSR_DMAENAB |
1690 | | MUSB_RXCSR_H_AUTOREQ | 1690 | | MUSB_RXCSR_H_AUTOREQ |
1691 | | MUSB_RXCSR_AUTOCLEAR | 1691 | | MUSB_RXCSR_AUTOCLEAR |
1692 | | MUSB_RXCSR_RXPKTRDY); | 1692 | | MUSB_RXCSR_RXPKTRDY); |
1693 | musb_writew(hw_ep->regs, MUSB_RXCSR, val); | 1693 | musb_writew(hw_ep->regs, MUSB_RXCSR, val); |
1694 | 1694 | ||
1695 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) | 1695 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) |
1696 | if (usb_pipeisoc(pipe)) { | 1696 | if (usb_pipeisoc(pipe)) { |
1697 | struct usb_iso_packet_descriptor *d; | 1697 | struct usb_iso_packet_descriptor *d; |
1698 | 1698 | ||
1699 | d = urb->iso_frame_desc + qh->iso_idx; | 1699 | d = urb->iso_frame_desc + qh->iso_idx; |
1700 | d->actual_length = xfer_len; | 1700 | d->actual_length = xfer_len; |
1701 | 1701 | ||
1702 | /* even if there was an error, we did the dma | 1702 | /* even if there was an error, we did the dma |
1703 | * for iso_frame_desc->length | 1703 | * for iso_frame_desc->length |
1704 | */ | 1704 | */ |
1705 | if (d->status != -EILSEQ && d->status != -EOVERFLOW) | 1705 | if (d->status != -EILSEQ && d->status != -EOVERFLOW) |
1706 | d->status = 0; | 1706 | d->status = 0; |
1707 | 1707 | ||
1708 | if (++qh->iso_idx >= urb->number_of_packets) | 1708 | if (++qh->iso_idx >= urb->number_of_packets) |
1709 | done = true; | 1709 | done = true; |
1710 | else | 1710 | else |
1711 | done = false; | 1711 | done = false; |
1712 | 1712 | ||
1713 | } else { | 1713 | } else { |
1714 | /* done if urb buffer is full or short packet is recd */ | 1714 | /* done if urb buffer is full or short packet is recd */ |
1715 | done = (urb->actual_length + xfer_len >= | 1715 | done = (urb->actual_length + xfer_len >= |
1716 | urb->transfer_buffer_length | 1716 | urb->transfer_buffer_length |
1717 | || dma->actual_len < qh->maxpacket); | 1717 | || dma->actual_len < qh->maxpacket); |
1718 | } | 1718 | } |
1719 | 1719 | ||
1720 | /* send IN token for next packet, without AUTOREQ */ | 1720 | /* send IN token for next packet, without AUTOREQ */ |
1721 | if (!done) { | 1721 | if (!done) { |
1722 | val |= MUSB_RXCSR_H_REQPKT; | 1722 | val |= MUSB_RXCSR_H_REQPKT; |
1723 | musb_writew(epio, MUSB_RXCSR, | 1723 | musb_writew(epio, MUSB_RXCSR, |
1724 | MUSB_RXCSR_H_WZC_BITS | val); | 1724 | MUSB_RXCSR_H_WZC_BITS | val); |
1725 | } | 1725 | } |
1726 | 1726 | ||
1727 | dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, | 1727 | dev_dbg(musb->controller, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum, |
1728 | done ? "off" : "reset", | 1728 | done ? "off" : "reset", |
1729 | musb_readw(epio, MUSB_RXCSR), | 1729 | musb_readw(epio, MUSB_RXCSR), |
1730 | musb_readw(epio, MUSB_RXCOUNT)); | 1730 | musb_readw(epio, MUSB_RXCOUNT)); |
1731 | #else | 1731 | #else |
1732 | done = true; | 1732 | done = true; |
1733 | #endif | 1733 | #endif |
1734 | } else if (urb->status == -EINPROGRESS) { | 1734 | } else if (urb->status == -EINPROGRESS) { |
1735 | /* if no errors, be sure a packet is ready for unloading */ | 1735 | /* if no errors, be sure a packet is ready for unloading */ |
1736 | if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { | 1736 | if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) { |
1737 | status = -EPROTO; | 1737 | status = -EPROTO; |
1738 | ERR("Rx interrupt with no errors or packet!\n"); | 1738 | ERR("Rx interrupt with no errors or packet!\n"); |
1739 | 1739 | ||
1740 | /* FIXME this is another "SHOULD NEVER HAPPEN" */ | 1740 | /* FIXME this is another "SHOULD NEVER HAPPEN" */ |
1741 | 1741 | ||
1742 | /* SCRUB (RX) */ | 1742 | /* SCRUB (RX) */ |
1743 | /* do the proper sequence to abort the transfer */ | 1743 | /* do the proper sequence to abort the transfer */ |
1744 | musb_ep_select(mbase, epnum); | 1744 | musb_ep_select(mbase, epnum); |
1745 | val &= ~MUSB_RXCSR_H_REQPKT; | 1745 | val &= ~MUSB_RXCSR_H_REQPKT; |
1746 | musb_writew(epio, MUSB_RXCSR, val); | 1746 | musb_writew(epio, MUSB_RXCSR, val); |
1747 | goto finish; | 1747 | goto finish; |
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | /* we are expecting IN packets */ | 1750 | /* we are expecting IN packets */ |
1751 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) | 1751 | #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) |
1752 | if (dma) { | 1752 | if (dma) { |
1753 | struct dma_controller *c; | 1753 | struct dma_controller *c; |
1754 | u16 rx_count; | 1754 | u16 rx_count; |
1755 | int ret, length; | 1755 | int ret, length; |
1756 | dma_addr_t buf; | 1756 | dma_addr_t buf; |
1757 | 1757 | ||
1758 | rx_count = musb_readw(epio, MUSB_RXCOUNT); | 1758 | rx_count = musb_readw(epio, MUSB_RXCOUNT); |
1759 | 1759 | ||
1760 | dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n", | 1760 | dev_dbg(musb->controller, "RX%d count %d, buffer 0x%llx len %d/%d\n", |
1761 | epnum, rx_count, | 1761 | epnum, rx_count, |
1762 | (unsigned long long) urb->transfer_dma | 1762 | (unsigned long long) urb->transfer_dma |
1763 | + urb->actual_length, | 1763 | + urb->actual_length, |
1764 | qh->offset, | 1764 | qh->offset, |
1765 | urb->transfer_buffer_length); | 1765 | urb->transfer_buffer_length); |
1766 | 1766 | ||
1767 | c = musb->dma_controller; | 1767 | c = musb->dma_controller; |
1768 | 1768 | ||
1769 | if (usb_pipeisoc(pipe)) { | 1769 | if (usb_pipeisoc(pipe)) { |
1770 | int d_status = 0; | 1770 | int d_status = 0; |
1771 | struct usb_iso_packet_descriptor *d; | 1771 | struct usb_iso_packet_descriptor *d; |
1772 | 1772 | ||
1773 | d = urb->iso_frame_desc + qh->iso_idx; | 1773 | d = urb->iso_frame_desc + qh->iso_idx; |
1774 | 1774 | ||
1775 | if (iso_err) { | 1775 | if (iso_err) { |
1776 | d_status = -EILSEQ; | 1776 | d_status = -EILSEQ; |
1777 | urb->error_count++; | 1777 | urb->error_count++; |
1778 | } | 1778 | } |
1779 | if (rx_count > d->length) { | 1779 | if (rx_count > d->length) { |
1780 | if (d_status == 0) { | 1780 | if (d_status == 0) { |
1781 | d_status = -EOVERFLOW; | 1781 | d_status = -EOVERFLOW; |
1782 | urb->error_count++; | 1782 | urb->error_count++; |
1783 | } | 1783 | } |
1784 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\ | 1784 | dev_dbg(musb->controller, "** OVERFLOW %d into %d\n",\ |
1785 | rx_count, d->length); | 1785 | rx_count, d->length); |
1786 | 1786 | ||
1787 | length = d->length; | 1787 | length = d->length; |
1788 | } else | 1788 | } else |
1789 | length = rx_count; | 1789 | length = rx_count; |
1790 | d->status = d_status; | 1790 | d->status = d_status; |
1791 | buf = urb->transfer_dma + d->offset; | 1791 | buf = urb->transfer_dma + d->offset; |
1792 | } else { | 1792 | } else { |
1793 | length = rx_count; | 1793 | length = rx_count; |
1794 | buf = urb->transfer_dma + | 1794 | buf = urb->transfer_dma + |
1795 | urb->actual_length; | 1795 | urb->actual_length; |
1796 | } | 1796 | } |
1797 | 1797 | ||
1798 | dma->desired_mode = 0; | 1798 | dma->desired_mode = 0; |
1799 | #ifdef USE_MODE1 | 1799 | #ifdef USE_MODE1 |
1800 | /* because of the issue below, mode 1 will | 1800 | /* because of the issue below, mode 1 will |
1801 | * only rarely behave with correct semantics. | 1801 | * only rarely behave with correct semantics. |
1802 | */ | 1802 | */ |
1803 | if ((urb->transfer_flags & | 1803 | if ((urb->transfer_flags & |
1804 | URB_SHORT_NOT_OK) | 1804 | URB_SHORT_NOT_OK) |
1805 | && (urb->transfer_buffer_length - | 1805 | && (urb->transfer_buffer_length - |
1806 | urb->actual_length) | 1806 | urb->actual_length) |
1807 | > qh->maxpacket) | 1807 | > qh->maxpacket) |
1808 | dma->desired_mode = 1; | 1808 | dma->desired_mode = 1; |
1809 | if (rx_count < hw_ep->max_packet_sz_rx) { | 1809 | if (rx_count < hw_ep->max_packet_sz_rx) { |
1810 | length = rx_count; | 1810 | length = rx_count; |
1811 | dma->desired_mode = 0; | 1811 | dma->desired_mode = 0; |
1812 | } else { | 1812 | } else { |
1813 | length = urb->transfer_buffer_length; | 1813 | length = urb->transfer_buffer_length; |
1814 | } | 1814 | } |
1815 | #endif | 1815 | #endif |
1816 | 1816 | ||
1817 | /* Disadvantage of using mode 1: | 1817 | /* Disadvantage of using mode 1: |
1818 | * It's basically usable only for mass storage class; essentially all | 1818 | * It's basically usable only for mass storage class; essentially all |
1819 | * other protocols also terminate transfers on short packets. | 1819 | * other protocols also terminate transfers on short packets. |
1820 | * | 1820 | * |
1821 | * Details: | 1821 | * Details: |
1822 | * An extra IN token is sent at the end of the transfer (due to AUTOREQ) | 1822 | * An extra IN token is sent at the end of the transfer (due to AUTOREQ) |
1823 | * If you try to use mode 1 for (transfer_buffer_length - 512), and try | 1823 | * If you try to use mode 1 for (transfer_buffer_length - 512), and try |
1824 | * to use the extra IN token to grab the last packet using mode 0, then | 1824 | * to use the extra IN token to grab the last packet using mode 0, then |
1825 | * the problem is that you cannot be sure when the device will send the | 1825 | * the problem is that you cannot be sure when the device will send the |
1826 | * last packet and RxPktRdy set. Sometimes the packet is recd too soon | 1826 | * last packet and RxPktRdy set. Sometimes the packet is recd too soon |
1827 | * such that it gets lost when RxCSR is re-set at the end of the mode 1 | 1827 | * such that it gets lost when RxCSR is re-set at the end of the mode 1 |
1828 | * transfer, while sometimes it is recd just a little late so that if you | 1828 | * transfer, while sometimes it is recd just a little late so that if you |
1829 | * try to configure for mode 0 soon after the mode 1 transfer is | 1829 | * try to configure for mode 0 soon after the mode 1 transfer is |
1830 | * completed, you will find rxcount 0. Okay, so you might think why not | 1830 | * completed, you will find rxcount 0. Okay, so you might think why not |
1831 | * wait for an interrupt when the pkt is recd. Well, you won't get any! | 1831 | * wait for an interrupt when the pkt is recd. Well, you won't get any! |
1832 | */ | 1832 | */ |
1833 | 1833 | ||
1834 | val = musb_readw(epio, MUSB_RXCSR); | 1834 | val = musb_readw(epio, MUSB_RXCSR); |
1835 | val &= ~MUSB_RXCSR_H_REQPKT; | 1835 | val &= ~MUSB_RXCSR_H_REQPKT; |
1836 | 1836 | ||
1837 | if (dma->desired_mode == 0) | 1837 | if (dma->desired_mode == 0) |
1838 | val &= ~MUSB_RXCSR_H_AUTOREQ; | 1838 | val &= ~MUSB_RXCSR_H_AUTOREQ; |
1839 | else | 1839 | else |
1840 | val |= MUSB_RXCSR_H_AUTOREQ; | 1840 | val |= MUSB_RXCSR_H_AUTOREQ; |
1841 | val |= MUSB_RXCSR_DMAENAB; | 1841 | val |= MUSB_RXCSR_DMAENAB; |
1842 | 1842 | ||
1843 | /* autoclear shouldn't be set in high bandwidth */ | 1843 | /* autoclear shouldn't be set in high bandwidth */ |
1844 | if (qh->hb_mult == 1) | 1844 | if (qh->hb_mult == 1) |
1845 | val |= MUSB_RXCSR_AUTOCLEAR; | 1845 | val |= MUSB_RXCSR_AUTOCLEAR; |
1846 | 1846 | ||
1847 | musb_writew(epio, MUSB_RXCSR, | 1847 | musb_writew(epio, MUSB_RXCSR, |
1848 | MUSB_RXCSR_H_WZC_BITS | val); | 1848 | MUSB_RXCSR_H_WZC_BITS | val); |
1849 | 1849 | ||
1850 | /* REVISIT if when actual_length != 0, | 1850 | /* REVISIT if when actual_length != 0, |
1851 | * transfer_buffer_length needs to be | 1851 | * transfer_buffer_length needs to be |
1852 | * adjusted first... | 1852 | * adjusted first... |
1853 | */ | 1853 | */ |
1854 | ret = c->channel_program( | 1854 | ret = c->channel_program( |
1855 | dma, qh->maxpacket, | 1855 | dma, qh->maxpacket, |
1856 | dma->desired_mode, buf, length); | 1856 | dma->desired_mode, buf, length); |
1857 | 1857 | ||
1858 | if (!ret) { | 1858 | if (!ret) { |
1859 | c->channel_release(dma); | 1859 | c->channel_release(dma); |
1860 | hw_ep->rx_channel = NULL; | 1860 | hw_ep->rx_channel = NULL; |
1861 | dma = NULL; | 1861 | dma = NULL; |
1862 | val = musb_readw(epio, MUSB_RXCSR); | 1862 | val = musb_readw(epio, MUSB_RXCSR); |
1863 | val &= ~(MUSB_RXCSR_DMAENAB | 1863 | val &= ~(MUSB_RXCSR_DMAENAB |
1864 | | MUSB_RXCSR_H_AUTOREQ | 1864 | | MUSB_RXCSR_H_AUTOREQ |
1865 | | MUSB_RXCSR_AUTOCLEAR); | 1865 | | MUSB_RXCSR_AUTOCLEAR); |
1866 | musb_writew(epio, MUSB_RXCSR, val); | 1866 | musb_writew(epio, MUSB_RXCSR, val); |
1867 | } | 1867 | } |
1868 | } | 1868 | } |
1869 | #endif /* Mentor DMA */ | 1869 | #endif /* Mentor DMA */ |
1870 | 1870 | ||
1871 | if (!dma) { | 1871 | if (!dma) { |
1872 | unsigned int received_len; | 1872 | unsigned int received_len; |
1873 | 1873 | ||
1874 | /* Unmap the buffer so that CPU can use it */ | 1874 | /* Unmap the buffer so that CPU can use it */ |
1875 | usb_hcd_unmap_urb_for_dma(musb->hcd, urb); | 1875 | usb_hcd_unmap_urb_for_dma(musb->hcd, urb); |
1876 | 1876 | ||
1877 | /* | 1877 | /* |
1878 | * We need to map sg if the transfer_buffer is | 1878 | * We need to map sg if the transfer_buffer is |
1879 | * NULL. | 1879 | * NULL. |
1880 | */ | 1880 | */ |
1881 | if (!urb->transfer_buffer) { | 1881 | if (!urb->transfer_buffer) { |
1882 | qh->use_sg = true; | 1882 | qh->use_sg = true; |
1883 | sg_miter_start(&qh->sg_miter, urb->sg, 1, | 1883 | sg_miter_start(&qh->sg_miter, urb->sg, 1, |
1884 | sg_flags); | 1884 | sg_flags); |
1885 | } | 1885 | } |
1886 | 1886 | ||
1887 | if (qh->use_sg) { | 1887 | if (qh->use_sg) { |
1888 | if (!sg_miter_next(&qh->sg_miter)) { | 1888 | if (!sg_miter_next(&qh->sg_miter)) { |
1889 | dev_err(musb->controller, "error: sg list empty\n"); | 1889 | dev_err(musb->controller, "error: sg list empty\n"); |
1890 | sg_miter_stop(&qh->sg_miter); | 1890 | sg_miter_stop(&qh->sg_miter); |
1891 | status = -EINVAL; | 1891 | status = -EINVAL; |
1892 | done = true; | 1892 | done = true; |
1893 | goto finish; | 1893 | goto finish; |
1894 | } | 1894 | } |
1895 | urb->transfer_buffer = qh->sg_miter.addr; | 1895 | urb->transfer_buffer = qh->sg_miter.addr; |
1896 | received_len = urb->actual_length; | 1896 | received_len = urb->actual_length; |
1897 | qh->offset = 0x0; | 1897 | qh->offset = 0x0; |
1898 | done = musb_host_packet_rx(musb, urb, epnum, | 1898 | done = musb_host_packet_rx(musb, urb, epnum, |
1899 | iso_err); | 1899 | iso_err); |
1900 | /* Calculate the number of bytes received */ | 1900 | /* Calculate the number of bytes received */ |
1901 | received_len = urb->actual_length - | 1901 | received_len = urb->actual_length - |
1902 | received_len; | 1902 | received_len; |
1903 | qh->sg_miter.consumed = received_len; | 1903 | qh->sg_miter.consumed = received_len; |
1904 | sg_miter_stop(&qh->sg_miter); | 1904 | sg_miter_stop(&qh->sg_miter); |
1905 | } else { | 1905 | } else { |
1906 | done = musb_host_packet_rx(musb, urb, | 1906 | done = musb_host_packet_rx(musb, urb, |
1907 | epnum, iso_err); | 1907 | epnum, iso_err); |
1908 | } | 1908 | } |
1909 | dev_dbg(musb->controller, "read %spacket\n", done ? "last " : ""); | 1909 | dev_dbg(musb->controller, "read %spacket\n", done ? "last " : ""); |
1910 | } | 1910 | } |
1911 | } | 1911 | } |
1912 | 1912 | ||
1913 | finish: | 1913 | finish: |
1914 | urb->actual_length += xfer_len; | 1914 | urb->actual_length += xfer_len; |
1915 | qh->offset += xfer_len; | 1915 | qh->offset += xfer_len; |
1916 | if (done) { | 1916 | if (done) { |
1917 | if (qh->use_sg) | 1917 | if (qh->use_sg) |
1918 | qh->use_sg = false; | 1918 | qh->use_sg = false; |
1919 | 1919 | ||
1920 | if (urb->status == -EINPROGRESS) | 1920 | if (urb->status == -EINPROGRESS) |
1921 | urb->status = status; | 1921 | urb->status = status; |
1922 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); | 1922 | musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN); |
1923 | } | 1923 | } |
1924 | } | 1924 | } |
1925 | 1925 | ||
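The non-ISO completion test in musb_host_rx() above ("done if urb buffer is full or short packet is recd") can be read as a standalone predicate. A self-contained sketch with invented parameter names, not taken from the driver:

#include <stdbool.h>
#include <stddef.h>

static bool rx_transfer_done(size_t actual_len, size_t new_bytes,
			     size_t buffer_len, size_t maxpacket)
{
	if (actual_len + new_bytes >= buffer_len)
		return true;		/* URB buffer is full */
	if (new_bytes < maxpacket)
		return true;		/* short packet ends the transfer */
	return false;
}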
1926 | /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. | 1926 | /* schedule nodes correspond to peripheral endpoints, like an OHCI QH. |
1927 | * the software schedule associates multiple such nodes with a given | 1927 | * the software schedule associates multiple such nodes with a given |
1928 | * host side hardware endpoint + direction; scheduling may activate | 1928 | * host side hardware endpoint + direction; scheduling may activate |
1929 | * that hardware endpoint. | 1929 | * that hardware endpoint. |
1930 | */ | 1930 | */ |
1931 | static int musb_schedule( | 1931 | static int musb_schedule( |
1932 | struct musb *musb, | 1932 | struct musb *musb, |
1933 | struct musb_qh *qh, | 1933 | struct musb_qh *qh, |
1934 | int is_in) | 1934 | int is_in) |
1935 | { | 1935 | { |
1936 | int idle; | 1936 | int idle; |
1937 | int best_diff; | 1937 | int best_diff; |
1938 | int best_end, epnum; | 1938 | int best_end, epnum; |
1939 | struct musb_hw_ep *hw_ep = NULL; | 1939 | struct musb_hw_ep *hw_ep = NULL; |
1940 | struct list_head *head = NULL; | 1940 | struct list_head *head = NULL; |
1941 | u8 toggle; | 1941 | u8 toggle; |
1942 | u8 txtype; | 1942 | u8 txtype; |
1943 | struct urb *urb = next_urb(qh); | 1943 | struct urb *urb = next_urb(qh); |
1944 | 1944 | ||
1945 | /* use fixed hardware for control and bulk */ | 1945 | /* use fixed hardware for control and bulk */ |
1946 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) { | 1946 | if (qh->type == USB_ENDPOINT_XFER_CONTROL) { |
1947 | head = &musb->control; | 1947 | head = &musb->control; |
1948 | hw_ep = musb->control_ep; | 1948 | hw_ep = musb->control_ep; |
1949 | goto success; | 1949 | goto success; |
1950 | } | 1950 | } |
1951 | 1951 | ||
1952 | /* else, periodic transfers get muxed to other endpoints */ | 1952 | /* else, periodic transfers get muxed to other endpoints */ |
1953 | 1953 | ||
1954 | /* | 1954 | /* |
1955 | * We know this qh hasn't been scheduled, so all we need to do | 1955 | * We know this qh hasn't been scheduled, so all we need to do |
1956 | * is choose which hardware endpoint to put it on ... | 1956 | * is choose which hardware endpoint to put it on ... |
1957 | * | 1957 | * |
1958 | * REVISIT what we really want here is a regular schedule tree | 1958 | * REVISIT what we really want here is a regular schedule tree |
1959 | * like e.g. OHCI uses. | 1959 | * like e.g. OHCI uses. |
1960 | */ | 1960 | */ |
1961 | best_diff = 4096; | 1961 | best_diff = 4096; |
1962 | best_end = -1; | 1962 | best_end = -1; |
1963 | 1963 | ||
1964 | for (epnum = 1, hw_ep = musb->endpoints + 1; | 1964 | for (epnum = 1, hw_ep = musb->endpoints + 1; |
1965 | epnum < musb->nr_endpoints; | 1965 | epnum < musb->nr_endpoints; |
1966 | epnum++, hw_ep++) { | 1966 | epnum++, hw_ep++) { |
1967 | int diff; | 1967 | int diff; |
1968 | 1968 | ||
1969 | if (musb_ep_get_qh(hw_ep, is_in) != NULL) | 1969 | if (musb_ep_get_qh(hw_ep, is_in) != NULL) |
1970 | continue; | 1970 | continue; |
1971 | 1971 | ||
1972 | if (hw_ep == musb->bulk_ep) | 1972 | if (hw_ep == musb->bulk_ep) |
1973 | continue; | 1973 | continue; |
1974 | 1974 | ||
1975 | if (is_in) | 1975 | if (is_in) |
1976 | diff = hw_ep->max_packet_sz_rx; | 1976 | diff = hw_ep->max_packet_sz_rx; |
1977 | else | 1977 | else |
1978 | diff = hw_ep->max_packet_sz_tx; | 1978 | diff = hw_ep->max_packet_sz_tx; |
1979 | diff -= (qh->maxpacket * qh->hb_mult); | 1979 | diff -= (qh->maxpacket * qh->hb_mult); |
1980 | 1980 | ||
1981 | if (diff >= 0 && best_diff > diff) { | 1981 | if (diff >= 0 && best_diff > diff) { |
1982 | 1982 | ||
1983 | /* | 1983 | /* |
1984 | * Mentor controller has a bug in that if we schedule | 1984 | * Mentor controller has a bug in that if we schedule |
1985 | * a BULK Tx transfer on an endpoint that had earlier | 1985 | * a BULK Tx transfer on an endpoint that had earlier |
1986 | * handled ISOC then the BULK transfer has to start on | 1986 | * handled ISOC then the BULK transfer has to start on |
1987 | * a zero toggle. If the BULK transfer starts on a 1 | 1987 | * a zero toggle. If the BULK transfer starts on a 1 |
1988 | * toggle then this transfer will fail as the mentor | 1988 | * toggle then this transfer will fail as the mentor |
1989 | * controller starts the Bulk transfer on a 0 toggle | 1989 | * controller starts the Bulk transfer on a 0 toggle |
1990 | * irrespective of the programming of the toggle bits | 1990 | * irrespective of the programming of the toggle bits |
1991 | * in the TXCSR register. Check for this condition | 1991 | * in the TXCSR register. Check for this condition |
1992 | * while allocating the EP for a Tx Bulk transfer. If | 1992 | * while allocating the EP for a Tx Bulk transfer. If |
1993 | * so skip this EP. | 1993 | * so skip this EP. |
1994 | */ | 1994 | */ |
1995 | hw_ep = musb->endpoints + epnum; | 1995 | hw_ep = musb->endpoints + epnum; |
1996 | toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); | 1996 | toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in); |
1997 | txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) | 1997 | txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE) |
1998 | >> 4) & 0x3; | 1998 | >> 4) & 0x3; |
1999 | if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && | 1999 | if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) && |
2000 | toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) | 2000 | toggle && (txtype == USB_ENDPOINT_XFER_ISOC)) |
2001 | continue; | 2001 | continue; |
2002 | 2002 | ||
2003 | best_diff = diff; | 2003 | best_diff = diff; |
2004 | best_end = epnum; | 2004 | best_end = epnum; |
2005 | } | 2005 | } |
2006 | } | 2006 | } |
2007 | /* use bulk reserved ep1 if no other ep is free */ | 2007 | /* use bulk reserved ep1 if no other ep is free */ |
2008 | if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { | 2008 | if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) { |
2009 | hw_ep = musb->bulk_ep; | 2009 | hw_ep = musb->bulk_ep; |
2010 | if (is_in) | 2010 | if (is_in) |
2011 | head = &musb->in_bulk; | 2011 | head = &musb->in_bulk; |
2012 | else | 2012 | else |
2013 | head = &musb->out_bulk; | 2013 | head = &musb->out_bulk; |
2014 | 2014 | ||
2015 | /* Enable bulk RX/TX NAK timeout scheme when bulk requests are | 2015 | /* Enable bulk RX/TX NAK timeout scheme when bulk requests are |
2016 | * multiplexed. This scheme doesn't work in high speed to full | 2016 | * multiplexed. This scheme doesn't work in high speed to full |
2017 | * speed scenario as NAK interrupts are not coming from a | 2017 | * speed scenario as NAK interrupts are not coming from a |
2018 | * full speed device connected to a high speed device. | 2018 | * full speed device connected to a high speed device. |
2019 | * NAK timeout interval is 8 (128 uframe or 16ms) for HS and | 2019 | * NAK timeout interval is 8 (128 uframe or 16ms) for HS and |
2020 | * 4 (8 frame or 8ms) for FS device. | 2020 | * 4 (8 frame or 8ms) for FS device. |
2021 | */ | 2021 | */ |
2022 | if (qh->dev) | 2022 | if (qh->dev) |
2023 | qh->intv_reg = | 2023 | qh->intv_reg = |
2024 | (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; | 2024 | (USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4; |
2025 | goto success; | 2025 | goto success; |
2026 | } else if (best_end < 0) { | 2026 | } else if (best_end < 0) { |
2027 | return -ENOSPC; | 2027 | return -ENOSPC; |
2028 | } | 2028 | } |
2029 | 2029 | ||
2030 | idle = 1; | 2030 | idle = 1; |
2031 | qh->mux = 0; | 2031 | qh->mux = 0; |
2032 | hw_ep = musb->endpoints + best_end; | 2032 | hw_ep = musb->endpoints + best_end; |
2033 | dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end); | 2033 | dev_dbg(musb->controller, "qh %p periodic slot %d\n", qh, best_end); |
2034 | success: | 2034 | success: |
2035 | if (head) { | 2035 | if (head) { |
2036 | idle = list_empty(head); | 2036 | idle = list_empty(head); |
2037 | list_add_tail(&qh->ring, head); | 2037 | list_add_tail(&qh->ring, head); |
2038 | qh->mux = 1; | 2038 | qh->mux = 1; |
2039 | } | 2039 | } |
2040 | qh->hw_ep = hw_ep; | 2040 | qh->hw_ep = hw_ep; |
2041 | qh->hep->hcpriv = qh; | 2041 | qh->hep->hcpriv = qh; |
2042 | if (idle) | 2042 | if (idle) |
2043 | musb_start_urb(musb, is_in, qh); | 2043 | musb_start_urb(musb, is_in, qh); |
2044 | return 0; | 2044 | return 0; |
2045 | } | 2045 | } |
2046 | 2046 | ||
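For periodic and non-reserved transfers, musb_schedule() above picks the free hardware endpoint whose FIFO wastes the least space for the requested maxpacket * hb_mult. The same best-fit rule reduced to plain C; endpoint 0 (control) is skipped, the shared bulk endpoint and the Mentor bulk-after-ISOC toggle workaround are left out, and all names here are invented:

struct ep_caps {
	int fifo_size;	/* max_packet_sz_rx or _tx for this endpoint */
	int busy;	/* already has a qh scheduled in this direction */
};

static int pick_best_endpoint(const struct ep_caps *eps, int nr_eps, int needed)
{
	int best_end = -1, best_diff = 4096, epnum;

	for (epnum = 1; epnum < nr_eps; epnum++) {
		int diff = eps[epnum].fifo_size - needed;

		if (eps[epnum].busy || diff < 0)
			continue;
		if (diff < best_diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}
	return best_end;	/* -1: caller falls back to the shared bulk ep */
}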
2047 | static int musb_urb_enqueue( | 2047 | static int musb_urb_enqueue( |
2048 | struct usb_hcd *hcd, | 2048 | struct usb_hcd *hcd, |
2049 | struct urb *urb, | 2049 | struct urb *urb, |
2050 | gfp_t mem_flags) | 2050 | gfp_t mem_flags) |
2051 | { | 2051 | { |
2052 | unsigned long flags; | 2052 | unsigned long flags; |
2053 | struct musb *musb = hcd_to_musb(hcd); | 2053 | struct musb *musb = hcd_to_musb(hcd); |
2054 | struct usb_host_endpoint *hep = urb->ep; | 2054 | struct usb_host_endpoint *hep = urb->ep; |
2055 | struct musb_qh *qh; | 2055 | struct musb_qh *qh; |
2056 | struct usb_endpoint_descriptor *epd = &hep->desc; | 2056 | struct usb_endpoint_descriptor *epd = &hep->desc; |
2057 | int ret; | 2057 | int ret; |
2058 | unsigned type_reg; | 2058 | unsigned type_reg; |
2059 | unsigned interval; | 2059 | unsigned interval; |
2060 | 2060 | ||
2061 | /* host role must be active */ | 2061 | /* host role must be active */ |
2062 | if (!is_host_active(musb) || !musb->is_active) | 2062 | if (!is_host_active(musb) || !musb->is_active) |
2063 | return -ENODEV; | 2063 | return -ENODEV; |
2064 | 2064 | ||
2065 | spin_lock_irqsave(&musb->lock, flags); | 2065 | spin_lock_irqsave(&musb->lock, flags); |
2066 | ret = usb_hcd_link_urb_to_ep(hcd, urb); | 2066 | ret = usb_hcd_link_urb_to_ep(hcd, urb); |
2067 | qh = ret ? NULL : hep->hcpriv; | 2067 | qh = ret ? NULL : hep->hcpriv; |
2068 | if (qh) | 2068 | if (qh) |
2069 | urb->hcpriv = qh; | 2069 | urb->hcpriv = qh; |
2070 | spin_unlock_irqrestore(&musb->lock, flags); | 2070 | spin_unlock_irqrestore(&musb->lock, flags); |
2071 | 2071 | ||
2072 | /* DMA mapping was already done, if needed, and this urb is on | 2072 | /* DMA mapping was already done, if needed, and this urb is on |
2073 | * hep->urb_list now ... so we're done, unless hep wasn't yet | 2073 | * hep->urb_list now ... so we're done, unless hep wasn't yet |
2074 | * scheduled onto a live qh. | 2074 | * scheduled onto a live qh. |
2075 | * | 2075 | * |
2076 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets | 2076 | * REVISIT best to keep hep->hcpriv valid until the endpoint gets |
2077 | * disabled, testing for empty qh->ring and avoiding qh setup costs | 2077 | * disabled, testing for empty qh->ring and avoiding qh setup costs |
2078 | * except for the first urb queued after a config change. | 2078 | * except for the first urb queued after a config change. |
2079 | */ | 2079 | */ |
2080 | if (qh || ret) | 2080 | if (qh || ret) |
2081 | return ret; | 2081 | return ret; |
2082 | 2082 | ||
2083 | /* Allocate and initialize qh, minimizing the work done each time | 2083 | /* Allocate and initialize qh, minimizing the work done each time |
2084 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. | 2084 | * hw_ep gets reprogrammed, or with irqs blocked. Then schedule it. |
2085 | * | 2085 | * |
2086 | * REVISIT consider a dedicated qh kmem_cache, so it's harder | 2086 | * REVISIT consider a dedicated qh kmem_cache, so it's harder |
2087 | * for bugs in other kernel code to break this driver... | 2087 | * for bugs in other kernel code to break this driver... |
2088 | */ | 2088 | */ |
2089 | qh = kzalloc(sizeof *qh, mem_flags); | 2089 | qh = kzalloc(sizeof *qh, mem_flags); |
2090 | if (!qh) { | 2090 | if (!qh) { |
2091 | spin_lock_irqsave(&musb->lock, flags); | 2091 | spin_lock_irqsave(&musb->lock, flags); |
2092 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 2092 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
2093 | spin_unlock_irqrestore(&musb->lock, flags); | 2093 | spin_unlock_irqrestore(&musb->lock, flags); |
2094 | return -ENOMEM; | 2094 | return -ENOMEM; |
2095 | } | 2095 | } |
2096 | 2096 | ||
2097 | qh->hep = hep; | 2097 | qh->hep = hep; |
2098 | qh->dev = urb->dev; | 2098 | qh->dev = urb->dev; |
2099 | INIT_LIST_HEAD(&qh->ring); | 2099 | INIT_LIST_HEAD(&qh->ring); |
2100 | qh->is_ready = 1; | 2100 | qh->is_ready = 1; |
2101 | 2101 | ||
2102 | qh->maxpacket = usb_endpoint_maxp(epd); | 2102 | qh->maxpacket = usb_endpoint_maxp(epd); |
2103 | qh->type = usb_endpoint_type(epd); | 2103 | qh->type = usb_endpoint_type(epd); |
2104 | 2104 | ||
2105 | /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier. | 2105 | /* Bits 11 & 12 of wMaxPacketSize encode high bandwidth multiplier. |
2106 | * Some musb cores don't support high bandwidth ISO transfers; and | 2106 | * Some musb cores don't support high bandwidth ISO transfers; and |
2107 | * we don't (yet!) support high bandwidth interrupt transfers. | 2107 | * we don't (yet!) support high bandwidth interrupt transfers. |
2108 | */ | 2108 | */ |
2109 | qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03); | 2109 | qh->hb_mult = 1 + ((qh->maxpacket >> 11) & 0x03); |
2110 | if (qh->hb_mult > 1) { | 2110 | if (qh->hb_mult > 1) { |
2111 | int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); | 2111 | int ok = (qh->type == USB_ENDPOINT_XFER_ISOC); |
2112 | 2112 | ||
2113 | if (ok) | 2113 | if (ok) |
2114 | ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx) | 2114 | ok = (usb_pipein(urb->pipe) && musb->hb_iso_rx) |
2115 | || (usb_pipeout(urb->pipe) && musb->hb_iso_tx); | 2115 | || (usb_pipeout(urb->pipe) && musb->hb_iso_tx); |
2116 | if (!ok) { | 2116 | if (!ok) { |
2117 | ret = -EMSGSIZE; | 2117 | ret = -EMSGSIZE; |
2118 | goto done; | 2118 | goto done; |
2119 | } | 2119 | } |
2120 | qh->maxpacket &= 0x7ff; | 2120 | qh->maxpacket &= 0x7ff; |
2121 | } | 2121 | } |
2122 | 2122 | ||
2123 | qh->epnum = usb_endpoint_num(epd); | 2123 | qh->epnum = usb_endpoint_num(epd); |
2124 | 2124 | ||
2125 | /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ | 2125 | /* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */ |
2126 | qh->addr_reg = (u8) usb_pipedevice(urb->pipe); | 2126 | qh->addr_reg = (u8) usb_pipedevice(urb->pipe); |
2127 | 2127 | ||
2128 | /* precompute rxtype/txtype/type0 register */ | 2128 | /* precompute rxtype/txtype/type0 register */ |
2129 | type_reg = (qh->type << 4) | qh->epnum; | 2129 | type_reg = (qh->type << 4) | qh->epnum; |
2130 | switch (urb->dev->speed) { | 2130 | switch (urb->dev->speed) { |
2131 | case USB_SPEED_LOW: | 2131 | case USB_SPEED_LOW: |
2132 | type_reg |= 0xc0; | 2132 | type_reg |= 0xc0; |
2133 | break; | 2133 | break; |
2134 | case USB_SPEED_FULL: | 2134 | case USB_SPEED_FULL: |
2135 | type_reg |= 0x80; | 2135 | type_reg |= 0x80; |
2136 | break; | 2136 | break; |
2137 | default: | 2137 | default: |
2138 | type_reg |= 0x40; | 2138 | type_reg |= 0x40; |
2139 | } | 2139 | } |
2140 | qh->type_reg = type_reg; | 2140 | qh->type_reg = type_reg; |
2141 | 2141 | ||
2142 | /* Precompute RXINTERVAL/TXINTERVAL register */ | 2142 | /* Precompute RXINTERVAL/TXINTERVAL register */ |
2143 | switch (qh->type) { | 2143 | switch (qh->type) { |
2144 | case USB_ENDPOINT_XFER_INT: | 2144 | case USB_ENDPOINT_XFER_INT: |
2145 | /* | 2145 | /* |
2146 | * Full/low speeds use the linear encoding, | 2146 | * Full/low speeds use the linear encoding, |
2147 | * high speed uses the logarithmic encoding. | 2147 | * high speed uses the logarithmic encoding. |
2148 | */ | 2148 | */ |
2149 | if (urb->dev->speed <= USB_SPEED_FULL) { | 2149 | if (urb->dev->speed <= USB_SPEED_FULL) { |
2150 | interval = max_t(u8, epd->bInterval, 1); | 2150 | interval = max_t(u8, epd->bInterval, 1); |
2151 | break; | 2151 | break; |
2152 | } | 2152 | } |
2153 | /* FALLTHROUGH */ | 2153 | /* FALLTHROUGH */ |
2154 | case USB_ENDPOINT_XFER_ISOC: | 2154 | case USB_ENDPOINT_XFER_ISOC: |
2155 | /* ISO always uses logarithmic encoding */ | 2155 | /* ISO always uses logarithmic encoding */ |
2156 | interval = min_t(u8, epd->bInterval, 16); | 2156 | interval = min_t(u8, epd->bInterval, 16); |
2157 | break; | 2157 | break; |
2158 | default: | 2158 | default: |
2159 | /* REVISIT we actually want to use NAK limits, hinting to the | 2159 | /* REVISIT we actually want to use NAK limits, hinting to the |
2160 | * transfer scheduling logic to try some other qh, e.g. try | 2160 | * transfer scheduling logic to try some other qh, e.g. try |
2161 | * for 2 msec first: | 2161 | * for 2 msec first: |
2162 | * | 2162 | * |
2163 | * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; | 2163 | * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2; |
2164 | * | 2164 | * |
2165 | * The downside of disabling this is that transfer scheduling | 2165 | * The downside of disabling this is that transfer scheduling |
2166 | * gets VERY unfair for nonperiodic transfers; a misbehaving | 2166 | * gets VERY unfair for nonperiodic transfers; a misbehaving |
2167 | * peripheral could make that hurt. That's perfectly normal | 2167 | * peripheral could make that hurt. That's perfectly normal |
2168 | * for reads from network or serial adapters ... so we have | 2168 | * for reads from network or serial adapters ... so we have |
2169 | * partial NAKlimit support for bulk RX. | 2169 | * partial NAKlimit support for bulk RX. |
2170 | * | 2170 | * |
2171 | * The upside of disabling it is simpler transfer scheduling. | 2171 | * The upside of disabling it is simpler transfer scheduling. |
2172 | */ | 2172 | */ |
2173 | interval = 0; | 2173 | interval = 0; |
2174 | } | 2174 | } |
2175 | qh->intv_reg = interval; | 2175 | qh->intv_reg = interval; |
2176 | 2176 | ||
2177 | /* precompute addressing for external hub/tt ports */ | 2177 | /* precompute addressing for external hub/tt ports */ |
2178 | if (musb->is_multipoint) { | 2178 | if (musb->is_multipoint) { |
2179 | struct usb_device *parent = urb->dev->parent; | 2179 | struct usb_device *parent = urb->dev->parent; |
2180 | 2180 | ||
2181 | if (parent != hcd->self.root_hub) { | 2181 | if (parent != hcd->self.root_hub) { |
2182 | qh->h_addr_reg = (u8) parent->devnum; | 2182 | qh->h_addr_reg = (u8) parent->devnum; |
2183 | 2183 | ||
2184 | /* set up tt info if needed */ | 2184 | /* set up tt info if needed */ |
2185 | if (urb->dev->tt) { | 2185 | if (urb->dev->tt) { |
2186 | qh->h_port_reg = (u8) urb->dev->ttport; | 2186 | qh->h_port_reg = (u8) urb->dev->ttport; |
2187 | if (urb->dev->tt->hub) | 2187 | if (urb->dev->tt->hub) |
2188 | qh->h_addr_reg = | 2188 | qh->h_addr_reg = |
2189 | (u8) urb->dev->tt->hub->devnum; | 2189 | (u8) urb->dev->tt->hub->devnum; |
2190 | if (urb->dev->tt->multi) | 2190 | if (urb->dev->tt->multi) |
2191 | qh->h_addr_reg |= 0x80; | 2191 | qh->h_addr_reg |= 0x80; |
2192 | } | 2192 | } |
2193 | } | 2193 | } |
2194 | } | 2194 | } |
2195 | 2195 | ||
2196 | /* invariant: hep->hcpriv is null OR the qh that's already scheduled. | 2196 | /* invariant: hep->hcpriv is null OR the qh that's already scheduled. |
2197 | * until we get real dma queues (with an entry for each urb/buffer), | 2197 | * until we get real dma queues (with an entry for each urb/buffer), |
2198 | * we only have work to do in the former case. | 2198 | * we only have work to do in the former case. |
2199 | */ | 2199 | */ |
2200 | spin_lock_irqsave(&musb->lock, flags); | 2200 | spin_lock_irqsave(&musb->lock, flags); |
2201 | if (hep->hcpriv || !next_urb(qh)) { | 2201 | if (hep->hcpriv || !next_urb(qh)) { |
2202 | /* some concurrent activity submitted another urb to hep... | 2202 | /* some concurrent activity submitted another urb to hep... |
2203 | * odd, rare, error prone, but legal. | 2203 | * odd, rare, error prone, but legal. |
2204 | */ | 2204 | */ |
2205 | kfree(qh); | 2205 | kfree(qh); |
2206 | qh = NULL; | 2206 | qh = NULL; |
2207 | ret = 0; | 2207 | ret = 0; |
2208 | } else | 2208 | } else |
2209 | ret = musb_schedule(musb, qh, | 2209 | ret = musb_schedule(musb, qh, |
2210 | epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); | 2210 | epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK); |
2211 | 2211 | ||
2212 | if (ret == 0) { | 2212 | if (ret == 0) { |
2213 | urb->hcpriv = qh; | 2213 | urb->hcpriv = qh; |
2214 | /* FIXME set urb->start_frame for iso/intr, it's tested in | 2214 | /* FIXME set urb->start_frame for iso/intr, it's tested in |
2215 | * musb_start_urb(), but otherwise only konicawc cares ... | 2215 | * musb_start_urb(), but otherwise only konicawc cares ... |
2216 | */ | 2216 | */ |
2217 | } | 2217 | } |
2218 | spin_unlock_irqrestore(&musb->lock, flags); | 2218 | spin_unlock_irqrestore(&musb->lock, flags); |
2219 | 2219 | ||
2220 | done: | 2220 | done: |
2221 | if (ret != 0) { | 2221 | if (ret != 0) { |
2222 | spin_lock_irqsave(&musb->lock, flags); | 2222 | spin_lock_irqsave(&musb->lock, flags); |
2223 | usb_hcd_unlink_urb_from_ep(hcd, urb); | 2223 | usb_hcd_unlink_urb_from_ep(hcd, urb); |
2224 | spin_unlock_irqrestore(&musb->lock, flags); | 2224 | spin_unlock_irqrestore(&musb->lock, flags); |
2225 | kfree(qh); | 2225 | kfree(qh); |
2226 | } | 2226 | } |
2227 | return ret; | 2227 | return ret; |
2228 | } | 2228 | } |
2229 | 2229 | ||
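As the comment about bits 11 and 12 of wMaxPacketSize notes, musb_urb_enqueue() splits the raw descriptor value into a packet size and a high-bandwidth multiplier. A small standalone sketch of that decoding (the sample values are illustrative, not taken from this file):

#include <stdio.h>

/* Illustrative only: splits a raw wMaxPacketSize value the way
 * musb_urb_enqueue() does -- bits 0..10 hold the packet size, bits 11..12
 * hold the high-bandwidth multiplier minus one.
 */
static void decode_maxp(unsigned wMaxPacketSize)
{
	unsigned hb_mult   = 1 + ((wMaxPacketSize >> 11) & 0x03);
	unsigned maxpacket = wMaxPacketSize & 0x7ff;

	printf("0x%04x -> %4u bytes x %u per (micro)frame\n",
	       wMaxPacketSize, maxpacket, hb_mult);
}

int main(void)
{
	decode_maxp(0x0200);	/* 512 bytes, multiplier 1: typical HS bulk */
	decode_maxp(0x1400);	/* 1024 bytes, multiplier 3: high-bandwidth iso */
	return 0;
}
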
2230 | 2230 | ||
2231 | /* | 2231 | /* |
2232 | * abort a transfer that's at the head of a hardware queue. | 2232 | * abort a transfer that's at the head of a hardware queue. |
2233 | * called with controller locked, irqs blocked | 2233 | * called with controller locked, irqs blocked |
2234 | * that hardware queue advances to the next transfer, unless prevented | 2234 | * that hardware queue advances to the next transfer, unless prevented |
2235 | */ | 2235 | */ |
2236 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) | 2236 | static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) |
2237 | { | 2237 | { |
2238 | struct musb_hw_ep *ep = qh->hw_ep; | 2238 | struct musb_hw_ep *ep = qh->hw_ep; |
2239 | struct musb *musb = ep->musb; | 2239 | struct musb *musb = ep->musb; |
2240 | void __iomem *epio = ep->regs; | 2240 | void __iomem *epio = ep->regs; |
2241 | unsigned hw_end = ep->epnum; | 2241 | unsigned hw_end = ep->epnum; |
2242 | void __iomem *regs = ep->musb->mregs; | 2242 | void __iomem *regs = ep->musb->mregs; |
2243 | int is_in = usb_pipein(urb->pipe); | 2243 | int is_in = usb_pipein(urb->pipe); |
2244 | int status = 0; | 2244 | int status = 0; |
2245 | u16 csr; | 2245 | u16 csr; |
2246 | 2246 | ||
2247 | musb_ep_select(regs, hw_end); | 2247 | musb_ep_select(regs, hw_end); |
2248 | 2248 | ||
2249 | if (is_dma_capable()) { | 2249 | if (is_dma_capable()) { |
2250 | struct dma_channel *dma; | 2250 | struct dma_channel *dma; |
2251 | 2251 | ||
2252 | dma = is_in ? ep->rx_channel : ep->tx_channel; | 2252 | dma = is_in ? ep->rx_channel : ep->tx_channel; |
2253 | if (dma) { | 2253 | if (dma) { |
2254 | status = ep->musb->dma_controller->channel_abort(dma); | 2254 | status = ep->musb->dma_controller->channel_abort(dma); |
2255 | dev_dbg(musb->controller, | 2255 | dev_dbg(musb->controller, |
2256 | "abort %cX%d DMA for urb %p --> %d\n", | 2256 | "abort %cX%d DMA for urb %p --> %d\n", |
2257 | is_in ? 'R' : 'T', ep->epnum, | 2257 | is_in ? 'R' : 'T', ep->epnum, |
2258 | urb, status); | 2258 | urb, status); |
2259 | urb->actual_length += dma->actual_len; | 2259 | urb->actual_length += dma->actual_len; |
2260 | } | 2260 | } |
2261 | } | 2261 | } |
2262 | 2262 | ||
2263 | /* turn off DMA requests, discard state, stop polling ... */ | 2263 | /* turn off DMA requests, discard state, stop polling ... */ |
2264 | if (ep->epnum && is_in) { | 2264 | if (ep->epnum && is_in) { |
2265 | /* giveback saves bulk toggle */ | 2265 | /* giveback saves bulk toggle */ |
2266 | csr = musb_h_flush_rxfifo(ep, 0); | 2266 | csr = musb_h_flush_rxfifo(ep, 0); |
2267 | 2267 | ||
2268 | /* REVISIT we still get an irq; should likely clear the | 2268 | /* REVISIT we still get an irq; should likely clear the |
2269 | * endpoint's irq status here to avoid bogus irqs. | 2269 | * endpoint's irq status here to avoid bogus irqs. |
2270 | * clearing that status is platform-specific... | 2270 | * clearing that status is platform-specific... |
2271 | */ | 2271 | */ |
2272 | } else if (ep->epnum) { | 2272 | } else if (ep->epnum) { |
2273 | musb_h_tx_flush_fifo(ep); | 2273 | musb_h_tx_flush_fifo(ep); |
2274 | csr = musb_readw(epio, MUSB_TXCSR); | 2274 | csr = musb_readw(epio, MUSB_TXCSR); |
2275 | csr &= ~(MUSB_TXCSR_AUTOSET | 2275 | csr &= ~(MUSB_TXCSR_AUTOSET |
2276 | | MUSB_TXCSR_DMAENAB | 2276 | | MUSB_TXCSR_DMAENAB |
2277 | | MUSB_TXCSR_H_RXSTALL | 2277 | | MUSB_TXCSR_H_RXSTALL |
2278 | | MUSB_TXCSR_H_NAKTIMEOUT | 2278 | | MUSB_TXCSR_H_NAKTIMEOUT |
2279 | | MUSB_TXCSR_H_ERROR | 2279 | | MUSB_TXCSR_H_ERROR |
2280 | | MUSB_TXCSR_TXPKTRDY); | 2280 | | MUSB_TXCSR_TXPKTRDY); |
2281 | musb_writew(epio, MUSB_TXCSR, csr); | 2281 | musb_writew(epio, MUSB_TXCSR, csr); |
2282 | /* REVISIT may need to clear FLUSHFIFO ... */ | 2282 | /* REVISIT may need to clear FLUSHFIFO ... */ |
2283 | musb_writew(epio, MUSB_TXCSR, csr); | 2283 | musb_writew(epio, MUSB_TXCSR, csr); |
2284 | /* flush cpu writebuffer */ | 2284 | /* flush cpu writebuffer */ |
2285 | csr = musb_readw(epio, MUSB_TXCSR); | 2285 | csr = musb_readw(epio, MUSB_TXCSR); |
2286 | } else { | 2286 | } else { |
2287 | musb_h_ep0_flush_fifo(ep); | 2287 | musb_h_ep0_flush_fifo(ep); |
2288 | } | 2288 | } |
2289 | if (status == 0) | 2289 | if (status == 0) |
2290 | musb_advance_schedule(ep->musb, urb, ep, is_in); | 2290 | musb_advance_schedule(ep->musb, urb, ep, is_in); |
2291 | return status; | 2291 | return status; |
2292 | } | 2292 | } |
2293 | 2293 | ||
2294 | static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | 2294 | static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) |
2295 | { | 2295 | { |
2296 | struct musb *musb = hcd_to_musb(hcd); | 2296 | struct musb *musb = hcd_to_musb(hcd); |
2297 | struct musb_qh *qh; | 2297 | struct musb_qh *qh; |
2298 | unsigned long flags; | 2298 | unsigned long flags; |
2299 | int is_in = usb_pipein(urb->pipe); | 2299 | int is_in = usb_pipein(urb->pipe); |
2300 | int ret; | 2300 | int ret; |
2301 | 2301 | ||
2302 | dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb, | 2302 | dev_dbg(musb->controller, "urb=%p, dev%d ep%d%s\n", urb, |
2303 | usb_pipedevice(urb->pipe), | 2303 | usb_pipedevice(urb->pipe), |
2304 | usb_pipeendpoint(urb->pipe), | 2304 | usb_pipeendpoint(urb->pipe), |
2305 | is_in ? "in" : "out"); | 2305 | is_in ? "in" : "out"); |
2306 | 2306 | ||
2307 | spin_lock_irqsave(&musb->lock, flags); | 2307 | spin_lock_irqsave(&musb->lock, flags); |
2308 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); | 2308 | ret = usb_hcd_check_unlink_urb(hcd, urb, status); |
2309 | if (ret) | 2309 | if (ret) |
2310 | goto done; | 2310 | goto done; |
2311 | 2311 | ||
2312 | qh = urb->hcpriv; | 2312 | qh = urb->hcpriv; |
2313 | if (!qh) | 2313 | if (!qh) |
2314 | goto done; | 2314 | goto done; |
2315 | 2315 | ||
2316 | /* | 2316 | /* |
2317 | * Any URB not actively programmed into endpoint hardware can be | 2317 | * Any URB not actively programmed into endpoint hardware can be |
2318 | * immediately given back; that's any URB not at the head of an | 2318 | * immediately given back; that's any URB not at the head of an |
2319 | * endpoint queue, unless someday we get real DMA queues. And even | 2319 | * endpoint queue, unless someday we get real DMA queues. And even |
2320 | * if it's at the head, it might not be known to the hardware... | 2320 | * if it's at the head, it might not be known to the hardware... |
2321 | * | 2321 | * |
2322 | * Otherwise abort current transfer, pending DMA, etc.; urb->status | 2322 | * Otherwise abort current transfer, pending DMA, etc.; urb->status |
2323 | * has already been updated. This is a synchronous abort; it'd be | 2323 | * has already been updated. This is a synchronous abort; it'd be |
2324 | * OK to hold off until after some IRQ, though. | 2324 | * OK to hold off until after some IRQ, though. |
2325 | * | 2325 | * |
2326 | * NOTE: qh is invalid unless !list_empty(&hep->urb_list) | 2326 | * NOTE: qh is invalid unless !list_empty(&hep->urb_list) |
2327 | */ | 2327 | */ |
2328 | if (!qh->is_ready | 2328 | if (!qh->is_ready |
2329 | || urb->urb_list.prev != &qh->hep->urb_list | 2329 | || urb->urb_list.prev != &qh->hep->urb_list |
2330 | || musb_ep_get_qh(qh->hw_ep, is_in) != qh) { | 2330 | || musb_ep_get_qh(qh->hw_ep, is_in) != qh) { |
2331 | int ready = qh->is_ready; | 2331 | int ready = qh->is_ready; |
2332 | 2332 | ||
2333 | qh->is_ready = 0; | 2333 | qh->is_ready = 0; |
2334 | musb_giveback(musb, urb, 0); | 2334 | musb_giveback(musb, urb, 0); |
2335 | qh->is_ready = ready; | 2335 | qh->is_ready = ready; |
2336 | 2336 | ||
2337 | /* If nothing else (usually musb_giveback) is using it | 2337 | /* If nothing else (usually musb_giveback) is using it |
2338 | * and its URB list has emptied, recycle this qh. | 2338 | * and its URB list has emptied, recycle this qh. |
2339 | */ | 2339 | */ |
2340 | if (ready && list_empty(&qh->hep->urb_list)) { | 2340 | if (ready && list_empty(&qh->hep->urb_list)) { |
2341 | qh->hep->hcpriv = NULL; | 2341 | qh->hep->hcpriv = NULL; |
2342 | list_del(&qh->ring); | 2342 | list_del(&qh->ring); |
2343 | kfree(qh); | 2343 | kfree(qh); |
2344 | } | 2344 | } |
2345 | } else | 2345 | } else |
2346 | ret = musb_cleanup_urb(urb, qh); | 2346 | ret = musb_cleanup_urb(urb, qh); |
2347 | done: | 2347 | done: |
2348 | spin_unlock_irqrestore(&musb->lock, flags); | 2348 | spin_unlock_irqrestore(&musb->lock, flags); |
2349 | return ret; | 2349 | return ret; |
2350 | } | 2350 | } |
2351 | 2351 | ||
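The fast path in musb_urb_dequeue() gives an URB straight back unless it is first on its endpoint's queue, relying on the fact that, on a circular doubly linked list, an entry is first exactly when its prev pointer is the list head (the urb->urb_list.prev != &qh->hep->urb_list test). A standalone sketch of that property, using a simplified stand-in for the kernel's list_head rather than the real API:

#include <stdio.h>

/* Illustrative only: minimal circular doubly linked list, enough to show
 * that the first entry's ->prev points back at the list head.
 */
struct node {
	struct node *prev, *next;
};

static void list_init(struct node *head)
{
	head->prev = head->next = head;
}

static void list_add_tail(struct node *entry, struct node *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

int main(void)
{
	struct node head, a, b;

	list_init(&head);
	list_add_tail(&a, &head);	/* a is queued first */
	list_add_tail(&b, &head);	/* b is queued behind it */

	printf("a is head of queue: %d\n", a.prev == &head);	/* 1 */
	printf("b is head of queue: %d\n", b.prev == &head);	/* 0 */
	return 0;
}
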
2352 | /* disable an endpoint */ | 2352 | /* disable an endpoint */ |
2353 | static void | 2353 | static void |
2354 | musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) | 2354 | musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) |
2355 | { | 2355 | { |
2356 | u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN; | 2356 | u8 is_in = hep->desc.bEndpointAddress & USB_DIR_IN; |
2357 | unsigned long flags; | 2357 | unsigned long flags; |
2358 | struct musb *musb = hcd_to_musb(hcd); | 2358 | struct musb *musb = hcd_to_musb(hcd); |
2359 | struct musb_qh *qh; | 2359 | struct musb_qh *qh; |
2360 | struct urb *urb; | 2360 | struct urb *urb; |
2361 | 2361 | ||
2362 | spin_lock_irqsave(&musb->lock, flags); | 2362 | spin_lock_irqsave(&musb->lock, flags); |
2363 | 2363 | ||
2364 | qh = hep->hcpriv; | 2364 | qh = hep->hcpriv; |
2365 | if (qh == NULL) | 2365 | if (qh == NULL) |
2366 | goto exit; | 2366 | goto exit; |
2367 | 2367 | ||
2368 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ | 2368 | /* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */ |
2369 | 2369 | ||
2370 | /* Kick the first URB off the hardware, if needed */ | 2370 | /* Kick the first URB off the hardware, if needed */ |
2371 | qh->is_ready = 0; | 2371 | qh->is_ready = 0; |
2372 | if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) { | 2372 | if (musb_ep_get_qh(qh->hw_ep, is_in) == qh) { |
2373 | urb = next_urb(qh); | 2373 | urb = next_urb(qh); |
2374 | 2374 | ||
2375 | /* make software (then hardware) stop ASAP */ | 2375 | /* make software (then hardware) stop ASAP */ |
2376 | if (!urb->unlinked) | 2376 | if (!urb->unlinked) |
2377 | urb->status = -ESHUTDOWN; | 2377 | urb->status = -ESHUTDOWN; |
2378 | 2378 | ||
2379 | /* cleanup */ | 2379 | /* cleanup */ |
2380 | musb_cleanup_urb(urb, qh); | 2380 | musb_cleanup_urb(urb, qh); |
2381 | 2381 | ||
2382 | /* Then nuke all the others ... and advance the | 2382 | /* Then nuke all the others ... and advance the |
2383 | * queue on hw_ep (e.g. bulk ring) when we're done. | 2383 | * queue on hw_ep (e.g. bulk ring) when we're done. |
2384 | */ | 2384 | */ |
2385 | while (!list_empty(&hep->urb_list)) { | 2385 | while (!list_empty(&hep->urb_list)) { |
2386 | urb = next_urb(qh); | 2386 | urb = next_urb(qh); |
2387 | urb->status = -ESHUTDOWN; | 2387 | urb->status = -ESHUTDOWN; |
2388 | musb_advance_schedule(musb, urb, qh->hw_ep, is_in); | 2388 | musb_advance_schedule(musb, urb, qh->hw_ep, is_in); |
2389 | } | 2389 | } |
2390 | } else { | 2390 | } else { |
2391 | /* Just empty the queue; the hardware is busy with | 2391 | /* Just empty the queue; the hardware is busy with |
2392 | * other transfers, and since !qh->is_ready nothing | 2392 | * other transfers, and since !qh->is_ready nothing |
2393 | * will activate any of these as it advances. | 2393 | * will activate any of these as it advances. |
2394 | */ | 2394 | */ |
2395 | while (!list_empty(&hep->urb_list)) | 2395 | while (!list_empty(&hep->urb_list)) |
2396 | musb_giveback(musb, next_urb(qh), -ESHUTDOWN); | 2396 | musb_giveback(musb, next_urb(qh), -ESHUTDOWN); |
2397 | 2397 | ||
2398 | hep->hcpriv = NULL; | 2398 | hep->hcpriv = NULL; |
2399 | list_del(&qh->ring); | 2399 | list_del(&qh->ring); |
2400 | kfree(qh); | 2400 | kfree(qh); |
2401 | } | 2401 | } |
2402 | exit: | 2402 | exit: |
2403 | spin_unlock_irqrestore(&musb->lock, flags); | 2403 | spin_unlock_irqrestore(&musb->lock, flags); |
2404 | } | 2404 | } |
2405 | 2405 | ||
2406 | static int musb_h_get_frame_number(struct usb_hcd *hcd) | 2406 | static int musb_h_get_frame_number(struct usb_hcd *hcd) |
2407 | { | 2407 | { |
2408 | struct musb *musb = hcd_to_musb(hcd); | 2408 | struct musb *musb = hcd_to_musb(hcd); |
2409 | 2409 | ||
2410 | return musb_readw(musb->mregs, MUSB_FRAME); | 2410 | return musb_readw(musb->mregs, MUSB_FRAME); |
2411 | } | 2411 | } |
2412 | 2412 | ||
2413 | static int musb_h_start(struct usb_hcd *hcd) | 2413 | static int musb_h_start(struct usb_hcd *hcd) |
2414 | { | 2414 | { |
2415 | struct musb *musb = hcd_to_musb(hcd); | 2415 | struct musb *musb = hcd_to_musb(hcd); |
2416 | 2416 | ||
2417 | /* NOTE: musb_start() is called when the hub driver turns | 2417 | /* NOTE: musb_start() is called when the hub driver turns |
2418 | * on port power, or when (OTG) peripheral starts. | 2418 | * on port power, or when (OTG) peripheral starts. |
2419 | */ | 2419 | */ |
2420 | hcd->state = HC_STATE_RUNNING; | 2420 | hcd->state = HC_STATE_RUNNING; |
2421 | musb->port1_status = 0; | 2421 | musb->port1_status = 0; |
2422 | return 0; | 2422 | return 0; |
2423 | } | 2423 | } |
2424 | 2424 | ||
2425 | static void musb_h_stop(struct usb_hcd *hcd) | 2425 | static void musb_h_stop(struct usb_hcd *hcd) |
2426 | { | 2426 | { |
2427 | musb_stop(hcd_to_musb(hcd)); | 2427 | musb_stop(hcd_to_musb(hcd)); |
2428 | hcd->state = HC_STATE_HALT; | 2428 | hcd->state = HC_STATE_HALT; |
2429 | } | 2429 | } |
2430 | 2430 | ||
2431 | static int musb_bus_suspend(struct usb_hcd *hcd) | 2431 | static int musb_bus_suspend(struct usb_hcd *hcd) |
2432 | { | 2432 | { |
2433 | struct musb *musb = hcd_to_musb(hcd); | 2433 | struct musb *musb = hcd_to_musb(hcd); |
2434 | u8 devctl; | 2434 | u8 devctl; |
2435 | 2435 | ||
2436 | if (!is_host_active(musb)) | 2436 | if (!is_host_active(musb)) |
2437 | return 0; | 2437 | return 0; |
2438 | 2438 | ||
2439 | switch (musb->xceiv->state) { | 2439 | switch (musb->xceiv->state) { |
2440 | case OTG_STATE_A_SUSPEND: | 2440 | case OTG_STATE_A_SUSPEND: |
2441 | return 0; | 2441 | return 0; |
2442 | case OTG_STATE_A_WAIT_VRISE: | 2442 | case OTG_STATE_A_WAIT_VRISE: |
2443 | /* ID could be grounded even if there's no device | 2443 | /* ID could be grounded even if there's no device |
2444 | * on the other end of the cable. NOTE that the | 2444 | * on the other end of the cable. NOTE that the |
2445 | * A_WAIT_VRISE timers are messy with MUSB... | 2445 | * A_WAIT_VRISE timers are messy with MUSB... |
2446 | */ | 2446 | */ |
2447 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); | 2447 | devctl = musb_readb(musb->mregs, MUSB_DEVCTL); |
2448 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) | 2448 | if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) |
2449 | musb->xceiv->state = OTG_STATE_A_WAIT_BCON; | 2449 | musb->xceiv->state = OTG_STATE_A_WAIT_BCON; |
2450 | break; | 2450 | break; |
2451 | default: | 2451 | default: |
2452 | break; | 2452 | break; |
2453 | } | 2453 | } |
2454 | 2454 | ||
2455 | if (musb->is_active) { | 2455 | if (musb->is_active) { |
2456 | WARNING("trying to suspend as %s while active\n", | 2456 | WARNING("trying to suspend as %s while active\n", |
2457 | usb_otg_state_string(musb->xceiv->state)); | 2457 | usb_otg_state_string(musb->xceiv->state)); |
2458 | return -EBUSY; | 2458 | return -EBUSY; |
2459 | } else | 2459 | } else |
2460 | return 0; | 2460 | return 0; |
2461 | } | 2461 | } |
2462 | 2462 | ||
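musb_bus_suspend() treats the session as powered only when the two-bit VBUS field in DEVCTL reads its maximum level, i.e. when the masked value equals the mask itself. A standalone sketch of that test; the 0x18 mask and shift of 3 are assumptions about the usual MUSB register layout, not values defined in this file:

#include <stdio.h>

/* Illustrative only: assumed layout of the DEVCTL VBus level field. */
#define DEVCTL_VBUS_MASK	0x18
#define DEVCTL_VBUS_SHIFT	3

static int vbus_valid(unsigned devctl)
{
	/* same shape as (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS */
	return (devctl & DEVCTL_VBUS_MASK) == DEVCTL_VBUS_MASK;
}

int main(void)
{
	unsigned devctl;

	for (devctl = 0; devctl < 0x20; devctl += 0x08)
		printf("DEVCTL=0x%02x  VBus level=%d  valid=%d\n",
		       devctl,
		       (devctl & DEVCTL_VBUS_MASK) >> DEVCTL_VBUS_SHIFT,
		       vbus_valid(devctl));
	return 0;
}
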
2463 | static int musb_bus_resume(struct usb_hcd *hcd) | 2463 | static int musb_bus_resume(struct usb_hcd *hcd) |
2464 | { | 2464 | { |
2465 | /* resuming child port does the work */ | 2465 | /* resuming child port does the work */ |
2466 | return 0; | 2466 | return 0; |
2467 | } | 2467 | } |
2468 | 2468 | ||
2469 | #ifndef CONFIG_MUSB_PIO_ONLY | 2469 | #ifndef CONFIG_MUSB_PIO_ONLY |
2470 | 2470 | ||
2471 | #define MUSB_USB_DMA_ALIGN 4 | 2471 | #define MUSB_USB_DMA_ALIGN 4 |
2472 | 2472 | ||
2473 | struct musb_temp_buffer { | 2473 | struct musb_temp_buffer { |
2474 | void *kmalloc_ptr; | 2474 | void *kmalloc_ptr; |
2475 | void *old_xfer_buffer; | 2475 | void *old_xfer_buffer; |
2476 | u8 data[0]; | 2476 | u8 data[0]; |
2477 | }; | 2477 | }; |
2478 | 2478 | ||
2479 | static void musb_free_temp_buffer(struct urb *urb) | 2479 | static void musb_free_temp_buffer(struct urb *urb) |
2480 | { | 2480 | { |
2481 | enum dma_data_direction dir; | 2481 | enum dma_data_direction dir; |
2482 | struct musb_temp_buffer *temp; | 2482 | struct musb_temp_buffer *temp; |
2483 | 2483 | ||
2484 | if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) | 2484 | if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) |
2485 | return; | 2485 | return; |
2486 | 2486 | ||
2487 | dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 2487 | dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
2488 | 2488 | ||
2489 | temp = container_of(urb->transfer_buffer, struct musb_temp_buffer, | 2489 | temp = container_of(urb->transfer_buffer, struct musb_temp_buffer, |
2490 | data); | 2490 | data); |
2491 | 2491 | ||
2492 | if (dir == DMA_FROM_DEVICE) { | 2492 | if (dir == DMA_FROM_DEVICE) { |
2493 | memcpy(temp->old_xfer_buffer, temp->data, | 2493 | memcpy(temp->old_xfer_buffer, temp->data, |
2494 | urb->transfer_buffer_length); | 2494 | urb->transfer_buffer_length); |
2495 | } | 2495 | } |
2496 | urb->transfer_buffer = temp->old_xfer_buffer; | 2496 | urb->transfer_buffer = temp->old_xfer_buffer; |
2497 | kfree(temp->kmalloc_ptr); | 2497 | kfree(temp->kmalloc_ptr); |
2498 | 2498 | ||
2499 | urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; | 2499 | urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; |
2500 | } | 2500 | } |
2501 | 2501 | ||
2502 | static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) | 2502 | static int musb_alloc_temp_buffer(struct urb *urb, gfp_t mem_flags) |
2503 | { | 2503 | { |
2504 | enum dma_data_direction dir; | 2504 | enum dma_data_direction dir; |
2505 | struct musb_temp_buffer *temp; | 2505 | struct musb_temp_buffer *temp; |
2506 | void *kmalloc_ptr; | 2506 | void *kmalloc_ptr; |
2507 | size_t kmalloc_size; | 2507 | size_t kmalloc_size; |
2508 | 2508 | ||
2509 | if (urb->num_sgs || urb->sg || | 2509 | if (urb->num_sgs || urb->sg || |
2510 | urb->transfer_buffer_length == 0 || | 2510 | urb->transfer_buffer_length == 0 || |
2511 | !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1))) | 2511 | !((uintptr_t)urb->transfer_buffer & (MUSB_USB_DMA_ALIGN - 1))) |
2512 | return 0; | 2512 | return 0; |
2513 | 2513 | ||
2514 | dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; | 2514 | dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
2515 | 2515 | ||
2516 | /* Allocate a buffer with enough padding for alignment */ | 2516 | /* Allocate a buffer with enough padding for alignment */ |
2517 | kmalloc_size = urb->transfer_buffer_length + | 2517 | kmalloc_size = urb->transfer_buffer_length + |
2518 | sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1; | 2518 | sizeof(struct musb_temp_buffer) + MUSB_USB_DMA_ALIGN - 1; |
2519 | 2519 | ||
2520 | kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); | 2520 | kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); |
2521 | if (!kmalloc_ptr) | 2521 | if (!kmalloc_ptr) |
2522 | return -ENOMEM; | 2522 | return -ENOMEM; |
2523 | 2523 | ||
2524 | /* Position our struct temp_buffer such that data is aligned */ | 2524 | /* Position our struct temp_buffer such that data is aligned */ |
2525 | temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN); | 2525 | temp = PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN); |
2526 | 2526 | ||
2527 | 2527 | ||
2528 | temp->kmalloc_ptr = kmalloc_ptr; | 2528 | temp->kmalloc_ptr = kmalloc_ptr; |
2529 | temp->old_xfer_buffer = urb->transfer_buffer; | 2529 | temp->old_xfer_buffer = urb->transfer_buffer; |
2530 | if (dir == DMA_TO_DEVICE) | 2530 | if (dir == DMA_TO_DEVICE) |
2531 | memcpy(temp->data, urb->transfer_buffer, | 2531 | memcpy(temp->data, urb->transfer_buffer, |
2532 | urb->transfer_buffer_length); | 2532 | urb->transfer_buffer_length); |
2533 | urb->transfer_buffer = temp->data; | 2533 | urb->transfer_buffer = temp->data; |
2534 | 2534 | ||
2535 | urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; | 2535 | urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; |
2536 | 2536 | ||
2537 | return 0; | 2537 | return 0; |
2538 | } | 2538 | } |
2539 | 2539 | ||
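musb_alloc_temp_buffer() over-allocates, aligns the bounce structure so its data[] payload satisfies the 4-byte DMA requirement, and records the original allocation so musb_free_temp_buffer() can walk back from the payload pointer and free it. A standalone user-space sketch of the same round trip, with malloc, manual rounding, and offsetof standing in for kmalloc, PTR_ALIGN, and container_of:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Illustrative only: not the driver's struct, just the same shape. */
#define DMA_ALIGN 4

struct temp_buffer {
	void *kmalloc_ptr;	/* original, possibly unaligned allocation */
	unsigned char data[];	/* payload handed out for the transfer */
};

int main(void)
{
	size_t len = 100;
	void *raw = malloc(sizeof(struct temp_buffer) + len + DMA_ALIGN - 1);
	struct temp_buffer *temp;
	unsigned char *payload;

	if (!raw)
		return 1;

	/* round up, like PTR_ALIGN(kmalloc_ptr, MUSB_USB_DMA_ALIGN) */
	temp = (struct temp_buffer *)
		(((uintptr_t)raw + DMA_ALIGN - 1) & ~(uintptr_t)(DMA_ALIGN - 1));
	temp->kmalloc_ptr = raw;

	payload = temp->data;
	memset(payload, 0, len);
	printf("payload aligned to %d bytes: %s\n", DMA_ALIGN,
	       ((uintptr_t)payload & (DMA_ALIGN - 1)) ? "no" : "yes");

	/* walk back from the payload, like container_of(..., data) */
	temp = (struct temp_buffer *)(payload - offsetof(struct temp_buffer, data));
	free(temp->kmalloc_ptr);
	return 0;
}
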
2540 | static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, | 2540 | static int musb_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, |
2541 | gfp_t mem_flags) | 2541 | gfp_t mem_flags) |
2542 | { | 2542 | { |
2543 | struct musb *musb = hcd_to_musb(hcd); | 2543 | struct musb *musb = hcd_to_musb(hcd); |
2544 | int ret; | 2544 | int ret; |
2545 | 2545 | ||
2546 | /* | 2546 | /* |
2547 | * The DMA engine in RTL1.8 and above cannot handle | 2547 | * The DMA engine in RTL1.8 and above cannot handle |
2548 | * DMA addresses that are not aligned to a 4 byte boundary. | 2548 | * DMA addresses that are not aligned to a 4 byte boundary. |
2549 | * For such engines we implemented the (un)map_urb_for_dma hooks. | 2549 | * For such engines we implemented the (un)map_urb_for_dma hooks. |
2550 | * Do not use these hooks for RTL < 1.8. | 2550 | * Do not use these hooks for RTL < 1.8. |
2551 | */ | 2551 | */ |
2552 | if (musb->hwvers < MUSB_HWVERS_1800) | 2552 | if (musb->hwvers < MUSB_HWVERS_1800) |
2553 | return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); | 2553 | return usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); |
2554 | 2554 | ||
2555 | ret = musb_alloc_temp_buffer(urb, mem_flags); | 2555 | ret = musb_alloc_temp_buffer(urb, mem_flags); |
2556 | if (ret) | 2556 | if (ret) |
2557 | return ret; | 2557 | return ret; |
2558 | 2558 | ||
2559 | ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); | 2559 | ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags); |
2560 | if (ret) | 2560 | if (ret) |
2561 | musb_free_temp_buffer(urb); | 2561 | musb_free_temp_buffer(urb); |
2562 | 2562 | ||
2563 | return ret; | 2563 | return ret; |
2564 | } | 2564 | } |
2565 | 2565 | ||
2566 | static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) | 2566 | static void musb_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) |
2567 | { | 2567 | { |
2568 | struct musb *musb = hcd_to_musb(hcd); | 2568 | struct musb *musb = hcd_to_musb(hcd); |
2569 | 2569 | ||
2570 | usb_hcd_unmap_urb_for_dma(hcd, urb); | 2570 | usb_hcd_unmap_urb_for_dma(hcd, urb); |
2571 | 2571 | ||
2572 | /* Do not use this hook for RTL<1.8 (see description above) */ | 2572 | /* Do not use this hook for RTL<1.8 (see description above) */ |
2573 | if (musb->hwvers < MUSB_HWVERS_1800) | 2573 | if (musb->hwvers < MUSB_HWVERS_1800) |
2574 | return; | 2574 | return; |
2575 | 2575 | ||
2576 | musb_free_temp_buffer(urb); | 2576 | musb_free_temp_buffer(urb); |
2577 | } | 2577 | } |
2578 | #endif /* !CONFIG_MUSB_PIO_ONLY */ | 2578 | #endif /* !CONFIG_MUSB_PIO_ONLY */ |
2579 | 2579 | ||
2580 | static const struct hc_driver musb_hc_driver = { | 2580 | static const struct hc_driver musb_hc_driver = { |
2581 | .description = "musb-hcd", | 2581 | .description = "musb-hcd", |
2582 | .product_desc = "MUSB HDRC host driver", | 2582 | .product_desc = "MUSB HDRC host driver", |
2583 | .hcd_priv_size = sizeof(struct musb *), | 2583 | .hcd_priv_size = sizeof(struct musb *), |
2584 | .flags = HCD_USB2 | HCD_MEMORY, | 2584 | .flags = HCD_USB2 | HCD_MEMORY, |
2585 | 2585 | ||
2586 | /* not using irq handler or reset hooks from usbcore, since | 2586 | /* not using irq handler or reset hooks from usbcore, since |
2587 | * those must be shared with peripheral code for OTG configs | 2587 | * those must be shared with peripheral code for OTG configs |
2588 | */ | 2588 | */ |
2589 | 2589 | ||
2590 | .start = musb_h_start, | 2590 | .start = musb_h_start, |
2591 | .stop = musb_h_stop, | 2591 | .stop = musb_h_stop, |
2592 | 2592 | ||
2593 | .get_frame_number = musb_h_get_frame_number, | 2593 | .get_frame_number = musb_h_get_frame_number, |
2594 | 2594 | ||
2595 | .urb_enqueue = musb_urb_enqueue, | 2595 | .urb_enqueue = musb_urb_enqueue, |
2596 | .urb_dequeue = musb_urb_dequeue, | 2596 | .urb_dequeue = musb_urb_dequeue, |
2597 | .endpoint_disable = musb_h_disable, | 2597 | .endpoint_disable = musb_h_disable, |
2598 | 2598 | ||
2599 | #ifndef CONFIG_MUSB_PIO_ONLY | 2599 | #ifndef CONFIG_MUSB_PIO_ONLY |
2600 | .map_urb_for_dma = musb_map_urb_for_dma, | 2600 | .map_urb_for_dma = musb_map_urb_for_dma, |
2601 | .unmap_urb_for_dma = musb_unmap_urb_for_dma, | 2601 | .unmap_urb_for_dma = musb_unmap_urb_for_dma, |
2602 | #endif | 2602 | #endif |
2603 | 2603 | ||
2604 | .hub_status_data = musb_hub_status_data, | 2604 | .hub_status_data = musb_hub_status_data, |
2605 | .hub_control = musb_hub_control, | 2605 | .hub_control = musb_hub_control, |
2606 | .bus_suspend = musb_bus_suspend, | 2606 | .bus_suspend = musb_bus_suspend, |
2607 | .bus_resume = musb_bus_resume, | 2607 | .bus_resume = musb_bus_resume, |
2608 | /* .start_port_reset = NULL, */ | 2608 | /* .start_port_reset = NULL, */ |
2609 | /* .hub_irq_enable = NULL, */ | 2609 | /* .hub_irq_enable = NULL, */ |
2610 | }; | 2610 | }; |
2611 | 2611 | ||
2612 | int musb_host_alloc(struct musb *musb) | 2612 | int musb_host_alloc(struct musb *musb) |
2613 | { | 2613 | { |
2614 | struct device *dev = musb->controller; | 2614 | struct device *dev = musb->controller; |
2615 | 2615 | ||
2616 | /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ | 2616 | /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ |
2617 | musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev)); | 2617 | musb->hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev)); |
2618 | if (!musb->hcd) | 2618 | if (!musb->hcd) |
2619 | return -EINVAL; | 2619 | return -EINVAL; |
2620 | 2620 | ||
2621 | *musb->hcd->hcd_priv = (unsigned long) musb; | 2621 | *musb->hcd->hcd_priv = (unsigned long) musb; |
2622 | musb->hcd->self.uses_pio_for_control = 1; | 2622 | musb->hcd->self.uses_pio_for_control = 1; |
2623 | musb->hcd->uses_new_polling = 1; | 2623 | musb->hcd->uses_new_polling = 1; |
2624 | musb->hcd->has_tt = 1; | 2624 | musb->hcd->has_tt = 1; |
2625 | 2625 | ||
2626 | return 0; | 2626 | return 0; |
2627 | } | 2627 | } |
2628 | 2628 | ||
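musb_host_alloc() reserves sizeof(struct musb *) bytes of per-hcd private storage via .hcd_priv_size and stores the musb pointer there; hcd_to_musb(), used throughout this file, presumably just reads that slot back. A standalone sketch of the stash-and-recover pattern with simplified stand-in types, not the real usbcore structures:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative only: simplified stand-ins for usb_hcd and musb. */
struct fake_hcd {
	int state;			/* placeholder for usbcore fields */
	unsigned long hcd_priv[];	/* driver-private area, hcd_priv_size bytes */
};

struct fake_musb {
	int id;
};

static struct fake_musb *fake_hcd_to_musb(struct fake_hcd *hcd)
{
	return (struct fake_musb *) hcd->hcd_priv[0];
}

int main(void)
{
	struct fake_musb musb = { .id = 42 };
	struct fake_hcd *hcd = malloc(sizeof(*hcd) + sizeof(struct fake_musb *));

	if (!hcd)
		return 1;

	/* like: *musb->hcd->hcd_priv = (unsigned long) musb; */
	hcd->hcd_priv[0] = (unsigned long) &musb;

	printf("recovered musb id = %d\n", fake_hcd_to_musb(hcd)->id);
	free(hcd);
	return 0;
}
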
2629 | void musb_host_cleanup(struct musb *musb) | 2629 | void musb_host_cleanup(struct musb *musb) |
2630 | { | 2630 | { |
2631 | if (musb->port_mode == MUSB_PORT_MODE_GADGET) | ||
2632 | return; | ||
2631 | usb_remove_hcd(musb->hcd); | 2633 | usb_remove_hcd(musb->hcd); |
2632 | musb->hcd = NULL; | 2634 | musb->hcd = NULL; |
2633 | } | 2635 | } |
2634 | 2636 | ||
2635 | void musb_host_free(struct musb *musb) | 2637 | void musb_host_free(struct musb *musb) |
2636 | { | 2638 | { |
2637 | usb_put_hcd(musb->hcd); | 2639 | usb_put_hcd(musb->hcd); |
2638 | } | 2640 | } |
2639 | 2641 | ||
2640 | int musb_host_setup(struct musb *musb, int power_budget) | 2642 | int musb_host_setup(struct musb *musb, int power_budget) |
2641 | { | 2643 | { |
2642 | int ret; | 2644 | int ret; |
2643 | struct usb_hcd *hcd = musb->hcd; | 2645 | struct usb_hcd *hcd = musb->hcd; |
2644 | 2646 | ||
2645 | MUSB_HST_MODE(musb); | 2647 | MUSB_HST_MODE(musb); |
2646 | musb->xceiv->otg->default_a = 1; | 2648 | musb->xceiv->otg->default_a = 1; |
2647 | musb->xceiv->state = OTG_STATE_A_IDLE; | 2649 | musb->xceiv->state = OTG_STATE_A_IDLE; |
2648 | 2650 | ||
2649 | otg_set_host(musb->xceiv->otg, &hcd->self); | 2651 | otg_set_host(musb->xceiv->otg, &hcd->self); |
2650 | hcd->self.otg_port = 1; | 2652 | hcd->self.otg_port = 1; |
2651 | musb->xceiv->otg->host = &hcd->self; | 2653 | musb->xceiv->otg->host = &hcd->self; |
2652 | hcd->power_budget = 2 * (power_budget ? : 250); | 2654 | hcd->power_budget = 2 * (power_budget ? : 250); |
2653 | 2655 | ||
2654 | ret = usb_add_hcd(hcd, 0, 0); | 2656 | ret = usb_add_hcd(hcd, 0, 0); |
2655 | if (ret < 0) | 2657 | if (ret < 0) |
2656 | return ret; | 2658 | return ret; |
2657 | 2659 | ||
2658 | return 0; | 2660 | return 0; |
2659 | } | 2661 | } |
2660 | 2662 | ||
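The power budget line in musb_host_setup() uses the GNU "a ?: b" shorthand, which yields a when a is nonzero and b otherwise, so a zero platform budget falls back to 250 before being doubled. A standalone sketch of that expression (needs GCC/Clang for the ?: extension; the sample inputs are illustrative):

#include <stdio.h>

/* Illustrative only: mirrors hcd->power_budget = 2 * (power_budget ? : 250); */
static int budget(int power_budget)
{
	return 2 * (power_budget ?: 250);
}

int main(void)
{
	printf("platform 0   -> %d\n", budget(0));	/* 500: default applied */
	printf("platform 100 -> %d\n", budget(100));	/* 200 */
	return 0;
}
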
2661 | void musb_host_resume_root_hub(struct musb *musb) | 2663 | void musb_host_resume_root_hub(struct musb *musb) |
2662 | { | 2664 | { |
2663 | usb_hcd_resume_root_hub(musb->hcd); | 2665 | usb_hcd_resume_root_hub(musb->hcd); |
2664 | } | 2666 | } |
2665 | 2667 | ||
2666 | void musb_host_poke_root_hub(struct musb *musb) | 2668 | void musb_host_poke_root_hub(struct musb *musb) |
2667 | { | 2669 | { |
2668 | MUSB_HST_MODE(musb); | 2670 | MUSB_HST_MODE(musb); |
2669 | if (musb->hcd->status_urb) | 2671 | if (musb->hcd->status_urb) |
2670 | usb_hcd_poll_rh_status(musb->hcd); | 2672 | usb_hcd_poll_rh_status(musb->hcd); |
2671 | else | 2673 | else |
2672 | usb_hcd_resume_root_hub(musb->hcd); | 2674 | usb_hcd_resume_root_hub(musb->hcd); |
2673 | } | 2675 | } |
2674 | 2676 |