Commit 032ec49f5351e9cb242b1a1c367d14415043ab95

Authored by Felipe Balbi
1 parent 4f3e8d263d

usb: musb: drop useless board_mode usage

we now always compile the driver with full OTG
capabilities, so the board_mode trick becomes
useless.

Signed-off-by: Felipe Balbi <balbi@ti.com>

Showing 11 changed files with 159 additions and 334 deletions
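
The recurring change across these hunks: with the driver now always built with full OTG support, guards such as is_otg_enabled(), is_host_enabled() and is_peripheral_enabled() are dropped and the statements they wrapped run unconditionally. A minimal before/after sketch of that pattern (illustrative only, not a hunk from this commit; the identifiers are taken from the am35x glue below):

    /* before: ID-change polling armed only when OTG was configured in */
    if (is_otg_enabled(musb))
            mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);

    /* after: OTG support is always compiled in, so the guard goes away */
    mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);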

drivers/usb/musb/am35x.c
1 /* 1 /*
2 * Texas Instruments AM35x "glue layer" 2 * Texas Instruments AM35x "glue layer"
3 * 3 *
4 * Copyright (c) 2010, by Texas Instruments 4 * Copyright (c) 2010, by Texas Instruments
5 * 5 *
6 * Based on the DA8xx "glue layer" code. 6 * Based on the DA8xx "glue layer" code.
7 * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com> 7 * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
8 * 8 *
9 * This file is part of the Inventra Controller Driver for Linux. 9 * This file is part of the Inventra Controller Driver for Linux.
10 * 10 *
11 * The Inventra Controller Driver for Linux is free software; you 11 * The Inventra Controller Driver for Linux is free software; you
12 * can redistribute it and/or modify it under the terms of the GNU 12 * can redistribute it and/or modify it under the terms of the GNU
13 * General Public License version 2 as published by the Free Software 13 * General Public License version 2 as published by the Free Software
14 * Foundation. 14 * Foundation.
15 * 15 *
16 * The Inventra Controller Driver for Linux is distributed in 16 * The Inventra Controller Driver for Linux is distributed in
17 * the hope that it will be useful, but WITHOUT ANY WARRANTY; 17 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
18 * without even the implied warranty of MERCHANTABILITY or 18 * without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public 19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
20 * License for more details. 20 * License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
23 * along with The Inventra Controller Driver for Linux ; if not, 23 * along with The Inventra Controller Driver for Linux ; if not,
24 * write to the Free Software Foundation, Inc., 59 Temple Place, 24 * write to the Free Software Foundation, Inc., 59 Temple Place,
25 * Suite 330, Boston, MA 02111-1307 USA 25 * Suite 330, Boston, MA 02111-1307 USA
26 * 26 *
27 */ 27 */
28 28
29 #include <linux/init.h> 29 #include <linux/init.h>
30 #include <linux/module.h> 30 #include <linux/module.h>
31 #include <linux/clk.h> 31 #include <linux/clk.h>
32 #include <linux/err.h> 32 #include <linux/err.h>
33 #include <linux/io.h> 33 #include <linux/io.h>
34 #include <linux/platform_device.h> 34 #include <linux/platform_device.h>
35 #include <linux/dma-mapping.h> 35 #include <linux/dma-mapping.h>
36 36
37 #include <plat/usb.h> 37 #include <plat/usb.h>
38 38
39 #include "musb_core.h" 39 #include "musb_core.h"
40 40
41 /* 41 /*
42 * AM35x specific definitions 42 * AM35x specific definitions
43 */ 43 */
44 /* USB 2.0 OTG module registers */ 44 /* USB 2.0 OTG module registers */
45 #define USB_REVISION_REG 0x00 45 #define USB_REVISION_REG 0x00
46 #define USB_CTRL_REG 0x04 46 #define USB_CTRL_REG 0x04
47 #define USB_STAT_REG 0x08 47 #define USB_STAT_REG 0x08
48 #define USB_EMULATION_REG 0x0c 48 #define USB_EMULATION_REG 0x0c
49 /* 0x10 Reserved */ 49 /* 0x10 Reserved */
50 #define USB_AUTOREQ_REG 0x14 50 #define USB_AUTOREQ_REG 0x14
51 #define USB_SRP_FIX_TIME_REG 0x18 51 #define USB_SRP_FIX_TIME_REG 0x18
52 #define USB_TEARDOWN_REG 0x1c 52 #define USB_TEARDOWN_REG 0x1c
53 #define EP_INTR_SRC_REG 0x20 53 #define EP_INTR_SRC_REG 0x20
54 #define EP_INTR_SRC_SET_REG 0x24 54 #define EP_INTR_SRC_SET_REG 0x24
55 #define EP_INTR_SRC_CLEAR_REG 0x28 55 #define EP_INTR_SRC_CLEAR_REG 0x28
56 #define EP_INTR_MASK_REG 0x2c 56 #define EP_INTR_MASK_REG 0x2c
57 #define EP_INTR_MASK_SET_REG 0x30 57 #define EP_INTR_MASK_SET_REG 0x30
58 #define EP_INTR_MASK_CLEAR_REG 0x34 58 #define EP_INTR_MASK_CLEAR_REG 0x34
59 #define EP_INTR_SRC_MASKED_REG 0x38 59 #define EP_INTR_SRC_MASKED_REG 0x38
60 #define CORE_INTR_SRC_REG 0x40 60 #define CORE_INTR_SRC_REG 0x40
61 #define CORE_INTR_SRC_SET_REG 0x44 61 #define CORE_INTR_SRC_SET_REG 0x44
62 #define CORE_INTR_SRC_CLEAR_REG 0x48 62 #define CORE_INTR_SRC_CLEAR_REG 0x48
63 #define CORE_INTR_MASK_REG 0x4c 63 #define CORE_INTR_MASK_REG 0x4c
64 #define CORE_INTR_MASK_SET_REG 0x50 64 #define CORE_INTR_MASK_SET_REG 0x50
65 #define CORE_INTR_MASK_CLEAR_REG 0x54 65 #define CORE_INTR_MASK_CLEAR_REG 0x54
66 #define CORE_INTR_SRC_MASKED_REG 0x58 66 #define CORE_INTR_SRC_MASKED_REG 0x58
67 /* 0x5c Reserved */ 67 /* 0x5c Reserved */
68 #define USB_END_OF_INTR_REG 0x60 68 #define USB_END_OF_INTR_REG 0x60
69 69
70 /* Control register bits */ 70 /* Control register bits */
71 #define AM35X_SOFT_RESET_MASK 1 71 #define AM35X_SOFT_RESET_MASK 1
72 72
73 /* USB interrupt register bits */ 73 /* USB interrupt register bits */
74 #define AM35X_INTR_USB_SHIFT 16 74 #define AM35X_INTR_USB_SHIFT 16
75 #define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT) 75 #define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT)
76 #define AM35X_INTR_DRVVBUS 0x100 76 #define AM35X_INTR_DRVVBUS 0x100
77 #define AM35X_INTR_RX_SHIFT 16 77 #define AM35X_INTR_RX_SHIFT 16
78 #define AM35X_INTR_TX_SHIFT 0 78 #define AM35X_INTR_TX_SHIFT 0
79 #define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */ 79 #define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */
80 #define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */ 80 #define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */
81 #define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT) 81 #define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
82 #define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT) 82 #define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)
83 83
84 #define USB_MENTOR_CORE_OFFSET 0x400 84 #define USB_MENTOR_CORE_OFFSET 0x400
85 85
86 struct am35x_glue { 86 struct am35x_glue {
87 struct device *dev; 87 struct device *dev;
88 struct platform_device *musb; 88 struct platform_device *musb;
89 struct clk *phy_clk; 89 struct clk *phy_clk;
90 struct clk *clk; 90 struct clk *clk;
91 }; 91 };
92 #define glue_to_musb(g) platform_get_drvdata(g->musb) 92 #define glue_to_musb(g) platform_get_drvdata(g->musb)
93 93
94 /* 94 /*
95 * am35x_musb_enable - enable interrupts 95 * am35x_musb_enable - enable interrupts
96 */ 96 */
97 static void am35x_musb_enable(struct musb *musb) 97 static void am35x_musb_enable(struct musb *musb)
98 { 98 {
99 void __iomem *reg_base = musb->ctrl_base; 99 void __iomem *reg_base = musb->ctrl_base;
100 u32 epmask; 100 u32 epmask;
101 101
102 /* Workaround: setup IRQs through both register sets. */ 102 /* Workaround: setup IRQs through both register sets. */
103 epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) | 103 epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
104 ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT); 104 ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);
105 105
106 musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask); 106 musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
107 musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK); 107 musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);
108 108
109 /* Force the DRVVBUS IRQ so we can start polling for ID change. */ 109 /* Force the DRVVBUS IRQ so we can start polling for ID change. */
110 if (is_otg_enabled(musb)) 110 musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
111 musb_writel(reg_base, CORE_INTR_SRC_SET_REG, 111 AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
112 AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
113 } 112 }
114 113
115 /* 114 /*
116 * am35x_musb_disable - disable HDRC and flush interrupts 115 * am35x_musb_disable - disable HDRC and flush interrupts
117 */ 116 */
118 static void am35x_musb_disable(struct musb *musb) 117 static void am35x_musb_disable(struct musb *musb)
119 { 118 {
120 void __iomem *reg_base = musb->ctrl_base; 119 void __iomem *reg_base = musb->ctrl_base;
121 120
122 musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK); 121 musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
123 musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG, 122 musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
124 AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK); 123 AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
125 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 124 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
126 musb_writel(reg_base, USB_END_OF_INTR_REG, 0); 125 musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
127 } 126 }
128 127
129 #define portstate(stmt) stmt 128 #define portstate(stmt) stmt
130 129
131 static void am35x_musb_set_vbus(struct musb *musb, int is_on) 130 static void am35x_musb_set_vbus(struct musb *musb, int is_on)
132 { 131 {
133 WARN_ON(is_on && is_peripheral_active(musb)); 132 WARN_ON(is_on && is_peripheral_active(musb));
134 } 133 }
135 134
136 #define POLL_SECONDS 2 135 #define POLL_SECONDS 2
137 136
138 static struct timer_list otg_workaround; 137 static struct timer_list otg_workaround;
139 138
140 static void otg_timer(unsigned long _musb) 139 static void otg_timer(unsigned long _musb)
141 { 140 {
142 struct musb *musb = (void *)_musb; 141 struct musb *musb = (void *)_musb;
143 void __iomem *mregs = musb->mregs; 142 void __iomem *mregs = musb->mregs;
144 u8 devctl; 143 u8 devctl;
145 unsigned long flags; 144 unsigned long flags;
146 145
147 /* 146 /*
148 * We poll because AM35x's won't expose several OTG-critical 147 * We poll because AM35x's won't expose several OTG-critical
149 * status change events (from the transceiver) otherwise. 148 * status change events (from the transceiver) otherwise.
150 */ 149 */
151 devctl = musb_readb(mregs, MUSB_DEVCTL); 150 devctl = musb_readb(mregs, MUSB_DEVCTL);
152 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, 151 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
153 otg_state_string(musb->xceiv->state)); 152 otg_state_string(musb->xceiv->state));
154 153
155 spin_lock_irqsave(&musb->lock, flags); 154 spin_lock_irqsave(&musb->lock, flags);
156 switch (musb->xceiv->state) { 155 switch (musb->xceiv->state) {
157 case OTG_STATE_A_WAIT_BCON: 156 case OTG_STATE_A_WAIT_BCON:
158 devctl &= ~MUSB_DEVCTL_SESSION; 157 devctl &= ~MUSB_DEVCTL_SESSION;
159 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 158 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
160 159
161 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 160 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
162 if (devctl & MUSB_DEVCTL_BDEVICE) { 161 if (devctl & MUSB_DEVCTL_BDEVICE) {
163 musb->xceiv->state = OTG_STATE_B_IDLE; 162 musb->xceiv->state = OTG_STATE_B_IDLE;
164 MUSB_DEV_MODE(musb); 163 MUSB_DEV_MODE(musb);
165 } else { 164 } else {
166 musb->xceiv->state = OTG_STATE_A_IDLE; 165 musb->xceiv->state = OTG_STATE_A_IDLE;
167 MUSB_HST_MODE(musb); 166 MUSB_HST_MODE(musb);
168 } 167 }
169 break; 168 break;
170 case OTG_STATE_A_WAIT_VFALL: 169 case OTG_STATE_A_WAIT_VFALL:
171 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 170 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
172 musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG, 171 musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
173 MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT); 172 MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
174 break; 173 break;
175 case OTG_STATE_B_IDLE: 174 case OTG_STATE_B_IDLE:
176 if (!is_peripheral_enabled(musb))
177 break;
178
179 devctl = musb_readb(mregs, MUSB_DEVCTL); 175 devctl = musb_readb(mregs, MUSB_DEVCTL);
180 if (devctl & MUSB_DEVCTL_BDEVICE) 176 if (devctl & MUSB_DEVCTL_BDEVICE)
181 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 177 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
182 else 178 else
183 musb->xceiv->state = OTG_STATE_A_IDLE; 179 musb->xceiv->state = OTG_STATE_A_IDLE;
184 break; 180 break;
185 default: 181 default:
186 break; 182 break;
187 } 183 }
188 spin_unlock_irqrestore(&musb->lock, flags); 184 spin_unlock_irqrestore(&musb->lock, flags);
189 } 185 }
190 186
191 static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout) 187 static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
192 { 188 {
193 static unsigned long last_timer; 189 static unsigned long last_timer;
194 190
195 if (!is_otg_enabled(musb))
196 return;
197
198 if (timeout == 0) 191 if (timeout == 0)
199 timeout = jiffies + msecs_to_jiffies(3); 192 timeout = jiffies + msecs_to_jiffies(3);
200 193
201 /* Never idle if active, or when VBUS timeout is not set as host */ 194 /* Never idle if active, or when VBUS timeout is not set as host */
202 if (musb->is_active || (musb->a_wait_bcon == 0 && 195 if (musb->is_active || (musb->a_wait_bcon == 0 &&
203 musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { 196 musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
204 dev_dbg(musb->controller, "%s active, deleting timer\n", 197 dev_dbg(musb->controller, "%s active, deleting timer\n",
205 otg_state_string(musb->xceiv->state)); 198 otg_state_string(musb->xceiv->state));
206 del_timer(&otg_workaround); 199 del_timer(&otg_workaround);
207 last_timer = jiffies; 200 last_timer = jiffies;
208 return; 201 return;
209 } 202 }
210 203
211 if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) { 204 if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
212 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); 205 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
213 return; 206 return;
214 } 207 }
215 last_timer = timeout; 208 last_timer = timeout;
216 209
217 dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", 210 dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
218 otg_state_string(musb->xceiv->state), 211 otg_state_string(musb->xceiv->state),
219 jiffies_to_msecs(timeout - jiffies)); 212 jiffies_to_msecs(timeout - jiffies));
220 mod_timer(&otg_workaround, timeout); 213 mod_timer(&otg_workaround, timeout);
221 } 214 }
222 215
223 static irqreturn_t am35x_musb_interrupt(int irq, void *hci) 216 static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
224 { 217 {
225 struct musb *musb = hci; 218 struct musb *musb = hci;
226 void __iomem *reg_base = musb->ctrl_base; 219 void __iomem *reg_base = musb->ctrl_base;
227 struct device *dev = musb->controller; 220 struct device *dev = musb->controller;
228 struct musb_hdrc_platform_data *plat = dev->platform_data; 221 struct musb_hdrc_platform_data *plat = dev->platform_data;
229 struct omap_musb_board_data *data = plat->board_data; 222 struct omap_musb_board_data *data = plat->board_data;
230 struct usb_otg *otg = musb->xceiv->otg; 223 struct usb_otg *otg = musb->xceiv->otg;
231 unsigned long flags; 224 unsigned long flags;
232 irqreturn_t ret = IRQ_NONE; 225 irqreturn_t ret = IRQ_NONE;
233 u32 epintr, usbintr; 226 u32 epintr, usbintr;
234 227
235 spin_lock_irqsave(&musb->lock, flags); 228 spin_lock_irqsave(&musb->lock, flags);
236 229
237 /* Get endpoint interrupts */ 230 /* Get endpoint interrupts */
238 epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG); 231 epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);
239 232
240 if (epintr) { 233 if (epintr) {
241 musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr); 234 musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);
242 235
243 musb->int_rx = 236 musb->int_rx =
244 (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT; 237 (epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
245 musb->int_tx = 238 musb->int_tx =
246 (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT; 239 (epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
247 } 240 }
248 241
249 /* Get usb core interrupts */ 242 /* Get usb core interrupts */
250 usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG); 243 usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
251 if (!usbintr && !epintr) 244 if (!usbintr && !epintr)
252 goto eoi; 245 goto eoi;
253 246
254 if (usbintr) { 247 if (usbintr) {
255 musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr); 248 musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);
256 249
257 musb->int_usb = 250 musb->int_usb =
258 (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT; 251 (usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
259 } 252 }
260 /* 253 /*
261 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for 254 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
262 * AM35x's missing ID change IRQ. We need an ID change IRQ to 255 * AM35x's missing ID change IRQ. We need an ID change IRQ to
263 * switch appropriately between halves of the OTG state machine. 256 * switch appropriately between halves of the OTG state machine.
264 * Managing DEVCTL.SESSION per Mentor docs requires that we know its 257 * Managing DEVCTL.SESSION per Mentor docs requires that we know its
265 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. 258 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
266 * Also, DRVVBUS pulses for SRP (but not at 5V) ... 259 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
267 */ 260 */
268 if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) { 261 if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
269 int drvvbus = musb_readl(reg_base, USB_STAT_REG); 262 int drvvbus = musb_readl(reg_base, USB_STAT_REG);
270 void __iomem *mregs = musb->mregs; 263 void __iomem *mregs = musb->mregs;
271 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 264 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
272 int err; 265 int err;
273 266
274 err = is_host_enabled(musb) && (musb->int_usb & 267 err = musb->int_usb & MUSB_INTR_VBUSERROR;
275 MUSB_INTR_VBUSERROR);
276 if (err) { 268 if (err) {
277 /* 269 /*
278 * The Mentor core doesn't debounce VBUS as needed 270 * The Mentor core doesn't debounce VBUS as needed
279 * to cope with device connect current spikes. This 271 * to cope with device connect current spikes. This
280 * means it's not uncommon for bus-powered devices 272 * means it's not uncommon for bus-powered devices
281 * to get VBUS errors during enumeration. 273 * to get VBUS errors during enumeration.
282 * 274 *
283 * This is a workaround, but newer RTL from Mentor 275 * This is a workaround, but newer RTL from Mentor
284 * seems to allow a better one: "re"-starting sessions 276 * seems to allow a better one: "re"-starting sessions
285 * without waiting for VBUS to stop registering in 277 * without waiting for VBUS to stop registering in
286 * devctl. 278 * devctl.
287 */ 279 */
288 musb->int_usb &= ~MUSB_INTR_VBUSERROR; 280 musb->int_usb &= ~MUSB_INTR_VBUSERROR;
289 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 281 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
290 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 282 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
291 WARNING("VBUS error workaround (delay coming)\n"); 283 WARNING("VBUS error workaround (delay coming)\n");
292 } else if (is_host_enabled(musb) && drvvbus) { 284 } else if (drvvbus) {
293 MUSB_HST_MODE(musb); 285 MUSB_HST_MODE(musb);
294 otg->default_a = 1; 286 otg->default_a = 1;
295 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 287 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
296 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 288 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
297 del_timer(&otg_workaround); 289 del_timer(&otg_workaround);
298 } else { 290 } else {
299 musb->is_active = 0; 291 musb->is_active = 0;
300 MUSB_DEV_MODE(musb); 292 MUSB_DEV_MODE(musb);
301 otg->default_a = 0; 293 otg->default_a = 0;
302 musb->xceiv->state = OTG_STATE_B_IDLE; 294 musb->xceiv->state = OTG_STATE_B_IDLE;
303 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); 295 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
304 } 296 }
305 297
306 /* NOTE: this must complete power-on within 100 ms. */ 298 /* NOTE: this must complete power-on within 100 ms. */
307 dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", 299 dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
308 drvvbus ? "on" : "off", 300 drvvbus ? "on" : "off",
309 otg_state_string(musb->xceiv->state), 301 otg_state_string(musb->xceiv->state),
310 err ? " ERROR" : "", 302 err ? " ERROR" : "",
311 devctl); 303 devctl);
312 ret = IRQ_HANDLED; 304 ret = IRQ_HANDLED;
313 } 305 }
314 306
315 if (musb->int_tx || musb->int_rx || musb->int_usb) 307 if (musb->int_tx || musb->int_rx || musb->int_usb)
316 ret |= musb_interrupt(musb); 308 ret |= musb_interrupt(musb);
317 309
318 eoi: 310 eoi:
319 /* EOI needs to be written for the IRQ to be re-asserted. */ 311 /* EOI needs to be written for the IRQ to be re-asserted. */
320 if (ret == IRQ_HANDLED || epintr || usbintr) { 312 if (ret == IRQ_HANDLED || epintr || usbintr) {
321 /* clear level interrupt */ 313 /* clear level interrupt */
322 if (data->clear_irq) 314 if (data->clear_irq)
323 data->clear_irq(); 315 data->clear_irq();
324 /* write EOI */ 316 /* write EOI */
325 musb_writel(reg_base, USB_END_OF_INTR_REG, 0); 317 musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
326 } 318 }
327 319
328 /* Poll for ID change */ 320 /* Poll for ID change */
329 if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) 321 if (musb->xceiv->state == OTG_STATE_B_IDLE)
330 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 322 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
331 323
332 spin_unlock_irqrestore(&musb->lock, flags); 324 spin_unlock_irqrestore(&musb->lock, flags);
333 325
334 return ret; 326 return ret;
335 } 327 }
336 328
337 static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode) 329 static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
338 { 330 {
339 struct device *dev = musb->controller; 331 struct device *dev = musb->controller;
340 struct musb_hdrc_platform_data *plat = dev->platform_data; 332 struct musb_hdrc_platform_data *plat = dev->platform_data;
341 struct omap_musb_board_data *data = plat->board_data; 333 struct omap_musb_board_data *data = plat->board_data;
342 int retval = 0; 334 int retval = 0;
343 335
344 if (data->set_mode) 336 if (data->set_mode)
345 data->set_mode(musb_mode); 337 data->set_mode(musb_mode);
346 else 338 else
347 retval = -EIO; 339 retval = -EIO;
348 340
349 return retval; 341 return retval;
350 } 342 }
351 343
352 static int am35x_musb_init(struct musb *musb) 344 static int am35x_musb_init(struct musb *musb)
353 { 345 {
354 struct device *dev = musb->controller; 346 struct device *dev = musb->controller;
355 struct musb_hdrc_platform_data *plat = dev->platform_data; 347 struct musb_hdrc_platform_data *plat = dev->platform_data;
356 struct omap_musb_board_data *data = plat->board_data; 348 struct omap_musb_board_data *data = plat->board_data;
357 void __iomem *reg_base = musb->ctrl_base; 349 void __iomem *reg_base = musb->ctrl_base;
358 u32 rev; 350 u32 rev;
359 351
360 musb->mregs += USB_MENTOR_CORE_OFFSET; 352 musb->mregs += USB_MENTOR_CORE_OFFSET;
361 353
362 /* Returns zero if e.g. not clocked */ 354 /* Returns zero if e.g. not clocked */
363 rev = musb_readl(reg_base, USB_REVISION_REG); 355 rev = musb_readl(reg_base, USB_REVISION_REG);
364 if (!rev) 356 if (!rev)
365 return -ENODEV; 357 return -ENODEV;
366 358
367 usb_nop_xceiv_register(); 359 usb_nop_xceiv_register();
368 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 360 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
369 if (IS_ERR_OR_NULL(musb->xceiv)) 361 if (IS_ERR_OR_NULL(musb->xceiv))
370 return -ENODEV; 362 return -ENODEV;
371 363
372 if (is_host_enabled(musb)) 364 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
373 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
374 365
375 /* Reset the musb */ 366 /* Reset the musb */
376 if (data->reset) 367 if (data->reset)
377 data->reset(); 368 data->reset();
378 369
379 /* Reset the controller */ 370 /* Reset the controller */
380 musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK); 371 musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);
381 372
382 /* Start the on-chip PHY and its PLL. */ 373 /* Start the on-chip PHY and its PLL. */
383 if (data->set_phy_power) 374 if (data->set_phy_power)
384 data->set_phy_power(1); 375 data->set_phy_power(1);
385 376
386 msleep(5); 377 msleep(5);
387 378
388 musb->isr = am35x_musb_interrupt; 379 musb->isr = am35x_musb_interrupt;
389 380
390 /* clear level interrupt */ 381 /* clear level interrupt */
391 if (data->clear_irq) 382 if (data->clear_irq)
392 data->clear_irq(); 383 data->clear_irq();
393 384
394 return 0; 385 return 0;
395 } 386 }
396 387
397 static int am35x_musb_exit(struct musb *musb) 388 static int am35x_musb_exit(struct musb *musb)
398 { 389 {
399 struct device *dev = musb->controller; 390 struct device *dev = musb->controller;
400 struct musb_hdrc_platform_data *plat = dev->platform_data; 391 struct musb_hdrc_platform_data *plat = dev->platform_data;
401 struct omap_musb_board_data *data = plat->board_data; 392 struct omap_musb_board_data *data = plat->board_data;
402 393
403 if (is_host_enabled(musb)) 394 del_timer_sync(&otg_workaround);
404 del_timer_sync(&otg_workaround);
405 395
406 /* Shutdown the on-chip PHY and its PLL. */ 396 /* Shutdown the on-chip PHY and its PLL. */
407 if (data->set_phy_power) 397 if (data->set_phy_power)
408 data->set_phy_power(0); 398 data->set_phy_power(0);
409 399
410 usb_put_phy(musb->xceiv); 400 usb_put_phy(musb->xceiv);
411 usb_nop_xceiv_unregister(); 401 usb_nop_xceiv_unregister();
412 402
413 return 0; 403 return 0;
414 } 404 }
415 405
416 /* AM35x supports only 32bit read operation */ 406 /* AM35x supports only 32bit read operation */
417 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) 407 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
418 { 408 {
419 void __iomem *fifo = hw_ep->fifo; 409 void __iomem *fifo = hw_ep->fifo;
420 u32 val; 410 u32 val;
421 int i; 411 int i;
422 412
423 /* Read for 32bit-aligned destination address */ 413 /* Read for 32bit-aligned destination address */
424 if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) { 414 if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
425 readsl(fifo, dst, len >> 2); 415 readsl(fifo, dst, len >> 2);
426 dst += len & ~0x03; 416 dst += len & ~0x03;
427 len &= 0x03; 417 len &= 0x03;
428 } 418 }
429 /* 419 /*
430 * Now read the remaining 1 to 3 byte or complete length if 420 * Now read the remaining 1 to 3 byte or complete length if
431 * unaligned address. 421 * unaligned address.
432 */ 422 */
433 if (len > 4) { 423 if (len > 4) {
434 for (i = 0; i < (len >> 2); i++) { 424 for (i = 0; i < (len >> 2); i++) {
435 *(u32 *) dst = musb_readl(fifo, 0); 425 *(u32 *) dst = musb_readl(fifo, 0);
436 dst += 4; 426 dst += 4;
437 } 427 }
438 len &= 0x03; 428 len &= 0x03;
439 } 429 }
440 if (len > 0) { 430 if (len > 0) {
441 val = musb_readl(fifo, 0); 431 val = musb_readl(fifo, 0);
442 memcpy(dst, &val, len); 432 memcpy(dst, &val, len);
443 } 433 }
444 } 434 }
445 435
446 static const struct musb_platform_ops am35x_ops = { 436 static const struct musb_platform_ops am35x_ops = {
447 .init = am35x_musb_init, 437 .init = am35x_musb_init,
448 .exit = am35x_musb_exit, 438 .exit = am35x_musb_exit,
449 439
450 .enable = am35x_musb_enable, 440 .enable = am35x_musb_enable,
451 .disable = am35x_musb_disable, 441 .disable = am35x_musb_disable,
452 442
453 .set_mode = am35x_musb_set_mode, 443 .set_mode = am35x_musb_set_mode,
454 .try_idle = am35x_musb_try_idle, 444 .try_idle = am35x_musb_try_idle,
455 445
456 .set_vbus = am35x_musb_set_vbus, 446 .set_vbus = am35x_musb_set_vbus,
457 }; 447 };
458 448
459 static u64 am35x_dmamask = DMA_BIT_MASK(32); 449 static u64 am35x_dmamask = DMA_BIT_MASK(32);
460 450
461 static int __devinit am35x_probe(struct platform_device *pdev) 451 static int __devinit am35x_probe(struct platform_device *pdev)
462 { 452 {
463 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 453 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
464 struct platform_device *musb; 454 struct platform_device *musb;
465 struct am35x_glue *glue; 455 struct am35x_glue *glue;
466 456
467 struct clk *phy_clk; 457 struct clk *phy_clk;
468 struct clk *clk; 458 struct clk *clk;
469 459
470 int ret = -ENOMEM; 460 int ret = -ENOMEM;
471 461
472 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 462 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
473 if (!glue) { 463 if (!glue) {
474 dev_err(&pdev->dev, "failed to allocate glue context\n"); 464 dev_err(&pdev->dev, "failed to allocate glue context\n");
475 goto err0; 465 goto err0;
476 } 466 }
477 467
478 musb = platform_device_alloc("musb-hdrc", -1); 468 musb = platform_device_alloc("musb-hdrc", -1);
479 if (!musb) { 469 if (!musb) {
480 dev_err(&pdev->dev, "failed to allocate musb device\n"); 470 dev_err(&pdev->dev, "failed to allocate musb device\n");
481 goto err1; 471 goto err1;
482 } 472 }
483 473
484 phy_clk = clk_get(&pdev->dev, "fck"); 474 phy_clk = clk_get(&pdev->dev, "fck");
485 if (IS_ERR(phy_clk)) { 475 if (IS_ERR(phy_clk)) {
486 dev_err(&pdev->dev, "failed to get PHY clock\n"); 476 dev_err(&pdev->dev, "failed to get PHY clock\n");
487 ret = PTR_ERR(phy_clk); 477 ret = PTR_ERR(phy_clk);
488 goto err2; 478 goto err2;
489 } 479 }
490 480
491 clk = clk_get(&pdev->dev, "ick"); 481 clk = clk_get(&pdev->dev, "ick");
492 if (IS_ERR(clk)) { 482 if (IS_ERR(clk)) {
493 dev_err(&pdev->dev, "failed to get clock\n"); 483 dev_err(&pdev->dev, "failed to get clock\n");
494 ret = PTR_ERR(clk); 484 ret = PTR_ERR(clk);
495 goto err3; 485 goto err3;
496 } 486 }
497 487
498 ret = clk_enable(phy_clk); 488 ret = clk_enable(phy_clk);
499 if (ret) { 489 if (ret) {
500 dev_err(&pdev->dev, "failed to enable PHY clock\n"); 490 dev_err(&pdev->dev, "failed to enable PHY clock\n");
501 goto err4; 491 goto err4;
502 } 492 }
503 493
504 ret = clk_enable(clk); 494 ret = clk_enable(clk);
505 if (ret) { 495 if (ret) {
506 dev_err(&pdev->dev, "failed to enable clock\n"); 496 dev_err(&pdev->dev, "failed to enable clock\n");
507 goto err5; 497 goto err5;
508 } 498 }
509 499
510 musb->dev.parent = &pdev->dev; 500 musb->dev.parent = &pdev->dev;
511 musb->dev.dma_mask = &am35x_dmamask; 501 musb->dev.dma_mask = &am35x_dmamask;
512 musb->dev.coherent_dma_mask = am35x_dmamask; 502 musb->dev.coherent_dma_mask = am35x_dmamask;
513 503
514 glue->dev = &pdev->dev; 504 glue->dev = &pdev->dev;
515 glue->musb = musb; 505 glue->musb = musb;
516 glue->phy_clk = phy_clk; 506 glue->phy_clk = phy_clk;
517 glue->clk = clk; 507 glue->clk = clk;
518 508
519 pdata->platform_ops = &am35x_ops; 509 pdata->platform_ops = &am35x_ops;
520 510
521 platform_set_drvdata(pdev, glue); 511 platform_set_drvdata(pdev, glue);
522 512
523 ret = platform_device_add_resources(musb, pdev->resource, 513 ret = platform_device_add_resources(musb, pdev->resource,
524 pdev->num_resources); 514 pdev->num_resources);
525 if (ret) { 515 if (ret) {
526 dev_err(&pdev->dev, "failed to add resources\n"); 516 dev_err(&pdev->dev, "failed to add resources\n");
527 goto err6; 517 goto err6;
528 } 518 }
529 519
530 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 520 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
531 if (ret) { 521 if (ret) {
532 dev_err(&pdev->dev, "failed to add platform_data\n"); 522 dev_err(&pdev->dev, "failed to add platform_data\n");
533 goto err6; 523 goto err6;
534 } 524 }
535 525
536 ret = platform_device_add(musb); 526 ret = platform_device_add(musb);
537 if (ret) { 527 if (ret) {
538 dev_err(&pdev->dev, "failed to register musb device\n"); 528 dev_err(&pdev->dev, "failed to register musb device\n");
539 goto err6; 529 goto err6;
540 } 530 }
541 531
542 return 0; 532 return 0;
543 533
544 err6: 534 err6:
545 clk_disable(clk); 535 clk_disable(clk);
546 536
547 err5: 537 err5:
548 clk_disable(phy_clk); 538 clk_disable(phy_clk);
549 539
550 err4: 540 err4:
551 clk_put(clk); 541 clk_put(clk);
552 542
553 err3: 543 err3:
554 clk_put(phy_clk); 544 clk_put(phy_clk);
555 545
556 err2: 546 err2:
557 platform_device_put(musb); 547 platform_device_put(musb);
558 548
559 err1: 549 err1:
560 kfree(glue); 550 kfree(glue);
561 551
562 err0: 552 err0:
563 return ret; 553 return ret;
564 } 554 }
565 555
566 static int __devexit am35x_remove(struct platform_device *pdev) 556 static int __devexit am35x_remove(struct platform_device *pdev)
567 { 557 {
568 struct am35x_glue *glue = platform_get_drvdata(pdev); 558 struct am35x_glue *glue = platform_get_drvdata(pdev);
569 559
570 platform_device_del(glue->musb); 560 platform_device_del(glue->musb);
571 platform_device_put(glue->musb); 561 platform_device_put(glue->musb);
572 clk_disable(glue->clk); 562 clk_disable(glue->clk);
573 clk_disable(glue->phy_clk); 563 clk_disable(glue->phy_clk);
574 clk_put(glue->clk); 564 clk_put(glue->clk);
575 clk_put(glue->phy_clk); 565 clk_put(glue->phy_clk);
576 kfree(glue); 566 kfree(glue);
577 567
578 return 0; 568 return 0;
579 } 569 }
580 570
581 #ifdef CONFIG_PM 571 #ifdef CONFIG_PM
582 static int am35x_suspend(struct device *dev) 572 static int am35x_suspend(struct device *dev)
583 { 573 {
584 struct am35x_glue *glue = dev_get_drvdata(dev); 574 struct am35x_glue *glue = dev_get_drvdata(dev);
585 struct musb_hdrc_platform_data *plat = dev->platform_data; 575 struct musb_hdrc_platform_data *plat = dev->platform_data;
586 struct omap_musb_board_data *data = plat->board_data; 576 struct omap_musb_board_data *data = plat->board_data;
587 577
588 /* Shutdown the on-chip PHY and its PLL. */ 578 /* Shutdown the on-chip PHY and its PLL. */
589 if (data->set_phy_power) 579 if (data->set_phy_power)
590 data->set_phy_power(0); 580 data->set_phy_power(0);
591 581
592 clk_disable(glue->phy_clk); 582 clk_disable(glue->phy_clk);
593 clk_disable(glue->clk); 583 clk_disable(glue->clk);
594 584
595 return 0; 585 return 0;
596 } 586 }
597 587
598 static int am35x_resume(struct device *dev) 588 static int am35x_resume(struct device *dev)
599 { 589 {
600 struct am35x_glue *glue = dev_get_drvdata(dev); 590 struct am35x_glue *glue = dev_get_drvdata(dev);
601 struct musb_hdrc_platform_data *plat = dev->platform_data; 591 struct musb_hdrc_platform_data *plat = dev->platform_data;
602 struct omap_musb_board_data *data = plat->board_data; 592 struct omap_musb_board_data *data = plat->board_data;
603 int ret; 593 int ret;
604 594
605 /* Start the on-chip PHY and its PLL. */ 595 /* Start the on-chip PHY and its PLL. */
606 if (data->set_phy_power) 596 if (data->set_phy_power)
607 data->set_phy_power(1); 597 data->set_phy_power(1);
608 598
609 ret = clk_enable(glue->phy_clk); 599 ret = clk_enable(glue->phy_clk);
610 if (ret) { 600 if (ret) {
611 dev_err(dev, "failed to enable PHY clock\n"); 601 dev_err(dev, "failed to enable PHY clock\n");
612 return ret; 602 return ret;
613 } 603 }
614 604
615 ret = clk_enable(glue->clk); 605 ret = clk_enable(glue->clk);
616 if (ret) { 606 if (ret) {
617 dev_err(dev, "failed to enable clock\n"); 607 dev_err(dev, "failed to enable clock\n");
618 return ret; 608 return ret;
619 } 609 }
620 610
621 return 0; 611 return 0;
622 } 612 }
623 613
624 static struct dev_pm_ops am35x_pm_ops = { 614 static struct dev_pm_ops am35x_pm_ops = {
625 .suspend = am35x_suspend, 615 .suspend = am35x_suspend,
626 .resume = am35x_resume, 616 .resume = am35x_resume,
627 }; 617 };
628 618
629 #define DEV_PM_OPS &am35x_pm_ops 619 #define DEV_PM_OPS &am35x_pm_ops
630 #else 620 #else
631 #define DEV_PM_OPS NULL 621 #define DEV_PM_OPS NULL
632 #endif 622 #endif
633 623
634 static struct platform_driver am35x_driver = { 624 static struct platform_driver am35x_driver = {
635 .probe = am35x_probe, 625 .probe = am35x_probe,
636 .remove = __devexit_p(am35x_remove), 626 .remove = __devexit_p(am35x_remove),
637 .driver = { 627 .driver = {
638 .name = "musb-am35x", 628 .name = "musb-am35x",
639 .pm = DEV_PM_OPS, 629 .pm = DEV_PM_OPS,
640 }, 630 },
641 }; 631 };
642 632
643 MODULE_DESCRIPTION("AM35x MUSB Glue Layer"); 633 MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
644 MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>"); 634 MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
645 MODULE_LICENSE("GPL v2"); 635 MODULE_LICENSE("GPL v2");
646 636
647 static int __init am35x_init(void) 637 static int __init am35x_init(void)
648 { 638 {
649 return platform_driver_register(&am35x_driver); 639 return platform_driver_register(&am35x_driver);
650 } 640 }
651 module_init(am35x_init); 641 module_init(am35x_init);
652 642
653 static void __exit am35x_exit(void) 643 static void __exit am35x_exit(void)
654 { 644 {
655 platform_driver_unregister(&am35x_driver); 645 platform_driver_unregister(&am35x_driver);
656 } 646 }
657 module_exit(am35x_exit); 647 module_exit(am35x_exit);
658 648
drivers/usb/musb/blackfin.c
1 /* 1 /*
2 * MUSB OTG controller driver for Blackfin Processors 2 * MUSB OTG controller driver for Blackfin Processors
3 * 3 *
4 * Copyright 2006-2008 Analog Devices Inc. 4 * Copyright 2006-2008 Analog Devices Inc.
5 * 5 *
6 * Enter bugs at http://blackfin.uclinux.org/ 6 * Enter bugs at http://blackfin.uclinux.org/
7 * 7 *
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10 10
11 #include <linux/module.h> 11 #include <linux/module.h>
12 #include <linux/kernel.h> 12 #include <linux/kernel.h>
13 #include <linux/sched.h> 13 #include <linux/sched.h>
14 #include <linux/init.h> 14 #include <linux/init.h>
15 #include <linux/list.h> 15 #include <linux/list.h>
16 #include <linux/gpio.h> 16 #include <linux/gpio.h>
17 #include <linux/io.h> 17 #include <linux/io.h>
18 #include <linux/err.h> 18 #include <linux/err.h>
19 #include <linux/platform_device.h> 19 #include <linux/platform_device.h>
20 #include <linux/dma-mapping.h> 20 #include <linux/dma-mapping.h>
21 #include <linux/prefetch.h> 21 #include <linux/prefetch.h>
22 22
23 #include <asm/cacheflush.h> 23 #include <asm/cacheflush.h>
24 24
25 #include "musb_core.h" 25 #include "musb_core.h"
26 #include "musbhsdma.h" 26 #include "musbhsdma.h"
27 #include "blackfin.h" 27 #include "blackfin.h"
28 28
29 struct bfin_glue { 29 struct bfin_glue {
30 struct device *dev; 30 struct device *dev;
31 struct platform_device *musb; 31 struct platform_device *musb;
32 }; 32 };
33 #define glue_to_musb(g) platform_get_drvdata(g->musb) 33 #define glue_to_musb(g) platform_get_drvdata(g->musb)
34 34
35 /* 35 /*
36 * Load an endpoint's FIFO 36 * Load an endpoint's FIFO
37 */ 37 */
38 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) 38 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
39 { 39 {
40 struct musb *musb = hw_ep->musb; 40 struct musb *musb = hw_ep->musb;
41 void __iomem *fifo = hw_ep->fifo; 41 void __iomem *fifo = hw_ep->fifo;
42 void __iomem *epio = hw_ep->regs; 42 void __iomem *epio = hw_ep->regs;
43 u8 epnum = hw_ep->epnum; 43 u8 epnum = hw_ep->epnum;
44 44
45 prefetch((u8 *)src); 45 prefetch((u8 *)src);
46 46
47 musb_writew(epio, MUSB_TXCOUNT, len); 47 musb_writew(epio, MUSB_TXCOUNT, len);
48 48
49 dev_dbg(musb->controller, "TX ep%d fifo %p count %d buf %p, epio %p\n", 49 dev_dbg(musb->controller, "TX ep%d fifo %p count %d buf %p, epio %p\n",
50 hw_ep->epnum, fifo, len, src, epio); 50 hw_ep->epnum, fifo, len, src, epio);
51 51
52 dump_fifo_data(src, len); 52 dump_fifo_data(src, len);
53 53
54 if (!ANOMALY_05000380 && epnum != 0) { 54 if (!ANOMALY_05000380 && epnum != 0) {
55 u16 dma_reg; 55 u16 dma_reg;
56 56
57 flush_dcache_range((unsigned long)src, 57 flush_dcache_range((unsigned long)src,
58 (unsigned long)(src + len)); 58 (unsigned long)(src + len));
59 59
60 /* Setup DMA address register */ 60 /* Setup DMA address register */
61 dma_reg = (u32)src; 61 dma_reg = (u32)src;
62 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg); 62 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
63 SSYNC(); 63 SSYNC();
64 64
65 dma_reg = (u32)src >> 16; 65 dma_reg = (u32)src >> 16;
66 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); 66 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
67 SSYNC(); 67 SSYNC();
68 68
69 /* Setup DMA count register */ 69 /* Setup DMA count register */
70 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len); 70 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
71 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0); 71 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
72 SSYNC(); 72 SSYNC();
73 73
74 /* Enable the DMA */ 74 /* Enable the DMA */
75 dma_reg = (epnum << 4) | DMA_ENA | INT_ENA | DIRECTION; 75 dma_reg = (epnum << 4) | DMA_ENA | INT_ENA | DIRECTION;
76 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg); 76 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
77 SSYNC(); 77 SSYNC();
78 78
79 /* Wait for compelete */ 79 /* Wait for compelete */
80 while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum))) 80 while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
81 cpu_relax(); 81 cpu_relax();
82 82
83 /* acknowledge dma interrupt */ 83 /* acknowledge dma interrupt */
84 bfin_write_USB_DMA_INTERRUPT(1 << epnum); 84 bfin_write_USB_DMA_INTERRUPT(1 << epnum);
85 SSYNC(); 85 SSYNC();
86 86
87 /* Reset DMA */ 87 /* Reset DMA */
88 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0); 88 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
89 SSYNC(); 89 SSYNC();
90 } else { 90 } else {
91 SSYNC(); 91 SSYNC();
92 92
93 if (unlikely((unsigned long)src & 0x01)) 93 if (unlikely((unsigned long)src & 0x01))
94 outsw_8((unsigned long)fifo, src, (len + 1) >> 1); 94 outsw_8((unsigned long)fifo, src, (len + 1) >> 1);
95 else 95 else
96 outsw((unsigned long)fifo, src, (len + 1) >> 1); 96 outsw((unsigned long)fifo, src, (len + 1) >> 1);
97 } 97 }
98 } 98 }
99 /* 99 /*
100 * Unload an endpoint's FIFO 100 * Unload an endpoint's FIFO
101 */ 101 */
102 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) 102 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
103 { 103 {
104 struct musb *musb = hw_ep->musb; 104 struct musb *musb = hw_ep->musb;
105 void __iomem *fifo = hw_ep->fifo; 105 void __iomem *fifo = hw_ep->fifo;
106 u8 epnum = hw_ep->epnum; 106 u8 epnum = hw_ep->epnum;
107 107
108 if (ANOMALY_05000467 && epnum != 0) { 108 if (ANOMALY_05000467 && epnum != 0) {
109 u16 dma_reg; 109 u16 dma_reg;
110 110
111 invalidate_dcache_range((unsigned long)dst, 111 invalidate_dcache_range((unsigned long)dst,
112 (unsigned long)(dst + len)); 112 (unsigned long)(dst + len));
113 113
114 /* Setup DMA address register */ 114 /* Setup DMA address register */
115 dma_reg = (u32)dst; 115 dma_reg = (u32)dst;
116 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg); 116 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
117 SSYNC(); 117 SSYNC();
118 118
119 dma_reg = (u32)dst >> 16; 119 dma_reg = (u32)dst >> 16;
120 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg); 120 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
121 SSYNC(); 121 SSYNC();
122 122
123 /* Setup DMA count register */ 123 /* Setup DMA count register */
124 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len); 124 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_LOW), len);
125 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0); 125 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_COUNT_HIGH), 0);
126 SSYNC(); 126 SSYNC();
127 127
128 /* Enable the DMA */ 128 /* Enable the DMA */
129 dma_reg = (epnum << 4) | DMA_ENA | INT_ENA; 129 dma_reg = (epnum << 4) | DMA_ENA | INT_ENA;
130 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg); 130 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), dma_reg);
131 SSYNC(); 131 SSYNC();
132 132
133 /* Wait for compelete */ 133 /* Wait for compelete */
134 while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum))) 134 while (!(bfin_read_USB_DMA_INTERRUPT() & (1 << epnum)))
135 cpu_relax(); 135 cpu_relax();
136 136
137 /* acknowledge dma interrupt */ 137 /* acknowledge dma interrupt */
138 bfin_write_USB_DMA_INTERRUPT(1 << epnum); 138 bfin_write_USB_DMA_INTERRUPT(1 << epnum);
139 SSYNC(); 139 SSYNC();
140 140
141 /* Reset DMA */ 141 /* Reset DMA */
142 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0); 142 bfin_write16(USB_DMA_REG(epnum, USB_DMAx_CTRL), 0);
143 SSYNC(); 143 SSYNC();
144 } else { 144 } else {
145 SSYNC(); 145 SSYNC();
146 /* Read the last byte of packet with odd size from address fifo + 4 146 /* Read the last byte of packet with odd size from address fifo + 4
147 * to trigger 1 byte access to EP0 FIFO. 147 * to trigger 1 byte access to EP0 FIFO.
148 */ 148 */
149 if (len == 1) 149 if (len == 1)
150 *dst = (u8)inw((unsigned long)fifo + 4); 150 *dst = (u8)inw((unsigned long)fifo + 4);
151 else { 151 else {
152 if (unlikely((unsigned long)dst & 0x01)) 152 if (unlikely((unsigned long)dst & 0x01))
153 insw_8((unsigned long)fifo, dst, len >> 1); 153 insw_8((unsigned long)fifo, dst, len >> 1);
154 else 154 else
155 insw((unsigned long)fifo, dst, len >> 1); 155 insw((unsigned long)fifo, dst, len >> 1);
156 156
157 if (len & 0x01) 157 if (len & 0x01)
158 *(dst + len - 1) = (u8)inw((unsigned long)fifo + 4); 158 *(dst + len - 1) = (u8)inw((unsigned long)fifo + 4);
159 } 159 }
160 } 160 }
161 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", 161 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
162 'R', hw_ep->epnum, fifo, len, dst); 162 'R', hw_ep->epnum, fifo, len, dst);
163 163
164 dump_fifo_data(dst, len); 164 dump_fifo_data(dst, len);
165 } 165 }
166 166
167 static irqreturn_t blackfin_interrupt(int irq, void *__hci) 167 static irqreturn_t blackfin_interrupt(int irq, void *__hci)
168 { 168 {
169 unsigned long flags; 169 unsigned long flags;
170 irqreturn_t retval = IRQ_NONE; 170 irqreturn_t retval = IRQ_NONE;
171 struct musb *musb = __hci; 171 struct musb *musb = __hci;
172 172
173 spin_lock_irqsave(&musb->lock, flags); 173 spin_lock_irqsave(&musb->lock, flags);
174 174
175 musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); 175 musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
176 musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); 176 musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
177 musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); 177 musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
178 178
179 if (musb->int_usb || musb->int_tx || musb->int_rx) { 179 if (musb->int_usb || musb->int_tx || musb->int_rx) {
180 musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb); 180 musb_writeb(musb->mregs, MUSB_INTRUSB, musb->int_usb);
181 musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx); 181 musb_writew(musb->mregs, MUSB_INTRTX, musb->int_tx);
182 musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx); 182 musb_writew(musb->mregs, MUSB_INTRRX, musb->int_rx);
183 retval = musb_interrupt(musb); 183 retval = musb_interrupt(musb);
184 } 184 }
185 185
186 /* Start sampling ID pin, when plug is removed from MUSB */ 186 /* Start sampling ID pin, when plug is removed from MUSB */
187 if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE 187 if ((musb->xceiv->state == OTG_STATE_B_IDLE
188 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) || 188 || musb->xceiv->state == OTG_STATE_A_WAIT_BCON) ||
189 (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) { 189 (musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
190 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 190 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
191 musb->a_wait_bcon = TIMER_DELAY; 191 musb->a_wait_bcon = TIMER_DELAY;
192 } 192 }
193 193
194 spin_unlock_irqrestore(&musb->lock, flags); 194 spin_unlock_irqrestore(&musb->lock, flags);
195 195
196 return retval; 196 return retval;
197 } 197 }
198 198
199 static void musb_conn_timer_handler(unsigned long _musb) 199 static void musb_conn_timer_handler(unsigned long _musb)
200 { 200 {
201 struct musb *musb = (void *)_musb; 201 struct musb *musb = (void *)_musb;
202 unsigned long flags; 202 unsigned long flags;
203 u16 val; 203 u16 val;
204 static u8 toggle; 204 static u8 toggle;
205 205
206 spin_lock_irqsave(&musb->lock, flags); 206 spin_lock_irqsave(&musb->lock, flags);
207 switch (musb->xceiv->state) { 207 switch (musb->xceiv->state) {
208 case OTG_STATE_A_IDLE: 208 case OTG_STATE_A_IDLE:
209 case OTG_STATE_A_WAIT_BCON: 209 case OTG_STATE_A_WAIT_BCON:
210 /* Start a new session */ 210 /* Start a new session */
211 val = musb_readw(musb->mregs, MUSB_DEVCTL); 211 val = musb_readw(musb->mregs, MUSB_DEVCTL);
212 val &= ~MUSB_DEVCTL_SESSION; 212 val &= ~MUSB_DEVCTL_SESSION;
213 musb_writew(musb->mregs, MUSB_DEVCTL, val); 213 musb_writew(musb->mregs, MUSB_DEVCTL, val);
214 val |= MUSB_DEVCTL_SESSION; 214 val |= MUSB_DEVCTL_SESSION;
215 musb_writew(musb->mregs, MUSB_DEVCTL, val); 215 musb_writew(musb->mregs, MUSB_DEVCTL, val);
216 /* Check if musb is host or peripheral. */ 216 /* Check if musb is host or peripheral. */
217 val = musb_readw(musb->mregs, MUSB_DEVCTL); 217 val = musb_readw(musb->mregs, MUSB_DEVCTL);
218 218
219 if (!(val & MUSB_DEVCTL_BDEVICE)) { 219 if (!(val & MUSB_DEVCTL_BDEVICE)) {
220 gpio_set_value(musb->config->gpio_vrsel, 1); 220 gpio_set_value(musb->config->gpio_vrsel, 1);
221 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 221 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
222 } else { 222 } else {
223 gpio_set_value(musb->config->gpio_vrsel, 0); 223 gpio_set_value(musb->config->gpio_vrsel, 0);
224 /* Ignore VBUSERROR and SUSPEND IRQ */ 224 /* Ignore VBUSERROR and SUSPEND IRQ */
225 val = musb_readb(musb->mregs, MUSB_INTRUSBE); 225 val = musb_readb(musb->mregs, MUSB_INTRUSBE);
226 val &= ~MUSB_INTR_VBUSERROR; 226 val &= ~MUSB_INTR_VBUSERROR;
227 musb_writeb(musb->mregs, MUSB_INTRUSBE, val); 227 musb_writeb(musb->mregs, MUSB_INTRUSBE, val);
228 228
229 val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR; 229 val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
230 musb_writeb(musb->mregs, MUSB_INTRUSB, val); 230 musb_writeb(musb->mregs, MUSB_INTRUSB, val);
231 if (is_otg_enabled(musb)) 231 musb->xceiv->state = OTG_STATE_B_IDLE;
232 musb->xceiv->state = OTG_STATE_B_IDLE;
233 else
234 musb_writeb(musb->mregs, MUSB_POWER, MUSB_POWER_HSENAB);
235 } 232 }
236 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY); 233 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
237 break; 234 break;
238 case OTG_STATE_B_IDLE: 235 case OTG_STATE_B_IDLE:
239 236 /*
240 if (!is_peripheral_enabled(musb)) 237 * Start a new session. It seems that MUSB needs taking
241 break;
242 /* Start a new session. It seems that MUSB needs taking
243 * some time to recognize the type of the plug inserted? 238 * some time to recognize the type of the plug inserted?
244 */ 239 */
245 val = musb_readw(musb->mregs, MUSB_DEVCTL); 240 val = musb_readw(musb->mregs, MUSB_DEVCTL);
246 val |= MUSB_DEVCTL_SESSION; 241 val |= MUSB_DEVCTL_SESSION;
247 musb_writew(musb->mregs, MUSB_DEVCTL, val); 242 musb_writew(musb->mregs, MUSB_DEVCTL, val);
248 val = musb_readw(musb->mregs, MUSB_DEVCTL); 243 val = musb_readw(musb->mregs, MUSB_DEVCTL);
249 244
250 if (!(val & MUSB_DEVCTL_BDEVICE)) { 245 if (!(val & MUSB_DEVCTL_BDEVICE)) {
251 gpio_set_value(musb->config->gpio_vrsel, 1); 246 gpio_set_value(musb->config->gpio_vrsel, 1);
252 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 247 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
253 } else { 248 } else {
254 gpio_set_value(musb->config->gpio_vrsel, 0); 249 gpio_set_value(musb->config->gpio_vrsel, 0);
255 250
256 /* Ignore VBUSERROR and SUSPEND IRQ */ 251 /* Ignore VBUSERROR and SUSPEND IRQ */
257 val = musb_readb(musb->mregs, MUSB_INTRUSBE); 252 val = musb_readb(musb->mregs, MUSB_INTRUSBE);
258 val &= ~MUSB_INTR_VBUSERROR; 253 val &= ~MUSB_INTR_VBUSERROR;
259 musb_writeb(musb->mregs, MUSB_INTRUSBE, val); 254 musb_writeb(musb->mregs, MUSB_INTRUSBE, val);
260 255
261 val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR; 256 val = MUSB_INTR_SUSPEND | MUSB_INTR_VBUSERROR;
262 musb_writeb(musb->mregs, MUSB_INTRUSB, val); 257 musb_writeb(musb->mregs, MUSB_INTRUSB, val);
263 258
264 /* Toggle the Soft Conn bit, so that we can response to 259 /* Toggle the Soft Conn bit, so that we can response to
265 * the inserting of either A-plug or B-plug. 260 * the inserting of either A-plug or B-plug.
266 */ 261 */
267 if (toggle) { 262 if (toggle) {
268 val = musb_readb(musb->mregs, MUSB_POWER); 263 val = musb_readb(musb->mregs, MUSB_POWER);
269 val &= ~MUSB_POWER_SOFTCONN; 264 val &= ~MUSB_POWER_SOFTCONN;
270 musb_writeb(musb->mregs, MUSB_POWER, val); 265 musb_writeb(musb->mregs, MUSB_POWER, val);
271 toggle = 0; 266 toggle = 0;
272 } else { 267 } else {
273 val = musb_readb(musb->mregs, MUSB_POWER); 268 val = musb_readb(musb->mregs, MUSB_POWER);
274 val |= MUSB_POWER_SOFTCONN; 269 val |= MUSB_POWER_SOFTCONN;
275 musb_writeb(musb->mregs, MUSB_POWER, val); 270 musb_writeb(musb->mregs, MUSB_POWER, val);
276 toggle = 1; 271 toggle = 1;
277 } 272 }
278 /* The delay time is set to 1/4 second by default, 273 /* The delay time is set to 1/4 second by default,
279 * shortening it, if accelerating A-plug detection 274 * shortening it, if accelerating A-plug detection
280 * is needed in OTG mode. 275 * is needed in OTG mode.
281 */ 276 */
282 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY / 4); 277 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY / 4);
283 } 278 }
284 break; 279 break;
285 default: 280 default:
286 dev_dbg(musb->controller, "%s state not handled\n", 281 dev_dbg(musb->controller, "%s state not handled\n",
287 otg_state_string(musb->xceiv->state)); 282 otg_state_string(musb->xceiv->state));
288 break; 283 break;
289 } 284 }
290 spin_unlock_irqrestore(&musb->lock, flags); 285 spin_unlock_irqrestore(&musb->lock, flags);
291 286
292 dev_dbg(musb->controller, "state is %s\n", 287 dev_dbg(musb->controller, "state is %s\n",
293 otg_state_string(musb->xceiv->state)); 288 otg_state_string(musb->xceiv->state));
294 } 289 }
295 290
296 static void bfin_musb_enable(struct musb *musb) 291 static void bfin_musb_enable(struct musb *musb)
297 { 292 {
298 if (!is_otg_enabled(musb) && is_host_enabled(musb)) { 293 /* REVISIT is this really correct ? */
299 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
300 musb->a_wait_bcon = TIMER_DELAY;
301 }
302 } 294 }
303 295
304 static void bfin_musb_disable(struct musb *musb) 296 static void bfin_musb_disable(struct musb *musb)
305 { 297 {
306 } 298 }
307 299
308 static void bfin_musb_set_vbus(struct musb *musb, int is_on) 300 static void bfin_musb_set_vbus(struct musb *musb, int is_on)
309 { 301 {
310 int value = musb->config->gpio_vrsel_active; 302 int value = musb->config->gpio_vrsel_active;
311 if (!is_on) 303 if (!is_on)
312 value = !value; 304 value = !value;
313 gpio_set_value(musb->config->gpio_vrsel, value); 305 gpio_set_value(musb->config->gpio_vrsel, value);
314 306
315 dev_dbg(musb->controller, "VBUS %s, devctl %02x " 307 dev_dbg(musb->controller, "VBUS %s, devctl %02x "
316 /* otg %3x conf %08x prcm %08x */ "\n", 308 /* otg %3x conf %08x prcm %08x */ "\n",
317 otg_state_string(musb->xceiv->state), 309 otg_state_string(musb->xceiv->state),
318 musb_readb(musb->mregs, MUSB_DEVCTL)); 310 musb_readb(musb->mregs, MUSB_DEVCTL));
319 } 311 }
320 312
321 static int bfin_musb_set_power(struct usb_phy *x, unsigned mA) 313 static int bfin_musb_set_power(struct usb_phy *x, unsigned mA)
322 { 314 {
323 return 0; 315 return 0;
324 } 316 }
325 317
326 static void bfin_musb_try_idle(struct musb *musb, unsigned long timeout)
327 {
328 if (!is_otg_enabled(musb) && is_host_enabled(musb))
329 mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
330 }
331
332 static int bfin_musb_vbus_status(struct musb *musb) 318 static int bfin_musb_vbus_status(struct musb *musb)
333 { 319 {
334 return 0; 320 return 0;
335 } 321 }
336 322
337 static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode) 323 static int bfin_musb_set_mode(struct musb *musb, u8 musb_mode)
338 { 324 {
339 return -EIO; 325 return -EIO;
340 } 326 }
341 327
342 static int bfin_musb_adjust_channel_params(struct dma_channel *channel, 328 static int bfin_musb_adjust_channel_params(struct dma_channel *channel,
343 u16 packet_sz, u8 *mode, 329 u16 packet_sz, u8 *mode,
344 dma_addr_t *dma_addr, u32 *len) 330 dma_addr_t *dma_addr, u32 *len)
345 { 331 {
346 struct musb_dma_channel *musb_channel = channel->private_data; 332 struct musb_dma_channel *musb_channel = channel->private_data;
347 333
348 /* 334 /*
349 * Anomaly 05000450 might cause data corruption when using DMA 335 * Anomaly 05000450 might cause data corruption when using DMA
350 * MODE 1 transmits with short packet. So to work around this, 336 * MODE 1 transmits with short packet. So to work around this,
351 * we truncate all MODE 1 transfers down to a multiple of the 337 * we truncate all MODE 1 transfers down to a multiple of the
352 * max packet size, and then do the last short packet transfer 338 * max packet size, and then do the last short packet transfer
353 * (if there is any) using MODE 0. 339 * (if there is any) using MODE 0.
354 */ 340 */
355 if (ANOMALY_05000450) { 341 if (ANOMALY_05000450) {
356 if (musb_channel->transmit && *mode == 1) 342 if (musb_channel->transmit && *mode == 1)
357 *len = *len - (*len % packet_sz); 343 *len = *len - (*len % packet_sz);
358 } 344 }
359 345
360 return 0; 346 return 0;
361 } 347 }
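
The workaround above rounds the MODE 1 DMA length down to a whole number of max-size packets; whatever is left over then goes out as a MODE 0 short packet. A minimal, self-contained sketch of that arithmetic, where packet_sz and len are made-up numbers rather than values taken from the driver:

#include <stdio.h>

/* Hypothetical numbers, only to make the truncation above explicit. */
int main(void)
{
    unsigned int packet_sz = 512;                        /* max packet size (assumed) */
    unsigned int len = 1300;                             /* requested length (assumed) */

    unsigned int mode1_len = len - (len % packet_sz);    /* 1024: two full packets via DMA MODE 1 */
    unsigned int mode0_len = len - mode1_len;            /* 276: trailing short packet via MODE 0 */

    printf("MODE 1 carries %u bytes, MODE 0 carries %u\n", mode1_len, mode0_len);
    return 0;
}
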
362 348
363 static void bfin_musb_reg_init(struct musb *musb) 349 static void bfin_musb_reg_init(struct musb *musb)
364 { 350 {
365 if (ANOMALY_05000346) { 351 if (ANOMALY_05000346) {
366 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value); 352 bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
367 SSYNC(); 353 SSYNC();
368 } 354 }
369 355
370 if (ANOMALY_05000347) { 356 if (ANOMALY_05000347) {
371 bfin_write_USB_APHY_CNTRL(0x0); 357 bfin_write_USB_APHY_CNTRL(0x0);
372 SSYNC(); 358 SSYNC();
373 } 359 }
374 360
375 /* Configure PLL oscillator register */ 361 /* Configure PLL oscillator register */
376 bfin_write_USB_PLLOSC_CTRL(0x3080 | 362 bfin_write_USB_PLLOSC_CTRL(0x3080 |
377 ((480/musb->config->clkin) << 1)); 363 ((480/musb->config->clkin) << 1));
378 SSYNC(); 364 SSYNC();
379 365
380 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1); 366 bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
381 SSYNC(); 367 SSYNC();
382 368
383 bfin_write_USB_EP_NI0_RXMAXP(64); 369 bfin_write_USB_EP_NI0_RXMAXP(64);
384 SSYNC(); 370 SSYNC();
385 371
386 bfin_write_USB_EP_NI0_TXMAXP(64); 372 bfin_write_USB_EP_NI0_TXMAXP(64);
387 SSYNC(); 373 SSYNC();
388 374
389 /* Route INTRUSB/INTR_RX/INTR_TX to USB_INT0*/ 375 /* Route INTRUSB/INTR_RX/INTR_TX to USB_INT0*/
390 bfin_write_USB_GLOBINTR(0x7); 376 bfin_write_USB_GLOBINTR(0x7);
391 SSYNC(); 377 SSYNC();
392 378
393 bfin_write_USB_GLOBAL_CTL(GLOBAL_ENA | EP1_TX_ENA | EP2_TX_ENA | 379 bfin_write_USB_GLOBAL_CTL(GLOBAL_ENA | EP1_TX_ENA | EP2_TX_ENA |
394 EP3_TX_ENA | EP4_TX_ENA | EP5_TX_ENA | 380 EP3_TX_ENA | EP4_TX_ENA | EP5_TX_ENA |
395 EP6_TX_ENA | EP7_TX_ENA | EP1_RX_ENA | 381 EP6_TX_ENA | EP7_TX_ENA | EP1_RX_ENA |
396 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA | 382 EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
397 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA); 383 EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
398 SSYNC(); 384 SSYNC();
399 } 385 }
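
bfin_musb_reg_init() above derives the PHY PLL multiplier from the board's reference clock: 480 divided by clkin (in MHz) is shifted left by one and OR'd with the fixed 0x3080 bits before being written to USB_PLLOSC_CTRL. A small sketch of that computation, assuming a hypothetical 24 MHz clkin; real boards supply the value through musb->config->clkin:

#include <stdio.h>

/* Sketch of the USB_PLLOSC_CTRL value computed in bfin_musb_reg_init(),
 * using an assumed 24 MHz reference clock. */
int main(void)
{
    unsigned int clkin = 24;                   /* MHz, assumed value */
    unsigned int mult = 480 / clkin;           /* PLL multiplier: 20 */
    unsigned int val = 0x3080 | (mult << 1);   /* 0x3080 | 0x28 = 0x30a8 */

    printf("USB_PLLOSC_CTRL = 0x%04x\n", val);
    return 0;
}
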
400 386
401 static int bfin_musb_init(struct musb *musb) 387 static int bfin_musb_init(struct musb *musb)
402 { 388 {
403 389
404 /* 390 /*
405 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE 391 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
406 * and OTG HOST modes, while rev 1.1 and greater require PE7 to 392 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
407 * be low for DEVICE mode and high for HOST mode. We set it high 393 * be low for DEVICE mode and high for HOST mode. We set it high
408 * here because we are in host mode 394 * here because we are in host mode
409 */ 395 */
410 396
411 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) { 397 if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
412 printk(KERN_ERR "Failed to request USB_VRSEL GPIO_%d\n", 398 printk(KERN_ERR "Failed to request USB_VRSEL GPIO_%d\n",
413 musb->config->gpio_vrsel); 399 musb->config->gpio_vrsel);
414 return -ENODEV; 400 return -ENODEV;
415 } 401 }
416 gpio_direction_output(musb->config->gpio_vrsel, 0); 402 gpio_direction_output(musb->config->gpio_vrsel, 0);
417 403
418 usb_nop_xceiv_register(); 404 usb_nop_xceiv_register();
419 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 405 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
420 if (IS_ERR_OR_NULL(musb->xceiv)) { 406 if (IS_ERR_OR_NULL(musb->xceiv)) {
421 gpio_free(musb->config->gpio_vrsel); 407 gpio_free(musb->config->gpio_vrsel);
422 return -ENODEV; 408 return -ENODEV;
423 } 409 }
424 410
425 bfin_musb_reg_init(musb); 411 bfin_musb_reg_init(musb);
426 412
427 if (is_host_enabled(musb)) { 413 setup_timer(&musb_conn_timer, musb_conn_timer_handler,
428 setup_timer(&musb_conn_timer, 414 (unsigned long) musb);
429 musb_conn_timer_handler, (unsigned long) musb);
430 }
431 if (is_peripheral_enabled(musb))
432 musb->xceiv->set_power = bfin_musb_set_power;
433 415
416 musb->xceiv->set_power = bfin_musb_set_power;
417
434 musb->isr = blackfin_interrupt; 418 musb->isr = blackfin_interrupt;
435 musb->double_buffer_not_ok = true; 419 musb->double_buffer_not_ok = true;
436 420
437 return 0; 421 return 0;
438 } 422 }
439 423
440 static int bfin_musb_exit(struct musb *musb) 424 static int bfin_musb_exit(struct musb *musb)
441 { 425 {
442 gpio_free(musb->config->gpio_vrsel); 426 gpio_free(musb->config->gpio_vrsel);
443 427
444 usb_put_phy(musb->xceiv); 428 usb_put_phy(musb->xceiv);
445 usb_nop_xceiv_unregister(); 429 usb_nop_xceiv_unregister();
446 return 0; 430 return 0;
447 } 431 }
448 432
449 static const struct musb_platform_ops bfin_ops = { 433 static const struct musb_platform_ops bfin_ops = {
450 .init = bfin_musb_init, 434 .init = bfin_musb_init,
451 .exit = bfin_musb_exit, 435 .exit = bfin_musb_exit,
452 436
453 .enable = bfin_musb_enable, 437 .enable = bfin_musb_enable,
454 .disable = bfin_musb_disable, 438 .disable = bfin_musb_disable,
455 439
456 .set_mode = bfin_musb_set_mode, 440 .set_mode = bfin_musb_set_mode,
457 .try_idle = bfin_musb_try_idle,
458 441
459 .vbus_status = bfin_musb_vbus_status, 442 .vbus_status = bfin_musb_vbus_status,
460 .set_vbus = bfin_musb_set_vbus, 443 .set_vbus = bfin_musb_set_vbus,
461 444
462 .adjust_channel_params = bfin_musb_adjust_channel_params, 445 .adjust_channel_params = bfin_musb_adjust_channel_params,
463 }; 446 };
464 447
465 static u64 bfin_dmamask = DMA_BIT_MASK(32); 448 static u64 bfin_dmamask = DMA_BIT_MASK(32);
466 449
467 static int __devinit bfin_probe(struct platform_device *pdev) 450 static int __devinit bfin_probe(struct platform_device *pdev)
468 { 451 {
469 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 452 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
470 struct platform_device *musb; 453 struct platform_device *musb;
471 struct bfin_glue *glue; 454 struct bfin_glue *glue;
472 455
473 int ret = -ENOMEM; 456 int ret = -ENOMEM;
474 457
475 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 458 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
476 if (!glue) { 459 if (!glue) {
477 dev_err(&pdev->dev, "failed to allocate glue context\n"); 460 dev_err(&pdev->dev, "failed to allocate glue context\n");
478 goto err0; 461 goto err0;
479 } 462 }
480 463
481 musb = platform_device_alloc("musb-hdrc", -1); 464 musb = platform_device_alloc("musb-hdrc", -1);
482 if (!musb) { 465 if (!musb) {
483 dev_err(&pdev->dev, "failed to allocate musb device\n"); 466 dev_err(&pdev->dev, "failed to allocate musb device\n");
484 goto err1; 467 goto err1;
485 } 468 }
486 469
487 musb->dev.parent = &pdev->dev; 470 musb->dev.parent = &pdev->dev;
488 musb->dev.dma_mask = &bfin_dmamask; 471 musb->dev.dma_mask = &bfin_dmamask;
489 musb->dev.coherent_dma_mask = bfin_dmamask; 472 musb->dev.coherent_dma_mask = bfin_dmamask;
490 473
491 glue->dev = &pdev->dev; 474 glue->dev = &pdev->dev;
492 glue->musb = musb; 475 glue->musb = musb;
493 476
494 pdata->platform_ops = &bfin_ops; 477 pdata->platform_ops = &bfin_ops;
495 478
496 platform_set_drvdata(pdev, glue); 479 platform_set_drvdata(pdev, glue);
497 480
498 ret = platform_device_add_resources(musb, pdev->resource, 481 ret = platform_device_add_resources(musb, pdev->resource,
499 pdev->num_resources); 482 pdev->num_resources);
500 if (ret) { 483 if (ret) {
501 dev_err(&pdev->dev, "failed to add resources\n"); 484 dev_err(&pdev->dev, "failed to add resources\n");
502 goto err2; 485 goto err2;
503 } 486 }
504 487
505 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 488 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
506 if (ret) { 489 if (ret) {
507 dev_err(&pdev->dev, "failed to add platform_data\n"); 490 dev_err(&pdev->dev, "failed to add platform_data\n");
508 goto err2; 491 goto err2;
509 } 492 }
510 493
511 ret = platform_device_add(musb); 494 ret = platform_device_add(musb);
512 if (ret) { 495 if (ret) {
513 dev_err(&pdev->dev, "failed to register musb device\n"); 496 dev_err(&pdev->dev, "failed to register musb device\n");
514 goto err2; 497 goto err2;
515 } 498 }
516 499
517 return 0; 500 return 0;
518 501
519 err2: 502 err2:
520 platform_device_put(musb); 503 platform_device_put(musb);
521 504
522 err1: 505 err1:
523 kfree(glue); 506 kfree(glue);
524 507
525 err0: 508 err0:
526 return ret; 509 return ret;
527 } 510 }
528 511
529 static int __devexit bfin_remove(struct platform_device *pdev) 512 static int __devexit bfin_remove(struct platform_device *pdev)
530 { 513 {
531 struct bfin_glue *glue = platform_get_drvdata(pdev); 514 struct bfin_glue *glue = platform_get_drvdata(pdev);
532 515
533 platform_device_del(glue->musb); 516 platform_device_del(glue->musb);
534 platform_device_put(glue->musb); 517 platform_device_put(glue->musb);
535 kfree(glue); 518 kfree(glue);
536 519
537 return 0; 520 return 0;
538 } 521 }
539 522
540 #ifdef CONFIG_PM 523 #ifdef CONFIG_PM
541 static int bfin_suspend(struct device *dev) 524 static int bfin_suspend(struct device *dev)
542 { 525 {
543 struct bfin_glue *glue = dev_get_drvdata(dev); 526 struct bfin_glue *glue = dev_get_drvdata(dev);
544 struct musb *musb = glue_to_musb(glue); 527 struct musb *musb = glue_to_musb(glue);
545 528
546 if (is_host_active(musb)) 529 if (is_host_active(musb))
547 /* 530 /*
548 * During hibernate gpio_vrsel will change from high to low 531 * During hibernate gpio_vrsel will change from high to low
549 * low which will generate wakeup event resume the system 532 * low which will generate wakeup event resume the system
550 * immediately. Set it to 0 before hibernate to avoid this 533 * immediately. Set it to 0 before hibernate to avoid this
551 * wakeup event. 534 * wakeup event.
552 */ 535 */
553 gpio_set_value(musb->config->gpio_vrsel, 0); 536 gpio_set_value(musb->config->gpio_vrsel, 0);
554 537
555 return 0; 538 return 0;
556 } 539 }
557 540
558 static int bfin_resume(struct device *dev) 541 static int bfin_resume(struct device *dev)
559 { 542 {
560 struct bfin_glue *glue = dev_get_drvdata(dev); 543 struct bfin_glue *glue = dev_get_drvdata(dev);
561 struct musb *musb = glue_to_musb(glue); 544 struct musb *musb = glue_to_musb(glue);
562 545
563 bfin_musb_reg_init(musb); 546 bfin_musb_reg_init(musb);
564 547
565 return 0; 548 return 0;
566 } 549 }
567 550
568 static struct dev_pm_ops bfin_pm_ops = { 551 static struct dev_pm_ops bfin_pm_ops = {
569 .suspend = bfin_suspend, 552 .suspend = bfin_suspend,
570 .resume = bfin_resume, 553 .resume = bfin_resume,
571 }; 554 };
572 555
573 #define DEV_PM_OPS &bfin_pm_ops 556 #define DEV_PM_OPS &bfin_pm_ops
574 #else 557 #else
575 #define DEV_PM_OPS NULL 558 #define DEV_PM_OPS NULL
576 #endif 559 #endif
577 560
578 static struct platform_driver bfin_driver = { 561 static struct platform_driver bfin_driver = {
579 .probe = bfin_probe, 562 .probe = bfin_probe,
580 .remove = __exit_p(bfin_remove), 563 .remove = __exit_p(bfin_remove),
581 .driver = { 564 .driver = {
582 .name = "musb-blackfin", 565 .name = "musb-blackfin",
583 .pm = DEV_PM_OPS, 566 .pm = DEV_PM_OPS,
584 }, 567 },
585 }; 568 };
586 569
587 MODULE_DESCRIPTION("Blackfin MUSB Glue Layer"); 570 MODULE_DESCRIPTION("Blackfin MUSB Glue Layer");
588 MODULE_AUTHOR("Bryan Wy <cooloney@kernel.org>"); 571 MODULE_AUTHOR("Bryan Wy <cooloney@kernel.org>");
589 MODULE_LICENSE("GPL v2"); 572 MODULE_LICENSE("GPL v2");
590 573
591 static int __init bfin_init(void) 574 static int __init bfin_init(void)
592 { 575 {
593 return platform_driver_register(&bfin_driver); 576 return platform_driver_register(&bfin_driver);
594 } 577 }
595 module_init(bfin_init); 578 module_init(bfin_init);
596 579
597 static void __exit bfin_exit(void) 580 static void __exit bfin_exit(void)
598 { 581 {
599 platform_driver_unregister(&bfin_driver); 582 platform_driver_unregister(&bfin_driver);
600 } 583 }
drivers/usb/musb/da8xx.c
1 /* 1 /*
2 * Texas Instruments DA8xx/OMAP-L1x "glue layer" 2 * Texas Instruments DA8xx/OMAP-L1x "glue layer"
3 * 3 *
4 * Copyright (c) 2008-2009 MontaVista Software, Inc. <source@mvista.com> 4 * Copyright (c) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
5 * 5 *
6 * Based on the DaVinci "glue layer" code. 6 * Based on the DaVinci "glue layer" code.
7 * Copyright (C) 2005-2006 by Texas Instruments 7 * Copyright (C) 2005-2006 by Texas Instruments
8 * 8 *
9 * This file is part of the Inventra Controller Driver for Linux. 9 * This file is part of the Inventra Controller Driver for Linux.
10 * 10 *
11 * The Inventra Controller Driver for Linux is free software; you 11 * The Inventra Controller Driver for Linux is free software; you
12 * can redistribute it and/or modify it under the terms of the GNU 12 * can redistribute it and/or modify it under the terms of the GNU
13 * General Public License version 2 as published by the Free Software 13 * General Public License version 2 as published by the Free Software
14 * Foundation. 14 * Foundation.
15 * 15 *
16 * The Inventra Controller Driver for Linux is distributed in 16 * The Inventra Controller Driver for Linux is distributed in
17 * the hope that it will be useful, but WITHOUT ANY WARRANTY; 17 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
18 * without even the implied warranty of MERCHANTABILITY or 18 * without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public 19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
20 * License for more details. 20 * License for more details.
21 * 21 *
22 * You should have received a copy of the GNU General Public License 22 * You should have received a copy of the GNU General Public License
23 * along with The Inventra Controller Driver for Linux ; if not, 23 * along with The Inventra Controller Driver for Linux ; if not,
24 * write to the Free Software Foundation, Inc., 59 Temple Place, 24 * write to the Free Software Foundation, Inc., 59 Temple Place,
25 * Suite 330, Boston, MA 02111-1307 USA 25 * Suite 330, Boston, MA 02111-1307 USA
26 * 26 *
27 */ 27 */
28 28
29 #include <linux/init.h> 29 #include <linux/init.h>
30 #include <linux/module.h> 30 #include <linux/module.h>
31 #include <linux/clk.h> 31 #include <linux/clk.h>
32 #include <linux/err.h> 32 #include <linux/err.h>
33 #include <linux/io.h> 33 #include <linux/io.h>
34 #include <linux/platform_device.h> 34 #include <linux/platform_device.h>
35 #include <linux/dma-mapping.h> 35 #include <linux/dma-mapping.h>
36 36
37 #include <mach/da8xx.h> 37 #include <mach/da8xx.h>
38 #include <mach/usb.h> 38 #include <mach/usb.h>
39 39
40 #include "musb_core.h" 40 #include "musb_core.h"
41 41
42 /* 42 /*
43 * DA8XX specific definitions 43 * DA8XX specific definitions
44 */ 44 */
45 45
46 /* USB 2.0 OTG module registers */ 46 /* USB 2.0 OTG module registers */
47 #define DA8XX_USB_REVISION_REG 0x00 47 #define DA8XX_USB_REVISION_REG 0x00
48 #define DA8XX_USB_CTRL_REG 0x04 48 #define DA8XX_USB_CTRL_REG 0x04
49 #define DA8XX_USB_STAT_REG 0x08 49 #define DA8XX_USB_STAT_REG 0x08
50 #define DA8XX_USB_EMULATION_REG 0x0c 50 #define DA8XX_USB_EMULATION_REG 0x0c
51 #define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */ 51 #define DA8XX_USB_MODE_REG 0x10 /* Transparent, CDC, [Generic] RNDIS */
52 #define DA8XX_USB_AUTOREQ_REG 0x14 52 #define DA8XX_USB_AUTOREQ_REG 0x14
53 #define DA8XX_USB_SRP_FIX_TIME_REG 0x18 53 #define DA8XX_USB_SRP_FIX_TIME_REG 0x18
54 #define DA8XX_USB_TEARDOWN_REG 0x1c 54 #define DA8XX_USB_TEARDOWN_REG 0x1c
55 #define DA8XX_USB_INTR_SRC_REG 0x20 55 #define DA8XX_USB_INTR_SRC_REG 0x20
56 #define DA8XX_USB_INTR_SRC_SET_REG 0x24 56 #define DA8XX_USB_INTR_SRC_SET_REG 0x24
57 #define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28 57 #define DA8XX_USB_INTR_SRC_CLEAR_REG 0x28
58 #define DA8XX_USB_INTR_MASK_REG 0x2c 58 #define DA8XX_USB_INTR_MASK_REG 0x2c
59 #define DA8XX_USB_INTR_MASK_SET_REG 0x30 59 #define DA8XX_USB_INTR_MASK_SET_REG 0x30
60 #define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34 60 #define DA8XX_USB_INTR_MASK_CLEAR_REG 0x34
61 #define DA8XX_USB_INTR_SRC_MASKED_REG 0x38 61 #define DA8XX_USB_INTR_SRC_MASKED_REG 0x38
62 #define DA8XX_USB_END_OF_INTR_REG 0x3c 62 #define DA8XX_USB_END_OF_INTR_REG 0x3c
63 #define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2)) 63 #define DA8XX_USB_GENERIC_RNDIS_EP_SIZE_REG(n) (0x50 + (((n) - 1) << 2))
64 64
65 /* Control register bits */ 65 /* Control register bits */
66 #define DA8XX_SOFT_RESET_MASK 1 66 #define DA8XX_SOFT_RESET_MASK 1
67 67
68 #define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */ 68 #define DA8XX_USB_TX_EP_MASK 0x1f /* EP0 + 4 Tx EPs */
69 #define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */ 69 #define DA8XX_USB_RX_EP_MASK 0x1e /* 4 Rx EPs */
70 70
71 /* USB interrupt register bits */ 71 /* USB interrupt register bits */
72 #define DA8XX_INTR_USB_SHIFT 16 72 #define DA8XX_INTR_USB_SHIFT 16
73 #define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */ 73 #define DA8XX_INTR_USB_MASK (0x1ff << DA8XX_INTR_USB_SHIFT) /* 8 Mentor */
74 /* interrupts and DRVVBUS interrupt */ 74 /* interrupts and DRVVBUS interrupt */
75 #define DA8XX_INTR_DRVVBUS 0x100 75 #define DA8XX_INTR_DRVVBUS 0x100
76 #define DA8XX_INTR_RX_SHIFT 8 76 #define DA8XX_INTR_RX_SHIFT 8
77 #define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT) 77 #define DA8XX_INTR_RX_MASK (DA8XX_USB_RX_EP_MASK << DA8XX_INTR_RX_SHIFT)
78 #define DA8XX_INTR_TX_SHIFT 0 78 #define DA8XX_INTR_TX_SHIFT 0
79 #define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT) 79 #define DA8XX_INTR_TX_MASK (DA8XX_USB_TX_EP_MASK << DA8XX_INTR_TX_SHIFT)
80 80
81 #define DA8XX_MENTOR_CORE_OFFSET 0x400 81 #define DA8XX_MENTOR_CORE_OFFSET 0x400
82 82
83 #define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG) 83 #define CFGCHIP2 IO_ADDRESS(DA8XX_SYSCFG0_BASE + DA8XX_CFGCHIP2_REG)
84 84
85 struct da8xx_glue { 85 struct da8xx_glue {
86 struct device *dev; 86 struct device *dev;
87 struct platform_device *musb; 87 struct platform_device *musb;
88 struct clk *clk; 88 struct clk *clk;
89 }; 89 };
90 90
91 /* 91 /*
92 * REVISIT (PM): we should be able to keep the PHY in low power mode most 92 * REVISIT (PM): we should be able to keep the PHY in low power mode most
93 * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0 93 * of the time (24 MHz oscillator and PLL off, etc.) by setting POWER.D0
94 * and, when in host mode, autosuspending idle root ports... PHY_PLLON 94 * and, when in host mode, autosuspending idle root ports... PHY_PLLON
95 * (overriding SUSPENDM?) then likely needs to stay off. 95 * (overriding SUSPENDM?) then likely needs to stay off.
96 */ 96 */
97 97
98 static inline void phy_on(void) 98 static inline void phy_on(void)
99 { 99 {
100 u32 cfgchip2 = __raw_readl(CFGCHIP2); 100 u32 cfgchip2 = __raw_readl(CFGCHIP2);
101 101
102 /* 102 /*
103 * Start the on-chip PHY and its PLL. 103 * Start the on-chip PHY and its PLL.
104 */ 104 */
105 cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN); 105 cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN);
106 cfgchip2 |= CFGCHIP2_PHY_PLLON; 106 cfgchip2 |= CFGCHIP2_PHY_PLLON;
107 __raw_writel(cfgchip2, CFGCHIP2); 107 __raw_writel(cfgchip2, CFGCHIP2);
108 108
109 pr_info("Waiting for USB PHY clock good...\n"); 109 pr_info("Waiting for USB PHY clock good...\n");
110 while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD)) 110 while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
111 cpu_relax(); 111 cpu_relax();
112 } 112 }
113 113
114 static inline void phy_off(void) 114 static inline void phy_off(void)
115 { 115 {
116 u32 cfgchip2 = __raw_readl(CFGCHIP2); 116 u32 cfgchip2 = __raw_readl(CFGCHIP2);
117 117
118 /* 118 /*
119 * Ensure that USB 1.1 reference clock is not being sourced from 119 * Ensure that USB 1.1 reference clock is not being sourced from
120 * USB 2.0 PHY. Otherwise do not power down the PHY. 120 * USB 2.0 PHY. Otherwise do not power down the PHY.
121 */ 121 */
122 if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX) && 122 if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX) &&
123 (cfgchip2 & CFGCHIP2_USB1SUSPENDM)) { 123 (cfgchip2 & CFGCHIP2_USB1SUSPENDM)) {
124 pr_warning("USB 1.1 clocked from USB 2.0 PHY -- " 124 pr_warning("USB 1.1 clocked from USB 2.0 PHY -- "
125 "can't power it down\n"); 125 "can't power it down\n");
126 return; 126 return;
127 } 127 }
128 128
129 /* 129 /*
130 * Power down the on-chip PHY. 130 * Power down the on-chip PHY.
131 */ 131 */
132 cfgchip2 |= CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN; 132 cfgchip2 |= CFGCHIP2_PHYPWRDN | CFGCHIP2_OTGPWRDN;
133 __raw_writel(cfgchip2, CFGCHIP2); 133 __raw_writel(cfgchip2, CFGCHIP2);
134 } 134 }
135 135
136 /* 136 /*
137 * Because we don't set CTRL.UINT, it's "important" to: 137 * Because we don't set CTRL.UINT, it's "important" to:
138 * - not read/write INTRUSB/INTRUSBE (except during 138 * - not read/write INTRUSB/INTRUSBE (except during
139 * initial setup, as a workaround); 139 * initial setup, as a workaround);
140 * - use INTSET/INTCLR instead. 140 * - use INTSET/INTCLR instead.
141 */ 141 */
142 142
143 /** 143 /**
144 * da8xx_musb_enable - enable interrupts 144 * da8xx_musb_enable - enable interrupts
145 */ 145 */
146 static void da8xx_musb_enable(struct musb *musb) 146 static void da8xx_musb_enable(struct musb *musb)
147 { 147 {
148 void __iomem *reg_base = musb->ctrl_base; 148 void __iomem *reg_base = musb->ctrl_base;
149 u32 mask; 149 u32 mask;
150 150
151 /* Workaround: setup IRQs through both register sets. */ 151 /* Workaround: setup IRQs through both register sets. */
152 mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) | 152 mask = ((musb->epmask & DA8XX_USB_TX_EP_MASK) << DA8XX_INTR_TX_SHIFT) |
153 ((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) | 153 ((musb->epmask & DA8XX_USB_RX_EP_MASK) << DA8XX_INTR_RX_SHIFT) |
154 DA8XX_INTR_USB_MASK; 154 DA8XX_INTR_USB_MASK;
155 musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask); 155 musb_writel(reg_base, DA8XX_USB_INTR_MASK_SET_REG, mask);
156 156
157 /* Force the DRVVBUS IRQ so we can start polling for ID change. */ 157 /* Force the DRVVBUS IRQ so we can start polling for ID change. */
158 if (is_otg_enabled(musb)) 158 musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG,
159 musb_writel(reg_base, DA8XX_USB_INTR_SRC_SET_REG, 159 DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT);
160 DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT);
161 } 160 }
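
da8xx_musb_enable() above packs everything into one 32-bit mask: the TX endpoint bits stay at bit 0, the RX endpoint bits are shifted up to bit 8, and the nine Mentor core / DRVVBUS bits sit at bit 16, matching the DA8XX_INTR_* definitions earlier in this file. A sketch of that composition with a hypothetical epmask of 0x1f (EP0 plus four endpoints):

#include <stdio.h>

/* Mask layout used by da8xx_musb_enable(); epmask here is an assumed value. */
int main(void)
{
    unsigned int epmask = 0x1f;                     /* EP0 + 4 endpoints (assumed) */
    unsigned int mask = ((epmask & 0x1f) << 0) |    /* TX endpoint interrupts      */
                        ((epmask & 0x1e) << 8) |    /* RX endpoint interrupts      */
                        (0x1ff << 16);              /* Mentor core + DRVVBUS       */

    printf("INTR_MASK_SET = 0x%08x\n", mask);       /* prints 0x01ff1e1f */
    return 0;
}
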
162 161
163 /** 162 /**
164 * da8xx_musb_disable - disable HDRC and flush interrupts 163 * da8xx_musb_disable - disable HDRC and flush interrupts
165 */ 164 */
166 static void da8xx_musb_disable(struct musb *musb) 165 static void da8xx_musb_disable(struct musb *musb)
167 { 166 {
168 void __iomem *reg_base = musb->ctrl_base; 167 void __iomem *reg_base = musb->ctrl_base;
169 168
170 musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG, 169 musb_writel(reg_base, DA8XX_USB_INTR_MASK_CLEAR_REG,
171 DA8XX_INTR_USB_MASK | 170 DA8XX_INTR_USB_MASK |
172 DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK); 171 DA8XX_INTR_TX_MASK | DA8XX_INTR_RX_MASK);
173 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 172 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
174 musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); 173 musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
175 } 174 }
176 175
177 #define portstate(stmt) stmt 176 #define portstate(stmt) stmt
178 177
179 static void da8xx_musb_set_vbus(struct musb *musb, int is_on) 178 static void da8xx_musb_set_vbus(struct musb *musb, int is_on)
180 { 179 {
181 WARN_ON(is_on && is_peripheral_active(musb)); 180 WARN_ON(is_on && is_peripheral_active(musb));
182 } 181 }
183 182
184 #define POLL_SECONDS 2 183 #define POLL_SECONDS 2
185 184
186 static struct timer_list otg_workaround; 185 static struct timer_list otg_workaround;
187 186
188 static void otg_timer(unsigned long _musb) 187 static void otg_timer(unsigned long _musb)
189 { 188 {
190 struct musb *musb = (void *)_musb; 189 struct musb *musb = (void *)_musb;
191 void __iomem *mregs = musb->mregs; 190 void __iomem *mregs = musb->mregs;
192 u8 devctl; 191 u8 devctl;
193 unsigned long flags; 192 unsigned long flags;
194 193
195 /* 194 /*
196 * We poll because DaVinci's won't expose several OTG-critical 195 * We poll because DaVinci's won't expose several OTG-critical
197 * status change events (from the transceiver) otherwise. 196 * status change events (from the transceiver) otherwise.
198 */ 197 */
199 devctl = musb_readb(mregs, MUSB_DEVCTL); 198 devctl = musb_readb(mregs, MUSB_DEVCTL);
200 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, 199 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
201 otg_state_string(musb->xceiv->state)); 200 otg_state_string(musb->xceiv->state));
202 201
203 spin_lock_irqsave(&musb->lock, flags); 202 spin_lock_irqsave(&musb->lock, flags);
204 switch (musb->xceiv->state) { 203 switch (musb->xceiv->state) {
205 case OTG_STATE_A_WAIT_BCON: 204 case OTG_STATE_A_WAIT_BCON:
206 devctl &= ~MUSB_DEVCTL_SESSION; 205 devctl &= ~MUSB_DEVCTL_SESSION;
207 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 206 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
208 207
209 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 208 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
210 if (devctl & MUSB_DEVCTL_BDEVICE) { 209 if (devctl & MUSB_DEVCTL_BDEVICE) {
211 musb->xceiv->state = OTG_STATE_B_IDLE; 210 musb->xceiv->state = OTG_STATE_B_IDLE;
212 MUSB_DEV_MODE(musb); 211 MUSB_DEV_MODE(musb);
213 } else { 212 } else {
214 musb->xceiv->state = OTG_STATE_A_IDLE; 213 musb->xceiv->state = OTG_STATE_A_IDLE;
215 MUSB_HST_MODE(musb); 214 MUSB_HST_MODE(musb);
216 } 215 }
217 break; 216 break;
218 case OTG_STATE_A_WAIT_VFALL: 217 case OTG_STATE_A_WAIT_VFALL:
219 /* 218 /*
220 * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3 219 * Wait till VBUS falls below SessionEnd (~0.2 V); the 1.3
221 * RTL seems to mis-handle session "start" otherwise (or in 220 * RTL seems to mis-handle session "start" otherwise (or in
222 * our case "recover"), in routine "VBUS was valid by the time 221 * our case "recover"), in routine "VBUS was valid by the time
223 * VBUSERR got reported during enumeration" cases. 222 * VBUSERR got reported during enumeration" cases.
224 */ 223 */
225 if (devctl & MUSB_DEVCTL_VBUS) { 224 if (devctl & MUSB_DEVCTL_VBUS) {
226 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 225 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
227 break; 226 break;
228 } 227 }
229 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 228 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
230 musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG, 229 musb_writel(musb->ctrl_base, DA8XX_USB_INTR_SRC_SET_REG,
231 MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT); 230 MUSB_INTR_VBUSERROR << DA8XX_INTR_USB_SHIFT);
232 break; 231 break;
233 case OTG_STATE_B_IDLE: 232 case OTG_STATE_B_IDLE:
234 if (!is_peripheral_enabled(musb))
235 break;
236
237 /* 233 /*
238 * There's no ID-changed IRQ, so we have no good way to tell 234 * There's no ID-changed IRQ, so we have no good way to tell
239 * when to switch to the A-Default state machine (by setting 235 * when to switch to the A-Default state machine (by setting
240 * the DEVCTL.Session bit). 236 * the DEVCTL.Session bit).
241 * 237 *
242 * Workaround: whenever we're in B_IDLE, try setting the 238 * Workaround: whenever we're in B_IDLE, try setting the
243 * session flag every few seconds. If it works, ID was 239 * session flag every few seconds. If it works, ID was
244 * grounded and we're now in the A-Default state machine. 240 * grounded and we're now in the A-Default state machine.
245 * 241 *
246 * NOTE: setting the session flag is _supposed_ to trigger 242 * NOTE: setting the session flag is _supposed_ to trigger
247 * SRP but clearly it doesn't. 243 * SRP but clearly it doesn't.
248 */ 244 */
249 musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION); 245 musb_writeb(mregs, MUSB_DEVCTL, devctl | MUSB_DEVCTL_SESSION);
250 devctl = musb_readb(mregs, MUSB_DEVCTL); 246 devctl = musb_readb(mregs, MUSB_DEVCTL);
251 if (devctl & MUSB_DEVCTL_BDEVICE) 247 if (devctl & MUSB_DEVCTL_BDEVICE)
252 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 248 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
253 else 249 else
254 musb->xceiv->state = OTG_STATE_A_IDLE; 250 musb->xceiv->state = OTG_STATE_A_IDLE;
255 break; 251 break;
256 default: 252 default:
257 break; 253 break;
258 } 254 }
259 spin_unlock_irqrestore(&musb->lock, flags); 255 spin_unlock_irqrestore(&musb->lock, flags);
260 } 256 }
261 257
262 static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout) 258 static void da8xx_musb_try_idle(struct musb *musb, unsigned long timeout)
263 { 259 {
264 static unsigned long last_timer; 260 static unsigned long last_timer;
265 261
266 if (!is_otg_enabled(musb))
267 return;
268
269 if (timeout == 0) 262 if (timeout == 0)
270 timeout = jiffies + msecs_to_jiffies(3); 263 timeout = jiffies + msecs_to_jiffies(3);
271 264
272 /* Never idle if active, or when VBUS timeout is not set as host */ 265 /* Never idle if active, or when VBUS timeout is not set as host */
273 if (musb->is_active || (musb->a_wait_bcon == 0 && 266 if (musb->is_active || (musb->a_wait_bcon == 0 &&
274 musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { 267 musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
275 dev_dbg(musb->controller, "%s active, deleting timer\n", 268 dev_dbg(musb->controller, "%s active, deleting timer\n",
276 otg_state_string(musb->xceiv->state)); 269 otg_state_string(musb->xceiv->state));
277 del_timer(&otg_workaround); 270 del_timer(&otg_workaround);
278 last_timer = jiffies; 271 last_timer = jiffies;
279 return; 272 return;
280 } 273 }
281 274
282 if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) { 275 if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
283 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n"); 276 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
284 return; 277 return;
285 } 278 }
286 last_timer = timeout; 279 last_timer = timeout;
287 280
288 dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", 281 dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
289 otg_state_string(musb->xceiv->state), 282 otg_state_string(musb->xceiv->state),
290 jiffies_to_msecs(timeout - jiffies)); 283 jiffies_to_msecs(timeout - jiffies));
291 mod_timer(&otg_workaround, timeout); 284 mod_timer(&otg_workaround, timeout);
292 } 285 }
293 286
294 static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) 287 static irqreturn_t da8xx_musb_interrupt(int irq, void *hci)
295 { 288 {
296 struct musb *musb = hci; 289 struct musb *musb = hci;
297 void __iomem *reg_base = musb->ctrl_base; 290 void __iomem *reg_base = musb->ctrl_base;
298 struct usb_otg *otg = musb->xceiv->otg; 291 struct usb_otg *otg = musb->xceiv->otg;
299 unsigned long flags; 292 unsigned long flags;
300 irqreturn_t ret = IRQ_NONE; 293 irqreturn_t ret = IRQ_NONE;
301 u32 status; 294 u32 status;
302 295
303 spin_lock_irqsave(&musb->lock, flags); 296 spin_lock_irqsave(&musb->lock, flags);
304 297
305 /* 298 /*
306 * NOTE: DA8XX shadows the Mentor IRQs. Don't manage them through 299 * NOTE: DA8XX shadows the Mentor IRQs. Don't manage them through
307 * the Mentor registers (except for setup), use the TI ones and EOI. 300 * the Mentor registers (except for setup), use the TI ones and EOI.
308 */ 301 */
309 302
310 /* Acknowledge and handle non-CPPI interrupts */ 303 /* Acknowledge and handle non-CPPI interrupts */
311 status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG); 304 status = musb_readl(reg_base, DA8XX_USB_INTR_SRC_MASKED_REG);
312 if (!status) 305 if (!status)
313 goto eoi; 306 goto eoi;
314 307
315 musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status); 308 musb_writel(reg_base, DA8XX_USB_INTR_SRC_CLEAR_REG, status);
316 dev_dbg(musb->controller, "USB IRQ %08x\n", status); 309 dev_dbg(musb->controller, "USB IRQ %08x\n", status);
317 310
318 musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT; 311 musb->int_rx = (status & DA8XX_INTR_RX_MASK) >> DA8XX_INTR_RX_SHIFT;
319 musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT; 312 musb->int_tx = (status & DA8XX_INTR_TX_MASK) >> DA8XX_INTR_TX_SHIFT;
320 musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT; 313 musb->int_usb = (status & DA8XX_INTR_USB_MASK) >> DA8XX_INTR_USB_SHIFT;
321 314
322 /* 315 /*
323 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for 316 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
324 * DA8xx's missing ID change IRQ. We need an ID change IRQ to 317 * DA8xx's missing ID change IRQ. We need an ID change IRQ to
325 * switch appropriately between halves of the OTG state machine. 318 * switch appropriately between halves of the OTG state machine.
326 * Managing DEVCTL.Session per Mentor docs requires that we know its 319 * Managing DEVCTL.Session per Mentor docs requires that we know its
327 * value but DEVCTL.BDevice is invalid without DEVCTL.Session set. 320 * value but DEVCTL.BDevice is invalid without DEVCTL.Session set.
328 * Also, DRVVBUS pulses for SRP (but not at 5 V)... 321 * Also, DRVVBUS pulses for SRP (but not at 5 V)...
329 */ 322 */
330 if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) { 323 if (status & (DA8XX_INTR_DRVVBUS << DA8XX_INTR_USB_SHIFT)) {
331 int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG); 324 int drvvbus = musb_readl(reg_base, DA8XX_USB_STAT_REG);
332 void __iomem *mregs = musb->mregs; 325 void __iomem *mregs = musb->mregs;
333 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 326 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
334 int err; 327 int err;
335 328
336 err = is_host_enabled(musb) && (musb->int_usb & 329 err = musb->int_usb & MUSB_INTR_VBUSERROR;
337 MUSB_INTR_VBUSERROR);
338 if (err) { 330 if (err) {
339 /* 331 /*
340 * The Mentor core doesn't debounce VBUS as needed 332 * The Mentor core doesn't debounce VBUS as needed
341 * to cope with device connect current spikes. This 333 * to cope with device connect current spikes. This
342 * means it's not uncommon for bus-powered devices 334 * means it's not uncommon for bus-powered devices
343 * to get VBUS errors during enumeration. 335 * to get VBUS errors during enumeration.
344 * 336 *
345 * This is a workaround, but newer RTL from Mentor 337 * This is a workaround, but newer RTL from Mentor
346 * seems to allow a better one: "re"-starting sessions 338 * seems to allow a better one: "re"-starting sessions
347 * without waiting for VBUS to stop registering in 339 * without waiting for VBUS to stop registering in
348 * devctl. 340 * devctl.
349 */ 341 */
350 musb->int_usb &= ~MUSB_INTR_VBUSERROR; 342 musb->int_usb &= ~MUSB_INTR_VBUSERROR;
351 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 343 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
352 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 344 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
353 WARNING("VBUS error workaround (delay coming)\n"); 345 WARNING("VBUS error workaround (delay coming)\n");
354 } else if (is_host_enabled(musb) && drvvbus) { 346 } else if (drvvbus) {
355 MUSB_HST_MODE(musb); 347 MUSB_HST_MODE(musb);
356 otg->default_a = 1; 348 otg->default_a = 1;
357 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 349 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
358 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 350 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
359 del_timer(&otg_workaround); 351 del_timer(&otg_workaround);
360 } else { 352 } else {
361 musb->is_active = 0; 353 musb->is_active = 0;
362 MUSB_DEV_MODE(musb); 354 MUSB_DEV_MODE(musb);
363 otg->default_a = 0; 355 otg->default_a = 0;
364 musb->xceiv->state = OTG_STATE_B_IDLE; 356 musb->xceiv->state = OTG_STATE_B_IDLE;
365 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); 357 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
366 } 358 }
367 359
368 dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", 360 dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
369 drvvbus ? "on" : "off", 361 drvvbus ? "on" : "off",
370 otg_state_string(musb->xceiv->state), 362 otg_state_string(musb->xceiv->state),
371 err ? " ERROR" : "", 363 err ? " ERROR" : "",
372 devctl); 364 devctl);
373 ret = IRQ_HANDLED; 365 ret = IRQ_HANDLED;
374 } 366 }
375 367
376 if (musb->int_tx || musb->int_rx || musb->int_usb) 368 if (musb->int_tx || musb->int_rx || musb->int_usb)
377 ret |= musb_interrupt(musb); 369 ret |= musb_interrupt(musb);
378 370
379 eoi: 371 eoi:
380 /* EOI needs to be written for the IRQ to be re-asserted. */ 372 /* EOI needs to be written for the IRQ to be re-asserted. */
381 if (ret == IRQ_HANDLED || status) 373 if (ret == IRQ_HANDLED || status)
382 musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0); 374 musb_writel(reg_base, DA8XX_USB_END_OF_INTR_REG, 0);
383 375
384 /* Poll for ID change */ 376 /* Poll for ID change */
385 if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) 377 if (musb->xceiv->state == OTG_STATE_B_IDLE)
386 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 378 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
387 379
388 spin_unlock_irqrestore(&musb->lock, flags); 380 spin_unlock_irqrestore(&musb->lock, flags);
389 381
390 return ret; 382 return ret;
391 } 383 }
392 384
393 static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode) 385 static int da8xx_musb_set_mode(struct musb *musb, u8 musb_mode)
394 { 386 {
395 u32 cfgchip2 = __raw_readl(CFGCHIP2); 387 u32 cfgchip2 = __raw_readl(CFGCHIP2);
396 388
397 cfgchip2 &= ~CFGCHIP2_OTGMODE; 389 cfgchip2 &= ~CFGCHIP2_OTGMODE;
398 switch (musb_mode) { 390 switch (musb_mode) {
399 case MUSB_HOST: /* Force VBUS valid, ID = 0 */ 391 case MUSB_HOST: /* Force VBUS valid, ID = 0 */
400 cfgchip2 |= CFGCHIP2_FORCE_HOST; 392 cfgchip2 |= CFGCHIP2_FORCE_HOST;
401 break; 393 break;
402 case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ 394 case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
403 cfgchip2 |= CFGCHIP2_FORCE_DEVICE; 395 cfgchip2 |= CFGCHIP2_FORCE_DEVICE;
404 break; 396 break;
405 case MUSB_OTG: /* Don't override the VBUS/ID comparators */ 397 case MUSB_OTG: /* Don't override the VBUS/ID comparators */
406 cfgchip2 |= CFGCHIP2_NO_OVERRIDE; 398 cfgchip2 |= CFGCHIP2_NO_OVERRIDE;
407 break; 399 break;
408 default: 400 default:
409 dev_dbg(musb->controller, "Trying to set unsupported mode %u\n", musb_mode); 401 dev_dbg(musb->controller, "Trying to set unsupported mode %u\n", musb_mode);
410 } 402 }
411 403
412 __raw_writel(cfgchip2, CFGCHIP2); 404 __raw_writel(cfgchip2, CFGCHIP2);
413 return 0; 405 return 0;
414 } 406 }
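
da8xx_musb_set_mode() above is a plain read-modify-write of the OTG mode field in CFGCHIP2: clear the field, then OR in exactly one of the force-host, force-device or no-override encodings. A generic sketch of that pattern; the bit positions below are made up, and the real encodings come from the DA8xx CFGCHIP2_* platform headers:

#include <stdio.h>

/* Made-up stand-ins for the CFGCHIP2_* mode constants; only the
 * clear-then-set pattern is the point here. */
#define OTGMODE_MASK  (0x3u << 13)
#define FORCE_HOST    (0x1u << 13)
#define FORCE_DEVICE  (0x2u << 13)

static unsigned int set_otg_mode(unsigned int cfgchip2, unsigned int mode)
{
    cfgchip2 &= ~OTGMODE_MASK;      /* clear the whole field first */
    cfgchip2 |= mode;               /* then force exactly one mode */
    return cfgchip2;
}

int main(void)
{
    unsigned int reg = 0x00006080;  /* arbitrary register snapshot, field currently 0x3 */

    printf("host:   0x%08x\n", set_otg_mode(reg, FORCE_HOST));     /* 0x00002080 */
    printf("device: 0x%08x\n", set_otg_mode(reg, FORCE_DEVICE));   /* 0x00004080 */
    return 0;
}
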
415 407
416 static int da8xx_musb_init(struct musb *musb) 408 static int da8xx_musb_init(struct musb *musb)
417 { 409 {
418 void __iomem *reg_base = musb->ctrl_base; 410 void __iomem *reg_base = musb->ctrl_base;
419 u32 rev; 411 u32 rev;
420 412
421 musb->mregs += DA8XX_MENTOR_CORE_OFFSET; 413 musb->mregs += DA8XX_MENTOR_CORE_OFFSET;
422 414
423 /* Returns zero if e.g. not clocked */ 415 /* Returns zero if e.g. not clocked */
424 rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG); 416 rev = musb_readl(reg_base, DA8XX_USB_REVISION_REG);
425 if (!rev) 417 if (!rev)
426 goto fail; 418 goto fail;
427 419
428 usb_nop_xceiv_register(); 420 usb_nop_xceiv_register();
429 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 421 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
430 if (IS_ERR_OR_NULL(musb->xceiv)) 422 if (IS_ERR_OR_NULL(musb->xceiv))
431 goto fail; 423 goto fail;
432 424
433 if (is_host_enabled(musb)) 425 setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
434 setup_timer(&otg_workaround, otg_timer, (unsigned long)musb);
435 426
436 /* Reset the controller */ 427 /* Reset the controller */
437 musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK); 428 musb_writel(reg_base, DA8XX_USB_CTRL_REG, DA8XX_SOFT_RESET_MASK);
438 429
439 /* Start the on-chip PHY and its PLL. */ 430 /* Start the on-chip PHY and its PLL. */
440 phy_on(); 431 phy_on();
441 432
442 msleep(5); 433 msleep(5);
443 434
444 /* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */ 435 /* NOTE: IRQs are in mixed mode, not bypass to pure MUSB */
445 pr_debug("DA8xx OTG revision %08x, PHY %03x, control %02x\n", 436 pr_debug("DA8xx OTG revision %08x, PHY %03x, control %02x\n",
446 rev, __raw_readl(CFGCHIP2), 437 rev, __raw_readl(CFGCHIP2),
447 musb_readb(reg_base, DA8XX_USB_CTRL_REG)); 438 musb_readb(reg_base, DA8XX_USB_CTRL_REG));
448 439
449 musb->isr = da8xx_musb_interrupt; 440 musb->isr = da8xx_musb_interrupt;
450 return 0; 441 return 0;
451 fail: 442 fail:
452 return -ENODEV; 443 return -ENODEV;
453 } 444 }
454 445
455 static int da8xx_musb_exit(struct musb *musb) 446 static int da8xx_musb_exit(struct musb *musb)
456 { 447 {
457 if (is_host_enabled(musb)) 448 del_timer_sync(&otg_workaround);
458 del_timer_sync(&otg_workaround);
459 449
460 phy_off(); 450 phy_off();
461 451
462 usb_put_phy(musb->xceiv); 452 usb_put_phy(musb->xceiv);
463 usb_nop_xceiv_unregister(); 453 usb_nop_xceiv_unregister();
464 454
465 return 0; 455 return 0;
466 } 456 }
467 457
468 static const struct musb_platform_ops da8xx_ops = { 458 static const struct musb_platform_ops da8xx_ops = {
469 .init = da8xx_musb_init, 459 .init = da8xx_musb_init,
470 .exit = da8xx_musb_exit, 460 .exit = da8xx_musb_exit,
471 461
472 .enable = da8xx_musb_enable, 462 .enable = da8xx_musb_enable,
473 .disable = da8xx_musb_disable, 463 .disable = da8xx_musb_disable,
474 464
475 .set_mode = da8xx_musb_set_mode, 465 .set_mode = da8xx_musb_set_mode,
476 .try_idle = da8xx_musb_try_idle, 466 .try_idle = da8xx_musb_try_idle,
477 467
478 .set_vbus = da8xx_musb_set_vbus, 468 .set_vbus = da8xx_musb_set_vbus,
479 }; 469 };
480 470
481 static u64 da8xx_dmamask = DMA_BIT_MASK(32); 471 static u64 da8xx_dmamask = DMA_BIT_MASK(32);
482 472
483 static int __devinit da8xx_probe(struct platform_device *pdev) 473 static int __devinit da8xx_probe(struct platform_device *pdev)
484 { 474 {
485 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 475 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
486 struct platform_device *musb; 476 struct platform_device *musb;
487 struct da8xx_glue *glue; 477 struct da8xx_glue *glue;
488 478
489 struct clk *clk; 479 struct clk *clk;
490 480
491 int ret = -ENOMEM; 481 int ret = -ENOMEM;
492 482
493 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 483 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
494 if (!glue) { 484 if (!glue) {
495 dev_err(&pdev->dev, "failed to allocate glue context\n"); 485 dev_err(&pdev->dev, "failed to allocate glue context\n");
496 goto err0; 486 goto err0;
497 } 487 }
498 488
499 musb = platform_device_alloc("musb-hdrc", -1); 489 musb = platform_device_alloc("musb-hdrc", -1);
500 if (!musb) { 490 if (!musb) {
501 dev_err(&pdev->dev, "failed to allocate musb device\n"); 491 dev_err(&pdev->dev, "failed to allocate musb device\n");
502 goto err1; 492 goto err1;
503 } 493 }
504 494
505 clk = clk_get(&pdev->dev, "usb20"); 495 clk = clk_get(&pdev->dev, "usb20");
506 if (IS_ERR(clk)) { 496 if (IS_ERR(clk)) {
507 dev_err(&pdev->dev, "failed to get clock\n"); 497 dev_err(&pdev->dev, "failed to get clock\n");
508 ret = PTR_ERR(clk); 498 ret = PTR_ERR(clk);
509 goto err2; 499 goto err2;
510 } 500 }
511 501
512 ret = clk_enable(clk); 502 ret = clk_enable(clk);
513 if (ret) { 503 if (ret) {
514 dev_err(&pdev->dev, "failed to enable clock\n"); 504 dev_err(&pdev->dev, "failed to enable clock\n");
515 goto err3; 505 goto err3;
516 } 506 }
517 507
518 musb->dev.parent = &pdev->dev; 508 musb->dev.parent = &pdev->dev;
519 musb->dev.dma_mask = &da8xx_dmamask; 509 musb->dev.dma_mask = &da8xx_dmamask;
520 musb->dev.coherent_dma_mask = da8xx_dmamask; 510 musb->dev.coherent_dma_mask = da8xx_dmamask;
521 511
522 glue->dev = &pdev->dev; 512 glue->dev = &pdev->dev;
523 glue->musb = musb; 513 glue->musb = musb;
524 glue->clk = clk; 514 glue->clk = clk;
525 515
526 pdata->platform_ops = &da8xx_ops; 516 pdata->platform_ops = &da8xx_ops;
527 517
528 platform_set_drvdata(pdev, glue); 518 platform_set_drvdata(pdev, glue);
529 519
530 ret = platform_device_add_resources(musb, pdev->resource, 520 ret = platform_device_add_resources(musb, pdev->resource,
531 pdev->num_resources); 521 pdev->num_resources);
532 if (ret) { 522 if (ret) {
533 dev_err(&pdev->dev, "failed to add resources\n"); 523 dev_err(&pdev->dev, "failed to add resources\n");
534 goto err4; 524 goto err4;
535 } 525 }
536 526
537 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 527 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
538 if (ret) { 528 if (ret) {
539 dev_err(&pdev->dev, "failed to add platform_data\n"); 529 dev_err(&pdev->dev, "failed to add platform_data\n");
540 goto err4; 530 goto err4;
541 } 531 }
542 532
543 ret = platform_device_add(musb); 533 ret = platform_device_add(musb);
544 if (ret) { 534 if (ret) {
545 dev_err(&pdev->dev, "failed to register musb device\n"); 535 dev_err(&pdev->dev, "failed to register musb device\n");
546 goto err4; 536 goto err4;
547 } 537 }
548 538
549 return 0; 539 return 0;
550 540
551 err4: 541 err4:
552 clk_disable(clk); 542 clk_disable(clk);
553 543
554 err3: 544 err3:
555 clk_put(clk); 545 clk_put(clk);
556 546
557 err2: 547 err2:
558 platform_device_put(musb); 548 platform_device_put(musb);
559 549
560 err1: 550 err1:
561 kfree(glue); 551 kfree(glue);
562 552
563 err0: 553 err0:
564 return ret; 554 return ret;
565 } 555 }
566 556
567 static int __devexit da8xx_remove(struct platform_device *pdev) 557 static int __devexit da8xx_remove(struct platform_device *pdev)
568 { 558 {
569 struct da8xx_glue *glue = platform_get_drvdata(pdev); 559 struct da8xx_glue *glue = platform_get_drvdata(pdev);
570 560
571 platform_device_del(glue->musb); 561 platform_device_del(glue->musb);
572 platform_device_put(glue->musb); 562 platform_device_put(glue->musb);
573 clk_disable(glue->clk); 563 clk_disable(glue->clk);
574 clk_put(glue->clk); 564 clk_put(glue->clk);
575 kfree(glue); 565 kfree(glue);
576 566
577 return 0; 567 return 0;
578 } 568 }
579 569
580 static struct platform_driver da8xx_driver = { 570 static struct platform_driver da8xx_driver = {
581 .probe = da8xx_probe, 571 .probe = da8xx_probe,
582 .remove = __devexit_p(da8xx_remove), 572 .remove = __devexit_p(da8xx_remove),
583 .driver = { 573 .driver = {
584 .name = "musb-da8xx", 574 .name = "musb-da8xx",
585 }, 575 },
586 }; 576 };
587 577
588 MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer"); 578 MODULE_DESCRIPTION("DA8xx/OMAP-L1x MUSB Glue Layer");
589 MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>"); 579 MODULE_AUTHOR("Sergei Shtylyov <sshtylyov@ru.mvista.com>");
590 MODULE_LICENSE("GPL v2"); 580 MODULE_LICENSE("GPL v2");
591 581
592 static int __init da8xx_init(void) 582 static int __init da8xx_init(void)
593 { 583 {
594 return platform_driver_register(&da8xx_driver); 584 return platform_driver_register(&da8xx_driver);
595 } 585 }
596 module_init(da8xx_init); 586 module_init(da8xx_init);
597 587
598 static void __exit da8xx_exit(void) 588 static void __exit da8xx_exit(void)
599 { 589 {
600 platform_driver_unregister(&da8xx_driver); 590 platform_driver_unregister(&da8xx_driver);
601 } 591 }
602 module_exit(da8xx_exit); 592 module_exit(da8xx_exit);
603 593
drivers/usb/musb/davinci.c
1 /* 1 /*
2 * Copyright (C) 2005-2006 by Texas Instruments 2 * Copyright (C) 2005-2006 by Texas Instruments
3 * 3 *
4 * This file is part of the Inventra Controller Driver for Linux. 4 * This file is part of the Inventra Controller Driver for Linux.
5 * 5 *
6 * The Inventra Controller Driver for Linux is free software; you 6 * The Inventra Controller Driver for Linux is free software; you
7 * can redistribute it and/or modify it under the terms of the GNU 7 * can redistribute it and/or modify it under the terms of the GNU
8 * General Public License version 2 as published by the Free Software 8 * General Public License version 2 as published by the Free Software
9 * Foundation. 9 * Foundation.
10 * 10 *
11 * The Inventra Controller Driver for Linux is distributed in 11 * The Inventra Controller Driver for Linux is distributed in
12 * the hope that it will be useful, but WITHOUT ANY WARRANTY; 12 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
13 * without even the implied warranty of MERCHANTABILITY or 13 * without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public 14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 * License for more details. 15 * License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with The Inventra Controller Driver for Linux ; if not, 18 * along with The Inventra Controller Driver for Linux ; if not,
19 * write to the Free Software Foundation, Inc., 59 Temple Place, 19 * write to the Free Software Foundation, Inc., 59 Temple Place,
20 * Suite 330, Boston, MA 02111-1307 USA 20 * Suite 330, Boston, MA 02111-1307 USA
21 * 21 *
22 */ 22 */
23 23
24 #include <linux/module.h> 24 #include <linux/module.h>
25 #include <linux/kernel.h> 25 #include <linux/kernel.h>
26 #include <linux/sched.h> 26 #include <linux/sched.h>
27 #include <linux/init.h> 27 #include <linux/init.h>
28 #include <linux/list.h> 28 #include <linux/list.h>
29 #include <linux/delay.h> 29 #include <linux/delay.h>
30 #include <linux/clk.h> 30 #include <linux/clk.h>
31 #include <linux/err.h> 31 #include <linux/err.h>
32 #include <linux/io.h> 32 #include <linux/io.h>
33 #include <linux/gpio.h> 33 #include <linux/gpio.h>
34 #include <linux/platform_device.h> 34 #include <linux/platform_device.h>
35 #include <linux/dma-mapping.h> 35 #include <linux/dma-mapping.h>
36 36
37 #include <mach/cputype.h> 37 #include <mach/cputype.h>
38 #include <mach/hardware.h> 38 #include <mach/hardware.h>
39 39
40 #include <asm/mach-types.h> 40 #include <asm/mach-types.h>
41 41
42 #include "musb_core.h" 42 #include "musb_core.h"
43 43
44 #ifdef CONFIG_MACH_DAVINCI_EVM 44 #ifdef CONFIG_MACH_DAVINCI_EVM
45 #define GPIO_nVBUS_DRV 160 45 #define GPIO_nVBUS_DRV 160
46 #endif 46 #endif
47 47
48 #include "davinci.h" 48 #include "davinci.h"
49 #include "cppi_dma.h" 49 #include "cppi_dma.h"
50 50
51 51
52 #define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR) 52 #define USB_PHY_CTRL IO_ADDRESS(USBPHY_CTL_PADDR)
53 #define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR) 53 #define DM355_DEEPSLEEP IO_ADDRESS(DM355_DEEPSLEEP_PADDR)
54 54
55 struct davinci_glue { 55 struct davinci_glue {
56 struct device *dev; 56 struct device *dev;
57 struct platform_device *musb; 57 struct platform_device *musb;
58 struct clk *clk; 58 struct clk *clk;
59 }; 59 };
60 60
61 /* REVISIT (PM) we should be able to keep the PHY in low power mode most 61 /* REVISIT (PM) we should be able to keep the PHY in low power mode most
62 * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0 62 * of the time (24 MHZ oscillator and PLL off, etc) by setting POWER.D0
63 * and, when in host mode, autosuspending idle root ports... PHYPLLON 63 * and, when in host mode, autosuspending idle root ports... PHYPLLON
64 * (overriding SUSPENDM?) then likely needs to stay off. 64 * (overriding SUSPENDM?) then likely needs to stay off.
65 */ 65 */
66 66
67 static inline void phy_on(void) 67 static inline void phy_on(void)
68 { 68 {
69 u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); 69 u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
70 70
71 /* power everything up; start the on-chip PHY and its PLL */ 71 /* power everything up; start the on-chip PHY and its PLL */
72 phy_ctrl &= ~(USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN); 72 phy_ctrl &= ~(USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN);
73 phy_ctrl |= USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON; 73 phy_ctrl |= USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON;
74 __raw_writel(phy_ctrl, USB_PHY_CTRL); 74 __raw_writel(phy_ctrl, USB_PHY_CTRL);
75 75
76 /* wait for PLL to lock before proceeding */ 76 /* wait for PLL to lock before proceeding */
77 while ((__raw_readl(USB_PHY_CTRL) & USBPHY_PHYCLKGD) == 0) 77 while ((__raw_readl(USB_PHY_CTRL) & USBPHY_PHYCLKGD) == 0)
78 cpu_relax(); 78 cpu_relax();
79 } 79 }
80 80
81 static inline void phy_off(void) 81 static inline void phy_off(void)
82 { 82 {
83 u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); 83 u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
84 84
85 /* powerdown the on-chip PHY, its PLL, and the OTG block */ 85 /* powerdown the on-chip PHY, its PLL, and the OTG block */
86 phy_ctrl &= ~(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON); 86 phy_ctrl &= ~(USBPHY_SESNDEN | USBPHY_VBDTCTEN | USBPHY_PHYPLLON);
87 phy_ctrl |= USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN; 87 phy_ctrl |= USBPHY_OSCPDWN | USBPHY_OTGPDWN | USBPHY_PHYPDWN;
88 __raw_writel(phy_ctrl, USB_PHY_CTRL); 88 __raw_writel(phy_ctrl, USB_PHY_CTRL);
89 } 89 }
90 90
91 static int dma_off = 1; 91 static int dma_off = 1;
92 92
93 static void davinci_musb_enable(struct musb *musb) 93 static void davinci_musb_enable(struct musb *musb)
94 { 94 {
95 u32 tmp, old, val; 95 u32 tmp, old, val;
96 96
97 /* workaround: setup irqs through both register sets */ 97 /* workaround: setup irqs through both register sets */
98 tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK) 98 tmp = (musb->epmask & DAVINCI_USB_TX_ENDPTS_MASK)
99 << DAVINCI_USB_TXINT_SHIFT; 99 << DAVINCI_USB_TXINT_SHIFT;
100 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); 100 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
101 old = tmp; 101 old = tmp;
102 tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK)) 102 tmp = (musb->epmask & (0xfffe & DAVINCI_USB_RX_ENDPTS_MASK))
103 << DAVINCI_USB_RXINT_SHIFT; 103 << DAVINCI_USB_RXINT_SHIFT;
104 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); 104 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
105 tmp |= old; 105 tmp |= old;
106 106
107 val = ~MUSB_INTR_SOF; 107 val = ~MUSB_INTR_SOF;
108 tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT); 108 tmp |= ((val & 0x01ff) << DAVINCI_USB_USBINT_SHIFT);
109 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp); 109 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_SET_REG, tmp);
110 110
111 if (is_dma_capable() && !dma_off) 111 if (is_dma_capable() && !dma_off)
112 printk(KERN_WARNING "%s %s: dma not reactivated\n", 112 printk(KERN_WARNING "%s %s: dma not reactivated\n",
113 __FILE__, __func__); 113 __FILE__, __func__);
114 else 114 else
115 dma_off = 0; 115 dma_off = 0;
116 116
117 /* force a DRVVBUS irq so we can start polling for ID change */ 117 /* force a DRVVBUS irq so we can start polling for ID change */
118 if (is_otg_enabled(musb)) 118 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
119 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
120 DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT); 119 DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT);
121 } 120 }
122 121
123 /* 122 /*
124 * Disable the HDRC and flush interrupts 123 * Disable the HDRC and flush interrupts
125 */ 124 */
126 static void davinci_musb_disable(struct musb *musb) 125 static void davinci_musb_disable(struct musb *musb)
127 { 126 {
128 /* because we don't set CTRLR.UINT, "important" to: 127 /* because we don't set CTRLR.UINT, "important" to:
129 * - not read/write INTRUSB/INTRUSBE 128 * - not read/write INTRUSB/INTRUSBE
130 * - (except during initial setup, as workaround) 129 * - (except during initial setup, as workaround)
131 * - use INTSETR/INTCLRR instead 130 * - use INTSETR/INTCLRR instead
132 */ 131 */
133 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG, 132 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_MASK_CLR_REG,
134 DAVINCI_USB_USBINT_MASK 133 DAVINCI_USB_USBINT_MASK
135 | DAVINCI_USB_TXINT_MASK 134 | DAVINCI_USB_TXINT_MASK
136 | DAVINCI_USB_RXINT_MASK); 135 | DAVINCI_USB_RXINT_MASK);
137 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 136 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
138 musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0); 137 musb_writel(musb->ctrl_base, DAVINCI_USB_EOI_REG, 0);
139 138
140 if (is_dma_capable() && !dma_off) 139 if (is_dma_capable() && !dma_off)
141 WARNING("dma still active\n"); 140 WARNING("dma still active\n");
142 } 141 }
143 142
144 143
145 #define portstate(stmt) stmt 144 #define portstate(stmt) stmt
146 145
147 /* 146 /*
148 * VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM, 147 * VBUS SWITCHING IS BOARD-SPECIFIC ... at least for the DM6446 EVM,
149 * which doesn't wire DRVVBUS to the FET that switches it. Unclear 148 * which doesn't wire DRVVBUS to the FET that switches it. Unclear
150 * if that's a problem with the DM6446 chip or just with that board. 149 * if that's a problem with the DM6446 chip or just with that board.
151 * 150 *
152 * In either case, the DM355 EVM automates DRVVBUS the normal way, 151 * In either case, the DM355 EVM automates DRVVBUS the normal way,
153 * when J10 is out, and TI documents it as handling OTG. 152 * when J10 is out, and TI documents it as handling OTG.
154 */ 153 */
155 154
156 #ifdef CONFIG_MACH_DAVINCI_EVM 155 #ifdef CONFIG_MACH_DAVINCI_EVM
157 156
158 static int vbus_state = -1; 157 static int vbus_state = -1;
159 158
160 /* I2C operations are always synchronous, and require a task context. 159 /* I2C operations are always synchronous, and require a task context.
161 * With unloaded systems, using the shared workqueue seems to suffice 160 * With unloaded systems, using the shared workqueue seems to suffice
162 * to satisfy the 100msec A_WAIT_VRISE timeout... 161 * to satisfy the 100msec A_WAIT_VRISE timeout...
163 */ 162 */
164 static void evm_deferred_drvvbus(struct work_struct *ignored) 163 static void evm_deferred_drvvbus(struct work_struct *ignored)
165 { 164 {
166 gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state); 165 gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
167 vbus_state = !vbus_state; 166 vbus_state = !vbus_state;
168 } 167 }
169 168
170 #endif /* EVM */ 169 #endif /* EVM */
171 170
172 static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate) 171 static void davinci_musb_source_power(struct musb *musb, int is_on, int immediate)
173 { 172 {
174 #ifdef CONFIG_MACH_DAVINCI_EVM 173 #ifdef CONFIG_MACH_DAVINCI_EVM
175 if (is_on) 174 if (is_on)
176 is_on = 1; 175 is_on = 1;
177 176
178 if (vbus_state == is_on) 177 if (vbus_state == is_on)
179 return; 178 return;
180 vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */ 179 vbus_state = !is_on; /* 0/1 vs "-1 == unknown/init" */
181 180
182 if (machine_is_davinci_evm()) { 181 if (machine_is_davinci_evm()) {
183 static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus); 182 static DECLARE_WORK(evm_vbus_work, evm_deferred_drvvbus);
184 183
185 if (immediate) 184 if (immediate)
186 gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state); 185 gpio_set_value_cansleep(GPIO_nVBUS_DRV, vbus_state);
187 else 186 else
188 schedule_work(&evm_vbus_work); 187 schedule_work(&evm_vbus_work);
189 } 188 }
190 if (immediate) 189 if (immediate)
191 vbus_state = is_on; 190 vbus_state = is_on;
192 #endif 191 #endif
193 } 192 }
194 193
195 static void davinci_musb_set_vbus(struct musb *musb, int is_on) 194 static void davinci_musb_set_vbus(struct musb *musb, int is_on)
196 { 195 {
197 WARN_ON(is_on && is_peripheral_active(musb)); 196 WARN_ON(is_on && is_peripheral_active(musb));
198 davinci_musb_source_power(musb, is_on, 0); 197 davinci_musb_source_power(musb, is_on, 0);
199 } 198 }
200 199
201 200
202 #define POLL_SECONDS 2 201 #define POLL_SECONDS 2
203 202
204 static struct timer_list otg_workaround; 203 static struct timer_list otg_workaround;
205 204
206 static void otg_timer(unsigned long _musb) 205 static void otg_timer(unsigned long _musb)
207 { 206 {
208 struct musb *musb = (void *)_musb; 207 struct musb *musb = (void *)_musb;
209 void __iomem *mregs = musb->mregs; 208 void __iomem *mregs = musb->mregs;
210 u8 devctl; 209 u8 devctl;
211 unsigned long flags; 210 unsigned long flags;
212 211
213 /* We poll because DaVinci's won't expose several OTG-critical 212 /* We poll because DaVinci's won't expose several OTG-critical
214 * status change events (from the transceiver) otherwise. 213 * status change events (from the transceiver) otherwise.
215 */ 214 */
216 devctl = musb_readb(mregs, MUSB_DEVCTL); 215 devctl = musb_readb(mregs, MUSB_DEVCTL);
217 dev_dbg(musb->controller, "poll devctl %02x (%s)\n", devctl, 216 dev_dbg(musb->controller, "poll devctl %02x (%s)\n", devctl,
218 otg_state_string(musb->xceiv->state)); 217 otg_state_string(musb->xceiv->state));
219 218
220 spin_lock_irqsave(&musb->lock, flags); 219 spin_lock_irqsave(&musb->lock, flags);
221 switch (musb->xceiv->state) { 220 switch (musb->xceiv->state) {
222 case OTG_STATE_A_WAIT_VFALL: 221 case OTG_STATE_A_WAIT_VFALL:
223 /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL 222 /* Wait till VBUS falls below SessionEnd (~0.2V); the 1.3 RTL
224 * seems to mis-handle session "start" otherwise (or in our 223 * seems to mis-handle session "start" otherwise (or in our
225 * case "recover"), in routine "VBUS was valid by the time 224 * case "recover"), in routine "VBUS was valid by the time
226 * VBUSERR got reported during enumeration" cases. 225 * VBUSERR got reported during enumeration" cases.
227 */ 226 */
228 if (devctl & MUSB_DEVCTL_VBUS) { 227 if (devctl & MUSB_DEVCTL_VBUS) {
229 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 228 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
230 break; 229 break;
231 } 230 }
232 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 231 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
233 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG, 232 musb_writel(musb->ctrl_base, DAVINCI_USB_INT_SET_REG,
234 MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT); 233 MUSB_INTR_VBUSERROR << DAVINCI_USB_USBINT_SHIFT);
235 break; 234 break;
236 case OTG_STATE_B_IDLE: 235 case OTG_STATE_B_IDLE:
237 if (!is_peripheral_enabled(musb)) 236 /*
238 break; 237 * There's no ID-changed IRQ, so we have no good way to tell
239
240 /* There's no ID-changed IRQ, so we have no good way to tell
241 * when to switch to the A-Default state machine (by setting 238 * when to switch to the A-Default state machine (by setting
242 * the DEVCTL.SESSION flag). 239 * the DEVCTL.SESSION flag).
243 * 240 *
244 * Workaround: whenever we're in B_IDLE, try setting the 241 * Workaround: whenever we're in B_IDLE, try setting the
245 * session flag every few seconds. If it works, ID was 242 * session flag every few seconds. If it works, ID was
246 * grounded and we're now in the A-Default state machine. 243 * grounded and we're now in the A-Default state machine.
247 * 244 *
248 * NOTE setting the session flag is _supposed_ to trigger 245 * NOTE setting the session flag is _supposed_ to trigger
249 * SRP, but clearly it doesn't. 246 * SRP, but clearly it doesn't.
250 */ 247 */
251 musb_writeb(mregs, MUSB_DEVCTL, 248 musb_writeb(mregs, MUSB_DEVCTL,
252 devctl | MUSB_DEVCTL_SESSION); 249 devctl | MUSB_DEVCTL_SESSION);
253 devctl = musb_readb(mregs, MUSB_DEVCTL); 250 devctl = musb_readb(mregs, MUSB_DEVCTL);
254 if (devctl & MUSB_DEVCTL_BDEVICE) 251 if (devctl & MUSB_DEVCTL_BDEVICE)
255 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 252 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
256 else 253 else
257 musb->xceiv->state = OTG_STATE_A_IDLE; 254 musb->xceiv->state = OTG_STATE_A_IDLE;
258 break; 255 break;
259 default: 256 default:
260 break; 257 break;
261 } 258 }
262 spin_unlock_irqrestore(&musb->lock, flags); 259 spin_unlock_irqrestore(&musb->lock, flags);
263 } 260 }
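The B_IDLE branch above is a poll-and-retry workaround: set the session flag, read DEVCTL back, and either re-arm the timer or conclude that ID was grounded and the A-side state machine should take over. As a rough stand-alone illustration of that retry pattern (plain C with a simulated register and made-up bit values, not driver code):

	#include <stdio.h>
	#include <unistd.h>

	/* Hypothetical stand-ins for DEVCTL bits; values are for illustration only. */
	#define DEVCTL_SESSION	0x01
	#define DEVCTL_BDEVICE	0x80

	/* Simulated device control register; a real driver reads the hardware. */
	static unsigned char devctl_reg = DEVCTL_BDEVICE;

	static unsigned char devctl_read(void) { return devctl_reg; }
	static void devctl_write(unsigned char v) { devctl_reg = v; }

	int main(void)
	{
		int polls;

		/*
		 * Poll-and-retry: request a session, read back, and either
		 * retry after a delay (the timer re-arm in the driver) or
		 * stop once the controller no longer reports B-device.
		 */
		for (polls = 0; polls < 5; polls++) {
			devctl_write(devctl_read() | DEVCTL_SESSION);

			if (!(devctl_read() & DEVCTL_BDEVICE)) {
				printf("ID grounded after %d poll(s); A-device now\n",
						polls + 1);
				return 0;
			}

			printf("still B-device, retrying\n");
			sleep(1);

			/* Simulate the ID pin being grounded on the third poll. */
			if (polls == 2)
				devctl_reg &= ~DEVCTL_BDEVICE;
		}

		printf("gave up waiting for ID change\n");
		return 0;
	}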
264 261
265 static irqreturn_t davinci_musb_interrupt(int irq, void *__hci) 262 static irqreturn_t davinci_musb_interrupt(int irq, void *__hci)
266 { 263 {
267 unsigned long flags; 264 unsigned long flags;
268 irqreturn_t retval = IRQ_NONE; 265 irqreturn_t retval = IRQ_NONE;
269 struct musb *musb = __hci; 266 struct musb *musb = __hci;
270 struct usb_otg *otg = musb->xceiv->otg; 267 struct usb_otg *otg = musb->xceiv->otg;
271 void __iomem *tibase = musb->ctrl_base; 268 void __iomem *tibase = musb->ctrl_base;
272 struct cppi *cppi; 269 struct cppi *cppi;
273 u32 tmp; 270 u32 tmp;
274 271
275 spin_lock_irqsave(&musb->lock, flags); 272 spin_lock_irqsave(&musb->lock, flags);
276 273
277 /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through 274 /* NOTE: DaVinci shadows the Mentor IRQs. Don't manage them through
278 * the Mentor registers (except for setup), use the TI ones and EOI. 275 * the Mentor registers (except for setup), use the TI ones and EOI.
279 * 276 *
280 * Docs describe irq "vector" registers associated with the CPPI and 277 * Docs describe irq "vector" registers associated with the CPPI and
281 * USB EOI registers. These hold a bitmask corresponding to the 278 * USB EOI registers. These hold a bitmask corresponding to the
282 * current IRQ, not an irq handler address. Would using those bits 279 * current IRQ, not an irq handler address. Would using those bits
283 * resolve some of the races observed in this dispatch code?? 280 * resolve some of the races observed in this dispatch code??
284 */ 281 */
285 282
286 /* CPPI interrupts share the same IRQ line, but have their own 283 /* CPPI interrupts share the same IRQ line, but have their own
287 * mask, state, "vector", and EOI registers. 284 * mask, state, "vector", and EOI registers.
288 */ 285 */
289 cppi = container_of(musb->dma_controller, struct cppi, controller); 286 cppi = container_of(musb->dma_controller, struct cppi, controller);
290 if (is_cppi_enabled() && musb->dma_controller && !cppi->irq) 287 if (is_cppi_enabled() && musb->dma_controller && !cppi->irq)
291 retval = cppi_interrupt(irq, __hci); 288 retval = cppi_interrupt(irq, __hci);
292 289
293 /* ack and handle non-CPPI interrupts */ 290 /* ack and handle non-CPPI interrupts */
294 tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG); 291 tmp = musb_readl(tibase, DAVINCI_USB_INT_SRC_MASKED_REG);
295 musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp); 292 musb_writel(tibase, DAVINCI_USB_INT_SRC_CLR_REG, tmp);
296 dev_dbg(musb->controller, "IRQ %08x\n", tmp); 293 dev_dbg(musb->controller, "IRQ %08x\n", tmp);
297 294
298 musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK) 295 musb->int_rx = (tmp & DAVINCI_USB_RXINT_MASK)
299 >> DAVINCI_USB_RXINT_SHIFT; 296 >> DAVINCI_USB_RXINT_SHIFT;
300 musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK) 297 musb->int_tx = (tmp & DAVINCI_USB_TXINT_MASK)
301 >> DAVINCI_USB_TXINT_SHIFT; 298 >> DAVINCI_USB_TXINT_SHIFT;
302 musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK) 299 musb->int_usb = (tmp & DAVINCI_USB_USBINT_MASK)
303 >> DAVINCI_USB_USBINT_SHIFT; 300 >> DAVINCI_USB_USBINT_SHIFT;
304 301
305 /* DRVVBUS irqs are the only proxy we have (a very poor one!) for 302 /* DRVVBUS irqs are the only proxy we have (a very poor one!) for
306 * DaVinci's missing ID change IRQ. We need an ID change IRQ to 303 * DaVinci's missing ID change IRQ. We need an ID change IRQ to
307 * switch appropriately between halves of the OTG state machine. 304 * switch appropriately between halves of the OTG state machine.
308 * Managing DEVCTL.SESSION per Mentor docs requires we know its 305 * Managing DEVCTL.SESSION per Mentor docs requires we know its
309 * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. 306 * value, but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
310 * Also, DRVVBUS pulses for SRP (but not at 5V) ... 307 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
311 */ 308 */
312 if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) { 309 if (tmp & (DAVINCI_INTR_DRVVBUS << DAVINCI_USB_USBINT_SHIFT)) {
313 int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG); 310 int drvvbus = musb_readl(tibase, DAVINCI_USB_STAT_REG);
314 void __iomem *mregs = musb->mregs; 311 void __iomem *mregs = musb->mregs;
315 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 312 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
316 int err = musb->int_usb & MUSB_INTR_VBUSERROR; 313 int err = musb->int_usb & MUSB_INTR_VBUSERROR;
317 314
318 err = is_host_enabled(musb) 315 err = musb->int_usb & MUSB_INTR_VBUSERROR;
319 && (musb->int_usb & MUSB_INTR_VBUSERROR);
320 if (err) { 316 if (err) {
321 /* The Mentor core doesn't debounce VBUS as needed 317 /* The Mentor core doesn't debounce VBUS as needed
322 * to cope with device connect current spikes. This 318 * to cope with device connect current spikes. This
323 * means it's not uncommon for bus-powered devices 319 * means it's not uncommon for bus-powered devices
324 * to get VBUS errors during enumeration. 320 * to get VBUS errors during enumeration.
325 * 321 *
326 * This is a workaround, but newer RTL from Mentor 322 * This is a workaround, but newer RTL from Mentor
327 * seems to allow a better one: "re"starting sessions 323 * seems to allow a better one: "re"starting sessions
328 * without waiting (on EVM, a **long** time) for VBUS 324 * without waiting (on EVM, a **long** time) for VBUS
329 * to stop registering in devctl. 325 * to stop registering in devctl.
330 */ 326 */
331 musb->int_usb &= ~MUSB_INTR_VBUSERROR; 327 musb->int_usb &= ~MUSB_INTR_VBUSERROR;
332 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 328 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
333 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 329 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
334 WARNING("VBUS error workaround (delay coming)\n"); 330 WARNING("VBUS error workaround (delay coming)\n");
335 } else if (is_host_enabled(musb) && drvvbus) { 331 } else if (drvvbus) {
336 MUSB_HST_MODE(musb); 332 MUSB_HST_MODE(musb);
337 otg->default_a = 1; 333 otg->default_a = 1;
338 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 334 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
339 portstate(musb->port1_status |= USB_PORT_STAT_POWER); 335 portstate(musb->port1_status |= USB_PORT_STAT_POWER);
340 del_timer(&otg_workaround); 336 del_timer(&otg_workaround);
341 } else { 337 } else {
342 musb->is_active = 0; 338 musb->is_active = 0;
343 MUSB_DEV_MODE(musb); 339 MUSB_DEV_MODE(musb);
344 otg->default_a = 0; 340 otg->default_a = 0;
345 musb->xceiv->state = OTG_STATE_B_IDLE; 341 musb->xceiv->state = OTG_STATE_B_IDLE;
346 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER); 342 portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
347 } 343 }
348 344
349 /* NOTE: this must complete poweron within 100 msec 345 /* NOTE: this must complete poweron within 100 msec
350 * (OTG_TIME_A_WAIT_VRISE) but we don't check for that. 346 * (OTG_TIME_A_WAIT_VRISE) but we don't check for that.
351 */ 347 */
352 davinci_musb_source_power(musb, drvvbus, 0); 348 davinci_musb_source_power(musb, drvvbus, 0);
353 dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", 349 dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
354 drvvbus ? "on" : "off", 350 drvvbus ? "on" : "off",
355 otg_state_string(musb->xceiv->state), 351 otg_state_string(musb->xceiv->state),
356 err ? " ERROR" : "", 352 err ? " ERROR" : "",
357 devctl); 353 devctl);
358 retval = IRQ_HANDLED; 354 retval = IRQ_HANDLED;
359 } 355 }
360 356
361 if (musb->int_tx || musb->int_rx || musb->int_usb) 357 if (musb->int_tx || musb->int_rx || musb->int_usb)
362 retval |= musb_interrupt(musb); 358 retval |= musb_interrupt(musb);
363 359
364 /* irq stays asserted until EOI is written */ 360 /* irq stays asserted until EOI is written */
365 musb_writel(tibase, DAVINCI_USB_EOI_REG, 0); 361 musb_writel(tibase, DAVINCI_USB_EOI_REG, 0);
366 362
367 /* poll for ID change */ 363 /* poll for ID change */
368 if (is_otg_enabled(musb) 364 if (musb->xceiv->state == OTG_STATE_B_IDLE)
369 && musb->xceiv->state == OTG_STATE_B_IDLE)
370 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ); 365 mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
371 366
372 spin_unlock_irqrestore(&musb->lock, flags); 367 spin_unlock_irqrestore(&musb->lock, flags);
373 368
374 return retval; 369 return retval;
375 } 370 }
376 371
377 static int davinci_musb_set_mode(struct musb *musb, u8 mode) 372 static int davinci_musb_set_mode(struct musb *musb, u8 mode)
378 { 373 {
379 /* EVM can't do this (right?) */ 374 /* EVM can't do this (right?) */
380 return -EIO; 375 return -EIO;
381 } 376 }
382 377
383 static int davinci_musb_init(struct musb *musb) 378 static int davinci_musb_init(struct musb *musb)
384 { 379 {
385 void __iomem *tibase = musb->ctrl_base; 380 void __iomem *tibase = musb->ctrl_base;
386 u32 revision; 381 u32 revision;
387 382
388 usb_nop_xceiv_register(); 383 usb_nop_xceiv_register();
389 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 384 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
390 if (IS_ERR_OR_NULL(musb->xceiv)) 385 if (IS_ERR_OR_NULL(musb->xceiv))
391 goto unregister; 386 goto unregister;
392 387
393 musb->mregs += DAVINCI_BASE_OFFSET; 388 musb->mregs += DAVINCI_BASE_OFFSET;
394 389
395 /* returns zero if e.g. not clocked */ 390 /* returns zero if e.g. not clocked */
396 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG); 391 revision = musb_readl(tibase, DAVINCI_USB_VERSION_REG);
397 if (revision == 0) 392 if (revision == 0)
398 goto fail; 393 goto fail;
399 394
400 if (is_host_enabled(musb)) 395 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
401 setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);
402 396
403 davinci_musb_source_power(musb, 0, 1); 397 davinci_musb_source_power(musb, 0, 1);
404 398
405 /* dm355 EVM swaps D+/D- for signal integrity, and 399 /* dm355 EVM swaps D+/D- for signal integrity, and
406 * is clocked from the main 24 MHz crystal. 400 * is clocked from the main 24 MHz crystal.
407 */ 401 */
408 if (machine_is_davinci_dm355_evm()) { 402 if (machine_is_davinci_dm355_evm()) {
409 u32 phy_ctrl = __raw_readl(USB_PHY_CTRL); 403 u32 phy_ctrl = __raw_readl(USB_PHY_CTRL);
410 404
411 phy_ctrl &= ~(3 << 9); 405 phy_ctrl &= ~(3 << 9);
412 phy_ctrl |= USBPHY_DATAPOL; 406 phy_ctrl |= USBPHY_DATAPOL;
413 __raw_writel(phy_ctrl, USB_PHY_CTRL); 407 __raw_writel(phy_ctrl, USB_PHY_CTRL);
414 } 408 }
415 409
416 /* On dm355, the default-A state machine needs DRVVBUS control. 410 /* On dm355, the default-A state machine needs DRVVBUS control.
417 * If we won't be a host, there's no need to turn it on. 411 * If we won't be a host, there's no need to turn it on.
418 */ 412 */
419 if (cpu_is_davinci_dm355()) { 413 if (cpu_is_davinci_dm355()) {
420 u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); 414 u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
421 415
422 if (is_host_enabled(musb)) { 416 deepsleep &= ~DRVVBUS_FORCE;
423 deepsleep &= ~DRVVBUS_OVERRIDE;
424 } else {
425 deepsleep &= ~DRVVBUS_FORCE;
426 deepsleep |= DRVVBUS_OVERRIDE;
427 }
428 __raw_writel(deepsleep, DM355_DEEPSLEEP); 417 __raw_writel(deepsleep, DM355_DEEPSLEEP);
429 } 418 }
430 419
431 /* reset the controller */ 420 /* reset the controller */
432 musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1); 421 musb_writel(tibase, DAVINCI_USB_CTRL_REG, 0x1);
433 422
434 /* start the on-chip PHY and its PLL */ 423 /* start the on-chip PHY and its PLL */
435 phy_on(); 424 phy_on();
436 425
437 msleep(5); 426 msleep(5);
438 427
439 /* NOTE: irqs are in mixed mode, not bypass to pure-musb */ 428 /* NOTE: irqs are in mixed mode, not bypass to pure-musb */
440 pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n", 429 pr_debug("DaVinci OTG revision %08x phy %03x control %02x\n",
441 revision, __raw_readl(USB_PHY_CTRL), 430 revision, __raw_readl(USB_PHY_CTRL),
442 musb_readb(tibase, DAVINCI_USB_CTRL_REG)); 431 musb_readb(tibase, DAVINCI_USB_CTRL_REG));
443 432
444 musb->isr = davinci_musb_interrupt; 433 musb->isr = davinci_musb_interrupt;
445 return 0; 434 return 0;
446 435
447 fail: 436 fail:
448 usb_put_phy(musb->xceiv); 437 usb_put_phy(musb->xceiv);
449 unregister: 438 unregister:
450 usb_nop_xceiv_unregister(); 439 usb_nop_xceiv_unregister();
451 return -ENODEV; 440 return -ENODEV;
452 } 441 }
453 442
454 static int davinci_musb_exit(struct musb *musb) 443 static int davinci_musb_exit(struct musb *musb)
455 { 444 {
456 if (is_host_enabled(musb)) 445 del_timer_sync(&otg_workaround);
457 del_timer_sync(&otg_workaround);
458 446
459 /* force VBUS off */ 447 /* force VBUS off */
460 if (cpu_is_davinci_dm355()) { 448 if (cpu_is_davinci_dm355()) {
461 u32 deepsleep = __raw_readl(DM355_DEEPSLEEP); 449 u32 deepsleep = __raw_readl(DM355_DEEPSLEEP);
462 450
463 deepsleep &= ~DRVVBUS_FORCE; 451 deepsleep &= ~DRVVBUS_FORCE;
464 deepsleep |= DRVVBUS_OVERRIDE; 452 deepsleep |= DRVVBUS_OVERRIDE;
465 __raw_writel(deepsleep, DM355_DEEPSLEEP); 453 __raw_writel(deepsleep, DM355_DEEPSLEEP);
466 } 454 }
467 455
468 davinci_musb_source_power(musb, 0 /*off*/, 1); 456 davinci_musb_source_power(musb, 0 /*off*/, 1);
469 457
470 /* delay, to avoid problems with module reload */ 458 /* delay, to avoid problems with module reload */
471 if (is_host_enabled(musb) && musb->xceiv->otg->default_a) { 459 if (musb->xceiv->otg->default_a) {
472 int maxdelay = 30; 460 int maxdelay = 30;
473 u8 devctl, warn = 0; 461 u8 devctl, warn = 0;
474 462
475 /* if there's no peripheral connected, this can take a 463 /* if there's no peripheral connected, this can take a
476 * long time to fall, especially on EVM with huge C133. 464 * long time to fall, especially on EVM with huge C133.
477 */ 465 */
478 do { 466 do {
479 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 467 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
480 if (!(devctl & MUSB_DEVCTL_VBUS)) 468 if (!(devctl & MUSB_DEVCTL_VBUS))
481 break; 469 break;
482 if ((devctl & MUSB_DEVCTL_VBUS) != warn) { 470 if ((devctl & MUSB_DEVCTL_VBUS) != warn) {
483 warn = devctl & MUSB_DEVCTL_VBUS; 471 warn = devctl & MUSB_DEVCTL_VBUS;
484 dev_dbg(musb->controller, "VBUS %d\n", 472 dev_dbg(musb->controller, "VBUS %d\n",
485 warn >> MUSB_DEVCTL_VBUS_SHIFT); 473 warn >> MUSB_DEVCTL_VBUS_SHIFT);
486 } 474 }
487 msleep(1000); 475 msleep(1000);
488 maxdelay--; 476 maxdelay--;
489 } while (maxdelay > 0); 477 } while (maxdelay > 0);
490 478
491 /* in OTG mode, another host might be connected */ 479 /* in OTG mode, another host might be connected */
492 if (devctl & MUSB_DEVCTL_VBUS) 480 if (devctl & MUSB_DEVCTL_VBUS)
493 dev_dbg(musb->controller, "VBUS off timeout (devctl %02x)\n", devctl); 481 dev_dbg(musb->controller, "VBUS off timeout (devctl %02x)\n", devctl);
494 } 482 }
495 483
496 phy_off(); 484 phy_off();
497 485
498 usb_put_phy(musb->xceiv); 486 usb_put_phy(musb->xceiv);
499 usb_nop_xceiv_unregister(); 487 usb_nop_xceiv_unregister();
500 488
501 return 0; 489 return 0;
502 } 490 }
503 491
504 static const struct musb_platform_ops davinci_ops = { 492 static const struct musb_platform_ops davinci_ops = {
505 .init = davinci_musb_init, 493 .init = davinci_musb_init,
506 .exit = davinci_musb_exit, 494 .exit = davinci_musb_exit,
507 495
508 .enable = davinci_musb_enable, 496 .enable = davinci_musb_enable,
509 .disable = davinci_musb_disable, 497 .disable = davinci_musb_disable,
510 498
511 .set_mode = davinci_musb_set_mode, 499 .set_mode = davinci_musb_set_mode,
512 500
513 .set_vbus = davinci_musb_set_vbus, 501 .set_vbus = davinci_musb_set_vbus,
514 }; 502 };
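The davinci_ops table above is how this glue layer hands its platform-specific callbacks to the common core. The general shape of such an ops-table hookup, reduced to plain C (struct and function names below are made up for illustration, not part of the driver):

	#include <stdio.h>

	/* Illustrative ops table: the core calls through whatever the glue provides. */
	struct glue_ops {
		int  (*init)(void);
		void (*enable)(void);
	};

	static int  demo_init(void)   { printf("glue init\n"); return 0; }
	static void demo_enable(void) { printf("glue enable\n"); }

	static const struct glue_ops demo_ops = {
		.init   = demo_init,
		.enable = demo_enable,
	};

	/* Stand-in for the core: it only knows the ops interface, not the glue. */
	static int core_start(const struct glue_ops *ops)
	{
		int ret = ops->init ? ops->init() : 0;

		if (!ret && ops->enable)
			ops->enable();
		return ret;
	}

	int main(void)
	{
		return core_start(&demo_ops);
	}

Optional callbacks are checked before use, which is the same reason the real core only invokes platform ops the glue actually filled in.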
515 503
516 static u64 davinci_dmamask = DMA_BIT_MASK(32); 504 static u64 davinci_dmamask = DMA_BIT_MASK(32);
517 505
518 static int __devinit davinci_probe(struct platform_device *pdev) 506 static int __devinit davinci_probe(struct platform_device *pdev)
519 { 507 {
520 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 508 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
521 struct platform_device *musb; 509 struct platform_device *musb;
522 struct davinci_glue *glue; 510 struct davinci_glue *glue;
523 struct clk *clk; 511 struct clk *clk;
524 512
525 int ret = -ENOMEM; 513 int ret = -ENOMEM;
526 514
527 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 515 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
528 if (!glue) { 516 if (!glue) {
529 dev_err(&pdev->dev, "failed to allocate glue context\n"); 517 dev_err(&pdev->dev, "failed to allocate glue context\n");
530 goto err0; 518 goto err0;
531 } 519 }
532 520
533 musb = platform_device_alloc("musb-hdrc", -1); 521 musb = platform_device_alloc("musb-hdrc", -1);
534 if (!musb) { 522 if (!musb) {
535 dev_err(&pdev->dev, "failed to allocate musb device\n"); 523 dev_err(&pdev->dev, "failed to allocate musb device\n");
536 goto err1; 524 goto err1;
537 } 525 }
538 526
539 clk = clk_get(&pdev->dev, "usb"); 527 clk = clk_get(&pdev->dev, "usb");
540 if (IS_ERR(clk)) { 528 if (IS_ERR(clk)) {
541 dev_err(&pdev->dev, "failed to get clock\n"); 529 dev_err(&pdev->dev, "failed to get clock\n");
542 ret = PTR_ERR(clk); 530 ret = PTR_ERR(clk);
543 goto err2; 531 goto err2;
544 } 532 }
545 533
546 ret = clk_enable(clk); 534 ret = clk_enable(clk);
547 if (ret) { 535 if (ret) {
548 dev_err(&pdev->dev, "failed to enable clock\n"); 536 dev_err(&pdev->dev, "failed to enable clock\n");
549 goto err3; 537 goto err3;
550 } 538 }
551 539
552 musb->dev.parent = &pdev->dev; 540 musb->dev.parent = &pdev->dev;
553 musb->dev.dma_mask = &davinci_dmamask; 541 musb->dev.dma_mask = &davinci_dmamask;
554 musb->dev.coherent_dma_mask = davinci_dmamask; 542 musb->dev.coherent_dma_mask = davinci_dmamask;
555 543
556 glue->dev = &pdev->dev; 544 glue->dev = &pdev->dev;
557 glue->musb = musb; 545 glue->musb = musb;
558 glue->clk = clk; 546 glue->clk = clk;
559 547
560 pdata->platform_ops = &davinci_ops; 548 pdata->platform_ops = &davinci_ops;
561 549
562 platform_set_drvdata(pdev, glue); 550 platform_set_drvdata(pdev, glue);
563 551
564 ret = platform_device_add_resources(musb, pdev->resource, 552 ret = platform_device_add_resources(musb, pdev->resource,
565 pdev->num_resources); 553 pdev->num_resources);
566 if (ret) { 554 if (ret) {
567 dev_err(&pdev->dev, "failed to add resources\n"); 555 dev_err(&pdev->dev, "failed to add resources\n");
568 goto err4; 556 goto err4;
569 } 557 }
570 558
571 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 559 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
572 if (ret) { 560 if (ret) {
573 dev_err(&pdev->dev, "failed to add platform_data\n"); 561 dev_err(&pdev->dev, "failed to add platform_data\n");
574 goto err4; 562 goto err4;
575 } 563 }
576 564
577 ret = platform_device_add(musb); 565 ret = platform_device_add(musb);
578 if (ret) { 566 if (ret) {
579 dev_err(&pdev->dev, "failed to register musb device\n"); 567 dev_err(&pdev->dev, "failed to register musb device\n");
580 goto err4; 568 goto err4;
581 } 569 }
582 570
583 return 0; 571 return 0;
584 572
585 err4: 573 err4:
586 clk_disable(clk); 574 clk_disable(clk);
587 575
588 err3: 576 err3:
589 clk_put(clk); 577 clk_put(clk);
590 578
591 err2: 579 err2:
592 platform_device_put(musb); 580 platform_device_put(musb);
593 581
594 err1: 582 err1:
595 kfree(glue); 583 kfree(glue);
596 584
597 err0: 585 err0:
598 return ret; 586 return ret;
599 } 587 }
600 588
601 static int __devexit davinci_remove(struct platform_device *pdev) 589 static int __devexit davinci_remove(struct platform_device *pdev)
602 { 590 {
603 struct davinci_glue *glue = platform_get_drvdata(pdev); 591 struct davinci_glue *glue = platform_get_drvdata(pdev);
604 592
605 platform_device_del(glue->musb); 593 platform_device_del(glue->musb);
606 platform_device_put(glue->musb); 594 platform_device_put(glue->musb);
607 clk_disable(glue->clk); 595 clk_disable(glue->clk);
608 clk_put(glue->clk); 596 clk_put(glue->clk);
609 kfree(glue); 597 kfree(glue);
610 598
611 return 0; 599 return 0;
612 } 600 }
613 601
614 static struct platform_driver davinci_driver = { 602 static struct platform_driver davinci_driver = {
615 .probe = davinci_probe, 603 .probe = davinci_probe,
616 .remove = __devexit_p(davinci_remove), 604 .remove = __devexit_p(davinci_remove),
617 .driver = { 605 .driver = {
618 .name = "musb-davinci", 606 .name = "musb-davinci",
619 }, 607 },
620 }; 608 };
621 609
622 MODULE_DESCRIPTION("DaVinci MUSB Glue Layer"); 610 MODULE_DESCRIPTION("DaVinci MUSB Glue Layer");
623 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); 611 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
624 MODULE_LICENSE("GPL v2"); 612 MODULE_LICENSE("GPL v2");
625 613
626 static int __init davinci_init(void) 614 static int __init davinci_init(void)
627 { 615 {
628 return platform_driver_register(&davinci_driver); 616 return platform_driver_register(&davinci_driver);
629 } 617 }
630 module_init(davinci_init); 618 module_init(davinci_init);
631 619
632 static void __exit davinci_exit(void) 620 static void __exit davinci_exit(void)
633 { 621 {
634 platform_driver_unregister(&davinci_driver); 622 platform_driver_unregister(&davinci_driver);
635 } 623 }
636 module_exit(davinci_exit); 624 module_exit(davinci_exit);
637 625
drivers/usb/musb/musb_core.c
1 /* 1 /*
2 * MUSB OTG driver core code 2 * MUSB OTG driver core code
3 * 3 *
4 * Copyright 2005 Mentor Graphics Corporation 4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments 5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation 6 * Copyright (C) 2006-2007 Nokia Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 10 * version 2 as published by the Free Software Foundation.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA 20 * 02110-1301 USA
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * 32 *
33 */ 33 */
34 34
35 /* 35 /*
36 * Inventra (Multipoint) Dual-Role Controller Driver for Linux. 36 * Inventra (Multipoint) Dual-Role Controller Driver for Linux.
37 * 37 *
38 * This consists of a Host Controller Driver (HCD) and a peripheral 38 * This consists of a Host Controller Driver (HCD) and a peripheral
39 * controller driver implementing the "Gadget" API; OTG support is 39 * controller driver implementing the "Gadget" API; OTG support is
40 * in the works. These are normal Linux-USB controller drivers which 40 * in the works. These are normal Linux-USB controller drivers which
41 * use IRQs and have no dedicated thread. 41 * use IRQs and have no dedicated thread.
42 * 42 *
43 * This version of the driver has only been used with products from 43 * This version of the driver has only been used with products from
44 * Texas Instruments. Those products integrate the Inventra logic 44 * Texas Instruments. Those products integrate the Inventra logic
45 * with other DMA, IRQ, and bus modules, as well as other logic that 45 * with other DMA, IRQ, and bus modules, as well as other logic that
46 * needs to be reflected in this driver. 46 * needs to be reflected in this driver.
47 * 47 *
48 * 48 *
49 * NOTE: the original Mentor code here was pretty much a collection 49 * NOTE: the original Mentor code here was pretty much a collection
50 * of mechanisms that don't seem to have been fully integrated/working 50 * of mechanisms that don't seem to have been fully integrated/working
51 * for *any* Linux kernel version. This version aims at Linux 2.6.now, 51 * for *any* Linux kernel version. This version aims at Linux 2.6.now,
52 * Key open issues include: 52 * Key open issues include:
53 * 53 *
54 * - Lack of host-side transaction scheduling, for all transfer types. 54 * - Lack of host-side transaction scheduling, for all transfer types.
55 * The hardware doesn't do it; instead, software must. 55 * The hardware doesn't do it; instead, software must.
56 * 56 *
57 * This is not an issue for OTG devices that don't support external 57 * This is not an issue for OTG devices that don't support external
58 * hubs, but for more "normal" USB hosts it's a user issue that the 58 * hubs, but for more "normal" USB hosts it's a user issue that the
59 * "multipoint" support doesn't scale in the expected ways. That 59 * "multipoint" support doesn't scale in the expected ways. That
60 * includes DaVinci EVM in a common non-OTG mode. 60 * includes DaVinci EVM in a common non-OTG mode.
61 * 61 *
62 * * Control and bulk use dedicated endpoints, and there's as 62 * * Control and bulk use dedicated endpoints, and there's as
63 * yet no mechanism to either (a) reclaim the hardware when 63 * yet no mechanism to either (a) reclaim the hardware when
64 * peripherals are NAKing, which gets complicated with bulk 64 * peripherals are NAKing, which gets complicated with bulk
65 * endpoints, or (b) use more than a single bulk endpoint in 65 * endpoints, or (b) use more than a single bulk endpoint in
66 * each direction. 66 * each direction.
67 * 67 *
68 * RESULT: one device may be perceived as blocking another one. 68 * RESULT: one device may be perceived as blocking another one.
69 * 69 *
70 * * Interrupt and isochronous will dynamically allocate endpoint 70 * * Interrupt and isochronous will dynamically allocate endpoint
71 * hardware, but (a) there's no record keeping for bandwidth; 71 * hardware, but (a) there's no record keeping for bandwidth;
72 * (b) in the common case that few endpoints are available, there 72 * (b) in the common case that few endpoints are available, there
73 * is no mechanism to reuse endpoints to talk to multiple devices. 73 * is no mechanism to reuse endpoints to talk to multiple devices.
74 * 74 *
75 * RESULT: At one extreme, bandwidth can be overcommitted in 75 * RESULT: At one extreme, bandwidth can be overcommitted in
76 * some hardware configurations, no faults will be reported. 76 * some hardware configurations, no faults will be reported.
77 * At the other extreme, the bandwidth capabilities which do 77 * At the other extreme, the bandwidth capabilities which do
78 * exist tend to be severely undercommitted. You can't yet hook 78 * exist tend to be severely undercommitted. You can't yet hook
79 * up both a keyboard and a mouse to an external USB hub. 79 * up both a keyboard and a mouse to an external USB hub.
80 */ 80 */
81 81
82 /* 82 /*
83 * This gets many kinds of configuration information: 83 * This gets many kinds of configuration information:
84 * - Kconfig for everything user-configurable 84 * - Kconfig for everything user-configurable
85 * - platform_device for addressing, irq, and platform_data 85 * - platform_device for addressing, irq, and platform_data
86 * - platform_data is mostly for board-specific information 86 * - platform_data is mostly for board-specific information
87 * (plus recently, SOC or family details) 87 * (plus recently, SOC or family details)
88 * 88 *
89 * Most of the conditional compilation will (someday) vanish. 89 * Most of the conditional compilation will (someday) vanish.
90 */ 90 */
91 91
92 #include <linux/module.h> 92 #include <linux/module.h>
93 #include <linux/kernel.h> 93 #include <linux/kernel.h>
94 #include <linux/sched.h> 94 #include <linux/sched.h>
95 #include <linux/slab.h> 95 #include <linux/slab.h>
96 #include <linux/init.h> 96 #include <linux/init.h>
97 #include <linux/list.h> 97 #include <linux/list.h>
98 #include <linux/kobject.h> 98 #include <linux/kobject.h>
99 #include <linux/prefetch.h> 99 #include <linux/prefetch.h>
100 #include <linux/platform_device.h> 100 #include <linux/platform_device.h>
101 #include <linux/io.h> 101 #include <linux/io.h>
102 102
103 #include "musb_core.h" 103 #include "musb_core.h"
104 104
105 #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON) 105 #define TA_WAIT_BCON(m) max_t(int, (m)->a_wait_bcon, OTG_TIME_A_WAIT_BCON)
106 106
107 107
108 #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia" 108 #define DRIVER_AUTHOR "Mentor Graphics, Texas Instruments, Nokia"
109 #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver" 109 #define DRIVER_DESC "Inventra Dual-Role USB Controller Driver"
110 110
111 #define MUSB_VERSION "6.0" 111 #define MUSB_VERSION "6.0"
112 112
113 #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION 113 #define DRIVER_INFO DRIVER_DESC ", v" MUSB_VERSION
114 114
115 #define MUSB_DRIVER_NAME "musb-hdrc" 115 #define MUSB_DRIVER_NAME "musb-hdrc"
116 const char musb_driver_name[] = MUSB_DRIVER_NAME; 116 const char musb_driver_name[] = MUSB_DRIVER_NAME;
117 117
118 MODULE_DESCRIPTION(DRIVER_INFO); 118 MODULE_DESCRIPTION(DRIVER_INFO);
119 MODULE_AUTHOR(DRIVER_AUTHOR); 119 MODULE_AUTHOR(DRIVER_AUTHOR);
120 MODULE_LICENSE("GPL"); 120 MODULE_LICENSE("GPL");
121 MODULE_ALIAS("platform:" MUSB_DRIVER_NAME); 121 MODULE_ALIAS("platform:" MUSB_DRIVER_NAME);
122 122
123 123
124 /*-------------------------------------------------------------------------*/ 124 /*-------------------------------------------------------------------------*/
125 125
126 static inline struct musb *dev_to_musb(struct device *dev) 126 static inline struct musb *dev_to_musb(struct device *dev)
127 { 127 {
128 return dev_get_drvdata(dev); 128 return dev_get_drvdata(dev);
129 } 129 }
130 130
131 /*-------------------------------------------------------------------------*/ 131 /*-------------------------------------------------------------------------*/
132 132
133 #ifndef CONFIG_BLACKFIN 133 #ifndef CONFIG_BLACKFIN
134 static int musb_ulpi_read(struct usb_phy *phy, u32 offset) 134 static int musb_ulpi_read(struct usb_phy *phy, u32 offset)
135 { 135 {
136 void __iomem *addr = phy->io_priv; 136 void __iomem *addr = phy->io_priv;
137 int i = 0; 137 int i = 0;
138 u8 r; 138 u8 r;
139 u8 power; 139 u8 power;
140 int ret; 140 int ret;
141 141
142 pm_runtime_get_sync(phy->io_dev); 142 pm_runtime_get_sync(phy->io_dev);
143 143
144 /* Make sure the transceiver is not in low power mode */ 144 /* Make sure the transceiver is not in low power mode */
145 power = musb_readb(addr, MUSB_POWER); 145 power = musb_readb(addr, MUSB_POWER);
146 power &= ~MUSB_POWER_SUSPENDM; 146 power &= ~MUSB_POWER_SUSPENDM;
147 musb_writeb(addr, MUSB_POWER, power); 147 musb_writeb(addr, MUSB_POWER, power);
148 148
149 /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the 149 /* REVISIT: musbhdrc_ulpi_an.pdf recommends setting the
150 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM. 150 * ULPICarKitControlDisableUTMI after clearing POWER_SUSPENDM.
151 */ 151 */
152 152
153 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); 153 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
154 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, 154 musb_writeb(addr, MUSB_ULPI_REG_CONTROL,
155 MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR); 155 MUSB_ULPI_REG_REQ | MUSB_ULPI_RDN_WR);
156 156
157 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) 157 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
158 & MUSB_ULPI_REG_CMPLT)) { 158 & MUSB_ULPI_REG_CMPLT)) {
159 i++; 159 i++;
160 if (i == 10000) { 160 if (i == 10000) {
161 ret = -ETIMEDOUT; 161 ret = -ETIMEDOUT;
162 goto out; 162 goto out;
163 } 163 }
164 164
165 } 165 }
166 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); 166 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
167 r &= ~MUSB_ULPI_REG_CMPLT; 167 r &= ~MUSB_ULPI_REG_CMPLT;
168 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); 168 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
169 169
170 ret = musb_readb(addr, MUSB_ULPI_REG_DATA); 170 ret = musb_readb(addr, MUSB_ULPI_REG_DATA);
171 171
172 out: 172 out:
173 pm_runtime_put(phy->io_dev); 173 pm_runtime_put(phy->io_dev);
174 174
175 return ret; 175 return ret;
176 } 176 }
177 177
178 static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) 178 static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data)
179 { 179 {
180 void __iomem *addr = phy->io_priv; 180 void __iomem *addr = phy->io_priv;
181 int i = 0; 181 int i = 0;
182 u8 r = 0; 182 u8 r = 0;
183 u8 power; 183 u8 power;
184 int ret = 0; 184 int ret = 0;
185 185
186 pm_runtime_get_sync(phy->io_dev); 186 pm_runtime_get_sync(phy->io_dev);
187 187
188 /* Make sure the transceiver is not in low power mode */ 188 /* Make sure the transceiver is not in low power mode */
189 power = musb_readb(addr, MUSB_POWER); 189 power = musb_readb(addr, MUSB_POWER);
190 power &= ~MUSB_POWER_SUSPENDM; 190 power &= ~MUSB_POWER_SUSPENDM;
191 musb_writeb(addr, MUSB_POWER, power); 191 musb_writeb(addr, MUSB_POWER, power);
192 192
193 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset); 193 musb_writeb(addr, MUSB_ULPI_REG_ADDR, (u8)offset);
194 musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data); 194 musb_writeb(addr, MUSB_ULPI_REG_DATA, (u8)data);
195 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ); 195 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, MUSB_ULPI_REG_REQ);
196 196
197 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) 197 while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL)
198 & MUSB_ULPI_REG_CMPLT)) { 198 & MUSB_ULPI_REG_CMPLT)) {
199 i++; 199 i++;
200 if (i == 10000) { 200 if (i == 10000) {
201 ret = -ETIMEDOUT; 201 ret = -ETIMEDOUT;
202 goto out; 202 goto out;
203 } 203 }
204 } 204 }
205 205
206 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); 206 r = musb_readb(addr, MUSB_ULPI_REG_CONTROL);
207 r &= ~MUSB_ULPI_REG_CMPLT; 207 r &= ~MUSB_ULPI_REG_CMPLT;
208 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); 208 musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r);
209 209
210 out: 210 out:
211 pm_runtime_put(phy->io_dev); 211 pm_runtime_put(phy->io_dev);
212 212
213 return ret; 213 return ret;
214 } 214 }
215 #else 215 #else
216 #define musb_ulpi_read NULL 216 #define musb_ulpi_read NULL
217 #define musb_ulpi_write NULL 217 #define musb_ulpi_write NULL
218 #endif 218 #endif
219 219
220 static struct usb_phy_io_ops musb_ulpi_access = { 220 static struct usb_phy_io_ops musb_ulpi_access = {
221 .read = musb_ulpi_read, 221 .read = musb_ulpi_read,
222 .write = musb_ulpi_write, 222 .write = musb_ulpi_write,
223 }; 223 };
224 224
225 /*-------------------------------------------------------------------------*/ 225 /*-------------------------------------------------------------------------*/
226 226
227 #if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN) 227 #if !defined(CONFIG_USB_MUSB_TUSB6010) && !defined(CONFIG_USB_MUSB_BLACKFIN)
228 228
229 /* 229 /*
230 * Load an endpoint's FIFO 230 * Load an endpoint's FIFO
231 */ 231 */
232 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src) 232 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
233 { 233 {
234 struct musb *musb = hw_ep->musb; 234 struct musb *musb = hw_ep->musb;
235 void __iomem *fifo = hw_ep->fifo; 235 void __iomem *fifo = hw_ep->fifo;
236 236
237 if (unlikely(len == 0)) 237 if (unlikely(len == 0))
238 return; 238 return;
239 239
240 prefetch((u8 *)src); 240 prefetch((u8 *)src);
241 241
242 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", 242 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
243 'T', hw_ep->epnum, fifo, len, src); 243 'T', hw_ep->epnum, fifo, len, src);
244 244
245 /* we can't assume unaligned reads work */ 245 /* we can't assume unaligned reads work */
246 if (likely((0x01 & (unsigned long) src) == 0)) { 246 if (likely((0x01 & (unsigned long) src) == 0)) {
247 u16 index = 0; 247 u16 index = 0;
248 248
249 /* best case is 32bit-aligned source address */ 249 /* best case is 32bit-aligned source address */
250 if ((0x02 & (unsigned long) src) == 0) { 250 if ((0x02 & (unsigned long) src) == 0) {
251 if (len >= 4) { 251 if (len >= 4) {
252 writesl(fifo, src + index, len >> 2); 252 writesl(fifo, src + index, len >> 2);
253 index += len & ~0x03; 253 index += len & ~0x03;
254 } 254 }
255 if (len & 0x02) { 255 if (len & 0x02) {
256 musb_writew(fifo, 0, *(u16 *)&src[index]); 256 musb_writew(fifo, 0, *(u16 *)&src[index]);
257 index += 2; 257 index += 2;
258 } 258 }
259 } else { 259 } else {
260 if (len >= 2) { 260 if (len >= 2) {
261 writesw(fifo, src + index, len >> 1); 261 writesw(fifo, src + index, len >> 1);
262 index += len & ~0x01; 262 index += len & ~0x01;
263 } 263 }
264 } 264 }
265 if (len & 0x01) 265 if (len & 0x01)
266 musb_writeb(fifo, 0, src[index]); 266 musb_writeb(fifo, 0, src[index]);
267 } else { 267 } else {
268 /* byte aligned */ 268 /* byte aligned */
269 writesb(fifo, src, len); 269 writesb(fifo, src, len);
270 } 270 }
271 } 271 }
272 272
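musb_write_fifo() above picks the widest access the source pointer's alignment allows: 32-bit writes when the buffer is word aligned, 16-bit when only halfword aligned, and byte writes otherwise. A minimal stand-alone sketch of the same alignment dispatch against an ordinary memory buffer (function and variable names here are illustrative, not part of the driver):

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	/*
	 * Copy 'len' bytes using the widest units the source alignment permits,
	 * mirroring the FIFO loader's word/halfword/byte dispatch.
	 */
	static void copy_aligned(uint8_t *dst, const uint8_t *src, size_t len)
	{
		size_t index = 0;

		if (((uintptr_t)src & 0x01) == 0) {
			if (((uintptr_t)src & 0x02) == 0) {
				/* word aligned: bulk of the transfer in 32-bit units */
				for (; index + 4 <= len; index += 4)
					memcpy(dst + index, src + index, 4);
				/* then at most one trailing halfword */
				if (len - index >= 2) {
					memcpy(dst + index, src + index, 2);
					index += 2;
				}
			} else {
				/* only halfword aligned: 16-bit units */
				for (; index + 2 <= len; index += 2)
					memcpy(dst + index, src + index, 2);
			}
			/* at most one odd byte left over */
			if (index < len)
				dst[index] = src[index];
		} else {
			/* unaligned source: plain byte copy */
			for (; index < len; index++)
				dst[index] = src[index];
		}
	}

	int main(void)
	{
		uint8_t src[7] = { 1, 2, 3, 4, 5, 6, 7 };
		uint8_t dst[7] = { 0 };

		copy_aligned(dst, src, sizeof(src));
		printf("%d ... %d\n", dst[0], dst[6]);
		return 0;
	}

The sketch copies into memory with memcpy(); the driver itself targets a fixed FIFO register, so it uses writesl()/writesw()/writesb() and the musb_write*() accessors instead.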
273 #if !defined(CONFIG_USB_MUSB_AM35X) 273 #if !defined(CONFIG_USB_MUSB_AM35X)
274 /* 274 /*
275 * Unload an endpoint's FIFO 275 * Unload an endpoint's FIFO
276 */ 276 */
277 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst) 277 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
278 { 278 {
279 struct musb *musb = hw_ep->musb; 279 struct musb *musb = hw_ep->musb;
280 void __iomem *fifo = hw_ep->fifo; 280 void __iomem *fifo = hw_ep->fifo;
281 281
282 if (unlikely(len == 0)) 282 if (unlikely(len == 0))
283 return; 283 return;
284 284
285 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", 285 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
286 'R', hw_ep->epnum, fifo, len, dst); 286 'R', hw_ep->epnum, fifo, len, dst);
287 287
288 /* we can't assume unaligned writes work */ 288 /* we can't assume unaligned writes work */
289 if (likely((0x01 & (unsigned long) dst) == 0)) { 289 if (likely((0x01 & (unsigned long) dst) == 0)) {
290 u16 index = 0; 290 u16 index = 0;
291 291
292 /* best case is 32bit-aligned destination address */ 292 /* best case is 32bit-aligned destination address */
293 if ((0x02 & (unsigned long) dst) == 0) { 293 if ((0x02 & (unsigned long) dst) == 0) {
294 if (len >= 4) { 294 if (len >= 4) {
295 readsl(fifo, dst, len >> 2); 295 readsl(fifo, dst, len >> 2);
296 index = len & ~0x03; 296 index = len & ~0x03;
297 } 297 }
298 if (len & 0x02) { 298 if (len & 0x02) {
299 *(u16 *)&dst[index] = musb_readw(fifo, 0); 299 *(u16 *)&dst[index] = musb_readw(fifo, 0);
300 index += 2; 300 index += 2;
301 } 301 }
302 } else { 302 } else {
303 if (len >= 2) { 303 if (len >= 2) {
304 readsw(fifo, dst, len >> 1); 304 readsw(fifo, dst, len >> 1);
305 index = len & ~0x01; 305 index = len & ~0x01;
306 } 306 }
307 } 307 }
308 if (len & 0x01) 308 if (len & 0x01)
309 dst[index] = musb_readb(fifo, 0); 309 dst[index] = musb_readb(fifo, 0);
310 } else { 310 } else {
311 /* byte aligned */ 311 /* byte aligned */
312 readsb(fifo, dst, len); 312 readsb(fifo, dst, len);
313 } 313 }
314 } 314 }
315 #endif 315 #endif
316 316
317 #endif /* normal PIO */ 317 #endif /* normal PIO */
318 318
319 319
320 /*-------------------------------------------------------------------------*/ 320 /*-------------------------------------------------------------------------*/
321 321
322 /* for high speed test mode; see USB 2.0 spec 7.1.20 */ 322 /* for high speed test mode; see USB 2.0 spec 7.1.20 */
323 static const u8 musb_test_packet[53] = { 323 static const u8 musb_test_packet[53] = {
324 /* implicit SYNC then DATA0 to start */ 324 /* implicit SYNC then DATA0 to start */
325 325
326 /* JKJKJKJK x9 */ 326 /* JKJKJKJK x9 */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
328 /* JJKKJJKK x8 */ 328 /* JJKKJJKK x8 */
329 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 329 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
330 /* JJJJKKKK x8 */ 330 /* JJJJKKKK x8 */
331 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 331 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee, 0xee,
332 /* JJJJJJJKKKKKKK x8 */ 332 /* JJJJJJJKKKKKKK x8 */
333 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 333 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
334 /* JJJJJJJK x8 */ 334 /* JJJJJJJK x8 */
335 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 335 0x7f, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd,
336 /* JKKKKKKK x10, JK */ 336 /* JKKKKKKK x10, JK */
337 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e 337 0xfc, 0x7e, 0xbf, 0xdf, 0xef, 0xf7, 0xfb, 0xfd, 0x7e
338 338
339 /* implicit CRC16 then EOP to end */ 339 /* implicit CRC16 then EOP to end */
340 }; 340 };
341 341
342 void musb_load_testpacket(struct musb *musb) 342 void musb_load_testpacket(struct musb *musb)
343 { 343 {
344 void __iomem *regs = musb->endpoints[0].regs; 344 void __iomem *regs = musb->endpoints[0].regs;
345 345
346 musb_ep_select(musb->mregs, 0); 346 musb_ep_select(musb->mregs, 0);
347 musb_write_fifo(musb->control_ep, 347 musb_write_fifo(musb->control_ep,
348 sizeof(musb_test_packet), musb_test_packet); 348 sizeof(musb_test_packet), musb_test_packet);
349 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY); 349 musb_writew(regs, MUSB_CSR0, MUSB_CSR0_TXPKTRDY);
350 } 350 }
351 351
352 /*-------------------------------------------------------------------------*/ 352 /*-------------------------------------------------------------------------*/
353 353
354 /* 354 /*
355 * Handles OTG hnp timeouts, such as b_ase0_brst 355 * Handles OTG hnp timeouts, such as b_ase0_brst
356 */ 356 */
357 static void musb_otg_timer_func(unsigned long data) 357 static void musb_otg_timer_func(unsigned long data)
358 { 358 {
359 struct musb *musb = (struct musb *)data; 359 struct musb *musb = (struct musb *)data;
360 unsigned long flags; 360 unsigned long flags;
361 361
362 spin_lock_irqsave(&musb->lock, flags); 362 spin_lock_irqsave(&musb->lock, flags);
363 switch (musb->xceiv->state) { 363 switch (musb->xceiv->state) {
364 case OTG_STATE_B_WAIT_ACON: 364 case OTG_STATE_B_WAIT_ACON:
365 dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n"); 365 dev_dbg(musb->controller, "HNP: b_wait_acon timeout; back to b_peripheral\n");
366 musb_g_disconnect(musb); 366 musb_g_disconnect(musb);
367 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 367 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
368 musb->is_active = 0; 368 musb->is_active = 0;
369 break; 369 break;
370 case OTG_STATE_A_SUSPEND: 370 case OTG_STATE_A_SUSPEND:
371 case OTG_STATE_A_WAIT_BCON: 371 case OTG_STATE_A_WAIT_BCON:
372 dev_dbg(musb->controller, "HNP: %s timeout\n", 372 dev_dbg(musb->controller, "HNP: %s timeout\n",
373 otg_state_string(musb->xceiv->state)); 373 otg_state_string(musb->xceiv->state));
374 musb_platform_set_vbus(musb, 0); 374 musb_platform_set_vbus(musb, 0);
375 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 375 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
376 break; 376 break;
377 default: 377 default:
378 dev_dbg(musb->controller, "HNP: Unhandled mode %s\n", 378 dev_dbg(musb->controller, "HNP: Unhandled mode %s\n",
379 otg_state_string(musb->xceiv->state)); 379 otg_state_string(musb->xceiv->state));
380 } 380 }
381 musb->ignore_disconnect = 0; 381 musb->ignore_disconnect = 0;
382 spin_unlock_irqrestore(&musb->lock, flags); 382 spin_unlock_irqrestore(&musb->lock, flags);
383 } 383 }
384 384
385 /* 385 /*
386 * Stops the HNP transition. Caller must take care of locking. 386 * Stops the HNP transition. Caller must take care of locking.
387 */ 387 */
388 void musb_hnp_stop(struct musb *musb) 388 void musb_hnp_stop(struct musb *musb)
389 { 389 {
390 struct usb_hcd *hcd = musb_to_hcd(musb); 390 struct usb_hcd *hcd = musb_to_hcd(musb);
391 void __iomem *mbase = musb->mregs; 391 void __iomem *mbase = musb->mregs;
392 u8 reg; 392 u8 reg;
393 393
394 dev_dbg(musb->controller, "HNP: stop from %s\n", otg_state_string(musb->xceiv->state)); 394 dev_dbg(musb->controller, "HNP: stop from %s\n", otg_state_string(musb->xceiv->state));
395 395
396 switch (musb->xceiv->state) { 396 switch (musb->xceiv->state) {
397 case OTG_STATE_A_PERIPHERAL: 397 case OTG_STATE_A_PERIPHERAL:
398 musb_g_disconnect(musb); 398 musb_g_disconnect(musb);
399 dev_dbg(musb->controller, "HNP: back to %s\n", 399 dev_dbg(musb->controller, "HNP: back to %s\n",
400 otg_state_string(musb->xceiv->state)); 400 otg_state_string(musb->xceiv->state));
401 break; 401 break;
402 case OTG_STATE_B_HOST: 402 case OTG_STATE_B_HOST:
403 dev_dbg(musb->controller, "HNP: Disabling HR\n"); 403 dev_dbg(musb->controller, "HNP: Disabling HR\n");
404 hcd->self.is_b_host = 0; 404 hcd->self.is_b_host = 0;
405 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 405 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
406 MUSB_DEV_MODE(musb); 406 MUSB_DEV_MODE(musb);
407 reg = musb_readb(mbase, MUSB_POWER); 407 reg = musb_readb(mbase, MUSB_POWER);
408 reg |= MUSB_POWER_SUSPENDM; 408 reg |= MUSB_POWER_SUSPENDM;
409 musb_writeb(mbase, MUSB_POWER, reg); 409 musb_writeb(mbase, MUSB_POWER, reg);
410 /* REVISIT: Start SESSION_REQUEST here? */ 410 /* REVISIT: Start SESSION_REQUEST here? */
411 break; 411 break;
412 default: 412 default:
413 dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n", 413 dev_dbg(musb->controller, "HNP: Stopping in unknown state %s\n",
414 otg_state_string(musb->xceiv->state)); 414 otg_state_string(musb->xceiv->state));
415 } 415 }
416 416
417 /* 417 /*
418 * When returning to A state after HNP, avoid hub_port_rebounce(), 418 * When returning to A state after HNP, avoid hub_port_rebounce(),
419 * which causes occasional OPT A "Did not receive reset after connect" 419 * which causes occasional OPT A "Did not receive reset after connect"
420 * errors. 420 * errors.
421 */ 421 */
422 musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16); 422 musb->port1_status &= ~(USB_PORT_STAT_C_CONNECTION << 16);
423 } 423 }
424 424
425 /* 425 /*
426 * Interrupt Service Routine to record USB "global" interrupts. 426 * Interrupt Service Routine to record USB "global" interrupts.
427 * Since these do not happen often and signify things of 427 * Since these do not happen often and signify things of
428 * paramount importance, it seems OK to check them individually; 428 * paramount importance, it seems OK to check them individually;
429 * the order of the tests is specified in the manual 429 * the order of the tests is specified in the manual
430 * 430 *
431 * @param musb instance pointer 431 * @param musb instance pointer
432 * @param int_usb register contents 432 * @param int_usb register contents
433 * @param devctl 433 * @param devctl
434 * @param power 434 * @param power
435 */ 435 */
436 436
437 static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb, 437 static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
438 u8 devctl, u8 power) 438 u8 devctl, u8 power)
439 { 439 {
440 struct usb_otg *otg = musb->xceiv->otg; 440 struct usb_otg *otg = musb->xceiv->otg;
441 irqreturn_t handled = IRQ_NONE; 441 irqreturn_t handled = IRQ_NONE;
442 442
443 dev_dbg(musb->controller, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl, 443 dev_dbg(musb->controller, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
444 int_usb); 444 int_usb);
445 445
446 /* in host mode, the peripheral may issue remote wakeup. 446 /* in host mode, the peripheral may issue remote wakeup.
447 * in peripheral mode, the host may resume the link. 447 * in peripheral mode, the host may resume the link.
448 * spurious RESUME irqs happen too, paired with SUSPEND. 448 * spurious RESUME irqs happen too, paired with SUSPEND.
449 */ 449 */
450 if (int_usb & MUSB_INTR_RESUME) { 450 if (int_usb & MUSB_INTR_RESUME) {
451 handled = IRQ_HANDLED; 451 handled = IRQ_HANDLED;
452 dev_dbg(musb->controller, "RESUME (%s)\n", otg_state_string(musb->xceiv->state)); 452 dev_dbg(musb->controller, "RESUME (%s)\n", otg_state_string(musb->xceiv->state));
453 453
454 if (devctl & MUSB_DEVCTL_HM) { 454 if (devctl & MUSB_DEVCTL_HM) {
455 void __iomem *mbase = musb->mregs; 455 void __iomem *mbase = musb->mregs;
456 456
457 switch (musb->xceiv->state) { 457 switch (musb->xceiv->state) {
458 case OTG_STATE_A_SUSPEND: 458 case OTG_STATE_A_SUSPEND:
459 /* remote wakeup? later, GetPortStatus 459 /* remote wakeup? later, GetPortStatus
460 * will stop RESUME signaling 460 * will stop RESUME signaling
461 */ 461 */
462 462
463 if (power & MUSB_POWER_SUSPENDM) { 463 if (power & MUSB_POWER_SUSPENDM) {
464 /* spurious */ 464 /* spurious */
465 musb->int_usb &= ~MUSB_INTR_SUSPEND; 465 musb->int_usb &= ~MUSB_INTR_SUSPEND;
466 dev_dbg(musb->controller, "Spurious SUSPENDM\n"); 466 dev_dbg(musb->controller, "Spurious SUSPENDM\n");
467 break; 467 break;
468 } 468 }
469 469
470 power &= ~MUSB_POWER_SUSPENDM; 470 power &= ~MUSB_POWER_SUSPENDM;
471 musb_writeb(mbase, MUSB_POWER, 471 musb_writeb(mbase, MUSB_POWER,
472 power | MUSB_POWER_RESUME); 472 power | MUSB_POWER_RESUME);
473 473
474 musb->port1_status |= 474 musb->port1_status |=
475 (USB_PORT_STAT_C_SUSPEND << 16) 475 (USB_PORT_STAT_C_SUSPEND << 16)
476 | MUSB_PORT_STAT_RESUME; 476 | MUSB_PORT_STAT_RESUME;
477 musb->rh_timer = jiffies 477 musb->rh_timer = jiffies
478 + msecs_to_jiffies(20); 478 + msecs_to_jiffies(20);
479 479
480 musb->xceiv->state = OTG_STATE_A_HOST; 480 musb->xceiv->state = OTG_STATE_A_HOST;
481 musb->is_active = 1; 481 musb->is_active = 1;
482 usb_hcd_resume_root_hub(musb_to_hcd(musb)); 482 usb_hcd_resume_root_hub(musb_to_hcd(musb));
483 break; 483 break;
484 case OTG_STATE_B_WAIT_ACON: 484 case OTG_STATE_B_WAIT_ACON:
485 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 485 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
486 musb->is_active = 1; 486 musb->is_active = 1;
487 MUSB_DEV_MODE(musb); 487 MUSB_DEV_MODE(musb);
488 break; 488 break;
489 default: 489 default:
490 WARNING("bogus %s RESUME (%s)\n", 490 WARNING("bogus %s RESUME (%s)\n",
491 "host", 491 "host",
492 otg_state_string(musb->xceiv->state)); 492 otg_state_string(musb->xceiv->state));
493 } 493 }
494 } else { 494 } else {
495 switch (musb->xceiv->state) { 495 switch (musb->xceiv->state) {
496 case OTG_STATE_A_SUSPEND: 496 case OTG_STATE_A_SUSPEND:
497 /* possibly DISCONNECT is upcoming */ 497 /* possibly DISCONNECT is upcoming */
498 musb->xceiv->state = OTG_STATE_A_HOST; 498 musb->xceiv->state = OTG_STATE_A_HOST;
499 usb_hcd_resume_root_hub(musb_to_hcd(musb)); 499 usb_hcd_resume_root_hub(musb_to_hcd(musb));
500 break; 500 break;
501 case OTG_STATE_B_WAIT_ACON: 501 case OTG_STATE_B_WAIT_ACON:
502 case OTG_STATE_B_PERIPHERAL: 502 case OTG_STATE_B_PERIPHERAL:
503 /* disconnect while suspended? we may 503 /* disconnect while suspended? we may
504 * not get a disconnect irq... 504 * not get a disconnect irq...
505 */ 505 */
506 if ((devctl & MUSB_DEVCTL_VBUS) 506 if ((devctl & MUSB_DEVCTL_VBUS)
507 != (3 << MUSB_DEVCTL_VBUS_SHIFT) 507 != (3 << MUSB_DEVCTL_VBUS_SHIFT)
508 ) { 508 ) {
509 musb->int_usb |= MUSB_INTR_DISCONNECT; 509 musb->int_usb |= MUSB_INTR_DISCONNECT;
510 musb->int_usb &= ~MUSB_INTR_SUSPEND; 510 musb->int_usb &= ~MUSB_INTR_SUSPEND;
511 break; 511 break;
512 } 512 }
513 musb_g_resume(musb); 513 musb_g_resume(musb);
514 break; 514 break;
515 case OTG_STATE_B_IDLE: 515 case OTG_STATE_B_IDLE:
516 musb->int_usb &= ~MUSB_INTR_SUSPEND; 516 musb->int_usb &= ~MUSB_INTR_SUSPEND;
517 break; 517 break;
518 default: 518 default:
519 WARNING("bogus %s RESUME (%s)\n", 519 WARNING("bogus %s RESUME (%s)\n",
520 "peripheral", 520 "peripheral",
521 otg_state_string(musb->xceiv->state)); 521 otg_state_string(musb->xceiv->state));
522 } 522 }
523 } 523 }
524 } 524 }
525 525
526 /* see manual for the order of the tests */ 526 /* see manual for the order of the tests */
527 if (int_usb & MUSB_INTR_SESSREQ) { 527 if (int_usb & MUSB_INTR_SESSREQ) {
528 void __iomem *mbase = musb->mregs; 528 void __iomem *mbase = musb->mregs;
529 529
530 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS 530 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
531 && (devctl & MUSB_DEVCTL_BDEVICE)) { 531 && (devctl & MUSB_DEVCTL_BDEVICE)) {
532 dev_dbg(musb->controller, "SessReq while on B state\n"); 532 dev_dbg(musb->controller, "SessReq while on B state\n");
533 return IRQ_HANDLED; 533 return IRQ_HANDLED;
534 } 534 }
535 535
536 dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n", 536 dev_dbg(musb->controller, "SESSION_REQUEST (%s)\n",
537 otg_state_string(musb->xceiv->state)); 537 otg_state_string(musb->xceiv->state));
538 538
539 /* IRQ arrives from ID pin sense or (later, if VBUS power 539 /* IRQ arrives from ID pin sense or (later, if VBUS power
540 * is removed) SRP. Responses are time critical: 540 * is removed) SRP. Responses are time critical:
541 * - turn on VBUS (with silicon-specific mechanism) 541 * - turn on VBUS (with silicon-specific mechanism)
542 * - go through A_WAIT_VRISE 542 * - go through A_WAIT_VRISE
543 * - ... to A_WAIT_BCON. 543 * - ... to A_WAIT_BCON.
544 * a_wait_vrise_tmout triggers VBUS_ERROR transitions 544 * a_wait_vrise_tmout triggers VBUS_ERROR transitions
545 */ 545 */
546 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); 546 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
547 musb->ep0_stage = MUSB_EP0_START; 547 musb->ep0_stage = MUSB_EP0_START;
548 musb->xceiv->state = OTG_STATE_A_IDLE; 548 musb->xceiv->state = OTG_STATE_A_IDLE;
549 MUSB_HST_MODE(musb); 549 MUSB_HST_MODE(musb);
550 musb_platform_set_vbus(musb, 1); 550 musb_platform_set_vbus(musb, 1);
551 551
552 handled = IRQ_HANDLED; 552 handled = IRQ_HANDLED;
553 } 553 }
554 554
555 if (int_usb & MUSB_INTR_VBUSERROR) { 555 if (int_usb & MUSB_INTR_VBUSERROR) {
556 int ignore = 0; 556 int ignore = 0;
557 557
558 /* During connection as an A-Device, we may see short 558 /* During connection as an A-Device, we may see short
559 * current spikes causing voltage drops, because of cable 559 * current spikes causing voltage drops, because of cable
560 * and peripheral capacitance combined with vbus draw. 560 * and peripheral capacitance combined with vbus draw.
561 * (So: less common with truly self-powered devices, where 561 * (So: less common with truly self-powered devices, where
562 * vbus doesn't act like a power supply.) 562 * vbus doesn't act like a power supply.)
563 * 563 *
564 * Such spikes are short; usually less than ~500 usec, max 564 * Such spikes are short; usually less than ~500 usec, max
565 * of ~2 msec. That is, they're not sustained overcurrent 565 * of ~2 msec. That is, they're not sustained overcurrent
566 * errors, though they're reported using VBUSERROR irqs. 566 * errors, though they're reported using VBUSERROR irqs.
567 * 567 *
568 * Workarounds: (a) hardware: use self powered devices. 568 * Workarounds: (a) hardware: use self powered devices.
569 * (b) software: ignore non-repeated VBUS errors. 569 * (b) software: ignore non-repeated VBUS errors.
570 * 570 *
571 * REVISIT: do delays from lots of DEBUG_KERNEL checks 571 * REVISIT: do delays from lots of DEBUG_KERNEL checks
572 * make trouble here, keeping VBUS < 4.4V ? 572 * make trouble here, keeping VBUS < 4.4V ?
573 */ 573 */
574 switch (musb->xceiv->state) { 574 switch (musb->xceiv->state) {
575 case OTG_STATE_A_HOST: 575 case OTG_STATE_A_HOST:
576 /* recovery is dicey once we've gotten past the 576 /* recovery is dicey once we've gotten past the
577 * initial stages of enumeration, but if VBUS 577 * initial stages of enumeration, but if VBUS
578 * stayed ok at the other end of the link, and 578 * stayed ok at the other end of the link, and
579 * another reset is due (at least for high speed, 579 * another reset is due (at least for high speed,
580 * to redo the chirp etc), it might work OK... 580 * to redo the chirp etc), it might work OK...
581 */ 581 */
582 case OTG_STATE_A_WAIT_BCON: 582 case OTG_STATE_A_WAIT_BCON:
583 case OTG_STATE_A_WAIT_VRISE: 583 case OTG_STATE_A_WAIT_VRISE:
584 if (musb->vbuserr_retry) { 584 if (musb->vbuserr_retry) {
585 void __iomem *mbase = musb->mregs; 585 void __iomem *mbase = musb->mregs;
586 586
587 musb->vbuserr_retry--; 587 musb->vbuserr_retry--;
588 ignore = 1; 588 ignore = 1;
589 devctl |= MUSB_DEVCTL_SESSION; 589 devctl |= MUSB_DEVCTL_SESSION;
590 musb_writeb(mbase, MUSB_DEVCTL, devctl); 590 musb_writeb(mbase, MUSB_DEVCTL, devctl);
591 } else { 591 } else {
592 musb->port1_status |= 592 musb->port1_status |=
593 USB_PORT_STAT_OVERCURRENT 593 USB_PORT_STAT_OVERCURRENT
594 | (USB_PORT_STAT_C_OVERCURRENT << 16); 594 | (USB_PORT_STAT_C_OVERCURRENT << 16);
595 } 595 }
596 break; 596 break;
597 default: 597 default:
598 break; 598 break;
599 } 599 }
600 600
601 dev_dbg(musb->controller, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n", 601 dev_dbg(musb->controller, "VBUS_ERROR in %s (%02x, %s), retry #%d, port1 %08x\n",
602 otg_state_string(musb->xceiv->state), 602 otg_state_string(musb->xceiv->state),
603 devctl, 603 devctl,
604 ({ char *s; 604 ({ char *s;
605 switch (devctl & MUSB_DEVCTL_VBUS) { 605 switch (devctl & MUSB_DEVCTL_VBUS) {
606 case 0 << MUSB_DEVCTL_VBUS_SHIFT: 606 case 0 << MUSB_DEVCTL_VBUS_SHIFT:
607 s = "<SessEnd"; break; 607 s = "<SessEnd"; break;
608 case 1 << MUSB_DEVCTL_VBUS_SHIFT: 608 case 1 << MUSB_DEVCTL_VBUS_SHIFT:
609 s = "<AValid"; break; 609 s = "<AValid"; break;
610 case 2 << MUSB_DEVCTL_VBUS_SHIFT: 610 case 2 << MUSB_DEVCTL_VBUS_SHIFT:
611 s = "<VBusValid"; break; 611 s = "<VBusValid"; break;
612 /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */ 612 /* case 3 << MUSB_DEVCTL_VBUS_SHIFT: */
613 default: 613 default:
614 s = "VALID"; break; 614 s = "VALID"; break;
615 }; s; }), 615 }; s; }),
616 VBUSERR_RETRY_COUNT - musb->vbuserr_retry, 616 VBUSERR_RETRY_COUNT - musb->vbuserr_retry,
617 musb->port1_status); 617 musb->port1_status);
618 618
619 /* go through A_WAIT_VFALL then start a new session */ 619 /* go through A_WAIT_VFALL then start a new session */
620 if (!ignore) 620 if (!ignore)
621 musb_platform_set_vbus(musb, 0); 621 musb_platform_set_vbus(musb, 0);
622 handled = IRQ_HANDLED; 622 handled = IRQ_HANDLED;
623 } 623 }
624 624
625 if (int_usb & MUSB_INTR_SUSPEND) { 625 if (int_usb & MUSB_INTR_SUSPEND) {
626 dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x power %02x\n", 626 dev_dbg(musb->controller, "SUSPEND (%s) devctl %02x power %02x\n",
627 otg_state_string(musb->xceiv->state), devctl, power); 627 otg_state_string(musb->xceiv->state), devctl, power);
628 handled = IRQ_HANDLED; 628 handled = IRQ_HANDLED;
629 629
630 switch (musb->xceiv->state) { 630 switch (musb->xceiv->state) {
631 case OTG_STATE_A_PERIPHERAL: 631 case OTG_STATE_A_PERIPHERAL:
632 /* We also come here if the cable is removed, since 632 /* We also come here if the cable is removed, since
633 * this silicon doesn't report ID-no-longer-grounded. 633 * this silicon doesn't report ID-no-longer-grounded.
634 * 634 *
635 * We depend on T(a_wait_bcon) to shut us down, and 635 * We depend on T(a_wait_bcon) to shut us down, and
636 * hope users don't do anything dicey during this 636 * hope users don't do anything dicey during this
637 * undesired detour through A_WAIT_BCON. 637 * undesired detour through A_WAIT_BCON.
638 */ 638 */
639 musb_hnp_stop(musb); 639 musb_hnp_stop(musb);
640 usb_hcd_resume_root_hub(musb_to_hcd(musb)); 640 usb_hcd_resume_root_hub(musb_to_hcd(musb));
641 musb_root_disconnect(musb); 641 musb_root_disconnect(musb);
642 musb_platform_try_idle(musb, jiffies 642 musb_platform_try_idle(musb, jiffies
643 + msecs_to_jiffies(musb->a_wait_bcon 643 + msecs_to_jiffies(musb->a_wait_bcon
644 ? : OTG_TIME_A_WAIT_BCON)); 644 ? : OTG_TIME_A_WAIT_BCON));
645 645
646 break; 646 break;
647 case OTG_STATE_B_IDLE: 647 case OTG_STATE_B_IDLE:
648 if (!musb->is_active) 648 if (!musb->is_active)
649 break; 649 break;
650 case OTG_STATE_B_PERIPHERAL: 650 case OTG_STATE_B_PERIPHERAL:
651 musb_g_suspend(musb); 651 musb_g_suspend(musb);
652 musb->is_active = is_otg_enabled(musb) 652 musb->is_active = otg->gadget->b_hnp_enable;
653 && otg->gadget->b_hnp_enable;
654 if (musb->is_active) { 653 if (musb->is_active) {
655 musb->xceiv->state = OTG_STATE_B_WAIT_ACON; 654 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
656 dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n"); 655 dev_dbg(musb->controller, "HNP: Setting timer for b_ase0_brst\n");
657 mod_timer(&musb->otg_timer, jiffies 656 mod_timer(&musb->otg_timer, jiffies
658 + msecs_to_jiffies( 657 + msecs_to_jiffies(
659 OTG_TIME_B_ASE0_BRST)); 658 OTG_TIME_B_ASE0_BRST));
660 } 659 }
661 break; 660 break;
662 case OTG_STATE_A_WAIT_BCON: 661 case OTG_STATE_A_WAIT_BCON:
663 if (musb->a_wait_bcon != 0) 662 if (musb->a_wait_bcon != 0)
664 musb_platform_try_idle(musb, jiffies 663 musb_platform_try_idle(musb, jiffies
665 + msecs_to_jiffies(musb->a_wait_bcon)); 664 + msecs_to_jiffies(musb->a_wait_bcon));
666 break; 665 break;
667 case OTG_STATE_A_HOST: 666 case OTG_STATE_A_HOST:
668 musb->xceiv->state = OTG_STATE_A_SUSPEND; 667 musb->xceiv->state = OTG_STATE_A_SUSPEND;
669 musb->is_active = is_otg_enabled(musb) 668 musb->is_active = otg->host->b_hnp_enable;
670 && otg->host->b_hnp_enable;
671 break; 669 break;
672 case OTG_STATE_B_HOST: 670 case OTG_STATE_B_HOST:
673 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */ 671 /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
674 dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n"); 672 dev_dbg(musb->controller, "REVISIT: SUSPEND as B_HOST\n");
675 break; 673 break;
676 default: 674 default:
677 /* "should not happen" */ 675 /* "should not happen" */
678 musb->is_active = 0; 676 musb->is_active = 0;
679 break; 677 break;
680 } 678 }
681 } 679 }
682 680
683 if (int_usb & MUSB_INTR_CONNECT) { 681 if (int_usb & MUSB_INTR_CONNECT) {
684 struct usb_hcd *hcd = musb_to_hcd(musb); 682 struct usb_hcd *hcd = musb_to_hcd(musb);
685 683
686 handled = IRQ_HANDLED; 684 handled = IRQ_HANDLED;
687 musb->is_active = 1; 685 musb->is_active = 1;
688 686
689 musb->ep0_stage = MUSB_EP0_START; 687 musb->ep0_stage = MUSB_EP0_START;
690 688
691 /* flush endpoints when transitioning from Device Mode */ 689 /* flush endpoints when transitioning from Device Mode */
692 if (is_peripheral_active(musb)) { 690 if (is_peripheral_active(musb)) {
693 /* REVISIT HNP; just force disconnect */ 691 /* REVISIT HNP; just force disconnect */
694 } 692 }
695 musb_writew(musb->mregs, MUSB_INTRTXE, musb->epmask); 693 musb_writew(musb->mregs, MUSB_INTRTXE, musb->epmask);
696 musb_writew(musb->mregs, MUSB_INTRRXE, musb->epmask & 0xfffe); 694 musb_writew(musb->mregs, MUSB_INTRRXE, musb->epmask & 0xfffe);
697 musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7); 695 musb_writeb(musb->mregs, MUSB_INTRUSBE, 0xf7);
698 musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED 696 musb->port1_status &= ~(USB_PORT_STAT_LOW_SPEED
699 |USB_PORT_STAT_HIGH_SPEED 697 |USB_PORT_STAT_HIGH_SPEED
700 |USB_PORT_STAT_ENABLE 698 |USB_PORT_STAT_ENABLE
701 ); 699 );
702 musb->port1_status |= USB_PORT_STAT_CONNECTION 700 musb->port1_status |= USB_PORT_STAT_CONNECTION
703 |(USB_PORT_STAT_C_CONNECTION << 16); 701 |(USB_PORT_STAT_C_CONNECTION << 16);
704 702
705 /* high vs full speed is just a guess until after reset */ 703 /* high vs full speed is just a guess until after reset */
706 if (devctl & MUSB_DEVCTL_LSDEV) 704 if (devctl & MUSB_DEVCTL_LSDEV)
707 musb->port1_status |= USB_PORT_STAT_LOW_SPEED; 705 musb->port1_status |= USB_PORT_STAT_LOW_SPEED;
708 706
709 /* indicate new connection to OTG machine */ 707 /* indicate new connection to OTG machine */
710 switch (musb->xceiv->state) { 708 switch (musb->xceiv->state) {
711 case OTG_STATE_B_PERIPHERAL: 709 case OTG_STATE_B_PERIPHERAL:
712 if (int_usb & MUSB_INTR_SUSPEND) { 710 if (int_usb & MUSB_INTR_SUSPEND) {
713 dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n"); 711 dev_dbg(musb->controller, "HNP: SUSPEND+CONNECT, now b_host\n");
714 int_usb &= ~MUSB_INTR_SUSPEND; 712 int_usb &= ~MUSB_INTR_SUSPEND;
715 goto b_host; 713 goto b_host;
716 } else 714 } else
717 dev_dbg(musb->controller, "CONNECT as b_peripheral???\n"); 715 dev_dbg(musb->controller, "CONNECT as b_peripheral???\n");
718 break; 716 break;
719 case OTG_STATE_B_WAIT_ACON: 717 case OTG_STATE_B_WAIT_ACON:
720 dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n"); 718 dev_dbg(musb->controller, "HNP: CONNECT, now b_host\n");
721 b_host: 719 b_host:
722 musb->xceiv->state = OTG_STATE_B_HOST; 720 musb->xceiv->state = OTG_STATE_B_HOST;
723 hcd->self.is_b_host = 1; 721 hcd->self.is_b_host = 1;
724 musb->ignore_disconnect = 0; 722 musb->ignore_disconnect = 0;
725 del_timer(&musb->otg_timer); 723 del_timer(&musb->otg_timer);
726 break; 724 break;
727 default: 725 default:
728 if ((devctl & MUSB_DEVCTL_VBUS) 726 if ((devctl & MUSB_DEVCTL_VBUS)
729 == (3 << MUSB_DEVCTL_VBUS_SHIFT)) { 727 == (3 << MUSB_DEVCTL_VBUS_SHIFT)) {
730 musb->xceiv->state = OTG_STATE_A_HOST; 728 musb->xceiv->state = OTG_STATE_A_HOST;
731 hcd->self.is_b_host = 0; 729 hcd->self.is_b_host = 0;
732 } 730 }
733 break; 731 break;
734 } 732 }
735 733
736 /* poke the root hub */ 734 /* poke the root hub */
737 MUSB_HST_MODE(musb); 735 MUSB_HST_MODE(musb);
738 if (hcd->status_urb) 736 if (hcd->status_urb)
739 usb_hcd_poll_rh_status(hcd); 737 usb_hcd_poll_rh_status(hcd);
740 else 738 else
741 usb_hcd_resume_root_hub(hcd); 739 usb_hcd_resume_root_hub(hcd);
742 740
743 dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n", 741 dev_dbg(musb->controller, "CONNECT (%s) devctl %02x\n",
744 otg_state_string(musb->xceiv->state), devctl); 742 otg_state_string(musb->xceiv->state), devctl);
745 } 743 }
746 744
747 if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) { 745 if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
748 dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n", 746 dev_dbg(musb->controller, "DISCONNECT (%s) as %s, devctl %02x\n",
749 otg_state_string(musb->xceiv->state), 747 otg_state_string(musb->xceiv->state),
750 MUSB_MODE(musb), devctl); 748 MUSB_MODE(musb), devctl);
751 handled = IRQ_HANDLED; 749 handled = IRQ_HANDLED;
752 750
753 switch (musb->xceiv->state) { 751 switch (musb->xceiv->state) {
754 case OTG_STATE_A_HOST: 752 case OTG_STATE_A_HOST:
755 case OTG_STATE_A_SUSPEND: 753 case OTG_STATE_A_SUSPEND:
756 usb_hcd_resume_root_hub(musb_to_hcd(musb)); 754 usb_hcd_resume_root_hub(musb_to_hcd(musb));
757 musb_root_disconnect(musb); 755 musb_root_disconnect(musb);
758 if (musb->a_wait_bcon != 0 && is_otg_enabled(musb)) 756 if (musb->a_wait_bcon != 0)
759 musb_platform_try_idle(musb, jiffies 757 musb_platform_try_idle(musb, jiffies
760 + msecs_to_jiffies(musb->a_wait_bcon)); 758 + msecs_to_jiffies(musb->a_wait_bcon));
761 break; 759 break;
762 case OTG_STATE_B_HOST: 760 case OTG_STATE_B_HOST:
763 /* REVISIT this behaves for "real disconnect" 761 /* REVISIT this behaves for "real disconnect"
764 * cases; make sure the other transitions from 762 * cases; make sure the other transitions from
765 * B_HOST act right too. The B_HOST code 763 * B_HOST act right too. The B_HOST code
766 * in hnp_stop() is currently not used... 764 * in hnp_stop() is currently not used...
767 */ 765 */
768 musb_root_disconnect(musb); 766 musb_root_disconnect(musb);
769 musb_to_hcd(musb)->self.is_b_host = 0; 767 musb_to_hcd(musb)->self.is_b_host = 0;
770 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 768 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
771 MUSB_DEV_MODE(musb); 769 MUSB_DEV_MODE(musb);
772 musb_g_disconnect(musb); 770 musb_g_disconnect(musb);
773 break; 771 break;
774 case OTG_STATE_A_PERIPHERAL: 772 case OTG_STATE_A_PERIPHERAL:
775 musb_hnp_stop(musb); 773 musb_hnp_stop(musb);
776 musb_root_disconnect(musb); 774 musb_root_disconnect(musb);
777 /* FALLTHROUGH */ 775 /* FALLTHROUGH */
778 case OTG_STATE_B_WAIT_ACON: 776 case OTG_STATE_B_WAIT_ACON:
779 /* FALLTHROUGH */ 777 /* FALLTHROUGH */
780 case OTG_STATE_B_PERIPHERAL: 778 case OTG_STATE_B_PERIPHERAL:
781 case OTG_STATE_B_IDLE: 779 case OTG_STATE_B_IDLE:
782 musb_g_disconnect(musb); 780 musb_g_disconnect(musb);
783 break; 781 break;
784 default: 782 default:
785 WARNING("unhandled DISCONNECT transition (%s)\n", 783 WARNING("unhandled DISCONNECT transition (%s)\n",
786 otg_state_string(musb->xceiv->state)); 784 otg_state_string(musb->xceiv->state));
787 break; 785 break;
788 } 786 }
789 } 787 }
790 788
791 /* Mentor saves a bit: bus reset and babble share the same irq. 789 /* Mentor saves a bit: bus reset and babble share the same irq.
792 * only host sees babble; only peripheral sees bus reset. 790 * only host sees babble; only peripheral sees bus reset.
793 */ 791 */
794 if (int_usb & MUSB_INTR_RESET) { 792 if (int_usb & MUSB_INTR_RESET) {
795 handled = IRQ_HANDLED; 793 handled = IRQ_HANDLED;
796 if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) { 794 if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
797 /* 795 /*
798 * Looks like non-HS BABBLE can be ignored, but 796 * Looks like non-HS BABBLE can be ignored, but
799 * HS BABBLE is an error condition. For HS the solution 797 * HS BABBLE is an error condition. For HS the solution
800 * is to avoid babble in the first place and fix what 798 * is to avoid babble in the first place and fix what
801 * caused BABBLE. When HS BABBLE happens we can only 799 * caused BABBLE. When HS BABBLE happens we can only
802 * stop the session. 800 * stop the session.
803 */ 801 */
804 if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV)) 802 if (devctl & (MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV))
805 dev_dbg(musb->controller, "BABBLE devctl: %02x\n", devctl); 803 dev_dbg(musb->controller, "BABBLE devctl: %02x\n", devctl);
806 else { 804 else {
807 ERR("Stopping host session -- babble\n"); 805 ERR("Stopping host session -- babble\n");
808 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 806 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
809 } 807 }
810 } else if (is_peripheral_capable()) { 808 } else if (is_peripheral_capable()) {
811 dev_dbg(musb->controller, "BUS RESET as %s\n", 809 dev_dbg(musb->controller, "BUS RESET as %s\n",
812 otg_state_string(musb->xceiv->state)); 810 otg_state_string(musb->xceiv->state));
813 switch (musb->xceiv->state) { 811 switch (musb->xceiv->state) {
814 case OTG_STATE_A_SUSPEND: 812 case OTG_STATE_A_SUSPEND:
815 /* We need to ignore disconnect on suspend 813 /* We need to ignore disconnect on suspend
816 * otherwise tusb 2.0 won't reconnect after a 814 * otherwise tusb 2.0 won't reconnect after a
817 * power cycle, which breaks otg compliance. 815 * power cycle, which breaks otg compliance.
818 */ 816 */
819 musb->ignore_disconnect = 1; 817 musb->ignore_disconnect = 1;
820 musb_g_reset(musb); 818 musb_g_reset(musb);
821 /* FALLTHROUGH */ 819 /* FALLTHROUGH */
822 case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */ 820 case OTG_STATE_A_WAIT_BCON: /* OPT TD.4.7-900ms */
823 /* never use invalid T(a_wait_bcon) */ 821 /* never use invalid T(a_wait_bcon) */
824 dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n", 822 dev_dbg(musb->controller, "HNP: in %s, %d msec timeout\n",
825 otg_state_string(musb->xceiv->state), 823 otg_state_string(musb->xceiv->state),
826 TA_WAIT_BCON(musb)); 824 TA_WAIT_BCON(musb));
827 mod_timer(&musb->otg_timer, jiffies 825 mod_timer(&musb->otg_timer, jiffies
828 + msecs_to_jiffies(TA_WAIT_BCON(musb))); 826 + msecs_to_jiffies(TA_WAIT_BCON(musb)));
829 break; 827 break;
830 case OTG_STATE_A_PERIPHERAL: 828 case OTG_STATE_A_PERIPHERAL:
831 musb->ignore_disconnect = 0; 829 musb->ignore_disconnect = 0;
832 del_timer(&musb->otg_timer); 830 del_timer(&musb->otg_timer);
833 musb_g_reset(musb); 831 musb_g_reset(musb);
834 break; 832 break;
835 case OTG_STATE_B_WAIT_ACON: 833 case OTG_STATE_B_WAIT_ACON:
836 dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n", 834 dev_dbg(musb->controller, "HNP: RESET (%s), to b_peripheral\n",
837 otg_state_string(musb->xceiv->state)); 835 otg_state_string(musb->xceiv->state));
838 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 836 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
839 musb_g_reset(musb); 837 musb_g_reset(musb);
840 break; 838 break;
841 case OTG_STATE_B_IDLE: 839 case OTG_STATE_B_IDLE:
842 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 840 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
843 /* FALLTHROUGH */ 841 /* FALLTHROUGH */
844 case OTG_STATE_B_PERIPHERAL: 842 case OTG_STATE_B_PERIPHERAL:
845 musb_g_reset(musb); 843 musb_g_reset(musb);
846 break; 844 break;
847 default: 845 default:
848 dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n", 846 dev_dbg(musb->controller, "Unhandled BUS RESET as %s\n",
849 otg_state_string(musb->xceiv->state)); 847 otg_state_string(musb->xceiv->state));
850 } 848 }
851 } 849 }
852 } 850 }
853 851
854 #if 0 852 #if 0
855 /* REVISIT ... this would be for multiplexing periodic endpoints, or 853 /* REVISIT ... this would be for multiplexing periodic endpoints, or
856 * supporting transfer phasing to prevent exceeding ISO bandwidth 854 * supporting transfer phasing to prevent exceeding ISO bandwidth
857 * limits of a given frame or microframe. 855 * limits of a given frame or microframe.
858 * 856 *
859 * It's not needed for peripheral side, which dedicates endpoints; 857 * It's not needed for peripheral side, which dedicates endpoints;
860 * though it _might_ use SOF irqs for other purposes. 858 * though it _might_ use SOF irqs for other purposes.
861 * 859 *
862 * And it's not currently needed for host side, which also dedicates 860 * And it's not currently needed for host side, which also dedicates
863 * endpoints, relies on TX/RX interval registers, and isn't claimed 861 * endpoints, relies on TX/RX interval registers, and isn't claimed
864 * to support ISO transfers yet. 862 * to support ISO transfers yet.
865 */ 863 */
866 if (int_usb & MUSB_INTR_SOF) { 864 if (int_usb & MUSB_INTR_SOF) {
867 void __iomem *mbase = musb->mregs; 865 void __iomem *mbase = musb->mregs;
868 struct musb_hw_ep *ep; 866 struct musb_hw_ep *ep;
869 u8 epnum; 867 u8 epnum;
870 u16 frame; 868 u16 frame;
871 869
872 dev_dbg(musb->controller, "START_OF_FRAME\n"); 870 dev_dbg(musb->controller, "START_OF_FRAME\n");
873 handled = IRQ_HANDLED; 871 handled = IRQ_HANDLED;
874 872
875 /* start any periodic Tx transfers waiting for current frame */ 873 /* start any periodic Tx transfers waiting for current frame */
876 frame = musb_readw(mbase, MUSB_FRAME); 874 frame = musb_readw(mbase, MUSB_FRAME);
877 ep = musb->endpoints; 875 ep = musb->endpoints;
878 for (epnum = 1; (epnum < musb->nr_endpoints) 876 for (epnum = 1; (epnum < musb->nr_endpoints)
879 && (musb->epmask >= (1 << epnum)); 877 && (musb->epmask >= (1 << epnum));
880 epnum++, ep++) { 878 epnum++, ep++) {
881 /* 879 /*
882 * FIXME handle framecounter wraps (12 bits) 880 * FIXME handle framecounter wraps (12 bits)
883 * eliminate duplicated StartUrb logic 881 * eliminate duplicated StartUrb logic
884 */ 882 */
885 if (ep->dwWaitFrame >= frame) { 883 if (ep->dwWaitFrame >= frame) {
886 ep->dwWaitFrame = 0; 884 ep->dwWaitFrame = 0;
887 pr_debug("SOF --> periodic TX%s on %d\n", 885 pr_debug("SOF --> periodic TX%s on %d\n",
888 ep->tx_channel ? " DMA" : "", 886 ep->tx_channel ? " DMA" : "",
889 epnum); 887 epnum);
890 if (!ep->tx_channel) 888 if (!ep->tx_channel)
891 musb_h_tx_start(musb, epnum); 889 musb_h_tx_start(musb, epnum);
892 else 890 else
893 cppi_hostdma_start(musb, epnum); 891 cppi_hostdma_start(musb, epnum);
894 } 892 }
895 } /* end of for loop */ 893 } /* end of for loop */
896 } 894 }
897 #endif 895 #endif
898 896
899 schedule_work(&musb->irq_work); 897 schedule_work(&musb->irq_work);
900 898
901 return handled; 899 return handled;
902 } 900 }
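The stage0 handler above is not itself an interrupt entry point: a glue layer latches its interrupt status into struct musb and then calls musb_interrupt(), which snapshots DEVCTL and POWER before dispatching here. Below is a minimal sketch of that dispatch pattern (editorial illustration only, not part of this file or commit; the example_glue_interrupt name is hypothetical, and a real glue layer such as am35x reads its own SoC-specific status registers rather than the generic MUSB_INTR* ones):

static irqreturn_t example_glue_interrupt(int irq, void *__hci)
{
	struct musb	*musb = __hci;
	unsigned long	flags;
	irqreturn_t	ret = IRQ_NONE;

	spin_lock_irqsave(&musb->lock, flags);

	/* latch pending interrupt causes for the core to consume */
	musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
	musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
	musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);

	if (musb->int_usb || musb->int_tx || musb->int_rx)
		/* musb_interrupt() is what ends up in musb_stage0_irq() */
		ret = musb_interrupt(musb);

	spin_unlock_irqrestore(&musb->lock, flags);

	return ret;
}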
903 901
904 /*-------------------------------------------------------------------------*/ 902 /*-------------------------------------------------------------------------*/
905 903
906 /* 904 /*
907 * Program the HDRC to start (enable interrupts, dma, etc.). 905 * Program the HDRC to start (enable interrupts, dma, etc.).
908 */ 906 */
909 void musb_start(struct musb *musb) 907 void musb_start(struct musb *musb)
910 { 908 {
911 void __iomem *regs = musb->mregs; 909 void __iomem *regs = musb->mregs;
912 u8 devctl = musb_readb(regs, MUSB_DEVCTL); 910 u8 devctl = musb_readb(regs, MUSB_DEVCTL);
913 911
914 dev_dbg(musb->controller, "<== devctl %02x\n", devctl); 912 dev_dbg(musb->controller, "<== devctl %02x\n", devctl);
915 913
916 /* Set INT enable registers, enable interrupts */ 914 /* Set INT enable registers, enable interrupts */
917 musb_writew(regs, MUSB_INTRTXE, musb->epmask); 915 musb_writew(regs, MUSB_INTRTXE, musb->epmask);
918 musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe); 916 musb_writew(regs, MUSB_INTRRXE, musb->epmask & 0xfffe);
919 musb_writeb(regs, MUSB_INTRUSBE, 0xf7); 917 musb_writeb(regs, MUSB_INTRUSBE, 0xf7);
920 918
921 musb_writeb(regs, MUSB_TESTMODE, 0); 919 musb_writeb(regs, MUSB_TESTMODE, 0);
922 920
923 /* put into basic highspeed mode and start session */ 921 /* put into basic highspeed mode and start session */
924 musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE 922 musb_writeb(regs, MUSB_POWER, MUSB_POWER_ISOUPDATE
925 | MUSB_POWER_HSENAB 923 | MUSB_POWER_HSENAB
926 /* ENSUSPEND wedges tusb */ 924 /* ENSUSPEND wedges tusb */
927 /* | MUSB_POWER_ENSUSPEND */ 925 /* | MUSB_POWER_ENSUSPEND */
928 ); 926 );
929 927
930 musb->is_active = 0; 928 musb->is_active = 0;
931 devctl = musb_readb(regs, MUSB_DEVCTL); 929 devctl = musb_readb(regs, MUSB_DEVCTL);
932 devctl &= ~MUSB_DEVCTL_SESSION; 930 devctl &= ~MUSB_DEVCTL_SESSION;
933 931
934 if (is_otg_enabled(musb)) { 932 /* session started after:
935 /* session started after: 933 * (a) ID-grounded irq, host mode;
936 * (a) ID-grounded irq, host mode; 934 * (b) vbus present/connect IRQ, peripheral mode;
937 * (b) vbus present/connect IRQ, peripheral mode; 935 * (c) peripheral initiates, using SRP
938 * (c) peripheral initiates, using SRP 936 */
939 */ 937 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
940 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 938 musb->is_active = 1;
941 musb->is_active = 1; 939 else
942 else
943 devctl |= MUSB_DEVCTL_SESSION;
944
945 } else if (is_host_enabled(musb)) {
946 /* assume ID pin is hard-wired to ground */
947 devctl |= MUSB_DEVCTL_SESSION; 940 devctl |= MUSB_DEVCTL_SESSION;
948 941
949 } else /* peripheral is enabled */ {
950 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
951 musb->is_active = 1;
952 }
953 musb_platform_enable(musb); 942 musb_platform_enable(musb);
954 musb_writeb(regs, MUSB_DEVCTL, devctl); 943 musb_writeb(regs, MUSB_DEVCTL, devctl);
955 } 944 }
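One note on the VBUS test in musb_start(), since the same comparison recurs throughout this file (editorial note, derived from the DEVCTL decoding shown earlier in this section):

/*
 * MUSB_DEVCTL_VBUS is the two-bit VBus level field in DEVCTL, so
 * (devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS holds only for
 * level 3 (3 << MUSB_DEVCTL_VBUS_SHIFT), i.e. the "VALID" case in
 * the VBUS_ERROR decoder earlier in this file.
 */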
956 945
957 946
958 static void musb_generic_disable(struct musb *musb) 947 static void musb_generic_disable(struct musb *musb)
959 { 948 {
960 void __iomem *mbase = musb->mregs; 949 void __iomem *mbase = musb->mregs;
961 u16 temp; 950 u16 temp;
962 951
963 /* disable interrupts */ 952 /* disable interrupts */
964 musb_writeb(mbase, MUSB_INTRUSBE, 0); 953 musb_writeb(mbase, MUSB_INTRUSBE, 0);
965 musb_writew(mbase, MUSB_INTRTXE, 0); 954 musb_writew(mbase, MUSB_INTRTXE, 0);
966 musb_writew(mbase, MUSB_INTRRXE, 0); 955 musb_writew(mbase, MUSB_INTRRXE, 0);
967 956
968 /* off */ 957 /* off */
969 musb_writeb(mbase, MUSB_DEVCTL, 0); 958 musb_writeb(mbase, MUSB_DEVCTL, 0);
970 959
971 /* flush pending interrupts */ 960 /* flush pending interrupts */
972 temp = musb_readb(mbase, MUSB_INTRUSB); 961 temp = musb_readb(mbase, MUSB_INTRUSB);
973 temp = musb_readw(mbase, MUSB_INTRTX); 962 temp = musb_readw(mbase, MUSB_INTRTX);
974 temp = musb_readw(mbase, MUSB_INTRRX); 963 temp = musb_readw(mbase, MUSB_INTRRX);
975 964
976 } 965 }
977 966
978 /* 967 /*
979 * Make the HDRC stop (disable interrupts, etc.); 968 * Make the HDRC stop (disable interrupts, etc.);
980 * reversible by musb_start 969 * reversible by musb_start
981 * called on gadget driver unregister 970 * called on gadget driver unregister
982 * with controller locked, irqs blocked 971 * with controller locked, irqs blocked
983 * acts as a NOP unless some role activated the hardware 972 * acts as a NOP unless some role activated the hardware
984 */ 973 */
985 void musb_stop(struct musb *musb) 974 void musb_stop(struct musb *musb)
986 { 975 {
987 /* stop IRQs, timers, ... */ 976 /* stop IRQs, timers, ... */
988 musb_platform_disable(musb); 977 musb_platform_disable(musb);
989 musb_generic_disable(musb); 978 musb_generic_disable(musb);
990 dev_dbg(musb->controller, "HDRC disabled\n"); 979 dev_dbg(musb->controller, "HDRC disabled\n");
991 980
992 /* FIXME 981 /* FIXME
993 * - mark host and/or peripheral drivers unusable/inactive 982 * - mark host and/or peripheral drivers unusable/inactive
994 * - disable DMA (and enable it in HdrcStart) 983 * - disable DMA (and enable it in HdrcStart)
995 * - make sure we can musb_start() after musb_stop(); with 984 * - make sure we can musb_start() after musb_stop(); with
996 * OTG mode, gadget driver module rmmod/modprobe cycles that 985 * OTG mode, gadget driver module rmmod/modprobe cycles that
997 * - ... 986 * - ...
998 */ 987 */
999 musb_platform_try_idle(musb, 0); 988 musb_platform_try_idle(musb, 0);
1000 } 989 }
1001 990
1002 static void musb_shutdown(struct platform_device *pdev) 991 static void musb_shutdown(struct platform_device *pdev)
1003 { 992 {
1004 struct musb *musb = dev_to_musb(&pdev->dev); 993 struct musb *musb = dev_to_musb(&pdev->dev);
1005 unsigned long flags; 994 unsigned long flags;
1006 995
1007 pm_runtime_get_sync(musb->controller); 996 pm_runtime_get_sync(musb->controller);
1008 997
1009 musb_gadget_cleanup(musb); 998 musb_gadget_cleanup(musb);
1010 999
1011 spin_lock_irqsave(&musb->lock, flags); 1000 spin_lock_irqsave(&musb->lock, flags);
1012 musb_platform_disable(musb); 1001 musb_platform_disable(musb);
1013 musb_generic_disable(musb); 1002 musb_generic_disable(musb);
1014 spin_unlock_irqrestore(&musb->lock, flags); 1003 spin_unlock_irqrestore(&musb->lock, flags);
1015 1004
1016 if (!is_otg_enabled(musb) && is_host_enabled(musb))
1017 usb_remove_hcd(musb_to_hcd(musb));
1018 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 1005 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
1019 musb_platform_exit(musb); 1006 musb_platform_exit(musb);
1020 1007
1021 pm_runtime_put(musb->controller); 1008 pm_runtime_put(musb->controller);
1022 /* FIXME power down */ 1009 /* FIXME power down */
1023 } 1010 }
1024 1011
1025 1012
1026 /*-------------------------------------------------------------------------*/ 1013 /*-------------------------------------------------------------------------*/
1027 1014
1028 /* 1015 /*
1029 * The silicon either has hard-wired endpoint configurations, or else 1016 * The silicon either has hard-wired endpoint configurations, or else
1030 * "dynamic fifo" sizing. The driver has support for both, though at this 1017 * "dynamic fifo" sizing. The driver has support for both, though at this
1031 * writing only the dynamic sizing is very well tested. Since we switched 1018 * writing only the dynamic sizing is very well tested. Since we switched
1032 * away from compile-time hardware parameters, we can no longer rely on 1019 * away from compile-time hardware parameters, we can no longer rely on
1033 * dead code elimination to leave only the relevant one in the object file. 1020 * dead code elimination to leave only the relevant one in the object file.
1034 * 1021 *
1035 * We don't currently use dynamic fifo setup capability to do anything 1022 * We don't currently use dynamic fifo setup capability to do anything
1036 * more than selecting one of a bunch of predefined configurations. 1023 * more than selecting one of a bunch of predefined configurations.
1037 */ 1024 */
1038 #if defined(CONFIG_USB_MUSB_TUSB6010) \ 1025 #if defined(CONFIG_USB_MUSB_TUSB6010) \
1039 || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) \ 1026 || defined(CONFIG_USB_MUSB_TUSB6010_MODULE) \
1040 || defined(CONFIG_USB_MUSB_OMAP2PLUS) \ 1027 || defined(CONFIG_USB_MUSB_OMAP2PLUS) \
1041 || defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE) \ 1028 || defined(CONFIG_USB_MUSB_OMAP2PLUS_MODULE) \
1042 || defined(CONFIG_USB_MUSB_AM35X) \ 1029 || defined(CONFIG_USB_MUSB_AM35X) \
1043 || defined(CONFIG_USB_MUSB_AM35X_MODULE) \ 1030 || defined(CONFIG_USB_MUSB_AM35X_MODULE) \
1044 || defined(CONFIG_USB_MUSB_DSPS) \ 1031 || defined(CONFIG_USB_MUSB_DSPS) \
1045 || defined(CONFIG_USB_MUSB_DSPS_MODULE) 1032 || defined(CONFIG_USB_MUSB_DSPS_MODULE)
1046 static ushort __devinitdata fifo_mode = 4; 1033 static ushort __devinitdata fifo_mode = 4;
1047 #elif defined(CONFIG_USB_MUSB_UX500) \ 1034 #elif defined(CONFIG_USB_MUSB_UX500) \
1048 || defined(CONFIG_USB_MUSB_UX500_MODULE) 1035 || defined(CONFIG_USB_MUSB_UX500_MODULE)
1049 static ushort __devinitdata fifo_mode = 5; 1036 static ushort __devinitdata fifo_mode = 5;
1050 #else 1037 #else
1051 static ushort __devinitdata fifo_mode = 2; 1038 static ushort __devinitdata fifo_mode = 2;
1052 #endif 1039 #endif
1053 1040
1054 /* "modprobe ... fifo_mode=1" etc */ 1041 /* "modprobe ... fifo_mode=1" etc */
1055 module_param(fifo_mode, ushort, 0); 1042 module_param(fifo_mode, ushort, 0);
1056 MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration"); 1043 MODULE_PARM_DESC(fifo_mode, "initial endpoint configuration");
1057 1044
1058 /* 1045 /*
1059 * tables defining fifo_mode values. define more if you like. 1046 * tables defining fifo_mode values. define more if you like.
1060 * for host side, make sure both halves of ep1 are set up. 1047 * for host side, make sure both halves of ep1 are set up.
1061 */ 1048 */
1062 1049
1063 /* mode 0 - fits in 2KB */ 1050 /* mode 0 - fits in 2KB */
1064 static struct musb_fifo_cfg __devinitdata mode_0_cfg[] = { 1051 static struct musb_fifo_cfg __devinitdata mode_0_cfg[] = {
1065 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, 1052 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1066 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, 1053 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1067 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, }, 1054 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, },
1068 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, 1055 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1069 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, 1056 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1070 }; 1057 };
1071 1058
1072 /* mode 1 - fits in 4KB */ 1059 /* mode 1 - fits in 4KB */
1073 static struct musb_fifo_cfg __devinitdata mode_1_cfg[] = { 1060 static struct musb_fifo_cfg __devinitdata mode_1_cfg[] = {
1074 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, 1061 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1075 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, 1062 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1076 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, }, 1063 { .hw_ep_num = 2, .style = FIFO_RXTX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1077 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, 1064 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1078 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, 1065 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1079 }; 1066 };
1080 1067
1081 /* mode 2 - fits in 4KB */ 1068 /* mode 2 - fits in 4KB */
1082 static struct musb_fifo_cfg __devinitdata mode_2_cfg[] = { 1069 static struct musb_fifo_cfg __devinitdata mode_2_cfg[] = {
1083 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, 1070 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1084 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, 1071 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1085 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, 1072 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1086 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, 1073 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1087 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, 1074 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1088 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, 1075 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1089 }; 1076 };
1090 1077
1091 /* mode 3 - fits in 4KB */ 1078 /* mode 3 - fits in 4KB */
1092 static struct musb_fifo_cfg __devinitdata mode_3_cfg[] = { 1079 static struct musb_fifo_cfg __devinitdata mode_3_cfg[] = {
1093 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, }, 1080 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1094 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, }, 1081 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, .mode = BUF_DOUBLE, },
1095 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, 1082 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1096 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, 1083 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1097 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, }, 1084 { .hw_ep_num = 3, .style = FIFO_RXTX, .maxpacket = 256, },
1098 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, }, 1085 { .hw_ep_num = 4, .style = FIFO_RXTX, .maxpacket = 256, },
1099 }; 1086 };
1100 1087
1101 /* mode 4 - fits in 16KB */ 1088 /* mode 4 - fits in 16KB */
1102 static struct musb_fifo_cfg __devinitdata mode_4_cfg[] = { 1089 static struct musb_fifo_cfg __devinitdata mode_4_cfg[] = {
1103 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, 1090 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1104 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, 1091 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1105 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, 1092 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1106 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, 1093 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1107 { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, 1094 { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1108 { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, 1095 { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1109 { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, 1096 { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1110 { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, 1097 { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1111 { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, 1098 { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1112 { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, 1099 { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1113 { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, }, 1100 { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 512, },
1114 { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, }, 1101 { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 512, },
1115 { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, }, 1102 { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 512, },
1116 { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, }, 1103 { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 512, },
1117 { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, }, 1104 { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 512, },
1118 { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, }, 1105 { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 512, },
1119 { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, }, 1106 { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 512, },
1120 { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, }, 1107 { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 512, },
1121 { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, }, 1108 { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 256, },
1122 { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, }, 1109 { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 64, },
1123 { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, }, 1110 { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 256, },
1124 { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, }, 1111 { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 64, },
1125 { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, }, 1112 { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 256, },
1126 { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, }, 1113 { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 64, },
1127 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, }, 1114 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 4096, },
1128 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, 1115 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1129 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, 1116 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1130 }; 1117 };
1131 1118
1132 /* mode 5 - fits in 8KB */ 1119 /* mode 5 - fits in 8KB */
1133 static struct musb_fifo_cfg __devinitdata mode_5_cfg[] = { 1120 static struct musb_fifo_cfg __devinitdata mode_5_cfg[] = {
1134 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, }, 1121 { .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
1135 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, }, 1122 { .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
1136 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, }, 1123 { .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
1137 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, }, 1124 { .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
1138 { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, }, 1125 { .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
1139 { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, }, 1126 { .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
1140 { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, }, 1127 { .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
1141 { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, }, 1128 { .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
1142 { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, }, 1129 { .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
1143 { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, }, 1130 { .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
1144 { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, }, 1131 { .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
1145 { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, }, 1132 { .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
1146 { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, }, 1133 { .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
1147 { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, }, 1134 { .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
1148 { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, }, 1135 { .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
1149 { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, }, 1136 { .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
1150 { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, }, 1137 { .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
1151 { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, }, 1138 { .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
1152 { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, }, 1139 { .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
1153 { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, }, 1140 { .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
1154 { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, }, 1141 { .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
1155 { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, }, 1142 { .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
1156 { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, }, 1143 { .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
1157 { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, }, 1144 { .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
1158 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, }, 1145 { .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
1159 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, }, 1146 { .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
1160 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, }, 1147 { .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
1161 }; 1148 };
1162 1149
1163 /* 1150 /*
1164 * configure a fifo; for non-shared endpoints, this may be called 1151 * configure a fifo; for non-shared endpoints, this may be called
1165 * once for a tx fifo and once for an rx fifo. 1152 * once for a tx fifo and once for an rx fifo.
1166 * 1153 *
1167 * returns negative errno or offset for next fifo. 1154 * returns negative errno or offset for next fifo.
1168 */ 1155 */
1169 static int __devinit 1156 static int __devinit
1170 fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep, 1157 fifo_setup(struct musb *musb, struct musb_hw_ep *hw_ep,
1171 const struct musb_fifo_cfg *cfg, u16 offset) 1158 const struct musb_fifo_cfg *cfg, u16 offset)
1172 { 1159 {
1173 void __iomem *mbase = musb->mregs; 1160 void __iomem *mbase = musb->mregs;
1174 int size = 0; 1161 int size = 0;
1175 u16 maxpacket = cfg->maxpacket; 1162 u16 maxpacket = cfg->maxpacket;
1176 u16 c_off = offset >> 3; 1163 u16 c_off = offset >> 3;
1177 u8 c_size; 1164 u8 c_size;
1178 1165
1179 /* expect hw_ep has already been zero-initialized */ 1166 /* expect hw_ep has already been zero-initialized */
1180 1167
1181 size = ffs(max(maxpacket, (u16) 8)) - 1; 1168 size = ffs(max(maxpacket, (u16) 8)) - 1;
1182 maxpacket = 1 << size; 1169 maxpacket = 1 << size;
1183 1170
1184 c_size = size - 3; 1171 c_size = size - 3;
1185 if (cfg->mode == BUF_DOUBLE) { 1172 if (cfg->mode == BUF_DOUBLE) {
1186 if ((offset + (maxpacket << 1)) > 1173 if ((offset + (maxpacket << 1)) >
1187 (1 << (musb->config->ram_bits + 2))) 1174 (1 << (musb->config->ram_bits + 2)))
1188 return -EMSGSIZE; 1175 return -EMSGSIZE;
1189 c_size |= MUSB_FIFOSZ_DPB; 1176 c_size |= MUSB_FIFOSZ_DPB;
1190 } else { 1177 } else {
1191 if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2))) 1178 if ((offset + maxpacket) > (1 << (musb->config->ram_bits + 2)))
1192 return -EMSGSIZE; 1179 return -EMSGSIZE;
1193 } 1180 }
1194 1181
1195 /* configure the FIFO */ 1182 /* configure the FIFO */
1196 musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum); 1183 musb_writeb(mbase, MUSB_INDEX, hw_ep->epnum);
1197 1184
1198 /* EP0 reserved endpoint for control, bidirectional; 1185 /* EP0 reserved endpoint for control, bidirectional;
1199 * EP1 reserved for bulk, two unidirectional halves. 1186 * EP1 reserved for bulk, two unidirectional halves.
1200 */ 1187 */
1201 if (hw_ep->epnum == 1) 1188 if (hw_ep->epnum == 1)
1202 musb->bulk_ep = hw_ep; 1189 musb->bulk_ep = hw_ep;
1203 /* REVISIT error check: be sure ep0 can both rx and tx ... */ 1190 /* REVISIT error check: be sure ep0 can both rx and tx ... */
1204 switch (cfg->style) { 1191 switch (cfg->style) {
1205 case FIFO_TX: 1192 case FIFO_TX:
1206 musb_write_txfifosz(mbase, c_size); 1193 musb_write_txfifosz(mbase, c_size);
1207 musb_write_txfifoadd(mbase, c_off); 1194 musb_write_txfifoadd(mbase, c_off);
1208 hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); 1195 hw_ep->tx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1209 hw_ep->max_packet_sz_tx = maxpacket; 1196 hw_ep->max_packet_sz_tx = maxpacket;
1210 break; 1197 break;
1211 case FIFO_RX: 1198 case FIFO_RX:
1212 musb_write_rxfifosz(mbase, c_size); 1199 musb_write_rxfifosz(mbase, c_size);
1213 musb_write_rxfifoadd(mbase, c_off); 1200 musb_write_rxfifoadd(mbase, c_off);
1214 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); 1201 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1215 hw_ep->max_packet_sz_rx = maxpacket; 1202 hw_ep->max_packet_sz_rx = maxpacket;
1216 break; 1203 break;
1217 case FIFO_RXTX: 1204 case FIFO_RXTX:
1218 musb_write_txfifosz(mbase, c_size); 1205 musb_write_txfifosz(mbase, c_size);
1219 musb_write_txfifoadd(mbase, c_off); 1206 musb_write_txfifoadd(mbase, c_off);
1220 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB); 1207 hw_ep->rx_double_buffered = !!(c_size & MUSB_FIFOSZ_DPB);
1221 hw_ep->max_packet_sz_rx = maxpacket; 1208 hw_ep->max_packet_sz_rx = maxpacket;
1222 1209
1223 musb_write_rxfifosz(mbase, c_size); 1210 musb_write_rxfifosz(mbase, c_size);
1224 musb_write_rxfifoadd(mbase, c_off); 1211 musb_write_rxfifoadd(mbase, c_off);
1225 hw_ep->tx_double_buffered = hw_ep->rx_double_buffered; 1212 hw_ep->tx_double_buffered = hw_ep->rx_double_buffered;
1226 hw_ep->max_packet_sz_tx = maxpacket; 1213 hw_ep->max_packet_sz_tx = maxpacket;
1227 1214
1228 hw_ep->is_shared_fifo = true; 1215 hw_ep->is_shared_fifo = true;
1229 break; 1216 break;
1230 } 1217 }
1231 1218
1232 /* NOTE rx and tx endpoint irqs aren't managed separately, 1219 /* NOTE rx and tx endpoint irqs aren't managed separately,
1233 * which happens to be ok 1220 * which happens to be ok
1234 */ 1221 */
1235 musb->epmask |= (1 << hw_ep->epnum); 1222 musb->epmask |= (1 << hw_ep->epnum);
1236 1223
1237 return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0)); 1224 return offset + (maxpacket << ((c_size & MUSB_FIFOSZ_DPB) ? 1 : 0));
1238 } 1225 }
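To make the size and offset arithmetic in fifo_setup() concrete, here is a worked example (editorial illustration only, using values taken from the mode tables above):

/*
 * fifo_setup() worked example: maxpacket = 512, mode = BUF_DOUBLE,
 * offset = 64 (i.e. right after the 64-byte ep0 FIFO).
 *
 *	size	= ffs(512) - 1		= 9	so maxpacket rounds to 1 << 9 = 512
 *	c_size	= 9 - 3			= 6	then ORed with MUSB_FIFOSZ_DPB
 *	c_off	= 64 >> 3		= 8	(FIFOADD counts 8-byte units)
 *	return	= 64 + (512 << 1)	= 1088	(double buffering uses 2 * 512 bytes)
 *
 * The overflow check compares that same 64 + (512 << 1) against
 * 1 << (ram_bits + 2), the total FIFO RAM in bytes.
 */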
1239 1226
1240 static struct musb_fifo_cfg __devinitdata ep0_cfg = { 1227 static struct musb_fifo_cfg __devinitdata ep0_cfg = {
1241 .style = FIFO_RXTX, .maxpacket = 64, 1228 .style = FIFO_RXTX, .maxpacket = 64,
1242 }; 1229 };
1243 1230
1244 static int __devinit ep_config_from_table(struct musb *musb) 1231 static int __devinit ep_config_from_table(struct musb *musb)
1245 { 1232 {
1246 const struct musb_fifo_cfg *cfg; 1233 const struct musb_fifo_cfg *cfg;
1247 unsigned i, n; 1234 unsigned i, n;
1248 int offset; 1235 int offset;
1249 struct musb_hw_ep *hw_ep = musb->endpoints; 1236 struct musb_hw_ep *hw_ep = musb->endpoints;
1250 1237
1251 if (musb->config->fifo_cfg) { 1238 if (musb->config->fifo_cfg) {
1252 cfg = musb->config->fifo_cfg; 1239 cfg = musb->config->fifo_cfg;
1253 n = musb->config->fifo_cfg_size; 1240 n = musb->config->fifo_cfg_size;
1254 goto done; 1241 goto done;
1255 } 1242 }
1256 1243
1257 switch (fifo_mode) { 1244 switch (fifo_mode) {
1258 default: 1245 default:
1259 fifo_mode = 0; 1246 fifo_mode = 0;
1260 /* FALLTHROUGH */ 1247 /* FALLTHROUGH */
1261 case 0: 1248 case 0:
1262 cfg = mode_0_cfg; 1249 cfg = mode_0_cfg;
1263 n = ARRAY_SIZE(mode_0_cfg); 1250 n = ARRAY_SIZE(mode_0_cfg);
1264 break; 1251 break;
1265 case 1: 1252 case 1:
1266 cfg = mode_1_cfg; 1253 cfg = mode_1_cfg;
1267 n = ARRAY_SIZE(mode_1_cfg); 1254 n = ARRAY_SIZE(mode_1_cfg);
1268 break; 1255 break;
1269 case 2: 1256 case 2:
1270 cfg = mode_2_cfg; 1257 cfg = mode_2_cfg;
1271 n = ARRAY_SIZE(mode_2_cfg); 1258 n = ARRAY_SIZE(mode_2_cfg);
1272 break; 1259 break;
1273 case 3: 1260 case 3:
1274 cfg = mode_3_cfg; 1261 cfg = mode_3_cfg;
1275 n = ARRAY_SIZE(mode_3_cfg); 1262 n = ARRAY_SIZE(mode_3_cfg);
1276 break; 1263 break;
1277 case 4: 1264 case 4:
1278 cfg = mode_4_cfg; 1265 cfg = mode_4_cfg;
1279 n = ARRAY_SIZE(mode_4_cfg); 1266 n = ARRAY_SIZE(mode_4_cfg);
1280 break; 1267 break;
1281 case 5: 1268 case 5:
1282 cfg = mode_5_cfg; 1269 cfg = mode_5_cfg;
1283 n = ARRAY_SIZE(mode_5_cfg); 1270 n = ARRAY_SIZE(mode_5_cfg);
1284 break; 1271 break;
1285 } 1272 }
1286 1273
1287 printk(KERN_DEBUG "%s: setup fifo_mode %d\n", 1274 printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
1288 musb_driver_name, fifo_mode); 1275 musb_driver_name, fifo_mode);
1289 1276
1290 1277
1291 done: 1278 done:
1292 offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0); 1279 offset = fifo_setup(musb, hw_ep, &ep0_cfg, 0);
1293 /* assert(offset > 0) */ 1280 /* assert(offset > 0) */
1294 1281
1295 /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would 1282 /* NOTE: for RTL versions >= 1.400 EPINFO and RAMINFO would
1296 * be better than static musb->config->num_eps and DYN_FIFO_SIZE... 1283 * be better than static musb->config->num_eps and DYN_FIFO_SIZE...
1297 */ 1284 */
1298 1285
1299 for (i = 0; i < n; i++) { 1286 for (i = 0; i < n; i++) {
1300 u8 epn = cfg->hw_ep_num; 1287 u8 epn = cfg->hw_ep_num;
1301 1288
1302 if (epn >= musb->config->num_eps) { 1289 if (epn >= musb->config->num_eps) {
1303 pr_debug("%s: invalid ep %d\n", 1290 pr_debug("%s: invalid ep %d\n",
1304 musb_driver_name, epn); 1291 musb_driver_name, epn);
1305 return -EINVAL; 1292 return -EINVAL;
1306 } 1293 }
1307 offset = fifo_setup(musb, hw_ep + epn, cfg++, offset); 1294 offset = fifo_setup(musb, hw_ep + epn, cfg++, offset);
1308 if (offset < 0) { 1295 if (offset < 0) {
1309 pr_debug("%s: mem overrun, ep %d\n", 1296 pr_debug("%s: mem overrun, ep %d\n",
1310 musb_driver_name, epn); 1297 musb_driver_name, epn);
1311 return -EINVAL; 1298 return -EINVAL;
1312 } 1299 }
1313 epn++; 1300 epn++;
1314 musb->nr_endpoints = max(epn, musb->nr_endpoints); 1301 musb->nr_endpoints = max(epn, musb->nr_endpoints);
1315 } 1302 }
1316 1303
1317 printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n", 1304 printk(KERN_DEBUG "%s: %d/%d max ep, %d/%d memory\n",
1318 musb_driver_name, 1305 musb_driver_name,
1319 n + 1, musb->config->num_eps * 2 - 1, 1306 n + 1, musb->config->num_eps * 2 - 1,
1320 offset, (1 << (musb->config->ram_bits + 2))); 1307 offset, (1 << (musb->config->ram_bits + 2)));
1321 1308
1322 if (!musb->bulk_ep) { 1309 if (!musb->bulk_ep) {
1323 pr_debug("%s: missing bulk\n", musb_driver_name); 1310 pr_debug("%s: missing bulk\n", musb_driver_name);
1324 return -EINVAL; 1311 return -EINVAL;
1325 } 1312 }
1326 1313
1327 return 0; 1314 return 0;
1328 } 1315 }
1329 1316
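For illustration only, the accounting that this table walk delegates to fifo_setup() can be modelled in plain userspace C: each table entry claims maxpacket bytes of the shared FIFO RAM (assumed here to double for a double-buffered endpoint), offsets accumulate, and the total must fit in 1 << (ram_bits + 2) bytes, which is exactly the figure the "memory" printk above reports. The entry values and the doubling rule below are assumptions for the sketch, not the driver's exact arithmetic, since mode_2_cfg and fifo_setup() are defined elsewhere in this file.

/* Userspace-only model of the dynamic-FIFO accounting; values are illustrative. */
#include <stdio.h>

struct cfg { int hw_ep_num; int maxpacket; int double_buffered; };

int main(void)
{
	const struct cfg table[] = {		/* shaped roughly like a mode_2-style table (assumed) */
		{ 1, 512, 0 }, { 1, 512, 0 },
		{ 2, 512, 0 }, { 2, 512, 0 },
		{ 3, 960, 0 }, { 4, 1024, 0 },
	};
	const int ram_bits = 12;		/* 1 << (12 + 2) = 16 KiB of FIFO RAM (assumed) */
	int offset = 64;			/* ep0_cfg always claims the first 64 bytes */
	unsigned i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		int size = table[i].maxpacket << (table[i].double_buffered ? 1 : 0);

		if (offset + size > (1 << (ram_bits + 2))) {
			printf("mem overrun at entry %u\n", i);	/* driver returns -EINVAL here */
			return 1;
		}
		printf("ep%d fifo at %d, size %d\n", table[i].hw_ep_num, offset, size);
		offset += size;
	}
	printf("%d/%d bytes of FIFO RAM used\n", offset, 1 << (ram_bits + 2));
	return 0;
}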
1330 1317
1331 /* 1318 /*
1332 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false 1319 * ep_config_from_hw - when MUSB_C_DYNFIFO_DEF is false
1333 * @param musb the controller 1320 * @param musb the controller
1334 */ 1321 */
1335 static int __devinit ep_config_from_hw(struct musb *musb) 1322 static int __devinit ep_config_from_hw(struct musb *musb)
1336 { 1323 {
1337 u8 epnum = 0; 1324 u8 epnum = 0;
1338 struct musb_hw_ep *hw_ep; 1325 struct musb_hw_ep *hw_ep;
1339 void __iomem *mbase = musb->mregs; 1326 void __iomem *mbase = musb->mregs;
1340 int ret = 0; 1327 int ret = 0;
1341 1328
1342 dev_dbg(musb->controller, "<== static silicon ep config\n"); 1329 dev_dbg(musb->controller, "<== static silicon ep config\n");
1343 1330
1344 /* FIXME pick up ep0 maxpacket size */ 1331 /* FIXME pick up ep0 maxpacket size */
1345 1332
1346 for (epnum = 1; epnum < musb->config->num_eps; epnum++) { 1333 for (epnum = 1; epnum < musb->config->num_eps; epnum++) {
1347 musb_ep_select(mbase, epnum); 1334 musb_ep_select(mbase, epnum);
1348 hw_ep = musb->endpoints + epnum; 1335 hw_ep = musb->endpoints + epnum;
1349 1336
1350 ret = musb_read_fifosize(musb, hw_ep, epnum); 1337 ret = musb_read_fifosize(musb, hw_ep, epnum);
1351 if (ret < 0) 1338 if (ret < 0)
1352 break; 1339 break;
1353 1340
1354 /* FIXME set up hw_ep->{rx,tx}_double_buffered */ 1341 /* FIXME set up hw_ep->{rx,tx}_double_buffered */
1355 1342
1356 /* pick an RX/TX endpoint for bulk */ 1343 /* pick an RX/TX endpoint for bulk */
1357 if (hw_ep->max_packet_sz_tx < 512 1344 if (hw_ep->max_packet_sz_tx < 512
1358 || hw_ep->max_packet_sz_rx < 512) 1345 || hw_ep->max_packet_sz_rx < 512)
1359 continue; 1346 continue;
1360 1347
1361 /* REVISIT: this algorithm is lazy, we should at least 1348 /* REVISIT: this algorithm is lazy, we should at least
1362 * try to pick a double buffered endpoint. 1349 * try to pick a double buffered endpoint.
1363 */ 1350 */
1364 if (musb->bulk_ep) 1351 if (musb->bulk_ep)
1365 continue; 1352 continue;
1366 musb->bulk_ep = hw_ep; 1353 musb->bulk_ep = hw_ep;
1367 } 1354 }
1368 1355
1369 if (!musb->bulk_ep) { 1356 if (!musb->bulk_ep) {
1370 pr_debug("%s: missing bulk\n", musb_driver_name); 1357 pr_debug("%s: missing bulk\n", musb_driver_name);
1371 return -EINVAL; 1358 return -EINVAL;
1372 } 1359 }
1373 1360
1374 return 0; 1361 return 0;
1375 } 1362 }
1376 1363
1377 enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, }; 1364 enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
1378 1365
1379 /* Initialize MUSB (M)HDRC part of the USB hardware subsystem; 1366 /* Initialize MUSB (M)HDRC part of the USB hardware subsystem;
1380 * configure endpoints, or take their config from silicon 1367 * configure endpoints, or take their config from silicon
1381 */ 1368 */
1382 static int __devinit musb_core_init(u16 musb_type, struct musb *musb) 1369 static int __devinit musb_core_init(u16 musb_type, struct musb *musb)
1383 { 1370 {
1384 u8 reg; 1371 u8 reg;
1385 char *type; 1372 char *type;
1386 char aInfo[90], aRevision[32], aDate[12]; 1373 char aInfo[90], aRevision[32], aDate[12];
1387 void __iomem *mbase = musb->mregs; 1374 void __iomem *mbase = musb->mregs;
1388 int status = 0; 1375 int status = 0;
1389 int i; 1376 int i;
1390 1377
1391 /* log core options (read using indexed model) */ 1378 /* log core options (read using indexed model) */
1392 reg = musb_read_configdata(mbase); 1379 reg = musb_read_configdata(mbase);
1393 1380
1394 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8"); 1381 strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
1395 if (reg & MUSB_CONFIGDATA_DYNFIFO) { 1382 if (reg & MUSB_CONFIGDATA_DYNFIFO) {
1396 strcat(aInfo, ", dyn FIFOs"); 1383 strcat(aInfo, ", dyn FIFOs");
1397 musb->dyn_fifo = true; 1384 musb->dyn_fifo = true;
1398 } 1385 }
1399 if (reg & MUSB_CONFIGDATA_MPRXE) { 1386 if (reg & MUSB_CONFIGDATA_MPRXE) {
1400 strcat(aInfo, ", bulk combine"); 1387 strcat(aInfo, ", bulk combine");
1401 musb->bulk_combine = true; 1388 musb->bulk_combine = true;
1402 } 1389 }
1403 if (reg & MUSB_CONFIGDATA_MPTXE) { 1390 if (reg & MUSB_CONFIGDATA_MPTXE) {
1404 strcat(aInfo, ", bulk split"); 1391 strcat(aInfo, ", bulk split");
1405 musb->bulk_split = true; 1392 musb->bulk_split = true;
1406 } 1393 }
1407 if (reg & MUSB_CONFIGDATA_HBRXE) { 1394 if (reg & MUSB_CONFIGDATA_HBRXE) {
1408 strcat(aInfo, ", HB-ISO Rx"); 1395 strcat(aInfo, ", HB-ISO Rx");
1409 musb->hb_iso_rx = true; 1396 musb->hb_iso_rx = true;
1410 } 1397 }
1411 if (reg & MUSB_CONFIGDATA_HBTXE) { 1398 if (reg & MUSB_CONFIGDATA_HBTXE) {
1412 strcat(aInfo, ", HB-ISO Tx"); 1399 strcat(aInfo, ", HB-ISO Tx");
1413 musb->hb_iso_tx = true; 1400 musb->hb_iso_tx = true;
1414 } 1401 }
1415 if (reg & MUSB_CONFIGDATA_SOFTCONE) 1402 if (reg & MUSB_CONFIGDATA_SOFTCONE)
1416 strcat(aInfo, ", SoftConn"); 1403 strcat(aInfo, ", SoftConn");
1417 1404
1418 printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n", 1405 printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
1419 musb_driver_name, reg, aInfo); 1406 musb_driver_name, reg, aInfo);
1420 1407
1421 aDate[0] = 0; 1408 aDate[0] = 0;
1422 if (MUSB_CONTROLLER_MHDRC == musb_type) { 1409 if (MUSB_CONTROLLER_MHDRC == musb_type) {
1423 musb->is_multipoint = 1; 1410 musb->is_multipoint = 1;
1424 type = "M"; 1411 type = "M";
1425 } else { 1412 } else {
1426 musb->is_multipoint = 0; 1413 musb->is_multipoint = 0;
1427 type = ""; 1414 type = "";
1428 #ifndef CONFIG_USB_OTG_BLACKLIST_HUB 1415 #ifndef CONFIG_USB_OTG_BLACKLIST_HUB
1429 printk(KERN_ERR 1416 printk(KERN_ERR
1430 "%s: kernel must blacklist external hubs\n", 1417 "%s: kernel must blacklist external hubs\n",
1431 musb_driver_name); 1418 musb_driver_name);
1432 #endif 1419 #endif
1433 } 1420 }
1434 1421
1435 /* log release info */ 1422 /* log release info */
1436 musb->hwvers = musb_read_hwvers(mbase); 1423 musb->hwvers = musb_read_hwvers(mbase);
1437 snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers), 1424 snprintf(aRevision, 32, "%d.%d%s", MUSB_HWVERS_MAJOR(musb->hwvers),
1438 MUSB_HWVERS_MINOR(musb->hwvers), 1425 MUSB_HWVERS_MINOR(musb->hwvers),
1439 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : ""); 1426 (musb->hwvers & MUSB_HWVERS_RC) ? "RC" : "");
1440 printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n", 1427 printk(KERN_DEBUG "%s: %sHDRC RTL version %s %s\n",
1441 musb_driver_name, type, aRevision, aDate); 1428 musb_driver_name, type, aRevision, aDate);
1442 1429
1443 /* configure ep0 */ 1430 /* configure ep0 */
1444 musb_configure_ep0(musb); 1431 musb_configure_ep0(musb);
1445 1432
1446 /* discover endpoint configuration */ 1433 /* discover endpoint configuration */
1447 musb->nr_endpoints = 1; 1434 musb->nr_endpoints = 1;
1448 musb->epmask = 1; 1435 musb->epmask = 1;
1449 1436
1450 if (musb->dyn_fifo) 1437 if (musb->dyn_fifo)
1451 status = ep_config_from_table(musb); 1438 status = ep_config_from_table(musb);
1452 else 1439 else
1453 status = ep_config_from_hw(musb); 1440 status = ep_config_from_hw(musb);
1454 1441
1455 if (status < 0) 1442 if (status < 0)
1456 return status; 1443 return status;
1457 1444
1458 /* finish init, and print endpoint config */ 1445 /* finish init, and print endpoint config */
1459 for (i = 0; i < musb->nr_endpoints; i++) { 1446 for (i = 0; i < musb->nr_endpoints; i++) {
1460 struct musb_hw_ep *hw_ep = musb->endpoints + i; 1447 struct musb_hw_ep *hw_ep = musb->endpoints + i;
1461 1448
1462 hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase; 1449 hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
1463 #if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE) 1450 #if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
1464 hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i); 1451 hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
1465 hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i); 1452 hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
1466 hw_ep->fifo_sync_va = 1453 hw_ep->fifo_sync_va =
1467 musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i); 1454 musb->sync_va + 0x400 + MUSB_FIFO_OFFSET(i);
1468 1455
1469 if (i == 0) 1456 if (i == 0)
1470 hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF; 1457 hw_ep->conf = mbase - 0x400 + TUSB_EP0_CONF;
1471 else 1458 else
1472 hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2); 1459 hw_ep->conf = mbase + 0x400 + (((i - 1) & 0xf) << 2);
1473 #endif 1460 #endif
1474 1461
1475 hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase; 1462 hw_ep->regs = MUSB_EP_OFFSET(i, 0) + mbase;
1476 hw_ep->target_regs = musb_read_target_reg_base(i, mbase); 1463 hw_ep->target_regs = musb_read_target_reg_base(i, mbase);
1477 hw_ep->rx_reinit = 1; 1464 hw_ep->rx_reinit = 1;
1478 hw_ep->tx_reinit = 1; 1465 hw_ep->tx_reinit = 1;
1479 1466
1480 if (hw_ep->max_packet_sz_tx) { 1467 if (hw_ep->max_packet_sz_tx) {
1481 dev_dbg(musb->controller, 1468 dev_dbg(musb->controller,
1482 "%s: hw_ep %d%s, %smax %d\n", 1469 "%s: hw_ep %d%s, %smax %d\n",
1483 musb_driver_name, i, 1470 musb_driver_name, i,
1484 hw_ep->is_shared_fifo ? "shared" : "tx", 1471 hw_ep->is_shared_fifo ? "shared" : "tx",
1485 hw_ep->tx_double_buffered 1472 hw_ep->tx_double_buffered
1486 ? "doublebuffer, " : "", 1473 ? "doublebuffer, " : "",
1487 hw_ep->max_packet_sz_tx); 1474 hw_ep->max_packet_sz_tx);
1488 } 1475 }
1489 if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) { 1476 if (hw_ep->max_packet_sz_rx && !hw_ep->is_shared_fifo) {
1490 dev_dbg(musb->controller, 1477 dev_dbg(musb->controller,
1491 "%s: hw_ep %d%s, %smax %d\n", 1478 "%s: hw_ep %d%s, %smax %d\n",
1492 musb_driver_name, i, 1479 musb_driver_name, i,
1493 "rx", 1480 "rx",
1494 hw_ep->rx_double_buffered 1481 hw_ep->rx_double_buffered
1495 ? "doublebuffer, " : "", 1482 ? "doublebuffer, " : "",
1496 hw_ep->max_packet_sz_rx); 1483 hw_ep->max_packet_sz_rx);
1497 } 1484 }
1498 if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx)) 1485 if (!(hw_ep->max_packet_sz_tx || hw_ep->max_packet_sz_rx))
1499 dev_dbg(musb->controller, "hw_ep %d not configured\n", i); 1486 dev_dbg(musb->controller, "hw_ep %d not configured\n", i);
1500 } 1487 }
1501 1488
1502 return 0; 1489 return 0;
1503 } 1490 }
1504 1491
1505 /*-------------------------------------------------------------------------*/ 1492 /*-------------------------------------------------------------------------*/
1506 1493
1507 #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \ 1494 #if defined(CONFIG_SOC_OMAP2430) || defined(CONFIG_SOC_OMAP3430) || \
1508 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500) 1495 defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_ARCH_U8500)
1509 1496
1510 static irqreturn_t generic_interrupt(int irq, void *__hci) 1497 static irqreturn_t generic_interrupt(int irq, void *__hci)
1511 { 1498 {
1512 unsigned long flags; 1499 unsigned long flags;
1513 irqreturn_t retval = IRQ_NONE; 1500 irqreturn_t retval = IRQ_NONE;
1514 struct musb *musb = __hci; 1501 struct musb *musb = __hci;
1515 1502
1516 spin_lock_irqsave(&musb->lock, flags); 1503 spin_lock_irqsave(&musb->lock, flags);
1517 1504
1518 musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); 1505 musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB);
1519 musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); 1506 musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX);
1520 musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); 1507 musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX);
1521 1508
1522 if (musb->int_usb || musb->int_tx || musb->int_rx) 1509 if (musb->int_usb || musb->int_tx || musb->int_rx)
1523 retval = musb_interrupt(musb); 1510 retval = musb_interrupt(musb);
1524 1511
1525 spin_unlock_irqrestore(&musb->lock, flags); 1512 spin_unlock_irqrestore(&musb->lock, flags);
1526 1513
1527 return retval; 1514 return retval;
1528 } 1515 }
1529 1516
1530 #else 1517 #else
1531 #define generic_interrupt NULL 1518 #define generic_interrupt NULL
1532 #endif 1519 #endif
1533 1520
1534 /* 1521 /*
1535 * handle all the irqs defined by the HDRC core. for now we expect: other 1522 * handle all the irqs defined by the HDRC core. for now we expect: other
1536 * irq sources (phy, dma, etc) will be handled first, musb->int_* values 1523 * irq sources (phy, dma, etc) will be handled first, musb->int_* values
1537 * will be assigned, and the irq will already have been acked. 1524 * will be assigned, and the irq will already have been acked.
1538 * 1525 *
1539 * called in irq context with spinlock held, irqs blocked 1526 * called in irq context with spinlock held, irqs blocked
1540 */ 1527 */
1541 irqreturn_t musb_interrupt(struct musb *musb) 1528 irqreturn_t musb_interrupt(struct musb *musb)
1542 { 1529 {
1543 irqreturn_t retval = IRQ_NONE; 1530 irqreturn_t retval = IRQ_NONE;
1544 u8 devctl, power; 1531 u8 devctl, power;
1545 int ep_num; 1532 int ep_num;
1546 u32 reg; 1533 u32 reg;
1547 1534
1548 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 1535 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1549 power = musb_readb(musb->mregs, MUSB_POWER); 1536 power = musb_readb(musb->mregs, MUSB_POWER);
1550 1537
1551 dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n", 1538 dev_dbg(musb->controller, "** IRQ %s usb%04x tx%04x rx%04x\n",
1552 (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral", 1539 (devctl & MUSB_DEVCTL_HM) ? "host" : "peripheral",
1553 musb->int_usb, musb->int_tx, musb->int_rx); 1540 musb->int_usb, musb->int_tx, musb->int_rx);
1554 1541
1555 /* the core can interrupt us for multiple reasons; docs have 1542 /* the core can interrupt us for multiple reasons; docs have
1556 * a generic interrupt flowchart to follow 1543 * a generic interrupt flowchart to follow
1557 */ 1544 */
1558 if (musb->int_usb) 1545 if (musb->int_usb)
1559 retval |= musb_stage0_irq(musb, musb->int_usb, 1546 retval |= musb_stage0_irq(musb, musb->int_usb,
1560 devctl, power); 1547 devctl, power);
1561 1548
1562 /* "stage 1" is handling endpoint irqs */ 1549 /* "stage 1" is handling endpoint irqs */
1563 1550
1564 /* handle endpoint 0 first */ 1551 /* handle endpoint 0 first */
1565 if (musb->int_tx & 1) { 1552 if (musb->int_tx & 1) {
1566 if (devctl & MUSB_DEVCTL_HM) 1553 if (devctl & MUSB_DEVCTL_HM)
1567 retval |= musb_h_ep0_irq(musb); 1554 retval |= musb_h_ep0_irq(musb);
1568 else 1555 else
1569 retval |= musb_g_ep0_irq(musb); 1556 retval |= musb_g_ep0_irq(musb);
1570 } 1557 }
1571 1558
1572 /* RX on endpoints 1-15 */ 1559 /* RX on endpoints 1-15 */
1573 reg = musb->int_rx >> 1; 1560 reg = musb->int_rx >> 1;
1574 ep_num = 1; 1561 ep_num = 1;
1575 while (reg) { 1562 while (reg) {
1576 if (reg & 1) { 1563 if (reg & 1) {
1577 /* musb_ep_select(musb->mregs, ep_num); */ 1564 /* musb_ep_select(musb->mregs, ep_num); */
1578 /* REVISIT just retval = ep->rx_irq(...) */ 1565 /* REVISIT just retval = ep->rx_irq(...) */
1579 retval = IRQ_HANDLED; 1566 retval = IRQ_HANDLED;
1580 if (devctl & MUSB_DEVCTL_HM) { 1567 if (devctl & MUSB_DEVCTL_HM) {
1581 if (is_host_capable()) 1568 if (is_host_capable())
1582 musb_host_rx(musb, ep_num); 1569 musb_host_rx(musb, ep_num);
1583 } else { 1570 } else {
1584 if (is_peripheral_capable()) 1571 if (is_peripheral_capable())
1585 musb_g_rx(musb, ep_num); 1572 musb_g_rx(musb, ep_num);
1586 } 1573 }
1587 } 1574 }
1588 1575
1589 reg >>= 1; 1576 reg >>= 1;
1590 ep_num++; 1577 ep_num++;
1591 } 1578 }
1592 1579
1593 /* TX on endpoints 1-15 */ 1580 /* TX on endpoints 1-15 */
1594 reg = musb->int_tx >> 1; 1581 reg = musb->int_tx >> 1;
1595 ep_num = 1; 1582 ep_num = 1;
1596 while (reg) { 1583 while (reg) {
1597 if (reg & 1) { 1584 if (reg & 1) {
1598 /* musb_ep_select(musb->mregs, ep_num); */ 1585 /* musb_ep_select(musb->mregs, ep_num); */
1599 /* REVISIT just retval |= ep->tx_irq(...) */ 1586 /* REVISIT just retval |= ep->tx_irq(...) */
1600 retval = IRQ_HANDLED; 1587 retval = IRQ_HANDLED;
1601 if (devctl & MUSB_DEVCTL_HM) { 1588 if (devctl & MUSB_DEVCTL_HM) {
1602 if (is_host_capable()) 1589 if (is_host_capable())
1603 musb_host_tx(musb, ep_num); 1590 musb_host_tx(musb, ep_num);
1604 } else { 1591 } else {
1605 if (is_peripheral_capable()) 1592 if (is_peripheral_capable())
1606 musb_g_tx(musb, ep_num); 1593 musb_g_tx(musb, ep_num);
1607 } 1594 }
1608 } 1595 }
1609 reg >>= 1; 1596 reg >>= 1;
1610 ep_num++; 1597 ep_num++;
1611 } 1598 }
1612 1599
1613 return retval; 1600 return retval;
1614 } 1601 }
1615 EXPORT_SYMBOL_GPL(musb_interrupt); 1602 EXPORT_SYMBOL_GPL(musb_interrupt);
1616 1603
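The endpoint dispatch above follows one pattern for both directions: bit 0 of the TX interrupt word is ep0 and is handled separately, while the remaining bits of the RX/TX words are shifted down by one and walked so that bit N maps to endpoint N. A minimal standalone sketch of that bit walk, with made-up interrupt values:

#include <stdio.h>
#include <stdint.h>

/* Walk an interrupt word the way musb_interrupt() does for endpoints 1..15. */
static void dispatch(uint16_t int_word, const char *dir)
{
	uint32_t reg = int_word >> 1;	/* skip bit 0 (ep0), handled elsewhere */
	int ep_num = 1;

	while (reg) {
		if (reg & 1)
			printf("%s irq on ep%d\n", dir, ep_num);
		reg >>= 1;
		ep_num++;
	}
}

int main(void)
{
	uint16_t int_tx = 0x0009;	/* example: ep0 and ep3 TX pending */
	uint16_t int_rx = 0x0004;	/* example: ep2 RX pending */

	if (int_tx & 1)
		printf("ep0 irq\n");
	dispatch(int_rx, "rx");
	dispatch(int_tx, "tx");
	return 0;
}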
1617 #ifndef CONFIG_MUSB_PIO_ONLY 1604 #ifndef CONFIG_MUSB_PIO_ONLY
1618 static bool __devinitdata use_dma = 1; 1605 static bool __devinitdata use_dma = 1;
1619 1606
1620 /* "modprobe ... use_dma=0" etc */ 1607 /* "modprobe ... use_dma=0" etc */
1621 module_param(use_dma, bool, 0); 1608 module_param(use_dma, bool, 0);
1622 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA"); 1609 MODULE_PARM_DESC(use_dma, "enable/disable use of DMA");
1623 1610
1624 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit) 1611 void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
1625 { 1612 {
1626 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 1613 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
1627 1614
1628 /* called with controller lock already held */ 1615 /* called with controller lock already held */
1629 1616
1630 if (!epnum) { 1617 if (!epnum) {
1631 #ifndef CONFIG_USB_TUSB_OMAP_DMA 1618 #ifndef CONFIG_USB_TUSB_OMAP_DMA
1632 if (!is_cppi_enabled()) { 1619 if (!is_cppi_enabled()) {
1633 /* endpoint 0 */ 1620 /* endpoint 0 */
1634 if (devctl & MUSB_DEVCTL_HM) 1621 if (devctl & MUSB_DEVCTL_HM)
1635 musb_h_ep0_irq(musb); 1622 musb_h_ep0_irq(musb);
1636 else 1623 else
1637 musb_g_ep0_irq(musb); 1624 musb_g_ep0_irq(musb);
1638 } 1625 }
1639 #endif 1626 #endif
1640 } else { 1627 } else {
1641 /* endpoints 1..15 */ 1628 /* endpoints 1..15 */
1642 if (transmit) { 1629 if (transmit) {
1643 if (devctl & MUSB_DEVCTL_HM) { 1630 if (devctl & MUSB_DEVCTL_HM) {
1644 if (is_host_capable()) 1631 if (is_host_capable())
1645 musb_host_tx(musb, epnum); 1632 musb_host_tx(musb, epnum);
1646 } else { 1633 } else {
1647 if (is_peripheral_capable()) 1634 if (is_peripheral_capable())
1648 musb_g_tx(musb, epnum); 1635 musb_g_tx(musb, epnum);
1649 } 1636 }
1650 } else { 1637 } else {
1651 /* receive */ 1638 /* receive */
1652 if (devctl & MUSB_DEVCTL_HM) { 1639 if (devctl & MUSB_DEVCTL_HM) {
1653 if (is_host_capable()) 1640 if (is_host_capable())
1654 musb_host_rx(musb, epnum); 1641 musb_host_rx(musb, epnum);
1655 } else { 1642 } else {
1656 if (is_peripheral_capable()) 1643 if (is_peripheral_capable())
1657 musb_g_rx(musb, epnum); 1644 musb_g_rx(musb, epnum);
1658 } 1645 }
1659 } 1646 }
1660 } 1647 }
1661 } 1648 }
1662 EXPORT_SYMBOL_GPL(musb_dma_completion); 1649 EXPORT_SYMBOL_GPL(musb_dma_completion);
1663 1650
1664 #else 1651 #else
1665 #define use_dma 0 1652 #define use_dma 0
1666 #endif 1653 #endif
1667 1654
1668 /*-------------------------------------------------------------------------*/ 1655 /*-------------------------------------------------------------------------*/
1669 1656
1670 #ifdef CONFIG_SYSFS 1657 #ifdef CONFIG_SYSFS
1671 1658
1672 static ssize_t 1659 static ssize_t
1673 musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf) 1660 musb_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
1674 { 1661 {
1675 struct musb *musb = dev_to_musb(dev); 1662 struct musb *musb = dev_to_musb(dev);
1676 unsigned long flags; 1663 unsigned long flags;
1677 int ret = -EINVAL; 1664 int ret = -EINVAL;
1678 1665
1679 spin_lock_irqsave(&musb->lock, flags); 1666 spin_lock_irqsave(&musb->lock, flags);
1680 ret = sprintf(buf, "%s\n", otg_state_string(musb->xceiv->state)); 1667 ret = sprintf(buf, "%s\n", otg_state_string(musb->xceiv->state));
1681 spin_unlock_irqrestore(&musb->lock, flags); 1668 spin_unlock_irqrestore(&musb->lock, flags);
1682 1669
1683 return ret; 1670 return ret;
1684 } 1671 }
1685 1672
1686 static ssize_t 1673 static ssize_t
1687 musb_mode_store(struct device *dev, struct device_attribute *attr, 1674 musb_mode_store(struct device *dev, struct device_attribute *attr,
1688 const char *buf, size_t n) 1675 const char *buf, size_t n)
1689 { 1676 {
1690 struct musb *musb = dev_to_musb(dev); 1677 struct musb *musb = dev_to_musb(dev);
1691 unsigned long flags; 1678 unsigned long flags;
1692 int status; 1679 int status;
1693 1680
1694 spin_lock_irqsave(&musb->lock, flags); 1681 spin_lock_irqsave(&musb->lock, flags);
1695 if (sysfs_streq(buf, "host")) 1682 if (sysfs_streq(buf, "host"))
1696 status = musb_platform_set_mode(musb, MUSB_HOST); 1683 status = musb_platform_set_mode(musb, MUSB_HOST);
1697 else if (sysfs_streq(buf, "peripheral")) 1684 else if (sysfs_streq(buf, "peripheral"))
1698 status = musb_platform_set_mode(musb, MUSB_PERIPHERAL); 1685 status = musb_platform_set_mode(musb, MUSB_PERIPHERAL);
1699 else if (sysfs_streq(buf, "otg")) 1686 else if (sysfs_streq(buf, "otg"))
1700 status = musb_platform_set_mode(musb, MUSB_OTG); 1687 status = musb_platform_set_mode(musb, MUSB_OTG);
1701 else 1688 else
1702 status = -EINVAL; 1689 status = -EINVAL;
1703 spin_unlock_irqrestore(&musb->lock, flags); 1690 spin_unlock_irqrestore(&musb->lock, flags);
1704 1691
1705 return (status == 0) ? n : status; 1692 return (status == 0) ? n : status;
1706 } 1693 }
1707 static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store); 1694 static DEVICE_ATTR(mode, 0644, musb_mode_show, musb_mode_store);
1708 1695
1709 static ssize_t 1696 static ssize_t
1710 musb_vbus_store(struct device *dev, struct device_attribute *attr, 1697 musb_vbus_store(struct device *dev, struct device_attribute *attr,
1711 const char *buf, size_t n) 1698 const char *buf, size_t n)
1712 { 1699 {
1713 struct musb *musb = dev_to_musb(dev); 1700 struct musb *musb = dev_to_musb(dev);
1714 unsigned long flags; 1701 unsigned long flags;
1715 unsigned long val; 1702 unsigned long val;
1716 1703
1717 if (sscanf(buf, "%lu", &val) < 1) { 1704 if (sscanf(buf, "%lu", &val) < 1) {
1718 dev_err(dev, "Invalid VBUS timeout ms value\n"); 1705 dev_err(dev, "Invalid VBUS timeout ms value\n");
1719 return -EINVAL; 1706 return -EINVAL;
1720 } 1707 }
1721 1708
1722 spin_lock_irqsave(&musb->lock, flags); 1709 spin_lock_irqsave(&musb->lock, flags);
1723 /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */ 1710 /* force T(a_wait_bcon) to be zero/unlimited *OR* valid */
1724 musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ; 1711 musb->a_wait_bcon = val ? max_t(int, val, OTG_TIME_A_WAIT_BCON) : 0 ;
1725 if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON) 1712 if (musb->xceiv->state == OTG_STATE_A_WAIT_BCON)
1726 musb->is_active = 0; 1713 musb->is_active = 0;
1727 musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val)); 1714 musb_platform_try_idle(musb, jiffies + msecs_to_jiffies(val));
1728 spin_unlock_irqrestore(&musb->lock, flags); 1715 spin_unlock_irqrestore(&musb->lock, flags);
1729 1716
1730 return n; 1717 return n;
1731 } 1718 }
1732 1719
1733 static ssize_t 1720 static ssize_t
1734 musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf) 1721 musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1735 { 1722 {
1736 struct musb *musb = dev_to_musb(dev); 1723 struct musb *musb = dev_to_musb(dev);
1737 unsigned long flags; 1724 unsigned long flags;
1738 unsigned long val; 1725 unsigned long val;
1739 int vbus; 1726 int vbus;
1740 1727
1741 spin_lock_irqsave(&musb->lock, flags); 1728 spin_lock_irqsave(&musb->lock, flags);
1742 val = musb->a_wait_bcon; 1729 val = musb->a_wait_bcon;
1743 /* FIXME get_vbus_status() is normally #defined as false... 1730 /* FIXME get_vbus_status() is normally #defined as false...
1744 * and is effectively TUSB-specific. 1731 * and is effectively TUSB-specific.
1745 */ 1732 */
1746 vbus = musb_platform_get_vbus_status(musb); 1733 vbus = musb_platform_get_vbus_status(musb);
1747 spin_unlock_irqrestore(&musb->lock, flags); 1734 spin_unlock_irqrestore(&musb->lock, flags);
1748 1735
1749 return sprintf(buf, "Vbus %s, timeout %lu msec\n", 1736 return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1750 vbus ? "on" : "off", val); 1737 vbus ? "on" : "off", val);
1751 } 1738 }
1752 static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store); 1739 static DEVICE_ATTR(vbus, 0644, musb_vbus_show, musb_vbus_store);
1753 1740
1754 /* Gadget drivers can't know that a host is connected so they might want 1741 /* Gadget drivers can't know that a host is connected so they might want
1755 * to start SRP, but users can. This allows userspace to trigger SRP. 1742 * to start SRP, but users can. This allows userspace to trigger SRP.
1756 */ 1743 */
1757 static ssize_t 1744 static ssize_t
1758 musb_srp_store(struct device *dev, struct device_attribute *attr, 1745 musb_srp_store(struct device *dev, struct device_attribute *attr,
1759 const char *buf, size_t n) 1746 const char *buf, size_t n)
1760 { 1747 {
1761 struct musb *musb = dev_to_musb(dev); 1748 struct musb *musb = dev_to_musb(dev);
1762 unsigned short srp; 1749 unsigned short srp;
1763 1750
1764 if (sscanf(buf, "%hu", &srp) != 1 1751 if (sscanf(buf, "%hu", &srp) != 1
1765 || (srp != 1)) { 1752 || (srp != 1)) {
1766 dev_err(dev, "SRP: Value must be 1\n"); 1753 dev_err(dev, "SRP: Value must be 1\n");
1767 return -EINVAL; 1754 return -EINVAL;
1768 } 1755 }
1769 1756
1770 if (srp == 1) 1757 if (srp == 1)
1771 musb_g_wakeup(musb); 1758 musb_g_wakeup(musb);
1772 1759
1773 return n; 1760 return n;
1774 } 1761 }
1775 static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store); 1762 static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
1776 1763
1777 static struct attribute *musb_attributes[] = { 1764 static struct attribute *musb_attributes[] = {
1778 &dev_attr_mode.attr, 1765 &dev_attr_mode.attr,
1779 &dev_attr_vbus.attr, 1766 &dev_attr_vbus.attr,
1780 &dev_attr_srp.attr, 1767 &dev_attr_srp.attr,
1781 NULL 1768 NULL
1782 }; 1769 };
1783 1770
1784 static const struct attribute_group musb_attr_group = { 1771 static const struct attribute_group musb_attr_group = {
1785 .attrs = musb_attributes, 1772 .attrs = musb_attributes,
1786 }; 1773 };
1787 1774
1788 #endif /* sysfs */ 1775 #endif /* sysfs */
1789 1776
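The attributes registered above are what userspace pokes to switch roles or trigger SRP; writing "host", "peripheral" or "otg" lands in musb_mode_store(), anything else returns -EINVAL. A minimal sketch of driving the "mode" attribute from userspace follows; the sysfs path is an assumption, since the real location depends on the platform device name and bus.

#include <stdio.h>

int main(void)
{
	/* hypothetical path; the actual one depends on how the glue layer names the device */
	const char *path = "/sys/devices/platform/musb-hdrc/mode";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* accepted values: "host", "peripheral", "otg" */
	fputs("otg\n", f);
	fclose(f);
	return 0;
}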
1790 /* Only used to provide driver mode change events */ 1777 /* Only used to provide driver mode change events */
1791 static void musb_irq_work(struct work_struct *data) 1778 static void musb_irq_work(struct work_struct *data)
1792 { 1779 {
1793 struct musb *musb = container_of(data, struct musb, irq_work); 1780 struct musb *musb = container_of(data, struct musb, irq_work);
1794 static int old_state; 1781 static int old_state;
1795 1782
1796 if (musb->xceiv->state != old_state) { 1783 if (musb->xceiv->state != old_state) {
1797 old_state = musb->xceiv->state; 1784 old_state = musb->xceiv->state;
1798 sysfs_notify(&musb->controller->kobj, NULL, "mode"); 1785 sysfs_notify(&musb->controller->kobj, NULL, "mode");
1799 } 1786 }
1800 } 1787 }
1801 1788
1802 /* -------------------------------------------------------------------------- 1789 /* --------------------------------------------------------------------------
1803 * Init support 1790 * Init support
1804 */ 1791 */
1805 1792
1806 static struct musb *__devinit 1793 static struct musb *__devinit
1807 allocate_instance(struct device *dev, 1794 allocate_instance(struct device *dev,
1808 struct musb_hdrc_config *config, void __iomem *mbase) 1795 struct musb_hdrc_config *config, void __iomem *mbase)
1809 { 1796 {
1810 struct musb *musb; 1797 struct musb *musb;
1811 struct musb_hw_ep *ep; 1798 struct musb_hw_ep *ep;
1812 int epnum; 1799 int epnum;
1813 struct usb_hcd *hcd; 1800 struct usb_hcd *hcd;
1814 1801
1815 hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev)); 1802 hcd = usb_create_hcd(&musb_hc_driver, dev, dev_name(dev));
1816 if (!hcd) 1803 if (!hcd)
1817 return NULL; 1804 return NULL;
1818 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */ 1805 /* usbcore sets dev->driver_data to hcd, and sometimes uses that... */
1819 1806
1820 musb = hcd_to_musb(hcd); 1807 musb = hcd_to_musb(hcd);
1821 INIT_LIST_HEAD(&musb->control); 1808 INIT_LIST_HEAD(&musb->control);
1822 INIT_LIST_HEAD(&musb->in_bulk); 1809 INIT_LIST_HEAD(&musb->in_bulk);
1823 INIT_LIST_HEAD(&musb->out_bulk); 1810 INIT_LIST_HEAD(&musb->out_bulk);
1824 1811
1825 hcd->uses_new_polling = 1; 1812 hcd->uses_new_polling = 1;
1826 hcd->has_tt = 1; 1813 hcd->has_tt = 1;
1827 1814
1828 musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 1815 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1829 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; 1816 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
1830 dev_set_drvdata(dev, musb); 1817 dev_set_drvdata(dev, musb);
1831 musb->mregs = mbase; 1818 musb->mregs = mbase;
1832 musb->ctrl_base = mbase; 1819 musb->ctrl_base = mbase;
1833 musb->nIrq = -ENODEV; 1820 musb->nIrq = -ENODEV;
1834 musb->config = config; 1821 musb->config = config;
1835 BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS); 1822 BUG_ON(musb->config->num_eps > MUSB_C_NUM_EPS);
1836 for (epnum = 0, ep = musb->endpoints; 1823 for (epnum = 0, ep = musb->endpoints;
1837 epnum < musb->config->num_eps; 1824 epnum < musb->config->num_eps;
1838 epnum++, ep++) { 1825 epnum++, ep++) {
1839 ep->musb = musb; 1826 ep->musb = musb;
1840 ep->epnum = epnum; 1827 ep->epnum = epnum;
1841 } 1828 }
1842 1829
1843 musb->controller = dev; 1830 musb->controller = dev;
1844 1831
1845 return musb; 1832 return musb;
1846 } 1833 }
1847 1834
1848 static void musb_free(struct musb *musb) 1835 static void musb_free(struct musb *musb)
1849 { 1836 {
1850 /* this has multiple entry modes. it handles fault cleanup after 1837 /* this has multiple entry modes. it handles fault cleanup after
1851 * probe(), where things may be partially set up, as well as rmmod 1838 * probe(), where things may be partially set up, as well as rmmod
1852 * cleanup after everything's been de-activated. 1839 * cleanup after everything's been de-activated.
1853 */ 1840 */
1854 1841
1855 #ifdef CONFIG_SYSFS 1842 #ifdef CONFIG_SYSFS
1856 sysfs_remove_group(&musb->controller->kobj, &musb_attr_group); 1843 sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
1857 #endif 1844 #endif
1858 1845
1859 if (musb->nIrq >= 0) { 1846 if (musb->nIrq >= 0) {
1860 if (musb->irq_wake) 1847 if (musb->irq_wake)
1861 disable_irq_wake(musb->nIrq); 1848 disable_irq_wake(musb->nIrq);
1862 free_irq(musb->nIrq, musb); 1849 free_irq(musb->nIrq, musb);
1863 } 1850 }
1864 if (is_dma_capable() && musb->dma_controller) { 1851 if (is_dma_capable() && musb->dma_controller) {
1865 struct dma_controller *c = musb->dma_controller; 1852 struct dma_controller *c = musb->dma_controller;
1866 1853
1867 (void) c->stop(c); 1854 (void) c->stop(c);
1868 dma_controller_destroy(c); 1855 dma_controller_destroy(c);
1869 } 1856 }
1870 1857
1871 usb_put_hcd(musb_to_hcd(musb)); 1858 usb_put_hcd(musb_to_hcd(musb));
1872 } 1859 }
1873 1860
1874 /* 1861 /*
1875 * Perform generic per-controller initialization. 1862 * Perform generic per-controller initialization.
1876 * 1863 *
1877 * @pDevice: the controller (already clocked, etc) 1864 * @pDevice: the controller (already clocked, etc)
1878 * @nIrq: irq 1865 * @nIrq: irq
1879 * @mregs: virtual address of controller registers, 1866 * @mregs: virtual address of controller registers,
1880 * not yet corrected for platform-specific offsets 1867 * not yet corrected for platform-specific offsets
1881 */ 1868 */
1882 static int __devinit 1869 static int __devinit
1883 musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) 1870 musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
1884 { 1871 {
1885 int status; 1872 int status;
1886 struct musb *musb; 1873 struct musb *musb;
1887 struct musb_hdrc_platform_data *plat = dev->platform_data; 1874 struct musb_hdrc_platform_data *plat = dev->platform_data;
1875 struct usb_hcd *hcd;
1888 1876
1889 /* The driver might handle more features than the board; OK. 1877 /* The driver might handle more features than the board; OK.
1890 * Fail when the board needs a feature that's not enabled. 1878 * Fail when the board needs a feature that's not enabled.
1891 */ 1879 */
1892 if (!plat) { 1880 if (!plat) {
1893 dev_dbg(dev, "no platform_data?\n"); 1881 dev_dbg(dev, "no platform_data?\n");
1894 status = -ENODEV; 1882 status = -ENODEV;
1895 goto fail0; 1883 goto fail0;
1896 } 1884 }
1897 1885
1898 /* allocate */ 1886 /* allocate */
1899 musb = allocate_instance(dev, plat->config, ctrl); 1887 musb = allocate_instance(dev, plat->config, ctrl);
1900 if (!musb) { 1888 if (!musb) {
1901 status = -ENOMEM; 1889 status = -ENOMEM;
1902 goto fail0; 1890 goto fail0;
1903 } 1891 }
1904 1892
1905 pm_runtime_use_autosuspend(musb->controller); 1893 pm_runtime_use_autosuspend(musb->controller);
1906 pm_runtime_set_autosuspend_delay(musb->controller, 200); 1894 pm_runtime_set_autosuspend_delay(musb->controller, 200);
1907 pm_runtime_enable(musb->controller); 1895 pm_runtime_enable(musb->controller);
1908 1896
1909 spin_lock_init(&musb->lock); 1897 spin_lock_init(&musb->lock);
1910 musb->board_mode = plat->mode;
1911 musb->board_set_power = plat->set_power; 1898 musb->board_set_power = plat->set_power;
1912 musb->min_power = plat->min_power; 1899 musb->min_power = plat->min_power;
1913 musb->ops = plat->platform_ops; 1900 musb->ops = plat->platform_ops;
1914 1901
1915 /* The musb_platform_init() call: 1902 /* The musb_platform_init() call:
1916 * - adjusts musb->mregs and musb->isr if needed, 1903 * - adjusts musb->mregs and musb->isr if needed,
1917 * - may initialize an integrated transceiver 1904 * - may initialize an integrated transceiver
1918 * - initializes musb->xceiv, usually by otg_get_phy() 1905 * - initializes musb->xceiv, usually by otg_get_phy()
1919 * - stops powering VBUS 1906 * - stops powering VBUS
1920 * 1907 *
1921 * There are various transceiver configurations. Blackfin, 1908 * There are various transceiver configurations. Blackfin,
1922 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses 1909 * DaVinci, TUSB60x0, and others integrate them. OMAP3 uses
1923 * external/discrete ones in various flavors (twl4030 family, 1910 * external/discrete ones in various flavors (twl4030 family,
1924 * isp1504, non-OTG, etc) mostly hooking up through ULPI. 1911 * isp1504, non-OTG, etc) mostly hooking up through ULPI.
1925 */ 1912 */
1926 musb->isr = generic_interrupt; 1913 musb->isr = generic_interrupt;
1927 status = musb_platform_init(musb); 1914 status = musb_platform_init(musb);
1928 if (status < 0) 1915 if (status < 0)
1929 goto fail1; 1916 goto fail1;
1930 1917
1931 if (!musb->isr) { 1918 if (!musb->isr) {
1932 status = -ENODEV; 1919 status = -ENODEV;
1933 goto fail2; 1920 goto fail2;
1934 } 1921 }
1935 1922
1936 if (!musb->xceiv->io_ops) { 1923 if (!musb->xceiv->io_ops) {
1937 musb->xceiv->io_dev = musb->controller; 1924 musb->xceiv->io_dev = musb->controller;
1938 musb->xceiv->io_priv = musb->mregs; 1925 musb->xceiv->io_priv = musb->mregs;
1939 musb->xceiv->io_ops = &musb_ulpi_access; 1926 musb->xceiv->io_ops = &musb_ulpi_access;
1940 } 1927 }
1941 1928
1942 pm_runtime_get_sync(musb->controller); 1929 pm_runtime_get_sync(musb->controller);
1943 1930
1944 #ifndef CONFIG_MUSB_PIO_ONLY 1931 #ifndef CONFIG_MUSB_PIO_ONLY
1945 if (use_dma && dev->dma_mask) { 1932 if (use_dma && dev->dma_mask) {
1946 struct dma_controller *c; 1933 struct dma_controller *c;
1947 1934
1948 c = dma_controller_create(musb, musb->mregs); 1935 c = dma_controller_create(musb, musb->mregs);
1949 musb->dma_controller = c; 1936 musb->dma_controller = c;
1950 if (c) 1937 if (c)
1951 (void) c->start(c); 1938 (void) c->start(c);
1952 } 1939 }
1953 #endif 1940 #endif
1954 /* ideally this would be abstracted in platform setup */ 1941 /* ideally this would be abstracted in platform setup */
1955 if (!is_dma_capable() || !musb->dma_controller) 1942 if (!is_dma_capable() || !musb->dma_controller)
1956 dev->dma_mask = NULL; 1943 dev->dma_mask = NULL;
1957 1944
1958 /* be sure interrupts are disabled before connecting ISR */ 1945 /* be sure interrupts are disabled before connecting ISR */
1959 musb_platform_disable(musb); 1946 musb_platform_disable(musb);
1960 musb_generic_disable(musb); 1947 musb_generic_disable(musb);
1961 1948
1962 /* setup musb parts of the core (especially endpoints) */ 1949 /* setup musb parts of the core (especially endpoints) */
1963 status = musb_core_init(plat->config->multipoint 1950 status = musb_core_init(plat->config->multipoint
1964 ? MUSB_CONTROLLER_MHDRC 1951 ? MUSB_CONTROLLER_MHDRC
1965 : MUSB_CONTROLLER_HDRC, musb); 1952 : MUSB_CONTROLLER_HDRC, musb);
1966 if (status < 0) 1953 if (status < 0)
1967 goto fail3; 1954 goto fail3;
1968 1955
1969 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb); 1956 setup_timer(&musb->otg_timer, musb_otg_timer_func, (unsigned long) musb);
1970 1957
1971 /* Init IRQ workqueue before request_irq */ 1958 /* Init IRQ workqueue before request_irq */
1972 INIT_WORK(&musb->irq_work, musb_irq_work); 1959 INIT_WORK(&musb->irq_work, musb_irq_work);
1973 1960
1974 /* attach to the IRQ */ 1961 /* attach to the IRQ */
1975 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) { 1962 if (request_irq(nIrq, musb->isr, 0, dev_name(dev), musb)) {
1976 dev_err(dev, "request_irq %d failed!\n", nIrq); 1963 dev_err(dev, "request_irq %d failed!\n", nIrq);
1977 status = -ENODEV; 1964 status = -ENODEV;
1978 goto fail3; 1965 goto fail3;
1979 } 1966 }
1980 musb->nIrq = nIrq; 1967 musb->nIrq = nIrq;
1981 /* FIXME this handles wakeup irqs wrong */ 1968 /* FIXME this handles wakeup irqs wrong */
1982 if (enable_irq_wake(nIrq) == 0) { 1969 if (enable_irq_wake(nIrq) == 0) {
1983 musb->irq_wake = 1; 1970 musb->irq_wake = 1;
1984 device_init_wakeup(dev, 1); 1971 device_init_wakeup(dev, 1);
1985 } else { 1972 } else {
1986 musb->irq_wake = 0; 1973 musb->irq_wake = 0;
1987 } 1974 }
1988 1975
1989 /* host side needs more setup */ 1976 /* host side needs more setup */
1990 if (is_host_enabled(musb)) { 1977 hcd = musb_to_hcd(musb);
1991 struct usb_hcd *hcd = musb_to_hcd(musb); 1978 otg_set_host(musb->xceiv->otg, &hcd->self);
1979 hcd->self.otg_port = 1;
1980 musb->xceiv->otg->host = &hcd->self;
1981 hcd->power_budget = 2 * (plat->power ? : 250);
1992 1982
1993 otg_set_host(musb->xceiv->otg, &hcd->self); 1983 /* program PHY to use external vBus if required */
1994 1984 if (plat->extvbus) {
1995 if (is_otg_enabled(musb)) 1985 u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
1996 hcd->self.otg_port = 1; 1986 busctl |= MUSB_ULPI_USE_EXTVBUS;
1997 musb->xceiv->otg->host = &hcd->self; 1987 musb_write_ulpi_buscontrol(musb->mregs, busctl);
1998 hcd->power_budget = 2 * (plat->power ? : 250);
1999
2000 /* program PHY to use external vBus if required */
2001 if (plat->extvbus) {
2002 u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
2003 busctl |= MUSB_ULPI_USE_EXTVBUS;
2004 musb_write_ulpi_buscontrol(musb->mregs, busctl);
2005 }
2006 } 1988 }
2007 1989
2008 /* For the host-only role, we can activate right away. 1990 MUSB_DEV_MODE(musb);
2009 * (We expect the ID pin to be forcibly grounded!!) 1991 musb->xceiv->otg->default_a = 0;
2010 * Otherwise, wait till the gadget driver hooks up. 1992 musb->xceiv->state = OTG_STATE_B_IDLE;
2011 */
2012 if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
2013 struct usb_hcd *hcd = musb_to_hcd(musb);
2014 1993
2015 MUSB_HST_MODE(musb); 1994 status = musb_gadget_setup(musb);
2016 musb->xceiv->otg->default_a = 1;
2017 musb->xceiv->state = OTG_STATE_A_IDLE;
2018 1995
2019 status = usb_add_hcd(musb_to_hcd(musb), 0, 0);
2020
2021 hcd->self.uses_pio_for_control = 1;
2022 dev_dbg(musb->controller, "%s mode, status %d, devctl %02x %c\n",
2023 "HOST", status,
2024 musb_readb(musb->mregs, MUSB_DEVCTL),
2025 (musb_readb(musb->mregs, MUSB_DEVCTL)
2026 & MUSB_DEVCTL_BDEVICE
2027 ? 'B' : 'A'));
2028
2029 } else /* peripheral is enabled */ {
2030 MUSB_DEV_MODE(musb);
2031 musb->xceiv->otg->default_a = 0;
2032 musb->xceiv->state = OTG_STATE_B_IDLE;
2033
2034 status = musb_gadget_setup(musb);
2035
2036 dev_dbg(musb->controller, "%s mode, status %d, dev%02x\n",
2037 is_otg_enabled(musb) ? "OTG" : "PERIPHERAL",
2038 status,
2039 musb_readb(musb->mregs, MUSB_DEVCTL));
2040
2041 }
2042 if (status < 0) 1996 if (status < 0)
2043 goto fail3; 1997 goto fail3;
2044 1998
2045 status = musb_init_debugfs(musb); 1999 status = musb_init_debugfs(musb);
2046 if (status < 0) 2000 if (status < 0)
2047 goto fail4; 2001 goto fail4;
2048 2002
2049 #ifdef CONFIG_SYSFS 2003 #ifdef CONFIG_SYSFS
2050 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group); 2004 status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
2051 if (status) 2005 if (status)
2052 goto fail5; 2006 goto fail5;
2053 #endif 2007 #endif
2054 2008
2055 pm_runtime_put(musb->controller); 2009 pm_runtime_put(musb->controller);
2056 2010
2057 dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
2058 ({char *s;
2059 switch (musb->board_mode) {
2060 case MUSB_HOST: s = "Host"; break;
2061 case MUSB_PERIPHERAL: s = "Peripheral"; break;
2062 default: s = "OTG"; break;
2063 }; s; }),
2064 ctrl,
2065 (is_dma_capable() && musb->dma_controller)
2066 ? "DMA" : "PIO",
2067 musb->nIrq);
2068
2069 return 0; 2011 return 0;
2070 2012
2071 fail5: 2013 fail5:
2072 musb_exit_debugfs(musb); 2014 musb_exit_debugfs(musb);
2073 2015
2074 fail4: 2016 fail4:
2075 if (!is_otg_enabled(musb) && is_host_enabled(musb)) 2017 musb_gadget_cleanup(musb);
2076 usb_remove_hcd(musb_to_hcd(musb));
2077 else
2078 musb_gadget_cleanup(musb);
2079 2018
2080 fail3: 2019 fail3:
2081 pm_runtime_put_sync(musb->controller); 2020 pm_runtime_put_sync(musb->controller);
2082 2021
2083 fail2: 2022 fail2:
2084 if (musb->irq_wake) 2023 if (musb->irq_wake)
2085 device_init_wakeup(dev, 0); 2024 device_init_wakeup(dev, 0);
2086 musb_platform_exit(musb); 2025 musb_platform_exit(musb);
2087 2026
2088 fail1: 2027 fail1:
2089 dev_err(musb->controller, 2028 dev_err(musb->controller,
2090 "musb_init_controller failed with status %d\n", status); 2029 "musb_init_controller failed with status %d\n", status);
2091 2030
2092 musb_free(musb); 2031 musb_free(musb);
2093 2032
2094 fail0: 2033 fail0:
2095 2034
2096 return status; 2035 return status;
2097 2036
2098 } 2037 }
2099 2038
2100 /*-------------------------------------------------------------------------*/ 2039 /*-------------------------------------------------------------------------*/
2101 2040
2102 /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just 2041 /* all implementations (PCI bridge to FPGA, VLYNQ, etc) should just
2103 * bridge to a platform device; this driver then suffices. 2042 * bridge to a platform device; this driver then suffices.
2104 */ 2043 */
2105 2044
2106 #ifndef CONFIG_MUSB_PIO_ONLY 2045 #ifndef CONFIG_MUSB_PIO_ONLY
2107 static u64 *orig_dma_mask; 2046 static u64 *orig_dma_mask;
2108 #endif 2047 #endif
2109 2048
2110 static int __devinit musb_probe(struct platform_device *pdev) 2049 static int __devinit musb_probe(struct platform_device *pdev)
2111 { 2050 {
2112 struct device *dev = &pdev->dev; 2051 struct device *dev = &pdev->dev;
2113 int irq = platform_get_irq_byname(pdev, "mc"); 2052 int irq = platform_get_irq_byname(pdev, "mc");
2114 int status; 2053 int status;
2115 struct resource *iomem; 2054 struct resource *iomem;
2116 void __iomem *base; 2055 void __iomem *base;
2117 2056
2118 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2057 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2119 if (!iomem || irq <= 0) 2058 if (!iomem || irq <= 0)
2120 return -ENODEV; 2059 return -ENODEV;
2121 2060
2122 base = ioremap(iomem->start, resource_size(iomem)); 2061 base = ioremap(iomem->start, resource_size(iomem));
2123 if (!base) { 2062 if (!base) {
2124 dev_err(dev, "ioremap failed\n"); 2063 dev_err(dev, "ioremap failed\n");
2125 return -ENOMEM; 2064 return -ENOMEM;
2126 } 2065 }
2127 2066
2128 #ifndef CONFIG_MUSB_PIO_ONLY 2067 #ifndef CONFIG_MUSB_PIO_ONLY
2129 /* clobbered by use_dma=n */ 2068 /* clobbered by use_dma=n */
2130 orig_dma_mask = dev->dma_mask; 2069 orig_dma_mask = dev->dma_mask;
2131 #endif 2070 #endif
2132 status = musb_init_controller(dev, irq, base); 2071 status = musb_init_controller(dev, irq, base);
2133 if (status < 0) 2072 if (status < 0)
2134 iounmap(base); 2073 iounmap(base);
2135 2074
2136 return status; 2075 return status;
2137 } 2076 }
2138 2077
2139 static int __devexit musb_remove(struct platform_device *pdev) 2078 static int __devexit musb_remove(struct platform_device *pdev)
2140 { 2079 {
2141 struct musb *musb = dev_to_musb(&pdev->dev); 2080 struct musb *musb = dev_to_musb(&pdev->dev);
2142 void __iomem *ctrl_base = musb->ctrl_base; 2081 void __iomem *ctrl_base = musb->ctrl_base;
2143 2082
2144 /* this gets called on rmmod. 2083 /* this gets called on rmmod.
2145 * - Host mode: host may still be active 2084 * - Host mode: host may still be active
2146 * - Peripheral mode: peripheral is deactivated (or never-activated) 2085 * - Peripheral mode: peripheral is deactivated (or never-activated)
2147 * - OTG mode: both roles are deactivated (or never-activated) 2086 * - OTG mode: both roles are deactivated (or never-activated)
2148 */ 2087 */
2149 musb_exit_debugfs(musb); 2088 musb_exit_debugfs(musb);
2150 musb_shutdown(pdev); 2089 musb_shutdown(pdev);
2151 2090
2152 musb_free(musb); 2091 musb_free(musb);
2153 iounmap(ctrl_base); 2092 iounmap(ctrl_base);
2154 device_init_wakeup(&pdev->dev, 0); 2093 device_init_wakeup(&pdev->dev, 0);
2155 #ifndef CONFIG_MUSB_PIO_ONLY 2094 #ifndef CONFIG_MUSB_PIO_ONLY
2156 pdev->dev.dma_mask = orig_dma_mask; 2095 pdev->dev.dma_mask = orig_dma_mask;
2157 #endif 2096 #endif
2158 return 0; 2097 return 0;
2159 } 2098 }
2160 2099
2161 #ifdef CONFIG_PM 2100 #ifdef CONFIG_PM
2162 2101
2163 static void musb_save_context(struct musb *musb) 2102 static void musb_save_context(struct musb *musb)
2164 { 2103 {
2165 int i; 2104 int i;
2166 void __iomem *musb_base = musb->mregs; 2105 void __iomem *musb_base = musb->mregs;
2167 void __iomem *epio; 2106 void __iomem *epio;
2168 2107
2169 if (is_host_enabled(musb)) { 2108 musb->context.frame = musb_readw(musb_base, MUSB_FRAME);
2170 musb->context.frame = musb_readw(musb_base, MUSB_FRAME); 2109 musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
2171 musb->context.testmode = musb_readb(musb_base, MUSB_TESTMODE); 2110 musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
2172 musb->context.busctl = musb_read_ulpi_buscontrol(musb->mregs);
2173 }
2174 musb->context.power = musb_readb(musb_base, MUSB_POWER); 2111 musb->context.power = musb_readb(musb_base, MUSB_POWER);
2175 musb->context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE); 2112 musb->context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
2176 musb->context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE); 2113 musb->context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
2177 musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE); 2114 musb->context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
2178 musb->context.index = musb_readb(musb_base, MUSB_INDEX); 2115 musb->context.index = musb_readb(musb_base, MUSB_INDEX);
2179 musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL); 2116 musb->context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
2180 2117
2181 for (i = 0; i < musb->config->num_eps; ++i) { 2118 for (i = 0; i < musb->config->num_eps; ++i) {
2182 struct musb_hw_ep *hw_ep; 2119 struct musb_hw_ep *hw_ep;
2183 2120
2184 hw_ep = &musb->endpoints[i]; 2121 hw_ep = &musb->endpoints[i];
2185 if (!hw_ep) 2122 if (!hw_ep)
2186 continue; 2123 continue;
2187 2124
2188 epio = hw_ep->regs; 2125 epio = hw_ep->regs;
2189 if (!epio) 2126 if (!epio)
2190 continue; 2127 continue;
2191 2128
2192 musb_writeb(musb_base, MUSB_INDEX, i); 2129 musb_writeb(musb_base, MUSB_INDEX, i);
2193 musb->context.index_regs[i].txmaxp = 2130 musb->context.index_regs[i].txmaxp =
2194 musb_readw(epio, MUSB_TXMAXP); 2131 musb_readw(epio, MUSB_TXMAXP);
2195 musb->context.index_regs[i].txcsr = 2132 musb->context.index_regs[i].txcsr =
2196 musb_readw(epio, MUSB_TXCSR); 2133 musb_readw(epio, MUSB_TXCSR);
2197 musb->context.index_regs[i].rxmaxp = 2134 musb->context.index_regs[i].rxmaxp =
2198 musb_readw(epio, MUSB_RXMAXP); 2135 musb_readw(epio, MUSB_RXMAXP);
2199 musb->context.index_regs[i].rxcsr = 2136 musb->context.index_regs[i].rxcsr =
2200 musb_readw(epio, MUSB_RXCSR); 2137 musb_readw(epio, MUSB_RXCSR);
2201 2138
2202 if (musb->dyn_fifo) { 2139 if (musb->dyn_fifo) {
2203 musb->context.index_regs[i].txfifoadd = 2140 musb->context.index_regs[i].txfifoadd =
2204 musb_read_txfifoadd(musb_base); 2141 musb_read_txfifoadd(musb_base);
2205 musb->context.index_regs[i].rxfifoadd = 2142 musb->context.index_regs[i].rxfifoadd =
2206 musb_read_rxfifoadd(musb_base); 2143 musb_read_rxfifoadd(musb_base);
2207 musb->context.index_regs[i].txfifosz = 2144 musb->context.index_regs[i].txfifosz =
2208 musb_read_txfifosz(musb_base); 2145 musb_read_txfifosz(musb_base);
2209 musb->context.index_regs[i].rxfifosz = 2146 musb->context.index_regs[i].rxfifosz =
2210 musb_read_rxfifosz(musb_base); 2147 musb_read_rxfifosz(musb_base);
2211 } 2148 }
2212 if (is_host_enabled(musb)) {
2213 musb->context.index_regs[i].txtype =
2214 musb_readb(epio, MUSB_TXTYPE);
2215 musb->context.index_regs[i].txinterval =
2216 musb_readb(epio, MUSB_TXINTERVAL);
2217 musb->context.index_regs[i].rxtype =
2218 musb_readb(epio, MUSB_RXTYPE);
2219 musb->context.index_regs[i].rxinterval =
2220 musb_readb(epio, MUSB_RXINTERVAL);
2221 2149
2222 musb->context.index_regs[i].txfunaddr = 2150 musb->context.index_regs[i].txtype =
2223 musb_read_txfunaddr(musb_base, i); 2151 musb_readb(epio, MUSB_TXTYPE);
2224 musb->context.index_regs[i].txhubaddr = 2152 musb->context.index_regs[i].txinterval =
2225 musb_read_txhubaddr(musb_base, i); 2153 musb_readb(epio, MUSB_TXINTERVAL);
2226 musb->context.index_regs[i].txhubport = 2154 musb->context.index_regs[i].rxtype =
2227 musb_read_txhubport(musb_base, i); 2155 musb_readb(epio, MUSB_RXTYPE);
2156 musb->context.index_regs[i].rxinterval =
2157 musb_readb(epio, MUSB_RXINTERVAL);
2228 2158
2229 musb->context.index_regs[i].rxfunaddr = 2159 musb->context.index_regs[i].txfunaddr =
2230 musb_read_rxfunaddr(musb_base, i); 2160 musb_read_txfunaddr(musb_base, i);
2231 musb->context.index_regs[i].rxhubaddr = 2161 musb->context.index_regs[i].txhubaddr =
2232 musb_read_rxhubaddr(musb_base, i); 2162 musb_read_txhubaddr(musb_base, i);
2233 musb->context.index_regs[i].rxhubport = 2163 musb->context.index_regs[i].txhubport =
2234 musb_read_rxhubport(musb_base, i); 2164 musb_read_txhubport(musb_base, i);
2235 } 2165
2166 musb->context.index_regs[i].rxfunaddr =
2167 musb_read_rxfunaddr(musb_base, i);
2168 musb->context.index_regs[i].rxhubaddr =
2169 musb_read_rxhubaddr(musb_base, i);
2170 musb->context.index_regs[i].rxhubport =
2171 musb_read_rxhubport(musb_base, i);
2236 } 2172 }
2237 } 2173 }
2238 2174
2239 static void musb_restore_context(struct musb *musb) 2175 static void musb_restore_context(struct musb *musb)
2240 { 2176 {
2241 int i; 2177 int i;
2242 void __iomem *musb_base = musb->mregs; 2178 void __iomem *musb_base = musb->mregs;
2243 void __iomem *ep_target_regs; 2179 void __iomem *ep_target_regs;
2244 void __iomem *epio; 2180 void __iomem *epio;
2245 2181
2246 if (is_host_enabled(musb)) { 2182 musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
2247 musb_writew(musb_base, MUSB_FRAME, musb->context.frame); 2183 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
2248 musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode); 2184 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
2249 musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
2250 }
2251 musb_writeb(musb_base, MUSB_POWER, musb->context.power); 2185 musb_writeb(musb_base, MUSB_POWER, musb->context.power);
2252 musb_writew(musb_base, MUSB_INTRTXE, musb->context.intrtxe); 2186 musb_writew(musb_base, MUSB_INTRTXE, musb->context.intrtxe);
2253 musb_writew(musb_base, MUSB_INTRRXE, musb->context.intrrxe); 2187 musb_writew(musb_base, MUSB_INTRRXE, musb->context.intrrxe);
2254 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe); 2188 musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
2255 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl); 2189 musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
2256 2190
2257 for (i = 0; i < musb->config->num_eps; ++i) { 2191 for (i = 0; i < musb->config->num_eps; ++i) {
2258 struct musb_hw_ep *hw_ep; 2192 struct musb_hw_ep *hw_ep;
2259 2193
2260 hw_ep = &musb->endpoints[i]; 2194 hw_ep = &musb->endpoints[i];
2261 if (!hw_ep) 2195 if (!hw_ep)
2262 continue; 2196 continue;
2263 2197
2264 epio = hw_ep->regs; 2198 epio = hw_ep->regs;
2265 if (!epio) 2199 if (!epio)
2266 continue; 2200 continue;
2267 2201
2268 musb_writeb(musb_base, MUSB_INDEX, i); 2202 musb_writeb(musb_base, MUSB_INDEX, i);
2269 musb_writew(epio, MUSB_TXMAXP, 2203 musb_writew(epio, MUSB_TXMAXP,
2270 musb->context.index_regs[i].txmaxp); 2204 musb->context.index_regs[i].txmaxp);
2271 musb_writew(epio, MUSB_TXCSR, 2205 musb_writew(epio, MUSB_TXCSR,
2272 musb->context.index_regs[i].txcsr); 2206 musb->context.index_regs[i].txcsr);
2273 musb_writew(epio, MUSB_RXMAXP, 2207 musb_writew(epio, MUSB_RXMAXP,
2274 musb->context.index_regs[i].rxmaxp); 2208 musb->context.index_regs[i].rxmaxp);
2275 musb_writew(epio, MUSB_RXCSR, 2209 musb_writew(epio, MUSB_RXCSR,
2276 musb->context.index_regs[i].rxcsr); 2210 musb->context.index_regs[i].rxcsr);
2277 2211
2278 if (musb->dyn_fifo) { 2212 if (musb->dyn_fifo) {
2279 musb_write_txfifosz(musb_base, 2213 musb_write_txfifosz(musb_base,
2280 musb->context.index_regs[i].txfifosz); 2214 musb->context.index_regs[i].txfifosz);
2281 musb_write_rxfifosz(musb_base, 2215 musb_write_rxfifosz(musb_base,
2282 musb->context.index_regs[i].rxfifosz); 2216 musb->context.index_regs[i].rxfifosz);
2283 musb_write_txfifoadd(musb_base, 2217 musb_write_txfifoadd(musb_base,
2284 musb->context.index_regs[i].txfifoadd); 2218 musb->context.index_regs[i].txfifoadd);
2285 musb_write_rxfifoadd(musb_base, 2219 musb_write_rxfifoadd(musb_base,
2286 musb->context.index_regs[i].rxfifoadd); 2220 musb->context.index_regs[i].rxfifoadd);
2287 } 2221 }
2288 2222
2289 if (is_host_enabled(musb)) { 2223 musb_writeb(epio, MUSB_TXTYPE,
2290 musb_writeb(epio, MUSB_TXTYPE,
2291 musb->context.index_regs[i].txtype); 2224 musb->context.index_regs[i].txtype);
2292 musb_writeb(epio, MUSB_TXINTERVAL, 2225 musb_writeb(epio, MUSB_TXINTERVAL,
2293 musb->context.index_regs[i].txinterval); 2226 musb->context.index_regs[i].txinterval);
2294 musb_writeb(epio, MUSB_RXTYPE, 2227 musb_writeb(epio, MUSB_RXTYPE,
2295 musb->context.index_regs[i].rxtype); 2228 musb->context.index_regs[i].rxtype);
2296 musb_writeb(epio, MUSB_RXINTERVAL, 2229 musb_writeb(epio, MUSB_RXINTERVAL,
2297 2230
2298 musb->context.index_regs[i].rxinterval); 2231 musb->context.index_regs[i].rxinterval);
2299 musb_write_txfunaddr(musb_base, i, 2232 musb_write_txfunaddr(musb_base, i,
2300 musb->context.index_regs[i].txfunaddr); 2233 musb->context.index_regs[i].txfunaddr);
2301 musb_write_txhubaddr(musb_base, i, 2234 musb_write_txhubaddr(musb_base, i,
2302 musb->context.index_regs[i].txhubaddr); 2235 musb->context.index_regs[i].txhubaddr);
2303 musb_write_txhubport(musb_base, i, 2236 musb_write_txhubport(musb_base, i,
2304 musb->context.index_regs[i].txhubport); 2237 musb->context.index_regs[i].txhubport);
2305 2238
2306 ep_target_regs = 2239 ep_target_regs =
2307 musb_read_target_reg_base(i, musb_base); 2240 musb_read_target_reg_base(i, musb_base);
2308 2241
2309 musb_write_rxfunaddr(ep_target_regs, 2242 musb_write_rxfunaddr(ep_target_regs,
2310 musb->context.index_regs[i].rxfunaddr); 2243 musb->context.index_regs[i].rxfunaddr);
2311 musb_write_rxhubaddr(ep_target_regs, 2244 musb_write_rxhubaddr(ep_target_regs,
2312 musb->context.index_regs[i].rxhubaddr); 2245 musb->context.index_regs[i].rxhubaddr);
2313 musb_write_rxhubport(ep_target_regs, 2246 musb_write_rxhubport(ep_target_regs,
2314 musb->context.index_regs[i].rxhubport); 2247 musb->context.index_regs[i].rxhubport);
2315 }
2316 } 2248 }
2317 musb_writeb(musb_base, MUSB_INDEX, musb->context.index); 2249 musb_writeb(musb_base, MUSB_INDEX, musb->context.index);
2318 } 2250 }
2319 2251
2320 static int musb_suspend(struct device *dev) 2252 static int musb_suspend(struct device *dev)
2321 { 2253 {
2322 struct musb *musb = dev_to_musb(dev); 2254 struct musb *musb = dev_to_musb(dev);
2323 unsigned long flags; 2255 unsigned long flags;
2324 2256
2325 spin_lock_irqsave(&musb->lock, flags); 2257 spin_lock_irqsave(&musb->lock, flags);
2326 2258
2327 if (is_peripheral_active(musb)) { 2259 if (is_peripheral_active(musb)) {
2328 /* FIXME force disconnect unless we know USB will wake 2260 /* FIXME force disconnect unless we know USB will wake
2329 * the system up quickly enough to respond ... 2261 * the system up quickly enough to respond ...
2330 */ 2262 */
2331 } else if (is_host_active(musb)) { 2263 } else if (is_host_active(musb)) {
2332 /* we know all the children are suspended; sometimes 2264 /* we know all the children are suspended; sometimes
2333 * they will even be wakeup-enabled. 2265 * they will even be wakeup-enabled.
2334 */ 2266 */
2335 } 2267 }
2336 2268
2337 spin_unlock_irqrestore(&musb->lock, flags); 2269 spin_unlock_irqrestore(&musb->lock, flags);
2338 return 0; 2270 return 0;
2339 } 2271 }
2340 2272
2341 static int musb_resume_noirq(struct device *dev) 2273 static int musb_resume_noirq(struct device *dev)
2342 { 2274 {
2343 /* for static CMOS like DaVinci, register values were preserved 2275 /* for static CMOS like DaVinci, register values were preserved
2344 * unless for some reason the whole SoC powered down or the USB 2276 * unless for some reason the whole SoC powered down or the USB
2345 * module got reset through the PSC (vs just being disabled). 2277 * module got reset through the PSC (vs just being disabled).
2346 */ 2278 */
2347 return 0; 2279 return 0;
2348 } 2280 }
2349 2281
2350 static int musb_runtime_suspend(struct device *dev) 2282 static int musb_runtime_suspend(struct device *dev)
2351 { 2283 {
2352 struct musb *musb = dev_to_musb(dev); 2284 struct musb *musb = dev_to_musb(dev);
2353 2285
2354 musb_save_context(musb); 2286 musb_save_context(musb);
2355 2287
2356 return 0; 2288 return 0;
2357 } 2289 }
2358 2290
2359 static int musb_runtime_resume(struct device *dev) 2291 static int musb_runtime_resume(struct device *dev)
2360 { 2292 {
2361 struct musb *musb = dev_to_musb(dev); 2293 struct musb *musb = dev_to_musb(dev);
2362 static int first = 1; 2294 static int first = 1;
2363 2295
2364 /* 2296 /*
2365 * When pm_runtime_get_sync() is called for the first time during 2297 * When pm_runtime_get_sync() is called for the first time during
2366 * driver init, some of the structures used by the restore 2298 * driver init, some of the structures used by the restore
2367 * function are not yet initialized. But the clock needs to be 2299 * function are not yet initialized. But the clock needs to be
2368 * enabled before any register access, so 2300 * enabled before any register access, so
2369 * pm_runtime_get_sync() has to be called. 2301 * pm_runtime_get_sync() has to be called.
2370 * Also, a context restore without a prior save does not make 2302 * Also, a context restore without a prior save does not make
2371 * any sense. 2303 * any sense.
2372 */ 2304 */
2373 if (!first) 2305 if (!first)
2374 musb_restore_context(musb); 2306 musb_restore_context(musb);
2375 first = 0; 2307 first = 0;
2376 2308
2377 return 0; 2309 return 0;
2378 } 2310 }
2379 2311
2380 static const struct dev_pm_ops musb_dev_pm_ops = { 2312 static const struct dev_pm_ops musb_dev_pm_ops = {
2381 .suspend = musb_suspend, 2313 .suspend = musb_suspend,
2382 .resume_noirq = musb_resume_noirq, 2314 .resume_noirq = musb_resume_noirq,
2383 .runtime_suspend = musb_runtime_suspend, 2315 .runtime_suspend = musb_runtime_suspend,
2384 .runtime_resume = musb_runtime_resume, 2316 .runtime_resume = musb_runtime_resume,
2385 }; 2317 };
2386 2318
2387 #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops) 2319 #define MUSB_DEV_PM_OPS (&musb_dev_pm_ops)
2388 #else 2320 #else
2389 #define MUSB_DEV_PM_OPS NULL 2321 #define MUSB_DEV_PM_OPS NULL
2390 #endif 2322 #endif
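
The runtime PM pair above saves the register context on runtime suspend and replays it on runtime resume; the static "first" flag skips the very first restore because probe has not saved anything yet. A minimal, self-contained sketch of that first-resume guard, using a toy userspace context with hypothetical names rather than the driver's code:

#include <stdio.h>
#include <string.h>

/* Toy register context standing in for struct musb_context_registers. */
struct ctx { unsigned char power, index, testmode; };

static struct ctx saved;   /* written by the "runtime suspend" path */
static struct ctx live;    /* the "hardware" registers */

static void save_context(void)    { saved = live; }
static void restore_context(void) { live = saved; }

/* Mirrors musb_runtime_resume(): skip the restore on the very first
 * resume, because nothing has been saved at that point. */
static int runtime_resume(void)
{
	static int first = 1;

	if (!first)
		restore_context();
	first = 0;
	return 0;
}

int main(void)
{
	live.power = 0x20;
	runtime_resume();               /* first call: restore skipped */
	save_context();                 /* as musb_runtime_suspend() would */
	memset(&live, 0, sizeof(live)); /* context lost while suspended */
	runtime_resume();               /* now the saved context is replayed */
	printf("power restored to 0x%02x\n", live.power);
	return 0;
}
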
2391 2323
2392 static struct platform_driver musb_driver = { 2324 static struct platform_driver musb_driver = {
2393 .driver = { 2325 .driver = {
2394 .name = (char *)musb_driver_name, 2326 .name = (char *)musb_driver_name,
2395 .bus = &platform_bus_type, 2327 .bus = &platform_bus_type,
2396 .owner = THIS_MODULE, 2328 .owner = THIS_MODULE,
2397 .pm = MUSB_DEV_PM_OPS, 2329 .pm = MUSB_DEV_PM_OPS,
2398 }, 2330 },
2399 .probe = musb_probe, 2331 .probe = musb_probe,
2400 .remove = __devexit_p(musb_remove), 2332 .remove = __devexit_p(musb_remove),
2401 .shutdown = musb_shutdown, 2333 .shutdown = musb_shutdown,
2402 }; 2334 };
2403 2335
2404 /*-------------------------------------------------------------------------*/ 2336 /*-------------------------------------------------------------------------*/
2405 2337
2406 static int __init musb_init(void) 2338 static int __init musb_init(void)
2407 { 2339 {
2408 if (usb_disabled()) 2340 if (usb_disabled())
2409 return 0; 2341 return 0;
2410 2342
2411 pr_info("%s: version " MUSB_VERSION ", " 2343 pr_info("%s: version " MUSB_VERSION ", "
2412 "?dma?" 2344 "?dma?"
2413 ", " 2345 ", "
drivers/usb/musb/musb_core.h
1 /* 1 /*
2 * MUSB OTG driver defines 2 * MUSB OTG driver defines
3 * 3 *
4 * Copyright 2005 Mentor Graphics Corporation 4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments 5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation 6 * Copyright (C) 2006-2007 Nokia Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 10 * version 2 as published by the Free Software Foundation.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA 20 * 02110-1301 USA
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * 32 *
33 */ 33 */
34 34
35 #ifndef __MUSB_CORE_H__ 35 #ifndef __MUSB_CORE_H__
36 #define __MUSB_CORE_H__ 36 #define __MUSB_CORE_H__
37 37
38 #include <linux/slab.h> 38 #include <linux/slab.h>
39 #include <linux/list.h> 39 #include <linux/list.h>
40 #include <linux/interrupt.h> 40 #include <linux/interrupt.h>
41 #include <linux/errno.h> 41 #include <linux/errno.h>
42 #include <linux/timer.h> 42 #include <linux/timer.h>
43 #include <linux/device.h> 43 #include <linux/device.h>
44 #include <linux/usb/ch9.h> 44 #include <linux/usb/ch9.h>
45 #include <linux/usb/gadget.h> 45 #include <linux/usb/gadget.h>
46 #include <linux/usb.h> 46 #include <linux/usb.h>
47 #include <linux/usb/otg.h> 47 #include <linux/usb/otg.h>
48 #include <linux/usb/musb.h> 48 #include <linux/usb/musb.h>
49 49
50 struct musb; 50 struct musb;
51 struct musb_hw_ep; 51 struct musb_hw_ep;
52 struct musb_ep; 52 struct musb_ep;
53 53
54 /* Helper defines for struct musb->hwvers */ 54 /* Helper defines for struct musb->hwvers */
55 #define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f) 55 #define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
56 #define MUSB_HWVERS_MINOR(x) (x & 0x3ff) 56 #define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
57 #define MUSB_HWVERS_RC 0x8000 57 #define MUSB_HWVERS_RC 0x8000
58 #define MUSB_HWVERS_1300 0x52C 58 #define MUSB_HWVERS_1300 0x52C
59 #define MUSB_HWVERS_1400 0x590 59 #define MUSB_HWVERS_1400 0x590
60 #define MUSB_HWVERS_1800 0x720 60 #define MUSB_HWVERS_1800 0x720
61 #define MUSB_HWVERS_1900 0x784 61 #define MUSB_HWVERS_1900 0x784
62 #define MUSB_HWVERS_2000 0x800 62 #define MUSB_HWVERS_2000 0x800
63 63
64 #include "musb_debug.h" 64 #include "musb_debug.h"
65 #include "musb_dma.h" 65 #include "musb_dma.h"
66 66
67 #include "musb_io.h" 67 #include "musb_io.h"
68 #include "musb_regs.h" 68 #include "musb_regs.h"
69 69
70 #include "musb_gadget.h" 70 #include "musb_gadget.h"
71 #include <linux/usb/hcd.h> 71 #include <linux/usb/hcd.h>
72 #include "musb_host.h" 72 #include "musb_host.h"
73 73
74 #define is_peripheral_enabled(musb) ((musb)->board_mode != MUSB_HOST)
75 #define is_host_enabled(musb) ((musb)->board_mode != MUSB_PERIPHERAL)
76 #define is_otg_enabled(musb) ((musb)->board_mode == MUSB_OTG)
77
78 /* NOTE: otg and peripheral-only state machines start at B_IDLE. 74 /* NOTE: otg and peripheral-only state machines start at B_IDLE.
79 * OTG or host-only go to A_IDLE when ID is sensed. 75 * OTG or host-only go to A_IDLE when ID is sensed.
80 */ 76 */
81 #define is_peripheral_active(m) (!(m)->is_host) 77 #define is_peripheral_active(m) (!(m)->is_host)
82 #define is_host_active(m) ((m)->is_host) 78 #define is_host_active(m) ((m)->is_host)
83 79
84 #ifdef CONFIG_PROC_FS 80 #ifdef CONFIG_PROC_FS
85 #include <linux/fs.h> 81 #include <linux/fs.h>
86 #define MUSB_CONFIG_PROC_FS 82 #define MUSB_CONFIG_PROC_FS
87 #endif 83 #endif
88 84
89 /****************************** PERIPHERAL ROLE *****************************/ 85 /****************************** PERIPHERAL ROLE *****************************/
90 86
91 #define is_peripheral_capable() (1) 87 #define is_peripheral_capable() (1)
92 88
93 extern irqreturn_t musb_g_ep0_irq(struct musb *); 89 extern irqreturn_t musb_g_ep0_irq(struct musb *);
94 extern void musb_g_tx(struct musb *, u8); 90 extern void musb_g_tx(struct musb *, u8);
95 extern void musb_g_rx(struct musb *, u8); 91 extern void musb_g_rx(struct musb *, u8);
96 extern void musb_g_reset(struct musb *); 92 extern void musb_g_reset(struct musb *);
97 extern void musb_g_suspend(struct musb *); 93 extern void musb_g_suspend(struct musb *);
98 extern void musb_g_resume(struct musb *); 94 extern void musb_g_resume(struct musb *);
99 extern void musb_g_wakeup(struct musb *); 95 extern void musb_g_wakeup(struct musb *);
100 extern void musb_g_disconnect(struct musb *); 96 extern void musb_g_disconnect(struct musb *);
101 97
102 /****************************** HOST ROLE ***********************************/ 98 /****************************** HOST ROLE ***********************************/
103 99
104 #define is_host_capable() (1) 100 #define is_host_capable() (1)
105 101
106 extern irqreturn_t musb_h_ep0_irq(struct musb *); 102 extern irqreturn_t musb_h_ep0_irq(struct musb *);
107 extern void musb_host_tx(struct musb *, u8); 103 extern void musb_host_tx(struct musb *, u8);
108 extern void musb_host_rx(struct musb *, u8); 104 extern void musb_host_rx(struct musb *, u8);
109 105
110 /****************************** CONSTANTS ********************************/ 106 /****************************** CONSTANTS ********************************/
111 107
112 #ifndef MUSB_C_NUM_EPS 108 #ifndef MUSB_C_NUM_EPS
113 #define MUSB_C_NUM_EPS ((u8)16) 109 #define MUSB_C_NUM_EPS ((u8)16)
114 #endif 110 #endif
115 111
116 #ifndef MUSB_MAX_END0_PACKET 112 #ifndef MUSB_MAX_END0_PACKET
117 #define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE) 113 #define MUSB_MAX_END0_PACKET ((u16)MUSB_EP0_FIFOSIZE)
118 #endif 114 #endif
119 115
120 /* host side ep0 states */ 116 /* host side ep0 states */
121 enum musb_h_ep0_state { 117 enum musb_h_ep0_state {
122 MUSB_EP0_IDLE, 118 MUSB_EP0_IDLE,
123 MUSB_EP0_START, /* expect ack of setup */ 119 MUSB_EP0_START, /* expect ack of setup */
124 MUSB_EP0_IN, /* expect IN DATA */ 120 MUSB_EP0_IN, /* expect IN DATA */
125 MUSB_EP0_OUT, /* expect ack of OUT DATA */ 121 MUSB_EP0_OUT, /* expect ack of OUT DATA */
126 MUSB_EP0_STATUS, /* expect ack of STATUS */ 122 MUSB_EP0_STATUS, /* expect ack of STATUS */
127 } __attribute__ ((packed)); 123 } __attribute__ ((packed));
128 124
129 /* peripheral side ep0 states */ 125 /* peripheral side ep0 states */
130 enum musb_g_ep0_state { 126 enum musb_g_ep0_state {
131 MUSB_EP0_STAGE_IDLE, /* idle, waiting for SETUP */ 127 MUSB_EP0_STAGE_IDLE, /* idle, waiting for SETUP */
132 MUSB_EP0_STAGE_SETUP, /* received SETUP */ 128 MUSB_EP0_STAGE_SETUP, /* received SETUP */
133 MUSB_EP0_STAGE_TX, /* IN data */ 129 MUSB_EP0_STAGE_TX, /* IN data */
134 MUSB_EP0_STAGE_RX, /* OUT data */ 130 MUSB_EP0_STAGE_RX, /* OUT data */
135 MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */ 131 MUSB_EP0_STAGE_STATUSIN, /* (after OUT data) */
136 MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */ 132 MUSB_EP0_STAGE_STATUSOUT, /* (after IN data) */
137 MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */ 133 MUSB_EP0_STAGE_ACKWAIT, /* after zlp, before statusin */
138 } __attribute__ ((packed)); 134 } __attribute__ ((packed));
139 135
140 /* 136 /*
141 * OTG protocol constants. See USB OTG 1.3 spec, 137 * OTG protocol constants. See USB OTG 1.3 spec,
142 * sections 5.5 "Device Timings" and 6.6.5 "Timers". 138 * sections 5.5 "Device Timings" and 6.6.5 "Timers".
143 */ 139 */
144 #define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */ 140 #define OTG_TIME_A_WAIT_VRISE 100 /* msec (max) */
145 #define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */ 141 #define OTG_TIME_A_WAIT_BCON 1100 /* min 1 second */
146 #define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */ 142 #define OTG_TIME_A_AIDL_BDIS 200 /* min 200 msec */
147 #define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */ 143 #define OTG_TIME_B_ASE0_BRST 100 /* min 3.125 ms */
148 144
149 145
150 /*************************** REGISTER ACCESS ********************************/ 146 /*************************** REGISTER ACCESS ********************************/
151 147
152 /* Endpoint registers (other than dynfifo setup) can be accessed either 148 /* Endpoint registers (other than dynfifo setup) can be accessed either
153 * directly with the "flat" model, or after setting up an index register. 149 * directly with the "flat" model, or after setting up an index register.
154 */ 150 */
155 151
156 #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \ 152 #if defined(CONFIG_ARCH_DAVINCI) || defined(CONFIG_SOC_OMAP2430) \
157 || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \ 153 || defined(CONFIG_SOC_OMAP3430) || defined(CONFIG_BLACKFIN) \
158 || defined(CONFIG_ARCH_OMAP4) 154 || defined(CONFIG_ARCH_OMAP4)
159 /* REVISIT indexed access seemed to 155 /* REVISIT indexed access seemed to
160 * misbehave (on DaVinci) for at least peripheral IN ... 156 * misbehave (on DaVinci) for at least peripheral IN ...
161 */ 157 */
162 #define MUSB_FLAT_REG 158 #define MUSB_FLAT_REG
163 #endif 159 #endif
164 160
165 /* TUSB mapping: "flat" plus ep0 special cases */ 161 /* TUSB mapping: "flat" plus ep0 special cases */
166 #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 162 #if defined(CONFIG_USB_MUSB_TUSB6010) || \
167 defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 163 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
168 #define musb_ep_select(_mbase, _epnum) \ 164 #define musb_ep_select(_mbase, _epnum) \
169 musb_writeb((_mbase), MUSB_INDEX, (_epnum)) 165 musb_writeb((_mbase), MUSB_INDEX, (_epnum))
170 #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET 166 #define MUSB_EP_OFFSET MUSB_TUSB_OFFSET
171 167
172 /* "flat" mapping: each endpoint has its own i/o address */ 168 /* "flat" mapping: each endpoint has its own i/o address */
173 #elif defined(MUSB_FLAT_REG) 169 #elif defined(MUSB_FLAT_REG)
174 #define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum))) 170 #define musb_ep_select(_mbase, _epnum) (((void)(_mbase)), ((void)(_epnum)))
175 #define MUSB_EP_OFFSET MUSB_FLAT_OFFSET 171 #define MUSB_EP_OFFSET MUSB_FLAT_OFFSET
176 172
177 /* "indexed" mapping: INDEX register controls register bank select */ 173 /* "indexed" mapping: INDEX register controls register bank select */
178 #else 174 #else
179 #define musb_ep_select(_mbase, _epnum) \ 175 #define musb_ep_select(_mbase, _epnum) \
180 musb_writeb((_mbase), MUSB_INDEX, (_epnum)) 176 musb_writeb((_mbase), MUSB_INDEX, (_epnum))
181 #define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET 177 #define MUSB_EP_OFFSET MUSB_INDEXED_OFFSET
182 #endif 178 #endif
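
With the indexed mapping, musb_ep_select() writes the endpoint number into MUSB_INDEX so that the per-endpoint CSR window points at that endpoint before any CSR access; the flat mapping compiles the select away because every endpoint has its own address range. A small standalone sketch of the indexed (bank-select) pattern, assuming a toy register file rather than the real register map:

#include <stdio.h>

#define NUM_EPS      4
#define REG_INDEX    0          /* bank-select register */
#define REG_TXMAXP   1          /* lives in the indexed window */

/* Toy device: a bank-select register plus one banked register per EP. */
static unsigned char index_reg;
static unsigned short txmaxp[NUM_EPS];

static void reg_writeb(int reg, unsigned char v)
{
	if (reg == REG_INDEX)
		index_reg = v;
}

static void reg_writew(int reg, unsigned short v)
{
	if (reg == REG_TXMAXP)
		txmaxp[index_reg] = v;   /* routed by the INDEX register */
}

int main(void)
{
	int i;

	/* Same shape as the context-restore loop: select, then touch CSRs. */
	for (i = 0; i < NUM_EPS; i++) {
		reg_writeb(REG_INDEX, i);          /* musb_ep_select() */
		reg_writew(REG_TXMAXP, 512 + i);   /* musb_writew(epio, ...) */
	}
	for (i = 0; i < NUM_EPS; i++)
		printf("ep%d txmaxp=%u\n", i, (unsigned)txmaxp[i]);
	return 0;
}
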
183 179
184 /****************************** FUNCTIONS ********************************/ 180 /****************************** FUNCTIONS ********************************/
185 181
186 #define MUSB_HST_MODE(_musb)\ 182 #define MUSB_HST_MODE(_musb)\
187 { (_musb)->is_host = true; } 183 { (_musb)->is_host = true; }
188 #define MUSB_DEV_MODE(_musb) \ 184 #define MUSB_DEV_MODE(_musb) \
189 { (_musb)->is_host = false; } 185 { (_musb)->is_host = false; }
190 186
191 #define test_devctl_hst_mode(_x) \ 187 #define test_devctl_hst_mode(_x) \
192 (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM) 188 (musb_readb((_x)->mregs, MUSB_DEVCTL)&MUSB_DEVCTL_HM)
193 189
194 #define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral") 190 #define MUSB_MODE(musb) ((musb)->is_host ? "Host" : "Peripheral")
195 191
196 /******************************** TYPES *************************************/ 192 /******************************** TYPES *************************************/
197 193
198 /** 194 /**
199 * struct musb_platform_ops - Operations passed to musb_core by HW glue layer 195 * struct musb_platform_ops - Operations passed to musb_core by HW glue layer
200 * @init: turns on clocks, sets up platform-specific registers, etc 196 * @init: turns on clocks, sets up platform-specific registers, etc
201 * @exit: undoes @init 197 * @exit: undoes @init
202 * @set_mode: forcefully changes operating mode 198 * @set_mode: forcefully changes operating mode
203 * @try_idle: tries to idle the IP 199 * @try_idle: tries to idle the IP
204 * @vbus_status: returns vbus status if possible 200 * @vbus_status: returns vbus status if possible
205 * @set_vbus: forces vbus status 201 * @set_vbus: forces vbus status
206 * @adjust_channel_params: pre check for standard dma channel_program func 202 * @adjust_channel_params: pre check for standard dma channel_program func
207 */ 203 */
208 struct musb_platform_ops { 204 struct musb_platform_ops {
209 int (*init)(struct musb *musb); 205 int (*init)(struct musb *musb);
210 int (*exit)(struct musb *musb); 206 int (*exit)(struct musb *musb);
211 207
212 void (*enable)(struct musb *musb); 208 void (*enable)(struct musb *musb);
213 void (*disable)(struct musb *musb); 209 void (*disable)(struct musb *musb);
214 210
215 int (*set_mode)(struct musb *musb, u8 mode); 211 int (*set_mode)(struct musb *musb, u8 mode);
216 void (*try_idle)(struct musb *musb, unsigned long timeout); 212 void (*try_idle)(struct musb *musb, unsigned long timeout);
217 213
218 int (*vbus_status)(struct musb *musb); 214 int (*vbus_status)(struct musb *musb);
219 void (*set_vbus)(struct musb *musb, int on); 215 void (*set_vbus)(struct musb *musb, int on);
220 216
221 int (*adjust_channel_params)(struct dma_channel *channel, 217 int (*adjust_channel_params)(struct dma_channel *channel,
222 u16 packet_sz, u8 *mode, 218 u16 packet_sz, u8 *mode,
223 dma_addr_t *dma_addr, u32 *len); 219 dma_addr_t *dma_addr, u32 *len);
224 }; 220 };
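
A glue layer fills in only the callbacks it implements; the musb_platform_*() inline wrappers later in this header dispatch through these pointers and either no-op or fail gracefully when a hook is absent. A hedged, standalone sketch of that optional-callback pattern, with a reduced ops struct and a hypothetical glue rather than a real platform:

#include <stdio.h>

/* Reduced stand-in for struct musb_platform_ops. */
struct ops {
	int  (*init)(void);              /* mandatory */
	void (*set_vbus)(int on);        /* optional  */
};

/* Wrappers in the style of musb_platform_init()/musb_platform_set_vbus(). */
static int platform_init(const struct ops *o)
{
	if (!o->init)
		return -1;               /* the core returns -EINVAL here */
	return o->init();
}

static void platform_set_vbus(const struct ops *o, int on)
{
	if (o->set_vbus)                 /* silently skipped when absent */
		o->set_vbus(on);
}

/* A hypothetical glue that only implements init. */
static int demo_init(void) { puts("glue init"); return 0; }
static const struct ops demo_ops = { .init = demo_init };

int main(void)
{
	platform_init(&demo_ops);
	platform_set_vbus(&demo_ops, 1); /* no set_vbus hook: no-op */
	return 0;
}
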
225 221
226 /* 222 /*
227 * struct musb_hw_ep - endpoint hardware (bidirectional) 223 * struct musb_hw_ep - endpoint hardware (bidirectional)
228 * 224 *
229 * Ordered slightly for better cacheline locality. 225 * Ordered slightly for better cacheline locality.
230 */ 226 */
231 struct musb_hw_ep { 227 struct musb_hw_ep {
232 struct musb *musb; 228 struct musb *musb;
233 void __iomem *fifo; 229 void __iomem *fifo;
234 void __iomem *regs; 230 void __iomem *regs;
235 231
236 #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 232 #if defined(CONFIG_USB_MUSB_TUSB6010) || \
237 defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 233 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
238 void __iomem *conf; 234 void __iomem *conf;
239 #endif 235 #endif
240 236
241 /* index in musb->endpoints[] */ 237 /* index in musb->endpoints[] */
242 u8 epnum; 238 u8 epnum;
243 239
244 /* hardware configuration, possibly dynamic */ 240 /* hardware configuration, possibly dynamic */
245 bool is_shared_fifo; 241 bool is_shared_fifo;
246 bool tx_double_buffered; 242 bool tx_double_buffered;
247 bool rx_double_buffered; 243 bool rx_double_buffered;
248 u16 max_packet_sz_tx; 244 u16 max_packet_sz_tx;
249 u16 max_packet_sz_rx; 245 u16 max_packet_sz_rx;
250 246
251 struct dma_channel *tx_channel; 247 struct dma_channel *tx_channel;
252 struct dma_channel *rx_channel; 248 struct dma_channel *rx_channel;
253 249
254 #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 250 #if defined(CONFIG_USB_MUSB_TUSB6010) || \
255 defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 251 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
256 /* TUSB has "asynchronous" and "synchronous" dma modes */ 252 /* TUSB has "asynchronous" and "synchronous" dma modes */
257 dma_addr_t fifo_async; 253 dma_addr_t fifo_async;
258 dma_addr_t fifo_sync; 254 dma_addr_t fifo_sync;
259 void __iomem *fifo_sync_va; 255 void __iomem *fifo_sync_va;
260 #endif 256 #endif
261 257
262 void __iomem *target_regs; 258 void __iomem *target_regs;
263 259
264 /* currently scheduled peripheral endpoint */ 260 /* currently scheduled peripheral endpoint */
265 struct musb_qh *in_qh; 261 struct musb_qh *in_qh;
266 struct musb_qh *out_qh; 262 struct musb_qh *out_qh;
267 263
268 u8 rx_reinit; 264 u8 rx_reinit;
269 u8 tx_reinit; 265 u8 tx_reinit;
270 266
271 /* peripheral side */ 267 /* peripheral side */
272 struct musb_ep ep_in; /* TX */ 268 struct musb_ep ep_in; /* TX */
273 struct musb_ep ep_out; /* RX */ 269 struct musb_ep ep_out; /* RX */
274 }; 270 };
275 271
276 static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep) 272 static inline struct musb_request *next_in_request(struct musb_hw_ep *hw_ep)
277 { 273 {
278 return next_request(&hw_ep->ep_in); 274 return next_request(&hw_ep->ep_in);
279 } 275 }
280 276
281 static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep) 277 static inline struct musb_request *next_out_request(struct musb_hw_ep *hw_ep)
282 { 278 {
283 return next_request(&hw_ep->ep_out); 279 return next_request(&hw_ep->ep_out);
284 } 280 }
285 281
286 struct musb_csr_regs { 282 struct musb_csr_regs {
287 /* FIFO registers */ 283 /* FIFO registers */
288 u16 txmaxp, txcsr, rxmaxp, rxcsr; 284 u16 txmaxp, txcsr, rxmaxp, rxcsr;
289 u16 rxfifoadd, txfifoadd; 285 u16 rxfifoadd, txfifoadd;
290 u8 txtype, txinterval, rxtype, rxinterval; 286 u8 txtype, txinterval, rxtype, rxinterval;
291 u8 rxfifosz, txfifosz; 287 u8 rxfifosz, txfifosz;
292 u8 txfunaddr, txhubaddr, txhubport; 288 u8 txfunaddr, txhubaddr, txhubport;
293 u8 rxfunaddr, rxhubaddr, rxhubport; 289 u8 rxfunaddr, rxhubaddr, rxhubport;
294 }; 290 };
295 291
296 struct musb_context_registers { 292 struct musb_context_registers {
297 293
298 u8 power; 294 u8 power;
299 u16 intrtxe, intrrxe; 295 u16 intrtxe, intrrxe;
300 u8 intrusbe; 296 u8 intrusbe;
301 u16 frame; 297 u16 frame;
302 u8 index, testmode; 298 u8 index, testmode;
303 299
304 u8 devctl, busctl, misc; 300 u8 devctl, busctl, misc;
305 u32 otg_interfsel; 301 u32 otg_interfsel;
306 302
307 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS]; 303 struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
308 }; 304 };
309 305
310 /* 306 /*
311 * struct musb - Driver instance data. 307 * struct musb - Driver instance data.
312 */ 308 */
313 struct musb { 309 struct musb {
314 /* device lock */ 310 /* device lock */
315 spinlock_t lock; 311 spinlock_t lock;
316 312
317 const struct musb_platform_ops *ops; 313 const struct musb_platform_ops *ops;
318 struct musb_context_registers context; 314 struct musb_context_registers context;
319 315
320 irqreturn_t (*isr)(int, void *); 316 irqreturn_t (*isr)(int, void *);
321 struct work_struct irq_work; 317 struct work_struct irq_work;
322 u16 hwvers; 318 u16 hwvers;
323 319
324 /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */ 320 /* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
325 #define MUSB_PORT_STAT_RESUME (1 << 31) 321 #define MUSB_PORT_STAT_RESUME (1 << 31)
326 322
327 u32 port1_status; 323 u32 port1_status;
328 324
329 unsigned long rh_timer; 325 unsigned long rh_timer;
330 326
331 enum musb_h_ep0_state ep0_stage; 327 enum musb_h_ep0_state ep0_stage;
332 328
333 /* bulk traffic normally dedicates endpoint hardware, and each 329 /* bulk traffic normally dedicates endpoint hardware, and each
334 * direction has its own ring of host side endpoints. 330 * direction has its own ring of host side endpoints.
335 * we try to progress the transfer at the head of each endpoint's 331 * we try to progress the transfer at the head of each endpoint's
336 * queue until it completes or NAKs too much; then we try the next 332 * queue until it completes or NAKs too much; then we try the next
337 * endpoint. 333 * endpoint.
338 */ 334 */
339 struct musb_hw_ep *bulk_ep; 335 struct musb_hw_ep *bulk_ep;
340 336
341 struct list_head control; /* of musb_qh */ 337 struct list_head control; /* of musb_qh */
342 struct list_head in_bulk; /* of musb_qh */ 338 struct list_head in_bulk; /* of musb_qh */
343 struct list_head out_bulk; /* of musb_qh */ 339 struct list_head out_bulk; /* of musb_qh */
344 340
345 struct timer_list otg_timer; 341 struct timer_list otg_timer;
346 struct notifier_block nb; 342 struct notifier_block nb;
347 343
348 struct dma_controller *dma_controller; 344 struct dma_controller *dma_controller;
349 345
350 struct device *controller; 346 struct device *controller;
351 void __iomem *ctrl_base; 347 void __iomem *ctrl_base;
352 void __iomem *mregs; 348 void __iomem *mregs;
353 349
354 #if defined(CONFIG_USB_MUSB_TUSB6010) || \ 350 #if defined(CONFIG_USB_MUSB_TUSB6010) || \
355 defined(CONFIG_USB_MUSB_TUSB6010_MODULE) 351 defined(CONFIG_USB_MUSB_TUSB6010_MODULE)
356 dma_addr_t async; 352 dma_addr_t async;
357 dma_addr_t sync; 353 dma_addr_t sync;
358 void __iomem *sync_va; 354 void __iomem *sync_va;
359 #endif 355 #endif
360 356
361 /* passed down from chip/board specific irq handlers */ 357 /* passed down from chip/board specific irq handlers */
362 u8 int_usb; 358 u8 int_usb;
363 u16 int_rx; 359 u16 int_rx;
364 u16 int_tx; 360 u16 int_tx;
365 361
366 struct usb_phy *xceiv; 362 struct usb_phy *xceiv;
367 363
368 int nIrq; 364 int nIrq;
369 unsigned irq_wake:1; 365 unsigned irq_wake:1;
370 366
371 struct musb_hw_ep endpoints[MUSB_C_NUM_EPS]; 367 struct musb_hw_ep endpoints[MUSB_C_NUM_EPS];
372 #define control_ep endpoints 368 #define control_ep endpoints
373 369
374 #define VBUSERR_RETRY_COUNT 3 370 #define VBUSERR_RETRY_COUNT 3
375 u16 vbuserr_retry; 371 u16 vbuserr_retry;
376 u16 epmask; 372 u16 epmask;
377 u8 nr_endpoints; 373 u8 nr_endpoints;
378 374
379 u8 board_mode; /* enum musb_mode */
380 int (*board_set_power)(int state); 375 int (*board_set_power)(int state);
381 376
382 u8 min_power; /* vbus for periph, in mA/2 */ 377 u8 min_power; /* vbus for periph, in mA/2 */
383 378
384 bool is_host; 379 bool is_host;
385 380
386 int a_wait_bcon; /* VBUS timeout in msecs */ 381 int a_wait_bcon; /* VBUS timeout in msecs */
387 unsigned long idle_timeout; /* Next timeout in jiffies */ 382 unsigned long idle_timeout; /* Next timeout in jiffies */
388 383
389 /* active means connected and not suspended */ 384 /* active means connected and not suspended */
390 unsigned is_active:1; 385 unsigned is_active:1;
391 386
392 unsigned is_multipoint:1; 387 unsigned is_multipoint:1;
393 unsigned ignore_disconnect:1; /* during bus resets */ 388 unsigned ignore_disconnect:1; /* during bus resets */
394 389
395 unsigned hb_iso_rx:1; /* high bandwidth iso rx? */ 390 unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
396 unsigned hb_iso_tx:1; /* high bandwidth iso tx? */ 391 unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
397 unsigned dyn_fifo:1; /* dynamic FIFO supported? */ 392 unsigned dyn_fifo:1; /* dynamic FIFO supported? */
398 393
399 unsigned bulk_split:1; 394 unsigned bulk_split:1;
400 #define can_bulk_split(musb,type) \ 395 #define can_bulk_split(musb,type) \
401 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split) 396 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
402 397
403 unsigned bulk_combine:1; 398 unsigned bulk_combine:1;
404 #define can_bulk_combine(musb,type) \ 399 #define can_bulk_combine(musb,type) \
405 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine) 400 (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
406 401
407 /* is_suspended means USB B_PERIPHERAL suspend */ 402 /* is_suspended means USB B_PERIPHERAL suspend */
408 unsigned is_suspended:1; 403 unsigned is_suspended:1;
409 404
410 /* may_wakeup means remote wakeup is enabled */ 405 /* may_wakeup means remote wakeup is enabled */
411 unsigned may_wakeup:1; 406 unsigned may_wakeup:1;
412 407
413 /* is_self_powered is reported in device status and the 408 /* is_self_powered is reported in device status and the
414 * config descriptor. is_bus_powered means B_PERIPHERAL 409 * config descriptor. is_bus_powered means B_PERIPHERAL
415 * draws some VBUS current; both can be true. 410 * draws some VBUS current; both can be true.
416 */ 411 */
417 unsigned is_self_powered:1; 412 unsigned is_self_powered:1;
418 unsigned is_bus_powered:1; 413 unsigned is_bus_powered:1;
419 414
420 unsigned set_address:1; 415 unsigned set_address:1;
421 unsigned test_mode:1; 416 unsigned test_mode:1;
422 unsigned softconnect:1; 417 unsigned softconnect:1;
423 418
424 u8 address; 419 u8 address;
425 u8 test_mode_nr; 420 u8 test_mode_nr;
426 u16 ackpend; /* ep0 */ 421 u16 ackpend; /* ep0 */
427 enum musb_g_ep0_state ep0_state; 422 enum musb_g_ep0_state ep0_state;
428 struct usb_gadget g; /* the gadget */ 423 struct usb_gadget g; /* the gadget */
429 struct usb_gadget_driver *gadget_driver; /* its driver */ 424 struct usb_gadget_driver *gadget_driver; /* its driver */
430 425
431 /* 426 /*
432 * FIXME: Remove this flag. 427 * FIXME: Remove this flag.
433 * 428 *
434 * This is only added to allow Blackfin to work 429 * This is only added to allow Blackfin to work
435 * with current driver. For some unknown reason 430 * with current driver. For some unknown reason
436 * Blackfin doesn't work with double buffering 431 * Blackfin doesn't work with double buffering
437 * and that's enabled by default. 432 * and that's enabled by default.
438 * 433 *
439 * We added this flag to forcefully disable double 434 * We added this flag to forcefully disable double
440 * buffering until we get it working. 435 * buffering until we get it working.
441 */ 436 */
442 unsigned double_buffer_not_ok:1; 437 unsigned double_buffer_not_ok:1;
443 438
444 struct musb_hdrc_config *config; 439 struct musb_hdrc_config *config;
445 440
446 #ifdef MUSB_CONFIG_PROC_FS 441 #ifdef MUSB_CONFIG_PROC_FS
447 struct proc_dir_entry *proc_entry; 442 struct proc_dir_entry *proc_entry;
448 #endif 443 #endif
449 }; 444 };
450 445
451 static inline struct musb *gadget_to_musb(struct usb_gadget *g) 446 static inline struct musb *gadget_to_musb(struct usb_gadget *g)
452 { 447 {
453 return container_of(g, struct musb, g); 448 return container_of(g, struct musb, g);
454 } 449 }
455 450
456 #ifdef CONFIG_BLACKFIN 451 #ifdef CONFIG_BLACKFIN
457 static inline int musb_read_fifosize(struct musb *musb, 452 static inline int musb_read_fifosize(struct musb *musb,
458 struct musb_hw_ep *hw_ep, u8 epnum) 453 struct musb_hw_ep *hw_ep, u8 epnum)
459 { 454 {
460 musb->nr_endpoints++; 455 musb->nr_endpoints++;
461 musb->epmask |= (1 << epnum); 456 musb->epmask |= (1 << epnum);
462 457
463 if (epnum < 5) { 458 if (epnum < 5) {
464 hw_ep->max_packet_sz_tx = 128; 459 hw_ep->max_packet_sz_tx = 128;
465 hw_ep->max_packet_sz_rx = 128; 460 hw_ep->max_packet_sz_rx = 128;
466 } else { 461 } else {
467 hw_ep->max_packet_sz_tx = 1024; 462 hw_ep->max_packet_sz_tx = 1024;
468 hw_ep->max_packet_sz_rx = 1024; 463 hw_ep->max_packet_sz_rx = 1024;
469 } 464 }
470 hw_ep->is_shared_fifo = false; 465 hw_ep->is_shared_fifo = false;
471 466
472 return 0; 467 return 0;
473 } 468 }
474 469
475 static inline void musb_configure_ep0(struct musb *musb) 470 static inline void musb_configure_ep0(struct musb *musb)
476 { 471 {
477 musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; 472 musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
478 musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; 473 musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
479 musb->endpoints[0].is_shared_fifo = true; 474 musb->endpoints[0].is_shared_fifo = true;
480 } 475 }
481 476
482 #else 477 #else
483 478
484 static inline int musb_read_fifosize(struct musb *musb, 479 static inline int musb_read_fifosize(struct musb *musb,
485 struct musb_hw_ep *hw_ep, u8 epnum) 480 struct musb_hw_ep *hw_ep, u8 epnum)
486 { 481 {
487 void __iomem *mbase = musb->mregs; 482 void __iomem *mbase = musb->mregs;
488 u8 reg = 0; 483 u8 reg = 0;
489 484
490 /* read from core using indexed model */ 485 /* read from core using indexed model */
491 reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE)); 486 reg = musb_readb(mbase, MUSB_EP_OFFSET(epnum, MUSB_FIFOSIZE));
492 /* 0's returned when no more endpoints */ 487 /* 0's returned when no more endpoints */
493 if (!reg) 488 if (!reg)
494 return -ENODEV; 489 return -ENODEV;
495 490
496 musb->nr_endpoints++; 491 musb->nr_endpoints++;
497 musb->epmask |= (1 << epnum); 492 musb->epmask |= (1 << epnum);
498 493
499 hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f); 494 hw_ep->max_packet_sz_tx = 1 << (reg & 0x0f);
500 495
501 /* shared TX/RX FIFO? */ 496 /* shared TX/RX FIFO? */
502 if ((reg & 0xf0) == 0xf0) { 497 if ((reg & 0xf0) == 0xf0) {
503 hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx; 498 hw_ep->max_packet_sz_rx = hw_ep->max_packet_sz_tx;
504 hw_ep->is_shared_fifo = true; 499 hw_ep->is_shared_fifo = true;
505 return 0; 500 return 0;
506 } else { 501 } else {
507 hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4); 502 hw_ep->max_packet_sz_rx = 1 << ((reg & 0xf0) >> 4);
508 hw_ep->is_shared_fifo = false; 503 hw_ep->is_shared_fifo = false;
509 } 504 }
510 505
511 return 0; 506 return 0;
512 } 507 }
513 508
514 static inline void musb_configure_ep0(struct musb *musb) 509 static inline void musb_configure_ep0(struct musb *musb)
515 { 510 {
516 musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE; 511 musb->endpoints[0].max_packet_sz_tx = MUSB_EP0_FIFOSIZE;
517 musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE; 512 musb->endpoints[0].max_packet_sz_rx = MUSB_EP0_FIFOSIZE;
518 musb->endpoints[0].is_shared_fifo = true; 513 musb->endpoints[0].is_shared_fifo = true;
519 } 514 }
520 #endif /* CONFIG_BLACKFIN */ 515 #endif /* CONFIG_BLACKFIN */
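
In the non-Blackfin path, musb_read_fifosize() decodes the FIFOSIZE register: the low nibble is log2 of the TX FIFO size, the high nibble log2 of the RX size, and a high nibble of 0xf marks a shared TX/RX FIFO. A standalone sketch of that decode, using made-up register values rather than hardware reads:

#include <stdio.h>
#include <stdbool.h>

struct fifosize { unsigned tx, rx; bool shared; };

/* Same decode as the non-Blackfin musb_read_fifosize(). */
static struct fifosize decode_fifosize(unsigned char reg)
{
	struct fifosize f;

	f.tx = 1u << (reg & 0x0f);
	if ((reg & 0xf0) == 0xf0) {          /* shared TX/RX FIFO */
		f.rx = f.tx;
		f.shared = true;
	} else {
		f.rx = 1u << ((reg & 0xf0) >> 4);
		f.shared = false;
	}
	return f;
}

int main(void)
{
	/* 0xa9: 2^9 = 512 byte TX FIFO, 2^10 = 1024 byte RX FIFO. */
	struct fifosize f = decode_fifosize(0xa9);

	printf("tx=%u rx=%u shared=%d\n", f.tx, f.rx, f.shared);
	/* 0xf9: shared 512 byte FIFO for both directions. */
	f = decode_fifosize(0xf9);
	printf("tx=%u rx=%u shared=%d\n", f.tx, f.rx, f.shared);
	return 0;
}
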
521 516
522 517
523 /***************************** Glue it together *****************************/ 518 /***************************** Glue it together *****************************/
524 519
525 extern const char musb_driver_name[]; 520 extern const char musb_driver_name[];
526 521
527 extern void musb_start(struct musb *musb); 522 extern void musb_start(struct musb *musb);
528 extern void musb_stop(struct musb *musb); 523 extern void musb_stop(struct musb *musb);
529 524
530 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src); 525 extern void musb_write_fifo(struct musb_hw_ep *ep, u16 len, const u8 *src);
531 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst); 526 extern void musb_read_fifo(struct musb_hw_ep *ep, u16 len, u8 *dst);
532 527
533 extern void musb_load_testpacket(struct musb *); 528 extern void musb_load_testpacket(struct musb *);
534 529
535 extern irqreturn_t musb_interrupt(struct musb *); 530 extern irqreturn_t musb_interrupt(struct musb *);
536 531
537 extern void musb_hnp_stop(struct musb *musb); 532 extern void musb_hnp_stop(struct musb *musb);
538 533
539 static inline void musb_platform_set_vbus(struct musb *musb, int is_on) 534 static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
540 { 535 {
541 if (musb->ops->set_vbus) 536 if (musb->ops->set_vbus)
542 musb->ops->set_vbus(musb, is_on); 537 musb->ops->set_vbus(musb, is_on);
543 } 538 }
544 539
545 static inline void musb_platform_enable(struct musb *musb) 540 static inline void musb_platform_enable(struct musb *musb)
546 { 541 {
547 if (musb->ops->enable) 542 if (musb->ops->enable)
548 musb->ops->enable(musb); 543 musb->ops->enable(musb);
549 } 544 }
550 545
551 static inline void musb_platform_disable(struct musb *musb) 546 static inline void musb_platform_disable(struct musb *musb)
552 { 547 {
553 if (musb->ops->disable) 548 if (musb->ops->disable)
554 musb->ops->disable(musb); 549 musb->ops->disable(musb);
555 } 550 }
556 551
557 static inline int musb_platform_set_mode(struct musb *musb, u8 mode) 552 static inline int musb_platform_set_mode(struct musb *musb, u8 mode)
558 { 553 {
559 if (!musb->ops->set_mode) 554 if (!musb->ops->set_mode)
560 return 0; 555 return 0;
561 556
562 return musb->ops->set_mode(musb, mode); 557 return musb->ops->set_mode(musb, mode);
563 } 558 }
564 559
565 static inline void musb_platform_try_idle(struct musb *musb, 560 static inline void musb_platform_try_idle(struct musb *musb,
566 unsigned long timeout) 561 unsigned long timeout)
567 { 562 {
568 if (musb->ops->try_idle) 563 if (musb->ops->try_idle)
569 musb->ops->try_idle(musb, timeout); 564 musb->ops->try_idle(musb, timeout);
570 } 565 }
571 566
572 static inline int musb_platform_get_vbus_status(struct musb *musb) 567 static inline int musb_platform_get_vbus_status(struct musb *musb)
573 { 568 {
574 if (!musb->ops->vbus_status) 569 if (!musb->ops->vbus_status)
575 return 0; 570 return 0;
576 571
577 return musb->ops->vbus_status(musb); 572 return musb->ops->vbus_status(musb);
578 } 573 }
579 574
580 static inline int musb_platform_init(struct musb *musb) 575 static inline int musb_platform_init(struct musb *musb)
581 { 576 {
582 if (!musb->ops->init) 577 if (!musb->ops->init)
583 return -EINVAL; 578 return -EINVAL;
584 579
585 return musb->ops->init(musb); 580 return musb->ops->init(musb);
586 } 581 }
587 582
588 static inline int musb_platform_exit(struct musb *musb) 583 static inline int musb_platform_exit(struct musb *musb)
589 { 584 {
590 if (!musb->ops->exit) 585 if (!musb->ops->exit)
591 return -EINVAL; 586 return -EINVAL;
592 587
593 return musb->ops->exit(musb); 588 return musb->ops->exit(musb);
594 } 589 }
595 590
596 #endif /* __MUSB_CORE_H__ */ 591 #endif /* __MUSB_CORE_H__ */
597 592
drivers/usb/musb/musb_dsps.c
1 /* 1 /*
2 * Texas Instruments DSPS platforms "glue layer" 2 * Texas Instruments DSPS platforms "glue layer"
3 * 3 *
4 * Copyright (C) 2012, by Texas Instruments 4 * Copyright (C) 2012, by Texas Instruments
5 * 5 *
6 * Based on the am35x "glue layer" code. 6 * Based on the am35x "glue layer" code.
7 * 7 *
8 * This file is part of the Inventra Controller Driver for Linux. 8 * This file is part of the Inventra Controller Driver for Linux.
9 * 9 *
10 * The Inventra Controller Driver for Linux is free software; you 10 * The Inventra Controller Driver for Linux is free software; you
11 * can redistribute it and/or modify it under the terms of the GNU 11 * can redistribute it and/or modify it under the terms of the GNU
12 * General Public License version 2 as published by the Free Software 12 * General Public License version 2 as published by the Free Software
13 * Foundation. 13 * Foundation.
14 * 14 *
15 * The Inventra Controller Driver for Linux is distributed in 15 * The Inventra Controller Driver for Linux is distributed in
16 * the hope that it will be useful, but WITHOUT ANY WARRANTY; 16 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
17 * without even the implied warranty of MERCHANTABILITY or 17 * without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public 18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
19 * License for more details. 19 * License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with The Inventra Controller Driver for Linux ; if not, 22 * along with The Inventra Controller Driver for Linux ; if not,
23 * write to the Free Software Foundation, Inc., 59 Temple Place, 23 * write to the Free Software Foundation, Inc., 59 Temple Place,
24 * Suite 330, Boston, MA 02111-1307 USA 24 * Suite 330, Boston, MA 02111-1307 USA
25 * 25 *
26 * musb_dsps.c will be a common file for all the TI DSPS platforms 26 * musb_dsps.c will be a common file for all the TI DSPS platforms
27 * such as dm64x, dm36x, dm35x, da8x, am35x and ti81x. 27 * such as dm64x, dm36x, dm35x, da8x, am35x and ti81x.
28 * For now only ti81x is using this; in the future davinci.c, am35x.c 28 * For now only ti81x is using this; in the future davinci.c, am35x.c
29 * and da8xx.c would be merged into this file after testing. 29 * and da8xx.c would be merged into this file after testing.
30 */ 30 */
31 31
32 #include <linux/init.h> 32 #include <linux/init.h>
33 #include <linux/io.h> 33 #include <linux/io.h>
34 #include <linux/err.h> 34 #include <linux/err.h>
35 #include <linux/platform_device.h> 35 #include <linux/platform_device.h>
36 #include <linux/dma-mapping.h> 36 #include <linux/dma-mapping.h>
37 #include <linux/pm_runtime.h> 37 #include <linux/pm_runtime.h>
38 #include <linux/module.h> 38 #include <linux/module.h>
39 39
40 #include <linux/of.h> 40 #include <linux/of.h>
41 #include <linux/of_device.h> 41 #include <linux/of_device.h>
42 #include <linux/of_address.h> 42 #include <linux/of_address.h>
43 43
44 #include <plat/usb.h> 44 #include <plat/usb.h>
45 45
46 #include "musb_core.h" 46 #include "musb_core.h"
47 47
48 /** 48 /**
49 * avoid using musb_readx()/musb_writex() as the glue layer should not be 49 * avoid using musb_readx()/musb_writex() as the glue layer should not be
50 * dependent on musb core layer symbols. 50 * dependent on musb core layer symbols.
51 */ 51 */
52 static inline u8 dsps_readb(const void __iomem *addr, unsigned offset) 52 static inline u8 dsps_readb(const void __iomem *addr, unsigned offset)
53 { return __raw_readb(addr + offset); } 53 { return __raw_readb(addr + offset); }
54 54
55 static inline u32 dsps_readl(const void __iomem *addr, unsigned offset) 55 static inline u32 dsps_readl(const void __iomem *addr, unsigned offset)
56 { return __raw_readl(addr + offset); } 56 { return __raw_readl(addr + offset); }
57 57
58 static inline void dsps_writeb(void __iomem *addr, unsigned offset, u8 data) 58 static inline void dsps_writeb(void __iomem *addr, unsigned offset, u8 data)
59 { __raw_writeb(data, addr + offset); } 59 { __raw_writeb(data, addr + offset); }
60 60
61 static inline void dsps_writel(void __iomem *addr, unsigned offset, u32 data) 61 static inline void dsps_writel(void __iomem *addr, unsigned offset, u32 data)
62 { __raw_writel(data, addr + offset); } 62 { __raw_writel(data, addr + offset); }
63 63
64 /** 64 /**
65 * DSPS musb wrapper register offset. 65 * DSPS musb wrapper register offset.
66 * FIXME: This should be expanded to have all the wrapper registers from TI DSPS 66 * FIXME: This should be expanded to have all the wrapper registers from TI DSPS
67 * musb IPs. 67 * musb IPs.
68 */ 68 */
69 struct dsps_musb_wrapper { 69 struct dsps_musb_wrapper {
70 u16 revision; 70 u16 revision;
71 u16 control; 71 u16 control;
72 u16 status; 72 u16 status;
73 u16 eoi; 73 u16 eoi;
74 u16 epintr_set; 74 u16 epintr_set;
75 u16 epintr_clear; 75 u16 epintr_clear;
76 u16 epintr_status; 76 u16 epintr_status;
77 u16 coreintr_set; 77 u16 coreintr_set;
78 u16 coreintr_clear; 78 u16 coreintr_clear;
79 u16 coreintr_status; 79 u16 coreintr_status;
80 u16 phy_utmi; 80 u16 phy_utmi;
81 u16 mode; 81 u16 mode;
82 82
83 /* bit positions for control */ 83 /* bit positions for control */
84 unsigned reset:5; 84 unsigned reset:5;
85 85
86 /* bit positions for interrupt */ 86 /* bit positions for interrupt */
87 unsigned usb_shift:5; 87 unsigned usb_shift:5;
88 u32 usb_mask; 88 u32 usb_mask;
89 u32 usb_bitmap; 89 u32 usb_bitmap;
90 unsigned drvvbus:5; 90 unsigned drvvbus:5;
91 91
92 unsigned txep_shift:5; 92 unsigned txep_shift:5;
93 u32 txep_mask; 93 u32 txep_mask;
94 u32 txep_bitmap; 94 u32 txep_bitmap;
95 95
96 unsigned rxep_shift:5; 96 unsigned rxep_shift:5;
97 u32 rxep_mask; 97 u32 rxep_mask;
98 u32 rxep_bitmap; 98 u32 rxep_bitmap;
99 99
100 /* bit positions for phy_utmi */ 100 /* bit positions for phy_utmi */
101 unsigned otg_disable:5; 101 unsigned otg_disable:5;
102 102
103 /* bit positions for mode */ 103 /* bit positions for mode */
104 unsigned iddig:5; 104 unsigned iddig:5;
105 /* miscellaneous stuff */ 105 /* miscellaneous stuff */
106 u32 musb_core_offset; 106 u32 musb_core_offset;
107 u8 poll_seconds; 107 u8 poll_seconds;
108 }; 108 };
109 109
110 /** 110 /**
111 * DSPS glue structure. 111 * DSPS glue structure.
112 */ 112 */
113 struct dsps_glue { 113 struct dsps_glue {
114 struct device *dev; 114 struct device *dev;
115 struct platform_device *musb; /* child musb pdev */ 115 struct platform_device *musb; /* child musb pdev */
116 const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */ 116 const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */
117 struct timer_list timer; /* otg_workaround timer */ 117 struct timer_list timer; /* otg_workaround timer */
118 }; 118 };
119 119
120 /** 120 /**
121 * dsps_musb_enable - enable interrupts 121 * dsps_musb_enable - enable interrupts
122 */ 122 */
123 static void dsps_musb_enable(struct musb *musb) 123 static void dsps_musb_enable(struct musb *musb)
124 { 124 {
125 struct device *dev = musb->controller; 125 struct device *dev = musb->controller;
126 struct platform_device *pdev = to_platform_device(dev->parent); 126 struct platform_device *pdev = to_platform_device(dev->parent);
127 struct dsps_glue *glue = platform_get_drvdata(pdev); 127 struct dsps_glue *glue = platform_get_drvdata(pdev);
128 const struct dsps_musb_wrapper *wrp = glue->wrp; 128 const struct dsps_musb_wrapper *wrp = glue->wrp;
129 void __iomem *reg_base = musb->ctrl_base; 129 void __iomem *reg_base = musb->ctrl_base;
130 u32 epmask, coremask; 130 u32 epmask, coremask;
131 131
132 /* Workaround: setup IRQs through both register sets. */ 132 /* Workaround: setup IRQs through both register sets. */
133 epmask = ((musb->epmask & wrp->txep_mask) << wrp->txep_shift) | 133 epmask = ((musb->epmask & wrp->txep_mask) << wrp->txep_shift) |
134 ((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift); 134 ((musb->epmask & wrp->rxep_mask) << wrp->rxep_shift);
135 coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF); 135 coremask = (wrp->usb_bitmap & ~MUSB_INTR_SOF);
136 136
137 dsps_writel(reg_base, wrp->epintr_set, epmask); 137 dsps_writel(reg_base, wrp->epintr_set, epmask);
138 dsps_writel(reg_base, wrp->coreintr_set, coremask); 138 dsps_writel(reg_base, wrp->coreintr_set, coremask);
139 /* Force the DRVVBUS IRQ so we can start polling for ID change. */ 139 /* Force the DRVVBUS IRQ so we can start polling for ID change. */
140 if (is_otg_enabled(musb)) 140 dsps_writel(reg_base, wrp->coreintr_set,
141 dsps_writel(reg_base, wrp->coreintr_set, 141 (1 << wrp->drvvbus) << wrp->usb_shift);
142 (1 << wrp->drvvbus) << wrp->usb_shift);
143 } 142 }
144 143
145 /** 144 /**
146 * dsps_musb_disable - disable HDRC and flush interrupts 145 * dsps_musb_disable - disable HDRC and flush interrupts
147 */ 146 */
148 static void dsps_musb_disable(struct musb *musb) 147 static void dsps_musb_disable(struct musb *musb)
149 { 148 {
150 struct device *dev = musb->controller; 149 struct device *dev = musb->controller;
151 struct platform_device *pdev = to_platform_device(dev->parent); 150 struct platform_device *pdev = to_platform_device(dev->parent);
152 struct dsps_glue *glue = platform_get_drvdata(pdev); 151 struct dsps_glue *glue = platform_get_drvdata(pdev);
153 const struct dsps_musb_wrapper *wrp = glue->wrp; 152 const struct dsps_musb_wrapper *wrp = glue->wrp;
154 void __iomem *reg_base = musb->ctrl_base; 153 void __iomem *reg_base = musb->ctrl_base;
155 154
156 dsps_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); 155 dsps_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
157 dsps_writel(reg_base, wrp->epintr_clear, 156 dsps_writel(reg_base, wrp->epintr_clear,
158 wrp->txep_bitmap | wrp->rxep_bitmap); 157 wrp->txep_bitmap | wrp->rxep_bitmap);
159 dsps_writeb(musb->mregs, MUSB_DEVCTL, 0); 158 dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
160 dsps_writel(reg_base, wrp->eoi, 0); 159 dsps_writel(reg_base, wrp->eoi, 0);
161 } 160 }
162 161
163 static void otg_timer(unsigned long _musb) 162 static void otg_timer(unsigned long _musb)
164 { 163 {
165 struct musb *musb = (void *)_musb; 164 struct musb *musb = (void *)_musb;
166 void __iomem *mregs = musb->mregs; 165 void __iomem *mregs = musb->mregs;
167 struct device *dev = musb->controller; 166 struct device *dev = musb->controller;
168 struct platform_device *pdev = to_platform_device(dev->parent); 167 struct platform_device *pdev = to_platform_device(dev->parent);
169 struct dsps_glue *glue = platform_get_drvdata(pdev); 168 struct dsps_glue *glue = platform_get_drvdata(pdev);
170 const struct dsps_musb_wrapper *wrp = glue->wrp; 169 const struct dsps_musb_wrapper *wrp = glue->wrp;
171 u8 devctl; 170 u8 devctl;
172 unsigned long flags; 171 unsigned long flags;
173 172
174 /* 173 /*
175 * We poll because DSPS IPs won't expose several OTG-critical 174 * We poll because DSPS IPs won't expose several OTG-critical
176 * status change events (from the transceiver) otherwise. 175 * status change events (from the transceiver) otherwise.
177 */ 176 */
178 devctl = dsps_readb(mregs, MUSB_DEVCTL); 177 devctl = dsps_readb(mregs, MUSB_DEVCTL);
179 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, 178 dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
180 otg_state_string(musb->xceiv->state)); 179 otg_state_string(musb->xceiv->state));
181 180
182 spin_lock_irqsave(&musb->lock, flags); 181 spin_lock_irqsave(&musb->lock, flags);
183 switch (musb->xceiv->state) { 182 switch (musb->xceiv->state) {
184 case OTG_STATE_A_WAIT_BCON: 183 case OTG_STATE_A_WAIT_BCON:
185 devctl &= ~MUSB_DEVCTL_SESSION; 184 devctl &= ~MUSB_DEVCTL_SESSION;
186 dsps_writeb(musb->mregs, MUSB_DEVCTL, devctl); 185 dsps_writeb(musb->mregs, MUSB_DEVCTL, devctl);
187 186
188 devctl = dsps_readb(musb->mregs, MUSB_DEVCTL); 187 devctl = dsps_readb(musb->mregs, MUSB_DEVCTL);
189 if (devctl & MUSB_DEVCTL_BDEVICE) { 188 if (devctl & MUSB_DEVCTL_BDEVICE) {
190 musb->xceiv->state = OTG_STATE_B_IDLE; 189 musb->xceiv->state = OTG_STATE_B_IDLE;
191 MUSB_DEV_MODE(musb); 190 MUSB_DEV_MODE(musb);
192 } else { 191 } else {
193 musb->xceiv->state = OTG_STATE_A_IDLE; 192 musb->xceiv->state = OTG_STATE_A_IDLE;
194 MUSB_HST_MODE(musb); 193 MUSB_HST_MODE(musb);
195 } 194 }
196 break; 195 break;
197 case OTG_STATE_A_WAIT_VFALL: 196 case OTG_STATE_A_WAIT_VFALL:
198 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 197 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
199 dsps_writel(musb->ctrl_base, wrp->coreintr_set, 198 dsps_writel(musb->ctrl_base, wrp->coreintr_set,
200 MUSB_INTR_VBUSERROR << wrp->usb_shift); 199 MUSB_INTR_VBUSERROR << wrp->usb_shift);
201 break; 200 break;
202 case OTG_STATE_B_IDLE: 201 case OTG_STATE_B_IDLE:
203 if (!is_peripheral_enabled(musb))
204 break;
205
206 devctl = dsps_readb(mregs, MUSB_DEVCTL); 202 devctl = dsps_readb(mregs, MUSB_DEVCTL);
207 if (devctl & MUSB_DEVCTL_BDEVICE) 203 if (devctl & MUSB_DEVCTL_BDEVICE)
208 mod_timer(&glue->timer, 204 mod_timer(&glue->timer,
209 jiffies + wrp->poll_seconds * HZ); 205 jiffies + wrp->poll_seconds * HZ);
210 else 206 else
211 musb->xceiv->state = OTG_STATE_A_IDLE; 207 musb->xceiv->state = OTG_STATE_A_IDLE;
212 break; 208 break;
213 default: 209 default:
214 break; 210 break;
215 } 211 }
216 spin_unlock_irqrestore(&musb->lock, flags); 212 spin_unlock_irqrestore(&musb->lock, flags);
217 } 213 }
218 214
219 static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout) 215 static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
220 { 216 {
221 struct device *dev = musb->controller; 217 struct device *dev = musb->controller;
222 struct platform_device *pdev = to_platform_device(dev->parent); 218 struct platform_device *pdev = to_platform_device(dev->parent);
223 struct dsps_glue *glue = platform_get_drvdata(pdev); 219 struct dsps_glue *glue = platform_get_drvdata(pdev);
224 static unsigned long last_timer; 220 static unsigned long last_timer;
225 221
226 if (!is_otg_enabled(musb))
227 return;
228
229 if (timeout == 0) 222 if (timeout == 0)
230 timeout = jiffies + msecs_to_jiffies(3); 223 timeout = jiffies + msecs_to_jiffies(3);
231 224
232 /* Never idle if active, or when VBUS timeout is not set as host */ 225 /* Never idle if active, or when VBUS timeout is not set as host */
233 if (musb->is_active || (musb->a_wait_bcon == 0 && 226 if (musb->is_active || (musb->a_wait_bcon == 0 &&
234 musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) { 227 musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
235 dev_dbg(musb->controller, "%s active, deleting timer\n", 228 dev_dbg(musb->controller, "%s active, deleting timer\n",
236 otg_state_string(musb->xceiv->state)); 229 otg_state_string(musb->xceiv->state));
237 del_timer(&glue->timer); 230 del_timer(&glue->timer);
238 last_timer = jiffies; 231 last_timer = jiffies;
239 return; 232 return;
240 } 233 }
241 234
242 if (time_after(last_timer, timeout) && timer_pending(&glue->timer)) { 235 if (time_after(last_timer, timeout) && timer_pending(&glue->timer)) {
243 dev_dbg(musb->controller, 236 dev_dbg(musb->controller,
244 "Longer idle timer already pending, ignoring...\n"); 237 "Longer idle timer already pending, ignoring...\n");
245 return; 238 return;
246 } 239 }
247 last_timer = timeout; 240 last_timer = timeout;
248 241
249 dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n", 242 dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
250 otg_state_string(musb->xceiv->state), 243 otg_state_string(musb->xceiv->state),
251 jiffies_to_msecs(timeout - jiffies)); 244 jiffies_to_msecs(timeout - jiffies));
252 mod_timer(&glue->timer, timeout); 245 mod_timer(&glue->timer, timeout);
253 } 246 }
254 247
255 static irqreturn_t dsps_interrupt(int irq, void *hci) 248 static irqreturn_t dsps_interrupt(int irq, void *hci)
256 { 249 {
257 struct musb *musb = hci; 250 struct musb *musb = hci;
258 void __iomem *reg_base = musb->ctrl_base; 251 void __iomem *reg_base = musb->ctrl_base;
259 struct device *dev = musb->controller; 252 struct device *dev = musb->controller;
260 struct platform_device *pdev = to_platform_device(dev->parent); 253 struct platform_device *pdev = to_platform_device(dev->parent);
261 struct dsps_glue *glue = platform_get_drvdata(pdev); 254 struct dsps_glue *glue = platform_get_drvdata(pdev);
262 const struct dsps_musb_wrapper *wrp = glue->wrp; 255 const struct dsps_musb_wrapper *wrp = glue->wrp;
263 unsigned long flags; 256 unsigned long flags;
264 irqreturn_t ret = IRQ_NONE; 257 irqreturn_t ret = IRQ_NONE;
265 u32 epintr, usbintr; 258 u32 epintr, usbintr;
266 259
267 spin_lock_irqsave(&musb->lock, flags); 260 spin_lock_irqsave(&musb->lock, flags);
268 261
269 /* Get endpoint interrupts */ 262 /* Get endpoint interrupts */
270 epintr = dsps_readl(reg_base, wrp->epintr_status); 263 epintr = dsps_readl(reg_base, wrp->epintr_status);
271 musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift; 264 musb->int_rx = (epintr & wrp->rxep_bitmap) >> wrp->rxep_shift;
272 musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift; 265 musb->int_tx = (epintr & wrp->txep_bitmap) >> wrp->txep_shift;
273 266
274 if (epintr) 267 if (epintr)
275 dsps_writel(reg_base, wrp->epintr_status, epintr); 268 dsps_writel(reg_base, wrp->epintr_status, epintr);
276 269
277 /* Get usb core interrupts */ 270 /* Get usb core interrupts */
278 usbintr = dsps_readl(reg_base, wrp->coreintr_status); 271 usbintr = dsps_readl(reg_base, wrp->coreintr_status);
279 if (!usbintr && !epintr) 272 if (!usbintr && !epintr)
280 goto eoi; 273 goto eoi;
281 274
282 musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift; 275 musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift;
283 if (usbintr) 276 if (usbintr)
284 dsps_writel(reg_base, wrp->coreintr_status, usbintr); 277 dsps_writel(reg_base, wrp->coreintr_status, usbintr);
285 278
286 dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n", 279 dev_dbg(musb->controller, "usbintr (%x) epintr(%x)\n",
287 usbintr, epintr); 280 usbintr, epintr);
288 /* 281 /*
289 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for 282 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
290 * DSPS IP's missing ID change IRQ. We need an ID change IRQ to 283 * DSPS IP's missing ID change IRQ. We need an ID change IRQ to
291 * switch appropriately between halves of the OTG state machine. 284 * switch appropriately between halves of the OTG state machine.
292 * Managing DEVCTL.SESSION per Mentor docs requires that we know its 285 * Managing DEVCTL.SESSION per Mentor docs requires that we know its
293 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set. 286 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
294 * Also, DRVVBUS pulses for SRP (but not at 5V) ... 287 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
295 */ 288 */
296 if ((usbintr & MUSB_INTR_BABBLE) && is_host_enabled(musb)) 289 if (usbintr & MUSB_INTR_BABBLE)
297 pr_info("CAUTION: musb: Babble Interrupt Occurred\n"); 290 pr_info("CAUTION: musb: Babble Interrupt Occurred\n");
298 291
299 if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) { 292 if (usbintr & ((1 << wrp->drvvbus) << wrp->usb_shift)) {
300 int drvvbus = dsps_readl(reg_base, wrp->status); 293 int drvvbus = dsps_readl(reg_base, wrp->status);
301 void __iomem *mregs = musb->mregs; 294 void __iomem *mregs = musb->mregs;
302 u8 devctl = dsps_readb(mregs, MUSB_DEVCTL); 295 u8 devctl = dsps_readb(mregs, MUSB_DEVCTL);
303 int err; 296 int err;
304 297
305 err = is_host_enabled(musb) && (musb->int_usb & 298 err = musb->int_usb & MUSB_INTR_VBUSERROR;
306 MUSB_INTR_VBUSERROR);
307 if (err) { 299 if (err) {
308 /* 300 /*
309 * The Mentor core doesn't debounce VBUS as needed 301 * The Mentor core doesn't debounce VBUS as needed
310 * to cope with device connect current spikes. This 302 * to cope with device connect current spikes. This
311 * means it's not uncommon for bus-powered devices 303 * means it's not uncommon for bus-powered devices
312 * to get VBUS errors during enumeration. 304 * to get VBUS errors during enumeration.
313 * 305 *
314 * This is a workaround, but newer RTL from Mentor 306 * This is a workaround, but newer RTL from Mentor
315 * seems to allow a better one: "re"-starting sessions 307 * seems to allow a better one: "re"-starting sessions
316 * without waiting for VBUS to stop registering in 308 * without waiting for VBUS to stop registering in
317 * devctl. 309 * devctl.
318 */ 310 */
319 musb->int_usb &= ~MUSB_INTR_VBUSERROR; 311 musb->int_usb &= ~MUSB_INTR_VBUSERROR;
320 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 312 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
321 mod_timer(&glue->timer, 313 mod_timer(&glue->timer,
322 jiffies + wrp->poll_seconds * HZ); 314 jiffies + wrp->poll_seconds * HZ);
323 WARNING("VBUS error workaround (delay coming)\n"); 315 WARNING("VBUS error workaround (delay coming)\n");
324 } else if (is_host_enabled(musb) && drvvbus) { 316 } else if (drvvbus) {
325 musb->is_active = 1; 317 musb->is_active = 1;
326 MUSB_HST_MODE(musb); 318 MUSB_HST_MODE(musb);
327 musb->xceiv->otg->default_a = 1; 319 musb->xceiv->otg->default_a = 1;
328 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 320 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
329 del_timer(&glue->timer); 321 del_timer(&glue->timer);
330 } else { 322 } else {
331 musb->is_active = 0; 323 musb->is_active = 0;
332 MUSB_DEV_MODE(musb); 324 MUSB_DEV_MODE(musb);
333 musb->xceiv->otg->default_a = 0; 325 musb->xceiv->otg->default_a = 0;
334 musb->xceiv->state = OTG_STATE_B_IDLE; 326 musb->xceiv->state = OTG_STATE_B_IDLE;
335 } 327 }
336 328
337 /* NOTE: this must complete power-on within 100 ms. */ 329 /* NOTE: this must complete power-on within 100 ms. */
338 dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n", 330 dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
339 drvvbus ? "on" : "off", 331 drvvbus ? "on" : "off",
340 otg_state_string(musb->xceiv->state), 332 otg_state_string(musb->xceiv->state),
341 err ? " ERROR" : "", 333 err ? " ERROR" : "",
342 devctl); 334 devctl);
343 ret = IRQ_HANDLED; 335 ret = IRQ_HANDLED;
344 } 336 }
345 337
346 if (musb->int_tx || musb->int_rx || musb->int_usb) 338 if (musb->int_tx || musb->int_rx || musb->int_usb)
347 ret |= musb_interrupt(musb); 339 ret |= musb_interrupt(musb);
348 340
349 eoi: 341 eoi:
350 /* EOI needs to be written for the IRQ to be re-asserted. */ 342 /* EOI needs to be written for the IRQ to be re-asserted. */
351 if (ret == IRQ_HANDLED || epintr || usbintr) 343 if (ret == IRQ_HANDLED || epintr || usbintr)
352 dsps_writel(reg_base, wrp->eoi, 1); 344 dsps_writel(reg_base, wrp->eoi, 1);
353 345
354 /* Poll for ID change */ 346 /* Poll for ID change */
355 if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE) 347 if (musb->xceiv->state == OTG_STATE_B_IDLE)
356 mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ); 348 mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
357 349
358 spin_unlock_irqrestore(&musb->lock, flags); 350 spin_unlock_irqrestore(&musb->lock, flags);
359 351
360 return ret; 352 return ret;
361 } 353 }
362 354
363 static int dsps_musb_init(struct musb *musb) 355 static int dsps_musb_init(struct musb *musb)
364 { 356 {
365 struct device *dev = musb->controller; 357 struct device *dev = musb->controller;
366 struct musb_hdrc_platform_data *plat = dev->platform_data; 358 struct musb_hdrc_platform_data *plat = dev->platform_data;
367 struct platform_device *pdev = to_platform_device(dev->parent); 359 struct platform_device *pdev = to_platform_device(dev->parent);
368 struct dsps_glue *glue = platform_get_drvdata(pdev); 360 struct dsps_glue *glue = platform_get_drvdata(pdev);
369 const struct dsps_musb_wrapper *wrp = glue->wrp; 361 const struct dsps_musb_wrapper *wrp = glue->wrp;
370 struct omap_musb_board_data *data = plat->board_data; 362 struct omap_musb_board_data *data = plat->board_data;
371 void __iomem *reg_base = musb->ctrl_base; 363 void __iomem *reg_base = musb->ctrl_base;
372 u32 rev, val; 364 u32 rev, val;
373 int status; 365 int status;
374 366
375 /* mentor core register starts at offset of 0x400 from musb base */ 367 /* mentor core register starts at offset of 0x400 from musb base */
376 musb->mregs += wrp->musb_core_offset; 368 musb->mregs += wrp->musb_core_offset;
377 369
378 /* NOP driver needs change if supporting dual instance */ 370 /* NOP driver needs change if supporting dual instance */
379 usb_nop_xceiv_register(); 371 usb_nop_xceiv_register();
380 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 372 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
381 if (IS_ERR_OR_NULL(musb->xceiv)) 373 if (IS_ERR_OR_NULL(musb->xceiv))
382 return -ENODEV; 374 return -ENODEV;
383 375
384 /* Returns zero if e.g. not clocked */ 376 /* Returns zero if e.g. not clocked */
385 rev = dsps_readl(reg_base, wrp->revision); 377 rev = dsps_readl(reg_base, wrp->revision);
386 if (!rev) { 378 if (!rev) {
387 status = -ENODEV; 379 status = -ENODEV;
388 goto err0; 380 goto err0;
389 } 381 }
390 382
391 if (is_host_enabled(musb)) 383 setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
392 setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
393 384
394 /* Reset the musb */ 385 /* Reset the musb */
395 dsps_writel(reg_base, wrp->control, (1 << wrp->reset)); 386 dsps_writel(reg_base, wrp->control, (1 << wrp->reset));
396 387
397 /* Start the on-chip PHY and its PLL. */ 388 /* Start the on-chip PHY and its PLL. */
398 if (data->set_phy_power) 389 if (data->set_phy_power)
399 data->set_phy_power(1); 390 data->set_phy_power(1);
400 391
401 musb->isr = dsps_interrupt; 392 musb->isr = dsps_interrupt;
402 393
403 /* reset the otgdisable bit, needed for host mode to work */ 394 /* reset the otgdisable bit, needed for host mode to work */
404 val = dsps_readl(reg_base, wrp->phy_utmi); 395 val = dsps_readl(reg_base, wrp->phy_utmi);
405 val &= ~(1 << wrp->otg_disable); 396 val &= ~(1 << wrp->otg_disable);
406 dsps_writel(musb->ctrl_base, wrp->phy_utmi, val); 397 dsps_writel(musb->ctrl_base, wrp->phy_utmi, val);
407 398
408 /* clear level interrupt */ 399 /* clear level interrupt */
409 dsps_writel(reg_base, wrp->eoi, 0); 400 dsps_writel(reg_base, wrp->eoi, 0);
410 401
411 return 0; 402 return 0;
412 err0: 403 err0:
413 usb_put_phy(musb->xceiv); 404 usb_put_phy(musb->xceiv);
414 usb_nop_xceiv_unregister(); 405 usb_nop_xceiv_unregister();
415 return status; 406 return status;
416 } 407 }
417 408
418 static int dsps_musb_exit(struct musb *musb) 409 static int dsps_musb_exit(struct musb *musb)
419 { 410 {
420 struct device *dev = musb->controller; 411 struct device *dev = musb->controller;
421 struct musb_hdrc_platform_data *plat = dev->platform_data; 412 struct musb_hdrc_platform_data *plat = dev->platform_data;
422 struct omap_musb_board_data *data = plat->board_data; 413 struct omap_musb_board_data *data = plat->board_data;
423 struct platform_device *pdev = to_platform_device(dev->parent); 414 struct platform_device *pdev = to_platform_device(dev->parent);
424 struct dsps_glue *glue = platform_get_drvdata(pdev); 415 struct dsps_glue *glue = platform_get_drvdata(pdev);
425 416
426 if (is_host_enabled(musb)) 417 del_timer_sync(&glue->timer);
427 del_timer_sync(&glue->timer);
428 418
429 /* Shutdown the on-chip PHY and its PLL. */ 419 /* Shutdown the on-chip PHY and its PLL. */
430 if (data->set_phy_power) 420 if (data->set_phy_power)
431 data->set_phy_power(0); 421 data->set_phy_power(0);
432 422
433 /* NOP driver needs change if supporting dual instance */ 423 /* NOP driver needs change if supporting dual instance */
434 usb_put_phy(musb->xceiv); 424 usb_put_phy(musb->xceiv);
435 usb_nop_xceiv_unregister(); 425 usb_nop_xceiv_unregister();
436 426
437 return 0; 427 return 0;
438 } 428 }
439 429
440 static struct musb_platform_ops dsps_ops = { 430 static struct musb_platform_ops dsps_ops = {
441 .init = dsps_musb_init, 431 .init = dsps_musb_init,
442 .exit = dsps_musb_exit, 432 .exit = dsps_musb_exit,
443 433
444 .enable = dsps_musb_enable, 434 .enable = dsps_musb_enable,
445 .disable = dsps_musb_disable, 435 .disable = dsps_musb_disable,
446 436
447 .try_idle = dsps_musb_try_idle, 437 .try_idle = dsps_musb_try_idle,
448 }; 438 };
449 439
450 static u64 musb_dmamask = DMA_BIT_MASK(32); 440 static u64 musb_dmamask = DMA_BIT_MASK(32);
451 441
452 static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) 442 static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
453 { 443 {
454 struct device *dev = glue->dev; 444 struct device *dev = glue->dev;
455 struct platform_device *pdev = to_platform_device(dev); 445 struct platform_device *pdev = to_platform_device(dev);
456 struct musb_hdrc_platform_data *pdata = dev->platform_data; 446 struct musb_hdrc_platform_data *pdata = dev->platform_data;
457 struct platform_device *musb; 447 struct platform_device *musb;
458 struct resource *res; 448 struct resource *res;
459 struct resource resources[2]; 449 struct resource resources[2];
460 char res_name[10]; 450 char res_name[10];
461 int ret; 451 int ret;
462 452
463 /* get memory resource */ 453 /* get memory resource */
464 sprintf(res_name, "musb%d", id); 454 sprintf(res_name, "musb%d", id);
465 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); 455 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
466 if (!res) { 456 if (!res) {
467 dev_err(dev, "%s get mem resource failed\n", res_name); 457 dev_err(dev, "%s get mem resource failed\n", res_name);
468 ret = -ENODEV; 458 ret = -ENODEV;
469 goto err0; 459 goto err0;
470 } 460 }
471 res->parent = NULL; 461 res->parent = NULL;
472 resources[0] = *res; 462 resources[0] = *res;
473 463
474 /* get irq resource */ 464 /* get irq resource */
475 sprintf(res_name, "musb%d-irq", id); 465 sprintf(res_name, "musb%d-irq", id);
476 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name); 466 res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
477 if (!res) { 467 if (!res) {
478 dev_err(dev, "%s get irq resource failed\n", res_name); 468 dev_err(dev, "%s get irq resource failed\n", res_name);
479 ret = -ENODEV; 469 ret = -ENODEV;
480 goto err0; 470 goto err0;
481 } 471 }
482 strcpy((u8 *)res->name, "mc"); 472 strcpy((u8 *)res->name, "mc");
483 res->parent = NULL; 473 res->parent = NULL;
484 resources[1] = *res; 474 resources[1] = *res;
485 475
486 /* allocate the child platform device */ 476 /* allocate the child platform device */
487 musb = platform_device_alloc("musb-hdrc", -1); 477 musb = platform_device_alloc("musb-hdrc", -1);
488 if (!musb) { 478 if (!musb) {
489 dev_err(dev, "failed to allocate musb device\n"); 479 dev_err(dev, "failed to allocate musb device\n");
490 ret = -ENOMEM; 480 ret = -ENOMEM;
491 goto err0; 481 goto err0;
492 } 482 }
493 483
494 musb->dev.parent = dev; 484 musb->dev.parent = dev;
495 musb->dev.dma_mask = &musb_dmamask; 485 musb->dev.dma_mask = &musb_dmamask;
496 musb->dev.coherent_dma_mask = musb_dmamask; 486 musb->dev.coherent_dma_mask = musb_dmamask;
497 487
498 glue->musb = musb; 488 glue->musb = musb;
499 489
500 pdata->platform_ops = &dsps_ops; 490 pdata->platform_ops = &dsps_ops;
501 491
502 ret = platform_device_add_resources(musb, resources, 2); 492 ret = platform_device_add_resources(musb, resources, 2);
503 if (ret) { 493 if (ret) {
504 dev_err(dev, "failed to add resources\n"); 494 dev_err(dev, "failed to add resources\n");
505 goto err1; 495 goto err1;
506 } 496 }
507 497
508 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 498 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
509 if (ret) { 499 if (ret) {
510 dev_err(dev, "failed to add platform_data\n"); 500 dev_err(dev, "failed to add platform_data\n");
511 goto err1; 501 goto err1;
512 } 502 }
513 503
514 ret = platform_device_add(musb); 504 ret = platform_device_add(musb);
515 if (ret) { 505 if (ret) {
516 dev_err(dev, "failed to register musb device\n"); 506 dev_err(dev, "failed to register musb device\n");
517 goto err1; 507 goto err1;
518 } 508 }
519 509
520 return 0; 510 return 0;
521 511
522 err1: 512 err1:
523 platform_device_put(musb); 513 platform_device_put(musb);
524 err0: 514 err0:
525 return ret; 515 return ret;
526 } 516 }
527 517
528 static void __devexit dsps_delete_musb_pdev(struct dsps_glue *glue) 518 static void __devexit dsps_delete_musb_pdev(struct dsps_glue *glue)
529 { 519 {
530 platform_device_del(glue->musb); 520 platform_device_del(glue->musb);
531 platform_device_put(glue->musb); 521 platform_device_put(glue->musb);
532 } 522 }
533 523
534 static int __devinit dsps_probe(struct platform_device *pdev) 524 static int __devinit dsps_probe(struct platform_device *pdev)
535 { 525 {
536 const struct platform_device_id *id = platform_get_device_id(pdev); 526 const struct platform_device_id *id = platform_get_device_id(pdev);
537 const struct dsps_musb_wrapper *wrp = 527 const struct dsps_musb_wrapper *wrp =
538 (struct dsps_musb_wrapper *)id->driver_data; 528 (struct dsps_musb_wrapper *)id->driver_data;
539 struct dsps_glue *glue; 529 struct dsps_glue *glue;
540 struct resource *iomem; 530 struct resource *iomem;
541 int ret; 531 int ret;
542 532
543 /* allocate glue */ 533 /* allocate glue */
544 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 534 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
545 if (!glue) { 535 if (!glue) {
546 dev_err(&pdev->dev, "unable to allocate glue memory\n"); 536 dev_err(&pdev->dev, "unable to allocate glue memory\n");
547 ret = -ENOMEM; 537 ret = -ENOMEM;
548 goto err0; 538 goto err0;
549 } 539 }
550 540
551 /* get memory resource */ 541 /* get memory resource */
552 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 542 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
553 if (!iomem) { 543 if (!iomem) {
554 dev_err(&pdev->dev, "failed to get usbss mem resourse\n"); 544 dev_err(&pdev->dev, "failed to get usbss mem resourse\n");
555 ret = -ENODEV; 545 ret = -ENODEV;
556 goto err1; 546 goto err1;
557 } 547 }
558 548
559 glue->dev = &pdev->dev; 549 glue->dev = &pdev->dev;
560 550
561 glue->wrp = kmemdup(wrp, sizeof(*wrp), GFP_KERNEL); 551 glue->wrp = kmemdup(wrp, sizeof(*wrp), GFP_KERNEL);
562 if (!glue->wrp) { 552 if (!glue->wrp) {
563 dev_err(&pdev->dev, "failed to duplicate wrapper struct memory\n"); 553 dev_err(&pdev->dev, "failed to duplicate wrapper struct memory\n");
564 ret = -ENOMEM; 554 ret = -ENOMEM;
565 goto err1; 555 goto err1;
566 } 556 }
567 platform_set_drvdata(pdev, glue); 557 platform_set_drvdata(pdev, glue);
568 558
569 /* create the child platform device for the first instance of musb */ 559 /* create the child platform device for the first instance of musb */
570 ret = dsps_create_musb_pdev(glue, 0); 560 ret = dsps_create_musb_pdev(glue, 0);
571 if (ret != 0) { 561 if (ret != 0) {
572 dev_err(&pdev->dev, "failed to create child pdev\n"); 562 dev_err(&pdev->dev, "failed to create child pdev\n");
573 goto err2; 563 goto err2;
574 } 564 }
575 565
576 /* enable the usbss clocks */ 566 /* enable the usbss clocks */
577 pm_runtime_enable(&pdev->dev); 567 pm_runtime_enable(&pdev->dev);
578 568
579 ret = pm_runtime_get_sync(&pdev->dev); 569 ret = pm_runtime_get_sync(&pdev->dev);
580 if (ret < 0) { 570 if (ret < 0) {
581 dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); 571 dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
582 goto err3; 572 goto err3;
583 } 573 }
584 574
585 return 0; 575 return 0;
586 576
587 err3: 577 err3:
588 pm_runtime_disable(&pdev->dev); 578 pm_runtime_disable(&pdev->dev);
589 err2: 579 err2:
590 kfree(glue->wrp); 580 kfree(glue->wrp);
591 err1: 581 err1:
592 kfree(glue); 582 kfree(glue);
593 err0: 583 err0:
594 return ret; 584 return ret;
595 } 585 }
596 static int __devexit dsps_remove(struct platform_device *pdev) 586 static int __devexit dsps_remove(struct platform_device *pdev)
597 { 587 {
598 struct dsps_glue *glue = platform_get_drvdata(pdev); 588 struct dsps_glue *glue = platform_get_drvdata(pdev);
599 589
600 /* delete the child platform device */ 590 /* delete the child platform device */
601 dsps_delete_musb_pdev(glue); 591 dsps_delete_musb_pdev(glue);
602 592
603 /* disable usbss clocks */ 593 /* disable usbss clocks */
604 pm_runtime_put(&pdev->dev); 594 pm_runtime_put(&pdev->dev);
605 pm_runtime_disable(&pdev->dev); 595 pm_runtime_disable(&pdev->dev);
606 kfree(glue->wrp); 596 kfree(glue->wrp);
607 kfree(glue); 597 kfree(glue);
608 return 0; 598 return 0;
609 } 599 }
610 600
611 #ifdef CONFIG_PM_SLEEP 601 #ifdef CONFIG_PM_SLEEP
612 static int dsps_suspend(struct device *dev) 602 static int dsps_suspend(struct device *dev)
613 { 603 {
614 struct musb_hdrc_platform_data *plat = dev->platform_data; 604 struct musb_hdrc_platform_data *plat = dev->platform_data;
615 struct omap_musb_board_data *data = plat->board_data; 605 struct omap_musb_board_data *data = plat->board_data;
616 606
617 /* Shutdown the on-chip PHY and its PLL. */ 607 /* Shutdown the on-chip PHY and its PLL. */
618 if (data->set_phy_power) 608 if (data->set_phy_power)
619 data->set_phy_power(0); 609 data->set_phy_power(0);
620 610
621 return 0; 611 return 0;
622 } 612 }
623 613
624 static int dsps_resume(struct device *dev) 614 static int dsps_resume(struct device *dev)
625 { 615 {
626 struct musb_hdrc_platform_data *plat = dev->platform_data; 616 struct musb_hdrc_platform_data *plat = dev->platform_data;
627 struct omap_musb_board_data *data = plat->board_data; 617 struct omap_musb_board_data *data = plat->board_data;
628 618
629 /* Start the on-chip PHY and its PLL. */ 619 /* Start the on-chip PHY and its PLL. */
630 if (data->set_phy_power) 620 if (data->set_phy_power)
631 data->set_phy_power(1); 621 data->set_phy_power(1);
632 622
633 return 0; 623 return 0;
634 } 624 }
635 #endif 625 #endif
636 626
637 static SIMPLE_DEV_PM_OPS(dsps_pm_ops, dsps_suspend, dsps_resume); 627 static SIMPLE_DEV_PM_OPS(dsps_pm_ops, dsps_suspend, dsps_resume);
638 628
639 static const struct dsps_musb_wrapper ti81xx_driver_data __devinitconst = { 629 static const struct dsps_musb_wrapper ti81xx_driver_data __devinitconst = {
640 .revision = 0x00, 630 .revision = 0x00,
641 .control = 0x14, 631 .control = 0x14,
642 .status = 0x18, 632 .status = 0x18,
643 .eoi = 0x24, 633 .eoi = 0x24,
644 .epintr_set = 0x38, 634 .epintr_set = 0x38,
645 .epintr_clear = 0x40, 635 .epintr_clear = 0x40,
646 .epintr_status = 0x30, 636 .epintr_status = 0x30,
647 .coreintr_set = 0x3c, 637 .coreintr_set = 0x3c,
648 .coreintr_clear = 0x44, 638 .coreintr_clear = 0x44,
649 .coreintr_status = 0x34, 639 .coreintr_status = 0x34,
650 .phy_utmi = 0xe0, 640 .phy_utmi = 0xe0,
651 .mode = 0xe8, 641 .mode = 0xe8,
652 .reset = 0, 642 .reset = 0,
653 .otg_disable = 21, 643 .otg_disable = 21,
654 .iddig = 8, 644 .iddig = 8,
655 .usb_shift = 0, 645 .usb_shift = 0,
656 .usb_mask = 0x1ff, 646 .usb_mask = 0x1ff,
657 .usb_bitmap = (0x1ff << 0), 647 .usb_bitmap = (0x1ff << 0),
658 .drvvbus = 8, 648 .drvvbus = 8,
659 .txep_shift = 0, 649 .txep_shift = 0,
660 .txep_mask = 0xffff, 650 .txep_mask = 0xffff,
661 .txep_bitmap = (0xffff << 0), 651 .txep_bitmap = (0xffff << 0),
662 .rxep_shift = 16, 652 .rxep_shift = 16,
663 .rxep_mask = 0xfffe, 653 .rxep_mask = 0xfffe,
664 .rxep_bitmap = (0xfffe << 16), 654 .rxep_bitmap = (0xfffe << 16),
665 .musb_core_offset = 0x400, 655 .musb_core_offset = 0x400,
666 .poll_seconds = 2, 656 .poll_seconds = 2,
667 }; 657 };
668 658
669 static const struct platform_device_id musb_dsps_id_table[] __devinitconst = { 659 static const struct platform_device_id musb_dsps_id_table[] __devinitconst = {
670 { 660 {
671 .name = "musb-ti81xx", 661 .name = "musb-ti81xx",
672 .driver_data = (kernel_ulong_t) &ti81xx_driver_data, 662 .driver_data = (kernel_ulong_t) &ti81xx_driver_data,
673 }, 663 },
674 { }, /* Terminating Entry */ 664 { }, /* Terminating Entry */
675 }; 665 };
676 MODULE_DEVICE_TABLE(platform, musb_dsps_id_table); 666 MODULE_DEVICE_TABLE(platform, musb_dsps_id_table);
677 667
678 static const struct of_device_id musb_dsps_of_match[] __devinitconst = { 668 static const struct of_device_id musb_dsps_of_match[] __devinitconst = {
679 { .compatible = "musb-ti81xx", }, 669 { .compatible = "musb-ti81xx", },
680 { .compatible = "ti,ti81xx-musb", }, 670 { .compatible = "ti,ti81xx-musb", },
681 { .compatible = "ti,am335x-musb", }, 671 { .compatible = "ti,am335x-musb", },
682 { }, 672 { },
683 }; 673 };
684 MODULE_DEVICE_TABLE(of, musb_dsps_of_match); 674 MODULE_DEVICE_TABLE(of, musb_dsps_of_match);
685 675
686 static struct platform_driver dsps_usbss_driver = { 676 static struct platform_driver dsps_usbss_driver = {
687 .probe = dsps_probe, 677 .probe = dsps_probe,
688 .remove = __devexit_p(dsps_remove), 678 .remove = __devexit_p(dsps_remove),
689 .driver = { 679 .driver = {
690 .name = "musb-dsps", 680 .name = "musb-dsps",
691 .pm = &dsps_pm_ops, 681 .pm = &dsps_pm_ops,
692 .of_match_table = musb_dsps_of_match, 682 .of_match_table = musb_dsps_of_match,
693 }, 683 },
694 .id_table = musb_dsps_id_table, 684 .id_table = musb_dsps_id_table,
695 }; 685 };
696 686
697 MODULE_DESCRIPTION("TI DSPS MUSB Glue Layer"); 687 MODULE_DESCRIPTION("TI DSPS MUSB Glue Layer");
698 MODULE_AUTHOR("Ravi B <ravibabu@ti.com>"); 688 MODULE_AUTHOR("Ravi B <ravibabu@ti.com>");
699 MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>"); 689 MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
700 MODULE_LICENSE("GPL v2"); 690 MODULE_LICENSE("GPL v2");
701 691
702 static int __init dsps_init(void) 692 static int __init dsps_init(void)
703 { 693 {
704 return platform_driver_register(&dsps_usbss_driver); 694 return platform_driver_register(&dsps_usbss_driver);
705 } 695 }
706 subsys_initcall(dsps_init); 696 subsys_initcall(dsps_init);
707 697
708 static void __exit dsps_exit(void) 698 static void __exit dsps_exit(void)
709 { 699 {
710 platform_driver_unregister(&dsps_usbss_driver); 700 platform_driver_unregister(&dsps_usbss_driver);
711 } 701 }
712 module_exit(dsps_exit); 702 module_exit(dsps_exit);
713 703
drivers/usb/musb/musb_gadget.c
1 /* 1 /*
2 * MUSB OTG driver peripheral support 2 * MUSB OTG driver peripheral support
3 * 3 *
4 * Copyright 2005 Mentor Graphics Corporation 4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments 5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation 6 * Copyright (C) 2006-2007 Nokia Corporation
7 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com> 7 * Copyright (C) 2009 MontaVista Software, Inc. <source@mvista.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation. 11 * version 2 as published by the Free Software Foundation.
12 * 12 *
13 * This program is distributed in the hope that it will be useful, but 13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 * 17 *
18 * You should have received a copy of the GNU General Public License 18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software 19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA 21 * 02110-1301 USA
22 * 22 *
23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 23 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 24 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 25 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 29 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 * 33 *
34 */ 34 */
35 35
36 #include <linux/kernel.h> 36 #include <linux/kernel.h>
37 #include <linux/list.h> 37 #include <linux/list.h>
38 #include <linux/timer.h> 38 #include <linux/timer.h>
39 #include <linux/module.h> 39 #include <linux/module.h>
40 #include <linux/smp.h> 40 #include <linux/smp.h>
41 #include <linux/spinlock.h> 41 #include <linux/spinlock.h>
42 #include <linux/delay.h> 42 #include <linux/delay.h>
43 #include <linux/dma-mapping.h> 43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h> 44 #include <linux/slab.h>
45 45
46 #include "musb_core.h" 46 #include "musb_core.h"
47 47
48 48
49 /* MUSB PERIPHERAL status 3-mar-2006: 49 /* MUSB PERIPHERAL status 3-mar-2006:
50 * 50 *
51 * - EP0 seems solid. It passes both USBCV and usbtest control cases. 51 * - EP0 seems solid. It passes both USBCV and usbtest control cases.
52 * Minor glitches: 52 * Minor glitches:
53 * 53 *
54 * + remote wakeup to Linux hosts work, but saw USBCV failures; 54 * + remote wakeup to Linux hosts work, but saw USBCV failures;
55 * in one test run (operator error?) 55 * in one test run (operator error?)
56 * + endpoint halt tests -- in both usbtest and usbcv -- seem 56 * + endpoint halt tests -- in both usbtest and usbcv -- seem
57 * to break when dma is enabled ... is something wrongly 57 * to break when dma is enabled ... is something wrongly
58 * clearing SENDSTALL? 58 * clearing SENDSTALL?
59 * 59 *
60 * - Mass storage behaved ok when last tested. Network traffic patterns 60 * - Mass storage behaved ok when last tested. Network traffic patterns
61 * (with lots of short transfers etc) need retesting; they turn up the 61 * (with lots of short transfers etc) need retesting; they turn up the
62 * worst cases of the DMA, since short packets are typical but are not 62 * worst cases of the DMA, since short packets are typical but are not
63 * required. 63 * required.
64 * 64 *
65 * - TX/IN 65 * - TX/IN
66 * + both pio and dma behave in with network and g_zero tests 66 * + both pio and dma behave in with network and g_zero tests
67 * + no cppi throughput issues other than no-hw-queueing 67 * + no cppi throughput issues other than no-hw-queueing
68 * + failed with FLAT_REG (DaVinci) 68 * + failed with FLAT_REG (DaVinci)
69 * + seems to behave with double buffering, PIO -and- CPPI 69 * + seems to behave with double buffering, PIO -and- CPPI
70 * + with gadgetfs + AIO, requests got lost? 70 * + with gadgetfs + AIO, requests got lost?
71 * 71 *
72 * - RX/OUT 72 * - RX/OUT
73 * + both pio and dma behave in with network and g_zero tests 73 * + both pio and dma behave in with network and g_zero tests
74 * + dma is slow in typical case (short_not_ok is clear) 74 * + dma is slow in typical case (short_not_ok is clear)
75 * + double buffering ok with PIO 75 * + double buffering ok with PIO
76 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes 76 * + double buffering *FAILS* with CPPI, wrong data bytes sometimes
77 * + request lossage observed with gadgetfs 77 * + request lossage observed with gadgetfs
78 * 78 *
79 * - ISO not tested ... might work, but only weakly isochronous 79 * - ISO not tested ... might work, but only weakly isochronous
80 * 80 *
81 * - Gadget driver disabling of softconnect during bind() is ignored; so 81 * - Gadget driver disabling of softconnect during bind() is ignored; so
82 * drivers can't hold off host requests until userspace is ready. 82 * drivers can't hold off host requests until userspace is ready.
83 * (Workaround: they can turn it off later.) 83 * (Workaround: they can turn it off later.)
84 * 84 *
85 * - PORTABILITY (assumes PIO works): 85 * - PORTABILITY (assumes PIO works):
86 * + DaVinci, basically works with cppi dma 86 * + DaVinci, basically works with cppi dma
87 * + OMAP 2430, ditto with mentor dma 87 * + OMAP 2430, ditto with mentor dma
88 * + TUSB 6010, platform-specific dma in the works 88 * + TUSB 6010, platform-specific dma in the works
89 */ 89 */
90 90
91 /* ----------------------------------------------------------------------- */ 91 /* ----------------------------------------------------------------------- */
92 92
93 #define is_buffer_mapped(req) (is_dma_capable() && \ 93 #define is_buffer_mapped(req) (is_dma_capable() && \
94 (req->map_state != UN_MAPPED)) 94 (req->map_state != UN_MAPPED))
95 95
96 /* Maps the buffer to dma */ 96 /* Maps the buffer to dma */
97 97
98 static inline void map_dma_buffer(struct musb_request *request, 98 static inline void map_dma_buffer(struct musb_request *request,
99 struct musb *musb, struct musb_ep *musb_ep) 99 struct musb *musb, struct musb_ep *musb_ep)
100 { 100 {
101 int compatible = true; 101 int compatible = true;
102 struct dma_controller *dma = musb->dma_controller; 102 struct dma_controller *dma = musb->dma_controller;
103 103
104 request->map_state = UN_MAPPED; 104 request->map_state = UN_MAPPED;
105 105
106 if (!is_dma_capable() || !musb_ep->dma) 106 if (!is_dma_capable() || !musb_ep->dma)
107 return; 107 return;
108 108
109 /* Check if DMA engine can handle this request. 109 /* Check if DMA engine can handle this request.
110 * DMA code must reject the USB request explicitly. 110 * DMA code must reject the USB request explicitly.
111 * Default behaviour is to map the request. 111 * Default behaviour is to map the request.
112 */ 112 */
113 if (dma->is_compatible) 113 if (dma->is_compatible)
114 compatible = dma->is_compatible(musb_ep->dma, 114 compatible = dma->is_compatible(musb_ep->dma,
115 musb_ep->packet_sz, request->request.buf, 115 musb_ep->packet_sz, request->request.buf,
116 request->request.length); 116 request->request.length);
117 if (!compatible) 117 if (!compatible)
118 return; 118 return;
119 119
120 if (request->request.dma == DMA_ADDR_INVALID) { 120 if (request->request.dma == DMA_ADDR_INVALID) {
121 request->request.dma = dma_map_single( 121 request->request.dma = dma_map_single(
122 musb->controller, 122 musb->controller,
123 request->request.buf, 123 request->request.buf,
124 request->request.length, 124 request->request.length,
125 request->tx 125 request->tx
126 ? DMA_TO_DEVICE 126 ? DMA_TO_DEVICE
127 : DMA_FROM_DEVICE); 127 : DMA_FROM_DEVICE);
128 request->map_state = MUSB_MAPPED; 128 request->map_state = MUSB_MAPPED;
129 } else { 129 } else {
130 dma_sync_single_for_device(musb->controller, 130 dma_sync_single_for_device(musb->controller,
131 request->request.dma, 131 request->request.dma,
132 request->request.length, 132 request->request.length,
133 request->tx 133 request->tx
134 ? DMA_TO_DEVICE 134 ? DMA_TO_DEVICE
135 : DMA_FROM_DEVICE); 135 : DMA_FROM_DEVICE);
136 request->map_state = PRE_MAPPED; 136 request->map_state = PRE_MAPPED;
137 } 137 }
138 } 138 }
139 139
140 /* Unmap the buffer from dma and maps it back to cpu */ 140 /* Unmap the buffer from dma and maps it back to cpu */
141 static inline void unmap_dma_buffer(struct musb_request *request, 141 static inline void unmap_dma_buffer(struct musb_request *request,
142 struct musb *musb) 142 struct musb *musb)
143 { 143 {
144 if (!is_buffer_mapped(request)) 144 if (!is_buffer_mapped(request))
145 return; 145 return;
146 146
147 if (request->request.dma == DMA_ADDR_INVALID) { 147 if (request->request.dma == DMA_ADDR_INVALID) {
148 dev_vdbg(musb->controller, 148 dev_vdbg(musb->controller,
149 "not unmapping a never mapped buffer\n"); 149 "not unmapping a never mapped buffer\n");
150 return; 150 return;
151 } 151 }
152 if (request->map_state == MUSB_MAPPED) { 152 if (request->map_state == MUSB_MAPPED) {
153 dma_unmap_single(musb->controller, 153 dma_unmap_single(musb->controller,
154 request->request.dma, 154 request->request.dma,
155 request->request.length, 155 request->request.length,
156 request->tx 156 request->tx
157 ? DMA_TO_DEVICE 157 ? DMA_TO_DEVICE
158 : DMA_FROM_DEVICE); 158 : DMA_FROM_DEVICE);
159 request->request.dma = DMA_ADDR_INVALID; 159 request->request.dma = DMA_ADDR_INVALID;
160 } else { /* PRE_MAPPED */ 160 } else { /* PRE_MAPPED */
161 dma_sync_single_for_cpu(musb->controller, 161 dma_sync_single_for_cpu(musb->controller,
162 request->request.dma, 162 request->request.dma,
163 request->request.length, 163 request->request.length,
164 request->tx 164 request->tx
165 ? DMA_TO_DEVICE 165 ? DMA_TO_DEVICE
166 : DMA_FROM_DEVICE); 166 : DMA_FROM_DEVICE);
167 } 167 }
168 request->map_state = UN_MAPPED; 168 request->map_state = UN_MAPPED;
169 } 169 }
170 170
171 /* 171 /*
172 * Immediately complete a request. 172 * Immediately complete a request.
173 * 173 *
174 * @param request the request to complete 174 * @param request the request to complete
175 * @param status the status to complete the request with 175 * @param status the status to complete the request with
176 * Context: controller locked, IRQs blocked. 176 * Context: controller locked, IRQs blocked.
177 */ 177 */
178 void musb_g_giveback( 178 void musb_g_giveback(
179 struct musb_ep *ep, 179 struct musb_ep *ep,
180 struct usb_request *request, 180 struct usb_request *request,
181 int status) 181 int status)
182 __releases(ep->musb->lock) 182 __releases(ep->musb->lock)
183 __acquires(ep->musb->lock) 183 __acquires(ep->musb->lock)
184 { 184 {
185 struct musb_request *req; 185 struct musb_request *req;
186 struct musb *musb; 186 struct musb *musb;
187 int busy = ep->busy; 187 int busy = ep->busy;
188 188
189 req = to_musb_request(request); 189 req = to_musb_request(request);
190 190
191 list_del(&req->list); 191 list_del(&req->list);
192 if (req->request.status == -EINPROGRESS) 192 if (req->request.status == -EINPROGRESS)
193 req->request.status = status; 193 req->request.status = status;
194 musb = req->musb; 194 musb = req->musb;
195 195
196 ep->busy = 1; 196 ep->busy = 1;
197 spin_unlock(&musb->lock); 197 spin_unlock(&musb->lock);
198 unmap_dma_buffer(req, musb); 198 unmap_dma_buffer(req, musb);
199 if (request->status == 0) 199 if (request->status == 0)
200 dev_dbg(musb->controller, "%s done request %p, %d/%d\n", 200 dev_dbg(musb->controller, "%s done request %p, %d/%d\n",
201 ep->end_point.name, request, 201 ep->end_point.name, request,
202 req->request.actual, req->request.length); 202 req->request.actual, req->request.length);
203 else 203 else
204 dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n", 204 dev_dbg(musb->controller, "%s request %p, %d/%d fault %d\n",
205 ep->end_point.name, request, 205 ep->end_point.name, request,
206 req->request.actual, req->request.length, 206 req->request.actual, req->request.length,
207 request->status); 207 request->status);
208 req->request.complete(&req->ep->end_point, &req->request); 208 req->request.complete(&req->ep->end_point, &req->request);
209 spin_lock(&musb->lock); 209 spin_lock(&musb->lock);
210 ep->busy = busy; 210 ep->busy = busy;
211 } 211 }
212 212
213 /* ----------------------------------------------------------------------- */ 213 /* ----------------------------------------------------------------------- */
214 214
215 /* 215 /*
216 * Abort requests queued to an endpoint using the status. Synchronous. 216 * Abort requests queued to an endpoint using the status. Synchronous.
217 * caller locked controller and blocked irqs, and selected this ep. 217 * caller locked controller and blocked irqs, and selected this ep.
218 */ 218 */
219 static void nuke(struct musb_ep *ep, const int status) 219 static void nuke(struct musb_ep *ep, const int status)
220 { 220 {
221 struct musb *musb = ep->musb; 221 struct musb *musb = ep->musb;
222 struct musb_request *req = NULL; 222 struct musb_request *req = NULL;
223 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs; 223 void __iomem *epio = ep->musb->endpoints[ep->current_epnum].regs;
224 224
225 ep->busy = 1; 225 ep->busy = 1;
226 226
227 if (is_dma_capable() && ep->dma) { 227 if (is_dma_capable() && ep->dma) {
228 struct dma_controller *c = ep->musb->dma_controller; 228 struct dma_controller *c = ep->musb->dma_controller;
229 int value; 229 int value;
230 230
231 if (ep->is_in) { 231 if (ep->is_in) {
232 /* 232 /*
233 * The programming guide says that we must not clear 233 * The programming guide says that we must not clear
234 * the DMAMODE bit before DMAENAB, so we only 234 * the DMAMODE bit before DMAENAB, so we only
235 * clear it in the second write... 235 * clear it in the second write...
236 */ 236 */
237 musb_writew(epio, MUSB_TXCSR, 237 musb_writew(epio, MUSB_TXCSR,
238 MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO); 238 MUSB_TXCSR_DMAMODE | MUSB_TXCSR_FLUSHFIFO);
239 musb_writew(epio, MUSB_TXCSR, 239 musb_writew(epio, MUSB_TXCSR,
240 0 | MUSB_TXCSR_FLUSHFIFO); 240 0 | MUSB_TXCSR_FLUSHFIFO);
241 } else { 241 } else {
242 musb_writew(epio, MUSB_RXCSR, 242 musb_writew(epio, MUSB_RXCSR,
243 0 | MUSB_RXCSR_FLUSHFIFO); 243 0 | MUSB_RXCSR_FLUSHFIFO);
244 musb_writew(epio, MUSB_RXCSR, 244 musb_writew(epio, MUSB_RXCSR,
245 0 | MUSB_RXCSR_FLUSHFIFO); 245 0 | MUSB_RXCSR_FLUSHFIFO);
246 } 246 }
247 247
248 value = c->channel_abort(ep->dma); 248 value = c->channel_abort(ep->dma);
249 dev_dbg(musb->controller, "%s: abort DMA --> %d\n", 249 dev_dbg(musb->controller, "%s: abort DMA --> %d\n",
250 ep->name, value); 250 ep->name, value);
251 c->channel_release(ep->dma); 251 c->channel_release(ep->dma);
252 ep->dma = NULL; 252 ep->dma = NULL;
253 } 253 }
254 254
255 while (!list_empty(&ep->req_list)) { 255 while (!list_empty(&ep->req_list)) {
256 req = list_first_entry(&ep->req_list, struct musb_request, list); 256 req = list_first_entry(&ep->req_list, struct musb_request, list);
257 musb_g_giveback(ep, &req->request, status); 257 musb_g_giveback(ep, &req->request, status);
258 } 258 }
259 } 259 }
260 260
261 /* ----------------------------------------------------------------------- */ 261 /* ----------------------------------------------------------------------- */
262 262
263 /* Data transfers - pure PIO, pure DMA, or mixed mode */ 263 /* Data transfers - pure PIO, pure DMA, or mixed mode */
264 264
265 /* 265 /*
266 * This assumes the separate CPPI engine is responding to DMA requests 266 * This assumes the separate CPPI engine is responding to DMA requests
267 * from the usb core ... sequenced a bit differently from mentor dma. 267 * from the usb core ... sequenced a bit differently from mentor dma.
268 */ 268 */
269 269
270 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep) 270 static inline int max_ep_writesize(struct musb *musb, struct musb_ep *ep)
271 { 271 {
272 if (can_bulk_split(musb, ep->type)) 272 if (can_bulk_split(musb, ep->type))
273 return ep->hw_ep->max_packet_sz_tx; 273 return ep->hw_ep->max_packet_sz_tx;
274 else 274 else
275 return ep->packet_sz; 275 return ep->packet_sz;
276 } 276 }
277 277
278 278
279 #ifdef CONFIG_USB_INVENTRA_DMA 279 #ifdef CONFIG_USB_INVENTRA_DMA
280 280
281 /* Peripheral tx (IN) using Mentor DMA works as follows: 281 /* Peripheral tx (IN) using Mentor DMA works as follows:
282 Only mode 0 is used for transfers <= wPktSize, 282 Only mode 0 is used for transfers <= wPktSize,
283 mode 1 is used for larger transfers, 283 mode 1 is used for larger transfers,
284 284
285 One of the following happens: 285 One of the following happens:
286 - Host sends IN token which causes an endpoint interrupt 286 - Host sends IN token which causes an endpoint interrupt
287 -> TxAvail 287 -> TxAvail
288 -> if DMA is currently busy, exit. 288 -> if DMA is currently busy, exit.
289 -> if queue is non-empty, txstate(). 289 -> if queue is non-empty, txstate().
290 290
291 - Request is queued by the gadget driver. 291 - Request is queued by the gadget driver.
292 -> if queue was previously empty, txstate() 292 -> if queue was previously empty, txstate()
293 293
294 txstate() 294 txstate()
295 -> start 295 -> start
296 /\ -> setup DMA 296 /\ -> setup DMA
297 | (data is transferred to the FIFO, then sent out when 297 | (data is transferred to the FIFO, then sent out when
298 | IN token(s) are recd from Host. 298 | IN token(s) are recd from Host.
299 | -> DMA interrupt on completion 299 | -> DMA interrupt on completion
300 | calls TxAvail. 300 | calls TxAvail.
301 | -> stop DMA, ~DMAENAB, 301 | -> stop DMA, ~DMAENAB,
302 | -> set TxPktRdy for last short pkt or zlp 302 | -> set TxPktRdy for last short pkt or zlp
303 | -> Complete Request 303 | -> Complete Request
304 | -> Continue next request (call txstate) 304 | -> Continue next request (call txstate)
305 |___________________________________| 305 |___________________________________|
306 306
307 * Non-Mentor DMA engines can of course work differently, such as by 307 * Non-Mentor DMA engines can of course work differently, such as by
308 * upleveling from irq-per-packet to irq-per-buffer. 308 * upleveling from irq-per-packet to irq-per-buffer.
309 */ 309 */
310 310
311 #endif 311 #endif
312 312
313 /* 313 /*
314 * An endpoint is transmitting data. This can be called either from 314 * An endpoint is transmitting data. This can be called either from
315 * the IRQ routine or from ep.queue() to kickstart a request on an 315 * the IRQ routine or from ep.queue() to kickstart a request on an
316 * endpoint. 316 * endpoint.
317 * 317 *
318 * Context: controller locked, IRQs blocked, endpoint selected 318 * Context: controller locked, IRQs blocked, endpoint selected
319 */ 319 */
320 static void txstate(struct musb *musb, struct musb_request *req) 320 static void txstate(struct musb *musb, struct musb_request *req)
321 { 321 {
322 u8 epnum = req->epnum; 322 u8 epnum = req->epnum;
323 struct musb_ep *musb_ep; 323 struct musb_ep *musb_ep;
324 void __iomem *epio = musb->endpoints[epnum].regs; 324 void __iomem *epio = musb->endpoints[epnum].regs;
325 struct usb_request *request; 325 struct usb_request *request;
326 u16 fifo_count = 0, csr; 326 u16 fifo_count = 0, csr;
327 int use_dma = 0; 327 int use_dma = 0;
328 328
329 musb_ep = req->ep; 329 musb_ep = req->ep;
330 330
331 /* Check if EP is disabled */ 331 /* Check if EP is disabled */
332 if (!musb_ep->desc) { 332 if (!musb_ep->desc) {
333 dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", 333 dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
334 musb_ep->end_point.name); 334 musb_ep->end_point.name);
335 return; 335 return;
336 } 336 }
337 337
338 /* we shouldn't get here while DMA is active ... but we do ... */ 338 /* we shouldn't get here while DMA is active ... but we do ... */
339 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 339 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
340 dev_dbg(musb->controller, "dma pending...\n"); 340 dev_dbg(musb->controller, "dma pending...\n");
341 return; 341 return;
342 } 342 }
343 343
344 /* read TXCSR before */ 344 /* read TXCSR before */
345 csr = musb_readw(epio, MUSB_TXCSR); 345 csr = musb_readw(epio, MUSB_TXCSR);
346 346
347 request = &req->request; 347 request = &req->request;
348 fifo_count = min(max_ep_writesize(musb, musb_ep), 348 fifo_count = min(max_ep_writesize(musb, musb_ep),
349 (int)(request->length - request->actual)); 349 (int)(request->length - request->actual));
350 350
351 if (csr & MUSB_TXCSR_TXPKTRDY) { 351 if (csr & MUSB_TXCSR_TXPKTRDY) {
352 dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n", 352 dev_dbg(musb->controller, "%s old packet still ready , txcsr %03x\n",
353 musb_ep->end_point.name, csr); 353 musb_ep->end_point.name, csr);
354 return; 354 return;
355 } 355 }
356 356
357 if (csr & MUSB_TXCSR_P_SENDSTALL) { 357 if (csr & MUSB_TXCSR_P_SENDSTALL) {
358 dev_dbg(musb->controller, "%s stalling, txcsr %03x\n", 358 dev_dbg(musb->controller, "%s stalling, txcsr %03x\n",
359 musb_ep->end_point.name, csr); 359 musb_ep->end_point.name, csr);
360 return; 360 return;
361 } 361 }
362 362
363 dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n", 363 dev_dbg(musb->controller, "hw_ep%d, maxpacket %d, fifo count %d, txcsr %03x\n",
364 epnum, musb_ep->packet_sz, fifo_count, 364 epnum, musb_ep->packet_sz, fifo_count,
365 csr); 365 csr);
366 366
367 #ifndef CONFIG_MUSB_PIO_ONLY 367 #ifndef CONFIG_MUSB_PIO_ONLY
368 if (is_buffer_mapped(req)) { 368 if (is_buffer_mapped(req)) {
369 struct dma_controller *c = musb->dma_controller; 369 struct dma_controller *c = musb->dma_controller;
370 size_t request_size; 370 size_t request_size;
371 371
372 /* setup DMA, then program endpoint CSR */ 372 /* setup DMA, then program endpoint CSR */
373 request_size = min_t(size_t, request->length - request->actual, 373 request_size = min_t(size_t, request->length - request->actual,
374 musb_ep->dma->max_len); 374 musb_ep->dma->max_len);
375 375
376 use_dma = (request->dma != DMA_ADDR_INVALID && request_size); 376 use_dma = (request->dma != DMA_ADDR_INVALID && request_size);
377 377
378 /* MUSB_TXCSR_P_ISO is still set correctly */ 378 /* MUSB_TXCSR_P_ISO is still set correctly */
379 379
380 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) 380 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
381 { 381 {
382 if (request_size < musb_ep->packet_sz) 382 if (request_size < musb_ep->packet_sz)
383 musb_ep->dma->desired_mode = 0; 383 musb_ep->dma->desired_mode = 0;
384 else 384 else
385 musb_ep->dma->desired_mode = 1; 385 musb_ep->dma->desired_mode = 1;
386 386
387 use_dma = use_dma && c->channel_program( 387 use_dma = use_dma && c->channel_program(
388 musb_ep->dma, musb_ep->packet_sz, 388 musb_ep->dma, musb_ep->packet_sz,
389 musb_ep->dma->desired_mode, 389 musb_ep->dma->desired_mode,
390 request->dma + request->actual, request_size); 390 request->dma + request->actual, request_size);
391 if (use_dma) { 391 if (use_dma) {
392 if (musb_ep->dma->desired_mode == 0) { 392 if (musb_ep->dma->desired_mode == 0) {
393 /* 393 /*
394 * We must not clear the DMAMODE bit 394 * We must not clear the DMAMODE bit
395 * before the DMAENAB bit -- and the 395 * before the DMAENAB bit -- and the
396 * latter doesn't always get cleared 396 * latter doesn't always get cleared
397 * before we get here... 397 * before we get here...
398 */ 398 */
399 csr &= ~(MUSB_TXCSR_AUTOSET 399 csr &= ~(MUSB_TXCSR_AUTOSET
400 | MUSB_TXCSR_DMAENAB); 400 | MUSB_TXCSR_DMAENAB);
401 musb_writew(epio, MUSB_TXCSR, csr 401 musb_writew(epio, MUSB_TXCSR, csr
402 | MUSB_TXCSR_P_WZC_BITS); 402 | MUSB_TXCSR_P_WZC_BITS);
403 csr &= ~MUSB_TXCSR_DMAMODE; 403 csr &= ~MUSB_TXCSR_DMAMODE;
404 csr |= (MUSB_TXCSR_DMAENAB | 404 csr |= (MUSB_TXCSR_DMAENAB |
405 MUSB_TXCSR_MODE); 405 MUSB_TXCSR_MODE);
406 /* against programming guide */ 406 /* against programming guide */
407 } else { 407 } else {
408 csr |= (MUSB_TXCSR_DMAENAB 408 csr |= (MUSB_TXCSR_DMAENAB
409 | MUSB_TXCSR_DMAMODE 409 | MUSB_TXCSR_DMAMODE
410 | MUSB_TXCSR_MODE); 410 | MUSB_TXCSR_MODE);
411 if (!musb_ep->hb_mult) 411 if (!musb_ep->hb_mult)
412 csr |= MUSB_TXCSR_AUTOSET; 412 csr |= MUSB_TXCSR_AUTOSET;
413 } 413 }
414 csr &= ~MUSB_TXCSR_P_UNDERRUN; 414 csr &= ~MUSB_TXCSR_P_UNDERRUN;
415 415
416 musb_writew(epio, MUSB_TXCSR, csr); 416 musb_writew(epio, MUSB_TXCSR, csr);
417 } 417 }
418 } 418 }
419 419
420 #elif defined(CONFIG_USB_TI_CPPI_DMA) 420 #elif defined(CONFIG_USB_TI_CPPI_DMA)
421 /* program endpoint CSR first, then setup DMA */ 421 /* program endpoint CSR first, then setup DMA */
422 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 422 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
423 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE | 423 csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
424 MUSB_TXCSR_MODE; 424 MUSB_TXCSR_MODE;
425 musb_writew(epio, MUSB_TXCSR, 425 musb_writew(epio, MUSB_TXCSR,
426 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN) 426 (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
427 | csr); 427 | csr);
428 428
429 /* ensure writebuffer is empty */ 429 /* ensure writebuffer is empty */
430 csr = musb_readw(epio, MUSB_TXCSR); 430 csr = musb_readw(epio, MUSB_TXCSR);
431 431
432 /* NOTE host side sets DMAENAB later than this; both are 432 /* NOTE host side sets DMAENAB later than this; both are
433 * OK since the transfer dma glue (between CPPI and Mentor 433 * OK since the transfer dma glue (between CPPI and Mentor
434 * fifos) just tells CPPI it could start. Data only moves 434 * fifos) just tells CPPI it could start. Data only moves
435 * to the USB TX fifo when both fifos are ready. 435 * to the USB TX fifo when both fifos are ready.
436 */ 436 */
437 437
438 /* "mode" is irrelevant here; handle terminating ZLPs like 438 /* "mode" is irrelevant here; handle terminating ZLPs like
439 * PIO does, since the hardware RNDIS mode seems unreliable 439 * PIO does, since the hardware RNDIS mode seems unreliable
440 * except for the last-packet-is-already-short case. 440 * except for the last-packet-is-already-short case.
441 */ 441 */
442 use_dma = use_dma && c->channel_program( 442 use_dma = use_dma && c->channel_program(
443 musb_ep->dma, musb_ep->packet_sz, 443 musb_ep->dma, musb_ep->packet_sz,
444 0, 444 0,
445 request->dma + request->actual, 445 request->dma + request->actual,
446 request_size); 446 request_size);
447 if (!use_dma) { 447 if (!use_dma) {
448 c->channel_release(musb_ep->dma); 448 c->channel_release(musb_ep->dma);
449 musb_ep->dma = NULL; 449 musb_ep->dma = NULL;
450 csr &= ~MUSB_TXCSR_DMAENAB; 450 csr &= ~MUSB_TXCSR_DMAENAB;
451 musb_writew(epio, MUSB_TXCSR, csr); 451 musb_writew(epio, MUSB_TXCSR, csr);
452 /* invariant: prequest->buf is non-null */ 452 /* invariant: prequest->buf is non-null */
453 } 453 }
454 #elif defined(CONFIG_USB_TUSB_OMAP_DMA) 454 #elif defined(CONFIG_USB_TUSB_OMAP_DMA)
455 use_dma = use_dma && c->channel_program( 455 use_dma = use_dma && c->channel_program(
456 musb_ep->dma, musb_ep->packet_sz, 456 musb_ep->dma, musb_ep->packet_sz,
457 request->zero, 457 request->zero,
458 request->dma + request->actual, 458 request->dma + request->actual,
459 request_size); 459 request_size);
460 #endif 460 #endif
461 } 461 }
462 #endif 462 #endif
463 463
464 if (!use_dma) { 464 if (!use_dma) {
465 /* 465 /*
466 * Unmap the dma buffer back to cpu if dma channel 466 * Unmap the dma buffer back to cpu if dma channel
467 * programming fails 467 * programming fails
468 */ 468 */
469 unmap_dma_buffer(req, musb); 469 unmap_dma_buffer(req, musb);
470 470
471 musb_write_fifo(musb_ep->hw_ep, fifo_count, 471 musb_write_fifo(musb_ep->hw_ep, fifo_count,
472 (u8 *) (request->buf + request->actual)); 472 (u8 *) (request->buf + request->actual));
473 request->actual += fifo_count; 473 request->actual += fifo_count;
474 csr |= MUSB_TXCSR_TXPKTRDY; 474 csr |= MUSB_TXCSR_TXPKTRDY;
475 csr &= ~MUSB_TXCSR_P_UNDERRUN; 475 csr &= ~MUSB_TXCSR_P_UNDERRUN;
476 musb_writew(epio, MUSB_TXCSR, csr); 476 musb_writew(epio, MUSB_TXCSR, csr);
477 } 477 }
478 478
479 /* host may already have the data when this message shows... */ 479 /* host may already have the data when this message shows... */
480 dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n", 480 dev_dbg(musb->controller, "%s TX/IN %s len %d/%d, txcsr %04x, fifo %d/%d\n",
481 musb_ep->end_point.name, use_dma ? "dma" : "pio", 481 musb_ep->end_point.name, use_dma ? "dma" : "pio",
482 request->actual, request->length, 482 request->actual, request->length,
483 musb_readw(epio, MUSB_TXCSR), 483 musb_readw(epio, MUSB_TXCSR),
484 fifo_count, 484 fifo_count,
485 musb_readw(epio, MUSB_TXMAXP)); 485 musb_readw(epio, MUSB_TXMAXP));
486 } 486 }
487 487
488 /* 488 /*
489 * FIFO state update (e.g. data ready). 489 * FIFO state update (e.g. data ready).
490 * Called from IRQ, with controller locked. 490 * Called from IRQ, with controller locked.
491 */ 491 */
492 void musb_g_tx(struct musb *musb, u8 epnum) 492 void musb_g_tx(struct musb *musb, u8 epnum)
493 { 493 {
494 u16 csr; 494 u16 csr;
495 struct musb_request *req; 495 struct musb_request *req;
496 struct usb_request *request; 496 struct usb_request *request;
497 u8 __iomem *mbase = musb->mregs; 497 u8 __iomem *mbase = musb->mregs;
498 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in; 498 struct musb_ep *musb_ep = &musb->endpoints[epnum].ep_in;
499 void __iomem *epio = musb->endpoints[epnum].regs; 499 void __iomem *epio = musb->endpoints[epnum].regs;
500 struct dma_channel *dma; 500 struct dma_channel *dma;
501 501
502 musb_ep_select(mbase, epnum); 502 musb_ep_select(mbase, epnum);
503 req = next_request(musb_ep); 503 req = next_request(musb_ep);
504 request = &req->request; 504 request = &req->request;
505 505
506 csr = musb_readw(epio, MUSB_TXCSR); 506 csr = musb_readw(epio, MUSB_TXCSR);
507 dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr); 507 dev_dbg(musb->controller, "<== %s, txcsr %04x\n", musb_ep->end_point.name, csr);
508 508
509 dma = is_dma_capable() ? musb_ep->dma : NULL; 509 dma = is_dma_capable() ? musb_ep->dma : NULL;
510 510
511 /* 511 /*
512 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX 512 * REVISIT: for high bandwidth, MUSB_TXCSR_P_INCOMPTX
513 * probably rates reporting as a host error. 513 * probably rates reporting as a host error.
514 */ 514 */
515 if (csr & MUSB_TXCSR_P_SENTSTALL) { 515 if (csr & MUSB_TXCSR_P_SENTSTALL) {
516 csr |= MUSB_TXCSR_P_WZC_BITS; 516 csr |= MUSB_TXCSR_P_WZC_BITS;
517 csr &= ~MUSB_TXCSR_P_SENTSTALL; 517 csr &= ~MUSB_TXCSR_P_SENTSTALL;
518 musb_writew(epio, MUSB_TXCSR, csr); 518 musb_writew(epio, MUSB_TXCSR, csr);
519 return; 519 return;
520 } 520 }
521 521
522 if (csr & MUSB_TXCSR_P_UNDERRUN) { 522 if (csr & MUSB_TXCSR_P_UNDERRUN) {
523 /* We NAKed, no big deal... little reason to care. */ 523 /* We NAKed, no big deal... little reason to care. */
524 csr |= MUSB_TXCSR_P_WZC_BITS; 524 csr |= MUSB_TXCSR_P_WZC_BITS;
525 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY); 525 csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
526 musb_writew(epio, MUSB_TXCSR, csr); 526 musb_writew(epio, MUSB_TXCSR, csr);
527 dev_vdbg(musb->controller, "underrun on ep%d, req %p\n", 527 dev_vdbg(musb->controller, "underrun on ep%d, req %p\n",
528 epnum, request); 528 epnum, request);
529 } 529 }
530 530
531 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 531 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
532 /* 532 /*
533 * SHOULD NOT HAPPEN... but it has with CPPI, after 533 * SHOULD NOT HAPPEN... but it has with CPPI, after
534 * changing SENDSTALL (and other cases); harmless? 534 * changing SENDSTALL (and other cases); harmless?
535 */ 535 */
536 dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name); 536 dev_dbg(musb->controller, "%s dma still busy?\n", musb_ep->end_point.name);
537 return; 537 return;
538 } 538 }
539 539
540 if (request) { 540 if (request) {
541 u8 is_dma = 0; 541 u8 is_dma = 0;
542 542
543 if (dma && (csr & MUSB_TXCSR_DMAENAB)) { 543 if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
544 is_dma = 1; 544 is_dma = 1;
545 csr |= MUSB_TXCSR_P_WZC_BITS; 545 csr |= MUSB_TXCSR_P_WZC_BITS;
546 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | 546 csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
547 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); 547 MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
548 musb_writew(epio, MUSB_TXCSR, csr); 548 musb_writew(epio, MUSB_TXCSR, csr);
549 /* Ensure writebuffer is empty. */ 549 /* Ensure writebuffer is empty. */
550 csr = musb_readw(epio, MUSB_TXCSR); 550 csr = musb_readw(epio, MUSB_TXCSR);
551 request->actual += musb_ep->dma->actual_len; 551 request->actual += musb_ep->dma->actual_len;
552 dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n", 552 dev_dbg(musb->controller, "TXCSR%d %04x, DMA off, len %zu, req %p\n",
553 epnum, csr, musb_ep->dma->actual_len, request); 553 epnum, csr, musb_ep->dma->actual_len, request);
554 } 554 }
555 555
556 /* 556 /*
557 * First, maybe a terminating short packet. Some DMA 557 * First, maybe a terminating short packet. Some DMA
558 * engines might handle this by themselves. 558 * engines might handle this by themselves.
559 */ 559 */
560 if ((request->zero && request->length 560 if ((request->zero && request->length
561 && (request->length % musb_ep->packet_sz == 0) 561 && (request->length % musb_ep->packet_sz == 0)
562 && (request->actual == request->length)) 562 && (request->actual == request->length))
563 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA) 563 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_UX500_DMA)
564 || (is_dma && (!dma->desired_mode || 564 || (is_dma && (!dma->desired_mode ||
565 (request->actual & 565 (request->actual &
566 (musb_ep->packet_sz - 1)))) 566 (musb_ep->packet_sz - 1))))
567 #endif 567 #endif
568 ) { 568 ) {
569 /* 569 /*
570 * On DMA completion, FIFO may not be 570 * On DMA completion, FIFO may not be
571 * available yet... 571 * available yet...
572 */ 572 */
573 if (csr & MUSB_TXCSR_TXPKTRDY) 573 if (csr & MUSB_TXCSR_TXPKTRDY)
574 return; 574 return;
575 575
576 dev_dbg(musb->controller, "sending zero pkt\n"); 576 dev_dbg(musb->controller, "sending zero pkt\n");
577 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE 577 musb_writew(epio, MUSB_TXCSR, MUSB_TXCSR_MODE
578 | MUSB_TXCSR_TXPKTRDY); 578 | MUSB_TXCSR_TXPKTRDY);
579 request->zero = 0; 579 request->zero = 0;
580 } 580 }
581 581
582 if (request->actual == request->length) { 582 if (request->actual == request->length) {
583 musb_g_giveback(musb_ep, request, 0); 583 musb_g_giveback(musb_ep, request, 0);
584 /* 584 /*
585 * In the giveback function the MUSB lock is 585 * In the giveback function the MUSB lock is
586 * released and acquired after some time. During 586 * released and acquired after some time. During
587 * this time period the INDEX register could get 587 * this time period the INDEX register could get
588 * changed by the gadget_queue function especially 588 * changed by the gadget_queue function especially
589 * on SMP systems. Reselect the INDEX to be sure 589 * on SMP systems. Reselect the INDEX to be sure
590 * we are reading/modifying the right registers 590 * we are reading/modifying the right registers
591 */ 591 */
592 musb_ep_select(mbase, epnum); 592 musb_ep_select(mbase, epnum);
593 req = musb_ep->desc ? next_request(musb_ep) : NULL; 593 req = musb_ep->desc ? next_request(musb_ep) : NULL;
594 if (!req) { 594 if (!req) {
595 dev_dbg(musb->controller, "%s idle now\n", 595 dev_dbg(musb->controller, "%s idle now\n",
596 musb_ep->end_point.name); 596 musb_ep->end_point.name);
597 return; 597 return;
598 } 598 }
599 } 599 }
600 600
601 txstate(musb, req); 601 txstate(musb, req);
602 } 602 }
603 } 603 }
604 604
605 /* ------------------------------------------------------------ */ 605 /* ------------------------------------------------------------ */
606 606
607 #ifdef CONFIG_USB_INVENTRA_DMA 607 #ifdef CONFIG_USB_INVENTRA_DMA
608 608
609 /* Peripheral rx (OUT) using Mentor DMA works as follows: 609 /* Peripheral rx (OUT) using Mentor DMA works as follows:
610 - Only mode 0 is used. 610 - Only mode 0 is used.
611 611
612 - Request is queued by the gadget class driver. 612 - Request is queued by the gadget class driver.
613 -> if queue was previously empty, rxstate() 613 -> if queue was previously empty, rxstate()
614 614
615 - Host sends OUT token which causes an endpoint interrupt 615 - Host sends OUT token which causes an endpoint interrupt
616 /\ -> RxReady 616 /\ -> RxReady
617 | -> if request queued, call rxstate 617 | -> if request queued, call rxstate
618 | /\ -> setup DMA 618 | /\ -> setup DMA
619 | | -> DMA interrupt on completion 619 | | -> DMA interrupt on completion
620 | | -> RxReady 620 | | -> RxReady
621 | | -> stop DMA 621 | | -> stop DMA
622 | | -> ack the read 622 | | -> ack the read
623 | | -> if data recd = max expected 623 | | -> if data recd = max expected
624 | | by the request, or host 624 | | by the request, or host
625 | | sent a short packet, 625 | | sent a short packet,
626 | | complete the request, 626 | | complete the request,
627 | | and start the next one. 627 | | and start the next one.
628 | |_____________________________________| 628 | |_____________________________________|
629 | else just wait for the host 629 | else just wait for the host
630 | to send the next OUT token. 630 | to send the next OUT token.
631 |__________________________________________________| 631 |__________________________________________________|
632 632
633 * Non-Mentor DMA engines can of course work differently. 633 * Non-Mentor DMA engines can of course work differently.
634 */ 634 */
635 635
636 #endif 636 #endif
637 637
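A minimal sketch of the per-packet, mode-0 programming step described above (illustrative only, not driver code; locking, RXCSR handling and error paths are omitted, and the fifo_count shown is an assumption standing in for the MUSB_RXCOUNT read):

/* Sketch only: one channel_program() call per OUT packet, as in the
 * mode-0 flow above.  Not part of the driver; error handling omitted.
 */
static void rx_mode0_step_sketch(struct musb *musb, struct musb_request *req)
{
	struct musb_ep *ep = req->ep;
	struct dma_controller *c = musb->dma_controller;
	unsigned remaining = req->request.length - req->request.actual;
	u16 fifo_count = ep->packet_sz;	/* the real code reads MUSB_RXCOUNT */

	/* program one packet; completion raises a DMA interrupt, after
	 * which the core acks RXPKTRDY and either gives the request back
	 * (short packet or length reached) or waits for the next OUT token
	 */
	c->channel_program(ep->dma, ep->packet_sz,
			   0,	/* mode 0 */
			   req->request.dma + req->request.actual,
			   min_t(unsigned, remaining, fifo_count));
}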
638 /* 638 /*
639 * Context: controller locked, IRQs blocked, endpoint selected 639 * Context: controller locked, IRQs blocked, endpoint selected
640 */ 640 */
641 static void rxstate(struct musb *musb, struct musb_request *req) 641 static void rxstate(struct musb *musb, struct musb_request *req)
642 { 642 {
643 const u8 epnum = req->epnum; 643 const u8 epnum = req->epnum;
644 struct usb_request *request = &req->request; 644 struct usb_request *request = &req->request;
645 struct musb_ep *musb_ep; 645 struct musb_ep *musb_ep;
646 void __iomem *epio = musb->endpoints[epnum].regs; 646 void __iomem *epio = musb->endpoints[epnum].regs;
647 unsigned len = 0; 647 unsigned len = 0;
648 u16 fifo_count; 648 u16 fifo_count;
649 u16 csr = musb_readw(epio, MUSB_RXCSR); 649 u16 csr = musb_readw(epio, MUSB_RXCSR);
650 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 650 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
651 u8 use_mode_1; 651 u8 use_mode_1;
652 652
653 if (hw_ep->is_shared_fifo) 653 if (hw_ep->is_shared_fifo)
654 musb_ep = &hw_ep->ep_in; 654 musb_ep = &hw_ep->ep_in;
655 else 655 else
656 musb_ep = &hw_ep->ep_out; 656 musb_ep = &hw_ep->ep_out;
657 657
658 fifo_count = musb_ep->packet_sz; 658 fifo_count = musb_ep->packet_sz;
659 659
660 /* Check if EP is disabled */ 660 /* Check if EP is disabled */
661 if (!musb_ep->desc) { 661 if (!musb_ep->desc) {
662 dev_dbg(musb->controller, "ep:%s disabled - ignore request\n", 662 dev_dbg(musb->controller, "ep:%s disabled - ignore request\n",
663 musb_ep->end_point.name); 663 musb_ep->end_point.name);
664 return; 664 return;
665 } 665 }
666 666
667 /* We shouldn't get here while DMA is active, but we do... */ 667 /* We shouldn't get here while DMA is active, but we do... */
668 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) { 668 if (dma_channel_status(musb_ep->dma) == MUSB_DMA_STATUS_BUSY) {
669 dev_dbg(musb->controller, "DMA pending...\n"); 669 dev_dbg(musb->controller, "DMA pending...\n");
670 return; 670 return;
671 } 671 }
672 672
673 if (csr & MUSB_RXCSR_P_SENDSTALL) { 673 if (csr & MUSB_RXCSR_P_SENDSTALL) {
674 dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n", 674 dev_dbg(musb->controller, "%s stalling, RXCSR %04x\n",
675 musb_ep->end_point.name, csr); 675 musb_ep->end_point.name, csr);
676 return; 676 return;
677 } 677 }
678 678
679 if (is_cppi_enabled() && is_buffer_mapped(req)) { 679 if (is_cppi_enabled() && is_buffer_mapped(req)) {
680 struct dma_controller *c = musb->dma_controller; 680 struct dma_controller *c = musb->dma_controller;
681 struct dma_channel *channel = musb_ep->dma; 681 struct dma_channel *channel = musb_ep->dma;
682 682
683 /* NOTE: CPPI won't actually stop advancing the DMA 683 /* NOTE: CPPI won't actually stop advancing the DMA
684 * queue after short packet transfers, so this is almost 684 * queue after short packet transfers, so this is almost
685 * always going to run as IRQ-per-packet DMA so that 685 * always going to run as IRQ-per-packet DMA so that
686 * faults will be handled correctly. 686 * faults will be handled correctly.
687 */ 687 */
688 if (c->channel_program(channel, 688 if (c->channel_program(channel,
689 musb_ep->packet_sz, 689 musb_ep->packet_sz,
690 !request->short_not_ok, 690 !request->short_not_ok,
691 request->dma + request->actual, 691 request->dma + request->actual,
692 request->length - request->actual)) { 692 request->length - request->actual)) {
693 693
694 /* make sure that if an rxpkt arrived after the irq, 694 /* make sure that if an rxpkt arrived after the irq,
695 * the cppi engine will be ready to take it as soon 695 * the cppi engine will be ready to take it as soon
696 * as DMA is enabled 696 * as DMA is enabled
697 */ 697 */
698 csr &= ~(MUSB_RXCSR_AUTOCLEAR 698 csr &= ~(MUSB_RXCSR_AUTOCLEAR
699 | MUSB_RXCSR_DMAMODE); 699 | MUSB_RXCSR_DMAMODE);
700 csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS; 700 csr |= MUSB_RXCSR_DMAENAB | MUSB_RXCSR_P_WZC_BITS;
701 musb_writew(epio, MUSB_RXCSR, csr); 701 musb_writew(epio, MUSB_RXCSR, csr);
702 return; 702 return;
703 } 703 }
704 } 704 }
705 705
706 if (csr & MUSB_RXCSR_RXPKTRDY) { 706 if (csr & MUSB_RXCSR_RXPKTRDY) {
707 fifo_count = musb_readw(epio, MUSB_RXCOUNT); 707 fifo_count = musb_readw(epio, MUSB_RXCOUNT);
708 708
709 /* 709 /*
710 * use mode 1 only if we expect data of at least ep packet_sz 710 * use mode 1 only if we expect data of at least ep packet_sz
711 * and have not yet received a short packet 711 * and have not yet received a short packet
712 */ 712 */
713 if ((request->length - request->actual >= musb_ep->packet_sz) && 713 if ((request->length - request->actual >= musb_ep->packet_sz) &&
714 (fifo_count >= musb_ep->packet_sz)) 714 (fifo_count >= musb_ep->packet_sz))
715 use_mode_1 = 1; 715 use_mode_1 = 1;
716 else 716 else
717 use_mode_1 = 0; 717 use_mode_1 = 0;
718 718
719 if (request->actual < request->length) { 719 if (request->actual < request->length) {
720 #ifdef CONFIG_USB_INVENTRA_DMA 720 #ifdef CONFIG_USB_INVENTRA_DMA
721 if (is_buffer_mapped(req)) { 721 if (is_buffer_mapped(req)) {
722 struct dma_controller *c; 722 struct dma_controller *c;
723 struct dma_channel *channel; 723 struct dma_channel *channel;
724 int use_dma = 0; 724 int use_dma = 0;
725 725
726 c = musb->dma_controller; 726 c = musb->dma_controller;
727 channel = musb_ep->dma; 727 channel = musb_ep->dma;
728 728
729 /* Experimental: Mode1 works with mass storage use cases */ 729 /* Experimental: Mode1 works with mass storage use cases */
730 if (use_mode_1) { 730 if (use_mode_1) {
731 csr |= MUSB_RXCSR_AUTOCLEAR; 731 csr |= MUSB_RXCSR_AUTOCLEAR;
732 musb_writew(epio, MUSB_RXCSR, csr); 732 musb_writew(epio, MUSB_RXCSR, csr);
733 csr |= MUSB_RXCSR_DMAENAB; 733 csr |= MUSB_RXCSR_DMAENAB;
734 musb_writew(epio, MUSB_RXCSR, csr); 734 musb_writew(epio, MUSB_RXCSR, csr);
735 735
736 /* 736 /*
737 * this special sequence (enabling and then 737 * this special sequence (enabling and then
738 * disabling MUSB_RXCSR_DMAMODE) is required 738 * disabling MUSB_RXCSR_DMAMODE) is required
739 * to get DMAReq to activate 739 * to get DMAReq to activate
740 */ 740 */
741 musb_writew(epio, MUSB_RXCSR, 741 musb_writew(epio, MUSB_RXCSR,
742 csr | MUSB_RXCSR_DMAMODE); 742 csr | MUSB_RXCSR_DMAMODE);
743 musb_writew(epio, MUSB_RXCSR, csr); 743 musb_writew(epio, MUSB_RXCSR, csr);
744 744
745 } else { 745 } else {
746 if (!musb_ep->hb_mult && 746 if (!musb_ep->hb_mult &&
747 musb_ep->hw_ep->rx_double_buffered) 747 musb_ep->hw_ep->rx_double_buffered)
748 csr |= MUSB_RXCSR_AUTOCLEAR; 748 csr |= MUSB_RXCSR_AUTOCLEAR;
749 csr |= MUSB_RXCSR_DMAENAB; 749 csr |= MUSB_RXCSR_DMAENAB;
750 musb_writew(epio, MUSB_RXCSR, csr); 750 musb_writew(epio, MUSB_RXCSR, csr);
751 } 751 }
752 752
753 if (request->actual < request->length) { 753 if (request->actual < request->length) {
754 int transfer_size = 0; 754 int transfer_size = 0;
755 if (use_mode_1) { 755 if (use_mode_1) {
756 transfer_size = min(request->length - request->actual, 756 transfer_size = min(request->length - request->actual,
757 channel->max_len); 757 channel->max_len);
758 musb_ep->dma->desired_mode = 1; 758 musb_ep->dma->desired_mode = 1;
759 } else { 759 } else {
760 transfer_size = min(request->length - request->actual, 760 transfer_size = min(request->length - request->actual,
761 (unsigned)fifo_count); 761 (unsigned)fifo_count);
762 musb_ep->dma->desired_mode = 0; 762 musb_ep->dma->desired_mode = 0;
763 } 763 }
764 764
765 use_dma = c->channel_program( 765 use_dma = c->channel_program(
766 channel, 766 channel,
767 musb_ep->packet_sz, 767 musb_ep->packet_sz,
768 channel->desired_mode, 768 channel->desired_mode,
769 request->dma 769 request->dma
770 + request->actual, 770 + request->actual,
771 transfer_size); 771 transfer_size);
772 } 772 }
773 773
774 if (use_dma) 774 if (use_dma)
775 return; 775 return;
776 } 776 }
777 #elif defined(CONFIG_USB_UX500_DMA) 777 #elif defined(CONFIG_USB_UX500_DMA)
778 if ((is_buffer_mapped(req)) && 778 if ((is_buffer_mapped(req)) &&
779 (request->actual < request->length)) { 779 (request->actual < request->length)) {
780 780
781 struct dma_controller *c; 781 struct dma_controller *c;
782 struct dma_channel *channel; 782 struct dma_channel *channel;
783 int transfer_size = 0; 783 int transfer_size = 0;
784 784
785 c = musb->dma_controller; 785 c = musb->dma_controller;
786 channel = musb_ep->dma; 786 channel = musb_ep->dma;
787 787
788 /* In case first packet is short */ 788 /* In case first packet is short */
789 if (fifo_count < musb_ep->packet_sz) 789 if (fifo_count < musb_ep->packet_sz)
790 transfer_size = fifo_count; 790 transfer_size = fifo_count;
791 else if (request->short_not_ok) 791 else if (request->short_not_ok)
792 transfer_size = min(request->length - 792 transfer_size = min(request->length -
793 request->actual, 793 request->actual,
794 channel->max_len); 794 channel->max_len);
795 else 795 else
796 transfer_size = min(request->length - 796 transfer_size = min(request->length -
797 request->actual, 797 request->actual,
798 (unsigned)fifo_count); 798 (unsigned)fifo_count);
799 799
800 csr &= ~MUSB_RXCSR_DMAMODE; 800 csr &= ~MUSB_RXCSR_DMAMODE;
801 csr |= (MUSB_RXCSR_DMAENAB | 801 csr |= (MUSB_RXCSR_DMAENAB |
802 MUSB_RXCSR_AUTOCLEAR); 802 MUSB_RXCSR_AUTOCLEAR);
803 803
804 musb_writew(epio, MUSB_RXCSR, csr); 804 musb_writew(epio, MUSB_RXCSR, csr);
805 805
806 if (transfer_size <= musb_ep->packet_sz) { 806 if (transfer_size <= musb_ep->packet_sz) {
807 musb_ep->dma->desired_mode = 0; 807 musb_ep->dma->desired_mode = 0;
808 } else { 808 } else {
809 musb_ep->dma->desired_mode = 1; 809 musb_ep->dma->desired_mode = 1;
810 /* Mode must be set after DMAENAB */ 810 /* Mode must be set after DMAENAB */
811 csr |= MUSB_RXCSR_DMAMODE; 811 csr |= MUSB_RXCSR_DMAMODE;
812 musb_writew(epio, MUSB_RXCSR, csr); 812 musb_writew(epio, MUSB_RXCSR, csr);
813 } 813 }
814 814
815 if (c->channel_program(channel, 815 if (c->channel_program(channel,
816 musb_ep->packet_sz, 816 musb_ep->packet_sz,
817 channel->desired_mode, 817 channel->desired_mode,
818 request->dma 818 request->dma
819 + request->actual, 819 + request->actual,
820 transfer_size)) 820 transfer_size))
821 821
822 return; 822 return;
823 } 823 }
824 #endif /* Mentor's DMA */ 824 #endif /* Mentor's DMA */
825 825
826 len = request->length - request->actual; 826 len = request->length - request->actual;
827 dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n", 827 dev_dbg(musb->controller, "%s OUT/RX pio fifo %d/%d, maxpacket %d\n",
828 musb_ep->end_point.name, 828 musb_ep->end_point.name,
829 fifo_count, len, 829 fifo_count, len,
830 musb_ep->packet_sz); 830 musb_ep->packet_sz);
831 831
832 fifo_count = min_t(unsigned, len, fifo_count); 832 fifo_count = min_t(unsigned, len, fifo_count);
833 833
834 #ifdef CONFIG_USB_TUSB_OMAP_DMA 834 #ifdef CONFIG_USB_TUSB_OMAP_DMA
835 if (tusb_dma_omap() && is_buffer_mapped(req)) { 835 if (tusb_dma_omap() && is_buffer_mapped(req)) {
836 struct dma_controller *c = musb->dma_controller; 836 struct dma_controller *c = musb->dma_controller;
837 struct dma_channel *channel = musb_ep->dma; 837 struct dma_channel *channel = musb_ep->dma;
838 u32 dma_addr = request->dma + request->actual; 838 u32 dma_addr = request->dma + request->actual;
839 int ret; 839 int ret;
840 840
841 ret = c->channel_program(channel, 841 ret = c->channel_program(channel,
842 musb_ep->packet_sz, 842 musb_ep->packet_sz,
843 channel->desired_mode, 843 channel->desired_mode,
844 dma_addr, 844 dma_addr,
845 fifo_count); 845 fifo_count);
846 if (ret) 846 if (ret)
847 return; 847 return;
848 } 848 }
849 #endif 849 #endif
850 /* 850 /*
851 * Unmap the dma buffer back to cpu if dma channel 851 * Unmap the dma buffer back to cpu if dma channel
852 * programming fails. This buffer is mapped if the 852 * programming fails. This buffer is mapped if the
853 * channel allocation is successful 853 * channel allocation is successful
854 */ 854 */
855 if (is_buffer_mapped(req)) { 855 if (is_buffer_mapped(req)) {
856 unmap_dma_buffer(req, musb); 856 unmap_dma_buffer(req, musb);
857 857
858 /* 858 /*
859 * Clear DMAENAB and AUTOCLEAR for the 859 * Clear DMAENAB and AUTOCLEAR for the
860 * PIO mode transfer 860 * PIO mode transfer
861 */ 861 */
862 csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR); 862 csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
863 musb_writew(epio, MUSB_RXCSR, csr); 863 musb_writew(epio, MUSB_RXCSR, csr);
864 } 864 }
865 865
866 musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *) 866 musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
867 (request->buf + request->actual)); 867 (request->buf + request->actual));
868 request->actual += fifo_count; 868 request->actual += fifo_count;
869 869
870 /* REVISIT if we left anything in the fifo, flush 870 /* REVISIT if we left anything in the fifo, flush
871 * it and report -EOVERFLOW 871 * it and report -EOVERFLOW
872 */ 872 */
873 873
874 /* ack the read! */ 874 /* ack the read! */
875 csr |= MUSB_RXCSR_P_WZC_BITS; 875 csr |= MUSB_RXCSR_P_WZC_BITS;
876 csr &= ~MUSB_RXCSR_RXPKTRDY; 876 csr &= ~MUSB_RXCSR_RXPKTRDY;
877 musb_writew(epio, MUSB_RXCSR, csr); 877 musb_writew(epio, MUSB_RXCSR, csr);
878 } 878 }
879 } 879 }
880 880
881 /* reached the end, or a short packet was detected */ 881 /* reached the end, or a short packet was detected */
882 if (request->actual == request->length || 882 if (request->actual == request->length ||
883 fifo_count < musb_ep->packet_sz) 883 fifo_count < musb_ep->packet_sz)
884 musb_g_giveback(musb_ep, request, 0); 884 musb_g_giveback(musb_ep, request, 0);
885 } 885 }
886 886
887 /* 887 /*
888 * Data ready for a request; called from IRQ 888 * Data ready for a request; called from IRQ
889 */ 889 */
890 void musb_g_rx(struct musb *musb, u8 epnum) 890 void musb_g_rx(struct musb *musb, u8 epnum)
891 { 891 {
892 u16 csr; 892 u16 csr;
893 struct musb_request *req; 893 struct musb_request *req;
894 struct usb_request *request; 894 struct usb_request *request;
895 void __iomem *mbase = musb->mregs; 895 void __iomem *mbase = musb->mregs;
896 struct musb_ep *musb_ep; 896 struct musb_ep *musb_ep;
897 void __iomem *epio = musb->endpoints[epnum].regs; 897 void __iomem *epio = musb->endpoints[epnum].regs;
898 struct dma_channel *dma; 898 struct dma_channel *dma;
899 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum]; 899 struct musb_hw_ep *hw_ep = &musb->endpoints[epnum];
900 900
901 if (hw_ep->is_shared_fifo) 901 if (hw_ep->is_shared_fifo)
902 musb_ep = &hw_ep->ep_in; 902 musb_ep = &hw_ep->ep_in;
903 else 903 else
904 musb_ep = &hw_ep->ep_out; 904 musb_ep = &hw_ep->ep_out;
905 905
906 musb_ep_select(mbase, epnum); 906 musb_ep_select(mbase, epnum);
907 907
908 req = next_request(musb_ep); 908 req = next_request(musb_ep);
909 if (!req) 909 if (!req)
910 return; 910 return;
911 911
912 request = &req->request; 912 request = &req->request;
913 913
914 csr = musb_readw(epio, MUSB_RXCSR); 914 csr = musb_readw(epio, MUSB_RXCSR);
915 dma = is_dma_capable() ? musb_ep->dma : NULL; 915 dma = is_dma_capable() ? musb_ep->dma : NULL;
916 916
917 dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name, 917 dev_dbg(musb->controller, "<== %s, rxcsr %04x%s %p\n", musb_ep->end_point.name,
918 csr, dma ? " (dma)" : "", request); 918 csr, dma ? " (dma)" : "", request);
919 919
920 if (csr & MUSB_RXCSR_P_SENTSTALL) { 920 if (csr & MUSB_RXCSR_P_SENTSTALL) {
921 csr |= MUSB_RXCSR_P_WZC_BITS; 921 csr |= MUSB_RXCSR_P_WZC_BITS;
922 csr &= ~MUSB_RXCSR_P_SENTSTALL; 922 csr &= ~MUSB_RXCSR_P_SENTSTALL;
923 musb_writew(epio, MUSB_RXCSR, csr); 923 musb_writew(epio, MUSB_RXCSR, csr);
924 return; 924 return;
925 } 925 }
926 926
927 if (csr & MUSB_RXCSR_P_OVERRUN) { 927 if (csr & MUSB_RXCSR_P_OVERRUN) {
928 /* csr |= MUSB_RXCSR_P_WZC_BITS; */ 928 /* csr |= MUSB_RXCSR_P_WZC_BITS; */
929 csr &= ~MUSB_RXCSR_P_OVERRUN; 929 csr &= ~MUSB_RXCSR_P_OVERRUN;
930 musb_writew(epio, MUSB_RXCSR, csr); 930 musb_writew(epio, MUSB_RXCSR, csr);
931 931
932 dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request); 932 dev_dbg(musb->controller, "%s iso overrun on %p\n", musb_ep->name, request);
933 if (request->status == -EINPROGRESS) 933 if (request->status == -EINPROGRESS)
934 request->status = -EOVERFLOW; 934 request->status = -EOVERFLOW;
935 } 935 }
936 if (csr & MUSB_RXCSR_INCOMPRX) { 936 if (csr & MUSB_RXCSR_INCOMPRX) {
937 /* REVISIT not necessarily an error */ 937 /* REVISIT not necessarily an error */
938 dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name); 938 dev_dbg(musb->controller, "%s, incomprx\n", musb_ep->end_point.name);
939 } 939 }
940 940
941 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) { 941 if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
942 /* "should not happen"; likely RXPKTRDY pending for DMA */ 942 /* "should not happen"; likely RXPKTRDY pending for DMA */
943 dev_dbg(musb->controller, "%s busy, csr %04x\n", 943 dev_dbg(musb->controller, "%s busy, csr %04x\n",
944 musb_ep->end_point.name, csr); 944 musb_ep->end_point.name, csr);
945 return; 945 return;
946 } 946 }
947 947
948 if (dma && (csr & MUSB_RXCSR_DMAENAB)) { 948 if (dma && (csr & MUSB_RXCSR_DMAENAB)) {
949 csr &= ~(MUSB_RXCSR_AUTOCLEAR 949 csr &= ~(MUSB_RXCSR_AUTOCLEAR
950 | MUSB_RXCSR_DMAENAB 950 | MUSB_RXCSR_DMAENAB
951 | MUSB_RXCSR_DMAMODE); 951 | MUSB_RXCSR_DMAMODE);
952 musb_writew(epio, MUSB_RXCSR, 952 musb_writew(epio, MUSB_RXCSR,
953 MUSB_RXCSR_P_WZC_BITS | csr); 953 MUSB_RXCSR_P_WZC_BITS | csr);
954 954
955 request->actual += musb_ep->dma->actual_len; 955 request->actual += musb_ep->dma->actual_len;
956 956
957 dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n", 957 dev_dbg(musb->controller, "RXCSR%d %04x, dma off, %04x, len %zu, req %p\n",
958 epnum, csr, 958 epnum, csr,
959 musb_readw(epio, MUSB_RXCSR), 959 musb_readw(epio, MUSB_RXCSR),
960 musb_ep->dma->actual_len, request); 960 musb_ep->dma->actual_len, request);
961 961
962 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ 962 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
963 defined(CONFIG_USB_UX500_DMA) 963 defined(CONFIG_USB_UX500_DMA)
964 /* Autoclear doesn't clear RxPktRdy for short packets */ 964 /* Autoclear doesn't clear RxPktRdy for short packets */
965 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered) 965 if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
966 || (dma->actual_len 966 || (dma->actual_len
967 & (musb_ep->packet_sz - 1))) { 967 & (musb_ep->packet_sz - 1))) {
968 /* ack the read! */ 968 /* ack the read! */
969 csr &= ~MUSB_RXCSR_RXPKTRDY; 969 csr &= ~MUSB_RXCSR_RXPKTRDY;
970 musb_writew(epio, MUSB_RXCSR, csr); 970 musb_writew(epio, MUSB_RXCSR, csr);
971 } 971 }
972 972
973 /* incomplete, and not short? wait for next IN packet */ 973 /* incomplete, and not short? wait for next IN packet */
974 if ((request->actual < request->length) 974 if ((request->actual < request->length)
975 && (musb_ep->dma->actual_len 975 && (musb_ep->dma->actual_len
976 == musb_ep->packet_sz)) { 976 == musb_ep->packet_sz)) {
977 /* In the double-buffer case, continue to unload the fifo if 977 /* In the double-buffer case, continue to unload the fifo if
978 * there is an Rx packet in the FIFO. 978 * there is an Rx packet in the FIFO.
979 **/ 979 **/
980 csr = musb_readw(epio, MUSB_RXCSR); 980 csr = musb_readw(epio, MUSB_RXCSR);
981 if ((csr & MUSB_RXCSR_RXPKTRDY) && 981 if ((csr & MUSB_RXCSR_RXPKTRDY) &&
982 hw_ep->rx_double_buffered) 982 hw_ep->rx_double_buffered)
983 goto exit; 983 goto exit;
984 return; 984 return;
985 } 985 }
986 #endif 986 #endif
987 musb_g_giveback(musb_ep, request, 0); 987 musb_g_giveback(musb_ep, request, 0);
988 /* 988 /*
989 * In the giveback function the MUSB lock is 989 * In the giveback function the MUSB lock is
990 * released and acquired after some time. During 990 * released and acquired after some time. During
991 * this time period the INDEX register could get 991 * this time period the INDEX register could get
992 * changed by the gadget_queue function especially 992 * changed by the gadget_queue function especially
993 * on SMP systems. Reselect the INDEX to be sure 993 * on SMP systems. Reselect the INDEX to be sure
994 * we are reading/modifying the right registers 994 * we are reading/modifying the right registers
995 */ 995 */
996 musb_ep_select(mbase, epnum); 996 musb_ep_select(mbase, epnum);
997 997
998 req = next_request(musb_ep); 998 req = next_request(musb_ep);
999 if (!req) 999 if (!req)
1000 return; 1000 return;
1001 } 1001 }
1002 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \ 1002 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA) || \
1003 defined(CONFIG_USB_UX500_DMA) 1003 defined(CONFIG_USB_UX500_DMA)
1004 exit: 1004 exit:
1005 #endif 1005 #endif
1006 /* Analyze request */ 1006 /* Analyze request */
1007 rxstate(musb, req); 1007 rxstate(musb, req);
1008 } 1008 }
1009 1009
1010 /* ------------------------------------------------------------ */ 1010 /* ------------------------------------------------------------ */
1011 1011
1012 static int musb_gadget_enable(struct usb_ep *ep, 1012 static int musb_gadget_enable(struct usb_ep *ep,
1013 const struct usb_endpoint_descriptor *desc) 1013 const struct usb_endpoint_descriptor *desc)
1014 { 1014 {
1015 unsigned long flags; 1015 unsigned long flags;
1016 struct musb_ep *musb_ep; 1016 struct musb_ep *musb_ep;
1017 struct musb_hw_ep *hw_ep; 1017 struct musb_hw_ep *hw_ep;
1018 void __iomem *regs; 1018 void __iomem *regs;
1019 struct musb *musb; 1019 struct musb *musb;
1020 void __iomem *mbase; 1020 void __iomem *mbase;
1021 u8 epnum; 1021 u8 epnum;
1022 u16 csr; 1022 u16 csr;
1023 unsigned tmp; 1023 unsigned tmp;
1024 int status = -EINVAL; 1024 int status = -EINVAL;
1025 1025
1026 if (!ep || !desc) 1026 if (!ep || !desc)
1027 return -EINVAL; 1027 return -EINVAL;
1028 1028
1029 musb_ep = to_musb_ep(ep); 1029 musb_ep = to_musb_ep(ep);
1030 hw_ep = musb_ep->hw_ep; 1030 hw_ep = musb_ep->hw_ep;
1031 regs = hw_ep->regs; 1031 regs = hw_ep->regs;
1032 musb = musb_ep->musb; 1032 musb = musb_ep->musb;
1033 mbase = musb->mregs; 1033 mbase = musb->mregs;
1034 epnum = musb_ep->current_epnum; 1034 epnum = musb_ep->current_epnum;
1035 1035
1036 spin_lock_irqsave(&musb->lock, flags); 1036 spin_lock_irqsave(&musb->lock, flags);
1037 1037
1038 if (musb_ep->desc) { 1038 if (musb_ep->desc) {
1039 status = -EBUSY; 1039 status = -EBUSY;
1040 goto fail; 1040 goto fail;
1041 } 1041 }
1042 musb_ep->type = usb_endpoint_type(desc); 1042 musb_ep->type = usb_endpoint_type(desc);
1043 1043
1044 /* check direction and (later) maxpacket size against endpoint */ 1044 /* check direction and (later) maxpacket size against endpoint */
1045 if (usb_endpoint_num(desc) != epnum) 1045 if (usb_endpoint_num(desc) != epnum)
1046 goto fail; 1046 goto fail;
1047 1047
1048 /* REVISIT this rules out high bandwidth periodic transfers */ 1048 /* REVISIT this rules out high bandwidth periodic transfers */
1049 tmp = usb_endpoint_maxp(desc); 1049 tmp = usb_endpoint_maxp(desc);
1050 if (tmp & ~0x07ff) { 1050 if (tmp & ~0x07ff) {
1051 int ok; 1051 int ok;
1052 1052
1053 if (usb_endpoint_dir_in(desc)) 1053 if (usb_endpoint_dir_in(desc))
1054 ok = musb->hb_iso_tx; 1054 ok = musb->hb_iso_tx;
1055 else 1055 else
1056 ok = musb->hb_iso_rx; 1056 ok = musb->hb_iso_rx;
1057 1057
1058 if (!ok) { 1058 if (!ok) {
1059 dev_dbg(musb->controller, "no support for high bandwidth ISO\n"); 1059 dev_dbg(musb->controller, "no support for high bandwidth ISO\n");
1060 goto fail; 1060 goto fail;
1061 } 1061 }
1062 musb_ep->hb_mult = (tmp >> 11) & 3; 1062 musb_ep->hb_mult = (tmp >> 11) & 3;
1063 } else { 1063 } else {
1064 musb_ep->hb_mult = 0; 1064 musb_ep->hb_mult = 0;
1065 } 1065 }
1066 1066
1067 musb_ep->packet_sz = tmp & 0x7ff; 1067 musb_ep->packet_sz = tmp & 0x7ff;
1068 tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1); 1068 tmp = musb_ep->packet_sz * (musb_ep->hb_mult + 1);
1069 1069
1070 /* enable the interrupts for the endpoint, set the endpoint 1070 /* enable the interrupts for the endpoint, set the endpoint
1071 * packet size (or fail), set the mode, clear the fifo 1071 * packet size (or fail), set the mode, clear the fifo
1072 */ 1072 */
1073 musb_ep_select(mbase, epnum); 1073 musb_ep_select(mbase, epnum);
1074 if (usb_endpoint_dir_in(desc)) { 1074 if (usb_endpoint_dir_in(desc)) {
1075 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE); 1075 u16 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1076 1076
1077 if (hw_ep->is_shared_fifo) 1077 if (hw_ep->is_shared_fifo)
1078 musb_ep->is_in = 1; 1078 musb_ep->is_in = 1;
1079 if (!musb_ep->is_in) 1079 if (!musb_ep->is_in)
1080 goto fail; 1080 goto fail;
1081 1081
1082 if (tmp > hw_ep->max_packet_sz_tx) { 1082 if (tmp > hw_ep->max_packet_sz_tx) {
1083 dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); 1083 dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1084 goto fail; 1084 goto fail;
1085 } 1085 }
1086 1086
1087 int_txe |= (1 << epnum); 1087 int_txe |= (1 << epnum);
1088 musb_writew(mbase, MUSB_INTRTXE, int_txe); 1088 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1089 1089
1090 /* REVISIT if can_bulk_split(), use it by updating "tmp"; 1090 /* REVISIT if can_bulk_split(), use it by updating "tmp";
1091 * likewise high bandwidth periodic tx 1091 * likewise high bandwidth periodic tx
1092 */ 1092 */
1093 /* Set TXMAXP with the FIFO size of the endpoint 1093 /* Set TXMAXP with the FIFO size of the endpoint
1094 * to disable double buffering mode. 1094 * to disable double buffering mode.
1095 */ 1095 */
1096 if (musb->double_buffer_not_ok) 1096 if (musb->double_buffer_not_ok)
1097 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx); 1097 musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
1098 else 1098 else
1099 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz 1099 musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz
1100 | (musb_ep->hb_mult << 11)); 1100 | (musb_ep->hb_mult << 11));
1101 1101
1102 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG; 1102 csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
1103 if (musb_readw(regs, MUSB_TXCSR) 1103 if (musb_readw(regs, MUSB_TXCSR)
1104 & MUSB_TXCSR_FIFONOTEMPTY) 1104 & MUSB_TXCSR_FIFONOTEMPTY)
1105 csr |= MUSB_TXCSR_FLUSHFIFO; 1105 csr |= MUSB_TXCSR_FLUSHFIFO;
1106 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 1106 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1107 csr |= MUSB_TXCSR_P_ISO; 1107 csr |= MUSB_TXCSR_P_ISO;
1108 1108
1109 /* set twice in case of double buffering */ 1109 /* set twice in case of double buffering */
1110 musb_writew(regs, MUSB_TXCSR, csr); 1110 musb_writew(regs, MUSB_TXCSR, csr);
1111 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 1111 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1112 musb_writew(regs, MUSB_TXCSR, csr); 1112 musb_writew(regs, MUSB_TXCSR, csr);
1113 1113
1114 } else { 1114 } else {
1115 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE); 1115 u16 int_rxe = musb_readw(mbase, MUSB_INTRRXE);
1116 1116
1117 if (hw_ep->is_shared_fifo) 1117 if (hw_ep->is_shared_fifo)
1118 musb_ep->is_in = 0; 1118 musb_ep->is_in = 0;
1119 if (musb_ep->is_in) 1119 if (musb_ep->is_in)
1120 goto fail; 1120 goto fail;
1121 1121
1122 if (tmp > hw_ep->max_packet_sz_rx) { 1122 if (tmp > hw_ep->max_packet_sz_rx) {
1123 dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n"); 1123 dev_dbg(musb->controller, "packet size beyond hardware FIFO size\n");
1124 goto fail; 1124 goto fail;
1125 } 1125 }
1126 1126
1127 int_rxe |= (1 << epnum); 1127 int_rxe |= (1 << epnum);
1128 musb_writew(mbase, MUSB_INTRRXE, int_rxe); 1128 musb_writew(mbase, MUSB_INTRRXE, int_rxe);
1129 1129
1130 /* REVISIT if can_bulk_combine(), use it by updating "tmp" 1130 /* REVISIT if can_bulk_combine(), use it by updating "tmp"
1131 * likewise high bandwidth periodic rx 1131 * likewise high bandwidth periodic rx
1132 */ 1132 */
1133 /* Set RXMAXP with the FIFO size of the endpoint 1133 /* Set RXMAXP with the FIFO size of the endpoint
1134 * to disable double buffering mode. 1134 * to disable double buffering mode.
1135 */ 1135 */
1136 if (musb->double_buffer_not_ok) 1136 if (musb->double_buffer_not_ok)
1137 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx); 1137 musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_tx);
1138 else 1138 else
1139 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz 1139 musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz
1140 | (musb_ep->hb_mult << 11)); 1140 | (musb_ep->hb_mult << 11));
1141 1141
1142 /* force shared fifo to OUT-only mode */ 1142 /* force shared fifo to OUT-only mode */
1143 if (hw_ep->is_shared_fifo) { 1143 if (hw_ep->is_shared_fifo) {
1144 csr = musb_readw(regs, MUSB_TXCSR); 1144 csr = musb_readw(regs, MUSB_TXCSR);
1145 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY); 1145 csr &= ~(MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY);
1146 musb_writew(regs, MUSB_TXCSR, csr); 1146 musb_writew(regs, MUSB_TXCSR, csr);
1147 } 1147 }
1148 1148
1149 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG; 1149 csr = MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_CLRDATATOG;
1150 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC) 1150 if (musb_ep->type == USB_ENDPOINT_XFER_ISOC)
1151 csr |= MUSB_RXCSR_P_ISO; 1151 csr |= MUSB_RXCSR_P_ISO;
1152 else if (musb_ep->type == USB_ENDPOINT_XFER_INT) 1152 else if (musb_ep->type == USB_ENDPOINT_XFER_INT)
1153 csr |= MUSB_RXCSR_DISNYET; 1153 csr |= MUSB_RXCSR_DISNYET;
1154 1154
1155 /* set twice in case of double buffering */ 1155 /* set twice in case of double buffering */
1156 musb_writew(regs, MUSB_RXCSR, csr); 1156 musb_writew(regs, MUSB_RXCSR, csr);
1157 musb_writew(regs, MUSB_RXCSR, csr); 1157 musb_writew(regs, MUSB_RXCSR, csr);
1158 } 1158 }
1159 1159
1160 /* NOTE: all the I/O code _should_ work fine without DMA, in case 1160 /* NOTE: all the I/O code _should_ work fine without DMA, in case
1161 * for some reason you run out of channels here. 1161 * for some reason you run out of channels here.
1162 */ 1162 */
1163 if (is_dma_capable() && musb->dma_controller) { 1163 if (is_dma_capable() && musb->dma_controller) {
1164 struct dma_controller *c = musb->dma_controller; 1164 struct dma_controller *c = musb->dma_controller;
1165 1165
1166 musb_ep->dma = c->channel_alloc(c, hw_ep, 1166 musb_ep->dma = c->channel_alloc(c, hw_ep,
1167 (desc->bEndpointAddress & USB_DIR_IN)); 1167 (desc->bEndpointAddress & USB_DIR_IN));
1168 } else 1168 } else
1169 musb_ep->dma = NULL; 1169 musb_ep->dma = NULL;
1170 1170
1171 musb_ep->desc = desc; 1171 musb_ep->desc = desc;
1172 musb_ep->busy = 0; 1172 musb_ep->busy = 0;
1173 musb_ep->wedged = 0; 1173 musb_ep->wedged = 0;
1174 status = 0; 1174 status = 0;
1175 1175
1176 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n", 1176 pr_debug("%s periph: enabled %s for %s %s, %smaxpacket %d\n",
1177 musb_driver_name, musb_ep->end_point.name, 1177 musb_driver_name, musb_ep->end_point.name,
1178 ({ char *s; switch (musb_ep->type) { 1178 ({ char *s; switch (musb_ep->type) {
1179 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break; 1179 case USB_ENDPOINT_XFER_BULK: s = "bulk"; break;
1180 case USB_ENDPOINT_XFER_INT: s = "int"; break; 1180 case USB_ENDPOINT_XFER_INT: s = "int"; break;
1181 default: s = "iso"; break; 1181 default: s = "iso"; break;
1182 }; s; }), 1182 }; s; }),
1183 musb_ep->is_in ? "IN" : "OUT", 1183 musb_ep->is_in ? "IN" : "OUT",
1184 musb_ep->dma ? "dma, " : "", 1184 musb_ep->dma ? "dma, " : "",
1185 musb_ep->packet_sz); 1185 musb_ep->packet_sz);
1186 1186
1187 schedule_work(&musb->irq_work); 1187 schedule_work(&musb->irq_work);
1188 1188
1189 fail: 1189 fail:
1190 spin_unlock_irqrestore(&musb->lock, flags); 1190 spin_unlock_irqrestore(&musb->lock, flags);
1191 return status; 1191 return status;
1192 } 1192 }
1193 1193
1194 /* 1194 /*
1195 * Disable an endpoint, flushing all queued requests. 1195 * Disable an endpoint, flushing all queued requests.
1196 */ 1196 */
1197 static int musb_gadget_disable(struct usb_ep *ep) 1197 static int musb_gadget_disable(struct usb_ep *ep)
1198 { 1198 {
1199 unsigned long flags; 1199 unsigned long flags;
1200 struct musb *musb; 1200 struct musb *musb;
1201 u8 epnum; 1201 u8 epnum;
1202 struct musb_ep *musb_ep; 1202 struct musb_ep *musb_ep;
1203 void __iomem *epio; 1203 void __iomem *epio;
1204 int status = 0; 1204 int status = 0;
1205 1205
1206 musb_ep = to_musb_ep(ep); 1206 musb_ep = to_musb_ep(ep);
1207 musb = musb_ep->musb; 1207 musb = musb_ep->musb;
1208 epnum = musb_ep->current_epnum; 1208 epnum = musb_ep->current_epnum;
1209 epio = musb->endpoints[epnum].regs; 1209 epio = musb->endpoints[epnum].regs;
1210 1210
1211 spin_lock_irqsave(&musb->lock, flags); 1211 spin_lock_irqsave(&musb->lock, flags);
1212 musb_ep_select(musb->mregs, epnum); 1212 musb_ep_select(musb->mregs, epnum);
1213 1213
1214 /* zero the endpoint sizes */ 1214 /* zero the endpoint sizes */
1215 if (musb_ep->is_in) { 1215 if (musb_ep->is_in) {
1216 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE); 1216 u16 int_txe = musb_readw(musb->mregs, MUSB_INTRTXE);
1217 int_txe &= ~(1 << epnum); 1217 int_txe &= ~(1 << epnum);
1218 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe); 1218 musb_writew(musb->mregs, MUSB_INTRTXE, int_txe);
1219 musb_writew(epio, MUSB_TXMAXP, 0); 1219 musb_writew(epio, MUSB_TXMAXP, 0);
1220 } else { 1220 } else {
1221 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE); 1221 u16 int_rxe = musb_readw(musb->mregs, MUSB_INTRRXE);
1222 int_rxe &= ~(1 << epnum); 1222 int_rxe &= ~(1 << epnum);
1223 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe); 1223 musb_writew(musb->mregs, MUSB_INTRRXE, int_rxe);
1224 musb_writew(epio, MUSB_RXMAXP, 0); 1224 musb_writew(epio, MUSB_RXMAXP, 0);
1225 } 1225 }
1226 1226
1227 musb_ep->desc = NULL; 1227 musb_ep->desc = NULL;
1228 musb_ep->end_point.desc = NULL; 1228 musb_ep->end_point.desc = NULL;
1229 1229
1230 /* abort all pending DMA and requests */ 1230 /* abort all pending DMA and requests */
1231 nuke(musb_ep, -ESHUTDOWN); 1231 nuke(musb_ep, -ESHUTDOWN);
1232 1232
1233 schedule_work(&musb->irq_work); 1233 schedule_work(&musb->irq_work);
1234 1234
1235 spin_unlock_irqrestore(&(musb->lock), flags); 1235 spin_unlock_irqrestore(&(musb->lock), flags);
1236 1236
1237 dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name); 1237 dev_dbg(musb->controller, "%s\n", musb_ep->end_point.name);
1238 1238
1239 return status; 1239 return status;
1240 } 1240 }
1241 1241
1242 /* 1242 /*
1243 * Allocate a request for an endpoint. 1243 * Allocate a request for an endpoint.
1244 * Reused by ep0 code. 1244 * Reused by ep0 code.
1245 */ 1245 */
1246 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags) 1246 struct usb_request *musb_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
1247 { 1247 {
1248 struct musb_ep *musb_ep = to_musb_ep(ep); 1248 struct musb_ep *musb_ep = to_musb_ep(ep);
1249 struct musb *musb = musb_ep->musb; 1249 struct musb *musb = musb_ep->musb;
1250 struct musb_request *request = NULL; 1250 struct musb_request *request = NULL;
1251 1251
1252 request = kzalloc(sizeof *request, gfp_flags); 1252 request = kzalloc(sizeof *request, gfp_flags);
1253 if (!request) { 1253 if (!request) {
1254 dev_dbg(musb->controller, "not enough memory\n"); 1254 dev_dbg(musb->controller, "not enough memory\n");
1255 return NULL; 1255 return NULL;
1256 } 1256 }
1257 1257
1258 request->request.dma = DMA_ADDR_INVALID; 1258 request->request.dma = DMA_ADDR_INVALID;
1259 request->epnum = musb_ep->current_epnum; 1259 request->epnum = musb_ep->current_epnum;
1260 request->ep = musb_ep; 1260 request->ep = musb_ep;
1261 1261
1262 return &request->request; 1262 return &request->request;
1263 } 1263 }
1264 1264
1265 /* 1265 /*
1266 * Free a request 1266 * Free a request
1267 * Reused by ep0 code. 1267 * Reused by ep0 code.
1268 */ 1268 */
1269 void musb_free_request(struct usb_ep *ep, struct usb_request *req) 1269 void musb_free_request(struct usb_ep *ep, struct usb_request *req)
1270 { 1270 {
1271 kfree(to_musb_request(req)); 1271 kfree(to_musb_request(req));
1272 } 1272 }
1273 1273
1274 static LIST_HEAD(buffers); 1274 static LIST_HEAD(buffers);
1275 1275
1276 struct free_record { 1276 struct free_record {
1277 struct list_head list; 1277 struct list_head list;
1278 struct device *dev; 1278 struct device *dev;
1279 unsigned bytes; 1279 unsigned bytes;
1280 dma_addr_t dma; 1280 dma_addr_t dma;
1281 }; 1281 };
1282 1282
1283 /* 1283 /*
1284 * Context: controller locked, IRQs blocked. 1284 * Context: controller locked, IRQs blocked.
1285 */ 1285 */
1286 void musb_ep_restart(struct musb *musb, struct musb_request *req) 1286 void musb_ep_restart(struct musb *musb, struct musb_request *req)
1287 { 1287 {
1288 dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n", 1288 dev_dbg(musb->controller, "<== %s request %p len %u on hw_ep%d\n",
1289 req->tx ? "TX/IN" : "RX/OUT", 1289 req->tx ? "TX/IN" : "RX/OUT",
1290 &req->request, req->request.length, req->epnum); 1290 &req->request, req->request.length, req->epnum);
1291 1291
1292 musb_ep_select(musb->mregs, req->epnum); 1292 musb_ep_select(musb->mregs, req->epnum);
1293 if (req->tx) 1293 if (req->tx)
1294 txstate(musb, req); 1294 txstate(musb, req);
1295 else 1295 else
1296 rxstate(musb, req); 1296 rxstate(musb, req);
1297 } 1297 }
1298 1298
1299 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, 1299 static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
1300 gfp_t gfp_flags) 1300 gfp_t gfp_flags)
1301 { 1301 {
1302 struct musb_ep *musb_ep; 1302 struct musb_ep *musb_ep;
1303 struct musb_request *request; 1303 struct musb_request *request;
1304 struct musb *musb; 1304 struct musb *musb;
1305 int status = 0; 1305 int status = 0;
1306 unsigned long lockflags; 1306 unsigned long lockflags;
1307 1307
1308 if (!ep || !req) 1308 if (!ep || !req)
1309 return -EINVAL; 1309 return -EINVAL;
1310 if (!req->buf) 1310 if (!req->buf)
1311 return -ENODATA; 1311 return -ENODATA;
1312 1312
1313 musb_ep = to_musb_ep(ep); 1313 musb_ep = to_musb_ep(ep);
1314 musb = musb_ep->musb; 1314 musb = musb_ep->musb;
1315 1315
1316 request = to_musb_request(req); 1316 request = to_musb_request(req);
1317 request->musb = musb; 1317 request->musb = musb;
1318 1318
1319 if (request->ep != musb_ep) 1319 if (request->ep != musb_ep)
1320 return -EINVAL; 1320 return -EINVAL;
1321 1321
1322 dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req); 1322 dev_dbg(musb->controller, "<== to %s request=%p\n", ep->name, req);
1323 1323
1324 /* request is mine now... */ 1324 /* request is mine now... */
1325 request->request.actual = 0; 1325 request->request.actual = 0;
1326 request->request.status = -EINPROGRESS; 1326 request->request.status = -EINPROGRESS;
1327 request->epnum = musb_ep->current_epnum; 1327 request->epnum = musb_ep->current_epnum;
1328 request->tx = musb_ep->is_in; 1328 request->tx = musb_ep->is_in;
1329 1329
1330 map_dma_buffer(request, musb, musb_ep); 1330 map_dma_buffer(request, musb, musb_ep);
1331 1331
1332 spin_lock_irqsave(&musb->lock, lockflags); 1332 spin_lock_irqsave(&musb->lock, lockflags);
1333 1333
1334 /* don't queue if the ep is down */ 1334 /* don't queue if the ep is down */
1335 if (!musb_ep->desc) { 1335 if (!musb_ep->desc) {
1336 dev_dbg(musb->controller, "req %p queued to %s while ep %s\n", 1336 dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
1337 req, ep->name, "disabled"); 1337 req, ep->name, "disabled");
1338 status = -ESHUTDOWN; 1338 status = -ESHUTDOWN;
1339 goto cleanup; 1339 goto cleanup;
1340 } 1340 }
1341 1341
1342 /* add request to the list */ 1342 /* add request to the list */
1343 list_add_tail(&request->list, &musb_ep->req_list); 1343 list_add_tail(&request->list, &musb_ep->req_list);
1344 1344
1345 /* if this is the head of the queue, start i/o ... */ 1345 /* if this is the head of the queue, start i/o ... */
1346 if (!musb_ep->busy && &request->list == musb_ep->req_list.next) 1346 if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
1347 musb_ep_restart(musb, request); 1347 musb_ep_restart(musb, request);
1348 1348
1349 cleanup: 1349 cleanup:
1350 spin_unlock_irqrestore(&musb->lock, lockflags); 1350 spin_unlock_irqrestore(&musb->lock, lockflags);
1351 return status; 1351 return status;
1352 } 1352 }
1353 1353
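For context, musb_gadget_queue() above is reached through the generic gadget endpoint API; a hypothetical caller in a function driver (endpoint, buffer and callback names are illustrative, not part of this driver) looks roughly like this:

/* Illustration only: how a gadget function driver ends up in
 * musb_gadget_queue(), via usb_ep_queue().  Names are assumptions.
 */
static void example_complete(struct usb_ep *ep, struct usb_request *req)
{
	/* req->status and req->actual are valid here */
}

static int example_submit(struct usb_ep *ep, void *buf, unsigned len)
{
	struct usb_request *req = usb_ep_alloc_request(ep, GFP_ATOMIC);

	if (!req)
		return -ENOMEM;
	req->buf = buf;
	req->length = len;
	req->complete = example_complete;
	return usb_ep_queue(ep, req, GFP_ATOMIC);	/* -> musb_gadget_queue() */
}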
1354 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request) 1354 static int musb_gadget_dequeue(struct usb_ep *ep, struct usb_request *request)
1355 { 1355 {
1356 struct musb_ep *musb_ep = to_musb_ep(ep); 1356 struct musb_ep *musb_ep = to_musb_ep(ep);
1357 struct musb_request *req = to_musb_request(request); 1357 struct musb_request *req = to_musb_request(request);
1358 struct musb_request *r; 1358 struct musb_request *r;
1359 unsigned long flags; 1359 unsigned long flags;
1360 int status = 0; 1360 int status = 0;
1361 struct musb *musb = musb_ep->musb; 1361 struct musb *musb = musb_ep->musb;
1362 1362
1363 if (!ep || !request || to_musb_request(request)->ep != musb_ep) 1363 if (!ep || !request || to_musb_request(request)->ep != musb_ep)
1364 return -EINVAL; 1364 return -EINVAL;
1365 1365
1366 spin_lock_irqsave(&musb->lock, flags); 1366 spin_lock_irqsave(&musb->lock, flags);
1367 1367
1368 list_for_each_entry(r, &musb_ep->req_list, list) { 1368 list_for_each_entry(r, &musb_ep->req_list, list) {
1369 if (r == req) 1369 if (r == req)
1370 break; 1370 break;
1371 } 1371 }
1372 if (r != req) { 1372 if (r != req) {
1373 dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name); 1373 dev_dbg(musb->controller, "request %p not queued to %s\n", request, ep->name);
1374 status = -EINVAL; 1374 status = -EINVAL;
1375 goto done; 1375 goto done;
1376 } 1376 }
1377 1377
1378 /* if the hardware doesn't have the request, easy ... */ 1378 /* if the hardware doesn't have the request, easy ... */
1379 if (musb_ep->req_list.next != &req->list || musb_ep->busy) 1379 if (musb_ep->req_list.next != &req->list || musb_ep->busy)
1380 musb_g_giveback(musb_ep, request, -ECONNRESET); 1380 musb_g_giveback(musb_ep, request, -ECONNRESET);
1381 1381
1382 /* ... else abort the dma transfer ... */ 1382 /* ... else abort the dma transfer ... */
1383 else if (is_dma_capable() && musb_ep->dma) { 1383 else if (is_dma_capable() && musb_ep->dma) {
1384 struct dma_controller *c = musb->dma_controller; 1384 struct dma_controller *c = musb->dma_controller;
1385 1385
1386 musb_ep_select(musb->mregs, musb_ep->current_epnum); 1386 musb_ep_select(musb->mregs, musb_ep->current_epnum);
1387 if (c->channel_abort) 1387 if (c->channel_abort)
1388 status = c->channel_abort(musb_ep->dma); 1388 status = c->channel_abort(musb_ep->dma);
1389 else 1389 else
1390 status = -EBUSY; 1390 status = -EBUSY;
1391 if (status == 0) 1391 if (status == 0)
1392 musb_g_giveback(musb_ep, request, -ECONNRESET); 1392 musb_g_giveback(musb_ep, request, -ECONNRESET);
1393 } else { 1393 } else {
1394 /* NOTE: by sticking to easily tested hardware/driver states, 1394 /* NOTE: by sticking to easily tested hardware/driver states,
1395 * we leave counting of in-flight packets imprecise. 1395 * we leave counting of in-flight packets imprecise.
1396 */ 1396 */
1397 musb_g_giveback(musb_ep, request, -ECONNRESET); 1397 musb_g_giveback(musb_ep, request, -ECONNRESET);
1398 } 1398 }
1399 1399
1400 done: 1400 done:
1401 spin_unlock_irqrestore(&musb->lock, flags); 1401 spin_unlock_irqrestore(&musb->lock, flags);
1402 return status; 1402 return status;
1403 } 1403 }
1404 1404
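Cancellation follows the same pattern; a hedged illustration of how a function driver would back out a request it queued earlier (names are assumptions):

/* Illustration only: 'req' must be the same usb_request previously passed
 * to usb_ep_queue(); its completion callback then runs with -ECONNRESET,
 * as implemented in musb_gadget_dequeue() above.
 */
static void example_cancel(struct usb_ep *ep, struct usb_request *req)
{
	int ret = usb_ep_dequeue(ep, req);

	if (ret)
		pr_debug("dequeue of %p failed: %d\n", req, ret);
}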
1405 /* 1405 /*
1406 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any 1406 * Set or clear the halt bit of an endpoint. A halted endpoint won't tx/rx any
1407 * data but will queue requests. 1407 * data but will queue requests.
1408 * 1408 *
1409 * exported to ep0 code 1409 * exported to ep0 code
1410 */ 1410 */
1411 static int musb_gadget_set_halt(struct usb_ep *ep, int value) 1411 static int musb_gadget_set_halt(struct usb_ep *ep, int value)
1412 { 1412 {
1413 struct musb_ep *musb_ep = to_musb_ep(ep); 1413 struct musb_ep *musb_ep = to_musb_ep(ep);
1414 u8 epnum = musb_ep->current_epnum; 1414 u8 epnum = musb_ep->current_epnum;
1415 struct musb *musb = musb_ep->musb; 1415 struct musb *musb = musb_ep->musb;
1416 void __iomem *epio = musb->endpoints[epnum].regs; 1416 void __iomem *epio = musb->endpoints[epnum].regs;
1417 void __iomem *mbase; 1417 void __iomem *mbase;
1418 unsigned long flags; 1418 unsigned long flags;
1419 u16 csr; 1419 u16 csr;
1420 struct musb_request *request; 1420 struct musb_request *request;
1421 int status = 0; 1421 int status = 0;
1422 1422
1423 if (!ep) 1423 if (!ep)
1424 return -EINVAL; 1424 return -EINVAL;
1425 mbase = musb->mregs; 1425 mbase = musb->mregs;
1426 1426
1427 spin_lock_irqsave(&musb->lock, flags); 1427 spin_lock_irqsave(&musb->lock, flags);
1428 1428
1429 if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) { 1429 if ((USB_ENDPOINT_XFER_ISOC == musb_ep->type)) {
1430 status = -EINVAL; 1430 status = -EINVAL;
1431 goto done; 1431 goto done;
1432 } 1432 }
1433 1433
1434 musb_ep_select(mbase, epnum); 1434 musb_ep_select(mbase, epnum);
1435 1435
1436 request = next_request(musb_ep); 1436 request = next_request(musb_ep);
1437 if (value) { 1437 if (value) {
1438 if (request) { 1438 if (request) {
1439 dev_dbg(musb->controller, "request in progress, cannot halt %s\n", 1439 dev_dbg(musb->controller, "request in progress, cannot halt %s\n",
1440 ep->name); 1440 ep->name);
1441 status = -EAGAIN; 1441 status = -EAGAIN;
1442 goto done; 1442 goto done;
1443 } 1443 }
1444 /* Cannot portably stall with non-empty FIFO */ 1444 /* Cannot portably stall with non-empty FIFO */
1445 if (musb_ep->is_in) { 1445 if (musb_ep->is_in) {
1446 csr = musb_readw(epio, MUSB_TXCSR); 1446 csr = musb_readw(epio, MUSB_TXCSR);
1447 if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1447 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1448 dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name); 1448 dev_dbg(musb->controller, "FIFO busy, cannot halt %s\n", ep->name);
1449 status = -EAGAIN; 1449 status = -EAGAIN;
1450 goto done; 1450 goto done;
1451 } 1451 }
1452 } 1452 }
1453 } else 1453 } else
1454 musb_ep->wedged = 0; 1454 musb_ep->wedged = 0;
1455 1455
1456 /* set/clear the stall and toggle bits */ 1456 /* set/clear the stall and toggle bits */
1457 dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear"); 1457 dev_dbg(musb->controller, "%s: %s stall\n", ep->name, value ? "set" : "clear");
1458 if (musb_ep->is_in) { 1458 if (musb_ep->is_in) {
1459 csr = musb_readw(epio, MUSB_TXCSR); 1459 csr = musb_readw(epio, MUSB_TXCSR);
1460 csr |= MUSB_TXCSR_P_WZC_BITS 1460 csr |= MUSB_TXCSR_P_WZC_BITS
1461 | MUSB_TXCSR_CLRDATATOG; 1461 | MUSB_TXCSR_CLRDATATOG;
1462 if (value) 1462 if (value)
1463 csr |= MUSB_TXCSR_P_SENDSTALL; 1463 csr |= MUSB_TXCSR_P_SENDSTALL;
1464 else 1464 else
1465 csr &= ~(MUSB_TXCSR_P_SENDSTALL 1465 csr &= ~(MUSB_TXCSR_P_SENDSTALL
1466 | MUSB_TXCSR_P_SENTSTALL); 1466 | MUSB_TXCSR_P_SENTSTALL);
1467 csr &= ~MUSB_TXCSR_TXPKTRDY; 1467 csr &= ~MUSB_TXCSR_TXPKTRDY;
1468 musb_writew(epio, MUSB_TXCSR, csr); 1468 musb_writew(epio, MUSB_TXCSR, csr);
1469 } else { 1469 } else {
1470 csr = musb_readw(epio, MUSB_RXCSR); 1470 csr = musb_readw(epio, MUSB_RXCSR);
1471 csr |= MUSB_RXCSR_P_WZC_BITS 1471 csr |= MUSB_RXCSR_P_WZC_BITS
1472 | MUSB_RXCSR_FLUSHFIFO 1472 | MUSB_RXCSR_FLUSHFIFO
1473 | MUSB_RXCSR_CLRDATATOG; 1473 | MUSB_RXCSR_CLRDATATOG;
1474 if (value) 1474 if (value)
1475 csr |= MUSB_RXCSR_P_SENDSTALL; 1475 csr |= MUSB_RXCSR_P_SENDSTALL;
1476 else 1476 else
1477 csr &= ~(MUSB_RXCSR_P_SENDSTALL 1477 csr &= ~(MUSB_RXCSR_P_SENDSTALL
1478 | MUSB_RXCSR_P_SENTSTALL); 1478 | MUSB_RXCSR_P_SENTSTALL);
1479 musb_writew(epio, MUSB_RXCSR, csr); 1479 musb_writew(epio, MUSB_RXCSR, csr);
1480 } 1480 }
1481 1481
1482 /* maybe start the first request in the queue */ 1482 /* maybe start the first request in the queue */
1483 if (!musb_ep->busy && !value && request) { 1483 if (!musb_ep->busy && !value && request) {
1484 dev_dbg(musb->controller, "restarting the request\n"); 1484 dev_dbg(musb->controller, "restarting the request\n");
1485 musb_ep_restart(musb, request); 1485 musb_ep_restart(musb, request);
1486 } 1486 }
1487 1487
1488 done: 1488 done:
1489 spin_unlock_irqrestore(&musb->lock, flags); 1489 spin_unlock_irqrestore(&musb->lock, flags);
1490 return status; 1490 return status;
1491 } 1491 }
1492 1492
1493 /* 1493 /*
1494 * Sets the halt feature with the clear requests ignored 1494 * Sets the halt feature with the clear requests ignored
1495 */ 1495 */
1496 static int musb_gadget_set_wedge(struct usb_ep *ep) 1496 static int musb_gadget_set_wedge(struct usb_ep *ep)
1497 { 1497 {
1498 struct musb_ep *musb_ep = to_musb_ep(ep); 1498 struct musb_ep *musb_ep = to_musb_ep(ep);
1499 1499
1500 if (!ep) 1500 if (!ep)
1501 return -EINVAL; 1501 return -EINVAL;
1502 1502
1503 musb_ep->wedged = 1; 1503 musb_ep->wedged = 1;
1504 1504
1505 return usb_ep_set_halt(ep); 1505 return usb_ep_set_halt(ep);
1506 } 1506 }
1507 1507
1508 static int musb_gadget_fifo_status(struct usb_ep *ep) 1508 static int musb_gadget_fifo_status(struct usb_ep *ep)
1509 { 1509 {
1510 struct musb_ep *musb_ep = to_musb_ep(ep); 1510 struct musb_ep *musb_ep = to_musb_ep(ep);
1511 void __iomem *epio = musb_ep->hw_ep->regs; 1511 void __iomem *epio = musb_ep->hw_ep->regs;
1512 int retval = -EINVAL; 1512 int retval = -EINVAL;
1513 1513
1514 if (musb_ep->desc && !musb_ep->is_in) { 1514 if (musb_ep->desc && !musb_ep->is_in) {
1515 struct musb *musb = musb_ep->musb; 1515 struct musb *musb = musb_ep->musb;
1516 int epnum = musb_ep->current_epnum; 1516 int epnum = musb_ep->current_epnum;
1517 void __iomem *mbase = musb->mregs; 1517 void __iomem *mbase = musb->mregs;
1518 unsigned long flags; 1518 unsigned long flags;
1519 1519
1520 spin_lock_irqsave(&musb->lock, flags); 1520 spin_lock_irqsave(&musb->lock, flags);
1521 1521
1522 musb_ep_select(mbase, epnum); 1522 musb_ep_select(mbase, epnum);
1523 /* FIXME return zero unless RXPKTRDY is set */ 1523 /* FIXME return zero unless RXPKTRDY is set */
1524 retval = musb_readw(epio, MUSB_RXCOUNT); 1524 retval = musb_readw(epio, MUSB_RXCOUNT);
1525 1525
1526 spin_unlock_irqrestore(&musb->lock, flags); 1526 spin_unlock_irqrestore(&musb->lock, flags);
1527 } 1527 }
1528 return retval; 1528 return retval;
1529 } 1529 }
1530 1530
1531 static void musb_gadget_fifo_flush(struct usb_ep *ep) 1531 static void musb_gadget_fifo_flush(struct usb_ep *ep)
1532 { 1532 {
1533 struct musb_ep *musb_ep = to_musb_ep(ep); 1533 struct musb_ep *musb_ep = to_musb_ep(ep);
1534 struct musb *musb = musb_ep->musb; 1534 struct musb *musb = musb_ep->musb;
1535 u8 epnum = musb_ep->current_epnum; 1535 u8 epnum = musb_ep->current_epnum;
1536 void __iomem *epio = musb->endpoints[epnum].regs; 1536 void __iomem *epio = musb->endpoints[epnum].regs;
1537 void __iomem *mbase; 1537 void __iomem *mbase;
1538 unsigned long flags; 1538 unsigned long flags;
1539 u16 csr, int_txe; 1539 u16 csr, int_txe;
1540 1540
1541 mbase = musb->mregs; 1541 mbase = musb->mregs;
1542 1542
1543 spin_lock_irqsave(&musb->lock, flags); 1543 spin_lock_irqsave(&musb->lock, flags);
1544 musb_ep_select(mbase, (u8) epnum); 1544 musb_ep_select(mbase, (u8) epnum);
1545 1545
1546 /* disable interrupts */ 1546 /* disable interrupts */
1547 int_txe = musb_readw(mbase, MUSB_INTRTXE); 1547 int_txe = musb_readw(mbase, MUSB_INTRTXE);
1548 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum)); 1548 musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));
1549 1549
1550 if (musb_ep->is_in) { 1550 if (musb_ep->is_in) {
1551 csr = musb_readw(epio, MUSB_TXCSR); 1551 csr = musb_readw(epio, MUSB_TXCSR);
1552 if (csr & MUSB_TXCSR_FIFONOTEMPTY) { 1552 if (csr & MUSB_TXCSR_FIFONOTEMPTY) {
1553 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS; 1553 csr |= MUSB_TXCSR_FLUSHFIFO | MUSB_TXCSR_P_WZC_BITS;
1554 /* 1554 /*
1555 * Setting both TXPKTRDY and FLUSHFIFO makes the controller 1555 * Setting both TXPKTRDY and FLUSHFIFO makes the controller
1556 * interrupt the current FIFO loading, but not flush 1556 * interrupt the current FIFO loading, but not flush
1557 * the already loaded packets. 1557 * the already loaded packets.
1558 */ 1558 */
1559 csr &= ~MUSB_TXCSR_TXPKTRDY; 1559 csr &= ~MUSB_TXCSR_TXPKTRDY;
1560 musb_writew(epio, MUSB_TXCSR, csr); 1560 musb_writew(epio, MUSB_TXCSR, csr);
1561 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */ 1561 /* REVISIT may be inappropriate w/o FIFONOTEMPTY ... */
1562 musb_writew(epio, MUSB_TXCSR, csr); 1562 musb_writew(epio, MUSB_TXCSR, csr);
1563 } 1563 }
1564 } else { 1564 } else {
1565 csr = musb_readw(epio, MUSB_RXCSR); 1565 csr = musb_readw(epio, MUSB_RXCSR);
1566 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS; 1566 csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_P_WZC_BITS;
1567 musb_writew(epio, MUSB_RXCSR, csr); 1567 musb_writew(epio, MUSB_RXCSR, csr);
1568 musb_writew(epio, MUSB_RXCSR, csr); 1568 musb_writew(epio, MUSB_RXCSR, csr);
1569 } 1569 }
1570 1570
1571 /* re-enable interrupt */ 1571 /* re-enable interrupt */
1572 musb_writew(mbase, MUSB_INTRTXE, int_txe); 1572 musb_writew(mbase, MUSB_INTRTXE, int_txe);
1573 spin_unlock_irqrestore(&musb->lock, flags); 1573 spin_unlock_irqrestore(&musb->lock, flags);
1574 } 1574 }
1575 1575
1576 static const struct usb_ep_ops musb_ep_ops = { 1576 static const struct usb_ep_ops musb_ep_ops = {
1577 .enable = musb_gadget_enable, 1577 .enable = musb_gadget_enable,
1578 .disable = musb_gadget_disable, 1578 .disable = musb_gadget_disable,
1579 .alloc_request = musb_alloc_request, 1579 .alloc_request = musb_alloc_request,
1580 .free_request = musb_free_request, 1580 .free_request = musb_free_request,
1581 .queue = musb_gadget_queue, 1581 .queue = musb_gadget_queue,
1582 .dequeue = musb_gadget_dequeue, 1582 .dequeue = musb_gadget_dequeue,
1583 .set_halt = musb_gadget_set_halt, 1583 .set_halt = musb_gadget_set_halt,
1584 .set_wedge = musb_gadget_set_wedge, 1584 .set_wedge = musb_gadget_set_wedge,
1585 .fifo_status = musb_gadget_fifo_status, 1585 .fifo_status = musb_gadget_fifo_status,
1586 .fifo_flush = musb_gadget_fifo_flush 1586 .fifo_flush = musb_gadget_fifo_flush
1587 }; 1587 };
1588 1588
1589 /* ----------------------------------------------------------------------- */ 1589 /* ----------------------------------------------------------------------- */
1590 1590
1591 static int musb_gadget_get_frame(struct usb_gadget *gadget) 1591 static int musb_gadget_get_frame(struct usb_gadget *gadget)
1592 { 1592 {
1593 struct musb *musb = gadget_to_musb(gadget); 1593 struct musb *musb = gadget_to_musb(gadget);
1594 1594
1595 return (int)musb_readw(musb->mregs, MUSB_FRAME); 1595 return (int)musb_readw(musb->mregs, MUSB_FRAME);
1596 } 1596 }
1597 1597
1598 static int musb_gadget_wakeup(struct usb_gadget *gadget) 1598 static int musb_gadget_wakeup(struct usb_gadget *gadget)
1599 { 1599 {
1600 struct musb *musb = gadget_to_musb(gadget); 1600 struct musb *musb = gadget_to_musb(gadget);
1601 void __iomem *mregs = musb->mregs; 1601 void __iomem *mregs = musb->mregs;
1602 unsigned long flags; 1602 unsigned long flags;
1603 int status = -EINVAL; 1603 int status = -EINVAL;
1604 u8 power, devctl; 1604 u8 power, devctl;
1605 int retries; 1605 int retries;
1606 1606
1607 spin_lock_irqsave(&musb->lock, flags); 1607 spin_lock_irqsave(&musb->lock, flags);
1608 1608
1609 switch (musb->xceiv->state) { 1609 switch (musb->xceiv->state) {
1610 case OTG_STATE_B_PERIPHERAL: 1610 case OTG_STATE_B_PERIPHERAL:
1611 /* NOTE: OTG state machine doesn't include B_SUSPENDED; 1611 /* NOTE: OTG state machine doesn't include B_SUSPENDED;
1612 * that's part of the standard usb 1.1 state machine, and 1612 * that's part of the standard usb 1.1 state machine, and
1613 * doesn't affect OTG transitions. 1613 * doesn't affect OTG transitions.
1614 */ 1614 */
1615 if (musb->may_wakeup && musb->is_suspended) 1615 if (musb->may_wakeup && musb->is_suspended)
1616 break; 1616 break;
1617 goto done; 1617 goto done;
1618 case OTG_STATE_B_IDLE: 1618 case OTG_STATE_B_IDLE:
1619 /* Start SRP ... OTG not required. */ 1619 /* Start SRP ... OTG not required. */
1620 devctl = musb_readb(mregs, MUSB_DEVCTL); 1620 devctl = musb_readb(mregs, MUSB_DEVCTL);
1621 dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl); 1621 dev_dbg(musb->controller, "Sending SRP: devctl: %02x\n", devctl);
1622 devctl |= MUSB_DEVCTL_SESSION; 1622 devctl |= MUSB_DEVCTL_SESSION;
1623 musb_writeb(mregs, MUSB_DEVCTL, devctl); 1623 musb_writeb(mregs, MUSB_DEVCTL, devctl);
1624 devctl = musb_readb(mregs, MUSB_DEVCTL); 1624 devctl = musb_readb(mregs, MUSB_DEVCTL);
1625 retries = 100; 1625 retries = 100;
1626 while (!(devctl & MUSB_DEVCTL_SESSION)) { 1626 while (!(devctl & MUSB_DEVCTL_SESSION)) {
1627 devctl = musb_readb(mregs, MUSB_DEVCTL); 1627 devctl = musb_readb(mregs, MUSB_DEVCTL);
1628 if (retries-- < 1) 1628 if (retries-- < 1)
1629 break; 1629 break;
1630 } 1630 }
1631 retries = 10000; 1631 retries = 10000;
1632 while (devctl & MUSB_DEVCTL_SESSION) { 1632 while (devctl & MUSB_DEVCTL_SESSION) {
1633 devctl = musb_readb(mregs, MUSB_DEVCTL); 1633 devctl = musb_readb(mregs, MUSB_DEVCTL);
1634 if (retries-- < 1) 1634 if (retries-- < 1)
1635 break; 1635 break;
1636 } 1636 }
1637 1637
1638 spin_unlock_irqrestore(&musb->lock, flags); 1638 spin_unlock_irqrestore(&musb->lock, flags);
1639 otg_start_srp(musb->xceiv->otg); 1639 otg_start_srp(musb->xceiv->otg);
1640 spin_lock_irqsave(&musb->lock, flags); 1640 spin_lock_irqsave(&musb->lock, flags);
1641 1641
1642 /* Block idling for at least 1s */ 1642 /* Block idling for at least 1s */
1643 musb_platform_try_idle(musb, 1643 musb_platform_try_idle(musb,
1644 jiffies + msecs_to_jiffies(1 * HZ)); 1644 jiffies + msecs_to_jiffies(1 * HZ));
1645 1645
1646 status = 0; 1646 status = 0;
1647 goto done; 1647 goto done;
1648 default: 1648 default:
1649 dev_dbg(musb->controller, "Unhandled wake: %s\n", 1649 dev_dbg(musb->controller, "Unhandled wake: %s\n",
1650 otg_state_string(musb->xceiv->state)); 1650 otg_state_string(musb->xceiv->state));
1651 goto done; 1651 goto done;
1652 } 1652 }
1653 1653
1654 status = 0; 1654 status = 0;
1655 1655
1656 power = musb_readb(mregs, MUSB_POWER); 1656 power = musb_readb(mregs, MUSB_POWER);
1657 power |= MUSB_POWER_RESUME; 1657 power |= MUSB_POWER_RESUME;
1658 musb_writeb(mregs, MUSB_POWER, power); 1658 musb_writeb(mregs, MUSB_POWER, power);
1659 dev_dbg(musb->controller, "issue wakeup\n"); 1659 dev_dbg(musb->controller, "issue wakeup\n");
1660 1660
1661 /* FIXME do this next chunk in a timer callback, no udelay */ 1661 /* FIXME do this next chunk in a timer callback, no udelay */
1662 mdelay(2); 1662 mdelay(2);
1663 1663
1664 power = musb_readb(mregs, MUSB_POWER); 1664 power = musb_readb(mregs, MUSB_POWER);
1665 power &= ~MUSB_POWER_RESUME; 1665 power &= ~MUSB_POWER_RESUME;
1666 musb_writeb(mregs, MUSB_POWER, power); 1666 musb_writeb(mregs, MUSB_POWER, power);
1667 done: 1667 done:
1668 spin_unlock_irqrestore(&musb->lock, flags); 1668 spin_unlock_irqrestore(&musb->lock, flags);
1669 return status; 1669 return status;
1670 } 1670 }
1671 1671
1672 static int 1672 static int
1673 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered) 1673 musb_gadget_set_self_powered(struct usb_gadget *gadget, int is_selfpowered)
1674 { 1674 {
1675 struct musb *musb = gadget_to_musb(gadget); 1675 struct musb *musb = gadget_to_musb(gadget);
1676 1676
1677 musb->is_self_powered = !!is_selfpowered; 1677 musb->is_self_powered = !!is_selfpowered;
1678 return 0; 1678 return 0;
1679 } 1679 }
1680 1680
1681 static void musb_pullup(struct musb *musb, int is_on) 1681 static void musb_pullup(struct musb *musb, int is_on)
1682 { 1682 {
1683 u8 power; 1683 u8 power;
1684 1684
1685 power = musb_readb(musb->mregs, MUSB_POWER); 1685 power = musb_readb(musb->mregs, MUSB_POWER);
1686 if (is_on) 1686 if (is_on)
1687 power |= MUSB_POWER_SOFTCONN; 1687 power |= MUSB_POWER_SOFTCONN;
1688 else 1688 else
1689 power &= ~MUSB_POWER_SOFTCONN; 1689 power &= ~MUSB_POWER_SOFTCONN;
1690 1690
1691 /* FIXME if on, HdrcStart; if off, HdrcStop */ 1691 /* FIXME if on, HdrcStart; if off, HdrcStop */
1692 1692
1693 dev_dbg(musb->controller, "gadget D+ pullup %s\n", 1693 dev_dbg(musb->controller, "gadget D+ pullup %s\n",
1694 is_on ? "on" : "off"); 1694 is_on ? "on" : "off");
1695 musb_writeb(musb->mregs, MUSB_POWER, power); 1695 musb_writeb(musb->mregs, MUSB_POWER, power);
1696 } 1696 }
1697 1697
1698 #if 0 1698 #if 0
1699 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active) 1699 static int musb_gadget_vbus_session(struct usb_gadget *gadget, int is_active)
1700 { 1700 {
1701 dev_dbg(musb->controller, "<= %s =>\n", __func__); 1701 dev_dbg(musb->controller, "<= %s =>\n", __func__);
1702 1702
1703 /* 1703 /*
1704 * FIXME iff driver's softconnect flag is set (as it is during probe, 1704 * FIXME iff driver's softconnect flag is set (as it is during probe,
1705 * though that can clear it), just musb_pullup(). 1705 * though that can clear it), just musb_pullup().
1706 */ 1706 */
1707 1707
1708 return -EINVAL; 1708 return -EINVAL;
1709 } 1709 }
1710 #endif 1710 #endif
1711 1711
1712 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) 1712 static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
1713 { 1713 {
1714 struct musb *musb = gadget_to_musb(gadget); 1714 struct musb *musb = gadget_to_musb(gadget);
1715 1715
1716 if (!musb->xceiv->set_power) 1716 if (!musb->xceiv->set_power)
1717 return -EOPNOTSUPP; 1717 return -EOPNOTSUPP;
1718 return usb_phy_set_power(musb->xceiv, mA); 1718 return usb_phy_set_power(musb->xceiv, mA);
1719 } 1719 }
1720 1720
1721 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on) 1721 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
1722 { 1722 {
1723 struct musb *musb = gadget_to_musb(gadget); 1723 struct musb *musb = gadget_to_musb(gadget);
1724 unsigned long flags; 1724 unsigned long flags;
1725 1725
1726 is_on = !!is_on; 1726 is_on = !!is_on;
1727 1727
1728 pm_runtime_get_sync(musb->controller); 1728 pm_runtime_get_sync(musb->controller);
1729 1729
1730 /* NOTE: this assumes we are sensing vbus; we'd rather 1730 /* NOTE: this assumes we are sensing vbus; we'd rather
1731 * not pullup unless the B-session is active. 1731 * not pullup unless the B-session is active.
1732 */ 1732 */
1733 spin_lock_irqsave(&musb->lock, flags); 1733 spin_lock_irqsave(&musb->lock, flags);
1734 if (is_on != musb->softconnect) { 1734 if (is_on != musb->softconnect) {
1735 musb->softconnect = is_on; 1735 musb->softconnect = is_on;
1736 musb_pullup(musb, is_on); 1736 musb_pullup(musb, is_on);
1737 } 1737 }
1738 spin_unlock_irqrestore(&musb->lock, flags); 1738 spin_unlock_irqrestore(&musb->lock, flags);
1739 1739
1740 pm_runtime_put(musb->controller); 1740 pm_runtime_put(musb->controller);
1741 1741
1742 return 0; 1742 return 0;
1743 } 1743 }
1744 1744
1745 static int musb_gadget_start(struct usb_gadget *g, 1745 static int musb_gadget_start(struct usb_gadget *g,
1746 struct usb_gadget_driver *driver); 1746 struct usb_gadget_driver *driver);
1747 static int musb_gadget_stop(struct usb_gadget *g, 1747 static int musb_gadget_stop(struct usb_gadget *g,
1748 struct usb_gadget_driver *driver); 1748 struct usb_gadget_driver *driver);
1749 1749
1750 static const struct usb_gadget_ops musb_gadget_operations = { 1750 static const struct usb_gadget_ops musb_gadget_operations = {
1751 .get_frame = musb_gadget_get_frame, 1751 .get_frame = musb_gadget_get_frame,
1752 .wakeup = musb_gadget_wakeup, 1752 .wakeup = musb_gadget_wakeup,
1753 .set_selfpowered = musb_gadget_set_self_powered, 1753 .set_selfpowered = musb_gadget_set_self_powered,
1754 /* .vbus_session = musb_gadget_vbus_session, */ 1754 /* .vbus_session = musb_gadget_vbus_session, */
1755 .vbus_draw = musb_gadget_vbus_draw, 1755 .vbus_draw = musb_gadget_vbus_draw,
1756 .pullup = musb_gadget_pullup, 1756 .pullup = musb_gadget_pullup,
1757 .udc_start = musb_gadget_start, 1757 .udc_start = musb_gadget_start,
1758 .udc_stop = musb_gadget_stop, 1758 .udc_stop = musb_gadget_stop,
1759 }; 1759 };
1760 1760
1761 /* ----------------------------------------------------------------------- */ 1761 /* ----------------------------------------------------------------------- */
1762 1762
1763 /* Registration */ 1763 /* Registration */
1764 1764
1765 /* Only this registration code "knows" the rule (from USB standards) 1765 /* Only this registration code "knows" the rule (from USB standards)
1766 * about there being only one external upstream port. It assumes 1766 * about there being only one external upstream port. It assumes
1767 * all peripheral ports are external... 1767 * all peripheral ports are external...
1768 */ 1768 */
1769 1769
1770 static void musb_gadget_release(struct device *dev) 1770 static void musb_gadget_release(struct device *dev)
1771 { 1771 {
1772 /* kref_put(WHAT) */ 1772 /* kref_put(WHAT) */
1773 dev_dbg(dev, "%s\n", __func__); 1773 dev_dbg(dev, "%s\n", __func__);
1774 } 1774 }
1775 1775
1776 1776
1777 static void __devinit 1777 static void __devinit
1778 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in) 1778 init_peripheral_ep(struct musb *musb, struct musb_ep *ep, u8 epnum, int is_in)
1779 { 1779 {
1780 struct musb_hw_ep *hw_ep = musb->endpoints + epnum; 1780 struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
1781 1781
1782 memset(ep, 0, sizeof *ep); 1782 memset(ep, 0, sizeof *ep);
1783 1783
1784 ep->current_epnum = epnum; 1784 ep->current_epnum = epnum;
1785 ep->musb = musb; 1785 ep->musb = musb;
1786 ep->hw_ep = hw_ep; 1786 ep->hw_ep = hw_ep;
1787 ep->is_in = is_in; 1787 ep->is_in = is_in;
1788 1788
1789 INIT_LIST_HEAD(&ep->req_list); 1789 INIT_LIST_HEAD(&ep->req_list);
1790 1790
1791 sprintf(ep->name, "ep%d%s", epnum, 1791 sprintf(ep->name, "ep%d%s", epnum,
1792 (!epnum || hw_ep->is_shared_fifo) ? "" : ( 1792 (!epnum || hw_ep->is_shared_fifo) ? "" : (
1793 is_in ? "in" : "out")); 1793 is_in ? "in" : "out"));
1794 ep->end_point.name = ep->name; 1794 ep->end_point.name = ep->name;
1795 INIT_LIST_HEAD(&ep->end_point.ep_list); 1795 INIT_LIST_HEAD(&ep->end_point.ep_list);
1796 if (!epnum) { 1796 if (!epnum) {
1797 ep->end_point.maxpacket = 64; 1797 ep->end_point.maxpacket = 64;
1798 ep->end_point.ops = &musb_g_ep0_ops; 1798 ep->end_point.ops = &musb_g_ep0_ops;
1799 musb->g.ep0 = &ep->end_point; 1799 musb->g.ep0 = &ep->end_point;
1800 } else { 1800 } else {
1801 if (is_in) 1801 if (is_in)
1802 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx; 1802 ep->end_point.maxpacket = hw_ep->max_packet_sz_tx;
1803 else 1803 else
1804 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx; 1804 ep->end_point.maxpacket = hw_ep->max_packet_sz_rx;
1805 ep->end_point.ops = &musb_ep_ops; 1805 ep->end_point.ops = &musb_ep_ops;
1806 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list); 1806 list_add_tail(&ep->end_point.ep_list, &musb->g.ep_list);
1807 } 1807 }
1808 } 1808 }
1809 1809
1810 /* 1810 /*
1811 * Initialize the endpoints exposed to peripheral drivers, with backlinks 1811 * Initialize the endpoints exposed to peripheral drivers, with backlinks
1812 * to the rest of the driver state. 1812 * to the rest of the driver state.
1813 */ 1813 */
1814 static inline void __devinit musb_g_init_endpoints(struct musb *musb) 1814 static inline void __devinit musb_g_init_endpoints(struct musb *musb)
1815 { 1815 {
1816 u8 epnum; 1816 u8 epnum;
1817 struct musb_hw_ep *hw_ep; 1817 struct musb_hw_ep *hw_ep;
1818 unsigned count = 0; 1818 unsigned count = 0;
1819 1819
1820 /* initialize endpoint list just once */ 1820 /* initialize endpoint list just once */
1821 INIT_LIST_HEAD(&(musb->g.ep_list)); 1821 INIT_LIST_HEAD(&(musb->g.ep_list));
1822 1822
1823 for (epnum = 0, hw_ep = musb->endpoints; 1823 for (epnum = 0, hw_ep = musb->endpoints;
1824 epnum < musb->nr_endpoints; 1824 epnum < musb->nr_endpoints;
1825 epnum++, hw_ep++) { 1825 epnum++, hw_ep++) {
1826 if (hw_ep->is_shared_fifo /* || !epnum */) { 1826 if (hw_ep->is_shared_fifo /* || !epnum */) {
1827 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0); 1827 init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
1828 count++; 1828 count++;
1829 } else { 1829 } else {
1830 if (hw_ep->max_packet_sz_tx) { 1830 if (hw_ep->max_packet_sz_tx) {
1831 init_peripheral_ep(musb, &hw_ep->ep_in, 1831 init_peripheral_ep(musb, &hw_ep->ep_in,
1832 epnum, 1); 1832 epnum, 1);
1833 count++; 1833 count++;
1834 } 1834 }
1835 if (hw_ep->max_packet_sz_rx) { 1835 if (hw_ep->max_packet_sz_rx) {
1836 init_peripheral_ep(musb, &hw_ep->ep_out, 1836 init_peripheral_ep(musb, &hw_ep->ep_out,
1837 epnum, 0); 1837 epnum, 0);
1838 count++; 1838 count++;
1839 } 1839 }
1840 } 1840 }
1841 } 1841 }
1842 } 1842 }
1843 1843
1844 /* called once during driver setup to initialize and link into 1844 /* called once during driver setup to initialize and link into
1845 * the driver model; memory is zeroed. 1845 * the driver model; memory is zeroed.
1846 */ 1846 */
1847 int __devinit musb_gadget_setup(struct musb *musb) 1847 int __devinit musb_gadget_setup(struct musb *musb)
1848 { 1848 {
1849 int status; 1849 int status;
1850 1850
1851 /* REVISIT minor race: if (erroneously) setting up two 1851 /* REVISIT minor race: if (erroneously) setting up two
1852 * musb peripherals at the same time, only the bus lock 1852 * musb peripherals at the same time, only the bus lock
1853 * is probably held. 1853 * is probably held.
1854 */ 1854 */
1855 1855
1856 musb->g.ops = &musb_gadget_operations; 1856 musb->g.ops = &musb_gadget_operations;
1857 musb->g.max_speed = USB_SPEED_HIGH; 1857 musb->g.max_speed = USB_SPEED_HIGH;
1858 musb->g.speed = USB_SPEED_UNKNOWN; 1858 musb->g.speed = USB_SPEED_UNKNOWN;
1859 1859
1860 /* this "gadget" abstracts/virtualizes the controller */ 1860 /* this "gadget" abstracts/virtualizes the controller */
1861 dev_set_name(&musb->g.dev, "gadget"); 1861 dev_set_name(&musb->g.dev, "gadget");
1862 musb->g.dev.parent = musb->controller; 1862 musb->g.dev.parent = musb->controller;
1863 musb->g.dev.dma_mask = musb->controller->dma_mask; 1863 musb->g.dev.dma_mask = musb->controller->dma_mask;
1864 musb->g.dev.release = musb_gadget_release; 1864 musb->g.dev.release = musb_gadget_release;
1865 musb->g.name = musb_driver_name; 1865 musb->g.name = musb_driver_name;
1866 1866
1867 if (is_otg_enabled(musb)) 1867 musb->g.is_otg = 1;
1868 musb->g.is_otg = 1;
1869 1868
1870 musb_g_init_endpoints(musb); 1869 musb_g_init_endpoints(musb);
1871 1870
1872 musb->is_active = 0; 1871 musb->is_active = 0;
1873 musb_platform_try_idle(musb, 0); 1872 musb_platform_try_idle(musb, 0);
1874 1873
1875 status = device_register(&musb->g.dev); 1874 status = device_register(&musb->g.dev);
1876 if (status != 0) { 1875 if (status != 0) {
1877 put_device(&musb->g.dev); 1876 put_device(&musb->g.dev);
1878 return status; 1877 return status;
1879 } 1878 }
1880 status = usb_add_gadget_udc(musb->controller, &musb->g); 1879 status = usb_add_gadget_udc(musb->controller, &musb->g);
1881 if (status) 1880 if (status)
1882 goto err; 1881 goto err;
1883 1882
1884 return 0; 1883 return 0;
1885 err: 1884 err:
1886 musb->g.dev.parent = NULL; 1885 musb->g.dev.parent = NULL;
1887 device_unregister(&musb->g.dev); 1886 device_unregister(&musb->g.dev);
1888 return status; 1887 return status;
1889 } 1888 }
1890 1889
1891 void musb_gadget_cleanup(struct musb *musb) 1890 void musb_gadget_cleanup(struct musb *musb)
1892 { 1891 {
1893 usb_del_gadget_udc(&musb->g); 1892 usb_del_gadget_udc(&musb->g);
1894 if (musb->g.dev.parent) 1893 if (musb->g.dev.parent)
1895 device_unregister(&musb->g.dev); 1894 device_unregister(&musb->g.dev);
1896 } 1895 }
1897 1896
1898 /* 1897 /*
1899 * Register the gadget driver. Used by gadget drivers when 1898 * Register the gadget driver. Used by gadget drivers when
1900 * registering themselves with the controller. 1899 * registering themselves with the controller.
1901 * 1900 *
1902 * -EINVAL something went wrong (not driver) 1901 * -EINVAL something went wrong (not driver)
1903 * -EBUSY another gadget is already using the controller 1902 * -EBUSY another gadget is already using the controller
1904 * -ENOMEM no memory to perform the operation 1903 * -ENOMEM no memory to perform the operation
1905 * 1904 *
1906 * @param driver the gadget driver 1905 * @param driver the gadget driver
1907 * @return <0 if error, 0 if everything is fine 1906 * @return <0 if error, 0 if everything is fine
1908 */ 1907 */
1909 static int musb_gadget_start(struct usb_gadget *g, 1908 static int musb_gadget_start(struct usb_gadget *g,
1910 struct usb_gadget_driver *driver) 1909 struct usb_gadget_driver *driver)
1911 { 1910 {
1912 struct musb *musb = gadget_to_musb(g); 1911 struct musb *musb = gadget_to_musb(g);
1913 struct usb_otg *otg = musb->xceiv->otg; 1912 struct usb_otg *otg = musb->xceiv->otg;
1913 struct usb_hcd *hcd = musb_to_hcd(musb);
1914 unsigned long flags; 1914 unsigned long flags;
1915 int retval = -EINVAL; 1915 int retval = 0;
1916 1916
1917 if (driver->max_speed < USB_SPEED_HIGH) 1917 if (driver->max_speed < USB_SPEED_HIGH) {
1918 goto err0; 1918 retval = -EINVAL;
1919 goto err;
1920 }
1919 1921
1920 pm_runtime_get_sync(musb->controller); 1922 pm_runtime_get_sync(musb->controller);
1921 1923
1922 dev_dbg(musb->controller, "registering driver %s\n", driver->function); 1924 dev_dbg(musb->controller, "registering driver %s\n", driver->function);
1923 1925
1924 musb->softconnect = 0; 1926 musb->softconnect = 0;
1925 musb->gadget_driver = driver; 1927 musb->gadget_driver = driver;
1926 1928
1927 spin_lock_irqsave(&musb->lock, flags); 1929 spin_lock_irqsave(&musb->lock, flags);
1928 musb->is_active = 1; 1930 musb->is_active = 1;
1929 1931
1930 otg_set_peripheral(otg, &musb->g); 1932 otg_set_peripheral(otg, &musb->g);
1931 musb->xceiv->state = OTG_STATE_B_IDLE; 1933 musb->xceiv->state = OTG_STATE_B_IDLE;
1934 spin_unlock_irqrestore(&musb->lock, flags);
1932 1935
1933 /* 1936 /* REVISIT: funcall to other code, which also
1934 * FIXME this ignores the softconnect flag. Drivers are 1937 * handles power budgeting ... this way also
1935 * allowed to hold the peripheral inactive until for example 1938 * ensures HdrcStart is indirectly called.
1936 * userspace hooks up printer hardware or DSP codecs, so
1937 * hosts only see fully functional devices.
1938 */ 1939 */
1940 retval = usb_add_hcd(hcd, 0, 0);
1941 if (retval < 0) {
1942 dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
1943 goto err;
1944 }
1939 1945
1940 if (!is_otg_enabled(musb)) 1946 if ((musb->xceiv->last_event == USB_EVENT_ID)
1941 musb_start(musb); 1947 && otg->set_vbus)
1948 otg_set_vbus(otg, 1);
1942 1949
1943 spin_unlock_irqrestore(&musb->lock, flags); 1950 hcd->self.uses_pio_for_control = 1;
1944 1951
1945 if (is_otg_enabled(musb)) {
1946 struct usb_hcd *hcd = musb_to_hcd(musb);
1947
1948 dev_dbg(musb->controller, "OTG startup...\n");
1949
1950 /* REVISIT: funcall to other code, which also
1951 * handles power budgeting ... this way also
1952 * ensures HdrcStart is indirectly called.
1953 */
1954 retval = usb_add_hcd(musb_to_hcd(musb), 0, 0);
1955 if (retval < 0) {
1956 dev_dbg(musb->controller, "add_hcd failed, %d\n", retval);
1957 goto err2;
1958 }
1959
1960 if ((musb->xceiv->last_event == USB_EVENT_ID)
1961 && otg->set_vbus)
1962 otg_set_vbus(otg, 1);
1963
1964 hcd->self.uses_pio_for_control = 1;
1965 }
1966 if (musb->xceiv->last_event == USB_EVENT_NONE) 1952 if (musb->xceiv->last_event == USB_EVENT_NONE)
1967 pm_runtime_put(musb->controller); 1953 pm_runtime_put(musb->controller);
1968 1954
1969 return 0; 1955 return 0;
1970 1956
1971 err2: 1957 err:
1972 if (!is_otg_enabled(musb))
1973 musb_stop(musb);
1974 err0:
1975 return retval; 1958 return retval;
1976 } 1959 }
1977 1960
1978 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver) 1961 static void stop_activity(struct musb *musb, struct usb_gadget_driver *driver)
1979 { 1962 {
1980 int i; 1963 int i;
1981 struct musb_hw_ep *hw_ep; 1964 struct musb_hw_ep *hw_ep;
1982 1965
1983 /* don't disconnect if it's not connected */ 1966 /* don't disconnect if it's not connected */
1984 if (musb->g.speed == USB_SPEED_UNKNOWN) 1967 if (musb->g.speed == USB_SPEED_UNKNOWN)
1985 driver = NULL; 1968 driver = NULL;
1986 else 1969 else
1987 musb->g.speed = USB_SPEED_UNKNOWN; 1970 musb->g.speed = USB_SPEED_UNKNOWN;
1988 1971
1989 /* deactivate the hardware */ 1972 /* deactivate the hardware */
1990 if (musb->softconnect) { 1973 if (musb->softconnect) {
1991 musb->softconnect = 0; 1974 musb->softconnect = 0;
1992 musb_pullup(musb, 0); 1975 musb_pullup(musb, 0);
1993 } 1976 }
1994 musb_stop(musb); 1977 musb_stop(musb);
1995 1978
1996 /* killing any outstanding requests will quiesce the driver; 1979 /* killing any outstanding requests will quiesce the driver;
1997 * then report disconnect 1980 * then report disconnect
1998 */ 1981 */
1999 if (driver) { 1982 if (driver) {
2000 for (i = 0, hw_ep = musb->endpoints; 1983 for (i = 0, hw_ep = musb->endpoints;
2001 i < musb->nr_endpoints; 1984 i < musb->nr_endpoints;
2002 i++, hw_ep++) { 1985 i++, hw_ep++) {
2003 musb_ep_select(musb->mregs, i); 1986 musb_ep_select(musb->mregs, i);
2004 if (hw_ep->is_shared_fifo /* || !epnum */) { 1987 if (hw_ep->is_shared_fifo /* || !epnum */) {
2005 nuke(&hw_ep->ep_in, -ESHUTDOWN); 1988 nuke(&hw_ep->ep_in, -ESHUTDOWN);
2006 } else { 1989 } else {
2007 if (hw_ep->max_packet_sz_tx) 1990 if (hw_ep->max_packet_sz_tx)
2008 nuke(&hw_ep->ep_in, -ESHUTDOWN); 1991 nuke(&hw_ep->ep_in, -ESHUTDOWN);
2009 if (hw_ep->max_packet_sz_rx) 1992 if (hw_ep->max_packet_sz_rx)
2010 nuke(&hw_ep->ep_out, -ESHUTDOWN); 1993 nuke(&hw_ep->ep_out, -ESHUTDOWN);
2011 } 1994 }
2012 } 1995 }
2013 } 1996 }
2014 } 1997 }
2015 1998
2016 /* 1999 /*
2017 * Unregister the gadget driver. Used by gadget drivers when 2000 * Unregister the gadget driver. Used by gadget drivers when
2018 * unregistering themselves from the controller. 2001 * unregistering themselves from the controller.
2019 * 2002 *
2020 * @param driver the gadget driver to unregister 2003 * @param driver the gadget driver to unregister
2021 */ 2004 */
2022 static int musb_gadget_stop(struct usb_gadget *g, 2005 static int musb_gadget_stop(struct usb_gadget *g,
2023 struct usb_gadget_driver *driver) 2006 struct usb_gadget_driver *driver)
2024 { 2007 {
2025 struct musb *musb = gadget_to_musb(g); 2008 struct musb *musb = gadget_to_musb(g);
2026 unsigned long flags; 2009 unsigned long flags;
2027 2010
2028 if (musb->xceiv->last_event == USB_EVENT_NONE) 2011 if (musb->xceiv->last_event == USB_EVENT_NONE)
2029 pm_runtime_get_sync(musb->controller); 2012 pm_runtime_get_sync(musb->controller);
2030 2013
2031 /* 2014 /*
2032 * REVISIT always use otg_set_peripheral() here too; 2015 * REVISIT always use otg_set_peripheral() here too;
2033 * this needs to shut down the OTG engine. 2016 * this needs to shut down the OTG engine.
2034 */ 2017 */
2035 2018
2036 spin_lock_irqsave(&musb->lock, flags); 2019 spin_lock_irqsave(&musb->lock, flags);
2037 2020
2038 musb_hnp_stop(musb); 2021 musb_hnp_stop(musb);
2039 2022
2040 (void) musb_gadget_vbus_draw(&musb->g, 0); 2023 (void) musb_gadget_vbus_draw(&musb->g, 0);
2041 2024
2042 musb->xceiv->state = OTG_STATE_UNDEFINED; 2025 musb->xceiv->state = OTG_STATE_UNDEFINED;
2043 stop_activity(musb, driver); 2026 stop_activity(musb, driver);
2044 otg_set_peripheral(musb->xceiv->otg, NULL); 2027 otg_set_peripheral(musb->xceiv->otg, NULL);
2045 2028
2046 dev_dbg(musb->controller, "unregistering driver %s\n", driver->function); 2029 dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
2047 2030
2048 musb->is_active = 0; 2031 musb->is_active = 0;
2049 musb_platform_try_idle(musb, 0); 2032 musb_platform_try_idle(musb, 0);
2050 spin_unlock_irqrestore(&musb->lock, flags); 2033 spin_unlock_irqrestore(&musb->lock, flags);
2051 2034
2052 if (is_otg_enabled(musb)) { 2035 usb_remove_hcd(musb_to_hcd(musb));
2053 usb_remove_hcd(musb_to_hcd(musb)); 2036 /*
2054 /* FIXME we need to be able to register another 2037 * FIXME we need to be able to register another
2055 * gadget driver here and have everything work; 2038 * gadget driver here and have everything work;
2056 * that currently misbehaves. 2039 * that currently misbehaves.
2057 */ 2040 */
2058 }
2059 2041
2060 if (!is_otg_enabled(musb))
2061 musb_stop(musb);
2062
2063 pm_runtime_put(musb->controller); 2042 pm_runtime_put(musb->controller);
2064 2043
2065 return 0; 2044 return 0;
2066 } 2045 }
2067 2046
2068 /* ----------------------------------------------------------------------- */ 2047 /* ----------------------------------------------------------------------- */
2069 2048
2070 /* lifecycle operations called through plat_uds.c */ 2049 /* lifecycle operations called through plat_uds.c */
2071 2050
2072 void musb_g_resume(struct musb *musb) 2051 void musb_g_resume(struct musb *musb)
2073 { 2052 {
2074 musb->is_suspended = 0; 2053 musb->is_suspended = 0;
2075 switch (musb->xceiv->state) { 2054 switch (musb->xceiv->state) {
2076 case OTG_STATE_B_IDLE: 2055 case OTG_STATE_B_IDLE:
2077 break; 2056 break;
2078 case OTG_STATE_B_WAIT_ACON: 2057 case OTG_STATE_B_WAIT_ACON:
2079 case OTG_STATE_B_PERIPHERAL: 2058 case OTG_STATE_B_PERIPHERAL:
2080 musb->is_active = 1; 2059 musb->is_active = 1;
2081 if (musb->gadget_driver && musb->gadget_driver->resume) { 2060 if (musb->gadget_driver && musb->gadget_driver->resume) {
2082 spin_unlock(&musb->lock); 2061 spin_unlock(&musb->lock);
2083 musb->gadget_driver->resume(&musb->g); 2062 musb->gadget_driver->resume(&musb->g);
2084 spin_lock(&musb->lock); 2063 spin_lock(&musb->lock);
2085 } 2064 }
2086 break; 2065 break;
2087 default: 2066 default:
2088 WARNING("unhandled RESUME transition (%s)\n", 2067 WARNING("unhandled RESUME transition (%s)\n",
2089 otg_state_string(musb->xceiv->state)); 2068 otg_state_string(musb->xceiv->state));
2090 } 2069 }
2091 } 2070 }
2092 2071
2093 /* called when SOF packets stop for 3+ msec */ 2072 /* called when SOF packets stop for 3+ msec */
2094 void musb_g_suspend(struct musb *musb) 2073 void musb_g_suspend(struct musb *musb)
2095 { 2074 {
2096 u8 devctl; 2075 u8 devctl;
2097 2076
2098 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 2077 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
2099 dev_dbg(musb->controller, "devctl %02x\n", devctl); 2078 dev_dbg(musb->controller, "devctl %02x\n", devctl);
2100 2079
2101 switch (musb->xceiv->state) { 2080 switch (musb->xceiv->state) {
2102 case OTG_STATE_B_IDLE: 2081 case OTG_STATE_B_IDLE:
2103 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) 2082 if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS)
2104 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 2083 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2105 break; 2084 break;
2106 case OTG_STATE_B_PERIPHERAL: 2085 case OTG_STATE_B_PERIPHERAL:
2107 musb->is_suspended = 1; 2086 musb->is_suspended = 1;
2108 if (musb->gadget_driver && musb->gadget_driver->suspend) { 2087 if (musb->gadget_driver && musb->gadget_driver->suspend) {
2109 spin_unlock(&musb->lock); 2088 spin_unlock(&musb->lock);
2110 musb->gadget_driver->suspend(&musb->g); 2089 musb->gadget_driver->suspend(&musb->g);
2111 spin_lock(&musb->lock); 2090 spin_lock(&musb->lock);
2112 } 2091 }
2113 break; 2092 break;
2114 default: 2093 default:
2115 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ; 2094 /* REVISIT if B_HOST, clear DEVCTL.HOSTREQ;
2116 * A_PERIPHERAL may need care too 2095 * A_PERIPHERAL may need care too
2117 */ 2096 */
2118 WARNING("unhandled SUSPEND transition (%s)\n", 2097 WARNING("unhandled SUSPEND transition (%s)\n",
2119 otg_state_string(musb->xceiv->state)); 2098 otg_state_string(musb->xceiv->state));
2120 } 2099 }
2121 } 2100 }
2122 2101
2123 /* Called during SRP */ 2102 /* Called during SRP */
2124 void musb_g_wakeup(struct musb *musb) 2103 void musb_g_wakeup(struct musb *musb)
2125 { 2104 {
2126 musb_gadget_wakeup(&musb->g); 2105 musb_gadget_wakeup(&musb->g);
2127 } 2106 }
2128 2107
2129 /* called when VBUS drops below session threshold, and in other cases */ 2108 /* called when VBUS drops below session threshold, and in other cases */
2130 void musb_g_disconnect(struct musb *musb) 2109 void musb_g_disconnect(struct musb *musb)
2131 { 2110 {
2132 void __iomem *mregs = musb->mregs; 2111 void __iomem *mregs = musb->mregs;
2133 u8 devctl = musb_readb(mregs, MUSB_DEVCTL); 2112 u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
2134 2113
2135 dev_dbg(musb->controller, "devctl %02x\n", devctl); 2114 dev_dbg(musb->controller, "devctl %02x\n", devctl);
2136 2115
2137 /* clear HR */ 2116 /* clear HR */
2138 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION); 2117 musb_writeb(mregs, MUSB_DEVCTL, devctl & MUSB_DEVCTL_SESSION);
2139 2118
2140 /* don't draw vbus until new b-default session */ 2119 /* don't draw vbus until new b-default session */
2141 (void) musb_gadget_vbus_draw(&musb->g, 0); 2120 (void) musb_gadget_vbus_draw(&musb->g, 0);
2142 2121
2143 musb->g.speed = USB_SPEED_UNKNOWN; 2122 musb->g.speed = USB_SPEED_UNKNOWN;
2144 if (musb->gadget_driver && musb->gadget_driver->disconnect) { 2123 if (musb->gadget_driver && musb->gadget_driver->disconnect) {
2145 spin_unlock(&musb->lock); 2124 spin_unlock(&musb->lock);
2146 musb->gadget_driver->disconnect(&musb->g); 2125 musb->gadget_driver->disconnect(&musb->g);
2147 spin_lock(&musb->lock); 2126 spin_lock(&musb->lock);
2148 } 2127 }
2149 2128
2150 switch (musb->xceiv->state) { 2129 switch (musb->xceiv->state) {
2151 default: 2130 default:
2152 dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n", 2131 dev_dbg(musb->controller, "Unhandled disconnect %s, setting a_idle\n",
2153 otg_state_string(musb->xceiv->state)); 2132 otg_state_string(musb->xceiv->state));
2154 musb->xceiv->state = OTG_STATE_A_IDLE; 2133 musb->xceiv->state = OTG_STATE_A_IDLE;
2155 MUSB_HST_MODE(musb); 2134 MUSB_HST_MODE(musb);
2156 break; 2135 break;
2157 case OTG_STATE_A_PERIPHERAL: 2136 case OTG_STATE_A_PERIPHERAL:
2158 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 2137 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
2159 MUSB_HST_MODE(musb); 2138 MUSB_HST_MODE(musb);
2160 break; 2139 break;
2161 case OTG_STATE_B_WAIT_ACON: 2140 case OTG_STATE_B_WAIT_ACON:
2162 case OTG_STATE_B_HOST: 2141 case OTG_STATE_B_HOST:
2163 case OTG_STATE_B_PERIPHERAL: 2142 case OTG_STATE_B_PERIPHERAL:
2164 case OTG_STATE_B_IDLE: 2143 case OTG_STATE_B_IDLE:
2165 musb->xceiv->state = OTG_STATE_B_IDLE; 2144 musb->xceiv->state = OTG_STATE_B_IDLE;
2166 break; 2145 break;
2167 case OTG_STATE_B_SRP_INIT: 2146 case OTG_STATE_B_SRP_INIT:
2168 break; 2147 break;
2169 } 2148 }
2170 2149
2171 musb->is_active = 0; 2150 musb->is_active = 0;
2172 } 2151 }
2173 2152
2174 void musb_g_reset(struct musb *musb) 2153 void musb_g_reset(struct musb *musb)
2175 __releases(musb->lock) 2154 __releases(musb->lock)
2176 __acquires(musb->lock) 2155 __acquires(musb->lock)
2177 { 2156 {
2178 void __iomem *mbase = musb->mregs; 2157 void __iomem *mbase = musb->mregs;
2179 u8 devctl = musb_readb(mbase, MUSB_DEVCTL); 2158 u8 devctl = musb_readb(mbase, MUSB_DEVCTL);
2180 u8 power; 2159 u8 power;
2181 2160
2182 dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n", 2161 dev_dbg(musb->controller, "<== %s addr=%x driver '%s'\n",
2183 (devctl & MUSB_DEVCTL_BDEVICE) 2162 (devctl & MUSB_DEVCTL_BDEVICE)
2184 ? "B-Device" : "A-Device", 2163 ? "B-Device" : "A-Device",
2185 musb_readb(mbase, MUSB_FADDR), 2164 musb_readb(mbase, MUSB_FADDR),
2186 musb->gadget_driver 2165 musb->gadget_driver
2187 ? musb->gadget_driver->driver.name 2166 ? musb->gadget_driver->driver.name
2188 : NULL 2167 : NULL
2189 ); 2168 );
2190 2169
2191 /* report disconnect, if we didn't already (flushing EP state) */ 2170 /* report disconnect, if we didn't already (flushing EP state) */
2192 if (musb->g.speed != USB_SPEED_UNKNOWN) 2171 if (musb->g.speed != USB_SPEED_UNKNOWN)
2193 musb_g_disconnect(musb); 2172 musb_g_disconnect(musb);
2194 2173
2195 /* clear HR */ 2174 /* clear HR */
2196 else if (devctl & MUSB_DEVCTL_HR) 2175 else if (devctl & MUSB_DEVCTL_HR)
2197 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION); 2176 musb_writeb(mbase, MUSB_DEVCTL, MUSB_DEVCTL_SESSION);
2198 2177
2199 2178
2200 /* what speed did we negotiate? */ 2179 /* what speed did we negotiate? */
2201 power = musb_readb(mbase, MUSB_POWER); 2180 power = musb_readb(mbase, MUSB_POWER);
2202 musb->g.speed = (power & MUSB_POWER_HSMODE) 2181 musb->g.speed = (power & MUSB_POWER_HSMODE)
2203 ? USB_SPEED_HIGH : USB_SPEED_FULL; 2182 ? USB_SPEED_HIGH : USB_SPEED_FULL;
2204 2183
2205 /* start in USB_STATE_DEFAULT */ 2184 /* start in USB_STATE_DEFAULT */
2206 musb->is_active = 1; 2185 musb->is_active = 1;
2207 musb->is_suspended = 0; 2186 musb->is_suspended = 0;
2208 MUSB_DEV_MODE(musb); 2187 MUSB_DEV_MODE(musb);
2209 musb->address = 0; 2188 musb->address = 0;
2210 musb->ep0_state = MUSB_EP0_STAGE_SETUP; 2189 musb->ep0_state = MUSB_EP0_STAGE_SETUP;
2211 2190
2212 musb->may_wakeup = 0; 2191 musb->may_wakeup = 0;
2213 musb->g.b_hnp_enable = 0; 2192 musb->g.b_hnp_enable = 0;
2214 musb->g.a_alt_hnp_support = 0; 2193 musb->g.a_alt_hnp_support = 0;
2215 musb->g.a_hnp_support = 0; 2194 musb->g.a_hnp_support = 0;
2216 2195
2217 /* Normal reset, as B-Device; 2196 /* Normal reset, as B-Device;
2218 * or else after HNP, as A-Device 2197 * or else after HNP, as A-Device
2219 */ 2198 */
2220 if (devctl & MUSB_DEVCTL_BDEVICE) { 2199 if (devctl & MUSB_DEVCTL_BDEVICE) {
2221 musb->xceiv->state = OTG_STATE_B_PERIPHERAL; 2200 musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
2222 musb->g.is_a_peripheral = 0; 2201 musb->g.is_a_peripheral = 0;
2223 } else if (is_otg_enabled(musb)) { 2202 } else {
drivers/usb/musb/musb_virthub.c
1 /* 1 /*
2 * MUSB OTG driver virtual root hub support 2 * MUSB OTG driver virtual root hub support
3 * 3 *
4 * Copyright 2005 Mentor Graphics Corporation 4 * Copyright 2005 Mentor Graphics Corporation
5 * Copyright (C) 2005-2006 by Texas Instruments 5 * Copyright (C) 2005-2006 by Texas Instruments
6 * Copyright (C) 2006-2007 Nokia Corporation 6 * Copyright (C) 2006-2007 Nokia Corporation
7 * 7 *
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation. 10 * version 2 as published by the Free Software Foundation.
11 * 11 *
12 * This program is distributed in the hope that it will be useful, but 12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of 13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details. 15 * General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License 17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA 20 * 02110-1301 USA
21 * 21 *
22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED 22 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 23 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 24 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 25 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 28 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * 32 *
33 */ 33 */
34 34
35 #include <linux/module.h> 35 #include <linux/module.h>
36 #include <linux/kernel.h> 36 #include <linux/kernel.h>
37 #include <linux/sched.h> 37 #include <linux/sched.h>
38 #include <linux/errno.h> 38 #include <linux/errno.h>
39 #include <linux/init.h> 39 #include <linux/init.h>
40 #include <linux/time.h> 40 #include <linux/time.h>
41 #include <linux/timer.h> 41 #include <linux/timer.h>
42 42
43 #include <asm/unaligned.h> 43 #include <asm/unaligned.h>
44 44
45 #include "musb_core.h" 45 #include "musb_core.h"
46 46
47 47
48 static void musb_port_suspend(struct musb *musb, bool do_suspend) 48 static void musb_port_suspend(struct musb *musb, bool do_suspend)
49 { 49 {
50 struct usb_otg *otg = musb->xceiv->otg; 50 struct usb_otg *otg = musb->xceiv->otg;
51 u8 power; 51 u8 power;
52 void __iomem *mbase = musb->mregs; 52 void __iomem *mbase = musb->mregs;
53 53
54 if (!is_host_active(musb)) 54 if (!is_host_active(musb))
55 return; 55 return;
56 56
57 /* NOTE: this doesn't necessarily put PHY into low power mode, 57 /* NOTE: this doesn't necessarily put PHY into low power mode,
58 * turning off its clock; that's a function of PHY integration and 58 * turning off its clock; that's a function of PHY integration and
59 * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect 59 * MUSB_POWER_ENSUSPEND. PHY may need a clock (sigh) to detect
60 * SE0 changing to connect (J) or wakeup (K) states. 60 * SE0 changing to connect (J) or wakeup (K) states.
61 */ 61 */
62 power = musb_readb(mbase, MUSB_POWER); 62 power = musb_readb(mbase, MUSB_POWER);
63 if (do_suspend) { 63 if (do_suspend) {
64 int retries = 10000; 64 int retries = 10000;
65 65
66 power &= ~MUSB_POWER_RESUME; 66 power &= ~MUSB_POWER_RESUME;
67 power |= MUSB_POWER_SUSPENDM; 67 power |= MUSB_POWER_SUSPENDM;
68 musb_writeb(mbase, MUSB_POWER, power); 68 musb_writeb(mbase, MUSB_POWER, power);
69 69
70 /* Needed for OPT A tests */ 70 /* Needed for OPT A tests */
71 power = musb_readb(mbase, MUSB_POWER); 71 power = musb_readb(mbase, MUSB_POWER);
72 while (power & MUSB_POWER_SUSPENDM) { 72 while (power & MUSB_POWER_SUSPENDM) {
73 power = musb_readb(mbase, MUSB_POWER); 73 power = musb_readb(mbase, MUSB_POWER);
74 if (retries-- < 1) 74 if (retries-- < 1)
75 break; 75 break;
76 } 76 }
77 77
78 dev_dbg(musb->controller, "Root port suspended, power %02x\n", power); 78 dev_dbg(musb->controller, "Root port suspended, power %02x\n", power);
79 79
80 musb->port1_status |= USB_PORT_STAT_SUSPEND; 80 musb->port1_status |= USB_PORT_STAT_SUSPEND;
81 switch (musb->xceiv->state) { 81 switch (musb->xceiv->state) {
82 case OTG_STATE_A_HOST: 82 case OTG_STATE_A_HOST:
83 musb->xceiv->state = OTG_STATE_A_SUSPEND; 83 musb->xceiv->state = OTG_STATE_A_SUSPEND;
84 musb->is_active = is_otg_enabled(musb) 84 musb->is_active = otg->host->b_hnp_enable;
85 && otg->host->b_hnp_enable;
86 if (musb->is_active) 85 if (musb->is_active)
87 mod_timer(&musb->otg_timer, jiffies 86 mod_timer(&musb->otg_timer, jiffies
88 + msecs_to_jiffies( 87 + msecs_to_jiffies(
89 OTG_TIME_A_AIDL_BDIS)); 88 OTG_TIME_A_AIDL_BDIS));
90 musb_platform_try_idle(musb, 0); 89 musb_platform_try_idle(musb, 0);
91 break; 90 break;
92 case OTG_STATE_B_HOST: 91 case OTG_STATE_B_HOST:
93 musb->xceiv->state = OTG_STATE_B_WAIT_ACON; 92 musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
94 musb->is_active = is_otg_enabled(musb) 93 musb->is_active = otg->host->b_hnp_enable;
95 && otg->host->b_hnp_enable;
96 musb_platform_try_idle(musb, 0); 94 musb_platform_try_idle(musb, 0);
97 break; 95 break;
98 default: 96 default:
99 dev_dbg(musb->controller, "bogus rh suspend? %s\n", 97 dev_dbg(musb->controller, "bogus rh suspend? %s\n",
100 otg_state_string(musb->xceiv->state)); 98 otg_state_string(musb->xceiv->state));
101 } 99 }
102 } else if (power & MUSB_POWER_SUSPENDM) { 100 } else if (power & MUSB_POWER_SUSPENDM) {
103 power &= ~MUSB_POWER_SUSPENDM; 101 power &= ~MUSB_POWER_SUSPENDM;
104 power |= MUSB_POWER_RESUME; 102 power |= MUSB_POWER_RESUME;
105 musb_writeb(mbase, MUSB_POWER, power); 103 musb_writeb(mbase, MUSB_POWER, power);
106 104
107 dev_dbg(musb->controller, "Root port resuming, power %02x\n", power); 105 dev_dbg(musb->controller, "Root port resuming, power %02x\n", power);
108 106
109 /* later, GetPortStatus will stop RESUME signaling */ 107 /* later, GetPortStatus will stop RESUME signaling */
110 musb->port1_status |= MUSB_PORT_STAT_RESUME; 108 musb->port1_status |= MUSB_PORT_STAT_RESUME;
111 musb->rh_timer = jiffies + msecs_to_jiffies(20); 109 musb->rh_timer = jiffies + msecs_to_jiffies(20);
112 } 110 }
113 } 111 }
114 112
115 static void musb_port_reset(struct musb *musb, bool do_reset) 113 static void musb_port_reset(struct musb *musb, bool do_reset)
116 { 114 {
117 u8 power; 115 u8 power;
118 void __iomem *mbase = musb->mregs; 116 void __iomem *mbase = musb->mregs;
119 117
120 if (musb->xceiv->state == OTG_STATE_B_IDLE) { 118 if (musb->xceiv->state == OTG_STATE_B_IDLE) {
121 dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n"); 119 dev_dbg(musb->controller, "HNP: Returning from HNP; no hub reset from b_idle\n");
122 musb->port1_status &= ~USB_PORT_STAT_RESET; 120 musb->port1_status &= ~USB_PORT_STAT_RESET;
123 return; 121 return;
124 } 122 }
125 123
126 if (!is_host_active(musb)) 124 if (!is_host_active(musb))
127 return; 125 return;
128 126
129 /* NOTE: caller guarantees it will turn off the reset when 127 /* NOTE: caller guarantees it will turn off the reset when
130 * the appropriate amount of time has passed 128 * the appropriate amount of time has passed
131 */ 129 */
132 power = musb_readb(mbase, MUSB_POWER); 130 power = musb_readb(mbase, MUSB_POWER);
133 if (do_reset) { 131 if (do_reset) {
134 132
135 /* 133 /*
136 * If RESUME is set, we must make sure it stays minimum 20 ms. 134 * If RESUME is set, we must make sure it stays minimum 20 ms.
137 * Then we must clear RESUME and wait a bit to let musb start 135 * Then we must clear RESUME and wait a bit to let musb start
138 * generating SOFs. If we don't do this, OPT HS A 6.8 tests 136 * generating SOFs. If we don't do this, OPT HS A 6.8 tests
139 * fail with "Error! Did not receive an SOF before suspend 137 * fail with "Error! Did not receive an SOF before suspend
140 * detected". 138 * detected".
141 */ 139 */
142 if (power & MUSB_POWER_RESUME) { 140 if (power & MUSB_POWER_RESUME) {
143 while (time_before(jiffies, musb->rh_timer)) 141 while (time_before(jiffies, musb->rh_timer))
144 msleep(1); 142 msleep(1);
145 musb_writeb(mbase, MUSB_POWER, 143 musb_writeb(mbase, MUSB_POWER,
146 power & ~MUSB_POWER_RESUME); 144 power & ~MUSB_POWER_RESUME);
147 msleep(1); 145 msleep(1);
148 } 146 }
149 147
150 musb->ignore_disconnect = true; 148 musb->ignore_disconnect = true;
151 power &= 0xf0; 149 power &= 0xf0;
152 musb_writeb(mbase, MUSB_POWER, 150 musb_writeb(mbase, MUSB_POWER,
153 power | MUSB_POWER_RESET); 151 power | MUSB_POWER_RESET);
154 152
155 musb->port1_status |= USB_PORT_STAT_RESET; 153 musb->port1_status |= USB_PORT_STAT_RESET;
156 musb->port1_status &= ~USB_PORT_STAT_ENABLE; 154 musb->port1_status &= ~USB_PORT_STAT_ENABLE;
157 musb->rh_timer = jiffies + msecs_to_jiffies(50); 155 musb->rh_timer = jiffies + msecs_to_jiffies(50);
158 } else { 156 } else {
159 dev_dbg(musb->controller, "root port reset stopped\n"); 157 dev_dbg(musb->controller, "root port reset stopped\n");
160 musb_writeb(mbase, MUSB_POWER, 158 musb_writeb(mbase, MUSB_POWER,
161 power & ~MUSB_POWER_RESET); 159 power & ~MUSB_POWER_RESET);
162 160
163 musb->ignore_disconnect = false; 161 musb->ignore_disconnect = false;
164 162
165 power = musb_readb(mbase, MUSB_POWER); 163 power = musb_readb(mbase, MUSB_POWER);
166 if (power & MUSB_POWER_HSMODE) { 164 if (power & MUSB_POWER_HSMODE) {
167 dev_dbg(musb->controller, "high-speed device connected\n"); 165 dev_dbg(musb->controller, "high-speed device connected\n");
168 musb->port1_status |= USB_PORT_STAT_HIGH_SPEED; 166 musb->port1_status |= USB_PORT_STAT_HIGH_SPEED;
169 } 167 }
170 168
171 musb->port1_status &= ~USB_PORT_STAT_RESET; 169 musb->port1_status &= ~USB_PORT_STAT_RESET;
172 musb->port1_status |= USB_PORT_STAT_ENABLE 170 musb->port1_status |= USB_PORT_STAT_ENABLE
173 | (USB_PORT_STAT_C_RESET << 16) 171 | (USB_PORT_STAT_C_RESET << 16)
174 | (USB_PORT_STAT_C_ENABLE << 16); 172 | (USB_PORT_STAT_C_ENABLE << 16);
175 usb_hcd_poll_rh_status(musb_to_hcd(musb)); 173 usb_hcd_poll_rh_status(musb_to_hcd(musb));
176 174
177 musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 175 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
178 } 176 }
179 } 177 }
180 178
181 void musb_root_disconnect(struct musb *musb) 179 void musb_root_disconnect(struct musb *musb)
182 { 180 {
183 struct usb_otg *otg = musb->xceiv->otg; 181 struct usb_otg *otg = musb->xceiv->otg;
184 182
185 musb->port1_status = USB_PORT_STAT_POWER 183 musb->port1_status = USB_PORT_STAT_POWER
186 | (USB_PORT_STAT_C_CONNECTION << 16); 184 | (USB_PORT_STAT_C_CONNECTION << 16);
187 185
188 usb_hcd_poll_rh_status(musb_to_hcd(musb)); 186 usb_hcd_poll_rh_status(musb_to_hcd(musb));
189 musb->is_active = 0; 187 musb->is_active = 0;
190 188
191 switch (musb->xceiv->state) { 189 switch (musb->xceiv->state) {
192 case OTG_STATE_A_SUSPEND: 190 case OTG_STATE_A_SUSPEND:
193 if (is_otg_enabled(musb) 191 if (otg->host->b_hnp_enable) {
194 && otg->host->b_hnp_enable) {
195 musb->xceiv->state = OTG_STATE_A_PERIPHERAL; 192 musb->xceiv->state = OTG_STATE_A_PERIPHERAL;
196 musb->g.is_a_peripheral = 1; 193 musb->g.is_a_peripheral = 1;
197 break; 194 break;
198 } 195 }
199 /* FALLTHROUGH */ 196 /* FALLTHROUGH */
200 case OTG_STATE_A_HOST: 197 case OTG_STATE_A_HOST:
201 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 198 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
202 musb->is_active = 0; 199 musb->is_active = 0;
203 break; 200 break;
204 case OTG_STATE_A_WAIT_VFALL: 201 case OTG_STATE_A_WAIT_VFALL:
205 musb->xceiv->state = OTG_STATE_B_IDLE; 202 musb->xceiv->state = OTG_STATE_B_IDLE;
206 break; 203 break;
207 default: 204 default:
208 dev_dbg(musb->controller, "host disconnect (%s)\n", 205 dev_dbg(musb->controller, "host disconnect (%s)\n",
209 otg_state_string(musb->xceiv->state)); 206 otg_state_string(musb->xceiv->state));
210 } 207 }
211 } 208 }
212 209
213 210
214 /*---------------------------------------------------------------------*/ 211 /*---------------------------------------------------------------------*/
215 212
216 /* Caller may or may not hold musb->lock */ 213 /* Caller may or may not hold musb->lock */
217 int musb_hub_status_data(struct usb_hcd *hcd, char *buf) 214 int musb_hub_status_data(struct usb_hcd *hcd, char *buf)
218 { 215 {
219 struct musb *musb = hcd_to_musb(hcd); 216 struct musb *musb = hcd_to_musb(hcd);
220 int retval = 0; 217 int retval = 0;
221 218
222 /* called in_irq() via usb_hcd_poll_rh_status() */ 219 /* called in_irq() via usb_hcd_poll_rh_status() */
223 if (musb->port1_status & 0xffff0000) { 220 if (musb->port1_status & 0xffff0000) {
224 *buf = 0x02; 221 *buf = 0x02;
225 retval = 1; 222 retval = 1;
226 } 223 }
227 return retval; 224 return retval;
228 } 225 }
229 226
230 int musb_hub_control( 227 int musb_hub_control(
231 struct usb_hcd *hcd, 228 struct usb_hcd *hcd,
232 u16 typeReq, 229 u16 typeReq,
233 u16 wValue, 230 u16 wValue,
234 u16 wIndex, 231 u16 wIndex,
235 char *buf, 232 char *buf,
236 u16 wLength) 233 u16 wLength)
237 { 234 {
238 struct musb *musb = hcd_to_musb(hcd); 235 struct musb *musb = hcd_to_musb(hcd);
239 u32 temp; 236 u32 temp;
240 int retval = 0; 237 int retval = 0;
241 unsigned long flags; 238 unsigned long flags;
242 239
243 spin_lock_irqsave(&musb->lock, flags); 240 spin_lock_irqsave(&musb->lock, flags);
244 241
245 if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) { 242 if (unlikely(!HCD_HW_ACCESSIBLE(hcd))) {
246 spin_unlock_irqrestore(&musb->lock, flags); 243 spin_unlock_irqrestore(&musb->lock, flags);
247 return -ESHUTDOWN; 244 return -ESHUTDOWN;
248 } 245 }
249 246
250 /* hub features: always zero, setting is a NOP 247 /* hub features: always zero, setting is a NOP
251 * port features: reported, sometimes updated when host is active 248 * port features: reported, sometimes updated when host is active
252 * no indicators 249 * no indicators
253 */ 250 */
254 switch (typeReq) { 251 switch (typeReq) {
255 case ClearHubFeature: 252 case ClearHubFeature:
256 case SetHubFeature: 253 case SetHubFeature:
257 switch (wValue) { 254 switch (wValue) {
258 case C_HUB_OVER_CURRENT: 255 case C_HUB_OVER_CURRENT:
259 case C_HUB_LOCAL_POWER: 256 case C_HUB_LOCAL_POWER:
260 break; 257 break;
261 default: 258 default:
262 goto error; 259 goto error;
263 } 260 }
264 break; 261 break;
265 case ClearPortFeature: 262 case ClearPortFeature:
266 if ((wIndex & 0xff) != 1) 263 if ((wIndex & 0xff) != 1)
267 goto error; 264 goto error;
268 265
269 switch (wValue) { 266 switch (wValue) {
270 case USB_PORT_FEAT_ENABLE: 267 case USB_PORT_FEAT_ENABLE:
271 break; 268 break;
272 case USB_PORT_FEAT_SUSPEND: 269 case USB_PORT_FEAT_SUSPEND:
273 musb_port_suspend(musb, false); 270 musb_port_suspend(musb, false);
274 break; 271 break;
275 case USB_PORT_FEAT_POWER: 272 case USB_PORT_FEAT_POWER:
276 if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) 273 if (!hcd->self.is_b_host)
277 musb_platform_set_vbus(musb, 0); 274 musb_platform_set_vbus(musb, 0);
278 break; 275 break;
279 case USB_PORT_FEAT_C_CONNECTION: 276 case USB_PORT_FEAT_C_CONNECTION:
280 case USB_PORT_FEAT_C_ENABLE: 277 case USB_PORT_FEAT_C_ENABLE:
281 case USB_PORT_FEAT_C_OVER_CURRENT: 278 case USB_PORT_FEAT_C_OVER_CURRENT:
282 case USB_PORT_FEAT_C_RESET: 279 case USB_PORT_FEAT_C_RESET:
283 case USB_PORT_FEAT_C_SUSPEND: 280 case USB_PORT_FEAT_C_SUSPEND:
284 break; 281 break;
285 default: 282 default:
286 goto error; 283 goto error;
287 } 284 }
288 dev_dbg(musb->controller, "clear feature %d\n", wValue); 285 dev_dbg(musb->controller, "clear feature %d\n", wValue);
289 musb->port1_status &= ~(1 << wValue); 286 musb->port1_status &= ~(1 << wValue);
290 break; 287 break;
291 case GetHubDescriptor: 288 case GetHubDescriptor:
292 { 289 {
293 struct usb_hub_descriptor *desc = (void *)buf; 290 struct usb_hub_descriptor *desc = (void *)buf;
294 291
295 desc->bDescLength = 9; 292 desc->bDescLength = 9;
296 desc->bDescriptorType = 0x29; 293 desc->bDescriptorType = 0x29;
297 desc->bNbrPorts = 1; 294 desc->bNbrPorts = 1;
298 desc->wHubCharacteristics = cpu_to_le16( 295 desc->wHubCharacteristics = cpu_to_le16(
299 0x0001 /* per-port power switching */ 296 0x0001 /* per-port power switching */
300 | 0x0010 /* no overcurrent reporting */ 297 | 0x0010 /* no overcurrent reporting */
301 ); 298 );
302 desc->bPwrOn2PwrGood = 5; /* msec/2 */ 299 desc->bPwrOn2PwrGood = 5; /* msec/2 */
303 desc->bHubContrCurrent = 0; 300 desc->bHubContrCurrent = 0;
304 301
305 /* workaround bogus struct definition */ 302 /* workaround bogus struct definition */
306 desc->u.hs.DeviceRemovable[0] = 0x02; /* port 1 */ 303 desc->u.hs.DeviceRemovable[0] = 0x02; /* port 1 */
307 desc->u.hs.DeviceRemovable[1] = 0xff; 304 desc->u.hs.DeviceRemovable[1] = 0xff;
308 } 305 }
309 break; 306 break;
310 case GetHubStatus: 307 case GetHubStatus:
311 temp = 0; 308 temp = 0;
312 *(__le32 *) buf = cpu_to_le32(temp); 309 *(__le32 *) buf = cpu_to_le32(temp);
313 break; 310 break;
314 case GetPortStatus: 311 case GetPortStatus:
315 if (wIndex != 1) 312 if (wIndex != 1)
316 goto error; 313 goto error;
317 314
318 /* finish RESET signaling? */ 315 /* finish RESET signaling? */
319 if ((musb->port1_status & USB_PORT_STAT_RESET) 316 if ((musb->port1_status & USB_PORT_STAT_RESET)
320 && time_after_eq(jiffies, musb->rh_timer)) 317 && time_after_eq(jiffies, musb->rh_timer))
321 musb_port_reset(musb, false); 318 musb_port_reset(musb, false);
322 319
323 /* finish RESUME signaling? */ 320 /* finish RESUME signaling? */
324 if ((musb->port1_status & MUSB_PORT_STAT_RESUME) 321 if ((musb->port1_status & MUSB_PORT_STAT_RESUME)
325 && time_after_eq(jiffies, musb->rh_timer)) { 322 && time_after_eq(jiffies, musb->rh_timer)) {
326 u8 power; 323 u8 power;
327 324
328 power = musb_readb(musb->mregs, MUSB_POWER); 325 power = musb_readb(musb->mregs, MUSB_POWER);
329 power &= ~MUSB_POWER_RESUME; 326 power &= ~MUSB_POWER_RESUME;
330 dev_dbg(musb->controller, "root port resume stopped, power %02x\n", 327 dev_dbg(musb->controller, "root port resume stopped, power %02x\n",
331 power); 328 power);
332 musb_writeb(musb->mregs, MUSB_POWER, power); 329 musb_writeb(musb->mregs, MUSB_POWER, power);
333 330
334 /* ISSUE: DaVinci (RTL 1.300) disconnects after 331 /* ISSUE: DaVinci (RTL 1.300) disconnects after
335 * resume of high speed peripherals (but not full 332 * resume of high speed peripherals (but not full
336 * speed ones). 333 * speed ones).
337 */ 334 */
338 335
339 musb->is_active = 1; 336 musb->is_active = 1;
340 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND 337 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
341 | MUSB_PORT_STAT_RESUME); 338 | MUSB_PORT_STAT_RESUME);
342 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; 339 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
343 usb_hcd_poll_rh_status(musb_to_hcd(musb)); 340 usb_hcd_poll_rh_status(musb_to_hcd(musb));
344 /* NOTE: it might really be A_WAIT_BCON ... */ 341 /* NOTE: it might really be A_WAIT_BCON ... */
345 musb->xceiv->state = OTG_STATE_A_HOST; 342 musb->xceiv->state = OTG_STATE_A_HOST;
346 } 343 }
347 344
348 put_unaligned(cpu_to_le32(musb->port1_status 345 put_unaligned(cpu_to_le32(musb->port1_status
349 & ~MUSB_PORT_STAT_RESUME), 346 & ~MUSB_PORT_STAT_RESUME),
350 (__le32 *) buf); 347 (__le32 *) buf);
351 348
352 /* port change status is more interesting */ 349 /* port change status is more interesting */
353 dev_dbg(musb->controller, "port status %08x\n", 350 dev_dbg(musb->controller, "port status %08x\n",
354 musb->port1_status); 351 musb->port1_status);
355 break; 352 break;
356 case SetPortFeature: 353 case SetPortFeature:
357 if ((wIndex & 0xff) != 1) 354 if ((wIndex & 0xff) != 1)
358 goto error; 355 goto error;
359 356
360 switch (wValue) { 357 switch (wValue) {
361 case USB_PORT_FEAT_POWER: 358 case USB_PORT_FEAT_POWER:
362 /* NOTE: this controller has a strange state machine 359 /* NOTE: this controller has a strange state machine
363 * that involves "requesting sessions" according to 360 * that involves "requesting sessions" according to
364 * magic side effects from incompletely-described 361 * magic side effects from incompletely-described
365 * rules about startup... 362 * rules about startup...
366 * 363 *
367 * This call is what really starts the host mode; be 364 * This call is what really starts the host mode; be
368 * very careful about side effects if you reorder any 365 * very careful about side effects if you reorder any
369 * initialization logic, e.g. for OTG, or change any 366 * initialization logic, e.g. for OTG, or change any
370 * logic relating to VBUS power-up. 367 * logic relating to VBUS power-up.
371 */ 368 */
372 if (!(is_otg_enabled(musb) && hcd->self.is_b_host)) 369 if (!hcd->self.is_b_host)
373 musb_start(musb); 370 musb_start(musb);
374 break; 371 break;
375 case USB_PORT_FEAT_RESET: 372 case USB_PORT_FEAT_RESET:
376 musb_port_reset(musb, true); 373 musb_port_reset(musb, true);
377 break; 374 break;
378 case USB_PORT_FEAT_SUSPEND: 375 case USB_PORT_FEAT_SUSPEND:
379 musb_port_suspend(musb, true); 376 musb_port_suspend(musb, true);
380 break; 377 break;
381 case USB_PORT_FEAT_TEST: 378 case USB_PORT_FEAT_TEST:
382 if (unlikely(is_host_active(musb))) 379 if (unlikely(is_host_active(musb)))
383 goto error; 380 goto error;
384 381
385 wIndex >>= 8; 382 wIndex >>= 8;
386 switch (wIndex) { 383 switch (wIndex) {
387 case 1: 384 case 1:
388 pr_debug("TEST_J\n"); 385 pr_debug("TEST_J\n");
389 temp = MUSB_TEST_J; 386 temp = MUSB_TEST_J;
390 break; 387 break;
391 case 2: 388 case 2:
392 pr_debug("TEST_K\n"); 389 pr_debug("TEST_K\n");
393 temp = MUSB_TEST_K; 390 temp = MUSB_TEST_K;
394 break; 391 break;
395 case 3: 392 case 3:
396 pr_debug("TEST_SE0_NAK\n"); 393 pr_debug("TEST_SE0_NAK\n");
397 temp = MUSB_TEST_SE0_NAK; 394 temp = MUSB_TEST_SE0_NAK;
398 break; 395 break;
399 case 4: 396 case 4:
400 pr_debug("TEST_PACKET\n"); 397 pr_debug("TEST_PACKET\n");
401 temp = MUSB_TEST_PACKET; 398 temp = MUSB_TEST_PACKET;
402 musb_load_testpacket(musb); 399 musb_load_testpacket(musb);
403 break; 400 break;
404 case 5: 401 case 5:
405 pr_debug("TEST_FORCE_ENABLE\n"); 402 pr_debug("TEST_FORCE_ENABLE\n");
406 temp = MUSB_TEST_FORCE_HOST 403 temp = MUSB_TEST_FORCE_HOST
407 | MUSB_TEST_FORCE_HS; 404 | MUSB_TEST_FORCE_HS;
408 405
409 musb_writeb(musb->mregs, MUSB_DEVCTL, 406 musb_writeb(musb->mregs, MUSB_DEVCTL,
410 MUSB_DEVCTL_SESSION); 407 MUSB_DEVCTL_SESSION);
411 break; 408 break;
412 case 6: 409 case 6:
413 pr_debug("TEST_FIFO_ACCESS\n"); 410 pr_debug("TEST_FIFO_ACCESS\n");
414 temp = MUSB_TEST_FIFO_ACCESS; 411 temp = MUSB_TEST_FIFO_ACCESS;
415 break; 412 break;
416 default: 413 default:
417 goto error; 414 goto error;
418 } 415 }
419 musb_writeb(musb->mregs, MUSB_TESTMODE, temp); 416 musb_writeb(musb->mregs, MUSB_TESTMODE, temp);
420 break; 417 break;
421 default: 418 default:
422 goto error; 419 goto error;
423 } 420 }
424 dev_dbg(musb->controller, "set feature %d\n", wValue); 421 dev_dbg(musb->controller, "set feature %d\n", wValue);
425 musb->port1_status |= 1 << wValue; 422 musb->port1_status |= 1 << wValue;
426 break; 423 break;
427 424
428 default: 425 default:
429 error: 426 error:
430 /* "protocol stall" on error */ 427 /* "protocol stall" on error */
431 retval = -EPIPE; 428 retval = -EPIPE;
432 } 429 }
433 spin_unlock_irqrestore(&musb->lock, flags); 430 spin_unlock_irqrestore(&musb->lock, flags);
434 return retval; 431 return retval;
435 } 432 }
436 433
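
In the ClearPortFeature and SetPortFeature USB_PORT_FEAT_POWER cases above, the old column guards the VBUS action with is_otg_enabled(musb) && hcd->self.is_b_host while the new column keeps only the is_b_host test; the same reduction appears in the OTG_STATE_A_SUSPEND branch of musb_root_disconnect(). A minimal sketch of the resulting root-port power handling, assuming the musb core headers; musb_port_power_feature() is a name invented here purely for illustration, not part of the driver:

    /* sketch only: how the new-column code treats USB_PORT_FEAT_POWER */
    static void musb_port_power_feature(struct usb_hcd *hcd, struct musb *musb,
                                        bool set)
    {
            /* a B-host must never drive or drop VBUS from the root hub path */
            if (hcd->self.is_b_host)
                    return;

            if (set)
                    musb_start(musb);                 /* SetPortFeature  */
            else
                    musb_platform_set_vbus(musb, 0);  /* ClearPortFeature */
    }
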
drivers/usb/musb/omap2430.c
1 /* 1 /*
2 * Copyright (C) 2005-2007 by Texas Instruments 2 * Copyright (C) 2005-2007 by Texas Instruments
3 * Some code has been taken from tusb6010.c 3 * Some code has been taken from tusb6010.c
4 * Copyrights for that are attributable to: 4 * Copyrights for that are attributable to:
5 * Copyright (C) 2006 Nokia Corporation 5 * Copyright (C) 2006 Nokia Corporation
6 * Tony Lindgren <tony@atomide.com> 6 * Tony Lindgren <tony@atomide.com>
7 * 7 *
8 * This file is part of the Inventra Controller Driver for Linux. 8 * This file is part of the Inventra Controller Driver for Linux.
9 * 9 *
10 * The Inventra Controller Driver for Linux is free software; you 10 * The Inventra Controller Driver for Linux is free software; you
11 * can redistribute it and/or modify it under the terms of the GNU 11 * can redistribute it and/or modify it under the terms of the GNU
12 * General Public License version 2 as published by the Free Software 12 * General Public License version 2 as published by the Free Software
13 * Foundation. 13 * Foundation.
14 * 14 *
15 * The Inventra Controller Driver for Linux is distributed in 15 * The Inventra Controller Driver for Linux is distributed in
16 * the hope that it will be useful, but WITHOUT ANY WARRANTY; 16 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
17 * without even the implied warranty of MERCHANTABILITY or 17 * without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public 18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
19 * License for more details. 19 * License for more details.
20 * 20 *
21 * You should have received a copy of the GNU General Public License 21 * You should have received a copy of the GNU General Public License
22 * along with The Inventra Controller Driver for Linux ; if not, 22 * along with The Inventra Controller Driver for Linux ; if not,
23 * write to the Free Software Foundation, Inc., 59 Temple Place, 23 * write to the Free Software Foundation, Inc., 59 Temple Place,
24 * Suite 330, Boston, MA 02111-1307 USA 24 * Suite 330, Boston, MA 02111-1307 USA
25 * 25 *
26 */ 26 */
27 #include <linux/module.h> 27 #include <linux/module.h>
28 #include <linux/kernel.h> 28 #include <linux/kernel.h>
29 #include <linux/sched.h> 29 #include <linux/sched.h>
30 #include <linux/init.h> 30 #include <linux/init.h>
31 #include <linux/list.h> 31 #include <linux/list.h>
32 #include <linux/io.h> 32 #include <linux/io.h>
33 #include <linux/platform_device.h> 33 #include <linux/platform_device.h>
34 #include <linux/dma-mapping.h> 34 #include <linux/dma-mapping.h>
35 #include <linux/pm_runtime.h> 35 #include <linux/pm_runtime.h>
36 #include <linux/err.h> 36 #include <linux/err.h>
37 #include <linux/usb/musb-omap.h> 37 #include <linux/usb/musb-omap.h>
38 38
39 #include "musb_core.h" 39 #include "musb_core.h"
40 #include "omap2430.h" 40 #include "omap2430.h"
41 41
42 struct omap2430_glue { 42 struct omap2430_glue {
43 struct device *dev; 43 struct device *dev;
44 struct platform_device *musb; 44 struct platform_device *musb;
45 enum omap_musb_vbus_id_status status; 45 enum omap_musb_vbus_id_status status;
46 struct work_struct omap_musb_mailbox_work; 46 struct work_struct omap_musb_mailbox_work;
47 }; 47 };
48 #define glue_to_musb(g) platform_get_drvdata(g->musb) 48 #define glue_to_musb(g) platform_get_drvdata(g->musb)
49 49
50 struct omap2430_glue *_glue; 50 struct omap2430_glue *_glue;
51 51
52 static struct timer_list musb_idle_timer; 52 static struct timer_list musb_idle_timer;
53 53
54 static void musb_do_idle(unsigned long _musb) 54 static void musb_do_idle(unsigned long _musb)
55 { 55 {
56 struct musb *musb = (void *)_musb; 56 struct musb *musb = (void *)_musb;
57 unsigned long flags; 57 unsigned long flags;
58 u8 power; 58 u8 power;
59 u8 devctl; 59 u8 devctl;
60 60
61 spin_lock_irqsave(&musb->lock, flags); 61 spin_lock_irqsave(&musb->lock, flags);
62 62
63 switch (musb->xceiv->state) { 63 switch (musb->xceiv->state) {
64 case OTG_STATE_A_WAIT_BCON: 64 case OTG_STATE_A_WAIT_BCON:
65 65
66 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 66 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
67 if (devctl & MUSB_DEVCTL_BDEVICE) { 67 if (devctl & MUSB_DEVCTL_BDEVICE) {
68 musb->xceiv->state = OTG_STATE_B_IDLE; 68 musb->xceiv->state = OTG_STATE_B_IDLE;
69 MUSB_DEV_MODE(musb); 69 MUSB_DEV_MODE(musb);
70 } else { 70 } else {
71 musb->xceiv->state = OTG_STATE_A_IDLE; 71 musb->xceiv->state = OTG_STATE_A_IDLE;
72 MUSB_HST_MODE(musb); 72 MUSB_HST_MODE(musb);
73 } 73 }
74 break; 74 break;
75 case OTG_STATE_A_SUSPEND: 75 case OTG_STATE_A_SUSPEND:
76 /* finish RESUME signaling? */ 76 /* finish RESUME signaling? */
77 if (musb->port1_status & MUSB_PORT_STAT_RESUME) { 77 if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
78 power = musb_readb(musb->mregs, MUSB_POWER); 78 power = musb_readb(musb->mregs, MUSB_POWER);
79 power &= ~MUSB_POWER_RESUME; 79 power &= ~MUSB_POWER_RESUME;
80 dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power); 80 dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
81 musb_writeb(musb->mregs, MUSB_POWER, power); 81 musb_writeb(musb->mregs, MUSB_POWER, power);
82 musb->is_active = 1; 82 musb->is_active = 1;
83 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND 83 musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
84 | MUSB_PORT_STAT_RESUME); 84 | MUSB_PORT_STAT_RESUME);
85 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; 85 musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
86 usb_hcd_poll_rh_status(musb_to_hcd(musb)); 86 usb_hcd_poll_rh_status(musb_to_hcd(musb));
87 /* NOTE: it might really be A_WAIT_BCON ... */ 87 /* NOTE: it might really be A_WAIT_BCON ... */
88 musb->xceiv->state = OTG_STATE_A_HOST; 88 musb->xceiv->state = OTG_STATE_A_HOST;
89 } 89 }
90 break; 90 break;
91 case OTG_STATE_A_HOST: 91 case OTG_STATE_A_HOST:
92 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 92 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
93 if (devctl & MUSB_DEVCTL_BDEVICE) 93 if (devctl & MUSB_DEVCTL_BDEVICE)
94 musb->xceiv->state = OTG_STATE_B_IDLE; 94 musb->xceiv->state = OTG_STATE_B_IDLE;
95 else 95 else
96 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 96 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
97 default: 97 default:
98 break; 98 break;
99 } 99 }
100 spin_unlock_irqrestore(&musb->lock, flags); 100 spin_unlock_irqrestore(&musb->lock, flags);
101 } 101 }
102 102
103 103
104 static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout) 104 static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
105 { 105 {
106 unsigned long default_timeout = jiffies + msecs_to_jiffies(3); 106 unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
107 static unsigned long last_timer; 107 static unsigned long last_timer;
108 108
109 if (timeout == 0) 109 if (timeout == 0)
110 timeout = default_timeout; 110 timeout = default_timeout;
111 111
112 /* Never idle if active, or when VBUS timeout is not set as host */ 112 /* Never idle if active, or when VBUS timeout is not set as host */
113 if (musb->is_active || ((musb->a_wait_bcon == 0) 113 if (musb->is_active || ((musb->a_wait_bcon == 0)
114 && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { 114 && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
115 dev_dbg(musb->controller, "%s active, deleting timer\n", 115 dev_dbg(musb->controller, "%s active, deleting timer\n",
116 otg_state_string(musb->xceiv->state)); 116 otg_state_string(musb->xceiv->state));
117 del_timer(&musb_idle_timer); 117 del_timer(&musb_idle_timer);
118 last_timer = jiffies; 118 last_timer = jiffies;
119 return; 119 return;
120 } 120 }
121 121
122 if (time_after(last_timer, timeout)) { 122 if (time_after(last_timer, timeout)) {
123 if (!timer_pending(&musb_idle_timer)) 123 if (!timer_pending(&musb_idle_timer))
124 last_timer = timeout; 124 last_timer = timeout;
125 else { 125 else {
126 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); 126 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
127 return; 127 return;
128 } 128 }
129 } 129 }
130 last_timer = timeout; 130 last_timer = timeout;
131 131
132 dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", 132 dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
133 otg_state_string(musb->xceiv->state), 133 otg_state_string(musb->xceiv->state),
134 (unsigned long)jiffies_to_msecs(timeout - jiffies)); 134 (unsigned long)jiffies_to_msecs(timeout - jiffies));
135 mod_timer(&musb_idle_timer, timeout); 135 mod_timer(&musb_idle_timer, timeout);
136 } 136 }
137 137
138 static void omap2430_musb_set_vbus(struct musb *musb, int is_on) 138 static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
139 { 139 {
140 struct usb_otg *otg = musb->xceiv->otg; 140 struct usb_otg *otg = musb->xceiv->otg;
141 u8 devctl; 141 u8 devctl;
142 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 142 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
143 int ret = 1; 143 int ret = 1;
144 /* HDRC controls CPEN, but beware current surges during device 144 /* HDRC controls CPEN, but beware current surges during device
145 * connect. They can trigger transient overcurrent conditions 145 * connect. They can trigger transient overcurrent conditions
146 * that must be ignored. 146 * that must be ignored.
147 */ 147 */
148 148
149 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 149 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
150 150
151 if (is_on) { 151 if (is_on) {
152 if (musb->xceiv->state == OTG_STATE_A_IDLE) { 152 if (musb->xceiv->state == OTG_STATE_A_IDLE) {
153 /* start the session */ 153 /* start the session */
154 devctl |= MUSB_DEVCTL_SESSION; 154 devctl |= MUSB_DEVCTL_SESSION;
155 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 155 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
156 /* 156 /*
157 * Wait for the musb to set as A device to enable the 157 * Wait for the musb to set as A device to enable the
158 * VBUS 158 * VBUS
159 */ 159 */
160 while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) { 160 while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) {
161 161
162 cpu_relax(); 162 cpu_relax();
163 163
164 if (time_after(jiffies, timeout)) { 164 if (time_after(jiffies, timeout)) {
165 dev_err(musb->controller, 165 dev_err(musb->controller,
166 "configured as A device timeout"); 166 "configured as A device timeout");
167 ret = -EINVAL; 167 ret = -EINVAL;
168 break; 168 break;
169 } 169 }
170 } 170 }
171 171
172 if (ret && otg->set_vbus) 172 if (ret && otg->set_vbus)
173 otg_set_vbus(otg, 1); 173 otg_set_vbus(otg, 1);
174 } else { 174 } else {
175 musb->is_active = 1; 175 musb->is_active = 1;
176 otg->default_a = 1; 176 otg->default_a = 1;
177 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 177 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
178 devctl |= MUSB_DEVCTL_SESSION; 178 devctl |= MUSB_DEVCTL_SESSION;
179 MUSB_HST_MODE(musb); 179 MUSB_HST_MODE(musb);
180 } 180 }
181 } else { 181 } else {
182 musb->is_active = 0; 182 musb->is_active = 0;
183 183
184 /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and 184 /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and
185 * jumping right to B_IDLE... 185 * jumping right to B_IDLE...
186 */ 186 */
187 187
188 otg->default_a = 0; 188 otg->default_a = 0;
189 musb->xceiv->state = OTG_STATE_B_IDLE; 189 musb->xceiv->state = OTG_STATE_B_IDLE;
190 devctl &= ~MUSB_DEVCTL_SESSION; 190 devctl &= ~MUSB_DEVCTL_SESSION;
191 191
192 MUSB_DEV_MODE(musb); 192 MUSB_DEV_MODE(musb);
193 } 193 }
194 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 194 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
195 195
196 dev_dbg(musb->controller, "VBUS %s, devctl %02x " 196 dev_dbg(musb->controller, "VBUS %s, devctl %02x "
197 /* otg %3x conf %08x prcm %08x */ "\n", 197 /* otg %3x conf %08x prcm %08x */ "\n",
198 otg_state_string(musb->xceiv->state), 198 otg_state_string(musb->xceiv->state),
199 musb_readb(musb->mregs, MUSB_DEVCTL)); 199 musb_readb(musb->mregs, MUSB_DEVCTL));
200 } 200 }
201 201
202 static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode) 202 static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
203 { 203 {
204 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 204 u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
205 205
206 devctl |= MUSB_DEVCTL_SESSION; 206 devctl |= MUSB_DEVCTL_SESSION;
207 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 207 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
208 208
209 return 0; 209 return 0;
210 } 210 }
211 211
212 static inline void omap2430_low_level_exit(struct musb *musb) 212 static inline void omap2430_low_level_exit(struct musb *musb)
213 { 213 {
214 u32 l; 214 u32 l;
215 215
216 /* in any role */ 216 /* in any role */
217 l = musb_readl(musb->mregs, OTG_FORCESTDBY); 217 l = musb_readl(musb->mregs, OTG_FORCESTDBY);
218 l |= ENABLEFORCE; /* enable MSTANDBY */ 218 l |= ENABLEFORCE; /* enable MSTANDBY */
219 musb_writel(musb->mregs, OTG_FORCESTDBY, l); 219 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
220 } 220 }
221 221
222 static inline void omap2430_low_level_init(struct musb *musb) 222 static inline void omap2430_low_level_init(struct musb *musb)
223 { 223 {
224 u32 l; 224 u32 l;
225 225
226 l = musb_readl(musb->mregs, OTG_FORCESTDBY); 226 l = musb_readl(musb->mregs, OTG_FORCESTDBY);
227 l &= ~ENABLEFORCE; /* disable MSTANDBY */ 227 l &= ~ENABLEFORCE; /* disable MSTANDBY */
228 musb_writel(musb->mregs, OTG_FORCESTDBY, l); 228 musb_writel(musb->mregs, OTG_FORCESTDBY, l);
229 } 229 }
230 230
231 void omap_musb_mailbox(enum omap_musb_vbus_id_status status) 231 void omap_musb_mailbox(enum omap_musb_vbus_id_status status)
232 { 232 {
233 struct omap2430_glue *glue = _glue; 233 struct omap2430_glue *glue = _glue;
234 struct musb *musb = glue_to_musb(glue); 234 struct musb *musb = glue_to_musb(glue);
235 235
236 glue->status = status; 236 glue->status = status;
237 if (!musb) { 237 if (!musb) {
238 dev_err(glue->dev, "musb core is not yet ready\n"); 238 dev_err(glue->dev, "musb core is not yet ready\n");
239 return; 239 return;
240 } 240 }
241 241
242 schedule_work(&glue->omap_musb_mailbox_work); 242 schedule_work(&glue->omap_musb_mailbox_work);
243 } 243 }
244 EXPORT_SYMBOL_GPL(omap_musb_mailbox); 244 EXPORT_SYMBOL_GPL(omap_musb_mailbox);
245 245
246 static void omap_musb_set_mailbox(struct omap2430_glue *glue) 246 static void omap_musb_set_mailbox(struct omap2430_glue *glue)
247 { 247 {
248 struct musb *musb = glue_to_musb(glue); 248 struct musb *musb = glue_to_musb(glue);
249 struct device *dev = musb->controller; 249 struct device *dev = musb->controller;
250 struct musb_hdrc_platform_data *pdata = dev->platform_data; 250 struct musb_hdrc_platform_data *pdata = dev->platform_data;
251 struct omap_musb_board_data *data = pdata->board_data; 251 struct omap_musb_board_data *data = pdata->board_data;
252 struct usb_otg *otg = musb->xceiv->otg; 252 struct usb_otg *otg = musb->xceiv->otg;
253 253
254 switch (glue->status) { 254 switch (glue->status) {
255 case OMAP_MUSB_ID_GROUND: 255 case OMAP_MUSB_ID_GROUND:
256 dev_dbg(dev, "ID GND\n"); 256 dev_dbg(dev, "ID GND\n");
257 257
258 otg->default_a = true; 258 otg->default_a = true;
259 musb->xceiv->state = OTG_STATE_A_IDLE; 259 musb->xceiv->state = OTG_STATE_A_IDLE;
260 musb->xceiv->last_event = USB_EVENT_ID; 260 musb->xceiv->last_event = USB_EVENT_ID;
261 if (!is_otg_enabled(musb) || musb->gadget_driver) { 261 if (musb->gadget_driver) {
262 pm_runtime_get_sync(dev); 262 pm_runtime_get_sync(dev);
263 usb_phy_init(musb->xceiv); 263 usb_phy_init(musb->xceiv);
264 omap2430_musb_set_vbus(musb, 1); 264 omap2430_musb_set_vbus(musb, 1);
265 } 265 }
266 break; 266 break;
267 267
268 case OMAP_MUSB_VBUS_VALID: 268 case OMAP_MUSB_VBUS_VALID:
269 dev_dbg(dev, "VBUS Connect\n"); 269 dev_dbg(dev, "VBUS Connect\n");
270 270
271 otg->default_a = false; 271 otg->default_a = false;
272 musb->xceiv->state = OTG_STATE_B_IDLE; 272 musb->xceiv->state = OTG_STATE_B_IDLE;
273 musb->xceiv->last_event = USB_EVENT_VBUS; 273 musb->xceiv->last_event = USB_EVENT_VBUS;
274 if (musb->gadget_driver) 274 if (musb->gadget_driver)
275 pm_runtime_get_sync(dev); 275 pm_runtime_get_sync(dev);
276 usb_phy_init(musb->xceiv); 276 usb_phy_init(musb->xceiv);
277 break; 277 break;
278 278
279 case OMAP_MUSB_ID_FLOAT: 279 case OMAP_MUSB_ID_FLOAT:
280 case OMAP_MUSB_VBUS_OFF: 280 case OMAP_MUSB_VBUS_OFF:
281 dev_dbg(dev, "VBUS Disconnect\n"); 281 dev_dbg(dev, "VBUS Disconnect\n");
282 282
283 musb->xceiv->last_event = USB_EVENT_NONE; 283 musb->xceiv->last_event = USB_EVENT_NONE;
284 if (is_otg_enabled(musb) || is_peripheral_enabled(musb)) 284 if (musb->gadget_driver) {
285 if (musb->gadget_driver) { 285 pm_runtime_mark_last_busy(dev);
286 pm_runtime_mark_last_busy(dev); 286 pm_runtime_put_autosuspend(dev);
287 pm_runtime_put_autosuspend(dev); 287 }
288 }
289 288
290 if (data->interface_type == MUSB_INTERFACE_UTMI) { 289 if (data->interface_type == MUSB_INTERFACE_UTMI) {
291 if (musb->xceiv->otg->set_vbus) 290 if (musb->xceiv->otg->set_vbus)
292 otg_set_vbus(musb->xceiv->otg, 0); 291 otg_set_vbus(musb->xceiv->otg, 0);
293 } 292 }
294 usb_phy_shutdown(musb->xceiv); 293 usb_phy_shutdown(musb->xceiv);
295 break; 294 break;
296 default: 295 default:
297 dev_dbg(dev, "ID float\n"); 296 dev_dbg(dev, "ID float\n");
298 } 297 }
299 } 298 }
300 299
301 300
302 static void omap_musb_mailbox_work(struct work_struct *mailbox_work) 301 static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
303 { 302 {
304 struct omap2430_glue *glue = container_of(mailbox_work, 303 struct omap2430_glue *glue = container_of(mailbox_work,
305 struct omap2430_glue, omap_musb_mailbox_work); 304 struct omap2430_glue, omap_musb_mailbox_work);
306 omap_musb_set_mailbox(glue); 305 omap_musb_set_mailbox(glue);
307 } 306 }
308 307
309 static int omap2430_musb_init(struct musb *musb) 308 static int omap2430_musb_init(struct musb *musb)
310 { 309 {
311 u32 l; 310 u32 l;
312 int status = 0; 311 int status = 0;
313 struct device *dev = musb->controller; 312 struct device *dev = musb->controller;
314 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); 313 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
315 struct musb_hdrc_platform_data *plat = dev->platform_data; 314 struct musb_hdrc_platform_data *plat = dev->platform_data;
316 struct omap_musb_board_data *data = plat->board_data; 315 struct omap_musb_board_data *data = plat->board_data;
317 316
318 /* We require some kind of external transceiver, hooked 317 /* We require some kind of external transceiver, hooked
319 * up through ULPI. TWL4030-family PMICs include one, 318 * up through ULPI. TWL4030-family PMICs include one,
320 * which needs a driver, drivers aren't always needed. 319 * which needs a driver, drivers aren't always needed.
321 */ 320 */
322 musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2); 321 musb->xceiv = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
323 if (IS_ERR_OR_NULL(musb->xceiv)) { 322 if (IS_ERR_OR_NULL(musb->xceiv)) {
324 pr_err("HS USB OTG: no transceiver configured\n"); 323 pr_err("HS USB OTG: no transceiver configured\n");
325 return -ENODEV; 324 return -ENODEV;
326 } 325 }
327 326
328 status = pm_runtime_get_sync(dev); 327 status = pm_runtime_get_sync(dev);
329 if (status < 0) { 328 if (status < 0) {
330 dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); 329 dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
331 goto err1; 330 goto err1;
332 } 331 }
333 332
334 l = musb_readl(musb->mregs, OTG_INTERFSEL); 333 l = musb_readl(musb->mregs, OTG_INTERFSEL);
335 334
336 if (data->interface_type == MUSB_INTERFACE_UTMI) { 335 if (data->interface_type == MUSB_INTERFACE_UTMI) {
337 /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */ 336 /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */
338 l &= ~ULPI_12PIN; /* Disable ULPI */ 337 l &= ~ULPI_12PIN; /* Disable ULPI */
339 l |= UTMI_8BIT; /* Enable UTMI */ 338 l |= UTMI_8BIT; /* Enable UTMI */
340 } else { 339 } else {
341 l |= ULPI_12PIN; 340 l |= ULPI_12PIN;
342 } 341 }
343 342
344 musb_writel(musb->mregs, OTG_INTERFSEL, l); 343 musb_writel(musb->mregs, OTG_INTERFSEL, l);
345 344
346 pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " 345 pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
347 "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", 346 "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
348 musb_readl(musb->mregs, OTG_REVISION), 347 musb_readl(musb->mregs, OTG_REVISION),
349 musb_readl(musb->mregs, OTG_SYSCONFIG), 348 musb_readl(musb->mregs, OTG_SYSCONFIG),
350 musb_readl(musb->mregs, OTG_SYSSTATUS), 349 musb_readl(musb->mregs, OTG_SYSSTATUS),
351 musb_readl(musb->mregs, OTG_INTERFSEL), 350 musb_readl(musb->mregs, OTG_INTERFSEL),
352 musb_readl(musb->mregs, OTG_SIMENABLE)); 351 musb_readl(musb->mregs, OTG_SIMENABLE));
353 352
354 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); 353 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
355 354
356 if (glue->status != OMAP_MUSB_UNKNOWN) 355 if (glue->status != OMAP_MUSB_UNKNOWN)
357 omap_musb_set_mailbox(glue); 356 omap_musb_set_mailbox(glue);
358 357
359 pm_runtime_put_noidle(musb->controller); 358 pm_runtime_put_noidle(musb->controller);
360 return 0; 359 return 0;
361 360
362 err1: 361 err1:
363 return status; 362 return status;
364 } 363 }
365 364
366 static void omap2430_musb_enable(struct musb *musb) 365 static void omap2430_musb_enable(struct musb *musb)
367 { 366 {
368 u8 devctl; 367 u8 devctl;
369 unsigned long timeout = jiffies + msecs_to_jiffies(1000); 368 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
370 struct device *dev = musb->controller; 369 struct device *dev = musb->controller;
371 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); 370 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
372 struct musb_hdrc_platform_data *pdata = dev->platform_data; 371 struct musb_hdrc_platform_data *pdata = dev->platform_data;
373 struct omap_musb_board_data *data = pdata->board_data; 372 struct omap_musb_board_data *data = pdata->board_data;
374 373
375 switch (glue->status) { 374 switch (glue->status) {
376 375
377 case OMAP_MUSB_ID_GROUND: 376 case OMAP_MUSB_ID_GROUND:
378 usb_phy_init(musb->xceiv); 377 usb_phy_init(musb->xceiv);
379 if (data->interface_type != MUSB_INTERFACE_UTMI) 378 if (data->interface_type != MUSB_INTERFACE_UTMI)
380 break; 379 break;
381 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 380 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
382 /* start the session */ 381 /* start the session */
383 devctl |= MUSB_DEVCTL_SESSION; 382 devctl |= MUSB_DEVCTL_SESSION;
384 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 383 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
385 while (musb_readb(musb->mregs, MUSB_DEVCTL) & 384 while (musb_readb(musb->mregs, MUSB_DEVCTL) &
386 MUSB_DEVCTL_BDEVICE) { 385 MUSB_DEVCTL_BDEVICE) {
387 cpu_relax(); 386 cpu_relax();
388 387
389 if (time_after(jiffies, timeout)) { 388 if (time_after(jiffies, timeout)) {
390 dev_err(dev, "configured as A device timeout"); 389 dev_err(dev, "configured as A device timeout");
391 break; 390 break;
392 } 391 }
393 } 392 }
394 break; 393 break;
395 394
396 case OMAP_MUSB_VBUS_VALID: 395 case OMAP_MUSB_VBUS_VALID:
397 usb_phy_init(musb->xceiv); 396 usb_phy_init(musb->xceiv);
398 break; 397 break;
399 398
400 default: 399 default:
401 break; 400 break;
402 } 401 }
403 } 402 }
404 403
405 static void omap2430_musb_disable(struct musb *musb) 404 static void omap2430_musb_disable(struct musb *musb)
406 { 405 {
407 struct device *dev = musb->controller; 406 struct device *dev = musb->controller;
408 struct omap2430_glue *glue = dev_get_drvdata(dev->parent); 407 struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
409 408
410 if (glue->status != OMAP_MUSB_UNKNOWN) 409 if (glue->status != OMAP_MUSB_UNKNOWN)
411 usb_phy_shutdown(musb->xceiv); 410 usb_phy_shutdown(musb->xceiv);
412 } 411 }
413 412
414 static int omap2430_musb_exit(struct musb *musb) 413 static int omap2430_musb_exit(struct musb *musb)
415 { 414 {
416 del_timer_sync(&musb_idle_timer); 415 del_timer_sync(&musb_idle_timer);
417 416
418 omap2430_low_level_exit(musb); 417 omap2430_low_level_exit(musb);
419 418
420 return 0; 419 return 0;
421 } 420 }
422 421
423 static const struct musb_platform_ops omap2430_ops = { 422 static const struct musb_platform_ops omap2430_ops = {
424 .init = omap2430_musb_init, 423 .init = omap2430_musb_init,
425 .exit = omap2430_musb_exit, 424 .exit = omap2430_musb_exit,
426 425
427 .set_mode = omap2430_musb_set_mode, 426 .set_mode = omap2430_musb_set_mode,
428 .try_idle = omap2430_musb_try_idle, 427 .try_idle = omap2430_musb_try_idle,
429 428
430 .set_vbus = omap2430_musb_set_vbus, 429 .set_vbus = omap2430_musb_set_vbus,
431 430
432 .enable = omap2430_musb_enable, 431 .enable = omap2430_musb_enable,
433 .disable = omap2430_musb_disable, 432 .disable = omap2430_musb_disable,
434 }; 433 };
435 434
436 static u64 omap2430_dmamask = DMA_BIT_MASK(32); 435 static u64 omap2430_dmamask = DMA_BIT_MASK(32);
437 436
438 static int __devinit omap2430_probe(struct platform_device *pdev) 437 static int __devinit omap2430_probe(struct platform_device *pdev)
439 { 438 {
440 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 439 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
441 struct platform_device *musb; 440 struct platform_device *musb;
442 struct omap2430_glue *glue; 441 struct omap2430_glue *glue;
443 int ret = -ENOMEM; 442 int ret = -ENOMEM;
444 443
445 glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); 444 glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL);
446 if (!glue) { 445 if (!glue) {
447 dev_err(&pdev->dev, "failed to allocate glue context\n"); 446 dev_err(&pdev->dev, "failed to allocate glue context\n");
448 goto err0; 447 goto err0;
449 } 448 }
450 449
451 musb = platform_device_alloc("musb-hdrc", -1); 450 musb = platform_device_alloc("musb-hdrc", -1);
452 if (!musb) { 451 if (!musb) {
453 dev_err(&pdev->dev, "failed to allocate musb device\n"); 452 dev_err(&pdev->dev, "failed to allocate musb device\n");
454 goto err0; 453 goto err0;
455 } 454 }
456 455
457 musb->dev.parent = &pdev->dev; 456 musb->dev.parent = &pdev->dev;
458 musb->dev.dma_mask = &omap2430_dmamask; 457 musb->dev.dma_mask = &omap2430_dmamask;
459 musb->dev.coherent_dma_mask = omap2430_dmamask; 458 musb->dev.coherent_dma_mask = omap2430_dmamask;
460 459
461 glue->dev = &pdev->dev; 460 glue->dev = &pdev->dev;
462 glue->musb = musb; 461 glue->musb = musb;
463 glue->status = OMAP_MUSB_UNKNOWN; 462 glue->status = OMAP_MUSB_UNKNOWN;
464 463
465 pdata->platform_ops = &omap2430_ops; 464 pdata->platform_ops = &omap2430_ops;
466 465
467 platform_set_drvdata(pdev, glue); 466 platform_set_drvdata(pdev, glue);
468 467
469 /* 468 /*
470 * REVISIT if we ever have two instances of the wrapper, we will be 469 * REVISIT if we ever have two instances of the wrapper, we will be
471 * in big trouble 470 * in big trouble
472 */ 471 */
473 _glue = glue; 472 _glue = glue;
474 473
475 INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work); 474 INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work);
476 475
477 ret = platform_device_add_resources(musb, pdev->resource, 476 ret = platform_device_add_resources(musb, pdev->resource,
478 pdev->num_resources); 477 pdev->num_resources);
479 if (ret) { 478 if (ret) {
480 dev_err(&pdev->dev, "failed to add resources\n"); 479 dev_err(&pdev->dev, "failed to add resources\n");
481 goto err1; 480 goto err1;
482 } 481 }
483 482
484 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 483 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
485 if (ret) { 484 if (ret) {
486 dev_err(&pdev->dev, "failed to add platform_data\n"); 485 dev_err(&pdev->dev, "failed to add platform_data\n");
487 goto err1; 486 goto err1;
488 } 487 }
489 488
490 pm_runtime_enable(&pdev->dev); 489 pm_runtime_enable(&pdev->dev);
491 490
492 ret = platform_device_add(musb); 491 ret = platform_device_add(musb);
493 if (ret) { 492 if (ret) {
494 dev_err(&pdev->dev, "failed to register musb device\n"); 493 dev_err(&pdev->dev, "failed to register musb device\n");
495 goto err1; 494 goto err1;
496 } 495 }
497 496
498 return 0; 497 return 0;
499 498
500 err1: 499 err1:
501 platform_device_put(musb); 500 platform_device_put(musb);
502 501
503 err0: 502 err0:
504 return ret; 503 return ret;
505 } 504 }
506 505
507 static int __devexit omap2430_remove(struct platform_device *pdev) 506 static int __devexit omap2430_remove(struct platform_device *pdev)
508 { 507 {
509 struct omap2430_glue *glue = platform_get_drvdata(pdev); 508 struct omap2430_glue *glue = platform_get_drvdata(pdev);
510 509
511 cancel_work_sync(&glue->omap_musb_mailbox_work); 510 cancel_work_sync(&glue->omap_musb_mailbox_work);
512 platform_device_unregister(glue->musb); 511 platform_device_unregister(glue->musb);
513 512
514 return 0; 513 return 0;
515 } 514 }
516 515
517 #ifdef CONFIG_PM 516 #ifdef CONFIG_PM
518 517
519 static int omap2430_runtime_suspend(struct device *dev) 518 static int omap2430_runtime_suspend(struct device *dev)
520 { 519 {
521 struct omap2430_glue *glue = dev_get_drvdata(dev); 520 struct omap2430_glue *glue = dev_get_drvdata(dev);
522 struct musb *musb = glue_to_musb(glue); 521 struct musb *musb = glue_to_musb(glue);
523 522
524 if (musb) { 523 if (musb) {
525 musb->context.otg_interfsel = musb_readl(musb->mregs, 524 musb->context.otg_interfsel = musb_readl(musb->mregs,
526 OTG_INTERFSEL); 525 OTG_INTERFSEL);
527 526
528 omap2430_low_level_exit(musb); 527 omap2430_low_level_exit(musb);
529 usb_phy_set_suspend(musb->xceiv, 1); 528 usb_phy_set_suspend(musb->xceiv, 1);
530 } 529 }
531 530
532 return 0; 531 return 0;
533 } 532 }
534 533
535 static int omap2430_runtime_resume(struct device *dev) 534 static int omap2430_runtime_resume(struct device *dev)
536 { 535 {
537 struct omap2430_glue *glue = dev_get_drvdata(dev); 536 struct omap2430_glue *glue = dev_get_drvdata(dev);
538 struct musb *musb = glue_to_musb(glue); 537 struct musb *musb = glue_to_musb(glue);
539 538
540 if (musb) { 539 if (musb) {
541 omap2430_low_level_init(musb); 540 omap2430_low_level_init(musb);
542 musb_writel(musb->mregs, OTG_INTERFSEL, 541 musb_writel(musb->mregs, OTG_INTERFSEL,
543 musb->context.otg_interfsel); 542 musb->context.otg_interfsel);
544 543
545 usb_phy_set_suspend(musb->xceiv, 0); 544 usb_phy_set_suspend(musb->xceiv, 0);
546 } 545 }
547 546
548 return 0; 547 return 0;
549 } 548 }
550 549
551 static struct dev_pm_ops omap2430_pm_ops = { 550 static struct dev_pm_ops omap2430_pm_ops = {
552 .runtime_suspend = omap2430_runtime_suspend, 551 .runtime_suspend = omap2430_runtime_suspend,
553 .runtime_resume = omap2430_runtime_resume, 552 .runtime_resume = omap2430_runtime_resume,
554 }; 553 };
555 554
556 #define DEV_PM_OPS (&omap2430_pm_ops) 555 #define DEV_PM_OPS (&omap2430_pm_ops)
557 #else 556 #else
558 #define DEV_PM_OPS NULL 557 #define DEV_PM_OPS NULL
559 #endif 558 #endif
560 559
561 static struct platform_driver omap2430_driver = { 560 static struct platform_driver omap2430_driver = {
562 .probe = omap2430_probe, 561 .probe = omap2430_probe,
563 .remove = __devexit_p(omap2430_remove), 562 .remove = __devexit_p(omap2430_remove),
564 .driver = { 563 .driver = {
565 .name = "musb-omap2430", 564 .name = "musb-omap2430",
566 .pm = DEV_PM_OPS, 565 .pm = DEV_PM_OPS,
567 }, 566 },
568 }; 567 };
569 568
570 MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer"); 569 MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
571 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); 570 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
572 MODULE_LICENSE("GPL v2"); 571 MODULE_LICENSE("GPL v2");
573 572
574 static int __init omap2430_init(void) 573 static int __init omap2430_init(void)
575 { 574 {
576 return platform_driver_register(&omap2430_driver); 575 return platform_driver_register(&omap2430_driver);
577 } 576 }
578 subsys_initcall(omap2430_init); 577 subsys_initcall(omap2430_init);
579 578
580 static void __exit omap2430_exit(void) 579 static void __exit omap2430_exit(void)
581 { 580 {
582 platform_driver_unregister(&omap2430_driver); 581 platform_driver_unregister(&omap2430_driver);
583 } 582 }
584 module_exit(omap2430_exit); 583 module_exit(omap2430_exit);
585 584
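
The omap2430 glue above does not poll for cable events itself: an external ID/VBUS detection driver calls omap_musb_mailbox() with an omap_musb_vbus_id_status value, the glue records it and schedules omap_musb_mailbox_work, and omap_musb_set_mailbox() then drives the PHY, VBUS and runtime PM from workqueue context. A minimal sketch of such a caller, assuming <linux/usb/musb-omap.h> is available; example_id_irq() and example_read_id_pin() are hypothetical names used only for illustration:

    #include <linux/interrupt.h>
    #include <linux/usb/musb-omap.h>

    /* hypothetical ID-pin interrupt handler in a PHY or PMIC driver */
    static irqreturn_t example_id_irq(int irq, void *data)
    {
            bool id_grounded = example_read_id_pin(data);   /* assumed helper */

            /* hand the event to the musb glue; omap_musb_set_mailbox()
             * takes care of usb_phy_init/shutdown, VBUS and runtime PM */
            omap_musb_mailbox(id_grounded ? OMAP_MUSB_ID_GROUND
                                          : OMAP_MUSB_ID_FLOAT);

            return IRQ_HANDLED;
    }
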
drivers/usb/musb/tusb6010.c
1 /* 1 /*
2 * TUSB6010 USB 2.0 OTG Dual Role controller 2 * TUSB6010 USB 2.0 OTG Dual Role controller
3 * 3 *
4 * Copyright (C) 2006 Nokia Corporation 4 * Copyright (C) 2006 Nokia Corporation
5 * Tony Lindgren <tony@atomide.com> 5 * Tony Lindgren <tony@atomide.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 * 10 *
11 * Notes: 11 * Notes:
12 * - Driver assumes that interface to external host (main CPU) is 12 * - Driver assumes that interface to external host (main CPU) is
13 * configured for NOR FLASH interface instead of VLYNQ serial 13 * configured for NOR FLASH interface instead of VLYNQ serial
14 * interface. 14 * interface.
15 */ 15 */
16 16
17 #include <linux/module.h> 17 #include <linux/module.h>
18 #include <linux/kernel.h> 18 #include <linux/kernel.h>
19 #include <linux/errno.h> 19 #include <linux/errno.h>
20 #include <linux/err.h> 20 #include <linux/err.h>
21 #include <linux/init.h> 21 #include <linux/init.h>
22 #include <linux/prefetch.h> 22 #include <linux/prefetch.h>
23 #include <linux/usb.h> 23 #include <linux/usb.h>
24 #include <linux/irq.h> 24 #include <linux/irq.h>
25 #include <linux/platform_device.h> 25 #include <linux/platform_device.h>
26 #include <linux/dma-mapping.h> 26 #include <linux/dma-mapping.h>
27 27
28 #include "musb_core.h" 28 #include "musb_core.h"
29 29
30 struct tusb6010_glue { 30 struct tusb6010_glue {
31 struct device *dev; 31 struct device *dev;
32 struct platform_device *musb; 32 struct platform_device *musb;
33 }; 33 };
34 34
35 static void tusb_musb_set_vbus(struct musb *musb, int is_on); 35 static void tusb_musb_set_vbus(struct musb *musb, int is_on);
36 36
37 #define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf) 37 #define TUSB_REV_MAJOR(reg_val) ((reg_val >> 4) & 0xf)
38 #define TUSB_REV_MINOR(reg_val) (reg_val & 0xf) 38 #define TUSB_REV_MINOR(reg_val) (reg_val & 0xf)
39 39
40 /* 40 /*
41 * Checks the revision. We need to use the DMA register as 3.0 does not 41 * Checks the revision. We need to use the DMA register as 3.0 does not
42 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV. 42 * have correct versions for TUSB_PRCM_REV or TUSB_INT_CTRL_REV.
43 */ 43 */
44 u8 tusb_get_revision(struct musb *musb) 44 u8 tusb_get_revision(struct musb *musb)
45 { 45 {
46 void __iomem *tbase = musb->ctrl_base; 46 void __iomem *tbase = musb->ctrl_base;
47 u32 die_id; 47 u32 die_id;
48 u8 rev; 48 u8 rev;
49 49
50 rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff; 50 rev = musb_readl(tbase, TUSB_DMA_CTRL_REV) & 0xff;
51 if (TUSB_REV_MAJOR(rev) == 3) { 51 if (TUSB_REV_MAJOR(rev) == 3) {
52 die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, 52 die_id = TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase,
53 TUSB_DIDR1_HI)); 53 TUSB_DIDR1_HI));
54 if (die_id >= TUSB_DIDR1_HI_REV_31) 54 if (die_id >= TUSB_DIDR1_HI_REV_31)
55 rev |= 1; 55 rev |= 1;
56 } 56 }
57 57
58 return rev; 58 return rev;
59 } 59 }
60 EXPORT_SYMBOL_GPL(tusb_get_revision); 60 EXPORT_SYMBOL_GPL(tusb_get_revision);
61 61
62 static int tusb_print_revision(struct musb *musb) 62 static int tusb_print_revision(struct musb *musb)
63 { 63 {
64 void __iomem *tbase = musb->ctrl_base; 64 void __iomem *tbase = musb->ctrl_base;
65 u8 rev; 65 u8 rev;
66 66
67 rev = tusb_get_revision(musb); 67 rev = tusb_get_revision(musb);
68 68
69 pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n", 69 pr_info("tusb: %s%i.%i %s%i.%i %s%i.%i %s%i.%i %s%i %s%i.%i\n",
70 "prcm", 70 "prcm",
71 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)), 71 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_PRCM_REV)),
72 TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)), 72 TUSB_REV_MINOR(musb_readl(tbase, TUSB_PRCM_REV)),
73 "int", 73 "int",
74 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), 74 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
75 TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)), 75 TUSB_REV_MINOR(musb_readl(tbase, TUSB_INT_CTRL_REV)),
76 "gpio", 76 "gpio",
77 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)), 77 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_GPIO_REV)),
78 TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)), 78 TUSB_REV_MINOR(musb_readl(tbase, TUSB_GPIO_REV)),
79 "dma", 79 "dma",
80 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), 80 TUSB_REV_MAJOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
81 TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)), 81 TUSB_REV_MINOR(musb_readl(tbase, TUSB_DMA_CTRL_REV)),
82 "dieid", 82 "dieid",
83 TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)), 83 TUSB_DIDR1_HI_CHIP_REV(musb_readl(tbase, TUSB_DIDR1_HI)),
84 "rev", 84 "rev",
85 TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev)); 85 TUSB_REV_MAJOR(rev), TUSB_REV_MINOR(rev));
86 86
87 return tusb_get_revision(musb); 87 return tusb_get_revision(musb);
88 } 88 }
89 89
90 #define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \ 90 #define WBUS_QUIRK_MASK (TUSB_PHY_OTG_CTRL_TESTM2 | TUSB_PHY_OTG_CTRL_TESTM1 \
91 | TUSB_PHY_OTG_CTRL_TESTM0) 91 | TUSB_PHY_OTG_CTRL_TESTM0)
92 92
93 /* 93 /*
94 * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0. 94 * Workaround for spontaneous WBUS wake-up issue #2 for tusb3.0.
95 * Disables power detection in PHY for the duration of idle. 95 * Disables power detection in PHY for the duration of idle.
96 */ 96 */
97 static void tusb_wbus_quirk(struct musb *musb, int enabled) 97 static void tusb_wbus_quirk(struct musb *musb, int enabled)
98 { 98 {
99 void __iomem *tbase = musb->ctrl_base; 99 void __iomem *tbase = musb->ctrl_base;
100 static u32 phy_otg_ctrl, phy_otg_ena; 100 static u32 phy_otg_ctrl, phy_otg_ena;
101 u32 tmp; 101 u32 tmp;
102 102
103 if (enabled) { 103 if (enabled) {
104 phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); 104 phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
105 phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); 105 phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
106 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT 106 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT
107 | phy_otg_ena | WBUS_QUIRK_MASK; 107 | phy_otg_ena | WBUS_QUIRK_MASK;
108 musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); 108 musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
109 tmp = phy_otg_ena & ~WBUS_QUIRK_MASK; 109 tmp = phy_otg_ena & ~WBUS_QUIRK_MASK;
110 tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2; 110 tmp |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_TESTM2;
111 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); 111 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
112 dev_dbg(musb->controller, "Enabled tusb wbus quirk ctrl %08x ena %08x\n", 112 dev_dbg(musb->controller, "Enabled tusb wbus quirk ctrl %08x ena %08x\n",
113 musb_readl(tbase, TUSB_PHY_OTG_CTRL), 113 musb_readl(tbase, TUSB_PHY_OTG_CTRL),
114 musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); 114 musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
115 } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE) 115 } else if (musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)
116 & TUSB_PHY_OTG_CTRL_TESTM2) { 116 & TUSB_PHY_OTG_CTRL_TESTM2) {
117 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl; 117 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl;
118 musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp); 118 musb_writel(tbase, TUSB_PHY_OTG_CTRL, tmp);
119 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena; 119 tmp = TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena;
120 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp); 120 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, tmp);
121 dev_dbg(musb->controller, "Disabled tusb wbus quirk ctrl %08x ena %08x\n", 121 dev_dbg(musb->controller, "Disabled tusb wbus quirk ctrl %08x ena %08x\n",
122 musb_readl(tbase, TUSB_PHY_OTG_CTRL), 122 musb_readl(tbase, TUSB_PHY_OTG_CTRL),
123 musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE)); 123 musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE));
124 phy_otg_ctrl = 0; 124 phy_otg_ctrl = 0;
125 phy_otg_ena = 0; 125 phy_otg_ena = 0;
126 } 126 }
127 } 127 }
128 128
129 /* 129 /*
130 * TUSB 6010 may use a parallel bus that doesn't support byte ops; 130 * TUSB 6010 may use a parallel bus that doesn't support byte ops;
131 * so both loading and unloading FIFOs need explicit byte counts. 131 * so both loading and unloading FIFOs need explicit byte counts.
132 */ 132 */
133 133
134 static inline void 134 static inline void
135 tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len) 135 tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
136 { 136 {
137 u32 val; 137 u32 val;
138 int i; 138 int i;
139 139
140 if (len > 4) { 140 if (len > 4) {
141 for (i = 0; i < (len >> 2); i++) { 141 for (i = 0; i < (len >> 2); i++) {
142 memcpy(&val, buf, 4); 142 memcpy(&val, buf, 4);
143 musb_writel(fifo, 0, val); 143 musb_writel(fifo, 0, val);
144 buf += 4; 144 buf += 4;
145 } 145 }
146 len %= 4; 146 len %= 4;
147 } 147 }
148 if (len > 0) { 148 if (len > 0) {
149 /* Write the rest 1 - 3 bytes to FIFO */ 149 /* Write the rest 1 - 3 bytes to FIFO */
150 memcpy(&val, buf, len); 150 memcpy(&val, buf, len);
151 musb_writel(fifo, 0, val); 151 musb_writel(fifo, 0, val);
152 } 152 }
153 } 153 }
154 154
155 static inline void tusb_fifo_read_unaligned(void __iomem *fifo, 155 static inline void tusb_fifo_read_unaligned(void __iomem *fifo,
156 void *buf, u16 len) 156 void *buf, u16 len)
157 { 157 {
158 u32 val; 158 u32 val;
159 int i; 159 int i;
160 160
161 if (len > 4) { 161 if (len > 4) {
162 for (i = 0; i < (len >> 2); i++) { 162 for (i = 0; i < (len >> 2); i++) {
163 val = musb_readl(fifo, 0); 163 val = musb_readl(fifo, 0);
164 memcpy(buf, &val, 4); 164 memcpy(buf, &val, 4);
165 buf += 4; 165 buf += 4;
166 } 166 }
167 len %= 4; 167 len %= 4;
168 } 168 }
169 if (len > 0) { 169 if (len > 0) {
170 /* Read the rest 1 - 3 bytes from FIFO */ 170 /* Read the rest 1 - 3 bytes from FIFO */
171 val = musb_readl(fifo, 0); 171 val = musb_readl(fifo, 0);
172 memcpy(buf, &val, len); 172 memcpy(buf, &val, len);
173 } 173 }
174 } 174 }
175 175
176 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf) 176 void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *buf)
177 { 177 {
178 struct musb *musb = hw_ep->musb; 178 struct musb *musb = hw_ep->musb;
179 void __iomem *ep_conf = hw_ep->conf; 179 void __iomem *ep_conf = hw_ep->conf;
180 void __iomem *fifo = hw_ep->fifo; 180 void __iomem *fifo = hw_ep->fifo;
181 u8 epnum = hw_ep->epnum; 181 u8 epnum = hw_ep->epnum;
182 182
183 prefetch(buf); 183 prefetch(buf);
184 184
185 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", 185 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
186 'T', epnum, fifo, len, buf); 186 'T', epnum, fifo, len, buf);
187 187
188 if (epnum) 188 if (epnum)
189 musb_writel(ep_conf, TUSB_EP_TX_OFFSET, 189 musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
190 TUSB_EP_CONFIG_XFR_SIZE(len)); 190 TUSB_EP_CONFIG_XFR_SIZE(len));
191 else 191 else
192 musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX | 192 musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_DIR_TX |
193 TUSB_EP0_CONFIG_XFR_SIZE(len)); 193 TUSB_EP0_CONFIG_XFR_SIZE(len));
194 194
195 if (likely((0x01 & (unsigned long) buf) == 0)) { 195 if (likely((0x01 & (unsigned long) buf) == 0)) {
196 196
197 /* Best case is 32bit-aligned destination address */ 197 /* Best case is 32bit-aligned destination address */
198 if ((0x02 & (unsigned long) buf) == 0) { 198 if ((0x02 & (unsigned long) buf) == 0) {
199 if (len >= 4) { 199 if (len >= 4) {
200 writesl(fifo, buf, len >> 2); 200 writesl(fifo, buf, len >> 2);
201 buf += (len & ~0x03); 201 buf += (len & ~0x03);
202 len &= 0x03; 202 len &= 0x03;
203 } 203 }
204 } else { 204 } else {
205 if (len >= 2) { 205 if (len >= 2) {
206 u32 val; 206 u32 val;
207 int i; 207 int i;
208 208
209 /* Cannot use writesw, fifo is 32-bit */ 209 /* Cannot use writesw, fifo is 32-bit */
210 for (i = 0; i < (len >> 2); i++) { 210 for (i = 0; i < (len >> 2); i++) {
211 val = (u32)(*(u16 *)buf); 211 val = (u32)(*(u16 *)buf);
212 buf += 2; 212 buf += 2;
213 val |= (*(u16 *)buf) << 16; 213 val |= (*(u16 *)buf) << 16;
214 buf += 2; 214 buf += 2;
215 musb_writel(fifo, 0, val); 215 musb_writel(fifo, 0, val);
216 } 216 }
217 len &= 0x03; 217 len &= 0x03;
218 } 218 }
219 } 219 }
220 } 220 }
221 221
222 if (len > 0) 222 if (len > 0)
223 tusb_fifo_write_unaligned(fifo, buf, len); 223 tusb_fifo_write_unaligned(fifo, buf, len);
224 } 224 }
225 225
226 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf) 226 void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *buf)
227 { 227 {
228 struct musb *musb = hw_ep->musb; 228 struct musb *musb = hw_ep->musb;
229 void __iomem *ep_conf = hw_ep->conf; 229 void __iomem *ep_conf = hw_ep->conf;
230 void __iomem *fifo = hw_ep->fifo; 230 void __iomem *fifo = hw_ep->fifo;
231 u8 epnum = hw_ep->epnum; 231 u8 epnum = hw_ep->epnum;
232 232
233 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n", 233 dev_dbg(musb->controller, "%cX ep%d fifo %p count %d buf %p\n",
234 'R', epnum, fifo, len, buf); 234 'R', epnum, fifo, len, buf);
235 235
236 if (epnum) 236 if (epnum)
237 musb_writel(ep_conf, TUSB_EP_RX_OFFSET, 237 musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
238 TUSB_EP_CONFIG_XFR_SIZE(len)); 238 TUSB_EP_CONFIG_XFR_SIZE(len));
239 else 239 else
240 musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len)); 240 musb_writel(ep_conf, 0, TUSB_EP0_CONFIG_XFR_SIZE(len));
241 241
242 if (likely((0x01 & (unsigned long) buf) == 0)) { 242 if (likely((0x01 & (unsigned long) buf) == 0)) {
243 243
244 /* Best case is 32bit-aligned destination address */ 244 /* Best case is 32bit-aligned destination address */
245 if ((0x02 & (unsigned long) buf) == 0) { 245 if ((0x02 & (unsigned long) buf) == 0) {
246 if (len >= 4) { 246 if (len >= 4) {
247 readsl(fifo, buf, len >> 2); 247 readsl(fifo, buf, len >> 2);
248 buf += (len & ~0x03); 248 buf += (len & ~0x03);
249 len &= 0x03; 249 len &= 0x03;
250 } 250 }
251 } else { 251 } else {
252 if (len >= 2) { 252 if (len >= 2) {
253 u32 val; 253 u32 val;
254 int i; 254 int i;
255 255
256 /* Cannot use readsw, fifo is 32-bit */ 256 /* Cannot use readsw, fifo is 32-bit */
257 for (i = 0; i < (len >> 2); i++) { 257 for (i = 0; i < (len >> 2); i++) {
258 val = musb_readl(fifo, 0); 258 val = musb_readl(fifo, 0);
259 *(u16 *)buf = (u16)(val & 0xffff); 259 *(u16 *)buf = (u16)(val & 0xffff);
260 buf += 2; 260 buf += 2;
261 *(u16 *)buf = (u16)(val >> 16); 261 *(u16 *)buf = (u16)(val >> 16);
262 buf += 2; 262 buf += 2;
263 } 263 }
264 len &= 0x03; 264 len &= 0x03;
265 } 265 }
266 } 266 }
267 } 267 }
268 268
269 if (len > 0) 269 if (len > 0)
270 tusb_fifo_read_unaligned(fifo, buf, len); 270 tusb_fifo_read_unaligned(fifo, buf, len);
271 } 271 }
272 272
273 static struct musb *the_musb; 273 static struct musb *the_musb;
274 274
275 /* This is used by gadget drivers, and OTG transceiver logic, allowing 275 /* This is used by gadget drivers, and OTG transceiver logic, allowing
276 * at most mA current to be drawn from VBUS during a Default-B session 276 * at most mA current to be drawn from VBUS during a Default-B session
277 * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host 277 * (that is, while VBUS exceeds 4.4V). In Default-A (including pure host
278 * mode), or low power Default-B sessions, something else supplies power. 278 * mode), or low power Default-B sessions, something else supplies power.
279 * Caller must take care of locking. 279 * Caller must take care of locking.
280 */ 280 */
281 static int tusb_draw_power(struct usb_phy *x, unsigned mA) 281 static int tusb_draw_power(struct usb_phy *x, unsigned mA)
282 { 282 {
283 struct musb *musb = the_musb; 283 struct musb *musb = the_musb;
284 void __iomem *tbase = musb->ctrl_base; 284 void __iomem *tbase = musb->ctrl_base;
285 u32 reg; 285 u32 reg;
286 286
287 /* tps65030 seems to consume max 100mA, with maybe 60mA available 287 /* tps65030 seems to consume max 100mA, with maybe 60mA available
288 * (measured on one board) for things other than tps and tusb. 288 * (measured on one board) for things other than tps and tusb.
289 * 289 *
290 * Boards sharing the CPU clock with CLKIN will need to prevent 290 * Boards sharing the CPU clock with CLKIN will need to prevent
291 * certain idle sleep states while the USB link is active. 291 * certain idle sleep states while the USB link is active.
292 * 292 *
293 * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }. 293 * REVISIT we could use VBUS to supply only _one_ of { 1.5V, 3.3V }.
294 * The actual current usage would be very board-specific. For now, 294 * The actual current usage would be very board-specific. For now,
295 * it's simpler to just use an aggregate (also board-specific). 295 * it's simpler to just use an aggregate (also board-specific).
296 */ 296 */
297 if (x->otg->default_a || mA < (musb->min_power << 1)) 297 if (x->otg->default_a || mA < (musb->min_power << 1))
298 mA = 0; 298 mA = 0;
299 299
300 reg = musb_readl(tbase, TUSB_PRCM_MNGMT); 300 reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
301 if (mA) { 301 if (mA) {
302 musb->is_bus_powered = 1; 302 musb->is_bus_powered = 1;
303 reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN; 303 reg |= TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN;
304 } else { 304 } else {
305 musb->is_bus_powered = 0; 305 musb->is_bus_powered = 0;
306 reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); 306 reg &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
307 } 307 }
308 musb_writel(tbase, TUSB_PRCM_MNGMT, reg); 308 musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
309 309
310 dev_dbg(musb->controller, "draw max %d mA VBUS\n", mA); 310 dev_dbg(musb->controller, "draw max %d mA VBUS\n", mA);
311 return 0; 311 return 0;
312 } 312 }
313 313
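tusb_draw_power() is installed later in tusb_musb_init() as the PHY's set_power hook, so gadget-side current budgeting reaches it through the generic PHY wrapper. A hedged usage sketch, assuming the usb_phy_set_power() helper of this kernel generation and a live struct musb from this driver:

/* Illustrative only: report a new VBUS current budget; on TUSB6010 this
 * ends up in tusb_draw_power() once musb->xceiv->set_power is assigned.
 */
static void tusb_example_set_vbus_budget(struct musb *musb, unsigned mA)
{
	usb_phy_set_power(musb->xceiv, mA);
}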
314 /* workaround for issue 13: change clock during chip idle 314 /* workaround for issue 13: change clock during chip idle
315 * (to be fixed in rev3 silicon) ... symptoms include disconnect 315 * (to be fixed in rev3 silicon) ... symptoms include disconnect
316 * or looping suspend/resume cycles 316 * or looping suspend/resume cycles
317 */ 317 */
318 static void tusb_set_clock_source(struct musb *musb, unsigned mode) 318 static void tusb_set_clock_source(struct musb *musb, unsigned mode)
319 { 319 {
320 void __iomem *tbase = musb->ctrl_base; 320 void __iomem *tbase = musb->ctrl_base;
321 u32 reg; 321 u32 reg;
322 322
323 reg = musb_readl(tbase, TUSB_PRCM_CONF); 323 reg = musb_readl(tbase, TUSB_PRCM_CONF);
324 reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3); 324 reg &= ~TUSB_PRCM_CONF_SYS_CLKSEL(0x3);
325 325
326 /* 0 = refclk (clkin, XI) 326 /* 0 = refclk (clkin, XI)
327 * 1 = PHY 60 MHz (internal PLL) 327 * 1 = PHY 60 MHz (internal PLL)
328 * 2 = not supported 328 * 2 = not supported
329 * 3 = what? 329 * 3 = what?
330 */ 330 */
331 if (mode > 0) 331 if (mode > 0)
332 reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3); 332 reg |= TUSB_PRCM_CONF_SYS_CLKSEL(mode & 0x3);
333 333
334 musb_writel(tbase, TUSB_PRCM_CONF, reg); 334 musb_writel(tbase, TUSB_PRCM_CONF, reg);
335 335
336 /* FIXME tusb6010_platform_retime(mode == 0); */ 336 /* FIXME tusb6010_platform_retime(mode == 0); */
337 } 337 }
338 338
339 /* 339 /*
340 * Idle TUSB6010 until next wake-up event; NOR access always wakes. 340 * Idle TUSB6010 until next wake-up event; NOR access always wakes.
341 * Other code ensures that we idle unless we're connected _and_ the 341 * Other code ensures that we idle unless we're connected _and_ the
342 * USB link is not suspended ... and tells us the relevant wakeup 342 * USB link is not suspended ... and tells us the relevant wakeup
343 * events. SW_EN for voltage is handled separately. 343 * events. SW_EN for voltage is handled separately.
344 */ 344 */
345 static void tusb_allow_idle(struct musb *musb, u32 wakeup_enables) 345 static void tusb_allow_idle(struct musb *musb, u32 wakeup_enables)
346 { 346 {
347 void __iomem *tbase = musb->ctrl_base; 347 void __iomem *tbase = musb->ctrl_base;
348 u32 reg; 348 u32 reg;
349 349
350 if ((wakeup_enables & TUSB_PRCM_WBUS) 350 if ((wakeup_enables & TUSB_PRCM_WBUS)
351 && (tusb_get_revision(musb) == TUSB_REV_30)) 351 && (tusb_get_revision(musb) == TUSB_REV_30))
352 tusb_wbus_quirk(musb, 1); 352 tusb_wbus_quirk(musb, 1);
353 353
354 tusb_set_clock_source(musb, 0); 354 tusb_set_clock_source(musb, 0);
355 355
356 wakeup_enables |= TUSB_PRCM_WNORCS; 356 wakeup_enables |= TUSB_PRCM_WNORCS;
357 musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables); 357 musb_writel(tbase, TUSB_PRCM_WAKEUP_MASK, ~wakeup_enables);
358 358
359 /* REVISIT writeup of WID implies that if WID set and ID is grounded, 359 /* REVISIT writeup of WID implies that if WID set and ID is grounded,
360 * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared. 360 * TUSB_PHY_OTG_CTRL.TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP must be cleared.
361 * Presumably that's mostly to save power, hence WID is immaterial ... 361 * Presumably that's mostly to save power, hence WID is immaterial ...
362 */ 362 */
363 363
364 reg = musb_readl(tbase, TUSB_PRCM_MNGMT); 364 reg = musb_readl(tbase, TUSB_PRCM_MNGMT);
365 /* issue 4: when driving vbus, use hipower (vbus_det) comparator */ 365 /* issue 4: when driving vbus, use hipower (vbus_det) comparator */
366 if (is_host_active(musb)) { 366 if (is_host_active(musb)) {
367 reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; 367 reg |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
368 reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN; 368 reg &= ~TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
369 } else { 369 } else {
370 reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN; 370 reg |= TUSB_PRCM_MNGMT_OTG_SESS_END_EN;
371 reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; 371 reg &= ~TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
372 } 372 }
373 reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE; 373 reg |= TUSB_PRCM_MNGMT_PM_IDLE | TUSB_PRCM_MNGMT_DEV_IDLE;
374 musb_writel(tbase, TUSB_PRCM_MNGMT, reg); 374 musb_writel(tbase, TUSB_PRCM_MNGMT, reg);
375 375
376 dev_dbg(musb->controller, "idle, wake on %02x\n", wakeup_enables); 376 dev_dbg(musb->controller, "idle, wake on %02x\n", wakeup_enables);
377 } 377 }
378 378
379 /* 379 /*
380 * Updates cable VBUS status. Caller must take care of locking. 380 * Updates cable VBUS status. Caller must take care of locking.
381 */ 381 */
382 static int tusb_musb_vbus_status(struct musb *musb) 382 static int tusb_musb_vbus_status(struct musb *musb)
383 { 383 {
384 void __iomem *tbase = musb->ctrl_base; 384 void __iomem *tbase = musb->ctrl_base;
385 u32 otg_stat, prcm_mngmt; 385 u32 otg_stat, prcm_mngmt;
386 int ret = 0; 386 int ret = 0;
387 387
388 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 388 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
389 prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT); 389 prcm_mngmt = musb_readl(tbase, TUSB_PRCM_MNGMT);
390 390
391 /* Temporarily enable VBUS detection if it was disabled for 391 /* Temporarily enable VBUS detection if it was disabled for
392 * suspend mode. Unless it's enabled otg_stat and devctl will 392 * suspend mode. Unless it's enabled otg_stat and devctl will
393 * not show correct VBUS state. 393 * not show correct VBUS state.
394 */ 394 */
395 if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) { 395 if (!(prcm_mngmt & TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN)) {
396 u32 tmp = prcm_mngmt; 396 u32 tmp = prcm_mngmt;
397 tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN; 397 tmp |= TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN;
398 musb_writel(tbase, TUSB_PRCM_MNGMT, tmp); 398 musb_writel(tbase, TUSB_PRCM_MNGMT, tmp);
399 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 399 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
400 musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt); 400 musb_writel(tbase, TUSB_PRCM_MNGMT, prcm_mngmt);
401 } 401 }
402 402
403 if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) 403 if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID)
404 ret = 1; 404 ret = 1;
405 405
406 return ret; 406 return ret;
407 } 407 }
408 408
409 static struct timer_list musb_idle_timer; 409 static struct timer_list musb_idle_timer;
410 410
411 static void musb_do_idle(unsigned long _musb) 411 static void musb_do_idle(unsigned long _musb)
412 { 412 {
413 struct musb *musb = (void *)_musb; 413 struct musb *musb = (void *)_musb;
414 unsigned long flags; 414 unsigned long flags;
415 415
416 spin_lock_irqsave(&musb->lock, flags); 416 spin_lock_irqsave(&musb->lock, flags);
417 417
418 switch (musb->xceiv->state) { 418 switch (musb->xceiv->state) {
419 case OTG_STATE_A_WAIT_BCON: 419 case OTG_STATE_A_WAIT_BCON:
420 if ((musb->a_wait_bcon != 0) 420 if ((musb->a_wait_bcon != 0)
421 && (musb->idle_timeout == 0 421 && (musb->idle_timeout == 0
422 || time_after(jiffies, musb->idle_timeout))) { 422 || time_after(jiffies, musb->idle_timeout))) {
423 dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n", 423 dev_dbg(musb->controller, "Nothing connected %s, turning off VBUS\n",
424 otg_state_string(musb->xceiv->state)); 424 otg_state_string(musb->xceiv->state));
425 } 425 }
426 /* FALLTHROUGH */ 426 /* FALLTHROUGH */
427 case OTG_STATE_A_IDLE: 427 case OTG_STATE_A_IDLE:
428 tusb_musb_set_vbus(musb, 0); 428 tusb_musb_set_vbus(musb, 0);
429 default: 429 default:
430 break; 430 break;
431 } 431 }
432 432
433 if (!musb->is_active) { 433 if (!musb->is_active) {
434 u32 wakeups; 434 u32 wakeups;
435 435
436 /* wait until khubd handles port change status */ 436 /* wait until khubd handles port change status */
437 if (is_host_active(musb) && (musb->port1_status >> 16)) 437 if (is_host_active(musb) && (musb->port1_status >> 16))
438 goto done; 438 goto done;
439 439
440 if (is_peripheral_enabled(musb) && !musb->gadget_driver) { 440 if (!musb->gadget_driver) {
441 wakeups = 0; 441 wakeups = 0;
442 } else { 442 } else {
443 wakeups = TUSB_PRCM_WHOSTDISCON 443 wakeups = TUSB_PRCM_WHOSTDISCON
444 | TUSB_PRCM_WBUS 444 | TUSB_PRCM_WBUS
445 | TUSB_PRCM_WVBUS; 445 | TUSB_PRCM_WVBUS;
446 if (is_otg_enabled(musb)) 446 wakeups |= TUSB_PRCM_WID;
447 wakeups |= TUSB_PRCM_WID;
448 } 447 }
449 tusb_allow_idle(musb, wakeups); 448 tusb_allow_idle(musb, wakeups);
450 } 449 }
451 done: 450 done:
452 spin_unlock_irqrestore(&musb->lock, flags); 451 spin_unlock_irqrestore(&musb->lock, flags);
453 } 452 }
454 453
455 /* 454 /*
456 * Maybe put TUSB6010 into idle mode depending on USB link status, 455 * Maybe put TUSB6010 into idle mode depending on USB link status,
457 * like "disconnected" or "suspended". We'll be woken out of it by 456 * like "disconnected" or "suspended". We'll be woken out of it by
458 * connect, resume, or disconnect. 457 * connect, resume, or disconnect.
459 * 458 *
460 * Needs to be called as the last function everywhere where there is 459 * Needs to be called as the last function everywhere where there is
461 * register access to TUSB6010 because of NOR flash wake-up. 460 * register access to TUSB6010 because of NOR flash wake-up.
462 * Caller should own controller spinlock. 461 * Caller should own controller spinlock.
463 * 462 *
464 * Delay because peripheral enables D+ pullup 3msec after SE0, and 463 * Delay because peripheral enables D+ pullup 3msec after SE0, and
465 * we don't want to treat that full speed J as a wakeup event. 464 * we don't want to treat that full speed J as a wakeup event.
466 * ... peripherals must draw only suspend current after 10 msec. 465 * ... peripherals must draw only suspend current after 10 msec.
467 */ 466 */
468 static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout) 467 static void tusb_musb_try_idle(struct musb *musb, unsigned long timeout)
469 { 468 {
470 unsigned long default_timeout = jiffies + msecs_to_jiffies(3); 469 unsigned long default_timeout = jiffies + msecs_to_jiffies(3);
471 static unsigned long last_timer; 470 static unsigned long last_timer;
472 471
473 if (timeout == 0) 472 if (timeout == 0)
474 timeout = default_timeout; 473 timeout = default_timeout;
475 474
476 /* Never idle if active, or when VBUS timeout is not set as host */ 475 /* Never idle if active, or when VBUS timeout is not set as host */
477 if (musb->is_active || ((musb->a_wait_bcon == 0) 476 if (musb->is_active || ((musb->a_wait_bcon == 0)
478 && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { 477 && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) {
479 dev_dbg(musb->controller, "%s active, deleting timer\n", 478 dev_dbg(musb->controller, "%s active, deleting timer\n",
480 otg_state_string(musb->xceiv->state)); 479 otg_state_string(musb->xceiv->state));
481 del_timer(&musb_idle_timer); 480 del_timer(&musb_idle_timer);
482 last_timer = jiffies; 481 last_timer = jiffies;
483 return; 482 return;
484 } 483 }
485 484
486 if (time_after(last_timer, timeout)) { 485 if (time_after(last_timer, timeout)) {
487 if (!timer_pending(&musb_idle_timer)) 486 if (!timer_pending(&musb_idle_timer))
488 last_timer = timeout; 487 last_timer = timeout;
489 else { 488 else {
490 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); 489 dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
491 return; 490 return;
492 } 491 }
493 } 492 }
494 last_timer = timeout; 493 last_timer = timeout;
495 494
496 dev_dbg(musb->controller, "%s inactive, starting idle timer for %lu ms\n", 495 dev_dbg(musb->controller, "%s inactive, starting idle timer for %lu ms\n",
497 otg_state_string(musb->xceiv->state), 496 otg_state_string(musb->xceiv->state),
498 (unsigned long)jiffies_to_msecs(timeout - jiffies)); 497 (unsigned long)jiffies_to_msecs(timeout - jiffies));
499 mod_timer(&musb_idle_timer, timeout); 498 mod_timer(&musb_idle_timer, timeout);
500 } 499 }
501 500
502 /* ticks of 60 MHz clock */ 501 /* ticks of 60 MHz clock */
503 #define DEVCLOCK 60000000 502 #define DEVCLOCK 60000000
504 #define OTG_TIMER_MS(msecs) ((msecs) \ 503 #define OTG_TIMER_MS(msecs) ((msecs) \
505 ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \ 504 ? (TUSB_DEV_OTG_TIMER_VAL((DEVCLOCK/1000)*(msecs)) \
506 | TUSB_DEV_OTG_TIMER_ENABLE) \ 505 | TUSB_DEV_OTG_TIMER_ENABLE) \
507 : 0) 506 : 0)
508 507
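OTG_TIMER_MS() turns a millisecond timeout into ticks of the 60 MHz device clock and ORs in the enable bit; a zero argument leaves the timer off. A rough illustration of the arithmetic (the exact register encoding is left to TUSB_DEV_OTG_TIMER_VAL()):

/* Illustrative only: raw tick count for a millisecond timeout at 60 MHz.
 * DEVCLOCK / 1000 is 60,000 ticks per ms, so a hypothetical 100 ms
 * timeout corresponds to 6,000,000 ticks before field encoding.
 */
static inline u32 tusb_example_otg_ticks(unsigned int msecs)
{
	return (DEVCLOCK / 1000) * msecs;
}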
509 static void tusb_musb_set_vbus(struct musb *musb, int is_on) 508 static void tusb_musb_set_vbus(struct musb *musb, int is_on)
510 { 509 {
511 void __iomem *tbase = musb->ctrl_base; 510 void __iomem *tbase = musb->ctrl_base;
512 u32 conf, prcm, timer; 511 u32 conf, prcm, timer;
513 u8 devctl; 512 u8 devctl;
514 struct usb_otg *otg = musb->xceiv->otg; 513 struct usb_otg *otg = musb->xceiv->otg;
515 514
516 /* HDRC controls CPEN, but beware current surges during device 515 /* HDRC controls CPEN, but beware current surges during device
517 * connect. They can trigger transient overcurrent conditions 516 * connect. They can trigger transient overcurrent conditions
518 * that must be ignored. 517 * that must be ignored.
519 */ 518 */
520 519
521 prcm = musb_readl(tbase, TUSB_PRCM_MNGMT); 520 prcm = musb_readl(tbase, TUSB_PRCM_MNGMT);
522 conf = musb_readl(tbase, TUSB_DEV_CONF); 521 conf = musb_readl(tbase, TUSB_DEV_CONF);
523 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 522 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
524 523
525 if (is_on) { 524 if (is_on) {
526 timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE); 525 timer = OTG_TIMER_MS(OTG_TIME_A_WAIT_VRISE);
527 otg->default_a = 1; 526 otg->default_a = 1;
528 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; 527 musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
529 devctl |= MUSB_DEVCTL_SESSION; 528 devctl |= MUSB_DEVCTL_SESSION;
530 529
531 conf |= TUSB_DEV_CONF_USB_HOST_MODE; 530 conf |= TUSB_DEV_CONF_USB_HOST_MODE;
532 MUSB_HST_MODE(musb); 531 MUSB_HST_MODE(musb);
533 } else { 532 } else {
534 u32 otg_stat; 533 u32 otg_stat;
535 534
536 timer = 0; 535 timer = 0;
537 536
538 /* If ID pin is grounded, we want to be a_idle */ 537 /* If ID pin is grounded, we want to be a_idle */
539 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 538 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
540 if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) { 539 if (!(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) {
541 switch (musb->xceiv->state) { 540 switch (musb->xceiv->state) {
542 case OTG_STATE_A_WAIT_VRISE: 541 case OTG_STATE_A_WAIT_VRISE:
543 case OTG_STATE_A_WAIT_BCON: 542 case OTG_STATE_A_WAIT_BCON:
544 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL; 543 musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
545 break; 544 break;
546 case OTG_STATE_A_WAIT_VFALL: 545 case OTG_STATE_A_WAIT_VFALL:
547 musb->xceiv->state = OTG_STATE_A_IDLE; 546 musb->xceiv->state = OTG_STATE_A_IDLE;
548 break; 547 break;
549 default: 548 default:
550 musb->xceiv->state = OTG_STATE_A_IDLE; 549 musb->xceiv->state = OTG_STATE_A_IDLE;
551 } 550 }
552 musb->is_active = 0; 551 musb->is_active = 0;
553 otg->default_a = 1; 552 otg->default_a = 1;
554 MUSB_HST_MODE(musb); 553 MUSB_HST_MODE(musb);
555 } else { 554 } else {
556 musb->is_active = 0; 555 musb->is_active = 0;
557 otg->default_a = 0; 556 otg->default_a = 0;
558 musb->xceiv->state = OTG_STATE_B_IDLE; 557 musb->xceiv->state = OTG_STATE_B_IDLE;
559 MUSB_DEV_MODE(musb); 558 MUSB_DEV_MODE(musb);
560 } 559 }
561 560
562 devctl &= ~MUSB_DEVCTL_SESSION; 561 devctl &= ~MUSB_DEVCTL_SESSION;
563 conf &= ~TUSB_DEV_CONF_USB_HOST_MODE; 562 conf &= ~TUSB_DEV_CONF_USB_HOST_MODE;
564 } 563 }
565 prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN); 564 prcm &= ~(TUSB_PRCM_MNGMT_15_SW_EN | TUSB_PRCM_MNGMT_33_SW_EN);
566 565
567 musb_writel(tbase, TUSB_PRCM_MNGMT, prcm); 566 musb_writel(tbase, TUSB_PRCM_MNGMT, prcm);
568 musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer); 567 musb_writel(tbase, TUSB_DEV_OTG_TIMER, timer);
569 musb_writel(tbase, TUSB_DEV_CONF, conf); 568 musb_writel(tbase, TUSB_DEV_CONF, conf);
570 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); 569 musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
571 570
572 dev_dbg(musb->controller, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n", 571 dev_dbg(musb->controller, "VBUS %s, devctl %02x otg %3x conf %08x prcm %08x\n",
573 otg_state_string(musb->xceiv->state), 572 otg_state_string(musb->xceiv->state),
574 musb_readb(musb->mregs, MUSB_DEVCTL), 573 musb_readb(musb->mregs, MUSB_DEVCTL),
575 musb_readl(tbase, TUSB_DEV_OTG_STAT), 574 musb_readl(tbase, TUSB_DEV_OTG_STAT),
576 conf, prcm); 575 conf, prcm);
577 } 576 }
578 577
579 /* 578 /*
580 * Sets the mode to OTG, peripheral or host by changing the ID detection. 579 * Sets the mode to OTG, peripheral or host by changing the ID detection.
581 * Caller must take care of locking. 580 * Caller must take care of locking.
582 * 581 *
583 * Note that if a mini-A cable is plugged in, the ID line will stay down as 582 * Note that if a mini-A cable is plugged in, the ID line will stay down as
584 * the weak ID pull-up is not able to pull the ID up. 583 * the weak ID pull-up is not able to pull the ID up.
585 *
586 * REVISIT: It would be possible to add support for changing between host
587 * and peripheral modes in non-OTG configurations by reconfiguring hardware
588 * and then setting musb->board_mode. For now, only support OTG mode.
589 */ 584 */
590 static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode) 585 static int tusb_musb_set_mode(struct musb *musb, u8 musb_mode)
591 { 586 {
592 void __iomem *tbase = musb->ctrl_base; 587 void __iomem *tbase = musb->ctrl_base;
593 u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf; 588 u32 otg_stat, phy_otg_ctrl, phy_otg_ena, dev_conf;
594 589
595 if (musb->board_mode != MUSB_OTG) {
596 ERR("Changing mode currently only supported in OTG mode\n");
597 return -EINVAL;
598 }
599
600 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 590 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
601 phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL); 591 phy_otg_ctrl = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
602 phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); 592 phy_otg_ena = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
603 dev_conf = musb_readl(tbase, TUSB_DEV_CONF); 593 dev_conf = musb_readl(tbase, TUSB_DEV_CONF);
604 594
605 switch (musb_mode) { 595 switch (musb_mode) {
606 596
607 case MUSB_HOST: /* Disable PHY ID detect, ground ID */ 597 case MUSB_HOST: /* Disable PHY ID detect, ground ID */
608 phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; 598 phy_otg_ctrl &= ~TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
609 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; 599 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
610 dev_conf |= TUSB_DEV_CONF_ID_SEL; 600 dev_conf |= TUSB_DEV_CONF_ID_SEL;
611 dev_conf &= ~TUSB_DEV_CONF_SOFT_ID; 601 dev_conf &= ~TUSB_DEV_CONF_SOFT_ID;
612 break; 602 break;
613 case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */ 603 case MUSB_PERIPHERAL: /* Disable PHY ID detect, keep ID pull-up on */
614 phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; 604 phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
615 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; 605 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
616 dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); 606 dev_conf |= (TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
617 break; 607 break;
618 case MUSB_OTG: /* Use PHY ID detection */ 608 case MUSB_OTG: /* Use PHY ID detection */
619 phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; 609 phy_otg_ctrl |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
620 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; 610 phy_otg_ena |= TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
621 dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID); 611 dev_conf &= ~(TUSB_DEV_CONF_ID_SEL | TUSB_DEV_CONF_SOFT_ID);
622 break; 612 break;
623 613
624 default: 614 default:
625 dev_dbg(musb->controller, "Trying to set mode %i\n", musb_mode); 615 dev_dbg(musb->controller, "Trying to set mode %i\n", musb_mode);
626 return -EINVAL; 616 return -EINVAL;
627 } 617 }
628 618
629 musb_writel(tbase, TUSB_PHY_OTG_CTRL, 619 musb_writel(tbase, TUSB_PHY_OTG_CTRL,
630 TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl); 620 TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ctrl);
631 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, 621 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE,
632 TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena); 622 TUSB_PHY_OTG_CTRL_WRPROTECT | phy_otg_ena);
633 musb_writel(tbase, TUSB_DEV_CONF, dev_conf); 623 musb_writel(tbase, TUSB_DEV_CONF, dev_conf);
634 624
635 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 625 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
636 if ((musb_mode == MUSB_PERIPHERAL) && 626 if ((musb_mode == MUSB_PERIPHERAL) &&
637 !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS)) 627 !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS))
638 INFO("Cannot be peripheral with mini-A cable " 628 INFO("Cannot be peripheral with mini-A cable "
639 "otg_stat: %08x\n", otg_stat); 629 "otg_stat: %08x\n", otg_stat);
640 630
641 return 0; 631 return 0;
642 } 632 }
643 633
644 static inline unsigned long 634 static inline unsigned long
645 tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) 635 tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase)
646 { 636 {
647 u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT); 637 u32 otg_stat = musb_readl(tbase, TUSB_DEV_OTG_STAT);
648 unsigned long idle_timeout = 0; 638 unsigned long idle_timeout = 0;
649 struct usb_otg *otg = musb->xceiv->otg; 639 struct usb_otg *otg = musb->xceiv->otg;
650 640
651 /* ID pin */ 641 /* ID pin */
652 if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) { 642 if ((int_src & TUSB_INT_SRC_ID_STATUS_CHNG)) {
653 int default_a; 643 int default_a;
654 644
655 if (is_otg_enabled(musb)) 645 default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
656 default_a = !(otg_stat & TUSB_DEV_OTG_STAT_ID_STATUS);
657 else
658 default_a = is_host_enabled(musb);
659 dev_dbg(musb->controller, "Default-%c\n", default_a ? 'A' : 'B'); 646 dev_dbg(musb->controller, "Default-%c\n", default_a ? 'A' : 'B');
660 otg->default_a = default_a; 647 otg->default_a = default_a;
661 tusb_musb_set_vbus(musb, default_a); 648 tusb_musb_set_vbus(musb, default_a);
662 649
663 /* Don't allow idling immediately */ 650 /* Don't allow idling immediately */
664 if (default_a) 651 if (default_a)
665 idle_timeout = jiffies + (HZ * 3); 652 idle_timeout = jiffies + (HZ * 3);
666 } 653 }
667 654
668 /* VBUS state change */ 655 /* VBUS state change */
669 if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) { 656 if (int_src & TUSB_INT_SRC_VBUS_SENSE_CHNG) {
670 657
671 /* B-dev state machine: no vbus ~= disconnect */ 658 /* B-dev state machine: no vbus ~= disconnect */
672 if ((is_otg_enabled(musb) && !otg->default_a) 659 if (!otg->default_a) {
673 || !is_host_enabled(musb)) {
674 /* ? musb_root_disconnect(musb); */ 660 /* ? musb_root_disconnect(musb); */
675 musb->port1_status &= 661 musb->port1_status &=
676 ~(USB_PORT_STAT_CONNECTION 662 ~(USB_PORT_STAT_CONNECTION
677 | USB_PORT_STAT_ENABLE 663 | USB_PORT_STAT_ENABLE
678 | USB_PORT_STAT_LOW_SPEED 664 | USB_PORT_STAT_LOW_SPEED
679 | USB_PORT_STAT_HIGH_SPEED 665 | USB_PORT_STAT_HIGH_SPEED
680 | USB_PORT_STAT_TEST 666 | USB_PORT_STAT_TEST
681 ); 667 );
682 668
683 if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) { 669 if (otg_stat & TUSB_DEV_OTG_STAT_SESS_END) {
684 dev_dbg(musb->controller, "Forcing disconnect (no interrupt)\n"); 670 dev_dbg(musb->controller, "Forcing disconnect (no interrupt)\n");
685 if (musb->xceiv->state != OTG_STATE_B_IDLE) { 671 if (musb->xceiv->state != OTG_STATE_B_IDLE) {
686 /* INTR_DISCONNECT can hide... */ 672 /* INTR_DISCONNECT can hide... */
687 musb->xceiv->state = OTG_STATE_B_IDLE; 673 musb->xceiv->state = OTG_STATE_B_IDLE;
688 musb->int_usb |= MUSB_INTR_DISCONNECT; 674 musb->int_usb |= MUSB_INTR_DISCONNECT;
689 } 675 }
690 musb->is_active = 0; 676 musb->is_active = 0;
691 } 677 }
692 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", 678 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
693 otg_state_string(musb->xceiv->state), otg_stat); 679 otg_state_string(musb->xceiv->state), otg_stat);
694 idle_timeout = jiffies + (1 * HZ); 680 idle_timeout = jiffies + (1 * HZ);
695 schedule_work(&musb->irq_work); 681 schedule_work(&musb->irq_work);
696 682
697 } else /* A-dev state machine */ { 683 } else /* A-dev state machine */ {
698 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", 684 dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
699 otg_state_string(musb->xceiv->state), otg_stat); 685 otg_state_string(musb->xceiv->state), otg_stat);
700 686
701 switch (musb->xceiv->state) { 687 switch (musb->xceiv->state) {
702 case OTG_STATE_A_IDLE: 688 case OTG_STATE_A_IDLE:
703 dev_dbg(musb->controller, "Got SRP, turning on VBUS\n"); 689 dev_dbg(musb->controller, "Got SRP, turning on VBUS\n");
704 musb_platform_set_vbus(musb, 1); 690 musb_platform_set_vbus(musb, 1);
705 691
706 /* CONNECT can wake if a_wait_bcon is set */ 692 /* CONNECT can wake if a_wait_bcon is set */
707 if (musb->a_wait_bcon != 0) 693 if (musb->a_wait_bcon != 0)
708 musb->is_active = 0; 694 musb->is_active = 0;
709 else 695 else
710 musb->is_active = 1; 696 musb->is_active = 1;
711 697
712 /* 698 /*
713 * OPT FS A TD.4.6 needs a few seconds for 699 * OPT FS A TD.4.6 needs a few seconds for
714 * A_WAIT_VRISE 700 * A_WAIT_VRISE
715 */ 701 */
716 idle_timeout = jiffies + (2 * HZ); 702 idle_timeout = jiffies + (2 * HZ);
717 703
718 break; 704 break;
719 case OTG_STATE_A_WAIT_VRISE: 705 case OTG_STATE_A_WAIT_VRISE:
720 /* ignore; A-session-valid < VBUS_VALID/2, 706 /* ignore; A-session-valid < VBUS_VALID/2,
721 * we monitor this with the timer 707 * we monitor this with the timer
722 */ 708 */
723 break; 709 break;
724 case OTG_STATE_A_WAIT_VFALL: 710 case OTG_STATE_A_WAIT_VFALL:
725 /* REVISIT this irq triggers during short 711 /* REVISIT this irq triggers during short
726 * spikes caused by enumeration ... 712 * spikes caused by enumeration ...
727 */ 713 */
728 if (musb->vbuserr_retry) { 714 if (musb->vbuserr_retry) {
729 musb->vbuserr_retry--; 715 musb->vbuserr_retry--;
730 tusb_musb_set_vbus(musb, 1); 716 tusb_musb_set_vbus(musb, 1);
731 } else { 717 } else {
732 musb->vbuserr_retry 718 musb->vbuserr_retry
733 = VBUSERR_RETRY_COUNT; 719 = VBUSERR_RETRY_COUNT;
734 tusb_musb_set_vbus(musb, 0); 720 tusb_musb_set_vbus(musb, 0);
735 } 721 }
736 break; 722 break;
737 default: 723 default:
738 break; 724 break;
739 } 725 }
740 } 726 }
741 } 727 }
742 728
743 /* OTG timer expiration */ 729 /* OTG timer expiration */
744 if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) { 730 if (int_src & TUSB_INT_SRC_OTG_TIMEOUT) {
745 u8 devctl; 731 u8 devctl;
746 732
747 dev_dbg(musb->controller, "%s timer, %03x\n", 733 dev_dbg(musb->controller, "%s timer, %03x\n",
748 otg_state_string(musb->xceiv->state), otg_stat); 734 otg_state_string(musb->xceiv->state), otg_stat);
749 735
750 switch (musb->xceiv->state) { 736 switch (musb->xceiv->state) {
751 case OTG_STATE_A_WAIT_VRISE: 737 case OTG_STATE_A_WAIT_VRISE:
752 /* VBUS has probably been valid for a while now, 738 /* VBUS has probably been valid for a while now,
753 * but may well have bounced out of range a bit 739 * but may well have bounced out of range a bit
754 */ 740 */
755 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); 741 devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
756 if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) { 742 if (otg_stat & TUSB_DEV_OTG_STAT_VBUS_VALID) {
757 if ((devctl & MUSB_DEVCTL_VBUS) 743 if ((devctl & MUSB_DEVCTL_VBUS)
758 != MUSB_DEVCTL_VBUS) { 744 != MUSB_DEVCTL_VBUS) {
759 dev_dbg(musb->controller, "devctl %02x\n", devctl); 745 dev_dbg(musb->controller, "devctl %02x\n", devctl);
760 break; 746 break;
761 } 747 }
762 musb->xceiv->state = OTG_STATE_A_WAIT_BCON; 748 musb->xceiv->state = OTG_STATE_A_WAIT_BCON;
763 musb->is_active = 0; 749 musb->is_active = 0;
764 idle_timeout = jiffies 750 idle_timeout = jiffies
765 + msecs_to_jiffies(musb->a_wait_bcon); 751 + msecs_to_jiffies(musb->a_wait_bcon);
766 } else { 752 } else {
767 /* REVISIT report overcurrent to hub? */ 753 /* REVISIT report overcurrent to hub? */
768 ERR("vbus too slow, devctl %02x\n", devctl); 754 ERR("vbus too slow, devctl %02x\n", devctl);
769 tusb_musb_set_vbus(musb, 0); 755 tusb_musb_set_vbus(musb, 0);
770 } 756 }
771 break; 757 break;
772 case OTG_STATE_A_WAIT_BCON: 758 case OTG_STATE_A_WAIT_BCON:
773 if (musb->a_wait_bcon != 0) 759 if (musb->a_wait_bcon != 0)
774 idle_timeout = jiffies 760 idle_timeout = jiffies
775 + msecs_to_jiffies(musb->a_wait_bcon); 761 + msecs_to_jiffies(musb->a_wait_bcon);
776 break; 762 break;
777 case OTG_STATE_A_SUSPEND: 763 case OTG_STATE_A_SUSPEND:
778 break; 764 break;
779 case OTG_STATE_B_WAIT_ACON: 765 case OTG_STATE_B_WAIT_ACON:
780 break; 766 break;
781 default: 767 default:
782 break; 768 break;
783 } 769 }
784 } 770 }
785 schedule_work(&musb->irq_work); 771 schedule_work(&musb->irq_work);
786 772
787 return idle_timeout; 773 return idle_timeout;
788 } 774 }
789 775
790 static irqreturn_t tusb_musb_interrupt(int irq, void *__hci) 776 static irqreturn_t tusb_musb_interrupt(int irq, void *__hci)
791 { 777 {
792 struct musb *musb = __hci; 778 struct musb *musb = __hci;
793 void __iomem *tbase = musb->ctrl_base; 779 void __iomem *tbase = musb->ctrl_base;
794 unsigned long flags, idle_timeout = 0; 780 unsigned long flags, idle_timeout = 0;
795 u32 int_mask, int_src; 781 u32 int_mask, int_src;
796 782
797 spin_lock_irqsave(&musb->lock, flags); 783 spin_lock_irqsave(&musb->lock, flags);
798 784
799 /* Mask all interrupts to allow using both edge and level GPIO irq */ 785 /* Mask all interrupts to allow using both edge and level GPIO irq */
800 int_mask = musb_readl(tbase, TUSB_INT_MASK); 786 int_mask = musb_readl(tbase, TUSB_INT_MASK);
801 musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); 787 musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
802 788
803 int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS; 789 int_src = musb_readl(tbase, TUSB_INT_SRC) & ~TUSB_INT_SRC_RESERVED_BITS;
804 dev_dbg(musb->controller, "TUSB IRQ %08x\n", int_src); 790 dev_dbg(musb->controller, "TUSB IRQ %08x\n", int_src);
805 791
806 musb->int_usb = (u8) int_src; 792 musb->int_usb = (u8) int_src;
807 793
808 /* Acknowledge wake-up source interrupts */ 794 /* Acknowledge wake-up source interrupts */
809 if (int_src & TUSB_INT_SRC_DEV_WAKEUP) { 795 if (int_src & TUSB_INT_SRC_DEV_WAKEUP) {
810 u32 reg; 796 u32 reg;
811 u32 i; 797 u32 i;
812 798
813 if (tusb_get_revision(musb) == TUSB_REV_30) 799 if (tusb_get_revision(musb) == TUSB_REV_30)
814 tusb_wbus_quirk(musb, 0); 800 tusb_wbus_quirk(musb, 0);
815 801
816 /* there are issues re-locking the PLL on wakeup ... */ 802 /* there are issues re-locking the PLL on wakeup ... */
817 803
818 /* work around issue 8 */ 804 /* work around issue 8 */
819 for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) { 805 for (i = 0xf7f7f7; i > 0xf7f7f7 - 1000; i--) {
820 musb_writel(tbase, TUSB_SCRATCH_PAD, 0); 806 musb_writel(tbase, TUSB_SCRATCH_PAD, 0);
821 musb_writel(tbase, TUSB_SCRATCH_PAD, i); 807 musb_writel(tbase, TUSB_SCRATCH_PAD, i);
822 reg = musb_readl(tbase, TUSB_SCRATCH_PAD); 808 reg = musb_readl(tbase, TUSB_SCRATCH_PAD);
823 if (reg == i) 809 if (reg == i)
824 break; 810 break;
825 dev_dbg(musb->controller, "TUSB NOR not ready\n"); 811 dev_dbg(musb->controller, "TUSB NOR not ready\n");
826 } 812 }
827 813
828 /* work around issue 13 (2nd half) */ 814 /* work around issue 13 (2nd half) */
829 tusb_set_clock_source(musb, 1); 815 tusb_set_clock_source(musb, 1);
830 816
831 reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE); 817 reg = musb_readl(tbase, TUSB_PRCM_WAKEUP_SOURCE);
832 musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); 818 musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
833 if (reg & ~TUSB_PRCM_WNORCS) { 819 if (reg & ~TUSB_PRCM_WNORCS) {
834 musb->is_active = 1; 820 musb->is_active = 1;
835 schedule_work(&musb->irq_work); 821 schedule_work(&musb->irq_work);
836 } 822 }
837 dev_dbg(musb->controller, "wake %sactive %02x\n", 823 dev_dbg(musb->controller, "wake %sactive %02x\n",
838 musb->is_active ? "" : "in", reg); 824 musb->is_active ? "" : "in", reg);
839 825
840 /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */ 826 /* REVISIT host side TUSB_PRCM_WHOSTDISCON, TUSB_PRCM_WBUS */
841 } 827 }
842 828
843 if (int_src & TUSB_INT_SRC_USB_IP_CONN) 829 if (int_src & TUSB_INT_SRC_USB_IP_CONN)
844 del_timer(&musb_idle_timer); 830 del_timer(&musb_idle_timer);
845 831
846 /* OTG state change reports (annoyingly) not issued by Mentor core */ 832 /* OTG state change reports (annoyingly) not issued by Mentor core */
847 if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG 833 if (int_src & (TUSB_INT_SRC_VBUS_SENSE_CHNG
848 | TUSB_INT_SRC_OTG_TIMEOUT 834 | TUSB_INT_SRC_OTG_TIMEOUT
849 | TUSB_INT_SRC_ID_STATUS_CHNG)) 835 | TUSB_INT_SRC_ID_STATUS_CHNG))
850 idle_timeout = tusb_otg_ints(musb, int_src, tbase); 836 idle_timeout = tusb_otg_ints(musb, int_src, tbase);
851 837
852 /* TX dma callback must be handled here, RX dma callback is 838 /* TX dma callback must be handled here, RX dma callback is
853 * handled in tusb_omap_dma_cb. 839 * handled in tusb_omap_dma_cb.
854 */ 840 */
855 if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) { 841 if ((int_src & TUSB_INT_SRC_TXRX_DMA_DONE)) {
856 u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC); 842 u32 dma_src = musb_readl(tbase, TUSB_DMA_INT_SRC);
857 u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK); 843 u32 real_dma_src = musb_readl(tbase, TUSB_DMA_INT_MASK);
858 844
859 dev_dbg(musb->controller, "DMA IRQ %08x\n", dma_src); 845 dev_dbg(musb->controller, "DMA IRQ %08x\n", dma_src);
860 real_dma_src = ~real_dma_src & dma_src; 846 real_dma_src = ~real_dma_src & dma_src;
861 if (tusb_dma_omap() && real_dma_src) { 847 if (tusb_dma_omap() && real_dma_src) {
862 int tx_source = (real_dma_src & 0xffff); 848 int tx_source = (real_dma_src & 0xffff);
863 int i; 849 int i;
864 850
865 for (i = 1; i <= 15; i++) { 851 for (i = 1; i <= 15; i++) {
866 if (tx_source & (1 << i)) { 852 if (tx_source & (1 << i)) {
867 dev_dbg(musb->controller, "completing ep%i %s\n", i, "tx"); 853 dev_dbg(musb->controller, "completing ep%i %s\n", i, "tx");
868 musb_dma_completion(musb, i, 1); 854 musb_dma_completion(musb, i, 1);
869 } 855 }
870 } 856 }
871 } 857 }
872 musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src); 858 musb_writel(tbase, TUSB_DMA_INT_CLEAR, dma_src);
873 } 859 }
874 860
875 /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */ 861 /* EP interrupts. In OCP mode tusb6010 mirrors the MUSB interrupts */
876 if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) { 862 if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX)) {
877 u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC); 863 u32 musb_src = musb_readl(tbase, TUSB_USBIP_INT_SRC);
878 864
879 musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src); 865 musb_writel(tbase, TUSB_USBIP_INT_CLEAR, musb_src);
880 musb->int_rx = (((musb_src >> 16) & 0xffff) << 1); 866 musb->int_rx = (((musb_src >> 16) & 0xffff) << 1);
881 musb->int_tx = (musb_src & 0xffff); 867 musb->int_tx = (musb_src & 0xffff);
882 } else { 868 } else {
883 musb->int_rx = 0; 869 musb->int_rx = 0;
884 musb->int_tx = 0; 870 musb->int_tx = 0;
885 } 871 }
886 872
887 if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff)) 873 if (int_src & (TUSB_INT_SRC_USB_IP_TX | TUSB_INT_SRC_USB_IP_RX | 0xff))
888 musb_interrupt(musb); 874 musb_interrupt(musb);
889 875
890 /* Acknowledge TUSB interrupts. Clear only non-reserved bits */ 876 /* Acknowledge TUSB interrupts. Clear only non-reserved bits */
891 musb_writel(tbase, TUSB_INT_SRC_CLEAR, 877 musb_writel(tbase, TUSB_INT_SRC_CLEAR,
892 int_src & ~TUSB_INT_MASK_RESERVED_BITS); 878 int_src & ~TUSB_INT_MASK_RESERVED_BITS);
893 879
894 tusb_musb_try_idle(musb, idle_timeout); 880 tusb_musb_try_idle(musb, idle_timeout);
895 881
896 musb_writel(tbase, TUSB_INT_MASK, int_mask); 882 musb_writel(tbase, TUSB_INT_MASK, int_mask);
897 spin_unlock_irqrestore(&musb->lock, flags); 883 spin_unlock_irqrestore(&musb->lock, flags);
898 884
899 return IRQ_HANDLED; 885 return IRQ_HANDLED;
900 } 886 }
901 887
902 static int dma_off; 888 static int dma_off;
903 889
904 /* 890 /*
905 * Enables TUSB6010. Caller must take care of locking. 891 * Enables TUSB6010. Caller must take care of locking.
906 * REVISIT: 892 * REVISIT:
907 * - Check what is unnecessary in MGC_HdrcStart() 893 * - Check what is unnecessary in MGC_HdrcStart()
908 */ 894 */
909 static void tusb_musb_enable(struct musb *musb) 895 static void tusb_musb_enable(struct musb *musb)
910 { 896 {
911 void __iomem *tbase = musb->ctrl_base; 897 void __iomem *tbase = musb->ctrl_base;
912 898
913 /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF. 899 /* Setup TUSB6010 main interrupt mask. Enable all interrupts except SOF.
914 * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */ 900 * REVISIT: Enable and deal with TUSB_INT_SRC_USB_IP_SOF */
915 musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF); 901 musb_writel(tbase, TUSB_INT_MASK, TUSB_INT_SRC_USB_IP_SOF);
916 902
917 /* Setup TUSB interrupt, disable DMA and GPIO interrupts */ 903 /* Setup TUSB interrupt, disable DMA and GPIO interrupts */
918 musb_writel(tbase, TUSB_USBIP_INT_MASK, 0); 904 musb_writel(tbase, TUSB_USBIP_INT_MASK, 0);
919 musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); 905 musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
920 musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); 906 musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
921 907
922 /* Clear all subsystem interrupts */ 908 /* Clear all subsystem interrupts */
923 musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff); 909 musb_writel(tbase, TUSB_USBIP_INT_CLEAR, 0x7fffffff);
924 musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff); 910 musb_writel(tbase, TUSB_DMA_INT_CLEAR, 0x7fffffff);
925 musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff); 911 musb_writel(tbase, TUSB_GPIO_INT_CLEAR, 0x1ff);
926 912
927 /* Acknowledge pending interrupt(s) */ 913 /* Acknowledge pending interrupt(s) */
928 musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS); 914 musb_writel(tbase, TUSB_INT_SRC_CLEAR, ~TUSB_INT_MASK_RESERVED_BITS);
929 915
930 /* Only 0 clock cycles for minimum interrupt de-assertion time and 916 /* Only 0 clock cycles for minimum interrupt de-assertion time and
931 * interrupt polarity active low seems to work reliably here */ 917 * interrupt polarity active low seems to work reliably here */
932 musb_writel(tbase, TUSB_INT_CTRL_CONF, 918 musb_writel(tbase, TUSB_INT_CTRL_CONF,
933 TUSB_INT_CTRL_CONF_INT_RELCYC(0)); 919 TUSB_INT_CTRL_CONF_INT_RELCYC(0));
934 920
935 irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW); 921 irq_set_irq_type(musb->nIrq, IRQ_TYPE_LEVEL_LOW);
936 922
937 /* maybe force into the Default-A OTG state machine */ 923 /* maybe force into the Default-A OTG state machine */
938 if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT) 924 if (!(musb_readl(tbase, TUSB_DEV_OTG_STAT)
939 & TUSB_DEV_OTG_STAT_ID_STATUS)) 925 & TUSB_DEV_OTG_STAT_ID_STATUS))
940 musb_writel(tbase, TUSB_INT_SRC_SET, 926 musb_writel(tbase, TUSB_INT_SRC_SET,
941 TUSB_INT_SRC_ID_STATUS_CHNG); 927 TUSB_INT_SRC_ID_STATUS_CHNG);
942 928
943 if (is_dma_capable() && dma_off) 929 if (is_dma_capable() && dma_off)
944 printk(KERN_WARNING "%s %s: dma not reactivated\n", 930 printk(KERN_WARNING "%s %s: dma not reactivated\n",
945 __FILE__, __func__); 931 __FILE__, __func__);
946 else 932 else
947 dma_off = 1; 933 dma_off = 1;
948 } 934 }
949 935
950 /* 936 /*
951 * Disables TUSB6010. Caller must take care of locking. 937 * Disables TUSB6010. Caller must take care of locking.
952 */ 938 */
953 static void tusb_musb_disable(struct musb *musb) 939 static void tusb_musb_disable(struct musb *musb)
954 { 940 {
955 void __iomem *tbase = musb->ctrl_base; 941 void __iomem *tbase = musb->ctrl_base;
956 942
957 /* FIXME stop DMA, IRQs, timers, ... */ 943 /* FIXME stop DMA, IRQs, timers, ... */
958 944
959 /* disable all IRQs */ 945 /* disable all IRQs */
960 musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS); 946 musb_writel(tbase, TUSB_INT_MASK, ~TUSB_INT_MASK_RESERVED_BITS);
961 musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff); 947 musb_writel(tbase, TUSB_USBIP_INT_MASK, 0x7fffffff);
962 musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff); 948 musb_writel(tbase, TUSB_DMA_INT_MASK, 0x7fffffff);
963 musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff); 949 musb_writel(tbase, TUSB_GPIO_INT_MASK, 0x1ff);
964 950
965 del_timer(&musb_idle_timer); 951 del_timer(&musb_idle_timer);
966 952
967 if (is_dma_capable() && !dma_off) { 953 if (is_dma_capable() && !dma_off) {
968 printk(KERN_WARNING "%s %s: dma still active\n", 954 printk(KERN_WARNING "%s %s: dma still active\n",
969 __FILE__, __func__); 955 __FILE__, __func__);
970 dma_off = 1; 956 dma_off = 1;
971 } 957 }
972 } 958 }
973 959
974 /* 960 /*
975 * Sets up TUSB6010 CPU interface specific signals and registers 961 * Sets up TUSB6010 CPU interface specific signals and registers
976 * Note: Settings optimized for OMAP24xx 962 * Note: Settings optimized for OMAP24xx
977 */ 963 */
978 static void tusb_setup_cpu_interface(struct musb *musb) 964 static void tusb_setup_cpu_interface(struct musb *musb)
979 { 965 {
980 void __iomem *tbase = musb->ctrl_base; 966 void __iomem *tbase = musb->ctrl_base;
981 967
982 /* 968 /*
983 * Disable GPIO[5:0] pullups (used as output DMA requests) 969 * Disable GPIO[5:0] pullups (used as output DMA requests)
984 * Don't disable GPIO[7:6] as they are needed for wake-up. 970 * Don't disable GPIO[7:6] as they are needed for wake-up.
985 */ 971 */
986 musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F); 972 musb_writel(tbase, TUSB_PULLUP_1_CTRL, 0x0000003F);
987 973
988 /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */ 974 /* Disable all pullups on NOR IF, DMAREQ0 and DMAREQ1 */
989 musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF); 975 musb_writel(tbase, TUSB_PULLUP_2_CTRL, 0x01FFFFFF);
990 976
991 /* Turn GPIO[5:0] to DMAREQ[5:0] signals */ 977 /* Turn GPIO[5:0] to DMAREQ[5:0] signals */
992 musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f)); 978 musb_writel(tbase, TUSB_GPIO_CONF, TUSB_GPIO_CONF_DMAREQ(0x3f));
993 979
994 /* Burst size 16x16 bits, all six DMA requests enabled, DMA request 980 /* Burst size 16x16 bits, all six DMA requests enabled, DMA request
995 * de-assertion time 2 system clocks p 62 */ 981 * de-assertion time 2 system clocks p 62 */
996 musb_writel(tbase, TUSB_DMA_REQ_CONF, 982 musb_writel(tbase, TUSB_DMA_REQ_CONF,
997 TUSB_DMA_REQ_CONF_BURST_SIZE(2) | 983 TUSB_DMA_REQ_CONF_BURST_SIZE(2) |
998 TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) | 984 TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f) |
999 TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2)); 985 TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));
1000 986
1001 /* Set 0 wait count for synchronous burst access */ 987 /* Set 0 wait count for synchronous burst access */
1002 musb_writel(tbase, TUSB_WAIT_COUNT, 1); 988 musb_writel(tbase, TUSB_WAIT_COUNT, 1);
1003 } 989 }
1004 990
1005 static int tusb_musb_start(struct musb *musb) 991 static int tusb_musb_start(struct musb *musb)
1006 { 992 {
1007 void __iomem *tbase = musb->ctrl_base; 993 void __iomem *tbase = musb->ctrl_base;
1008 int ret = 0; 994 int ret = 0;
1009 unsigned long flags; 995 unsigned long flags;
1010 u32 reg; 996 u32 reg;
1011 997
1012 if (musb->board_set_power) 998 if (musb->board_set_power)
1013 ret = musb->board_set_power(1); 999 ret = musb->board_set_power(1);
1014 if (ret != 0) { 1000 if (ret != 0) {
1015 printk(KERN_ERR "tusb: Cannot enable TUSB6010\n"); 1001 printk(KERN_ERR "tusb: Cannot enable TUSB6010\n");
1016 return ret; 1002 return ret;
1017 } 1003 }
1018 1004
1019 spin_lock_irqsave(&musb->lock, flags); 1005 spin_lock_irqsave(&musb->lock, flags);
1020 1006
1021 if (musb_readl(tbase, TUSB_PROD_TEST_RESET) != 1007 if (musb_readl(tbase, TUSB_PROD_TEST_RESET) !=
1022 TUSB_PROD_TEST_RESET_VAL) { 1008 TUSB_PROD_TEST_RESET_VAL) {
1023 printk(KERN_ERR "tusb: Unable to detect TUSB6010\n"); 1009 printk(KERN_ERR "tusb: Unable to detect TUSB6010\n");
1024 goto err; 1010 goto err;
1025 } 1011 }
1026 1012
1027 ret = tusb_print_revision(musb); 1013 ret = tusb_print_revision(musb);
1028 if (ret < 2) { 1014 if (ret < 2) {
1029 printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n", 1015 printk(KERN_ERR "tusb: Unsupported TUSB6010 revision %i\n",
1030 ret); 1016 ret);
1031 goto err; 1017 goto err;
1032 } 1018 }
1033 1019
1034 /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when 1020 /* The uint bit for "USB non-PDR interrupt enable" has to be 1 when
1035 * NOR FLASH interface is used */ 1021 * NOR FLASH interface is used */
1036 musb_writel(tbase, TUSB_VLYNQ_CTRL, 8); 1022 musb_writel(tbase, TUSB_VLYNQ_CTRL, 8);
1037 1023
1038 /* Select PHY free running 60MHz as a system clock */ 1024 /* Select PHY free running 60MHz as a system clock */
1039 tusb_set_clock_source(musb, 1); 1025 tusb_set_clock_source(musb, 1);
1040 1026
1041 /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for 1027 /* VBus valid timer 1us, disable DFT/Debug and VLYNQ clocks for
1042 * power saving, enable VBus detect and session end comparators, 1028 * power saving, enable VBus detect and session end comparators,
1043 * enable IDpullup, enable VBus charging */ 1029 * enable IDpullup, enable VBus charging */
1044 musb_writel(tbase, TUSB_PRCM_MNGMT, 1030 musb_writel(tbase, TUSB_PRCM_MNGMT,
1045 TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) | 1031 TUSB_PRCM_MNGMT_VBUS_VALID_TIMER(0xa) |
1046 TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN | 1032 TUSB_PRCM_MNGMT_VBUS_VALID_FLT_EN |
1047 TUSB_PRCM_MNGMT_OTG_SESS_END_EN | 1033 TUSB_PRCM_MNGMT_OTG_SESS_END_EN |
1048 TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN | 1034 TUSB_PRCM_MNGMT_OTG_VBUS_DET_EN |
1049 TUSB_PRCM_MNGMT_OTG_ID_PULLUP); 1035 TUSB_PRCM_MNGMT_OTG_ID_PULLUP);
1050 tusb_setup_cpu_interface(musb); 1036 tusb_setup_cpu_interface(musb);
1051 1037
1052 /* simplify: always sense/pullup ID pins, as if in OTG mode */ 1038 /* simplify: always sense/pullup ID pins, as if in OTG mode */
1053 reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE); 1039 reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL_ENABLE);
1054 reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; 1040 reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
1055 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg); 1041 musb_writel(tbase, TUSB_PHY_OTG_CTRL_ENABLE, reg);
1056 1042
1057 reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL); 1043 reg = musb_readl(tbase, TUSB_PHY_OTG_CTRL);
1058 reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP; 1044 reg |= TUSB_PHY_OTG_CTRL_WRPROTECT | TUSB_PHY_OTG_CTRL_OTG_ID_PULLUP;
1059 musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg); 1045 musb_writel(tbase, TUSB_PHY_OTG_CTRL, reg);
1060 1046
1061 spin_unlock_irqrestore(&musb->lock, flags); 1047 spin_unlock_irqrestore(&musb->lock, flags);
1062 1048
1063 return 0; 1049 return 0;
1064 1050
1065 err: 1051 err:
1066 spin_unlock_irqrestore(&musb->lock, flags); 1052 spin_unlock_irqrestore(&musb->lock, flags);
1067 1053
1068 if (musb->board_set_power) 1054 if (musb->board_set_power)
1069 musb->board_set_power(0); 1055 musb->board_set_power(0);
1070 1056
1071 return -ENODEV; 1057 return -ENODEV;
1072 } 1058 }
1073 1059
1074 static int tusb_musb_init(struct musb *musb) 1060 static int tusb_musb_init(struct musb *musb)
1075 { 1061 {
1076 struct platform_device *pdev; 1062 struct platform_device *pdev;
1077 struct resource *mem; 1063 struct resource *mem;
1078 void __iomem *sync = NULL; 1064 void __iomem *sync = NULL;
1079 int ret; 1065 int ret;
1080 1066
1081 usb_nop_xceiv_register(); 1067 usb_nop_xceiv_register();
1082 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2); 1068 musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
1083 if (IS_ERR_OR_NULL(musb->xceiv)) 1069 if (IS_ERR_OR_NULL(musb->xceiv))
1084 return -ENODEV; 1070 return -ENODEV;
1085 1071
1086 pdev = to_platform_device(musb->controller); 1072 pdev = to_platform_device(musb->controller);
1087 1073
1088 /* dma address for async dma */ 1074 /* dma address for async dma */
1089 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1075 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1090 musb->async = mem->start; 1076 musb->async = mem->start;
1091 1077
1092 /* dma address for sync dma */ 1078 /* dma address for sync dma */
1093 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1079 mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1094 if (!mem) { 1080 if (!mem) {
1095 pr_debug("no sync dma resource?\n"); 1081 pr_debug("no sync dma resource?\n");
1096 ret = -ENODEV; 1082 ret = -ENODEV;
1097 goto done; 1083 goto done;
1098 } 1084 }
1099 musb->sync = mem->start; 1085 musb->sync = mem->start;
1100 1086
1101 sync = ioremap(mem->start, resource_size(mem)); 1087 sync = ioremap(mem->start, resource_size(mem));
1102 if (!sync) { 1088 if (!sync) {
1103 pr_debug("ioremap for sync failed\n"); 1089 pr_debug("ioremap for sync failed\n");
1104 ret = -ENOMEM; 1090 ret = -ENOMEM;
1105 goto done; 1091 goto done;
1106 } 1092 }
1107 musb->sync_va = sync; 1093 musb->sync_va = sync;
1108 1094
1109 /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400, 1095 /* Offsets from base: VLYNQ at 0x000, MUSB regs at 0x400,
1110 * FIFOs at 0x600, TUSB at 0x800 1096 * FIFOs at 0x600, TUSB at 0x800
1111 */ 1097 */
1112 musb->mregs += TUSB_BASE_OFFSET; 1098 musb->mregs += TUSB_BASE_OFFSET;
1113 1099
1114 ret = tusb_musb_start(musb); 1100 ret = tusb_musb_start(musb);
1115 if (ret) { 1101 if (ret) {
1116 printk(KERN_ERR "Could not start tusb6010 (%d)\n", 1102 printk(KERN_ERR "Could not start tusb6010 (%d)\n",
1117 ret); 1103 ret);
1118 goto done; 1104 goto done;
1119 } 1105 }
1120 musb->isr = tusb_musb_interrupt; 1106 musb->isr = tusb_musb_interrupt;
1121 1107
1122 if (is_peripheral_enabled(musb)) { 1108 musb->xceiv->set_power = tusb_draw_power;
1123 musb->xceiv->set_power = tusb_draw_power; 1109 the_musb = musb;
1124 the_musb = musb;
1125 }
1126 1110
1127 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); 1111 setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
1128 1112
1129 done: 1113 done:
1130 if (ret < 0) { 1114 if (ret < 0) {
1131 if (sync) 1115 if (sync)
1132 iounmap(sync); 1116 iounmap(sync);
1133 1117
1134 usb_put_phy(musb->xceiv); 1118 usb_put_phy(musb->xceiv);
1135 usb_nop_xceiv_unregister(); 1119 usb_nop_xceiv_unregister();
1136 } 1120 }
1137 return ret; 1121 return ret;
1138 } 1122 }
1139 1123
1140 static int tusb_musb_exit(struct musb *musb) 1124 static int tusb_musb_exit(struct musb *musb)
1141 { 1125 {
1142 del_timer_sync(&musb_idle_timer); 1126 del_timer_sync(&musb_idle_timer);
1143 the_musb = NULL; 1127 the_musb = NULL;
1144 1128
1145 if (musb->board_set_power) 1129 if (musb->board_set_power)
1146 musb->board_set_power(0); 1130 musb->board_set_power(0);
1147 1131
1148 iounmap(musb->sync_va); 1132 iounmap(musb->sync_va);
1149 1133
1150 usb_put_phy(musb->xceiv); 1134 usb_put_phy(musb->xceiv);
1151 usb_nop_xceiv_unregister(); 1135 usb_nop_xceiv_unregister();
1152 return 0; 1136 return 0;
1153 } 1137 }
1154 1138
1155 static const struct musb_platform_ops tusb_ops = { 1139 static const struct musb_platform_ops tusb_ops = {
1156 .init = tusb_musb_init, 1140 .init = tusb_musb_init,
1157 .exit = tusb_musb_exit, 1141 .exit = tusb_musb_exit,
1158 1142
1159 .enable = tusb_musb_enable, 1143 .enable = tusb_musb_enable,
1160 .disable = tusb_musb_disable, 1144 .disable = tusb_musb_disable,
1161 1145
1162 .set_mode = tusb_musb_set_mode, 1146 .set_mode = tusb_musb_set_mode,
1163 .try_idle = tusb_musb_try_idle, 1147 .try_idle = tusb_musb_try_idle,
1164 1148
1165 .vbus_status = tusb_musb_vbus_status, 1149 .vbus_status = tusb_musb_vbus_status,
1166 .set_vbus = tusb_musb_set_vbus, 1150 .set_vbus = tusb_musb_set_vbus,
1167 }; 1151 };
1168 1152
1169 static u64 tusb_dmamask = DMA_BIT_MASK(32); 1153 static u64 tusb_dmamask = DMA_BIT_MASK(32);
1170 1154
1171 static int __devinit tusb_probe(struct platform_device *pdev) 1155 static int __devinit tusb_probe(struct platform_device *pdev)
1172 { 1156 {
1173 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 1157 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
1174 struct platform_device *musb; 1158 struct platform_device *musb;
1175 struct tusb6010_glue *glue; 1159 struct tusb6010_glue *glue;
1176 1160
1177 int ret = -ENOMEM; 1161 int ret = -ENOMEM;
1178 1162
1179 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 1163 glue = kzalloc(sizeof(*glue), GFP_KERNEL);
1180 if (!glue) { 1164 if (!glue) {
1181 dev_err(&pdev->dev, "failed to allocate glue context\n"); 1165 dev_err(&pdev->dev, "failed to allocate glue context\n");
1182 goto err0; 1166 goto err0;
1183 } 1167 }
1184 1168
1185 musb = platform_device_alloc("musb-hdrc", -1); 1169 musb = platform_device_alloc("musb-hdrc", -1);
1186 if (!musb) { 1170 if (!musb) {
1187 dev_err(&pdev->dev, "failed to allocate musb device\n"); 1171 dev_err(&pdev->dev, "failed to allocate musb device\n");
1188 goto err1; 1172 goto err1;
1189 } 1173 }
1190 1174
1191 musb->dev.parent = &pdev->dev; 1175 musb->dev.parent = &pdev->dev;
1192 musb->dev.dma_mask = &tusb_dmamask; 1176 musb->dev.dma_mask = &tusb_dmamask;
1193 musb->dev.coherent_dma_mask = tusb_dmamask; 1177 musb->dev.coherent_dma_mask = tusb_dmamask;
1194 1178
1195 glue->dev = &pdev->dev; 1179 glue->dev = &pdev->dev;
1196 glue->musb = musb; 1180 glue->musb = musb;
1197 1181
1198 pdata->platform_ops = &tusb_ops; 1182 pdata->platform_ops = &tusb_ops;
1199 1183
1200 platform_set_drvdata(pdev, glue); 1184 platform_set_drvdata(pdev, glue);
1201 1185
1202 ret = platform_device_add_resources(musb, pdev->resource, 1186 ret = platform_device_add_resources(musb, pdev->resource,
1203 pdev->num_resources); 1187 pdev->num_resources);
1204 if (ret) { 1188 if (ret) {
1205 dev_err(&pdev->dev, "failed to add resources\n"); 1189 dev_err(&pdev->dev, "failed to add resources\n");
1206 goto err2; 1190 goto err2;
1207 } 1191 }
1208 1192
1209 ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); 1193 ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
1210 if (ret) { 1194 if (ret) {
1211 dev_err(&pdev->dev, "failed to add platform_data\n"); 1195 dev_err(&pdev->dev, "failed to add platform_data\n");
1212 goto err2; 1196 goto err2;
1213 } 1197 }
1214 1198
1215 ret = platform_device_add(musb); 1199 ret = platform_device_add(musb);
1216 if (ret) { 1200 if (ret) {
1217 dev_err(&pdev->dev, "failed to register musb device\n"); 1201 dev_err(&pdev->dev, "failed to register musb device\n");
1218 goto err1; 1202 goto err1;
1219 } 1203 }
1220 1204
1221 return 0; 1205 return 0;
1222 1206
1223 err2: 1207 err2:
1224 platform_device_put(musb); 1208 platform_device_put(musb);
1225 1209
1226 err1: 1210 err1:
1227 kfree(glue); 1211 kfree(glue);
1228 1212
1229 err0: 1213 err0:
1230 return ret; 1214 return ret;
1231 } 1215 }
1232 1216
1233 static int __devexit tusb_remove(struct platform_device *pdev) 1217 static int __devexit tusb_remove(struct platform_device *pdev)
1234 { 1218 {
1235 struct tusb6010_glue *glue = platform_get_drvdata(pdev); 1219 struct tusb6010_glue *glue = platform_get_drvdata(pdev);
1236 1220
1237 platform_device_del(glue->musb); 1221 platform_device_del(glue->musb);
1238 platform_device_put(glue->musb); 1222 platform_device_put(glue->musb);
1239 kfree(glue); 1223 kfree(glue);
1240 1224
1241 return 0; 1225 return 0;
1242 } 1226 }
1243 1227
1244 static struct platform_driver tusb_driver = { 1228 static struct platform_driver tusb_driver = {
1245 .probe = tusb_probe, 1229 .probe = tusb_probe,
1246 .remove = __devexit_p(tusb_remove), 1230 .remove = __devexit_p(tusb_remove),
1247 .driver = { 1231 .driver = {
1248 .name = "musb-tusb", 1232 .name = "musb-tusb",
1249 }, 1233 },
1250 }; 1234 };
1251 1235
1252 MODULE_DESCRIPTION("TUSB6010 MUSB Glue Layer"); 1236 MODULE_DESCRIPTION("TUSB6010 MUSB Glue Layer");
1253 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>"); 1237 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
1254 MODULE_LICENSE("GPL v2"); 1238 MODULE_LICENSE("GPL v2");
1255 1239
1256 static int __init tusb_init(void) 1240 static int __init tusb_init(void)
1257 { 1241 {
1258 return platform_driver_register(&tusb_driver); 1242 return platform_driver_register(&tusb_driver);
1259 } 1243 }
1260 module_init(tusb_init); 1244 module_init(tusb_init);
1261 1245
1262 static void __exit tusb_exit(void) 1246 static void __exit tusb_exit(void)
1263 { 1247 {
1264 platform_driver_unregister(&tusb_driver); 1248 platform_driver_unregister(&tusb_driver);
1265 } 1249 }
1266 module_exit(tusb_exit); 1250 module_exit(tusb_exit);
1267 1251