Commit dcb89b5aa0a90f791a594e0177cb144fdccec784
Committed by
Marek Vasut
1 parent
369d3c439a
Exists in
v2017.01-smarct4x
and in
40 other branches
usb: ci_udc: use var name ep/ci_ep consistently
Almost all of ci_udc.c uses variable name "ep" for a struct usb_ep and "ci_ep" for a struct ci_ep. This is nice and consistent, and helps people know what type a variable is without searching for the declaration. handle_ep_complete() doesn't do this, so fix it to be consistent. Signed-off-by: Stephen Warren <swarren@nvidia.com>
Showing 1 changed file with 9 additions and 9 deletions Inline Diff
drivers/usb/gadget/ci_udc.c
1 | /* | 1 | /* |
2 | * Copyright 2011, Marvell Semiconductor Inc. | 2 | * Copyright 2011, Marvell Semiconductor Inc. |
3 | * Lei Wen <leiwen@marvell.com> | 3 | * Lei Wen <leiwen@marvell.com> |
4 | * | 4 | * |
5 | * SPDX-License-Identifier: GPL-2.0+ | 5 | * SPDX-License-Identifier: GPL-2.0+ |
6 | * | 6 | * |
7 | * Back ported to the 8xx platform (from the 8260 platform) by | 7 | * Back ported to the 8xx platform (from the 8260 platform) by |
8 | * Murray.Jensen@cmst.csiro.au, 27-Jan-01. | 8 | * Murray.Jensen@cmst.csiro.au, 27-Jan-01. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <common.h> | 11 | #include <common.h> |
12 | #include <command.h> | 12 | #include <command.h> |
13 | #include <config.h> | 13 | #include <config.h> |
14 | #include <net.h> | 14 | #include <net.h> |
15 | #include <malloc.h> | 15 | #include <malloc.h> |
16 | #include <asm/byteorder.h> | 16 | #include <asm/byteorder.h> |
17 | #include <asm/errno.h> | 17 | #include <asm/errno.h> |
18 | #include <asm/io.h> | 18 | #include <asm/io.h> |
19 | #include <asm/unaligned.h> | 19 | #include <asm/unaligned.h> |
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/usb/ch9.h> | 21 | #include <linux/usb/ch9.h> |
22 | #include <linux/usb/gadget.h> | 22 | #include <linux/usb/gadget.h> |
23 | #include <usb/ci_udc.h> | 23 | #include <usb/ci_udc.h> |
24 | #include "../host/ehci.h" | 24 | #include "../host/ehci.h" |
25 | #include "ci_udc.h" | 25 | #include "ci_udc.h" |
26 | 26 | ||
27 | /* | 27 | /* |
28 | * Check if the system has too long cachelines. If the cachelines are | 28 | * Check if the system has too long cachelines. If the cachelines are |
29 | * longer then 128b, the driver will not be able flush/invalidate data | 29 | * longer then 128b, the driver will not be able flush/invalidate data |
30 | * cache over separate QH entries. We use 128b because one QH entry is | 30 | * cache over separate QH entries. We use 128b because one QH entry is |
31 | * 64b long and there are always two QH list entries for each endpoint. | 31 | * 64b long and there are always two QH list entries for each endpoint. |
32 | */ | 32 | */ |
33 | #if ARCH_DMA_MINALIGN > 128 | 33 | #if ARCH_DMA_MINALIGN > 128 |
34 | #error This driver can not work on systems with caches longer than 128b | 34 | #error This driver can not work on systems with caches longer than 128b |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * Every QTD must be individually aligned, since we can program any | 38 | * Every QTD must be individually aligned, since we can program any |
39 | * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN, | 39 | * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN, |
40 | * and the USB HW requires 32-byte alignment. Align to both: | 40 | * and the USB HW requires 32-byte alignment. Align to both: |
41 | */ | 41 | */ |
42 | #define ILIST_ALIGN roundup(ARCH_DMA_MINALIGN, 32) | 42 | #define ILIST_ALIGN roundup(ARCH_DMA_MINALIGN, 32) |
43 | /* Each QTD is this size */ | 43 | /* Each QTD is this size */ |
44 | #define ILIST_ENT_RAW_SZ sizeof(struct ept_queue_item) | 44 | #define ILIST_ENT_RAW_SZ sizeof(struct ept_queue_item) |
45 | /* | 45 | /* |
46 | * Align the size of the QTD too, so we can add this value to each | 46 | * Align the size of the QTD too, so we can add this value to each |
47 | * QTD's address to get another aligned address. | 47 | * QTD's address to get another aligned address. |
48 | */ | 48 | */ |
49 | #define ILIST_ENT_SZ roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN) | 49 | #define ILIST_ENT_SZ roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN) |
50 | /* For each endpoint, we need 2 QTDs, one for each of IN and OUT */ | 50 | /* For each endpoint, we need 2 QTDs, one for each of IN and OUT */ |
51 | #define ILIST_SZ (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ) | 51 | #define ILIST_SZ (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ) |
52 | 52 | ||
#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
/* Map a standard USB request code to a printable name for debug logging. */
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS:
		return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE:
		return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE:
		return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS:
		return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR:
		return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR:
		return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION:
		return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION:
		return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE:
		return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE:
		return "SET_INTERFACE";
	default:
		return "*UNKNOWN*";
	}
}
#endif
74 | 74 | ||
75 | static struct usb_endpoint_descriptor ep0_desc = { | 75 | static struct usb_endpoint_descriptor ep0_desc = { |
76 | .bLength = sizeof(struct usb_endpoint_descriptor), | 76 | .bLength = sizeof(struct usb_endpoint_descriptor), |
77 | .bDescriptorType = USB_DT_ENDPOINT, | 77 | .bDescriptorType = USB_DT_ENDPOINT, |
78 | .bEndpointAddress = USB_DIR_IN, | 78 | .bEndpointAddress = USB_DIR_IN, |
79 | .bmAttributes = USB_ENDPOINT_XFER_CONTROL, | 79 | .bmAttributes = USB_ENDPOINT_XFER_CONTROL, |
80 | }; | 80 | }; |
81 | 81 | ||
82 | static int ci_pullup(struct usb_gadget *gadget, int is_on); | 82 | static int ci_pullup(struct usb_gadget *gadget, int is_on); |
83 | static int ci_ep_enable(struct usb_ep *ep, | 83 | static int ci_ep_enable(struct usb_ep *ep, |
84 | const struct usb_endpoint_descriptor *desc); | 84 | const struct usb_endpoint_descriptor *desc); |
85 | static int ci_ep_disable(struct usb_ep *ep); | 85 | static int ci_ep_disable(struct usb_ep *ep); |
86 | static int ci_ep_queue(struct usb_ep *ep, | 86 | static int ci_ep_queue(struct usb_ep *ep, |
87 | struct usb_request *req, gfp_t gfp_flags); | 87 | struct usb_request *req, gfp_t gfp_flags); |
88 | static struct usb_request * | 88 | static struct usb_request * |
89 | ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags); | 89 | ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags); |
90 | static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req); | 90 | static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req); |
91 | 91 | ||
92 | static struct usb_gadget_ops ci_udc_ops = { | 92 | static struct usb_gadget_ops ci_udc_ops = { |
93 | .pullup = ci_pullup, | 93 | .pullup = ci_pullup, |
94 | }; | 94 | }; |
95 | 95 | ||
96 | static struct usb_ep_ops ci_ep_ops = { | 96 | static struct usb_ep_ops ci_ep_ops = { |
97 | .enable = ci_ep_enable, | 97 | .enable = ci_ep_enable, |
98 | .disable = ci_ep_disable, | 98 | .disable = ci_ep_disable, |
99 | .queue = ci_ep_queue, | 99 | .queue = ci_ep_queue, |
100 | .alloc_request = ci_ep_alloc_request, | 100 | .alloc_request = ci_ep_alloc_request, |
101 | .free_request = ci_ep_free_request, | 101 | .free_request = ci_ep_free_request, |
102 | }; | 102 | }; |
103 | 103 | ||
104 | /* Init values for USB endpoints. */ | 104 | /* Init values for USB endpoints. */ |
105 | static const struct usb_ep ci_ep_init[2] = { | 105 | static const struct usb_ep ci_ep_init[2] = { |
106 | [0] = { /* EP 0 */ | 106 | [0] = { /* EP 0 */ |
107 | .maxpacket = 64, | 107 | .maxpacket = 64, |
108 | .name = "ep0", | 108 | .name = "ep0", |
109 | .ops = &ci_ep_ops, | 109 | .ops = &ci_ep_ops, |
110 | }, | 110 | }, |
111 | [1] = { /* EP 1..n */ | 111 | [1] = { /* EP 1..n */ |
112 | .maxpacket = 512, | 112 | .maxpacket = 512, |
113 | .name = "ep-", | 113 | .name = "ep-", |
114 | .ops = &ci_ep_ops, | 114 | .ops = &ci_ep_ops, |
115 | }, | 115 | }, |
116 | }; | 116 | }; |
117 | 117 | ||
118 | static struct ci_drv controller = { | 118 | static struct ci_drv controller = { |
119 | .gadget = { | 119 | .gadget = { |
120 | .name = "ci_udc", | 120 | .name = "ci_udc", |
121 | .ops = &ci_udc_ops, | 121 | .ops = &ci_udc_ops, |
122 | .is_dualspeed = 1, | 122 | .is_dualspeed = 1, |
123 | }, | 123 | }, |
124 | }; | 124 | }; |
125 | 125 | ||
126 | /** | 126 | /** |
127 | * ci_get_qh() - return queue head for endpoint | 127 | * ci_get_qh() - return queue head for endpoint |
128 | * @ep_num: Endpoint number | 128 | * @ep_num: Endpoint number |
129 | * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) | 129 | * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) |
130 | * | 130 | * |
131 | * This function returns the QH associated with particular endpoint | 131 | * This function returns the QH associated with particular endpoint |
132 | * and it's direction. | 132 | * and it's direction. |
133 | */ | 133 | */ |
134 | static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in) | 134 | static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in) |
135 | { | 135 | { |
136 | return &controller.epts[(ep_num * 2) + dir_in]; | 136 | return &controller.epts[(ep_num * 2) + dir_in]; |
137 | } | 137 | } |
138 | 138 | ||
139 | /** | 139 | /** |
140 | * ci_get_qtd() - return queue item for endpoint | 140 | * ci_get_qtd() - return queue item for endpoint |
141 | * @ep_num: Endpoint number | 141 | * @ep_num: Endpoint number |
142 | * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) | 142 | * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) |
143 | * | 143 | * |
144 | * This function returns the QH associated with particular endpoint | 144 | * This function returns the QH associated with particular endpoint |
145 | * and it's direction. | 145 | * and it's direction. |
146 | */ | 146 | */ |
147 | static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) | 147 | static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) |
148 | { | 148 | { |
149 | int index = (ep_num * 2) + dir_in; | 149 | int index = (ep_num * 2) + dir_in; |
150 | uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ); | 150 | uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ); |
151 | return (struct ept_queue_item *)imem; | 151 | return (struct ept_queue_item *)imem; |
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * ci_flush_qh - flush cache over queue head | 155 | * ci_flush_qh - flush cache over queue head |
156 | * @ep_num: Endpoint number | 156 | * @ep_num: Endpoint number |
157 | * | 157 | * |
158 | * This function flushes cache over QH for particular endpoint. | 158 | * This function flushes cache over QH for particular endpoint. |
159 | */ | 159 | */ |
160 | static void ci_flush_qh(int ep_num) | 160 | static void ci_flush_qh(int ep_num) |
161 | { | 161 | { |
162 | struct ept_queue_head *head = ci_get_qh(ep_num, 0); | 162 | struct ept_queue_head *head = ci_get_qh(ep_num, 0); |
163 | const uint32_t start = (uint32_t)head; | 163 | const uint32_t start = (uint32_t)head; |
164 | const uint32_t end = start + 2 * sizeof(*head); | 164 | const uint32_t end = start + 2 * sizeof(*head); |
165 | 165 | ||
166 | flush_dcache_range(start, end); | 166 | flush_dcache_range(start, end); |
167 | } | 167 | } |
168 | 168 | ||
169 | /** | 169 | /** |
170 | * ci_invalidate_qh - invalidate cache over queue head | 170 | * ci_invalidate_qh - invalidate cache over queue head |
171 | * @ep_num: Endpoint number | 171 | * @ep_num: Endpoint number |
172 | * | 172 | * |
173 | * This function invalidates cache over QH for particular endpoint. | 173 | * This function invalidates cache over QH for particular endpoint. |
174 | */ | 174 | */ |
175 | static void ci_invalidate_qh(int ep_num) | 175 | static void ci_invalidate_qh(int ep_num) |
176 | { | 176 | { |
177 | struct ept_queue_head *head = ci_get_qh(ep_num, 0); | 177 | struct ept_queue_head *head = ci_get_qh(ep_num, 0); |
178 | uint32_t start = (uint32_t)head; | 178 | uint32_t start = (uint32_t)head; |
179 | uint32_t end = start + 2 * sizeof(*head); | 179 | uint32_t end = start + 2 * sizeof(*head); |
180 | 180 | ||
181 | invalidate_dcache_range(start, end); | 181 | invalidate_dcache_range(start, end); |
182 | } | 182 | } |
183 | 183 | ||
184 | /** | 184 | /** |
185 | * ci_flush_qtd - flush cache over queue item | 185 | * ci_flush_qtd - flush cache over queue item |
186 | * @ep_num: Endpoint number | 186 | * @ep_num: Endpoint number |
187 | * | 187 | * |
188 | * This function flushes cache over qTD pair for particular endpoint. | 188 | * This function flushes cache over qTD pair for particular endpoint. |
189 | */ | 189 | */ |
190 | static void ci_flush_qtd(int ep_num) | 190 | static void ci_flush_qtd(int ep_num) |
191 | { | 191 | { |
192 | struct ept_queue_item *item = ci_get_qtd(ep_num, 0); | 192 | struct ept_queue_item *item = ci_get_qtd(ep_num, 0); |
193 | const uint32_t start = (uint32_t)item; | 193 | const uint32_t start = (uint32_t)item; |
194 | const uint32_t end = start + 2 * ILIST_ENT_SZ; | 194 | const uint32_t end = start + 2 * ILIST_ENT_SZ; |
195 | 195 | ||
196 | flush_dcache_range(start, end); | 196 | flush_dcache_range(start, end); |
197 | } | 197 | } |
198 | 198 | ||
199 | /** | 199 | /** |
200 | * ci_invalidate_qtd - invalidate cache over queue item | 200 | * ci_invalidate_qtd - invalidate cache over queue item |
201 | * @ep_num: Endpoint number | 201 | * @ep_num: Endpoint number |
202 | * | 202 | * |
203 | * This function invalidates cache over qTD pair for particular endpoint. | 203 | * This function invalidates cache over qTD pair for particular endpoint. |
204 | */ | 204 | */ |
205 | static void ci_invalidate_qtd(int ep_num) | 205 | static void ci_invalidate_qtd(int ep_num) |
206 | { | 206 | { |
207 | struct ept_queue_item *item = ci_get_qtd(ep_num, 0); | 207 | struct ept_queue_item *item = ci_get_qtd(ep_num, 0); |
208 | const uint32_t start = (uint32_t)item; | 208 | const uint32_t start = (uint32_t)item; |
209 | const uint32_t end = start + 2 * ILIST_ENT_SZ; | 209 | const uint32_t end = start + 2 * ILIST_ENT_SZ; |
210 | 210 | ||
211 | invalidate_dcache_range(start, end); | 211 | invalidate_dcache_range(start, end); |
212 | } | 212 | } |
213 | 213 | ||
214 | static struct usb_request * | 214 | static struct usb_request * |
215 | ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags) | 215 | ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags) |
216 | { | 216 | { |
217 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); | 217 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); |
218 | int num; | 218 | int num; |
219 | struct ci_req *ci_req; | 219 | struct ci_req *ci_req; |
220 | 220 | ||
221 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 221 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
222 | if (num == 0 && controller.ep0_req) | 222 | if (num == 0 && controller.ep0_req) |
223 | return &controller.ep0_req->req; | 223 | return &controller.ep0_req->req; |
224 | 224 | ||
225 | ci_req = calloc(1, sizeof(*ci_req)); | 225 | ci_req = calloc(1, sizeof(*ci_req)); |
226 | if (!ci_req) | 226 | if (!ci_req) |
227 | return NULL; | 227 | return NULL; |
228 | 228 | ||
229 | INIT_LIST_HEAD(&ci_req->queue); | 229 | INIT_LIST_HEAD(&ci_req->queue); |
230 | 230 | ||
231 | if (num == 0) | 231 | if (num == 0) |
232 | controller.ep0_req = ci_req; | 232 | controller.ep0_req = ci_req; |
233 | 233 | ||
234 | return &ci_req->req; | 234 | return &ci_req->req; |
235 | } | 235 | } |
236 | 236 | ||
237 | static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req) | 237 | static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req) |
238 | { | 238 | { |
239 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); | 239 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); |
240 | struct ci_req *ci_req = container_of(req, struct ci_req, req); | 240 | struct ci_req *ci_req = container_of(req, struct ci_req, req); |
241 | int num; | 241 | int num; |
242 | 242 | ||
243 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 243 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
244 | if (num == 0) { | 244 | if (num == 0) { |
245 | if (!controller.ep0_req) | 245 | if (!controller.ep0_req) |
246 | return; | 246 | return; |
247 | controller.ep0_req = 0; | 247 | controller.ep0_req = 0; |
248 | } | 248 | } |
249 | 249 | ||
250 | if (ci_req->b_buf) | 250 | if (ci_req->b_buf) |
251 | free(ci_req->b_buf); | 251 | free(ci_req->b_buf); |
252 | free(ci_req); | 252 | free(ci_req); |
253 | } | 253 | } |
254 | 254 | ||
255 | static void ep_enable(int num, int in, int maxpacket) | 255 | static void ep_enable(int num, int in, int maxpacket) |
256 | { | 256 | { |
257 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; | 257 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; |
258 | unsigned n; | 258 | unsigned n; |
259 | 259 | ||
260 | n = readl(&udc->epctrl[num]); | 260 | n = readl(&udc->epctrl[num]); |
261 | if (in) | 261 | if (in) |
262 | n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK); | 262 | n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK); |
263 | else | 263 | else |
264 | n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK); | 264 | n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK); |
265 | 265 | ||
266 | if (num != 0) { | 266 | if (num != 0) { |
267 | struct ept_queue_head *head = ci_get_qh(num, in); | 267 | struct ept_queue_head *head = ci_get_qh(num, in); |
268 | 268 | ||
269 | head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT; | 269 | head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT; |
270 | ci_flush_qh(num); | 270 | ci_flush_qh(num); |
271 | } | 271 | } |
272 | writel(n, &udc->epctrl[num]); | 272 | writel(n, &udc->epctrl[num]); |
273 | } | 273 | } |
274 | 274 | ||
275 | static int ci_ep_enable(struct usb_ep *ep, | 275 | static int ci_ep_enable(struct usb_ep *ep, |
276 | const struct usb_endpoint_descriptor *desc) | 276 | const struct usb_endpoint_descriptor *desc) |
277 | { | 277 | { |
278 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); | 278 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); |
279 | int num, in; | 279 | int num, in; |
280 | num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 280 | num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
281 | in = (desc->bEndpointAddress & USB_DIR_IN) != 0; | 281 | in = (desc->bEndpointAddress & USB_DIR_IN) != 0; |
282 | ci_ep->desc = desc; | 282 | ci_ep->desc = desc; |
283 | 283 | ||
284 | if (num) { | 284 | if (num) { |
285 | int max = get_unaligned_le16(&desc->wMaxPacketSize); | 285 | int max = get_unaligned_le16(&desc->wMaxPacketSize); |
286 | 286 | ||
287 | if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL)) | 287 | if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL)) |
288 | max = 64; | 288 | max = 64; |
289 | if (ep->maxpacket != max) { | 289 | if (ep->maxpacket != max) { |
290 | DBG("%s: from %d to %d\n", __func__, | 290 | DBG("%s: from %d to %d\n", __func__, |
291 | ep->maxpacket, max); | 291 | ep->maxpacket, max); |
292 | ep->maxpacket = max; | 292 | ep->maxpacket = max; |
293 | } | 293 | } |
294 | } | 294 | } |
295 | ep_enable(num, in, ep->maxpacket); | 295 | ep_enable(num, in, ep->maxpacket); |
296 | DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket); | 296 | DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket); |
297 | return 0; | 297 | return 0; |
298 | } | 298 | } |
299 | 299 | ||
300 | static int ci_ep_disable(struct usb_ep *ep) | 300 | static int ci_ep_disable(struct usb_ep *ep) |
301 | { | 301 | { |
302 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); | 302 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); |
303 | 303 | ||
304 | ci_ep->desc = NULL; | 304 | ci_ep->desc = NULL; |
305 | return 0; | 305 | return 0; |
306 | } | 306 | } |
307 | 307 | ||
308 | static int ci_bounce(struct ci_req *ci_req, int in) | 308 | static int ci_bounce(struct ci_req *ci_req, int in) |
309 | { | 309 | { |
310 | struct usb_request *req = &ci_req->req; | 310 | struct usb_request *req = &ci_req->req; |
311 | uint32_t addr = (uint32_t)req->buf; | 311 | uint32_t addr = (uint32_t)req->buf; |
312 | uint32_t hwaddr; | 312 | uint32_t hwaddr; |
313 | uint32_t aligned_used_len; | 313 | uint32_t aligned_used_len; |
314 | 314 | ||
315 | /* Input buffer address is not aligned. */ | 315 | /* Input buffer address is not aligned. */ |
316 | if (addr & (ARCH_DMA_MINALIGN - 1)) | 316 | if (addr & (ARCH_DMA_MINALIGN - 1)) |
317 | goto align; | 317 | goto align; |
318 | 318 | ||
319 | /* Input buffer length is not aligned. */ | 319 | /* Input buffer length is not aligned. */ |
320 | if (req->length & (ARCH_DMA_MINALIGN - 1)) | 320 | if (req->length & (ARCH_DMA_MINALIGN - 1)) |
321 | goto align; | 321 | goto align; |
322 | 322 | ||
323 | /* The buffer is well aligned, only flush cache. */ | 323 | /* The buffer is well aligned, only flush cache. */ |
324 | ci_req->hw_len = req->length; | 324 | ci_req->hw_len = req->length; |
325 | ci_req->hw_buf = req->buf; | 325 | ci_req->hw_buf = req->buf; |
326 | goto flush; | 326 | goto flush; |
327 | 327 | ||
328 | align: | 328 | align: |
329 | if (ci_req->b_buf && req->length > ci_req->b_len) { | 329 | if (ci_req->b_buf && req->length > ci_req->b_len) { |
330 | free(ci_req->b_buf); | 330 | free(ci_req->b_buf); |
331 | ci_req->b_buf = 0; | 331 | ci_req->b_buf = 0; |
332 | } | 332 | } |
333 | if (!ci_req->b_buf) { | 333 | if (!ci_req->b_buf) { |
334 | ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN); | 334 | ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN); |
335 | ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len); | 335 | ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len); |
336 | if (!ci_req->b_buf) | 336 | if (!ci_req->b_buf) |
337 | return -ENOMEM; | 337 | return -ENOMEM; |
338 | } | 338 | } |
339 | ci_req->hw_len = ci_req->b_len; | 339 | ci_req->hw_len = ci_req->b_len; |
340 | ci_req->hw_buf = ci_req->b_buf; | 340 | ci_req->hw_buf = ci_req->b_buf; |
341 | 341 | ||
342 | if (in) | 342 | if (in) |
343 | memcpy(ci_req->hw_buf, req->buf, req->length); | 343 | memcpy(ci_req->hw_buf, req->buf, req->length); |
344 | 344 | ||
345 | flush: | 345 | flush: |
346 | hwaddr = (uint32_t)ci_req->hw_buf; | 346 | hwaddr = (uint32_t)ci_req->hw_buf; |
347 | aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN); | 347 | aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN); |
348 | flush_dcache_range(hwaddr, hwaddr + aligned_used_len); | 348 | flush_dcache_range(hwaddr, hwaddr + aligned_used_len); |
349 | 349 | ||
350 | return 0; | 350 | return 0; |
351 | } | 351 | } |
352 | 352 | ||
353 | static void ci_debounce(struct ci_req *ci_req, int in) | 353 | static void ci_debounce(struct ci_req *ci_req, int in) |
354 | { | 354 | { |
355 | struct usb_request *req = &ci_req->req; | 355 | struct usb_request *req = &ci_req->req; |
356 | uint32_t addr = (uint32_t)req->buf; | 356 | uint32_t addr = (uint32_t)req->buf; |
357 | uint32_t hwaddr = (uint32_t)ci_req->hw_buf; | 357 | uint32_t hwaddr = (uint32_t)ci_req->hw_buf; |
358 | uint32_t aligned_used_len; | 358 | uint32_t aligned_used_len; |
359 | 359 | ||
360 | if (in) | 360 | if (in) |
361 | return; | 361 | return; |
362 | 362 | ||
363 | aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN); | 363 | aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN); |
364 | invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len); | 364 | invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len); |
365 | 365 | ||
366 | if (addr == hwaddr) | 366 | if (addr == hwaddr) |
367 | return; /* not a bounce */ | 367 | return; /* not a bounce */ |
368 | 368 | ||
369 | memcpy(req->buf, ci_req->hw_buf, req->actual); | 369 | memcpy(req->buf, ci_req->hw_buf, req->actual); |
370 | } | 370 | } |
371 | 371 | ||
372 | static void ci_ep_submit_next_request(struct ci_ep *ci_ep) | 372 | static void ci_ep_submit_next_request(struct ci_ep *ci_ep) |
373 | { | 373 | { |
374 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; | 374 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; |
375 | struct ept_queue_item *item; | 375 | struct ept_queue_item *item; |
376 | struct ept_queue_head *head; | 376 | struct ept_queue_head *head; |
377 | int bit, num, len, in; | 377 | int bit, num, len, in; |
378 | struct ci_req *ci_req; | 378 | struct ci_req *ci_req; |
379 | 379 | ||
380 | ci_ep->req_primed = true; | 380 | ci_ep->req_primed = true; |
381 | 381 | ||
382 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 382 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
383 | in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; | 383 | in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; |
384 | item = ci_get_qtd(num, in); | 384 | item = ci_get_qtd(num, in); |
385 | head = ci_get_qh(num, in); | 385 | head = ci_get_qh(num, in); |
386 | 386 | ||
387 | ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); | 387 | ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue); |
388 | len = ci_req->req.length; | 388 | len = ci_req->req.length; |
389 | 389 | ||
390 | item->info = INFO_BYTES(len) | INFO_ACTIVE; | 390 | item->info = INFO_BYTES(len) | INFO_ACTIVE; |
391 | item->page0 = (uint32_t)ci_req->hw_buf; | 391 | item->page0 = (uint32_t)ci_req->hw_buf; |
392 | item->page1 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x1000; | 392 | item->page1 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x1000; |
393 | item->page2 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x2000; | 393 | item->page2 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x2000; |
394 | item->page3 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x3000; | 394 | item->page3 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x3000; |
395 | item->page4 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x4000; | 395 | item->page4 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x4000; |
396 | 396 | ||
397 | head->next = (unsigned) item; | 397 | head->next = (unsigned) item; |
398 | head->info = 0; | 398 | head->info = 0; |
399 | 399 | ||
400 | /* | 400 | /* |
401 | * When sending the data for an IN transaction, the attached host | 401 | * When sending the data for an IN transaction, the attached host |
402 | * knows that all data for the IN is sent when one of the following | 402 | * knows that all data for the IN is sent when one of the following |
403 | * occurs: | 403 | * occurs: |
404 | * a) A zero-length packet is transmitted. | 404 | * a) A zero-length packet is transmitted. |
405 | * b) A packet with length that isn't an exact multiple of the ep's | 405 | * b) A packet with length that isn't an exact multiple of the ep's |
406 | * maxpacket is transmitted. | 406 | * maxpacket is transmitted. |
407 | * c) Enough data is sent to exactly fill the host's maximum expected | 407 | * c) Enough data is sent to exactly fill the host's maximum expected |
408 | * IN transaction size. | 408 | * IN transaction size. |
409 | * | 409 | * |
410 | * One of these conditions MUST apply at the end of an IN transaction, | 410 | * One of these conditions MUST apply at the end of an IN transaction, |
411 | * or the transaction will not be considered complete by the host. If | 411 | * or the transaction will not be considered complete by the host. If |
412 | * none of (a)..(c) already applies, then we must force (a) to apply | 412 | * none of (a)..(c) already applies, then we must force (a) to apply |
413 | * by explicitly sending an extra zero-length packet. | 413 | * by explicitly sending an extra zero-length packet. |
414 | */ | 414 | */ |
415 | /* IN !a !b !c */ | 415 | /* IN !a !b !c */ |
416 | if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) { | 416 | if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) { |
417 | /* | 417 | /* |
418 | * Each endpoint has 2 items allocated, even though typically | 418 | * Each endpoint has 2 items allocated, even though typically |
419 | * only 1 is used at a time since either an IN or an OUT but | 419 | * only 1 is used at a time since either an IN or an OUT but |
420 | * not both is queued. For an IN transaction, item currently | 420 | * not both is queued. For an IN transaction, item currently |
421 | * points at the second of these items, so we know that we | 421 | * points at the second of these items, so we know that we |
422 | * can use the other to transmit the extra zero-length packet. | 422 | * can use the other to transmit the extra zero-length packet. |
423 | */ | 423 | */ |
424 | struct ept_queue_item *other_item = ci_get_qtd(num, 0); | 424 | struct ept_queue_item *other_item = ci_get_qtd(num, 0); |
425 | item->next = (unsigned)other_item; | 425 | item->next = (unsigned)other_item; |
426 | item = other_item; | 426 | item = other_item; |
427 | item->info = INFO_ACTIVE; | 427 | item->info = INFO_ACTIVE; |
428 | } | 428 | } |
429 | 429 | ||
430 | item->next = TERMINATE; | 430 | item->next = TERMINATE; |
431 | item->info |= INFO_IOC; | 431 | item->info |= INFO_IOC; |
432 | 432 | ||
433 | ci_flush_qtd(num); | 433 | ci_flush_qtd(num); |
434 | 434 | ||
435 | DBG("ept%d %s queue len %x, req %p, buffer %p\n", | 435 | DBG("ept%d %s queue len %x, req %p, buffer %p\n", |
436 | num, in ? "in" : "out", len, ci_req, ci_req->hw_buf); | 436 | num, in ? "in" : "out", len, ci_req, ci_req->hw_buf); |
437 | ci_flush_qh(num); | 437 | ci_flush_qh(num); |
438 | 438 | ||
439 | if (in) | 439 | if (in) |
440 | bit = EPT_TX(num); | 440 | bit = EPT_TX(num); |
441 | else | 441 | else |
442 | bit = EPT_RX(num); | 442 | bit = EPT_RX(num); |
443 | 443 | ||
444 | writel(bit, &udc->epprime); | 444 | writel(bit, &udc->epprime); |
445 | } | 445 | } |
446 | 446 | ||
/*
 * usb_ep_ops.queue implementation: append a request to an endpoint's
 * software queue and, if the endpoint has nothing primed in hardware,
 * submit it to the controller immediately.
 *
 * Returns 0 on success, -EPROTO if ep0 already has a primed request,
 * or the error code from ci_bounce().
 */
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);
	struct ci_req *ci_req = container_of(req, struct ci_req, req);
	int in, ret;
	int __maybe_unused num;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;

	if (!num && ci_ep->req_primed) {
		/*
		 * The flipping of ep0 between IN and OUT relies on
		 * ci_ep_queue consuming the current IN/OUT setting
		 * immediately. If this is deferred to a later point when the
		 * req is pulled out of ci_req->queue, then the IN/OUT setting
		 * may have been changed since the req was queued, and state
		 * will get out of sync. This condition doesn't occur today,
		 * but could if bugs were introduced later, and this error
		 * check will save a lot of debugging time.
		 */
		printf("%s: ep0 transaction already in progress\n", __func__);
		return -EPROTO;
	}

	/* Stage the request data into a DMA-able bounce buffer */
	ret = ci_bounce(ci_req, in);
	if (ret)
		return ret;

	DBG("ept%d %s pre-queue req %p, buffer %p\n",
	    num, in ? "in" : "out", ci_req, ci_req->hw_buf);
	list_add_tail(&ci_req->queue, &ci_ep->queue);

	/* Prime the hardware only when no other request is in flight */
	if (!ci_ep->req_primed)
		ci_ep_submit_next_request(ci_ep);

	return 0;
}
486 | 486 | ||
487 | static void flip_ep0_direction(void) | 487 | static void flip_ep0_direction(void) |
488 | { | 488 | { |
489 | if (ep0_desc.bEndpointAddress == USB_DIR_IN) { | 489 | if (ep0_desc.bEndpointAddress == USB_DIR_IN) { |
490 | DBG("%s: Flipping ep0 to OUT\n", __func__); | 490 | DBG("%s: Flipping ep0 to OUT\n", __func__); |
491 | ep0_desc.bEndpointAddress = 0; | 491 | ep0_desc.bEndpointAddress = 0; |
492 | } else { | 492 | } else { |
493 | DBG("%s: Flipping ep0 to IN\n", __func__); | 493 | DBG("%s: Flipping ep0 to IN\n", __func__); |
494 | ep0_desc.bEndpointAddress = USB_DIR_IN; | 494 | ep0_desc.bEndpointAddress = USB_DIR_IN; |
495 | } | 495 | } |
496 | } | 496 | } |
497 | 497 | ||
/*
 * Handle a transfer-complete event on one endpoint: retire the finished
 * qTD, complete the request at the head of the endpoint's queue, and
 * prime the next queued request (if any). For ep0, also drive the
 * Data Stage -> Status Stage transition of a control transfer.
 */
static void handle_ep_complete(struct ci_ep *ci_ep)
{
	struct ept_queue_item *item;
	int num, in, len;
	struct ci_req *ci_req;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	/* Invalidate cache so we read the qTD as written back by the HW */
	ci_invalidate_qtd(num);

	/* info[30:16] holds the count of bytes NOT transferred */
	len = (item->info >> 16) & 0x7fff;
	/* Low byte of info carries the transaction error/status bits */
	if (item->info & 0xff)
		printf("EP%d/%s FAIL info=%x pg0=%x\n",
			num, in ? "in" : "out", item->info, item->page0);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	/* Keep the endpoint busy: prime the next request before completion */
	if (!list_empty(&ci_ep->queue))
		ci_ep_submit_next_request(ci_ep);

	ci_req->req.actual = ci_req->req.length - len;
	/* Copy OUT data back from the bounce buffer to the caller's buffer */
	ci_debounce(ci_req, in);

	DBG("ept%d %s req %p, complete %x\n",
	    num, in ? "in" : "out", ci_req, len);
	if (num != 0 || controller.ep0_data_phase)
		ci_req->req.complete(&ci_ep->ep, &ci_req->req);
	if (num == 0 && controller.ep0_data_phase) {
		/*
		 * Data Stage is complete, so flip ep0 dir for Status Stage,
		 * which always transfers a packet in the opposite direction.
		 */
		DBG("%s: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
		/* Re-queue the same req as a zero-length Status Stage packet */
		ci_req->req.length = 0;
		usb_ep_queue(&ci_ep->ep, &ci_req->req, 0);
	}
}
540 | 540 | ||
/* Pack bRequestType and bRequest into one value for switch dispatch */
#define SETUP(type, request) (((type) << 8) | (request))
542 | 542 | ||
/*
 * Handle a SETUP packet received on ep0: read the setup data from the
 * EP0 OUT queue head, set the ep0 direction for the coming Data/Status
 * stages, service the few requests the UDC handles itself
 * (CLEAR_FEATURE(endpoint), SET_ADDRESS, GET_STATUS(device)), and pass
 * everything else up to the gadget driver. If nobody handles the
 * request, stall ep0.
 */
static void handle_setup(void)
{
	struct ci_ep *ci_ep = &controller.ep[0];
	struct ci_req *ci_req;
	struct usb_request *req;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_head *head;
	struct usb_ctrlrequest r;
	int status = 0;
	int num, in, _num, _in, i;
	char *buf;

	ci_req = controller.ep0_req;
	req = &ci_req->req;
	head = ci_get_qh(0, 0);	/* EP0 OUT */

	/* Invalidate cache so we see the setup data the HW wrote to the QH */
	ci_invalidate_qh(0);
	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	/* Acknowledge the setup event (write-1-to-clear) */
	writel(EPT_RX(0), &udc->epsetupstat);
#else
	writel(EPT_RX(0), &udc->epstat);
#endif
	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
	    r.wValue, r.wLength);

	/* Set EP0 dir for Data Stage based on Setup Stage data */
	if (r.bRequestType & USB_DIR_IN) {
		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	} else {
		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	}
	if (r.wLength) {
		controller.ep0_data_phase = true;
	} else {
		/* 0 length -> no Data Stage. Flip dir for Status Stage */
		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
	}

	/* The setup consumed ep0's primed request; reset its queue state */
	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	switch (SETUP(r.bRequestType, r.bRequest)) {
	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
		/* wIndex encodes the target endpoint number and direction */
		_num = r.wIndex & 15;
		_in = !!(r.wIndex & 0x80);

		if ((r.wValue == 0) && (r.wLength == 0)) {
			req->length = 0;
			/* Find the matching endpoint and re-enable it */
			for (i = 0; i < NUM_ENDPOINTS; i++) {
				struct ci_ep *ep = &controller.ep[i];

				if (!ep->desc)
					continue;
				num = ep->desc->bEndpointAddress
						& USB_ENDPOINT_NUMBER_MASK;
				in = (ep->desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				if ((num == _num) && (in == _in)) {
					ep_enable(num, in, ep->ep.maxpacket);
					usb_ep_queue(controller.gadget.ep0,
							req, 0);
					break;
				}
			}
		}
		return;

	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
		/*
		 * write address delayed (will take effect
		 * after the next IN txn)
		 */
		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
		req->length = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;

	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
		/* Report ourselves as self-powered, no remote wakeup */
		req->length = 2;
		buf = (char *)req->buf;
		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
		buf[1] = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;
	}
	/* pass request up to the gadget driver */
	if (controller.driver)
		status = controller.driver->setup(&controller.gadget, &r);
	else
		status = -ENODEV;

	if (!status)
		return;
	DBG("STALL reqname %s type %x value %x, index %x\n",
	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
	/* Request not handled: stall both directions of ep0 */
	writel((1<<16) | (1 << 0), &udc->epctrl[0]);
}
646 | 646 | ||
/*
 * Quiesce the controller: acknowledge all pending endpoint events
 * (the status registers are write-1-to-clear, hence the read-back
 * writes), flush every endpoint's primed transfers, disable all
 * non-ep0 endpoints, and mark each configured endpoint's queue head
 * active so outstanding qTDs are effectively retired.
 */
static void stop_activity(void)
{
	int i, num, in;
	struct ept_queue_head *head;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
	writel(readl(&udc->epstat), &udc->epstat);
	writel(0xffffffff, &udc->epflush);

	/* error out any pending reqs */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		if (i != 0)
			writel(0, &udc->epctrl[i]);
		if (controller.ep[i].desc) {
			num = controller.ep[i].desc->bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK;
			in = (controller.ep[i].desc->bEndpointAddress
				& USB_DIR_IN) != 0;
			head = ci_get_qh(num, in);
			head->info = INFO_ACTIVE;
			ci_flush_qh(num);
		}
	}
}
674 | 674 | ||
/*
 * Main interrupt service routine: read and acknowledge USBSTS, then
 * dispatch on the active event bits - bus reset, suspend, port change
 * (speed negotiation), error, and transfer-complete / setup events.
 */
void udc_irq(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n = readl(&udc->usbsts);
	/* Acknowledge all pending status bits (write-1-to-clear) */
	writel(n, &udc->usbsts);
	int bit, i, num, in;

	n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
	if (n == 0)
		return;

	if (n & STS_URI) {
		DBG("-- reset --\n");
		stop_activity();
	}
	if (n & STS_SLI)
		DBG("-- suspend --\n");

	if (n & STS_PCI) {
		/* Port change: detect negotiated speed, clamp maxpacket */
		int max = 64;
		int speed = USB_SPEED_FULL;

#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
		bit = (readl(&udc->portsc) >> 26) & 3;
#endif
		DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
		if (bit == 2) {
			speed = USB_SPEED_HIGH;
			max = 512;
		}
		controller.gadget.speed = speed;
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (controller.ep[i].ep.maxpacket > max)
				controller.ep[i].ep.maxpacket = max;
		}
	}

	if (n & STS_UEI)
		printf("<UEI %x>\n", readl(&udc->epcomp));

	if ((n & STS_UI) || (n & STS_UEI)) {
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		n = readl(&udc->epsetupstat);
#else
		n = readl(&udc->epstat);
#endif
		/* A setup packet arrived on ep0 OUT */
		if (n & EPT_RX(0))
			handle_setup();

		n = readl(&udc->epcomp);
		if (n != 0)
			writel(n, &udc->epcomp);

		/* Complete every endpoint whose bit is set in EPCOMP */
		for (i = 0; i < NUM_ENDPOINTS && n; i++) {
			if (controller.ep[i].desc) {
				num = controller.ep[i].desc->bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK;
				in = (controller.ep[i].desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				bit = (in) ? EPT_TX(num) : EPT_RX(num);
				if (n & bit)
					handle_ep_complete(&controller.ep[i]);
			}
		}
	}
}
743 | 743 | ||
744 | int usb_gadget_handle_interrupts(void) | 744 | int usb_gadget_handle_interrupts(void) |
745 | { | 745 | { |
746 | u32 value; | 746 | u32 value; |
747 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; | 747 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; |
748 | 748 | ||
749 | value = readl(&udc->usbsts); | 749 | value = readl(&udc->usbsts); |
750 | if (value) | 750 | if (value) |
751 | udc_irq(); | 751 | udc_irq(); |
752 | 752 | ||
753 | return value; | 753 | return value; |
754 | } | 754 | } |
755 | 755 | ||
/*
 * Drop off the bus: stop all endpoint activity, disable the D+ pullup
 * (putting the port in full-speed idle via USBCMD_FS2), wait for the
 * host to observe the disconnect, then notify the gadget driver.
 */
void udc_disconnect(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	/* disable pullup */
	stop_activity();
	writel(USBCMD_FS2, &udc->usbcmd);
	/* give the host time to notice the disconnect */
	udelay(800);
	if (controller.driver)
		controller.driver->disconnect(&controller.gadget);
}
766 | 766 | ||
/*
 * usb_gadget_ops.pullup implementation: when is_on, reset the
 * controller, point it at the endpoint queue-head list, switch it to
 * device mode and start it running (which enables the D+ pullup);
 * otherwise disconnect from the bus. Always returns 0.
 */
static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	if (is_on) {
		/* RESET */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
		udelay(200);

		/* Tell the controller where the QH list lives */
		writel((unsigned)controller.epts, &udc->epinitaddr);

		/* select DEVICE mode */
		writel(USBMODE_DEVICE, &udc->usbmode);

		/* Discard any stale primed transfers */
		writel(0xffffffff, &udc->epflush);

		/* Turn on the USB connection by enabling the pullup resistor */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd);
	} else {
		udc_disconnect();
	}

	return 0;
}
790 | 790 | ||
/*
 * Allocate and initialize the controller's DMA data structures (queue
 * head list and qTD items), set up the software endpoint objects and
 * the gadget's endpoint list, and allocate the shared ep0 request.
 *
 * Returns 0 on success or -ENOMEM, releasing anything already
 * allocated on the failure paths.
 */
static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	int i;

	/* One OUT + one IN queue head per endpoint */
	const int num = 2 * NUM_ENDPOINTS;

	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ILIST_SZ);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure QH for each endpoint. The structure of the QH list
		 * is such that each two subsequent fields, N and N+1 where N is
		 * even, in the QH list represent QH for one endpoint. The Nth
		 * entry represents OUT configuration and the N+1th entry does
		 * represent IN configuration of the endpoint.
		 */
		head = controller.epts + i;
		if (i < 2)
			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CONFIG_ZLT | CONFIG_IOS;
		else
			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CONFIG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

		/* Flush both QHs of an endpoint once the pair is set up */
		if (i & 1) {
			ci_flush_qh(i / 2);
			ci_flush_qtd(i / 2);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..n */
	for (i = 1; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[1],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	/* Allocate the request used for all ep0 transactions; it is
	 * stored in controller.ep0_req as a side effect of this call. */
	ci_ep_alloc_request(&controller.ep[0].ep, 0);
	if (!controller.ep0_req) {
		free(controller.items_mem);
		free(controller.epts);
		return -ENOMEM;
	}

	return 0;
}
869 | 869 | ||
870 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | 870 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) |
871 | { | 871 | { |
872 | int ret; | 872 | int ret; |
873 | 873 | ||
874 | if (!driver) | 874 | if (!driver) |
875 | return -EINVAL; | 875 | return -EINVAL; |
876 | if (!driver->bind || !driver->setup || !driver->disconnect) | 876 | if (!driver->bind || !driver->setup || !driver->disconnect) |
877 | return -EINVAL; | 877 | return -EINVAL; |
878 | if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) | 878 | if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) |
879 | return -EINVAL; | 879 | return -EINVAL; |
880 | 880 | ||
881 | ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl); | 881 | ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl); |
882 | if (ret) | 882 | if (ret) |
883 | return ret; | 883 | return ret; |
884 | 884 | ||
885 | ret = ci_udc_probe(); | 885 | ret = ci_udc_probe(); |
886 | #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS) | 886 | #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS) |
887 | /* | 887 | /* |
888 | * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all | 888 | * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all |
889 | * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection | 889 | * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection |
890 | */ | 890 | */ |
891 | if (!ret) { | 891 | if (!ret) { |
892 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; | 892 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; |
893 | 893 | ||
894 | /* select ULPI phy */ | 894 | /* select ULPI phy */ |
895 | writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc); | 895 | writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc); |
896 | } | 896 | } |
897 | #endif | 897 | #endif |
898 | 898 | ||
899 | ret = driver->bind(&controller.gadget); | 899 | ret = driver->bind(&controller.gadget); |
900 | if (ret) { | 900 | if (ret) { |
901 | DBG("driver->bind() returned %d\n", ret); | 901 | DBG("driver->bind() returned %d\n", ret); |
902 | return ret; | 902 | return ret; |
903 | } | 903 | } |
904 | controller.driver = driver; | 904 | controller.driver = driver; |
905 | 905 | ||
906 | return 0; | 906 | return 0; |
907 | } | 907 | } |
908 | 908 | ||
909 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 909 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) |
910 | { | 910 | { |
911 | udc_disconnect(); | 911 | udc_disconnect(); |
912 | 912 | ||
913 | driver->unbind(&controller.gadget); | 913 | driver->unbind(&controller.gadget); |
914 | controller.driver = NULL; | 914 | controller.driver = NULL; |
915 | 915 | ||
916 | ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req); | 916 | ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req); |
917 | free(controller.items_mem); | 917 | free(controller.items_mem); |
918 | free(controller.epts); | 918 | free(controller.epts); |
919 | 919 | ||
920 | return 0; | 920 | return 0; |
921 | } | 921 | } |
922 | 922 |