Commit 48f892dc7370a23882239be06b4ec2ce60273e57
Exists in
v2017.01-smarct4x
and in
37 other branches
Merge git://git.denx.de/u-boot-usb
Showing 8 changed files Inline Diff
common/cmd_dfu.c
1 | /* | 1 | /* |
2 | * cmd_dfu.c -- dfu command | 2 | * cmd_dfu.c -- dfu command |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Samsung Electronics | 4 | * Copyright (C) 2012 Samsung Electronics |
5 | * authors: Andrzej Pietrasiewicz <andrzej.p@samsung.com> | 5 | * authors: Andrzej Pietrasiewicz <andrzej.p@samsung.com> |
6 | * Lukasz Majewski <l.majewski@samsung.com> | 6 | * Lukasz Majewski <l.majewski@samsung.com> |
7 | * | 7 | * |
8 | * SPDX-License-Identifier: GPL-2.0+ | 8 | * SPDX-License-Identifier: GPL-2.0+ |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <common.h> | 11 | #include <common.h> |
12 | #include <dfu.h> | 12 | #include <dfu.h> |
13 | #include <g_dnl.h> | 13 | #include <g_dnl.h> |
14 | #include <usb.h> | 14 | #include <usb.h> |
15 | 15 | ||
16 | static int do_dfu(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[]) | 16 | static int do_dfu(cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[]) |
17 | { | 17 | { |
18 | bool dfu_reset = false; | ||
19 | |||
18 | if (argc < 4) | 20 | if (argc < 4) |
19 | return CMD_RET_USAGE; | 21 | return CMD_RET_USAGE; |
20 | 22 | ||
21 | char *usb_controller = argv[1]; | 23 | char *usb_controller = argv[1]; |
22 | char *interface = argv[2]; | 24 | char *interface = argv[2]; |
23 | char *devstring = argv[3]; | 25 | char *devstring = argv[3]; |
24 | 26 | ||
25 | int ret, i = 0; | 27 | int ret, i = 0; |
26 | 28 | ||
27 | ret = dfu_init_env_entities(interface, devstring); | 29 | ret = dfu_init_env_entities(interface, devstring); |
28 | if (ret) | 30 | if (ret) |
29 | goto done; | 31 | goto done; |
30 | 32 | ||
31 | ret = CMD_RET_SUCCESS; | 33 | ret = CMD_RET_SUCCESS; |
32 | if (argc > 4 && strcmp(argv[4], "list") == 0) { | 34 | if (argc > 4 && strcmp(argv[4], "list") == 0) { |
33 | dfu_show_entities(); | 35 | dfu_show_entities(); |
34 | goto done; | 36 | goto done; |
35 | } | 37 | } |
36 | 38 | ||
37 | int controller_index = simple_strtoul(usb_controller, NULL, 0); | 39 | int controller_index = simple_strtoul(usb_controller, NULL, 0); |
38 | board_usb_init(controller_index, USB_INIT_DEVICE); | 40 | board_usb_init(controller_index, USB_INIT_DEVICE); |
39 | 41 | dfu_clear_detach(); | |
40 | g_dnl_register("usb_dnl_dfu"); | 42 | g_dnl_register("usb_dnl_dfu"); |
41 | while (1) { | 43 | while (1) { |
42 | if (dfu_reset()) | 44 | if (dfu_detach()) { |
43 | /* | 45 | /* |
46 | * Check if USB bus reset is performed after detach, | ||
47 | * which indicates that -R switch has been passed to | ||
48 | * dfu-util. In this case reboot the device | ||
49 | */ | ||
50 | if (dfu_usb_get_reset()) { | ||
51 | dfu_reset = true; | ||
52 | goto exit; | ||
53 | } | ||
54 | |||
55 | /* | ||
44 | * This extra number of usb_gadget_handle_interrupts() | 56 | * This extra number of usb_gadget_handle_interrupts() |
45 | * calls is necessary to assure correct transmission | 57 | * calls is necessary to assure correct transmission |
46 | * completion with dfu-util | 58 | * completion with dfu-util |
47 | */ | 59 | */ |
48 | if (++i == 10) | 60 | if (++i == 10000) |
49 | goto exit; | 61 | goto exit; |
62 | } | ||
50 | 63 | ||
51 | if (ctrlc()) | 64 | if (ctrlc()) |
52 | goto exit; | 65 | goto exit; |
53 | 66 | ||
54 | usb_gadget_handle_interrupts(); | 67 | usb_gadget_handle_interrupts(); |
55 | } | 68 | } |
56 | exit: | 69 | exit: |
57 | g_dnl_unregister(); | 70 | g_dnl_unregister(); |
58 | done: | 71 | done: |
59 | dfu_free_entities(); | 72 | dfu_free_entities(); |
60 | 73 | ||
61 | if (dfu_reset()) | 74 | if (dfu_reset) |
62 | run_command("reset", 0); | 75 | run_command("reset", 0); |
76 | |||
77 | dfu_clear_detach(); | ||
63 | 78 | ||
64 | return ret; | 79 | return ret; |
65 | } | 80 | } |
66 | 81 | ||
67 | U_BOOT_CMD(dfu, CONFIG_SYS_MAXARGS, 1, do_dfu, | 82 | U_BOOT_CMD(dfu, CONFIG_SYS_MAXARGS, 1, do_dfu, |
68 | "Device Firmware Upgrade", | 83 | "Device Firmware Upgrade", |
69 | "<USB_controller> <interface> <dev> [list]\n" | 84 | "<USB_controller> <interface> <dev> [list]\n" |
70 | " - device firmware upgrade via <USB_controller>\n" | 85 | " - device firmware upgrade via <USB_controller>\n" |
71 | " on device <dev>, attached to interface\n" | 86 | " on device <dev>, attached to interface\n" |
72 | " <interface>\n" | 87 | " <interface>\n" |
73 | " [list] - list available alt settings\n" | 88 | " [list] - list available alt settings\n" |
74 | ); | 89 | ); |
75 | 90 |
drivers/dfu/dfu.c
1 | /* | 1 | /* |
2 | * dfu.c -- DFU back-end routines | 2 | * dfu.c -- DFU back-end routines |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Samsung Electronics | 4 | * Copyright (C) 2012 Samsung Electronics |
5 | * author: Lukasz Majewski <l.majewski@samsung.com> | 5 | * author: Lukasz Majewski <l.majewski@samsung.com> |
6 | * | 6 | * |
7 | * SPDX-License-Identifier: GPL-2.0+ | 7 | * SPDX-License-Identifier: GPL-2.0+ |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <common.h> | 10 | #include <common.h> |
11 | #include <errno.h> | 11 | #include <errno.h> |
12 | #include <malloc.h> | 12 | #include <malloc.h> |
13 | #include <mmc.h> | 13 | #include <mmc.h> |
14 | #include <fat.h> | 14 | #include <fat.h> |
15 | #include <dfu.h> | 15 | #include <dfu.h> |
16 | #include <hash.h> | 16 | #include <hash.h> |
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
19 | 19 | ||
20 | static bool dfu_reset_request; | 20 | static bool dfu_detach_request; |
21 | static LIST_HEAD(dfu_list); | 21 | static LIST_HEAD(dfu_list); |
22 | static int dfu_alt_num; | 22 | static int dfu_alt_num; |
23 | static int alt_num_cnt; | 23 | static int alt_num_cnt; |
24 | static struct hash_algo *dfu_hash_algo; | 24 | static struct hash_algo *dfu_hash_algo; |
25 | 25 | ||
26 | bool dfu_reset(void) | 26 | /* |
27 | * The purpose of the dfu_usb_get_reset() function is to | ||
28 | * provide information if after USB_DETACH request | ||
29 | * being sent the dfu-util performed reset of USB | ||
30 | * bus. | ||
31 | * | ||
32 | * Described behaviour is the only way to distinct if | ||
33 | * user has typed -e (detach) or -R (reset) when invoking | ||
34 | * dfu-util command. | ||
35 | * | ||
36 | */ | ||
37 | __weak bool dfu_usb_get_reset(void) | ||
27 | { | 38 | { |
28 | return dfu_reset_request; | 39 | return true; |
29 | } | 40 | } |
30 | 41 | ||
31 | void dfu_trigger_reset() | 42 | bool dfu_detach(void) |
32 | { | 43 | { |
33 | dfu_reset_request = true; | 44 | return dfu_detach_request; |
45 | } | ||
46 | |||
47 | void dfu_trigger_detach(void) | ||
48 | { | ||
49 | dfu_detach_request = true; | ||
50 | } | ||
51 | |||
52 | void dfu_clear_detach(void) | ||
53 | { | ||
54 | dfu_detach_request = false; | ||
34 | } | 55 | } |
35 | 56 | ||
36 | static int dfu_find_alt_num(const char *s) | 57 | static int dfu_find_alt_num(const char *s) |
37 | { | 58 | { |
38 | int i = 0; | 59 | int i = 0; |
39 | 60 | ||
40 | for (; *s; s++) | 61 | for (; *s; s++) |
41 | if (*s == ';') | 62 | if (*s == ';') |
42 | i++; | 63 | i++; |
43 | 64 | ||
44 | return ++i; | 65 | return ++i; |
45 | } | 66 | } |
46 | 67 | ||
47 | int dfu_init_env_entities(char *interface, char *devstr) | 68 | int dfu_init_env_entities(char *interface, char *devstr) |
48 | { | 69 | { |
49 | const char *str_env; | 70 | const char *str_env; |
50 | char *env_bkp; | 71 | char *env_bkp; |
51 | int ret; | 72 | int ret; |
52 | 73 | ||
53 | str_env = getenv("dfu_alt_info"); | 74 | str_env = getenv("dfu_alt_info"); |
54 | if (!str_env) { | 75 | if (!str_env) { |
55 | error("\"dfu_alt_info\" env variable not defined!\n"); | 76 | error("\"dfu_alt_info\" env variable not defined!\n"); |
56 | return -EINVAL; | 77 | return -EINVAL; |
57 | } | 78 | } |
58 | 79 | ||
59 | env_bkp = strdup(str_env); | 80 | env_bkp = strdup(str_env); |
60 | ret = dfu_config_entities(env_bkp, interface, devstr); | 81 | ret = dfu_config_entities(env_bkp, interface, devstr); |
61 | if (ret) { | 82 | if (ret) { |
62 | error("DFU entities configuration failed!\n"); | 83 | error("DFU entities configuration failed!\n"); |
63 | return ret; | 84 | return ret; |
64 | } | 85 | } |
65 | 86 | ||
66 | free(env_bkp); | 87 | free(env_bkp); |
67 | return 0; | 88 | return 0; |
68 | } | 89 | } |
69 | 90 | ||
70 | static unsigned char *dfu_buf; | 91 | static unsigned char *dfu_buf; |
71 | static unsigned long dfu_buf_size = CONFIG_SYS_DFU_DATA_BUF_SIZE; | 92 | static unsigned long dfu_buf_size = CONFIG_SYS_DFU_DATA_BUF_SIZE; |
72 | 93 | ||
73 | unsigned char *dfu_free_buf(void) | 94 | unsigned char *dfu_free_buf(void) |
74 | { | 95 | { |
75 | free(dfu_buf); | 96 | free(dfu_buf); |
76 | dfu_buf = NULL; | 97 | dfu_buf = NULL; |
77 | return dfu_buf; | 98 | return dfu_buf; |
78 | } | 99 | } |
79 | 100 | ||
80 | unsigned long dfu_get_buf_size(void) | 101 | unsigned long dfu_get_buf_size(void) |
81 | { | 102 | { |
82 | return dfu_buf_size; | 103 | return dfu_buf_size; |
83 | } | 104 | } |
84 | 105 | ||
85 | unsigned char *dfu_get_buf(struct dfu_entity *dfu) | 106 | unsigned char *dfu_get_buf(struct dfu_entity *dfu) |
86 | { | 107 | { |
87 | char *s; | 108 | char *s; |
88 | 109 | ||
89 | if (dfu_buf != NULL) | 110 | if (dfu_buf != NULL) |
90 | return dfu_buf; | 111 | return dfu_buf; |
91 | 112 | ||
92 | s = getenv("dfu_bufsiz"); | 113 | s = getenv("dfu_bufsiz"); |
93 | dfu_buf_size = s ? (unsigned long)simple_strtol(s, NULL, 16) : | 114 | dfu_buf_size = s ? (unsigned long)simple_strtol(s, NULL, 16) : |
94 | CONFIG_SYS_DFU_DATA_BUF_SIZE; | 115 | CONFIG_SYS_DFU_DATA_BUF_SIZE; |
95 | if (dfu->max_buf_size && dfu_buf_size > dfu->max_buf_size) | 116 | if (dfu->max_buf_size && dfu_buf_size > dfu->max_buf_size) |
96 | dfu_buf_size = dfu->max_buf_size; | 117 | dfu_buf_size = dfu->max_buf_size; |
97 | 118 | ||
98 | dfu_buf = memalign(CONFIG_SYS_CACHELINE_SIZE, dfu_buf_size); | 119 | dfu_buf = memalign(CONFIG_SYS_CACHELINE_SIZE, dfu_buf_size); |
99 | if (dfu_buf == NULL) | 120 | if (dfu_buf == NULL) |
100 | printf("%s: Could not memalign 0x%lx bytes\n", | 121 | printf("%s: Could not memalign 0x%lx bytes\n", |
101 | __func__, dfu_buf_size); | 122 | __func__, dfu_buf_size); |
102 | 123 | ||
103 | return dfu_buf; | 124 | return dfu_buf; |
104 | } | 125 | } |
105 | 126 | ||
106 | static char *dfu_get_hash_algo(void) | 127 | static char *dfu_get_hash_algo(void) |
107 | { | 128 | { |
108 | char *s; | 129 | char *s; |
109 | 130 | ||
110 | s = getenv("dfu_hash_algo"); | 131 | s = getenv("dfu_hash_algo"); |
111 | if (!s) | 132 | if (!s) |
112 | return NULL; | 133 | return NULL; |
113 | 134 | ||
114 | if (!strcmp(s, "crc32")) { | 135 | if (!strcmp(s, "crc32")) { |
115 | debug("%s: DFU hash method: %s\n", __func__, s); | 136 | debug("%s: DFU hash method: %s\n", __func__, s); |
116 | return s; | 137 | return s; |
117 | } | 138 | } |
118 | 139 | ||
119 | error("DFU hash method: %s not supported!\n", s); | 140 | error("DFU hash method: %s not supported!\n", s); |
120 | return NULL; | 141 | return NULL; |
121 | } | 142 | } |
122 | 143 | ||
123 | static int dfu_write_buffer_drain(struct dfu_entity *dfu) | 144 | static int dfu_write_buffer_drain(struct dfu_entity *dfu) |
124 | { | 145 | { |
125 | long w_size; | 146 | long w_size; |
126 | int ret; | 147 | int ret; |
127 | 148 | ||
128 | /* flush size? */ | 149 | /* flush size? */ |
129 | w_size = dfu->i_buf - dfu->i_buf_start; | 150 | w_size = dfu->i_buf - dfu->i_buf_start; |
130 | if (w_size == 0) | 151 | if (w_size == 0) |
131 | return 0; | 152 | return 0; |
132 | 153 | ||
133 | if (dfu_hash_algo) | 154 | if (dfu_hash_algo) |
134 | dfu_hash_algo->hash_update(dfu_hash_algo, &dfu->crc, | 155 | dfu_hash_algo->hash_update(dfu_hash_algo, &dfu->crc, |
135 | dfu->i_buf_start, w_size, 0); | 156 | dfu->i_buf_start, w_size, 0); |
136 | 157 | ||
137 | ret = dfu->write_medium(dfu, dfu->offset, dfu->i_buf_start, &w_size); | 158 | ret = dfu->write_medium(dfu, dfu->offset, dfu->i_buf_start, &w_size); |
138 | if (ret) | 159 | if (ret) |
139 | debug("%s: Write error!\n", __func__); | 160 | debug("%s: Write error!\n", __func__); |
140 | 161 | ||
141 | /* point back */ | 162 | /* point back */ |
142 | dfu->i_buf = dfu->i_buf_start; | 163 | dfu->i_buf = dfu->i_buf_start; |
143 | 164 | ||
144 | /* update offset */ | 165 | /* update offset */ |
145 | dfu->offset += w_size; | 166 | dfu->offset += w_size; |
146 | 167 | ||
147 | puts("#"); | 168 | puts("#"); |
148 | 169 | ||
149 | return ret; | 170 | return ret; |
150 | } | 171 | } |
151 | 172 | ||
152 | void dfu_write_transaction_cleanup(struct dfu_entity *dfu) | 173 | void dfu_write_transaction_cleanup(struct dfu_entity *dfu) |
153 | { | 174 | { |
154 | /* clear everything */ | 175 | /* clear everything */ |
155 | dfu_free_buf(); | 176 | dfu_free_buf(); |
156 | dfu->crc = 0; | 177 | dfu->crc = 0; |
157 | dfu->offset = 0; | 178 | dfu->offset = 0; |
158 | dfu->i_blk_seq_num = 0; | 179 | dfu->i_blk_seq_num = 0; |
159 | dfu->i_buf_start = dfu_buf; | 180 | dfu->i_buf_start = dfu_buf; |
160 | dfu->i_buf_end = dfu_buf; | 181 | dfu->i_buf_end = dfu_buf; |
161 | dfu->i_buf = dfu->i_buf_start; | 182 | dfu->i_buf = dfu->i_buf_start; |
162 | dfu->inited = 0; | 183 | dfu->inited = 0; |
163 | } | 184 | } |
164 | 185 | ||
165 | int dfu_flush(struct dfu_entity *dfu, void *buf, int size, int blk_seq_num) | 186 | int dfu_flush(struct dfu_entity *dfu, void *buf, int size, int blk_seq_num) |
166 | { | 187 | { |
167 | int ret = 0; | 188 | int ret = 0; |
168 | 189 | ||
169 | ret = dfu_write_buffer_drain(dfu); | 190 | ret = dfu_write_buffer_drain(dfu); |
170 | if (ret) | 191 | if (ret) |
171 | return ret; | 192 | return ret; |
172 | 193 | ||
173 | if (dfu->flush_medium) | 194 | if (dfu->flush_medium) |
174 | ret = dfu->flush_medium(dfu); | 195 | ret = dfu->flush_medium(dfu); |
175 | 196 | ||
176 | if (dfu_hash_algo) | 197 | if (dfu_hash_algo) |
177 | printf("\nDFU complete %s: 0x%08x\n", dfu_hash_algo->name, | 198 | printf("\nDFU complete %s: 0x%08x\n", dfu_hash_algo->name, |
178 | dfu->crc); | 199 | dfu->crc); |
179 | 200 | ||
180 | dfu_write_transaction_cleanup(dfu); | 201 | dfu_write_transaction_cleanup(dfu); |
181 | 202 | ||
182 | return ret; | 203 | return ret; |
183 | } | 204 | } |
184 | 205 | ||
185 | int dfu_write(struct dfu_entity *dfu, void *buf, int size, int blk_seq_num) | 206 | int dfu_write(struct dfu_entity *dfu, void *buf, int size, int blk_seq_num) |
186 | { | 207 | { |
187 | int ret; | 208 | int ret; |
188 | 209 | ||
189 | debug("%s: name: %s buf: 0x%p size: 0x%x p_num: 0x%x offset: 0x%llx bufoffset: 0x%x\n", | 210 | debug("%s: name: %s buf: 0x%p size: 0x%x p_num: 0x%x offset: 0x%llx bufoffset: 0x%x\n", |
190 | __func__, dfu->name, buf, size, blk_seq_num, dfu->offset, | 211 | __func__, dfu->name, buf, size, blk_seq_num, dfu->offset, |
191 | dfu->i_buf - dfu->i_buf_start); | 212 | dfu->i_buf - dfu->i_buf_start); |
192 | 213 | ||
193 | if (!dfu->inited) { | 214 | if (!dfu->inited) { |
194 | /* initial state */ | 215 | /* initial state */ |
195 | dfu->crc = 0; | 216 | dfu->crc = 0; |
196 | dfu->offset = 0; | 217 | dfu->offset = 0; |
197 | dfu->bad_skip = 0; | 218 | dfu->bad_skip = 0; |
198 | dfu->i_blk_seq_num = 0; | 219 | dfu->i_blk_seq_num = 0; |
199 | dfu->i_buf_start = dfu_get_buf(dfu); | 220 | dfu->i_buf_start = dfu_get_buf(dfu); |
200 | if (dfu->i_buf_start == NULL) | 221 | if (dfu->i_buf_start == NULL) |
201 | return -ENOMEM; | 222 | return -ENOMEM; |
202 | dfu->i_buf_end = dfu_get_buf(dfu) + dfu_buf_size; | 223 | dfu->i_buf_end = dfu_get_buf(dfu) + dfu_buf_size; |
203 | dfu->i_buf = dfu->i_buf_start; | 224 | dfu->i_buf = dfu->i_buf_start; |
204 | 225 | ||
205 | dfu->inited = 1; | 226 | dfu->inited = 1; |
206 | } | 227 | } |
207 | 228 | ||
208 | if (dfu->i_blk_seq_num != blk_seq_num) { | 229 | if (dfu->i_blk_seq_num != blk_seq_num) { |
209 | printf("%s: Wrong sequence number! [%d] [%d]\n", | 230 | printf("%s: Wrong sequence number! [%d] [%d]\n", |
210 | __func__, dfu->i_blk_seq_num, blk_seq_num); | 231 | __func__, dfu->i_blk_seq_num, blk_seq_num); |
211 | dfu_write_transaction_cleanup(dfu); | 232 | dfu_write_transaction_cleanup(dfu); |
212 | return -1; | 233 | return -1; |
213 | } | 234 | } |
214 | 235 | ||
215 | /* DFU 1.1 standard says: | 236 | /* DFU 1.1 standard says: |
216 | * The wBlockNum field is a block sequence number. It increments each | 237 | * The wBlockNum field is a block sequence number. It increments each |
217 | * time a block is transferred, wrapping to zero from 65,535. It is used | 238 | * time a block is transferred, wrapping to zero from 65,535. It is used |
218 | * to provide useful context to the DFU loader in the device." | 239 | * to provide useful context to the DFU loader in the device." |
219 | * | 240 | * |
220 | * This means that it's a 16 bit counter that roll-overs at | 241 | * This means that it's a 16 bit counter that roll-overs at |
221 | * 0xffff -> 0x0000. By having a typical 4K transfer block | 242 | * 0xffff -> 0x0000. By having a typical 4K transfer block |
222 | * we roll-over at exactly 256MB. Not very fun to debug. | 243 | * we roll-over at exactly 256MB. Not very fun to debug. |
223 | * | 244 | * |
224 | * Handling rollover, and having an inited variable, | 245 | * Handling rollover, and having an inited variable, |
225 | * makes things work. | 246 | * makes things work. |
226 | */ | 247 | */ |
227 | 248 | ||
228 | /* handle rollover */ | 249 | /* handle rollover */ |
229 | dfu->i_blk_seq_num = (dfu->i_blk_seq_num + 1) & 0xffff; | 250 | dfu->i_blk_seq_num = (dfu->i_blk_seq_num + 1) & 0xffff; |
230 | 251 | ||
231 | /* flush buffer if overflow */ | 252 | /* flush buffer if overflow */ |
232 | if ((dfu->i_buf + size) > dfu->i_buf_end) { | 253 | if ((dfu->i_buf + size) > dfu->i_buf_end) { |
233 | ret = dfu_write_buffer_drain(dfu); | 254 | ret = dfu_write_buffer_drain(dfu); |
234 | if (ret) { | 255 | if (ret) { |
235 | dfu_write_transaction_cleanup(dfu); | 256 | dfu_write_transaction_cleanup(dfu); |
236 | return ret; | 257 | return ret; |
237 | } | 258 | } |
238 | } | 259 | } |
239 | 260 | ||
240 | /* we should be in buffer now (if not then size too large) */ | 261 | /* we should be in buffer now (if not then size too large) */ |
241 | if ((dfu->i_buf + size) > dfu->i_buf_end) { | 262 | if ((dfu->i_buf + size) > dfu->i_buf_end) { |
242 | error("Buffer overflow! (0x%p + 0x%x > 0x%p)\n", dfu->i_buf, | 263 | error("Buffer overflow! (0x%p + 0x%x > 0x%p)\n", dfu->i_buf, |
243 | size, dfu->i_buf_end); | 264 | size, dfu->i_buf_end); |
244 | dfu_write_transaction_cleanup(dfu); | 265 | dfu_write_transaction_cleanup(dfu); |
245 | return -1; | 266 | return -1; |
246 | } | 267 | } |
247 | 268 | ||
248 | memcpy(dfu->i_buf, buf, size); | 269 | memcpy(dfu->i_buf, buf, size); |
249 | dfu->i_buf += size; | 270 | dfu->i_buf += size; |
250 | 271 | ||
251 | /* if end or if buffer full flush */ | 272 | /* if end or if buffer full flush */ |
252 | if (size == 0 || (dfu->i_buf + size) > dfu->i_buf_end) { | 273 | if (size == 0 || (dfu->i_buf + size) > dfu->i_buf_end) { |
253 | ret = dfu_write_buffer_drain(dfu); | 274 | ret = dfu_write_buffer_drain(dfu); |
254 | if (ret) { | 275 | if (ret) { |
255 | dfu_write_transaction_cleanup(dfu); | 276 | dfu_write_transaction_cleanup(dfu); |
256 | return ret; | 277 | return ret; |
257 | } | 278 | } |
258 | } | 279 | } |
259 | 280 | ||
260 | return 0; | 281 | return 0; |
261 | } | 282 | } |
262 | 283 | ||
263 | static int dfu_read_buffer_fill(struct dfu_entity *dfu, void *buf, int size) | 284 | static int dfu_read_buffer_fill(struct dfu_entity *dfu, void *buf, int size) |
264 | { | 285 | { |
265 | long chunk; | 286 | long chunk; |
266 | int ret, readn; | 287 | int ret, readn; |
267 | 288 | ||
268 | readn = 0; | 289 | readn = 0; |
269 | while (size > 0) { | 290 | while (size > 0) { |
270 | /* get chunk that can be read */ | 291 | /* get chunk that can be read */ |
271 | chunk = min(size, dfu->b_left); | 292 | chunk = min(size, dfu->b_left); |
272 | /* consume */ | 293 | /* consume */ |
273 | if (chunk > 0) { | 294 | if (chunk > 0) { |
274 | memcpy(buf, dfu->i_buf, chunk); | 295 | memcpy(buf, dfu->i_buf, chunk); |
275 | if (dfu_hash_algo) | 296 | if (dfu_hash_algo) |
276 | dfu_hash_algo->hash_update(dfu_hash_algo, | 297 | dfu_hash_algo->hash_update(dfu_hash_algo, |
277 | &dfu->crc, buf, | 298 | &dfu->crc, buf, |
278 | chunk, 0); | 299 | chunk, 0); |
279 | 300 | ||
280 | dfu->i_buf += chunk; | 301 | dfu->i_buf += chunk; |
281 | dfu->b_left -= chunk; | 302 | dfu->b_left -= chunk; |
282 | size -= chunk; | 303 | size -= chunk; |
283 | buf += chunk; | 304 | buf += chunk; |
284 | readn += chunk; | 305 | readn += chunk; |
285 | } | 306 | } |
286 | 307 | ||
287 | /* all done */ | 308 | /* all done */ |
288 | if (size > 0) { | 309 | if (size > 0) { |
289 | /* no more to read */ | 310 | /* no more to read */ |
290 | if (dfu->r_left == 0) | 311 | if (dfu->r_left == 0) |
291 | break; | 312 | break; |
292 | 313 | ||
293 | dfu->i_buf = dfu->i_buf_start; | 314 | dfu->i_buf = dfu->i_buf_start; |
294 | dfu->b_left = dfu->i_buf_end - dfu->i_buf_start; | 315 | dfu->b_left = dfu->i_buf_end - dfu->i_buf_start; |
295 | 316 | ||
296 | /* got to read, but buffer is empty */ | 317 | /* got to read, but buffer is empty */ |
297 | if (dfu->b_left > dfu->r_left) | 318 | if (dfu->b_left > dfu->r_left) |
298 | dfu->b_left = dfu->r_left; | 319 | dfu->b_left = dfu->r_left; |
299 | ret = dfu->read_medium(dfu, dfu->offset, dfu->i_buf, | 320 | ret = dfu->read_medium(dfu, dfu->offset, dfu->i_buf, |
300 | &dfu->b_left); | 321 | &dfu->b_left); |
301 | if (ret != 0) { | 322 | if (ret != 0) { |
302 | debug("%s: Read error!\n", __func__); | 323 | debug("%s: Read error!\n", __func__); |
303 | return ret; | 324 | return ret; |
304 | } | 325 | } |
305 | dfu->offset += dfu->b_left; | 326 | dfu->offset += dfu->b_left; |
306 | dfu->r_left -= dfu->b_left; | 327 | dfu->r_left -= dfu->b_left; |
307 | 328 | ||
308 | puts("#"); | 329 | puts("#"); |
309 | } | 330 | } |
310 | } | 331 | } |
311 | 332 | ||
312 | return readn; | 333 | return readn; |
313 | } | 334 | } |
314 | 335 | ||
315 | int dfu_read(struct dfu_entity *dfu, void *buf, int size, int blk_seq_num) | 336 | int dfu_read(struct dfu_entity *dfu, void *buf, int size, int blk_seq_num) |
316 | { | 337 | { |
317 | int ret = 0; | 338 | int ret = 0; |
318 | 339 | ||
319 | debug("%s: name: %s buf: 0x%p size: 0x%x p_num: 0x%x i_buf: 0x%p\n", | 340 | debug("%s: name: %s buf: 0x%p size: 0x%x p_num: 0x%x i_buf: 0x%p\n", |
320 | __func__, dfu->name, buf, size, blk_seq_num, dfu->i_buf); | 341 | __func__, dfu->name, buf, size, blk_seq_num, dfu->i_buf); |
321 | 342 | ||
322 | if (!dfu->inited) { | 343 | if (!dfu->inited) { |
323 | dfu->i_buf_start = dfu_get_buf(dfu); | 344 | dfu->i_buf_start = dfu_get_buf(dfu); |
324 | if (dfu->i_buf_start == NULL) | 345 | if (dfu->i_buf_start == NULL) |
325 | return -ENOMEM; | 346 | return -ENOMEM; |
326 | 347 | ||
327 | dfu->r_left = dfu->get_medium_size(dfu); | 348 | dfu->r_left = dfu->get_medium_size(dfu); |
328 | if (dfu->r_left < 0) | 349 | if (dfu->r_left < 0) |
329 | return dfu->r_left; | 350 | return dfu->r_left; |
330 | switch (dfu->layout) { | 351 | switch (dfu->layout) { |
331 | case DFU_RAW_ADDR: | 352 | case DFU_RAW_ADDR: |
332 | case DFU_RAM_ADDR: | 353 | case DFU_RAM_ADDR: |
333 | break; | 354 | break; |
334 | default: | 355 | default: |
335 | if (dfu->r_left > dfu_buf_size) { | 356 | if (dfu->r_left > dfu_buf_size) { |
336 | printf("%s: File too big for buffer\n", | 357 | printf("%s: File too big for buffer\n", |
337 | __func__); | 358 | __func__); |
338 | return -EOVERFLOW; | 359 | return -EOVERFLOW; |
339 | } | 360 | } |
340 | } | 361 | } |
341 | 362 | ||
342 | debug("%s: %s %ld [B]\n", __func__, dfu->name, dfu->r_left); | 363 | debug("%s: %s %ld [B]\n", __func__, dfu->name, dfu->r_left); |
343 | 364 | ||
344 | dfu->i_blk_seq_num = 0; | 365 | dfu->i_blk_seq_num = 0; |
345 | dfu->crc = 0; | 366 | dfu->crc = 0; |
346 | dfu->offset = 0; | 367 | dfu->offset = 0; |
347 | dfu->i_buf_end = dfu_get_buf(dfu) + dfu_buf_size; | 368 | dfu->i_buf_end = dfu_get_buf(dfu) + dfu_buf_size; |
348 | dfu->i_buf = dfu->i_buf_start; | 369 | dfu->i_buf = dfu->i_buf_start; |
349 | dfu->b_left = 0; | 370 | dfu->b_left = 0; |
350 | 371 | ||
351 | dfu->bad_skip = 0; | 372 | dfu->bad_skip = 0; |
352 | 373 | ||
353 | dfu->inited = 1; | 374 | dfu->inited = 1; |
354 | } | 375 | } |
355 | 376 | ||
356 | if (dfu->i_blk_seq_num != blk_seq_num) { | 377 | if (dfu->i_blk_seq_num != blk_seq_num) { |
357 | printf("%s: Wrong sequence number! [%d] [%d]\n", | 378 | printf("%s: Wrong sequence number! [%d] [%d]\n", |
358 | __func__, dfu->i_blk_seq_num, blk_seq_num); | 379 | __func__, dfu->i_blk_seq_num, blk_seq_num); |
359 | return -1; | 380 | return -1; |
360 | } | 381 | } |
361 | /* handle rollover */ | 382 | /* handle rollover */ |
362 | dfu->i_blk_seq_num = (dfu->i_blk_seq_num + 1) & 0xffff; | 383 | dfu->i_blk_seq_num = (dfu->i_blk_seq_num + 1) & 0xffff; |
363 | 384 | ||
364 | ret = dfu_read_buffer_fill(dfu, buf, size); | 385 | ret = dfu_read_buffer_fill(dfu, buf, size); |
365 | if (ret < 0) { | 386 | if (ret < 0) { |
366 | printf("%s: Failed to fill buffer\n", __func__); | 387 | printf("%s: Failed to fill buffer\n", __func__); |
367 | return -1; | 388 | return -1; |
368 | } | 389 | } |
369 | 390 | ||
370 | if (ret < size) { | 391 | if (ret < size) { |
371 | if (dfu_hash_algo) | 392 | if (dfu_hash_algo) |
372 | debug("%s: %s %s: 0x%x\n", __func__, dfu->name, | 393 | debug("%s: %s %s: 0x%x\n", __func__, dfu->name, |
373 | dfu_hash_algo->name, dfu->crc); | 394 | dfu_hash_algo->name, dfu->crc); |
374 | puts("\nUPLOAD ... done\nCtrl+C to exit ...\n"); | 395 | puts("\nUPLOAD ... done\nCtrl+C to exit ...\n"); |
375 | 396 | ||
376 | dfu_free_buf(); | 397 | dfu_free_buf(); |
377 | dfu->i_blk_seq_num = 0; | 398 | dfu->i_blk_seq_num = 0; |
378 | dfu->crc = 0; | 399 | dfu->crc = 0; |
379 | dfu->offset = 0; | 400 | dfu->offset = 0; |
380 | dfu->i_buf_start = dfu_buf; | 401 | dfu->i_buf_start = dfu_buf; |
381 | dfu->i_buf_end = dfu_buf; | 402 | dfu->i_buf_end = dfu_buf; |
382 | dfu->i_buf = dfu->i_buf_start; | 403 | dfu->i_buf = dfu->i_buf_start; |
383 | dfu->b_left = 0; | 404 | dfu->b_left = 0; |
384 | 405 | ||
385 | dfu->bad_skip = 0; | 406 | dfu->bad_skip = 0; |
386 | 407 | ||
387 | dfu->inited = 0; | 408 | dfu->inited = 0; |
388 | } | 409 | } |
389 | 410 | ||
390 | return ret; | 411 | return ret; |
391 | } | 412 | } |
392 | 413 | ||
393 | static int dfu_fill_entity(struct dfu_entity *dfu, char *s, int alt, | 414 | static int dfu_fill_entity(struct dfu_entity *dfu, char *s, int alt, |
394 | char *interface, char *devstr) | 415 | char *interface, char *devstr) |
395 | { | 416 | { |
396 | char *st; | 417 | char *st; |
397 | 418 | ||
398 | debug("%s: %s interface: %s dev: %s\n", __func__, s, interface, devstr); | 419 | debug("%s: %s interface: %s dev: %s\n", __func__, s, interface, devstr); |
399 | st = strsep(&s, " "); | 420 | st = strsep(&s, " "); |
400 | strcpy(dfu->name, st); | 421 | strcpy(dfu->name, st); |
401 | 422 | ||
402 | dfu->alt = alt; | 423 | dfu->alt = alt; |
403 | dfu->max_buf_size = 0; | 424 | dfu->max_buf_size = 0; |
404 | dfu->free_entity = NULL; | 425 | dfu->free_entity = NULL; |
405 | 426 | ||
406 | /* Specific for mmc device */ | 427 | /* Specific for mmc device */ |
407 | if (strcmp(interface, "mmc") == 0) { | 428 | if (strcmp(interface, "mmc") == 0) { |
408 | if (dfu_fill_entity_mmc(dfu, devstr, s)) | 429 | if (dfu_fill_entity_mmc(dfu, devstr, s)) |
409 | return -1; | 430 | return -1; |
410 | } else if (strcmp(interface, "nand") == 0) { | 431 | } else if (strcmp(interface, "nand") == 0) { |
411 | if (dfu_fill_entity_nand(dfu, devstr, s)) | 432 | if (dfu_fill_entity_nand(dfu, devstr, s)) |
412 | return -1; | 433 | return -1; |
413 | } else if (strcmp(interface, "ram") == 0) { | 434 | } else if (strcmp(interface, "ram") == 0) { |
414 | if (dfu_fill_entity_ram(dfu, devstr, s)) | 435 | if (dfu_fill_entity_ram(dfu, devstr, s)) |
415 | return -1; | 436 | return -1; |
416 | } else if (strcmp(interface, "sf") == 0) { | 437 | } else if (strcmp(interface, "sf") == 0) { |
417 | if (dfu_fill_entity_sf(dfu, devstr, s)) | 438 | if (dfu_fill_entity_sf(dfu, devstr, s)) |
418 | return -1; | 439 | return -1; |
419 | } else { | 440 | } else { |
420 | printf("%s: Device %s not (yet) supported!\n", | 441 | printf("%s: Device %s not (yet) supported!\n", |
421 | __func__, interface); | 442 | __func__, interface); |
422 | return -1; | 443 | return -1; |
423 | } | 444 | } |
424 | 445 | ||
425 | return 0; | 446 | return 0; |
426 | } | 447 | } |
427 | 448 | ||
428 | void dfu_free_entities(void) | 449 | void dfu_free_entities(void) |
429 | { | 450 | { |
430 | struct dfu_entity *dfu, *p, *t = NULL; | 451 | struct dfu_entity *dfu, *p, *t = NULL; |
431 | 452 | ||
432 | list_for_each_entry_safe_reverse(dfu, p, &dfu_list, list) { | 453 | list_for_each_entry_safe_reverse(dfu, p, &dfu_list, list) { |
433 | list_del(&dfu->list); | 454 | list_del(&dfu->list); |
434 | if (dfu->free_entity) | 455 | if (dfu->free_entity) |
435 | dfu->free_entity(dfu); | 456 | dfu->free_entity(dfu); |
436 | t = dfu; | 457 | t = dfu; |
437 | } | 458 | } |
438 | if (t) | 459 | if (t) |
439 | free(t); | 460 | free(t); |
440 | INIT_LIST_HEAD(&dfu_list); | 461 | INIT_LIST_HEAD(&dfu_list); |
441 | 462 | ||
442 | alt_num_cnt = 0; | 463 | alt_num_cnt = 0; |
443 | } | 464 | } |
444 | 465 | ||
445 | int dfu_config_entities(char *env, char *interface, char *devstr) | 466 | int dfu_config_entities(char *env, char *interface, char *devstr) |
446 | { | 467 | { |
447 | struct dfu_entity *dfu; | 468 | struct dfu_entity *dfu; |
448 | int i, ret; | 469 | int i, ret; |
449 | char *s; | 470 | char *s; |
450 | 471 | ||
451 | dfu_alt_num = dfu_find_alt_num(env); | 472 | dfu_alt_num = dfu_find_alt_num(env); |
452 | debug("%s: dfu_alt_num=%d\n", __func__, dfu_alt_num); | 473 | debug("%s: dfu_alt_num=%d\n", __func__, dfu_alt_num); |
453 | 474 | ||
454 | dfu_hash_algo = NULL; | 475 | dfu_hash_algo = NULL; |
455 | s = dfu_get_hash_algo(); | 476 | s = dfu_get_hash_algo(); |
456 | if (s) { | 477 | if (s) { |
457 | ret = hash_lookup_algo(s, &dfu_hash_algo); | 478 | ret = hash_lookup_algo(s, &dfu_hash_algo); |
458 | if (ret) | 479 | if (ret) |
459 | error("Hash algorithm %s not supported\n", s); | 480 | error("Hash algorithm %s not supported\n", s); |
460 | } | 481 | } |
461 | 482 | ||
462 | dfu = calloc(sizeof(*dfu), dfu_alt_num); | 483 | dfu = calloc(sizeof(*dfu), dfu_alt_num); |
463 | if (!dfu) | 484 | if (!dfu) |
464 | return -1; | 485 | return -1; |
465 | for (i = 0; i < dfu_alt_num; i++) { | 486 | for (i = 0; i < dfu_alt_num; i++) { |
466 | 487 | ||
467 | s = strsep(&env, ";"); | 488 | s = strsep(&env, ";"); |
468 | ret = dfu_fill_entity(&dfu[i], s, alt_num_cnt, interface, | 489 | ret = dfu_fill_entity(&dfu[i], s, alt_num_cnt, interface, |
469 | devstr); | 490 | devstr); |
470 | if (ret) | 491 | if (ret) |
471 | return -1; | 492 | return -1; |
472 | 493 | ||
473 | list_add_tail(&dfu[i].list, &dfu_list); | 494 | list_add_tail(&dfu[i].list, &dfu_list); |
474 | alt_num_cnt++; | 495 | alt_num_cnt++; |
475 | } | 496 | } |
476 | 497 | ||
477 | return 0; | 498 | return 0; |
478 | } | 499 | } |
479 | 500 | ||
480 | const char *dfu_get_dev_type(enum dfu_device_type t) | 501 | const char *dfu_get_dev_type(enum dfu_device_type t) |
481 | { | 502 | { |
482 | const char *dev_t[] = {NULL, "eMMC", "OneNAND", "NAND", "RAM" }; | 503 | const char *dev_t[] = {NULL, "eMMC", "OneNAND", "NAND", "RAM" }; |
483 | return dev_t[t]; | 504 | return dev_t[t]; |
484 | } | 505 | } |
485 | 506 | ||
486 | const char *dfu_get_layout(enum dfu_layout l) | 507 | const char *dfu_get_layout(enum dfu_layout l) |
487 | { | 508 | { |
488 | const char *dfu_layout[] = {NULL, "RAW_ADDR", "FAT", "EXT2", | 509 | const char *dfu_layout[] = {NULL, "RAW_ADDR", "FAT", "EXT2", |
489 | "EXT3", "EXT4", "RAM_ADDR" }; | 510 | "EXT3", "EXT4", "RAM_ADDR" }; |
490 | return dfu_layout[l]; | 511 | return dfu_layout[l]; |
491 | } | 512 | } |
492 | 513 | ||
/**
 * dfu_show_entities() - print every registered DFU entity
 *
 * Walks the global dfu_list and prints, for each alt setting, its device
 * type, alt number, name and layout on the console.
 */
void dfu_show_entities(void)
{
	struct dfu_entity *dfu;

	puts("DFU alt settings list:\n");

	list_for_each_entry(dfu, &dfu_list, list) {
		printf("dev: %s alt: %d name: %s layout: %s\n",
		       dfu_get_dev_type(dfu->dev_type), dfu->alt,
		       dfu->name, dfu_get_layout(dfu->layout));
	}
}
505 | 526 | ||
/* Return the number of DFU alt settings parsed from the environment. */
int dfu_get_alt_number(void)
{
	return dfu_alt_num;
}
510 | 531 | ||
511 | struct dfu_entity *dfu_get_entity(int alt) | 532 | struct dfu_entity *dfu_get_entity(int alt) |
512 | { | 533 | { |
513 | struct dfu_entity *dfu; | 534 | struct dfu_entity *dfu; |
514 | 535 | ||
515 | list_for_each_entry(dfu, &dfu_list, list) { | 536 | list_for_each_entry(dfu, &dfu_list, list) { |
516 | if (dfu->alt == alt) | 537 | if (dfu->alt == alt) |
517 | return dfu; | 538 | return dfu; |
518 | } | 539 | } |
519 | 540 | ||
520 | return NULL; | 541 | return NULL; |
521 | } | 542 | } |
522 | 543 | ||
/**
 * dfu_get_alt() - find the alt setting number for a named entity
 * @name:	entity name to search for
 *
 * Return: the alt number of the first matching entity, or -ENODEV when no
 * entity matches.
 *
 * NOTE(review): only strlen(dfu->name) characters are compared, so an
 * entity whose name is a prefix of @name also matches (e.g. "kernel"
 * matches a lookup for "kernel2"). Presumably intentional to tolerate
 * trailing data in @name — confirm before tightening to strcmp().
 */
int dfu_get_alt(char *name)
{
	struct dfu_entity *dfu;

	list_for_each_entry(dfu, &dfu_list, list) {
		if (!strncmp(dfu->name, name, strlen(dfu->name)))
			return dfu->alt;
	}

	return -ENODEV;
}
534 | 555 |
drivers/usb/gadget/atmel_usba_udc.c
1 | /* | 1 | /* |
2 | * Driver for the Atmel USBA high speed USB device controller | 2 | * Driver for the Atmel USBA high speed USB device controller |
3 | * [Original from Linux kernel: drivers/usb/gadget/atmel_usba_udc.c] | 3 | * [Original from Linux kernel: drivers/usb/gadget/atmel_usba_udc.c] |
4 | * | 4 | * |
5 | * Copyright (C) 2005-2013 Atmel Corporation | 5 | * Copyright (C) 2005-2013 Atmel Corporation |
6 | * Bo Shen <voice.shen@atmel.com> | 6 | * Bo Shen <voice.shen@atmel.com> |
7 | * | 7 | * |
8 | * SPDX-License-Identifier: GPL-2.0+ | 8 | * SPDX-License-Identifier: GPL-2.0+ |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <common.h> | 11 | #include <common.h> |
12 | #include <asm/errno.h> | 12 | #include <asm/errno.h> |
13 | #include <asm/gpio.h> | 13 | #include <asm/gpio.h> |
14 | #include <asm/hardware.h> | 14 | #include <asm/hardware.h> |
15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
16 | #include <linux/usb/ch9.h> | 16 | #include <linux/usb/ch9.h> |
17 | #include <linux/usb/gadget.h> | 17 | #include <linux/usb/gadget.h> |
18 | #include <linux/usb/atmel_usba_udc.h> | 18 | #include <linux/usb/atmel_usba_udc.h> |
19 | #include <malloc.h> | 19 | #include <malloc.h> |
20 | #include <usb/lin_gadget_compat.h> | 20 | #include <usb/lin_gadget_compat.h> |
21 | 21 | ||
22 | #include "atmel_usba_udc.h" | 22 | #include "atmel_usba_udc.h" |
23 | 23 | ||
/*
 * vbus_is_present() - report the Vbus state for @udc
 *
 * This configuration has no Vbus sensing, so bus power is reported as
 * always present.
 */
static int vbus_is_present(struct usba_udc *udc)
{
	/* No Vbus detection: Assume always present */
	return 1;
}
29 | 29 | ||
/*
 * next_fifo_transaction() - copy the next chunk of an IN request to the FIFO
 *
 * Writes up to one maxpacket worth of @req's data into @ep's FIFO and
 * marks the packet ready. req->last_transaction is set when this chunk
 * finishes the request; it stays clear when the request ends exactly on a
 * maxpacket boundary and req.zero asks for a terminating zero-length
 * packet.
 */
static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req)
{
	unsigned int transaction_len;

	transaction_len = req->req.length - req->req.actual;
	req->last_transaction = 1;
	if (transaction_len > ep->ep.maxpacket) {
		transaction_len = ep->ep.maxpacket;
		req->last_transaction = 0;
	} else if (transaction_len == ep->ep.maxpacket && req->req.zero) {
		/* A final ZLP is still owed after this full packet. */
		req->last_transaction = 0;
	}

	DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n",
	    ep->ep.name, req, transaction_len,
	    req->last_transaction ? ", done" : "");

	/* Fill the FIFO first, then flag the packet ready to the hardware. */
	memcpy(ep->fifo, req->req.buf + req->req.actual, transaction_len);
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	req->req.actual += transaction_len;
}
51 | 51 | ||
/*
 * submit_request() - start transmitting an IN request on @ep
 *
 * Pushes the first FIFO transaction, then enables the interrupt that
 * drives the rest of the transfer: TX_COMPLETE when that chunk was the
 * last one, or TX_PK_RDY to be notified when the FIFO can take more.
 */
static void submit_request(struct usba_ep *ep, struct usba_request *req)
{
	DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d), dma: %d\n",
	    ep->ep.name, req, req->req.length, req->using_dma);

	req->req.actual = 0;
	req->submitted = 1;

	next_fifo_transaction(ep, req);
	if (req->last_transaction) {
		usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
		usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
	} else {
		usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
		usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
	}
}
69 | 69 | ||
70 | static void submit_next_request(struct usba_ep *ep) | 70 | static void submit_next_request(struct usba_ep *ep) |
71 | { | 71 | { |
72 | struct usba_request *req; | 72 | struct usba_request *req; |
73 | 73 | ||
74 | if (list_empty(&ep->queue)) { | 74 | if (list_empty(&ep->queue)) { |
75 | usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY); | 75 | usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY | USBA_RX_BK_RDY); |
76 | return; | 76 | return; |
77 | } | 77 | } |
78 | 78 | ||
79 | req = list_entry(ep->queue.next, struct usba_request, queue); | 79 | req = list_entry(ep->queue.next, struct usba_request, queue); |
80 | if (!req->submitted) | 80 | if (!req->submitted) |
81 | submit_request(ep, req); | 81 | submit_request(ep, req); |
82 | } | 82 | } |
83 | 83 | ||
/*
 * send_status() - start the status stage of a control transfer
 *
 * Queues a zero-length IN packet on @ep and enables TX_COMPLETE so the
 * control state machine can advance once the host has taken it.
 */
static void send_status(struct usba_udc *udc, struct usba_ep *ep)
{
	ep->state = STATUS_STAGE_IN;
	usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
	usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
}
90 | 90 | ||
/*
 * receive_data() - drain received OUT data from @ep's busy FIFO banks
 *
 * Copies data from each busy bank into the request at the head of the
 * endpoint queue. A short packet or a full request buffer completes the
 * request; for control endpoints a completed OUT data stage is followed
 * by the status stage.
 */
static void receive_data(struct usba_ep *ep)
{
	struct usba_udc *udc = ep->udc;
	struct usba_request *req;
	unsigned long status;
	unsigned int bytecount, nr_busy;
	int is_complete = 0;

	status = usba_ep_readl(ep, STA);
	nr_busy = USBA_BFEXT(BUSY_BANKS, status);

	DBG(DBG_QUEUE, "receive data: nr_busy=%u\n", nr_busy);

	while (nr_busy > 0) {
		if (list_empty(&ep->queue)) {
			/* No request to fill: stop RX interrupts. */
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			break;
		}
		req = list_entry(ep->queue.next,
				 struct usba_request, queue);

		bytecount = USBA_BFEXT(BYTE_COUNT, status);

		/* A short packet always terminates the transfer. */
		if (status & USBA_SHORT_PACKET)
			is_complete = 1;
		if (req->req.actual + bytecount >= req->req.length) {
			is_complete = 1;
			/* Clamp so we never write past the request buffer. */
			bytecount = req->req.length - req->req.actual;
		}

		memcpy(req->req.buf + req->req.actual, ep->fifo, bytecount);
		req->req.actual += bytecount;

		/* Hand this bank back to the hardware. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);

		if (is_complete) {
			DBG(DBG_QUEUE, "%s: request done\n", ep->ep.name);
			req->req.status = 0;
			list_del_init(&req->queue);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			spin_lock(&udc->lock);
			req->req.complete(&ep->ep, &req->req);
			spin_unlock(&udc->lock);
		}

		/* Re-read: more banks may have become busy meanwhile. */
		status = usba_ep_readl(ep, STA);
		nr_busy = USBA_BFEXT(BUSY_BANKS, status);

		if (is_complete && ep_is_control(ep)) {
			/* Control OUT data stage done: enter status stage. */
			send_status(udc, ep);
			break;
		}
	}
}
145 | 145 | ||
/*
 * request_complete() - finish @req and invoke its completion callback
 *
 * The final @status is recorded only if the request is still marked
 * in-progress, so an earlier error or cancel status is preserved.
 */
static void
request_complete(struct usba_ep *ep, struct usba_request *req, int status)
{
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;

	DBG(DBG_GADGET | DBG_REQ, "%s: req %p complete: status %d, actual %u\n",
	    ep->ep.name, req, req->req.status, req->req.actual);

	req->req.complete(&ep->ep, &req->req);
}
157 | 157 | ||
158 | static void | 158 | static void |
159 | request_complete_list(struct usba_ep *ep, struct list_head *list, int status) | 159 | request_complete_list(struct usba_ep *ep, struct list_head *list, int status) |
160 | { | 160 | { |
161 | struct usba_request *req, *tmp_req; | 161 | struct usba_request *req, *tmp_req; |
162 | 162 | ||
163 | list_for_each_entry_safe(req, tmp_req, list, queue) { | 163 | list_for_each_entry_safe(req, tmp_req, list, queue) { |
164 | list_del_init(&req->queue); | 164 | list_del_init(&req->queue); |
165 | request_complete(ep, req, status); | 165 | request_complete(ep, req, status); |
166 | } | 166 | } |
167 | } | 167 | } |
168 | 168 | ||
/*
 * usba_ep_enable() - validate a descriptor and enable a hardware endpoint
 * @_ep:	gadget endpoint to enable
 * @desc:	endpoint descriptor selected by the gadget driver
 *
 * Programs size, direction, transfer type and bank count into EPT_CFG,
 * enables the endpoint and unmasks its interrupt in INT_ENB.
 *
 * Return: 0 on success, -EINVAL for a descriptor this hardware endpoint
 * cannot satisfy.
 */
static int
usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags = 0, ept_cfg, maxpacket;
	unsigned int nr_trans;

	DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);

	/* The low 11 bits of wMaxPacketSize are the packet size itself. */
	maxpacket = usb_endpoint_maxp(desc) & 0x7ff;

	/* ep0 is managed internally; the descriptor must fit this ep. */
	if (((desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK)
	     != ep->index) ||
	    ep->index == 0 ||
	    desc->bDescriptorType != USB_DT_ENDPOINT ||
	    maxpacket == 0 ||
	    maxpacket > ep->fifo_size) {
		DBG(DBG_ERR, "ep_enable: Invalid argument");
		return -EINVAL;
	}

	ep->is_isoc = 0;
	ep->is_in = 0;

	if (maxpacket <= 8)
		ept_cfg = USBA_BF(EPT_SIZE, USBA_EPT_SIZE_8);
	else
		/* LSB is bit 1, not 0 */
		ept_cfg = USBA_BF(EPT_SIZE, fls(maxpacket - 1) - 3);

	DBG(DBG_HW, "%s: EPT_SIZE = %lu (maxpacket = %lu)\n",
	    ep->ep.name, ept_cfg, maxpacket);

	if (usb_endpoint_dir_in(desc)) {
		ep->is_in = 1;
		ept_cfg |= USBA_EPT_DIR_IN;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (!ep->can_isoc) {
			DBG(DBG_ERR, "ep_enable: %s is not isoc capable\n",
			    ep->ep.name);
			return -EINVAL;
		}

		/*
		 * Bits 11:12 specify number of _additional_
		 * transactions per microframe.
		 */
		nr_trans = ((usb_endpoint_maxp(desc) >> 11) & 3) + 1;
		if (nr_trans > 3)
			return -EINVAL;

		ep->is_isoc = 1;
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_ISO);

		/*
		 * Do triple-buffering on high-bandwidth iso endpoints.
		 */
		if (nr_trans > 1 && ep->nr_banks == 3)
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_TRIPLE);
		else
			ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_DOUBLE);
		ept_cfg |= USBA_BF(NB_TRANS, nr_trans);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	case USB_ENDPOINT_XFER_INT:
		ept_cfg |= USBA_BF(EPT_TYPE, USBA_EPT_TYPE_INT);
		ept_cfg |= USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE);
		break;
	}

	spin_lock_irqsave(&ep->udc->lock, flags);

	ep->desc = desc;
	ep->ep.maxpacket = maxpacket;

	/* Commit the configuration, then enable the endpoint. */
	usba_ep_writel(ep, CFG, ept_cfg);
	usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);

	/* Unmask this endpoint's interrupt in the global enable register. */
	usba_writel(udc, INT_ENB,
		    (usba_readl(udc, INT_ENB)
		     | USBA_BF(EPT_INT, 1 << ep->index)));

	spin_unlock_irqrestore(&udc->lock, flags);

	DBG(DBG_HW, "EPT_CFG%d after init: %#08lx\n", ep->index,
	    (unsigned long)usba_ep_readl(ep, CFG));
	DBG(DBG_HW, "INT_ENB after init: %#08lx\n",
	    (unsigned long)usba_readl(udc, INT_ENB));

	return 0;
}
271 | 271 | ||
/*
 * usba_ep_disable() - disable an endpoint and abort its queued requests
 * @_ep:	endpoint to disable
 *
 * Detaches all pending requests, clears the endpoint configuration,
 * masks its interrupt and completes the detached requests with
 * -ESHUTDOWN.
 *
 * Return: 0 on success, -EINVAL if the endpoint was not enabled.
 */
static int usba_ep_disable(struct usb_ep *_ep)
{
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	LIST_HEAD(req_list);
	unsigned long flags = 0;

	DBG(DBG_GADGET, "ep_disable: %s\n", ep->ep.name);

	spin_lock_irqsave(&udc->lock, flags);

	if (!ep->desc) {
		spin_unlock_irqrestore(&udc->lock, flags);
		/* REVISIT because this driver disables endpoints in
		 * reset_all_endpoints() before calling disconnect(),
		 * most gadget drivers would trigger this non-error ...
		 */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN)
			DBG(DBG_ERR, "ep_disable: %s not enabled\n",
			    ep->ep.name);
		return -EINVAL;
	}
	ep->desc = NULL;

	/* Take the whole queue private, then tear the endpoint down. */
	list_splice_init(&ep->queue, &req_list);
	usba_ep_writel(ep, CFG, 0);
	usba_ep_writel(ep, CTL_DIS, USBA_EPT_ENABLE);
	usba_writel(udc, INT_ENB,
		    usba_readl(udc, INT_ENB) &
		    ~USBA_BF(EPT_INT, 1 << ep->index));

	request_complete_list(ep, &req_list, -ESHUTDOWN);

	spin_unlock_irqrestore(&udc->lock, flags);

	return 0;
}
309 | 309 | ||
310 | static struct usb_request * | 310 | static struct usb_request * |
311 | usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) | 311 | usba_ep_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) |
312 | { | 312 | { |
313 | struct usba_request *req; | 313 | struct usba_request *req; |
314 | 314 | ||
315 | DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags); | 315 | DBG(DBG_GADGET, "ep_alloc_request: %p, 0x%x\n", _ep, gfp_flags); |
316 | 316 | ||
317 | req = calloc(1, sizeof(struct usba_request)); | 317 | req = calloc(1, sizeof(struct usba_request)); |
318 | if (!req) | 318 | if (!req) |
319 | return NULL; | 319 | return NULL; |
320 | 320 | ||
321 | INIT_LIST_HEAD(&req->queue); | 321 | INIT_LIST_HEAD(&req->queue); |
322 | 322 | ||
323 | return &req->req; | 323 | return &req->req; |
324 | } | 324 | } |
325 | 325 | ||
326 | static void | 326 | static void |
327 | usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req) | 327 | usba_ep_free_request(struct usb_ep *_ep, struct usb_request *_req) |
328 | { | 328 | { |
329 | struct usba_request *req = to_usba_req(_req); | 329 | struct usba_request *req = to_usba_req(_req); |
330 | 330 | ||
331 | DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req); | 331 | DBG(DBG_GADGET, "ep_free_request: %p, %p\n", _ep, _req); |
332 | 332 | ||
333 | free(req); | 333 | free(req); |
334 | } | 334 | } |
335 | 335 | ||
/*
 * usba_ep_queue() - queue a request for transfer on an endpoint
 * @_ep:	endpoint to queue on
 * @_req:	request to transfer
 * @gfp_flags:	unused in this PIO-only U-Boot port
 *
 * Return: 0 on success, -ESHUTDOWN when the gadget or endpoint is not
 * ready (no driver bound, speed unknown, endpoint disabled).
 */
static int
usba_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct usba_request *req = to_usba_req(_req);
	struct usba_ep *ep = to_usba_ep(_ep);
	struct usba_udc *udc = ep->udc;
	unsigned long flags = 0;
	int ret;

	DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n",
	    ep->ep.name, req, _req->length);

	if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN ||
	    !ep->desc)
		return -ESHUTDOWN;

	req->submitted = 0;
	req->using_dma = 0;
	req->last_transaction = 0;

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* May have received a reset since last time we checked */
	ret = -ESHUTDOWN;
	spin_lock_irqsave(&udc->lock, flags);
	if (ep->desc) {
		list_add_tail(&req->queue, &ep->queue);

		/* IN transfers wait for FIFO space, OUT for received data. */
		if ((!ep_is_control(ep) && ep->is_in) ||
		    (ep_is_control(ep) && (ep->state == DATA_STAGE_IN ||
					   ep->state == STATUS_STAGE_IN)))
			usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
		else
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);

		ret = 0;
	}
	spin_unlock_irqrestore(&udc->lock, flags);

	return ret;
}
378 | 378 | ||
379 | static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) | 379 | static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) |
380 | { | 380 | { |
381 | struct usba_ep *ep = to_usba_ep(_ep); | 381 | struct usba_ep *ep = to_usba_ep(_ep); |
382 | struct usba_request *req = to_usba_req(_req); | 382 | struct usba_request *req = to_usba_req(_req); |
383 | 383 | ||
384 | DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", | 384 | DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", |
385 | ep->ep.name, req); | 385 | ep->ep.name, req); |
386 | 386 | ||
387 | /* | 387 | /* |
388 | * Errors should stop the queue from advancing until the | 388 | * Errors should stop the queue from advancing until the |
389 | * completion function returns. | 389 | * completion function returns. |
390 | */ | 390 | */ |
391 | list_del_init(&req->queue); | 391 | list_del_init(&req->queue); |
392 | 392 | ||
393 | request_complete(ep, req, -ECONNRESET); | 393 | request_complete(ep, req, -ECONNRESET); |
394 | 394 | ||
395 | /* Process the next request if any */ | 395 | /* Process the next request if any */ |
396 | submit_next_request(ep); | 396 | submit_next_request(ep); |
397 | 397 | ||
398 | return 0; | 398 | return 0; |
399 | } | 399 | } |
400 | 400 | ||
401 | static int usba_ep_set_halt(struct usb_ep *_ep, int value) | 401 | static int usba_ep_set_halt(struct usb_ep *_ep, int value) |
402 | { | 402 | { |
403 | struct usba_ep *ep = to_usba_ep(_ep); | 403 | struct usba_ep *ep = to_usba_ep(_ep); |
404 | unsigned long flags; | 404 | unsigned long flags = 0; |
405 | int ret = 0; | 405 | int ret = 0; |
406 | 406 | ||
407 | DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name, | 407 | DBG(DBG_GADGET, "endpoint %s: %s HALT\n", ep->ep.name, |
408 | value ? "set" : "clear"); | 408 | value ? "set" : "clear"); |
409 | 409 | ||
410 | if (!ep->desc) { | 410 | if (!ep->desc) { |
411 | DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n", | 411 | DBG(DBG_ERR, "Attempted to halt uninitialized ep %s\n", |
412 | ep->ep.name); | 412 | ep->ep.name); |
413 | return -ENODEV; | 413 | return -ENODEV; |
414 | } | 414 | } |
415 | 415 | ||
416 | if (ep->is_isoc) { | 416 | if (ep->is_isoc) { |
417 | DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n", | 417 | DBG(DBG_ERR, "Attempted to halt isochronous ep %s\n", |
418 | ep->ep.name); | 418 | ep->ep.name); |
419 | return -ENOTTY; | 419 | return -ENOTTY; |
420 | } | 420 | } |
421 | 421 | ||
422 | spin_lock_irqsave(&udc->lock, flags); | 422 | spin_lock_irqsave(&udc->lock, flags); |
423 | 423 | ||
424 | /* | 424 | /* |
425 | * We can't halt IN endpoints while there are still data to be | 425 | * We can't halt IN endpoints while there are still data to be |
426 | * transferred | 426 | * transferred |
427 | */ | 427 | */ |
428 | if (!list_empty(&ep->queue) || | 428 | if (!list_empty(&ep->queue) || |
429 | ((value && ep->is_in && (usba_ep_readl(ep, STA) & | 429 | ((value && ep->is_in && (usba_ep_readl(ep, STA) & |
430 | USBA_BF(BUSY_BANKS, -1L))))) { | 430 | USBA_BF(BUSY_BANKS, -1L))))) { |
431 | ret = -EAGAIN; | 431 | ret = -EAGAIN; |
432 | } else { | 432 | } else { |
433 | if (value) | 433 | if (value) |
434 | usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); | 434 | usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL); |
435 | else | 435 | else |
436 | usba_ep_writel(ep, CLR_STA, | 436 | usba_ep_writel(ep, CLR_STA, |
437 | USBA_FORCE_STALL | USBA_TOGGLE_CLR); | 437 | USBA_FORCE_STALL | USBA_TOGGLE_CLR); |
438 | usba_ep_readl(ep, STA); | 438 | usba_ep_readl(ep, STA); |
439 | } | 439 | } |
440 | 440 | ||
441 | spin_unlock_irqrestore(&udc->lock, flags); | 441 | spin_unlock_irqrestore(&udc->lock, flags); |
442 | 442 | ||
443 | return ret; | 443 | return ret; |
444 | } | 444 | } |
445 | 445 | ||
446 | static int usba_ep_fifo_status(struct usb_ep *_ep) | 446 | static int usba_ep_fifo_status(struct usb_ep *_ep) |
447 | { | 447 | { |
448 | struct usba_ep *ep = to_usba_ep(_ep); | 448 | struct usba_ep *ep = to_usba_ep(_ep); |
449 | 449 | ||
450 | return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); | 450 | return USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA)); |
451 | } | 451 | } |
452 | 452 | ||
453 | static void usba_ep_fifo_flush(struct usb_ep *_ep) | 453 | static void usba_ep_fifo_flush(struct usb_ep *_ep) |
454 | { | 454 | { |
455 | struct usba_ep *ep = to_usba_ep(_ep); | 455 | struct usba_ep *ep = to_usba_ep(_ep); |
456 | struct usba_udc *udc = ep->udc; | 456 | struct usba_udc *udc = ep->udc; |
457 | 457 | ||
458 | usba_writel(udc, EPT_RST, 1 << ep->index); | 458 | usba_writel(udc, EPT_RST, 1 << ep->index); |
459 | } | 459 | } |
460 | 460 | ||
/* Endpoint operations exposed to the gadget layer (PIO only, no DMA). */
static const struct usb_ep_ops usba_ep_ops = {
	.enable = usba_ep_enable,
	.disable = usba_ep_disable,
	.alloc_request = usba_ep_alloc_request,
	.free_request = usba_ep_free_request,
	.queue = usba_ep_queue,
	.dequeue = usba_ep_dequeue,
	.set_halt = usba_ep_set_halt,
	.fifo_status = usba_ep_fifo_status,
	.fifo_flush = usba_ep_fifo_flush,
};
472 | 472 | ||
473 | static int usba_udc_get_frame(struct usb_gadget *gadget) | 473 | static int usba_udc_get_frame(struct usb_gadget *gadget) |
474 | { | 474 | { |
475 | struct usba_udc *udc = to_usba_udc(gadget); | 475 | struct usba_udc *udc = to_usba_udc(gadget); |
476 | 476 | ||
477 | return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM)); | 477 | return USBA_BFEXT(FRAME_NUMBER, usba_readl(udc, FNUM)); |
478 | } | 478 | } |
479 | 479 | ||
480 | static int usba_udc_wakeup(struct usb_gadget *gadget) | 480 | static int usba_udc_wakeup(struct usb_gadget *gadget) |
481 | { | 481 | { |
482 | struct usba_udc *udc = to_usba_udc(gadget); | 482 | struct usba_udc *udc = to_usba_udc(gadget); |
483 | unsigned long flags; | 483 | unsigned long flags = 0; |
484 | u32 ctrl; | 484 | u32 ctrl; |
485 | int ret = -EINVAL; | 485 | int ret = -EINVAL; |
486 | 486 | ||
487 | spin_lock_irqsave(&udc->lock, flags); | 487 | spin_lock_irqsave(&udc->lock, flags); |
488 | if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { | 488 | if (udc->devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP)) { |
489 | ctrl = usba_readl(udc, CTRL); | 489 | ctrl = usba_readl(udc, CTRL); |
490 | usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP); | 490 | usba_writel(udc, CTRL, ctrl | USBA_REMOTE_WAKE_UP); |
491 | ret = 0; | 491 | ret = 0; |
492 | } | 492 | } |
493 | spin_unlock_irqrestore(&udc->lock, flags); | 493 | spin_unlock_irqrestore(&udc->lock, flags); |
494 | 494 | ||
495 | return ret; | 495 | return ret; |
496 | } | 496 | } |
497 | 497 | ||
498 | static int | 498 | static int |
499 | usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered) | 499 | usba_udc_set_selfpowered(struct usb_gadget *gadget, int is_selfpowered) |
500 | { | 500 | { |
501 | struct usba_udc *udc = to_usba_udc(gadget); | 501 | struct usba_udc *udc = to_usba_udc(gadget); |
502 | unsigned long flags; | 502 | unsigned long flags = 0; |
503 | 503 | ||
504 | spin_lock_irqsave(&udc->lock, flags); | 504 | spin_lock_irqsave(&udc->lock, flags); |
505 | if (is_selfpowered) | 505 | if (is_selfpowered) |
506 | udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED; | 506 | udc->devstatus |= 1 << USB_DEVICE_SELF_POWERED; |
507 | else | 507 | else |
508 | udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED); | 508 | udc->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED); |
509 | spin_unlock_irqrestore(&udc->lock, flags); | 509 | spin_unlock_irqrestore(&udc->lock, flags); |
510 | 510 | ||
511 | return 0; | 511 | return 0; |
512 | } | 512 | } |
513 | 513 | ||
/* Gadget-level operations exported to the gadget core. */
static const struct usb_gadget_ops usba_udc_ops = {
	.get_frame = usba_udc_get_frame,
	.wakeup = usba_udc_wakeup,
	.set_selfpowered = usba_udc_set_selfpowered,
};
519 | 519 | ||
/* Software-only descriptor used to configure ep0 (never sent to the host). */
static struct usb_endpoint_descriptor usba_ep0_desc = {
	.bLength = USB_DT_ENDPOINT_SIZE,
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = 0,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
	.wMaxPacketSize = cpu_to_le16(64),
	/* FIXME: I have no idea what to put here */
	.bInterval = 1,
};
529 | 529 | ||
/*
 * Reset every endpoint and fail all requests still queued on ep0.
 *
 * Called with interrupts disabled and udc->lock held.
 */
static void reset_all_endpoints(struct usba_udc *udc)
{
	struct usba_ep *ep;
	struct usba_request *req, *tmp_req;

	/* Reset all endpoint FIFOs/state machines in one register write. */
	usba_writel(udc, EPT_RST, ~0UL);

	/* Complete everything still queued on ep0 with -ECONNRESET. */
	ep = to_usba_ep(udc->gadget.ep0);
	list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) {
		list_del_init(&req->queue);
		request_complete(ep, req, -ECONNRESET);
	}

	/* NOTE: normally, the next call to the gadget driver is in
	 * charge of disabling endpoints... usually disconnect().
	 * The exception would be entering a high speed test mode.
	 *
	 * FIXME remove this code ... and retest thoroughly.
	 */
	list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
		if (ep->desc) {
			/*
			 * udc->lock is dropped here because
			 * usba_ep_disable() takes it itself.
			 */
			spin_unlock(&udc->lock);
			usba_ep_disable(&ep->ep);
			spin_lock(&udc->lock);
		}
	}
}
560 | 560 | ||
561 | static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex) | 561 | static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex) |
562 | { | 562 | { |
563 | struct usba_ep *ep; | 563 | struct usba_ep *ep; |
564 | 564 | ||
565 | if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) | 565 | if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0) |
566 | return to_usba_ep(udc->gadget.ep0); | 566 | return to_usba_ep(udc->gadget.ep0); |
567 | 567 | ||
568 | list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { | 568 | list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { |
569 | u8 bEndpointAddress; | 569 | u8 bEndpointAddress; |
570 | 570 | ||
571 | if (!ep->desc) | 571 | if (!ep->desc) |
572 | continue; | 572 | continue; |
573 | bEndpointAddress = ep->desc->bEndpointAddress; | 573 | bEndpointAddress = ep->desc->bEndpointAddress; |
574 | if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) | 574 | if ((wIndex ^ bEndpointAddress) & USB_DIR_IN) |
575 | continue; | 575 | continue; |
576 | if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) | 576 | if ((bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) |
577 | == (wIndex & USB_ENDPOINT_NUMBER_MASK)) | 577 | == (wIndex & USB_ENDPOINT_NUMBER_MASK)) |
578 | return ep; | 578 | return ep; |
579 | } | 579 | } |
580 | 580 | ||
581 | return NULL; | 581 | return NULL; |
582 | } | 582 | } |
583 | 583 | ||
/* Called with interrupts disabled and udc->lock held */
/*
 * Stall the endpoint (protocol STALL) and rewind its state machine to
 * wait for the next SETUP packet.
 */
static inline void set_protocol_stall(struct usba_udc *udc, struct usba_ep *ep)
{
	usba_ep_writel(ep, SET_STA, USBA_FORCE_STALL);
	ep->state = WAIT_FOR_SETUP;
}
590 | 590 | ||
591 | static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep) | 591 | static inline int is_stalled(struct usba_udc *udc, struct usba_ep *ep) |
592 | { | 592 | { |
593 | if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL) | 593 | if (usba_ep_readl(ep, STA) & USBA_FORCE_STALL) |
594 | return 1; | 594 | return 1; |
595 | return 0; | 595 | return 0; |
596 | } | 596 | } |
597 | 597 | ||
598 | static inline void set_address(struct usba_udc *udc, unsigned int addr) | 598 | static inline void set_address(struct usba_udc *udc, unsigned int addr) |
599 | { | 599 | { |
600 | u32 regval; | 600 | u32 regval; |
601 | 601 | ||
602 | DBG(DBG_BUS, "setting address %u...\n", addr); | 602 | DBG(DBG_BUS, "setting address %u...\n", addr); |
603 | regval = usba_readl(udc, CTRL); | 603 | regval = usba_readl(udc, CTRL); |
604 | regval = USBA_BFINS(DEV_ADDR, addr, regval); | 604 | regval = USBA_BFINS(DEV_ADDR, addr, regval); |
605 | usba_writel(udc, CTRL, regval); | 605 | usba_writel(udc, CTRL, regval); |
606 | } | 606 | } |
607 | 607 | ||
/*
 * Enter the USB 2.0 high-speed electrical test mode previously latched
 * in udc->test_mode by SET_FEATURE(TEST_MODE).  The selector is encoded
 * in the high byte of wIndex (0x01xx..0x04xx).
 *
 * Returns 0 on success, -EINVAL for an unknown selector.
 */
static int do_test_mode(struct usba_udc *udc)
{
	/* Canonical 53-byte test packet from USB 2.0 spec, 7.1.20. */
	static const char test_packet_buffer[] = {
		/* JKJKJKJK * 9 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/* JJKKJJKK * 8 */
		0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
		/* JJKKJJKK * 8 */
		0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
		/* JJJJJJJKKKKKKK * 8 */
		0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		/* JJJJJJJK * 8 */
		0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
		/* {JKKKKKKK * 10}, JK */
		0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD, 0x7E
	};
	struct usba_ep *ep;
	int test_mode;

	test_mode = udc->test_mode;

	/* Start from a clean slate */
	reset_all_endpoints(udc);

	switch (test_mode) {
	case 0x0100:
		/* Test_J */
		usba_writel(udc, TST, USBA_TST_J_MODE);
		DBG(DBG_ALL, "Entering Test_J mode...\n");
		break;
	case 0x0200:
		/* Test_K */
		usba_writel(udc, TST, USBA_TST_K_MODE);
		DBG(DBG_ALL, "Entering Test_K mode...\n");
		break;
	case 0x0300:
		/*
		 * Test_SE0_NAK: Force high-speed mode and set up ep0
		 * for Bulk IN transfers
		 */
		ep = &udc->usba_ep[0];
		usba_writel(udc, TST,
				USBA_BF(SPEED_CFG, USBA_SPEED_CFG_FORCE_HIGH));
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		/*
		 * NOTE(review): EPT_MAPPED presumably reports whether the
		 * controller could allocate DPRAM for this configuration —
		 * confirm against the USBA datasheet.
		 */
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			DBG(DBG_ALL, "Test_SE0_NAK: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			DBG(DBG_ALL, "Entering Test_SE0_NAK mode...\n");
		}
		break;
	case 0x0400:
		/* Test_Packet */
		ep = &udc->usba_ep[0];
		usba_ep_writel(ep, CFG,
				USBA_BF(EPT_SIZE, USBA_EPT_SIZE_64)
				| USBA_EPT_DIR_IN
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_BULK)
				| USBA_BF(BK_NUMBER, 1));
		if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) {
			set_protocol_stall(udc, ep);
			DBG(DBG_ALL, "Test_Packet: ep0 not mapped\n");
		} else {
			usba_ep_writel(ep, CTL_ENB, USBA_EPT_ENABLE);
			usba_writel(udc, TST, USBA_TST_PKT_MODE);
			/* Preload the FIFO and mark the packet ready. */
			memcpy(ep->fifo, test_packet_buffer,
					sizeof(test_packet_buffer));
			usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
			DBG(DBG_ALL, "Entering Test_Packet mode...\n");
		}
		break;
	default:
		DBG(DBG_ERR, "Invalid test mode: 0x%04x\n", test_mode);
		return -EINVAL;
	}

	return 0;
}
692 | 692 | ||
693 | /* Avoid overly long expressions */ | 693 | /* Avoid overly long expressions */ |
694 | static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq) | 694 | static inline bool feature_is_dev_remote_wakeup(struct usb_ctrlrequest *crq) |
695 | { | 695 | { |
696 | if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP)) | 696 | if (crq->wValue == cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP)) |
697 | return true; | 697 | return true; |
698 | return false; | 698 | return false; |
699 | } | 699 | } |
700 | 700 | ||
701 | static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq) | 701 | static inline bool feature_is_dev_test_mode(struct usb_ctrlrequest *crq) |
702 | { | 702 | { |
703 | if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE)) | 703 | if (crq->wValue == cpu_to_le16(USB_DEVICE_TEST_MODE)) |
704 | return true; | 704 | return true; |
705 | return false; | 705 | return false; |
706 | } | 706 | } |
707 | 707 | ||
708 | static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq) | 708 | static inline bool feature_is_ep_halt(struct usb_ctrlrequest *crq) |
709 | { | 709 | { |
710 | if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT)) | 710 | if (crq->wValue == cpu_to_le16(USB_ENDPOINT_HALT)) |
711 | return true; | 711 | return true; |
712 | return false; | 712 | return false; |
713 | } | 713 | } |
714 | 714 | ||
/*
 * Handle a SETUP packet on ep0.  Standard requests the controller can
 * answer itself (GET_STATUS, CLEAR_FEATURE, SET_FEATURE, SET_ADDRESS)
 * are handled here; everything else is delegated to the gadget
 * driver's setup() callback with udc->lock temporarily dropped.
 *
 * Called with udc->lock held.  Returns 0 or the delegate's return
 * value on success; on a malformed request, stalls ep0 and returns -1.
 */
static int handle_ep0_setup(struct usba_udc *udc, struct usba_ep *ep,
		struct usb_ctrlrequest *crq)
{
	int retval = 0;

	switch (crq->bRequest) {
	case USB_REQ_GET_STATUS: {
		u16 status;

		if (crq->bRequestType == (USB_DIR_IN | USB_RECIP_DEVICE)) {
			status = cpu_to_le16(udc->devstatus);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_INTERFACE)) {
			/* Interface status is always zero per USB 2.0. */
			status = cpu_to_le16(0);
		} else if (crq->bRequestType
				== (USB_DIR_IN | USB_RECIP_ENDPOINT)) {
			struct usba_ep *target;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			status = 0;
			/* Bit 0 of endpoint status = halted. */
			if (is_stalled(udc, target))
				status |= cpu_to_le16(1);
		} else {
			goto delegate;
		}

		/* Write directly to the FIFO. No queueing is done. */
		if (crq->wLength != cpu_to_le16(sizeof(status)))
			goto stall;
		ep->state = DATA_STAGE_IN;
		__raw_writew(status, ep->fifo);
		usba_ep_writel(ep, SET_STA, USBA_TX_PK_RDY);
		break;
	}

	case USB_REQ_CLEAR_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_remote_wakeup(crq))
				udc->devstatus
					&= ~(1 << USB_DEVICE_REMOTE_WAKEUP);
			else
				/* Can't CLEAR_FEATURE TEST_MODE */
				goto stall;
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0) ||
			    !feature_is_ep_halt(crq))
				goto stall;
			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			/* Clear the halt; non-ep0 also resets data toggle. */
			usba_ep_writel(target, CLR_STA, USBA_FORCE_STALL);
			if (target->index != 0)
				usba_ep_writel(target, CLR_STA,
						USBA_TOGGLE_CLR);
		} else {
			goto delegate;
		}

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_FEATURE: {
		if (crq->bRequestType == USB_RECIP_DEVICE) {
			if (feature_is_dev_test_mode(crq)) {
				/*
				 * Test mode is entered only after the
				 * status stage completes (STATUS_STAGE_TEST).
				 */
				send_status(udc, ep);
				ep->state = STATUS_STAGE_TEST;
				udc->test_mode = le16_to_cpu(crq->wIndex);
				return 0;
			} else if (feature_is_dev_remote_wakeup(crq)) {
				udc->devstatus |= 1 << USB_DEVICE_REMOTE_WAKEUP;
			} else {
				goto stall;
			}
		} else if (crq->bRequestType == USB_RECIP_ENDPOINT) {
			struct usba_ep *target;

			if (crq->wLength != cpu_to_le16(0) ||
			    !feature_is_ep_halt(crq))
				goto stall;

			target = get_ep_by_addr(udc, le16_to_cpu(crq->wIndex));
			if (!target)
				goto stall;

			usba_ep_writel(target, SET_STA, USBA_FORCE_STALL);
		} else {
			goto delegate;
		}

		send_status(udc, ep);
		break;
	}

	case USB_REQ_SET_ADDRESS:
		if (crq->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE))
			goto delegate;

		/*
		 * Latch the address now; it is enabled (FADDR_EN) once
		 * the status stage completes in usba_control_irq().
		 */
		set_address(udc, le16_to_cpu(crq->wValue));
		send_status(udc, ep);
		ep->state = STATUS_STAGE_ADDR;
		break;

	default:
delegate:
		spin_unlock(&udc->lock);
		retval = udc->driver->setup(&udc->gadget, crq);
		spin_lock(&udc->lock);
	}

	return retval;

stall:
	DBG(DBG_ALL, "%s: Invalid setup request: %02x.%02x v%04x i%04x l%d\n",
		ep->ep.name, crq->bRequestType, crq->bRequest,
		le16_to_cpu(crq->wValue), le16_to_cpu(crq->wIndex),
		le16_to_cpu(crq->wLength));
	set_protocol_stall(udc, ep);

	return -1;
}
842 | 842 | ||
/*
 * Interrupt handler for the control endpoint.  Reads the endpoint
 * status and control registers and drives the ep0 state machine for
 * the TX-ready, TX-complete, RX-ready and SETUP events, looping
 * (via "restart") until no enabled event remains pending.
 *
 * Called with udc->lock held; the lock is dropped only around the
 * gadget driver's setup() callback.
 */
static void usba_control_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

restart:
	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s [%d]: s/%08x c/%08x\n",
			ep->ep.name, ep->state, epstatus, epctrl);

	/* Peek at the head of the request queue, if any. */
	req = NULL;
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct usba_request, queue);

	/* TX_PK_RDY interrupt enabled and FIFO bank free: push more data. */
	if ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		/*
		 * NOTE(review): unlike the branches below, req is not
		 * checked for NULL here — if this event can occur with an
		 * empty queue, req->submitted dereferences NULL.  Confirm
		 * TX_PK_RDY is only enabled while a request is queued.
		 */
		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			/* Last chunk queued: wait for TX_COMPLETE instead. */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
		}
		goto restart;
	}
	/* An IN transaction (data or status stage) finished on the wire. */
	if ((epstatus & epctrl) & USBA_TX_COMPLETE) {
		usba_ep_writel(ep, CLR_STA, USBA_TX_COMPLETE);

		switch (ep->state) {
		case DATA_STAGE_IN:
			/* IN data sent; expect the OUT status stage next. */
			usba_ep_writel(ep, CTL_ENB, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = STATUS_STAGE_OUT;
			break;
		case STATUS_STAGE_ADDR:
			/* Activate our new address */
			usba_writel(udc, CTRL, (usba_readl(udc, CTRL)
						| USBA_FADDR_EN));
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_IN:
			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
				submit_next_request(ep);
			}
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			break;
		case STATUS_STAGE_TEST:
			/* Status stage of SET_FEATURE(TEST_MODE) done. */
			usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
			ep->state = WAIT_FOR_SETUP;
			if (do_test_mode(udc))
				set_protocol_stall(udc, ep);
			break;
		default:
			DBG(DBG_ALL, "%s: TXCOMP: Invalid endpoint state %d\n",
					ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	/* OUT data (or the OUT status stage) arrived in the FIFO. */
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		switch (ep->state) {
		case STATUS_STAGE_OUT:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, 0);
			}
			ep->state = WAIT_FOR_SETUP;
			break;

		case DATA_STAGE_OUT:
			receive_data(ep);
			break;

		default:
			usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
			usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
			DBG(DBG_ALL, "%s: RXRDY: Invalid endpoint state %d\n",
					ep->ep.name, ep->state);
			set_protocol_stall(udc, ep);
			break;
		}

		goto restart;
	}
	/* A SETUP packet is waiting in the FIFO. */
	if (epstatus & USBA_RX_SETUP) {
		union {
			struct usb_ctrlrequest crq;
			unsigned long data[2];
		} crq;
		unsigned int pkt_len;
		int ret;

		if (ep->state != WAIT_FOR_SETUP) {
			/*
			 * Didn't expect a SETUP packet at this
			 * point. Clean up any pending requests (which
			 * may be successful).
			 */
			int status = -EPROTO;

			/*
			 * RXRDY and TXCOMP are dropped when SETUP
			 * packets arrive. Just pretend we received
			 * the status packet.
			 */
			if (ep->state == STATUS_STAGE_OUT ||
			    ep->state == STATUS_STAGE_IN) {
				usba_ep_writel(ep, CTL_DIS, USBA_RX_BK_RDY);
				status = 0;
			}

			if (req) {
				list_del_init(&req->queue);
				request_complete(ep, req, status);
			}
		}

		/* A well-formed SETUP packet is exactly 8 bytes. */
		pkt_len = USBA_BFEXT(BYTE_COUNT, usba_ep_readl(ep, STA));
		DBG(DBG_HW, "Packet length: %u\n", pkt_len);
		if (pkt_len != sizeof(crq)) {
			DBG(DBG_ALL, "udc: Invalid length %u (expected %zu)\n",
					pkt_len, sizeof(crq));
			set_protocol_stall(udc, ep);
			return;
		}

		DBG(DBG_FIFO, "Copying ctrl request from 0x%p:\n", ep->fifo);
		memcpy(crq.data, ep->fifo, sizeof(crq));

		/* Free up one bank in the FIFO so that we can
		 * generate or receive a reply right away. */
		usba_ep_writel(ep, CLR_STA, USBA_RX_SETUP);

		if (crq.crq.bRequestType & USB_DIR_IN) {
			/*
			 * The USB 2.0 spec states that "if wLength is
			 * zero, there is no data transfer phase."
			 * However, testusb #14 seems to actually
			 * expect a data phase even if wLength = 0...
			 */
			ep->state = DATA_STAGE_IN;
		} else {
			if (crq.crq.wLength != cpu_to_le16(0))
				ep->state = DATA_STAGE_OUT;
			else
				ep->state = STATUS_STAGE_IN;
		}

		/* ep0 requests are handled locally; others delegated. */
		ret = -1;
		if (ep->index == 0) {
			ret = handle_ep0_setup(udc, ep, &crq.crq);
		} else {
			spin_unlock(&udc->lock);
			ret = udc->driver->setup(&udc->gadget, &crq.crq);
			spin_lock(&udc->lock);
		}

		DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n",
			crq.crq.bRequestType, crq.crq.bRequest,
			le16_to_cpu(crq.crq.wLength), ep->state, ret);

		if (ret < 0) {
			/* Let the host know that we failed */
			set_protocol_stall(udc, ep);
		}
	}
}
1024 | 1024 | ||
/*
 * Handle an interrupt for a non-control endpoint: feed queued IN
 * transactions into the TX FIFO and pick up received OUT data.
 */
static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
{
	struct usba_request *req;
	u32 epstatus;
	u32 epctrl;

	epstatus = usba_ep_readl(ep, STA);
	epctrl = usba_ep_readl(ep, CTL);

	DBG(DBG_INT, "%s: interrupt, status: 0x%08x\n", ep->ep.name, epstatus);

	/*
	 * TX path: loop while the TX_PK_RDY interrupt is enabled (CTL bit
	 * set) and a FIFO bank is free (STA bit clear), refilling the FIFO
	 * one transaction at a time.
	 */
	while ((epctrl & USBA_TX_PK_RDY) && !(epstatus & USBA_TX_PK_RDY)) {
		DBG(DBG_BUS, "%s: TX PK ready\n", ep->ep.name);

		if (list_empty(&ep->queue)) {
			/* Nothing left to send: mask further TX interrupts. */
			DBG(DBG_INT, "ep_irq: queue empty\n");
			usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
			return;
		}

		req = list_entry(ep->queue.next, struct usba_request, queue);

		/* Continue an in-flight request, or kick off a new one. */
		if (req->submitted)
			next_fifo_transaction(ep, req);
		else
			submit_request(ep, req);

		if (req->last_transaction) {
			/*
			 * Dequeue and start the next request before
			 * completing this one (completion may run the
			 * gadget driver's callback).
			 */
			list_del_init(&req->queue);
			submit_next_request(ep);
			request_complete(ep, req, 0);
		}

		/* Re-sample: submitting data changes the FIFO state. */
		epstatus = usba_ep_readl(ep, STA);
		epctrl = usba_ep_readl(ep, CTL);
	}

	/* RX path: a bank of OUT data is ready; drain it and release it. */
	if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
		DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
		receive_data(ep);
		usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
	}
}
1068 | 1068 | ||
/*
 * Top-level interrupt handler for the USBA device controller.
 *
 * Processes suspend/wakeup/resume events, dispatches per-endpoint
 * interrupts, and re-initializes EP0 after a bus reset. The udc lock is
 * dropped around every call into the gadget driver's callbacks.
 */
static int usba_udc_irq(struct usba_udc *udc)
{
	u32 status, ep_status;

	spin_lock(&udc->lock);

	status = usba_readl(udc, INT_STA);
	DBG(DBG_INT, "irq, status=%#08x\n", status);

	if (status & USBA_DET_SUSPEND) {
		usba_writel(udc, INT_CLR, USBA_DET_SUSPEND);
		DBG(DBG_BUS, "Suspend detected\n");
		/* Only forward suspend once the bus speed is known. */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
		    udc->driver && udc->driver->suspend) {
			spin_unlock(&udc->lock);
			udc->driver->suspend(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	if (status & USBA_WAKE_UP) {
		usba_writel(udc, INT_CLR, USBA_WAKE_UP);
		DBG(DBG_BUS, "Wake Up CPU detected\n");
	}

	if (status & USBA_END_OF_RESUME) {
		usba_writel(udc, INT_CLR, USBA_END_OF_RESUME);
		DBG(DBG_BUS, "Resume detected\n");
		if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
		    udc->driver && udc->driver->resume) {
			spin_unlock(&udc->lock);
			udc->driver->resume(&udc->gadget);
			spin_lock(&udc->lock);
		}
	}

	/* One bit per endpoint in the EPT_INT field of INT_STA. */
	ep_status = USBA_BFEXT(EPT_INT, status);
	if (ep_status) {
		int i;

		for (i = 0; i < USBA_NR_ENDPOINTS; i++)
			if (ep_status & (1 << i)) {
				if (ep_is_control(&udc->usba_ep[i]))
					usba_control_irq(udc, &udc->usba_ep[i]);
				else
					usba_ep_irq(udc, &udc->usba_ep[i]);
			}
	}

	if (status & USBA_END_OF_RESET) {
		struct usba_ep *ep0;

		usba_writel(udc, INT_CLR, USBA_END_OF_RESET);
		reset_all_endpoints(udc);

		/* A reset while configured means the host disconnected us. */
		if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
		    udc->driver->disconnect) {
			udc->gadget.speed = USB_SPEED_UNKNOWN;
			spin_unlock(&udc->lock);
			udc->driver->disconnect(&udc->gadget);
			spin_lock(&udc->lock);
		}

		/* The hardware reports the speed negotiated during reset. */
		if (status & USBA_HIGH_SPEED)
			udc->gadget.speed = USB_SPEED_HIGH;
		else
			udc->gadget.speed = USB_SPEED_FULL;

		/* Reprogram EP0 as a single-bank control endpoint. */
		ep0 = &udc->usba_ep[0];
		ep0->desc = &usba_ep0_desc;
		ep0->state = WAIT_FOR_SETUP;
		usba_ep_writel(ep0, CFG,
				(USBA_BF(EPT_SIZE, EP0_EPT_SIZE)
				| USBA_BF(EPT_TYPE, USBA_EPT_TYPE_CONTROL)
				| USBA_BF(BK_NUMBER, USBA_BK_NUMBER_ONE)));
		usba_ep_writel(ep0, CTL_ENB,
				USBA_EPT_ENABLE | USBA_RX_SETUP);
		usba_writel(udc, INT_ENB,
				(usba_readl(udc, INT_ENB)
				| USBA_BF(EPT_INT, 1)
				| USBA_DET_SUSPEND
				| USBA_END_OF_RESUME));

		/*
		 * Unclear why we hit this irregularly, e.g. in usbtest,
		 * but it's clearly harmless...
		 */
		if (!(usba_ep_readl(ep0, CFG) & USBA_EPT_MAPPED))
			DBG(DBG_ALL, "ODD: EP0 configuration is invalid!\n");
	}

	spin_unlock(&udc->lock);

	return 0;
}
1164 | 1164 | ||
1165 | static int atmel_usba_start(struct usba_udc *udc) | 1165 | static int atmel_usba_start(struct usba_udc *udc) |
1166 | { | 1166 | { |
1167 | udc->devstatus = 1 << USB_DEVICE_SELF_POWERED; | 1167 | udc->devstatus = 1 << USB_DEVICE_SELF_POWERED; |
1168 | 1168 | ||
1169 | udc->vbus_prev = 0; | 1169 | udc->vbus_prev = 0; |
1170 | 1170 | ||
1171 | /* If Vbus is present, enable the controller and wait for reset */ | 1171 | /* If Vbus is present, enable the controller and wait for reset */ |
1172 | if (vbus_is_present(udc) && udc->vbus_prev == 0) { | 1172 | if (vbus_is_present(udc) && udc->vbus_prev == 0) { |
1173 | usba_writel(udc, CTRL, USBA_ENABLE_MASK); | 1173 | usba_writel(udc, CTRL, USBA_ENABLE_MASK); |
1174 | usba_writel(udc, INT_ENB, USBA_END_OF_RESET); | 1174 | usba_writel(udc, INT_ENB, USBA_END_OF_RESET); |
1175 | } | 1175 | } |
1176 | 1176 | ||
1177 | return 0; | 1177 | return 0; |
1178 | } | 1178 | } |
1179 | 1179 | ||
1180 | static int atmel_usba_stop(struct usba_udc *udc) | 1180 | static int atmel_usba_stop(struct usba_udc *udc) |
1181 | { | 1181 | { |
1182 | udc->gadget.speed = USB_SPEED_UNKNOWN; | 1182 | udc->gadget.speed = USB_SPEED_UNKNOWN; |
1183 | reset_all_endpoints(udc); | 1183 | reset_all_endpoints(udc); |
1184 | 1184 | ||
1185 | /* This will also disable the DP pullup */ | 1185 | /* This will also disable the DP pullup */ |
1186 | usba_writel(udc, CTRL, USBA_DISABLE_MASK); | 1186 | usba_writel(udc, CTRL, USBA_DISABLE_MASK); |
1187 | 1187 | ||
1188 | return 0; | 1188 | return 0; |
1189 | } | 1189 | } |
1190 | 1190 | ||
/*
 * The single USBA device-controller instance. Register and FIFO bases
 * come from the SoC memory map; the endpoint list head refers back to
 * this same object, so it must be a named static.
 */
static struct usba_udc controller = {
	.regs = (unsigned *)ATMEL_BASE_UDPHS,
	.fifo = (unsigned *)ATMEL_BASE_UDPHS_FIFO,
	.gadget = {
		.ops = &usba_udc_ops,
		.ep_list = LIST_HEAD_INIT(controller.gadget.ep_list),
		.speed = USB_SPEED_HIGH,
		.is_dualspeed = 1,
		.name = "atmel_usba_udc",
	},
};
1202 | 1202 | ||
1203 | int usb_gadget_handle_interrupts(void) | 1203 | int usb_gadget_handle_interrupts(void) |
1204 | { | 1204 | { |
1205 | struct usba_udc *udc = &controller; | 1205 | struct usba_udc *udc = &controller; |
1206 | 1206 | ||
1207 | return usba_udc_irq(udc); | 1207 | return usba_udc_irq(udc); |
1208 | } | 1208 | } |
1209 | 1209 | ||
1210 | 1210 | ||
1211 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | 1211 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) |
1212 | { | 1212 | { |
1213 | struct usba_udc *udc = &controller; | 1213 | struct usba_udc *udc = &controller; |
1214 | int ret; | 1214 | int ret; |
1215 | 1215 | ||
1216 | if (!driver || !driver->bind || !driver->setup) { | 1216 | if (!driver || !driver->bind || !driver->setup) { |
1217 | printf("bad paramter\n"); | 1217 | printf("bad paramter\n"); |
1218 | return -EINVAL; | 1218 | return -EINVAL; |
1219 | } | 1219 | } |
1220 | 1220 | ||
1221 | if (udc->driver) { | 1221 | if (udc->driver) { |
1222 | printf("UDC already has a gadget driver\n"); | 1222 | printf("UDC already has a gadget driver\n"); |
1223 | return -EBUSY; | 1223 | return -EBUSY; |
1224 | } | 1224 | } |
1225 | 1225 | ||
1226 | atmel_usba_start(udc); | 1226 | atmel_usba_start(udc); |
1227 | 1227 | ||
1228 | udc->driver = driver; | 1228 | udc->driver = driver; |
1229 | 1229 | ||
1230 | ret = driver->bind(&udc->gadget); | 1230 | ret = driver->bind(&udc->gadget); |
1231 | if (ret) { | 1231 | if (ret) { |
1232 | error("driver->bind() returned %d\n", ret); | 1232 | error("driver->bind() returned %d\n", ret); |
1233 | udc->driver = NULL; | 1233 | udc->driver = NULL; |
1234 | } | 1234 | } |
1235 | 1235 | ||
1236 | return ret; | 1236 | return ret; |
1237 | } | 1237 | } |
1238 | 1238 | ||
1239 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 1239 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) |
1240 | { | 1240 | { |
1241 | struct usba_udc *udc = &controller; | 1241 | struct usba_udc *udc = &controller; |
1242 | 1242 | ||
1243 | if (!driver || !driver->unbind || !driver->disconnect) { | 1243 | if (!driver || !driver->unbind || !driver->disconnect) { |
1244 | error("bad paramter\n"); | 1244 | error("bad paramter\n"); |
1245 | return -EINVAL; | 1245 | return -EINVAL; |
1246 | } | 1246 | } |
1247 | 1247 | ||
1248 | driver->disconnect(&udc->gadget); | 1248 | driver->disconnect(&udc->gadget); |
1249 | driver->unbind(&udc->gadget); | 1249 | driver->unbind(&udc->gadget); |
1250 | udc->driver = NULL; | 1250 | udc->driver = NULL; |
1251 | 1251 | ||
1252 | atmel_usba_stop(udc); | 1252 | atmel_usba_stop(udc); |
1253 | 1253 | ||
1254 | return 0; | 1254 | return 0; |
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | static struct usba_ep *usba_udc_pdata(struct usba_platform_data *pdata, | 1257 | static struct usba_ep *usba_udc_pdata(struct usba_platform_data *pdata, |
1258 | struct usba_udc *udc) | 1258 | struct usba_udc *udc) |
1259 | { | 1259 | { |
1260 | struct usba_ep *eps; | 1260 | struct usba_ep *eps; |
1261 | int i; | 1261 | int i; |
1262 | 1262 | ||
1263 | eps = malloc(sizeof(struct usba_ep) * pdata->num_ep); | 1263 | eps = malloc(sizeof(struct usba_ep) * pdata->num_ep); |
1264 | if (!eps) { | 1264 | if (!eps) { |
1265 | error("failed to alloc eps\n"); | 1265 | error("failed to alloc eps\n"); |
1266 | return NULL; | 1266 | return NULL; |
1267 | } | 1267 | } |
1268 | 1268 | ||
1269 | udc->gadget.ep0 = &eps[0].ep; | 1269 | udc->gadget.ep0 = &eps[0].ep; |
1270 | 1270 | ||
1271 | INIT_LIST_HEAD(&udc->gadget.ep_list); | 1271 | INIT_LIST_HEAD(&udc->gadget.ep_list); |
1272 | INIT_LIST_HEAD(&eps[0].ep.ep_list); | 1272 | INIT_LIST_HEAD(&eps[0].ep.ep_list); |
1273 | 1273 | ||
1274 | for (i = 0; i < pdata->num_ep; i++) { | 1274 | for (i = 0; i < pdata->num_ep; i++) { |
1275 | struct usba_ep *ep = &eps[i]; | 1275 | struct usba_ep *ep = &eps[i]; |
1276 | 1276 | ||
1277 | ep->ep_regs = udc->regs + USBA_EPT_BASE(i); | 1277 | ep->ep_regs = udc->regs + USBA_EPT_BASE(i); |
1278 | ep->dma_regs = udc->regs + USBA_DMA_BASE(i); | 1278 | ep->dma_regs = udc->regs + USBA_DMA_BASE(i); |
1279 | ep->fifo = udc->fifo + USBA_FIFO_BASE(i); | 1279 | ep->fifo = udc->fifo + USBA_FIFO_BASE(i); |
1280 | ep->ep.ops = &usba_ep_ops; | 1280 | ep->ep.ops = &usba_ep_ops; |
1281 | ep->ep.name = pdata->ep[i].name; | 1281 | ep->ep.name = pdata->ep[i].name; |
1282 | ep->ep.maxpacket = pdata->ep[i].fifo_size; | 1282 | ep->ep.maxpacket = pdata->ep[i].fifo_size; |
1283 | ep->fifo_size = ep->ep.maxpacket; | 1283 | ep->fifo_size = ep->ep.maxpacket; |
1284 | ep->udc = udc; | 1284 | ep->udc = udc; |
1285 | INIT_LIST_HEAD(&ep->queue); | 1285 | INIT_LIST_HEAD(&ep->queue); |
1286 | ep->nr_banks = pdata->ep[i].nr_banks; | 1286 | ep->nr_banks = pdata->ep[i].nr_banks; |
1287 | ep->index = pdata->ep[i].index; | 1287 | ep->index = pdata->ep[i].index; |
1288 | ep->can_dma = pdata->ep[i].can_dma; | 1288 | ep->can_dma = pdata->ep[i].can_dma; |
1289 | ep->can_isoc = pdata->ep[i].can_isoc; | 1289 | ep->can_isoc = pdata->ep[i].can_isoc; |
1290 | if (i) | 1290 | if (i) |
1291 | list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); | 1291 | list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list); |
1292 | }; | 1292 | }; |
1293 | 1293 | ||
1294 | return eps; | 1294 | return eps; |
1295 | } | 1295 | } |
1296 | 1296 | ||
1297 | int usba_udc_probe(struct usba_platform_data *pdata) | 1297 | int usba_udc_probe(struct usba_platform_data *pdata) |
1298 | { | 1298 | { |
1299 | struct usba_udc *udc; | 1299 | struct usba_udc *udc; |
1300 | 1300 | ||
1301 | udc = &controller; | 1301 | udc = &controller; |
1302 | 1302 | ||
1303 | udc->usba_ep = usba_udc_pdata(pdata, udc); | 1303 | udc->usba_ep = usba_udc_pdata(pdata, udc); |
1304 | 1304 | ||
1305 | return 0; | 1305 | return 0; |
1306 | } | 1306 | } |
1307 | 1307 |
drivers/usb/gadget/ci_udc.c
1 | /* | 1 | /* |
2 | * Copyright 2011, Marvell Semiconductor Inc. | 2 | * Copyright 2011, Marvell Semiconductor Inc. |
3 | * Lei Wen <leiwen@marvell.com> | 3 | * Lei Wen <leiwen@marvell.com> |
4 | * | 4 | * |
5 | * SPDX-License-Identifier: GPL-2.0+ | 5 | * SPDX-License-Identifier: GPL-2.0+ |
6 | * | 6 | * |
7 | * Back ported to the 8xx platform (from the 8260 platform) by | 7 | * Back ported to the 8xx platform (from the 8260 platform) by |
8 | * Murray.Jensen@cmst.csiro.au, 27-Jan-01. | 8 | * Murray.Jensen@cmst.csiro.au, 27-Jan-01. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include <common.h> | 11 | #include <common.h> |
12 | #include <command.h> | 12 | #include <command.h> |
13 | #include <config.h> | 13 | #include <config.h> |
14 | #include <net.h> | 14 | #include <net.h> |
15 | #include <malloc.h> | 15 | #include <malloc.h> |
16 | #include <asm/byteorder.h> | 16 | #include <asm/byteorder.h> |
17 | #include <asm/errno.h> | 17 | #include <asm/errno.h> |
18 | #include <asm/io.h> | 18 | #include <asm/io.h> |
19 | #include <asm/unaligned.h> | 19 | #include <asm/unaligned.h> |
20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
21 | #include <linux/usb/ch9.h> | 21 | #include <linux/usb/ch9.h> |
22 | #include <linux/usb/gadget.h> | 22 | #include <linux/usb/gadget.h> |
23 | #include <usb/ci_udc.h> | 23 | #include <usb/ci_udc.h> |
24 | #include "../host/ehci.h" | 24 | #include "../host/ehci.h" |
25 | #include "ci_udc.h" | 25 | #include "ci_udc.h" |
26 | 26 | ||
/*
 * Check if the system has too long cachelines. If the cachelines are
 * longer then 128b, the driver will not be able flush/invalidate data
 * cache over separate QH entries. We use 128b because one QH entry is
 * 64b long and there are always two QH list entries for each endpoint.
 */
#if ARCH_DMA_MINALIGN > 128
#error This driver can not work on systems with caches longer than 128b
#endif

/*
 * Every QTD must be individually aligned, since we can program any
 * QTD's address into HW. Cache flushing requires ARCH_DMA_MINALIGN,
 * and the USB HW requires 32-byte alignment. Align to both:
 */
#define ILIST_ALIGN roundup(ARCH_DMA_MINALIGN, 32)
/* Each QTD is this size */
#define ILIST_ENT_RAW_SZ sizeof(struct ept_queue_item)
/*
 * Align the size of the QTD too, so we can add this value to each
 * QTD's address to get another aligned address.
 */
#define ILIST_ENT_SZ roundup(ILIST_ENT_RAW_SZ, ILIST_ALIGN)
/* For each endpoint, we need 2 QTDs, one for each of IN and OUT */
#define ILIST_SZ (NUM_ENDPOINTS * 2 * ILIST_ENT_SZ)
52 | 52 | ||
/* Debug tracing: DBG() compiles away entirely unless DEBUG is defined. */
#ifndef DEBUG
#define DBG(x...) do {} while (0)
#else
#define DBG(x...) printf(x)
/* Map a standard USB request code to its name, for debug output. */
static const char *reqname(unsigned r)
{
	switch (r) {
	case USB_REQ_GET_STATUS: return "GET_STATUS";
	case USB_REQ_CLEAR_FEATURE: return "CLEAR_FEATURE";
	case USB_REQ_SET_FEATURE: return "SET_FEATURE";
	case USB_REQ_SET_ADDRESS: return "SET_ADDRESS";
	case USB_REQ_GET_DESCRIPTOR: return "GET_DESCRIPTOR";
	case USB_REQ_SET_DESCRIPTOR: return "SET_DESCRIPTOR";
	case USB_REQ_GET_CONFIGURATION: return "GET_CONFIGURATION";
	case USB_REQ_SET_CONFIGURATION: return "SET_CONFIGURATION";
	case USB_REQ_GET_INTERFACE: return "GET_INTERFACE";
	case USB_REQ_SET_INTERFACE: return "SET_INTERFACE";
	default: return "*UNKNOWN*";
	}
}
#endif
74 | 74 | ||
/*
 * Descriptor for endpoint 0. Non-const: presumably the direction bit in
 * bEndpointAddress is toggled at runtime during control transfers —
 * TODO confirm against the setup-handling code.
 */
static struct usb_endpoint_descriptor ep0_desc = {
	.bLength = sizeof(struct usb_endpoint_descriptor),
	.bDescriptorType = USB_DT_ENDPOINT,
	.bEndpointAddress = USB_DIR_IN,
	.bmAttributes = USB_ENDPOINT_XFER_CONTROL,
};
81 | 81 | ||
/* Forward declarations for the ops tables defined just below. */
static int ci_pullup(struct usb_gadget *gadget, int is_on);
static int ci_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc);
static int ci_ep_disable(struct usb_ep *ep);
static int ci_ep_queue(struct usb_ep *ep,
		struct usb_request *req, gfp_t gfp_flags);
static struct usb_request *
ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags);
static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *_req);

/* Gadget-level operations: only pullup control is implemented. */
static struct usb_gadget_ops ci_udc_ops = {
	.pullup = ci_pullup,
};

/* Per-endpoint operations exposed to the gadget framework. */
static struct usb_ep_ops ci_ep_ops = {
	.enable         = ci_ep_enable,
	.disable        = ci_ep_disable,
	.queue          = ci_ep_queue,
	.alloc_request  = ci_ep_alloc_request,
	.free_request   = ci_ep_free_request,
};
103 | 103 | ||
/*
 * Init values for USB endpoints. Entry [0] is EP0; entry [1] is a
 * template that presumably seeds every non-zero endpoint ("ep-" name,
 * 512-byte high-speed bulk maxpacket) — confirm against the init code.
 */
static const struct usb_ep ci_ep_init[2] = {
	[0] = {	/* EP 0 */
		.maxpacket	= 64,
		.name		= "ep0",
		.ops		= &ci_ep_ops,
	},
	[1] = {	/* EP 1..n */
		.maxpacket	= 512,
		.name		= "ep-",
		.ops		= &ci_ep_ops,
	},
};
117 | 117 | ||
/* The single ChipIdea UDC instance used throughout this driver. */
static struct ci_drv controller = {
	.gadget	= {
		.name	= "ci_udc",
		.ops	= &ci_udc_ops,
		.is_dualspeed = 1,
	},
};
125 | 125 | ||
126 | /** | 126 | /** |
127 | * ci_get_qh() - return queue head for endpoint | 127 | * ci_get_qh() - return queue head for endpoint |
128 | * @ep_num: Endpoint number | 128 | * @ep_num: Endpoint number |
129 | * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) | 129 | * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) |
130 | * | 130 | * |
131 | * This function returns the QH associated with particular endpoint | 131 | * This function returns the QH associated with particular endpoint |
132 | * and it's direction. | 132 | * and it's direction. |
133 | */ | 133 | */ |
134 | static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in) | 134 | static struct ept_queue_head *ci_get_qh(int ep_num, int dir_in) |
135 | { | 135 | { |
136 | return &controller.epts[(ep_num * 2) + dir_in]; | 136 | return &controller.epts[(ep_num * 2) + dir_in]; |
137 | } | 137 | } |
138 | 138 | ||
139 | /** | 139 | /** |
140 | * ci_get_qtd() - return queue item for endpoint | 140 | * ci_get_qtd() - return queue item for endpoint |
141 | * @ep_num: Endpoint number | 141 | * @ep_num: Endpoint number |
142 | * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) | 142 | * @dir_in: Direction of the endpoint (IN = 1, OUT = 0) |
143 | * | 143 | * |
144 | * This function returns the QH associated with particular endpoint | 144 | * This function returns the QH associated with particular endpoint |
145 | * and it's direction. | 145 | * and it's direction. |
146 | */ | 146 | */ |
147 | static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) | 147 | static struct ept_queue_item *ci_get_qtd(int ep_num, int dir_in) |
148 | { | 148 | { |
149 | int index = (ep_num * 2) + dir_in; | 149 | int index = (ep_num * 2) + dir_in; |
150 | uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ); | 150 | uint8_t *imem = controller.items_mem + (index * ILIST_ENT_SZ); |
151 | return (struct ept_queue_item *)imem; | 151 | return (struct ept_queue_item *)imem; |
152 | } | 152 | } |
153 | 153 | ||
154 | /** | 154 | /** |
155 | * ci_flush_qh - flush cache over queue head | 155 | * ci_flush_qh - flush cache over queue head |
156 | * @ep_num: Endpoint number | 156 | * @ep_num: Endpoint number |
157 | * | 157 | * |
158 | * This function flushes cache over QH for particular endpoint. | 158 | * This function flushes cache over QH for particular endpoint. |
159 | */ | 159 | */ |
160 | static void ci_flush_qh(int ep_num) | 160 | static void ci_flush_qh(int ep_num) |
161 | { | 161 | { |
162 | struct ept_queue_head *head = ci_get_qh(ep_num, 0); | 162 | struct ept_queue_head *head = ci_get_qh(ep_num, 0); |
163 | const uint32_t start = (uint32_t)head; | 163 | const uint32_t start = (uint32_t)head; |
164 | const uint32_t end = start + 2 * sizeof(*head); | 164 | const uint32_t end = start + 2 * sizeof(*head); |
165 | 165 | ||
166 | flush_dcache_range(start, end); | 166 | flush_dcache_range(start, end); |
167 | } | 167 | } |
168 | 168 | ||
169 | /** | 169 | /** |
170 | * ci_invalidate_qh - invalidate cache over queue head | 170 | * ci_invalidate_qh - invalidate cache over queue head |
171 | * @ep_num: Endpoint number | 171 | * @ep_num: Endpoint number |
172 | * | 172 | * |
173 | * This function invalidates cache over QH for particular endpoint. | 173 | * This function invalidates cache over QH for particular endpoint. |
174 | */ | 174 | */ |
175 | static void ci_invalidate_qh(int ep_num) | 175 | static void ci_invalidate_qh(int ep_num) |
176 | { | 176 | { |
177 | struct ept_queue_head *head = ci_get_qh(ep_num, 0); | 177 | struct ept_queue_head *head = ci_get_qh(ep_num, 0); |
178 | uint32_t start = (uint32_t)head; | 178 | uint32_t start = (uint32_t)head; |
179 | uint32_t end = start + 2 * sizeof(*head); | 179 | uint32_t end = start + 2 * sizeof(*head); |
180 | 180 | ||
181 | invalidate_dcache_range(start, end); | 181 | invalidate_dcache_range(start, end); |
182 | } | 182 | } |
183 | 183 | ||
184 | /** | 184 | /** |
185 | * ci_flush_qtd - flush cache over queue item | 185 | * ci_flush_qtd - flush cache over queue item |
186 | * @ep_num: Endpoint number | 186 | * @ep_num: Endpoint number |
187 | * | 187 | * |
188 | * This function flushes cache over qTD pair for particular endpoint. | 188 | * This function flushes cache over qTD pair for particular endpoint. |
189 | */ | 189 | */ |
190 | static void ci_flush_qtd(int ep_num) | 190 | static void ci_flush_qtd(int ep_num) |
191 | { | 191 | { |
192 | struct ept_queue_item *item = ci_get_qtd(ep_num, 0); | 192 | struct ept_queue_item *item = ci_get_qtd(ep_num, 0); |
193 | const uint32_t start = (uint32_t)item; | 193 | const uint32_t start = (uint32_t)item; |
194 | const uint32_t end = start + 2 * ILIST_ENT_SZ; | 194 | const uint32_t end = start + 2 * ILIST_ENT_SZ; |
195 | 195 | ||
196 | flush_dcache_range(start, end); | 196 | flush_dcache_range(start, end); |
197 | } | 197 | } |
198 | 198 | ||
199 | /** | 199 | /** |
200 | * ci_invalidate_qtd - invalidate cache over queue item | 200 | * ci_invalidate_qtd - invalidate cache over queue item |
201 | * @ep_num: Endpoint number | 201 | * @ep_num: Endpoint number |
202 | * | 202 | * |
203 | * This function invalidates cache over qTD pair for particular endpoint. | 203 | * This function invalidates cache over qTD pair for particular endpoint. |
204 | */ | 204 | */ |
205 | static void ci_invalidate_qtd(int ep_num) | 205 | static void ci_invalidate_qtd(int ep_num) |
206 | { | 206 | { |
207 | struct ept_queue_item *item = ci_get_qtd(ep_num, 0); | 207 | struct ept_queue_item *item = ci_get_qtd(ep_num, 0); |
208 | const uint32_t start = (uint32_t)item; | 208 | const uint32_t start = (uint32_t)item; |
209 | const uint32_t end = start + 2 * ILIST_ENT_SZ; | 209 | const uint32_t end = start + 2 * ILIST_ENT_SZ; |
210 | 210 | ||
211 | invalidate_dcache_range(start, end); | 211 | invalidate_dcache_range(start, end); |
212 | } | 212 | } |
213 | 213 | ||
214 | static struct usb_request * | 214 | static struct usb_request * |
215 | ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags) | 215 | ci_ep_alloc_request(struct usb_ep *ep, unsigned int gfp_flags) |
216 | { | 216 | { |
217 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); | 217 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); |
218 | int num; | 218 | int num; |
219 | struct ci_req *ci_req; | 219 | struct ci_req *ci_req; |
220 | 220 | ||
221 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 221 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
222 | if (num == 0 && controller.ep0_req) | 222 | if (num == 0 && controller.ep0_req) |
223 | return &controller.ep0_req->req; | 223 | return &controller.ep0_req->req; |
224 | 224 | ||
225 | ci_req = calloc(1, sizeof(*ci_req)); | 225 | ci_req = calloc(1, sizeof(*ci_req)); |
226 | if (!ci_req) | 226 | if (!ci_req) |
227 | return NULL; | 227 | return NULL; |
228 | 228 | ||
229 | INIT_LIST_HEAD(&ci_req->queue); | 229 | INIT_LIST_HEAD(&ci_req->queue); |
230 | 230 | ||
231 | if (num == 0) | 231 | if (num == 0) |
232 | controller.ep0_req = ci_req; | 232 | controller.ep0_req = ci_req; |
233 | 233 | ||
234 | return &ci_req->req; | 234 | return &ci_req->req; |
235 | } | 235 | } |
236 | 236 | ||
237 | static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req) | 237 | static void ci_ep_free_request(struct usb_ep *ep, struct usb_request *req) |
238 | { | 238 | { |
239 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); | 239 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); |
240 | struct ci_req *ci_req = container_of(req, struct ci_req, req); | 240 | struct ci_req *ci_req = container_of(req, struct ci_req, req); |
241 | int num; | 241 | int num; |
242 | 242 | ||
243 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 243 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
244 | if (num == 0) { | 244 | if (num == 0) { |
245 | if (!controller.ep0_req) | 245 | if (!controller.ep0_req) |
246 | return; | 246 | return; |
247 | controller.ep0_req = 0; | 247 | controller.ep0_req = 0; |
248 | } | 248 | } |
249 | 249 | ||
250 | if (ci_req->b_buf) | 250 | if (ci_req->b_buf) |
251 | free(ci_req->b_buf); | 251 | free(ci_req->b_buf); |
252 | free(ci_req); | 252 | free(ci_req); |
253 | } | 253 | } |
254 | 254 | ||
/*
 * ep_enable - enable one hardware endpoint in a given direction
 * @num:	endpoint number
 * @in:		non-zero for the IN (TX) direction, zero for OUT (RX)
 * @maxpacket:	max packet size to program into the queue head
 *
 * Sets the enable/reset/bulk-type bits in the endpoint control register
 * and, for non-control endpoints, programs the QH config word. The QH is
 * flushed to memory before the register write so the controller reads
 * the updated configuration.
 */
static void ep_enable(int num, int in, int maxpacket)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n;

	n = readl(&udc->epctrl[num]);
	if (in)
		n |= (CTRL_TXE | CTRL_TXR | CTRL_TXT_BULK);
	else
		n |= (CTRL_RXE | CTRL_RXR | CTRL_RXT_BULK);

	if (num != 0) {
		struct ept_queue_head *head = ci_get_qh(num, in);

		/* Program max packet size and the ZLT bit into the QH */
		head->config = CONFIG_MAX_PKT(maxpacket) | CONFIG_ZLT;
		ci_flush_qh(num);
	}
	writel(n, &udc->epctrl[num]);
}
274 | 274 | ||
275 | static int ci_ep_enable(struct usb_ep *ep, | 275 | static int ci_ep_enable(struct usb_ep *ep, |
276 | const struct usb_endpoint_descriptor *desc) | 276 | const struct usb_endpoint_descriptor *desc) |
277 | { | 277 | { |
278 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); | 278 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); |
279 | int num, in; | 279 | int num, in; |
280 | num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 280 | num = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
281 | in = (desc->bEndpointAddress & USB_DIR_IN) != 0; | 281 | in = (desc->bEndpointAddress & USB_DIR_IN) != 0; |
282 | ci_ep->desc = desc; | 282 | ci_ep->desc = desc; |
283 | 283 | ||
284 | if (num) { | 284 | if (num) { |
285 | int max = get_unaligned_le16(&desc->wMaxPacketSize); | 285 | int max = get_unaligned_le16(&desc->wMaxPacketSize); |
286 | 286 | ||
287 | if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL)) | 287 | if ((max > 64) && (controller.gadget.speed == USB_SPEED_FULL)) |
288 | max = 64; | 288 | max = 64; |
289 | if (ep->maxpacket != max) { | 289 | if (ep->maxpacket != max) { |
290 | DBG("%s: from %d to %d\n", __func__, | 290 | DBG("%s: from %d to %d\n", __func__, |
291 | ep->maxpacket, max); | 291 | ep->maxpacket, max); |
292 | ep->maxpacket = max; | 292 | ep->maxpacket = max; |
293 | } | 293 | } |
294 | } | 294 | } |
295 | ep_enable(num, in, ep->maxpacket); | 295 | ep_enable(num, in, ep->maxpacket); |
296 | DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket); | 296 | DBG("%s: num=%d maxpacket=%d\n", __func__, num, ep->maxpacket); |
297 | return 0; | 297 | return 0; |
298 | } | 298 | } |
299 | 299 | ||
/*
 * ci_ep_disable - mark an endpoint as unconfigured
 *
 * Only clears the cached descriptor pointer; no hardware registers are
 * touched here. Always returns 0.
 */
static int ci_ep_disable(struct usb_ep *ep)
{
	struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep);

	ci_ep->desc = NULL;
	return 0;
}
307 | 307 | ||
308 | static int ci_bounce(struct ci_req *ci_req, int in) | 308 | static int ci_bounce(struct ci_req *ci_req, int in) |
309 | { | 309 | { |
310 | struct usb_request *req = &ci_req->req; | 310 | struct usb_request *req = &ci_req->req; |
311 | uint32_t addr = (uint32_t)req->buf; | 311 | uint32_t addr = (uint32_t)req->buf; |
312 | uint32_t hwaddr; | 312 | uint32_t hwaddr; |
313 | uint32_t aligned_used_len; | 313 | uint32_t aligned_used_len; |
314 | 314 | ||
315 | /* Input buffer address is not aligned. */ | 315 | /* Input buffer address is not aligned. */ |
316 | if (addr & (ARCH_DMA_MINALIGN - 1)) | 316 | if (addr & (ARCH_DMA_MINALIGN - 1)) |
317 | goto align; | 317 | goto align; |
318 | 318 | ||
319 | /* Input buffer length is not aligned. */ | 319 | /* Input buffer length is not aligned. */ |
320 | if (req->length & (ARCH_DMA_MINALIGN - 1)) | 320 | if (req->length & (ARCH_DMA_MINALIGN - 1)) |
321 | goto align; | 321 | goto align; |
322 | 322 | ||
323 | /* The buffer is well aligned, only flush cache. */ | 323 | /* The buffer is well aligned, only flush cache. */ |
324 | ci_req->hw_len = req->length; | 324 | ci_req->hw_len = req->length; |
325 | ci_req->hw_buf = req->buf; | 325 | ci_req->hw_buf = req->buf; |
326 | goto flush; | 326 | goto flush; |
327 | 327 | ||
328 | align: | 328 | align: |
329 | if (ci_req->b_buf && req->length > ci_req->b_len) { | 329 | if (ci_req->b_buf && req->length > ci_req->b_len) { |
330 | free(ci_req->b_buf); | 330 | free(ci_req->b_buf); |
331 | ci_req->b_buf = 0; | 331 | ci_req->b_buf = 0; |
332 | } | 332 | } |
333 | if (!ci_req->b_buf) { | 333 | if (!ci_req->b_buf) { |
334 | ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN); | 334 | ci_req->b_len = roundup(req->length, ARCH_DMA_MINALIGN); |
335 | ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len); | 335 | ci_req->b_buf = memalign(ARCH_DMA_MINALIGN, ci_req->b_len); |
336 | if (!ci_req->b_buf) | 336 | if (!ci_req->b_buf) |
337 | return -ENOMEM; | 337 | return -ENOMEM; |
338 | } | 338 | } |
339 | ci_req->hw_len = ci_req->b_len; | 339 | ci_req->hw_len = ci_req->b_len; |
340 | ci_req->hw_buf = ci_req->b_buf; | 340 | ci_req->hw_buf = ci_req->b_buf; |
341 | 341 | ||
342 | if (in) | 342 | if (in) |
343 | memcpy(ci_req->hw_buf, req->buf, req->length); | 343 | memcpy(ci_req->hw_buf, req->buf, req->length); |
344 | 344 | ||
345 | flush: | 345 | flush: |
346 | hwaddr = (uint32_t)ci_req->hw_buf; | 346 | hwaddr = (uint32_t)ci_req->hw_buf; |
347 | aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN); | 347 | aligned_used_len = roundup(req->length, ARCH_DMA_MINALIGN); |
348 | flush_dcache_range(hwaddr, hwaddr + aligned_used_len); | 348 | flush_dcache_range(hwaddr, hwaddr + aligned_used_len); |
349 | 349 | ||
350 | return 0; | 350 | return 0; |
351 | } | 351 | } |
352 | 352 | ||
353 | static void ci_debounce(struct ci_req *ci_req, int in) | 353 | static void ci_debounce(struct ci_req *ci_req, int in) |
354 | { | 354 | { |
355 | struct usb_request *req = &ci_req->req; | 355 | struct usb_request *req = &ci_req->req; |
356 | uint32_t addr = (uint32_t)req->buf; | 356 | uint32_t addr = (uint32_t)req->buf; |
357 | uint32_t hwaddr = (uint32_t)ci_req->hw_buf; | 357 | uint32_t hwaddr = (uint32_t)ci_req->hw_buf; |
358 | uint32_t aligned_used_len; | 358 | uint32_t aligned_used_len; |
359 | 359 | ||
360 | if (in) | 360 | if (in) |
361 | return; | 361 | return; |
362 | 362 | ||
363 | aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN); | 363 | aligned_used_len = roundup(req->actual, ARCH_DMA_MINALIGN); |
364 | invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len); | 364 | invalidate_dcache_range(hwaddr, hwaddr + aligned_used_len); |
365 | 365 | ||
366 | if (addr == hwaddr) | 366 | if (addr == hwaddr) |
367 | return; /* not a bounce */ | 367 | return; /* not a bounce */ |
368 | 368 | ||
369 | memcpy(req->buf, ci_req->hw_buf, req->actual); | 369 | memcpy(req->buf, ci_req->hw_buf, req->actual); |
370 | } | 370 | } |
371 | 371 | ||
/**
 * ci_ep_submit_next_request - prime the head request of an ep's queue
 * @ci_ep: endpoint whose first queued request should be started
 *
 * Builds the qTD and queue head for the request at the head of the
 * endpoint's queue, flushes both to memory, and primes the endpoint so
 * the controller starts the transfer. The caller must ensure no request
 * is currently primed on this endpoint.
 */
static void ci_ep_submit_next_request(struct ci_ep *ci_ep)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_item *item;
	struct ept_queue_head *head;
	int bit, num, len, in;
	struct ci_req *ci_req;

	ci_ep->req_primed = true;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	head = ci_get_qh(num, in);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	len = ci_req->req.length;

	/* page0 holds the buffer address; page1..4 span the next 4KiB pages */
	item->info = INFO_BYTES(len) | INFO_ACTIVE;
	item->page0 = (uint32_t)ci_req->hw_buf;
	item->page1 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x1000;
	item->page2 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x2000;
	item->page3 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x3000;
	item->page4 = ((uint32_t)ci_req->hw_buf & 0xfffff000) + 0x4000;

	head->next = (unsigned) item;
	head->info = 0;

	/*
	 * When sending the data for an IN transaction, the attached host
	 * knows that all data for the IN is sent when one of the following
	 * occurs:
	 * a) A zero-length packet is transmitted.
	 * b) A packet with length that isn't an exact multiple of the ep's
	 *    maxpacket is transmitted.
	 * c) Enough data is sent to exactly fill the host's maximum expected
	 *    IN transaction size.
	 *
	 * One of these conditions MUST apply at the end of an IN transaction,
	 * or the transaction will not be considered complete by the host. If
	 * none of (a)..(c) already applies, then we must force (a) to apply
	 * by explicitly sending an extra zero-length packet.
	 */
	/*  IN   !a       !b                             !c */
	if (in && len && !(len % ci_ep->ep.maxpacket) && ci_req->req.zero) {
		/*
		 * Each endpoint has 2 items allocated, even though typically
		 * only 1 is used at a time since either an IN or an OUT but
		 * not both is queued. For an IN transaction, item currently
		 * points at the second of these items, so we know that we
		 * can use the other to transmit the extra zero-length packet.
		 */
		struct ept_queue_item *other_item = ci_get_qtd(num, 0);
		item->next = (unsigned)other_item;
		item = other_item;
		item->info = INFO_ACTIVE;
	}

	/* Terminate the qTD chain and interrupt on completion */
	item->next = TERMINATE;
	item->info |= INFO_IOC;

	/* Descriptors must reach RAM before the endpoint is primed */
	ci_flush_qtd(num);

	DBG("ept%d %s queue len %x, req %p, buffer %p\n",
	    num, in ? "in" : "out", len, ci_req, ci_req->hw_buf);
	ci_flush_qh(num);

	if (in)
		bit = EPT_TX(num);
	else
		bit = EPT_RX(num);

	writel(bit, &udc->epprime);
}
446 | 446 | ||
447 | static int ci_ep_queue(struct usb_ep *ep, | 447 | static int ci_ep_queue(struct usb_ep *ep, |
448 | struct usb_request *req, gfp_t gfp_flags) | 448 | struct usb_request *req, gfp_t gfp_flags) |
449 | { | 449 | { |
450 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); | 450 | struct ci_ep *ci_ep = container_of(ep, struct ci_ep, ep); |
451 | struct ci_req *ci_req = container_of(req, struct ci_req, req); | 451 | struct ci_req *ci_req = container_of(req, struct ci_req, req); |
452 | int in, ret; | 452 | int in, ret; |
453 | int __maybe_unused num; | 453 | int __maybe_unused num; |
454 | 454 | ||
455 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; | 455 | num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; |
456 | in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; | 456 | in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0; |
457 | 457 | ||
458 | if (!num && ci_ep->req_primed) { | 458 | if (!num && ci_ep->req_primed) { |
459 | /* | 459 | /* |
460 | * The flipping of ep0 between IN and OUT relies on | 460 | * The flipping of ep0 between IN and OUT relies on |
461 | * ci_ep_queue consuming the current IN/OUT setting | 461 | * ci_ep_queue consuming the current IN/OUT setting |
462 | * immediately. If this is deferred to a later point when the | 462 | * immediately. If this is deferred to a later point when the |
463 | * req is pulled out of ci_req->queue, then the IN/OUT setting | 463 | * req is pulled out of ci_req->queue, then the IN/OUT setting |
464 | * may have been changed since the req was queued, and state | 464 | * may have been changed since the req was queued, and state |
465 | * will get out of sync. This condition doesn't occur today, | 465 | * will get out of sync. This condition doesn't occur today, |
466 | * but could if bugs were introduced later, and this error | 466 | * but could if bugs were introduced later, and this error |
467 | * check will save a lot of debugging time. | 467 | * check will save a lot of debugging time. |
468 | */ | 468 | */ |
469 | printf("%s: ep0 transaction already in progress\n", __func__); | 469 | printf("%s: ep0 transaction already in progress\n", __func__); |
470 | return -EPROTO; | 470 | return -EPROTO; |
471 | } | 471 | } |
472 | 472 | ||
473 | ret = ci_bounce(ci_req, in); | 473 | ret = ci_bounce(ci_req, in); |
474 | if (ret) | 474 | if (ret) |
475 | return ret; | 475 | return ret; |
476 | 476 | ||
477 | DBG("ept%d %s pre-queue req %p, buffer %p\n", | 477 | DBG("ept%d %s pre-queue req %p, buffer %p\n", |
478 | num, in ? "in" : "out", ci_req, ci_req->hw_buf); | 478 | num, in ? "in" : "out", ci_req, ci_req->hw_buf); |
479 | list_add_tail(&ci_req->queue, &ci_ep->queue); | 479 | list_add_tail(&ci_req->queue, &ci_ep->queue); |
480 | 480 | ||
481 | if (!ci_ep->req_primed) | 481 | if (!ci_ep->req_primed) |
482 | ci_ep_submit_next_request(ci_ep); | 482 | ci_ep_submit_next_request(ci_ep); |
483 | 483 | ||
484 | return 0; | 484 | return 0; |
485 | } | 485 | } |
486 | 486 | ||
487 | static void flip_ep0_direction(void) | 487 | static void flip_ep0_direction(void) |
488 | { | 488 | { |
489 | if (ep0_desc.bEndpointAddress == USB_DIR_IN) { | 489 | if (ep0_desc.bEndpointAddress == USB_DIR_IN) { |
490 | DBG("%s: Flipping ep0 to OUT\n", __func__); | 490 | DBG("%s: Flipping ep0 to OUT\n", __func__); |
491 | ep0_desc.bEndpointAddress = 0; | 491 | ep0_desc.bEndpointAddress = 0; |
492 | } else { | 492 | } else { |
493 | DBG("%s: Flipping ep0 to IN\n", __func__); | 493 | DBG("%s: Flipping ep0 to IN\n", __func__); |
494 | ep0_desc.bEndpointAddress = USB_DIR_IN; | 494 | ep0_desc.bEndpointAddress = USB_DIR_IN; |
495 | } | 495 | } |
496 | } | 496 | } |
497 | 497 | ||
/**
 * handle_ep_complete - finish the transfer at the head of an ep's queue
 * @ci_ep: endpoint whose primed transfer has completed
 *
 * Reads back the completed qTD, pops the finished request off the
 * queue, primes the next queued request (if any) before invoking the
 * completion callback, and drives the ep0 Data -> Status stage
 * transition.
 */
static void handle_ep_complete(struct ci_ep *ci_ep)
{
	struct ept_queue_item *item;
	int num, in, len;
	struct ci_req *ci_req;

	num = ci_ep->desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
	in = (ci_ep->desc->bEndpointAddress & USB_DIR_IN) != 0;
	item = ci_get_qtd(num, in);
	ci_invalidate_qtd(num);

	/* Residual byte count is in info[30:16]; info[7:0] are error bits */
	len = (item->info >> 16) & 0x7fff;
	if (item->info & 0xff)
		printf("EP%d/%s FAIL info=%x pg0=%x\n",
		       num, in ? "in" : "out", item->info, item->page0);

	ci_req = list_first_entry(&ci_ep->queue, struct ci_req, queue);
	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	/* Keep the pipe busy: prime the next request before the callback */
	if (!list_empty(&ci_ep->queue))
		ci_ep_submit_next_request(ci_ep);

	ci_req->req.actual = ci_req->req.length - len;
	ci_debounce(ci_req, in);

	DBG("ept%d %s req %p, complete %x\n",
	    num, in ? "in" : "out", ci_req, len);
	if (num != 0 || controller.ep0_data_phase)
		ci_req->req.complete(&ci_ep->ep, &ci_req->req);
	if (num == 0 && controller.ep0_data_phase) {
		/*
		 * Data Stage is complete, so flip ep0 dir for Status Stage,
		 * which always transfers a packet in the opposite direction.
		 */
		DBG("%s: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
		/* Queue the zero-length Status Stage packet on ep0 */
		ci_req->req.length = 0;
		usb_ep_queue(&ci_ep->ep, &ci_req->req, 0);
	}
}
540 | 540 | ||
/* Pack bRequestType and bRequest into one value for switch dispatch */
#define SETUP(type, request) (((type) << 8) | (request))
542 | 542 | ||
/**
 * handle_setup - process a SETUP packet received on ep0
 *
 * Copies the setup data out of the ep0-OUT queue head, acknowledges the
 * setup status bit, sets the ep0 direction for the following Data/Status
 * stages, handles a few standard requests in the driver itself
 * (CLEAR_FEATURE on an endpoint, SET_ADDRESS, device GET_STATUS) and
 * forwards everything else to the gadget driver, stalling ep0 if the
 * request remains unhandled.
 */
static void handle_setup(void)
{
	struct ci_ep *ci_ep = &controller.ep[0];
	struct ci_req *ci_req;
	struct usb_request *req;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	struct ept_queue_head *head;
	struct usb_ctrlrequest r;
	int status = 0;
	int num, in, _num, _in, i;
	char *buf;

	ci_req = controller.ep0_req;
	req = &ci_req->req;
	head = ci_get_qh(0, 0);	/* EP0 OUT */

	ci_invalidate_qh(0);
	memcpy(&r, head->setup_data, sizeof(struct usb_ctrlrequest));
	/* Ack the setup packet (status register differs with HOSTPC layout) */
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(EPT_RX(0), &udc->epsetupstat);
#else
	writel(EPT_RX(0), &udc->epstat);
#endif
	DBG("handle setup %s, %x, %x index %x value %x length %x\n",
	    reqname(r.bRequest), r.bRequestType, r.bRequest, r.wIndex,
	    r.wValue, r.wLength);

	/* Set EP0 dir for Data Stage based on Setup Stage data */
	if (r.bRequestType & USB_DIR_IN) {
		DBG("%s: Set ep0 to IN for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = USB_DIR_IN;
	} else {
		DBG("%s: Set ep0 to OUT for Data Stage\n", __func__);
		ep0_desc.bEndpointAddress = 0;
	}
	if (r.wLength) {
		controller.ep0_data_phase = true;
	} else {
		/* 0 length -> no Data Stage. Flip dir for Status Stage */
		DBG("%s: 0 length: flip ep0 dir for Status Stage\n", __func__);
		flip_ep0_direction();
		controller.ep0_data_phase = false;
	}

	/* The setup packet consumed the primed ep0 request */
	list_del_init(&ci_req->queue);
	ci_ep->req_primed = false;

	switch (SETUP(r.bRequestType, r.bRequest)) {
	case SETUP(USB_RECIP_ENDPOINT, USB_REQ_CLEAR_FEATURE):
		_num = r.wIndex & 15;
		_in = !!(r.wIndex & 0x80);

		if ((r.wValue == 0) && (r.wLength == 0)) {
			req->length = 0;
			/* Find the matching endpoint and re-enable it */
			for (i = 0; i < NUM_ENDPOINTS; i++) {
				struct ci_ep *ep = &controller.ep[i];

				if (!ep->desc)
					continue;
				num = ep->desc->bEndpointAddress
						& USB_ENDPOINT_NUMBER_MASK;
				in = (ep->desc->bEndpointAddress
						& USB_DIR_IN) != 0;
				if ((num == _num) && (in == _in)) {
					ep_enable(num, in, ep->ep.maxpacket);
					usb_ep_queue(controller.gadget.ep0,
							req, 0);
					break;
				}
			}
		}
		return;

	case SETUP(USB_RECIP_DEVICE, USB_REQ_SET_ADDRESS):
		/*
		 * write address delayed (will take effect
		 * after the next IN txn)
		 */
		writel((r.wValue << 25) | (1 << 24), &udc->devaddr);
		req->length = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;

	case SETUP(USB_DIR_IN | USB_RECIP_DEVICE, USB_REQ_GET_STATUS):
		/* Report the device as self-powered */
		req->length = 2;
		buf = (char *)req->buf;
		buf[0] = 1 << USB_DEVICE_SELF_POWERED;
		buf[1] = 0;
		usb_ep_queue(controller.gadget.ep0, req, 0);
		return;
	}
	/* pass request up to the gadget driver */
	if (controller.driver)
		status = controller.driver->setup(&controller.gadget, &r);
	else
		status = -ENODEV;

	if (!status)
		return;
	/* Nobody handled the request: stall ep0 */
	DBG("STALL reqname %s type %x value %x, index %x\n",
	    reqname(r.bRequest), r.bRequestType, r.wValue, r.wIndex);
	writel((1<<16) | (1 << 0), &udc->epctrl[0]);
}
646 | 646 | ||
/*
 * stop_activity - quiesce endpoint state (called on USB bus reset)
 *
 * Acknowledges all pending completion/setup/status bits, flushes every
 * endpoint FIFO, disables all non-control endpoints and rewrites each
 * configured endpoint's queue head.
 */
static void stop_activity(void)
{
	int i, num, in;
	struct ept_queue_head *head;
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	writel(readl(&udc->epcomp), &udc->epcomp);
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
	writel(readl(&udc->epsetupstat), &udc->epsetupstat);
#endif
	writel(readl(&udc->epstat), &udc->epstat);
	writel(0xffffffff, &udc->epflush);

	/* error out any pending reqs */
	for (i = 0; i < NUM_ENDPOINTS; i++) {
		/* ep0's control register is left untouched */
		if (i != 0)
			writel(0, &udc->epctrl[i]);
		if (controller.ep[i].desc) {
			num = controller.ep[i].desc->bEndpointAddress
				& USB_ENDPOINT_NUMBER_MASK;
			in = (controller.ep[i].desc->bEndpointAddress
				& USB_DIR_IN) != 0;
			head = ci_get_qh(num, in);
			head->info = INFO_ACTIVE;
			ci_flush_qh(num);
		}
	}
}
674 | 674 | ||
/*
 * Device-mode interrupt service routine.  Samples and acknowledges
 * USBSTS, then handles (in order): bus reset, suspend, port change
 * (speed renegotiation) and endpoint setup/completion events.
 */
void udc_irq(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	unsigned n = readl(&udc->usbsts);
	writel(n, &udc->usbsts);	/* write-1-to-clear the sampled bits */
	int bit, i, num, in;

	/* Only the interrupt sources we handle below */
	n &= (STS_SLI | STS_URI | STS_PCI | STS_UI | STS_UEI);
	if (n == 0)
		return;

	if (n & STS_URI) {
		DBG("-- reset --\n");
		stop_activity();
	}
	if (n & STS_SLI)
		DBG("-- suspend --\n");

	if (n & STS_PCI) {
		/* Port change: derive negotiated speed, clamp EP maxpacket */
		int max = 64;
		int speed = USB_SPEED_FULL;

#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		bit = (readl(&udc->hostpc1_devlc) >> 25) & 3;
#else
		bit = (readl(&udc->portsc) >> 26) & 3;
#endif
		DBG("-- portchange %x %s\n", bit, (bit == 2) ? "High" : "Full");
		if (bit == 2) {
			speed = USB_SPEED_HIGH;
			max = 512;
		}
		controller.gadget.speed = speed;
		/* EP0 (i == 0) keeps its fixed control maxpacket */
		for (i = 1; i < NUM_ENDPOINTS; i++) {
			if (controller.ep[i].ep.maxpacket > max)
				controller.ep[i].ep.maxpacket = max;
		}
	}

	if (n & STS_UEI)
		printf("<UEI %x>\n", readl(&udc->epcomp));

	if ((n & STS_UI) || (n & STS_UEI)) {
#ifdef CONFIG_CI_UDC_HAS_HOSTPC
		n = readl(&udc->epsetupstat);
#else
		n = readl(&udc->epstat);
#endif
		/* Setup packet pending on EP0 OUT */
		if (n & EPT_RX(0))
			handle_setup();

		/* Ack completion bits, then dispatch per-endpoint */
		n = readl(&udc->epcomp);
		if (n != 0)
			writel(n, &udc->epcomp);

		for (i = 0; i < NUM_ENDPOINTS && n; i++) {
			if (controller.ep[i].desc) {
				num = controller.ep[i].desc->bEndpointAddress
					& USB_ENDPOINT_NUMBER_MASK;
				in = (controller.ep[i].desc->bEndpointAddress
					& USB_DIR_IN) != 0;
				bit = (in) ? EPT_TX(num) : EPT_RX(num);
				if (n & bit)
					handle_ep_complete(&controller.ep[i]);
			}
		}
	}
}
743 | 743 | ||
744 | int usb_gadget_handle_interrupts(void) | 744 | int usb_gadget_handle_interrupts(void) |
745 | { | 745 | { |
746 | u32 value; | 746 | u32 value; |
747 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; | 747 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; |
748 | 748 | ||
749 | value = readl(&udc->usbsts); | 749 | value = readl(&udc->usbsts); |
750 | if (value) | 750 | if (value) |
751 | udc_irq(); | 751 | udc_irq(); |
752 | 752 | ||
753 | return value; | 753 | return value; |
754 | } | 754 | } |
755 | 755 | ||
/*
 * Drop off the USB bus: stop all endpoint activity, clear the RUN bit
 * (releasing the D+ pullup) and notify the bound gadget driver.
 */
void udc_disconnect(void)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	/* disable pullup */
	stop_activity();
	writel(USBCMD_FS2, &udc->usbcmd);
	/* give the host time to observe the disconnect */
	udelay(800);
	if (controller.driver)
		controller.driver->disconnect(&controller.gadget);
}
766 | 766 | ||
/*
 * Gadget pullup control.  When @is_on is non-zero: reset the
 * controller, program the endpoint queue-head list address, select
 * device mode and set RUN (which enables the D+ pullup so the host
 * enumerates us).  When zero: disconnect from the bus.
 * Always returns 0.
 */
static int ci_pullup(struct usb_gadget *gadget, int is_on)
{
	struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor;
	if (is_on) {
		/* RESET */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RST, &udc->usbcmd);
		udelay(200);

		/* point the controller at the QH list set up in probe */
		writel((unsigned)controller.epts, &udc->epinitaddr);

		/* select DEVICE mode */
		writel(USBMODE_DEVICE, &udc->usbmode);

		/* flush any stale primed endpoints */
		writel(0xffffffff, &udc->epflush);

		/* Turn on the USB connection by enabling the pullup resistor */
		writel(USBCMD_ITC(MICRO_8FRAME) | USBCMD_RUN, &udc->usbcmd);
	} else {
		udc_disconnect();
	}

	return 0;
}
790 | 790 | ||
/*
 * Allocate and initialize controller state: the 4096-byte-aligned
 * endpoint queue-head (QH) list, the transfer-item memory, the
 * software endpoint array and the preallocated EP0 request.
 *
 * Returns 0 on success or -ENOMEM; on failure all partial
 * allocations are freed.
 */
static int ci_udc_probe(void)
{
	struct ept_queue_head *head;
	int i;

	/* two QHs (OUT + IN) per hardware endpoint */
	const int num = 2 * NUM_ENDPOINTS;

	const int eplist_min_align = 4096;
	const int eplist_align = roundup(eplist_min_align, ARCH_DMA_MINALIGN);
	const int eplist_raw_sz = num * sizeof(struct ept_queue_head);
	const int eplist_sz = roundup(eplist_raw_sz, ARCH_DMA_MINALIGN);

	/* The QH list must be aligned to 4096 bytes. */
	controller.epts = memalign(eplist_align, eplist_sz);
	if (!controller.epts)
		return -ENOMEM;
	memset(controller.epts, 0, eplist_sz);

	controller.items_mem = memalign(ILIST_ALIGN, ILIST_SZ);
	if (!controller.items_mem) {
		free(controller.epts);
		return -ENOMEM;
	}
	memset(controller.items_mem, 0, ILIST_SZ);

	for (i = 0; i < 2 * NUM_ENDPOINTS; i++) {
		/*
		 * Configure QH for each endpoint. The structure of the QH list
		 * is such that each two subsequent fields, N and N+1 where N is
		 * even, in the QH list represent QH for one endpoint. The Nth
		 * entry represents OUT configuration and the N+1th entry does
		 * represent IN configuration of the endpoint.
		 */
		head = controller.epts + i;
		if (i < 2)
			/* EP0 (entries 0 and 1): control EP, interrupt-on-setup */
			head->config = CONFIG_MAX_PKT(EP0_MAX_PACKET_SIZE)
				| CONFIG_ZLT | CONFIG_IOS;
		else
			head->config = CONFIG_MAX_PKT(EP_MAX_PACKET_SIZE)
				| CONFIG_ZLT;
		head->next = TERMINATE;
		head->info = 0;

		/* flush once per endpoint, after its IN entry is written */
		if (i & 1) {
			ci_flush_qh(i / 2);
			ci_flush_qtd(i / 2);
		}
	}

	INIT_LIST_HEAD(&controller.gadget.ep_list);

	/* Init EP 0 */
	memcpy(&controller.ep[0].ep, &ci_ep_init[0], sizeof(*ci_ep_init));
	controller.ep[0].desc = &ep0_desc;
	INIT_LIST_HEAD(&controller.ep[0].queue);
	controller.ep[0].req_primed = false;
	controller.gadget.ep0 = &controller.ep[0].ep;
	INIT_LIST_HEAD(&controller.gadget.ep0->ep_list);

	/* Init EP 1..n */
	for (i = 1; i < NUM_ENDPOINTS; i++) {
		memcpy(&controller.ep[i].ep, &ci_ep_init[1],
		       sizeof(*ci_ep_init));
		INIT_LIST_HEAD(&controller.ep[i].queue);
		controller.ep[i].req_primed = false;
		list_add_tail(&controller.ep[i].ep.ep_list,
			      &controller.gadget.ep_list);
	}

	/*
	 * NOTE(review): the return value is deliberately ignored — for EP0
	 * ci_ep_alloc_request() is expected to populate controller.ep0_req
	 * as a side effect, which is what the check below relies on.
	 * Confirm against ci_ep_alloc_request()'s implementation.
	 */
	ci_ep_alloc_request(&controller.ep[0].ep, 0);
	if (!controller.ep0_req) {
		free(controller.items_mem);
		free(controller.epts);
		return -ENOMEM;
	}

	return 0;
}
869 | 869 | ||
870 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | 870 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) |
871 | { | 871 | { |
872 | int ret; | 872 | int ret; |
873 | 873 | ||
874 | if (!driver) | 874 | if (!driver) |
875 | return -EINVAL; | 875 | return -EINVAL; |
876 | if (!driver->bind || !driver->setup || !driver->disconnect) | 876 | if (!driver->bind || !driver->setup || !driver->disconnect) |
877 | return -EINVAL; | 877 | return -EINVAL; |
878 | if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) | 878 | if (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) |
879 | return -EINVAL; | 879 | return -EINVAL; |
880 | 880 | ||
881 | ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl); | 881 | ret = usb_lowlevel_init(0, USB_INIT_DEVICE, (void **)&controller.ctrl); |
882 | if (ret) | 882 | if (ret) |
883 | return ret; | 883 | return ret; |
884 | 884 | ||
885 | ret = ci_udc_probe(); | 885 | ret = ci_udc_probe(); |
886 | #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS) | 886 | #if defined(CONFIG_USB_EHCI_MX6) || defined(CONFIG_USB_EHCI_MXS) |
887 | /* | 887 | /* |
888 | * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all | 888 | * FIXME: usb_lowlevel_init()->ehci_hcd_init() should be doing all |
889 | * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection | 889 | * HW-specific initialization, e.g. ULPI-vs-UTMI PHY selection |
890 | */ | 890 | */ |
891 | if (!ret) { | 891 | if (!ret) { |
892 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; | 892 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; |
893 | 893 | ||
894 | /* select ULPI phy */ | 894 | /* select ULPI phy */ |
895 | writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc); | 895 | writel(PTS(PTS_ENABLE) | PFSC, &udc->portsc); |
896 | } | 896 | } |
897 | #endif | 897 | #endif |
898 | 898 | ||
899 | ret = driver->bind(&controller.gadget); | 899 | ret = driver->bind(&controller.gadget); |
900 | if (ret) { | 900 | if (ret) { |
901 | DBG("driver->bind() returned %d\n", ret); | 901 | DBG("driver->bind() returned %d\n", ret); |
902 | return ret; | 902 | return ret; |
903 | } | 903 | } |
904 | controller.driver = driver; | 904 | controller.driver = driver; |
905 | 905 | ||
906 | return 0; | 906 | return 0; |
907 | } | 907 | } |
908 | 908 | ||
909 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 909 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) |
910 | { | 910 | { |
911 | udc_disconnect(); | 911 | udc_disconnect(); |
912 | 912 | ||
913 | driver->unbind(&controller.gadget); | 913 | driver->unbind(&controller.gadget); |
914 | controller.driver = NULL; | 914 | controller.driver = NULL; |
915 | 915 | ||
916 | ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req); | 916 | ci_ep_free_request(&controller.ep[0].ep, &controller.ep0_req->req); |
917 | free(controller.items_mem); | 917 | free(controller.items_mem); |
918 | free(controller.epts); | 918 | free(controller.epts); |
919 | 919 | ||
920 | return 0; | 920 | return 0; |
921 | } | 921 | } |
922 | |||
923 | bool dfu_usb_get_reset(void) | ||
924 | { | ||
925 | struct ci_udc *udc = (struct ci_udc *)controller.ctrl->hcor; | ||
926 | |||
927 | return !!(readl(&udc->usbsts) & STS_URI); | ||
928 | } | ||
922 | 929 |
drivers/usb/gadget/f_dfu.c
1 | /* | 1 | /* |
2 | * f_dfu.c -- Device Firmware Update USB function | 2 | * f_dfu.c -- Device Firmware Update USB function |
3 | * | 3 | * |
4 | * Copyright (C) 2012 Samsung Electronics | 4 | * Copyright (C) 2012 Samsung Electronics |
5 | * authors: Andrzej Pietrasiewicz <andrzej.p@samsung.com> | 5 | * authors: Andrzej Pietrasiewicz <andrzej.p@samsung.com> |
6 | * Lukasz Majewski <l.majewski@samsung.com> | 6 | * Lukasz Majewski <l.majewski@samsung.com> |
7 | * | 7 | * |
8 | * Based on OpenMoko u-boot: drivers/usb/usbdfu.c | 8 | * Based on OpenMoko u-boot: drivers/usb/usbdfu.c |
9 | * (C) 2007 by OpenMoko, Inc. | 9 | * (C) 2007 by OpenMoko, Inc. |
10 | * Author: Harald Welte <laforge@openmoko.org> | 10 | * Author: Harald Welte <laforge@openmoko.org> |
11 | * | 11 | * |
12 | * based on existing SAM7DFU code from OpenPCD: | 12 | * based on existing SAM7DFU code from OpenPCD: |
13 | * (C) Copyright 2006 by Harald Welte <hwelte at hmw-consulting.de> | 13 | * (C) Copyright 2006 by Harald Welte <hwelte at hmw-consulting.de> |
14 | * | 14 | * |
15 | * SPDX-License-Identifier: GPL-2.0+ | 15 | * SPDX-License-Identifier: GPL-2.0+ |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <errno.h> | 18 | #include <errno.h> |
19 | #include <common.h> | 19 | #include <common.h> |
20 | #include <malloc.h> | 20 | #include <malloc.h> |
21 | 21 | ||
22 | #include <linux/usb/ch9.h> | 22 | #include <linux/usb/ch9.h> |
23 | #include <linux/usb/gadget.h> | 23 | #include <linux/usb/gadget.h> |
24 | #include <linux/usb/composite.h> | 24 | #include <linux/usb/composite.h> |
25 | 25 | ||
26 | #include <dfu.h> | 26 | #include <dfu.h> |
27 | #include <g_dnl.h> | 27 | #include <g_dnl.h> |
28 | #include "f_dfu.h" | 28 | #include "f_dfu.h" |
29 | 29 | ||
/* Per-instance state of the DFU USB function. */
struct f_dfu {
	struct usb_function usb_function;

	/* descriptors and strings used while in DFU mode */
	struct usb_descriptor_header **function;
	struct usb_string *strings;

	/* when configured, we have one config */
	u8 config;
	/* selected alt setting; also indexes the DFU entity (dfu_get_entity) */
	u8 altsetting;
	enum dfu_state dfu_state;	/* current DFU state-machine state */
	unsigned int dfu_status;	/* status reported via DFU_GETSTATUS */

	/* Send/received block number is handy for data integrity check */
	int blk_seq_num;
	unsigned int poll_timeout;	/* bwPollTimeout override, in ms */
};
46 | 46 | ||
/* Handler signature for one state of the DFU state machine */
typedef int (*dfu_state_fn) (struct f_dfu *,
			     const struct usb_ctrlrequest *,
			     struct usb_gadget *,
			     struct usb_request *);

/* Map a generic usb_function pointer back to its containing f_dfu */
static inline struct f_dfu *func_to_dfu(struct usb_function *f)
{
	return container_of(f, struct f_dfu, usb_function);
}
56 | 56 | ||
/* DFU functional descriptor advertised to the host (DFU 1.1 spec) */
static const struct dfu_function_descriptor dfu_func = {
	.bLength = sizeof dfu_func,
	.bDescriptorType = DFU_DT_FUNC,
	.bmAttributes = DFU_BIT_WILL_DETACH |
		DFU_BIT_MANIFESTATION_TOLERANT |
		DFU_BIT_CAN_UPLOAD |
		DFU_BIT_CAN_DNLOAD,
	.wDetachTimeOut = 0,
	.wTransferSize = DFU_USB_BUFSIZ,
	/* we implement DFU version 1.10 */
	.bcdDFUVersion = __constant_cpu_to_le16(0x0110),
};

/* Run-time (pre-detach) interface descriptor: no endpoints, app-specific */
static struct usb_interface_descriptor dfu_intf_runtime = {
	.bLength = sizeof dfu_intf_runtime,
	.bDescriptorType = USB_DT_INTERFACE,
	.bNumEndpoints = 0,
	.bInterfaceClass = USB_CLASS_APP_SPEC,
	.bInterfaceSubClass = 1,
	.bInterfaceProtocol = 1,
	/* .iInterface = DYNAMIC */
};

static struct usb_descriptor_header *dfu_runtime_descs[] = {
	(struct usb_descriptor_header *) &dfu_intf_runtime,
	NULL,
};

/* Device qualifier returned for the other (non-active) speed */
static const struct usb_qualifier_descriptor dev_qualifier = {
	.bLength = sizeof dev_qualifier,
	.bDescriptorType = USB_DT_DEVICE_QUALIFIER,
	.bcdUSB = __constant_cpu_to_le16(0x0200),
	.bDeviceClass = USB_CLASS_VENDOR_SPEC,
	.bNumConfigurations = 1,
};
91 | 91 | ||
static const char dfu_name[] = "Device Firmware Upgrade";

/*
 * static strings, in UTF-8
 *
 * dfu_generic configuration
 */
static struct usb_string strings_dfu_generic[] = {
	[0].s = dfu_name,
	{  }			/* end of list */
};

static struct usb_gadget_strings stringtab_dfu_generic = {
	.language = 0x0409,	/* en-us */
	.strings = strings_dfu_generic,
};

static struct usb_gadget_strings *dfu_generic_strings[] = {
	&stringtab_dfu_generic,
	NULL,
};

/*
 * usb_function specific
 */
static struct usb_gadget_strings stringtab_dfu = {
	.language = 0x0409,	/* en-us */
	/*
	 * .strings
	 *
	 * assigned during initialization,
	 * depends on number of flash entities
	 *
	 */
};

static struct usb_gadget_strings *dfu_strings[] = {
	&stringtab_dfu,
	NULL,
};
132 | 132 | ||
133 | static void dfu_set_poll_timeout(struct dfu_status *dstat, unsigned int ms) | 133 | static void dfu_set_poll_timeout(struct dfu_status *dstat, unsigned int ms) |
134 | { | 134 | { |
135 | /* | 135 | /* |
136 | * The bwPollTimeout DFU_GETSTATUS request payload provides information | 136 | * The bwPollTimeout DFU_GETSTATUS request payload provides information |
137 | * about minimum time, in milliseconds, that the host should wait before | 137 | * about minimum time, in milliseconds, that the host should wait before |
138 | * sending a subsequent DFU_GETSTATUS request | 138 | * sending a subsequent DFU_GETSTATUS request |
139 | * | 139 | * |
140 | * This permits the device to vary the delay depending on its need to | 140 | * This permits the device to vary the delay depending on its need to |
141 | * erase or program the memory | 141 | * erase or program the memory |
142 | * | 142 | * |
143 | */ | 143 | */ |
144 | 144 | ||
145 | unsigned char *p = (unsigned char *)&ms; | 145 | unsigned char *p = (unsigned char *)&ms; |
146 | 146 | ||
147 | if (!ms || (ms & ~DFU_POLL_TIMEOUT_MASK)) { | 147 | if (!ms || (ms & ~DFU_POLL_TIMEOUT_MASK)) { |
148 | dstat->bwPollTimeout[0] = 0; | 148 | dstat->bwPollTimeout[0] = 0; |
149 | dstat->bwPollTimeout[1] = 0; | 149 | dstat->bwPollTimeout[1] = 0; |
150 | dstat->bwPollTimeout[2] = 0; | 150 | dstat->bwPollTimeout[2] = 0; |
151 | 151 | ||
152 | return; | 152 | return; |
153 | } | 153 | } |
154 | 154 | ||
155 | dstat->bwPollTimeout[0] = *p++; | 155 | dstat->bwPollTimeout[0] = *p++; |
156 | dstat->bwPollTimeout[1] = *p++; | 156 | dstat->bwPollTimeout[1] = *p++; |
157 | dstat->bwPollTimeout[2] = *p; | 157 | dstat->bwPollTimeout[2] = *p; |
158 | } | 158 | } |
159 | 159 | ||
160 | /*-------------------------------------------------------------------------*/ | 160 | /*-------------------------------------------------------------------------*/ |
161 | 161 | ||
162 | static void dnload_request_complete(struct usb_ep *ep, struct usb_request *req) | 162 | static void dnload_request_complete(struct usb_ep *ep, struct usb_request *req) |
163 | { | 163 | { |
164 | struct f_dfu *f_dfu = req->context; | 164 | struct f_dfu *f_dfu = req->context; |
165 | int ret; | 165 | int ret; |
166 | 166 | ||
167 | ret = dfu_write(dfu_get_entity(f_dfu->altsetting), req->buf, | 167 | ret = dfu_write(dfu_get_entity(f_dfu->altsetting), req->buf, |
168 | req->length, f_dfu->blk_seq_num); | 168 | req->length, f_dfu->blk_seq_num); |
169 | if (ret) { | 169 | if (ret) { |
170 | f_dfu->dfu_status = DFU_STATUS_errUNKNOWN; | 170 | f_dfu->dfu_status = DFU_STATUS_errUNKNOWN; |
171 | f_dfu->dfu_state = DFU_STATE_dfuERROR; | 171 | f_dfu->dfu_state = DFU_STATE_dfuERROR; |
172 | } | 172 | } |
173 | } | 173 | } |
174 | 174 | ||
175 | static void dnload_request_flush(struct usb_ep *ep, struct usb_request *req) | 175 | static void dnload_request_flush(struct usb_ep *ep, struct usb_request *req) |
176 | { | 176 | { |
177 | struct f_dfu *f_dfu = req->context; | 177 | struct f_dfu *f_dfu = req->context; |
178 | int ret; | 178 | int ret; |
179 | 179 | ||
180 | ret = dfu_flush(dfu_get_entity(f_dfu->altsetting), req->buf, | 180 | ret = dfu_flush(dfu_get_entity(f_dfu->altsetting), req->buf, |
181 | req->length, f_dfu->blk_seq_num); | 181 | req->length, f_dfu->blk_seq_num); |
182 | if (ret) { | 182 | if (ret) { |
183 | f_dfu->dfu_status = DFU_STATUS_errUNKNOWN; | 183 | f_dfu->dfu_status = DFU_STATUS_errUNKNOWN; |
184 | f_dfu->dfu_state = DFU_STATE_dfuERROR; | 184 | f_dfu->dfu_state = DFU_STATE_dfuERROR; |
185 | } | 185 | } |
186 | } | 186 | } |
187 | 187 | ||
188 | static inline int dfu_get_manifest_timeout(struct dfu_entity *dfu) | 188 | static inline int dfu_get_manifest_timeout(struct dfu_entity *dfu) |
189 | { | 189 | { |
190 | return dfu->poll_timeout ? dfu->poll_timeout(dfu) : | 190 | return dfu->poll_timeout ? dfu->poll_timeout(dfu) : |
191 | DFU_MANIFEST_POLL_TIMEOUT; | 191 | DFU_MANIFEST_POLL_TIMEOUT; |
192 | } | 192 | } |
193 | 193 | ||
/*
 * Build the DFU_GETSTATUS response in req->buf and advance the state
 * machine as a side effect (GETSTATUS is the host's way of driving
 * the DNLOAD/MANIFEST transitions).
 */
static void handle_getstatus(struct usb_request *req)
{
	struct dfu_status *dstat = (struct dfu_status *)req->buf;
	struct f_dfu *f_dfu = req->context;
	struct dfu_entity *dfu = dfu_get_entity(f_dfu->altsetting);

	/* default: no poll delay */
	dfu_set_poll_timeout(dstat, 0);

	switch (f_dfu->dfu_state) {
	case DFU_STATE_dfuDNLOAD_SYNC:
	case DFU_STATE_dfuDNBUSY:
		/* block written: ready for the next DNLOAD */
		f_dfu->dfu_state = DFU_STATE_dfuDNLOAD_IDLE;
		break;
	case DFU_STATE_dfuMANIFEST_SYNC:
		f_dfu->dfu_state = DFU_STATE_dfuMANIFEST;
		break;
	case DFU_STATE_dfuMANIFEST:
		/* manifest may take long (e.g. flash erase): tell the host */
		dfu_set_poll_timeout(dstat, dfu_get_manifest_timeout(dfu));
		break;
	default:
		break;
	}

	/* periodic per-buffer timeout, once per full backend buffer */
	if (f_dfu->poll_timeout)
		if (!(f_dfu->blk_seq_num %
		      (dfu_get_buf_size() / DFU_USB_BUFSIZ)))
			dfu_set_poll_timeout(dstat, f_dfu->poll_timeout);

	/* send status response */
	dstat->bStatus = f_dfu->dfu_status;
	dstat->bState = f_dfu->dfu_state;
	dstat->iString = 0;
}
227 | 227 | ||
228 | static void handle_getstate(struct usb_request *req) | 228 | static void handle_getstate(struct usb_request *req) |
229 | { | 229 | { |
230 | struct f_dfu *f_dfu = req->context; | 230 | struct f_dfu *f_dfu = req->context; |
231 | 231 | ||
232 | ((u8 *)req->buf)[0] = f_dfu->dfu_state; | 232 | ((u8 *)req->buf)[0] = f_dfu->dfu_state; |
233 | req->actual = sizeof(u8); | 233 | req->actual = sizeof(u8); |
234 | } | 234 | } |
235 | 235 | ||
/*
 * Switch the function to DFU mode: expose the per-entity strings and
 * descriptors and enter the dfuIDLE state.
 */
static inline void to_dfu_mode(struct f_dfu *f_dfu)
{
	f_dfu->usb_function.strings = dfu_strings;
	f_dfu->usb_function.hs_descriptors = f_dfu->function;
	f_dfu->dfu_state = DFU_STATE_dfuIDLE;
}

/*
 * Switch the function back to run-time mode: static run-time
 * descriptors, no function-specific strings.
 */
static inline void to_runtime_mode(struct f_dfu *f_dfu)
{
	f_dfu->usb_function.strings = NULL;
	f_dfu->usb_function.hs_descriptors = dfu_runtime_descs;
}
248 | 248 | ||
249 | static int handle_upload(struct usb_request *req, u16 len) | 249 | static int handle_upload(struct usb_request *req, u16 len) |
250 | { | 250 | { |
251 | struct f_dfu *f_dfu = req->context; | 251 | struct f_dfu *f_dfu = req->context; |
252 | 252 | ||
253 | return dfu_read(dfu_get_entity(f_dfu->altsetting), req->buf, | 253 | return dfu_read(dfu_get_entity(f_dfu->altsetting), req->buf, |
254 | req->length, f_dfu->blk_seq_num); | 254 | req->length, f_dfu->blk_seq_num); |
255 | } | 255 | } |
256 | 256 | ||
257 | static int handle_dnload(struct usb_gadget *gadget, u16 len) | 257 | static int handle_dnload(struct usb_gadget *gadget, u16 len) |
258 | { | 258 | { |
259 | struct usb_composite_dev *cdev = get_gadget_data(gadget); | 259 | struct usb_composite_dev *cdev = get_gadget_data(gadget); |
260 | struct usb_request *req = cdev->req; | 260 | struct usb_request *req = cdev->req; |
261 | struct f_dfu *f_dfu = req->context; | 261 | struct f_dfu *f_dfu = req->context; |
262 | 262 | ||
263 | if (len == 0) | 263 | if (len == 0) |
264 | f_dfu->dfu_state = DFU_STATE_dfuMANIFEST_SYNC; | 264 | f_dfu->dfu_state = DFU_STATE_dfuMANIFEST_SYNC; |
265 | 265 | ||
266 | req->complete = dnload_request_complete; | 266 | req->complete = dnload_request_complete; |
267 | 267 | ||
268 | return len; | 268 | return len; |
269 | } | 269 | } |
270 | 270 | ||
271 | /*-------------------------------------------------------------------------*/ | 271 | /*-------------------------------------------------------------------------*/ |
272 | /* DFU state machine */ | 272 | /* DFU state machine */ |
273 | static int state_app_idle(struct f_dfu *f_dfu, | 273 | static int state_app_idle(struct f_dfu *f_dfu, |
274 | const struct usb_ctrlrequest *ctrl, | 274 | const struct usb_ctrlrequest *ctrl, |
275 | struct usb_gadget *gadget, | 275 | struct usb_gadget *gadget, |
276 | struct usb_request *req) | 276 | struct usb_request *req) |
277 | { | 277 | { |
278 | int value = 0; | 278 | int value = 0; |
279 | 279 | ||
280 | switch (ctrl->bRequest) { | 280 | switch (ctrl->bRequest) { |
281 | case USB_REQ_DFU_GETSTATUS: | 281 | case USB_REQ_DFU_GETSTATUS: |
282 | handle_getstatus(req); | 282 | handle_getstatus(req); |
283 | value = RET_STAT_LEN; | 283 | value = RET_STAT_LEN; |
284 | break; | 284 | break; |
285 | case USB_REQ_DFU_GETSTATE: | 285 | case USB_REQ_DFU_GETSTATE: |
286 | handle_getstate(req); | 286 | handle_getstate(req); |
287 | break; | 287 | break; |
288 | case USB_REQ_DFU_DETACH: | 288 | case USB_REQ_DFU_DETACH: |
289 | f_dfu->dfu_state = DFU_STATE_appDETACH; | 289 | f_dfu->dfu_state = DFU_STATE_appDETACH; |
290 | to_dfu_mode(f_dfu); | 290 | to_dfu_mode(f_dfu); |
291 | value = RET_ZLP; | 291 | value = RET_ZLP; |
292 | break; | 292 | break; |
293 | default: | 293 | default: |
294 | value = RET_STALL; | 294 | value = RET_STALL; |
295 | break; | 295 | break; |
296 | } | 296 | } |
297 | 297 | ||
298 | return value; | 298 | return value; |
299 | } | 299 | } |
300 | 300 | ||
301 | static int state_app_detach(struct f_dfu *f_dfu, | 301 | static int state_app_detach(struct f_dfu *f_dfu, |
302 | const struct usb_ctrlrequest *ctrl, | 302 | const struct usb_ctrlrequest *ctrl, |
303 | struct usb_gadget *gadget, | 303 | struct usb_gadget *gadget, |
304 | struct usb_request *req) | 304 | struct usb_request *req) |
305 | { | 305 | { |
306 | int value = 0; | 306 | int value = 0; |
307 | 307 | ||
308 | switch (ctrl->bRequest) { | 308 | switch (ctrl->bRequest) { |
309 | case USB_REQ_DFU_GETSTATUS: | 309 | case USB_REQ_DFU_GETSTATUS: |
310 | handle_getstatus(req); | 310 | handle_getstatus(req); |
311 | value = RET_STAT_LEN; | 311 | value = RET_STAT_LEN; |
312 | break; | 312 | break; |
313 | case USB_REQ_DFU_GETSTATE: | 313 | case USB_REQ_DFU_GETSTATE: |
314 | handle_getstate(req); | 314 | handle_getstate(req); |
315 | break; | 315 | break; |
316 | default: | 316 | default: |
317 | f_dfu->dfu_state = DFU_STATE_appIDLE; | 317 | f_dfu->dfu_state = DFU_STATE_appIDLE; |
318 | value = RET_STALL; | 318 | value = RET_STALL; |
319 | break; | 319 | break; |
320 | } | 320 | } |
321 | 321 | ||
322 | return value; | 322 | return value; |
323 | } | 323 | } |
324 | 324 | ||
/*
 * dfuIDLE: DFU mode, no transfer in progress.  Entry point for both
 * downloads (DNLOAD) and uploads (UPLOAD); also accepts a proprietary
 * DETACH that drops the device back to run-time mode.
 */
static int state_dfu_idle(struct f_dfu *f_dfu,
			  const struct usb_ctrlrequest *ctrl,
			  struct usb_gadget *gadget,
			  struct usb_request *req)
{
	u16 w_value = le16_to_cpu(ctrl->wValue);	/* block sequence number */
	u16 len = le16_to_cpu(ctrl->wLength);
	int value = 0;

	switch (ctrl->bRequest) {
	case USB_REQ_DFU_DNLOAD:
		/* a zero-length DNLOAD in dfuIDLE is a protocol error */
		if (len == 0) {
			f_dfu->dfu_state = DFU_STATE_dfuERROR;
			value = RET_STALL;
			break;
		}
		f_dfu->dfu_state = DFU_STATE_dfuDNLOAD_SYNC;
		f_dfu->blk_seq_num = w_value;
		value = handle_dnload(gadget, len);
		break;
	case USB_REQ_DFU_UPLOAD:
		f_dfu->dfu_state = DFU_STATE_dfuUPLOAD_IDLE;
		f_dfu->blk_seq_num = 0;
		value = handle_upload(req, len);
		break;
	case USB_REQ_DFU_ABORT:
		/* no zlp? */
		value = RET_ZLP;
		break;
	case USB_REQ_DFU_GETSTATUS:
		handle_getstatus(req);
		value = RET_STAT_LEN;
		break;
	case USB_REQ_DFU_GETSTATE:
		handle_getstate(req);
		break;
	case USB_REQ_DFU_DETACH:
		/*
		 * Proprietary extension: 'detach' from idle mode and
		 * get back to runtime mode in case of USB Reset. As
		 * much as I dislike this, we just can't use every USB
		 * bus reset to switch back to runtime mode, since at
		 * least the Linux USB stack likes to send a number of
		 * resets in a row :(
		 */
		f_dfu->dfu_state =
			DFU_STATE_dfuMANIFEST_WAIT_RST;
		to_runtime_mode(f_dfu);
		/*
		 * NOTE(review): this store immediately overwrites the
		 * MANIFEST_WAIT_RST assignment above, which appears to
		 * be dead code — confirm against the DFU detach flow.
		 */
		f_dfu->dfu_state = DFU_STATE_appIDLE;

		dfu_trigger_detach();
		break;
	default:
		f_dfu->dfu_state = DFU_STATE_dfuERROR;
		value = RET_STALL;
		break;
	}

	return value;
}
385 | 385 | ||
386 | static int state_dfu_dnload_sync(struct f_dfu *f_dfu, | 386 | static int state_dfu_dnload_sync(struct f_dfu *f_dfu, |
387 | const struct usb_ctrlrequest *ctrl, | 387 | const struct usb_ctrlrequest *ctrl, |
388 | struct usb_gadget *gadget, | 388 | struct usb_gadget *gadget, |
389 | struct usb_request *req) | 389 | struct usb_request *req) |
390 | { | 390 | { |
391 | int value = 0; | 391 | int value = 0; |
392 | 392 | ||
393 | switch (ctrl->bRequest) { | 393 | switch (ctrl->bRequest) { |
394 | case USB_REQ_DFU_GETSTATUS: | 394 | case USB_REQ_DFU_GETSTATUS: |
395 | handle_getstatus(req); | 395 | handle_getstatus(req); |
396 | value = RET_STAT_LEN; | 396 | value = RET_STAT_LEN; |
397 | break; | 397 | break; |
398 | case USB_REQ_DFU_GETSTATE: | 398 | case USB_REQ_DFU_GETSTATE: |
399 | handle_getstate(req); | 399 | handle_getstate(req); |
400 | break; | 400 | break; |
401 | default: | 401 | default: |
402 | f_dfu->dfu_state = DFU_STATE_dfuERROR; | 402 | f_dfu->dfu_state = DFU_STATE_dfuERROR; |
403 | value = RET_STALL; | 403 | value = RET_STALL; |
404 | break; | 404 | break; |
405 | } | 405 | } |
406 | 406 | ||
407 | return value; | 407 | return value; |
408 | } | 408 | } |
409 | 409 | ||
410 | static int state_dfu_dnbusy(struct f_dfu *f_dfu, | 410 | static int state_dfu_dnbusy(struct f_dfu *f_dfu, |
411 | const struct usb_ctrlrequest *ctrl, | 411 | const struct usb_ctrlrequest *ctrl, |
412 | struct usb_gadget *gadget, | 412 | struct usb_gadget *gadget, |
413 | struct usb_request *req) | 413 | struct usb_request *req) |
414 | { | 414 | { |
415 | int value = 0; | 415 | int value = 0; |
416 | 416 | ||
417 | switch (ctrl->bRequest) { | 417 | switch (ctrl->bRequest) { |
418 | case USB_REQ_DFU_GETSTATUS: | 418 | case USB_REQ_DFU_GETSTATUS: |
419 | handle_getstatus(req); | 419 | handle_getstatus(req); |
420 | value = RET_STAT_LEN; | 420 | value = RET_STAT_LEN; |
421 | break; | 421 | break; |
422 | default: | 422 | default: |
423 | f_dfu->dfu_state = DFU_STATE_dfuERROR; | 423 | f_dfu->dfu_state = DFU_STATE_dfuERROR; |
424 | value = RET_STALL; | 424 | value = RET_STALL; |
425 | break; | 425 | break; |
426 | } | 426 | } |
427 | 427 | ||
428 | return value; | 428 | return value; |
429 | } | 429 | } |
430 | 430 | ||
431 | static int state_dfu_dnload_idle(struct f_dfu *f_dfu, | 431 | static int state_dfu_dnload_idle(struct f_dfu *f_dfu, |
432 | const struct usb_ctrlrequest *ctrl, | 432 | const struct usb_ctrlrequest *ctrl, |
433 | struct usb_gadget *gadget, | 433 | struct usb_gadget *gadget, |
434 | struct usb_request *req) | 434 | struct usb_request *req) |
435 | { | 435 | { |
436 | u16 w_value = le16_to_cpu(ctrl->wValue); | 436 | u16 w_value = le16_to_cpu(ctrl->wValue); |
437 | u16 len = le16_to_cpu(ctrl->wLength); | 437 | u16 len = le16_to_cpu(ctrl->wLength); |
438 | int value = 0; | 438 | int value = 0; |
439 | 439 | ||
440 | switch (ctrl->bRequest) { | 440 | switch (ctrl->bRequest) { |
441 | case USB_REQ_DFU_DNLOAD: | 441 | case USB_REQ_DFU_DNLOAD: |
442 | f_dfu->dfu_state = DFU_STATE_dfuDNLOAD_SYNC; | 442 | f_dfu->dfu_state = DFU_STATE_dfuDNLOAD_SYNC; |
443 | f_dfu->blk_seq_num = w_value; | 443 | f_dfu->blk_seq_num = w_value; |
444 | value = handle_dnload(gadget, len); | 444 | value = handle_dnload(gadget, len); |
445 | break; | 445 | break; |
446 | case USB_REQ_DFU_ABORT: | 446 | case USB_REQ_DFU_ABORT: |
447 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; | 447 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; |
448 | value = RET_ZLP; | 448 | value = RET_ZLP; |
449 | break; | 449 | break; |
450 | case USB_REQ_DFU_GETSTATUS: | 450 | case USB_REQ_DFU_GETSTATUS: |
451 | handle_getstatus(req); | 451 | handle_getstatus(req); |
452 | value = RET_STAT_LEN; | 452 | value = RET_STAT_LEN; |
453 | break; | 453 | break; |
454 | case USB_REQ_DFU_GETSTATE: | 454 | case USB_REQ_DFU_GETSTATE: |
455 | handle_getstate(req); | 455 | handle_getstate(req); |
456 | break; | 456 | break; |
457 | default: | 457 | default: |
458 | f_dfu->dfu_state = DFU_STATE_dfuERROR; | 458 | f_dfu->dfu_state = DFU_STATE_dfuERROR; |
459 | value = RET_STALL; | 459 | value = RET_STALL; |
460 | break; | 460 | break; |
461 | } | 461 | } |
462 | 462 | ||
463 | return value; | 463 | return value; |
464 | } | 464 | } |
465 | 465 | ||
/*
 * dfuMANIFEST-SYNC: all blocks received; the host polls GETSTATUS to
 * trigger manifestation.  The completion hook is swapped to the flush
 * handler so buffered download data is committed to the medium after
 * the status response goes out.
 */
static int state_dfu_manifest_sync(struct f_dfu *f_dfu,
				   const struct usb_ctrlrequest *ctrl,
				   struct usb_gadget *gadget,
				   struct usb_request *req)
{
	int value = 0;

	switch (ctrl->bRequest) {
	case USB_REQ_DFU_GETSTATUS:
		/* We're ManifestationTolerant */
		f_dfu->dfu_state = DFU_STATE_dfuMANIFEST;
		handle_getstatus(req);
		f_dfu->blk_seq_num = 0;
		value = RET_STAT_LEN;
		/* flush pending data once this status stage completes */
		req->complete = dnload_request_flush;
		break;
	case USB_REQ_DFU_GETSTATE:
		handle_getstate(req);
		break;
	default:
		f_dfu->dfu_state = DFU_STATE_dfuERROR;
		value = RET_STALL;
		break;
	}

	return value;
}
493 | 493 | ||
494 | static int state_dfu_manifest(struct f_dfu *f_dfu, | 494 | static int state_dfu_manifest(struct f_dfu *f_dfu, |
495 | const struct usb_ctrlrequest *ctrl, | 495 | const struct usb_ctrlrequest *ctrl, |
496 | struct usb_gadget *gadget, | 496 | struct usb_gadget *gadget, |
497 | struct usb_request *req) | 497 | struct usb_request *req) |
498 | { | 498 | { |
499 | int value = 0; | 499 | int value = 0; |
500 | 500 | ||
501 | switch (ctrl->bRequest) { | 501 | switch (ctrl->bRequest) { |
502 | case USB_REQ_DFU_GETSTATUS: | 502 | case USB_REQ_DFU_GETSTATUS: |
503 | /* We're MainfestationTolerant */ | 503 | /* We're MainfestationTolerant */ |
504 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; | 504 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; |
505 | handle_getstatus(req); | 505 | handle_getstatus(req); |
506 | f_dfu->blk_seq_num = 0; | 506 | f_dfu->blk_seq_num = 0; |
507 | value = RET_STAT_LEN; | 507 | value = RET_STAT_LEN; |
508 | puts("DOWNLOAD ... OK\nCtrl+C to exit ...\n"); | 508 | puts("DOWNLOAD ... OK\nCtrl+C to exit ...\n"); |
509 | break; | 509 | break; |
510 | case USB_REQ_DFU_GETSTATE: | 510 | case USB_REQ_DFU_GETSTATE: |
511 | handle_getstate(req); | 511 | handle_getstate(req); |
512 | break; | 512 | break; |
513 | default: | 513 | default: |
514 | f_dfu->dfu_state = DFU_STATE_dfuERROR; | 514 | f_dfu->dfu_state = DFU_STATE_dfuERROR; |
515 | value = RET_STALL; | 515 | value = RET_STALL; |
516 | break; | 516 | break; |
517 | } | 517 | } |
518 | return value; | 518 | return value; |
519 | } | 519 | } |
520 | 520 | ||
521 | static int state_dfu_upload_idle(struct f_dfu *f_dfu, | 521 | static int state_dfu_upload_idle(struct f_dfu *f_dfu, |
522 | const struct usb_ctrlrequest *ctrl, | 522 | const struct usb_ctrlrequest *ctrl, |
523 | struct usb_gadget *gadget, | 523 | struct usb_gadget *gadget, |
524 | struct usb_request *req) | 524 | struct usb_request *req) |
525 | { | 525 | { |
526 | u16 w_value = le16_to_cpu(ctrl->wValue); | 526 | u16 w_value = le16_to_cpu(ctrl->wValue); |
527 | u16 len = le16_to_cpu(ctrl->wLength); | 527 | u16 len = le16_to_cpu(ctrl->wLength); |
528 | int value = 0; | 528 | int value = 0; |
529 | 529 | ||
530 | switch (ctrl->bRequest) { | 530 | switch (ctrl->bRequest) { |
531 | case USB_REQ_DFU_UPLOAD: | 531 | case USB_REQ_DFU_UPLOAD: |
532 | /* state transition if less data then requested */ | 532 | /* state transition if less data then requested */ |
533 | f_dfu->blk_seq_num = w_value; | 533 | f_dfu->blk_seq_num = w_value; |
534 | value = handle_upload(req, len); | 534 | value = handle_upload(req, len); |
535 | if (value >= 0 && value < len) | 535 | if (value >= 0 && value < len) |
536 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; | 536 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; |
537 | break; | 537 | break; |
538 | case USB_REQ_DFU_ABORT: | 538 | case USB_REQ_DFU_ABORT: |
539 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; | 539 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; |
540 | /* no zlp? */ | 540 | /* no zlp? */ |
541 | value = RET_ZLP; | 541 | value = RET_ZLP; |
542 | break; | 542 | break; |
543 | case USB_REQ_DFU_GETSTATUS: | 543 | case USB_REQ_DFU_GETSTATUS: |
544 | handle_getstatus(req); | 544 | handle_getstatus(req); |
545 | value = RET_STAT_LEN; | 545 | value = RET_STAT_LEN; |
546 | break; | 546 | break; |
547 | case USB_REQ_DFU_GETSTATE: | 547 | case USB_REQ_DFU_GETSTATE: |
548 | handle_getstate(req); | 548 | handle_getstate(req); |
549 | break; | 549 | break; |
550 | default: | 550 | default: |
551 | f_dfu->dfu_state = DFU_STATE_dfuERROR; | 551 | f_dfu->dfu_state = DFU_STATE_dfuERROR; |
552 | value = RET_STALL; | 552 | value = RET_STALL; |
553 | break; | 553 | break; |
554 | } | 554 | } |
555 | 555 | ||
556 | return value; | 556 | return value; |
557 | } | 557 | } |
558 | 558 | ||
559 | static int state_dfu_error(struct f_dfu *f_dfu, | 559 | static int state_dfu_error(struct f_dfu *f_dfu, |
560 | const struct usb_ctrlrequest *ctrl, | 560 | const struct usb_ctrlrequest *ctrl, |
561 | struct usb_gadget *gadget, | 561 | struct usb_gadget *gadget, |
562 | struct usb_request *req) | 562 | struct usb_request *req) |
563 | { | 563 | { |
564 | int value = 0; | 564 | int value = 0; |
565 | 565 | ||
566 | switch (ctrl->bRequest) { | 566 | switch (ctrl->bRequest) { |
567 | case USB_REQ_DFU_GETSTATUS: | 567 | case USB_REQ_DFU_GETSTATUS: |
568 | handle_getstatus(req); | 568 | handle_getstatus(req); |
569 | value = RET_STAT_LEN; | 569 | value = RET_STAT_LEN; |
570 | break; | 570 | break; |
571 | case USB_REQ_DFU_GETSTATE: | 571 | case USB_REQ_DFU_GETSTATE: |
572 | handle_getstate(req); | 572 | handle_getstate(req); |
573 | break; | 573 | break; |
574 | case USB_REQ_DFU_CLRSTATUS: | 574 | case USB_REQ_DFU_CLRSTATUS: |
575 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; | 575 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; |
576 | f_dfu->dfu_status = DFU_STATUS_OK; | 576 | f_dfu->dfu_status = DFU_STATUS_OK; |
577 | /* no zlp? */ | 577 | /* no zlp? */ |
578 | value = RET_ZLP; | 578 | value = RET_ZLP; |
579 | break; | 579 | break; |
580 | default: | 580 | default: |
581 | f_dfu->dfu_state = DFU_STATE_dfuERROR; | 581 | f_dfu->dfu_state = DFU_STATE_dfuERROR; |
582 | value = RET_STALL; | 582 | value = RET_STALL; |
583 | break; | 583 | break; |
584 | } | 584 | } |
585 | 585 | ||
586 | return value; | 586 | return value; |
587 | } | 587 | } |
588 | 588 | ||
/*
 * Per-state setup handlers, indexed directly by the DFU_STATE_* value
 * (see dfu_handle()).  Entry order must match the state enum exactly.
 * dfuMANIFEST_WAIT_RST has no handler: the device only waits for a
 * USB reset in that state.
 */
static dfu_state_fn dfu_state[] = {
	state_app_idle,          /* DFU_STATE_appIDLE */
	state_app_detach,        /* DFU_STATE_appDETACH */
	state_dfu_idle,          /* DFU_STATE_dfuIDLE */
	state_dfu_dnload_sync,   /* DFU_STATE_dfuDNLOAD_SYNC */
	state_dfu_dnbusy,        /* DFU_STATE_dfuDNBUSY */
	state_dfu_dnload_idle,   /* DFU_STATE_dfuDNLOAD_IDLE */
	state_dfu_manifest_sync, /* DFU_STATE_dfuMANIFEST_SYNC */
	state_dfu_manifest,      /* DFU_STATE_dfuMANIFEST */
	NULL,                    /* DFU_STATE_dfuMANIFEST_WAIT_RST */
	state_dfu_upload_idle,   /* DFU_STATE_dfuUPLOAD_IDLE */
	state_dfu_error          /* DFU_STATE_dfuERROR */
};
602 | 602 | ||
/*
 * ep0 setup() callback for the DFU function.
 *
 * Standard GET_DESCRIPTOR requests for the DFU functional descriptor
 * (DFU_DT_FUNC) are answered inline; every other (class-specific)
 * request is dispatched to the handler for the current DFU state.
 * A non-negative handler result is the byte count to return on ep0
 * and is queued here; a negative result propagates up so the
 * composite layer stalls ep0.
 */
static int
dfu_handle(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct usb_gadget *gadget = f->config->cdev->gadget;
	struct usb_request *req = f->config->cdev->req;
	struct f_dfu *f_dfu = f->config->cdev->req->context;
	u16 len = le16_to_cpu(ctrl->wLength);
	u16 w_value = le16_to_cpu(ctrl->wValue);
	int value = 0;
	u8 req_type = ctrl->bRequestType & USB_TYPE_MASK;

	debug("w_value: 0x%x len: 0x%x\n", w_value, len);
	debug("req_type: 0x%x ctrl->bRequest: 0x%x f_dfu->dfu_state: 0x%x\n",
	       req_type, ctrl->bRequest, f_dfu->dfu_state);

	if (req_type == USB_TYPE_STANDARD) {
		/* descriptor type is in the high byte of wValue */
		if (ctrl->bRequest == USB_REQ_GET_DESCRIPTOR &&
		    (w_value >> 8) == DFU_DT_FUNC) {
			value = min(len, (u16) sizeof(dfu_func));
			memcpy(req->buf, &dfu_func, value);
		}
	} else /* DFU specific request */
		value = dfu_state[f_dfu->dfu_state] (f_dfu, ctrl, gadget, req);

	if (value >= 0) {
		req->length = value;
		/* terminate the transfer with a ZLP on a short response */
		req->zero = value < len;
		value = usb_ep_queue(gadget->ep0, req, 0);
		if (value < 0) {
			debug("ep_queue --> %d\n", value);
			req->status = 0;
		}
	}

	return value;
}
639 | 639 | ||
640 | /*-------------------------------------------------------------------------*/ | 640 | /*-------------------------------------------------------------------------*/ |
641 | 641 | ||
642 | static int | 642 | static int |
643 | dfu_prepare_strings(struct f_dfu *f_dfu, int n) | 643 | dfu_prepare_strings(struct f_dfu *f_dfu, int n) |
644 | { | 644 | { |
645 | struct dfu_entity *de = NULL; | 645 | struct dfu_entity *de = NULL; |
646 | int i = 0; | 646 | int i = 0; |
647 | 647 | ||
648 | f_dfu->strings = calloc(sizeof(struct usb_string), n + 1); | 648 | f_dfu->strings = calloc(sizeof(struct usb_string), n + 1); |
649 | if (!f_dfu->strings) | 649 | if (!f_dfu->strings) |
650 | goto enomem; | 650 | goto enomem; |
651 | 651 | ||
652 | for (i = 0; i < n; ++i) { | 652 | for (i = 0; i < n; ++i) { |
653 | de = dfu_get_entity(i); | 653 | de = dfu_get_entity(i); |
654 | f_dfu->strings[i].s = de->name; | 654 | f_dfu->strings[i].s = de->name; |
655 | } | 655 | } |
656 | 656 | ||
657 | f_dfu->strings[i].id = 0; | 657 | f_dfu->strings[i].id = 0; |
658 | f_dfu->strings[i].s = NULL; | 658 | f_dfu->strings[i].s = NULL; |
659 | 659 | ||
660 | return 0; | 660 | return 0; |
661 | 661 | ||
662 | enomem: | 662 | enomem: |
663 | while (i) | 663 | while (i) |
664 | f_dfu->strings[--i].s = NULL; | 664 | f_dfu->strings[--i].s = NULL; |
665 | 665 | ||
666 | free(f_dfu->strings); | 666 | free(f_dfu->strings); |
667 | 667 | ||
668 | return -ENOMEM; | 668 | return -ENOMEM; |
669 | } | 669 | } |
670 | 670 | ||
671 | static int dfu_prepare_function(struct f_dfu *f_dfu, int n) | 671 | static int dfu_prepare_function(struct f_dfu *f_dfu, int n) |
672 | { | 672 | { |
673 | struct usb_interface_descriptor *d; | 673 | struct usb_interface_descriptor *d; |
674 | int i = 0; | 674 | int i = 0; |
675 | 675 | ||
676 | f_dfu->function = calloc(sizeof(struct usb_descriptor_header *), n + 1); | 676 | f_dfu->function = calloc(sizeof(struct usb_descriptor_header *), n + 1); |
677 | if (!f_dfu->function) | 677 | if (!f_dfu->function) |
678 | goto enomem; | 678 | goto enomem; |
679 | 679 | ||
680 | for (i = 0; i < n; ++i) { | 680 | for (i = 0; i < n; ++i) { |
681 | d = calloc(sizeof(*d), 1); | 681 | d = calloc(sizeof(*d), 1); |
682 | if (!d) | 682 | if (!d) |
683 | goto enomem; | 683 | goto enomem; |
684 | 684 | ||
685 | d->bLength = sizeof(*d); | 685 | d->bLength = sizeof(*d); |
686 | d->bDescriptorType = USB_DT_INTERFACE; | 686 | d->bDescriptorType = USB_DT_INTERFACE; |
687 | d->bAlternateSetting = i; | 687 | d->bAlternateSetting = i; |
688 | d->bNumEndpoints = 0; | 688 | d->bNumEndpoints = 0; |
689 | d->bInterfaceClass = USB_CLASS_APP_SPEC; | 689 | d->bInterfaceClass = USB_CLASS_APP_SPEC; |
690 | d->bInterfaceSubClass = 1; | 690 | d->bInterfaceSubClass = 1; |
691 | d->bInterfaceProtocol = 2; | 691 | d->bInterfaceProtocol = 2; |
692 | 692 | ||
693 | f_dfu->function[i] = (struct usb_descriptor_header *)d; | 693 | f_dfu->function[i] = (struct usb_descriptor_header *)d; |
694 | } | 694 | } |
695 | f_dfu->function[i] = NULL; | 695 | f_dfu->function[i] = NULL; |
696 | 696 | ||
697 | return 0; | 697 | return 0; |
698 | 698 | ||
699 | enomem: | 699 | enomem: |
700 | while (i) { | 700 | while (i) { |
701 | free(f_dfu->function[--i]); | 701 | free(f_dfu->function[--i]); |
702 | f_dfu->function[i] = NULL; | 702 | f_dfu->function[i] = NULL; |
703 | } | 703 | } |
704 | free(f_dfu->function); | 704 | free(f_dfu->function); |
705 | 705 | ||
706 | return -ENOMEM; | 706 | return -ENOMEM; |
707 | } | 707 | } |
708 | 708 | ||
709 | static int dfu_bind(struct usb_configuration *c, struct usb_function *f) | 709 | static int dfu_bind(struct usb_configuration *c, struct usb_function *f) |
710 | { | 710 | { |
711 | struct usb_composite_dev *cdev = c->cdev; | 711 | struct usb_composite_dev *cdev = c->cdev; |
712 | struct f_dfu *f_dfu = func_to_dfu(f); | 712 | struct f_dfu *f_dfu = func_to_dfu(f); |
713 | int alt_num = dfu_get_alt_number(); | 713 | int alt_num = dfu_get_alt_number(); |
714 | int rv, id, i; | 714 | int rv, id, i; |
715 | 715 | ||
716 | id = usb_interface_id(c, f); | 716 | id = usb_interface_id(c, f); |
717 | if (id < 0) | 717 | if (id < 0) |
718 | return id; | 718 | return id; |
719 | dfu_intf_runtime.bInterfaceNumber = id; | 719 | dfu_intf_runtime.bInterfaceNumber = id; |
720 | 720 | ||
721 | f_dfu->dfu_state = DFU_STATE_appIDLE; | 721 | f_dfu->dfu_state = DFU_STATE_appIDLE; |
722 | f_dfu->dfu_status = DFU_STATUS_OK; | 722 | f_dfu->dfu_status = DFU_STATUS_OK; |
723 | 723 | ||
724 | rv = dfu_prepare_function(f_dfu, alt_num); | 724 | rv = dfu_prepare_function(f_dfu, alt_num); |
725 | if (rv) | 725 | if (rv) |
726 | goto error; | 726 | goto error; |
727 | 727 | ||
728 | rv = dfu_prepare_strings(f_dfu, alt_num); | 728 | rv = dfu_prepare_strings(f_dfu, alt_num); |
729 | if (rv) | 729 | if (rv) |
730 | goto error; | 730 | goto error; |
731 | for (i = 0; i < alt_num; i++) { | 731 | for (i = 0; i < alt_num; i++) { |
732 | id = usb_string_id(cdev); | 732 | id = usb_string_id(cdev); |
733 | if (id < 0) | 733 | if (id < 0) |
734 | return id; | 734 | return id; |
735 | f_dfu->strings[i].id = id; | 735 | f_dfu->strings[i].id = id; |
736 | ((struct usb_interface_descriptor *)f_dfu->function[i]) | 736 | ((struct usb_interface_descriptor *)f_dfu->function[i]) |
737 | ->iInterface = id; | 737 | ->iInterface = id; |
738 | } | 738 | } |
739 | 739 | ||
740 | to_dfu_mode(f_dfu); | 740 | to_dfu_mode(f_dfu); |
741 | 741 | ||
742 | stringtab_dfu.strings = f_dfu->strings; | 742 | stringtab_dfu.strings = f_dfu->strings; |
743 | 743 | ||
744 | cdev->req->context = f_dfu; | 744 | cdev->req->context = f_dfu; |
745 | 745 | ||
746 | error: | 746 | error: |
747 | return rv; | 747 | return rv; |
748 | } | 748 | } |
749 | 749 | ||
750 | static void dfu_unbind(struct usb_configuration *c, struct usb_function *f) | 750 | static void dfu_unbind(struct usb_configuration *c, struct usb_function *f) |
751 | { | 751 | { |
752 | struct f_dfu *f_dfu = func_to_dfu(f); | 752 | struct f_dfu *f_dfu = func_to_dfu(f); |
753 | int alt_num = dfu_get_alt_number(); | 753 | int alt_num = dfu_get_alt_number(); |
754 | int i; | 754 | int i; |
755 | 755 | ||
756 | if (f_dfu->strings) { | 756 | if (f_dfu->strings) { |
757 | i = alt_num; | 757 | i = alt_num; |
758 | while (i) | 758 | while (i) |
759 | f_dfu->strings[--i].s = NULL; | 759 | f_dfu->strings[--i].s = NULL; |
760 | 760 | ||
761 | free(f_dfu->strings); | 761 | free(f_dfu->strings); |
762 | } | 762 | } |
763 | 763 | ||
764 | if (f_dfu->function) { | 764 | if (f_dfu->function) { |
765 | i = alt_num; | 765 | i = alt_num; |
766 | while (i) { | 766 | while (i) { |
767 | free(f_dfu->function[--i]); | 767 | free(f_dfu->function[--i]); |
768 | f_dfu->function[i] = NULL; | 768 | f_dfu->function[i] = NULL; |
769 | } | 769 | } |
770 | free(f_dfu->function); | 770 | free(f_dfu->function); |
771 | } | 771 | } |
772 | 772 | ||
773 | free(f_dfu); | 773 | free(f_dfu); |
774 | } | 774 | } |
775 | 775 | ||
776 | static int dfu_set_alt(struct usb_function *f, unsigned intf, unsigned alt) | 776 | static int dfu_set_alt(struct usb_function *f, unsigned intf, unsigned alt) |
777 | { | 777 | { |
778 | struct f_dfu *f_dfu = func_to_dfu(f); | 778 | struct f_dfu *f_dfu = func_to_dfu(f); |
779 | 779 | ||
780 | debug("%s: intf:%d alt:%d\n", __func__, intf, alt); | 780 | debug("%s: intf:%d alt:%d\n", __func__, intf, alt); |
781 | 781 | ||
782 | f_dfu->altsetting = alt; | 782 | f_dfu->altsetting = alt; |
783 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; | 783 | f_dfu->dfu_state = DFU_STATE_dfuIDLE; |
784 | f_dfu->dfu_status = DFU_STATUS_OK; | 784 | f_dfu->dfu_status = DFU_STATUS_OK; |
785 | 785 | ||
786 | return 0; | 786 | return 0; |
787 | } | 787 | } |
788 | 788 | ||
789 | /* TODO: is this really what we need here? */ | 789 | /* TODO: is this really what we need here? */ |
790 | static void dfu_disable(struct usb_function *f) | 790 | static void dfu_disable(struct usb_function *f) |
791 | { | 791 | { |
792 | struct f_dfu *f_dfu = func_to_dfu(f); | 792 | struct f_dfu *f_dfu = func_to_dfu(f); |
793 | if (f_dfu->config == 0) | 793 | if (f_dfu->config == 0) |
794 | return; | 794 | return; |
795 | 795 | ||
796 | debug("%s: reset config\n", __func__); | 796 | debug("%s: reset config\n", __func__); |
797 | 797 | ||
798 | f_dfu->config = 0; | 798 | f_dfu->config = 0; |
799 | } | 799 | } |
800 | 800 | ||
801 | static int dfu_bind_config(struct usb_configuration *c) | 801 | static int dfu_bind_config(struct usb_configuration *c) |
802 | { | 802 | { |
803 | struct f_dfu *f_dfu; | 803 | struct f_dfu *f_dfu; |
804 | int status; | 804 | int status; |
805 | 805 | ||
806 | f_dfu = calloc(sizeof(*f_dfu), 1); | 806 | f_dfu = calloc(sizeof(*f_dfu), 1); |
807 | if (!f_dfu) | 807 | if (!f_dfu) |
808 | return -ENOMEM; | 808 | return -ENOMEM; |
809 | f_dfu->usb_function.name = "dfu"; | 809 | f_dfu->usb_function.name = "dfu"; |
810 | f_dfu->usb_function.hs_descriptors = dfu_runtime_descs; | 810 | f_dfu->usb_function.hs_descriptors = dfu_runtime_descs; |
811 | f_dfu->usb_function.bind = dfu_bind; | 811 | f_dfu->usb_function.bind = dfu_bind; |
812 | f_dfu->usb_function.unbind = dfu_unbind; | 812 | f_dfu->usb_function.unbind = dfu_unbind; |
813 | f_dfu->usb_function.set_alt = dfu_set_alt; | 813 | f_dfu->usb_function.set_alt = dfu_set_alt; |
814 | f_dfu->usb_function.disable = dfu_disable; | 814 | f_dfu->usb_function.disable = dfu_disable; |
815 | f_dfu->usb_function.strings = dfu_generic_strings; | 815 | f_dfu->usb_function.strings = dfu_generic_strings; |
816 | f_dfu->usb_function.setup = dfu_handle; | 816 | f_dfu->usb_function.setup = dfu_handle; |
817 | f_dfu->poll_timeout = DFU_DEFAULT_POLL_TIMEOUT; | 817 | f_dfu->poll_timeout = DFU_DEFAULT_POLL_TIMEOUT; |
818 | 818 | ||
819 | status = usb_add_function(c, &f_dfu->usb_function); | 819 | status = usb_add_function(c, &f_dfu->usb_function); |
820 | if (status) | 820 | if (status) |
821 | free(f_dfu); | 821 | free(f_dfu); |
822 | 822 | ||
823 | return status; | 823 | return status; |
824 | } | 824 | } |
825 | 825 | ||
826 | int dfu_add(struct usb_configuration *c) | 826 | int dfu_add(struct usb_configuration *c) |
827 | { | 827 | { |
828 | int id; | 828 | int id; |
829 | 829 | ||
830 | id = usb_string_id(c->cdev); | 830 | id = usb_string_id(c->cdev); |
831 | if (id < 0) | 831 | if (id < 0) |
832 | return id; | 832 | return id; |
833 | strings_dfu_generic[0].id = id; | 833 | strings_dfu_generic[0].id = id; |
834 | dfu_intf_runtime.iInterface = id; | 834 | dfu_intf_runtime.iInterface = id; |
835 | 835 | ||
836 | debug("%s: cdev: 0x%p gadget:0x%p gadget->ep0: 0x%p\n", __func__, | 836 | debug("%s: cdev: 0x%p gadget:0x%p gadget->ep0: 0x%p\n", __func__, |
837 | c->cdev, c->cdev->gadget, c->cdev->gadget->ep0); | 837 | c->cdev, c->cdev->gadget, c->cdev->gadget->ep0); |
838 | 838 | ||
839 | return dfu_bind_config(c); | 839 | return dfu_bind_config(c); |
840 | } | 840 | } |
841 | 841 | ||
842 | DECLARE_GADGET_BIND_CALLBACK(usb_dnl_dfu, dfu_add); | 842 | DECLARE_GADGET_BIND_CALLBACK(usb_dnl_dfu, dfu_add); |
843 | 843 |
drivers/usb/gadget/s3c_udc_otg.c
1 | /* | 1 | /* |
2 | * drivers/usb/gadget/s3c_udc_otg.c | 2 | * drivers/usb/gadget/s3c_udc_otg.c |
3 | * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers | 3 | * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers |
4 | * | 4 | * |
5 | * Copyright (C) 2008 for Samsung Electronics | 5 | * Copyright (C) 2008 for Samsung Electronics |
6 | * | 6 | * |
7 | * BSP Support for Samsung's UDC driver | 7 | * BSP Support for Samsung's UDC driver |
8 | * available at: | 8 | * available at: |
9 | * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git | 9 | * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git |
10 | * | 10 | * |
11 | * State machine bugfixes: | 11 | * State machine bugfixes: |
12 | * Marek Szyprowski <m.szyprowski@samsung.com> | 12 | * Marek Szyprowski <m.szyprowski@samsung.com> |
13 | * | 13 | * |
14 | * Ported to u-boot: | 14 | * Ported to u-boot: |
15 | * Marek Szyprowski <m.szyprowski@samsung.com> | 15 | * Marek Szyprowski <m.szyprowski@samsung.com> |
16 | * Lukasz Majewski <l.majewski@samsumg.com> | 16 | * Lukasz Majewski <l.majewski@samsumg.com> |
17 | * | 17 | * |
18 | * SPDX-License-Identifier: GPL-2.0+ | 18 | * SPDX-License-Identifier: GPL-2.0+ |
19 | */ | 19 | */ |
20 | #undef DEBUG | 20 | #undef DEBUG |
21 | #include <common.h> | 21 | #include <common.h> |
22 | #include <asm/errno.h> | 22 | #include <asm/errno.h> |
23 | #include <linux/list.h> | 23 | #include <linux/list.h> |
24 | #include <malloc.h> | 24 | #include <malloc.h> |
25 | 25 | ||
26 | #include <linux/usb/ch9.h> | 26 | #include <linux/usb/ch9.h> |
27 | #include <linux/usb/gadget.h> | 27 | #include <linux/usb/gadget.h> |
28 | 28 | ||
29 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
30 | #include <asm/unaligned.h> | 30 | #include <asm/unaligned.h> |
31 | #include <asm/io.h> | 31 | #include <asm/io.h> |
32 | 32 | ||
33 | #include <asm/mach-types.h> | 33 | #include <asm/mach-types.h> |
34 | #include <asm/arch/gpio.h> | 34 | #include <asm/arch/gpio.h> |
35 | 35 | ||
36 | #include "regs-otg.h" | 36 | #include "regs-otg.h" |
37 | #include <usb/lin_gadget_compat.h> | 37 | #include <usb/lin_gadget_compat.h> |
38 | 38 | ||
39 | /***********************************************************/ | 39 | /***********************************************************/ |
40 | 40 | ||
41 | #define OTG_DMA_MODE 1 | 41 | #define OTG_DMA_MODE 1 |
42 | 42 | ||
43 | #define DEBUG_SETUP 0 | 43 | #define DEBUG_SETUP 0 |
44 | #define DEBUG_EP0 0 | 44 | #define DEBUG_EP0 0 |
45 | #define DEBUG_ISR 0 | 45 | #define DEBUG_ISR 0 |
46 | #define DEBUG_OUT_EP 0 | 46 | #define DEBUG_OUT_EP 0 |
47 | #define DEBUG_IN_EP 0 | 47 | #define DEBUG_IN_EP 0 |
48 | 48 | ||
49 | #include <usb/s3c_udc.h> | 49 | #include <usb/s3c_udc.h> |
50 | 50 | ||
51 | #define EP0_CON 0 | 51 | #define EP0_CON 0 |
52 | #define EP_MASK 0xF | 52 | #define EP_MASK 0xF |
53 | 53 | ||
54 | static char *state_names[] = { | 54 | static char *state_names[] = { |
55 | "WAIT_FOR_SETUP", | 55 | "WAIT_FOR_SETUP", |
56 | "DATA_STATE_XMIT", | 56 | "DATA_STATE_XMIT", |
57 | "DATA_STATE_NEED_ZLP", | 57 | "DATA_STATE_NEED_ZLP", |
58 | "WAIT_FOR_OUT_STATUS", | 58 | "WAIT_FOR_OUT_STATUS", |
59 | "DATA_STATE_RECV", | 59 | "DATA_STATE_RECV", |
60 | "WAIT_FOR_COMPLETE", | 60 | "WAIT_FOR_COMPLETE", |
61 | "WAIT_FOR_OUT_COMPLETE", | 61 | "WAIT_FOR_OUT_COMPLETE", |
62 | "WAIT_FOR_IN_COMPLETE", | 62 | "WAIT_FOR_IN_COMPLETE", |
63 | "WAIT_FOR_NULL_COMPLETE", | 63 | "WAIT_FOR_NULL_COMPLETE", |
64 | }; | 64 | }; |
65 | 65 | ||
66 | #define DRIVER_DESC "S3C HS USB OTG Device Driver, (c) Samsung Electronics" | 66 | #define DRIVER_DESC "S3C HS USB OTG Device Driver, (c) Samsung Electronics" |
67 | #define DRIVER_VERSION "15 March 2009" | 67 | #define DRIVER_VERSION "15 March 2009" |
68 | 68 | ||
69 | struct s3c_udc *the_controller; | 69 | struct s3c_udc *the_controller; |
70 | 70 | ||
71 | static const char driver_name[] = "s3c-udc"; | 71 | static const char driver_name[] = "s3c-udc"; |
72 | static const char driver_desc[] = DRIVER_DESC; | 72 | static const char driver_desc[] = DRIVER_DESC; |
73 | static const char ep0name[] = "ep0-control"; | 73 | static const char ep0name[] = "ep0-control"; |
74 | 74 | ||
75 | /* Max packet size*/ | 75 | /* Max packet size*/ |
76 | static unsigned int ep0_fifo_size = 64; | 76 | static unsigned int ep0_fifo_size = 64; |
77 | static unsigned int ep_fifo_size = 512; | 77 | static unsigned int ep_fifo_size = 512; |
78 | static unsigned int ep_fifo_size2 = 1024; | 78 | static unsigned int ep_fifo_size2 = 1024; |
79 | static int reset_available = 1; | 79 | static int reset_available = 1; |
80 | 80 | ||
81 | static struct usb_ctrlrequest *usb_ctrl; | 81 | static struct usb_ctrlrequest *usb_ctrl; |
82 | static dma_addr_t usb_ctrl_dma_addr; | 82 | static dma_addr_t usb_ctrl_dma_addr; |
83 | 83 | ||
84 | /* | 84 | /* |
85 | Local declarations. | 85 | Local declarations. |
86 | */ | 86 | */ |
87 | static int s3c_ep_enable(struct usb_ep *ep, | 87 | static int s3c_ep_enable(struct usb_ep *ep, |
88 | const struct usb_endpoint_descriptor *); | 88 | const struct usb_endpoint_descriptor *); |
89 | static int s3c_ep_disable(struct usb_ep *ep); | 89 | static int s3c_ep_disable(struct usb_ep *ep); |
90 | static struct usb_request *s3c_alloc_request(struct usb_ep *ep, | 90 | static struct usb_request *s3c_alloc_request(struct usb_ep *ep, |
91 | gfp_t gfp_flags); | 91 | gfp_t gfp_flags); |
92 | static void s3c_free_request(struct usb_ep *ep, struct usb_request *); | 92 | static void s3c_free_request(struct usb_ep *ep, struct usb_request *); |
93 | 93 | ||
94 | static int s3c_queue(struct usb_ep *ep, struct usb_request *, gfp_t gfp_flags); | 94 | static int s3c_queue(struct usb_ep *ep, struct usb_request *, gfp_t gfp_flags); |
95 | static int s3c_dequeue(struct usb_ep *ep, struct usb_request *); | 95 | static int s3c_dequeue(struct usb_ep *ep, struct usb_request *); |
96 | static int s3c_fifo_status(struct usb_ep *ep); | 96 | static int s3c_fifo_status(struct usb_ep *ep); |
97 | static void s3c_fifo_flush(struct usb_ep *ep); | 97 | static void s3c_fifo_flush(struct usb_ep *ep); |
98 | static void s3c_ep0_read(struct s3c_udc *dev); | 98 | static void s3c_ep0_read(struct s3c_udc *dev); |
99 | static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep); | 99 | static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep); |
100 | static void s3c_handle_ep0(struct s3c_udc *dev); | 100 | static void s3c_handle_ep0(struct s3c_udc *dev); |
101 | static int s3c_ep0_write(struct s3c_udc *dev); | 101 | static int s3c_ep0_write(struct s3c_udc *dev); |
102 | static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req); | 102 | static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req); |
103 | static void done(struct s3c_ep *ep, struct s3c_request *req, int status); | 103 | static void done(struct s3c_ep *ep, struct s3c_request *req, int status); |
104 | static void stop_activity(struct s3c_udc *dev, | 104 | static void stop_activity(struct s3c_udc *dev, |
105 | struct usb_gadget_driver *driver); | 105 | struct usb_gadget_driver *driver); |
106 | static int udc_enable(struct s3c_udc *dev); | 106 | static int udc_enable(struct s3c_udc *dev); |
107 | static void udc_set_address(struct s3c_udc *dev, unsigned char address); | 107 | static void udc_set_address(struct s3c_udc *dev, unsigned char address); |
108 | static void reconfig_usbd(void); | 108 | static void reconfig_usbd(void); |
109 | static void set_max_pktsize(struct s3c_udc *dev, enum usb_device_speed speed); | 109 | static void set_max_pktsize(struct s3c_udc *dev, enum usb_device_speed speed); |
110 | static void nuke(struct s3c_ep *ep, int status); | 110 | static void nuke(struct s3c_ep *ep, int status); |
111 | static int s3c_udc_set_halt(struct usb_ep *_ep, int value); | 111 | static int s3c_udc_set_halt(struct usb_ep *_ep, int value); |
112 | static void s3c_udc_set_nak(struct s3c_ep *ep); | 112 | static void s3c_udc_set_nak(struct s3c_ep *ep); |
113 | 113 | ||
114 | void set_udc_gadget_private_data(void *p) | 114 | void set_udc_gadget_private_data(void *p) |
115 | { | 115 | { |
116 | debug_cond(DEBUG_SETUP != 0, | 116 | debug_cond(DEBUG_SETUP != 0, |
117 | "%s: the_controller: 0x%p, p: 0x%p\n", __func__, | 117 | "%s: the_controller: 0x%p, p: 0x%p\n", __func__, |
118 | the_controller, p); | 118 | the_controller, p); |
119 | the_controller->gadget.dev.device_data = p; | 119 | the_controller->gadget.dev.device_data = p; |
120 | } | 120 | } |
121 | 121 | ||
122 | void *get_udc_gadget_private_data(struct usb_gadget *gadget) | 122 | void *get_udc_gadget_private_data(struct usb_gadget *gadget) |
123 | { | 123 | { |
124 | return gadget->dev.device_data; | 124 | return gadget->dev.device_data; |
125 | } | 125 | } |
126 | 126 | ||
127 | static struct usb_ep_ops s3c_ep_ops = { | 127 | static struct usb_ep_ops s3c_ep_ops = { |
128 | .enable = s3c_ep_enable, | 128 | .enable = s3c_ep_enable, |
129 | .disable = s3c_ep_disable, | 129 | .disable = s3c_ep_disable, |
130 | 130 | ||
131 | .alloc_request = s3c_alloc_request, | 131 | .alloc_request = s3c_alloc_request, |
132 | .free_request = s3c_free_request, | 132 | .free_request = s3c_free_request, |
133 | 133 | ||
134 | .queue = s3c_queue, | 134 | .queue = s3c_queue, |
135 | .dequeue = s3c_dequeue, | 135 | .dequeue = s3c_dequeue, |
136 | 136 | ||
137 | .set_halt = s3c_udc_set_halt, | 137 | .set_halt = s3c_udc_set_halt, |
138 | .fifo_status = s3c_fifo_status, | 138 | .fifo_status = s3c_fifo_status, |
139 | .fifo_flush = s3c_fifo_flush, | 139 | .fifo_flush = s3c_fifo_flush, |
140 | }; | 140 | }; |
141 | 141 | ||
142 | #define create_proc_files() do {} while (0) | 142 | #define create_proc_files() do {} while (0) |
143 | #define remove_proc_files() do {} while (0) | 143 | #define remove_proc_files() do {} while (0) |
144 | 144 | ||
145 | /***********************************************************/ | 145 | /***********************************************************/ |
146 | 146 | ||
147 | void __iomem *regs_otg; | 147 | void __iomem *regs_otg; |
148 | struct s3c_usbotg_reg *reg; | 148 | struct s3c_usbotg_reg *reg; |
149 | struct s3c_usbotg_phy *phy; | 149 | struct s3c_usbotg_phy *phy; |
150 | static unsigned int usb_phy_ctrl; | 150 | static unsigned int usb_phy_ctrl; |
151 | 151 | ||
152 | bool dfu_usb_get_reset(void) | ||
153 | { | ||
154 | return !!(readl(®->gintsts) & INT_RESET); | ||
155 | } | ||
156 | |||
152 | void otg_phy_init(struct s3c_udc *dev) | 157 | void otg_phy_init(struct s3c_udc *dev) |
153 | { | 158 | { |
154 | dev->pdata->phy_control(1); | 159 | dev->pdata->phy_control(1); |
155 | 160 | ||
156 | /*USB PHY0 Enable */ | 161 | /*USB PHY0 Enable */ |
157 | printf("USB PHY0 Enable\n"); | 162 | printf("USB PHY0 Enable\n"); |
158 | 163 | ||
159 | /* Enable PHY */ | 164 | /* Enable PHY */ |
160 | writel(readl(usb_phy_ctrl) | USB_PHY_CTRL_EN0, usb_phy_ctrl); | 165 | writel(readl(usb_phy_ctrl) | USB_PHY_CTRL_EN0, usb_phy_ctrl); |
161 | 166 | ||
162 | if (dev->pdata->usb_flags == PHY0_SLEEP) /* C210 Universal */ | 167 | if (dev->pdata->usb_flags == PHY0_SLEEP) /* C210 Universal */ |
163 | writel((readl(&phy->phypwr) | 168 | writel((readl(&phy->phypwr) |
164 | &~(PHY_0_SLEEP | OTG_DISABLE_0 | ANALOG_PWRDOWN) | 169 | &~(PHY_0_SLEEP | OTG_DISABLE_0 | ANALOG_PWRDOWN) |
165 | &~FORCE_SUSPEND_0), &phy->phypwr); | 170 | &~FORCE_SUSPEND_0), &phy->phypwr); |
166 | else /* C110 GONI */ | 171 | else /* C110 GONI */ |
167 | writel((readl(&phy->phypwr) &~(OTG_DISABLE_0 | ANALOG_PWRDOWN) | 172 | writel((readl(&phy->phypwr) &~(OTG_DISABLE_0 | ANALOG_PWRDOWN) |
168 | &~FORCE_SUSPEND_0), &phy->phypwr); | 173 | &~FORCE_SUSPEND_0), &phy->phypwr); |
169 | 174 | ||
170 | if (s5p_cpu_id == 0x4412) | 175 | if (s5p_cpu_id == 0x4412) |
171 | writel((readl(&phy->phyclk) & ~(EXYNOS4X12_ID_PULLUP0 | | 176 | writel((readl(&phy->phyclk) & ~(EXYNOS4X12_ID_PULLUP0 | |
172 | EXYNOS4X12_COMMON_ON_N0)) | EXYNOS4X12_CLK_SEL_24MHZ, | 177 | EXYNOS4X12_COMMON_ON_N0)) | EXYNOS4X12_CLK_SEL_24MHZ, |
173 | &phy->phyclk); /* PLL 24Mhz */ | 178 | &phy->phyclk); /* PLL 24Mhz */ |
174 | else | 179 | else |
175 | writel((readl(&phy->phyclk) & ~(ID_PULLUP0 | COMMON_ON_N0)) | | 180 | writel((readl(&phy->phyclk) & ~(ID_PULLUP0 | COMMON_ON_N0)) | |
176 | CLK_SEL_24MHZ, &phy->phyclk); /* PLL 24Mhz */ | 181 | CLK_SEL_24MHZ, &phy->phyclk); /* PLL 24Mhz */ |
177 | 182 | ||
178 | writel((readl(&phy->rstcon) &~(LINK_SW_RST | PHYLNK_SW_RST)) | 183 | writel((readl(&phy->rstcon) &~(LINK_SW_RST | PHYLNK_SW_RST)) |
179 | | PHY_SW_RST0, &phy->rstcon); | 184 | | PHY_SW_RST0, &phy->rstcon); |
180 | udelay(10); | 185 | udelay(10); |
181 | writel(readl(&phy->rstcon) | 186 | writel(readl(&phy->rstcon) |
182 | &~(PHY_SW_RST0 | LINK_SW_RST | PHYLNK_SW_RST), &phy->rstcon); | 187 | &~(PHY_SW_RST0 | LINK_SW_RST | PHYLNK_SW_RST), &phy->rstcon); |
183 | udelay(10); | 188 | udelay(10); |
184 | } | 189 | } |
185 | 190 | ||
186 | void otg_phy_off(struct s3c_udc *dev) | 191 | void otg_phy_off(struct s3c_udc *dev) |
187 | { | 192 | { |
188 | /* reset controller just in case */ | 193 | /* reset controller just in case */ |
189 | writel(PHY_SW_RST0, &phy->rstcon); | 194 | writel(PHY_SW_RST0, &phy->rstcon); |
190 | udelay(20); | 195 | udelay(20); |
191 | writel(readl(&phy->phypwr) &~PHY_SW_RST0, &phy->rstcon); | 196 | writel(readl(&phy->phypwr) &~PHY_SW_RST0, &phy->rstcon); |
192 | udelay(20); | 197 | udelay(20); |
193 | 198 | ||
194 | writel(readl(&phy->phypwr) | OTG_DISABLE_0 | ANALOG_PWRDOWN | 199 | writel(readl(&phy->phypwr) | OTG_DISABLE_0 | ANALOG_PWRDOWN |
195 | | FORCE_SUSPEND_0, &phy->phypwr); | 200 | | FORCE_SUSPEND_0, &phy->phypwr); |
196 | 201 | ||
197 | writel(readl(usb_phy_ctrl) &~USB_PHY_CTRL_EN0, usb_phy_ctrl); | 202 | writel(readl(usb_phy_ctrl) &~USB_PHY_CTRL_EN0, usb_phy_ctrl); |
198 | 203 | ||
199 | writel((readl(&phy->phyclk) & ~(ID_PULLUP0 | COMMON_ON_N0)), | 204 | writel((readl(&phy->phyclk) & ~(ID_PULLUP0 | COMMON_ON_N0)), |
200 | &phy->phyclk); | 205 | &phy->phyclk); |
201 | 206 | ||
202 | udelay(10000); | 207 | udelay(10000); |
203 | 208 | ||
204 | dev->pdata->phy_control(0); | 209 | dev->pdata->phy_control(0); |
205 | } | 210 | } |
206 | 211 | ||
207 | /***********************************************************/ | 212 | /***********************************************************/ |
208 | 213 | ||
209 | #include "s3c_udc_otg_xfer_dma.c" | 214 | #include "s3c_udc_otg_xfer_dma.c" |
210 | 215 | ||
211 | /* | 216 | /* |
212 | * udc_disable - disable USB device controller | 217 | * udc_disable - disable USB device controller |
213 | */ | 218 | */ |
214 | static void udc_disable(struct s3c_udc *dev) | 219 | static void udc_disable(struct s3c_udc *dev) |
215 | { | 220 | { |
216 | debug_cond(DEBUG_SETUP != 0, "%s: %p\n", __func__, dev); | 221 | debug_cond(DEBUG_SETUP != 0, "%s: %p\n", __func__, dev); |
217 | 222 | ||
218 | udc_set_address(dev, 0); | 223 | udc_set_address(dev, 0); |
219 | 224 | ||
220 | dev->ep0state = WAIT_FOR_SETUP; | 225 | dev->ep0state = WAIT_FOR_SETUP; |
221 | dev->gadget.speed = USB_SPEED_UNKNOWN; | 226 | dev->gadget.speed = USB_SPEED_UNKNOWN; |
222 | dev->usb_address = 0; | 227 | dev->usb_address = 0; |
223 | 228 | ||
224 | otg_phy_off(dev); | 229 | otg_phy_off(dev); |
225 | } | 230 | } |
226 | 231 | ||
227 | /* | 232 | /* |
228 | * udc_reinit - initialize software state | 233 | * udc_reinit - initialize software state |
229 | */ | 234 | */ |
230 | static void udc_reinit(struct s3c_udc *dev) | 235 | static void udc_reinit(struct s3c_udc *dev) |
231 | { | 236 | { |
232 | unsigned int i; | 237 | unsigned int i; |
233 | 238 | ||
234 | debug_cond(DEBUG_SETUP != 0, "%s: %p\n", __func__, dev); | 239 | debug_cond(DEBUG_SETUP != 0, "%s: %p\n", __func__, dev); |
235 | 240 | ||
236 | /* device/ep0 records init */ | 241 | /* device/ep0 records init */ |
237 | INIT_LIST_HEAD(&dev->gadget.ep_list); | 242 | INIT_LIST_HEAD(&dev->gadget.ep_list); |
238 | INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); | 243 | INIT_LIST_HEAD(&dev->gadget.ep0->ep_list); |
239 | dev->ep0state = WAIT_FOR_SETUP; | 244 | dev->ep0state = WAIT_FOR_SETUP; |
240 | 245 | ||
241 | /* basic endpoint records init */ | 246 | /* basic endpoint records init */ |
242 | for (i = 0; i < S3C_MAX_ENDPOINTS; i++) { | 247 | for (i = 0; i < S3C_MAX_ENDPOINTS; i++) { |
243 | struct s3c_ep *ep = &dev->ep[i]; | 248 | struct s3c_ep *ep = &dev->ep[i]; |
244 | 249 | ||
245 | if (i != 0) | 250 | if (i != 0) |
246 | list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list); | 251 | list_add_tail(&ep->ep.ep_list, &dev->gadget.ep_list); |
247 | 252 | ||
248 | ep->desc = 0; | 253 | ep->desc = 0; |
249 | ep->stopped = 0; | 254 | ep->stopped = 0; |
250 | INIT_LIST_HEAD(&ep->queue); | 255 | INIT_LIST_HEAD(&ep->queue); |
251 | ep->pio_irqs = 0; | 256 | ep->pio_irqs = 0; |
252 | } | 257 | } |
253 | 258 | ||
254 | /* the rest was statically initialized, and is read-only */ | 259 | /* the rest was statically initialized, and is read-only */ |
255 | } | 260 | } |
256 | 261 | ||
257 | #define BYTES2MAXP(x) (x / 8) | 262 | #define BYTES2MAXP(x) (x / 8) |
258 | #define MAXP2BYTES(x) (x * 8) | 263 | #define MAXP2BYTES(x) (x * 8) |
259 | 264 | ||
260 | /* until it's enabled, this UDC should be completely invisible | 265 | /* until it's enabled, this UDC should be completely invisible |
261 | * to any USB host. | 266 | * to any USB host. |
262 | */ | 267 | */ |
263 | static int udc_enable(struct s3c_udc *dev) | 268 | static int udc_enable(struct s3c_udc *dev) |
264 | { | 269 | { |
265 | debug_cond(DEBUG_SETUP != 0, "%s: %p\n", __func__, dev); | 270 | debug_cond(DEBUG_SETUP != 0, "%s: %p\n", __func__, dev); |
266 | 271 | ||
267 | otg_phy_init(dev); | 272 | otg_phy_init(dev); |
268 | reconfig_usbd(); | 273 | reconfig_usbd(); |
269 | 274 | ||
270 | debug_cond(DEBUG_SETUP != 0, | 275 | debug_cond(DEBUG_SETUP != 0, |
271 | "S3C USB 2.0 OTG Controller Core Initialized : 0x%x\n", | 276 | "S3C USB 2.0 OTG Controller Core Initialized : 0x%x\n", |
272 | readl(®->gintmsk)); | 277 | readl(®->gintmsk)); |
273 | 278 | ||
274 | dev->gadget.speed = USB_SPEED_UNKNOWN; | 279 | dev->gadget.speed = USB_SPEED_UNKNOWN; |
275 | 280 | ||
276 | return 0; | 281 | return 0; |
277 | } | 282 | } |
278 | 283 | ||
279 | /* | 284 | /* |
280 | Register entry point for the peripheral controller driver. | 285 | Register entry point for the peripheral controller driver. |
281 | */ | 286 | */ |
282 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) | 287 | int usb_gadget_register_driver(struct usb_gadget_driver *driver) |
283 | { | 288 | { |
284 | struct s3c_udc *dev = the_controller; | 289 | struct s3c_udc *dev = the_controller; |
285 | int retval = 0; | 290 | int retval = 0; |
286 | unsigned long flags; | 291 | unsigned long flags = 0; |
287 | 292 | ||
288 | debug_cond(DEBUG_SETUP != 0, "%s: %s\n", __func__, "no name"); | 293 | debug_cond(DEBUG_SETUP != 0, "%s: %s\n", __func__, "no name"); |
289 | 294 | ||
290 | if (!driver | 295 | if (!driver |
291 | || (driver->speed != USB_SPEED_FULL | 296 | || (driver->speed != USB_SPEED_FULL |
292 | && driver->speed != USB_SPEED_HIGH) | 297 | && driver->speed != USB_SPEED_HIGH) |
293 | || !driver->bind || !driver->disconnect || !driver->setup) | 298 | || !driver->bind || !driver->disconnect || !driver->setup) |
294 | return -EINVAL; | 299 | return -EINVAL; |
295 | if (!dev) | 300 | if (!dev) |
296 | return -ENODEV; | 301 | return -ENODEV; |
297 | if (dev->driver) | 302 | if (dev->driver) |
298 | return -EBUSY; | 303 | return -EBUSY; |
299 | 304 | ||
300 | spin_lock_irqsave(&dev->lock, flags); | 305 | spin_lock_irqsave(&dev->lock, flags); |
301 | /* first hook up the driver ... */ | 306 | /* first hook up the driver ... */ |
302 | dev->driver = driver; | 307 | dev->driver = driver; |
303 | spin_unlock_irqrestore(&dev->lock, flags); | 308 | spin_unlock_irqrestore(&dev->lock, flags); |
304 | 309 | ||
305 | if (retval) { /* TODO */ | 310 | if (retval) { /* TODO */ |
306 | printf("target device_add failed, error %d\n", retval); | 311 | printf("target device_add failed, error %d\n", retval); |
307 | return retval; | 312 | return retval; |
308 | } | 313 | } |
309 | 314 | ||
310 | retval = driver->bind(&dev->gadget); | 315 | retval = driver->bind(&dev->gadget); |
311 | if (retval) { | 316 | if (retval) { |
312 | debug_cond(DEBUG_SETUP != 0, | 317 | debug_cond(DEBUG_SETUP != 0, |
313 | "%s: bind to driver --> error %d\n", | 318 | "%s: bind to driver --> error %d\n", |
314 | dev->gadget.name, retval); | 319 | dev->gadget.name, retval); |
315 | dev->driver = 0; | 320 | dev->driver = 0; |
316 | return retval; | 321 | return retval; |
317 | } | 322 | } |
318 | 323 | ||
319 | enable_irq(IRQ_OTG); | 324 | enable_irq(IRQ_OTG); |
320 | 325 | ||
321 | debug_cond(DEBUG_SETUP != 0, | 326 | debug_cond(DEBUG_SETUP != 0, |
322 | "Registered gadget driver %s\n", dev->gadget.name); | 327 | "Registered gadget driver %s\n", dev->gadget.name); |
323 | udc_enable(dev); | 328 | udc_enable(dev); |
324 | 329 | ||
325 | return 0; | 330 | return 0; |
326 | } | 331 | } |
327 | 332 | ||
328 | /* | 333 | /* |
329 | * Unregister entry point for the peripheral controller driver. | 334 | * Unregister entry point for the peripheral controller driver. |
330 | */ | 335 | */ |
331 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) | 336 | int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) |
332 | { | 337 | { |
333 | struct s3c_udc *dev = the_controller; | 338 | struct s3c_udc *dev = the_controller; |
334 | unsigned long flags; | 339 | unsigned long flags = 0; |
335 | 340 | ||
336 | if (!dev) | 341 | if (!dev) |
337 | return -ENODEV; | 342 | return -ENODEV; |
338 | if (!driver || driver != dev->driver) | 343 | if (!driver || driver != dev->driver) |
339 | return -EINVAL; | 344 | return -EINVAL; |
340 | 345 | ||
341 | spin_lock_irqsave(&dev->lock, flags); | 346 | spin_lock_irqsave(&dev->lock, flags); |
342 | dev->driver = 0; | 347 | dev->driver = 0; |
343 | stop_activity(dev, driver); | 348 | stop_activity(dev, driver); |
344 | spin_unlock_irqrestore(&dev->lock, flags); | 349 | spin_unlock_irqrestore(&dev->lock, flags); |
345 | 350 | ||
346 | driver->unbind(&dev->gadget); | 351 | driver->unbind(&dev->gadget); |
347 | 352 | ||
348 | disable_irq(IRQ_OTG); | 353 | disable_irq(IRQ_OTG); |
349 | 354 | ||
350 | udc_disable(dev); | 355 | udc_disable(dev); |
351 | return 0; | 356 | return 0; |
352 | } | 357 | } |
353 | 358 | ||
354 | /* | 359 | /* |
355 | * done - retire a request; caller blocked irqs | 360 | * done - retire a request; caller blocked irqs |
356 | */ | 361 | */ |
357 | static void done(struct s3c_ep *ep, struct s3c_request *req, int status) | 362 | static void done(struct s3c_ep *ep, struct s3c_request *req, int status) |
358 | { | 363 | { |
359 | unsigned int stopped = ep->stopped; | 364 | unsigned int stopped = ep->stopped; |
360 | 365 | ||
361 | debug("%s: %s %p, req = %p, stopped = %d\n", | 366 | debug("%s: %s %p, req = %p, stopped = %d\n", |
362 | __func__, ep->ep.name, ep, &req->req, stopped); | 367 | __func__, ep->ep.name, ep, &req->req, stopped); |
363 | 368 | ||
364 | list_del_init(&req->queue); | 369 | list_del_init(&req->queue); |
365 | 370 | ||
366 | if (likely(req->req.status == -EINPROGRESS)) | 371 | if (likely(req->req.status == -EINPROGRESS)) |
367 | req->req.status = status; | 372 | req->req.status = status; |
368 | else | 373 | else |
369 | status = req->req.status; | 374 | status = req->req.status; |
370 | 375 | ||
371 | if (status && status != -ESHUTDOWN) { | 376 | if (status && status != -ESHUTDOWN) { |
372 | debug("complete %s req %p stat %d len %u/%u\n", | 377 | debug("complete %s req %p stat %d len %u/%u\n", |
373 | ep->ep.name, &req->req, status, | 378 | ep->ep.name, &req->req, status, |
374 | req->req.actual, req->req.length); | 379 | req->req.actual, req->req.length); |
375 | } | 380 | } |
376 | 381 | ||
377 | /* don't modify queue heads during completion callback */ | 382 | /* don't modify queue heads during completion callback */ |
378 | ep->stopped = 1; | 383 | ep->stopped = 1; |
379 | 384 | ||
380 | #ifdef DEBUG | 385 | #ifdef DEBUG |
381 | printf("calling complete callback\n"); | 386 | printf("calling complete callback\n"); |
382 | { | 387 | { |
383 | int i, len = req->req.length; | 388 | int i, len = req->req.length; |
384 | 389 | ||
385 | printf("pkt[%d] = ", req->req.length); | 390 | printf("pkt[%d] = ", req->req.length); |
386 | if (len > 64) | 391 | if (len > 64) |
387 | len = 64; | 392 | len = 64; |
388 | for (i = 0; i < len; i++) { | 393 | for (i = 0; i < len; i++) { |
389 | printf("%02x", ((u8 *)req->req.buf)[i]); | 394 | printf("%02x", ((u8 *)req->req.buf)[i]); |
390 | if ((i & 7) == 7) | 395 | if ((i & 7) == 7) |
391 | printf(" "); | 396 | printf(" "); |
392 | } | 397 | } |
393 | printf("\n"); | 398 | printf("\n"); |
394 | } | 399 | } |
395 | #endif | 400 | #endif |
396 | spin_unlock(&ep->dev->lock); | 401 | spin_unlock(&ep->dev->lock); |
397 | req->req.complete(&ep->ep, &req->req); | 402 | req->req.complete(&ep->ep, &req->req); |
398 | spin_lock(&ep->dev->lock); | 403 | spin_lock(&ep->dev->lock); |
399 | 404 | ||
400 | debug("callback completed\n"); | 405 | debug("callback completed\n"); |
401 | 406 | ||
402 | ep->stopped = stopped; | 407 | ep->stopped = stopped; |
403 | } | 408 | } |
404 | 409 | ||
405 | /* | 410 | /* |
406 | * nuke - dequeue ALL requests | 411 | * nuke - dequeue ALL requests |
407 | */ | 412 | */ |
408 | static void nuke(struct s3c_ep *ep, int status) | 413 | static void nuke(struct s3c_ep *ep, int status) |
409 | { | 414 | { |
410 | struct s3c_request *req; | 415 | struct s3c_request *req; |
411 | 416 | ||
412 | debug("%s: %s %p\n", __func__, ep->ep.name, ep); | 417 | debug("%s: %s %p\n", __func__, ep->ep.name, ep); |
413 | 418 | ||
414 | /* called with irqs blocked */ | 419 | /* called with irqs blocked */ |
415 | while (!list_empty(&ep->queue)) { | 420 | while (!list_empty(&ep->queue)) { |
416 | req = list_entry(ep->queue.next, struct s3c_request, queue); | 421 | req = list_entry(ep->queue.next, struct s3c_request, queue); |
417 | done(ep, req, status); | 422 | done(ep, req, status); |
418 | } | 423 | } |
419 | } | 424 | } |
420 | 425 | ||
421 | static void stop_activity(struct s3c_udc *dev, | 426 | static void stop_activity(struct s3c_udc *dev, |
422 | struct usb_gadget_driver *driver) | 427 | struct usb_gadget_driver *driver) |
423 | { | 428 | { |
424 | int i; | 429 | int i; |
425 | 430 | ||
426 | /* don't disconnect drivers more than once */ | 431 | /* don't disconnect drivers more than once */ |
427 | if (dev->gadget.speed == USB_SPEED_UNKNOWN) | 432 | if (dev->gadget.speed == USB_SPEED_UNKNOWN) |
428 | driver = 0; | 433 | driver = 0; |
429 | dev->gadget.speed = USB_SPEED_UNKNOWN; | 434 | dev->gadget.speed = USB_SPEED_UNKNOWN; |
430 | 435 | ||
431 | /* prevent new request submissions, kill any outstanding requests */ | 436 | /* prevent new request submissions, kill any outstanding requests */ |
432 | for (i = 0; i < S3C_MAX_ENDPOINTS; i++) { | 437 | for (i = 0; i < S3C_MAX_ENDPOINTS; i++) { |
433 | struct s3c_ep *ep = &dev->ep[i]; | 438 | struct s3c_ep *ep = &dev->ep[i]; |
434 | ep->stopped = 1; | 439 | ep->stopped = 1; |
435 | nuke(ep, -ESHUTDOWN); | 440 | nuke(ep, -ESHUTDOWN); |
436 | } | 441 | } |
437 | 442 | ||
438 | /* report disconnect; the driver is already quiesced */ | 443 | /* report disconnect; the driver is already quiesced */ |
439 | if (driver) { | 444 | if (driver) { |
440 | spin_unlock(&dev->lock); | 445 | spin_unlock(&dev->lock); |
441 | driver->disconnect(&dev->gadget); | 446 | driver->disconnect(&dev->gadget); |
442 | spin_lock(&dev->lock); | 447 | spin_lock(&dev->lock); |
443 | } | 448 | } |
444 | 449 | ||
445 | /* re-init driver-visible data structures */ | 450 | /* re-init driver-visible data structures */ |
446 | udc_reinit(dev); | 451 | udc_reinit(dev); |
447 | } | 452 | } |
448 | 453 | ||
449 | static void reconfig_usbd(void) | 454 | static void reconfig_usbd(void) |
450 | { | 455 | { |
451 | /* 2. Soft-reset OTG Core and then unreset again. */ | 456 | /* 2. Soft-reset OTG Core and then unreset again. */ |
452 | int i; | 457 | int i; |
453 | unsigned int uTemp = writel(CORE_SOFT_RESET, ®->grstctl); | 458 | unsigned int uTemp = writel(CORE_SOFT_RESET, ®->grstctl); |
454 | 459 | ||
455 | debug("Reseting OTG controller\n"); | 460 | debug("Reseting OTG controller\n"); |
456 | 461 | ||
457 | writel(0<<15 /* PHY Low Power Clock sel*/ | 462 | writel(0<<15 /* PHY Low Power Clock sel*/ |
458 | |1<<14 /* Non-Periodic TxFIFO Rewind Enable*/ | 463 | |1<<14 /* Non-Periodic TxFIFO Rewind Enable*/ |
459 | |0x5<<10 /* Turnaround time*/ | 464 | |0x5<<10 /* Turnaround time*/ |
460 | |0<<9 | 0<<8 /* [0:HNP disable,1:HNP enable][ 0:SRP disable*/ | 465 | |0<<9 | 0<<8 /* [0:HNP disable,1:HNP enable][ 0:SRP disable*/ |
461 | /* 1:SRP enable] H1= 1,1*/ | 466 | /* 1:SRP enable] H1= 1,1*/ |
462 | |0<<7 /* Ulpi DDR sel*/ | 467 | |0<<7 /* Ulpi DDR sel*/ |
463 | |0<<6 /* 0: high speed utmi+, 1: full speed serial*/ | 468 | |0<<6 /* 0: high speed utmi+, 1: full speed serial*/ |
464 | |0<<4 /* 0: utmi+, 1:ulpi*/ | 469 | |0<<4 /* 0: utmi+, 1:ulpi*/ |
465 | |1<<3 /* phy i/f 0:8bit, 1:16bit*/ | 470 | |1<<3 /* phy i/f 0:8bit, 1:16bit*/ |
466 | |0x7<<0, /* HS/FS Timeout**/ | 471 | |0x7<<0, /* HS/FS Timeout**/ |
467 | ®->gusbcfg); | 472 | ®->gusbcfg); |
468 | 473 | ||
469 | /* 3. Put the OTG device core in the disconnected state.*/ | 474 | /* 3. Put the OTG device core in the disconnected state.*/ |
470 | uTemp = readl(®->dctl); | 475 | uTemp = readl(®->dctl); |
471 | uTemp |= SOFT_DISCONNECT; | 476 | uTemp |= SOFT_DISCONNECT; |
472 | writel(uTemp, ®->dctl); | 477 | writel(uTemp, ®->dctl); |
473 | 478 | ||
474 | udelay(20); | 479 | udelay(20); |
475 | 480 | ||
476 | /* 4. Make the OTG device core exit from the disconnected state.*/ | 481 | /* 4. Make the OTG device core exit from the disconnected state.*/ |
477 | uTemp = readl(®->dctl); | 482 | uTemp = readl(®->dctl); |
478 | uTemp = uTemp & ~SOFT_DISCONNECT; | 483 | uTemp = uTemp & ~SOFT_DISCONNECT; |
479 | writel(uTemp, ®->dctl); | 484 | writel(uTemp, ®->dctl); |
480 | 485 | ||
481 | /* 5. Configure OTG Core to initial settings of device mode.*/ | 486 | /* 5. Configure OTG Core to initial settings of device mode.*/ |
482 | /* [][1: full speed(30Mhz) 0:high speed]*/ | 487 | /* [][1: full speed(30Mhz) 0:high speed]*/ |
483 | writel(EP_MISS_CNT(1) | DEV_SPEED_HIGH_SPEED_20, ®->dcfg); | 488 | writel(EP_MISS_CNT(1) | DEV_SPEED_HIGH_SPEED_20, ®->dcfg); |
484 | 489 | ||
485 | mdelay(1); | 490 | mdelay(1); |
486 | 491 | ||
487 | /* 6. Unmask the core interrupts*/ | 492 | /* 6. Unmask the core interrupts*/ |
488 | writel(GINTMSK_INIT, ®->gintmsk); | 493 | writel(GINTMSK_INIT, ®->gintmsk); |
489 | 494 | ||
490 | /* 7. Set NAK bit of EP0, EP1, EP2*/ | 495 | /* 7. Set NAK bit of EP0, EP1, EP2*/ |
491 | writel(DEPCTL_EPDIS|DEPCTL_SNAK, ®->out_endp[EP0_CON].doepctl); | 496 | writel(DEPCTL_EPDIS|DEPCTL_SNAK, ®->out_endp[EP0_CON].doepctl); |
492 | writel(DEPCTL_EPDIS|DEPCTL_SNAK, ®->in_endp[EP0_CON].diepctl); | 497 | writel(DEPCTL_EPDIS|DEPCTL_SNAK, ®->in_endp[EP0_CON].diepctl); |
493 | 498 | ||
494 | for (i = 1; i < S3C_MAX_ENDPOINTS; i++) { | 499 | for (i = 1; i < S3C_MAX_ENDPOINTS; i++) { |
495 | writel(DEPCTL_EPDIS|DEPCTL_SNAK, ®->out_endp[i].doepctl); | 500 | writel(DEPCTL_EPDIS|DEPCTL_SNAK, ®->out_endp[i].doepctl); |
496 | writel(DEPCTL_EPDIS|DEPCTL_SNAK, ®->in_endp[i].diepctl); | 501 | writel(DEPCTL_EPDIS|DEPCTL_SNAK, ®->in_endp[i].diepctl); |
497 | } | 502 | } |
498 | 503 | ||
499 | /* 8. Unmask EPO interrupts*/ | 504 | /* 8. Unmask EPO interrupts*/ |
500 | writel(((1 << EP0_CON) << DAINT_OUT_BIT) | 505 | writel(((1 << EP0_CON) << DAINT_OUT_BIT) |
501 | | (1 << EP0_CON), ®->daintmsk); | 506 | | (1 << EP0_CON), ®->daintmsk); |
502 | 507 | ||
503 | /* 9. Unmask device OUT EP common interrupts*/ | 508 | /* 9. Unmask device OUT EP common interrupts*/ |
504 | writel(DOEPMSK_INIT, ®->doepmsk); | 509 | writel(DOEPMSK_INIT, ®->doepmsk); |
505 | 510 | ||
506 | /* 10. Unmask device IN EP common interrupts*/ | 511 | /* 10. Unmask device IN EP common interrupts*/ |
507 | writel(DIEPMSK_INIT, ®->diepmsk); | 512 | writel(DIEPMSK_INIT, ®->diepmsk); |
508 | 513 | ||
509 | /* 11. Set Rx FIFO Size (in 32-bit words) */ | 514 | /* 11. Set Rx FIFO Size (in 32-bit words) */ |
510 | writel(RX_FIFO_SIZE >> 2, ®->grxfsiz); | 515 | writel(RX_FIFO_SIZE >> 2, ®->grxfsiz); |
511 | 516 | ||
512 | /* 12. Set Non Periodic Tx FIFO Size */ | 517 | /* 12. Set Non Periodic Tx FIFO Size */ |
513 | writel((NPTX_FIFO_SIZE >> 2) << 16 | ((RX_FIFO_SIZE >> 2)) << 0, | 518 | writel((NPTX_FIFO_SIZE >> 2) << 16 | ((RX_FIFO_SIZE >> 2)) << 0, |
514 | ®->gnptxfsiz); | 519 | ®->gnptxfsiz); |
515 | 520 | ||
516 | for (i = 1; i < S3C_MAX_HW_ENDPOINTS; i++) | 521 | for (i = 1; i < S3C_MAX_HW_ENDPOINTS; i++) |
517 | writel((PTX_FIFO_SIZE >> 2) << 16 | | 522 | writel((PTX_FIFO_SIZE >> 2) << 16 | |
518 | ((RX_FIFO_SIZE + NPTX_FIFO_SIZE + | 523 | ((RX_FIFO_SIZE + NPTX_FIFO_SIZE + |
519 | PTX_FIFO_SIZE*(i-1)) >> 2) << 0, | 524 | PTX_FIFO_SIZE*(i-1)) >> 2) << 0, |
520 | ®->dieptxf[i-1]); | 525 | ®->dieptxf[i-1]); |
521 | 526 | ||
522 | /* Flush the RX FIFO */ | 527 | /* Flush the RX FIFO */ |
523 | writel(RX_FIFO_FLUSH, ®->grstctl); | 528 | writel(RX_FIFO_FLUSH, ®->grstctl); |
524 | while (readl(®->grstctl) & RX_FIFO_FLUSH) | 529 | while (readl(®->grstctl) & RX_FIFO_FLUSH) |
525 | debug("%s: waiting for S3C_UDC_OTG_GRSTCTL\n", __func__); | 530 | debug("%s: waiting for S3C_UDC_OTG_GRSTCTL\n", __func__); |
526 | 531 | ||
527 | /* Flush all the Tx FIFO's */ | 532 | /* Flush all the Tx FIFO's */ |
528 | writel(TX_FIFO_FLUSH_ALL, ®->grstctl); | 533 | writel(TX_FIFO_FLUSH_ALL, ®->grstctl); |
529 | writel(TX_FIFO_FLUSH_ALL | TX_FIFO_FLUSH, ®->grstctl); | 534 | writel(TX_FIFO_FLUSH_ALL | TX_FIFO_FLUSH, ®->grstctl); |
530 | while (readl(®->grstctl) & TX_FIFO_FLUSH) | 535 | while (readl(®->grstctl) & TX_FIFO_FLUSH) |
531 | debug("%s: waiting for S3C_UDC_OTG_GRSTCTL\n", __func__); | 536 | debug("%s: waiting for S3C_UDC_OTG_GRSTCTL\n", __func__); |
532 | 537 | ||
533 | /* 13. Clear NAK bit of EP0, EP1, EP2*/ | 538 | /* 13. Clear NAK bit of EP0, EP1, EP2*/ |
534 | /* For Slave mode*/ | 539 | /* For Slave mode*/ |
535 | /* EP0: Control OUT */ | 540 | /* EP0: Control OUT */ |
536 | writel(DEPCTL_EPDIS | DEPCTL_CNAK, | 541 | writel(DEPCTL_EPDIS | DEPCTL_CNAK, |
537 | ®->out_endp[EP0_CON].doepctl); | 542 | ®->out_endp[EP0_CON].doepctl); |
538 | 543 | ||
539 | /* 14. Initialize OTG Link Core.*/ | 544 | /* 14. Initialize OTG Link Core.*/ |
540 | writel(GAHBCFG_INIT, ®->gahbcfg); | 545 | writel(GAHBCFG_INIT, ®->gahbcfg); |
541 | } | 546 | } |
542 | 547 | ||
/*
 * Program per-endpoint maximum packet sizes for the negotiated bus speed.
 *
 * Updates the file-scope ep0_fifo_size / ep_fifo_size / ep_fifo_size2
 * globals, records the speed in dev->gadget.speed, mirrors the sizes into
 * every gadget endpoint's ep.maxpacket, and rewrites the EP0 IN/OUT
 * control registers.
 */
static void set_max_pktsize(struct s3c_udc *dev, enum usb_device_speed speed)
{
	unsigned int ep_ctrl;
	int i;

	if (speed == USB_SPEED_HIGH) {
		/* High speed: 64B control, 512B bulk, 1024B (periodic) */
		ep0_fifo_size = 64;
		ep_fifo_size = 512;
		ep_fifo_size2 = 1024;
		dev->gadget.speed = USB_SPEED_HIGH;
	} else {
		/* Full speed: everything capped at 64 bytes */
		ep0_fifo_size = 64;
		ep_fifo_size = 64;
		ep_fifo_size2 = 64;
		dev->gadget.speed = USB_SPEED_FULL;
	}

	dev->ep[0].ep.maxpacket = ep0_fifo_size;
	for (i = 1; i < S3C_MAX_ENDPOINTS; i++)
		dev->ep[i].ep.maxpacket = ep_fifo_size;

	/* EP0 - Control IN (64 bytes); MPS field value 0 selects 64 bytes */
	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
	writel(ep_ctrl|(0<<0), &reg->in_endp[EP0_CON].diepctl);

	/* EP0 - Control OUT (64 bytes) */
	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	writel(ep_ctrl|(0<<0), &reg->out_endp[EP0_CON].doepctl);
}
572 | 577 | ||
/*
 * Enable an endpoint for use with the given descriptor.
 *
 * Validates the descriptor against the hardware endpoint's fixed
 * properties (name, address, transfer type, max packet size), resets the
 * endpoint's halt/NAK state and activates it in the OTG core under the
 * device lock.
 *
 * Returns 0 on success, -EINVAL for a bad or mismatched descriptor,
 * -ERANGE for an unsupported maxpacket, -ESHUTDOWN when no gadget driver
 * is bound or the bus speed is still unknown.
 */
static int s3c_ep_enable(struct usb_ep *_ep,
			 const struct usb_endpoint_descriptor *desc)
{
	struct s3c_ep *ep;
	struct s3c_udc *dev;
	unsigned long flags = 0;

	debug("%s: %p\n", __func__, _ep);

	/*
	 * container_of() is plain pointer arithmetic, so computing it before
	 * the NULL test is safe; short-circuiting below guarantees ep is
	 * never dereferenced when _ep is NULL.
	 */
	ep = container_of(_ep, struct s3c_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
	    || desc->bDescriptorType != USB_DT_ENDPOINT
	    || ep->bEndpointAddress != desc->bEndpointAddress
	    || ep_maxpacket(ep) <
	    le16_to_cpu(get_unaligned(&desc->wMaxPacketSize))) {

		debug("%s: bad ep or descriptor\n", __func__);
		return -EINVAL;
	}

	/* xfer types must match, except that interrupt ~= bulk */
	if (ep->bmAttributes != desc->bmAttributes
	    && ep->bmAttributes != USB_ENDPOINT_XFER_BULK
	    && desc->bmAttributes != USB_ENDPOINT_XFER_INT) {

		debug("%s: %s type mismatch\n", __func__, _ep->name);
		return -EINVAL;
	}

	/* hardware _could_ do smaller, but driver doesn't */
	if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK
	     && le16_to_cpu(get_unaligned(&desc->wMaxPacketSize)) !=
	     ep_maxpacket(ep)) || !get_unaligned(&desc->wMaxPacketSize)) {

		debug("%s: bad %s maxpacket\n", __func__, _ep->name);
		return -ERANGE;
	}

	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) {

		debug("%s: bogus device state\n", __func__);
		return -ESHUTDOWN;
	}

	/* Accept the descriptor and take maxpacket from it */
	ep->stopped = 0;
	ep->desc = desc;
	ep->pio_irqs = 0;
	ep->ep.maxpacket = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));

	/* Reset halt state */
	s3c_udc_set_nak(ep);
	s3c_udc_set_halt(_ep, 0);

	spin_lock_irqsave(&ep->dev->lock, flags);
	s3c_udc_ep_activate(ep);
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	debug("%s: enabled %s, stopped = %d, maxpacket = %d\n",
	      __func__, _ep->name, ep->stopped, ep->ep.maxpacket);
	return 0;
}
635 | 640 | ||
636 | /* | 641 | /* |
637 | * Disable EP | 642 | * Disable EP |
638 | */ | 643 | */ |
639 | static int s3c_ep_disable(struct usb_ep *_ep) | 644 | static int s3c_ep_disable(struct usb_ep *_ep) |
640 | { | 645 | { |
641 | struct s3c_ep *ep; | 646 | struct s3c_ep *ep; |
642 | unsigned long flags; | 647 | unsigned long flags = 0; |
643 | 648 | ||
644 | debug("%s: %p\n", __func__, _ep); | 649 | debug("%s: %p\n", __func__, _ep); |
645 | 650 | ||
646 | ep = container_of(_ep, struct s3c_ep, ep); | 651 | ep = container_of(_ep, struct s3c_ep, ep); |
647 | if (!_ep || !ep->desc) { | 652 | if (!_ep || !ep->desc) { |
648 | debug("%s: %s not enabled\n", __func__, | 653 | debug("%s: %s not enabled\n", __func__, |
649 | _ep ? ep->ep.name : NULL); | 654 | _ep ? ep->ep.name : NULL); |
650 | return -EINVAL; | 655 | return -EINVAL; |
651 | } | 656 | } |
652 | 657 | ||
653 | spin_lock_irqsave(&ep->dev->lock, flags); | 658 | spin_lock_irqsave(&ep->dev->lock, flags); |
654 | 659 | ||
655 | /* Nuke all pending requests */ | 660 | /* Nuke all pending requests */ |
656 | nuke(ep, -ESHUTDOWN); | 661 | nuke(ep, -ESHUTDOWN); |
657 | 662 | ||
658 | ep->desc = 0; | 663 | ep->desc = 0; |
659 | ep->stopped = 1; | 664 | ep->stopped = 1; |
660 | 665 | ||
661 | spin_unlock_irqrestore(&ep->dev->lock, flags); | 666 | spin_unlock_irqrestore(&ep->dev->lock, flags); |
662 | 667 | ||
663 | debug("%s: disabled %s\n", __func__, _ep->name); | 668 | debug("%s: disabled %s\n", __func__, _ep->name); |
664 | return 0; | 669 | return 0; |
665 | } | 670 | } |
666 | 671 | ||
667 | static struct usb_request *s3c_alloc_request(struct usb_ep *ep, | 672 | static struct usb_request *s3c_alloc_request(struct usb_ep *ep, |
668 | gfp_t gfp_flags) | 673 | gfp_t gfp_flags) |
669 | { | 674 | { |
670 | struct s3c_request *req; | 675 | struct s3c_request *req; |
671 | 676 | ||
672 | debug("%s: %s %p\n", __func__, ep->name, ep); | 677 | debug("%s: %s %p\n", __func__, ep->name, ep); |
673 | 678 | ||
674 | req = memalign(CONFIG_SYS_CACHELINE_SIZE, sizeof(*req)); | 679 | req = memalign(CONFIG_SYS_CACHELINE_SIZE, sizeof(*req)); |
675 | if (!req) | 680 | if (!req) |
676 | return 0; | 681 | return 0; |
677 | 682 | ||
678 | memset(req, 0, sizeof *req); | 683 | memset(req, 0, sizeof *req); |
679 | INIT_LIST_HEAD(&req->queue); | 684 | INIT_LIST_HEAD(&req->queue); |
680 | 685 | ||
681 | return &req->req; | 686 | return &req->req; |
682 | } | 687 | } |
683 | 688 | ||
684 | static void s3c_free_request(struct usb_ep *ep, struct usb_request *_req) | 689 | static void s3c_free_request(struct usb_ep *ep, struct usb_request *_req) |
685 | { | 690 | { |
686 | struct s3c_request *req; | 691 | struct s3c_request *req; |
687 | 692 | ||
688 | debug("%s: %p\n", __func__, ep); | 693 | debug("%s: %p\n", __func__, ep); |
689 | 694 | ||
690 | req = container_of(_req, struct s3c_request, req); | 695 | req = container_of(_req, struct s3c_request, req); |
691 | WARN_ON(!list_empty(&req->queue)); | 696 | WARN_ON(!list_empty(&req->queue)); |
692 | kfree(req); | 697 | kfree(req); |
693 | } | 698 | } |
694 | 699 | ||
/*
 * dequeue JUST ONE request
 *
 * Removes _req from its endpoint queue and completes it with -ECONNRESET.
 * Returns 0 on success, -EINVAL for EP0 or when _req is not queued here.
 */
static int s3c_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct s3c_ep *ep;
	struct s3c_request *req;
	unsigned long flags = 0;

	debug("%s: %p\n", __func__, _ep);

	ep = container_of(_ep, struct s3c_ep, ep);
	/* EP0 requests cannot be dequeued */
	if (!_ep || ep->ep.name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/*
	 * If the loop ran off the end, req points at the list head cast to
	 * an entry, not at _req — so this detects "not found".
	 */
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* Complete the request; done() also unlinks it from the queue */
	done(ep, req, -ECONNRESET);

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
725 | 730 | ||
726 | /* | 731 | /* |
727 | * Return bytes in EP FIFO | 732 | * Return bytes in EP FIFO |
728 | */ | 733 | */ |
729 | static int s3c_fifo_status(struct usb_ep *_ep) | 734 | static int s3c_fifo_status(struct usb_ep *_ep) |
730 | { | 735 | { |
731 | int count = 0; | 736 | int count = 0; |
732 | struct s3c_ep *ep; | 737 | struct s3c_ep *ep; |
733 | 738 | ||
734 | ep = container_of(_ep, struct s3c_ep, ep); | 739 | ep = container_of(_ep, struct s3c_ep, ep); |
735 | if (!_ep) { | 740 | if (!_ep) { |
736 | debug("%s: bad ep\n", __func__); | 741 | debug("%s: bad ep\n", __func__); |
737 | return -ENODEV; | 742 | return -ENODEV; |
738 | } | 743 | } |
739 | 744 | ||
740 | debug("%s: %d\n", __func__, ep_index(ep)); | 745 | debug("%s: %d\n", __func__, ep_index(ep)); |
741 | 746 | ||
742 | /* LPD can't report unclaimed bytes from IN fifos */ | 747 | /* LPD can't report unclaimed bytes from IN fifos */ |
743 | if (ep_is_in(ep)) | 748 | if (ep_is_in(ep)) |
744 | return -EOPNOTSUPP; | 749 | return -EOPNOTSUPP; |
745 | 750 | ||
746 | return count; | 751 | return count; |
747 | } | 752 | } |
748 | 753 | ||
/*
 * Flush EP FIFO
 *
 * Currently only validates the endpoint and logs; no hardware flush is
 * issued here (the core-level GRSTCTL flush is done during reset).
 */
static void s3c_fifo_flush(struct usb_ep *_ep)
{
	struct s3c_ep *ep;

	ep = container_of(_ep, struct s3c_ep, ep);
	/* EP0 is allowed even without a descriptor; others must be enabled */
	if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) {
		debug("%s: bad ep\n", __func__);
		return;
	}

	debug("%s: %d\n", __func__, ep_index(ep));
}
764 | 769 | ||
/* Gadget-level operations: none implemented by this driver */
static const struct usb_gadget_ops s3c_udc_ops = {
	/* current versions must always be self-powered */
};
768 | 773 | ||
/*
 * Static UDC instance with the controller's fixed endpoint layout:
 * EP0 control, EP1 bulk (IN addr), EP2 bulk (OUT addr), EP3 interrupt.
 */
static struct s3c_udc memory = {
	.usb_address = 0,
	.gadget = {
		.ops = &s3c_udc_ops,
		.ep0 = &memory.ep[0].ep,
		.name = driver_name,
	},

	/* control endpoint */
	.ep[0] = {
		.ep = {
			.name = ep0name,
			.ops = &s3c_ep_ops,
			.maxpacket = EP0_FIFO_SIZE,
		},
		.dev = &memory,

		.bEndpointAddress = 0,
		.bmAttributes = 0,

		.ep_type = ep_control,
	},

	/* first group of endpoints */
	.ep[1] = {
		.ep = {
			.name = "ep1in-bulk",
			.ops = &s3c_ep_ops,
			.maxpacket = EP_FIFO_SIZE,
		},
		.dev = &memory,

		.bEndpointAddress = USB_DIR_IN | 1,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,

		/*
		 * NOTE(review): ep_type direction looks swapped relative to
		 * bEndpointAddress (IN endpoint tagged ep_bulk_out, and the
		 * reverse for ep[2]) — confirm against how ep_type is
		 * consumed before "fixing".
		 */
		.ep_type = ep_bulk_out,
		.fifo_num = 1,
	},

	.ep[2] = {
		.ep = {
			.name = "ep2out-bulk",
			.ops = &s3c_ep_ops,
			.maxpacket = EP_FIFO_SIZE,
		},
		.dev = &memory,

		.bEndpointAddress = USB_DIR_OUT | 2,
		.bmAttributes = USB_ENDPOINT_XFER_BULK,

		.ep_type = ep_bulk_in,
		.fifo_num = 2,
	},

	.ep[3] = {
		.ep = {
			.name = "ep3in-int",
			.ops = &s3c_ep_ops,
			.maxpacket = EP_FIFO_SIZE,
		},
		.dev = &memory,

		.bEndpointAddress = USB_DIR_IN | 3,
		.bmAttributes = USB_ENDPOINT_XFER_INT,

		.ep_type = ep_interrupt,
		.fifo_num = 3,
	},
};
838 | 843 | ||
/*
 * probe - binds to the platform device
 *
 * Records platform data, maps the PHY and OTG register blocks into the
 * file-scope phy/reg/usb_phy_ctrl pointers, publishes the controller via
 * the_controller, allocates the cache-line-aligned SETUP packet buffer
 * and reinitializes the UDC state.
 *
 * Returns 0 on success, -ENOMEM if the SETUP buffer cannot be allocated.
 */

int s3c_udc_probe(struct s3c_plat_otg_data *pdata)
{
	struct s3c_udc *dev = &memory;
	int retval = 0;

	debug("%s: %p\n", __func__, pdata);

	dev->pdata = pdata;

	/* Register block base addresses come straight from platform data */
	phy = (struct s3c_usbotg_phy *)pdata->regs_phy;
	reg = (struct s3c_usbotg_reg *)pdata->regs_otg;
	usb_phy_ctrl = pdata->usb_phy_ctrl;

	/* regs_otg = (void *)pdata->regs_otg; */

	dev->gadget.is_dualspeed = 1;	/* Hack only*/
	dev->gadget.is_otg = 0;
	dev->gadget.is_a_peripheral = 0;
	dev->gadget.b_hnp_enable = 0;
	dev->gadget.a_hnp_support = 0;
	dev->gadget.a_alt_hnp_support = 0;

	the_controller = dev;

	/*
	 * Cache-line aligned (and rounded) buffer for SETUP packets so it
	 * can be flushed/invalidated without clobbering neighbouring data.
	 */
	usb_ctrl = memalign(CONFIG_SYS_CACHELINE_SIZE,
			    ROUND(sizeof(struct usb_ctrlrequest),
				  CONFIG_SYS_CACHELINE_SIZE));
	if (!usb_ctrl) {
		error("No memory available for UDC!\n");
		return -ENOMEM;
	}

	/* DMA engine is handed the CPU address directly (no translation) */
	usb_ctrl_dma_addr = (dma_addr_t) usb_ctrl;

	udc_reinit(dev);

	return retval;
}
881 | 886 | ||
882 | int usb_gadget_handle_interrupts() | 887 | int usb_gadget_handle_interrupts() |
883 | { | 888 | { |
884 | u32 intr_status = readl(®->gintsts); | 889 | u32 intr_status = readl(®->gintsts); |
885 | u32 gintmsk = readl(®->gintmsk); | 890 | u32 gintmsk = readl(®->gintmsk); |
886 | 891 | ||
887 | if (intr_status & gintmsk) | 892 | if (intr_status & gintmsk) |
888 | return s3c_udc_irq(1, (void *)the_controller); | 893 | return s3c_udc_irq(1, (void *)the_controller); |
889 | return 0; | 894 | return 0; |
890 | } | 895 | } |
891 | 896 |
drivers/usb/gadget/s3c_udc_otg_xfer_dma.c
1 | /* | 1 | /* |
2 | * drivers/usb/gadget/s3c_udc_otg_xfer_dma.c | 2 | * drivers/usb/gadget/s3c_udc_otg_xfer_dma.c |
3 | * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers | 3 | * Samsung S3C on-chip full/high speed USB OTG 2.0 device controllers |
4 | * | 4 | * |
5 | * Copyright (C) 2009 for Samsung Electronics | 5 | * Copyright (C) 2009 for Samsung Electronics |
6 | * | 6 | * |
7 | * BSP Support for Samsung's UDC driver | 7 | * BSP Support for Samsung's UDC driver |
8 | * available at: | 8 | * available at: |
9 | * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git | 9 | * git://git.kernel.org/pub/scm/linux/kernel/git/kki_ap/linux-2.6-samsung.git |
10 | * | 10 | * |
11 | * State machine bugfixes: | 11 | * State machine bugfixes: |
12 | * Marek Szyprowski <m.szyprowski@samsung.com> | 12 | * Marek Szyprowski <m.szyprowski@samsung.com> |
13 | * | 13 | * |
14 | * Ported to u-boot: | 14 | * Ported to u-boot: |
15 | * Marek Szyprowski <m.szyprowski@samsung.com> | 15 | * Marek Szyprowski <m.szyprowski@samsung.com> |
16 | * Lukasz Majewski <l.majewski@samsumg.com> | 16 | * Lukasz Majewski <l.majewski@samsumg.com> |
17 | * | 17 | * |
18 | * SPDX-License-Identifier: GPL-2.0+ | 18 | * SPDX-License-Identifier: GPL-2.0+ |
19 | */ | 19 | */ |
20 | 20 | ||
/*
 * CLEAR_FEATURE bookkeeping — presumably latched by the SETUP handler
 * and consumed when the halt is actually cleared; confirm against the
 * ep0 state machine further down this file.
 */
static u8 clear_feature_num;
int clear_feature_flag;

/* Bulk-Only Mass Storage Reset (class-specific request) */
#define GET_MAX_LUN_REQUEST 0xFE
#define BOT_RESET_REQUEST 0xFF
27 | 27 | ||
/*
 * Queue a zero-length IN packet on EP0 (status stage of a control
 * transfer) and move the ep0 state machine to WAIT_FOR_IN_COMPLETE.
 */
static inline void s3c_udc_ep0_zlp(struct s3c_udc *dev)
{
	u32 ep_ctrl;

	/* DMA address is required by the core even for a zero-length packet */
	writel(usb_ctrl_dma_addr, &reg->in_endp[EP0_CON].diepdma);
	/* one packet, zero transfer size => ZLP */
	writel(DIEPT_SIZ_PKT_CNT(1), &reg->in_endp[EP0_CON].dieptsiz);

	ep_ctrl = readl(&reg->in_endp[EP0_CON].diepctl);
	/* enable the endpoint and clear NAK so the ZLP is sent */
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->in_endp[EP0_CON].diepctl);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	dev->ep0state = WAIT_FOR_IN_COMPLETE;
}
43 | 43 | ||
/*
 * Arm EP0 OUT to receive the next SETUP packet into the shared
 * usb_ctrl DMA buffer. Note: EPENA is set without CNAK here — the
 * core accepts SETUP packets regardless of the NAK state.
 */
void s3c_udc_pre_setup(void)
{
	u32 ep_ctrl;

	debug_cond(DEBUG_IN_EP,
		   "%s : Prepare Setup packets.\n", __func__);

	/* one packet, sized for a usb_ctrlrequest (8 bytes) */
	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	writel(ep_ctrl|DEPCTL_EPENA, &reg->out_endp[EP0_CON].doepctl);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->out_endp[EP0_CON].doepctl));

}
64 | 64 | ||
/*
 * Arm EP0 OUT for the status/complete-out stage of a control transfer.
 * Same programming as s3c_udc_pre_setup(), but additionally clears NAK
 * (DEPCTL_CNAK) so the host's OUT data is accepted immediately.
 */
static inline void s3c_ep0_complete_out(void)
{
	u32 ep_ctrl;

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->out_endp[EP0_CON].doepctl));

	debug_cond(DEBUG_IN_EP,
		   "%s : Prepare Complete Out packet.\n", __func__);

	/* one packet, sized for a usb_ctrlrequest, into the shared buffer */
	writel(DOEPT_SIZ_PKT_CNT(1) | sizeof(struct usb_ctrlrequest),
	       &reg->out_endp[EP0_CON].doeptsiz);
	writel(usb_ctrl_dma_addr, &reg->out_endp[EP0_CON].doepdma);

	ep_ctrl = readl(&reg->out_endp[EP0_CON].doepctl);
	writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK,
	       &reg->out_endp[EP0_CON].doepctl);

	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DIEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->in_endp[EP0_CON].diepctl));
	debug_cond(DEBUG_EP0 != 0, "%s:EP0 ZLP DOEPCTL0 = 0x%x\n",
		   __func__, readl(&reg->out_endp[EP0_CON].doepctl));

}
91 | 91 | ||
92 | 92 | ||
/*
 * Program an OUT-endpoint DMA transfer for the next chunk of a request.
 *
 * Computes the chunk length from the remaining request bytes, records
 * the transfer in ep->len / ep->dma_buf, then writes DOEPDMA, DOEPTSIZ
 * (packet count + transfer size) and DOEPCTL (EPENA|CNAK) to start
 * receiving. Always returns 0.
 */
static int setdma_rx(struct s3c_ep *ep, struct s3c_request *req)
{
	u32 *buf, ctrl;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);

	buf = req->req.buf + req->req.actual;
	/* EP0 is limited to one max-packet; other EPs to DMA_BUFFER_SIZE */
	length = min(req->req.length - req->req.actual,
		     ep_num ? DMA_BUFFER_SIZE : ep->ep.maxpacket);

	ep->len = length;
	ep->dma_buf = buf;

	/* a zero-length transfer still needs one (empty) packet */
	if (ep_num == EP0_CON || length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;

	ctrl = readl(&reg->out_endp[ep_num].doepctl);

	writel((unsigned int) ep->dma_buf, &reg->out_endp[ep_num].doepdma);
	writel(DOEPT_SIZ_PKT_CNT(pktcnt) | DOEPT_SIZ_XFER_SIZE(length),
	       &reg->out_endp[ep_num].doeptsiz);
	/* enable the endpoint and clear NAK to start the transfer */
	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->out_endp[ep_num].doepctl);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: EP%d RX DMA start : DOEPDMA = 0x%x,"
		   "DOEPTSIZ = 0x%x, DOEPCTL = 0x%x\n"
		   "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		   __func__, ep_num,
		   readl(&reg->out_endp[ep_num].doepdma),
		   readl(&reg->out_endp[ep_num].doeptsiz),
		   readl(&reg->out_endp[ep_num].doepctl),
		   buf, pktcnt, length);
	return 0;

}
130 | 130 | ||
/*
 * setdma_tx() - arm an IN endpoint for a DMA TX transfer.
 *
 * Writes back the source buffer from the data cache, flushes the
 * endpoint's TX FIFO, then programs DIEPDMA/DIEPTSIZ/DIEPCTL to start
 * the transfer.  EP0 sends at most one max-packet per call.
 *
 * Returns the number of bytes armed for transfer.
 */
int setdma_tx(struct s3c_ep *ep, struct s3c_request *req)
{
	u32 *buf, ctrl = 0;
	u32 length, pktcnt;
	u32 ep_num = ep_index(ep);

	/* Continue from where the previous chunk of this request ended. */
	buf = req->req.buf + req->req.actual;
	length = req->req.length - req->req.actual;

	/* EP0 can only move a single max-packet per transfer. */
	if (ep_num == EP0_CON)
		length = min(length, (u32)ep_maxpacket(ep));

	ep->len = length;
	ep->dma_buf = buf;

	/* Write back the buffer so the DMA engine sees current data. */
	flush_dcache_range((unsigned long) ep->dma_buf,
			   (unsigned long) ep->dma_buf +
			   ROUND(ep->len, CONFIG_SYS_CACHELINE_SIZE));

	/* A zero-length packet still counts as one packet. */
	if (length == 0)
		pktcnt = 1;
	else
		pktcnt = (length - 1)/(ep->ep.maxpacket) + 1;

	/* Flush the endpoint's Tx FIFO */
	writel(TX_FIFO_NUMBER(ep->fifo_num), &reg->grstctl);
	writel(TX_FIFO_NUMBER(ep->fifo_num) | TX_FIFO_FLUSH, &reg->grstctl);
	/* NOTE(review): unbounded busy-wait — hangs if the flush never
	 * completes; consider a timeout. */
	while (readl(&reg->grstctl) & TX_FIFO_FLUSH)
		;

	writel((unsigned long) ep->dma_buf, &reg->in_endp[ep_num].diepdma);
	writel(DIEPT_SIZ_PKT_CNT(pktcnt) | DIEPT_SIZ_XFER_SIZE(length),
	       &reg->in_endp[ep_num].dieptsiz);

	ctrl = readl(&reg->in_endp[ep_num].diepctl);

	/* Write the FIFO number to be used for this endpoint */
	ctrl &= DIEPCTL_TX_FIFO_NUM_MASK;
	ctrl |= DIEPCTL_TX_FIFO_NUM(ep->fifo_num);

	/* Clear reserved (Next EP) bits */
	ctrl = (ctrl&~(EP_MASK<<DEPCTL_NEXT_EP_BIT));

	/* Enable the endpoint and clear NAK to start the transfer. */
	writel(DEPCTL_EPENA|DEPCTL_CNAK|ctrl, &reg->in_endp[ep_num].diepctl);

	debug_cond(DEBUG_IN_EP,
		   "%s:EP%d TX DMA start : DIEPDMA0 = 0x%x,"
		   "DIEPTSIZ0 = 0x%x, DIEPCTL0 = 0x%x\n"
		   "\tbuf = 0x%p, pktcnt = %d, xfersize = %d\n",
		   __func__, ep_num,
		   readl(&reg->in_endp[ep_num].diepdma),
		   readl(&reg->in_endp[ep_num].dieptsiz),
		   readl(&reg->in_endp[ep_num].diepctl),
		   buf, pktcnt, length);

	return length;
}
188 | 188 | ||
/*
 * complete_rx() - finish (part of) an OUT (RX) DMA transfer on @ep_num.
 *
 * Derives the received byte count from the DOEPTSIZ residue, invalidates
 * the cache over the DMA buffer, advances req.actual, and then either
 * completes the request, re-arms the endpoint for the remainder, or (for
 * an EP0 data-OUT stage) queues the status ZLP.
 */
static void complete_rx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req = NULL;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;

	/* Spurious completion: nothing queued on this endpoint. */
	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_OUT_EP != 0,
			   "%s: RX DMA done : NULL REQ on OUT EP-%d\n",
			   __func__, ep_num);
		return;

	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);
	ep_tsr = readl(&reg->out_endp[ep_num].doeptsiz);

	/* DOEPTSIZ holds the residue; EP0 uses a narrower size field. */
	if (ep_num == EP0_CON)
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP0);
	else
		xfer_size = (ep_tsr & DOEPT_SIZ_XFER_SIZE_MAX_EP);

	/* Bytes actually received = programmed length minus residue. */
	xfer_size = ep->len - xfer_size;

	/*
	 * NOTE:
	 *
	 * Please be careful with proper buffer allocation for USB request,
	 * which needs to be aligned to CONFIG_SYS_CACHELINE_SIZE, not only
	 * with starting address, but also its size shall be a cache line
	 * multiplication.
	 *
	 * This will prevent from corruption of data allocated immediately
	 * before or after the buffer.
	 *
	 * For armv7, the cache_v7.c provides proper code to emit "ERROR"
	 * message to warn users.
	 */
	invalidate_dcache_range((unsigned long) ep->dma_buf,
				(unsigned long) ep->dma_buf +
				ROUND(xfer_size, CONFIG_SYS_CACHELINE_SIZE));

	req->req.actual += min(xfer_size, req->req.length - req->req.actual);
	/* A short packet terminates the transfer per the USB protocol. */
	is_short = (xfer_size < ep->ep.maxpacket);

	debug_cond(DEBUG_OUT_EP != 0,
		   "%s: RX DMA done : ep = %d, rx bytes = %d/%d, "
		   "is_short = %d, DOEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, xfer_size);

	if (is_short || req->req.actual == req->req.length) {
		if (ep_num == EP0_CON && dev->ep0state == DATA_STATE_RECV) {
			debug_cond(DEBUG_OUT_EP != 0, "	=> Send ZLP\n");
			s3c_udc_ep0_zlp(dev);
			/* packet will be completed in complete_tx() */
			dev->ep0state = WAIT_FOR_IN_COMPLETE;
		} else {
			done(ep, req, 0);

			/* Kick off the next queued RX request, if any. */
			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct s3c_request, queue);
				debug_cond(DEBUG_OUT_EP != 0,
					   "%s: Next Rx request start...\n",
					   __func__);
				setdma_rx(ep, req);
			}
		}
	} else
		/* More data expected: re-arm DMA for the remainder. */
		setdma_rx(ep, req);
}
261 | 261 | ||
/*
 * complete_tx() - finish an IN (TX) DMA transfer on @ep_num.
 *
 * Advances req.actual by the transferred length, drives the EP0
 * control-transfer state machine for endpoint 0, and completes or
 * re-arms requests on the other IN endpoints.
 */
static void complete_tx(struct s3c_udc *dev, u8 ep_num)
{
	struct s3c_ep *ep = &dev->ep[ep_num];
	struct s3c_request *req;
	u32 ep_tsr = 0, xfer_size = 0, is_short = 0;
	u32 last;

	/* A ZLP we sent has completed; expect the OUT status phase next. */
	if (dev->ep0state == WAIT_FOR_NULL_COMPLETE) {
		dev->ep0state = WAIT_FOR_OUT_COMPLETE;
		s3c_ep0_complete_out();
		return;
	}

	/* Spurious completion: nothing queued on this endpoint. */
	if (list_empty(&ep->queue)) {
		debug_cond(DEBUG_IN_EP,
			   "%s: TX DMA done : NULL REQ on IN EP-%d\n",
			   __func__, ep_num);
		return;

	}

	req = list_entry(ep->queue.next, struct s3c_request, queue);

	ep_tsr = readl(&reg->in_endp[ep_num].dieptsiz);

	/* The whole programmed length (ep->len) is counted as sent. */
	xfer_size = ep->len;
	is_short = (xfer_size < ep->ep.maxpacket);
	req->req.actual += min(xfer_size, req->req.length - req->req.actual);

	debug_cond(DEBUG_IN_EP,
		   "%s: TX DMA done : ep = %d, tx bytes = %d/%d, "
		   "is_short = %d, DIEPTSIZ = 0x%x, remained bytes = %d\n",
		   __func__, ep_num, req->req.actual, req->req.length,
		   is_short, ep_tsr, xfer_size);

	if (ep_num == 0) {
		/* EP0: step the control-transfer state machine. */
		if (dev->ep0state == DATA_STATE_XMIT) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, ep0stat =="
				   "DATA_STATE_XMIT\n",
				   __func__, ep_num);
			/* Push the next chunk; non-zero means it was last. */
			last = write_fifo_ep0(ep, req);
			if (last)
				dev->ep0state = WAIT_FOR_COMPLETE;
		} else if (dev->ep0state == WAIT_FOR_IN_COMPLETE) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, completing request\n",
				   __func__, ep_num);
			done(ep, req, 0);
			dev->ep0state = WAIT_FOR_SETUP;
		} else if (dev->ep0state == WAIT_FOR_COMPLETE) {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, completing request\n",
				   __func__, ep_num);
			done(ep, req, 0);
			/* Data stage done; prepare the OUT status stage. */
			dev->ep0state = WAIT_FOR_OUT_COMPLETE;
			s3c_ep0_complete_out();
		} else {
			debug_cond(DEBUG_IN_EP,
				   "%s: ep_num = %d, invalid ep state\n",
				   __func__, ep_num);
		}
		return;
	}

	if (req->req.actual == req->req.length)
		done(ep, req, 0);

	/* Start the next queued TX request, if any. */
	if (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next, struct s3c_request, queue);
		debug_cond(DEBUG_IN_EP,
			   "%s: Next Tx request start...\n", __func__);
		setdma_tx(ep, req);
	}
}
337 | 337 | ||
338 | static inline void s3c_udc_check_tx_queue(struct s3c_udc *dev, u8 ep_num) | 338 | static inline void s3c_udc_check_tx_queue(struct s3c_udc *dev, u8 ep_num) |
339 | { | 339 | { |
340 | struct s3c_ep *ep = &dev->ep[ep_num]; | 340 | struct s3c_ep *ep = &dev->ep[ep_num]; |
341 | struct s3c_request *req; | 341 | struct s3c_request *req; |
342 | 342 | ||
343 | debug_cond(DEBUG_IN_EP, | 343 | debug_cond(DEBUG_IN_EP, |
344 | "%s: Check queue, ep_num = %d\n", __func__, ep_num); | 344 | "%s: Check queue, ep_num = %d\n", __func__, ep_num); |
345 | 345 | ||
346 | if (!list_empty(&ep->queue)) { | 346 | if (!list_empty(&ep->queue)) { |
347 | req = list_entry(ep->queue.next, struct s3c_request, queue); | 347 | req = list_entry(ep->queue.next, struct s3c_request, queue); |
348 | debug_cond(DEBUG_IN_EP, | 348 | debug_cond(DEBUG_IN_EP, |
349 | "%s: Next Tx request(0x%p) start...\n", | 349 | "%s: Next Tx request(0x%p) start...\n", |
350 | __func__, req); | 350 | __func__, req); |
351 | 351 | ||
352 | if (ep_is_in(ep)) | 352 | if (ep_is_in(ep)) |
353 | setdma_tx(ep, req); | 353 | setdma_tx(ep, req); |
354 | else | 354 | else |
355 | setdma_rx(ep, req); | 355 | setdma_rx(ep, req); |
356 | } else { | 356 | } else { |
357 | debug_cond(DEBUG_IN_EP, | 357 | debug_cond(DEBUG_IN_EP, |
358 | "%s: NULL REQ on IN EP-%d\n", __func__, ep_num); | 358 | "%s: NULL REQ on IN EP-%d\n", __func__, ep_num); |
359 | 359 | ||
360 | return; | 360 | return; |
361 | } | 361 | } |
362 | 362 | ||
363 | } | 363 | } |
364 | 364 | ||
/*
 * process_ep_in_intr() - dispatch pending IN-endpoint interrupts.
 *
 * Walks the IN-endpoint bits of DAINT, acknowledges each endpoint's
 * DIEPINT and, on TRANSFER_DONE, completes the TX; for EP0 it also keeps
 * the control state machine armed for the next SETUP and resumes a
 * transfer deferred by Set/ClearFeature(HALT).
 */
static void process_ep_in_intr(struct s3c_udc *dev)
{
	u32 ep_intr, ep_intr_status;
	u8 ep_num = 0;

	ep_intr = readl(&reg->daint);
	debug_cond(DEBUG_IN_EP,
		   "*** %s: EP In interrupt : DAINT = 0x%x\n", __func__, ep_intr);

	/* IN endpoint bits live in the low half of DAINT. */
	ep_intr &= DAINT_MASK;

	while (ep_intr) {
		if (ep_intr & DAINT_IN_EP_INT(1)) {
			ep_intr_status = readl(&reg->in_endp[ep_num].diepint);
			debug_cond(DEBUG_IN_EP,
				   "\tEP%d-IN : DIEPINT = 0x%x\n",
				   ep_num, ep_intr_status);

			/* Interrupt Clear */
			writel(ep_intr_status, &reg->in_endp[ep_num].diepint);

			if (ep_intr_status & TRANSFER_DONE) {
				complete_tx(dev, ep_num);

				if (ep_num == 0) {
					if (dev->ep0state ==
					    WAIT_FOR_IN_COMPLETE)
						dev->ep0state = WAIT_FOR_SETUP;

					/* Re-arm EP0 for the next SETUP. */
					if (dev->ep0state == WAIT_FOR_SETUP)
						s3c_udc_pre_setup();

					/* continue transfer after
					   set_clear_halt for DMA mode */
					if (clear_feature_flag == 1) {
						s3c_udc_check_tx_queue(dev,
							clear_feature_num);
						clear_feature_flag = 0;
					}
				}
			}
		}
		/* Advance to the next endpoint's interrupt bit. */
		ep_num++;
		ep_intr >>= 1;
	}
}
411 | 411 | ||
412 | static void process_ep_out_intr(struct s3c_udc *dev) | 412 | static void process_ep_out_intr(struct s3c_udc *dev) |
413 | { | 413 | { |
414 | u32 ep_intr, ep_intr_status; | 414 | u32 ep_intr, ep_intr_status; |
415 | u8 ep_num = 0; | 415 | u8 ep_num = 0; |
416 | 416 | ||
417 | ep_intr = readl(®->daint); | 417 | ep_intr = readl(®->daint); |
418 | debug_cond(DEBUG_OUT_EP != 0, | 418 | debug_cond(DEBUG_OUT_EP != 0, |
419 | "*** %s: EP OUT interrupt : DAINT = 0x%x\n", | 419 | "*** %s: EP OUT interrupt : DAINT = 0x%x\n", |
420 | __func__, ep_intr); | 420 | __func__, ep_intr); |
421 | 421 | ||
422 | ep_intr = (ep_intr >> DAINT_OUT_BIT) & DAINT_MASK; | 422 | ep_intr = (ep_intr >> DAINT_OUT_BIT) & DAINT_MASK; |
423 | 423 | ||
424 | while (ep_intr) { | 424 | while (ep_intr) { |
425 | if (ep_intr & 0x1) { | 425 | if (ep_intr & 0x1) { |
426 | ep_intr_status = readl(®->out_endp[ep_num].doepint); | 426 | ep_intr_status = readl(®->out_endp[ep_num].doepint); |
427 | debug_cond(DEBUG_OUT_EP != 0, | 427 | debug_cond(DEBUG_OUT_EP != 0, |
428 | "\tEP%d-OUT : DOEPINT = 0x%x\n", | 428 | "\tEP%d-OUT : DOEPINT = 0x%x\n", |
429 | ep_num, ep_intr_status); | 429 | ep_num, ep_intr_status); |
430 | 430 | ||
431 | /* Interrupt Clear */ | 431 | /* Interrupt Clear */ |
432 | writel(ep_intr_status, ®->out_endp[ep_num].doepint); | 432 | writel(ep_intr_status, ®->out_endp[ep_num].doepint); |
433 | 433 | ||
434 | if (ep_num == 0) { | 434 | if (ep_num == 0) { |
435 | if (ep_intr_status & TRANSFER_DONE) { | 435 | if (ep_intr_status & TRANSFER_DONE) { |
436 | if (dev->ep0state != | 436 | if (dev->ep0state != |
437 | WAIT_FOR_OUT_COMPLETE) | 437 | WAIT_FOR_OUT_COMPLETE) |
438 | complete_rx(dev, ep_num); | 438 | complete_rx(dev, ep_num); |
439 | else { | 439 | else { |
440 | dev->ep0state = WAIT_FOR_SETUP; | 440 | dev->ep0state = WAIT_FOR_SETUP; |
441 | s3c_udc_pre_setup(); | 441 | s3c_udc_pre_setup(); |
442 | } | 442 | } |
443 | } | 443 | } |
444 | 444 | ||
445 | if (ep_intr_status & | 445 | if (ep_intr_status & |
446 | CTRL_OUT_EP_SETUP_PHASE_DONE) { | 446 | CTRL_OUT_EP_SETUP_PHASE_DONE) { |
447 | debug_cond(DEBUG_OUT_EP != 0, | 447 | debug_cond(DEBUG_OUT_EP != 0, |
448 | "SETUP packet arrived\n"); | 448 | "SETUP packet arrived\n"); |
449 | s3c_handle_ep0(dev); | 449 | s3c_handle_ep0(dev); |
450 | } | 450 | } |
451 | } else { | 451 | } else { |
452 | if (ep_intr_status & TRANSFER_DONE) | 452 | if (ep_intr_status & TRANSFER_DONE) |
453 | complete_rx(dev, ep_num); | 453 | complete_rx(dev, ep_num); |
454 | } | 454 | } |
455 | } | 455 | } |
456 | ep_num++; | 456 | ep_num++; |
457 | ep_intr >>= 1; | 457 | ep_intr >>= 1; |
458 | } | 458 | } |
459 | } | 459 | } |
460 | 460 | ||
/*
 * usb client interrupt handler.
 *
 * Top-level device-mode ISR: under the device spinlock it reads GINTSTS
 * and handles, in order, enumeration-done (speed selection), early
 * suspend, suspend, resume, bus reset, and then the per-endpoint IN/OUT
 * interrupts.  Always returns IRQ_HANDLED.
 */
static int s3c_udc_irq(int irq, void *_dev)
{
	struct s3c_udc *dev = _dev;
	u32 intr_status;
	u32 usb_status, gintmsk;
	unsigned long flags = 0;

	spin_lock_irqsave(&dev->lock, flags);

	intr_status = readl(&reg->gintsts);
	gintmsk = readl(&reg->gintmsk);

	debug_cond(DEBUG_ISR,
		   "\n*** %s : GINTSTS=0x%x(on state %s), GINTMSK : 0x%x,"
		   "DAINT : 0x%x, DAINTMSK : 0x%x\n",
		   __func__, intr_status, state_names[dev->ep0state], gintmsk,
		   readl(&reg->daint), readl(&reg->daintmsk));

	/* Nothing pending: done. */
	if (!intr_status) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return IRQ_HANDLED;
	}

	if (intr_status & INT_ENUMDONE) {
		debug_cond(DEBUG_ISR, "\tSpeed Detection interrupt\n");

		writel(INT_ENUMDONE, &reg->gintsts);
		/* Mask the DSTS speed bits (0x6). */
		usb_status = (readl(&reg->dsts) & 0x6);

		if (usb_status & (USB_FULL_30_60MHZ | USB_FULL_48MHZ)) {
			debug_cond(DEBUG_ISR,
				   "\t\tFull Speed Detection\n");
			set_max_pktsize(dev, USB_SPEED_FULL);

		} else {
			debug_cond(DEBUG_ISR,
				   "\t\tHigh Speed Detection : 0x%x\n",
				   usb_status);
			set_max_pktsize(dev, USB_SPEED_HIGH);
		}
	}

	if (intr_status & INT_EARLY_SUSPEND) {
		debug_cond(DEBUG_ISR, "\tEarly suspend interrupt\n");
		writel(INT_EARLY_SUSPEND, &reg->gintsts);
	}

	if (intr_status & INT_SUSPEND) {
		usb_status = readl(&reg->dsts);
		debug_cond(DEBUG_ISR,
			   "\tSuspend interrupt :(DSTS):0x%x\n", usb_status);
		writel(INT_SUSPEND, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);

			/* HACK to let gadget detect disconnected state */
			if (dev->driver->disconnect) {
				/* Drop the lock across the callback; it may
				 * re-enter the UDC (e.g. dequeue requests). */
				spin_unlock_irqrestore(&dev->lock, flags);
				dev->driver->disconnect(&dev->gadget);
				spin_lock_irqsave(&dev->lock, flags);
			}
		}
	}

	if (intr_status & INT_RESUME) {
		debug_cond(DEBUG_ISR, "\tResume interrupt\n");
		writel(INT_RESUME, &reg->gintsts);

		if (dev->gadget.speed != USB_SPEED_UNKNOWN
		    && dev->driver
		    && dev->driver->resume) {

			dev->driver->resume(&dev->gadget);
		}
	}

	if (intr_status & INT_RESET) {
		usb_status = readl(&reg->gotgctl);
		debug_cond(DEBUG_ISR,
			   "\tReset interrupt - (GOTGCTL):0x%x\n", usb_status);
		writel(INT_RESET, &reg->gintsts);

		/* GOTGCTL[19:18] == 0x3: session valid — handle the reset. */
		if ((usb_status & 0xc0000) == (0x3 << 18)) {
			if (reset_available) {
				debug_cond(DEBUG_ISR,
					   "\t\tOTG core got reset (%d)!!\n",
					   reset_available);
				/* Reprogram the core and await SETUP. */
				reconfig_usbd();
				dev->ep0state = WAIT_FOR_SETUP;
				reset_available = 0;
				s3c_udc_pre_setup();
			} else
				reset_available = 1;

		} else {
			reset_available = 1;
			debug_cond(DEBUG_ISR,
				   "\t\tRESET handling skipped\n");
		}
	}

	if (intr_status & INT_IN_EP)
		process_ep_in_intr(dev);

	if (intr_status & INT_OUT_EP)
		process_ep_out_intr(dev);

	spin_unlock_irqrestore(&dev->lock, flags);

	return IRQ_HANDLED;
}
578 | 578 | ||
579 | /** Queue one request | 579 | /** Queue one request |
580 | * Kickstart transfer if needed | 580 | * Kickstart transfer if needed |
581 | */ | 581 | */ |
582 | static int s3c_queue(struct usb_ep *_ep, struct usb_request *_req, | 582 | static int s3c_queue(struct usb_ep *_ep, struct usb_request *_req, |
583 | gfp_t gfp_flags) | 583 | gfp_t gfp_flags) |
584 | { | 584 | { |
585 | struct s3c_request *req; | 585 | struct s3c_request *req; |
586 | struct s3c_ep *ep; | 586 | struct s3c_ep *ep; |
587 | struct s3c_udc *dev; | 587 | struct s3c_udc *dev; |
588 | unsigned long flags; | 588 | unsigned long flags = 0; |
589 | u32 ep_num, gintsts; | 589 | u32 ep_num, gintsts; |
590 | 590 | ||
591 | req = container_of(_req, struct s3c_request, req); | 591 | req = container_of(_req, struct s3c_request, req); |
592 | if (unlikely(!_req || !_req->complete || !_req->buf | 592 | if (unlikely(!_req || !_req->complete || !_req->buf |
593 | || !list_empty(&req->queue))) { | 593 | || !list_empty(&req->queue))) { |
594 | 594 | ||
595 | debug("%s: bad params\n", __func__); | 595 | debug("%s: bad params\n", __func__); |
596 | return -EINVAL; | 596 | return -EINVAL; |
597 | } | 597 | } |
598 | 598 | ||
599 | ep = container_of(_ep, struct s3c_ep, ep); | 599 | ep = container_of(_ep, struct s3c_ep, ep); |
600 | 600 | ||
601 | if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) { | 601 | if (unlikely(!_ep || (!ep->desc && ep->ep.name != ep0name))) { |
602 | 602 | ||
603 | debug("%s: bad ep: %s, %d, %p\n", __func__, | 603 | debug("%s: bad ep: %s, %d, %p\n", __func__, |
604 | ep->ep.name, !ep->desc, _ep); | 604 | ep->ep.name, !ep->desc, _ep); |
605 | return -EINVAL; | 605 | return -EINVAL; |
606 | } | 606 | } |
607 | 607 | ||
608 | ep_num = ep_index(ep); | 608 | ep_num = ep_index(ep); |
609 | dev = ep->dev; | 609 | dev = ep->dev; |
610 | if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) { | 610 | if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) { |
611 | 611 | ||
612 | debug("%s: bogus device state %p\n", __func__, dev->driver); | 612 | debug("%s: bogus device state %p\n", __func__, dev->driver); |
613 | return -ESHUTDOWN; | 613 | return -ESHUTDOWN; |
614 | } | 614 | } |
615 | 615 | ||
616 | spin_lock_irqsave(&dev->lock, flags); | 616 | spin_lock_irqsave(&dev->lock, flags); |
617 | 617 | ||
618 | _req->status = -EINPROGRESS; | 618 | _req->status = -EINPROGRESS; |
619 | _req->actual = 0; | 619 | _req->actual = 0; |
620 | 620 | ||
621 | /* kickstart this i/o queue? */ | 621 | /* kickstart this i/o queue? */ |
622 | debug("\n*** %s: %s-%s req = %p, len = %d, buf = %p" | 622 | debug("\n*** %s: %s-%s req = %p, len = %d, buf = %p" |
623 | "Q empty = %d, stopped = %d\n", | 623 | "Q empty = %d, stopped = %d\n", |
624 | __func__, _ep->name, ep_is_in(ep) ? "in" : "out", | 624 | __func__, _ep->name, ep_is_in(ep) ? "in" : "out", |
625 | _req, _req->length, _req->buf, | 625 | _req, _req->length, _req->buf, |
626 | list_empty(&ep->queue), ep->stopped); | 626 | list_empty(&ep->queue), ep->stopped); |
627 | 627 | ||
628 | #ifdef DEBUG | 628 | #ifdef DEBUG |
629 | { | 629 | { |
630 | int i, len = _req->length; | 630 | int i, len = _req->length; |
631 | 631 | ||
632 | printf("pkt = "); | 632 | printf("pkt = "); |
633 | if (len > 64) | 633 | if (len > 64) |
634 | len = 64; | 634 | len = 64; |
635 | for (i = 0; i < len; i++) { | 635 | for (i = 0; i < len; i++) { |
636 | printf("%02x", ((u8 *)_req->buf)[i]); | 636 | printf("%02x", ((u8 *)_req->buf)[i]); |
637 | if ((i & 7) == 7) | 637 | if ((i & 7) == 7) |
638 | printf(" "); | 638 | printf(" "); |
639 | } | 639 | } |
640 | printf("\n"); | 640 | printf("\n"); |
641 | } | 641 | } |
642 | #endif | 642 | #endif |
643 | 643 | ||
644 | if (list_empty(&ep->queue) && !ep->stopped) { | 644 | if (list_empty(&ep->queue) && !ep->stopped) { |
645 | 645 | ||
646 | if (ep_num == 0) { | 646 | if (ep_num == 0) { |
647 | /* EP0 */ | 647 | /* EP0 */ |
648 | list_add_tail(&req->queue, &ep->queue); | 648 | list_add_tail(&req->queue, &ep->queue); |
649 | s3c_ep0_kick(dev, ep); | 649 | s3c_ep0_kick(dev, ep); |
650 | req = 0; | 650 | req = 0; |
651 | 651 | ||
652 | } else if (ep_is_in(ep)) { | 652 | } else if (ep_is_in(ep)) { |
653 | gintsts = readl(®->gintsts); | 653 | gintsts = readl(®->gintsts); |
654 | debug_cond(DEBUG_IN_EP, | 654 | debug_cond(DEBUG_IN_EP, |
655 | "%s: ep_is_in, S3C_UDC_OTG_GINTSTS=0x%x\n", | 655 | "%s: ep_is_in, S3C_UDC_OTG_GINTSTS=0x%x\n", |
656 | __func__, gintsts); | 656 | __func__, gintsts); |
657 | 657 | ||
658 | setdma_tx(ep, req); | 658 | setdma_tx(ep, req); |
659 | } else { | 659 | } else { |
660 | gintsts = readl(®->gintsts); | 660 | gintsts = readl(®->gintsts); |
661 | debug_cond(DEBUG_OUT_EP != 0, | 661 | debug_cond(DEBUG_OUT_EP != 0, |
662 | "%s:ep_is_out, S3C_UDC_OTG_GINTSTS=0x%x\n", | 662 | "%s:ep_is_out, S3C_UDC_OTG_GINTSTS=0x%x\n", |
663 | __func__, gintsts); | 663 | __func__, gintsts); |
664 | 664 | ||
665 | setdma_rx(ep, req); | 665 | setdma_rx(ep, req); |
666 | } | 666 | } |
667 | } | 667 | } |
668 | 668 | ||
669 | /* pio or dma irq handler advances the queue. */ | 669 | /* pio or dma irq handler advances the queue. */ |
670 | if (likely(req != 0)) | 670 | if (likely(req != 0)) |
671 | list_add_tail(&req->queue, &ep->queue); | 671 | list_add_tail(&req->queue, &ep->queue); |
672 | 672 | ||
673 | spin_unlock_irqrestore(&dev->lock, flags); | 673 | spin_unlock_irqrestore(&dev->lock, flags); |
674 | 674 | ||
675 | return 0; | 675 | return 0; |
676 | } | 676 | } |
677 | 677 | ||
678 | /****************************************************************/ | 678 | /****************************************************************/ |
679 | /* End Point 0 related functions */ | 679 | /* End Point 0 related functions */ |
680 | /****************************************************************/ | 680 | /****************************************************************/ |
681 | 681 | ||
682 | /* return: 0 = still running, 1 = completed, negative = errno */ | 682 | /* return: 0 = still running, 1 = completed, negative = errno */ |
683 | static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req) | 683 | static int write_fifo_ep0(struct s3c_ep *ep, struct s3c_request *req) |
684 | { | 684 | { |
685 | u32 max; | 685 | u32 max; |
686 | unsigned count; | 686 | unsigned count; |
687 | int is_last; | 687 | int is_last; |
688 | 688 | ||
689 | max = ep_maxpacket(ep); | 689 | max = ep_maxpacket(ep); |
690 | 690 | ||
691 | debug_cond(DEBUG_EP0 != 0, "%s: max = %d\n", __func__, max); | 691 | debug_cond(DEBUG_EP0 != 0, "%s: max = %d\n", __func__, max); |
692 | 692 | ||
693 | count = setdma_tx(ep, req); | 693 | count = setdma_tx(ep, req); |
694 | 694 | ||
695 | /* last packet is usually short (or a zlp) */ | 695 | /* last packet is usually short (or a zlp) */ |
696 | if (likely(count != max)) | 696 | if (likely(count != max)) |
697 | is_last = 1; | 697 | is_last = 1; |
698 | else { | 698 | else { |
699 | if (likely(req->req.length != req->req.actual + count) | 699 | if (likely(req->req.length != req->req.actual + count) |
700 | || req->req.zero) | 700 | || req->req.zero) |
701 | is_last = 0; | 701 | is_last = 0; |
702 | else | 702 | else |
703 | is_last = 1; | 703 | is_last = 1; |
704 | } | 704 | } |
705 | 705 | ||
706 | debug_cond(DEBUG_EP0 != 0, | 706 | debug_cond(DEBUG_EP0 != 0, |
707 | "%s: wrote %s %d bytes%s %d left %p\n", __func__, | 707 | "%s: wrote %s %d bytes%s %d left %p\n", __func__, |
708 | ep->ep.name, count, | 708 | ep->ep.name, count, |
709 | is_last ? "/L" : "", | 709 | is_last ? "/L" : "", |
710 | req->req.length - req->req.actual - count, req); | 710 | req->req.length - req->req.actual - count, req); |
711 | 711 | ||
712 | /* requests complete when all IN data is in the FIFO */ | 712 | /* requests complete when all IN data is in the FIFO */ |
713 | if (is_last) { | 713 | if (is_last) { |
714 | ep->dev->ep0state = WAIT_FOR_SETUP; | 714 | ep->dev->ep0state = WAIT_FOR_SETUP; |
715 | return 1; | 715 | return 1; |
716 | } | 716 | } |
717 | 717 | ||
718 | return 0; | 718 | return 0; |
719 | } | 719 | } |
720 | 720 | ||
721 | int s3c_fifo_read(struct s3c_ep *ep, u32 *cp, int max) | 721 | int s3c_fifo_read(struct s3c_ep *ep, u32 *cp, int max) |
722 | { | 722 | { |
723 | invalidate_dcache_range((unsigned long)cp, (unsigned long)cp + | 723 | invalidate_dcache_range((unsigned long)cp, (unsigned long)cp + |
724 | ROUND(max, CONFIG_SYS_CACHELINE_SIZE)); | 724 | ROUND(max, CONFIG_SYS_CACHELINE_SIZE)); |
725 | 725 | ||
726 | debug_cond(DEBUG_EP0 != 0, | 726 | debug_cond(DEBUG_EP0 != 0, |
727 | "%s: bytes=%d, ep_index=%d 0x%p\n", __func__, | 727 | "%s: bytes=%d, ep_index=%d 0x%p\n", __func__, |
728 | max, ep_index(ep), cp); | 728 | max, ep_index(ep), cp); |
729 | 729 | ||
730 | return max; | 730 | return max; |
731 | } | 731 | } |
732 | 732 | ||
733 | /** | 733 | /** |
734 | * udc_set_address - set the USB address for this device | 734 | * udc_set_address - set the USB address for this device |
735 | * @address: | 735 | * @address: |
736 | * | 736 | * |
737 | * Called from control endpoint function | 737 | * Called from control endpoint function |
738 | * after it decodes a set address setup packet. | 738 | * after it decodes a set address setup packet. |
739 | */ | 739 | */ |
740 | static void udc_set_address(struct s3c_udc *dev, unsigned char address) | 740 | static void udc_set_address(struct s3c_udc *dev, unsigned char address) |
741 | { | 741 | { |
742 | u32 ctrl = readl(®->dcfg); | 742 | u32 ctrl = readl(®->dcfg); |
743 | writel(DEVICE_ADDRESS(address) | ctrl, ®->dcfg); | 743 | writel(DEVICE_ADDRESS(address) | ctrl, ®->dcfg); |
744 | 744 | ||
745 | s3c_udc_ep0_zlp(dev); | 745 | s3c_udc_ep0_zlp(dev); |
746 | 746 | ||
747 | debug_cond(DEBUG_EP0 != 0, | 747 | debug_cond(DEBUG_EP0 != 0, |
748 | "%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n", | 748 | "%s: USB OTG 2.0 Device address=%d, DCFG=0x%x\n", |
749 | __func__, address, readl(®->dcfg)); | 749 | __func__, address, readl(®->dcfg)); |
750 | 750 | ||
751 | dev->usb_address = address; | 751 | dev->usb_address = address; |
752 | } | 752 | } |
753 | 753 | ||
754 | static inline void s3c_udc_ep0_set_stall(struct s3c_ep *ep) | 754 | static inline void s3c_udc_ep0_set_stall(struct s3c_ep *ep) |
755 | { | 755 | { |
756 | struct s3c_udc *dev; | 756 | struct s3c_udc *dev; |
757 | u32 ep_ctrl = 0; | 757 | u32 ep_ctrl = 0; |
758 | 758 | ||
759 | dev = ep->dev; | 759 | dev = ep->dev; |
760 | ep_ctrl = readl(®->in_endp[EP0_CON].diepctl); | 760 | ep_ctrl = readl(®->in_endp[EP0_CON].diepctl); |
761 | 761 | ||
762 | /* set the disable and stall bits */ | 762 | /* set the disable and stall bits */ |
763 | if (ep_ctrl & DEPCTL_EPENA) | 763 | if (ep_ctrl & DEPCTL_EPENA) |
764 | ep_ctrl |= DEPCTL_EPDIS; | 764 | ep_ctrl |= DEPCTL_EPDIS; |
765 | 765 | ||
766 | ep_ctrl |= DEPCTL_STALL; | 766 | ep_ctrl |= DEPCTL_STALL; |
767 | 767 | ||
768 | writel(ep_ctrl, ®->in_endp[EP0_CON].diepctl); | 768 | writel(ep_ctrl, ®->in_endp[EP0_CON].diepctl); |
769 | 769 | ||
770 | debug_cond(DEBUG_EP0 != 0, | 770 | debug_cond(DEBUG_EP0 != 0, |
771 | "%s: set ep%d stall, DIEPCTL0 = 0x%p\n", | 771 | "%s: set ep%d stall, DIEPCTL0 = 0x%p\n", |
772 | __func__, ep_index(ep), ®->in_endp[EP0_CON].diepctl); | 772 | __func__, ep_index(ep), ®->in_endp[EP0_CON].diepctl); |
773 | /* | 773 | /* |
774 | * The application can only set this bit, and the core clears it, | 774 | * The application can only set this bit, and the core clears it, |
775 | * when a SETUP token is received for this endpoint | 775 | * when a SETUP token is received for this endpoint |
776 | */ | 776 | */ |
777 | dev->ep0state = WAIT_FOR_SETUP; | 777 | dev->ep0state = WAIT_FOR_SETUP; |
778 | 778 | ||
779 | s3c_udc_pre_setup(); | 779 | s3c_udc_pre_setup(); |
780 | } | 780 | } |
781 | 781 | ||
782 | static void s3c_ep0_read(struct s3c_udc *dev) | 782 | static void s3c_ep0_read(struct s3c_udc *dev) |
783 | { | 783 | { |
784 | struct s3c_request *req; | 784 | struct s3c_request *req; |
785 | struct s3c_ep *ep = &dev->ep[0]; | 785 | struct s3c_ep *ep = &dev->ep[0]; |
786 | 786 | ||
787 | if (!list_empty(&ep->queue)) { | 787 | if (!list_empty(&ep->queue)) { |
788 | req = list_entry(ep->queue.next, struct s3c_request, queue); | 788 | req = list_entry(ep->queue.next, struct s3c_request, queue); |
789 | 789 | ||
790 | } else { | 790 | } else { |
791 | debug("%s: ---> BUG\n", __func__); | 791 | debug("%s: ---> BUG\n", __func__); |
792 | BUG(); | 792 | BUG(); |
793 | return; | 793 | return; |
794 | } | 794 | } |
795 | 795 | ||
796 | debug_cond(DEBUG_EP0 != 0, | 796 | debug_cond(DEBUG_EP0 != 0, |
797 | "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n", | 797 | "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n", |
798 | __func__, req, req->req.length, req->req.actual); | 798 | __func__, req, req->req.length, req->req.actual); |
799 | 799 | ||
800 | if (req->req.length == 0) { | 800 | if (req->req.length == 0) { |
801 | /* zlp for Set_configuration, Set_interface, | 801 | /* zlp for Set_configuration, Set_interface, |
802 | * or Bulk-Only mass storge reset */ | 802 | * or Bulk-Only mass storge reset */ |
803 | 803 | ||
804 | ep->len = 0; | 804 | ep->len = 0; |
805 | s3c_udc_ep0_zlp(dev); | 805 | s3c_udc_ep0_zlp(dev); |
806 | 806 | ||
807 | debug_cond(DEBUG_EP0 != 0, | 807 | debug_cond(DEBUG_EP0 != 0, |
808 | "%s: req.length = 0, bRequest = %d\n", | 808 | "%s: req.length = 0, bRequest = %d\n", |
809 | __func__, usb_ctrl->bRequest); | 809 | __func__, usb_ctrl->bRequest); |
810 | return; | 810 | return; |
811 | } | 811 | } |
812 | 812 | ||
813 | setdma_rx(ep, req); | 813 | setdma_rx(ep, req); |
814 | } | 814 | } |
815 | 815 | ||
816 | /* | 816 | /* |
817 | * DATA_STATE_XMIT | 817 | * DATA_STATE_XMIT |
818 | */ | 818 | */ |
819 | static int s3c_ep0_write(struct s3c_udc *dev) | 819 | static int s3c_ep0_write(struct s3c_udc *dev) |
820 | { | 820 | { |
821 | struct s3c_request *req; | 821 | struct s3c_request *req; |
822 | struct s3c_ep *ep = &dev->ep[0]; | 822 | struct s3c_ep *ep = &dev->ep[0]; |
823 | int ret, need_zlp = 0; | 823 | int ret, need_zlp = 0; |
824 | 824 | ||
825 | if (list_empty(&ep->queue)) | 825 | if (list_empty(&ep->queue)) |
826 | req = 0; | 826 | req = 0; |
827 | else | 827 | else |
828 | req = list_entry(ep->queue.next, struct s3c_request, queue); | 828 | req = list_entry(ep->queue.next, struct s3c_request, queue); |
829 | 829 | ||
830 | if (!req) { | 830 | if (!req) { |
831 | debug_cond(DEBUG_EP0 != 0, "%s: NULL REQ\n", __func__); | 831 | debug_cond(DEBUG_EP0 != 0, "%s: NULL REQ\n", __func__); |
832 | return 0; | 832 | return 0; |
833 | } | 833 | } |
834 | 834 | ||
835 | debug_cond(DEBUG_EP0 != 0, | 835 | debug_cond(DEBUG_EP0 != 0, |
836 | "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n", | 836 | "%s: req = %p, req.length = 0x%x, req.actual = 0x%x\n", |
837 | __func__, req, req->req.length, req->req.actual); | 837 | __func__, req, req->req.length, req->req.actual); |
838 | 838 | ||
839 | if (req->req.length - req->req.actual == ep0_fifo_size) { | 839 | if (req->req.length - req->req.actual == ep0_fifo_size) { |
840 | /* Next write will end with the packet size, */ | 840 | /* Next write will end with the packet size, */ |
841 | /* so we need Zero-length-packet */ | 841 | /* so we need Zero-length-packet */ |
842 | need_zlp = 1; | 842 | need_zlp = 1; |
843 | } | 843 | } |
844 | 844 | ||
845 | ret = write_fifo_ep0(ep, req); | 845 | ret = write_fifo_ep0(ep, req); |
846 | 846 | ||
847 | if ((ret == 1) && !need_zlp) { | 847 | if ((ret == 1) && !need_zlp) { |
848 | /* Last packet */ | 848 | /* Last packet */ |
849 | dev->ep0state = WAIT_FOR_COMPLETE; | 849 | dev->ep0state = WAIT_FOR_COMPLETE; |
850 | debug_cond(DEBUG_EP0 != 0, | 850 | debug_cond(DEBUG_EP0 != 0, |
851 | "%s: finished, waiting for status\n", __func__); | 851 | "%s: finished, waiting for status\n", __func__); |
852 | 852 | ||
853 | } else { | 853 | } else { |
854 | dev->ep0state = DATA_STATE_XMIT; | 854 | dev->ep0state = DATA_STATE_XMIT; |
855 | debug_cond(DEBUG_EP0 != 0, | 855 | debug_cond(DEBUG_EP0 != 0, |
856 | "%s: not finished\n", __func__); | 856 | "%s: not finished\n", __func__); |
857 | } | 857 | } |
858 | 858 | ||
859 | return 1; | 859 | return 1; |
860 | } | 860 | } |
861 | 861 | ||
862 | int s3c_udc_get_status(struct s3c_udc *dev, | 862 | int s3c_udc_get_status(struct s3c_udc *dev, |
863 | struct usb_ctrlrequest *crq) | 863 | struct usb_ctrlrequest *crq) |
864 | { | 864 | { |
865 | u8 ep_num = crq->wIndex & 0x7F; | 865 | u8 ep_num = crq->wIndex & 0x7F; |
866 | u16 g_status = 0; | 866 | u16 g_status = 0; |
867 | u32 ep_ctrl; | 867 | u32 ep_ctrl; |
868 | 868 | ||
869 | debug_cond(DEBUG_SETUP != 0, | 869 | debug_cond(DEBUG_SETUP != 0, |
870 | "%s: *** USB_REQ_GET_STATUS\n", __func__); | 870 | "%s: *** USB_REQ_GET_STATUS\n", __func__); |
871 | printf("crq->brequest:0x%x\n", crq->bRequestType & USB_RECIP_MASK); | 871 | printf("crq->brequest:0x%x\n", crq->bRequestType & USB_RECIP_MASK); |
872 | switch (crq->bRequestType & USB_RECIP_MASK) { | 872 | switch (crq->bRequestType & USB_RECIP_MASK) { |
873 | case USB_RECIP_INTERFACE: | 873 | case USB_RECIP_INTERFACE: |
874 | g_status = 0; | 874 | g_status = 0; |
875 | debug_cond(DEBUG_SETUP != 0, | 875 | debug_cond(DEBUG_SETUP != 0, |
876 | "\tGET_STATUS:USB_RECIP_INTERFACE, g_stauts = %d\n", | 876 | "\tGET_STATUS:USB_RECIP_INTERFACE, g_stauts = %d\n", |
877 | g_status); | 877 | g_status); |
878 | break; | 878 | break; |
879 | 879 | ||
880 | case USB_RECIP_DEVICE: | 880 | case USB_RECIP_DEVICE: |
881 | g_status = 0x1; /* Self powered */ | 881 | g_status = 0x1; /* Self powered */ |
882 | debug_cond(DEBUG_SETUP != 0, | 882 | debug_cond(DEBUG_SETUP != 0, |
883 | "\tGET_STATUS: USB_RECIP_DEVICE, g_stauts = %d\n", | 883 | "\tGET_STATUS: USB_RECIP_DEVICE, g_stauts = %d\n", |
884 | g_status); | 884 | g_status); |
885 | break; | 885 | break; |
886 | 886 | ||
887 | case USB_RECIP_ENDPOINT: | 887 | case USB_RECIP_ENDPOINT: |
888 | if (crq->wLength > 2) { | 888 | if (crq->wLength > 2) { |
889 | debug_cond(DEBUG_SETUP != 0, | 889 | debug_cond(DEBUG_SETUP != 0, |
890 | "\tGET_STATUS:Not support EP or wLength\n"); | 890 | "\tGET_STATUS:Not support EP or wLength\n"); |
891 | return 1; | 891 | return 1; |
892 | } | 892 | } |
893 | 893 | ||
894 | g_status = dev->ep[ep_num].stopped; | 894 | g_status = dev->ep[ep_num].stopped; |
895 | debug_cond(DEBUG_SETUP != 0, | 895 | debug_cond(DEBUG_SETUP != 0, |
896 | "\tGET_STATUS: USB_RECIP_ENDPOINT, g_stauts = %d\n", | 896 | "\tGET_STATUS: USB_RECIP_ENDPOINT, g_stauts = %d\n", |
897 | g_status); | 897 | g_status); |
898 | 898 | ||
899 | break; | 899 | break; |
900 | 900 | ||
901 | default: | 901 | default: |
902 | return 1; | 902 | return 1; |
903 | } | 903 | } |
904 | 904 | ||
905 | memcpy(usb_ctrl, &g_status, sizeof(g_status)); | 905 | memcpy(usb_ctrl, &g_status, sizeof(g_status)); |
906 | 906 | ||
907 | flush_dcache_range((unsigned long) usb_ctrl, | 907 | flush_dcache_range((unsigned long) usb_ctrl, |
908 | (unsigned long) usb_ctrl + | 908 | (unsigned long) usb_ctrl + |
909 | ROUND(sizeof(g_status), CONFIG_SYS_CACHELINE_SIZE)); | 909 | ROUND(sizeof(g_status), CONFIG_SYS_CACHELINE_SIZE)); |
910 | 910 | ||
911 | writel(usb_ctrl_dma_addr, ®->in_endp[EP0_CON].diepdma); | 911 | writel(usb_ctrl_dma_addr, ®->in_endp[EP0_CON].diepdma); |
912 | writel(DIEPT_SIZ_PKT_CNT(1) | DIEPT_SIZ_XFER_SIZE(2), | 912 | writel(DIEPT_SIZ_PKT_CNT(1) | DIEPT_SIZ_XFER_SIZE(2), |
913 | ®->in_endp[EP0_CON].dieptsiz); | 913 | ®->in_endp[EP0_CON].dieptsiz); |
914 | 914 | ||
915 | ep_ctrl = readl(®->in_endp[EP0_CON].diepctl); | 915 | ep_ctrl = readl(®->in_endp[EP0_CON].diepctl); |
916 | writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK, | 916 | writel(ep_ctrl|DEPCTL_EPENA|DEPCTL_CNAK, |
917 | ®->in_endp[EP0_CON].diepctl); | 917 | ®->in_endp[EP0_CON].diepctl); |
918 | dev->ep0state = WAIT_FOR_NULL_COMPLETE; | 918 | dev->ep0state = WAIT_FOR_NULL_COMPLETE; |
919 | 919 | ||
920 | return 0; | 920 | return 0; |
921 | } | 921 | } |
922 | 922 | ||
923 | static void s3c_udc_set_nak(struct s3c_ep *ep) | 923 | static void s3c_udc_set_nak(struct s3c_ep *ep) |
924 | { | 924 | { |
925 | u8 ep_num; | 925 | u8 ep_num; |
926 | u32 ep_ctrl = 0; | 926 | u32 ep_ctrl = 0; |
927 | 927 | ||
928 | ep_num = ep_index(ep); | 928 | ep_num = ep_index(ep); |
929 | debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type); | 929 | debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type); |
930 | 930 | ||
931 | if (ep_is_in(ep)) { | 931 | if (ep_is_in(ep)) { |
932 | ep_ctrl = readl(®->in_endp[ep_num].diepctl); | 932 | ep_ctrl = readl(®->in_endp[ep_num].diepctl); |
933 | ep_ctrl |= DEPCTL_SNAK; | 933 | ep_ctrl |= DEPCTL_SNAK; |
934 | writel(ep_ctrl, ®->in_endp[ep_num].diepctl); | 934 | writel(ep_ctrl, ®->in_endp[ep_num].diepctl); |
935 | debug("%s: set NAK, DIEPCTL%d = 0x%x\n", | 935 | debug("%s: set NAK, DIEPCTL%d = 0x%x\n", |
936 | __func__, ep_num, readl(®->in_endp[ep_num].diepctl)); | 936 | __func__, ep_num, readl(®->in_endp[ep_num].diepctl)); |
937 | } else { | 937 | } else { |
938 | ep_ctrl = readl(®->out_endp[ep_num].doepctl); | 938 | ep_ctrl = readl(®->out_endp[ep_num].doepctl); |
939 | ep_ctrl |= DEPCTL_SNAK; | 939 | ep_ctrl |= DEPCTL_SNAK; |
940 | writel(ep_ctrl, ®->out_endp[ep_num].doepctl); | 940 | writel(ep_ctrl, ®->out_endp[ep_num].doepctl); |
941 | debug("%s: set NAK, DOEPCTL%d = 0x%x\n", | 941 | debug("%s: set NAK, DOEPCTL%d = 0x%x\n", |
942 | __func__, ep_num, readl(®->out_endp[ep_num].doepctl)); | 942 | __func__, ep_num, readl(®->out_endp[ep_num].doepctl)); |
943 | } | 943 | } |
944 | 944 | ||
945 | return; | 945 | return; |
946 | } | 946 | } |
947 | 947 | ||
948 | 948 | ||
949 | void s3c_udc_ep_set_stall(struct s3c_ep *ep) | 949 | void s3c_udc_ep_set_stall(struct s3c_ep *ep) |
950 | { | 950 | { |
951 | u8 ep_num; | 951 | u8 ep_num; |
952 | u32 ep_ctrl = 0; | 952 | u32 ep_ctrl = 0; |
953 | 953 | ||
954 | ep_num = ep_index(ep); | 954 | ep_num = ep_index(ep); |
955 | debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type); | 955 | debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type); |
956 | 956 | ||
957 | if (ep_is_in(ep)) { | 957 | if (ep_is_in(ep)) { |
958 | ep_ctrl = readl(®->in_endp[ep_num].diepctl); | 958 | ep_ctrl = readl(®->in_endp[ep_num].diepctl); |
959 | 959 | ||
960 | /* set the disable and stall bits */ | 960 | /* set the disable and stall bits */ |
961 | if (ep_ctrl & DEPCTL_EPENA) | 961 | if (ep_ctrl & DEPCTL_EPENA) |
962 | ep_ctrl |= DEPCTL_EPDIS; | 962 | ep_ctrl |= DEPCTL_EPDIS; |
963 | 963 | ||
964 | ep_ctrl |= DEPCTL_STALL; | 964 | ep_ctrl |= DEPCTL_STALL; |
965 | 965 | ||
966 | writel(ep_ctrl, ®->in_endp[ep_num].diepctl); | 966 | writel(ep_ctrl, ®->in_endp[ep_num].diepctl); |
967 | debug("%s: set stall, DIEPCTL%d = 0x%x\n", | 967 | debug("%s: set stall, DIEPCTL%d = 0x%x\n", |
968 | __func__, ep_num, readl(®->in_endp[ep_num].diepctl)); | 968 | __func__, ep_num, readl(®->in_endp[ep_num].diepctl)); |
969 | 969 | ||
970 | } else { | 970 | } else { |
971 | ep_ctrl = readl(®->out_endp[ep_num].doepctl); | 971 | ep_ctrl = readl(®->out_endp[ep_num].doepctl); |
972 | 972 | ||
973 | /* set the stall bit */ | 973 | /* set the stall bit */ |
974 | ep_ctrl |= DEPCTL_STALL; | 974 | ep_ctrl |= DEPCTL_STALL; |
975 | 975 | ||
976 | writel(ep_ctrl, ®->out_endp[ep_num].doepctl); | 976 | writel(ep_ctrl, ®->out_endp[ep_num].doepctl); |
977 | debug("%s: set stall, DOEPCTL%d = 0x%x\n", | 977 | debug("%s: set stall, DOEPCTL%d = 0x%x\n", |
978 | __func__, ep_num, readl(®->out_endp[ep_num].doepctl)); | 978 | __func__, ep_num, readl(®->out_endp[ep_num].doepctl)); |
979 | } | 979 | } |
980 | 980 | ||
981 | return; | 981 | return; |
982 | } | 982 | } |
983 | 983 | ||
984 | void s3c_udc_ep_clear_stall(struct s3c_ep *ep) | 984 | void s3c_udc_ep_clear_stall(struct s3c_ep *ep) |
985 | { | 985 | { |
986 | u8 ep_num; | 986 | u8 ep_num; |
987 | u32 ep_ctrl = 0; | 987 | u32 ep_ctrl = 0; |
988 | 988 | ||
989 | ep_num = ep_index(ep); | 989 | ep_num = ep_index(ep); |
990 | debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type); | 990 | debug("%s: ep_num = %d, ep_type = %d\n", __func__, ep_num, ep->ep_type); |
991 | 991 | ||
992 | if (ep_is_in(ep)) { | 992 | if (ep_is_in(ep)) { |
993 | ep_ctrl = readl(®->in_endp[ep_num].diepctl); | 993 | ep_ctrl = readl(®->in_endp[ep_num].diepctl); |
994 | 994 | ||
995 | /* clear stall bit */ | 995 | /* clear stall bit */ |
996 | ep_ctrl &= ~DEPCTL_STALL; | 996 | ep_ctrl &= ~DEPCTL_STALL; |
997 | 997 | ||
998 | /* | 998 | /* |
999 | * USB Spec 9.4.5: For endpoints using data toggle, regardless | 999 | * USB Spec 9.4.5: For endpoints using data toggle, regardless |
1000 | * of whether an endpoint has the Halt feature set, a | 1000 | * of whether an endpoint has the Halt feature set, a |
1001 | * ClearFeature(ENDPOINT_HALT) request always results in the | 1001 | * ClearFeature(ENDPOINT_HALT) request always results in the |
1002 | * data toggle being reinitialized to DATA0. | 1002 | * data toggle being reinitialized to DATA0. |
1003 | */ | 1003 | */ |
1004 | if (ep->bmAttributes == USB_ENDPOINT_XFER_INT | 1004 | if (ep->bmAttributes == USB_ENDPOINT_XFER_INT |
1005 | || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) { | 1005 | || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) { |
1006 | ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */ | 1006 | ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */ |
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | writel(ep_ctrl, ®->in_endp[ep_num].diepctl); | 1009 | writel(ep_ctrl, ®->in_endp[ep_num].diepctl); |
1010 | debug("%s: cleared stall, DIEPCTL%d = 0x%x\n", | 1010 | debug("%s: cleared stall, DIEPCTL%d = 0x%x\n", |
1011 | __func__, ep_num, readl(®->in_endp[ep_num].diepctl)); | 1011 | __func__, ep_num, readl(®->in_endp[ep_num].diepctl)); |
1012 | 1012 | ||
1013 | } else { | 1013 | } else { |
1014 | ep_ctrl = readl(®->out_endp[ep_num].doepctl); | 1014 | ep_ctrl = readl(®->out_endp[ep_num].doepctl); |
1015 | 1015 | ||
1016 | /* clear stall bit */ | 1016 | /* clear stall bit */ |
1017 | ep_ctrl &= ~DEPCTL_STALL; | 1017 | ep_ctrl &= ~DEPCTL_STALL; |
1018 | 1018 | ||
1019 | if (ep->bmAttributes == USB_ENDPOINT_XFER_INT | 1019 | if (ep->bmAttributes == USB_ENDPOINT_XFER_INT |
1020 | || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) { | 1020 | || ep->bmAttributes == USB_ENDPOINT_XFER_BULK) { |
1021 | ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */ | 1021 | ep_ctrl |= DEPCTL_SETD0PID; /* DATA0 */ |
1022 | } | 1022 | } |
1023 | 1023 | ||
1024 | writel(ep_ctrl, ®->out_endp[ep_num].doepctl); | 1024 | writel(ep_ctrl, ®->out_endp[ep_num].doepctl); |
1025 | debug("%s: cleared stall, DOEPCTL%d = 0x%x\n", | 1025 | debug("%s: cleared stall, DOEPCTL%d = 0x%x\n", |
1026 | __func__, ep_num, readl(®->out_endp[ep_num].doepctl)); | 1026 | __func__, ep_num, readl(®->out_endp[ep_num].doepctl)); |
1027 | } | 1027 | } |
1028 | 1028 | ||
1029 | return; | 1029 | return; |
1030 | } | 1030 | } |
1031 | 1031 | ||
1032 | static int s3c_udc_set_halt(struct usb_ep *_ep, int value) | 1032 | static int s3c_udc_set_halt(struct usb_ep *_ep, int value) |
1033 | { | 1033 | { |
1034 | struct s3c_ep *ep; | 1034 | struct s3c_ep *ep; |
1035 | struct s3c_udc *dev; | 1035 | struct s3c_udc *dev; |
1036 | unsigned long flags; | 1036 | unsigned long flags = 0; |
1037 | u8 ep_num; | 1037 | u8 ep_num; |
1038 | 1038 | ||
1039 | ep = container_of(_ep, struct s3c_ep, ep); | 1039 | ep = container_of(_ep, struct s3c_ep, ep); |
1040 | ep_num = ep_index(ep); | 1040 | ep_num = ep_index(ep); |
1041 | 1041 | ||
1042 | if (unlikely(!_ep || !ep->desc || ep_num == EP0_CON || | 1042 | if (unlikely(!_ep || !ep->desc || ep_num == EP0_CON || |
1043 | ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)) { | 1043 | ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC)) { |
1044 | debug("%s: %s bad ep or descriptor\n", __func__, ep->ep.name); | 1044 | debug("%s: %s bad ep or descriptor\n", __func__, ep->ep.name); |
1045 | return -EINVAL; | 1045 | return -EINVAL; |
1046 | } | 1046 | } |
1047 | 1047 | ||
1048 | /* Attempt to halt IN ep will fail if any transfer requests | 1048 | /* Attempt to halt IN ep will fail if any transfer requests |
1049 | * are still queue */ | 1049 | * are still queue */ |
1050 | if (value && ep_is_in(ep) && !list_empty(&ep->queue)) { | 1050 | if (value && ep_is_in(ep) && !list_empty(&ep->queue)) { |
1051 | debug("%s: %s queue not empty, req = %p\n", | 1051 | debug("%s: %s queue not empty, req = %p\n", |
1052 | __func__, ep->ep.name, | 1052 | __func__, ep->ep.name, |
1053 | list_entry(ep->queue.next, struct s3c_request, queue)); | 1053 | list_entry(ep->queue.next, struct s3c_request, queue)); |
1054 | 1054 | ||
1055 | return -EAGAIN; | 1055 | return -EAGAIN; |
1056 | } | 1056 | } |
1057 | 1057 | ||
1058 | dev = ep->dev; | 1058 | dev = ep->dev; |
1059 | debug("%s: ep_num = %d, value = %d\n", __func__, ep_num, value); | 1059 | debug("%s: ep_num = %d, value = %d\n", __func__, ep_num, value); |
1060 | 1060 | ||
1061 | spin_lock_irqsave(&dev->lock, flags); | 1061 | spin_lock_irqsave(&dev->lock, flags); |
1062 | 1062 | ||
1063 | if (value == 0) { | 1063 | if (value == 0) { |
1064 | ep->stopped = 0; | 1064 | ep->stopped = 0; |
1065 | s3c_udc_ep_clear_stall(ep); | 1065 | s3c_udc_ep_clear_stall(ep); |
1066 | } else { | 1066 | } else { |
1067 | if (ep_num == 0) | 1067 | if (ep_num == 0) |
1068 | dev->ep0state = WAIT_FOR_SETUP; | 1068 | dev->ep0state = WAIT_FOR_SETUP; |
1069 | 1069 | ||
1070 | ep->stopped = 1; | 1070 | ep->stopped = 1; |
1071 | s3c_udc_ep_set_stall(ep); | 1071 | s3c_udc_ep_set_stall(ep); |
1072 | } | 1072 | } |
1073 | 1073 | ||
1074 | spin_unlock_irqrestore(&dev->lock, flags); | 1074 | spin_unlock_irqrestore(&dev->lock, flags); |
1075 | 1075 | ||
1076 | return 0; | 1076 | return 0; |
1077 | } | 1077 | } |
1078 | 1078 | ||
1079 | void s3c_udc_ep_activate(struct s3c_ep *ep) | 1079 | void s3c_udc_ep_activate(struct s3c_ep *ep) |
1080 | { | 1080 | { |
1081 | u8 ep_num; | 1081 | u8 ep_num; |
1082 | u32 ep_ctrl = 0, daintmsk = 0; | 1082 | u32 ep_ctrl = 0, daintmsk = 0; |
1083 | 1083 | ||
1084 | ep_num = ep_index(ep); | 1084 | ep_num = ep_index(ep); |
1085 | 1085 | ||
1086 | /* Read DEPCTLn register */ | 1086 | /* Read DEPCTLn register */ |
1087 | if (ep_is_in(ep)) { | 1087 | if (ep_is_in(ep)) { |
1088 | ep_ctrl = readl(®->in_endp[ep_num].diepctl); | 1088 | ep_ctrl = readl(®->in_endp[ep_num].diepctl); |
1089 | daintmsk = 1 << ep_num; | 1089 | daintmsk = 1 << ep_num; |
1090 | } else { | 1090 | } else { |
1091 | ep_ctrl = readl(®->out_endp[ep_num].doepctl); | 1091 | ep_ctrl = readl(®->out_endp[ep_num].doepctl); |
1092 | daintmsk = (1 << ep_num) << DAINT_OUT_BIT; | 1092 | daintmsk = (1 << ep_num) << DAINT_OUT_BIT; |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | debug("%s: EPCTRL%d = 0x%x, ep_is_in = %d\n", | 1095 | debug("%s: EPCTRL%d = 0x%x, ep_is_in = %d\n", |
1096 | __func__, ep_num, ep_ctrl, ep_is_in(ep)); | 1096 | __func__, ep_num, ep_ctrl, ep_is_in(ep)); |
1097 | 1097 | ||
1098 | /* If the EP is already active don't change the EP Control | 1098 | /* If the EP is already active don't change the EP Control |
1099 | * register. */ | 1099 | * register. */ |
1100 | if (!(ep_ctrl & DEPCTL_USBACTEP)) { | 1100 | if (!(ep_ctrl & DEPCTL_USBACTEP)) { |
1101 | ep_ctrl = (ep_ctrl & ~DEPCTL_TYPE_MASK) | | 1101 | ep_ctrl = (ep_ctrl & ~DEPCTL_TYPE_MASK) | |
1102 | (ep->bmAttributes << DEPCTL_TYPE_BIT); | 1102 | (ep->bmAttributes << DEPCTL_TYPE_BIT); |
1103 | ep_ctrl = (ep_ctrl & ~DEPCTL_MPS_MASK) | | 1103 | ep_ctrl = (ep_ctrl & ~DEPCTL_MPS_MASK) | |
1104 | (ep->ep.maxpacket << DEPCTL_MPS_BIT); | 1104 | (ep->ep.maxpacket << DEPCTL_MPS_BIT); |
1105 | ep_ctrl |= (DEPCTL_SETD0PID | DEPCTL_USBACTEP | DEPCTL_SNAK); | 1105 | ep_ctrl |= (DEPCTL_SETD0PID | DEPCTL_USBACTEP | DEPCTL_SNAK); |
1106 | 1106 | ||
1107 | if (ep_is_in(ep)) { | 1107 | if (ep_is_in(ep)) { |
1108 | writel(ep_ctrl, ®->in_endp[ep_num].diepctl); | 1108 | writel(ep_ctrl, ®->in_endp[ep_num].diepctl); |
1109 | debug("%s: USB Ative EP%d, DIEPCTRL%d = 0x%x\n", | 1109 | debug("%s: USB Ative EP%d, DIEPCTRL%d = 0x%x\n", |
1110 | __func__, ep_num, ep_num, | 1110 | __func__, ep_num, ep_num, |
1111 | readl(®->in_endp[ep_num].diepctl)); | 1111 | readl(®->in_endp[ep_num].diepctl)); |
1112 | } else { | 1112 | } else { |
1113 | writel(ep_ctrl, ®->out_endp[ep_num].doepctl); | 1113 | writel(ep_ctrl, ®->out_endp[ep_num].doepctl); |
1114 | debug("%s: USB Ative EP%d, DOEPCTRL%d = 0x%x\n", | 1114 | debug("%s: USB Ative EP%d, DOEPCTRL%d = 0x%x\n", |
1115 | __func__, ep_num, ep_num, | 1115 | __func__, ep_num, ep_num, |
1116 | readl(®->out_endp[ep_num].doepctl)); | 1116 | readl(®->out_endp[ep_num].doepctl)); |
1117 | } | 1117 | } |
1118 | } | 1118 | } |
1119 | 1119 | ||
1120 | /* Unmask EP Interrtupt */ | 1120 | /* Unmask EP Interrtupt */ |
1121 | writel(readl(®->daintmsk)|daintmsk, ®->daintmsk); | 1121 | writel(readl(®->daintmsk)|daintmsk, ®->daintmsk); |
1122 | debug("%s: DAINTMSK = 0x%x\n", __func__, readl(®->daintmsk)); | 1122 | debug("%s: DAINTMSK = 0x%x\n", __func__, readl(®->daintmsk)); |
1123 | 1123 | ||
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | static int s3c_udc_clear_feature(struct usb_ep *_ep) | 1126 | static int s3c_udc_clear_feature(struct usb_ep *_ep) |
1127 | { | 1127 | { |
1128 | struct s3c_udc *dev; | 1128 | struct s3c_udc *dev; |
1129 | struct s3c_ep *ep; | 1129 | struct s3c_ep *ep; |
1130 | u8 ep_num; | 1130 | u8 ep_num; |
1131 | 1131 | ||
1132 | ep = container_of(_ep, struct s3c_ep, ep); | 1132 | ep = container_of(_ep, struct s3c_ep, ep); |
1133 | ep_num = ep_index(ep); | 1133 | ep_num = ep_index(ep); |
1134 | 1134 | ||
1135 | dev = ep->dev; | 1135 | dev = ep->dev; |
1136 | debug_cond(DEBUG_SETUP != 0, | 1136 | debug_cond(DEBUG_SETUP != 0, |
1137 | "%s: ep_num = %d, is_in = %d, clear_feature_flag = %d\n", | 1137 | "%s: ep_num = %d, is_in = %d, clear_feature_flag = %d\n", |
1138 | __func__, ep_num, ep_is_in(ep), clear_feature_flag); | 1138 | __func__, ep_num, ep_is_in(ep), clear_feature_flag); |
1139 | 1139 | ||
1140 | if (usb_ctrl->wLength != 0) { | 1140 | if (usb_ctrl->wLength != 0) { |
1141 | debug_cond(DEBUG_SETUP != 0, | 1141 | debug_cond(DEBUG_SETUP != 0, |
1142 | "\tCLEAR_FEATURE: wLength is not zero.....\n"); | 1142 | "\tCLEAR_FEATURE: wLength is not zero.....\n"); |
1143 | return 1; | 1143 | return 1; |
1144 | } | 1144 | } |
1145 | 1145 | ||
1146 | switch (usb_ctrl->bRequestType & USB_RECIP_MASK) { | 1146 | switch (usb_ctrl->bRequestType & USB_RECIP_MASK) { |
1147 | case USB_RECIP_DEVICE: | 1147 | case USB_RECIP_DEVICE: |
1148 | switch (usb_ctrl->wValue) { | 1148 | switch (usb_ctrl->wValue) { |
1149 | case USB_DEVICE_REMOTE_WAKEUP: | 1149 | case USB_DEVICE_REMOTE_WAKEUP: |
1150 | debug_cond(DEBUG_SETUP != 0, | 1150 | debug_cond(DEBUG_SETUP != 0, |
1151 | "\tOFF:USB_DEVICE_REMOTE_WAKEUP\n"); | 1151 | "\tOFF:USB_DEVICE_REMOTE_WAKEUP\n"); |
1152 | break; | 1152 | break; |
1153 | 1153 | ||
1154 | case USB_DEVICE_TEST_MODE: | 1154 | case USB_DEVICE_TEST_MODE: |
1155 | debug_cond(DEBUG_SETUP != 0, | 1155 | debug_cond(DEBUG_SETUP != 0, |
1156 | "\tCLEAR_FEATURE: USB_DEVICE_TEST_MODE\n"); | 1156 | "\tCLEAR_FEATURE: USB_DEVICE_TEST_MODE\n"); |
1157 | /** @todo Add CLEAR_FEATURE for TEST modes. */ | 1157 | /** @todo Add CLEAR_FEATURE for TEST modes. */ |
1158 | break; | 1158 | break; |
1159 | } | 1159 | } |
1160 | 1160 | ||
1161 | s3c_udc_ep0_zlp(dev); | 1161 | s3c_udc_ep0_zlp(dev); |
1162 | break; | 1162 | break; |
1163 | 1163 | ||
1164 | case USB_RECIP_ENDPOINT: | 1164 | case USB_RECIP_ENDPOINT: |
1165 | debug_cond(DEBUG_SETUP != 0, | 1165 | debug_cond(DEBUG_SETUP != 0, |
1166 | "\tCLEAR_FEATURE:USB_RECIP_ENDPOINT, wValue = %d\n", | 1166 | "\tCLEAR_FEATURE:USB_RECIP_ENDPOINT, wValue = %d\n", |
1167 | usb_ctrl->wValue); | 1167 | usb_ctrl->wValue); |
1168 | 1168 | ||
1169 | if (usb_ctrl->wValue == USB_ENDPOINT_HALT) { | 1169 | if (usb_ctrl->wValue == USB_ENDPOINT_HALT) { |
1170 | if (ep_num == 0) { | 1170 | if (ep_num == 0) { |
1171 | s3c_udc_ep0_set_stall(ep); | 1171 | s3c_udc_ep0_set_stall(ep); |
1172 | return 0; | 1172 | return 0; |
1173 | } | 1173 | } |
1174 | 1174 | ||
1175 | s3c_udc_ep0_zlp(dev); | 1175 | s3c_udc_ep0_zlp(dev); |
1176 | 1176 | ||
1177 | s3c_udc_ep_clear_stall(ep); | 1177 | s3c_udc_ep_clear_stall(ep); |
1178 | s3c_udc_ep_activate(ep); | 1178 | s3c_udc_ep_activate(ep); |
1179 | ep->stopped = 0; | 1179 | ep->stopped = 0; |
1180 | 1180 | ||
1181 | clear_feature_num = ep_num; | 1181 | clear_feature_num = ep_num; |
1182 | clear_feature_flag = 1; | 1182 | clear_feature_flag = 1; |
1183 | } | 1183 | } |
1184 | break; | 1184 | break; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | return 0; | 1187 | return 0; |
1188 | } | 1188 | } |
1189 | 1189 | ||
1190 | static int s3c_udc_set_feature(struct usb_ep *_ep) | 1190 | static int s3c_udc_set_feature(struct usb_ep *_ep) |
1191 | { | 1191 | { |
1192 | struct s3c_udc *dev; | 1192 | struct s3c_udc *dev; |
1193 | struct s3c_ep *ep; | 1193 | struct s3c_ep *ep; |
1194 | u8 ep_num; | 1194 | u8 ep_num; |
1195 | 1195 | ||
1196 | ep = container_of(_ep, struct s3c_ep, ep); | 1196 | ep = container_of(_ep, struct s3c_ep, ep); |
1197 | ep_num = ep_index(ep); | 1197 | ep_num = ep_index(ep); |
1198 | dev = ep->dev; | 1198 | dev = ep->dev; |
1199 | 1199 | ||
1200 | debug_cond(DEBUG_SETUP != 0, | 1200 | debug_cond(DEBUG_SETUP != 0, |
1201 | "%s: *** USB_REQ_SET_FEATURE , ep_num = %d\n", | 1201 | "%s: *** USB_REQ_SET_FEATURE , ep_num = %d\n", |
1202 | __func__, ep_num); | 1202 | __func__, ep_num); |
1203 | 1203 | ||
1204 | if (usb_ctrl->wLength != 0) { | 1204 | if (usb_ctrl->wLength != 0) { |
1205 | debug_cond(DEBUG_SETUP != 0, | 1205 | debug_cond(DEBUG_SETUP != 0, |
1206 | "\tSET_FEATURE: wLength is not zero.....\n"); | 1206 | "\tSET_FEATURE: wLength is not zero.....\n"); |
1207 | return 1; | 1207 | return 1; |
1208 | } | 1208 | } |
1209 | 1209 | ||
1210 | switch (usb_ctrl->bRequestType & USB_RECIP_MASK) { | 1210 | switch (usb_ctrl->bRequestType & USB_RECIP_MASK) { |
1211 | case USB_RECIP_DEVICE: | 1211 | case USB_RECIP_DEVICE: |
1212 | switch (usb_ctrl->wValue) { | 1212 | switch (usb_ctrl->wValue) { |
1213 | case USB_DEVICE_REMOTE_WAKEUP: | 1213 | case USB_DEVICE_REMOTE_WAKEUP: |
1214 | debug_cond(DEBUG_SETUP != 0, | 1214 | debug_cond(DEBUG_SETUP != 0, |
1215 | "\tSET_FEATURE:USB_DEVICE_REMOTE_WAKEUP\n"); | 1215 | "\tSET_FEATURE:USB_DEVICE_REMOTE_WAKEUP\n"); |
1216 | break; | 1216 | break; |
1217 | case USB_DEVICE_B_HNP_ENABLE: | 1217 | case USB_DEVICE_B_HNP_ENABLE: |
1218 | debug_cond(DEBUG_SETUP != 0, | 1218 | debug_cond(DEBUG_SETUP != 0, |
1219 | "\tSET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n"); | 1219 | "\tSET_FEATURE: USB_DEVICE_B_HNP_ENABLE\n"); |
1220 | break; | 1220 | break; |
1221 | 1221 | ||
1222 | case USB_DEVICE_A_HNP_SUPPORT: | 1222 | case USB_DEVICE_A_HNP_SUPPORT: |
1223 | /* RH port supports HNP */ | 1223 | /* RH port supports HNP */ |
1224 | debug_cond(DEBUG_SETUP != 0, | 1224 | debug_cond(DEBUG_SETUP != 0, |
1225 | "\tSET_FEATURE:USB_DEVICE_A_HNP_SUPPORT\n"); | 1225 | "\tSET_FEATURE:USB_DEVICE_A_HNP_SUPPORT\n"); |
1226 | break; | 1226 | break; |
1227 | 1227 | ||
1228 | case USB_DEVICE_A_ALT_HNP_SUPPORT: | 1228 | case USB_DEVICE_A_ALT_HNP_SUPPORT: |
1229 | /* other RH port does */ | 1229 | /* other RH port does */ |
1230 | debug_cond(DEBUG_SETUP != 0, | 1230 | debug_cond(DEBUG_SETUP != 0, |
1231 | "\tSET: USB_DEVICE_A_ALT_HNP_SUPPORT\n"); | 1231 | "\tSET: USB_DEVICE_A_ALT_HNP_SUPPORT\n"); |
1232 | break; | 1232 | break; |
1233 | } | 1233 | } |
1234 | 1234 | ||
1235 | s3c_udc_ep0_zlp(dev); | 1235 | s3c_udc_ep0_zlp(dev); |
1236 | return 0; | 1236 | return 0; |
1237 | 1237 | ||
1238 | case USB_RECIP_INTERFACE: | 1238 | case USB_RECIP_INTERFACE: |
1239 | debug_cond(DEBUG_SETUP != 0, | 1239 | debug_cond(DEBUG_SETUP != 0, |
1240 | "\tSET_FEATURE: USB_RECIP_INTERFACE\n"); | 1240 | "\tSET_FEATURE: USB_RECIP_INTERFACE\n"); |
1241 | break; | 1241 | break; |
1242 | 1242 | ||
1243 | case USB_RECIP_ENDPOINT: | 1243 | case USB_RECIP_ENDPOINT: |
1244 | debug_cond(DEBUG_SETUP != 0, | 1244 | debug_cond(DEBUG_SETUP != 0, |
1245 | "\tSET_FEATURE: USB_RECIP_ENDPOINT\n"); | 1245 | "\tSET_FEATURE: USB_RECIP_ENDPOINT\n"); |
1246 | if (usb_ctrl->wValue == USB_ENDPOINT_HALT) { | 1246 | if (usb_ctrl->wValue == USB_ENDPOINT_HALT) { |
1247 | if (ep_num == 0) { | 1247 | if (ep_num == 0) { |
1248 | s3c_udc_ep0_set_stall(ep); | 1248 | s3c_udc_ep0_set_stall(ep); |
1249 | return 0; | 1249 | return 0; |
1250 | } | 1250 | } |
1251 | ep->stopped = 1; | 1251 | ep->stopped = 1; |
1252 | s3c_udc_ep_set_stall(ep); | 1252 | s3c_udc_ep_set_stall(ep); |
1253 | } | 1253 | } |
1254 | 1254 | ||
1255 | s3c_udc_ep0_zlp(dev); | 1255 | s3c_udc_ep0_zlp(dev); |
1256 | return 0; | 1256 | return 0; |
1257 | } | 1257 | } |
1258 | 1258 | ||
1259 | return 1; | 1259 | return 1; |
1260 | } | 1260 | } |
1261 | 1261 | ||
/*
 * WAIT_FOR_SETUP (OUT_PKT_RDY)
 *
 * Read an 8-byte SETUP packet from the ep0 FIFO and decode it.  A few
 * standard requests (SET_ADDRESS, GET_STATUS, CLEAR/SET_FEATURE, ...)
 * are handled or pre-processed right here; everything else is handed
 * to the bound gadget driver's setup() callback.
 */
void s3c_ep0_setup(struct s3c_udc *dev)
{
	struct s3c_ep *ep = &dev->ep[0];
	int i;
	u8 ep_num;

	/* Nuke all previous transfers */
	nuke(ep, -EPROTO);

	/* read control req from fifo (8 bytes) */
	s3c_fifo_read(ep, (u32 *)usb_ctrl, 8);

	debug_cond(DEBUG_SETUP != 0,
		   "%s: bRequestType = 0x%x(%s), bRequest = 0x%x"
		   "\twLength = 0x%x, wValue = 0x%x, wIndex= 0x%x\n",
		   __func__, usb_ctrl->bRequestType,
		   (usb_ctrl->bRequestType & USB_DIR_IN) ? "IN" : "OUT",
		   usb_ctrl->bRequest,
		   usb_ctrl->wLength, usb_ctrl->wValue, usb_ctrl->wIndex);

#ifdef DEBUG
	{
		/* Hex-dump the raw SETUP packet for low-level debugging */
		int i, len = sizeof(*usb_ctrl);
		char *p = (char *)usb_ctrl;

		printf("pkt = ");
		for (i = 0; i < len; i++) {
			printf("%02x", ((u8 *)p)[i]);
			if ((i & 7) == 7)
				printf(" ");
		}
		printf("\n");
	}
#endif

	/*
	 * Sanity-check two mass-storage class requests before forwarding:
	 * malformed wLength values get an immediate protocol stall.
	 */
	if (usb_ctrl->bRequest == GET_MAX_LUN_REQUEST &&
	    usb_ctrl->wLength != 1) {
		debug_cond(DEBUG_SETUP != 0,
			   "\t%s:GET_MAX_LUN_REQUEST:invalid",
			   __func__);
		debug_cond(DEBUG_SETUP != 0,
			   "wLength = %d, setup returned\n",
			   usb_ctrl->wLength);

		s3c_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	} else if (usb_ctrl->bRequest == BOT_RESET_REQUEST &&
		   usb_ctrl->wLength != 0) {
		/* Bulk-Only mass storage reset (class-specific request):
		 * must not carry a data stage */
		debug_cond(DEBUG_SETUP != 0,
			   "%s:BOT Rest:invalid wLength =%d, setup returned\n",
			   __func__, usb_ctrl->wLength);

		s3c_udc_ep0_set_stall(ep);
		dev->ep0state = WAIT_FOR_SETUP;

		return;
	}

	/* Set direction of EP0 */
	if (likely(usb_ctrl->bRequestType & USB_DIR_IN)) {
		ep->bEndpointAddress |= USB_DIR_IN;
	} else {
		ep->bEndpointAddress &= ~USB_DIR_IN;
	}
	/* cope with automagic for some standard requests. */
	dev->req_std = (usb_ctrl->bRequestType & USB_TYPE_MASK)
		== USB_TYPE_STANDARD;

	dev->req_pending = 1;

	/* Handle some SETUP packets ourselves */
	if (dev->req_std) {
		switch (usb_ctrl->bRequest) {
		case USB_REQ_SET_ADDRESS:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_SET_ADDRESS (%d)\n",
				   __func__, usb_ctrl->wValue);
			/* Only valid for device recipient, standard type */
			if (usb_ctrl->bRequestType
				!= (USB_TYPE_STANDARD | USB_RECIP_DEVICE))
				break;

			/* Fully consumed here; no gadget-driver involvement */
			udc_set_address(dev, usb_ctrl->wValue);
			return;

		case USB_REQ_SET_CONFIGURATION:
			debug_cond(DEBUG_SETUP != 0,
				   "=====================================\n");
			debug_cond(DEBUG_SETUP != 0,
				   "%s: USB_REQ_SET_CONFIGURATION (%d)\n",
				   __func__, usb_ctrl->wValue);

			if (usb_ctrl->bRequestType == USB_RECIP_DEVICE)
				reset_available = 1;

			break;

		case USB_REQ_GET_DESCRIPTOR:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_GET_DESCRIPTOR\n",
				   __func__);
			break;

		case USB_REQ_SET_INTERFACE:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_SET_INTERFACE (%d)\n",
				   __func__, usb_ctrl->wValue);

			if (usb_ctrl->bRequestType == USB_RECIP_INTERFACE)
				reset_available = 1;

			break;

		case USB_REQ_GET_CONFIGURATION:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** USB_REQ_GET_CONFIGURATION\n",
				   __func__);
			break;

		case USB_REQ_GET_STATUS:
			/* Zero return means fully handled; skip the driver */
			if (!s3c_udc_get_status(dev, usb_ctrl))
				return;

			break;

		case USB_REQ_CLEAR_FEATURE:
			/* wIndex low bits select the target endpoint */
			ep_num = usb_ctrl->wIndex & 0x7f;

			if (!s3c_udc_clear_feature(&dev->ep[ep_num].ep))
				return;

			break;

		case USB_REQ_SET_FEATURE:
			/* wIndex low bits select the target endpoint */
			ep_num = usb_ctrl->wIndex & 0x7f;

			if (!s3c_udc_set_feature(&dev->ep[ep_num].ep))
				return;

			break;

		default:
			debug_cond(DEBUG_SETUP != 0,
				   "%s: *** Default of usb_ctrl->bRequest=0x%x"
				   "happened.\n", __func__, usb_ctrl->bRequest);
			break;
		}
	}


	if (likely(dev->driver)) {
		/* device-2-host (IN) or no data setup command,
		 * process immediately */
		debug_cond(DEBUG_SETUP != 0,
			   "%s:usb_ctrlreq will be passed to fsg_setup()\n",
			   __func__);

		/* Drop the lock around the gadget-driver callback, which may
		 * queue requests back into this UDC. */
		spin_unlock(&dev->lock);
		i = dev->driver->setup(&dev->gadget, usb_ctrl);
		spin_lock(&dev->lock);

		if (i < 0) {
			/* setup processing failed, force stall */
			s3c_udc_ep0_set_stall(ep);
			dev->ep0state = WAIT_FOR_SETUP;

			debug_cond(DEBUG_SETUP != 0,
				   "\tdev->driver->setup failed (%d),"
				   " bRequest = %d\n",
				   i, usb_ctrl->bRequest);


		} else if (dev->req_pending) {
			dev->req_pending = 0;
			debug_cond(DEBUG_SETUP != 0,
				   "\tdev->req_pending...\n");
		}

		debug_cond(DEBUG_SETUP != 0,
			   "\tep0state = %s\n", state_names[dev->ep0state]);

	}
}
1450 | 1450 | ||
1451 | /* | 1451 | /* |
1452 | * handle ep0 interrupt | 1452 | * handle ep0 interrupt |
1453 | */ | 1453 | */ |
1454 | static void s3c_handle_ep0(struct s3c_udc *dev) | 1454 | static void s3c_handle_ep0(struct s3c_udc *dev) |
1455 | { | 1455 | { |
1456 | if (dev->ep0state == WAIT_FOR_SETUP) { | 1456 | if (dev->ep0state == WAIT_FOR_SETUP) { |
1457 | debug_cond(DEBUG_OUT_EP != 0, | 1457 | debug_cond(DEBUG_OUT_EP != 0, |
1458 | "%s: WAIT_FOR_SETUP\n", __func__); | 1458 | "%s: WAIT_FOR_SETUP\n", __func__); |
1459 | s3c_ep0_setup(dev); | 1459 | s3c_ep0_setup(dev); |
1460 | 1460 | ||
1461 | } else { | 1461 | } else { |
1462 | debug_cond(DEBUG_OUT_EP != 0, | 1462 | debug_cond(DEBUG_OUT_EP != 0, |
1463 | "%s: strange state!!(state = %s)\n", | 1463 | "%s: strange state!!(state = %s)\n", |
1464 | __func__, state_names[dev->ep0state]); | 1464 | __func__, state_names[dev->ep0state]); |
1465 | } | 1465 | } |
1466 | } | 1466 | } |
1467 | 1467 | ||
1468 | static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep) | 1468 | static void s3c_ep0_kick(struct s3c_udc *dev, struct s3c_ep *ep) |
1469 | { | 1469 | { |
1470 | debug_cond(DEBUG_EP0 != 0, | 1470 | debug_cond(DEBUG_EP0 != 0, |
1471 | "%s: ep_is_in = %d\n", __func__, ep_is_in(ep)); | 1471 | "%s: ep_is_in = %d\n", __func__, ep_is_in(ep)); |
1472 | if (ep_is_in(ep)) { | 1472 | if (ep_is_in(ep)) { |
1473 | dev->ep0state = DATA_STATE_XMIT; | 1473 | dev->ep0state = DATA_STATE_XMIT; |
1474 | s3c_ep0_write(dev); | 1474 | s3c_ep0_write(dev); |
1475 | 1475 | ||
1476 | } else { | 1476 | } else { |
1477 | dev->ep0state = DATA_STATE_RECV; | 1477 | dev->ep0state = DATA_STATE_RECV; |
1478 | s3c_ep0_read(dev); | 1478 | s3c_ep0_read(dev); |
1479 | } | 1479 | } |
1480 | } | 1480 | } |
1481 | 1481 |
include/dfu.h
/*
 * dfu.h - DFU flashable area description
 *
 * Copyright (C) 2012 Samsung Electronics
 * authors: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *	    Lukasz Majewski <l.majewski@samsung.com>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#ifndef __DFU_ENTITY_H_
#define __DFU_ENTITY_H_

#include <common.h>
#include <linux/list.h>
#include <mmc.h>
#include <spi_flash.h>
#include <linux/usb/composite.h>

/* Backing storage a DFU entity may live on */
enum dfu_device_type {
	DFU_DEV_MMC = 1,
	DFU_DEV_ONENAND,
	DFU_DEV_NAND,
	DFU_DEV_RAM,
	DFU_DEV_SF,
};

/* How the entity is addressed on the medium: raw offsets or a filesystem */
enum dfu_layout {
	DFU_RAW_ADDR = 1,
	DFU_FS_FAT,
	DFU_FS_EXT2,
	DFU_FS_EXT3,
	DFU_FS_EXT4,
	DFU_RAM_ADDR,
};

/* Operation being performed on an entity */
enum dfu_op {
	DFU_OP_READ = 1,
	DFU_OP_WRITE,
	DFU_OP_SIZE,
};

/* Per-entity data for MMC/eMMC targets */
struct mmc_internal_data {
	int dev_num;

	/* RAW programming */
	unsigned int lba_start;
	unsigned int lba_size;
	unsigned int lba_blk_size;

	/* eMMC HW partition access */
	int hw_partition;

	/* FAT/EXT */
	unsigned int dev;
	unsigned int part;
};

/* Per-entity data for NAND/OneNAND targets */
struct nand_internal_data {
	/* RAW programming */
	u64 start;
	u64 size;

	unsigned int dev;
	unsigned int part;
	/* for nand/ubi use */
	unsigned int ubi;
};

/* Per-entity data for RAM targets */
struct ram_internal_data {
	void		*start;
	unsigned int	size;
};

/* Per-entity data for SPI-flash targets */
struct sf_internal_data {
	struct spi_flash *dev;

	/* RAW programming */
	u64 start;
	u64 size;
};

#define DFU_NAME_SIZE			32
#define DFU_CMD_BUF_SIZE		128
#ifndef CONFIG_SYS_DFU_DATA_BUF_SIZE
#define CONFIG_SYS_DFU_DATA_BUF_SIZE		(1024*1024*8)	/* 8 MiB */
#endif
#ifndef CONFIG_SYS_DFU_MAX_FILE_SIZE
#define CONFIG_SYS_DFU_MAX_FILE_SIZE	CONFIG_SYS_DFU_DATA_BUF_SIZE
#endif
#ifndef DFU_DEFAULT_POLL_TIMEOUT
#define DFU_DEFAULT_POLL_TIMEOUT 0
#endif
#ifndef DFU_MANIFEST_POLL_TIMEOUT
#define DFU_MANIFEST_POLL_TIMEOUT	DFU_DEFAULT_POLL_TIMEOUT
#endif

/*
 * One DFU-flashable area.  The function pointers abstract the medium
 * (MMC, NAND, RAM, SPI flash); the trailing fields track the state of
 * an in-progress transfer.
 */
struct dfu_entity {
	char			name[DFU_NAME_SIZE];
	int                     alt;	/* USB alternate-setting number */
	void                    *dev_private;
	enum dfu_device_type    dev_type;
	enum dfu_layout         layout;
	unsigned long           max_buf_size;

	/* medium-specific addressing; selected by dev_type */
	union {
		struct mmc_internal_data mmc;
		struct nand_internal_data nand;
		struct ram_internal_data ram;
		struct sf_internal_data sf;
	} data;

	long (*get_medium_size)(struct dfu_entity *dfu);

	int (*read_medium)(struct dfu_entity *dfu,
			u64 offset, void *buf, long *len);

	int (*write_medium)(struct dfu_entity *dfu,
			u64 offset, void *buf, long *len);

	int (*flush_medium)(struct dfu_entity *dfu);
	unsigned int (*poll_timeout)(struct dfu_entity *dfu);

	void (*free_entity)(struct dfu_entity *dfu);

	struct list_head list;

	/* on the fly state */
	u32 crc;
	u64 offset;
	int i_blk_seq_num;	/* expected block sequence number */
	u8 *i_buf;
	u8 *i_buf_start;
	u8 *i_buf_end;
	long r_left;		/* bytes left to read from the medium */
	long b_left;		/* bytes left in the intermediate buffer */

	u32 bad_skip;	/* for nand use */

	unsigned int inited:1;
};

int dfu_config_entities(char *s, char *interface, char *devstr);
void dfu_free_entities(void);
void dfu_show_entities(void);
int dfu_get_alt_number(void);
const char *dfu_get_dev_type(enum dfu_device_type t);
const char *dfu_get_layout(enum dfu_layout l);
struct dfu_entity *dfu_get_entity(int alt);
char *dfu_extract_token(char** e, int *n);
void dfu_trigger_reset(void);
int dfu_get_alt(char *name);
/* detach flag: set by a USB DFU_DETACH request, polled by the dfu command */
bool dfu_detach(void);
void dfu_trigger_detach(void);
void dfu_clear_detach(void);
int dfu_init_env_entities(char *interface, char *devstr);
unsigned char *dfu_get_buf(struct dfu_entity *dfu);
unsigned char *dfu_free_buf(void);
unsigned long dfu_get_buf_size(void);
bool dfu_usb_get_reset(void);

int dfu_read(struct dfu_entity *de, void *buf, int size, int blk_seq_num);
int dfu_write(struct dfu_entity *de, void *buf, int size, int blk_seq_num);
int dfu_flush(struct dfu_entity *de, void *buf, int size, int blk_seq_num);
/* Device specific */
#ifdef CONFIG_DFU_MMC
extern int dfu_fill_entity_mmc(struct dfu_entity *dfu, char *devstr, char *s);
#else
static inline int dfu_fill_entity_mmc(struct dfu_entity *dfu, char *devstr,
				      char *s)
{
	puts("MMC support not available!\n");
	return -1;
}
#endif

#ifdef CONFIG_DFU_NAND
extern int dfu_fill_entity_nand(struct dfu_entity *dfu, char *devstr, char *s);
#else
static inline int dfu_fill_entity_nand(struct dfu_entity *dfu, char *devstr,
				       char *s)
{
	puts("NAND support not available!\n");
	return -1;
}
#endif

#ifdef CONFIG_DFU_RAM
extern int dfu_fill_entity_ram(struct dfu_entity *dfu, char *devstr, char *s);
#else
static inline int dfu_fill_entity_ram(struct dfu_entity *dfu, char *devstr,
				      char *s)
{
	puts("RAM support not available!\n");
	return -1;
}
#endif

#ifdef CONFIG_DFU_SF
extern int dfu_fill_entity_sf(struct dfu_entity *dfu, char *devstr, char *s);
#else
static inline int dfu_fill_entity_sf(struct dfu_entity *dfu, char *devstr,
				     char *s)
{
	puts("SF support not available!\n");
	return -1;
}
#endif

int dfu_add(struct usb_configuration *c);
#endif /* __DFU_ENTITY_H_ */