Commit 9b7c552bba88748001574925b80ba520691b0e4d
Committed by
Takashi Iwai
1 parent
239b9f7990
Exists in
master
and in
16 other branches
ALSA: usb-audio: void return type of snd_usb_endpoint_deactivate()
The return value of snd_usb_endpoint_deactivate() is not used, make the function have no return value. Update the documentation to reflect what the function is actually doing. Signed-off-by: Eldad Zack <eldad@fogrefinery.com> Signed-off-by: Takashi Iwai <tiwai@suse.de>
Showing 2 changed files with 6 additions and 11 deletions Inline Diff
sound/usb/endpoint.c
1 | /* | 1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | 2 | * This program is free software; you can redistribute it and/or modify |
3 | * it under the terms of the GNU General Public License as published by | 3 | * it under the terms of the GNU General Public License as published by |
4 | * the Free Software Foundation; either version 2 of the License, or | 4 | * the Free Software Foundation; either version 2 of the License, or |
5 | * (at your option) any later version. | 5 | * (at your option) any later version. |
6 | * | 6 | * |
7 | * This program is distributed in the hope that it will be useful, | 7 | * This program is distributed in the hope that it will be useful, |
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
10 | * GNU General Public License for more details. | 10 | * GNU General Public License for more details. |
11 | * | 11 | * |
12 | * You should have received a copy of the GNU General Public License | 12 | * You should have received a copy of the GNU General Public License |
13 | * along with this program; if not, write to the Free Software | 13 | * along with this program; if not, write to the Free Software |
14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 14 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #include <linux/gfp.h> | 18 | #include <linux/gfp.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/ratelimit.h> | 20 | #include <linux/ratelimit.h> |
21 | #include <linux/usb.h> | 21 | #include <linux/usb.h> |
22 | #include <linux/usb/audio.h> | 22 | #include <linux/usb/audio.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | 24 | ||
25 | #include <sound/core.h> | 25 | #include <sound/core.h> |
26 | #include <sound/pcm.h> | 26 | #include <sound/pcm.h> |
27 | #include <sound/pcm_params.h> | 27 | #include <sound/pcm_params.h> |
28 | 28 | ||
29 | #include "usbaudio.h" | 29 | #include "usbaudio.h" |
30 | #include "helper.h" | 30 | #include "helper.h" |
31 | #include "card.h" | 31 | #include "card.h" |
32 | #include "endpoint.h" | 32 | #include "endpoint.h" |
33 | #include "pcm.h" | 33 | #include "pcm.h" |
34 | #include "quirks.h" | 34 | #include "quirks.h" |
35 | 35 | ||
36 | #define EP_FLAG_ACTIVATED 0 | 36 | #define EP_FLAG_ACTIVATED 0 |
37 | #define EP_FLAG_RUNNING 1 | 37 | #define EP_FLAG_RUNNING 1 |
38 | #define EP_FLAG_STOPPING 2 | 38 | #define EP_FLAG_STOPPING 2 |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * snd_usb_endpoint is a model that abstracts everything related to an | 41 | * snd_usb_endpoint is a model that abstracts everything related to an |
42 | * USB endpoint and its streaming. | 42 | * USB endpoint and its streaming. |
43 | * | 43 | * |
44 | * There are functions to activate and deactivate the streaming URBs and | 44 | * There are functions to activate and deactivate the streaming URBs and |
45 | * optional callbacks to let the pcm logic handle the actual content of the | 45 | * optional callbacks to let the pcm logic handle the actual content of the |
46 | * packets for playback and record. Thus, the bus streaming and the audio | 46 | * packets for playback and record. Thus, the bus streaming and the audio |
47 | * handlers are fully decoupled. | 47 | * handlers are fully decoupled. |
48 | * | 48 | * |
49 | * There are two different types of endpoints in audio applications. | 49 | * There are two different types of endpoints in audio applications. |
50 | * | 50 | * |
51 | * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both | 51 | * SND_USB_ENDPOINT_TYPE_DATA handles full audio data payload for both |
52 | * inbound and outbound traffic. | 52 | * inbound and outbound traffic. |
53 | * | 53 | * |
54 | * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and | 54 | * SND_USB_ENDPOINT_TYPE_SYNC endpoints are for inbound traffic only and |
55 | * expect the payload to carry Q10.14 / Q16.16 formatted sync information | 55 | * expect the payload to carry Q10.14 / Q16.16 formatted sync information |
56 | * (3 or 4 bytes). | 56 | * (3 or 4 bytes). |
57 | * | 57 | * |
58 | * Each endpoint has to be configured prior to being used by calling | 58 | * Each endpoint has to be configured prior to being used by calling |
59 | * snd_usb_endpoint_set_params(). | 59 | * snd_usb_endpoint_set_params(). |
60 | * | 60 | * |
61 | * The model incorporates a reference counting, so that multiple users | 61 | * The model incorporates a reference counting, so that multiple users |
62 | * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and | 62 | * can call snd_usb_endpoint_start() and snd_usb_endpoint_stop(), and |
63 | * only the first user will effectively start the URBs, and only the last | 63 | * only the first user will effectively start the URBs, and only the last |
64 | * one to stop it will tear the URBs down again. | 64 | * one to stop it will tear the URBs down again. |
65 | */ | 65 | */ |
66 | 66 | ||
/*
 * Convert a sampling rate (in Hz) into the USB full speed feedback
 * format: frames per USB frame (fs/1000) as a Q16.16 fixed point value.
 * The shift by 13 pre-scales by 2^16/8 and the "+ 62" rounds the
 * division by 125 (= 1000/8) to nearest.
 * This will overflow at approx 524 kHz.
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	unsigned int scaled = rate << 13;	/* rate * 2^16 / 8 */

	return (scaled + 62) / 125;		/* divide by 1000/8, rounded */
}
75 | 75 | ||
/*
 * Convert a sampling rate (in Hz) into the USB high speed feedback
 * format: frames per microframe (fs/8000) as a Q16.16 fixed point value.
 * The shift by 10 pre-scales by 2^16/64 and the "+ 62" rounds the
 * division by 125 (= 8000/64) to nearest.
 * This will overflow at approx 4 MHz.
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	unsigned int scaled = rate << 10;	/* rate * 2^16 / 64 */

	return (scaled + 62) / 125;		/* divide by 8000/64, rounded */
}
84 | 84 | ||
/*
 * release a urb data
 *
 * Free the DMA-coherent transfer buffer (only if one was allocated,
 * indicated by a non-zero buffer_size) and then the URB itself.
 * The urb pointer is cleared so a repeated release is harmless.
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->buffer_size)
		usb_free_coherent(u->ep->chip->dev, u->buffer_size,
				  u->urb->transfer_buffer,
				  u->urb->transfer_dma);
	usb_free_urb(u->urb);
	u->urb = NULL;
}
97 | 97 | ||
/*
 * Map a USB submission error code to a short human-readable string
 * for use in log messages. Unrecognized codes yield "unknown error".
 */
static const char *usb_error_string(int err)
{
	if (err == -ENODEV)
		return "no device";
	if (err == -ENOENT)
		return "endpoint not enabled";
	if (err == -EPIPE)
		return "endpoint stalled";
	if (err == -ENOSPC)
		return "not enough bandwidth";
	if (err == -ESHUTDOWN)
		return "device disabled";
	if (err == -EHOSTUNREACH)
		return "device suspended";
	if (err == -EINVAL || err == -EAGAIN ||
	    err == -EFBIG || err == -EMSGSIZE)
		return "internal error";
	return "unknown error";
}
122 | 122 | ||
/**
 * snd_usb_endpoint_implicit_feedback_sink: Report endpoint usage type
 *
 * @ep: The snd_usb_endpoint
 *
 * Determine whether an endpoint is driven by an implicit feedback
 * data endpoint source.
 *
 * Returns non-zero if @ep is an outbound data endpoint whose sync
 * master is itself a data (capture) endpoint rather than a dedicated
 * sync endpoint.
 */
int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep)
{
	return ep->sync_master &&
		ep->sync_master->type == SND_USB_ENDPOINT_TYPE_DATA &&
		ep->type == SND_USB_ENDPOINT_TYPE_DATA &&
		usb_pipeout(ep->pipe);
}
138 | 138 | ||
/*
 * For streaming based on information derived from sync endpoints,
 * prepare_outbound_urb_sizes() will call next_packet_size() to
 * determine the number of samples to be sent in the next packet.
 *
 * For implicit feedback, next_packet_size() is unused.
 *
 * Returns the frame count for the next packet, derived from a Q16.16
 * phase accumulator fed by the measured rate (freqm), capped at
 * ep->maxframesize. The accumulator update is serialized by ep->lock.
 */
int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
{
	unsigned long flags;
	int ret;

	/* in fill-max mode, always send maximum-sized packets */
	if (ep->fill_max)
		return ep->maxframesize;

	spin_lock_irqsave(&ep->lock, flags);
	/* keep the fractional part, add the rate scaled by the data interval */
	ep->phase = (ep->phase & 0xffff)
		+ (ep->freqm << ep->datainterval);
	/* the integer part of the phase is the number of frames to send */
	ret = min(ep->phase >> 16, ep->maxframesize);
	spin_unlock_irqrestore(&ep->lock, flags);

	return ret;
}
162 | 162 | ||
/*
 * Hand a completed playback URB back to the PCM layer via the optional
 * retire_data_urb callback (no-op when no callback is registered).
 */
static void retire_outbound_urb(struct snd_usb_endpoint *ep,
				struct snd_urb_ctx *urb_ctx)
{
	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb_ctx->urb);
}
169 | 169 | ||
/*
 * Process a completed capture or sync URB: optionally drop the first
 * few packets (quirk handling via skip_packets), forward sync
 * information to a slaved playback endpoint, then hand the data to the
 * PCM layer via the optional retire_data_urb callback.
 */
static void retire_inbound_urb(struct snd_usb_endpoint *ep,
			       struct snd_urb_ctx *urb_ctx)
{
	struct urb *urb = urb_ctx->urb;

	/* some devices need their initial packets ignored */
	if (unlikely(ep->skip_packets > 0)) {
		ep->skip_packets--;
		return;
	}

	/* feed rate/sync information to the endpoint we are pacing */
	if (ep->sync_slave)
		snd_usb_handle_sync_urb(ep->sync_slave, ep, urb);

	if (ep->retire_data_urb)
		ep->retire_data_urb(ep->data_subs, urb);
}
186 | 186 | ||
/*
 * Prepare a PLAYBACK urb for submission to the bus.
 *
 * For data endpoints, the PCM layer fills the payload via
 * prepare_data_urb when registered; otherwise silence is sent,
 * with per-packet sizes taken from the context or derived from the
 * sync-based rate estimate.
 * For sync endpoints, the nominal frequency (freqn) is encoded into
 * the payload in the speed-dependent wire format.
 */
static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
				 struct snd_urb_ctx *ctx)
{
	int i;
	struct urb *urb = ctx->urb;
	unsigned char *cp = urb->transfer_buffer;

	urb->dev = ep->chip->dev; /* we need to set this at each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		if (ep->prepare_data_urb) {
			ep->prepare_data_urb(ep->data_subs, urb);
		} else {
			/* no data provider, so send silence */
			unsigned int offs = 0;
			for (i = 0; i < ctx->packets; ++i) {
				int counts;

				/* use the pre-computed packet size if set,
				 * otherwise derive it from the sync rate */
				if (ctx->packet_size[i])
					counts = ctx->packet_size[i];
				else
					counts = snd_usb_endpoint_next_packet_size(ep);

				urb->iso_frame_desc[i].offset = offs * ep->stride;
				urb->iso_frame_desc[i].length = counts * ep->stride;
				offs += counts;
			}

			urb->number_of_packets = ctx->packets;
			urb->transfer_buffer_length = offs * ep->stride;
			/* fill the whole payload with the silence byte */
			memset(urb->transfer_buffer, ep->silence_value,
			       offs * ep->stride);
		}
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		if (snd_usb_get_speed(ep->chip->dev) >= USB_SPEED_HIGH) {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
			 */
			urb->iso_frame_desc[0].length = 4;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn;
			cp[1] = ep->freqn >> 8;
			cp[2] = ep->freqn >> 16;
			cp[3] = ep->freqn >> 24;
		} else {
			/*
			 * fill the length and offset of each urb descriptor.
			 * the fixed 10.14 frequency is passed through the pipe.
			 */
			urb->iso_frame_desc[0].length = 3;
			urb->iso_frame_desc[0].offset = 0;
			cp[0] = ep->freqn >> 2;
			cp[1] = ep->freqn >> 10;
			cp[2] = ep->freqn >> 18;
		}

		break;
	}
}
253 | 253 | ||
/*
 * Prepare a CAPTURE or SYNC urb for submission to the bus.
 *
 * Data endpoints get every iso frame descriptor set up to receive
 * curpacksize bytes; sync endpoints receive a single small packet
 * (at most 4 bytes, bounded by syncmaxsize).
 */
static inline void prepare_inbound_urb(struct snd_usb_endpoint *ep,
				       struct snd_urb_ctx *urb_ctx)
{
	int i, offs;
	struct urb *urb = urb_ctx->urb;

	urb->dev = ep->chip->dev; /* we need to set this at each time */

	switch (ep->type) {
	case SND_USB_ENDPOINT_TYPE_DATA:
		/* lay out the packets back-to-back in the transfer buffer */
		offs = 0;
		for (i = 0; i < urb_ctx->packets; i++) {
			urb->iso_frame_desc[i].offset = offs;
			urb->iso_frame_desc[i].length = ep->curpacksize;
			offs += ep->curpacksize;
		}

		urb->transfer_buffer_length = offs;
		urb->number_of_packets = urb_ctx->packets;
		break;

	case SND_USB_ENDPOINT_TYPE_SYNC:
		urb->iso_frame_desc[0].length = min(4u, ep->syncmaxsize);
		urb->iso_frame_desc[0].offset = 0;
		break;
	}
}
284 | 284 | ||
/*
 * Send output urbs that have been prepared previously. URBs are dequeued
 * from ep->ready_playback_urbs and in case there aren't any available
 * or there are no packets that have been prepared, this function does
 * nothing.
 *
 * The reason why the functionality of sending and preparing URBs is separated
 * is that host controllers don't guarantee the order in which they return
 * inbound and outbound packets to their submitters.
 *
 * This function is only used for implicit feedback endpoints. For endpoints
 * driven by dedicated sync endpoints, URBs are immediately re-submitted
 * from their completion handler.
 */
static void queue_pending_output_urbs(struct snd_usb_endpoint *ep)
{
	while (test_bit(EP_FLAG_RUNNING, &ep->flags)) {

		unsigned long flags;
		struct snd_usb_packet_info *uninitialized_var(packet);
		struct snd_urb_ctx *ctx = NULL;
		struct urb *urb;
		int err, i;

		/* pair a pending packet-size entry with a ready URB, both
		 * consumed under the endpoint lock */
		spin_lock_irqsave(&ep->lock, flags);
		if (ep->next_packet_read_pos != ep->next_packet_write_pos) {
			packet = ep->next_packet + ep->next_packet_read_pos;
			ep->next_packet_read_pos++;
			ep->next_packet_read_pos %= MAX_URBS;

			/* take URB out of FIFO */
			if (!list_empty(&ep->ready_playback_urbs))
				ctx = list_first_entry(&ep->ready_playback_urbs,
					       struct snd_urb_ctx, ready_list);
		}
		spin_unlock_irqrestore(&ep->lock, flags);

		/* nothing to submit right now */
		if (ctx == NULL)
			return;

		list_del_init(&ctx->ready_list);
		urb = ctx->urb;

		/* copy over the length information */
		for (i = 0; i < packet->packets; i++)
			ctx->packet_size[i] = packet->packet_size[i];

		/* call the data handler to fill in playback data */
		prepare_outbound_urb(ep, ctx);

		err = usb_submit_urb(ctx->urb, GFP_ATOMIC);
		if (err < 0)
			snd_printk(KERN_ERR "Unable to submit urb #%d: %d (urb %p)\n",
				   ctx->index, err, ctx->urb);
		else
			set_bit(ctx->index, &ep->active_mask);
	}
}
343 | 343 | ||
/*
 * complete callback for urbs
 *
 * Retires the finished URB towards the PCM layer and immediately
 * resubmits it, unless the endpoint has been stopped, unlinked or the
 * device is going away. Implicit feedback playback URBs are not
 * resubmitted directly but parked on ready_playback_urbs until the
 * feedback source provides packet sizes.
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_endpoint *ep = ctx->ep;
	int err;

	if (unlikely(urb->status == -ENOENT ||		/* unlinked */
		     urb->status == -ENODEV ||		/* device removed */
		     urb->status == -ECONNRESET ||	/* unlinked */
		     urb->status == -ESHUTDOWN ||	/* device disabled */
		     ep->chip->shutdown))		/* device disconnected */
		goto exit_clear;

	if (usb_pipeout(ep->pipe)) {
		retire_outbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
			unsigned long flags;

			/* park the URB; it will be resubmitted by
			 * queue_pending_output_urbs() once packet sizes
			 * arrive from the feedback source */
			spin_lock_irqsave(&ep->lock, flags);
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
			spin_unlock_irqrestore(&ep->lock, flags);
			queue_pending_output_urbs(ep);

			goto exit_clear;
		}

		prepare_outbound_urb(ep, ctx);
	} else {
		retire_inbound_urb(ep, ctx);
		/* can be stopped during retire callback */
		if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
			goto exit_clear;

		prepare_inbound_urb(ep, ctx);
	}

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err == 0)
		return;

	snd_printk(KERN_ERR "cannot submit urb (err = %d)\n", err);
	//snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);

exit_clear:
	clear_bit(ctx->index, &ep->active_mask);
}
397 | 397 | ||
/**
 * snd_usb_add_endpoint: Add an endpoint to an USB audio chip
 *
 * @chip: The chip
 * @alts: The USB host interface
 * @ep_num: The number of the endpoint to use
 * @direction: SNDRV_PCM_STREAM_PLAYBACK or SNDRV_PCM_STREAM_CAPTURE
 * @type: SND_USB_ENDPOINT_TYPE_DATA or SND_USB_ENDPOINT_TYPE_SYNC
 *
 * If the requested endpoint has not been added to the given chip before,
 * a new instance is created. Otherwise, a pointer to the previously
 * created instance is returned. In case of any error, NULL is returned.
 *
 * New endpoints will be added to chip->ep_list and must be freed by
 * calling snd_usb_endpoint_free().
 */
struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
					      struct usb_host_interface *alts,
					      int ep_num, int direction, int type)
{
	struct snd_usb_endpoint *ep;
	int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;

	if (WARN_ON(!alts))
		return NULL;

	mutex_lock(&chip->mutex);

	/* reuse an endpoint already registered for the same
	 * endpoint number / interface / altsetting triple */
	list_for_each_entry(ep, &chip->ep_list, list) {
		if (ep->ep_num == ep_num &&
		    ep->iface == alts->desc.bInterfaceNumber &&
		    ep->alt_idx == alts->desc.bAlternateSetting) {
			snd_printdd(KERN_DEBUG "Re-using EP %x in iface %d,%d @%p\n",
					ep_num, ep->iface, ep->alt_idx, ep);
			goto __exit_unlock;
		}
	}

	snd_printdd(KERN_DEBUG "Creating new %s %s endpoint #%x\n",
		    is_playback ? "playback" : "capture",
		    type == SND_USB_ENDPOINT_TYPE_DATA ? "data" : "sync",
		    ep_num);

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (!ep)
		goto __exit_unlock;

	ep->chip = chip;
	spin_lock_init(&ep->lock);
	ep->type = type;
	ep->ep_num = ep_num;
	ep->iface = alts->desc.bInterfaceNumber;
	ep->alt_idx = alts->desc.bAlternateSetting;
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	/* strip the direction bit before building the pipe */
	ep_num &= USB_ENDPOINT_NUMBER_MASK;

	if (is_playback)
		ep->pipe = usb_sndisocpipe(chip->dev, ep_num);
	else
		ep->pipe = usb_rcvisocpipe(chip->dev, ep_num);

	if (type == SND_USB_ENDPOINT_TYPE_SYNC) {
		/* derive the sync polling interval: prefer the audio-class
		 * bRefresh field, fall back to bInterval or a default */
		if (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
		    get_endpoint(alts, 1)->bRefresh >= 1 &&
		    get_endpoint(alts, 1)->bRefresh <= 9)
			ep->syncinterval = get_endpoint(alts, 1)->bRefresh;
		else if (snd_usb_get_speed(chip->dev) == USB_SPEED_FULL)
			ep->syncinterval = 1;
		else if (get_endpoint(alts, 1)->bInterval >= 1 &&
			 get_endpoint(alts, 1)->bInterval <= 16)
			ep->syncinterval = get_endpoint(alts, 1)->bInterval - 1;
		else
			ep->syncinterval = 3;

		ep->syncmaxsize = le16_to_cpu(get_endpoint(alts, 1)->wMaxPacketSize);
	}

	list_add_tail(&ep->list, &chip->ep_list);

__exit_unlock:
	mutex_unlock(&chip->mutex);

	return ep;
}
482 | 482 | ||
483 | /* | 483 | /* |
484 | * wait until all urbs are processed. | 484 | * wait until all urbs are processed. |
485 | */ | 485 | */ |
486 | static int wait_clear_urbs(struct snd_usb_endpoint *ep) | 486 | static int wait_clear_urbs(struct snd_usb_endpoint *ep) |
487 | { | 487 | { |
488 | unsigned long end_time = jiffies + msecs_to_jiffies(1000); | 488 | unsigned long end_time = jiffies + msecs_to_jiffies(1000); |
489 | int alive; | 489 | int alive; |
490 | 490 | ||
491 | do { | 491 | do { |
492 | alive = bitmap_weight(&ep->active_mask, ep->nurbs); | 492 | alive = bitmap_weight(&ep->active_mask, ep->nurbs); |
493 | if (!alive) | 493 | if (!alive) |
494 | break; | 494 | break; |
495 | 495 | ||
496 | schedule_timeout_uninterruptible(1); | 496 | schedule_timeout_uninterruptible(1); |
497 | } while (time_before(jiffies, end_time)); | 497 | } while (time_before(jiffies, end_time)); |
498 | 498 | ||
499 | if (alive) | 499 | if (alive) |
500 | snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n", | 500 | snd_printk(KERN_ERR "timeout: still %d active urbs on EP #%x\n", |
501 | alive, ep->ep_num); | 501 | alive, ep->ep_num); |
502 | clear_bit(EP_FLAG_STOPPING, &ep->flags); | 502 | clear_bit(EP_FLAG_STOPPING, &ep->flags); |
503 | 503 | ||
504 | return 0; | 504 | return 0; |
505 | } | 505 | } |
506 | 506 | ||
507 | /* sync the pending stop operation; | 507 | /* sync the pending stop operation; |
508 | * this function itself doesn't trigger the stop operation | 508 | * this function itself doesn't trigger the stop operation |
509 | */ | 509 | */ |
510 | void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep) | 510 | void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep) |
511 | { | 511 | { |
512 | if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags)) | 512 | if (ep && test_bit(EP_FLAG_STOPPING, &ep->flags)) |
513 | wait_clear_urbs(ep); | 513 | wait_clear_urbs(ep); |
514 | } | 514 | } |
515 | 515 | ||
/*
 * Unlink all currently active URBs of an endpoint.
 *
 * Clears the RUNNING flag, drops any queued implicit-feedback playback
 * URBs and asks the USB core to unlink every URB still marked active.
 * Returns -EBADFD if the device is shutting down (unless @force), 0
 * otherwise.  The unlinks are asynchronous; use wait_clear_urbs() to
 * wait for completion.
 */
static int deactivate_urbs(struct snd_usb_endpoint *ep, bool force)
{
	unsigned int i;

	if (!force && ep->chip->shutdown) /* to be sure... */
		return -EBADFD;

	clear_bit(EP_FLAG_RUNNING, &ep->flags);

	/* forget any implicit-feedback playback URBs queued for reuse */
	INIT_LIST_HEAD(&ep->ready_playback_urbs);
	ep->next_packet_read_pos = 0;
	ep->next_packet_write_pos = 0;

	for (i = 0; i < ep->nurbs; i++) {
		if (test_bit(i, &ep->active_mask)) {
			/* test_and_set_bit() guarantees each URB is
			 * unlinked only once even under concurrency */
			if (!test_and_set_bit(i, &ep->unlink_mask)) {
				struct urb *u = ep->urb[i].urb;
				usb_unlink_urb(u);
			}
		}
	}

	return 0;
}
543 | 543 | ||
/*
 * Release all URBs (and the coherent sync buffer) of an endpoint.
 *
 * The data callbacks are detached *before* the URBs are unlinked so
 * that any completion still in flight becomes a no-op.  Afterwards the
 * endpoint holds no URB resources and ep->nurbs is reset to 0.
 */
static void release_urbs(struct snd_usb_endpoint *ep, int force)
{
	int i;

	/* route incoming urbs to nirvana */
	ep->retire_data_urb = NULL;
	ep->prepare_data_urb = NULL;

	/* stop urbs: unlink, then block until all completions returned */
	deactivate_urbs(ep, force);
	wait_clear_urbs(ep);

	for (i = 0; i < ep->nurbs; i++)
		release_urb_ctx(&ep->urb[i]);

	/* each sync URB uses a 4-byte slot of the shared coherent buffer */
	if (ep->syncbuf)
		usb_free_coherent(ep->chip->dev, SYNC_URBS * 4,
				  ep->syncbuf, ep->sync_dma);

	ep->syncbuf = NULL;
	ep->nurbs = 0;
}
569 | 569 | ||
/*
 * Configure a data endpoint: derive packet sizes and the URB layout
 * from the PCM parameters, then allocate and initialize the data URBs.
 *
 * Returns 0 on success or -ENOMEM; on failure all partially allocated
 * URBs are released again.
 */
static int data_ep_set_params(struct snd_usb_endpoint *ep,
			      snd_pcm_format_t pcm_format,
			      unsigned int channels,
			      unsigned int period_bytes,
			      unsigned int frames_per_period,
			      unsigned int periods_per_buffer,
			      struct audioformat *fmt,
			      struct snd_usb_endpoint *sync_ep)
{
	unsigned int maxsize, minsize, packs_per_ms, max_packs_per_urb;
	unsigned int max_packs_per_period, urbs_per_period, urb_packs;
	unsigned int max_urbs, i;
	int frame_bits = snd_pcm_format_physical_width(pcm_format) * channels;

	if (pcm_format == SNDRV_PCM_FORMAT_DSD_U16_LE && fmt->dsd_dop) {
		/*
		 * When operating in DSD DOP mode, the size of a sample frame
		 * in hardware differs from the actual physical format width
		 * because we need to make room for the DOP markers.
		 */
		frame_bits += channels << 3;
	}

	ep->datainterval = fmt->datainterval;
	ep->stride = frame_bits >> 3;
	ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;

	/* assume max. frequency is 25% higher than nominal */
	ep->freqmax = ep->freqn + (ep->freqn >> 2);
	/* freqmax is in 16.16 fps format; convert to bytes per packet */
	maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
				>> (16 - ep->datainterval);
	/* but wMaxPacketSize might reduce this */
	if (ep->maxpacksize && ep->maxpacksize < maxsize) {
		/* whatever fits into a max. size packet */
		maxsize = ep->maxpacksize;
		ep->freqmax = (maxsize / (frame_bits >> 3))
				<< (16 - ep->datainterval);
	}

	if (ep->fill_max)
		ep->curpacksize = ep->maxpacksize;
	else
		ep->curpacksize = maxsize;

	/* high/super speed gets (micro)frame-based packing; full speed is
	 * one packet per millisecond */
	if (snd_usb_get_speed(ep->chip->dev) != USB_SPEED_FULL) {
		packs_per_ms = 8 >> ep->datainterval;
		max_packs_per_urb = MAX_PACKS_HS;
	} else {
		packs_per_ms = 1;
		max_packs_per_urb = MAX_PACKS;
	}
	/* with an explicit sync endpoint, keep URBs no longer than one
	 * sync interval so rate feedback can be applied promptly */
	if (sync_ep && !snd_usb_endpoint_implicit_feedback_sink(ep))
		max_packs_per_urb = min(max_packs_per_urb,
					1U << sync_ep->syncinterval);
	max_packs_per_urb = max(1u, max_packs_per_urb >> ep->datainterval);

	/*
	 * Capture endpoints need to use small URBs because there's no way
	 * to tell in advance where the next period will end, and we don't
	 * want the next URB to complete much after the period ends.
	 *
	 * Playback endpoints with implicit sync must use the same parameters
	 * as their corresponding capture endpoint.
	 */
	if (usb_pipein(ep->pipe) ||
			snd_usb_endpoint_implicit_feedback_sink(ep)) {

		/* make capture URBs <= 1 ms and smaller than a period */
		urb_packs = min(max_packs_per_urb, packs_per_ms);
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		ep->nurbs = MAX_URBS;

	/*
	 * Playback endpoints without implicit sync are adjusted so that
	 * a period fits as evenly as possible in the smallest number of
	 * URBs.  The total number of URBs is adjusted to the size of the
	 * ALSA buffer, subject to the MAX_URBS and MAX_QUEUE limits.
	 */
	} else {
		/* determine how small a packet can be */
		minsize = (ep->freqn >> (16 - ep->datainterval)) *
				(frame_bits >> 3);
		/* with sync from device, assume it can be 12% lower */
		if (sync_ep)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);

		/* how many packets will contain an entire ALSA period? */
		max_packs_per_period = DIV_ROUND_UP(period_bytes, minsize);

		/* how many URBs will contain a period? */
		urbs_per_period = DIV_ROUND_UP(max_packs_per_period,
				max_packs_per_urb);
		/* how many packets are needed in each URB? */
		urb_packs = DIV_ROUND_UP(max_packs_per_period, urbs_per_period);

		/* limit the number of frames in a single URB */
		ep->max_urb_frames = DIV_ROUND_UP(frames_per_period,
					urbs_per_period);

		/* try to use enough URBs to contain an entire ALSA buffer */
		max_urbs = min((unsigned) MAX_URBS,
				MAX_QUEUE * packs_per_ms / urb_packs);
		ep->nurbs = min(max_urbs, urbs_per_period * periods_per_buffer);
	}

	/* allocate and initialize data urbs */
	for (i = 0; i < ep->nurbs; i++) {
		struct snd_urb_ctx *u = &ep->urb[i];
		u->index = i;
		u->ep = ep;
		u->packets = urb_packs;
		u->buffer_size = maxsize * u->packets;

		if (fmt->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++; /* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;

		u->urb->transfer_buffer =
			usb_alloc_coherent(ep->chip->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = ep->pipe;
		u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << ep->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
		INIT_LIST_HEAD(&u->ready_list);
	}

	return 0;

out_of_memory:
	release_urbs(ep, 0);
	return -ENOMEM;
}
713 | 713 | ||
714 | /* | 714 | /* |
715 | * configure a sync endpoint | 715 | * configure a sync endpoint |
716 | */ | 716 | */ |
717 | static int sync_ep_set_params(struct snd_usb_endpoint *ep) | 717 | static int sync_ep_set_params(struct snd_usb_endpoint *ep) |
718 | { | 718 | { |
719 | int i; | 719 | int i; |
720 | 720 | ||
721 | ep->syncbuf = usb_alloc_coherent(ep->chip->dev, SYNC_URBS * 4, | 721 | ep->syncbuf = usb_alloc_coherent(ep->chip->dev, SYNC_URBS * 4, |
722 | GFP_KERNEL, &ep->sync_dma); | 722 | GFP_KERNEL, &ep->sync_dma); |
723 | if (!ep->syncbuf) | 723 | if (!ep->syncbuf) |
724 | return -ENOMEM; | 724 | return -ENOMEM; |
725 | 725 | ||
726 | for (i = 0; i < SYNC_URBS; i++) { | 726 | for (i = 0; i < SYNC_URBS; i++) { |
727 | struct snd_urb_ctx *u = &ep->urb[i]; | 727 | struct snd_urb_ctx *u = &ep->urb[i]; |
728 | u->index = i; | 728 | u->index = i; |
729 | u->ep = ep; | 729 | u->ep = ep; |
730 | u->packets = 1; | 730 | u->packets = 1; |
731 | u->urb = usb_alloc_urb(1, GFP_KERNEL); | 731 | u->urb = usb_alloc_urb(1, GFP_KERNEL); |
732 | if (!u->urb) | 732 | if (!u->urb) |
733 | goto out_of_memory; | 733 | goto out_of_memory; |
734 | u->urb->transfer_buffer = ep->syncbuf + i * 4; | 734 | u->urb->transfer_buffer = ep->syncbuf + i * 4; |
735 | u->urb->transfer_dma = ep->sync_dma + i * 4; | 735 | u->urb->transfer_dma = ep->sync_dma + i * 4; |
736 | u->urb->transfer_buffer_length = 4; | 736 | u->urb->transfer_buffer_length = 4; |
737 | u->urb->pipe = ep->pipe; | 737 | u->urb->pipe = ep->pipe; |
738 | u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; | 738 | u->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP; |
739 | u->urb->number_of_packets = 1; | 739 | u->urb->number_of_packets = 1; |
740 | u->urb->interval = 1 << ep->syncinterval; | 740 | u->urb->interval = 1 << ep->syncinterval; |
741 | u->urb->context = u; | 741 | u->urb->context = u; |
742 | u->urb->complete = snd_complete_urb; | 742 | u->urb->complete = snd_complete_urb; |
743 | } | 743 | } |
744 | 744 | ||
745 | ep->nurbs = SYNC_URBS; | 745 | ep->nurbs = SYNC_URBS; |
746 | 746 | ||
747 | return 0; | 747 | return 0; |
748 | 748 | ||
749 | out_of_memory: | 749 | out_of_memory: |
750 | release_urbs(ep, 0); | 750 | release_urbs(ep, 0); |
751 | return -ENOMEM; | 751 | return -ENOMEM; |
752 | } | 752 | } |
753 | 753 | ||
754 | /** | 754 | /** |
755 | * snd_usb_endpoint_set_params: configure an snd_usb_endpoint | 755 | * snd_usb_endpoint_set_params: configure an snd_usb_endpoint |
756 | * | 756 | * |
757 | * @ep: the snd_usb_endpoint to configure | 757 | * @ep: the snd_usb_endpoint to configure |
758 | * @pcm_format: the audio fomat. | 758 | * @pcm_format: the audio fomat. |
759 | * @channels: the number of audio channels. | 759 | * @channels: the number of audio channels. |
760 | * @period_bytes: the number of bytes in one alsa period. | 760 | * @period_bytes: the number of bytes in one alsa period. |
761 | * @period_frames: the number of frames in one alsa period. | 761 | * @period_frames: the number of frames in one alsa period. |
762 | * @buffer_periods: the number of periods in one alsa buffer. | 762 | * @buffer_periods: the number of periods in one alsa buffer. |
763 | * @rate: the frame rate. | 763 | * @rate: the frame rate. |
764 | * @fmt: the USB audio format information | 764 | * @fmt: the USB audio format information |
765 | * @sync_ep: the sync endpoint to use, if any | 765 | * @sync_ep: the sync endpoint to use, if any |
766 | * | 766 | * |
767 | * Determine the number of URBs to be used on this endpoint. | 767 | * Determine the number of URBs to be used on this endpoint. |
768 | * An endpoint must be configured before it can be started. | 768 | * An endpoint must be configured before it can be started. |
769 | * An endpoint that is already running can not be reconfigured. | 769 | * An endpoint that is already running can not be reconfigured. |
770 | */ | 770 | */ |
771 | int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, | 771 | int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, |
772 | snd_pcm_format_t pcm_format, | 772 | snd_pcm_format_t pcm_format, |
773 | unsigned int channels, | 773 | unsigned int channels, |
774 | unsigned int period_bytes, | 774 | unsigned int period_bytes, |
775 | unsigned int period_frames, | 775 | unsigned int period_frames, |
776 | unsigned int buffer_periods, | 776 | unsigned int buffer_periods, |
777 | unsigned int rate, | 777 | unsigned int rate, |
778 | struct audioformat *fmt, | 778 | struct audioformat *fmt, |
779 | struct snd_usb_endpoint *sync_ep) | 779 | struct snd_usb_endpoint *sync_ep) |
780 | { | 780 | { |
781 | int err; | 781 | int err; |
782 | 782 | ||
783 | if (ep->use_count != 0) { | 783 | if (ep->use_count != 0) { |
784 | snd_printk(KERN_WARNING "Unable to change format on ep #%x: already in use\n", | 784 | snd_printk(KERN_WARNING "Unable to change format on ep #%x: already in use\n", |
785 | ep->ep_num); | 785 | ep->ep_num); |
786 | return -EBUSY; | 786 | return -EBUSY; |
787 | } | 787 | } |
788 | 788 | ||
789 | /* release old buffers, if any */ | 789 | /* release old buffers, if any */ |
790 | release_urbs(ep, 0); | 790 | release_urbs(ep, 0); |
791 | 791 | ||
792 | ep->datainterval = fmt->datainterval; | 792 | ep->datainterval = fmt->datainterval; |
793 | ep->maxpacksize = fmt->maxpacksize; | 793 | ep->maxpacksize = fmt->maxpacksize; |
794 | ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX); | 794 | ep->fill_max = !!(fmt->attributes & UAC_EP_CS_ATTR_FILL_MAX); |
795 | 795 | ||
796 | if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) | 796 | if (snd_usb_get_speed(ep->chip->dev) == USB_SPEED_FULL) |
797 | ep->freqn = get_usb_full_speed_rate(rate); | 797 | ep->freqn = get_usb_full_speed_rate(rate); |
798 | else | 798 | else |
799 | ep->freqn = get_usb_high_speed_rate(rate); | 799 | ep->freqn = get_usb_high_speed_rate(rate); |
800 | 800 | ||
801 | /* calculate the frequency in 16.16 format */ | 801 | /* calculate the frequency in 16.16 format */ |
802 | ep->freqm = ep->freqn; | 802 | ep->freqm = ep->freqn; |
803 | ep->freqshift = INT_MIN; | 803 | ep->freqshift = INT_MIN; |
804 | 804 | ||
805 | ep->phase = 0; | 805 | ep->phase = 0; |
806 | 806 | ||
807 | switch (ep->type) { | 807 | switch (ep->type) { |
808 | case SND_USB_ENDPOINT_TYPE_DATA: | 808 | case SND_USB_ENDPOINT_TYPE_DATA: |
809 | err = data_ep_set_params(ep, pcm_format, channels, | 809 | err = data_ep_set_params(ep, pcm_format, channels, |
810 | period_bytes, period_frames, | 810 | period_bytes, period_frames, |
811 | buffer_periods, fmt, sync_ep); | 811 | buffer_periods, fmt, sync_ep); |
812 | break; | 812 | break; |
813 | case SND_USB_ENDPOINT_TYPE_SYNC: | 813 | case SND_USB_ENDPOINT_TYPE_SYNC: |
814 | err = sync_ep_set_params(ep); | 814 | err = sync_ep_set_params(ep); |
815 | break; | 815 | break; |
816 | default: | 816 | default: |
817 | err = -EINVAL; | 817 | err = -EINVAL; |
818 | } | 818 | } |
819 | 819 | ||
820 | snd_printdd(KERN_DEBUG "Setting params for ep #%x (type %d, %d urbs), ret=%d\n", | 820 | snd_printdd(KERN_DEBUG "Setting params for ep #%x (type %d, %d urbs), ret=%d\n", |
821 | ep->ep_num, ep->type, ep->nurbs, err); | 821 | ep->ep_num, ep->type, ep->nurbs, err); |
822 | 822 | ||
823 | return err; | 823 | return err; |
824 | } | 824 | } |
825 | 825 | ||
/**
 * snd_usb_endpoint_start: start an snd_usb_endpoint
 *
 * @ep: the endpoint to start
 * @can_sleep: flag indicating whether the operation is executed in
 *		non-atomic context
 *
 * A call to this function will increment the use count of the endpoint.
 * In case it is not already running, the URBs for this endpoint will be
 * submitted. Otherwise, this function does nothing.
 *
 * Must be balanced to calls of snd_usb_endpoint_stop().
 *
 * Returns an error if the URB submission failed, 0 in all other cases.
 */
int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep)
{
	int err;
	unsigned int i;

	if (ep->chip->shutdown)
		return -EBADFD;

	/* already running? */
	if (++ep->use_count != 1)
		return 0;

	/* just to be sure: unlink leftovers from a previous run */
	deactivate_urbs(ep, false);
	if (can_sleep)
		wait_clear_urbs(ep);

	ep->active_mask = 0;
	ep->unlink_mask = 0;
	ep->phase = 0;

	snd_usb_endpoint_start_quirk(ep);

	/*
	 * If this endpoint has a data endpoint as implicit feedback source,
	 * don't start the urbs here. Instead, mark them all as available,
	 * wait for the record urbs to return and queue the playback urbs
	 * from that context.
	 */

	set_bit(EP_FLAG_RUNNING, &ep->flags);

	if (snd_usb_endpoint_implicit_feedback_sink(ep)) {
		for (i = 0; i < ep->nurbs; i++) {
			struct snd_urb_ctx *ctx = ep->urb + i;
			list_add_tail(&ctx->ready_list, &ep->ready_playback_urbs);
		}

		return 0;
	}

	for (i = 0; i < ep->nurbs; i++) {
		struct urb *urb = ep->urb[i].urb;

		if (snd_BUG_ON(!urb))
			goto __error;

		if (usb_pipeout(ep->pipe)) {
			prepare_outbound_urb(ep, urb->context);
		} else {
			prepare_inbound_urb(ep, urb->context);
		}

		err = usb_submit_urb(urb, GFP_ATOMIC);
		if (err < 0) {
			snd_printk(KERN_ERR "cannot submit urb %d, error %d: %s\n",
				   i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &ep->active_mask);
	}

	return 0;

__error:
	/* undo the use count increment and unlink anything submitted;
	 * note the error is reported to the caller as -EPIPE regardless
	 * of the underlying submit error code */
	clear_bit(EP_FLAG_RUNNING, &ep->flags);
	ep->use_count--;
	deactivate_urbs(ep, false);
	return -EPIPE;
}
911 | 911 | ||
912 | /** | 912 | /** |
913 | * snd_usb_endpoint_stop: stop an snd_usb_endpoint | 913 | * snd_usb_endpoint_stop: stop an snd_usb_endpoint |
914 | * | 914 | * |
915 | * @ep: the endpoint to stop (may be NULL) | 915 | * @ep: the endpoint to stop (may be NULL) |
916 | * | 916 | * |
917 | * A call to this function will decrement the use count of the endpoint. | 917 | * A call to this function will decrement the use count of the endpoint. |
918 | * In case the last user has requested the endpoint stop, the URBs will | 918 | * In case the last user has requested the endpoint stop, the URBs will |
919 | * actually be deactivated. | 919 | * actually be deactivated. |
920 | * | 920 | * |
921 | * Must be balanced to calls of snd_usb_endpoint_start(). | 921 | * Must be balanced to calls of snd_usb_endpoint_start(). |
922 | * | 922 | * |
923 | * The caller needs to synchronize the pending stop operation via | 923 | * The caller needs to synchronize the pending stop operation via |
924 | * snd_usb_endpoint_sync_pending_stop(). | 924 | * snd_usb_endpoint_sync_pending_stop(). |
925 | */ | 925 | */ |
926 | void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep) | 926 | void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep) |
927 | { | 927 | { |
928 | if (!ep) | 928 | if (!ep) |
929 | return; | 929 | return; |
930 | 930 | ||
931 | if (snd_BUG_ON(ep->use_count == 0)) | 931 | if (snd_BUG_ON(ep->use_count == 0)) |
932 | return; | 932 | return; |
933 | 933 | ||
934 | if (--ep->use_count == 0) { | 934 | if (--ep->use_count == 0) { |
935 | deactivate_urbs(ep, false); | 935 | deactivate_urbs(ep, false); |
936 | ep->data_subs = NULL; | 936 | ep->data_subs = NULL; |
937 | ep->sync_slave = NULL; | 937 | ep->sync_slave = NULL; |
938 | ep->retire_data_urb = NULL; | 938 | ep->retire_data_urb = NULL; |
939 | ep->prepare_data_urb = NULL; | 939 | ep->prepare_data_urb = NULL; |
940 | set_bit(EP_FLAG_STOPPING, &ep->flags); | 940 | set_bit(EP_FLAG_STOPPING, &ep->flags); |
941 | } | 941 | } |
942 | } | 942 | } |
943 | 943 | ||
944 | /** | 944 | /** |
945 | * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint | 945 | * snd_usb_endpoint_deactivate: deactivate an snd_usb_endpoint |
946 | * | 946 | * |
947 | * @ep: the endpoint to deactivate | 947 | * @ep: the endpoint to deactivate |
948 | * | 948 | * |
949 | * If the endpoint is not currently in use, this functions will select the | 949 | * If the endpoint is not currently in use, this functions will |
950 | * alternate interface setting 0 for the interface of this endpoint. | 950 | * deactivate its associated URBs. |
951 | * | 951 | * |
952 | * In case of any active users, this functions does nothing. | 952 | * In case of any active users, this functions does nothing. |
953 | * | ||
954 | * Returns an error if usb_set_interface() failed, 0 in all other | ||
955 | * cases. | ||
956 | */ | 953 | */ |
957 | int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep) | 954 | void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep) |
958 | { | 955 | { |
959 | if (!ep) | 956 | if (!ep) |
960 | return -EINVAL; | 957 | return; |
961 | 958 | ||
962 | if (ep->use_count != 0) | 959 | if (ep->use_count != 0) |
963 | return 0; | 960 | return; |
964 | 961 | ||
965 | deactivate_urbs(ep, true); | 962 | deactivate_urbs(ep, true); |
966 | wait_clear_urbs(ep); | 963 | wait_clear_urbs(ep); |
967 | 964 | ||
968 | clear_bit(EP_FLAG_ACTIVATED, &ep->flags); | 965 | clear_bit(EP_FLAG_ACTIVATED, &ep->flags); |
969 | |||
970 | return 0; | ||
971 | } | 966 | } |
972 | 967 | ||
973 | /** | 968 | /** |
974 | * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint | 969 | * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint |
975 | * | 970 | * |
976 | * @ep: the list header of the endpoint to free | 971 | * @ep: the list header of the endpoint to free |
977 | * | 972 | * |
978 | * This function does not care for the endpoint's use count but will tear | 973 | * This function does not care for the endpoint's use count but will tear |
979 | * down all the streaming URBs immediately and free all resources. | 974 | * down all the streaming URBs immediately and free all resources. |
980 | */ | 975 | */ |
981 | void snd_usb_endpoint_free(struct list_head *head) | 976 | void snd_usb_endpoint_free(struct list_head *head) |
982 | { | 977 | { |
983 | struct snd_usb_endpoint *ep; | 978 | struct snd_usb_endpoint *ep; |
984 | 979 | ||
985 | ep = list_entry(head, struct snd_usb_endpoint, list); | 980 | ep = list_entry(head, struct snd_usb_endpoint, list); |
986 | release_urbs(ep, 1); | 981 | release_urbs(ep, 1); |
987 | kfree(ep); | 982 | kfree(ep); |
988 | } | 983 | } |
989 | 984 | ||
990 | /** | 985 | /** |
991 | * snd_usb_handle_sync_urb: parse an USB sync packet | 986 | * snd_usb_handle_sync_urb: parse an USB sync packet |
992 | * | 987 | * |
993 | * @ep: the endpoint to handle the packet | 988 | * @ep: the endpoint to handle the packet |
994 | * @sender: the sending endpoint | 989 | * @sender: the sending endpoint |
995 | * @urb: the received packet | 990 | * @urb: the received packet |
996 | * | 991 | * |
997 | * This function is called from the context of an endpoint that received | 992 | * This function is called from the context of an endpoint that received |
998 | * the packet and is used to let another endpoint object handle the payload. | 993 | * the packet and is used to let another endpoint object handle the payload. |
999 | */ | 994 | */ |
1000 | void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, | 995 | void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, |
1001 | struct snd_usb_endpoint *sender, | 996 | struct snd_usb_endpoint *sender, |
1002 | const struct urb *urb) | 997 | const struct urb *urb) |
1003 | { | 998 | { |
1004 | int shift; | 999 | int shift; |
1005 | unsigned int f; | 1000 | unsigned int f; |
1006 | unsigned long flags; | 1001 | unsigned long flags; |
1007 | 1002 | ||
1008 | snd_BUG_ON(ep == sender); | 1003 | snd_BUG_ON(ep == sender); |
1009 | 1004 | ||
1010 | /* | 1005 | /* |
1011 | * In case the endpoint is operating in implicit feedback mode, prepare | 1006 | * In case the endpoint is operating in implicit feedback mode, prepare |
1012 | * a new outbound URB that has the same layout as the received packet | 1007 | * a new outbound URB that has the same layout as the received packet |
1013 | * and add it to the list of pending urbs. queue_pending_output_urbs() | 1008 | * and add it to the list of pending urbs. queue_pending_output_urbs() |
1014 | * will take care of them later. | 1009 | * will take care of them later. |
1015 | */ | 1010 | */ |
1016 | if (snd_usb_endpoint_implicit_feedback_sink(ep) && | 1011 | if (snd_usb_endpoint_implicit_feedback_sink(ep) && |
1017 | ep->use_count != 0) { | 1012 | ep->use_count != 0) { |
1018 | 1013 | ||
1019 | /* implicit feedback case */ | 1014 | /* implicit feedback case */ |
1020 | int i, bytes = 0; | 1015 | int i, bytes = 0; |
1021 | struct snd_urb_ctx *in_ctx; | 1016 | struct snd_urb_ctx *in_ctx; |
1022 | struct snd_usb_packet_info *out_packet; | 1017 | struct snd_usb_packet_info *out_packet; |
1023 | 1018 | ||
1024 | in_ctx = urb->context; | 1019 | in_ctx = urb->context; |
1025 | 1020 | ||
1026 | /* Count overall packet size */ | 1021 | /* Count overall packet size */ |
1027 | for (i = 0; i < in_ctx->packets; i++) | 1022 | for (i = 0; i < in_ctx->packets; i++) |
1028 | if (urb->iso_frame_desc[i].status == 0) | 1023 | if (urb->iso_frame_desc[i].status == 0) |
1029 | bytes += urb->iso_frame_desc[i].actual_length; | 1024 | bytes += urb->iso_frame_desc[i].actual_length; |
1030 | 1025 | ||
1031 | /* | 1026 | /* |
1032 | * skip empty packets. At least M-Audio's Fast Track Ultra stops | 1027 | * skip empty packets. At least M-Audio's Fast Track Ultra stops |
1033 | * streaming once it received a 0-byte OUT URB | 1028 | * streaming once it received a 0-byte OUT URB |
1034 | */ | 1029 | */ |
1035 | if (bytes == 0) | 1030 | if (bytes == 0) |
1036 | return; | 1031 | return; |
1037 | 1032 | ||
1038 | spin_lock_irqsave(&ep->lock, flags); | 1033 | spin_lock_irqsave(&ep->lock, flags); |
1039 | out_packet = ep->next_packet + ep->next_packet_write_pos; | 1034 | out_packet = ep->next_packet + ep->next_packet_write_pos; |
1040 | 1035 | ||
1041 | /* | 1036 | /* |
1042 | * Iterate through the inbound packet and prepare the lengths | 1037 | * Iterate through the inbound packet and prepare the lengths |
1043 | * for the output packet. The OUT packet we are about to send | 1038 | * for the output packet. The OUT packet we are about to send |
1044 | * will have the same amount of payload bytes per stride as the | 1039 | * will have the same amount of payload bytes per stride as the |
1045 | * IN packet we just received. Since the actual size is scaled | 1040 | * IN packet we just received. Since the actual size is scaled |
1046 | * by the stride, use the sender stride to calculate the length | 1041 | * by the stride, use the sender stride to calculate the length |
1047 | * in case the number of channels differ between the implicitly | 1042 | * in case the number of channels differ between the implicitly |
1048 | * fed-back endpoint and the synchronizing endpoint. | 1043 | * fed-back endpoint and the synchronizing endpoint. |
1049 | */ | 1044 | */ |
1050 | 1045 | ||
1051 | out_packet->packets = in_ctx->packets; | 1046 | out_packet->packets = in_ctx->packets; |
1052 | for (i = 0; i < in_ctx->packets; i++) { | 1047 | for (i = 0; i < in_ctx->packets; i++) { |
1053 | if (urb->iso_frame_desc[i].status == 0) | 1048 | if (urb->iso_frame_desc[i].status == 0) |
1054 | out_packet->packet_size[i] = | 1049 | out_packet->packet_size[i] = |
1055 | urb->iso_frame_desc[i].actual_length / sender->stride; | 1050 | urb->iso_frame_desc[i].actual_length / sender->stride; |
1056 | else | 1051 | else |
1057 | out_packet->packet_size[i] = 0; | 1052 | out_packet->packet_size[i] = 0; |
1058 | } | 1053 | } |
1059 | 1054 | ||
1060 | ep->next_packet_write_pos++; | 1055 | ep->next_packet_write_pos++; |
1061 | ep->next_packet_write_pos %= MAX_URBS; | 1056 | ep->next_packet_write_pos %= MAX_URBS; |
1062 | spin_unlock_irqrestore(&ep->lock, flags); | 1057 | spin_unlock_irqrestore(&ep->lock, flags); |
1063 | queue_pending_output_urbs(ep); | 1058 | queue_pending_output_urbs(ep); |
1064 | 1059 | ||
1065 | return; | 1060 | return; |
1066 | } | 1061 | } |
1067 | 1062 | ||
1068 | /* | 1063 | /* |
1069 | * process after playback sync complete | 1064 | * process after playback sync complete |
1070 | * | 1065 | * |
1071 | * Full speed devices report feedback values in 10.14 format as samples | 1066 | * Full speed devices report feedback values in 10.14 format as samples |
1072 | * per frame, high speed devices in 16.16 format as samples per | 1067 | * per frame, high speed devices in 16.16 format as samples per |
1073 | * microframe. | 1068 | * microframe. |
1074 | * | 1069 | * |
1075 | * Because the Audio Class 1 spec was written before USB 2.0, many high | 1070 | * Because the Audio Class 1 spec was written before USB 2.0, many high |
1076 | * speed devices use a wrong interpretation, some others use an | 1071 | * speed devices use a wrong interpretation, some others use an |
1077 | * entirely different format. | 1072 | * entirely different format. |
1078 | * | 1073 | * |
1079 | * Therefore, we cannot predict what format any particular device uses | 1074 | * Therefore, we cannot predict what format any particular device uses |
1080 | * and must detect it automatically. | 1075 | * and must detect it automatically. |
1081 | */ | 1076 | */ |
1082 | 1077 | ||
1083 | if (urb->iso_frame_desc[0].status != 0 || | 1078 | if (urb->iso_frame_desc[0].status != 0 || |
1084 | urb->iso_frame_desc[0].actual_length < 3) | 1079 | urb->iso_frame_desc[0].actual_length < 3) |
1085 | return; | 1080 | return; |
1086 | 1081 | ||
1087 | f = le32_to_cpup(urb->transfer_buffer); | 1082 | f = le32_to_cpup(urb->transfer_buffer); |
1088 | if (urb->iso_frame_desc[0].actual_length == 3) | 1083 | if (urb->iso_frame_desc[0].actual_length == 3) |
1089 | f &= 0x00ffffff; | 1084 | f &= 0x00ffffff; |
1090 | else | 1085 | else |
1091 | f &= 0x0fffffff; | 1086 | f &= 0x0fffffff; |
1092 | 1087 | ||
1093 | if (f == 0) | 1088 | if (f == 0) |
1094 | return; | 1089 | return; |
1095 | 1090 | ||
1096 | if (unlikely(ep->freqshift == INT_MIN)) { | 1091 | if (unlikely(ep->freqshift == INT_MIN)) { |
1097 | /* | 1092 | /* |
1098 | * The first time we see a feedback value, determine its format | 1093 | * The first time we see a feedback value, determine its format |
1099 | * by shifting it left or right until it matches the nominal | 1094 | * by shifting it left or right until it matches the nominal |
1100 | * frequency value. This assumes that the feedback does not | 1095 | * frequency value. This assumes that the feedback does not |
1101 | * differ from the nominal value more than +50% or -25%. | 1096 | * differ from the nominal value more than +50% or -25%. |
1102 | */ | 1097 | */ |
1103 | shift = 0; | 1098 | shift = 0; |
1104 | while (f < ep->freqn - ep->freqn / 4) { | 1099 | while (f < ep->freqn - ep->freqn / 4) { |
1105 | f <<= 1; | 1100 | f <<= 1; |
1106 | shift++; | 1101 | shift++; |
1107 | } | 1102 | } |
1108 | while (f > ep->freqn + ep->freqn / 2) { | 1103 | while (f > ep->freqn + ep->freqn / 2) { |
1109 | f >>= 1; | 1104 | f >>= 1; |
1110 | shift--; | 1105 | shift--; |
1111 | } | 1106 | } |
1112 | ep->freqshift = shift; | 1107 | ep->freqshift = shift; |
1113 | } else if (ep->freqshift >= 0) | 1108 | } else if (ep->freqshift >= 0) |
1114 | f <<= ep->freqshift; | 1109 | f <<= ep->freqshift; |
1115 | else | 1110 | else |
1116 | f >>= -ep->freqshift; | 1111 | f >>= -ep->freqshift; |
1117 | 1112 | ||
1118 | if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) { | 1113 | if (likely(f >= ep->freqn - ep->freqn / 8 && f <= ep->freqmax)) { |
1119 | /* | 1114 | /* |
1120 | * If the frequency looks valid, set it. | 1115 | * If the frequency looks valid, set it. |
1121 | * This value is referred to in prepare_playback_urb(). | 1116 | * This value is referred to in prepare_playback_urb(). |
1122 | */ | 1117 | */ |
1123 | spin_lock_irqsave(&ep->lock, flags); | 1118 | spin_lock_irqsave(&ep->lock, flags); |
1124 | ep->freqm = f; | 1119 | ep->freqm = f; |
1125 | spin_unlock_irqrestore(&ep->lock, flags); | 1120 | spin_unlock_irqrestore(&ep->lock, flags); |
1126 | } else { | 1121 | } else { |
1127 | /* | 1122 | /* |
1128 | * Out of range; maybe the shift value is wrong. | 1123 | * Out of range; maybe the shift value is wrong. |
1129 | * Reset it so that we autodetect again the next time. | 1124 | * Reset it so that we autodetect again the next time. |
1130 | */ | 1125 | */ |
1131 | ep->freqshift = INT_MIN; | 1126 | ep->freqshift = INT_MIN; |
1132 | } | 1127 | } |
1133 | } | 1128 | } |
1134 | 1129 | ||
1135 | 1130 |
sound/usb/endpoint.h
1 | #ifndef __USBAUDIO_ENDPOINT_H | 1 | #ifndef __USBAUDIO_ENDPOINT_H |
2 | #define __USBAUDIO_ENDPOINT_H | 2 | #define __USBAUDIO_ENDPOINT_H |
3 | 3 | ||
4 | #define SND_USB_ENDPOINT_TYPE_DATA 0 | 4 | #define SND_USB_ENDPOINT_TYPE_DATA 0 |
5 | #define SND_USB_ENDPOINT_TYPE_SYNC 1 | 5 | #define SND_USB_ENDPOINT_TYPE_SYNC 1 |
6 | 6 | ||
7 | struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip, | 7 | struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip, |
8 | struct usb_host_interface *alts, | 8 | struct usb_host_interface *alts, |
9 | int ep_num, int direction, int type); | 9 | int ep_num, int direction, int type); |
10 | 10 | ||
11 | int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, | 11 | int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, |
12 | snd_pcm_format_t pcm_format, | 12 | snd_pcm_format_t pcm_format, |
13 | unsigned int channels, | 13 | unsigned int channels, |
14 | unsigned int period_bytes, | 14 | unsigned int period_bytes, |
15 | unsigned int period_frames, | 15 | unsigned int period_frames, |
16 | unsigned int buffer_periods, | 16 | unsigned int buffer_periods, |
17 | unsigned int rate, | 17 | unsigned int rate, |
18 | struct audioformat *fmt, | 18 | struct audioformat *fmt, |
19 | struct snd_usb_endpoint *sync_ep); | 19 | struct snd_usb_endpoint *sync_ep); |
20 | 20 | ||
21 | int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep); | 21 | int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, bool can_sleep); |
22 | void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep); | 22 | void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep); |
23 | void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep); | 23 | void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep); |
24 | int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); | 24 | int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); |
25 | int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); | 25 | void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); |
26 | void snd_usb_endpoint_free(struct list_head *head); | 26 | void snd_usb_endpoint_free(struct list_head *head); |
27 | 27 | ||
28 | int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep); | 28 | int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep); |
29 | int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); | 29 | int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); |
30 | 30 | ||
31 | void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, | 31 | void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, |
32 | struct snd_usb_endpoint *sender, | 32 | struct snd_usb_endpoint *sender, |
33 | const struct urb *urb); | 33 | const struct urb *urb); |
34 | 34 | ||
35 | #endif /* __USBAUDIO_ENDPOINT_H */ | 35 | #endif /* __USBAUDIO_ENDPOINT_H */ |
36 | 36 |