Commit 7e44c0b56b07a5e34de9943cfb2fee72e71a9f0e
1 parent
eaf76e0d02
Exists in
master
and in
7 other branches
firewire: cdev: fix memory leak in an error path
If copy_from_user() in an FW_CDEV_IOC_SEND_RESPONSE ioctl fails, the inbound_transaction_resource instance is no longer referenced and needs to be freed.

Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
Showing 1 changed file with 8 additions and 3 deletions Inline Diff
drivers/firewire/core-cdev.c
1 | /* | 1 | /* |
2 | * Char device for device raw access | 2 | * Char device for device raw access |
3 | * | 3 | * |
4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> | 4 | * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net> |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
17 | * along with this program; if not, write to the Free Software Foundation, | 17 | * along with this program; if not, write to the Free Software Foundation, |
18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 18 | * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/compat.h> | 21 | #include <linux/compat.h> |
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
23 | #include <linux/device.h> | 23 | #include <linux/device.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/firewire.h> | 25 | #include <linux/firewire.h> |
26 | #include <linux/firewire-cdev.h> | 26 | #include <linux/firewire-cdev.h> |
27 | #include <linux/idr.h> | 27 | #include <linux/idr.h> |
28 | #include <linux/jiffies.h> | 28 | #include <linux/jiffies.h> |
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <linux/kref.h> | 30 | #include <linux/kref.h> |
31 | #include <linux/mm.h> | 31 | #include <linux/mm.h> |
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/poll.h> | 34 | #include <linux/poll.h> |
35 | #include <linux/preempt.h> | 35 | #include <linux/preempt.h> |
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/time.h> | 37 | #include <linux/time.h> |
38 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
39 | #include <linux/vmalloc.h> | 39 | #include <linux/vmalloc.h> |
40 | #include <linux/wait.h> | 40 | #include <linux/wait.h> |
41 | #include <linux/workqueue.h> | 41 | #include <linux/workqueue.h> |
42 | 42 | ||
43 | #include <asm/system.h> | 43 | #include <asm/system.h> |
44 | 44 | ||
45 | #include "core.h" | 45 | #include "core.h" |
46 | 46 | ||
47 | struct client { | 47 | struct client { |
48 | u32 version; | 48 | u32 version; |
49 | struct fw_device *device; | 49 | struct fw_device *device; |
50 | 50 | ||
51 | spinlock_t lock; | 51 | spinlock_t lock; |
52 | bool in_shutdown; | 52 | bool in_shutdown; |
53 | struct idr resource_idr; | 53 | struct idr resource_idr; |
54 | struct list_head event_list; | 54 | struct list_head event_list; |
55 | wait_queue_head_t wait; | 55 | wait_queue_head_t wait; |
56 | u64 bus_reset_closure; | 56 | u64 bus_reset_closure; |
57 | 57 | ||
58 | struct fw_iso_context *iso_context; | 58 | struct fw_iso_context *iso_context; |
59 | u64 iso_closure; | 59 | u64 iso_closure; |
60 | struct fw_iso_buffer buffer; | 60 | struct fw_iso_buffer buffer; |
61 | unsigned long vm_start; | 61 | unsigned long vm_start; |
62 | 62 | ||
63 | struct list_head link; | 63 | struct list_head link; |
64 | struct kref kref; | 64 | struct kref kref; |
65 | }; | 65 | }; |
66 | 66 | ||
67 | static inline void client_get(struct client *client) | 67 | static inline void client_get(struct client *client) |
68 | { | 68 | { |
69 | kref_get(&client->kref); | 69 | kref_get(&client->kref); |
70 | } | 70 | } |
71 | 71 | ||
72 | static void client_release(struct kref *kref) | 72 | static void client_release(struct kref *kref) |
73 | { | 73 | { |
74 | struct client *client = container_of(kref, struct client, kref); | 74 | struct client *client = container_of(kref, struct client, kref); |
75 | 75 | ||
76 | fw_device_put(client->device); | 76 | fw_device_put(client->device); |
77 | kfree(client); | 77 | kfree(client); |
78 | } | 78 | } |
79 | 79 | ||
80 | static void client_put(struct client *client) | 80 | static void client_put(struct client *client) |
81 | { | 81 | { |
82 | kref_put(&client->kref, client_release); | 82 | kref_put(&client->kref, client_release); |
83 | } | 83 | } |
84 | 84 | ||
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);

/*
 * Common header of every resource tracked in client->resource_idr.
 * @release: teardown callback invoked when the resource is destroyed
 * @handle:  idr id handed out to userspace to name this resource
 */
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};
92 | 92 | ||
93 | struct address_handler_resource { | 93 | struct address_handler_resource { |
94 | struct client_resource resource; | 94 | struct client_resource resource; |
95 | struct fw_address_handler handler; | 95 | struct fw_address_handler handler; |
96 | __u64 closure; | 96 | __u64 closure; |
97 | struct client *client; | 97 | struct client *client; |
98 | }; | 98 | }; |
99 | 99 | ||
100 | struct outbound_transaction_resource { | 100 | struct outbound_transaction_resource { |
101 | struct client_resource resource; | 101 | struct client_resource resource; |
102 | struct fw_transaction transaction; | 102 | struct fw_transaction transaction; |
103 | }; | 103 | }; |
104 | 104 | ||
105 | struct inbound_transaction_resource { | 105 | struct inbound_transaction_resource { |
106 | struct client_resource resource; | 106 | struct client_resource resource; |
107 | struct fw_request *request; | 107 | struct fw_request *request; |
108 | void *data; | 108 | void *data; |
109 | size_t length; | 109 | size_t length; |
110 | }; | 110 | }; |
111 | 111 | ||
112 | struct descriptor_resource { | 112 | struct descriptor_resource { |
113 | struct client_resource resource; | 113 | struct client_resource resource; |
114 | struct fw_descriptor descriptor; | 114 | struct fw_descriptor descriptor; |
115 | u32 data[0]; | 115 | u32 data[0]; |
116 | }; | 116 | }; |
117 | 117 | ||
118 | struct iso_resource { | 118 | struct iso_resource { |
119 | struct client_resource resource; | 119 | struct client_resource resource; |
120 | struct client *client; | 120 | struct client *client; |
121 | /* Schedule work and access todo only with client->lock held. */ | 121 | /* Schedule work and access todo only with client->lock held. */ |
122 | struct delayed_work work; | 122 | struct delayed_work work; |
123 | enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, | 123 | enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC, |
124 | ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; | 124 | ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo; |
125 | int generation; | 125 | int generation; |
126 | u64 channels; | 126 | u64 channels; |
127 | s32 bandwidth; | 127 | s32 bandwidth; |
128 | __be32 transaction_data[2]; | 128 | __be32 transaction_data[2]; |
129 | struct iso_resource_event *e_alloc, *e_dealloc; | 129 | struct iso_resource_event *e_alloc, *e_dealloc; |
130 | }; | 130 | }; |
131 | 131 | ||
132 | static void schedule_iso_resource(struct iso_resource *); | 132 | static void schedule_iso_resource(struct iso_resource *); |
133 | static void release_iso_resource(struct client *, struct client_resource *); | 133 | static void release_iso_resource(struct client *, struct client_resource *); |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * dequeue_event() just kfree()'s the event, so the event has to be | 136 | * dequeue_event() just kfree()'s the event, so the event has to be |
137 | * the first field in a struct XYZ_event. | 137 | * the first field in a struct XYZ_event. |
138 | */ | 138 | */ |
139 | struct event { | 139 | struct event { |
140 | struct { void *data; size_t size; } v[2]; | 140 | struct { void *data; size_t size; } v[2]; |
141 | struct list_head link; | 141 | struct list_head link; |
142 | }; | 142 | }; |
143 | 143 | ||
144 | struct bus_reset_event { | 144 | struct bus_reset_event { |
145 | struct event event; | 145 | struct event event; |
146 | struct fw_cdev_event_bus_reset reset; | 146 | struct fw_cdev_event_bus_reset reset; |
147 | }; | 147 | }; |
148 | 148 | ||
149 | struct outbound_transaction_event { | 149 | struct outbound_transaction_event { |
150 | struct event event; | 150 | struct event event; |
151 | struct client *client; | 151 | struct client *client; |
152 | struct outbound_transaction_resource r; | 152 | struct outbound_transaction_resource r; |
153 | struct fw_cdev_event_response response; | 153 | struct fw_cdev_event_response response; |
154 | }; | 154 | }; |
155 | 155 | ||
156 | struct inbound_transaction_event { | 156 | struct inbound_transaction_event { |
157 | struct event event; | 157 | struct event event; |
158 | struct fw_cdev_event_request request; | 158 | struct fw_cdev_event_request request; |
159 | }; | 159 | }; |
160 | 160 | ||
161 | struct iso_interrupt_event { | 161 | struct iso_interrupt_event { |
162 | struct event event; | 162 | struct event event; |
163 | struct fw_cdev_event_iso_interrupt interrupt; | 163 | struct fw_cdev_event_iso_interrupt interrupt; |
164 | }; | 164 | }; |
165 | 165 | ||
166 | struct iso_resource_event { | 166 | struct iso_resource_event { |
167 | struct event event; | 167 | struct event event; |
168 | struct fw_cdev_event_iso_resource resource; | 168 | struct fw_cdev_event_iso_resource resource; |
169 | }; | 169 | }; |
170 | 170 | ||
171 | static inline void __user *u64_to_uptr(__u64 value) | 171 | static inline void __user *u64_to_uptr(__u64 value) |
172 | { | 172 | { |
173 | return (void __user *)(unsigned long)value; | 173 | return (void __user *)(unsigned long)value; |
174 | } | 174 | } |
175 | 175 | ||
176 | static inline __u64 uptr_to_u64(void __user *ptr) | 176 | static inline __u64 uptr_to_u64(void __user *ptr) |
177 | { | 177 | { |
178 | return (__u64)(unsigned long)ptr; | 178 | return (__u64)(unsigned long)ptr; |
179 | } | 179 | } |
180 | 180 | ||
181 | static int fw_device_op_open(struct inode *inode, struct file *file) | 181 | static int fw_device_op_open(struct inode *inode, struct file *file) |
182 | { | 182 | { |
183 | struct fw_device *device; | 183 | struct fw_device *device; |
184 | struct client *client; | 184 | struct client *client; |
185 | 185 | ||
186 | device = fw_device_get_by_devt(inode->i_rdev); | 186 | device = fw_device_get_by_devt(inode->i_rdev); |
187 | if (device == NULL) | 187 | if (device == NULL) |
188 | return -ENODEV; | 188 | return -ENODEV; |
189 | 189 | ||
190 | if (fw_device_is_shutdown(device)) { | 190 | if (fw_device_is_shutdown(device)) { |
191 | fw_device_put(device); | 191 | fw_device_put(device); |
192 | return -ENODEV; | 192 | return -ENODEV; |
193 | } | 193 | } |
194 | 194 | ||
195 | client = kzalloc(sizeof(*client), GFP_KERNEL); | 195 | client = kzalloc(sizeof(*client), GFP_KERNEL); |
196 | if (client == NULL) { | 196 | if (client == NULL) { |
197 | fw_device_put(device); | 197 | fw_device_put(device); |
198 | return -ENOMEM; | 198 | return -ENOMEM; |
199 | } | 199 | } |
200 | 200 | ||
201 | client->device = device; | 201 | client->device = device; |
202 | spin_lock_init(&client->lock); | 202 | spin_lock_init(&client->lock); |
203 | idr_init(&client->resource_idr); | 203 | idr_init(&client->resource_idr); |
204 | INIT_LIST_HEAD(&client->event_list); | 204 | INIT_LIST_HEAD(&client->event_list); |
205 | init_waitqueue_head(&client->wait); | 205 | init_waitqueue_head(&client->wait); |
206 | kref_init(&client->kref); | 206 | kref_init(&client->kref); |
207 | 207 | ||
208 | file->private_data = client; | 208 | file->private_data = client; |
209 | 209 | ||
210 | mutex_lock(&device->client_list_mutex); | 210 | mutex_lock(&device->client_list_mutex); |
211 | list_add_tail(&client->link, &device->client_list); | 211 | list_add_tail(&client->link, &device->client_list); |
212 | mutex_unlock(&device->client_list_mutex); | 212 | mutex_unlock(&device->client_list_mutex); |
213 | 213 | ||
214 | return 0; | 214 | return 0; |
215 | } | 215 | } |
216 | 216 | ||
217 | static void queue_event(struct client *client, struct event *event, | 217 | static void queue_event(struct client *client, struct event *event, |
218 | void *data0, size_t size0, void *data1, size_t size1) | 218 | void *data0, size_t size0, void *data1, size_t size1) |
219 | { | 219 | { |
220 | unsigned long flags; | 220 | unsigned long flags; |
221 | 221 | ||
222 | event->v[0].data = data0; | 222 | event->v[0].data = data0; |
223 | event->v[0].size = size0; | 223 | event->v[0].size = size0; |
224 | event->v[1].data = data1; | 224 | event->v[1].data = data1; |
225 | event->v[1].size = size1; | 225 | event->v[1].size = size1; |
226 | 226 | ||
227 | spin_lock_irqsave(&client->lock, flags); | 227 | spin_lock_irqsave(&client->lock, flags); |
228 | if (client->in_shutdown) | 228 | if (client->in_shutdown) |
229 | kfree(event); | 229 | kfree(event); |
230 | else | 230 | else |
231 | list_add_tail(&event->link, &client->event_list); | 231 | list_add_tail(&event->link, &client->event_list); |
232 | spin_unlock_irqrestore(&client->lock, flags); | 232 | spin_unlock_irqrestore(&client->lock, flags); |
233 | 233 | ||
234 | wake_up_interruptible(&client->wait); | 234 | wake_up_interruptible(&client->wait); |
235 | } | 235 | } |
236 | 236 | ||
237 | static int dequeue_event(struct client *client, | 237 | static int dequeue_event(struct client *client, |
238 | char __user *buffer, size_t count) | 238 | char __user *buffer, size_t count) |
239 | { | 239 | { |
240 | struct event *event; | 240 | struct event *event; |
241 | size_t size, total; | 241 | size_t size, total; |
242 | int i, ret; | 242 | int i, ret; |
243 | 243 | ||
244 | ret = wait_event_interruptible(client->wait, | 244 | ret = wait_event_interruptible(client->wait, |
245 | !list_empty(&client->event_list) || | 245 | !list_empty(&client->event_list) || |
246 | fw_device_is_shutdown(client->device)); | 246 | fw_device_is_shutdown(client->device)); |
247 | if (ret < 0) | 247 | if (ret < 0) |
248 | return ret; | 248 | return ret; |
249 | 249 | ||
250 | if (list_empty(&client->event_list) && | 250 | if (list_empty(&client->event_list) && |
251 | fw_device_is_shutdown(client->device)) | 251 | fw_device_is_shutdown(client->device)) |
252 | return -ENODEV; | 252 | return -ENODEV; |
253 | 253 | ||
254 | spin_lock_irq(&client->lock); | 254 | spin_lock_irq(&client->lock); |
255 | event = list_first_entry(&client->event_list, struct event, link); | 255 | event = list_first_entry(&client->event_list, struct event, link); |
256 | list_del(&event->link); | 256 | list_del(&event->link); |
257 | spin_unlock_irq(&client->lock); | 257 | spin_unlock_irq(&client->lock); |
258 | 258 | ||
259 | total = 0; | 259 | total = 0; |
260 | for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { | 260 | for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) { |
261 | size = min(event->v[i].size, count - total); | 261 | size = min(event->v[i].size, count - total); |
262 | if (copy_to_user(buffer + total, event->v[i].data, size)) { | 262 | if (copy_to_user(buffer + total, event->v[i].data, size)) { |
263 | ret = -EFAULT; | 263 | ret = -EFAULT; |
264 | goto out; | 264 | goto out; |
265 | } | 265 | } |
266 | total += size; | 266 | total += size; |
267 | } | 267 | } |
268 | ret = total; | 268 | ret = total; |
269 | 269 | ||
270 | out: | 270 | out: |
271 | kfree(event); | 271 | kfree(event); |
272 | 272 | ||
273 | return ret; | 273 | return ret; |
274 | } | 274 | } |
275 | 275 | ||
276 | static ssize_t fw_device_op_read(struct file *file, char __user *buffer, | 276 | static ssize_t fw_device_op_read(struct file *file, char __user *buffer, |
277 | size_t count, loff_t *offset) | 277 | size_t count, loff_t *offset) |
278 | { | 278 | { |
279 | struct client *client = file->private_data; | 279 | struct client *client = file->private_data; |
280 | 280 | ||
281 | return dequeue_event(client, buffer, count); | 281 | return dequeue_event(client, buffer, count); |
282 | } | 282 | } |
283 | 283 | ||
284 | static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, | 284 | static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event, |
285 | struct client *client) | 285 | struct client *client) |
286 | { | 286 | { |
287 | struct fw_card *card = client->device->card; | 287 | struct fw_card *card = client->device->card; |
288 | 288 | ||
289 | spin_lock_irq(&card->lock); | 289 | spin_lock_irq(&card->lock); |
290 | 290 | ||
291 | event->closure = client->bus_reset_closure; | 291 | event->closure = client->bus_reset_closure; |
292 | event->type = FW_CDEV_EVENT_BUS_RESET; | 292 | event->type = FW_CDEV_EVENT_BUS_RESET; |
293 | event->generation = client->device->generation; | 293 | event->generation = client->device->generation; |
294 | event->node_id = client->device->node_id; | 294 | event->node_id = client->device->node_id; |
295 | event->local_node_id = card->local_node->node_id; | 295 | event->local_node_id = card->local_node->node_id; |
296 | event->bm_node_id = 0; /* FIXME: We don't track the BM. */ | 296 | event->bm_node_id = 0; /* FIXME: We don't track the BM. */ |
297 | event->irm_node_id = card->irm_node->node_id; | 297 | event->irm_node_id = card->irm_node->node_id; |
298 | event->root_node_id = card->root_node->node_id; | 298 | event->root_node_id = card->root_node->node_id; |
299 | 299 | ||
300 | spin_unlock_irq(&card->lock); | 300 | spin_unlock_irq(&card->lock); |
301 | } | 301 | } |
302 | 302 | ||
303 | static void for_each_client(struct fw_device *device, | 303 | static void for_each_client(struct fw_device *device, |
304 | void (*callback)(struct client *client)) | 304 | void (*callback)(struct client *client)) |
305 | { | 305 | { |
306 | struct client *c; | 306 | struct client *c; |
307 | 307 | ||
308 | mutex_lock(&device->client_list_mutex); | 308 | mutex_lock(&device->client_list_mutex); |
309 | list_for_each_entry(c, &device->client_list, link) | 309 | list_for_each_entry(c, &device->client_list, link) |
310 | callback(c); | 310 | callback(c); |
311 | mutex_unlock(&device->client_list_mutex); | 311 | mutex_unlock(&device->client_list_mutex); |
312 | } | 312 | } |
313 | 313 | ||
314 | static int schedule_reallocations(int id, void *p, void *data) | 314 | static int schedule_reallocations(int id, void *p, void *data) |
315 | { | 315 | { |
316 | struct client_resource *r = p; | 316 | struct client_resource *r = p; |
317 | 317 | ||
318 | if (r->release == release_iso_resource) | 318 | if (r->release == release_iso_resource) |
319 | schedule_iso_resource(container_of(r, | 319 | schedule_iso_resource(container_of(r, |
320 | struct iso_resource, resource)); | 320 | struct iso_resource, resource)); |
321 | return 0; | 321 | return 0; |
322 | } | 322 | } |
323 | 323 | ||
324 | static void queue_bus_reset_event(struct client *client) | 324 | static void queue_bus_reset_event(struct client *client) |
325 | { | 325 | { |
326 | struct bus_reset_event *e; | 326 | struct bus_reset_event *e; |
327 | 327 | ||
328 | e = kzalloc(sizeof(*e), GFP_KERNEL); | 328 | e = kzalloc(sizeof(*e), GFP_KERNEL); |
329 | if (e == NULL) { | 329 | if (e == NULL) { |
330 | fw_notify("Out of memory when allocating bus reset event\n"); | 330 | fw_notify("Out of memory when allocating bus reset event\n"); |
331 | return; | 331 | return; |
332 | } | 332 | } |
333 | 333 | ||
334 | fill_bus_reset_event(&e->reset, client); | 334 | fill_bus_reset_event(&e->reset, client); |
335 | 335 | ||
336 | queue_event(client, &e->event, | 336 | queue_event(client, &e->event, |
337 | &e->reset, sizeof(e->reset), NULL, 0); | 337 | &e->reset, sizeof(e->reset), NULL, 0); |
338 | 338 | ||
339 | spin_lock_irq(&client->lock); | 339 | spin_lock_irq(&client->lock); |
340 | idr_for_each(&client->resource_idr, schedule_reallocations, client); | 340 | idr_for_each(&client->resource_idr, schedule_reallocations, client); |
341 | spin_unlock_irq(&client->lock); | 341 | spin_unlock_irq(&client->lock); |
342 | } | 342 | } |
343 | 343 | ||
344 | void fw_device_cdev_update(struct fw_device *device) | 344 | void fw_device_cdev_update(struct fw_device *device) |
345 | { | 345 | { |
346 | for_each_client(device, queue_bus_reset_event); | 346 | for_each_client(device, queue_bus_reset_event); |
347 | } | 347 | } |
348 | 348 | ||
349 | static void wake_up_client(struct client *client) | 349 | static void wake_up_client(struct client *client) |
350 | { | 350 | { |
351 | wake_up_interruptible(&client->wait); | 351 | wake_up_interruptible(&client->wait); |
352 | } | 352 | } |
353 | 353 | ||
354 | void fw_device_cdev_remove(struct fw_device *device) | 354 | void fw_device_cdev_remove(struct fw_device *device) |
355 | { | 355 | { |
356 | for_each_client(device, wake_up_client); | 356 | for_each_client(device, wake_up_client); |
357 | } | 357 | } |
358 | 358 | ||
359 | static int ioctl_get_info(struct client *client, void *buffer) | 359 | static int ioctl_get_info(struct client *client, void *buffer) |
360 | { | 360 | { |
361 | struct fw_cdev_get_info *get_info = buffer; | 361 | struct fw_cdev_get_info *get_info = buffer; |
362 | struct fw_cdev_event_bus_reset bus_reset; | 362 | struct fw_cdev_event_bus_reset bus_reset; |
363 | unsigned long ret = 0; | 363 | unsigned long ret = 0; |
364 | 364 | ||
365 | client->version = get_info->version; | 365 | client->version = get_info->version; |
366 | get_info->version = FW_CDEV_VERSION; | 366 | get_info->version = FW_CDEV_VERSION; |
367 | get_info->card = client->device->card->index; | 367 | get_info->card = client->device->card->index; |
368 | 368 | ||
369 | down_read(&fw_device_rwsem); | 369 | down_read(&fw_device_rwsem); |
370 | 370 | ||
371 | if (get_info->rom != 0) { | 371 | if (get_info->rom != 0) { |
372 | void __user *uptr = u64_to_uptr(get_info->rom); | 372 | void __user *uptr = u64_to_uptr(get_info->rom); |
373 | size_t want = get_info->rom_length; | 373 | size_t want = get_info->rom_length; |
374 | size_t have = client->device->config_rom_length * 4; | 374 | size_t have = client->device->config_rom_length * 4; |
375 | 375 | ||
376 | ret = copy_to_user(uptr, client->device->config_rom, | 376 | ret = copy_to_user(uptr, client->device->config_rom, |
377 | min(want, have)); | 377 | min(want, have)); |
378 | } | 378 | } |
379 | get_info->rom_length = client->device->config_rom_length * 4; | 379 | get_info->rom_length = client->device->config_rom_length * 4; |
380 | 380 | ||
381 | up_read(&fw_device_rwsem); | 381 | up_read(&fw_device_rwsem); |
382 | 382 | ||
383 | if (ret != 0) | 383 | if (ret != 0) |
384 | return -EFAULT; | 384 | return -EFAULT; |
385 | 385 | ||
386 | client->bus_reset_closure = get_info->bus_reset_closure; | 386 | client->bus_reset_closure = get_info->bus_reset_closure; |
387 | if (get_info->bus_reset != 0) { | 387 | if (get_info->bus_reset != 0) { |
388 | void __user *uptr = u64_to_uptr(get_info->bus_reset); | 388 | void __user *uptr = u64_to_uptr(get_info->bus_reset); |
389 | 389 | ||
390 | fill_bus_reset_event(&bus_reset, client); | 390 | fill_bus_reset_event(&bus_reset, client); |
391 | if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) | 391 | if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset))) |
392 | return -EFAULT; | 392 | return -EFAULT; |
393 | } | 393 | } |
394 | 394 | ||
395 | return 0; | 395 | return 0; |
396 | } | 396 | } |
397 | 397 | ||
398 | static int add_client_resource(struct client *client, | 398 | static int add_client_resource(struct client *client, |
399 | struct client_resource *resource, gfp_t gfp_mask) | 399 | struct client_resource *resource, gfp_t gfp_mask) |
400 | { | 400 | { |
401 | unsigned long flags; | 401 | unsigned long flags; |
402 | int ret; | 402 | int ret; |
403 | 403 | ||
404 | retry: | 404 | retry: |
405 | if (idr_pre_get(&client->resource_idr, gfp_mask) == 0) | 405 | if (idr_pre_get(&client->resource_idr, gfp_mask) == 0) |
406 | return -ENOMEM; | 406 | return -ENOMEM; |
407 | 407 | ||
408 | spin_lock_irqsave(&client->lock, flags); | 408 | spin_lock_irqsave(&client->lock, flags); |
409 | if (client->in_shutdown) | 409 | if (client->in_shutdown) |
410 | ret = -ECANCELED; | 410 | ret = -ECANCELED; |
411 | else | 411 | else |
412 | ret = idr_get_new(&client->resource_idr, resource, | 412 | ret = idr_get_new(&client->resource_idr, resource, |
413 | &resource->handle); | 413 | &resource->handle); |
414 | if (ret >= 0) { | 414 | if (ret >= 0) { |
415 | client_get(client); | 415 | client_get(client); |
416 | if (resource->release == release_iso_resource) | 416 | if (resource->release == release_iso_resource) |
417 | schedule_iso_resource(container_of(resource, | 417 | schedule_iso_resource(container_of(resource, |
418 | struct iso_resource, resource)); | 418 | struct iso_resource, resource)); |
419 | } | 419 | } |
420 | spin_unlock_irqrestore(&client->lock, flags); | 420 | spin_unlock_irqrestore(&client->lock, flags); |
421 | 421 | ||
422 | if (ret == -EAGAIN) | 422 | if (ret == -EAGAIN) |
423 | goto retry; | 423 | goto retry; |
424 | 424 | ||
425 | return ret < 0 ? ret : 0; | 425 | return ret < 0 ? ret : 0; |
426 | } | 426 | } |
427 | 427 | ||
428 | static int release_client_resource(struct client *client, u32 handle, | 428 | static int release_client_resource(struct client *client, u32 handle, |
429 | client_resource_release_fn_t release, | 429 | client_resource_release_fn_t release, |
430 | struct client_resource **resource) | 430 | struct client_resource **resource) |
431 | { | 431 | { |
432 | struct client_resource *r; | 432 | struct client_resource *r; |
433 | 433 | ||
434 | spin_lock_irq(&client->lock); | 434 | spin_lock_irq(&client->lock); |
435 | if (client->in_shutdown) | 435 | if (client->in_shutdown) |
436 | r = NULL; | 436 | r = NULL; |
437 | else | 437 | else |
438 | r = idr_find(&client->resource_idr, handle); | 438 | r = idr_find(&client->resource_idr, handle); |
439 | if (r && r->release == release) | 439 | if (r && r->release == release) |
440 | idr_remove(&client->resource_idr, handle); | 440 | idr_remove(&client->resource_idr, handle); |
441 | spin_unlock_irq(&client->lock); | 441 | spin_unlock_irq(&client->lock); |
442 | 442 | ||
443 | if (!(r && r->release == release)) | 443 | if (!(r && r->release == release)) |
444 | return -EINVAL; | 444 | return -EINVAL; |
445 | 445 | ||
446 | if (resource) | 446 | if (resource) |
447 | *resource = r; | 447 | *resource = r; |
448 | else | 448 | else |
449 | r->release(client, r); | 449 | r->release(client, r); |
450 | 450 | ||
451 | client_put(client); | 451 | client_put(client); |
452 | 452 | ||
453 | return 0; | 453 | return 0; |
454 | } | 454 | } |
455 | 455 | ||
456 | static void release_transaction(struct client *client, | 456 | static void release_transaction(struct client *client, |
457 | struct client_resource *resource) | 457 | struct client_resource *resource) |
458 | { | 458 | { |
459 | struct outbound_transaction_resource *r = container_of(resource, | 459 | struct outbound_transaction_resource *r = container_of(resource, |
460 | struct outbound_transaction_resource, resource); | 460 | struct outbound_transaction_resource, resource); |
461 | 461 | ||
462 | fw_cancel_transaction(client->device->card, &r->transaction); | 462 | fw_cancel_transaction(client->device->card, &r->transaction); |
463 | } | 463 | } |
464 | 464 | ||
465 | static void complete_transaction(struct fw_card *card, int rcode, | 465 | static void complete_transaction(struct fw_card *card, int rcode, |
466 | void *payload, size_t length, void *data) | 466 | void *payload, size_t length, void *data) |
467 | { | 467 | { |
468 | struct outbound_transaction_event *e = data; | 468 | struct outbound_transaction_event *e = data; |
469 | struct fw_cdev_event_response *rsp = &e->response; | 469 | struct fw_cdev_event_response *rsp = &e->response; |
470 | struct client *client = e->client; | 470 | struct client *client = e->client; |
471 | unsigned long flags; | 471 | unsigned long flags; |
472 | 472 | ||
473 | if (length < rsp->length) | 473 | if (length < rsp->length) |
474 | rsp->length = length; | 474 | rsp->length = length; |
475 | if (rcode == RCODE_COMPLETE) | 475 | if (rcode == RCODE_COMPLETE) |
476 | memcpy(rsp->data, payload, rsp->length); | 476 | memcpy(rsp->data, payload, rsp->length); |
477 | 477 | ||
478 | spin_lock_irqsave(&client->lock, flags); | 478 | spin_lock_irqsave(&client->lock, flags); |
479 | /* | 479 | /* |
480 | * 1. If called while in shutdown, the idr tree must be left untouched. | 480 | * 1. If called while in shutdown, the idr tree must be left untouched. |
481 | * The idr handle will be removed and the client reference will be | 481 | * The idr handle will be removed and the client reference will be |
482 | * dropped later. | 482 | * dropped later. |
483 | * 2. If the call chain was release_client_resource -> | 483 | * 2. If the call chain was release_client_resource -> |
484 | * release_transaction -> complete_transaction (instead of a normal | 484 | * release_transaction -> complete_transaction (instead of a normal |
485 | * conclusion of the transaction), i.e. if this resource was already | 485 | * conclusion of the transaction), i.e. if this resource was already |
486 | * unregistered from the idr, the client reference will be dropped | 486 | * unregistered from the idr, the client reference will be dropped |
487 | * by release_client_resource and we must not drop it here. | 487 | * by release_client_resource and we must not drop it here. |
488 | */ | 488 | */ |
489 | if (!client->in_shutdown && | 489 | if (!client->in_shutdown && |
490 | idr_find(&client->resource_idr, e->r.resource.handle)) { | 490 | idr_find(&client->resource_idr, e->r.resource.handle)) { |
491 | idr_remove(&client->resource_idr, e->r.resource.handle); | 491 | idr_remove(&client->resource_idr, e->r.resource.handle); |
492 | /* Drop the idr's reference */ | 492 | /* Drop the idr's reference */ |
493 | client_put(client); | 493 | client_put(client); |
494 | } | 494 | } |
495 | spin_unlock_irqrestore(&client->lock, flags); | 495 | spin_unlock_irqrestore(&client->lock, flags); |
496 | 496 | ||
497 | rsp->type = FW_CDEV_EVENT_RESPONSE; | 497 | rsp->type = FW_CDEV_EVENT_RESPONSE; |
498 | rsp->rcode = rcode; | 498 | rsp->rcode = rcode; |
499 | 499 | ||
500 | /* | 500 | /* |
501 | * In the case that sizeof(*rsp) doesn't align with the position of the | 501 | * In the case that sizeof(*rsp) doesn't align with the position of the |
502 | * data, and the read is short, preserve an extra copy of the data | 502 | * data, and the read is short, preserve an extra copy of the data |
503 | * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless | 503 | * to stay compatible with a pre-2.6.27 bug. Since the bug is harmless |
504 | * for short reads and some apps depended on it, this is both safe | 504 | * for short reads and some apps depended on it, this is both safe |
505 | * and prudent for compatibility. | 505 | * and prudent for compatibility. |
506 | */ | 506 | */ |
507 | if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) | 507 | if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data)) |
508 | queue_event(client, &e->event, rsp, sizeof(*rsp), | 508 | queue_event(client, &e->event, rsp, sizeof(*rsp), |
509 | rsp->data, rsp->length); | 509 | rsp->data, rsp->length); |
510 | else | 510 | else |
511 | queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, | 511 | queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length, |
512 | NULL, 0); | 512 | NULL, 0); |
513 | 513 | ||
514 | /* Drop the transaction callback's reference */ | 514 | /* Drop the transaction callback's reference */ |
515 | client_put(client); | 515 | client_put(client); |
516 | } | 516 | } |
517 | 517 | ||
/*
 * Set up and submit an outbound transaction on behalf of user space.
 *
 * Copies the outbound payload from user space, registers the transaction
 * as a client resource (release_transaction) so it can be shut down when
 * the file descriptor is closed, takes a client reference for the
 * completion callback, and hands the request to fw_send_request().
 *
 * Returns 0 on success or a negative errno; on any failure path the
 * event allocation is freed and nothing has been submitted.
 */
static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	/*
	 * Length check: at most 4096 bytes and at most the per-speed
	 * limit (512 << speed); stream packets are exempt here.
	 */
	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	/* Event struct and payload buffer share a single allocation. */
	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	/* A zero data pointer means there is no payload to copy in. */
	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	/* From here on, e is owned by the resource/transaction machinery. */
	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	/* Get a reference for the transaction callback */
	client_get(client);

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}
563 | 563 | ||
564 | static int ioctl_send_request(struct client *client, void *buffer) | 564 | static int ioctl_send_request(struct client *client, void *buffer) |
565 | { | 565 | { |
566 | struct fw_cdev_send_request *request = buffer; | 566 | struct fw_cdev_send_request *request = buffer; |
567 | 567 | ||
568 | switch (request->tcode) { | 568 | switch (request->tcode) { |
569 | case TCODE_WRITE_QUADLET_REQUEST: | 569 | case TCODE_WRITE_QUADLET_REQUEST: |
570 | case TCODE_WRITE_BLOCK_REQUEST: | 570 | case TCODE_WRITE_BLOCK_REQUEST: |
571 | case TCODE_READ_QUADLET_REQUEST: | 571 | case TCODE_READ_QUADLET_REQUEST: |
572 | case TCODE_READ_BLOCK_REQUEST: | 572 | case TCODE_READ_BLOCK_REQUEST: |
573 | case TCODE_LOCK_MASK_SWAP: | 573 | case TCODE_LOCK_MASK_SWAP: |
574 | case TCODE_LOCK_COMPARE_SWAP: | 574 | case TCODE_LOCK_COMPARE_SWAP: |
575 | case TCODE_LOCK_FETCH_ADD: | 575 | case TCODE_LOCK_FETCH_ADD: |
576 | case TCODE_LOCK_LITTLE_ADD: | 576 | case TCODE_LOCK_LITTLE_ADD: |
577 | case TCODE_LOCK_BOUNDED_ADD: | 577 | case TCODE_LOCK_BOUNDED_ADD: |
578 | case TCODE_LOCK_WRAP_ADD: | 578 | case TCODE_LOCK_WRAP_ADD: |
579 | case TCODE_LOCK_VENDOR_DEPENDENT: | 579 | case TCODE_LOCK_VENDOR_DEPENDENT: |
580 | break; | 580 | break; |
581 | default: | 581 | default: |
582 | return -EINVAL; | 582 | return -EINVAL; |
583 | } | 583 | } |
584 | 584 | ||
585 | return init_request(client, request, client->device->node_id, | 585 | return init_request(client, request, client->device->node_id, |
586 | client->device->max_speed); | 586 | client->device->max_speed); |
587 | } | 587 | } |
588 | 588 | ||
589 | static void release_request(struct client *client, | 589 | static void release_request(struct client *client, |
590 | struct client_resource *resource) | 590 | struct client_resource *resource) |
591 | { | 591 | { |
592 | struct inbound_transaction_resource *r = container_of(resource, | 592 | struct inbound_transaction_resource *r = container_of(resource, |
593 | struct inbound_transaction_resource, resource); | 593 | struct inbound_transaction_resource, resource); |
594 | 594 | ||
595 | fw_send_response(client->device->card, r->request, | 595 | fw_send_response(client->device->card, r->request, |
596 | RCODE_CONFLICT_ERROR); | 596 | RCODE_CONFLICT_ERROR); |
597 | kfree(r); | 597 | kfree(r); |
598 | } | 598 | } |
599 | 599 | ||
/*
 * fw_address_handler callback: an inbound request hit an address region
 * that user space allocated with ioctl_allocate().
 *
 * Wraps the fw_request in an inbound_transaction_resource — so user
 * space can answer it later via ioctl_send_response(), or so it is
 * answered with an error by release_request() on fd shutdown — and
 * queues an FW_CDEV_EVENT_REQUEST event for the client.
 *
 * May be called in atomic context, hence GFP_ATOMIC allocations.  On
 * any failure the request is answered immediately with
 * RCODE_CONFLICT_ERROR.
 */
static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, int speed,
			   unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	int ret;

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	/* kfree(NULL) is a no-op, so the joint failure path is safe. */
	if (r == NULL || e == NULL)
		goto failed;

	r->request = request;
	r->data = payload;
	r->length = length;

	/* From here on, r is owned by the client's resource idr. */
	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	e->request.type = FW_CDEV_EVENT_REQUEST;
	e->request.tcode = tcode;
	e->request.offset = offset;
	e->request.length = length;
	e->request.handle = r->resource.handle;
	e->request.closure = handler->closure;

	queue_event(handler->client, &e->event,
		    &e->request, sizeof(e->request), payload, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	fw_send_response(card, request, RCODE_CONFLICT_ERROR);
}
641 | 641 | ||
642 | static void release_address_handler(struct client *client, | 642 | static void release_address_handler(struct client *client, |
643 | struct client_resource *resource) | 643 | struct client_resource *resource) |
644 | { | 644 | { |
645 | struct address_handler_resource *r = | 645 | struct address_handler_resource *r = |
646 | container_of(resource, struct address_handler_resource, resource); | 646 | container_of(resource, struct address_handler_resource, resource); |
647 | 647 | ||
648 | fw_core_remove_address_handler(&r->handler); | 648 | fw_core_remove_address_handler(&r->handler); |
649 | kfree(r); | 649 | kfree(r); |
650 | } | 650 | } |
651 | 651 | ||
652 | static int ioctl_allocate(struct client *client, void *buffer) | 652 | static int ioctl_allocate(struct client *client, void *buffer) |
653 | { | 653 | { |
654 | struct fw_cdev_allocate *request = buffer; | 654 | struct fw_cdev_allocate *request = buffer; |
655 | struct address_handler_resource *r; | 655 | struct address_handler_resource *r; |
656 | struct fw_address_region region; | 656 | struct fw_address_region region; |
657 | int ret; | 657 | int ret; |
658 | 658 | ||
659 | r = kmalloc(sizeof(*r), GFP_KERNEL); | 659 | r = kmalloc(sizeof(*r), GFP_KERNEL); |
660 | if (r == NULL) | 660 | if (r == NULL) |
661 | return -ENOMEM; | 661 | return -ENOMEM; |
662 | 662 | ||
663 | region.start = request->offset; | 663 | region.start = request->offset; |
664 | region.end = request->offset + request->length; | 664 | region.end = request->offset + request->length; |
665 | r->handler.length = request->length; | 665 | r->handler.length = request->length; |
666 | r->handler.address_callback = handle_request; | 666 | r->handler.address_callback = handle_request; |
667 | r->handler.callback_data = r; | 667 | r->handler.callback_data = r; |
668 | r->closure = request->closure; | 668 | r->closure = request->closure; |
669 | r->client = client; | 669 | r->client = client; |
670 | 670 | ||
671 | ret = fw_core_add_address_handler(&r->handler, ®ion); | 671 | ret = fw_core_add_address_handler(&r->handler, ®ion); |
672 | if (ret < 0) { | 672 | if (ret < 0) { |
673 | kfree(r); | 673 | kfree(r); |
674 | return ret; | 674 | return ret; |
675 | } | 675 | } |
676 | 676 | ||
677 | r->resource.release = release_address_handler; | 677 | r->resource.release = release_address_handler; |
678 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); | 678 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); |
679 | if (ret < 0) { | 679 | if (ret < 0) { |
680 | release_address_handler(client, &r->resource); | 680 | release_address_handler(client, &r->resource); |
681 | return ret; | 681 | return ret; |
682 | } | 682 | } |
683 | request->handle = r->resource.handle; | 683 | request->handle = r->resource.handle; |
684 | 684 | ||
685 | return 0; | 685 | return 0; |
686 | } | 686 | } |
687 | 687 | ||
688 | static int ioctl_deallocate(struct client *client, void *buffer) | 688 | static int ioctl_deallocate(struct client *client, void *buffer) |
689 | { | 689 | { |
690 | struct fw_cdev_deallocate *request = buffer; | 690 | struct fw_cdev_deallocate *request = buffer; |
691 | 691 | ||
692 | return release_client_resource(client, request->handle, | 692 | return release_client_resource(client, request->handle, |
693 | release_address_handler, NULL); | 693 | release_address_handler, NULL); |
694 | } | 694 | } |
695 | 695 | ||
696 | static int ioctl_send_response(struct client *client, void *buffer) | 696 | static int ioctl_send_response(struct client *client, void *buffer) |
697 | { | 697 | { |
698 | struct fw_cdev_send_response *request = buffer; | 698 | struct fw_cdev_send_response *request = buffer; |
699 | struct client_resource *resource; | 699 | struct client_resource *resource; |
700 | struct inbound_transaction_resource *r; | 700 | struct inbound_transaction_resource *r; |
701 | int ret = 0; | ||
701 | 702 | ||
702 | if (release_client_resource(client, request->handle, | 703 | if (release_client_resource(client, request->handle, |
703 | release_request, &resource) < 0) | 704 | release_request, &resource) < 0) |
704 | return -EINVAL; | 705 | return -EINVAL; |
705 | 706 | ||
706 | r = container_of(resource, struct inbound_transaction_resource, | 707 | r = container_of(resource, struct inbound_transaction_resource, |
707 | resource); | 708 | resource); |
708 | if (request->length < r->length) | 709 | if (request->length < r->length) |
709 | r->length = request->length; | 710 | r->length = request->length; |
710 | if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) | ||
711 | return -EFAULT; | ||
712 | 711 | ||
712 | if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) { | ||
713 | ret = -EFAULT; | ||
714 | goto out; | ||
715 | } | ||
716 | |||
713 | fw_send_response(client->device->card, r->request, request->rcode); | 717 | fw_send_response(client->device->card, r->request, request->rcode); |
718 | out: | ||
714 | kfree(r); | 719 | kfree(r); |
715 | 720 | ||
716 | return 0; | 721 | return ret; |
717 | } | 722 | } |
718 | 723 | ||
719 | static int ioctl_initiate_bus_reset(struct client *client, void *buffer) | 724 | static int ioctl_initiate_bus_reset(struct client *client, void *buffer) |
720 | { | 725 | { |
721 | struct fw_cdev_initiate_bus_reset *request = buffer; | 726 | struct fw_cdev_initiate_bus_reset *request = buffer; |
722 | int short_reset; | 727 | int short_reset; |
723 | 728 | ||
724 | short_reset = (request->type == FW_CDEV_SHORT_RESET); | 729 | short_reset = (request->type == FW_CDEV_SHORT_RESET); |
725 | 730 | ||
726 | return fw_core_initiate_bus_reset(client->device->card, short_reset); | 731 | return fw_core_initiate_bus_reset(client->device->card, short_reset); |
727 | } | 732 | } |
728 | 733 | ||
729 | static void release_descriptor(struct client *client, | 734 | static void release_descriptor(struct client *client, |
730 | struct client_resource *resource) | 735 | struct client_resource *resource) |
731 | { | 736 | { |
732 | struct descriptor_resource *r = | 737 | struct descriptor_resource *r = |
733 | container_of(resource, struct descriptor_resource, resource); | 738 | container_of(resource, struct descriptor_resource, resource); |
734 | 739 | ||
735 | fw_core_remove_descriptor(&r->descriptor); | 740 | fw_core_remove_descriptor(&r->descriptor); |
736 | kfree(r); | 741 | kfree(r); |
737 | } | 742 | } |
738 | 743 | ||
739 | static int ioctl_add_descriptor(struct client *client, void *buffer) | 744 | static int ioctl_add_descriptor(struct client *client, void *buffer) |
740 | { | 745 | { |
741 | struct fw_cdev_add_descriptor *request = buffer; | 746 | struct fw_cdev_add_descriptor *request = buffer; |
742 | struct descriptor_resource *r; | 747 | struct descriptor_resource *r; |
743 | int ret; | 748 | int ret; |
744 | 749 | ||
745 | /* Access policy: Allow this ioctl only on local nodes' device files. */ | 750 | /* Access policy: Allow this ioctl only on local nodes' device files. */ |
746 | if (!client->device->is_local) | 751 | if (!client->device->is_local) |
747 | return -ENOSYS; | 752 | return -ENOSYS; |
748 | 753 | ||
749 | if (request->length > 256) | 754 | if (request->length > 256) |
750 | return -EINVAL; | 755 | return -EINVAL; |
751 | 756 | ||
752 | r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); | 757 | r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL); |
753 | if (r == NULL) | 758 | if (r == NULL) |
754 | return -ENOMEM; | 759 | return -ENOMEM; |
755 | 760 | ||
756 | if (copy_from_user(r->data, | 761 | if (copy_from_user(r->data, |
757 | u64_to_uptr(request->data), request->length * 4)) { | 762 | u64_to_uptr(request->data), request->length * 4)) { |
758 | ret = -EFAULT; | 763 | ret = -EFAULT; |
759 | goto failed; | 764 | goto failed; |
760 | } | 765 | } |
761 | 766 | ||
762 | r->descriptor.length = request->length; | 767 | r->descriptor.length = request->length; |
763 | r->descriptor.immediate = request->immediate; | 768 | r->descriptor.immediate = request->immediate; |
764 | r->descriptor.key = request->key; | 769 | r->descriptor.key = request->key; |
765 | r->descriptor.data = r->data; | 770 | r->descriptor.data = r->data; |
766 | 771 | ||
767 | ret = fw_core_add_descriptor(&r->descriptor); | 772 | ret = fw_core_add_descriptor(&r->descriptor); |
768 | if (ret < 0) | 773 | if (ret < 0) |
769 | goto failed; | 774 | goto failed; |
770 | 775 | ||
771 | r->resource.release = release_descriptor; | 776 | r->resource.release = release_descriptor; |
772 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); | 777 | ret = add_client_resource(client, &r->resource, GFP_KERNEL); |
773 | if (ret < 0) { | 778 | if (ret < 0) { |
774 | fw_core_remove_descriptor(&r->descriptor); | 779 | fw_core_remove_descriptor(&r->descriptor); |
775 | goto failed; | 780 | goto failed; |
776 | } | 781 | } |
777 | request->handle = r->resource.handle; | 782 | request->handle = r->resource.handle; |
778 | 783 | ||
779 | return 0; | 784 | return 0; |
780 | failed: | 785 | failed: |
781 | kfree(r); | 786 | kfree(r); |
782 | 787 | ||
783 | return ret; | 788 | return ret; |
784 | } | 789 | } |
785 | 790 | ||
786 | static int ioctl_remove_descriptor(struct client *client, void *buffer) | 791 | static int ioctl_remove_descriptor(struct client *client, void *buffer) |
787 | { | 792 | { |
788 | struct fw_cdev_remove_descriptor *request = buffer; | 793 | struct fw_cdev_remove_descriptor *request = buffer; |
789 | 794 | ||
790 | return release_client_resource(client, request->handle, | 795 | return release_client_resource(client, request->handle, |
791 | release_descriptor, NULL); | 796 | release_descriptor, NULL); |
792 | } | 797 | } |
793 | 798 | ||
794 | static void iso_callback(struct fw_iso_context *context, u32 cycle, | 799 | static void iso_callback(struct fw_iso_context *context, u32 cycle, |
795 | size_t header_length, void *header, void *data) | 800 | size_t header_length, void *header, void *data) |
796 | { | 801 | { |
797 | struct client *client = data; | 802 | struct client *client = data; |
798 | struct iso_interrupt_event *e; | 803 | struct iso_interrupt_event *e; |
799 | 804 | ||
800 | e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); | 805 | e = kzalloc(sizeof(*e) + header_length, GFP_ATOMIC); |
801 | if (e == NULL) | 806 | if (e == NULL) |
802 | return; | 807 | return; |
803 | 808 | ||
804 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; | 809 | e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; |
805 | e->interrupt.closure = client->iso_closure; | 810 | e->interrupt.closure = client->iso_closure; |
806 | e->interrupt.cycle = cycle; | 811 | e->interrupt.cycle = cycle; |
807 | e->interrupt.header_length = header_length; | 812 | e->interrupt.header_length = header_length; |
808 | memcpy(e->interrupt.header, header, header_length); | 813 | memcpy(e->interrupt.header, header, header_length); |
809 | queue_event(client, &e->event, &e->interrupt, | 814 | queue_event(client, &e->event, &e->interrupt, |
810 | sizeof(e->interrupt) + header_length, NULL, 0); | 815 | sizeof(e->interrupt) + header_length, NULL, 0); |
811 | } | 816 | } |
812 | 817 | ||
813 | static int ioctl_create_iso_context(struct client *client, void *buffer) | 818 | static int ioctl_create_iso_context(struct client *client, void *buffer) |
814 | { | 819 | { |
815 | struct fw_cdev_create_iso_context *request = buffer; | 820 | struct fw_cdev_create_iso_context *request = buffer; |
816 | struct fw_iso_context *context; | 821 | struct fw_iso_context *context; |
817 | 822 | ||
818 | /* We only support one context at this time. */ | 823 | /* We only support one context at this time. */ |
819 | if (client->iso_context != NULL) | 824 | if (client->iso_context != NULL) |
820 | return -EBUSY; | 825 | return -EBUSY; |
821 | 826 | ||
822 | if (request->channel > 63) | 827 | if (request->channel > 63) |
823 | return -EINVAL; | 828 | return -EINVAL; |
824 | 829 | ||
825 | switch (request->type) { | 830 | switch (request->type) { |
826 | case FW_ISO_CONTEXT_RECEIVE: | 831 | case FW_ISO_CONTEXT_RECEIVE: |
827 | if (request->header_size < 4 || (request->header_size & 3)) | 832 | if (request->header_size < 4 || (request->header_size & 3)) |
828 | return -EINVAL; | 833 | return -EINVAL; |
829 | 834 | ||
830 | break; | 835 | break; |
831 | 836 | ||
832 | case FW_ISO_CONTEXT_TRANSMIT: | 837 | case FW_ISO_CONTEXT_TRANSMIT: |
833 | if (request->speed > SCODE_3200) | 838 | if (request->speed > SCODE_3200) |
834 | return -EINVAL; | 839 | return -EINVAL; |
835 | 840 | ||
836 | break; | 841 | break; |
837 | 842 | ||
838 | default: | 843 | default: |
839 | return -EINVAL; | 844 | return -EINVAL; |
840 | } | 845 | } |
841 | 846 | ||
842 | context = fw_iso_context_create(client->device->card, | 847 | context = fw_iso_context_create(client->device->card, |
843 | request->type, | 848 | request->type, |
844 | request->channel, | 849 | request->channel, |
845 | request->speed, | 850 | request->speed, |
846 | request->header_size, | 851 | request->header_size, |
847 | iso_callback, client); | 852 | iso_callback, client); |
848 | if (IS_ERR(context)) | 853 | if (IS_ERR(context)) |
849 | return PTR_ERR(context); | 854 | return PTR_ERR(context); |
850 | 855 | ||
851 | client->iso_closure = request->closure; | 856 | client->iso_closure = request->closure; |
852 | client->iso_context = context; | 857 | client->iso_context = context; |
853 | 858 | ||
854 | /* We only support one context at this time. */ | 859 | /* We only support one context at this time. */ |
855 | request->handle = 0; | 860 | request->handle = 0; |
856 | 861 | ||
857 | return 0; | 862 | return 0; |
858 | } | 863 | } |
859 | 864 | ||
/*
 * Macros for decoding the iso packet control header.
 * Bit layout of the 32-bit control word (see the shifts/masks below):
 * bits 0-15 payload length, bit 16 interrupt flag, bit 17 skip flag,
 * bits 18-19 tag, bits 20-23 sy, bits 24-31 header length.
 */
#define GET_PAYLOAD_LENGTH(v)	((v) & 0xffff)
#define GET_INTERRUPT(v)	(((v) >> 16) & 0x01)
#define GET_SKIP(v)		(((v) >> 17) & 0x01)
#define GET_TAG(v)		(((v) >> 18) & 0x03)
#define GET_SY(v)		(((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v)	(((v) >> 24) & 0xff)
867 | 872 | ||
/*
 * FW_CDEV_IOC_QUEUE_ISO: walk a user-space array of variable-length
 * fw_cdev_iso_packet descriptors and queue each one on the client's iso
 * context.
 *
 * Each descriptor is a control quadlet optionally followed by header
 * quadlets (transmit contexts only), so the array is parsed
 * incrementally: "next" is computed from the current packet's header
 * length.  On return, request->size/packets/data are updated in place
 * to point past the packets actually consumed, and the number of queued
 * packets is returned (or a negative errno on a bad descriptor).
 */
static int ioctl_queue_iso(struct client *client, void *buffer)
{
	struct fw_cdev_queue_iso *request = buffer;
	struct fw_cdev_iso_packet __user *p, *end, *next;
	struct fw_iso_context *ctx = client->iso_context;
	unsigned long payload, buffer_end, header_length;
	u32 control;
	int count;
	struct {
		struct fw_iso_packet packet;
		u8 header[256];	/* bounded by GET_HEADER_LENGTH's 8-bit field */
	} u;

	if (ctx == NULL || request->handle != 0)
		return -EINVAL;

	/*
	 * If the user passes a non-NULL data pointer, has mmap()'ed
	 * the iso buffer, and the pointer points inside the buffer,
	 * we setup the payload pointers accordingly.  Otherwise we
	 * set them both to 0, which will still let packets with
	 * payload_length == 0 through.  In other words, if no packets
	 * use the indirect payload, the iso buffer need not be mapped
	 * and the request->data pointer is ignored.
	 */

	payload = (unsigned long)request->data - client->vm_start;
	buffer_end = client->buffer.page_count << PAGE_SHIFT;
	if (request->data == 0 || client->buffer.pages == NULL ||
	    payload >= buffer_end) {
		payload = 0;
		buffer_end = 0;
	}

	p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);

	/* One access_ok() up front licenses the __copy_from_user below. */
	if (!access_ok(VERIFY_READ, p, request->size))
		return -EFAULT;

	end = (void __user *)p + request->size;
	count = 0;
	while (p < end) {
		if (get_user(control, &p->control))
			return -EFAULT;
		/* Unpack the control quadlet into a kernel fw_iso_packet. */
		u.packet.payload_length = GET_PAYLOAD_LENGTH(control);
		u.packet.interrupt = GET_INTERRUPT(control);
		u.packet.skip = GET_SKIP(control);
		u.packet.tag = GET_TAG(control);
		u.packet.sy = GET_SY(control);
		u.packet.header_length = GET_HEADER_LENGTH(control);

		if (ctx->type == FW_ISO_CONTEXT_TRANSMIT) {
			/* Transmit: headers are supplied inline by the user. */
			header_length = u.packet.header_length;
		} else {
			/*
			 * We require that header_length is a multiple of
			 * the fixed header size, ctx->header_size.
			 */
			if (ctx->header_size == 0) {
				if (u.packet.header_length > 0)
					return -EINVAL;
			} else if (u.packet.header_length % ctx->header_size != 0) {
				return -EINVAL;
			}
			/* Receive: no inline headers follow the control word. */
			header_length = 0;
		}

		/* The next descriptor starts right after this one's headers. */
		next = (struct fw_cdev_iso_packet __user *)
			&p->header[header_length / 4];
		if (next > end)
			return -EINVAL;
		if (__copy_from_user
		    (u.packet.header, p->header, header_length))
			return -EFAULT;
		/* A transmit skip packet must carry no header and no payload. */
		if (u.packet.skip && ctx->type == FW_ISO_CONTEXT_TRANSMIT &&
		    u.packet.header_length + u.packet.payload_length > 0)
			return -EINVAL;
		/* Payload must fit inside the mmap()'ed iso buffer. */
		if (payload + u.packet.payload_length > buffer_end)
			return -EINVAL;

		if (fw_iso_context_queue(ctx, &u.packet,
					 &client->buffer, payload))
			break;

		p = next;
		payload += u.packet.payload_length;
		count++;
	}

	/* Report back how far we got, so user space can resubmit the rest. */
	request->size    -= uptr_to_u64(p) - request->packets;
	request->packets  = uptr_to_u64(p);
	request->data     = client->vm_start + payload;

	return count;
}
963 | 968 | ||
964 | static int ioctl_start_iso(struct client *client, void *buffer) | 969 | static int ioctl_start_iso(struct client *client, void *buffer) |
965 | { | 970 | { |
966 | struct fw_cdev_start_iso *request = buffer; | 971 | struct fw_cdev_start_iso *request = buffer; |
967 | 972 | ||
968 | if (client->iso_context == NULL || request->handle != 0) | 973 | if (client->iso_context == NULL || request->handle != 0) |
969 | return -EINVAL; | 974 | return -EINVAL; |
970 | 975 | ||
971 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { | 976 | if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) { |
972 | if (request->tags == 0 || request->tags > 15) | 977 | if (request->tags == 0 || request->tags > 15) |
973 | return -EINVAL; | 978 | return -EINVAL; |
974 | 979 | ||
975 | if (request->sync > 15) | 980 | if (request->sync > 15) |
976 | return -EINVAL; | 981 | return -EINVAL; |
977 | } | 982 | } |
978 | 983 | ||
979 | return fw_iso_context_start(client->iso_context, request->cycle, | 984 | return fw_iso_context_start(client->iso_context, request->cycle, |
980 | request->sync, request->tags); | 985 | request->sync, request->tags); |
981 | } | 986 | } |
982 | 987 | ||
983 | static int ioctl_stop_iso(struct client *client, void *buffer) | 988 | static int ioctl_stop_iso(struct client *client, void *buffer) |
984 | { | 989 | { |
985 | struct fw_cdev_stop_iso *request = buffer; | 990 | struct fw_cdev_stop_iso *request = buffer; |
986 | 991 | ||
987 | if (client->iso_context == NULL || request->handle != 0) | 992 | if (client->iso_context == NULL || request->handle != 0) |
988 | return -EINVAL; | 993 | return -EINVAL; |
989 | 994 | ||
990 | return fw_iso_context_stop(client->iso_context); | 995 | return fw_iso_context_stop(client->iso_context); |
991 | } | 996 | } |
992 | 997 | ||
/*
 * FW_CDEV_IOC_GET_CYCLE_TIMER: sample the card's bus time together with
 * the local system time.  Interrupts and preemption are disabled so the
 * two samples are taken back-to-back, keeping them correlated.
 */
static int ioctl_get_cycle_timer(struct client *client, void *buffer)
{
	struct fw_cdev_get_cycle_timer *request = buffer;
	struct fw_card *card = client->device->card;
	unsigned long long bus_time;
	struct timeval tv;
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);

	bus_time = card->driver->get_bus_time(card);
	do_gettimeofday(&tv);

	local_irq_restore(flags);
	preempt_enable();

	/* local_time in microseconds since the epoch */
	request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
	/* low 32 bits of bus time hold the cycle timer register value */
	request->cycle_timer = bus_time & 0xffffffff;
	return 0;
}
1014 | 1019 | ||
/*
 * Worker for isochronous channel/bandwidth allocation, deallocation, and
 * reallocation after a bus reset.  Runs holding a client reference taken
 * by whoever scheduled it; that reference is dropped at "out:".
 *
 * r->todo is the small state machine driving this: ALLOC -> REALLOC on
 * success, DEALLOC when the client releases the resource, and the
 * *_ONCE variants for one-shot requests that are freed right here.
 */
static void iso_resource_work(struct work_struct *work)
{
	struct iso_resource_event *e;
	struct iso_resource *r =
			container_of(work, struct iso_resource, work.work);
	struct client *client = r->client;
	int generation, channel, bandwidth, todo;
	bool skip, free, success;

	/* Snapshot generation and todo under the lock. */
	spin_lock_irq(&client->lock);
	generation = client->device->generation;
	todo = r->todo;
	/* Allow 1000ms grace period for other reallocations. */
	if (todo == ISO_RES_ALLOC &&
	    time_is_after_jiffies(client->device->card->reset_jiffies + HZ)) {
		/* Reschedule; take a reference only if not already queued. */
		if (schedule_delayed_work(&r->work, DIV_ROUND_UP(HZ, 3)))
			client_get(client);
		skip = true;
	} else {
		/* We could be called twice within the same generation. */
		skip = todo == ISO_RES_REALLOC &&
		       r->generation == generation;
	}
	/* One-shot and deallocation requests free the resource below. */
	free = todo == ISO_RES_DEALLOC ||
	       todo == ISO_RES_ALLOC_ONCE ||
	       todo == ISO_RES_DEALLOC_ONCE;
	r->generation = generation;
	spin_unlock_irq(&client->lock);

	if (skip)
		goto out;

	bandwidth = r->bandwidth;

	/* Last argument: true = allocate, false = deallocate. */
	fw_iso_resource_manage(client->device->card, generation,
			r->channels, &channel, &bandwidth,
			todo == ISO_RES_ALLOC ||
			todo == ISO_RES_REALLOC ||
			todo == ISO_RES_ALLOC_ONCE,
			r->transaction_data);
	/*
	 * Is this generation outdated already? As long as this resource sticks
	 * in the idr, it will be scheduled again for a newer generation or at
	 * shutdown.
	 */
	if (channel == -EAGAIN &&
	    (todo == ISO_RES_ALLOC || todo == ISO_RES_REALLOC))
		goto out;

	success = channel >= 0 || bandwidth > 0;

	spin_lock_irq(&client->lock);
	/*
	 * Transit from allocation to reallocation, except if the client
	 * requested deallocation in the meantime.
	 */
	if (r->todo == ISO_RES_ALLOC)
		r->todo = ISO_RES_REALLOC;
	/*
	 * Allocation or reallocation failure? Pull this resource out of the
	 * idr and prepare for deletion, unless the client is shutting down.
	 */
	if (r->todo == ISO_RES_REALLOC && !success &&
	    !client->in_shutdown &&
	    idr_find(&client->resource_idr, r->resource.handle)) {
		idr_remove(&client->resource_idr, r->resource.handle);
		/* Drop the reference that the idr entry held. */
		client_put(client);
		free = true;
	}
	spin_unlock_irq(&client->lock);

	/* Remember the single channel we got, for future reallocations. */
	if (todo == ISO_RES_ALLOC && channel >= 0)
		r->channels = 1ULL << channel;

	/* A successful reallocation is silent: no event to user space. */
	if (todo == ISO_RES_REALLOC && success)
		goto out;

	/* Pick the preallocated event matching what we just did. */
	if (todo == ISO_RES_ALLOC || todo == ISO_RES_ALLOC_ONCE) {
		e = r->e_alloc;
		r->e_alloc = NULL;
	} else {
		e = r->e_dealloc;
		r->e_dealloc = NULL;
	}
	e->resource.handle = r->resource.handle;
	e->resource.channel = channel;
	e->resource.bandwidth = bandwidth;

	queue_event(client, &e->event,
		    &e->resource, sizeof(e->resource), NULL, 0);

	if (free) {
		cancel_delayed_work(&r->work);
		/* Whichever event was not consumed above is freed here. */
		kfree(r->e_alloc);
		kfree(r->e_dealloc);
		kfree(r);
	}
 out:
	client_put(client);
}
1115 | 1120 | ||
1116 | static void schedule_iso_resource(struct iso_resource *r) | 1121 | static void schedule_iso_resource(struct iso_resource *r) |
1117 | { | 1122 | { |
1118 | client_get(r->client); | 1123 | client_get(r->client); |
1119 | if (!schedule_delayed_work(&r->work, 0)) | 1124 | if (!schedule_delayed_work(&r->work, 0)) |
1120 | client_put(r->client); | 1125 | client_put(r->client); |
1121 | } | 1126 | } |
1122 | 1127 | ||
/*
 * Resource release hook: mark the resource for deallocation and let
 * iso_resource_work() perform the actual channel/bandwidth release.
 */
static void release_iso_resource(struct client *client,
				 struct client_resource *resource)
{
	struct iso_resource *r =
		container_of(resource, struct iso_resource, resource);

	/* todo is read by the worker under the same lock */
	spin_lock_irq(&client->lock);
	r->todo = ISO_RES_DEALLOC;
	schedule_iso_resource(r);
	spin_unlock_irq(&client->lock);
}
1134 | 1139 | ||
/*
 * Common setup for the four iso-resource ioctls.  Allocates the resource
 * descriptor plus both completion events up front (so the worker never
 * has to allocate), then either registers the resource in the client's
 * idr (ISO_RES_ALLOC) or fires the worker directly for the one-shot and
 * deallocation variants.
 *
 * Returns 0 and stores the resource handle in request->handle, or a
 * negative errno; on failure all three allocations are freed.
 */
static int init_iso_resource(struct client *client,
		struct fw_cdev_allocate_iso_resource *request, int todo)
{
	struct iso_resource_event *e1, *e2;
	struct iso_resource *r;
	int ret;

	/* Must ask for something, and bandwidth must be in range. */
	if ((request->channels == 0 && request->bandwidth == 0) ||
	    request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
	    request->bandwidth < 0)
		return -EINVAL;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	e1 = kmalloc(sizeof(*e1), GFP_KERNEL);
	e2 = kmalloc(sizeof(*e2), GFP_KERNEL);
	if (r == NULL || e1 == NULL || e2 == NULL) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_DELAYED_WORK(&r->work, iso_resource_work);
	r->client = client;
	r->todo = todo;
	r->generation = -1;	/* force first pass in the worker */
	r->channels = request->channels;
	r->bandwidth = request->bandwidth;
	r->e_alloc = e1;
	r->e_dealloc = e2;

	e1->resource.closure = request->closure;
	e1->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_ALLOCATED;
	e2->resource.closure = request->closure;
	e2->resource.type = FW_CDEV_EVENT_ISO_RESOURCE_DEALLOCATED;

	if (todo == ISO_RES_ALLOC) {
		r->resource.release = release_iso_resource;
		/* add_client_resource() schedules the worker on success */
		ret = add_client_resource(client, &r->resource, GFP_KERNEL);
		if (ret < 0)
			goto fail;
	} else {
		/* One-shot / dealloc: not tracked in the idr. */
		r->resource.release = NULL;
		r->resource.handle = -1;
		schedule_iso_resource(r);
	}
	request->handle = r->resource.handle;

	return 0;
 fail:
	kfree(r);
	kfree(e1);
	kfree(e2);

	return ret;
}
1189 | 1194 | ||
1190 | static int ioctl_allocate_iso_resource(struct client *client, void *buffer) | 1195 | static int ioctl_allocate_iso_resource(struct client *client, void *buffer) |
1191 | { | 1196 | { |
1192 | struct fw_cdev_allocate_iso_resource *request = buffer; | 1197 | struct fw_cdev_allocate_iso_resource *request = buffer; |
1193 | 1198 | ||
1194 | return init_iso_resource(client, request, ISO_RES_ALLOC); | 1199 | return init_iso_resource(client, request, ISO_RES_ALLOC); |
1195 | } | 1200 | } |
1196 | 1201 | ||
1197 | static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) | 1202 | static int ioctl_deallocate_iso_resource(struct client *client, void *buffer) |
1198 | { | 1203 | { |
1199 | struct fw_cdev_deallocate *request = buffer; | 1204 | struct fw_cdev_deallocate *request = buffer; |
1200 | 1205 | ||
1201 | return release_client_resource(client, request->handle, | 1206 | return release_client_resource(client, request->handle, |
1202 | release_iso_resource, NULL); | 1207 | release_iso_resource, NULL); |
1203 | } | 1208 | } |
1204 | 1209 | ||
1205 | static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) | 1210 | static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer) |
1206 | { | 1211 | { |
1207 | struct fw_cdev_allocate_iso_resource *request = buffer; | 1212 | struct fw_cdev_allocate_iso_resource *request = buffer; |
1208 | 1213 | ||
1209 | return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE); | 1214 | return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE); |
1210 | } | 1215 | } |
1211 | 1216 | ||
1212 | static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) | 1217 | static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer) |
1213 | { | 1218 | { |
1214 | struct fw_cdev_allocate_iso_resource *request = buffer; | 1219 | struct fw_cdev_allocate_iso_resource *request = buffer; |
1215 | 1220 | ||
1216 | return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); | 1221 | return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE); |
1217 | } | 1222 | } |
1218 | 1223 | ||
/*
 * Returns a speed code: Maximum speed to or from this device,
 * limited by the device's link speed, the local node's link speed,
 * and all PHY port speeds between the two links.
 */
static int ioctl_get_speed(struct client *client, void *buffer)
{
	/* No argument struct; the speed code is the ioctl return value. */
	return client->device->max_speed;
}
1228 | 1233 | ||
1229 | static int ioctl_send_broadcast_request(struct client *client, void *buffer) | 1234 | static int ioctl_send_broadcast_request(struct client *client, void *buffer) |
1230 | { | 1235 | { |
1231 | struct fw_cdev_send_request *request = buffer; | 1236 | struct fw_cdev_send_request *request = buffer; |
1232 | 1237 | ||
1233 | switch (request->tcode) { | 1238 | switch (request->tcode) { |
1234 | case TCODE_WRITE_QUADLET_REQUEST: | 1239 | case TCODE_WRITE_QUADLET_REQUEST: |
1235 | case TCODE_WRITE_BLOCK_REQUEST: | 1240 | case TCODE_WRITE_BLOCK_REQUEST: |
1236 | break; | 1241 | break; |
1237 | default: | 1242 | default: |
1238 | return -EINVAL; | 1243 | return -EINVAL; |
1239 | } | 1244 | } |
1240 | 1245 | ||
1241 | /* Security policy: Only allow accesses to Units Space. */ | 1246 | /* Security policy: Only allow accesses to Units Space. */ |
1242 | if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) | 1247 | if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END) |
1243 | return -EACCES; | 1248 | return -EACCES; |
1244 | 1249 | ||
1245 | return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); | 1250 | return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100); |
1246 | } | 1251 | } |
1247 | 1252 | ||
1248 | static int ioctl_send_stream_packet(struct client *client, void *buffer) | 1253 | static int ioctl_send_stream_packet(struct client *client, void *buffer) |
1249 | { | 1254 | { |
1250 | struct fw_cdev_send_stream_packet *p = buffer; | 1255 | struct fw_cdev_send_stream_packet *p = buffer; |
1251 | struct fw_cdev_send_request request; | 1256 | struct fw_cdev_send_request request; |
1252 | int dest; | 1257 | int dest; |
1253 | 1258 | ||
1254 | if (p->speed > client->device->card->link_speed || | 1259 | if (p->speed > client->device->card->link_speed || |
1255 | p->length > 1024 << p->speed) | 1260 | p->length > 1024 << p->speed) |
1256 | return -EIO; | 1261 | return -EIO; |
1257 | 1262 | ||
1258 | if (p->tag > 3 || p->channel > 63 || p->sy > 15) | 1263 | if (p->tag > 3 || p->channel > 63 || p->sy > 15) |
1259 | return -EINVAL; | 1264 | return -EINVAL; |
1260 | 1265 | ||
1261 | dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy); | 1266 | dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy); |
1262 | request.tcode = TCODE_STREAM_DATA; | 1267 | request.tcode = TCODE_STREAM_DATA; |
1263 | request.length = p->length; | 1268 | request.length = p->length; |
1264 | request.closure = p->closure; | 1269 | request.closure = p->closure; |
1265 | request.data = p->data; | 1270 | request.data = p->data; |
1266 | request.generation = p->generation; | 1271 | request.generation = p->generation; |
1267 | 1272 | ||
1268 | return init_request(client, &request, dest, p->speed); | 1273 | return init_request(client, &request, dest, p->speed); |
1269 | } | 1274 | } |
1270 | 1275 | ||
/*
 * Dispatch table for dispatch_ioctl(): entry N handles the ioctl whose
 * _IOC_NR() is N, so the order of entries here must not be changed.
 */
static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
	ioctl_get_info,
	ioctl_send_request,
	ioctl_allocate,
	ioctl_deallocate,
	ioctl_send_response,
	ioctl_initiate_bus_reset,
	ioctl_add_descriptor,
	ioctl_remove_descriptor,
	ioctl_create_iso_context,
	ioctl_queue_iso,
	ioctl_start_iso,
	ioctl_stop_iso,
	ioctl_get_cycle_timer,
	ioctl_allocate_iso_resource,
	ioctl_deallocate_iso_resource,
	ioctl_allocate_iso_resource_once,
	ioctl_deallocate_iso_resource_once,
	ioctl_get_speed,
	ioctl_send_broadcast_request,
	ioctl_send_stream_packet,
};
1293 | 1298 | ||
1294 | static int dispatch_ioctl(struct client *client, | 1299 | static int dispatch_ioctl(struct client *client, |
1295 | unsigned int cmd, void __user *arg) | 1300 | unsigned int cmd, void __user *arg) |
1296 | { | 1301 | { |
1297 | char buffer[256]; | 1302 | char buffer[256]; |
1298 | int ret; | 1303 | int ret; |
1299 | 1304 | ||
1300 | if (_IOC_TYPE(cmd) != '#' || | 1305 | if (_IOC_TYPE(cmd) != '#' || |
1301 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) | 1306 | _IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers)) |
1302 | return -EINVAL; | 1307 | return -EINVAL; |
1303 | 1308 | ||
1304 | if (_IOC_DIR(cmd) & _IOC_WRITE) { | 1309 | if (_IOC_DIR(cmd) & _IOC_WRITE) { |
1305 | if (_IOC_SIZE(cmd) > sizeof(buffer) || | 1310 | if (_IOC_SIZE(cmd) > sizeof(buffer) || |
1306 | copy_from_user(buffer, arg, _IOC_SIZE(cmd))) | 1311 | copy_from_user(buffer, arg, _IOC_SIZE(cmd))) |
1307 | return -EFAULT; | 1312 | return -EFAULT; |
1308 | } | 1313 | } |
1309 | 1314 | ||
1310 | ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); | 1315 | ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer); |
1311 | if (ret < 0) | 1316 | if (ret < 0) |
1312 | return ret; | 1317 | return ret; |
1313 | 1318 | ||
1314 | if (_IOC_DIR(cmd) & _IOC_READ) { | 1319 | if (_IOC_DIR(cmd) & _IOC_READ) { |
1315 | if (_IOC_SIZE(cmd) > sizeof(buffer) || | 1320 | if (_IOC_SIZE(cmd) > sizeof(buffer) || |
1316 | copy_to_user(arg, buffer, _IOC_SIZE(cmd))) | 1321 | copy_to_user(arg, buffer, _IOC_SIZE(cmd))) |
1317 | return -EFAULT; | 1322 | return -EFAULT; |
1318 | } | 1323 | } |
1319 | 1324 | ||
1320 | return ret; | 1325 | return ret; |
1321 | } | 1326 | } |
1322 | 1327 | ||
1323 | static long fw_device_op_ioctl(struct file *file, | 1328 | static long fw_device_op_ioctl(struct file *file, |
1324 | unsigned int cmd, unsigned long arg) | 1329 | unsigned int cmd, unsigned long arg) |
1325 | { | 1330 | { |
1326 | struct client *client = file->private_data; | 1331 | struct client *client = file->private_data; |
1327 | 1332 | ||
1328 | if (fw_device_is_shutdown(client->device)) | 1333 | if (fw_device_is_shutdown(client->device)) |
1329 | return -ENODEV; | 1334 | return -ENODEV; |
1330 | 1335 | ||
1331 | return dispatch_ioctl(client, cmd, (void __user *) arg); | 1336 | return dispatch_ioctl(client, cmd, (void __user *) arg); |
1332 | } | 1337 | } |
1333 | 1338 | ||
1334 | #ifdef CONFIG_COMPAT | 1339 | #ifdef CONFIG_COMPAT |
1335 | static long fw_device_op_compat_ioctl(struct file *file, | 1340 | static long fw_device_op_compat_ioctl(struct file *file, |
1336 | unsigned int cmd, unsigned long arg) | 1341 | unsigned int cmd, unsigned long arg) |
1337 | { | 1342 | { |
1338 | struct client *client = file->private_data; | 1343 | struct client *client = file->private_data; |
1339 | 1344 | ||
1340 | if (fw_device_is_shutdown(client->device)) | 1345 | if (fw_device_is_shutdown(client->device)) |
1341 | return -ENODEV; | 1346 | return -ENODEV; |
1342 | 1347 | ||
1343 | return dispatch_ioctl(client, cmd, compat_ptr(arg)); | 1348 | return dispatch_ioctl(client, cmd, compat_ptr(arg)); |
1344 | } | 1349 | } |
1345 | #endif | 1350 | #endif |
1346 | 1351 | ||
/*
 * mmap() the client's single isochronous DMA buffer into user space.
 * The mapping must be shared and page-aligned; its VM_WRITE flag picks
 * the DMA direction (writable mapping -> data flows to the device).
 */
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct client *client = file->private_data;
	enum dma_data_direction direction;
	unsigned long size;
	int page_count, ret;

	if (fw_device_is_shutdown(client->device))
		return -ENODEV;

	/* FIXME: We could support multiple buffers, but we don't. */
	if (client->buffer.pages != NULL)
		return -EBUSY;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;

	/* Remembered so queue_iso payload offsets can be resolved. */
	client->vm_start = vma->vm_start;
	size = vma->vm_end - vma->vm_start;
	page_count = size >> PAGE_SHIFT;
	/* Reject mappings that are not a whole number of pages. */
	if (size & ~PAGE_MASK)
		return -EINVAL;

	if (vma->vm_flags & VM_WRITE)
		direction = DMA_TO_DEVICE;
	else
		direction = DMA_FROM_DEVICE;

	ret = fw_iso_buffer_init(&client->buffer, client->device->card,
				 page_count, direction);
	if (ret < 0)
		return ret;

	/* Tear the buffer down again if the mapping itself fails. */
	ret = fw_iso_buffer_map(&client->buffer, vma);
	if (ret < 0)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	return ret;
}
1389 | 1394 | ||
/*
 * idr_for_each() callback used at release time: run the resource's
 * release hook and drop a client reference.  Always returns 0 so the
 * iteration visits every resource.
 */
static int shutdown_resource(int id, void *p, void *data)
{
	struct client_resource *r = p;
	struct client *client = data;

	r->release(client, r);
	client_put(client);

	return 0;
}
1400 | 1405 | ||
/*
 * Final close of a /dev/fw* file: unlink the client from the device,
 * destroy its iso context and DMA buffer, release every remaining
 * resource, free queued events, and drop the file's client reference.
 */
static int fw_device_op_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct event *e, *next_e;

	mutex_lock(&client->device->client_list_mutex);
	list_del(&client->link);
	mutex_unlock(&client->device->client_list_mutex);

	if (client->iso_context)
		fw_iso_context_destroy(client->iso_context);

	if (client->buffer.pages)
		fw_iso_buffer_destroy(&client->buffer, client->device->card);

	/* Freeze client->resource_idr and client->event_list */
	spin_lock_irq(&client->lock);
	client->in_shutdown = true;
	spin_unlock_irq(&client->lock);

	idr_for_each(&client->resource_idr, shutdown_resource, client);
	idr_remove_all(&client->resource_idr);
	idr_destroy(&client->resource_idr);

	/* No new events can arrive now; free whatever is still queued. */
	list_for_each_entry_safe(e, next_e, &client->event_list, link)
		kfree(e);

	client_put(client);

	return 0;
}
1432 | 1437 | ||
1433 | static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) | 1438 | static unsigned int fw_device_op_poll(struct file *file, poll_table * pt) |
1434 | { | 1439 | { |
1435 | struct client *client = file->private_data; | 1440 | struct client *client = file->private_data; |
1436 | unsigned int mask = 0; | 1441 | unsigned int mask = 0; |
1437 | 1442 | ||
1438 | poll_wait(file, &client->wait, pt); | 1443 | poll_wait(file, &client->wait, pt); |
1439 | 1444 | ||
1440 | if (fw_device_is_shutdown(client->device)) | 1445 | if (fw_device_is_shutdown(client->device)) |
1441 | mask |= POLLHUP | POLLERR; | 1446 | mask |= POLLHUP | POLLERR; |
1442 | if (!list_empty(&client->event_list)) | 1447 | if (!list_empty(&client->event_list)) |
1443 | mask |= POLLIN | POLLRDNORM; | 1448 | mask |= POLLIN | POLLRDNORM; |
1444 | 1449 | ||
1445 | return mask; | 1450 | return mask; |
1446 | } | 1451 | } |
1447 | 1452 | ||
1448 | const struct file_operations fw_device_ops = { | 1453 | const struct file_operations fw_device_ops = { |
1449 | .owner = THIS_MODULE, | 1454 | .owner = THIS_MODULE, |
1450 | .open = fw_device_op_open, | 1455 | .open = fw_device_op_open, |
1451 | .read = fw_device_op_read, | 1456 | .read = fw_device_op_read, |
1452 | .unlocked_ioctl = fw_device_op_ioctl, | 1457 | .unlocked_ioctl = fw_device_op_ioctl, |
1453 | .poll = fw_device_op_poll, | 1458 | .poll = fw_device_op_poll, |
1454 | .release = fw_device_op_release, | 1459 | .release = fw_device_op_release, |
1455 | .mmap = fw_device_op_mmap, | 1460 | .mmap = fw_device_op_mmap, |
1456 | 1461 | ||
1457 | #ifdef CONFIG_COMPAT | 1462 | #ifdef CONFIG_COMPAT |
1458 | .compat_ioctl = fw_device_op_compat_ioctl, | 1463 | .compat_ioctl = fw_device_op_compat_ioctl, |
1459 | #endif | 1464 | #endif |