Commit 93b465c2e186d96fb90012ba0f9372eb9952e732
Committed by
Ohad Ben-Cohen
1 parent
c3c1250e93
Exists in
master
and in
20 other branches
hwspinlock/core: use a mutex to protect the radix tree
Since we're using non-atomic radix tree allocations, we should be protecting the tree using a mutex and not a spinlock. Non-atomic allocations and process context locking is good enough, as the tree is manipulated only when locks are registered/unregistered/requested/freed. The locks themselves are still protected by spinlocks of course, and mutexes are not involved in the locking/unlocking paths. Cc: <stable@kernel.org> Signed-off-by: Juan Gutierrez <jgutierrez@ti.com> [ohad@wizery.com: rewrite the commit log, #include mutex.h, add minor commentary] [ohad@wizery.com: update register/unregister parts in hwspinlock.txt] Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
Showing 2 changed files with 27 additions and 36 deletions Side-by-side Diff
Documentation/hwspinlock.txt
... | ... | @@ -39,23 +39,20 @@ |
39 | 39 | in case an unused hwspinlock isn't available. Users of this |
40 | 40 | API will usually want to communicate the lock's id to the remote core |
41 | 41 | before it can be used to achieve synchronization. |
42 | - Can be called from an atomic context (this function will not sleep) but | |
43 | - not from within interrupt context. | |
42 | + Should be called from a process context (might sleep). | |
44 | 43 | |
45 | 44 | struct hwspinlock *hwspin_lock_request_specific(unsigned int id); |
46 | 45 | - assign a specific hwspinlock id and return its address, or NULL |
47 | 46 | if that hwspinlock is already in use. Usually board code will |
48 | 47 | be calling this function in order to reserve specific hwspinlock |
49 | 48 | ids for predefined purposes. |
50 | - Can be called from an atomic context (this function will not sleep) but | |
51 | - not from within interrupt context. | |
49 | + Should be called from a process context (might sleep). | |
52 | 50 | |
53 | 51 | int hwspin_lock_free(struct hwspinlock *hwlock); |
54 | 52 | - free a previously-assigned hwspinlock; returns 0 on success, or an |
55 | 53 | appropriate error code on failure (e.g. -EINVAL if the hwspinlock |
56 | 54 | is already free). |
57 | - Can be called from an atomic context (this function will not sleep) but | |
58 | - not from within interrupt context. | |
55 | + Should be called from a process context (might sleep). | |
59 | 56 | |
60 | 57 | int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int timeout); |
61 | 58 | - lock a previously-assigned hwspinlock with a timeout limit (specified in |
62 | 59 | |
... | ... | @@ -232,15 +229,14 @@ |
232 | 229 | |
233 | 230 | int hwspin_lock_register(struct hwspinlock *hwlock); |
234 | 231 | - to be called from the underlying platform-specific implementation, in |
235 | - order to register a new hwspinlock instance. Can be called from an atomic | |
236 | - context (this function will not sleep) but not from within interrupt | |
237 | - context. Returns 0 on success, or appropriate error code on failure. | |
232 | + order to register a new hwspinlock instance. Should be called from | |
233 | + a process context (this function might sleep). | |
234 | + Returns 0 on success, or appropriate error code on failure. | |
238 | 235 | |
239 | 236 | struct hwspinlock *hwspin_lock_unregister(unsigned int id); |
240 | 237 | - to be called from the underlying vendor-specific implementation, in order |
241 | 238 | to unregister an existing (and unused) hwspinlock instance. |
242 | - Can be called from an atomic context (will not sleep) but not from | |
243 | - within interrupt context. | |
239 | + Should be called from a process context (this function might sleep). | |
244 | 240 | Returns the address of hwspinlock on success, or NULL on error (e.g. |
245 | 241 | if the hwspinlock is still in use). |
246 | 242 |
drivers/hwspinlock/hwspinlock_core.c
... | ... | @@ -26,6 +26,7 @@ |
26 | 26 | #include <linux/radix-tree.h> |
27 | 27 | #include <linux/hwspinlock.h> |
28 | 28 | #include <linux/pm_runtime.h> |
29 | +#include <linux/mutex.h> | |
29 | 30 | |
30 | 31 | #include "hwspinlock_internal.h" |
31 | 32 | |
32 | 33 | |
33 | 34 | |
34 | 35 | |
... | ... | @@ -52,11 +53,13 @@ |
52 | 53 | static RADIX_TREE(hwspinlock_tree, GFP_KERNEL); |
53 | 54 | |
54 | 55 | /* |
55 | - * Synchronization of access to the tree is achieved using this spinlock, | |
56 | + * Synchronization of access to the tree is achieved using this mutex, | |
56 | 57 | * as the radix-tree API requires that users provide all synchronisation. |
58 | + * A mutex is needed because we're using non-atomic radix tree allocations. | |
57 | 59 | */ |
58 | -static DEFINE_SPINLOCK(hwspinlock_tree_lock); | |
60 | +static DEFINE_MUTEX(hwspinlock_tree_lock); | |
59 | 61 | |
62 | + | |
60 | 63 | /** |
61 | 64 | * __hwspin_trylock() - attempt to lock a specific hwspinlock |
62 | 65 | * @hwlock: an hwspinlock which we want to trylock |
... | ... | @@ -261,8 +264,7 @@ |
261 | 264 | * This function should be called from the underlying platform-specific |
262 | 265 | * implementation, to register a new hwspinlock instance. |
263 | 266 | * |
264 | - * Can be called from an atomic context (will not sleep) but not from | |
265 | - * within interrupt context. | |
267 | + * Should be called from a process context (might sleep) | |
266 | 268 | * |
267 | 269 | * Returns 0 on success, or an appropriate error code on failure |
268 | 270 | */ |
... | ... | @@ -279,7 +281,7 @@ |
279 | 281 | |
280 | 282 | spin_lock_init(&hwlock->lock); |
281 | 283 | |
282 | - spin_lock(&hwspinlock_tree_lock); | |
284 | + mutex_lock(&hwspinlock_tree_lock); | |
283 | 285 | |
284 | 286 | ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock); |
285 | 287 | if (ret == -EEXIST) |
... | ... | @@ -295,7 +297,7 @@ |
295 | 297 | WARN_ON(tmp != hwlock); |
296 | 298 | |
297 | 299 | out: |
298 | - spin_unlock(&hwspinlock_tree_lock); | |
300 | + mutex_unlock(&hwspinlock_tree_lock); | |
299 | 301 | return ret; |
300 | 302 | } |
301 | 303 | EXPORT_SYMBOL_GPL(hwspin_lock_register); |
... | ... | @@ -307,8 +309,7 @@ |
307 | 309 | * This function should be called from the underlying platform-specific |
308 | 310 | * implementation, to unregister an existing (and unused) hwspinlock. |
309 | 311 | * |
310 | - * Can be called from an atomic context (will not sleep) but not from | |
311 | - * within interrupt context. | |
312 | + * Should be called from a process context (might sleep) | |
312 | 313 | * |
313 | 314 | * Returns the address of hwspinlock @id on success, or NULL on failure |
314 | 315 | */ |
... | ... | @@ -317,7 +318,7 @@ |
317 | 318 | struct hwspinlock *hwlock = NULL; |
318 | 319 | int ret; |
319 | 320 | |
320 | - spin_lock(&hwspinlock_tree_lock); | |
321 | + mutex_lock(&hwspinlock_tree_lock); | |
321 | 322 | |
322 | 323 | /* make sure the hwspinlock is not in use (tag is set) */ |
323 | 324 | ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED); |
... | ... | @@ -333,7 +334,7 @@ |
333 | 334 | } |
334 | 335 | |
335 | 336 | out: |
336 | - spin_unlock(&hwspinlock_tree_lock); | |
337 | + mutex_unlock(&hwspinlock_tree_lock); | |
337 | 338 | return hwlock; |
338 | 339 | } |
339 | 340 | EXPORT_SYMBOL_GPL(hwspin_lock_unregister); |
... | ... | @@ -402,9 +403,7 @@ |
402 | 403 | * to the remote core before it can be used for synchronization (to get the |
403 | 404 | * id of a given hwlock, use hwspin_lock_get_id()). |
404 | 405 | * |
405 | - * Can be called from an atomic context (will not sleep) but not from | |
406 | - * within interrupt context (simply because there is no use case for | |
407 | - * that yet). | |
406 | + * Should be called from a process context (might sleep) | |
408 | 407 | * |
409 | 408 | * Returns the address of the assigned hwspinlock, or NULL on error |
410 | 409 | */ |
... | ... | @@ -413,7 +412,7 @@ |
413 | 412 | struct hwspinlock *hwlock; |
414 | 413 | int ret; |
415 | 414 | |
416 | - spin_lock(&hwspinlock_tree_lock); | |
415 | + mutex_lock(&hwspinlock_tree_lock); | |
417 | 416 | |
418 | 417 | /* look for an unused lock */ |
419 | 418 | ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock, |
... | ... | @@ -433,7 +432,7 @@ |
433 | 432 | hwlock = NULL; |
434 | 433 | |
435 | 434 | out: |
436 | - spin_unlock(&hwspinlock_tree_lock); | |
435 | + mutex_unlock(&hwspinlock_tree_lock); | |
437 | 436 | return hwlock; |
438 | 437 | } |
439 | 438 | EXPORT_SYMBOL_GPL(hwspin_lock_request); |
... | ... | @@ -447,9 +446,7 @@ |
447 | 446 | * Usually early board code will be calling this function in order to |
448 | 447 | * reserve specific hwspinlock ids for predefined purposes. |
449 | 448 | * |
450 | - * Can be called from an atomic context (will not sleep) but not from | |
451 | - * within interrupt context (simply because there is no use case for | |
452 | - * that yet). | |
449 | + * Should be called from a process context (might sleep) | |
453 | 450 | * |
454 | 451 | * Returns the address of the assigned hwspinlock, or NULL on error |
455 | 452 | */ |
... | ... | @@ -458,7 +455,7 @@ |
458 | 455 | struct hwspinlock *hwlock; |
459 | 456 | int ret; |
460 | 457 | |
461 | - spin_lock(&hwspinlock_tree_lock); | |
458 | + mutex_lock(&hwspinlock_tree_lock); | |
462 | 459 | |
463 | 460 | /* make sure this hwspinlock exists */ |
464 | 461 | hwlock = radix_tree_lookup(&hwspinlock_tree, id); |
... | ... | @@ -484,7 +481,7 @@ |
484 | 481 | hwlock = NULL; |
485 | 482 | |
486 | 483 | out: |
487 | - spin_unlock(&hwspinlock_tree_lock); | |
484 | + mutex_unlock(&hwspinlock_tree_lock); | |
488 | 485 | return hwlock; |
489 | 486 | } |
490 | 487 | EXPORT_SYMBOL_GPL(hwspin_lock_request_specific); |
... | ... | @@ -497,9 +494,7 @@ |
497 | 494 | * Should only be called with an @hwlock that was retrieved from |
498 | 495 | * an earlier call to omap_hwspin_lock_request{_specific}. |
499 | 496 | * |
500 | - * Can be called from an atomic context (will not sleep) but not from | |
501 | - * within interrupt context (simply because there is no use case for | |
502 | - * that yet). | |
497 | + * Should be called from a process context (might sleep) | |
503 | 498 | * |
504 | 499 | * Returns 0 on success, or an appropriate error code on failure |
505 | 500 | */ |
... | ... | @@ -513,7 +508,7 @@ |
513 | 508 | return -EINVAL; |
514 | 509 | } |
515 | 510 | |
516 | - spin_lock(&hwspinlock_tree_lock); | |
511 | + mutex_lock(&hwspinlock_tree_lock); | |
517 | 512 | |
518 | 513 | /* make sure the hwspinlock is used */ |
519 | 514 | ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id, |
... | ... | @@ -540,7 +535,7 @@ |
540 | 535 | module_put(hwlock->dev->driver->owner); |
541 | 536 | |
542 | 537 | out: |
543 | - spin_unlock(&hwspinlock_tree_lock); | |
538 | + mutex_unlock(&hwspinlock_tree_lock); | |
544 | 539 | return ret; |
545 | 540 | } |
546 | 541 | EXPORT_SYMBOL_GPL(hwspin_lock_free); |