Commit a57004e1afb6ee03c509f1b1ec74a000682ab93b
Committed by Linus Torvalds
1 parent 8382bf2e72
Exists in master and in 4 other branches
[PATCH] atomic: dec_and_lock use atomic primitives
Convert atomic_dec_and_lock() to use the new atomic primitives.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
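The primitive doing the work here is atomic_add_unless(v, a, u): atomically add a to v unless v already equals u, returning non-zero if the add happened. On architectures without a dedicated implementation it is built from atomic_cmpxchg(), much like the open-coded loop this patch deletes. A minimal sketch of those semantics follows; the function name is mine for illustration, as the kernel defines this per-architecture, typically as a macro:

#include <asm/atomic.h>

/*
 * Sketch of atomic_add_unless() semantics: atomically add @a to @v,
 * so long as @v was not @u.  Returns non-zero iff the add happened.
 */
static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;	/* lost a race; retry with the observed value */
	return c != u;
}

With (a, u) = (-1, 1), as used below, the add is refused exactly when the counter would drop to zero, which is the one case that must fall back to taking the spinlock.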
Showing 1 changed file with 6 additions and 43 deletions
lib/dec_and_lock.c

@@ -1,49 +1,13 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
-#include <asm/system.h>
 
-#ifdef __HAVE_ARCH_CMPXCHG
 /*
  * This is an implementation of the notion of "decrement a
  * reference count, and return locked if it decremented to zero".
  *
- * This implementation can be used on any architecture that
- * has a cmpxchg, and where atomic->value is an int holding
- * the value of the atomic (i.e. the high bits aren't used
- * for a lock or anything like that).
- */
-int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
-{
-	int counter;
-	int newcount;
-
-	for (;;) {
-		counter = atomic_read(atomic);
-		newcount = counter - 1;
-		if (!newcount)
-			break;		/* do it the slow way */
-
-		newcount = cmpxchg(&atomic->counter, counter, newcount);
-		if (newcount == counter)
-			return 0;
-	}
-
-	spin_lock(lock);
-	if (atomic_dec_and_test(atomic))
-		return 1;
-	spin_unlock(lock);
-	return 0;
-}
-#else
-/*
- * This is an architecture-neutral, but slow,
- * implementation of the notion of "decrement
- * a reference count, and return locked if it
- * decremented to zero".
- *
  * NOTE NOTE NOTE! This is _not_ equivalent to
  *
  *	if (atomic_dec_and_test(&atomic)) {
  *		spin_lock(&lock);
  *		...
@@ -52,22 +16,21 @@
  *
  * because the spin-lock and the decrement must be
  * "atomic".
- *
- * This slow version gets the spinlock unconditionally,
- * and releases it if it isn't needed. Architectures
- * are encouraged to come up with better approaches,
- * this is trivially done efficiently using a load-locked
- * store-conditional approach, for example.
  */
 int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
 {
+#ifdef CONFIG_SMP
+	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+#endif
+	/* Otherwise do it the slow way */
 	spin_lock(lock);
 	if (atomic_dec_and_test(atomic))
 		return 1;
 	spin_unlock(lock);
 	return 0;
 }
-#endif
 
 EXPORT_SYMBOL(_atomic_dec_and_lock);
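The NOTE in the comment is the whole point of the helper: decrementing first and locking afterwards leaves a window in which another CPU can observe the count at zero, or look the object up and take a new reference, before the lock is acquired. A typical caller therefore uses the atomic_dec_and_lock() wrapper as in the sketch below; struct foo, foo_list_lock and foo_put() are hypothetical names for illustration, not part of this patch:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

/* Hypothetical refcounted object kept on a lock-protected list. */
struct foo {
	atomic_t refcount;
	struct list_head list;
};

static DEFINE_SPINLOCK(foo_list_lock);

static void foo_put(struct foo *f)
{
	/*
	 * Drop one reference.  If it was the last one, we return with
	 * foo_list_lock held, so no other CPU can find the object on
	 * the list while we unlink and free it.
	 */
	if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
		list_del(&f->list);
		spin_unlock(&foo_list_lock);
		kfree(f);
	}
}

On SMP, the fast path above this slow path avoids the lock entirely whenever the count is greater than 1; on UP the fast path is compiled out, since taking the lock is cheap and the dec-and-test under it is already atomic with respect to the only CPU.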