Commit 9f0cbea0d8cc47801b853d3c61d0e17475b0cc89

Authored by Segher Boessenkool
Committed by Paul Mackerras
1 parent c6d4267ece

[POWERPC] Implement atomic{,64}_{read,set}() without volatile

Instead, use asm() like all other atomic operations already do.

Also use inline functions instead of macros; this actually
improves code generation (some code becomes a little smaller,
probably because of improved alias information -- just a few
hundred bytes total on a default kernel build, nothing shocking).

Signed-off-by: Segher Boessenkool <segher@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>

Showing 1 changed file with 28 additions and 6 deletions (side-by-side diff)

include/asm-powerpc/atomic.h
... ... @@ -5,7 +5,7 @@
5 5 * PowerPC atomic operations
6 6 */
7 7  
8   -typedef struct { volatile int counter; } atomic_t;
  8 +typedef struct { int counter; } atomic_t;
9 9  
10 10 #ifdef __KERNEL__
11 11 #include <linux/compiler.h>
12 12  
... ... @@ -15,9 +15,20 @@
15 15  
16 16 #define ATOMIC_INIT(i) { (i) }
17 17  
18   -#define atomic_read(v) ((v)->counter)
19   -#define atomic_set(v,i) (((v)->counter) = (i))
  18 +static __inline__ int atomic_read(const atomic_t *v)
  19 +{
  20 + int t;
20 21  
  22 + __asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
  23 +
  24 + return t;
  25 +}
  26 +
  27 +static __inline__ void atomic_set(atomic_t *v, int i)
  28 +{
  29 + __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
  30 +}
  31 +
21 32 static __inline__ void atomic_add(int a, atomic_t *v)
22 33 {
23 34 int t;
24 35  
... ... @@ -240,12 +251,23 @@
240 251  
241 252 #ifdef __powerpc64__
242 253  
243   -typedef struct { volatile long counter; } atomic64_t;
  254 +typedef struct { long counter; } atomic64_t;
244 255  
245 256 #define ATOMIC64_INIT(i) { (i) }
246 257  
247   -#define atomic64_read(v) ((v)->counter)
248   -#define atomic64_set(v,i) (((v)->counter) = (i))
  258 +static __inline__ long atomic64_read(const atomic64_t *v)
  259 +{
  260 + long t;
  261 +
  262 + __asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));
  263 +
  264 + return t;
  265 +}
  266 +
  267 +static __inline__ void atomic64_set(atomic64_t *v, long i)
  268 +{
  269 + __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
  270 +}
249 271  
250 272 static __inline__ void atomic64_add(long a, atomic64_t *v)
251 273 {