Commit 14e256c107304367eff401d20f2ab9fa72e33136

Authored by Matthew Wilcox
Committed by Kyle McMartin
1 parent 04d472dc83

[PARISC] Update spinlocks from parisc tree

Neaten up the CONFIG_PA20 ifdefs

More merge fixes, this time for SMP

Signed-off-by: Matthew Wilcox <willy@parisc-linux.org>

Prettify the CONFIG_DEBUG_SPINLOCK __SPIN_LOCK_UNLOCKED initializers.

Clean up some warnings with CONFIG_DEBUG_SPINLOCK enabled.

Fix the build with spinlock debugging turned on. The patch is cleaner like
this, too.

Remove the mandatory 16-byte alignment requirement on PA2.0 processors by
using the ldcw,co completer. This provides a nice instruction savings.

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
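
With LDCW defined per-config (see the system.h hunk below), the same __ldcw()
template expands to either instruction form. A sketch of the two expansions,
with illustrative variable names:

    /* Sketch: the two expansions of __ldcw(a), where a points at the
     * lock word (ret and a are illustrative, not from the diff). */
    unsigned ret;
    __asm__ __volatile__("ldcw 0(%1),%0"    : "=r" (ret) : "r" (a)); /* PA1.x: a must be 16-byte aligned */
    __asm__ __volatile__("ldcw,co 0(%1),%0" : "=r" (ret) : "r" (a)); /* PA2.0: natural 4-byte alignment */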

Showing 3 changed files with 30 additions and 14 deletions

include/asm-parisc/spinlock.h
@@ -5,11 +5,6 @@
5 5 #include <asm/processor.h>
6 6 #include <asm/spinlock_types.h>
7 7  
8   -/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
9   - * since it only has load-and-zero. Moreover, at least on some PA processors,
10   - * the semaphore address has to be 16-byte aligned.
11   - */
12   -
13 8 static inline int __raw_spin_is_locked(raw_spinlock_t *x)
14 9 {
15 10 volatile unsigned int *a = __ldcw_align(x);
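
The removed comment still applies: ldcw (load-and-clear word) is the only
atomic read-write primitive, so a free lock holds 1 and a held lock holds 0.
For context, a sketch of the acquire fast path these helpers serve, modeled
on the contemporaneous parisc code rather than taken from this diff:

    static inline void __raw_spin_lock(raw_spinlock_t *x)
    {
            volatile unsigned int *a = __ldcw_align(x);

            /* ldcw returns the old value and zeroes the word: a non-zero
             * return means this CPU just claimed the lock. */
            while (__ldcw(a) == 0)
                    /* Spin read-only until the holder stores 1 back. */
                    while (*a == 0)
                            cpu_relax();
    }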
include/asm-parisc/spinlock_types.h
@@ -6,10 +6,14 @@
6 6 #endif
7 7  
8 8 typedef struct {
  9 +#ifdef CONFIG_PA20
  10 + volatile unsigned int slock;
  11 +# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
  12 +#else
9 13 volatile unsigned int lock[4];
  14 +# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
  15 +#endif
10 16 } raw_spinlock_t;
11   -
12   -#define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
13 17  
14 18 typedef struct {
15 19 raw_spinlock_t lock;
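
Folding __RAW_SPIN_LOCK_UNLOCKED into the #ifdef keeps the initializer in
step with the layout: a single word set to 1 on PA2.0, otherwise all four
candidate words set to 1 so whichever one __ldcw_align() selects reads as
unlocked. A hypothetical declaration (name illustrative) compiles unchanged
on both variants:

    static raw_spinlock_t probe_lock = __RAW_SPIN_LOCK_UNLOCKED;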
include/asm-parisc/system.h
@@ -138,13 +138,7 @@
138 138 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
139 139  
140 140  
141   -/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
142   -#define __ldcw(a) ({ \
143   - unsigned __ret; \
144   - __asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
145   - __ret; \
146   -})
147   -
  141 +#ifndef CONFIG_PA20
148 142 /* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
149 143 and GCC only guarantees 8-byte alignment for stack locals, we can't
150 144 be assured of 16-byte alignment for atomic lock data even if we
151 145 specify "__attribute ((aligned(16)))" in the type declaration. So,
@@ -152,11 +146,34 @@
152 146 we use a struct containing an array of four ints for the atomic lock
153 147 type and dynamically select the 16-byte aligned int from the array
154 148 for the semaphore. */
  149 +
155 150 #define __PA_LDCW_ALIGNMENT 16
156 151 #define __ldcw_align(a) ({ \
157 152 unsigned long __ret = (unsigned long) &(a)->lock[0]; \
158 153 __ret = (__ret + __PA_LDCW_ALIGNMENT - 1) & ~(__PA_LDCW_ALIGNMENT - 1); \
159 154 (volatile unsigned int *) __ret; \
  155 +})
  156 +#define LDCW "ldcw"
  157 +
  158 +#else /*CONFIG_PA20*/
  159 +/* From: "Jim Hull" <jim.hull of hp.com>
  160 + I've attached a summary of the change, but basically, for PA 2.0, as
  161 + long as the ",CO" (coherent operation) completer is specified, then the
  162 + 16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
  163 + they only require "natural" alignment (4-byte for ldcw, 8-byte for
  164 + ldcd). */
  165 +
  166 +#define __PA_LDCW_ALIGNMENT 4
  167 +#define __ldcw_align(a) ((volatile unsigned int *)a)
  168 +#define LDCW "ldcw,co"
  169 +
  170 +#endif /*!CONFIG_PA20*/
  171 +
  172 +/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
  173 +#define __ldcw(a) ({ \
  174 + unsigned __ret; \
  175 + __asm__ __volatile__(LDCW " 0(%1),%0" : "=r" (__ret) : "r" (a)); \
  176 + __ret; \
160 177 })
161 178  
162 179 #ifdef CONFIG_SMP
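
On the non-PA2.0 side, __ldcw_align() is the usual round-up-to-a-power-of-two
idiom applied to the four-int array. A standalone worked example of the
arithmetic, using a hypothetical 8-byte-aligned address:

    #include <stdio.h>

    int main(void)
    {
            /* kmalloc guarantees only 8-byte alignment, so lock[0] may sit
             * at, say, 0x1008; lock[4] then spans 0x1008..0x1017 and must
             * contain one 16-byte-aligned int. */
            unsigned long addr = 0x1008;
            unsigned long aligned = (addr + 16 - 1) & ~(16UL - 1);

            printf("%#lx -> %#lx\n", addr, aligned);    /* 0x1008 -> 0x1010 */
            return 0;
    }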