include/asm-sparc/spinlock.h

/* spinlock.h: 32-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC_SPINLOCK_H
#define __SPARC_SPINLOCK_H

#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

#include <asm/psr.h>

#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)

#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

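/* Test-and-set lock built on ldstub: the instruction atomically loads the
 * lock byte into %g2 and stores 0xff to it.  A zero result means the lock
 * was free and is now ours; otherwise we spin on plain ldub loads in an
 * out-of-line loop (.subsection 2) and only retry the ldstub once the byte
 * reads back as zero.
 */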
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
	"\n1:\n\t"
	"ldstub	[%0], %%g2\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2f\n\t"
	" ldub	[%0], %%g2\n\t"
	".subsection	2\n"
	"2:\n\t"
	"orcc	%%g2, 0x0, %%g0\n\t"
	"bne,a	2b\n\t"
	" ldub	[%0], %%g2\n\t"
	"b,a	1b\n\t"
	".previous\n"
	: /* no outputs */
	: "r" (lock)
	: "g2", "memory", "cc");
}

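/* Single ldstub attempt; returns 1 if the byte was previously zero,
 * i.e. the lock has just been acquired.
 */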
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
			     : "=r" (result)
			     : "r" (lock)
			     : "memory");
	return (result == 0);
}

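/* Release by storing zero into the lock byte; the "memory" clobber keeps
 * the compiler from moving critical-section accesses past the store.
 */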
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
}

/* Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers.  For those circumstances we
 * can "mix" irq-safe locks: any writer needs to take an
 * irq-safe write-lock, but readers can take non-irqsafe
 * read-locks.
 *
 * XXX This might create some problems with my dual spinlock
 * XXX scheme, deadlocks etc. -DaveM
 *
 * Sort of like atomic_t's on Sparc, but even more clever.
 *
 *	------------------------------------
 *	| 24-bit counter           | wlock |  raw_rwlock_t
 *	------------------------------------
 *	 31                        8 7     0
 *
 * wlock signifies that a writer is in, or that somebody is updating the
 * counter.  A writer that successfully acquires the wlock but finds the
 * counter non-zero has to release the lock and wait until both the
 * counter and wlock are zero.
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
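/* The heavy lifting for the rwlock paths is done by out-of-line assembler
 * helpers (___rw_read_enter, ___rw_read_exit, ___rw_read_try,
 * ___rw_write_enter).  The calling convention is non-standard: the lock
 * pointer is pinned in %g1, the ldstub in the call's delay slot grabs the
 * wlock byte (offset 3, the low-order byte on big-endian sparc32) before
 * the helper runs, and %o7 is stashed in %g4 so the helper can put the
 * caller's return address back on the way out.
 */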
static inline void __read_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

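/* A reader briefly holds the wlock byte while it bumps the counter, so
 * interrupts are disabled around __read_lock(): an interrupt handler
 * taking a read lock on the same CPU would otherwise spin on a byte that
 * its own CPU already holds.
 */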
#define __raw_read_lock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_lock(lock); \
	local_irq_restore(flags); \
} while(0)

static inline void __read_unlock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_exit\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
}

#define __raw_read_unlock(lock) \
do {	unsigned long flags; \
	local_irq_save(flags); \
	__read_unlock(lock); \
	local_irq_restore(flags); \
} while(0)

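/* ___rw_write_enter spins until it owns the wlock byte and the reader
 * count has drained to zero; the store below then marks the whole word,
 * which is what __raw_write_can_lock() and __raw_read_can_lock() test.
 */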
static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_write_enter\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: /* no outputs */
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	*(volatile __u32 *)&lp->lock = ~0U;
}

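/* Try-lock variant of the above: grab the wlock byte with ldstub, then
 * back out (clear the byte) if any readers hold the lock, or mark the
 * whole word if the write lock was won cleanly.
 */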
static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned int val;

	__asm__ __volatile__("ldstub [%1 + 3], %0"
			     : "=r" (val)
			     : "r" (&rw->lock)
			     : "memory");

	if (val == 0) {
		val = rw->lock & ~0xff;
		if (val)
			((volatile u8*)&rw->lock)[3] = 0;
		else
			*(volatile u32*)&rw->lock = ~0U;
	}

	return (val == 0);
}

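/* ___rw_read_try leaves its result in %o0 (hard-wired via the register
 * variable below); the irq handling in __raw_read_trylock() mirrors
 * __raw_read_lock().
 */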
static inline int __read_trylock(raw_rwlock_t *rw)
{
	register raw_rwlock_t *lp asm("g1");
	register int res asm("o0");
	lp = rw;
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___rw_read_try\n\t"
	" ldstub	[%%g1 + 3], %%g2\n"
	: "=r" (res)
	: "r" (lp)
	: "g2", "g4", "memory", "cc");
	return res;
}

#define __raw_read_trylock(lock) \
({	unsigned long flags; \
	int res; \
	local_irq_save(flags); \
	res = __read_trylock(lock); \
	local_irq_restore(flags); \
	res; \
})

#define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

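/* A read lock is available when the wlock byte (low 8 bits) is clear;
 * a write lock only when the whole word (writer mark and reader count)
 * is zero.
 */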
#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff))
#define __raw_write_can_lock(rw) (!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* __SPARC_SPINLOCK_H */