Blame view

include/asm-x86_64/spinlock.h 3.69 KB
  #ifndef __ASM_SPINLOCK_H
  #define __ASM_SPINLOCK_H
  
  #include <asm/atomic.h>
  #include <asm/rwlock.h>
  #include <asm/page.h>
  #include <asm/processor.h>

  /*
   * Your basic SMP spinlocks, allowing only a single CPU anywhere
   *
   * Simple spin lock operations.  There are two variants, one clears IRQ's
   * on the local processor, one does not.
   *
   * We make no fairness assumptions. They have a cost.
   *
   * (the type definitions are in asm/spinlock_types.h)
   */
  static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
  {
  	return *(volatile signed int *)(&(lock)->slock) <= 0;
  }

  static inline void __raw_spin_lock(raw_spinlock_t *lock)
  {
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decl %0\n\t"
		"jns 2f\n"
		"3:\n"
		"rep;nop\n\t"
		"cmpl $0,%0\n\t"
		"jle 3b\n\t"
		"jmp 1b\n"
		"2:\t" : "=m" (lock->slock) : : "memory");
  }
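
/*
 * Illustrative sketch, not part of the original header: the fast path of
 * __raw_spin_lock() above in plain C.  The example_spin_lock() name and the
 * GCC __sync_sub_and_fetch() builtin are assumptions made for the example;
 * the real code uses the LOCK_PREFIX "decl" above.  The lock word is 1 when
 * free; a non-negative result after the atomic decrement means the lock was
 * taken, anything else spins with cpu_relax() ("rep;nop") until it is freed.
 */
#if 0	/* example only, never compiled */
static inline void example_spin_lock(volatile int *slock)
{
	for (;;) {
		/* atomic "decl": 1 -> 0 means this CPU now owns the lock */
		if (__sync_sub_and_fetch(slock, 1) >= 0)
			return;
		/* contended: wait for the owner to store 1 again ("jle 3b") */
		while (*slock <= 0)
			cpu_relax();
	}
}
#endif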
  /*
   * Same as __raw_spin_lock, but reenable interrupts during spinning.
   */
  #ifndef CONFIG_PROVE_LOCKING
  static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
  {
	asm volatile(
		"\n1:\t"
		LOCK_PREFIX " ; decl %0\n\t"
		"jns 5f\n"
		"testl $0x200, %1\n\t"	/* interrupts were disabled? */
		"jz 4f\n\t"
		"sti\n"
		"3:\t"
		"rep;nop\n\t"
		"cmpl $0, %0\n\t"
		"jle 3b\n\t"
		"cli\n\t"
		"jmp 1b\n"
		"4:\t"
		"rep;nop\n\t"
		"cmpl $0, %0\n\t"
		"jg 1b\n\t"
		"jmp 4b\n"
		"5:\n\t"
		: "+m" (lock->slock) : "r" ((unsigned)flags) : "memory");
  }
  #endif
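
/*
 * Illustrative sketch, not part of the original header: the logic of
 * __raw_spin_lock_flags() above in plain C.  The example_spin_lock_flags()
 * name and the __sync_sub_and_fetch() builtin are assumptions made for the
 * example; 0x200 is the same EFLAGS.IF mask that "testl $0x200, %1" checks.
 * If the caller had interrupts enabled, they are re-enabled while spinning
 * and disabled again before every retry of the atomic decrement.
 */
#if 0	/* example only, never compiled */
static inline void example_spin_lock_flags(volatile int *slock, unsigned long flags)
{
	for (;;) {
		if (__sync_sub_and_fetch(slock, 1) >= 0)
			return;				/* "jns 5f": lock acquired */
		if (flags & 0x200) {			/* caller had EFLAGS.IF set */
			local_irq_enable();		/* "sti" */
			while (*slock <= 0)
				cpu_relax();		/* "rep;nop" */
			local_irq_disable();		/* "cli" before retrying */
		} else {
			while (*slock <= 0)		/* spin with interrupts off */
				cpu_relax();
		}
	}
}
#endif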

  static inline int __raw_spin_trylock(raw_spinlock_t *lock)
  {
  	int oldval;

  	asm volatile(
  		"xchgl %0,%1"
  		:"=q" (oldval), "=m" (lock->slock)
  		:"0" (0) : "memory");

  	return oldval > 0;
  }
  static inline void __raw_spin_unlock(raw_spinlock_t *lock)
  {
  	asm volatile("movl $1,%0" :"=m" (lock->slock) :: "memory");
  }
  static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
  {
  	while (__raw_spin_is_locked(lock))
  		cpu_relax();
  }
  
  /*
   * Read-write spinlocks, allowing multiple readers
   * but only one writer.
   *
   * NOTE! it is quite common to have readers in interrupts
   * but no interrupt writers. For those circumstances we
   * can "mix" irq-safe locks - any writer needs to get a
   * irq-safe write-lock, but readers can get non-irqsafe
   * read-locks.
   *
   * On x86, we implement read-write locks as a 32-bit counter
   * with the high bit (sign) being the "contended" bit.
   */
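
/*
 * Illustrative sketch, not part of the original header: the counter encoding
 * described above, assuming the 0x01000000 RW_LOCK_BIAS from asm/rwlock.h.
 * An idle lock holds the full bias; each reader subtracts 1, a writer
 * subtracts the whole bias.  The reader fast path fails once the result
 * turns negative (a writer is involved), the writer fast path fails on any
 * non-zero result (readers or another writer are present).  The example_*
 * helpers are hypothetical and only spell out those states.
 */
#if 0	/* example only, never compiled */
static inline int example_rw_idle(int count)
{
	return count == RW_LOCK_BIAS;		/* no readers, no writer */
}

static inline int example_rw_readers(int count)
{
	return RW_LOCK_BIAS - count;		/* meaningful only while no writer holds it */
}

static inline int example_rw_writer_or_contended(int count)
{
	return count <= 0;			/* a writer took the bias or is waiting */
}
#endif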

  static inline int __raw_read_can_lock(raw_rwlock_t *lock)
  {
  	return (int)(lock)->lock > 0;
  }
  
  static inline int __raw_write_can_lock(raw_rwlock_t *lock)
  {
  	return (lock)->lock == RW_LOCK_BIAS;
  }
  
  static inline void __raw_read_lock(raw_rwlock_t *rw)
  {
  	asm volatile(LOCK_PREFIX "subl $1,(%0)
  \t"
  		     "jns 1f
  "
  		     "call __read_lock_failed
  "
  		     "1:
  "
  		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
  }
  static inline void __raw_write_lock(raw_rwlock_t *rw)
  {
  	asm volatile(LOCK_PREFIX "subl %1,(%0)
  \t"
  		     "jz 1f
  "
  		     "\tcall __write_lock_failed
  \t"
  		     "1:
  "
  		     ::"D" (rw), "i" (RW_LOCK_BIAS) : "memory");
  }
  static inline int __raw_read_trylock(raw_rwlock_t *lock)
  {
  	atomic_t *count = (atomic_t *)lock;
  	atomic_dec(count);
  	if (atomic_read(count) >= 0)
  		return 1;
  	atomic_inc(count);
  	return 0;
  }
  static inline int __raw_write_trylock(raw_rwlock_t *lock)
  {
  	atomic_t *count = (atomic_t *)lock;
  	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
  		return 1;
  	atomic_add(RW_LOCK_BIAS, count);
  	return 0;
  }
  static inline void __raw_read_unlock(raw_rwlock_t *rw)
  {
  	asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
  }
  
  static inline void __raw_write_unlock(raw_rwlock_t *rw)
  {
  	asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
  				: "=m" (rw->lock) : : "memory");
  }
  #define _raw_spin_relax(lock)	cpu_relax()
  #define _raw_read_relax(lock)	cpu_relax()
  #define _raw_write_relax(lock)	cpu_relax()
  #endif /* __ASM_SPINLOCK_H */