Blame view

include/linux/seqlock.h 6.68 KB
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
1
2
3
4
#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes.  Readers never
 * block but they may have to retry if a writer is in
 * progress. Writers do not wait for readers.
 *
 * This is not as cache friendly as brlock. Also, this will not work
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected reader usage:
 * 	do {
 *	    seq = read_seqbegin(&foo);
 * 	...
 *      } while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
28
29
  #include <linux/spinlock.h>
  #include <linux/preempt.h>
56a210526   David Howells   linux/seqlock.h s...
30
  #include <asm/processor.h>
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
31
32
33
34
35
36
37
38
39
40
  
/*
 * Sequence lock: a sequence counter paired with a spinlock.  The spinlock
 * serializes writers; the counter lets lockless readers detect writer
 * activity (the count is odd while a write is in progress and differs
 * across any completed write).
 */
typedef struct {
	unsigned sequence;	/* even: no writer; odd: write in progress */
	spinlock_t lock;	/* serializes writers */
} seqlock_t;
  
  /*
   * These macros triggered gcc-3.x compile-time problems.  We think these are
   * OK now.  Be cautious.
   */
e4d919188   Ingo Molnar   [PATCH] lockdep: ...
41
42
  #define __SEQLOCK_UNLOCKED(lockname) \
  		 { 0, __SPIN_LOCK_UNLOCKED(lockname) }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
43

99a3eb384   Ingo Molnar   [PATCH] lockdep: ...
44
45
46
47
48
  #define seqlock_init(x)					\
  	do {						\
  		(x)->sequence = 0;			\
  		spin_lock_init(&(x)->lock);		\
  	} while (0)
e4d919188   Ingo Molnar   [PATCH] lockdep: ...
49
50
51
  
  #define DEFINE_SEQLOCK(x) \
  		seqlock_t x = __SEQLOCK_UNLOCKED(x)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
52
53
54
55
56
57
58
59
60
  
  /* Lock out other writers and update the count.
   * Acts like a normal spin_lock/unlock.
   * Don't need preempt_disable() because that is in the spin_lock already.
   */
  static inline void write_seqlock(seqlock_t *sl)
  {
  	spin_lock(&sl->lock);
  	++sl->sequence;
20f09390b   Daniel Walker   seqlocks: trivial...
61
62
  	smp_wmb();
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
63

20f09390b   Daniel Walker   seqlocks: trivial...
64
  static inline void write_sequnlock(seqlock_t *sl)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
65
66
67
68
69
70
71
72
73
74
75
76
  {
  	smp_wmb();
  	sl->sequence++;
  	spin_unlock(&sl->lock);
  }
  
  static inline int write_tryseqlock(seqlock_t *sl)
  {
  	int ret = spin_trylock(&sl->lock);
  
  	if (ret) {
  		++sl->sequence;
20f09390b   Daniel Walker   seqlocks: trivial...
77
  		smp_wmb();
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
78
79
80
81
82
  	}
  	return ret;
  }
  
  /* Start of read calculation -- fetch last complete writer token */
cde227afe   mao, bibo   [PATCH] x86_64: i...
83
  static __always_inline unsigned read_seqbegin(const seqlock_t *sl)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
84
  {
88a411c07   Ingo Molnar   seqlock: livelock...
85
86
87
  	unsigned ret;
  
  repeat:
5db1256a5   Milton Miller   seqlock: Don't sm...
88
  	ret = ACCESS_ONCE(sl->sequence);
88a411c07   Ingo Molnar   seqlock: livelock...
89
90
91
92
  	if (unlikely(ret & 1)) {
  		cpu_relax();
  		goto repeat;
  	}
5db1256a5   Milton Miller   seqlock: Don't sm...
93
  	smp_rmb();
88a411c07   Ingo Molnar   seqlock: livelock...
94

1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
95
96
  	return ret;
  }
88a411c07   Ingo Molnar   seqlock: livelock...
97
98
99
100
/*
 * Test if reader processed invalid data.
 *
 * If sequence value changed then writer changed data while in section,
 * and the reader must discard what it read and retry.
 */
static __always_inline int read_seqretry(const seqlock_t *sl, unsigned start)
{
	smp_rmb();	/* order the critical-section loads before the re-check */

	return unlikely(sl->sequence != start);
}
  
  
  /*
   * Version using sequence counter only.
   * This can be used when code has its own mutex protecting the
   * updating starting before the write_seqcountbeqin() and ending
   * after the write_seqcount_end().
   */
  
  typedef struct seqcount {
  	unsigned sequence;
  } seqcount_t;
  
  #define SEQCNT_ZERO { 0 }
  #define seqcount_init(x)	do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0)
3c22cd570   Nick Piggin   kernel: optimise ...
123
124
125
126
127
128
129
130
131
132
133
134
135
136
  /**
   * __read_seqcount_begin - begin a seq-read critical section (without barrier)
   * @s: pointer to seqcount_t
   * Returns: count to be passed to read_seqcount_retry
   *
   * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
   * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
   * provided before actually loading any of the variables that are to be
   * protected in this critical section.
   *
   * Use carefully, only in critical code, and comment how the barrier is
   * provided.
   */
  static inline unsigned __read_seqcount_begin(const seqcount_t *s)
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
137
  {
88a411c07   Ingo Molnar   seqlock: livelock...
138
139
140
141
  	unsigned ret;
  
  repeat:
  	ret = s->sequence;
88a411c07   Ingo Molnar   seqlock: livelock...
142
143
144
145
  	if (unlikely(ret & 1)) {
  		cpu_relax();
  		goto repeat;
  	}
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
146
147
  	return ret;
  }
3c22cd570   Nick Piggin   kernel: optimise ...
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
  /**
   * read_seqcount_begin - begin a seq-read critical section
   * @s: pointer to seqcount_t
   * Returns: count to be passed to read_seqcount_retry
   *
   * read_seqcount_begin opens a read critical section of the given seqcount.
   * Validity of the critical section is tested by checking read_seqcount_retry
   * function.
   */
  static inline unsigned read_seqcount_begin(const seqcount_t *s)
  {
  	unsigned ret = __read_seqcount_begin(s);
  	smp_rmb();
  	return ret;
  }
  
  /**
   * __read_seqcount_retry - end a seq-read critical section (without barrier)
   * @s: pointer to seqcount_t
   * @start: count, from read_seqcount_begin
   * Returns: 1 if retry is required, else 0
   *
   * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
   * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
   * provided before actually loading any of the variables that are to be
   * protected in this critical section.
   *
   * Use carefully, only in critical code, and comment how the barrier is
   * provided.
   */
  static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
  {
  	return unlikely(s->sequence != start);
  }
  
/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();	/* order the critical-section loads before the re-check */

	return __read_seqcount_retry(s, start);
}
  
  
  /*
   * Sequence counter only version assumes that callers are using their
   * own mutexing.
   */
  static inline void write_seqcount_begin(seqcount_t *s)
  {
  	s->sequence++;
  	smp_wmb();
  }
  
  static inline void write_seqcount_end(seqcount_t *s)
  {
  	smp_wmb();
  	s->sequence++;
  }
3c22cd570   Nick Piggin   kernel: optimise ...
216
217
218
219
220
221
222
223
224
225
226
227
  /**
   * write_seqcount_barrier - invalidate in-progress read-side seq operations
   * @s: pointer to seqcount_t
   *
   * After write_seqcount_barrier, no read-side seq operations will complete
   * successfully and see data older than this.
   */
  static inline void write_seqcount_barrier(seqcount_t *s)
  {
  	smp_wmb();
  	s->sequence+=2;
  }
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
  /*
   * Possible sw/hw IRQ protected versions of the interfaces.
   */
  #define write_seqlock_irqsave(lock, flags)				\
  	do { local_irq_save(flags); write_seqlock(lock); } while (0)
  #define write_seqlock_irq(lock)						\
  	do { local_irq_disable();   write_seqlock(lock); } while (0)
  #define write_seqlock_bh(lock)						\
          do { local_bh_disable();    write_seqlock(lock); } while (0)
  
  #define write_sequnlock_irqrestore(lock, flags)				\
  	do { write_sequnlock(lock); local_irq_restore(flags); } while(0)
  #define write_sequnlock_irq(lock)					\
  	do { write_sequnlock(lock); local_irq_enable(); } while(0)
  #define write_sequnlock_bh(lock)					\
  	do { write_sequnlock(lock); local_bh_enable(); } while(0)
  
  #define read_seqbegin_irqsave(lock, flags)				\
  	({ local_irq_save(flags);   read_seqbegin(lock); })
  
  #define read_seqretry_irqrestore(lock, iv, flags)			\
  	({								\
  		int ret = read_seqretry(lock, iv);			\
  		local_irq_restore(flags);				\
  		ret;							\
  	})
  
  #endif /* __LINUX_SEQLOCK_H */