Commit 3c22cd5709e8143444a6d08682a87f4c57902df3
1 parent
ff0c7d15f9
Exists in
master
and in
4 other branches
kernel: optimise seqlock
Add branch annotations for the seqlock read fastpath, and introduce __read_seqcount_begin and __read_seqcount_retry functions, which can avoid the smp_rmb() if used carefully. These will be used by the store-free path walking algorithm, where performance is critical and seqlocks are in use. Signed-off-by: Nick Piggin <npiggin@kernel.dk>
Showing 1 changed file with 73 additions and 7 deletions Side-by-side Diff
include/linux/seqlock.h
... | ... | @@ -107,7 +107,7 @@ |
107 | 107 | { |
108 | 108 | smp_rmb(); |
109 | 109 | |
110 | - return (sl->sequence != start); | |
110 | + return unlikely(sl->sequence != start); | |
111 | 111 | } |
112 | 112 | |
113 | 113 | |
114 | 114 | |
... | ... | @@ -125,14 +125,25 @@ |
125 | 125 | #define SEQCNT_ZERO { 0 } |
126 | 126 | #define seqcount_init(x) do { *(x) = (seqcount_t) SEQCNT_ZERO; } while (0) |
127 | 127 | |
128 | -/* Start of read using pointer to a sequence counter only. */ | |
129 | -static inline unsigned read_seqcount_begin(const seqcount_t *s) | |
128 | +/** | |
129 | + * __read_seqcount_begin - begin a seq-read critical section (without barrier) | |
130 | + * @s: pointer to seqcount_t | |
131 | + * Returns: count to be passed to read_seqcount_retry | |
132 | + * | |
133 | + * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() | |
134 | + * barrier. Callers should ensure that smp_rmb() or equivalent ordering is | |
135 | + * provided before actually loading any of the variables that are to be | |
136 | + * protected in this critical section. | |
137 | + * | |
138 | + * Use carefully, only in critical code, and comment how the barrier is | |
139 | + * provided. | |
140 | + */ | |
141 | +static inline unsigned __read_seqcount_begin(const seqcount_t *s) | |
130 | 142 | { |
131 | 143 | unsigned ret; |
132 | 144 | |
133 | 145 | repeat: |
134 | 146 | ret = s->sequence; |
135 | - smp_rmb(); | |
136 | 147 | if (unlikely(ret & 1)) { |
137 | 148 | cpu_relax(); |
138 | 149 | goto repeat; |
139 | 150 | |
140 | 151 | |
... | ... | @@ -140,14 +151,56 @@ |
140 | 151 | return ret; |
141 | 152 | } |
142 | 153 | |
143 | -/* | |
144 | - * Test if reader processed invalid data because sequence number has changed. | |
154 | +/** | |
155 | + * read_seqcount_begin - begin a seq-read critical section | |
156 | + * @s: pointer to seqcount_t | |
157 | + * Returns: count to be passed to read_seqcount_retry | |
158 | + * | |
159 | + * read_seqcount_begin opens a read critical section of the given seqcount. | |
160 | + * Validity of the critical section is tested by checking read_seqcount_retry | |
161 | + * function. | |
145 | 162 | */ |
163 | +static inline unsigned read_seqcount_begin(const seqcount_t *s) | |
164 | +{ | |
165 | + unsigned ret = __read_seqcount_begin(s); | |
166 | + smp_rmb(); | |
167 | + return ret; | |
168 | +} | |
169 | + | |
170 | +/** | |
171 | + * __read_seqcount_retry - end a seq-read critical section (without barrier) | |
172 | + * @s: pointer to seqcount_t | |
173 | + * @start: count, from read_seqcount_begin | |
174 | + * Returns: 1 if retry is required, else 0 | |
175 | + * | |
176 | + * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb() | |
177 | + * barrier. Callers should ensure that smp_rmb() or equivalent ordering is | |
178 | + * provided before actually loading any of the variables that are to be | |
179 | + * protected in this critical section. | |
180 | + * | |
181 | + * Use carefully, only in critical code, and comment how the barrier is | |
182 | + * provided. | |
183 | + */ | |
184 | +static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start) | |
185 | +{ | |
186 | + return unlikely(s->sequence != start); | |
187 | +} | |
188 | + | |
189 | +/** | |
190 | + * read_seqcount_retry - end a seq-read critical section | |
191 | + * @s: pointer to seqcount_t | |
192 | + * @start: count, from read_seqcount_begin | |
193 | + * Returns: 1 if retry is required, else 0 | |
194 | + * | |
195 | + * read_seqcount_retry closes a read critical section of the given seqcount. | |
196 | + * If the critical section was invalid, it must be ignored (and typically | |
197 | + * retried). | |
198 | + */ | |
146 | 199 | static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) |
147 | 200 | { |
148 | 201 | smp_rmb(); |
149 | 202 | |
150 | - return s->sequence != start; | |
203 | + return __read_seqcount_retry(s, start); | |
151 | 204 | } |
152 | 205 | |
153 | 206 | |
... | ... | @@ -165,6 +218,19 @@ |
165 | 218 | { |
166 | 219 | smp_wmb(); |
167 | 220 | s->sequence++; |
221 | +} | |
222 | + | |
223 | +/** | |
224 | + * write_seqcount_barrier - invalidate in-progress read-side seq operations | |
225 | + * @s: pointer to seqcount_t | |
226 | + * | |
227 | + * After write_seqcount_barrier, no read-side seq operations will complete | |
228 | + * successfully and see data older than this. | |
229 | + */ | |
230 | +static inline void write_seqcount_barrier(seqcount_t *s) | |
231 | +{ | |
232 | + smp_wmb(); | |
233 | + s->sequence+=2; | |
168 | 234 | } |
169 | 235 | |
170 | 236 | /* |