Commit 1cc523271ef0b6305c565a143e3d48f6fff826dd

Authored by Stephen Hemminger
Committed by David S. Miller
1 parent 35e2da46d2

seq_file: add RCU versions of new hlist/list iterators (v3)

Many users of seq_file iterate over RCU-protected lists, so the
non-RCU iterators cannot be used safely there.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Showing 3 changed files with 87 additions and 4 deletions Inline Diff

1 /* 1 /*
2 * linux/fs/seq_file.c 2 * linux/fs/seq_file.c
3 * 3 *
4 * helper functions for making synthetic files from sequences of records. 4 * helper functions for making synthetic files from sequences of records.
5 * initial implementation -- AV, Oct 2001. 5 * initial implementation -- AV, Oct 2001.
6 */ 6 */
7 7
8 #include <linux/fs.h> 8 #include <linux/fs.h>
9 #include <linux/module.h> 9 #include <linux/module.h>
10 #include <linux/seq_file.h> 10 #include <linux/seq_file.h>
11 #include <linux/slab.h> 11 #include <linux/slab.h>
12 12
13 #include <asm/uaccess.h> 13 #include <asm/uaccess.h>
14 #include <asm/page.h> 14 #include <asm/page.h>
15 15
/**
 * seq_open - initialize sequential file
 * @file: file we initialize
 * @op: method table describing the sequence
 *
 * seq_open() sets @file, associating it with a sequence described
 * by @op. @op->start() sets the iterator up and returns the first
 * element of sequence. @op->stop() shuts it down. @op->next()
 * returns the next element of sequence. @op->show() prints element
 * into the buffer. In case of error ->start() and ->next() return
 * ERR_PTR(error). In the end of sequence they return %NULL. ->show()
 * returns 0 in case of success and negative number in case of error.
 * Returning SEQ_SKIP means "discard this element and move on".
 */
int seq_open(struct file *file, const struct seq_operations *op)
{
	struct seq_file *p = file->private_data;

	/* Allocate the seq_file state unless a wrapper already did. */
	if (!p) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
		file->private_data = p;
	}
	/* Reset even a pre-existing seq_file: zeroes buf/size/count/index. */
	memset(p, 0, sizeof(*p));
	mutex_init(&p->lock);
	p->op = op;

	/*
	 * Wrappers around seq_open(e.g. swaps_open) need to be
	 * aware of this. If they set f_version themselves, they
	 * should call seq_open first and then set f_version.
	 */
	file->f_version = 0;

	/*
	 * seq_files support lseek() and pread(). They do not implement
	 * write() at all, but we clear FMODE_PWRITE here for historical
	 * reasons.
	 *
	 * If a client of seq_files a) implements file.write() and b) wishes to
	 * support pwrite() then that client will need to implement its own
	 * file.open() which calls seq_open() and then sets FMODE_PWRITE.
	 */
	file->f_mode &= ~FMODE_PWRITE;
	return 0;
}
EXPORT_SYMBOL(seq_open);
64 64
/*
 * traverse - rewind the sequence and consume records up to @offset.
 *
 * Rebuilds m->index, m->count and m->from so that the next read starts
 * at byte @offset of the virtual file.  Returns 0 on success, -EAGAIN
 * after doubling an overflowed buffer (caller must retry), or a
 * negative error from ->start()/->show().
 */
static int traverse(struct seq_file *m, loff_t offset)
{
	loff_t pos = 0, index;
	int error = 0;
	void *p;

	m->version = 0;
	index = 0;
	m->count = m->from = 0;
	if (!offset) {
		m->index = index;
		return 0;
	}
	if (!m->buf) {
		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
		if (!m->buf)
			return -ENOMEM;
	}
	p = m->op->start(m, &index);
	while (p) {
		error = PTR_ERR(p);
		if (IS_ERR(p))
			break;
		error = m->op->show(m, p);
		if (error < 0)
			break;
		if (unlikely(error)) {
			/* SEQ_SKIP: drop whatever ->show() emitted */
			error = 0;
			m->count = 0;
		}
		if (m->count == m->size)
			goto Eoverflow;
		if (pos + m->count > offset) {
			/* target byte lies inside this record */
			m->from = offset - pos;
			m->count -= m->from;
			m->index = index;
			break;
		}
		pos += m->count;
		m->count = 0;
		if (pos == offset) {
			/* landed exactly on a record boundary */
			index++;
			m->index = index;
			break;
		}
		p = m->op->next(m, p, &index);
	}
	m->op->stop(m, p);
	m->index = index;
	return error;

Eoverflow:
	/* record larger than the buffer: double it and ask caller to retry */
	m->op->stop(m, p);
	kfree(m->buf);
	m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
	return !m->buf ? -ENOMEM : -EAGAIN;
}
122 122
/**
 * seq_read - ->read() method for sequential files.
 * @file: the file to read from
 * @buf: the buffer to read to
 * @size: the maximum number of bytes to read
 * @ppos: the current position in the file
 *
 * Ready-made ->f_op->read()
 */
ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	size_t copied = 0;
	loff_t pos;
	size_t n;
	void *p;
	int err = 0;

	mutex_lock(&m->lock);

	/* Don't assume *ppos is where we left it */
	if (unlikely(*ppos != m->read_pos)) {
		m->read_pos = *ppos;
		/* traverse() returns -EAGAIN each time it grows the buffer */
		while ((err = traverse(m, *ppos)) == -EAGAIN)
			;
		if (err) {
			/* With prejudice... */
			m->read_pos = 0;
			m->version = 0;
			m->index = 0;
			m->count = 0;
			goto Done;
		}
	}

	/*
	 * seq_file->op->..m_start/m_stop/m_next may do special actions
	 * or optimisations based on the file->f_version, so we want to
	 * pass the file->f_version to those methods.
	 *
	 * seq_file->version is just copy of f_version, and seq_file
	 * methods can treat it simply as file version.
	 * It is copied in first and copied out after all operations.
	 * It is convenient to have it as part of structure to avoid the
	 * need of passing another argument to all the seq_file methods.
	 */
	m->version = file->f_version;
	/* grab buffer if we didn't have one */
	if (!m->buf) {
		m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
		if (!m->buf)
			goto Enomem;
	}
	/* if not empty - flush it first */
	if (m->count) {
		n = min(m->count, size);
		err = copy_to_user(buf, m->buf + m->from, n);
		if (err)
			goto Efault;
		m->count -= n;
		m->from += n;
		size -= n;
		buf += n;
		copied += n;
		if (!m->count)
			m->index++;
		if (!size)
			goto Done;
	}
	/* we need at least one record in buffer */
	pos = m->index;
	p = m->op->start(m, &pos);
	while (1) {
		err = PTR_ERR(p);
		if (!p || IS_ERR(p))
			break;
		err = m->op->show(m, p);
		if (err < 0)
			break;
		if (unlikely(err))
			m->count = 0;	/* SEQ_SKIP: discard this record */
		if (unlikely(!m->count)) {
			p = m->op->next(m, p, &pos);
			m->index = pos;
			continue;
		}
		if (m->count < m->size)
			goto Fill;
		/* record did not fit: double the buffer and start over */
		m->op->stop(m, p);
		kfree(m->buf);
		m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
		if (!m->buf)
			goto Enomem;
		m->count = 0;
		m->version = 0;
		pos = m->index;
		p = m->op->start(m, &pos);
	}
	m->op->stop(m, p);
	m->count = 0;
	goto Done;
Fill:
	/* they want more? let's try to get some more */
	while (m->count < size) {
		size_t offs = m->count;
		loff_t next = pos;
		p = m->op->next(m, p, &next);
		if (!p || IS_ERR(p)) {
			err = PTR_ERR(p);
			break;
		}
		err = m->op->show(m, p);
		if (m->count == m->size || err) {
			/* overflow or error: roll back the partial record */
			m->count = offs;
			if (likely(err <= 0))
				break;
		}
		pos = next;
	}
	m->op->stop(m, p);
	n = min(m->count, size);
	err = copy_to_user(buf, m->buf, n);
	if (err)
		goto Efault;
	copied += n;
	m->count -= n;
	if (m->count)
		m->from = n;
	else
		pos++;
	m->index = pos;
Done:
	if (!copied)
		copied = err;	/* nothing copied: report the error (or 0) */
	else {
		*ppos += copied;
		m->read_pos += copied;
	}
	file->f_version = m->version;
	mutex_unlock(&m->lock);
	return copied;
Enomem:
	err = -ENOMEM;
	goto Done;
Efault:
	err = -EFAULT;
	goto Done;
}
EXPORT_SYMBOL(seq_read);
272 272
/**
 * seq_lseek - ->llseek() method for sequential files.
 * @file: the file in question
 * @offset: new position
 * @origin: 0 for absolute, 1 for relative position
 *
 * Ready-made ->f_op->llseek()
 */
loff_t seq_lseek(struct file *file, loff_t offset, int origin)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	loff_t retval = -EINVAL;

	mutex_lock(&m->lock);
	m->version = file->f_version;
	switch (origin) {
	case 1:	/* SEEK_CUR: turn the offset into an absolute position */
		offset += file->f_pos;
		/* fallthrough */
	case 0:	/* SEEK_SET */
		if (offset < 0)
			break;
		retval = offset;
		if (offset != m->read_pos) {
			/* re-walk the sequence up to the new position */
			while ((retval=traverse(m, offset)) == -EAGAIN)
				;
			if (retval) {
				/* with extreme prejudice... */
				file->f_pos = 0;
				m->read_pos = 0;
				m->version = 0;
				m->index = 0;
				m->count = 0;
			} else {
				m->read_pos = offset;
				retval = file->f_pos = offset;
			}
		}
	}
	file->f_version = m->version;
	mutex_unlock(&m->lock);
	return retval;
}
EXPORT_SYMBOL(seq_lseek);
316 316
317 /** 317 /**
318 * seq_release - free the structures associated with sequential file. 318 * seq_release - free the structures associated with sequential file.
319 * @file: file in question 319 * @file: file in question
320 * @inode: file->f_path.dentry->d_inode 320 * @inode: file->f_path.dentry->d_inode
321 * 321 *
322 * Frees the structures associated with sequential file; can be used 322 * Frees the structures associated with sequential file; can be used
323 * as ->f_op->release() if you don't have private data to destroy. 323 * as ->f_op->release() if you don't have private data to destroy.
324 */ 324 */
325 int seq_release(struct inode *inode, struct file *file) 325 int seq_release(struct inode *inode, struct file *file)
326 { 326 {
327 struct seq_file *m = (struct seq_file *)file->private_data; 327 struct seq_file *m = (struct seq_file *)file->private_data;
328 kfree(m->buf); 328 kfree(m->buf);
329 kfree(m); 329 kfree(m);
330 return 0; 330 return 0;
331 } 331 }
332 EXPORT_SYMBOL(seq_release); 332 EXPORT_SYMBOL(seq_release);
333 333
/**
 * seq_escape - print string into buffer, escaping some characters
 * @m: target buffer
 * @s: string
 * @esc: set of characters that need escaping
 *
 * Puts string into buffer, replacing each occurrence of character from
 * @esc with usual octal escape. Returns 0 in case of success, -1 - in
 * case of overflow.
 */
int seq_escape(struct seq_file *m, const char *s, const char *esc)
{
	char *end = m->buf + m->size;
	char *p;
	char c;

	for (p = m->buf + m->count; (c = *s) != '\0' && p < end; s++) {
		if (!strchr(esc, c)) {
			*p++ = c;
			continue;
		}
		/* need room for the full 4-byte "\ooo" escape */
		if (p + 3 < end) {
			*p++ = '\\';
			*p++ = '0' + ((c & 0300) >> 6);
			*p++ = '0' + ((c & 070) >> 3);
			*p++ = '0' + (c & 07);
			continue;
		}
		m->count = m->size;	/* mark overflow so buffer is grown */
		return -1;
	}
	m->count = p - m->buf;
	return 0;
}
EXPORT_SYMBOL(seq_escape);
369 369
/*
 * seq_printf - printf-style formatting into the seq_file buffer.
 * Returns 0 on success, -1 on overflow; on overflow m->count is set to
 * m->size so the core grows the buffer and retries the whole record.
 */
int seq_printf(struct seq_file *m, const char *f, ...)
{
	va_list args;
	int len;

	if (m->count < m->size) {
		va_start(args, f);
		len = vsnprintf(m->buf + m->count, m->size - m->count, f, args);
		va_end(args);
		/* vsnprintf returns the would-be length; check it really fit */
		if (m->count + len < m->size) {
			m->count += len;
			return 0;
		}
	}
	m->count = m->size;	/* mark overflow */
	return -1;
}
EXPORT_SYMBOL(seq_printf);
388 388
/**
 * mangle_path - mangle and copy path to buffer beginning
 * @s: buffer start
 * @p: beginning of path in above buffer
 * @esc: set of characters that need escaping
 *
 * Copy the path from @p to @s, replacing each occurrence of character from
 * @esc with usual octal escape.
 * Returns pointer past last written character in @s, or NULL in case of
 * failure.
 */
char *mangle_path(char *s, char *p, char *esc)
{
	for (; s <= p; ) {
		char c = *p++;

		if (c == '\0')
			return s;
		if (strchr(esc, c) == NULL) {
			*s++ = c;
			continue;
		}
		/* escapes need 4 bytes; stop before overrunning the source */
		if (s + 4 > p)
			return NULL;
		*s++ = '\\';
		*s++ = '0' + ((c & 0300) >> 6);
		*s++ = '0' + ((c & 070) >> 3);
		*s++ = '0' + (c & 07);
	}
	return NULL;
}
419 EXPORT_SYMBOL(mangle_path); 419 EXPORT_SYMBOL(mangle_path);
420 420
/**
 * seq_path - seq_file interface to print a pathname
 * @m: the seq_file handle
 * @path: the struct path to print
 * @esc: set of characters to escape in the output
 *
 * return the absolute path of 'path', as represented by the
 * dentry / mnt pair in the path parameter.
 */
int seq_path(struct seq_file *m, struct path *path, char *esc)
{
	char *buf;
	size_t size = seq_get_buf(m, &buf);
	int res = -1;

	if (size) {
		/* d_path() builds the name at the END of buf */
		char *p = d_path(path, buf, size);
		if (!IS_ERR(p)) {
			/* move it to the front, escaping as we go */
			char *end = mangle_path(buf, p, esc);
			if (end)
				res = end - buf;
		}
	}
	seq_commit(m, res);

	return res;
}
EXPORT_SYMBOL(seq_path);
449 449
/*
 * Same as seq_path, but relative to supplied root.
 *
 * root may be changed, see __d_path().
 */
int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
		  char *esc)
{
	char *buf;
	size_t size = seq_get_buf(m, &buf);
	int res = -ENAMETOOLONG;

	if (size) {
		char *p;

		/* dcache_lock protects the d_parent walk in __d_path() */
		spin_lock(&dcache_lock);
		p = __d_path(path, root, buf, size);
		spin_unlock(&dcache_lock);
		res = PTR_ERR(p);
		if (!IS_ERR(p)) {
			char *end = mangle_path(buf, p, esc);
			if (end)
				res = end - buf;
			else
				res = -ENAMETOOLONG;
		}
	}
	seq_commit(m, res);

	/* unlike seq_path(), callers only need success/failure here */
	return res < 0 ? res : 0;
}
/*
 * returns the path of the 'dentry' from the root of its filesystem.
 */
int seq_dentry(struct seq_file *m, struct dentry *dentry, char *esc)
{
	char *buf;
	size_t size = seq_get_buf(m, &buf);
	int res = -1;

	if (size) {
		/* dentry_path() builds the name at the END of buf */
		char *p = dentry_path(dentry, buf, size);
		if (!IS_ERR(p)) {
			/* move it to the front, escaping as we go */
			char *end = mangle_path(buf, p, esc);
			if (end)
				res = end - buf;
		}
	}
	seq_commit(m, res);

	return res;
}
/*
 * seq_bitmap - print a bitmap in hex format into the buffer.
 * Returns 0 on success, -1 on overflow (m->count is set to m->size so
 * the buffer gets doubled and the record retried).
 */
int seq_bitmap(struct seq_file *m, const unsigned long *bits,
				unsigned int nr_bits)
{
	if (m->count < m->size) {
		int len = bitmap_scnprintf(m->buf + m->count,
				m->size - m->count, bits, nr_bits);
		/* bitmap_scnprintf truncates; verify the output really fit */
		if (m->count + len < m->size) {
			m->count += len;
			return 0;
		}
	}
	m->count = m->size;	/* mark overflow */
	return -1;
}
EXPORT_SYMBOL(seq_bitmap);
519 519
/*
 * seq_bitmap_list - print a bitmap in list format (e.g. "0-3,5").
 * Returns 0 on success, -1 on overflow (m->count is set to m->size so
 * the buffer gets doubled and the record retried).
 */
int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
		unsigned int nr_bits)
{
	if (m->count < m->size) {
		int len = bitmap_scnlistprintf(m->buf + m->count,
				m->size - m->count, bits, nr_bits);
		/* bitmap_scnlistprintf truncates; verify the output fit */
		if (m->count + len < m->size) {
			m->count += len;
			return 0;
		}
	}
	m->count = m->size;	/* mark overflow */
	return -1;
}
EXPORT_SYMBOL(seq_bitmap_list);
535 535
536 static void *single_start(struct seq_file *p, loff_t *pos) 536 static void *single_start(struct seq_file *p, loff_t *pos)
537 { 537 {
538 return NULL + (*pos == 0); 538 return NULL + (*pos == 0);
539 } 539 }
540 540
541 static void *single_next(struct seq_file *p, void *v, loff_t *pos) 541 static void *single_next(struct seq_file *p, void *v, loff_t *pos)
542 { 542 {
543 ++*pos; 543 ++*pos;
544 return NULL; 544 return NULL;
545 } 545 }
546 546
/* ->stop() for single-record seq_files: nothing to tear down. */
static void single_stop(struct seq_file *p, void *v)
{
}
550 550
/*
 * single_open - open a seq_file that consists of exactly one record.
 * @show is invoked once per read pass; @data is stored in ->private.
 * Returns 0 on success or -ENOMEM.  Must be paired with
 * single_release() so the dynamically allocated seq_operations table
 * is freed.
 */
int single_open(struct file *file, int (*show)(struct seq_file *, void *),
		void *data)
{
	struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
	int res = -ENOMEM;

	if (op) {
		op->start = single_start;
		op->next = single_next;
		op->stop = single_stop;
		op->show = show;
		res = seq_open(file, op);
		if (!res)
			((struct seq_file *)file->private_data)->private = data;
		else
			kfree(op);	/* seq_open failed: don't leak the table */
	}
	return res;
}
EXPORT_SYMBOL(single_open);
571 571
572 int single_release(struct inode *inode, struct file *file) 572 int single_release(struct inode *inode, struct file *file)
573 { 573 {
574 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op; 574 const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
575 int res = seq_release(inode, file); 575 int res = seq_release(inode, file);
576 kfree(op); 576 kfree(op);
577 return res; 577 return res;
578 } 578 }
579 EXPORT_SYMBOL(single_release); 579 EXPORT_SYMBOL(single_release);
580 580
/*
 * seq_release_private - seq_release() plus kfree() of seq->private.
 * For files whose open path attached a kmalloc'd blob to ->private.
 */
int seq_release_private(struct inode *inode, struct file *file)
{
	struct seq_file *seq = file->private_data;

	kfree(seq->private);
	seq->private = NULL;
	return seq_release(inode, file);
}
EXPORT_SYMBOL(seq_release_private);
590 590
/*
 * __seq_open_private - seq_open() plus a zeroed @psize-byte private area.
 * On success the area is stored in seq_file->private and returned; on
 * failure (either allocation or seq_open()) NULL is returned and
 * nothing is leaked.
 */
void *__seq_open_private(struct file *f, const struct seq_operations *ops,
		int psize)
{
	int rc;
	void *private;
	struct seq_file *seq;

	private = kzalloc(psize, GFP_KERNEL);
	if (private == NULL)
		goto out;

	rc = seq_open(f, ops);
	if (rc < 0)
		goto out_free;

	seq = f->private_data;
	seq->private = private;
	return private;

out_free:
	kfree(private);
out:
	return NULL;
}
EXPORT_SYMBOL(__seq_open_private);
616 616
617 int seq_open_private(struct file *filp, const struct seq_operations *ops, 617 int seq_open_private(struct file *filp, const struct seq_operations *ops,
618 int psize) 618 int psize)
619 { 619 {
620 return __seq_open_private(filp, ops, psize) ? 0 : -ENOMEM; 620 return __seq_open_private(filp, ops, psize) ? 0 : -ENOMEM;
621 } 621 }
622 EXPORT_SYMBOL(seq_open_private); 622 EXPORT_SYMBOL(seq_open_private);
623 623
624 int seq_putc(struct seq_file *m, char c) 624 int seq_putc(struct seq_file *m, char c)
625 { 625 {
626 if (m->count < m->size) { 626 if (m->count < m->size) {
627 m->buf[m->count++] = c; 627 m->buf[m->count++] = c;
628 return 0; 628 return 0;
629 } 629 }
630 return -1; 630 return -1;
631 } 631 }
632 EXPORT_SYMBOL(seq_putc); 632 EXPORT_SYMBOL(seq_putc);
633 633
634 int seq_puts(struct seq_file *m, const char *s) 634 int seq_puts(struct seq_file *m, const char *s)
635 { 635 {
636 int len = strlen(s); 636 int len = strlen(s);
637 if (m->count + len < m->size) { 637 if (m->count + len < m->size) {
638 memcpy(m->buf + m->count, s, len); 638 memcpy(m->buf + m->count, s, len);
639 m->count += len; 639 m->count += len;
640 return 0; 640 return 0;
641 } 641 }
642 m->count = m->size; 642 m->count = m->size;
643 return -1; 643 return -1;
644 } 644 }
645 EXPORT_SYMBOL(seq_puts); 645 EXPORT_SYMBOL(seq_puts);
646 646
647 /** 647 /**
648 * seq_write - write arbitrary data to buffer 648 * seq_write - write arbitrary data to buffer
649 * @seq: seq_file identifying the buffer to which data should be written 649 * @seq: seq_file identifying the buffer to which data should be written
650 * @data: data address 650 * @data: data address
651 * @len: number of bytes 651 * @len: number of bytes
652 * 652 *
653 * Return 0 on success, non-zero otherwise. 653 * Return 0 on success, non-zero otherwise.
654 */ 654 */
655 int seq_write(struct seq_file *seq, const void *data, size_t len) 655 int seq_write(struct seq_file *seq, const void *data, size_t len)
656 { 656 {
657 if (seq->count + len < seq->size) { 657 if (seq->count + len < seq->size) {
658 memcpy(seq->buf + seq->count, data, len); 658 memcpy(seq->buf + seq->count, data, len);
659 seq->count += len; 659 seq->count += len;
660 return 0; 660 return 0;
661 } 661 }
662 seq->count = seq->size; 662 seq->count = seq->size;
663 return -1; 663 return -1;
664 } 664 }
665 EXPORT_SYMBOL(seq_write); 665 EXPORT_SYMBOL(seq_write);
666 666
667 struct list_head *seq_list_start(struct list_head *head, loff_t pos) 667 struct list_head *seq_list_start(struct list_head *head, loff_t pos)
668 { 668 {
669 struct list_head *lh; 669 struct list_head *lh;
670 670
671 list_for_each(lh, head) 671 list_for_each(lh, head)
672 if (pos-- == 0) 672 if (pos-- == 0)
673 return lh; 673 return lh;
674 674
675 return NULL; 675 return NULL;
676 } 676 }
677 EXPORT_SYMBOL(seq_list_start); 677 EXPORT_SYMBOL(seq_list_start);
678 678
679 struct list_head *seq_list_start_head(struct list_head *head, loff_t pos) 679 struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)
680 { 680 {
681 if (!pos) 681 if (!pos)
682 return head; 682 return head;
683 683
684 return seq_list_start(head, pos - 1); 684 return seq_list_start(head, pos - 1);
685 } 685 }
686 EXPORT_SYMBOL(seq_list_start_head); 686 EXPORT_SYMBOL(seq_list_start_head);
687 687
688 struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos) 688 struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)
689 { 689 {
690 struct list_head *lh; 690 struct list_head *lh;
691 691
692 lh = ((struct list_head *)v)->next; 692 lh = ((struct list_head *)v)->next;
693 ++*ppos; 693 ++*ppos;
694 return lh == head ? NULL : lh; 694 return lh == head ? NULL : lh;
695 } 695 }
696 EXPORT_SYMBOL(seq_list_next); 696 EXPORT_SYMBOL(seq_list_next);
697 697
698 /** 698 /**
699 * seq_hlist_start - start an iteration of a hlist 699 * seq_hlist_start - start an iteration of a hlist
700 * @head: the head of the hlist 700 * @head: the head of the hlist
701 * @pos: the start position of the sequence 701 * @pos: the start position of the sequence
702 * 702 *
703 * Called at seq_file->op->start(). 703 * Called at seq_file->op->start().
704 */ 704 */
705 struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos) 705 struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos)
706 { 706 {
707 struct hlist_node *node; 707 struct hlist_node *node;
708 708
709 hlist_for_each(node, head) 709 hlist_for_each(node, head)
710 if (pos-- == 0) 710 if (pos-- == 0)
711 return node; 711 return node;
712 return NULL; 712 return NULL;
713 } 713 }
714 EXPORT_SYMBOL(seq_hlist_start); 714 EXPORT_SYMBOL(seq_hlist_start);
715 715
716 /** 716 /**
717 * seq_hlist_start_head - start an iteration of a hlist 717 * seq_hlist_start_head - start an iteration of a hlist
718 * @head: the head of the hlist 718 * @head: the head of the hlist
719 * @pos: the start position of the sequence 719 * @pos: the start position of the sequence
720 * 720 *
721 * Called at seq_file->op->start(). Call this function if you want to 721 * Called at seq_file->op->start(). Call this function if you want to
722 * print a header at the top of the output. 722 * print a header at the top of the output.
723 */ 723 */
724 struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos) 724 struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos)
725 { 725 {
726 if (!pos) 726 if (!pos)
727 return SEQ_START_TOKEN; 727 return SEQ_START_TOKEN;
728 728
729 return seq_hlist_start(head, pos - 1); 729 return seq_hlist_start(head, pos - 1);
730 } 730 }
731 EXPORT_SYMBOL(seq_hlist_start_head); 731 EXPORT_SYMBOL(seq_hlist_start_head);
732 732
733 /** 733 /**
734 * seq_hlist_next - move to the next position of the hlist 734 * seq_hlist_next - move to the next position of the hlist
735 * @v: the current iterator 735 * @v: the current iterator
736 * @head: the head of the hlist 736 * @head: the head of the hlist
737 * @pos: the current posision 737 * @pos: the current posision
738 * 738 *
739 * Called at seq_file->op->next(). 739 * Called at seq_file->op->next().
740 */ 740 */
741 struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, 741 struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
742 loff_t *ppos) 742 loff_t *ppos)
743 { 743 {
744 struct hlist_node *node = v; 744 struct hlist_node *node = v;
745 745
746 ++*ppos; 746 ++*ppos;
747 if (v == SEQ_START_TOKEN) 747 if (v == SEQ_START_TOKEN)
748 return head->first; 748 return head->first;
749 else 749 else
750 return node->next; 750 return node->next;
751 } 751 }
752 EXPORT_SYMBOL(seq_hlist_next); 752 EXPORT_SYMBOL(seq_hlist_next);
753
/**
 * seq_hlist_start_rcu - start an iteration of a hlist protected by RCU
 * @head: the head of the hlist
 * @pos: the start position of the sequence
 *
 * Called at seq_file->op->start().  Returns the node at position @pos,
 * or NULL when the hlist has fewer elements.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
				       loff_t pos)
{
	struct hlist_node *node;

	/* every step of __hlist_for_each_rcu() loads via rcu_dereference() */
	__hlist_for_each_rcu(node, head)
		if (pos-- == 0)
			return node;
	return NULL;
}
776
777 /**
778 * seq_hlist_start_head_rcu - start an iteration of a hlist protected by RCU
779 * @head: the head of the hlist
780 * @pos: the start position of the sequence
781 *
782 * Called at seq_file->op->start(). Call this function if you want to
783 * print a header at the top of the output.
784 *
785 * This list-traversal primitive may safely run concurrently with
786 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
787 * as long as the traversal is guarded by rcu_read_lock().
788 */
789 struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
790 loff_t pos)
791 {
792 if (!pos)
793 return SEQ_START_TOKEN;
794
795 return seq_hlist_start_rcu(head, pos - 1);
796 }
797 EXPORT_SYMBOL(seq_hlist_start_head_rcu);
798
799 /**
800 * seq_hlist_next_rcu - move to the next position of the hlist protected by RCU
801 * @v: the current iterator
802 * @head: the head of the hlist
803 * @pos: the current posision
804 *
805 * Called at seq_file->op->next().
806 *
807 * This list-traversal primitive may safely run concurrently with
808 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
809 * as long as the traversal is guarded by rcu_read_lock().
810 */
811 struct hlist_node *seq_hlist_next_rcu(void *v,
812 struct hlist_head *head,
813 loff_t *ppos)
814 {
815 struct hlist_node *node = v;
816
817 ++*ppos;
818 if (v == SEQ_START_TOKEN)
819 return rcu_dereference(head->first);
820 else
821 return rcu_dereference(node->next);
822 }
823 EXPORT_SYMBOL(seq_hlist_next_rcu);
753 824
include/linux/rculist.h
1 #ifndef _LINUX_RCULIST_H 1 #ifndef _LINUX_RCULIST_H
2 #define _LINUX_RCULIST_H 2 #define _LINUX_RCULIST_H
3 3
4 #ifdef __KERNEL__ 4 #ifdef __KERNEL__
5 5
6 /* 6 /*
7 * RCU-protected list version 7 * RCU-protected list version
8 */ 8 */
9 #include <linux/list.h> 9 #include <linux/list.h>
10 #include <linux/rcupdate.h> 10 #include <linux/rcupdate.h>
11 11
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	new->next = next;
	new->prev = prev;
	/*
	 * @new must be fully initialised (both links above) before it is
	 * published; rcu_assign_pointer() orders the stores so concurrent
	 * RCU readers never see a half-built entry.
	 */
	rcu_assign_pointer(prev->next, new);
	next->prev = new;
}
26 26
/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().  Concurrent readers never observe a
 * half-initialised entry: publication happens via rcu_assign_pointer()
 * inside __list_add_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
47 47
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().  Publication happens via
 * rcu_assign_pointer() inside __list_add_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
69 69
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del(entry->prev, entry->next);
	/* ->next stays valid for in-flight readers; only prev is poisoned */
	entry->prev = LIST_POISON2;
}
99 99
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node return true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		/* zero (not poison) pprev so hlist_unhashed() reports true */
		n->pprev = NULL;
	}
}
127 127
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	new->next = old->next;
	new->prev = old->prev;
	/* publish @new only after both of its links are set up */
	rcu_assign_pointer(new->prev->next, new);
	new->next->prev = new;
	/* old->next is left intact for readers still walking through it */
	old->prev = LIST_POISON2;
}
145 145
146 /** 146 /**
147 * list_splice_init_rcu - splice an RCU-protected list into an existing list. 147 * list_splice_init_rcu - splice an RCU-protected list into an existing list.
148 * @list: the RCU-protected list to splice 148 * @list: the RCU-protected list to splice
149 * @head: the place in the list to splice the first list into 149 * @head: the place in the list to splice the first list into
150 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ... 150 * @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
151 * 151 *
152 * @head can be RCU-read traversed concurrently with this function. 152 * @head can be RCU-read traversed concurrently with this function.
153 * 153 *
154 * Note that this function blocks. 154 * Note that this function blocks.
155 * 155 *
156 * Important note: the caller must take whatever action is necessary to 156 * Important note: the caller must take whatever action is necessary to
157 * prevent any other updates to @head. In principle, it is possible 157 * prevent any other updates to @head. In principle, it is possible
158 * to modify the list as soon as sync() begins execution. 158 * to modify the list as soon as sync() begins execution.
159 * If this sort of thing becomes necessary, an alternative version 159 * If this sort of thing becomes necessary, an alternative version
160 * based on call_rcu() could be created. But only if -really- 160 * based on call_rcu() could be created. But only if -really-
161 * needed -- there is no shortage of RCU API members. 161 * needed -- there is no shortage of RCU API members.
162 */ 162 */
163 static inline void list_splice_init_rcu(struct list_head *list, 163 static inline void list_splice_init_rcu(struct list_head *list,
164 struct list_head *head, 164 struct list_head *head,
165 void (*sync)(void)) 165 void (*sync)(void))
166 { 166 {
167 struct list_head *first = list->next; 167 struct list_head *first = list->next;
168 struct list_head *last = list->prev; 168 struct list_head *last = list->prev;
169 struct list_head *at = head->next; 169 struct list_head *at = head->next;
170 170
171 if (list_empty(head)) 171 if (list_empty(head))
172 return; 172 return;
173 173
174 /* "first" and "last" tracking list, so initialize it. */ 174 /* "first" and "last" tracking list, so initialize it. */
175 175
176 INIT_LIST_HEAD(list); 176 INIT_LIST_HEAD(list);
177 177
178 /* 178 /*
179 * At this point, the list body still points to the source list. 179 * At this point, the list body still points to the source list.
180 * Wait for any readers to finish using the list before splicing 180 * Wait for any readers to finish using the list before splicing
181 * the list body into the new list. Any new readers will see 181 * the list body into the new list. Any new readers will see
182 * an empty list. 182 * an empty list.
183 */ 183 */
184 184
185 sync(); 185 sync();
186 186
187 /* 187 /*
188 * Readers are finished with the source list, so perform splice. 188 * Readers are finished with the source list, so perform splice.
189 * The order is important if the new list is global and accessible 189 * The order is important if the new list is global and accessible
190 * to concurrent RCU readers. Note that RCU readers are not 190 * to concurrent RCU readers. Note that RCU readers are not
191 * permitted to traverse the prev pointers without excluding 191 * permitted to traverse the prev pointers without excluding
192 * this function. 192 * this function.
193 */ 193 */
194 194
195 last->next = at; 195 last->next = at;
196 rcu_assign_pointer(head->next, first); 196 rcu_assign_pointer(head->next, first);
197 first->prev = head; 197 first->prev = head;
198 at->prev = last; 198 at->prev = last;
199 } 199 }
200 200
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 * The pointer itself is loaded through rcu_dereference() before the
 * container_of() address arithmetic is applied.
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(rcu_dereference(ptr), type, member)
212 212
/**
 * list_first_entry_rcu - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty: on an empty list this
 * yields the container of the head itself, which is not a valid entry.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_entry_rcu(ptr, type, member) \
	list_entry_rcu((ptr)->next, type, member)
226 226
/*
 * __list_for_each_rcu - iterate over an rcu-protected list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * Every step loads the next pointer through rcu_dereference(), so the
 * walk may run concurrently with the _rcu list-mutation primitives as
 * long as it is guarded by rcu_read_lock().
 */
#define __list_for_each_rcu(pos, head) \
	for (pos = rcu_dereference((head)->next); \
		pos != (head); \
		pos = rcu_dereference(pos->next))
231 231
/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 * Each step goes through list_entry_rcu(), i.e. rcu_dereference().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
		prefetch(pos->member.next), &pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
246 246
247 247
/**
 * list_for_each_continue_rcu
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * Iterate over an rcu-protected list, continuing after current point:
 * @pos must already reference a list node (or the head) on entry, and
 * iteration starts at the element following it.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_continue_rcu(pos, head) \
	for ((pos) = rcu_dereference((pos)->next); \
		prefetch((pos)->next), (pos) != (head); \
		(pos) = rcu_dereference((pos)->next))
263 263
/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.  Each step goes through list_entry_rcu(),
 * i.e. rcu_dereference(), so the usual rcu_read_lock() guard applies.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) \
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
		prefetch(pos->member.next), &pos->member != (head); \
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
277 277
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* ->next stays valid for in-flight readers; only pprev is poisoned */
	n->pprev = LIST_POISON2;
}
302 302
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	new->next = next;
	new->pprev = old->pprev;
	/* publish @new only after both of its links are initialised */
	rcu_assign_pointer(*new->pprev, new);
	if (next)
		new->next->pprev = &new->next;
	/* old->next is left intact for readers still walking through it */
	old->pprev = LIST_POISON2;
}
322 322
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	/* publish @n only after its next/pprev links are set up */
	rcu_assign_pointer(h->first, n);
	if (first)
		first->pprev = &n->next;
}
353 353
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	/* Link @n into place before it is published. */
	n->pprev = next->pprev;
	n->next = next;
	/*
	 * Publish @n by overwriting the predecessor's forward pointer;
	 * rcu_assign_pointer() orders the initialization above first.
	 */
	rcu_assign_pointer(*(n->pprev), n);
	next->pprev = &n->next;
}
380 380
/**
 * hlist_add_after_rcu
 * @prev: the existing element to add the new element after.
 * @n: the new element to add to the hash list.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_after_rcu(struct hlist_node *prev,
					struct hlist_node *n)
{
	/* Initialize @n's link fields before it becomes reachable. */
	n->next = prev->next;
	n->pprev = &prev->next;
	/*
	 * Publish @n after @prev; rcu_assign_pointer() orders the stores
	 * above before making @n visible to concurrent readers.
	 */
	rcu_assign_pointer(prev->next, n);
	if (n->next)
		n->next->pprev = &n->next;
}
408 408
/*
 * __hlist_for_each_rcu - iterate over an RCU-protected hlist
 * @pos:  the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 *
 * Iterates over the raw hlist_node chain (no container lookup) using
 * rcu_dereference() on every link, so it is safe against concurrent
 * _rcu list mutation as long as the traversal is guarded by
 * rcu_read_lock().  The ({ prefetch(...); 1; }) term only warms the
 * cache for the next node; it never terminates the loop.
 */
#define __hlist_for_each_rcu(pos, head) \
	for (pos = rcu_dereference((head)->first); \
	     pos && ({ prefetch(pos->next); 1; }); \
	     pos = rcu_dereference(pos->next))
413
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define hlist_for_each_entry_rcu(tpos, pos, head, member) \
	for (pos = rcu_dereference((head)->first); \
	     /* prefetch and the tpos assignment both yield 1, so only a
	      * NULL @pos ends the loop */ \
	     pos && ({ prefetch(pos->next); 1; }) && \
	     ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
	     pos = rcu_dereference(pos->next))
425 430
426 #endif /* __KERNEL__ */ 431 #endif /* __KERNEL__ */
427 #endif 432 #endif
428 433
include/linux/seq_file.h
1 #ifndef _LINUX_SEQ_FILE_H 1 #ifndef _LINUX_SEQ_FILE_H
2 #define _LINUX_SEQ_FILE_H 2 #define _LINUX_SEQ_FILE_H
3 3
4 #include <linux/types.h> 4 #include <linux/types.h>
5 #include <linux/string.h> 5 #include <linux/string.h>
6 #include <linux/mutex.h> 6 #include <linux/mutex.h>
7 #include <linux/cpumask.h> 7 #include <linux/cpumask.h>
8 #include <linux/nodemask.h> 8 #include <linux/nodemask.h>
9 9
10 struct seq_operations; 10 struct seq_operations;
11 struct file; 11 struct file;
12 struct path; 12 struct path;
13 struct inode; 13 struct inode;
14 struct dentry; 14 struct dentry;
15 15
struct seq_file {
	char *buf;	/* output buffer */
	size_t size;	/* capacity of buf in bytes */
	size_t from;	/* offset into buf of first byte not yet copied out */
	size_t count;	/* number of valid bytes currently in buf */
	loff_t index;	/* iterator position (record index) */
	loff_t read_pos; /* byte position of the next read */
	u64 version;	/* NOTE(review): presumably an iteration-consistency
			 * version stamp — confirm against seq_read() */
	struct mutex lock;	/* serializes access to this seq_file */
	const struct seq_operations *op; /* iterator method table */
	void *private;	/* implementation-private data for ->op callbacks */
};
28 28
/*
 * Iterator method table for a sequential file (see seq_open()):
 * ->start() sets the iterator up and returns the first element,
 * ->stop() shuts it down, ->next() returns the next element, and
 * ->show() prints the current element into the buffer.
 */
struct seq_operations {
	void * (*start) (struct seq_file *m, loff_t *pos);
	void (*stop) (struct seq_file *m, void *v);
	void * (*next) (struct seq_file *m, void *v, loff_t *pos);
	int (*show) (struct seq_file *m, void *v);
};
35 35
36 #define SEQ_SKIP 1 36 #define SEQ_SKIP 1
37 37
38 /** 38 /**
39 * seq_get_buf - get buffer to write arbitrary data to 39 * seq_get_buf - get buffer to write arbitrary data to
40 * @m: the seq_file handle 40 * @m: the seq_file handle
41 * @bufp: the beginning of the buffer is stored here 41 * @bufp: the beginning of the buffer is stored here
42 * 42 *
43 * Return the number of bytes available in the buffer, or zero if 43 * Return the number of bytes available in the buffer, or zero if
44 * there's no space. 44 * there's no space.
45 */ 45 */
46 static inline size_t seq_get_buf(struct seq_file *m, char **bufp) 46 static inline size_t seq_get_buf(struct seq_file *m, char **bufp)
47 { 47 {
48 BUG_ON(m->count > m->size); 48 BUG_ON(m->count > m->size);
49 if (m->count < m->size) 49 if (m->count < m->size)
50 *bufp = m->buf + m->count; 50 *bufp = m->buf + m->count;
51 else 51 else
52 *bufp = NULL; 52 *bufp = NULL;
53 53
54 return m->size - m->count; 54 return m->size - m->count;
55 } 55 }
56 56
57 /** 57 /**
58 * seq_commit - commit data to the buffer 58 * seq_commit - commit data to the buffer
59 * @m: the seq_file handle 59 * @m: the seq_file handle
60 * @num: the number of bytes to commit 60 * @num: the number of bytes to commit
61 * 61 *
62 * Commit @num bytes of data written to a buffer previously acquired 62 * Commit @num bytes of data written to a buffer previously acquired
63 * by seq_buf_get. To signal an error condition, or that the data 63 * by seq_buf_get. To signal an error condition, or that the data
64 * didn't fit in the available space, pass a negative @num value. 64 * didn't fit in the available space, pass a negative @num value.
65 */ 65 */
66 static inline void seq_commit(struct seq_file *m, int num) 66 static inline void seq_commit(struct seq_file *m, int num)
67 { 67 {
68 if (num < 0) { 68 if (num < 0) {
69 m->count = m->size; 69 m->count = m->size;
70 } else { 70 } else {
71 BUG_ON(m->count + num > m->size); 71 BUG_ON(m->count + num > m->size);
72 m->count += num; 72 m->count += num;
73 } 73 }
74 } 74 }
75 75
76 char *mangle_path(char *s, char *p, char *esc); 76 char *mangle_path(char *s, char *p, char *esc);
77 int seq_open(struct file *, const struct seq_operations *); 77 int seq_open(struct file *, const struct seq_operations *);
78 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); 78 ssize_t seq_read(struct file *, char __user *, size_t, loff_t *);
79 loff_t seq_lseek(struct file *, loff_t, int); 79 loff_t seq_lseek(struct file *, loff_t, int);
80 int seq_release(struct inode *, struct file *); 80 int seq_release(struct inode *, struct file *);
81 int seq_escape(struct seq_file *, const char *, const char *); 81 int seq_escape(struct seq_file *, const char *, const char *);
82 int seq_putc(struct seq_file *m, char c); 82 int seq_putc(struct seq_file *m, char c);
83 int seq_puts(struct seq_file *m, const char *s); 83 int seq_puts(struct seq_file *m, const char *s);
84 int seq_write(struct seq_file *seq, const void *data, size_t len); 84 int seq_write(struct seq_file *seq, const void *data, size_t len);
85 85
86 int seq_printf(struct seq_file *, const char *, ...) 86 int seq_printf(struct seq_file *, const char *, ...)
87 __attribute__ ((format (printf,2,3))); 87 __attribute__ ((format (printf,2,3)));
88 88
89 int seq_path(struct seq_file *, struct path *, char *); 89 int seq_path(struct seq_file *, struct path *, char *);
90 int seq_dentry(struct seq_file *, struct dentry *, char *); 90 int seq_dentry(struct seq_file *, struct dentry *, char *);
91 int seq_path_root(struct seq_file *m, struct path *path, struct path *root, 91 int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
92 char *esc); 92 char *esc);
93 int seq_bitmap(struct seq_file *m, const unsigned long *bits, 93 int seq_bitmap(struct seq_file *m, const unsigned long *bits,
94 unsigned int nr_bits); 94 unsigned int nr_bits);
/* Emit @mask via seq_bitmap(), covering all nr_cpu_ids possible CPUs. */
static inline int seq_cpumask(struct seq_file *m, const struct cpumask *mask)
{
	return seq_bitmap(m, cpumask_bits(mask), nr_cpu_ids);
}
99 99
/* Emit @mask via seq_bitmap(), covering all MAX_NUMNODES nodes. */
static inline int seq_nodemask(struct seq_file *m, nodemask_t *mask)
{
	return seq_bitmap(m, mask->bits, MAX_NUMNODES);
}
104 104
105 int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, 105 int seq_bitmap_list(struct seq_file *m, const unsigned long *bits,
106 unsigned int nr_bits); 106 unsigned int nr_bits);
107 107
/* Emit @mask in list form via seq_bitmap_list(), over nr_cpu_ids CPUs. */
static inline int seq_cpumask_list(struct seq_file *m,
				   const struct cpumask *mask)
{
	return seq_bitmap_list(m, cpumask_bits(mask), nr_cpu_ids);
}
113 113
/* Emit @mask in list form via seq_bitmap_list(), over MAX_NUMNODES nodes. */
static inline int seq_nodemask_list(struct seq_file *m, nodemask_t *mask)
{
	return seq_bitmap_list(m, mask->bits, MAX_NUMNODES);
}
118 118
119 int single_open(struct file *, int (*)(struct seq_file *, void *), void *); 119 int single_open(struct file *, int (*)(struct seq_file *, void *), void *);
120 int single_release(struct inode *, struct file *); 120 int single_release(struct inode *, struct file *);
121 void *__seq_open_private(struct file *, const struct seq_operations *, int); 121 void *__seq_open_private(struct file *, const struct seq_operations *, int);
122 int seq_open_private(struct file *, const struct seq_operations *, int); 122 int seq_open_private(struct file *, const struct seq_operations *, int);
123 int seq_release_private(struct inode *, struct file *); 123 int seq_release_private(struct inode *, struct file *);
124 124
125 #define SEQ_START_TOKEN ((void *)1) 125 #define SEQ_START_TOKEN ((void *)1)
126 126
127 /* 127 /*
128 * Helpers for iteration over list_head-s in seq_files 128 * Helpers for iteration over list_head-s in seq_files
129 */ 129 */
130 130
131 extern struct list_head *seq_list_start(struct list_head *head, 131 extern struct list_head *seq_list_start(struct list_head *head,
132 loff_t pos); 132 loff_t pos);
133 extern struct list_head *seq_list_start_head(struct list_head *head, 133 extern struct list_head *seq_list_start_head(struct list_head *head,
134 loff_t pos); 134 loff_t pos);
135 extern struct list_head *seq_list_next(void *v, struct list_head *head, 135 extern struct list_head *seq_list_next(void *v, struct list_head *head,
136 loff_t *ppos); 136 loff_t *ppos);
137 137
138 /* 138 /*
139 * Helpers for iteration over hlist_head-s in seq_files 139 * Helpers for iteration over hlist_head-s in seq_files
140 */ 140 */
141 141
142 extern struct hlist_node *seq_hlist_start(struct hlist_head *head, 142 extern struct hlist_node *seq_hlist_start(struct hlist_head *head,
143 loff_t pos); 143 loff_t pos);
144 extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head, 144 extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head,
145 loff_t pos); 145 loff_t pos);
146 extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, 146 extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head,
147 loff_t *ppos); 147 loff_t *ppos);
148 148
149 extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
150 loff_t pos);
151 extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
152 loff_t pos);
153 extern struct hlist_node *seq_hlist_next_rcu(void *v,
154 struct hlist_head *head,
155 loff_t *ppos);
149 #endif 156 #endif
150 157