  /*
   *	Definitions for the 'struct ptr_ring' data structure.
   *
   *	Author:
   *		Michael S. Tsirkin <mst@redhat.com>
   *
   *	Copyright (C) 2016 Red Hat, Inc.
   *
   *	This program is free software; you can redistribute it and/or modify it
   *	under the terms of the GNU General Public License as published by the
   *	Free Software Foundation; either version 2 of the License, or (at your
   *	option) any later version.
   *
   *	This is a limited-size FIFO maintaining pointers in FIFO order, with
   *	one CPU producing entries and another consuming them.
   *
   *	This implementation tries to minimize cache contention when there is a
   *	single producer and a single consumer CPU.
   */
  
  #ifndef _LINUX_PTR_RING_H
  #define _LINUX_PTR_RING_H 1
  
  #ifdef __KERNEL__
  #include <linux/spinlock.h>
  #include <linux/cache.h>
  #include <linux/types.h>
  #include <linux/compiler.h>
  #include <linux/slab.h>
  #include <asm/errno.h>
  #endif
  
  struct ptr_ring {
  	int producer ____cacheline_aligned_in_smp;
  	spinlock_t producer_lock;
  	int consumer ____cacheline_aligned_in_smp;
  	spinlock_t consumer_lock;
  	/* Shared consumer/producer data */
  	/* Read-only by both the producer and the consumer */
  	int size ____cacheline_aligned_in_smp; /* max entries in queue */
  	void **queue;
  };
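
  /* All live entries are non-NULL pointers; a NULL slot marks an empty
   * position.  This is why NULL itself must never be produced: a queued NULL
   * would be indistinguishable from an empty slot.
   */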
  
  /* Note: callers invoking this in a loop must use a compiler barrier,
   * for example cpu_relax().  If ring is ever resized, callers must hold
   * producer_lock - see e.g. ptr_ring_full.  Otherwise, if callers don't hold
   * producer_lock, the next call to __ptr_ring_produce may fail.
   */
  static inline bool __ptr_ring_full(struct ptr_ring *r)
  {
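  	/* The slot the producer will fill next is still occupied (non-NULL)
  	 * exactly when the ring is full.
  	 */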
  	return r->queue[r->producer];
  }
  
  static inline bool ptr_ring_full(struct ptr_ring *r)
  {
  	bool ret;
  
  	spin_lock(&r->producer_lock);
  	ret = __ptr_ring_full(r);
  	spin_unlock(&r->producer_lock);
  
  	return ret;
  }
  
  static inline bool ptr_ring_full_irq(struct ptr_ring *r)
  {
  	bool ret;
  
  	spin_lock_irq(&r->producer_lock);
  	ret = __ptr_ring_full(r);
  	spin_unlock_irq(&r->producer_lock);
  
  	return ret;
  }
  
  static inline bool ptr_ring_full_any(struct ptr_ring *r)
  {
  	unsigned long flags;
  	bool ret;
  
  	spin_lock_irqsave(&r->producer_lock, flags);
  	ret = __ptr_ring_full(r);
  	spin_unlock_irqrestore(&r->producer_lock, flags);
  
  	return ret;
  }
  
  static inline bool ptr_ring_full_bh(struct ptr_ring *r)
  {
  	bool ret;
  
  	spin_lock_bh(&r->producer_lock);
  	ret = __ptr_ring_full(r);
  	spin_unlock_bh(&r->producer_lock);
  
  	return ret;
  }
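
  /* The _irq, _any and _bh variants of the helpers in this file differ only
   * in how the lock is taken: spin_lock() for plain process context,
   * spin_lock_irq() when the ring is also used from hard interrupt context,
   * spin_lock_irqsave() when the calling context is unknown, and
   * spin_lock_bh() when the ring is shared with bottom halves.
   */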
  
  /* Note: callers invoking this in a loop must use a compiler barrier,
   * for example cpu_relax(). Callers must hold producer_lock.
   */
  static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
  {
  	if (unlikely(!r->size) || r->queue[r->producer])
  		return -ENOSPC;
  
  	r->queue[r->producer++] = ptr;
  	if (unlikely(r->producer >= r->size))
  		r->producer = 0;
  	return 0;
  }
  
  static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr)
  {
  	int ret;
  
  	spin_lock(&r->producer_lock);
  	ret = __ptr_ring_produce(r, ptr);
  	spin_unlock(&r->producer_lock);
  
  	return ret;
  }
  
  static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr)
  {
  	int ret;
  
  	spin_lock_irq(&r->producer_lock);
  	ret = __ptr_ring_produce(r, ptr);
  	spin_unlock_irq(&r->producer_lock);
  
  	return ret;
  }
  
  static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr)
  {
  	unsigned long flags;
  	int ret;
  
  	spin_lock_irqsave(&r->producer_lock, flags);
  	ret = __ptr_ring_produce(r, ptr);
  	spin_unlock_irqrestore(&r->producer_lock, flags);
  
  	return ret;
  }
  
  static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
  {
  	int ret;
  
  	spin_lock_bh(&r->producer_lock);
  	ret = __ptr_ring_produce(r, ptr);
  	spin_unlock_bh(&r->producer_lock);
  
  	return ret;
  }
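
  /* Example (an illustrative sketch, not part of this header): busy-waiting
   * for space under the producer lock, using cpu_relax() as the note above
   * requires.  'ring' and 'obj' are hypothetical caller-side names.
   *
   *	spin_lock(&ring->producer_lock);
   *	while (__ptr_ring_produce(ring, obj))
   *		cpu_relax();
   *	spin_unlock(&ring->producer_lock);
   */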
  
  /* Note: callers invoking this in a loop must use a compiler barrier,
   * for example cpu_relax(). Callers must take consumer_lock
   * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
   * If ring is never resized, and if the pointer is merely
   * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
   */
  static inline void *__ptr_ring_peek(struct ptr_ring *r)
  {
  	if (likely(r->size))
  		return r->queue[r->consumer];
  	return NULL;
  }
  /* Note: callers invoking this in a loop must use a compiler barrier,
   * for example cpu_relax(). Callers must take consumer_lock
   * if the ring is ever resized - see e.g. ptr_ring_empty.
   */
  static inline bool __ptr_ring_empty(struct ptr_ring *r)
  {
  	return !__ptr_ring_peek(r);
  }
  static inline bool ptr_ring_empty(struct ptr_ring *r)
  {
  	bool ret;
  
  	spin_lock(&r->consumer_lock);
  	ret = __ptr_ring_empty(r);
  	spin_unlock(&r->consumer_lock);
  
  	return ret;
  }
  
  static inline bool ptr_ring_empty_irq(struct ptr_ring *r)
  {
  	bool ret;
  
  	spin_lock_irq(&r->consumer_lock);
  	ret = __ptr_ring_empty(r);
  	spin_unlock_irq(&r->consumer_lock);
  
  	return ret;
  }
  
  static inline bool ptr_ring_empty_any(struct ptr_ring *r)
  {
  	unsigned long flags;
  	bool ret;
  
  	spin_lock_irqsave(&r->consumer_lock, flags);
  	ret = __ptr_ring_empty(r);
  	spin_unlock_irqrestore(&r->consumer_lock, flags);
  
  	return ret;
  }
  
  static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
  {
  	bool ret;
  
  	spin_lock_bh(&r->consumer_lock);
  	ret = __ptr_ring_empty(r);
  	spin_unlock_bh(&r->consumer_lock);
  
  	return ret;
  }
  /* Must only be called after __ptr_ring_peek returned !NULL */
  static inline void __ptr_ring_discard_one(struct ptr_ring *r)
  {
  	r->queue[r->consumer++] = NULL;
  	if (unlikely(r->consumer >= r->size))
  		r->consumer = 0;
  }
  
  static inline void *__ptr_ring_consume(struct ptr_ring *r)
  {
  	void *ptr;
  
  	ptr = __ptr_ring_peek(r);
  	if (ptr)
  		__ptr_ring_discard_one(r);
  
  	return ptr;
  }
  
  static inline void *ptr_ring_consume(struct ptr_ring *r)
  {
  	void *ptr;
  
  	spin_lock(&r->consumer_lock);
  	ptr = __ptr_ring_consume(r);
  	spin_unlock(&r->consumer_lock);
  
  	return ptr;
  }
  
  static inline void *ptr_ring_consume_irq(struct ptr_ring *r)
  {
  	void *ptr;
  
  	spin_lock_irq(&r->consumer_lock);
  	ptr = __ptr_ring_consume(r);
  	spin_unlock_irq(&r->consumer_lock);
  
  	return ptr;
  }
  
  static inline void *ptr_ring_consume_any(struct ptr_ring *r)
  {
  	unsigned long flags;
  	void *ptr;
  
  	spin_lock_irqsave(&r->consumer_lock, flags);
  	ptr = __ptr_ring_consume(r);
  	spin_unlock_irqrestore(&r->consumer_lock, flags);
  
  	return ptr;
  }
  
  static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
  {
  	void *ptr;
  
  	spin_lock_bh(&r->consumer_lock);
  	ptr = __ptr_ring_consume(r);
  	spin_unlock_bh(&r->consumer_lock);
  
  	return ptr;
  }
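
  /* Example (an illustrative sketch, not part of this header): draining the
   * ring; ptr_ring_consume() returns NULL once the ring is empty.  'ring'
   * and 'process' are hypothetical caller-side names.
   *
   *	void *obj;
   *
   *	while ((obj = ptr_ring_consume(ring)))
   *		process(obj);
   */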
  
  /* Call a function on the head entry without discarding it from the FIFO.
   * The function is passed the stored pointer (NULL when the ring is empty)
   * and must return a value.
   * Callers must take consumer_lock.
   */
  #define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r)))
  
  #define PTR_RING_PEEK_CALL(r, f) ({ \
  	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
  	\
  	spin_lock(&(r)->consumer_lock); \
  	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
  	spin_unlock(&(r)->consumer_lock); \
  	__PTR_RING_PEEK_CALL_v; \
  })
  
  #define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \
  	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
  	\
  	spin_lock_irq(&(r)->consumer_lock); \
  	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
  	spin_unlock_irq(&(r)->consumer_lock); \
  	__PTR_RING_PEEK_CALL_v; \
  })
  
  #define PTR_RING_PEEK_CALL_BH(r, f) ({ \
  	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
  	\
  	spin_lock_bh(&(r)->consumer_lock); \
  	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
  	spin_unlock_bh(&(r)->consumer_lock); \
  	__PTR_RING_PEEK_CALL_v; \
  })
  
  #define PTR_RING_PEEK_CALL_ANY(r, f) ({ \
  	typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \
  	unsigned long __PTR_RING_PEEK_CALL_f;\
  	\
  	spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
  	__PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \
  	spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \
  	__PTR_RING_PEEK_CALL_v; \
  })
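
  /* Example (an illustrative sketch, not part of this header): reading a
   * property of the head entry without dequeueing it.  'entry_len' is a
   * hypothetical function; note that it is called with NULL when the ring is
   * empty (hence typeof((f)(NULL)) above), so it must tolerate a NULL
   * argument.
   *
   *	int len = PTR_RING_PEEK_CALL(ring, entry_len);
   */
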
  static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
  {
  	return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
  }
  static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
  {
  	r->queue = __ptr_ring_init_queue_alloc(size, gfp);
  	if (!r->queue)
  		return -ENOMEM;
  
  	r->size = size;
  	r->producer = r->consumer = 0;
  	spin_lock_init(&r->producer_lock);
  	spin_lock_init(&r->consumer_lock);
  
  	return 0;
  }
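
  /* Example (an illustrative sketch, not part of this header): minimal
   * lifecycle of a ring.  'my_destroy' is a hypothetical callback that frees
   * any entries still queued at cleanup time.
   *
   *	struct ptr_ring ring;
   *	int err = ptr_ring_init(&ring, 128, GFP_KERNEL);
   *
   *	if (err)
   *		return err;
   *	...
   *	ptr_ring_cleanup(&ring, my_destroy);
   */
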
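  /* Replace the ring's queue with 'queue', migrating any entries already
   * produced; entries that do not fit are passed to 'destroy'.  Returns the
   * old queue, which the caller must kfree().  Callers must hold
   * producer_lock.
   */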
  static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
  					   int size, gfp_t gfp,
  					   void (*destroy)(void *))
  {
  	int producer = 0;
  	void **old;
  	void *ptr;
  	while ((ptr = ptr_ring_consume(r)))
  		if (producer < size)
  			queue[producer++] = ptr;
  		else if (destroy)
  			destroy(ptr);
  
  	r->size = size;
  	r->producer = producer;
  	r->consumer = 0;
  	old = r->queue;
  	r->queue = queue;
  	return old;
  }
  
  static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
  				  void (*destroy)(void *))
  {
  	unsigned long flags;
  	void **queue = __ptr_ring_init_queue_alloc(size, gfp);
  	void **old;
  
  	if (!queue)
  		return -ENOMEM;
  
  	spin_lock_irqsave(&(r)->producer_lock, flags);
  
  	old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy);
  	spin_unlock_irqrestore(&(r)->producer_lock, flags);
  
  	kfree(old);
  
  	return 0;
  }
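
  /* Example (an illustrative sketch, not part of this header): resizing a
   * ring; entries that do not fit in the new queue are passed to the destroy
   * callback.  The new queue is allocated before any ring state changes, so
   * on failure the ring is left untouched.  'ring', 'new_size' and
   * 'my_destroy' are hypothetical caller-side names.
   *
   *	if (ptr_ring_resize(ring, new_size, GFP_KERNEL, my_destroy))
   *		return -ENOMEM;
   */
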
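  /* Resize several rings to the same new size.  All replacement queues are
   * allocated up front, so either every ring is resized or, on -ENOMEM, none
   * of them is modified.
   */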
  static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
  					   int size,
  					   gfp_t gfp, void (*destroy)(void *))
  {
  	unsigned long flags;
  	void ***queues;
  	int i;
  
  	queues = kmalloc(nrings * sizeof *queues, gfp);
  	if (!queues)
  		goto noqueues;
  
  	for (i = 0; i < nrings; ++i) {
  		queues[i] = __ptr_ring_init_queue_alloc(size, gfp);
  		if (!queues[i])
  			goto nomem;
  	}
  
  	for (i = 0; i < nrings; ++i) {
  		spin_lock_irqsave(&(rings[i])->producer_lock, flags);
  		queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
  						  size, gfp, destroy);
  		spin_unlock_irqrestore(&(rings[i])->producer_lock, flags);
  	}
  
  	for (i = 0; i < nrings; ++i)
  		kfree(queues[i]);
  
  	kfree(queues);
  
  	return 0;
  
  nomem:
  	while (--i >= 0)
  		kfree(queues[i]);
  
  	kfree(queues);
  
  noqueues:
  	return -ENOMEM;
  }
  static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
  {
  	void *ptr;
  
  	if (destroy)
  		while ((ptr = ptr_ring_consume(r)))
  			destroy(ptr);
  	kfree(r->queue);
  }
  
  #endif /* _LINUX_PTR_RING_H */