Blame view
include/linux/rcutiny.h
2.91 KB
9b1d82fa1
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 |
/* * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright IBM Corporation, 2008 * * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> * * For detailed explanation of Read-Copy Update mechanism see - |
4ce5b9034
|
23 |
* Documentation/RCU |
9b1d82fa1
|
24 |
*/ |
9b1d82fa1
|
25 26 27 28 |
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <linux/cache.h>

/*
 * Tiny RCU has no scheduler-related setup to perform, so rcu_init_sched()
 * is a no-op.  The do { } while (0) form keeps the empty macro safe to
 * use as an ordinary statement (e.g. in an un-braced if branch).
 */
#define rcu_init_sched()	do { } while (0)
a57eb940d
|
31 |
#ifdef CONFIG_TINY_RCU

/*
 * With only one CPU, an expedited grace period degenerates into a plain
 * synchronize_sched(), which is already fast on a uniprocessor.
 */
static inline void synchronize_rcu_expedited(void)
{
	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
}

/*
 * A single CPU means a single list of callbacks, so waiting for the
 * RCU-sched callbacks to drain suffices for rcu_barrier().
 */
static inline void rcu_barrier(void)
{
	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
}

#else /* #ifdef CONFIG_TINY_RCU */

/*
 * Non-TINY_RCU build of this header (presumably the preemptible tiny
 * variant -- confirm against the Kconfig): real implementations are
 * provided out of line.
 */
void rcu_barrier(void);
void synchronize_rcu_expedited(void);

#endif /* #else #ifdef CONFIG_TINY_RCU */
6ebb237be
|
47 |
|
a57eb940d
|
48 |
/*
 * On a uniprocessor, a full synchronize_sched() grace period also implies
 * a bottom-half grace period, so both bh variants map straight onto it.
 */
static inline void synchronize_rcu_bh(void)
{
	synchronize_sched();
}

static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched();
}
a57eb940d
|
57 58 59 60 61 |
#ifdef CONFIG_TINY_RCU

/* Non-preemptible Tiny RCU: nothing to note at context switch. */
static inline void rcu_preempt_note_context_switch(void)
{
}

/* No per-task RCU state to clean up on task exit. */
static inline void exit_rcu(void)
{
}

/*
 * Always reports 0 for Tiny RCU; presumably "this CPU has no RCU work
 * pending" -- confirm the contract against the callers in the idle path.
 */
static inline int rcu_needs_cpu(int cpu)
{
	return 0;
}

#else /* #ifdef CONFIG_TINY_RCU */

void rcu_preempt_note_context_switch(void);
extern void exit_rcu(void);
int rcu_preempt_needs_cpu(void);

/* Preemptible variant: defer the "needs CPU" decision to rcu_preempt. */
static inline int rcu_needs_cpu(int cpu)
{
	return rcu_preempt_needs_cpu();
}

#endif /* #else #ifdef CONFIG_TINY_RCU */

/*
 * Record a context switch: report an RCU-sched quiescent state for this
 * CPU and let the preemptible flavor (a no-op under CONFIG_TINY_RCU,
 * see above) do its bookkeeping.
 */
static inline void rcu_note_context_switch(int cpu)
{
	rcu_sched_qs(cpu);
	rcu_preempt_note_context_switch();
}
a57eb940d
|
86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 |
/*
 * Grace-period counters: Tiny RCU keeps no such statistics, so both
 * counters report a constant zero.
 */
static inline long rcu_batches_completed(void)
{
	return 0;
}

static inline long rcu_batches_completed_bh(void)
{
	return 0;
}

/*
 * Forcing a quiescent state is meaningless on a single CPU, so all
 * three flavors are empty stubs.
 */
static inline void rcu_bh_force_quiescent_state(void)
{
}

static inline void rcu_sched_force_quiescent_state(void)
{
}

static inline void rcu_force_quiescent_state(void)
{
}
53d84e004
|
113 114 115 |
static inline void rcu_cpu_stall_reset(void) { } |
bbad93798
|
116 117 118 119 120 121 122 123 124 125 126 127 |
#ifdef CONFIG_DEBUG_LOCK_ALLOC extern int rcu_scheduler_active __read_mostly; extern void rcu_scheduler_starting(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ static inline void rcu_scheduler_starting(void) { } #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
9b1d82fa1
|
128 |
#endif /* __LINUX_RCUTINY_H */ |