Blame view

include/linux/rcutiny.h 4.31 KB
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
1
2
3
4
5
6
7
8
9
10
11
12
13
14
  /*
   * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
   *
   * This program is free software; you can redistribute it and/or modify
   * it under the terms of the GNU General Public License as published by
   * the Free Software Foundation; either version 2 of the License, or
   * (at your option) any later version.
   *
   * This program is distributed in the hope that it will be useful,
   * but WITHOUT ANY WARRANTY; without even the implied warranty of
   * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   * GNU General Public License for more details.
   *
   * You should have received a copy of the GNU General Public License
87de1cfdc   Paul E. McKenney   rcu: Stop trackin...
15
16
   * along with this program; if not, you can access it online at
   * http://www.gnu.org/licenses/gpl-2.0.html.
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
17
18
19
20
21
22
   *
   * Copyright IBM Corporation, 2008
   *
   * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
   *
   * For detailed explanation of Read-Copy Update mechanism see -
4ce5b9034   Ingo Molnar   rcu: Do tiny clea...
23
   *		Documentation/RCU
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
24
   */
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
25
26
27
28
  #ifndef __LINUX_TINY_H
  #define __LINUX_TINY_H
  
  #include <linux/cache.h>
765a3f4fe   Paul E. McKenney   rcu: Provide grac...
29
30
31
32
33
34
35
36
37
  /*
   * Tiny RCU tracks no grace-period state, so there is nothing to
   * snapshot: always hand back the same constant cookie.
   */
  static inline unsigned long get_state_synchronize_rcu(void)
  {
  	return 0;
  }

  /*
   * With only the constant cookie above, there is never a grace period
   * to wait for; just verify the caller is in a sleepable context.
   */
  static inline void cond_synchronize_rcu(unsigned long oldstate)
  {
  	might_sleep();
  }
24560056d   Paul E. McKenney   rcu: Add RCU-sche...
38
39
40
41
42
43
44
45
46
  /*
   * As with get_state_synchronize_rcu(), no sched-flavor grace-period
   * state is tracked here, so return a constant cookie.
   */
  static inline unsigned long get_state_synchronize_sched(void)
  {
  	return 0;
  }

  /*
   * The constant cookie means there is never anything to wait for;
   * just check for a sleepable context.
   */
  static inline void cond_synchronize_sched(unsigned long oldstate)
  {
  	might_sleep();
  }
2c42818e9   Paul E. McKenney   rcu: Abstract com...
47
48
49
50
51
52
53
54
55
  /*
   * Wait for all outstanding RCU-bh callbacks by posting one
   * call_rcu_bh() callback and waiting for it via wait_rcu_gp().
   */
  static inline void rcu_barrier_bh(void)
  {
  	wait_rcu_gp(call_rcu_bh);
  }

  /*
   * Same as above, but for the sched flavor's callback list.
   */
  static inline void rcu_barrier_sched(void)
  {
  	wait_rcu_gp(call_rcu_sched);
  }
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
56
  /*
   * Expedited grace periods need no special machinery in this
   * implementation; plain synchronize_sched() suffices.
   */
  static inline void synchronize_rcu_expedited(void)
  {
  	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
  }
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
60
  /*
   * rcu_barrier() reduces to rcu_barrier_sched(): with a single
   * callback list, waiting for sched callbacks waits for everything.
   */
  static inline void rcu_barrier(void)
  {
  	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
  }
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
64
  /*
   * RCU-bh grace periods map directly onto sched grace periods here.
   */
  static inline void synchronize_rcu_bh(void)
  {
  	synchronize_sched();
  }
  
  /*
   * Expedited RCU-bh grace periods likewise fall back on
   * synchronize_sched().
   */
  static inline void synchronize_rcu_bh_expedited(void)
  {
  	synchronize_sched();
  }
7b27d5475   Lai Jiangshan   rcu,cleanup: move...
73
74
75
76
  /*
   * No expedited machinery exists here: an expedited sched grace
   * period is just an ordinary one.
   */
  static inline void synchronize_sched_expedited(void)
  {
  	synchronize_sched();
  }
486e25934   Paul E. McKenney   rcu: Avoid waking...
77
  static inline void kfree_call_rcu(struct rcu_head *head,
b6a4ae766   Boqun Feng   rcu: Use rcu_call...
78
  				  rcu_callback_t func)
486e25934   Paul E. McKenney   rcu: Avoid waking...
79
80
81
  {
  	call_rcu(head, func);
  }
38200cf24   Paul E. McKenney   rcu: Remove "cpu"...
82
  /*
   * A context switch is reported to RCU-sched as a quiescent state
   * via rcu_sched_qs().
   */
  static inline void rcu_note_context_switch(void)
  {
  	rcu_sched_qs();
  }
a57eb940d   Paul E. McKenney   rcu: Add a TINY_P...
86
  /*
   * Take advantage of the fact that there is only one CPU, which
   * allows us to ignore virtualization-based context switches.
   */
  static inline void rcu_virt_note_context_switch(int cpu)
  {
  }
  
  /*
   * Return the number of grace periods started.  No counter is
   * maintained in this implementation, so the answer is always zero.
   */
  static inline unsigned long rcu_batches_started(void)
  {
  	return 0;
  }
  
  /*
   * Return the number of bottom-half grace periods started.
   * No counter is maintained, so this is always zero.
   */
  static inline unsigned long rcu_batches_started_bh(void)
  {
  	return 0;
  }

  /*
   * Return the number of sched grace periods started.
   * Likewise always zero here.
   */
  static inline unsigned long rcu_batches_started_sched(void)
  {
  	return 0;
  }
  
  /*
   * Return the number of grace periods completed.  No completion
   * counter is kept, so this is always zero.
   */
  static inline unsigned long rcu_batches_completed(void)
  {
  	return 0;
  }
  
  /*
   * Return the number of bottom-half grace periods completed.
   * Always zero: no counter is kept.
   */
  static inline unsigned long rcu_batches_completed_bh(void)
  {
  	return 0;
  }
c1fe9cde4   Paul E. McKenney   rcu: Provide rcu_...
133
  /*
   * Return the number of sched grace periods completed.
   * Always zero: no counter is kept.
   */
  static inline unsigned long rcu_batches_completed_sched(void)
  {
  	return 0;
  }
  
  /*
   * Forcing quiescent states is unnecessary with only one CPU,
   * so all three flavor-specific hooks are no-ops.
   */
  static inline void rcu_force_quiescent_state(void)
  {
  }

  static inline void rcu_bh_force_quiescent_state(void)
  {
  }

  static inline void rcu_sched_force_quiescent_state(void)
  {
  }
afea227fd   Paul E. McKenney   rcutorture: Expor...
152
153
154
  /* No grace-period kthreads exist in this implementation: nothing to show. */
  static inline void show_rcu_gp_kthreads(void)
  {
  }
53d84e004   Paul E. McKenney   rcu: permit suppr...
155
156
157
  /* No CPU-stall-warning state is tracked here, so this is a no-op. */
  static inline void rcu_cpu_stall_reset(void)
  {
  }
51952bc63   Paul E. McKenney   rcu: Further shri...
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
  /*
   * Idle and interrupt transitions need no bookkeeping in this
   * implementation, so all four hooks below are no-ops.
   */
  static inline void rcu_idle_enter(void)
  {
  }

  static inline void rcu_idle_exit(void)
  {
  }

  static inline void rcu_irq_enter(void)
  {
  }

  static inline void rcu_irq_exit(void)
  {
  }
2439b696c   Paul E. McKenney   rcu: Shrink TINY_...
173
174
175
  /* Task-exit cleanup hook: nothing to clean up in this implementation. */
  static inline void exit_rcu(void)
  {
  }
bbad93798   Paul E. McKenney   rcu: slim down rc...
176
  /*
   * With lock-dependency debugging enabled, the scheduler-active flag
   * and its setter live out of line; otherwise the setter is an empty
   * inline stub.
   */
  #ifdef CONFIG_DEBUG_LOCK_ALLOC
  extern int rcu_scheduler_active __read_mostly;
  void rcu_scheduler_starting(void);
  #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
  static inline void rcu_scheduler_starting(void)
  {
  }
  #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
5c173eb8b   Paul E. McKenney   rcu: Consistent r...
184
  /*
   * When debugging or tracing is configured, defer to the out-of-line
   * __rcu_is_watching(); otherwise RCU is unconditionally watching.
   */
  #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)

  static inline bool rcu_is_watching(void)
  {
  	return __rcu_is_watching();
  }

  #else /* defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */

  static inline bool rcu_is_watching(void)
  {
  	return true;
  }

  #endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
cc6783f78   Paul E. McKenney   rcu: Is it safe t...
197

5cd37193c   Paul E. McKenney   rcu: Make cond_re...
198
199
  /*
   * Report a quiescent state for all flavors: only a compiler barrier
   * is needed here.
   */
  static inline void rcu_all_qs(void)
  {
  	barrier(); /* Avoid RCU read-side critical sections leaking across. */
  }
9b1d82fa1   Paul E. McKenney   rcu: "Tiny RCU", ...
202
  #endif /* __LINUX_RCUTINY_H */