Blame view
include/linux/rcutree.h
2.33 KB
64db4cfff
|
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 |
/* * Read-Copy Update mechanism for mutual exclusion (tree-based version) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Copyright IBM Corporation, 2008 * * Author: Dipankar Sarma <dipankar@in.ibm.com> * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm * * Based on the original work by Paul McKenney <paulmck@us.ibm.com> * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. * * For detailed explanation of Read-Copy Update mechanism see - |
a71fca58b
|
27 |
* Documentation/RCU |
64db4cfff
|
28 29 30 31 |
*/ #ifndef __LINUX_RCUTREE_H #define __LINUX_RCUTREE_H |
/*
 * Core entry points implemented elsewhere in the RCU tree flavor;
 * declared here for use by scheduler, idle, and stall-detection code.
 */
extern void rcu_init(void);
extern void rcu_note_context_switch(int cpu);
extern int rcu_needs_cpu(int cpu);
extern void rcu_cpu_stall_reset(void);
#ifdef CONFIG_TREE_PREEMPT_RCU

extern void exit_rcu(void);

#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */

/* Nothing to do at task exit when preemptible RCU is not configured. */
static inline void exit_rcu(void)
{
}

#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
extern void synchronize_rcu_bh(void);
extern void synchronize_sched_expedited(void);
extern void synchronize_rcu_expedited(void);

/*
 * Expedited grace period for RCU-bh: implemented directly in terms of
 * the RCU-sched expedited primitive (see the forwarding call below).
 */
static inline void synchronize_rcu_bh_expedited(void)
{
	synchronize_sched_expedited();
}
extern void rcu_barrier(void);

/* Per-flavor completed-batch counters and forced-quiescent-state hooks. */
extern long rcu_batches_completed(void);
extern long rcu_batches_completed_bh(void);
extern long rcu_batches_completed_sched(void);
extern void rcu_force_quiescent_state(void);
extern void rcu_bh_force_quiescent_state(void);
extern void rcu_sched_force_quiescent_state(void);
64db4cfff
|
61 |
|
/* A context switch is a grace period for RCU-sched and RCU-bh. */
static inline int rcu_blocking_is_gp(void)
{
	int ncpus = num_online_cpus();

	return ncpus == 1;
}
/* Scheduler bring-up notification and the flag that gates on it. */
extern void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;

#endif /* __LINUX_RCUTREE_H */