Blame view
include/linux/smp.h
5.32 KB
1da177e4c
|
1 2 3 4 5 6 7 |
#ifndef __LINUX_SMP_H #define __LINUX_SMP_H /* * Generic SMP support * Alan Cox. <alan@redhat.com> */ |
79974a0e4
|
8 |
#include <linux/errno.h> |
54514a70a
|
9 |
#include <linux/types.h> |
3d4422332
|
10 |
#include <linux/list.h> |
3d4422332
|
11 |
#include <linux/cpumask.h> |
04948c7f8
|
12 |
#include <linux/init.h> |
6897fc22e
|
13 |
#include <linux/llist.h> |
1da177e4c
|
14 |
|
3a5f65df5
|
15 |
/* Signature shared by every cross-CPU callback function. */
typedef void (*smp_call_func_t)(void *info);

/*
 * Descriptor for one cross-CPU function call.  Queued onto the target
 * CPU's lock-less call list (the llist member) and consumed from the
 * cross-call interrupt path.
 */
struct call_single_data {
	struct llist_node llist;	/* entry in the per-CPU call list */
	smp_call_func_t func;		/* function to run on the target CPU */
	void *info;			/* opaque argument handed to func */
	unsigned int flags;		/* CSD state bits (lock/sync) - managed
					 * by the SMP core, not defined here */
};
e057d7aea
|
22 23 |
/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

/*
 * Run func(info) on CPU cpuid.  A non-zero wait makes the call
 * synchronous: the caller blocks until func has finished on the
 * target CPU.
 */
int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);

/*
 * Call a function on all processors
 */
int on_each_cpu(smp_call_func_t func, void *info, int wait);

/*
 * Call a function on processors specified by mask, which might include
 * the local one.
 */
void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
		      void *info, bool wait);

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns a positive value. This may include the local
 * processor.
 */
void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
		      smp_call_func_t func, void *info, bool wait,
		      gfp_t gfp_flags);

/*
 * Fire-and-forget variant: never waits for completion.
 * NOTE(review): csd presumably must stay valid until the remote call
 * completes (caller-owned lifetime) - confirm against kernel/smp.c.
 */
int smp_call_function_single_async(int cpu, struct call_single_data *csd);
7cf64f861
|
48 |
|
1da177e4c
|
49 50 51 52 53 54 55 |
#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up (tidle is the idle task for the new CPU).
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
int smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);
2d3854a37
|
94 |
|
2ea6dec4a
|
95 |
/*
 * Run func on any one CPU in mask.
 * NOTE(review): presumably prefers the local CPU when it is in the
 * mask - confirm against the kernel/smp.c implementation.
 */
int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);

/* Force all CPUs through an interrupt, synchronizing with them. */
void kick_all_cpus_sync(void);
/* Wake idle CPUs; presumably so they re-evaluate their idle state. */
void wake_up_all_idle_cpus(void);
f37f435f3
|
100 |
|
3d4422332
|
101 102 103 |
/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
/* Single-queue design: the "many" interrupt is the same handler. */
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

/* Upper bound on CPUs to bring up (presumably the "maxcpus=" boot
 * parameter - confirm in kernel/smp.c). */
extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
ca74a6f84
|
117 |
|
1da177e4c
|
118 |
#else /* !SMP */

/* Uniprocessor: there are no other CPUs to stop. */
static inline void smp_send_stop(void) { }

/*
 * These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
/* UP stand-in for cross-calls: nothing to call, succeed trivially. */
static inline int up_smp_call_function(smp_call_func_t func, void *info)
{
	return 0;
}
/* Note: "wait" is intentionally ignored on UP - there is no remote work. */
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))
3b8967d71
|
130 |
|
79a881022
|
131 |
/* No remote CPU to reschedule on UP. */
static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
/* UP: mask and wait are ignored; degrade to the trivial helper. */
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }
2ea6dec4a
|
136 137 |
/*
 * UP variant: the only possible target is CPU 0, so forward straight
 * to smp_call_function_single().
 */
static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}
2ea6dec4a
|
143 |
|
f37f435f3
|
144 |
/* No other CPUs to kick or wake on UP. */
static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

/*
 * UP smp_init(): either run the arch's late-init hook, or do nothing.
 */
#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif
1da177e4c
|
153 154 155 |
#endif /* !SMP */

/*
 * smp_processor_id(): get the current CPU ID.
 *
 * if DEBUG_PREEMPT is enabled then we check whether it is
 * used in a preemption-safe way. (smp_processor_id() is safe
 * if it's used in a preemption-off critical section, or in
 * a thread that is bound to the current CPU.)
 *
 * NOTE: raw_smp_processor_id() is for internal use only
 * (smp_processor_id() is the preferred variant), but in rare
 * instances it might also be used to turn off false positives
 * (i.e. smp_processor_id() use that the debugging code reports but
 * which use for some reason is legal). Don't use this to hack around
 * the warning message, as your code might not work under PREEMPT.
 */
#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() raw_smp_processor_id()
#endif

/* Disable preemption and return the current CPU; pair with put_cpu(). */
#define get_cpu()		({ preempt_disable(); smp_processor_id(); })
#define put_cpu()		preempt_enable()
1da177e4c
|
179 |
|
a146649bc
|
180 181 182 183 184 |
/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

/* Arch hooks bracketing re-enabling of non-boot CPUs
 * (NOTE(review): presumably used around suspend/resume - confirm). */
extern void arch_enable_nonboot_cpus_begin(void);
extern void arch_enable_nonboot_cpus_end(void);

void smp_setup_processor_id(void);

/*
 * Run func(par) on the given cpu.
 * NOTE(review): "phys" presumably forces execution on that physical
 * CPU - confirm against the kernel/smp.c kerneldoc.
 */
int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);

/* SMP core functions (CPU hotplug state-machine callbacks) */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */