Blame view
include/linux/cpu.h
8.25 KB
1da177e4c
|
1 2 3 4 5 6 7 8 |
/* * include/linux/cpu.h - generic cpu definition * * This is mainly for topological representation. We define the * basic 'struct cpu' here, which can be embedded in per-arch * definitions of processors. * * Basic handling of the devices is done in drivers/base/cpu.c |
1da177e4c
|
9 |
* |
611a75e18
|
10 |
* CPUs are exported via sysfs in the devices/system/cpu |
1da177e4c
|
11 |
* directory. |
1da177e4c
|
12 13 14 |
*/ #ifndef _LINUX_CPU_H_ #define _LINUX_CPU_H_ |
1da177e4c
|
15 16 17 |
#include <linux/node.h> #include <linux/compiler.h> #include <linux/cpumask.h> |
1da177e4c
|
18 |
|
313162d0b
|
19 |
struct device; |
d1cb9d1af
|
20 |
struct device_node; |
313162d0b
|
21 |
|
1da177e4c
|
22 23 |
struct cpu { int node_id; /* The node which contains the CPU */ |
72486f1f8
|
24 |
int hotpluggable; /* creates sysfs control file if hotpluggable */ |
8a25a2fd1
|
25 |
struct device dev; |
1da177e4c
|
26 |
}; |
76b67ed9d
|
27 |
extern int register_cpu(struct cpu *cpu, int num); |
8a25a2fd1
|
28 |
extern struct device *get_cpu_device(unsigned cpu); |
2987557f5
|
29 |
extern bool cpu_is_hotpluggable(unsigned cpu); |
183912d35
|
30 |
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id); |
d1cb9d1af
|
31 32 |
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun, int cpu, unsigned int *thread); |
0344c6c53
|
33 |
|
8a25a2fd1
|
34 35 |
extern int cpu_add_dev_attr(struct device_attribute *attr); extern void cpu_remove_dev_attr(struct device_attribute *attr); |
0344c6c53
|
36 |
|
8a25a2fd1
|
37 38 |
extern int cpu_add_dev_attr_group(struct attribute_group *attrs); extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); |
0344c6c53
|
39 |
|
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
/* Arch hooks backing the sysfs "probe"/"release" control files. */
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif
struct notifier_block;
/*
 * CPU notifier priorities.
 */
enum {
	/*
	 * SCHED_ACTIVE marks a cpu which is coming up active during
	 * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
	 * notifier.  CPUSET_ACTIVE adjusts cpuset according to
	 * cpu_active mask right after SCHED_ACTIVE.  During
	 * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
	 * ordered in the similar way.
	 *
	 * This ordering guarantees consistent cpu_active mask and
	 * migration behavior to all cpu notifiers.
	 */
	CPU_PRI_SCHED_ACTIVE	= INT_MAX,
	CPU_PRI_CPUSET_ACTIVE	= INT_MAX - 1,
	CPU_PRI_SCHED_INACTIVE	= INT_MIN + 1,
	CPU_PRI_CPUSET_INACTIVE	= INT_MIN,

	/* migration should happen before other stuff but after perf */
	CPU_PRI_PERF		= 20,
	CPU_PRI_MIGRATION	= 10,

	/* bring up workqueues before normal notifiers and down after */
	CPU_PRI_WORKQUEUE_UP	= 5,
	CPU_PRI_WORKQUEUE_DOWN	= -5,
};
/* Hotplug event codes passed to CPU notifiers ((unsigned)v is the cpu). */
#define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE	0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED		0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD		0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING		0x0008 /* CPU (unsigned)v not running any task,
					* not handling interrupts, soon dead.
					* Called on the dying cpu, interrupts
					* are already disabled. Must not
					* sleep, must not fail */
#define CPU_POST_DEAD		0x0009 /* CPU (unsigned)v dead, cpu_hotplug
					* lock is dropped */
#define CPU_STARTING		0x000A /* CPU (unsigned)v soon running.
					* Called on the new cpu, just before
					* enabling interrupts. Must not sleep,
					* must not fail */

/* Used for CPU hotplug events occurring while tasks are frozen due to a suspend
 * operation in progress
 */
#define CPU_TASKS_FROZEN	0x0010

#define CPU_ONLINE_FROZEN	(CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN	(CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN	(CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN	(CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN	(CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN		(CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN	(CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN	(CPU_STARTING | CPU_TASKS_FROZEN)
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
/*
 * Register fn as a CPU notifier with the given priority.  The
 * notifier_block is static so the registration survives the caller;
 * the __ variant expects the caller to hold the registration lock
 * (cpu_notifier_register_begin/done).
 */
#define cpu_notifier(fn, pri) {					\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	register_cpu_notifier(&fn##_nb);			\
}

#define __cpu_notifier(fn, pri) {				\
	static struct notifier_block fn##_nb =			\
		{ .notifier_call = fn, .priority = pri };	\
	__register_cpu_notifier(&fn##_nb);			\
}
#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */

#ifdef CONFIG_HOTPLUG_CPU
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);
#else
#ifndef MODULE
extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
#else
static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}
#endif
static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif

int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

/* Registration lock for the __ notifier variants above. */
#define cpu_notifier_register_begin	cpu_maps_update_begin
#define cpu_notifier_register_done	cpu_maps_update_done

#else	/* CONFIG_SMP */

#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri)	do { (void)(fn); } while (0)

/* UP: no CPUs ever come or go, so all of these are no-op stubs. */
static inline int register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void cpu_maps_update_begin(void)
{
}

static inline void cpu_maps_update_done(void)
{
}

static inline void cpu_notifier_register_begin(void)
{
}

static inline void cpu_notifier_register_done(void)
{
}

#endif /* CONFIG_SMP */
extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */

extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb)	__register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb)	__unregister_cpu_notifier(nb)
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);

#else		/* CONFIG_HOTPLUG_CPU */

static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus()	do { } while (0)
#define put_online_cpus()	do { } while (0)
#define cpu_hotplug_disable()	do { } while (0)
#define cpu_hotplug_enable()	do { } while (0)
#define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb)	({ (void)(nb); })
#endif		/* CONFIG_HOTPLUG_CPU */
1da177e4c
|
229 |
|
#ifdef CONFIG_PM_SLEEP_SMP
/* Take down / bring back every CPU but the boot CPU around suspend. */
extern int disable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */
/* Coarse hotplug state handed to the idle-loop entry point. */
enum cpuhp_state {
	CPUHP_OFFLINE,
	CPUHP_ONLINE,
};

void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle(void);

/* Idle-loop control and per-arch idle callbacks. */
void cpu_idle_poll_ctrl(bool enable);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);
1da177e4c
|
252 |
#endif /* _LINUX_CPU_H_ */ |