Blame view
include/linux/preempt.h
10 KB
b24413180 License cleanup: ... |
1 |
/* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4c Linux-2.6.12-rc2 |
2 3 4 5 6 7 8 |
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */
1da177e4c Linux-2.6.12-rc2 |
9 |
#include <linux/linkage.h> |
e107be36e [PATCH] sched: ar... |
10 |
#include <linux/list.h> |
1da177e4c Linux-2.6.12-rc2 |
11 |
|
f27dde8de sched: Add NEED_R... |
12 |
/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which reenable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:	0x000000ff
 *         SOFTIRQ_MASK:	0x0000ff00
 *         HARDIRQ_MASK:	0x000f0000
 *             NMI_MASK:	0x00f00000
 * PREEMPT_NEED_RESCHED:	0x80000000
 */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	4
#define NMI_BITS	4

/* Each field starts where the previous one ends. */
#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)

/* Mask of the low x bits, i.e. a field of width x before shifting. */
#define __IRQ_MASK(x)	((1UL << (x))-1)

#define PREEMPT_MASK	(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK	(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

/* The value added to preempt_count for one level of nesting in each field. */
#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET	(1UL << NMI_SHIFT)

/*
 * Disabling softirqs (as opposed to serving one) adds 2 to the softirq
 * count, keeping the lowest softirq bit free to mean "actually serving a
 * softirq" — see in_serving_softirq(), which tests SOFTIRQ_OFFSET.
 */
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
d04b0ad37 sched/headers: Mo... |
54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 |
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) /* * Disable preemption until the scheduler is running -- use an unconditional * value so that it also works on !PREEMPT_COUNT kernels. * * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count(). */ #define INIT_PREEMPT_COUNT PREEMPT_OFFSET /* * Initial preempt_count value; reflects the preempt_count schedule invariant * which states that during context switches: * * preempt_count() == 2*PREEMPT_DISABLE_OFFSET * * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels. * Note: See finish_task_switch(). */ #define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED) |
2e10e71ce sched/preempt: Re... |
74 75 |
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ #include <asm/preempt.h> |
92cf21187 sched/preempt: Me... |
76 77 78 79 80 81 82 |
/* Extract the individual nesting fields out of the current preempt_count. */
#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()       - We're in (hard) IRQ context
 * in_softirq()   - We have BH disabled, or are processing softirqs
 * in_interrupt() - We're in NMI,IRQ,SoftIRQ context or have BH disabled
 * in_serving_softirq() - We're in softirq context
 * in_nmi()       - We're in NMI context
 * in_task()      - We're in task context
 *
 * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
 *       should not be used in new code.
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
/* SOFTIRQ_OFFSET (not the full mask) is only set while serving a softirq. */
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi()		(preempt_count() & NMI_MASK)
#define in_task()		(!(preempt_count() & \
				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
92cf21187 sched/preempt: Me... |
101 |
|
fe32d3cd5 sched/preempt: Fi... |
102 103 104 |
/*
 * The preempt_count offset after preempt_disable();
 *
 * On !CONFIG_PREEMPT_COUNT kernels preempt_disable() does not touch the
 * count at all (see below), hence the offset is 0 there.
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences of:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * Work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET	(SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
92cf21187 sched/preempt: Me... |
130 131 132 133 134 135 136 137 |
/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)

/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 *
 * i.e. the only thing held is the single preempt_disable() level itself,
 * which is PREEMPT_DISABLE_OFFSET (0 on !PREEMPT_COUNT kernels).
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
92cf21187 sched/preempt: Me... |
145 |
|
c3bc8fd63 tracing: Centrali... |
146 |
/*
 * With debugging or preempt-toggle tracing enabled, count manipulation is
 * done out of line so it can be checked/traced; otherwise it maps straight
 * to the raw per-arch __preempt_count_*() operations from <asm/preempt.h>.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
/*
 * The out-of-line sub does not return the "became zero" result, so
 * re-evaluate the resched condition explicitly via should_resched(0).
 */
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

/* Raw (never traced/debugged) inc/dec, used by the _notrace variants. */
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
bdd4e85dc sched: Isolate pr... |
161 162 |
#ifdef CONFIG_PREEMPT_COUNT

/*
 * barrier() is a compiler barrier: it keeps the compiler from moving
 * memory accesses of the critical section across the count update.
 * Note the ordering: the count is bumped before the barrier on disable,
 * and dropped after the barrier on enable.
 */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())

#ifdef CONFIG_PREEMPTION
/*
 * Fully preemptible kernel: dropping the last preempt_count level must
 * immediately reschedule if a resched was requested meanwhile.
 */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

/* As above, but using the raw count ops so tracers cannot recurse. */
#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPTION */
/* Voluntary-preemption kernels only drop the count; no direct reschedule. */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched() do { } while (0)
#endif /* CONFIG_PREEMPTION */

/* _notrace variants: identical semantics, but never debug/trace the count. */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
1da177e4c Linux-2.6.12-rc2 |
241 |
|
62b94a08d sched/preempt: Ta... |
242 243 244 245 246 247 248 249 250 |
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 * (The _no_resched/check_resched primitives are core-kernel only.)
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

/* Unconditionally mark a reschedule as pending in the preempt_count. */
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)

/* Fold the thread-info TIF_NEED_RESCHED flag into the preempt_count. */
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
8cb75e0c4 sched/preempt: Fi... |
260 |
|
e107be36e [PATCH] sched: ar... |
261 262 263 264 265 266 267 268 269 270 271 272 |
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *            notifier: struct preempt_notifier for the task being scheduled
 *            cpu:      cpu we're scheduled on
 * @sched_out: we've just been preempted
 *             notifier: struct preempt_notifier for the task being preempted
 *             next:     the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts. sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled. This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

/* Initialize a notifier: empty hlist link plus the caller-supplied ops. */
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

#endif
66630058e sched/rt: Provide... |
309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 |
/**
 * migrate_disable - Prevent migration of the current task
 *
 * Maps to preempt_disable() which also disables preemption. Use
 * migrate_disable() to annotate that the intent is to prevent migration,
 * but not necessarily preemption.
 *
 * Can be invoked nested like preempt_disable() and needs the corresponding
 * number of migrate_enable() invocations.
 */
static __always_inline void migrate_disable(void)
{
	preempt_disable();
}

/**
 * migrate_enable - Allow migration of the current task
 *
 * Counterpart to migrate_disable().
 *
 * As migrate_disable() can be invoked nested, only the outermost invocation
 * reenables migration.
 *
 * Currently mapped to preempt_enable().
 */
static __always_inline void migrate_enable(void)
{
	preempt_enable();
}
1da177e4c Linux-2.6.12-rc2 |
338 |
#endif /* __LINUX_PREEMPT_H */ |