#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We use the MSB mostly because it's available; see <linux/preempt_mask.h> for
 * the other bits -- can't include that header due to inclusion hell.
 */
#define PREEMPT_NEED_RESCHED	0x80000000

/*
 * NOTE(review): <asm/preempt.h> is included *after* PREEMPT_NEED_RESCHED is
 * defined -- presumably the arch implementation relies on that ordering;
 * confirm before moving this include.
 */
#include <asm/preempt.h>
/*
 * Core preempt_count manipulation.
 *
 * With CONFIG_DEBUG_PREEMPT or CONFIG_PREEMPT_TRACER the add/sub operations
 * go through out-of-line functions (defined elsewhere); otherwise they map
 * straight onto the arch-provided __preempt_count_*() helpers.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
/* Decrement preempt_count, then report whether a reschedule is due. */
#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

/* Raw inc/dec -- always the arch helpers, never the debug/trace hooks. */
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

/* Inc/dec that honour the debug/trace configuration above. */
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
#ifdef CONFIG_PREEMPT_COUNT

/*
 * Disable preemption: bump the count first, then a compiler barrier so the
 * protected region cannot be reordered before the increment.
 */
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

/*
 * Re-enable without checking for a pending reschedule; barrier before the
 * decrement mirrors preempt_disable().
 */
#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#ifdef CONFIG_PREEMPT
/*
 * Re-enable preemption and, if the count dropped to zero with a reschedule
 * pending, call into the scheduler.
 */
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

/* Explicitly poll for a needed reschedule without touching the count. */
#define preempt_check_resched() \
do { \
	if (should_resched()) \
		__preempt_schedule(); \
} while (0)

#else
/* !CONFIG_PREEMPT: enable only drops the count, never reschedules here. */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)
#define preempt_check_resched() do { } while (0)
#endif
/*
 * _notrace variants: identical shape to the ones above, but they use the raw
 * __preempt_count_*() helpers so they bypass the debug/trace hooks.
 */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

/* Without context tracking, the context-aware reschedule is the plain one. */
#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif
#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 * (The no-resched and check-resched variants are hidden from modules.)
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif
/* Unconditionally mark a reschedule as needed in preempt_count. */
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)
/*
 * Fold the TIF need-resched flag into preempt_count: only set the
 * preempt-level flag when the thread-info flag is already set.
 */
#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

/* Initialize a notifier's list linkage and attach its ops table. */
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

#endif /* CONFIG_PREEMPT_NOTIFIERS */

#endif /* __LINUX_PREEMPT_H */