Blame view
include/linux/preempt.h
3.94 KB
1da177e4c Linux-2.6.12-rc2 |
1 2 3 4 5 6 7 |
#ifndef __LINUX_PREEMPT_H #define __LINUX_PREEMPT_H /* * include/linux/preempt.h - macros for accessing and manipulating * preempt_count (used for kernel preemption, interrupt count, etc.) */ |
f037360f2 [PATCH] m68k: thr... |
8 |
#include <linux/thread_info.h> |
1da177e4c Linux-2.6.12-rc2 |
9 |
#include <linux/linkage.h> |
e107be36e [PATCH] sched: ar... |
10 |
#include <linux/list.h> |
1da177e4c Linux-2.6.12-rc2 |
11 |
|
6cd8a4bb2 ftrace: trace pre... |
12 |
/*
 * add_preempt_count()/sub_preempt_count() adjust the current thread's
 * preemption counter.  When preempt debugging or the preemption-off
 * tracer is enabled they are real (out-of-line) functions so the
 * changes can be hooked; otherwise they compile to plain arithmetic
 * on the thread_info field.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void add_preempt_count(int val);
extern void sub_preempt_count(int val);
#else
# define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
# define sub_preempt_count(val)	do { preempt_count() -= (val); } while (0)
#endif

#define inc_preempt_count() add_preempt_count(1)
#define dec_preempt_count() sub_preempt_count(1)

/* The preemption count lives in the current task's thread_info. */
#define preempt_count()	(current_thread_info()->preempt_count)

#ifdef CONFIG_PREEMPT

asmlinkage void preempt_schedule(void);

/*
 * Invoke the scheduler if a reschedule of the current task is
 * pending (TIF_NEED_RESCHED set).  Used when a preemption-disabled
 * section ends.
 */
#define preempt_check_resched() \
do { \
	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
		preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */

/* Non-preemptible kernels never reschedule from here. */
#define preempt_check_resched()		do { } while (0)

#endif /* CONFIG_PREEMPT */

#ifdef CONFIG_PREEMPT_COUNT
1da177e4c Linux-2.6.12-rc2 |
42 43 44 45 46 47 48 49 50 51 52 |
/*
 * Disable preemption: bump the count first, then emit a compiler
 * barrier so no memory access from the critical section can be
 * reordered before the increment.
 */
#define preempt_disable() \
do { \
	inc_preempt_count(); \
	barrier(); \
} while (0)

/*
 * Re-enable preemption without checking for a pending reschedule.
 * The barrier comes first here, mirroring preempt_disable(), so the
 * critical section cannot leak past the decrement.
 */
#define preempt_enable_no_resched() \
do { \
	barrier(); \
	dec_preempt_count(); \
} while (0)

/*
 * Re-enable preemption and reschedule if needed.  The extra barrier
 * between the decrement and the resched check keeps the compiler
 * from reordering the two.
 */
#define preempt_enable() \
do { \
	preempt_enable_no_resched(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)

/* For debugging and tracer internals only! */
/*
 * _notrace variants: identical count manipulation, but always the
 * open-coded form so they never call into the (traceable)
 * add/sub_preempt_count functions.  This lets the tracer itself
 * disable preemption without recursing.
 */
#define add_preempt_count_notrace(val) \
	do { preempt_count() += (val); } while (0)
#define sub_preempt_count_notrace(val) \
	do { preempt_count() -= (val); } while (0)
#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)

#define preempt_disable_notrace() \
do { \
	inc_preempt_count_notrace(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	dec_preempt_count_notrace(); \
} while (0)

/* preempt_check_resched is OK to trace */
#define preempt_enable_notrace() \
do { \
	preempt_enable_no_resched_notrace(); \
	barrier(); \
	preempt_check_resched(); \
} while (0)
bdd4e85dc sched: Isolate pr... |
86 |
#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Without CONFIG_PREEMPT_COUNT there is no preemption counter to
 * maintain, so all of these compile away to nothing.
 */
#define preempt_disable()		do { } while (0)
#define preempt_enable_no_resched()	do { } while (0)
#define preempt_enable()		do { } while (0)

#define preempt_disable_notrace()		do { } while (0)
#define preempt_enable_no_resched_notrace()	do { } while (0)
#define preempt_enable_notrace()		do { } while (0)

#endif /* CONFIG_PREEMPT_COUNT */
1da177e4c Linux-2.6.12-rc2 |
96 |
|
e107be36e [PATCH] sched: ar... |
97 98 99 100 101 102 103 104 105 106 107 108 |
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

/*
 * Initialize a notifier before registration: link node cleared and
 * the caller-supplied ops table installed.  Does not register it.
 */
static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}

#endif
1da177e4c Linux-2.6.12-rc2 |
144 |
#endif /* __LINUX_PREEMPT_H */ |