#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>
  /*
   * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
   * the other bits -- can't include that header due to inclusion hell.
   */
  #define PREEMPT_NEED_RESCHED	0x80000000
a78787092   Peter Zijlstra   sched, arch: Crea...
16
  #include <asm/preempt.h>
f27dde8de   Peter Zijlstra   sched: Add NEED_R...
17

/*
 * Debug kernels (CONFIG_DEBUG_PREEMPT) and the preempt-off tracer route
 * preempt_count changes through out-of-line functions so the change can
 * be checked/traced; otherwise the operations map straight onto the
 * arch-provided __preempt_count_*() primitives from <asm/preempt.h>.
 */
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

/* Raw single-step helpers; always bypass the debug/tracer path. */
#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

/* Single-step helpers that go through the (possibly traced) path above. */
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
  
  #ifdef CONFIG_PREEMPT_COUNT
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
34
35
  #define preempt_disable() \
  do { \
bdb438065   Peter Zijlstra   sched: Extract th...
36
  	preempt_count_inc(); \
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
37
38
  	barrier(); \
  } while (0)
ba74c1448   Thomas Gleixner   sched/rt: Documen...
39
  #define sched_preempt_enable_no_resched() \
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
40
41
  do { \
  	barrier(); \
bdb438065   Peter Zijlstra   sched: Extract th...
42
  	preempt_count_dec(); \
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
43
  } while (0)
bdb438065   Peter Zijlstra   sched: Extract th...
44
  #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
ba74c1448   Thomas Gleixner   sched/rt: Documen...
45

bdb438065   Peter Zijlstra   sched: Extract th...
46
  #ifdef CONFIG_PREEMPT
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
47
48
  #define preempt_enable() \
  do { \
bdb438065   Peter Zijlstra   sched: Extract th...
49
50
  	barrier(); \
  	if (unlikely(preempt_count_dec_and_test())) \
1a338ac32   Peter Zijlstra   sched, x86: Optim...
51
  		__preempt_schedule(); \
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
52
  } while (0)
bdb438065   Peter Zijlstra   sched: Extract th...
53
54
55
  #define preempt_check_resched() \
  do { \
  	if (should_resched()) \
1a338ac32   Peter Zijlstra   sched, x86: Optim...
56
  		__preempt_schedule(); \
bdb438065   Peter Zijlstra   sched: Extract th...
57
58
59
  } while (0)
  
  #else
62b94a08d   Peter Zijlstra   sched/preempt: Ta...
60
61
62
63
64
  #define preempt_enable() \
  do { \
  	barrier(); \
  	preempt_count_dec(); \
  } while (0)
bdb438065   Peter Zijlstra   sched: Extract th...
65
66
  #define preempt_check_resched() do { } while (0)
  #endif
/*
 * The _notrace variants use the raw __preempt_count_*() ops, bypassing
 * the debug/tracer hooks installed above.
 */
#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#ifdef CONFIG_PREEMPT

#ifndef CONFIG_CONTEXT_TRACKING
#define __preempt_schedule_context() __preempt_schedule()
#endif

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_context(); \
} while (0)
#else /* !CONFIG_PREEMPT */
#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)
#endif /* CONFIG_PREEMPT */

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that we don't have things like get_user/put_user
 * that can cause faults and scheduling migrate into our preempt-protected
 * region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)

#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()

#endif /* CONFIG_PREEMPT_COUNT */

#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif
/*
 * Fold the thread-info resched flag into the preempt_count state.
 * NOTE(review): set_preempt_need_resched()/tif_need_resched() are
 * presumably supplied by <asm/preempt.h>/thread_info — confirm.
 */
#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)

#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)

#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and out are called under different
 * contexts.  sched_out is called with rq lock held and irq disabled
 * while sched_in is called without rq lock and irq enabled.  This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);
  
  static inline void preempt_notifier_init(struct preempt_notifier *notifier,
  				     struct preempt_ops *ops)
  {
  	INIT_HLIST_NODE(&notifier->link);
  	notifier->ops = ops;
  }
  
  #endif
1da177e4c   Linus Torvalds   Linux-2.6.12-rc2
183
  #endif /* __LINUX_PREEMPT_H */