Commit 633fe795b80693a8198e7d82f66538a72d2bbba2
Committed by Ingo Molnar
1 parent 7c526e1fef
Exists in master and in 39 other branches
timers: add missing kernel-doc
Add missing kernel-doc parameter notation and change function name to its new name:

  Warning(kernel/timer.c:543): No description found for parameter 'name'
  Warning(kernel/timer.c:543): No description found for parameter 'key'

Signed-off-by: Randy Dunlap <randy.dunlap@oracle.com>
Cc: akpm <akpm@linux-foundation.org>
Cc: Johannes Berg <johannes@sipsolutions.net>
LKML-Reference: <20090401174723.f0bea0eb.randy.dunlap@oracle.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
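The two warnings come from the kernel-doc block above init_timer_key() having no entries for the 'name' and 'key' parameters, which exist so lockdep can treat each timer callsite as a fake lock. For context, here is a hedged sketch of how those parameters are normally supplied and how the documented API is used from a caller; all identifiers (my_timer, my_timer_fn, my_start, my_stop) are hypothetical, and the macro expansion noted in the comment is an assumption based on the 2.6.30-era include/linux/timer.h with CONFIG_LOCKDEP enabled, not part of this commit.

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list my_timer;	/* hypothetical example timer */

	/* Timer callback; runs in softirq context when the timer expires. */
	static void my_timer_fn(unsigned long data)
	{
		/* do periodic work here */
	}

	static void my_start(void)
	{
		/*
		 * With lockdep, init_timer() is assumed to expand to roughly
		 * init_timer_key(&my_timer, "my_timer", &__key), supplying the
		 * @name and @key arguments documented by this commit.
		 */
		init_timer(&my_timer);
		my_timer.function = my_timer_fn;
		my_timer.data = 0;
		my_timer.expires = jiffies + HZ;	/* about one second from now */
		add_timer(&my_timer);
	}

	static void my_stop(void)
	{
		del_timer_sync(&my_timer);	/* wait for a running handler, then deactivate */
	}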
Showing 1 changed file with 5 additions and 2 deletions
kernel/timer.c
1 | /* | 1 | /* |
2 | * linux/kernel/timer.c | 2 | * linux/kernel/timer.c |
3 | * | 3 | * |
4 | * Kernel internal timers, basic process system calls | 4 | * Kernel internal timers, basic process system calls |
5 | * | 5 | * |
6 | * Copyright (C) 1991, 1992 Linus Torvalds | 6 | * Copyright (C) 1991, 1992 Linus Torvalds |
7 | * | 7 | * |
8 | * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better. | 8 | * 1997-01-28 Modified by Finn Arne Gangstad to make timers scale better. |
9 | * | 9 | * |
10 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 | 10 | * 1997-09-10 Updated NTP code according to technical memorandum Jan '96 |
11 | * "A Kernel Model for Precision Timekeeping" by Dave Mills | 11 | * "A Kernel Model for Precision Timekeeping" by Dave Mills |
12 | * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to | 12 | * 1998-12-24 Fixed a xtime SMP race (we need the xtime_lock rw spinlock to |
13 | * serialize accesses to xtime/lost_ticks). | 13 | * serialize accesses to xtime/lost_ticks). |
14 | * Copyright (C) 1998 Andrea Arcangeli | 14 | * Copyright (C) 1998 Andrea Arcangeli |
15 | * 1999-03-10 Improved NTP compatibility by Ulrich Windl | 15 | * 1999-03-10 Improved NTP compatibility by Ulrich Windl |
16 | * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love | 16 | * 2002-05-31 Move sys_sysinfo here and make its locking sane, Robert Love |
17 | * 2000-10-05 Implemented scalable SMP per-CPU timer handling. | 17 | * 2000-10-05 Implemented scalable SMP per-CPU timer handling. |
18 | * Copyright (C) 2000, 2001, 2002 Ingo Molnar | 18 | * Copyright (C) 2000, 2001, 2002 Ingo Molnar |
19 | * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar | 19 | * Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/kernel_stat.h> | 22 | #include <linux/kernel_stat.h> |
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/percpu.h> | 25 | #include <linux/percpu.h> |
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/mm.h> | 27 | #include <linux/mm.h> |
28 | #include <linux/swap.h> | 28 | #include <linux/swap.h> |
29 | #include <linux/pid_namespace.h> | 29 | #include <linux/pid_namespace.h> |
30 | #include <linux/notifier.h> | 30 | #include <linux/notifier.h> |
31 | #include <linux/thread_info.h> | 31 | #include <linux/thread_info.h> |
32 | #include <linux/time.h> | 32 | #include <linux/time.h> |
33 | #include <linux/jiffies.h> | 33 | #include <linux/jiffies.h> |
34 | #include <linux/posix-timers.h> | 34 | #include <linux/posix-timers.h> |
35 | #include <linux/cpu.h> | 35 | #include <linux/cpu.h> |
36 | #include <linux/syscalls.h> | 36 | #include <linux/syscalls.h> |
37 | #include <linux/delay.h> | 37 | #include <linux/delay.h> |
38 | #include <linux/tick.h> | 38 | #include <linux/tick.h> |
39 | #include <linux/kallsyms.h> | 39 | #include <linux/kallsyms.h> |
40 | 40 | ||
41 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
42 | #include <asm/unistd.h> | 42 | #include <asm/unistd.h> |
43 | #include <asm/div64.h> | 43 | #include <asm/div64.h> |
44 | #include <asm/timex.h> | 44 | #include <asm/timex.h> |
45 | #include <asm/io.h> | 45 | #include <asm/io.h> |
46 | 46 | ||
47 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; | 47 | u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES; |
48 | 48 | ||
49 | EXPORT_SYMBOL(jiffies_64); | 49 | EXPORT_SYMBOL(jiffies_64); |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * per-CPU timer vector definitions: | 52 | * per-CPU timer vector definitions: |
53 | */ | 53 | */ |
54 | #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6) | 54 | #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6) |
55 | #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8) | 55 | #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8) |
56 | #define TVN_SIZE (1 << TVN_BITS) | 56 | #define TVN_SIZE (1 << TVN_BITS) |
57 | #define TVR_SIZE (1 << TVR_BITS) | 57 | #define TVR_SIZE (1 << TVR_BITS) |
58 | #define TVN_MASK (TVN_SIZE - 1) | 58 | #define TVN_MASK (TVN_SIZE - 1) |
59 | #define TVR_MASK (TVR_SIZE - 1) | 59 | #define TVR_MASK (TVR_SIZE - 1) |
60 | 60 | ||
61 | struct tvec { | 61 | struct tvec { |
62 | struct list_head vec[TVN_SIZE]; | 62 | struct list_head vec[TVN_SIZE]; |
63 | }; | 63 | }; |
64 | 64 | ||
65 | struct tvec_root { | 65 | struct tvec_root { |
66 | struct list_head vec[TVR_SIZE]; | 66 | struct list_head vec[TVR_SIZE]; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct tvec_base { | 69 | struct tvec_base { |
70 | spinlock_t lock; | 70 | spinlock_t lock; |
71 | struct timer_list *running_timer; | 71 | struct timer_list *running_timer; |
72 | unsigned long timer_jiffies; | 72 | unsigned long timer_jiffies; |
73 | struct tvec_root tv1; | 73 | struct tvec_root tv1; |
74 | struct tvec tv2; | 74 | struct tvec tv2; |
75 | struct tvec tv3; | 75 | struct tvec tv3; |
76 | struct tvec tv4; | 76 | struct tvec tv4; |
77 | struct tvec tv5; | 77 | struct tvec tv5; |
78 | } ____cacheline_aligned; | 78 | } ____cacheline_aligned; |
79 | 79 | ||
80 | struct tvec_base boot_tvec_bases; | 80 | struct tvec_base boot_tvec_bases; |
81 | EXPORT_SYMBOL(boot_tvec_bases); | 81 | EXPORT_SYMBOL(boot_tvec_bases); |
82 | static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases; | 82 | static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases; |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * Note that all tvec_bases are 2 byte aligned and lower bit of | 85 | * Note that all tvec_bases are 2 byte aligned and lower bit of |
86 | * base in timer_list is guaranteed to be zero. Use the LSB for | 86 | * base in timer_list is guaranteed to be zero. Use the LSB for |
87 | * the new flag to indicate whether the timer is deferrable | 87 | * the new flag to indicate whether the timer is deferrable |
88 | */ | 88 | */ |
89 | #define TBASE_DEFERRABLE_FLAG (0x1) | 89 | #define TBASE_DEFERRABLE_FLAG (0x1) |
90 | 90 | ||
91 | /* Functions below help us manage 'deferrable' flag */ | 91 | /* Functions below help us manage 'deferrable' flag */ |
92 | static inline unsigned int tbase_get_deferrable(struct tvec_base *base) | 92 | static inline unsigned int tbase_get_deferrable(struct tvec_base *base) |
93 | { | 93 | { |
94 | return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG); | 94 | return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG); |
95 | } | 95 | } |
96 | 96 | ||
97 | static inline struct tvec_base *tbase_get_base(struct tvec_base *base) | 97 | static inline struct tvec_base *tbase_get_base(struct tvec_base *base) |
98 | { | 98 | { |
99 | return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG)); | 99 | return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG)); |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline void timer_set_deferrable(struct timer_list *timer) | 102 | static inline void timer_set_deferrable(struct timer_list *timer) |
103 | { | 103 | { |
104 | timer->base = ((struct tvec_base *)((unsigned long)(timer->base) | | 104 | timer->base = ((struct tvec_base *)((unsigned long)(timer->base) | |
105 | TBASE_DEFERRABLE_FLAG)); | 105 | TBASE_DEFERRABLE_FLAG)); |
106 | } | 106 | } |
107 | 107 | ||
108 | static inline void | 108 | static inline void |
109 | timer_set_base(struct timer_list *timer, struct tvec_base *new_base) | 109 | timer_set_base(struct timer_list *timer, struct tvec_base *new_base) |
110 | { | 110 | { |
111 | timer->base = (struct tvec_base *)((unsigned long)(new_base) | | 111 | timer->base = (struct tvec_base *)((unsigned long)(new_base) | |
112 | tbase_get_deferrable(timer->base)); | 112 | tbase_get_deferrable(timer->base)); |
113 | } | 113 | } |
114 | 114 | ||
115 | static unsigned long round_jiffies_common(unsigned long j, int cpu, | 115 | static unsigned long round_jiffies_common(unsigned long j, int cpu, |
116 | bool force_up) | 116 | bool force_up) |
117 | { | 117 | { |
118 | int rem; | 118 | int rem; |
119 | unsigned long original = j; | 119 | unsigned long original = j; |
120 | 120 | ||
121 | /* | 121 | /* |
122 | * We don't want all cpus firing their timers at once hitting the | 122 | * We don't want all cpus firing their timers at once hitting the |
123 | * same lock or cachelines, so we skew each extra cpu with an extra | 123 | * same lock or cachelines, so we skew each extra cpu with an extra |
124 | * 3 jiffies. This 3 jiffies came originally from the mm/ code which | 124 | * 3 jiffies. This 3 jiffies came originally from the mm/ code which |
125 | * already did this. | 125 | * already did this. |
126 | * The skew is done by adding 3*cpunr, then round, then subtract this | 126 | * The skew is done by adding 3*cpunr, then round, then subtract this |
127 | * extra offset again. | 127 | * extra offset again. |
128 | */ | 128 | */ |
129 | j += cpu * 3; | 129 | j += cpu * 3; |
130 | 130 | ||
131 | rem = j % HZ; | 131 | rem = j % HZ; |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * If the target jiffie is just after a whole second (which can happen | 134 | * If the target jiffie is just after a whole second (which can happen |
135 | * due to delays of the timer irq, long irq off times etc etc) then | 135 | * due to delays of the timer irq, long irq off times etc etc) then |
136 | * we should round down to the whole second, not up. Use 1/4th second | 136 | * we should round down to the whole second, not up. Use 1/4th second |
137 | * as cutoff for this rounding as an extreme upper bound for this. | 137 | * as cutoff for this rounding as an extreme upper bound for this. |
138 | * But never round down if @force_up is set. | 138 | * But never round down if @force_up is set. |
139 | */ | 139 | */ |
140 | if (rem < HZ/4 && !force_up) /* round down */ | 140 | if (rem < HZ/4 && !force_up) /* round down */ |
141 | j = j - rem; | 141 | j = j - rem; |
142 | else /* round up */ | 142 | else /* round up */ |
143 | j = j - rem + HZ; | 143 | j = j - rem + HZ; |
144 | 144 | ||
145 | /* now that we have rounded, subtract the extra skew again */ | 145 | /* now that we have rounded, subtract the extra skew again */ |
146 | j -= cpu * 3; | 146 | j -= cpu * 3; |
147 | 147 | ||
148 | if (j <= jiffies) /* rounding ate our timeout entirely; */ | 148 | if (j <= jiffies) /* rounding ate our timeout entirely; */ |
149 | return original; | 149 | return original; |
150 | return j; | 150 | return j; |
151 | } | 151 | } |
152 | 152 | ||
153 | /** | 153 | /** |
154 | * __round_jiffies - function to round jiffies to a full second | 154 | * __round_jiffies - function to round jiffies to a full second |
155 | * @j: the time in (absolute) jiffies that should be rounded | 155 | * @j: the time in (absolute) jiffies that should be rounded |
156 | * @cpu: the processor number on which the timeout will happen | 156 | * @cpu: the processor number on which the timeout will happen |
157 | * | 157 | * |
158 | * __round_jiffies() rounds an absolute time in the future (in jiffies) | 158 | * __round_jiffies() rounds an absolute time in the future (in jiffies) |
159 | * up or down to (approximately) full seconds. This is useful for timers | 159 | * up or down to (approximately) full seconds. This is useful for timers |
160 | * for which the exact time they fire does not matter too much, as long as | 160 | * for which the exact time they fire does not matter too much, as long as |
161 | * they fire approximately every X seconds. | 161 | * they fire approximately every X seconds. |
162 | * | 162 | * |
163 | * By rounding these timers to whole seconds, all such timers will fire | 163 | * By rounding these timers to whole seconds, all such timers will fire |
164 | * at the same time, rather than at various times spread out. The goal | 164 | * at the same time, rather than at various times spread out. The goal |
165 | * of this is to have the CPU wake up less, which saves power. | 165 | * of this is to have the CPU wake up less, which saves power. |
166 | * | 166 | * |
167 | * The exact rounding is skewed for each processor to avoid all | 167 | * The exact rounding is skewed for each processor to avoid all |
168 | * processors firing at the exact same time, which could lead | 168 | * processors firing at the exact same time, which could lead |
169 | * to lock contention or spurious cache line bouncing. | 169 | * to lock contention or spurious cache line bouncing. |
170 | * | 170 | * |
171 | * The return value is the rounded version of the @j parameter. | 171 | * The return value is the rounded version of the @j parameter. |
172 | */ | 172 | */ |
173 | unsigned long __round_jiffies(unsigned long j, int cpu) | 173 | unsigned long __round_jiffies(unsigned long j, int cpu) |
174 | { | 174 | { |
175 | return round_jiffies_common(j, cpu, false); | 175 | return round_jiffies_common(j, cpu, false); |
176 | } | 176 | } |
177 | EXPORT_SYMBOL_GPL(__round_jiffies); | 177 | EXPORT_SYMBOL_GPL(__round_jiffies); |
178 | 178 | ||
179 | /** | 179 | /** |
180 | * __round_jiffies_relative - function to round jiffies to a full second | 180 | * __round_jiffies_relative - function to round jiffies to a full second |
181 | * @j: the time in (relative) jiffies that should be rounded | 181 | * @j: the time in (relative) jiffies that should be rounded |
182 | * @cpu: the processor number on which the timeout will happen | 182 | * @cpu: the processor number on which the timeout will happen |
183 | * | 183 | * |
184 | * __round_jiffies_relative() rounds a time delta in the future (in jiffies) | 184 | * __round_jiffies_relative() rounds a time delta in the future (in jiffies) |
185 | * up or down to (approximately) full seconds. This is useful for timers | 185 | * up or down to (approximately) full seconds. This is useful for timers |
186 | * for which the exact time they fire does not matter too much, as long as | 186 | * for which the exact time they fire does not matter too much, as long as |
187 | * they fire approximately every X seconds. | 187 | * they fire approximately every X seconds. |
188 | * | 188 | * |
189 | * By rounding these timers to whole seconds, all such timers will fire | 189 | * By rounding these timers to whole seconds, all such timers will fire |
190 | * at the same time, rather than at various times spread out. The goal | 190 | * at the same time, rather than at various times spread out. The goal |
191 | * of this is to have the CPU wake up less, which saves power. | 191 | * of this is to have the CPU wake up less, which saves power. |
192 | * | 192 | * |
193 | * The exact rounding is skewed for each processor to avoid all | 193 | * The exact rounding is skewed for each processor to avoid all |
194 | * processors firing at the exact same time, which could lead | 194 | * processors firing at the exact same time, which could lead |
195 | * to lock contention or spurious cache line bouncing. | 195 | * to lock contention or spurious cache line bouncing. |
196 | * | 196 | * |
197 | * The return value is the rounded version of the @j parameter. | 197 | * The return value is the rounded version of the @j parameter. |
198 | */ | 198 | */ |
199 | unsigned long __round_jiffies_relative(unsigned long j, int cpu) | 199 | unsigned long __round_jiffies_relative(unsigned long j, int cpu) |
200 | { | 200 | { |
201 | unsigned long j0 = jiffies; | 201 | unsigned long j0 = jiffies; |
202 | 202 | ||
203 | /* Use j0 because jiffies might change while we run */ | 203 | /* Use j0 because jiffies might change while we run */ |
204 | return round_jiffies_common(j + j0, cpu, false) - j0; | 204 | return round_jiffies_common(j + j0, cpu, false) - j0; |
205 | } | 205 | } |
206 | EXPORT_SYMBOL_GPL(__round_jiffies_relative); | 206 | EXPORT_SYMBOL_GPL(__round_jiffies_relative); |
207 | 207 | ||
208 | /** | 208 | /** |
209 | * round_jiffies - function to round jiffies to a full second | 209 | * round_jiffies - function to round jiffies to a full second |
210 | * @j: the time in (absolute) jiffies that should be rounded | 210 | * @j: the time in (absolute) jiffies that should be rounded |
211 | * | 211 | * |
212 | * round_jiffies() rounds an absolute time in the future (in jiffies) | 212 | * round_jiffies() rounds an absolute time in the future (in jiffies) |
213 | * up or down to (approximately) full seconds. This is useful for timers | 213 | * up or down to (approximately) full seconds. This is useful for timers |
214 | * for which the exact time they fire does not matter too much, as long as | 214 | * for which the exact time they fire does not matter too much, as long as |
215 | * they fire approximately every X seconds. | 215 | * they fire approximately every X seconds. |
216 | * | 216 | * |
217 | * By rounding these timers to whole seconds, all such timers will fire | 217 | * By rounding these timers to whole seconds, all such timers will fire |
218 | * at the same time, rather than at various times spread out. The goal | 218 | * at the same time, rather than at various times spread out. The goal |
219 | * of this is to have the CPU wake up less, which saves power. | 219 | * of this is to have the CPU wake up less, which saves power. |
220 | * | 220 | * |
221 | * The return value is the rounded version of the @j parameter. | 221 | * The return value is the rounded version of the @j parameter. |
222 | */ | 222 | */ |
223 | unsigned long round_jiffies(unsigned long j) | 223 | unsigned long round_jiffies(unsigned long j) |
224 | { | 224 | { |
225 | return round_jiffies_common(j, raw_smp_processor_id(), false); | 225 | return round_jiffies_common(j, raw_smp_processor_id(), false); |
226 | } | 226 | } |
227 | EXPORT_SYMBOL_GPL(round_jiffies); | 227 | EXPORT_SYMBOL_GPL(round_jiffies); |
228 | 228 | ||
229 | /** | 229 | /** |
230 | * round_jiffies_relative - function to round jiffies to a full second | 230 | * round_jiffies_relative - function to round jiffies to a full second |
231 | * @j: the time in (relative) jiffies that should be rounded | 231 | * @j: the time in (relative) jiffies that should be rounded |
232 | * | 232 | * |
233 | * round_jiffies_relative() rounds a time delta in the future (in jiffies) | 233 | * round_jiffies_relative() rounds a time delta in the future (in jiffies) |
234 | * up or down to (approximately) full seconds. This is useful for timers | 234 | * up or down to (approximately) full seconds. This is useful for timers |
235 | * for which the exact time they fire does not matter too much, as long as | 235 | * for which the exact time they fire does not matter too much, as long as |
236 | * they fire approximately every X seconds. | 236 | * they fire approximately every X seconds. |
237 | * | 237 | * |
238 | * By rounding these timers to whole seconds, all such timers will fire | 238 | * By rounding these timers to whole seconds, all such timers will fire |
239 | * at the same time, rather than at various times spread out. The goal | 239 | * at the same time, rather than at various times spread out. The goal |
240 | * of this is to have the CPU wake up less, which saves power. | 240 | * of this is to have the CPU wake up less, which saves power. |
241 | * | 241 | * |
242 | * The return value is the rounded version of the @j parameter. | 242 | * The return value is the rounded version of the @j parameter. |
243 | */ | 243 | */ |
244 | unsigned long round_jiffies_relative(unsigned long j) | 244 | unsigned long round_jiffies_relative(unsigned long j) |
245 | { | 245 | { |
246 | return __round_jiffies_relative(j, raw_smp_processor_id()); | 246 | return __round_jiffies_relative(j, raw_smp_processor_id()); |
247 | } | 247 | } |
248 | EXPORT_SYMBOL_GPL(round_jiffies_relative); | 248 | EXPORT_SYMBOL_GPL(round_jiffies_relative); |
249 | 249 | ||
250 | /** | 250 | /** |
251 | * __round_jiffies_up - function to round jiffies up to a full second | 251 | * __round_jiffies_up - function to round jiffies up to a full second |
252 | * @j: the time in (absolute) jiffies that should be rounded | 252 | * @j: the time in (absolute) jiffies that should be rounded |
253 | * @cpu: the processor number on which the timeout will happen | 253 | * @cpu: the processor number on which the timeout will happen |
254 | * | 254 | * |
255 | * This is the same as __round_jiffies() except that it will never | 255 | * This is the same as __round_jiffies() except that it will never |
256 | * round down. This is useful for timeouts for which the exact time | 256 | * round down. This is useful for timeouts for which the exact time |
257 | * of firing does not matter too much, as long as they don't fire too | 257 | * of firing does not matter too much, as long as they don't fire too |
258 | * early. | 258 | * early. |
259 | */ | 259 | */ |
260 | unsigned long __round_jiffies_up(unsigned long j, int cpu) | 260 | unsigned long __round_jiffies_up(unsigned long j, int cpu) |
261 | { | 261 | { |
262 | return round_jiffies_common(j, cpu, true); | 262 | return round_jiffies_common(j, cpu, true); |
263 | } | 263 | } |
264 | EXPORT_SYMBOL_GPL(__round_jiffies_up); | 264 | EXPORT_SYMBOL_GPL(__round_jiffies_up); |
265 | 265 | ||
266 | /** | 266 | /** |
267 | * __round_jiffies_up_relative - function to round jiffies up to a full second | 267 | * __round_jiffies_up_relative - function to round jiffies up to a full second |
268 | * @j: the time in (relative) jiffies that should be rounded | 268 | * @j: the time in (relative) jiffies that should be rounded |
269 | * @cpu: the processor number on which the timeout will happen | 269 | * @cpu: the processor number on which the timeout will happen |
270 | * | 270 | * |
271 | * This is the same as __round_jiffies_relative() except that it will never | 271 | * This is the same as __round_jiffies_relative() except that it will never |
272 | * round down. This is useful for timeouts for which the exact time | 272 | * round down. This is useful for timeouts for which the exact time |
273 | * of firing does not matter too much, as long as they don't fire too | 273 | * of firing does not matter too much, as long as they don't fire too |
274 | * early. | 274 | * early. |
275 | */ | 275 | */ |
276 | unsigned long __round_jiffies_up_relative(unsigned long j, int cpu) | 276 | unsigned long __round_jiffies_up_relative(unsigned long j, int cpu) |
277 | { | 277 | { |
278 | unsigned long j0 = jiffies; | 278 | unsigned long j0 = jiffies; |
279 | 279 | ||
280 | /* Use j0 because jiffies might change while we run */ | 280 | /* Use j0 because jiffies might change while we run */ |
281 | return round_jiffies_common(j + j0, cpu, true) - j0; | 281 | return round_jiffies_common(j + j0, cpu, true) - j0; |
282 | } | 282 | } |
283 | EXPORT_SYMBOL_GPL(__round_jiffies_up_relative); | 283 | EXPORT_SYMBOL_GPL(__round_jiffies_up_relative); |
284 | 284 | ||
285 | /** | 285 | /** |
286 | * round_jiffies_up - function to round jiffies up to a full second | 286 | * round_jiffies_up - function to round jiffies up to a full second |
287 | * @j: the time in (absolute) jiffies that should be rounded | 287 | * @j: the time in (absolute) jiffies that should be rounded |
288 | * | 288 | * |
289 | * This is the same as round_jiffies() except that it will never | 289 | * This is the same as round_jiffies() except that it will never |
290 | * round down. This is useful for timeouts for which the exact time | 290 | * round down. This is useful for timeouts for which the exact time |
291 | * of firing does not matter too much, as long as they don't fire too | 291 | * of firing does not matter too much, as long as they don't fire too |
292 | * early. | 292 | * early. |
293 | */ | 293 | */ |
294 | unsigned long round_jiffies_up(unsigned long j) | 294 | unsigned long round_jiffies_up(unsigned long j) |
295 | { | 295 | { |
296 | return round_jiffies_common(j, raw_smp_processor_id(), true); | 296 | return round_jiffies_common(j, raw_smp_processor_id(), true); |
297 | } | 297 | } |
298 | EXPORT_SYMBOL_GPL(round_jiffies_up); | 298 | EXPORT_SYMBOL_GPL(round_jiffies_up); |
299 | 299 | ||
300 | /** | 300 | /** |
301 | * round_jiffies_up_relative - function to round jiffies up to a full second | 301 | * round_jiffies_up_relative - function to round jiffies up to a full second |
302 | * @j: the time in (relative) jiffies that should be rounded | 302 | * @j: the time in (relative) jiffies that should be rounded |
303 | * | 303 | * |
304 | * This is the same as round_jiffies_relative() except that it will never | 304 | * This is the same as round_jiffies_relative() except that it will never |
305 | * round down. This is useful for timeouts for which the exact time | 305 | * round down. This is useful for timeouts for which the exact time |
306 | * of firing does not matter too much, as long as they don't fire too | 306 | * of firing does not matter too much, as long as they don't fire too |
307 | * early. | 307 | * early. |
308 | */ | 308 | */ |
309 | unsigned long round_jiffies_up_relative(unsigned long j) | 309 | unsigned long round_jiffies_up_relative(unsigned long j) |
310 | { | 310 | { |
311 | return __round_jiffies_up_relative(j, raw_smp_processor_id()); | 311 | return __round_jiffies_up_relative(j, raw_smp_processor_id()); |
312 | } | 312 | } |
313 | EXPORT_SYMBOL_GPL(round_jiffies_up_relative); | 313 | EXPORT_SYMBOL_GPL(round_jiffies_up_relative); |
314 | 314 | ||
315 | 315 | ||
316 | static inline void set_running_timer(struct tvec_base *base, | 316 | static inline void set_running_timer(struct tvec_base *base, |
317 | struct timer_list *timer) | 317 | struct timer_list *timer) |
318 | { | 318 | { |
319 | #ifdef CONFIG_SMP | 319 | #ifdef CONFIG_SMP |
320 | base->running_timer = timer; | 320 | base->running_timer = timer; |
321 | #endif | 321 | #endif |
322 | } | 322 | } |
323 | 323 | ||
324 | static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) | 324 | static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) |
325 | { | 325 | { |
326 | unsigned long expires = timer->expires; | 326 | unsigned long expires = timer->expires; |
327 | unsigned long idx = expires - base->timer_jiffies; | 327 | unsigned long idx = expires - base->timer_jiffies; |
328 | struct list_head *vec; | 328 | struct list_head *vec; |
329 | 329 | ||
330 | if (idx < TVR_SIZE) { | 330 | if (idx < TVR_SIZE) { |
331 | int i = expires & TVR_MASK; | 331 | int i = expires & TVR_MASK; |
332 | vec = base->tv1.vec + i; | 332 | vec = base->tv1.vec + i; |
333 | } else if (idx < 1 << (TVR_BITS + TVN_BITS)) { | 333 | } else if (idx < 1 << (TVR_BITS + TVN_BITS)) { |
334 | int i = (expires >> TVR_BITS) & TVN_MASK; | 334 | int i = (expires >> TVR_BITS) & TVN_MASK; |
335 | vec = base->tv2.vec + i; | 335 | vec = base->tv2.vec + i; |
336 | } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) { | 336 | } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) { |
337 | int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK; | 337 | int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK; |
338 | vec = base->tv3.vec + i; | 338 | vec = base->tv3.vec + i; |
339 | } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) { | 339 | } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) { |
340 | int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK; | 340 | int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK; |
341 | vec = base->tv4.vec + i; | 341 | vec = base->tv4.vec + i; |
342 | } else if ((signed long) idx < 0) { | 342 | } else if ((signed long) idx < 0) { |
343 | /* | 343 | /* |
344 | * Can happen if you add a timer with expires == jiffies, | 344 | * Can happen if you add a timer with expires == jiffies, |
345 | * or you set a timer to go off in the past | 345 | * or you set a timer to go off in the past |
346 | */ | 346 | */ |
347 | vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK); | 347 | vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK); |
348 | } else { | 348 | } else { |
349 | int i; | 349 | int i; |
350 | /* If the timeout is larger than 0xffffffff on 64-bit | 350 | /* If the timeout is larger than 0xffffffff on 64-bit |
351 | * architectures then we use the maximum timeout: | 351 | * architectures then we use the maximum timeout: |
352 | */ | 352 | */ |
353 | if (idx > 0xffffffffUL) { | 353 | if (idx > 0xffffffffUL) { |
354 | idx = 0xffffffffUL; | 354 | idx = 0xffffffffUL; |
355 | expires = idx + base->timer_jiffies; | 355 | expires = idx + base->timer_jiffies; |
356 | } | 356 | } |
357 | i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; | 357 | i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; |
358 | vec = base->tv5.vec + i; | 358 | vec = base->tv5.vec + i; |
359 | } | 359 | } |
360 | /* | 360 | /* |
361 | * Timers are FIFO: | 361 | * Timers are FIFO: |
362 | */ | 362 | */ |
363 | list_add_tail(&timer->entry, vec); | 363 | list_add_tail(&timer->entry, vec); |
364 | } | 364 | } |
365 | 365 | ||
366 | #ifdef CONFIG_TIMER_STATS | 366 | #ifdef CONFIG_TIMER_STATS |
367 | void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr) | 367 | void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr) |
368 | { | 368 | { |
369 | if (timer->start_site) | 369 | if (timer->start_site) |
370 | return; | 370 | return; |
371 | 371 | ||
372 | timer->start_site = addr; | 372 | timer->start_site = addr; |
373 | memcpy(timer->start_comm, current->comm, TASK_COMM_LEN); | 373 | memcpy(timer->start_comm, current->comm, TASK_COMM_LEN); |
374 | timer->start_pid = current->pid; | 374 | timer->start_pid = current->pid; |
375 | } | 375 | } |
376 | 376 | ||
377 | static void timer_stats_account_timer(struct timer_list *timer) | 377 | static void timer_stats_account_timer(struct timer_list *timer) |
378 | { | 378 | { |
379 | unsigned int flag = 0; | 379 | unsigned int flag = 0; |
380 | 380 | ||
381 | if (unlikely(tbase_get_deferrable(timer->base))) | 381 | if (unlikely(tbase_get_deferrable(timer->base))) |
382 | flag |= TIMER_STATS_FLAG_DEFERRABLE; | 382 | flag |= TIMER_STATS_FLAG_DEFERRABLE; |
383 | 383 | ||
384 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, | 384 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, |
385 | timer->function, timer->start_comm, flag); | 385 | timer->function, timer->start_comm, flag); |
386 | } | 386 | } |
387 | 387 | ||
388 | #else | 388 | #else |
389 | static void timer_stats_account_timer(struct timer_list *timer) {} | 389 | static void timer_stats_account_timer(struct timer_list *timer) {} |
390 | #endif | 390 | #endif |
391 | 391 | ||
392 | #ifdef CONFIG_DEBUG_OBJECTS_TIMERS | 392 | #ifdef CONFIG_DEBUG_OBJECTS_TIMERS |
393 | 393 | ||
394 | static struct debug_obj_descr timer_debug_descr; | 394 | static struct debug_obj_descr timer_debug_descr; |
395 | 395 | ||
396 | /* | 396 | /* |
397 | * fixup_init is called when: | 397 | * fixup_init is called when: |
398 | * - an active object is initialized | 398 | * - an active object is initialized |
399 | */ | 399 | */ |
400 | static int timer_fixup_init(void *addr, enum debug_obj_state state) | 400 | static int timer_fixup_init(void *addr, enum debug_obj_state state) |
401 | { | 401 | { |
402 | struct timer_list *timer = addr; | 402 | struct timer_list *timer = addr; |
403 | 403 | ||
404 | switch (state) { | 404 | switch (state) { |
405 | case ODEBUG_STATE_ACTIVE: | 405 | case ODEBUG_STATE_ACTIVE: |
406 | del_timer_sync(timer); | 406 | del_timer_sync(timer); |
407 | debug_object_init(timer, &timer_debug_descr); | 407 | debug_object_init(timer, &timer_debug_descr); |
408 | return 1; | 408 | return 1; |
409 | default: | 409 | default: |
410 | return 0; | 410 | return 0; |
411 | } | 411 | } |
412 | } | 412 | } |
413 | 413 | ||
414 | /* | 414 | /* |
415 | * fixup_activate is called when: | 415 | * fixup_activate is called when: |
416 | * - an active object is activated | 416 | * - an active object is activated |
417 | * - an unknown object is activated (might be a statically initialized object) | 417 | * - an unknown object is activated (might be a statically initialized object) |
418 | */ | 418 | */ |
419 | static int timer_fixup_activate(void *addr, enum debug_obj_state state) | 419 | static int timer_fixup_activate(void *addr, enum debug_obj_state state) |
420 | { | 420 | { |
421 | struct timer_list *timer = addr; | 421 | struct timer_list *timer = addr; |
422 | 422 | ||
423 | switch (state) { | 423 | switch (state) { |
424 | 424 | ||
425 | case ODEBUG_STATE_NOTAVAILABLE: | 425 | case ODEBUG_STATE_NOTAVAILABLE: |
426 | /* | 426 | /* |
427 | * This is not really a fixup. The timer was | 427 | * This is not really a fixup. The timer was |
428 | * statically initialized. We just make sure that it | 428 | * statically initialized. We just make sure that it |
429 | * is tracked in the object tracker. | 429 | * is tracked in the object tracker. |
430 | */ | 430 | */ |
431 | if (timer->entry.next == NULL && | 431 | if (timer->entry.next == NULL && |
432 | timer->entry.prev == TIMER_ENTRY_STATIC) { | 432 | timer->entry.prev == TIMER_ENTRY_STATIC) { |
433 | debug_object_init(timer, &timer_debug_descr); | 433 | debug_object_init(timer, &timer_debug_descr); |
434 | debug_object_activate(timer, &timer_debug_descr); | 434 | debug_object_activate(timer, &timer_debug_descr); |
435 | return 0; | 435 | return 0; |
436 | } else { | 436 | } else { |
437 | WARN_ON_ONCE(1); | 437 | WARN_ON_ONCE(1); |
438 | } | 438 | } |
439 | return 0; | 439 | return 0; |
440 | 440 | ||
441 | case ODEBUG_STATE_ACTIVE: | 441 | case ODEBUG_STATE_ACTIVE: |
442 | WARN_ON(1); | 442 | WARN_ON(1); |
443 | 443 | ||
444 | default: | 444 | default: |
445 | return 0; | 445 | return 0; |
446 | } | 446 | } |
447 | } | 447 | } |
448 | 448 | ||
449 | /* | 449 | /* |
450 | * fixup_free is called when: | 450 | * fixup_free is called when: |
451 | * - an active object is freed | 451 | * - an active object is freed |
452 | */ | 452 | */ |
453 | static int timer_fixup_free(void *addr, enum debug_obj_state state) | 453 | static int timer_fixup_free(void *addr, enum debug_obj_state state) |
454 | { | 454 | { |
455 | struct timer_list *timer = addr; | 455 | struct timer_list *timer = addr; |
456 | 456 | ||
457 | switch (state) { | 457 | switch (state) { |
458 | case ODEBUG_STATE_ACTIVE: | 458 | case ODEBUG_STATE_ACTIVE: |
459 | del_timer_sync(timer); | 459 | del_timer_sync(timer); |
460 | debug_object_free(timer, &timer_debug_descr); | 460 | debug_object_free(timer, &timer_debug_descr); |
461 | return 1; | 461 | return 1; |
462 | default: | 462 | default: |
463 | return 0; | 463 | return 0; |
464 | } | 464 | } |
465 | } | 465 | } |
466 | 466 | ||
467 | static struct debug_obj_descr timer_debug_descr = { | 467 | static struct debug_obj_descr timer_debug_descr = { |
468 | .name = "timer_list", | 468 | .name = "timer_list", |
469 | .fixup_init = timer_fixup_init, | 469 | .fixup_init = timer_fixup_init, |
470 | .fixup_activate = timer_fixup_activate, | 470 | .fixup_activate = timer_fixup_activate, |
471 | .fixup_free = timer_fixup_free, | 471 | .fixup_free = timer_fixup_free, |
472 | }; | 472 | }; |
473 | 473 | ||
474 | static inline void debug_timer_init(struct timer_list *timer) | 474 | static inline void debug_timer_init(struct timer_list *timer) |
475 | { | 475 | { |
476 | debug_object_init(timer, &timer_debug_descr); | 476 | debug_object_init(timer, &timer_debug_descr); |
477 | } | 477 | } |
478 | 478 | ||
479 | static inline void debug_timer_activate(struct timer_list *timer) | 479 | static inline void debug_timer_activate(struct timer_list *timer) |
480 | { | 480 | { |
481 | debug_object_activate(timer, &timer_debug_descr); | 481 | debug_object_activate(timer, &timer_debug_descr); |
482 | } | 482 | } |
483 | 483 | ||
484 | static inline void debug_timer_deactivate(struct timer_list *timer) | 484 | static inline void debug_timer_deactivate(struct timer_list *timer) |
485 | { | 485 | { |
486 | debug_object_deactivate(timer, &timer_debug_descr); | 486 | debug_object_deactivate(timer, &timer_debug_descr); |
487 | } | 487 | } |
488 | 488 | ||
489 | static inline void debug_timer_free(struct timer_list *timer) | 489 | static inline void debug_timer_free(struct timer_list *timer) |
490 | { | 490 | { |
491 | debug_object_free(timer, &timer_debug_descr); | 491 | debug_object_free(timer, &timer_debug_descr); |
492 | } | 492 | } |
493 | 493 | ||
494 | static void __init_timer(struct timer_list *timer); | 494 | static void __init_timer(struct timer_list *timer); |
495 | 495 | ||
496 | void init_timer_on_stack(struct timer_list *timer) | 496 | void init_timer_on_stack(struct timer_list *timer) |
497 | { | 497 | { |
498 | debug_object_init_on_stack(timer, &timer_debug_descr); | 498 | debug_object_init_on_stack(timer, &timer_debug_descr); |
499 | __init_timer(timer); | 499 | __init_timer(timer); |
500 | } | 500 | } |
501 | EXPORT_SYMBOL_GPL(init_timer_on_stack); | 501 | EXPORT_SYMBOL_GPL(init_timer_on_stack); |
502 | 502 | ||
503 | void destroy_timer_on_stack(struct timer_list *timer) | 503 | void destroy_timer_on_stack(struct timer_list *timer) |
504 | { | 504 | { |
505 | debug_object_free(timer, &timer_debug_descr); | 505 | debug_object_free(timer, &timer_debug_descr); |
506 | } | 506 | } |
507 | EXPORT_SYMBOL_GPL(destroy_timer_on_stack); | 507 | EXPORT_SYMBOL_GPL(destroy_timer_on_stack); |
508 | 508 | ||
509 | #else | 509 | #else |
510 | static inline void debug_timer_init(struct timer_list *timer) { } | 510 | static inline void debug_timer_init(struct timer_list *timer) { } |
511 | static inline void debug_timer_activate(struct timer_list *timer) { } | 511 | static inline void debug_timer_activate(struct timer_list *timer) { } |
512 | static inline void debug_timer_deactivate(struct timer_list *timer) { } | 512 | static inline void debug_timer_deactivate(struct timer_list *timer) { } |
513 | #endif | 513 | #endif |
514 | 514 | ||
515 | static void __init_timer(struct timer_list *timer) | 515 | static void __init_timer(struct timer_list *timer) |
516 | { | 516 | { |
517 | timer->entry.next = NULL; | 517 | timer->entry.next = NULL; |
518 | timer->base = __raw_get_cpu_var(tvec_bases); | 518 | timer->base = __raw_get_cpu_var(tvec_bases); |
519 | #ifdef CONFIG_TIMER_STATS | 519 | #ifdef CONFIG_TIMER_STATS |
520 | timer->start_site = NULL; | 520 | timer->start_site = NULL; |
521 | timer->start_pid = -1; | 521 | timer->start_pid = -1; |
522 | memset(timer->start_comm, 0, TASK_COMM_LEN); | 522 | memset(timer->start_comm, 0, TASK_COMM_LEN); |
523 | #endif | 523 | #endif |
524 | } | 524 | } |
525 | 525 | ||
526 | /** | 526 | /** |
527 | * init_timer - initialize a timer. | 527 | * init_timer_key - initialize a timer |
528 | * @timer: the timer to be initialized | 528 | * @timer: the timer to be initialized |
529 | * @name: name of the timer | ||
530 | * @key: lockdep class key of the fake lock used for tracking timer | ||
531 | * sync lock dependencies | ||
529 | * | 532 | * |
530 | * init_timer() must be done to a timer prior calling *any* of the | 533 | * init_timer_key() must be done to a timer prior calling *any* of the |
531 | * other timer functions. | 534 | * other timer functions. |
532 | */ | 535 | */ |
533 | void init_timer(struct timer_list *timer) | 536 | void init_timer(struct timer_list *timer) |
534 | { | 537 | { |
535 | debug_timer_init(timer); | 538 | debug_timer_init(timer); |
536 | __init_timer(timer); | 539 | __init_timer(timer); |
537 | } | 540 | } |
538 | EXPORT_SYMBOL(init_timer); | 541 | EXPORT_SYMBOL(init_timer); |
539 | 542 | ||
540 | void init_timer_deferrable(struct timer_list *timer) | 543 | void init_timer_deferrable(struct timer_list *timer) |
541 | { | 544 | { |
542 | init_timer(timer); | 545 | init_timer(timer); |
543 | timer_set_deferrable(timer); | 546 | timer_set_deferrable(timer); |
544 | } | 547 | } |
545 | EXPORT_SYMBOL(init_timer_deferrable); | 548 | EXPORT_SYMBOL(init_timer_deferrable); |
546 | 549 | ||
547 | static inline void detach_timer(struct timer_list *timer, | 550 | static inline void detach_timer(struct timer_list *timer, |
548 | int clear_pending) | 551 | int clear_pending) |
549 | { | 552 | { |
550 | struct list_head *entry = &timer->entry; | 553 | struct list_head *entry = &timer->entry; |
551 | 554 | ||
552 | debug_timer_deactivate(timer); | 555 | debug_timer_deactivate(timer); |
553 | 556 | ||
554 | __list_del(entry->prev, entry->next); | 557 | __list_del(entry->prev, entry->next); |
555 | if (clear_pending) | 558 | if (clear_pending) |
556 | entry->next = NULL; | 559 | entry->next = NULL; |
557 | entry->prev = LIST_POISON2; | 560 | entry->prev = LIST_POISON2; |
558 | } | 561 | } |
559 | 562 | ||
560 | /* | 563 | /* |
561 | * We are using hashed locking: holding per_cpu(tvec_bases).lock | 564 | * We are using hashed locking: holding per_cpu(tvec_bases).lock |
562 | * means that all timers which are tied to this base via timer->base are | 565 | * means that all timers which are tied to this base via timer->base are |
563 | * locked, and the base itself is locked too. | 566 | * locked, and the base itself is locked too. |
564 | * | 567 | * |
565 | * So __run_timers/migrate_timers can safely modify all timers which could | 568 | * So __run_timers/migrate_timers can safely modify all timers which could |
566 | * be found on ->tvX lists. | 569 | * be found on ->tvX lists. |
567 | * | 570 | * |
568 | * When the timer's base is locked, and the timer removed from list, it is | 571 | * When the timer's base is locked, and the timer removed from list, it is |
569 | * possible to set timer->base = NULL and drop the lock: the timer remains | 572 | * possible to set timer->base = NULL and drop the lock: the timer remains |
570 | * locked. | 573 | * locked. |
571 | */ | 574 | */ |
572 | static struct tvec_base *lock_timer_base(struct timer_list *timer, | 575 | static struct tvec_base *lock_timer_base(struct timer_list *timer, |
573 | unsigned long *flags) | 576 | unsigned long *flags) |
574 | __acquires(timer->base->lock) | 577 | __acquires(timer->base->lock) |
575 | { | 578 | { |
576 | struct tvec_base *base; | 579 | struct tvec_base *base; |
577 | 580 | ||
578 | for (;;) { | 581 | for (;;) { |
579 | struct tvec_base *prelock_base = timer->base; | 582 | struct tvec_base *prelock_base = timer->base; |
580 | base = tbase_get_base(prelock_base); | 583 | base = tbase_get_base(prelock_base); |
581 | if (likely(base != NULL)) { | 584 | if (likely(base != NULL)) { |
582 | spin_lock_irqsave(&base->lock, *flags); | 585 | spin_lock_irqsave(&base->lock, *flags); |
583 | if (likely(prelock_base == timer->base)) | 586 | if (likely(prelock_base == timer->base)) |
584 | return base; | 587 | return base; |
585 | /* The timer has migrated to another CPU */ | 588 | /* The timer has migrated to another CPU */ |
586 | spin_unlock_irqrestore(&base->lock, *flags); | 589 | spin_unlock_irqrestore(&base->lock, *flags); |
587 | } | 590 | } |
588 | cpu_relax(); | 591 | cpu_relax(); |
589 | } | 592 | } |
590 | } | 593 | } |
591 | 594 | ||
592 | static inline int | 595 | static inline int |
593 | __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) | 596 | __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) |
594 | { | 597 | { |
595 | struct tvec_base *base, *new_base; | 598 | struct tvec_base *base, *new_base; |
596 | unsigned long flags; | 599 | unsigned long flags; |
597 | int ret; | 600 | int ret; |
598 | 601 | ||
599 | ret = 0; | 602 | ret = 0; |
600 | 603 | ||
601 | timer_stats_timer_set_start_info(timer); | 604 | timer_stats_timer_set_start_info(timer); |
602 | BUG_ON(!timer->function); | 605 | BUG_ON(!timer->function); |
603 | 606 | ||
604 | base = lock_timer_base(timer, &flags); | 607 | base = lock_timer_base(timer, &flags); |
605 | 608 | ||
606 | if (timer_pending(timer)) { | 609 | if (timer_pending(timer)) { |
607 | detach_timer(timer, 0); | 610 | detach_timer(timer, 0); |
608 | ret = 1; | 611 | ret = 1; |
609 | } else { | 612 | } else { |
610 | if (pending_only) | 613 | if (pending_only) |
611 | goto out_unlock; | 614 | goto out_unlock; |
612 | } | 615 | } |
613 | 616 | ||
614 | debug_timer_activate(timer); | 617 | debug_timer_activate(timer); |
615 | 618 | ||
616 | new_base = __get_cpu_var(tvec_bases); | 619 | new_base = __get_cpu_var(tvec_bases); |
617 | 620 | ||
618 | if (base != new_base) { | 621 | if (base != new_base) { |
619 | /* | 622 | /* |
620 | * We are trying to schedule the timer on the local CPU. | 623 | * We are trying to schedule the timer on the local CPU. |
621 | * However we can't change timer's base while it is running, | 624 | * However we can't change timer's base while it is running, |
622 | * otherwise del_timer_sync() can't detect that the timer's | 625 | * otherwise del_timer_sync() can't detect that the timer's |
623 | * handler yet has not finished. This also guarantees that | 626 | * handler yet has not finished. This also guarantees that |
624 | * the timer is serialized wrt itself. | 627 | * the timer is serialized wrt itself. |
625 | */ | 628 | */ |
626 | if (likely(base->running_timer != timer)) { | 629 | if (likely(base->running_timer != timer)) { |
627 | /* See the comment in lock_timer_base() */ | 630 | /* See the comment in lock_timer_base() */ |
628 | timer_set_base(timer, NULL); | 631 | timer_set_base(timer, NULL); |
629 | spin_unlock(&base->lock); | 632 | spin_unlock(&base->lock); |
630 | base = new_base; | 633 | base = new_base; |
631 | spin_lock(&base->lock); | 634 | spin_lock(&base->lock); |
632 | timer_set_base(timer, base); | 635 | timer_set_base(timer, base); |
633 | } | 636 | } |
634 | } | 637 | } |
635 | 638 | ||
636 | timer->expires = expires; | 639 | timer->expires = expires; |
637 | internal_add_timer(base, timer); | 640 | internal_add_timer(base, timer); |
638 | 641 | ||
639 | out_unlock: | 642 | out_unlock: |
640 | spin_unlock_irqrestore(&base->lock, flags); | 643 | spin_unlock_irqrestore(&base->lock, flags); |
641 | 644 | ||
642 | return ret; | 645 | return ret; |
643 | } | 646 | } |
644 | 647 | ||
645 | /** | 648 | /** |
646 | * mod_timer_pending - modify a pending timer's timeout | 649 | * mod_timer_pending - modify a pending timer's timeout |
647 | * @timer: the pending timer to be modified | 650 | * @timer: the pending timer to be modified |
648 | * @expires: new timeout in jiffies | 651 | * @expires: new timeout in jiffies |
649 | * | 652 | * |
650 | * mod_timer_pending() is the same for pending timers as mod_timer(), | 653 | * mod_timer_pending() is the same for pending timers as mod_timer(), |
651 | * but will not re-activate and modify already deleted timers. | 654 | * but will not re-activate and modify already deleted timers. |
652 | * | 655 | * |
653 | * It is useful for unserialized use of timers. | 656 | * It is useful for unserialized use of timers. |
654 | */ | 657 | */ |
655 | int mod_timer_pending(struct timer_list *timer, unsigned long expires) | 658 | int mod_timer_pending(struct timer_list *timer, unsigned long expires) |
656 | { | 659 | { |
657 | return __mod_timer(timer, expires, true); | 660 | return __mod_timer(timer, expires, true); |
658 | } | 661 | } |
659 | EXPORT_SYMBOL(mod_timer_pending); | 662 | EXPORT_SYMBOL(mod_timer_pending); |
660 | 663 | ||
661 | /** | 664 | /** |
662 | * mod_timer - modify a timer's timeout | 665 | * mod_timer - modify a timer's timeout |
663 | * @timer: the timer to be modified | 666 | * @timer: the timer to be modified |
664 | * @expires: new timeout in jiffies | 667 | * @expires: new timeout in jiffies |
665 | * | 668 | * |
666 | * mod_timer() is a more efficient way to update the expire field of an | 669 | * mod_timer() is a more efficient way to update the expire field of an |
667 | * active timer (if the timer is inactive it will be activated) | 670 | * active timer (if the timer is inactive it will be activated) |
668 | * | 671 | * |
669 | * mod_timer(timer, expires) is equivalent to: | 672 | * mod_timer(timer, expires) is equivalent to: |
670 | * | 673 | * |
671 | * del_timer(timer); timer->expires = expires; add_timer(timer); | 674 | * del_timer(timer); timer->expires = expires; add_timer(timer); |
672 | * | 675 | * |
673 | * Note that if there are multiple unserialized concurrent users of the | 676 | * Note that if there are multiple unserialized concurrent users of the |
674 | * same timer, then mod_timer() is the only safe way to modify the timeout, | 677 | * same timer, then mod_timer() is the only safe way to modify the timeout, |
675 | * since add_timer() cannot modify an already running timer. | 678 | * since add_timer() cannot modify an already running timer. |
676 | * | 679 | * |
677 | * The function returns whether it has modified a pending timer or not. | 680 | * The function returns whether it has modified a pending timer or not. |
678 | * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an | 681 | * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an |
679 | * active timer returns 1.) | 682 | * active timer returns 1.) |
680 | */ | 683 | */ |
681 | int mod_timer(struct timer_list *timer, unsigned long expires) | 684 | int mod_timer(struct timer_list *timer, unsigned long expires) |
682 | { | 685 | { |
683 | /* | 686 | /* |
684 | * This is a common optimization triggered by the | 687 | * This is a common optimization triggered by the |
685 | * networking code - if the timer is re-modified | 688 | * networking code - if the timer is re-modified |
686 | * to be the same thing then just return: | 689 | * to be the same thing then just return: |
687 | */ | 690 | */ |
688 | if (timer->expires == expires && timer_pending(timer)) | 691 | if (timer->expires == expires && timer_pending(timer)) |
689 | return 1; | 692 | return 1; |
690 | 693 | ||
691 | return __mod_timer(timer, expires, false); | 694 | return __mod_timer(timer, expires, false); |
692 | } | 695 | } |
693 | EXPORT_SYMBOL(mod_timer); | 696 | EXPORT_SYMBOL(mod_timer); |
694 | 697 | ||
695 | /** | 698 | /** |
696 | * add_timer - start a timer | 699 | * add_timer - start a timer |
697 | * @timer: the timer to be added | 700 | * @timer: the timer to be added |
698 | * | 701 | * |
699 | * The kernel will do a ->function(->data) callback from the | 702 | * The kernel will do a ->function(->data) callback from the |
700 | * timer interrupt at the ->expires point in the future. The | 703 | * timer interrupt at the ->expires point in the future. The |
701 | * current time is 'jiffies'. | 704 | * current time is 'jiffies'. |
702 | * | 705 | * |
703 | * The timer's ->expires, ->function (and if the handler uses it, ->data) | 706 | * The timer's ->expires, ->function (and if the handler uses it, ->data) |
704 | * fields must be set prior calling this function. | 707 | * fields must be set prior calling this function. |
705 | * | 708 | * |
706 | * Timers with an ->expires field in the past will be executed in the next | 709 | * Timers with an ->expires field in the past will be executed in the next |
707 | * timer tick. | 710 | * timer tick. |
708 | */ | 711 | */ |
709 | void add_timer(struct timer_list *timer) | 712 | void add_timer(struct timer_list *timer) |
710 | { | 713 | { |
711 | BUG_ON(timer_pending(timer)); | 714 | BUG_ON(timer_pending(timer)); |
712 | mod_timer(timer, timer->expires); | 715 | mod_timer(timer, timer->expires); |
713 | } | 716 | } |
714 | EXPORT_SYMBOL(add_timer); | 717 | EXPORT_SYMBOL(add_timer); |
715 | 718 | ||
716 | /** | 719 | /** |
717 | * add_timer_on - start a timer on a particular CPU | 720 | * add_timer_on - start a timer on a particular CPU |
718 | * @timer: the timer to be added | 721 | * @timer: the timer to be added |
719 | * @cpu: the CPU to start it on | 722 | * @cpu: the CPU to start it on |
720 | * | 723 | * |
721 | * This is not very scalable on SMP. Double adds are not possible. | 724 | * This is not very scalable on SMP. Double adds are not possible. |
722 | */ | 725 | */ |
723 | void add_timer_on(struct timer_list *timer, int cpu) | 726 | void add_timer_on(struct timer_list *timer, int cpu) |
724 | { | 727 | { |
725 | struct tvec_base *base = per_cpu(tvec_bases, cpu); | 728 | struct tvec_base *base = per_cpu(tvec_bases, cpu); |
726 | unsigned long flags; | 729 | unsigned long flags; |
727 | 730 | ||
728 | timer_stats_timer_set_start_info(timer); | 731 | timer_stats_timer_set_start_info(timer); |
729 | BUG_ON(timer_pending(timer) || !timer->function); | 732 | BUG_ON(timer_pending(timer) || !timer->function); |
730 | spin_lock_irqsave(&base->lock, flags); | 733 | spin_lock_irqsave(&base->lock, flags); |
731 | timer_set_base(timer, base); | 734 | timer_set_base(timer, base); |
732 | debug_timer_activate(timer); | 735 | debug_timer_activate(timer); |
733 | internal_add_timer(base, timer); | 736 | internal_add_timer(base, timer); |
734 | /* | 737 | /* |
735 | * Check whether the other CPU is idle and needs to be | 738 | * Check whether the other CPU is idle and needs to be |
736 | * triggered to reevaluate the timer wheel when nohz is | 739 | * triggered to reevaluate the timer wheel when nohz is |
737 | * active. We are protected against the other CPU fiddling | 740 | * active. We are protected against the other CPU fiddling |
738 | * with the timer by holding the timer base lock. This also | 741 | * with the timer by holding the timer base lock. This also |
739 | * makes sure that a CPU on the way to idle can not evaluate | 742 | * makes sure that a CPU on the way to idle can not evaluate |
740 | * the timer wheel. | 743 | * the timer wheel. |
741 | */ | 744 | */ |
742 | wake_up_idle_cpu(cpu); | 745 | wake_up_idle_cpu(cpu); |
743 | spin_unlock_irqrestore(&base->lock, flags); | 746 | spin_unlock_irqrestore(&base->lock, flags); |
744 | } | 747 | } |
745 | 748 | ||
746 | /** | 749 | /** |
747 | * del_timer - deactive a timer. | 750 | * del_timer - deactive a timer. |
748 | * @timer: the timer to be deactivated | 751 | * @timer: the timer to be deactivated |
749 | * | 752 | * |
750 | * del_timer() deactivates a timer - this works on both active and inactive | 753 | * del_timer() deactivates a timer - this works on both active and inactive |
751 | * timers. | 754 | * timers. |
752 | * | 755 | * |
753 | * The function returns whether it has deactivated a pending timer or not. | 756 | * The function returns whether it has deactivated a pending timer or not. |
754 | * (ie. del_timer() of an inactive timer returns 0, del_timer() of an | 757 | * (ie. del_timer() of an inactive timer returns 0, del_timer() of an |
755 | * active timer returns 1.) | 758 | * active timer returns 1.) |
756 | */ | 759 | */ |
757 | int del_timer(struct timer_list *timer) | 760 | int del_timer(struct timer_list *timer) |
758 | { | 761 | { |
759 | struct tvec_base *base; | 762 | struct tvec_base *base; |
760 | unsigned long flags; | 763 | unsigned long flags; |
761 | int ret = 0; | 764 | int ret = 0; |
762 | 765 | ||
763 | timer_stats_timer_clear_start_info(timer); | 766 | timer_stats_timer_clear_start_info(timer); |
764 | if (timer_pending(timer)) { | 767 | if (timer_pending(timer)) { |
765 | base = lock_timer_base(timer, &flags); | 768 | base = lock_timer_base(timer, &flags); |
766 | if (timer_pending(timer)) { | 769 | if (timer_pending(timer)) { |
767 | detach_timer(timer, 1); | 770 | detach_timer(timer, 1); |
768 | ret = 1; | 771 | ret = 1; |
769 | } | 772 | } |
770 | spin_unlock_irqrestore(&base->lock, flags); | 773 | spin_unlock_irqrestore(&base->lock, flags); |
771 | } | 774 | } |
772 | 775 | ||
773 | return ret; | 776 | return ret; |
774 | } | 777 | } |
775 | EXPORT_SYMBOL(del_timer); | 778 | EXPORT_SYMBOL(del_timer); |
776 | 779 | ||
777 | #ifdef CONFIG_SMP | 780 | #ifdef CONFIG_SMP |
778 | /** | 781 | /** |
779 | * try_to_del_timer_sync - Try to deactivate a timer | 782 | * try_to_del_timer_sync - Try to deactivate a timer |
780 | * @timer: timer do del | 783 | * @timer: timer do del |
781 | * | 784 | * |
782 | * This function tries to deactivate a timer. Upon successful (ret >= 0) | 785 | * This function tries to deactivate a timer. Upon successful (ret >= 0) |
783 | * exit the timer is not queued and the handler is not running on any CPU. | 786 | * exit the timer is not queued and the handler is not running on any CPU. |
784 | * | 787 | * |
785 | * It must not be called from interrupt contexts. | 788 | * It must not be called from interrupt contexts. |
786 | */ | 789 | */ |
787 | int try_to_del_timer_sync(struct timer_list *timer) | 790 | int try_to_del_timer_sync(struct timer_list *timer) |
788 | { | 791 | { |
789 | struct tvec_base *base; | 792 | struct tvec_base *base; |
790 | unsigned long flags; | 793 | unsigned long flags; |
791 | int ret = -1; | 794 | int ret = -1; |
792 | 795 | ||
793 | base = lock_timer_base(timer, &flags); | 796 | base = lock_timer_base(timer, &flags); |
794 | 797 | ||
795 | if (base->running_timer == timer) | 798 | if (base->running_timer == timer) |
796 | goto out; | 799 | goto out; |
797 | 800 | ||
798 | ret = 0; | 801 | ret = 0; |
799 | if (timer_pending(timer)) { | 802 | if (timer_pending(timer)) { |
800 | detach_timer(timer, 1); | 803 | detach_timer(timer, 1); |
801 | ret = 1; | 804 | ret = 1; |
802 | } | 805 | } |
803 | out: | 806 | out: |
804 | spin_unlock_irqrestore(&base->lock, flags); | 807 | spin_unlock_irqrestore(&base->lock, flags); |
805 | 808 | ||
806 | return ret; | 809 | return ret; |
807 | } | 810 | } |
808 | EXPORT_SYMBOL(try_to_del_timer_sync); | 811 | EXPORT_SYMBOL(try_to_del_timer_sync); |
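A hedged sketch of the situation try_to_del_timer_sync() exists for: the caller holds a lock that the timer handler also takes, so calling del_timer_sync() there could deadlock. struct my_ctx and its fields are hypothetical:

#include <linux/timer.h>
#include <linux/spinlock.h>

struct my_ctx {                         /* hypothetical driver context */
        spinlock_t lock;
        struct timer_list timer;
};

/* Called with ctx->lock held; the timer handler also takes ctx->lock. */
static void my_cancel_locked(struct my_ctx *ctx)
{
        for (;;) {
                /* >= 0 means the timer is now neither queued nor running */
                if (try_to_del_timer_sync(&ctx->timer) >= 0)
                        break;
                /* handler is running: drop the lock so it can finish */
                spin_unlock(&ctx->lock);
                cpu_relax();
                spin_lock(&ctx->lock);
        }
}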
809 | 812 | ||
810 | /** | 813 | /** |
811 | * del_timer_sync - deactivate a timer and wait for the handler to finish. | 814 | * del_timer_sync - deactivate a timer and wait for the handler to finish. |
812 | * @timer: the timer to be deactivated | 815 | * @timer: the timer to be deactivated |
813 | * | 816 | * |
814 | * This function only differs from del_timer() on SMP: besides deactivating | 817 | * This function only differs from del_timer() on SMP: besides deactivating |
815 | * the timer it also makes sure the handler has finished executing on other | 818 | * the timer it also makes sure the handler has finished executing on other |
816 | * CPUs. | 819 | * CPUs. |
817 | * | 820 | * |
818 | * Synchronization rules: Callers must prevent restarting of the timer, | 821 | * Synchronization rules: Callers must prevent restarting of the timer, |
819 | * otherwise this function is meaningless. It must not be called from | 822 | * otherwise this function is meaningless. It must not be called from |
820 | * interrupt contexts. The caller must not hold locks which would prevent | 823 | * interrupt contexts. The caller must not hold locks which would prevent |
821 | * completion of the timer's handler. The timer's handler must not call | 824 | * completion of the timer's handler. The timer's handler must not call |
822 | * add_timer_on(). Upon exit the timer is not queued and the handler is | 825 | * add_timer_on(). Upon exit the timer is not queued and the handler is |
823 | * not running on any CPU. | 826 | * not running on any CPU. |
824 | * | 827 | * |
825 | * The function returns whether it has deactivated a pending timer or not. | 828 | * The function returns whether it has deactivated a pending timer or not. |
826 | */ | 829 | */ |
827 | int del_timer_sync(struct timer_list *timer) | 830 | int del_timer_sync(struct timer_list *timer) |
828 | { | 831 | { |
829 | for (;;) { | 832 | for (;;) { |
830 | int ret = try_to_del_timer_sync(timer); | 833 | int ret = try_to_del_timer_sync(timer); |
831 | if (ret >= 0) | 834 | if (ret >= 0) |
832 | return ret; | 835 | return ret; |
833 | cpu_relax(); | 836 | cpu_relax(); |
834 | } | 837 | } |
835 | } | 838 | } |
836 | EXPORT_SYMBOL(del_timer_sync); | 839 | EXPORT_SYMBOL(del_timer_sync); |
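A hypothetical teardown sketch for a self re-arming timer; it assumes nothing else re-arms poll_timer once the exit path runs, per the synchronization rules above (all names are illustrative):

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list poll_timer;            /* hypothetical periodic timer */

static void poll_fn(unsigned long data)
{
        /* ... periodic work ... */
        mod_timer(&poll_timer, jiffies + HZ);   /* re-arm */
}

static void __exit my_exit(void)
{
        /*
         * Removes any pending instance and waits for a handler that may
         * be running on another CPU; nothing else re-arms poll_timer
         * after this point.
         */
        del_timer_sync(&poll_timer);
}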
837 | #endif | 840 | #endif |
838 | 841 | ||
839 | static int cascade(struct tvec_base *base, struct tvec *tv, int index) | 842 | static int cascade(struct tvec_base *base, struct tvec *tv, int index) |
840 | { | 843 | { |
841 | /* cascade all the timers from tv up one level */ | 844 | /* cascade all the timers from tv up one level */ |
842 | struct timer_list *timer, *tmp; | 845 | struct timer_list *timer, *tmp; |
843 | struct list_head tv_list; | 846 | struct list_head tv_list; |
844 | 847 | ||
845 | list_replace_init(tv->vec + index, &tv_list); | 848 | list_replace_init(tv->vec + index, &tv_list); |
846 | 849 | ||
847 | /* | 850 | /* |
848 | * We are removing _all_ timers from the list, so we | 851 | * We are removing _all_ timers from the list, so we |
849 | * don't have to detach them individually. | 852 | * don't have to detach them individually. |
850 | */ | 853 | */ |
851 | list_for_each_entry_safe(timer, tmp, &tv_list, entry) { | 854 | list_for_each_entry_safe(timer, tmp, &tv_list, entry) { |
852 | BUG_ON(tbase_get_base(timer->base) != base); | 855 | BUG_ON(tbase_get_base(timer->base) != base); |
853 | internal_add_timer(base, timer); | 856 | internal_add_timer(base, timer); |
854 | } | 857 | } |
855 | 858 | ||
856 | return index; | 859 | return index; |
857 | } | 860 | } |
858 | 861 | ||
859 | #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) | 862 | #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK) |
860 | 863 | ||
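For orientation, the bucket arithmetic behind INDEX(), assuming the usual !CONFIG_BASE_SMALL values TVR_BITS = 8 and TVN_BITS = 6 (an assumption; the small-base configuration uses different widths):

/*
 * With TVR_BITS = 8 and TVN_BITS = 6 (assumed):
 *
 *   INDEX(0) = (timer_jiffies >>  8) & 63;    bucket in tv2
 *   INDEX(1) = (timer_jiffies >> 14) & 63;    bucket in tv3
 *   INDEX(2) = (timer_jiffies >> 20) & 63;    bucket in tv4
 *   INDEX(3) = (timer_jiffies >> 26) & 63;    bucket in tv5
 *
 * tv1 covers the next 256 jiffies directly; each higher vector spans
 * 64 times the range of the one below it and is cascaded back down
 * whenever the lower vector wraps around to index 0.
 */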
861 | /** | 864 | /** |
862 | * __run_timers - run all expired timers (if any) on this CPU. | 865 | * __run_timers - run all expired timers (if any) on this CPU. |
863 | * @base: the timer vector to be processed. | 866 | * @base: the timer vector to be processed. |
864 | * | 867 | * |
865 | * This function cascades all vectors and runs any timers that | 868 | * This function cascades all vectors and runs any timers that |
866 | * have expired. | 869 | * have expired. |
867 | */ | 870 | */ |
868 | static inline void __run_timers(struct tvec_base *base) | 871 | static inline void __run_timers(struct tvec_base *base) |
869 | { | 872 | { |
870 | struct timer_list *timer; | 873 | struct timer_list *timer; |
871 | 874 | ||
872 | spin_lock_irq(&base->lock); | 875 | spin_lock_irq(&base->lock); |
873 | while (time_after_eq(jiffies, base->timer_jiffies)) { | 876 | while (time_after_eq(jiffies, base->timer_jiffies)) { |
874 | struct list_head work_list; | 877 | struct list_head work_list; |
875 | struct list_head *head = &work_list; | 878 | struct list_head *head = &work_list; |
876 | int index = base->timer_jiffies & TVR_MASK; | 879 | int index = base->timer_jiffies & TVR_MASK; |
877 | 880 | ||
878 | /* | 881 | /* |
879 | * Cascade timers: | 882 | * Cascade timers: |
880 | */ | 883 | */ |
881 | if (!index && | 884 | if (!index && |
882 | (!cascade(base, &base->tv2, INDEX(0))) && | 885 | (!cascade(base, &base->tv2, INDEX(0))) && |
883 | (!cascade(base, &base->tv3, INDEX(1))) && | 886 | (!cascade(base, &base->tv3, INDEX(1))) && |
884 | !cascade(base, &base->tv4, INDEX(2))) | 887 | !cascade(base, &base->tv4, INDEX(2))) |
885 | cascade(base, &base->tv5, INDEX(3)); | 888 | cascade(base, &base->tv5, INDEX(3)); |
886 | ++base->timer_jiffies; | 889 | ++base->timer_jiffies; |
887 | list_replace_init(base->tv1.vec + index, &work_list); | 890 | list_replace_init(base->tv1.vec + index, &work_list); |
888 | while (!list_empty(head)) { | 891 | while (!list_empty(head)) { |
889 | void (*fn)(unsigned long); | 892 | void (*fn)(unsigned long); |
890 | unsigned long data; | 893 | unsigned long data; |
891 | 894 | ||
892 | timer = list_first_entry(head, struct timer_list,entry); | 895 | timer = list_first_entry(head, struct timer_list,entry); |
893 | fn = timer->function; | 896 | fn = timer->function; |
894 | data = timer->data; | 897 | data = timer->data; |
895 | 898 | ||
896 | timer_stats_account_timer(timer); | 899 | timer_stats_account_timer(timer); |
897 | 900 | ||
898 | set_running_timer(base, timer); | 901 | set_running_timer(base, timer); |
899 | detach_timer(timer, 1); | 902 | detach_timer(timer, 1); |
900 | spin_unlock_irq(&base->lock); | 903 | spin_unlock_irq(&base->lock); |
901 | { | 904 | { |
902 | int preempt_count = preempt_count(); | 905 | int preempt_count = preempt_count(); |
903 | fn(data); | 906 | fn(data); |
904 | if (preempt_count != preempt_count()) { | 907 | if (preempt_count != preempt_count()) { |
905 | printk(KERN_ERR "huh, entered %p " | 908 | printk(KERN_ERR "huh, entered %p " |
906 | "with preempt_count %08x, exited" | 909 | "with preempt_count %08x, exited" |
907 | " with %08x?\n", | 910 | " with %08x?\n", |
908 | fn, preempt_count, | 911 | fn, preempt_count, |
909 | preempt_count()); | 912 | preempt_count()); |
910 | BUG(); | 913 | BUG(); |
911 | } | 914 | } |
912 | } | 915 | } |
913 | spin_lock_irq(&base->lock); | 916 | spin_lock_irq(&base->lock); |
914 | } | 917 | } |
915 | } | 918 | } |
916 | set_running_timer(base, NULL); | 919 | set_running_timer(base, NULL); |
917 | spin_unlock_irq(&base->lock); | 920 | spin_unlock_irq(&base->lock); |
918 | } | 921 | } |
919 | 922 | ||
920 | #ifdef CONFIG_NO_HZ | 923 | #ifdef CONFIG_NO_HZ |
921 | /* | 924 | /* |
922 | * Find out when the next timer event is due to happen. This | 925 | * Find out when the next timer event is due to happen. This |
923 | * is used on S/390 to stop all activity when a CPU is idle. | 926 | * is used on S/390 to stop all activity when a CPU is idle. |
924 | * This function needs to be called with interrupts disabled. | 927 | * This function needs to be called with interrupts disabled. |
925 | */ | 928 | */ |
926 | static unsigned long __next_timer_interrupt(struct tvec_base *base) | 929 | static unsigned long __next_timer_interrupt(struct tvec_base *base) |
927 | { | 930 | { |
928 | unsigned long timer_jiffies = base->timer_jiffies; | 931 | unsigned long timer_jiffies = base->timer_jiffies; |
929 | unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA; | 932 | unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA; |
930 | int index, slot, array, found = 0; | 933 | int index, slot, array, found = 0; |
931 | struct timer_list *nte; | 934 | struct timer_list *nte; |
932 | struct tvec *varray[4]; | 935 | struct tvec *varray[4]; |
933 | 936 | ||
934 | /* Look for timer events in tv1. */ | 937 | /* Look for timer events in tv1. */ |
935 | index = slot = timer_jiffies & TVR_MASK; | 938 | index = slot = timer_jiffies & TVR_MASK; |
936 | do { | 939 | do { |
937 | list_for_each_entry(nte, base->tv1.vec + slot, entry) { | 940 | list_for_each_entry(nte, base->tv1.vec + slot, entry) { |
938 | if (tbase_get_deferrable(nte->base)) | 941 | if (tbase_get_deferrable(nte->base)) |
939 | continue; | 942 | continue; |
940 | 943 | ||
941 | found = 1; | 944 | found = 1; |
942 | expires = nte->expires; | 945 | expires = nte->expires; |
943 | /* Look at the cascade bucket(s)? */ | 946 | /* Look at the cascade bucket(s)? */ |
944 | if (!index || slot < index) | 947 | if (!index || slot < index) |
945 | goto cascade; | 948 | goto cascade; |
946 | return expires; | 949 | return expires; |
947 | } | 950 | } |
948 | slot = (slot + 1) & TVR_MASK; | 951 | slot = (slot + 1) & TVR_MASK; |
949 | } while (slot != index); | 952 | } while (slot != index); |
950 | 953 | ||
951 | cascade: | 954 | cascade: |
952 | /* Calculate the next cascade event */ | 955 | /* Calculate the next cascade event */ |
953 | if (index) | 956 | if (index) |
954 | timer_jiffies += TVR_SIZE - index; | 957 | timer_jiffies += TVR_SIZE - index; |
955 | timer_jiffies >>= TVR_BITS; | 958 | timer_jiffies >>= TVR_BITS; |
956 | 959 | ||
957 | /* Check tv2-tv5. */ | 960 | /* Check tv2-tv5. */ |
958 | varray[0] = &base->tv2; | 961 | varray[0] = &base->tv2; |
959 | varray[1] = &base->tv3; | 962 | varray[1] = &base->tv3; |
960 | varray[2] = &base->tv4; | 963 | varray[2] = &base->tv4; |
961 | varray[3] = &base->tv5; | 964 | varray[3] = &base->tv5; |
962 | 965 | ||
963 | for (array = 0; array < 4; array++) { | 966 | for (array = 0; array < 4; array++) { |
964 | struct tvec *varp = varray[array]; | 967 | struct tvec *varp = varray[array]; |
965 | 968 | ||
966 | index = slot = timer_jiffies & TVN_MASK; | 969 | index = slot = timer_jiffies & TVN_MASK; |
967 | do { | 970 | do { |
968 | list_for_each_entry(nte, varp->vec + slot, entry) { | 971 | list_for_each_entry(nte, varp->vec + slot, entry) { |
969 | found = 1; | 972 | found = 1; |
970 | if (time_before(nte->expires, expires)) | 973 | if (time_before(nte->expires, expires)) |
971 | expires = nte->expires; | 974 | expires = nte->expires; |
972 | } | 975 | } |
973 | /* | 976 | /* |
974 | * Are we still searching for the first timer, or are | 977 | * Are we still searching for the first timer, or are |
975 | * we looking up the cascade buckets? | 978 | * we looking up the cascade buckets? |
976 | */ | 979 | */ |
977 | if (found) { | 980 | if (found) { |
978 | /* Look at the cascade bucket(s)? */ | 981 | /* Look at the cascade bucket(s)? */ |
979 | if (!index || slot < index) | 982 | if (!index || slot < index) |
980 | break; | 983 | break; |
981 | return expires; | 984 | return expires; |
982 | } | 985 | } |
983 | slot = (slot + 1) & TVN_MASK; | 986 | slot = (slot + 1) & TVN_MASK; |
984 | } while (slot != index); | 987 | } while (slot != index); |
985 | 988 | ||
986 | if (index) | 989 | if (index) |
987 | timer_jiffies += TVN_SIZE - index; | 990 | timer_jiffies += TVN_SIZE - index; |
988 | timer_jiffies >>= TVN_BITS; | 991 | timer_jiffies >>= TVN_BITS; |
989 | } | 992 | } |
990 | return expires; | 993 | return expires; |
991 | } | 994 | } |
992 | 995 | ||
993 | /* | 996 | /* |
994 | * Check, if the next hrtimer event is before the next timer wheel | 997 | * Check, if the next hrtimer event is before the next timer wheel |
995 | * event: | 998 | * event: |
996 | */ | 999 | */ |
997 | static unsigned long cmp_next_hrtimer_event(unsigned long now, | 1000 | static unsigned long cmp_next_hrtimer_event(unsigned long now, |
998 | unsigned long expires) | 1001 | unsigned long expires) |
999 | { | 1002 | { |
1000 | ktime_t hr_delta = hrtimer_get_next_event(); | 1003 | ktime_t hr_delta = hrtimer_get_next_event(); |
1001 | struct timespec tsdelta; | 1004 | struct timespec tsdelta; |
1002 | unsigned long delta; | 1005 | unsigned long delta; |
1003 | 1006 | ||
1004 | if (hr_delta.tv64 == KTIME_MAX) | 1007 | if (hr_delta.tv64 == KTIME_MAX) |
1005 | return expires; | 1008 | return expires; |
1006 | 1009 | ||
1007 | /* | 1010 | /* |
1008 | * Expired timer available, let it expire in the next tick | 1011 | * Expired timer available, let it expire in the next tick |
1009 | */ | 1012 | */ |
1010 | if (hr_delta.tv64 <= 0) | 1013 | if (hr_delta.tv64 <= 0) |
1011 | return now + 1; | 1014 | return now + 1; |
1012 | 1015 | ||
1013 | tsdelta = ktime_to_timespec(hr_delta); | 1016 | tsdelta = ktime_to_timespec(hr_delta); |
1014 | delta = timespec_to_jiffies(&tsdelta); | 1017 | delta = timespec_to_jiffies(&tsdelta); |
1015 | 1018 | ||
1016 | /* | 1019 | /* |
1017 | * Limit the delta to the max value, which is checked in | 1020 | * Limit the delta to the max value, which is checked in |
1018 | * tick_nohz_stop_sched_tick(): | 1021 | * tick_nohz_stop_sched_tick(): |
1019 | */ | 1022 | */ |
1020 | if (delta > NEXT_TIMER_MAX_DELTA) | 1023 | if (delta > NEXT_TIMER_MAX_DELTA) |
1021 | delta = NEXT_TIMER_MAX_DELTA; | 1024 | delta = NEXT_TIMER_MAX_DELTA; |
1022 | 1025 | ||
1023 | /* | 1026 | /* |
1024 | * Take rounding errors into account and make sure that it | 1027 | * Take rounding errors into account and make sure that it |
1025 | * expires in the next tick. Otherwise we go into an endless | 1028 | * expires in the next tick. Otherwise we go into an endless |
1026 | * ping pong due to tick_nohz_stop_sched_tick() retriggering | 1029 | * ping pong due to tick_nohz_stop_sched_tick() retriggering |
1027 | * the timer softirq | 1030 | * the timer softirq |
1028 | */ | 1031 | */ |
1029 | if (delta < 1) | 1032 | if (delta < 1) |
1030 | delta = 1; | 1033 | delta = 1; |
1031 | now += delta; | 1034 | now += delta; |
1032 | if (time_before(now, expires)) | 1035 | if (time_before(now, expires)) |
1033 | return now; | 1036 | return now; |
1034 | return expires; | 1037 | return expires; |
1035 | } | 1038 | } |
1036 | 1039 | ||
1037 | /** | 1040 | /** |
1038 | * get_next_timer_interrupt - return the jiffy of the next pending timer | 1041 | * get_next_timer_interrupt - return the jiffy of the next pending timer |
1039 | * @now: current time (in jiffies) | 1042 | * @now: current time (in jiffies) |
1040 | */ | 1043 | */ |
1041 | unsigned long get_next_timer_interrupt(unsigned long now) | 1044 | unsigned long get_next_timer_interrupt(unsigned long now) |
1042 | { | 1045 | { |
1043 | struct tvec_base *base = __get_cpu_var(tvec_bases); | 1046 | struct tvec_base *base = __get_cpu_var(tvec_bases); |
1044 | unsigned long expires; | 1047 | unsigned long expires; |
1045 | 1048 | ||
1046 | spin_lock(&base->lock); | 1049 | spin_lock(&base->lock); |
1047 | expires = __next_timer_interrupt(base); | 1050 | expires = __next_timer_interrupt(base); |
1048 | spin_unlock(&base->lock); | 1051 | spin_unlock(&base->lock); |
1049 | 1052 | ||
1050 | if (time_before_eq(expires, now)) | 1053 | if (time_before_eq(expires, now)) |
1051 | return now; | 1054 | return now; |
1052 | 1055 | ||
1053 | return cmp_next_hrtimer_event(now, expires); | 1056 | return cmp_next_hrtimer_event(now, expires); |
1054 | } | 1057 | } |
1055 | #endif | 1058 | #endif |
1056 | 1059 | ||
1057 | /* | 1060 | /* |
1058 | * Called from the timer interrupt handler to charge one tick to the current | 1061 | * Called from the timer interrupt handler to charge one tick to the current |
1059 | * process. user_tick is 1 if the tick is user time, 0 for system. | 1062 | * process. user_tick is 1 if the tick is user time, 0 for system. |
1060 | */ | 1063 | */ |
1061 | void update_process_times(int user_tick) | 1064 | void update_process_times(int user_tick) |
1062 | { | 1065 | { |
1063 | struct task_struct *p = current; | 1066 | struct task_struct *p = current; |
1064 | int cpu = smp_processor_id(); | 1067 | int cpu = smp_processor_id(); |
1065 | 1068 | ||
1066 | /* Note: this timer irq context must be accounted for as well. */ | 1069 | /* Note: this timer irq context must be accounted for as well. */ |
1067 | account_process_tick(p, user_tick); | 1070 | account_process_tick(p, user_tick); |
1068 | run_local_timers(); | 1071 | run_local_timers(); |
1069 | if (rcu_pending(cpu)) | 1072 | if (rcu_pending(cpu)) |
1070 | rcu_check_callbacks(cpu, user_tick); | 1073 | rcu_check_callbacks(cpu, user_tick); |
1071 | printk_tick(); | 1074 | printk_tick(); |
1072 | scheduler_tick(); | 1075 | scheduler_tick(); |
1073 | run_posix_cpu_timers(p); | 1076 | run_posix_cpu_timers(p); |
1074 | } | 1077 | } |
1075 | 1078 | ||
1076 | /* | 1079 | /* |
1077 | * Nr of active tasks - counted in fixed-point numbers | 1080 | * Nr of active tasks - counted in fixed-point numbers |
1078 | */ | 1081 | */ |
1079 | static unsigned long count_active_tasks(void) | 1082 | static unsigned long count_active_tasks(void) |
1080 | { | 1083 | { |
1081 | return nr_active() * FIXED_1; | 1084 | return nr_active() * FIXED_1; |
1082 | } | 1085 | } |
1083 | 1086 | ||
1084 | /* | 1087 | /* |
1085 | * Hmm.. Changed this, as the GNU make sources (load.c) seem to | 1088 | * Hmm.. Changed this, as the GNU make sources (load.c) seem to |
1086 | * imply that avenrun[] is the standard name for this kind of thing. | 1089 | * imply that avenrun[] is the standard name for this kind of thing. |
1087 | * Nothing else seems to be standardized: the fractional size etc | 1090 | * Nothing else seems to be standardized: the fractional size etc |
1088 | * all seem to differ on different machines. | 1091 | * all seem to differ on different machines. |
1089 | * | 1092 | * |
1090 | * Requires xtime_lock to access. | 1093 | * Requires xtime_lock to access. |
1091 | */ | 1094 | */ |
1092 | unsigned long avenrun[3]; | 1095 | unsigned long avenrun[3]; |
1093 | 1096 | ||
1094 | EXPORT_SYMBOL(avenrun); | 1097 | EXPORT_SYMBOL(avenrun); |
1095 | 1098 | ||
1096 | /* | 1099 | /* |
1097 | * calc_load - given tick count, update the avenrun load estimates. | 1100 | * calc_load - given tick count, update the avenrun load estimates. |
1098 | * This is called while holding a write_lock on xtime_lock. | 1101 | * This is called while holding a write_lock on xtime_lock. |
1099 | */ | 1102 | */ |
1100 | static inline void calc_load(unsigned long ticks) | 1103 | static inline void calc_load(unsigned long ticks) |
1101 | { | 1104 | { |
1102 | unsigned long active_tasks; /* fixed-point */ | 1105 | unsigned long active_tasks; /* fixed-point */ |
1103 | static int count = LOAD_FREQ; | 1106 | static int count = LOAD_FREQ; |
1104 | 1107 | ||
1105 | count -= ticks; | 1108 | count -= ticks; |
1106 | if (unlikely(count < 0)) { | 1109 | if (unlikely(count < 0)) { |
1107 | active_tasks = count_active_tasks(); | 1110 | active_tasks = count_active_tasks(); |
1108 | do { | 1111 | do { |
1109 | CALC_LOAD(avenrun[0], EXP_1, active_tasks); | 1112 | CALC_LOAD(avenrun[0], EXP_1, active_tasks); |
1110 | CALC_LOAD(avenrun[1], EXP_5, active_tasks); | 1113 | CALC_LOAD(avenrun[1], EXP_5, active_tasks); |
1111 | CALC_LOAD(avenrun[2], EXP_15, active_tasks); | 1114 | CALC_LOAD(avenrun[2], EXP_15, active_tasks); |
1112 | count += LOAD_FREQ; | 1115 | count += LOAD_FREQ; |
1113 | } while (count < 0); | 1116 | } while (count < 0); |
1114 | } | 1117 | } |
1115 | } | 1118 | } |
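An illustrative, user-space-only rendering of the fixed-point update CALC_LOAD() performs; FSHIFT = 11 and EXP_1 = 1884 are the conventional values from include/linux/sched.h and are restated here as assumptions:

#include <stdio.h>

#define FSHIFT   11                     /* bits of fraction */
#define FIXED_1  (1 << FSHIFT)          /* 1.0 in fixed point */
#define EXP_1    1884                   /* ~ FIXED_1 * exp(-5s / 60s) */

#define CALC_LOAD(load, exp, n) \
        load *= (exp); load += (n) * (FIXED_1 - (exp)); load >>= FSHIFT;

int main(void)
{
        unsigned long load = 0;                 /* avenrun[0] analogue */
        unsigned long active = 3 * FIXED_1;     /* pretend 3 runnable tasks */
        int i;

        /* one sample every LOAD_FREQ (~5s); simulate one minute of samples */
        for (i = 0; i < 12; i++) {
                CALC_LOAD(load, EXP_1, active);
        }

        printf("1-min load after 60s at 3 tasks: %.2f\n",
               (double)load / FIXED_1);
        return 0;
}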
1116 | 1119 | ||
1117 | /* | 1120 | /* |
1118 | * This function runs timers and the timer-tq in bottom half context. | 1121 | * This function runs timers and the timer-tq in bottom half context. |
1119 | */ | 1122 | */ |
1120 | static void run_timer_softirq(struct softirq_action *h) | 1123 | static void run_timer_softirq(struct softirq_action *h) |
1121 | { | 1124 | { |
1122 | struct tvec_base *base = __get_cpu_var(tvec_bases); | 1125 | struct tvec_base *base = __get_cpu_var(tvec_bases); |
1123 | 1126 | ||
1124 | hrtimer_run_pending(); | 1127 | hrtimer_run_pending(); |
1125 | 1128 | ||
1126 | if (time_after_eq(jiffies, base->timer_jiffies)) | 1129 | if (time_after_eq(jiffies, base->timer_jiffies)) |
1127 | __run_timers(base); | 1130 | __run_timers(base); |
1128 | } | 1131 | } |
1129 | 1132 | ||
1130 | /* | 1133 | /* |
1131 | * Called by the local, per-CPU timer interrupt on SMP. | 1134 | * Called by the local, per-CPU timer interrupt on SMP. |
1132 | */ | 1135 | */ |
1133 | void run_local_timers(void) | 1136 | void run_local_timers(void) |
1134 | { | 1137 | { |
1135 | hrtimer_run_queues(); | 1138 | hrtimer_run_queues(); |
1136 | raise_softirq(TIMER_SOFTIRQ); | 1139 | raise_softirq(TIMER_SOFTIRQ); |
1137 | softlockup_tick(); | 1140 | softlockup_tick(); |
1138 | } | 1141 | } |
1139 | 1142 | ||
1140 | /* | 1143 | /* |
1141 | * Called by the timer interrupt. xtime_lock must already be taken | 1144 | * Called by the timer interrupt. xtime_lock must already be taken |
1142 | * by the timer IRQ! | 1145 | * by the timer IRQ! |
1143 | */ | 1146 | */ |
1144 | static inline void update_times(unsigned long ticks) | 1147 | static inline void update_times(unsigned long ticks) |
1145 | { | 1148 | { |
1146 | update_wall_time(); | 1149 | update_wall_time(); |
1147 | calc_load(ticks); | 1150 | calc_load(ticks); |
1148 | } | 1151 | } |
1149 | 1152 | ||
1150 | /* | 1153 | /* |
1151 | * The 64-bit jiffies value is not atomic - you MUST NOT read it | 1154 | * The 64-bit jiffies value is not atomic - you MUST NOT read it |
1152 | * without sampling the sequence number in xtime_lock. | 1155 | * without sampling the sequence number in xtime_lock. |
1153 | * jiffies is defined in the linker script... | 1156 | * jiffies is defined in the linker script... |
1154 | */ | 1157 | */ |
1155 | 1158 | ||
1156 | void do_timer(unsigned long ticks) | 1159 | void do_timer(unsigned long ticks) |
1157 | { | 1160 | { |
1158 | jiffies_64 += ticks; | 1161 | jiffies_64 += ticks; |
1159 | update_times(ticks); | 1162 | update_times(ticks); |
1160 | } | 1163 | } |
1161 | 1164 | ||
1162 | #ifdef __ARCH_WANT_SYS_ALARM | 1165 | #ifdef __ARCH_WANT_SYS_ALARM |
1163 | 1166 | ||
1164 | /* | 1167 | /* |
1165 | * For backwards compatibility? This can be done in libc so Alpha | 1168 | * For backwards compatibility? This can be done in libc so Alpha |
1166 | * and all newer ports shouldn't need it. | 1169 | * and all newer ports shouldn't need it. |
1167 | */ | 1170 | */ |
1168 | SYSCALL_DEFINE1(alarm, unsigned int, seconds) | 1171 | SYSCALL_DEFINE1(alarm, unsigned int, seconds) |
1169 | { | 1172 | { |
1170 | return alarm_setitimer(seconds); | 1173 | return alarm_setitimer(seconds); |
1171 | } | 1174 | } |
1172 | 1175 | ||
1173 | #endif | 1176 | #endif |
1174 | 1177 | ||
1175 | #ifndef __alpha__ | 1178 | #ifndef __alpha__ |
1176 | 1179 | ||
1177 | /* | 1180 | /* |
1178 | * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this | 1181 | * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this |
1179 | * should be moved into arch/i386 instead? | 1182 | * should be moved into arch/i386 instead? |
1180 | */ | 1183 | */ |
1181 | 1184 | ||
1182 | /** | 1185 | /** |
1183 | * sys_getpid - return the thread group id of the current process | 1186 | * sys_getpid - return the thread group id of the current process |
1184 | * | 1187 | * |
1185 | * Note, despite the name, this returns the tgid not the pid. The tgid and | 1188 | * Note, despite the name, this returns the tgid not the pid. The tgid and |
1186 | * the pid are identical unless CLONE_THREAD was specified on clone() in | 1189 | * the pid are identical unless CLONE_THREAD was specified on clone() in |
1187 | * which case the tgid is the same in all threads of the same group. | 1190 | * which case the tgid is the same in all threads of the same group. |
1188 | * | 1191 | * |
1189 | * This is SMP safe as current->tgid does not change. | 1192 | * This is SMP safe as current->tgid does not change. |
1190 | */ | 1193 | */ |
1191 | SYSCALL_DEFINE0(getpid) | 1194 | SYSCALL_DEFINE0(getpid) |
1192 | { | 1195 | { |
1193 | return task_tgid_vnr(current); | 1196 | return task_tgid_vnr(current); |
1194 | } | 1197 | } |
1195 | 1198 | ||
1196 | /* | 1199 | /* |
1197 | * Accessing ->real_parent is not SMP-safe, it could | 1200 | * Accessing ->real_parent is not SMP-safe, it could |
1198 | * change from under us. However, we can use a stale | 1201 | * change from under us. However, we can use a stale |
1199 | * value of ->real_parent under rcu_read_lock(), see | 1202 | * value of ->real_parent under rcu_read_lock(), see |
1200 | * release_task()->call_rcu(delayed_put_task_struct). | 1203 | * release_task()->call_rcu(delayed_put_task_struct). |
1201 | */ | 1204 | */ |
1202 | SYSCALL_DEFINE0(getppid) | 1205 | SYSCALL_DEFINE0(getppid) |
1203 | { | 1206 | { |
1204 | int pid; | 1207 | int pid; |
1205 | 1208 | ||
1206 | rcu_read_lock(); | 1209 | rcu_read_lock(); |
1207 | pid = task_tgid_vnr(current->real_parent); | 1210 | pid = task_tgid_vnr(current->real_parent); |
1208 | rcu_read_unlock(); | 1211 | rcu_read_unlock(); |
1209 | 1212 | ||
1210 | return pid; | 1213 | return pid; |
1211 | } | 1214 | } |
1212 | 1215 | ||
1213 | SYSCALL_DEFINE0(getuid) | 1216 | SYSCALL_DEFINE0(getuid) |
1214 | { | 1217 | { |
1215 | /* Only we change this so SMP safe */ | 1218 | /* Only we change this so SMP safe */ |
1216 | return current_uid(); | 1219 | return current_uid(); |
1217 | } | 1220 | } |
1218 | 1221 | ||
1219 | SYSCALL_DEFINE0(geteuid) | 1222 | SYSCALL_DEFINE0(geteuid) |
1220 | { | 1223 | { |
1221 | /* Only we change this so SMP safe */ | 1224 | /* Only we change this so SMP safe */ |
1222 | return current_euid(); | 1225 | return current_euid(); |
1223 | } | 1226 | } |
1224 | 1227 | ||
1225 | SYSCALL_DEFINE0(getgid) | 1228 | SYSCALL_DEFINE0(getgid) |
1226 | { | 1229 | { |
1227 | /* Only we change this so SMP safe */ | 1230 | /* Only we change this so SMP safe */ |
1228 | return current_gid(); | 1231 | return current_gid(); |
1229 | } | 1232 | } |
1230 | 1233 | ||
1231 | SYSCALL_DEFINE0(getegid) | 1234 | SYSCALL_DEFINE0(getegid) |
1232 | { | 1235 | { |
1233 | /* Only we change this so SMP safe */ | 1236 | /* Only we change this so SMP safe */ |
1234 | return current_egid(); | 1237 | return current_egid(); |
1235 | } | 1238 | } |
1236 | 1239 | ||
1237 | #endif | 1240 | #endif |
1238 | 1241 | ||
1239 | static void process_timeout(unsigned long __data) | 1242 | static void process_timeout(unsigned long __data) |
1240 | { | 1243 | { |
1241 | wake_up_process((struct task_struct *)__data); | 1244 | wake_up_process((struct task_struct *)__data); |
1242 | } | 1245 | } |
1243 | 1246 | ||
1244 | /** | 1247 | /** |
1245 | * schedule_timeout - sleep until timeout | 1248 | * schedule_timeout - sleep until timeout |
1246 | * @timeout: timeout value in jiffies | 1249 | * @timeout: timeout value in jiffies |
1247 | * | 1250 | * |
1248 | * Make the current task sleep until @timeout jiffies have | 1251 | * Make the current task sleep until @timeout jiffies have |
1249 | * elapsed. The routine will return immediately unless | 1252 | * elapsed. The routine will return immediately unless |
1250 | * the current task state has been set (see set_current_state()). | 1253 | * the current task state has been set (see set_current_state()). |
1251 | * | 1254 | * |
1252 | * You can set the task state as follows - | 1255 | * You can set the task state as follows - |
1253 | * | 1256 | * |
1254 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to | 1257 | * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to |
1255 | * pass before the routine returns. The routine will return 0. | 1258 | * pass before the routine returns. The routine will return 0. |
1256 | * | 1259 | * |
1257 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | 1260 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is |
1258 | * delivered to the current task. In this case the remaining time | 1261 | * delivered to the current task. In this case the remaining time |
1259 | * in jiffies will be returned, or 0 if the timer expired in time | 1262 | * in jiffies will be returned, or 0 if the timer expired in time |
1260 | * | 1263 | * |
1261 | * The current task state is guaranteed to be TASK_RUNNING when this | 1264 | * The current task state is guaranteed to be TASK_RUNNING when this |
1262 | * routine returns. | 1265 | * routine returns. |
1263 | * | 1266 | * |
1264 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule | 1267 | * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule |
1265 | * the CPU away without a bound on the timeout. In this case the return | 1268 | * the CPU away without a bound on the timeout. In this case the return |
1266 | * value will be %MAX_SCHEDULE_TIMEOUT. | 1269 | * value will be %MAX_SCHEDULE_TIMEOUT. |
1267 | * | 1270 | * |
1268 | * In all cases the return value is guaranteed to be non-negative. | 1271 | * In all cases the return value is guaranteed to be non-negative. |
1269 | */ | 1272 | */ |
1270 | signed long __sched schedule_timeout(signed long timeout) | 1273 | signed long __sched schedule_timeout(signed long timeout) |
1271 | { | 1274 | { |
1272 | struct timer_list timer; | 1275 | struct timer_list timer; |
1273 | unsigned long expire; | 1276 | unsigned long expire; |
1274 | 1277 | ||
1275 | switch (timeout) | 1278 | switch (timeout) |
1276 | { | 1279 | { |
1277 | case MAX_SCHEDULE_TIMEOUT: | 1280 | case MAX_SCHEDULE_TIMEOUT: |
1278 | /* | 1281 | /* |
1279 | * These two special cases are useful for the caller's | 1282 | * These two special cases are useful for the caller's |
1280 | * convenience. Nothing more. We could take | 1283 | * convenience. Nothing more. We could take |
1281 | * MAX_SCHEDULE_TIMEOUT from one of the negative values, | 1284 | * MAX_SCHEDULE_TIMEOUT from one of the negative values, |
1282 | * but I'd like to return a valid offset (>=0) to allow | 1285 | * but I'd like to return a valid offset (>=0) to allow |
1283 | * the caller to do everything it wants with the retval. | 1286 | * the caller to do everything it wants with the retval. |
1284 | */ | 1287 | */ |
1285 | schedule(); | 1288 | schedule(); |
1286 | goto out; | 1289 | goto out; |
1287 | default: | 1290 | default: |
1288 | /* | 1291 | /* |
1289 | * Another bit of paranoia. Note that the retval will be | 1292 | * Another bit of paranoia. Note that the retval will be |
1290 | * 0 since no piece of the kernel is supposed to check | 1293 | * 0 since no piece of the kernel is supposed to check |
1291 | * for a negative retval of schedule_timeout() (since it | 1294 | * for a negative retval of schedule_timeout() (since it |
1292 | * should never happen anyway). You just have the printk() | 1295 | * should never happen anyway). You just have the printk() |
1293 | * that will tell you if something has gone wrong and where. | 1296 | * that will tell you if something has gone wrong and where. |
1294 | */ | 1297 | */ |
1295 | if (timeout < 0) { | 1298 | if (timeout < 0) { |
1296 | printk(KERN_ERR "schedule_timeout: wrong timeout " | 1299 | printk(KERN_ERR "schedule_timeout: wrong timeout " |
1297 | "value %lx\n", timeout); | 1300 | "value %lx\n", timeout); |
1298 | dump_stack(); | 1301 | dump_stack(); |
1299 | current->state = TASK_RUNNING; | 1302 | current->state = TASK_RUNNING; |
1300 | goto out; | 1303 | goto out; |
1301 | } | 1304 | } |
1302 | } | 1305 | } |
1303 | 1306 | ||
1304 | expire = timeout + jiffies; | 1307 | expire = timeout + jiffies; |
1305 | 1308 | ||
1306 | setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); | 1309 | setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); |
1307 | __mod_timer(&timer, expire, false); | 1310 | __mod_timer(&timer, expire, false); |
1308 | schedule(); | 1311 | schedule(); |
1309 | del_singleshot_timer_sync(&timer); | 1312 | del_singleshot_timer_sync(&timer); |
1310 | 1313 | ||
1311 | /* Remove the timer from the object tracker */ | 1314 | /* Remove the timer from the object tracker */ |
1312 | destroy_timer_on_stack(&timer); | 1315 | destroy_timer_on_stack(&timer); |
1313 | 1316 | ||
1314 | timeout = expire - jiffies; | 1317 | timeout = expire - jiffies; |
1315 | 1318 | ||
1316 | out: | 1319 | out: |
1317 | return timeout < 0 ? 0 : timeout; | 1320 | return timeout < 0 ? 0 : timeout; |
1318 | } | 1321 | } |
1319 | EXPORT_SYMBOL(schedule_timeout); | 1322 | EXPORT_SYMBOL(schedule_timeout); |
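A minimal sketch of the usual calling pattern (set the task state first, then call schedule_timeout()); the helper name is hypothetical:

#include <linux/sched.h>
#include <linux/jiffies.h>

/* Hypothetical helper: sleep for about one second, interruptibly. */
static signed long my_one_second_nap(void)
{
        set_current_state(TASK_INTERRUPTIBLE);
        /* returns 0 on full timeout, or the jiffies left if woken early */
        return schedule_timeout(HZ);
}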
1320 | 1323 | ||
1321 | /* | 1324 | /* |
1322 | * We can use __set_current_state() here because schedule_timeout() calls | 1325 | * We can use __set_current_state() here because schedule_timeout() calls |
1323 | * schedule() unconditionally. | 1326 | * schedule() unconditionally. |
1324 | */ | 1327 | */ |
1325 | signed long __sched schedule_timeout_interruptible(signed long timeout) | 1328 | signed long __sched schedule_timeout_interruptible(signed long timeout) |
1326 | { | 1329 | { |
1327 | __set_current_state(TASK_INTERRUPTIBLE); | 1330 | __set_current_state(TASK_INTERRUPTIBLE); |
1328 | return schedule_timeout(timeout); | 1331 | return schedule_timeout(timeout); |
1329 | } | 1332 | } |
1330 | EXPORT_SYMBOL(schedule_timeout_interruptible); | 1333 | EXPORT_SYMBOL(schedule_timeout_interruptible); |
1331 | 1334 | ||
1332 | signed long __sched schedule_timeout_killable(signed long timeout) | 1335 | signed long __sched schedule_timeout_killable(signed long timeout) |
1333 | { | 1336 | { |
1334 | __set_current_state(TASK_KILLABLE); | 1337 | __set_current_state(TASK_KILLABLE); |
1335 | return schedule_timeout(timeout); | 1338 | return schedule_timeout(timeout); |
1336 | } | 1339 | } |
1337 | EXPORT_SYMBOL(schedule_timeout_killable); | 1340 | EXPORT_SYMBOL(schedule_timeout_killable); |
1338 | 1341 | ||
1339 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) | 1342 | signed long __sched schedule_timeout_uninterruptible(signed long timeout) |
1340 | { | 1343 | { |
1341 | __set_current_state(TASK_UNINTERRUPTIBLE); | 1344 | __set_current_state(TASK_UNINTERRUPTIBLE); |
1342 | return schedule_timeout(timeout); | 1345 | return schedule_timeout(timeout); |
1343 | } | 1346 | } |
1344 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | 1347 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); |
1345 | 1348 | ||
1346 | /* Thread ID - the internal kernel "pid" */ | 1349 | /* Thread ID - the internal kernel "pid" */ |
1347 | SYSCALL_DEFINE0(gettid) | 1350 | SYSCALL_DEFINE0(gettid) |
1348 | { | 1351 | { |
1349 | return task_pid_vnr(current); | 1352 | return task_pid_vnr(current); |
1350 | } | 1353 | } |
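A hypothetical user-space sketch of the tgid/pid distinction described in the sys_getpid kernel-doc above; glibc of this era has no gettid() wrapper, so syscall() is used:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        /* in a single-threaded process the two values are the same */
        printf("pid (tgid): %d, tid: %ld\n",
               getpid(), syscall(SYS_gettid));
        return 0;
}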
1351 | 1354 | ||
1352 | /** | 1355 | /** |
1353 | * do_sysinfo - fill in sysinfo struct | 1356 | * do_sysinfo - fill in sysinfo struct |
1354 | * @info: pointer to buffer to fill | 1357 | * @info: pointer to buffer to fill |
1355 | */ | 1358 | */ |
1356 | int do_sysinfo(struct sysinfo *info) | 1359 | int do_sysinfo(struct sysinfo *info) |
1357 | { | 1360 | { |
1358 | unsigned long mem_total, sav_total; | 1361 | unsigned long mem_total, sav_total; |
1359 | unsigned int mem_unit, bitcount; | 1362 | unsigned int mem_unit, bitcount; |
1360 | unsigned long seq; | 1363 | unsigned long seq; |
1361 | 1364 | ||
1362 | memset(info, 0, sizeof(struct sysinfo)); | 1365 | memset(info, 0, sizeof(struct sysinfo)); |
1363 | 1366 | ||
1364 | do { | 1367 | do { |
1365 | struct timespec tp; | 1368 | struct timespec tp; |
1366 | seq = read_seqbegin(&xtime_lock); | 1369 | seq = read_seqbegin(&xtime_lock); |
1367 | 1370 | ||
1368 | /* | 1371 | /* |
1369 | * This is annoying. The code below is the same thing | 1372 | * This is annoying. The code below is the same thing |
1370 | * posix_get_clock_monotonic() does, but that function wants | 1373 | * posix_get_clock_monotonic() does, but that function wants |
1371 | * to take the lock itself, while we want the same lock to | 1374 | * to take the lock itself, while we want the same lock to |
1372 | * cover the loads sampling too. | 1375 | * cover the loads sampling too. |
1373 | */ | 1376 | */ |
1374 | 1377 | ||
1375 | getnstimeofday(&tp); | 1378 | getnstimeofday(&tp); |
1376 | tp.tv_sec += wall_to_monotonic.tv_sec; | 1379 | tp.tv_sec += wall_to_monotonic.tv_sec; |
1377 | tp.tv_nsec += wall_to_monotonic.tv_nsec; | 1380 | tp.tv_nsec += wall_to_monotonic.tv_nsec; |
1378 | monotonic_to_bootbased(&tp); | 1381 | monotonic_to_bootbased(&tp); |
1379 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { | 1382 | if (tp.tv_nsec - NSEC_PER_SEC >= 0) { |
1380 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; | 1383 | tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; |
1381 | tp.tv_sec++; | 1384 | tp.tv_sec++; |
1382 | } | 1385 | } |
1383 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); | 1386 | info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); |
1384 | 1387 | ||
1385 | info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); | 1388 | info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); |
1386 | info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); | 1389 | info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); |
1387 | info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); | 1390 | info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); |
1388 | 1391 | ||
1389 | info->procs = nr_threads; | 1392 | info->procs = nr_threads; |
1390 | } while (read_seqretry(&xtime_lock, seq)); | 1393 | } while (read_seqretry(&xtime_lock, seq)); |
1391 | 1394 | ||
1392 | si_meminfo(info); | 1395 | si_meminfo(info); |
1393 | si_swapinfo(info); | 1396 | si_swapinfo(info); |
1394 | 1397 | ||
1395 | /* | 1398 | /* |
1396 | * If the sum of all the available memory (i.e. ram + swap) | 1399 | * If the sum of all the available memory (i.e. ram + swap) |
1397 | * is less than can be stored in a 32 bit unsigned long then | 1400 | * is less than can be stored in a 32 bit unsigned long then |
1398 | * we can be binary compatible with 2.2.x kernels. If not, | 1401 | * we can be binary compatible with 2.2.x kernels. If not, |
1399 | * well, in that case 2.2.x was broken anyways... | 1402 | * well, in that case 2.2.x was broken anyways... |
1400 | * | 1403 | * |
1401 | * -Erik Andersen <andersee@debian.org> | 1404 | * -Erik Andersen <andersee@debian.org> |
1402 | */ | 1405 | */ |
1403 | 1406 | ||
1404 | mem_total = info->totalram + info->totalswap; | 1407 | mem_total = info->totalram + info->totalswap; |
1405 | if (mem_total < info->totalram || mem_total < info->totalswap) | 1408 | if (mem_total < info->totalram || mem_total < info->totalswap) |
1406 | goto out; | 1409 | goto out; |
1407 | bitcount = 0; | 1410 | bitcount = 0; |
1408 | mem_unit = info->mem_unit; | 1411 | mem_unit = info->mem_unit; |
1409 | while (mem_unit > 1) { | 1412 | while (mem_unit > 1) { |
1410 | bitcount++; | 1413 | bitcount++; |
1411 | mem_unit >>= 1; | 1414 | mem_unit >>= 1; |
1412 | sav_total = mem_total; | 1415 | sav_total = mem_total; |
1413 | mem_total <<= 1; | 1416 | mem_total <<= 1; |
1414 | if (mem_total < sav_total) | 1417 | if (mem_total < sav_total) |
1415 | goto out; | 1418 | goto out; |
1416 | } | 1419 | } |
1417 | 1420 | ||
1418 | /* | 1421 | /* |
1419 | * If mem_total did not overflow, multiply all memory values by | 1422 | * If mem_total did not overflow, multiply all memory values by |
1420 | * info->mem_unit and set it to 1. This leaves things compatible | 1423 | * info->mem_unit and set it to 1. This leaves things compatible |
1421 | * with 2.2.x, and also retains compatibility with earlier 2.4.x | 1424 | * with 2.2.x, and also retains compatibility with earlier 2.4.x |
1422 | * kernels... | 1425 | * kernels... |
1423 | */ | 1426 | */ |
1424 | 1427 | ||
1425 | info->mem_unit = 1; | 1428 | info->mem_unit = 1; |
1426 | info->totalram <<= bitcount; | 1429 | info->totalram <<= bitcount; |
1427 | info->freeram <<= bitcount; | 1430 | info->freeram <<= bitcount; |
1428 | info->sharedram <<= bitcount; | 1431 | info->sharedram <<= bitcount; |
1429 | info->bufferram <<= bitcount; | 1432 | info->bufferram <<= bitcount; |
1430 | info->totalswap <<= bitcount; | 1433 | info->totalswap <<= bitcount; |
1431 | info->freeswap <<= bitcount; | 1434 | info->freeswap <<= bitcount; |
1432 | info->totalhigh <<= bitcount; | 1435 | info->totalhigh <<= bitcount; |
1433 | info->freehigh <<= bitcount; | 1436 | info->freehigh <<= bitcount; |
1434 | 1437 | ||
1435 | out: | 1438 | out: |
1436 | return 0; | 1439 | return 0; |
1437 | } | 1440 | } |
1438 | 1441 | ||
1439 | SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info) | 1442 | SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info) |
1440 | { | 1443 | { |
1441 | struct sysinfo val; | 1444 | struct sysinfo val; |
1442 | 1445 | ||
1443 | do_sysinfo(&val); | 1446 | do_sysinfo(&val); |
1444 | 1447 | ||
1445 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) | 1448 | if (copy_to_user(info, &val, sizeof(struct sysinfo))) |
1446 | return -EFAULT; | 1449 | return -EFAULT; |
1447 | 1450 | ||
1448 | return 0; | 1451 | return 0; |
1449 | } | 1452 | } |
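From user space the data filled in by do_sysinfo() is normally read through the glibc sysinfo() wrapper; a small illustrative program (the 65536 divisor reflects the SI_LOAD_SHIFT = 16 scaling applied above, assumed here):

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
        struct sysinfo si;

        if (sysinfo(&si) != 0) {
                perror("sysinfo");
                return 1;
        }

        /* loads[] are fixed point, scaled by 1 << 16 (65536) */
        printf("uptime: %ld s, 1-min load: %.2f, ram: %lu MiB\n",
               si.uptime, si.loads[0] / 65536.0,
               (si.totalram * si.mem_unit) >> 20);
        return 0;
}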
1450 | 1453 | ||
1451 | static int __cpuinit init_timers_cpu(int cpu) | 1454 | static int __cpuinit init_timers_cpu(int cpu) |
1452 | { | 1455 | { |
1453 | int j; | 1456 | int j; |
1454 | struct tvec_base *base; | 1457 | struct tvec_base *base; |
1455 | static char __cpuinitdata tvec_base_done[NR_CPUS]; | 1458 | static char __cpuinitdata tvec_base_done[NR_CPUS]; |
1456 | 1459 | ||
1457 | if (!tvec_base_done[cpu]) { | 1460 | if (!tvec_base_done[cpu]) { |
1458 | static char boot_done; | 1461 | static char boot_done; |
1459 | 1462 | ||
1460 | if (boot_done) { | 1463 | if (boot_done) { |
1461 | /* | 1464 | /* |
1462 | * The APs use this path later in boot | 1465 | * The APs use this path later in boot |
1463 | */ | 1466 | */ |
1464 | base = kmalloc_node(sizeof(*base), | 1467 | base = kmalloc_node(sizeof(*base), |
1465 | GFP_KERNEL | __GFP_ZERO, | 1468 | GFP_KERNEL | __GFP_ZERO, |
1466 | cpu_to_node(cpu)); | 1469 | cpu_to_node(cpu)); |
1467 | if (!base) | 1470 | if (!base) |
1468 | return -ENOMEM; | 1471 | return -ENOMEM; |
1469 | 1472 | ||
1470 | /* Make sure that tvec_base is 2 byte aligned */ | 1473 | /* Make sure that tvec_base is 2 byte aligned */ |
1471 | if (tbase_get_deferrable(base)) { | 1474 | if (tbase_get_deferrable(base)) { |
1472 | WARN_ON(1); | 1475 | WARN_ON(1); |
1473 | kfree(base); | 1476 | kfree(base); |
1474 | return -ENOMEM; | 1477 | return -ENOMEM; |
1475 | } | 1478 | } |
1476 | per_cpu(tvec_bases, cpu) = base; | 1479 | per_cpu(tvec_bases, cpu) = base; |
1477 | } else { | 1480 | } else { |
1478 | /* | 1481 | /* |
1479 | * This is for the boot CPU - we use compile-time | 1482 | * This is for the boot CPU - we use compile-time |
1480 | * static initialisation because per-cpu memory isn't | 1483 | * static initialisation because per-cpu memory isn't |
1481 | * ready yet and because the memory allocators are not | 1484 | * ready yet and because the memory allocators are not |
1482 | * initialised either. | 1485 | * initialised either. |
1483 | */ | 1486 | */ |
1484 | boot_done = 1; | 1487 | boot_done = 1; |
1485 | base = &boot_tvec_bases; | 1488 | base = &boot_tvec_bases; |
1486 | } | 1489 | } |
1487 | tvec_base_done[cpu] = 1; | 1490 | tvec_base_done[cpu] = 1; |
1488 | } else { | 1491 | } else { |
1489 | base = per_cpu(tvec_bases, cpu); | 1492 | base = per_cpu(tvec_bases, cpu); |
1490 | } | 1493 | } |
1491 | 1494 | ||
1492 | spin_lock_init(&base->lock); | 1495 | spin_lock_init(&base->lock); |
1493 | 1496 | ||
1494 | for (j = 0; j < TVN_SIZE; j++) { | 1497 | for (j = 0; j < TVN_SIZE; j++) { |
1495 | INIT_LIST_HEAD(base->tv5.vec + j); | 1498 | INIT_LIST_HEAD(base->tv5.vec + j); |
1496 | INIT_LIST_HEAD(base->tv4.vec + j); | 1499 | INIT_LIST_HEAD(base->tv4.vec + j); |
1497 | INIT_LIST_HEAD(base->tv3.vec + j); | 1500 | INIT_LIST_HEAD(base->tv3.vec + j); |
1498 | INIT_LIST_HEAD(base->tv2.vec + j); | 1501 | INIT_LIST_HEAD(base->tv2.vec + j); |
1499 | } | 1502 | } |
1500 | for (j = 0; j < TVR_SIZE; j++) | 1503 | for (j = 0; j < TVR_SIZE; j++) |
1501 | INIT_LIST_HEAD(base->tv1.vec + j); | 1504 | INIT_LIST_HEAD(base->tv1.vec + j); |
1502 | 1505 | ||
1503 | base->timer_jiffies = jiffies; | 1506 | base->timer_jiffies = jiffies; |
1504 | return 0; | 1507 | return 0; |
1505 | } | 1508 | } |
1506 | 1509 | ||
1507 | #ifdef CONFIG_HOTPLUG_CPU | 1510 | #ifdef CONFIG_HOTPLUG_CPU |
1508 | static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head) | 1511 | static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head) |
1509 | { | 1512 | { |
1510 | struct timer_list *timer; | 1513 | struct timer_list *timer; |
1511 | 1514 | ||
1512 | while (!list_empty(head)) { | 1515 | while (!list_empty(head)) { |
1513 | timer = list_first_entry(head, struct timer_list, entry); | 1516 | timer = list_first_entry(head, struct timer_list, entry); |
1514 | detach_timer(timer, 0); | 1517 | detach_timer(timer, 0); |
1515 | timer_set_base(timer, new_base); | 1518 | timer_set_base(timer, new_base); |
1516 | internal_add_timer(new_base, timer); | 1519 | internal_add_timer(new_base, timer); |
1517 | } | 1520 | } |
1518 | } | 1521 | } |
1519 | 1522 | ||
1520 | static void __cpuinit migrate_timers(int cpu) | 1523 | static void __cpuinit migrate_timers(int cpu) |
1521 | { | 1524 | { |
1522 | struct tvec_base *old_base; | 1525 | struct tvec_base *old_base; |
1523 | struct tvec_base *new_base; | 1526 | struct tvec_base *new_base; |
1524 | int i; | 1527 | int i; |
1525 | 1528 | ||
1526 | BUG_ON(cpu_online(cpu)); | 1529 | BUG_ON(cpu_online(cpu)); |
1527 | old_base = per_cpu(tvec_bases, cpu); | 1530 | old_base = per_cpu(tvec_bases, cpu); |
1528 | new_base = get_cpu_var(tvec_bases); | 1531 | new_base = get_cpu_var(tvec_bases); |
1529 | /* | 1532 | /* |
1530 | * The caller is globally serialized and nobody else | 1533 | * The caller is globally serialized and nobody else |
1531 | * takes two locks at once, deadlock is not possible. | 1534 | * takes two locks at once, deadlock is not possible. |
1532 | */ | 1535 | */ |
1533 | spin_lock_irq(&new_base->lock); | 1536 | spin_lock_irq(&new_base->lock); |
1534 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | 1537 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
1535 | 1538 | ||
1536 | BUG_ON(old_base->running_timer); | 1539 | BUG_ON(old_base->running_timer); |
1537 | 1540 | ||
1538 | for (i = 0; i < TVR_SIZE; i++) | 1541 | for (i = 0; i < TVR_SIZE; i++) |
1539 | migrate_timer_list(new_base, old_base->tv1.vec + i); | 1542 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1540 | for (i = 0; i < TVN_SIZE; i++) { | 1543 | for (i = 0; i < TVN_SIZE; i++) { |
1541 | migrate_timer_list(new_base, old_base->tv2.vec + i); | 1544 | migrate_timer_list(new_base, old_base->tv2.vec + i); |
1542 | migrate_timer_list(new_base, old_base->tv3.vec + i); | 1545 | migrate_timer_list(new_base, old_base->tv3.vec + i); |
1543 | migrate_timer_list(new_base, old_base->tv4.vec + i); | 1546 | migrate_timer_list(new_base, old_base->tv4.vec + i); |
1544 | migrate_timer_list(new_base, old_base->tv5.vec + i); | 1547 | migrate_timer_list(new_base, old_base->tv5.vec + i); |
1545 | } | 1548 | } |
1546 | 1549 | ||
1547 | spin_unlock(&old_base->lock); | 1550 | spin_unlock(&old_base->lock); |
1548 | spin_unlock_irq(&new_base->lock); | 1551 | spin_unlock_irq(&new_base->lock); |
1549 | put_cpu_var(tvec_bases); | 1552 | put_cpu_var(tvec_bases); |
1550 | } | 1553 | } |
1551 | #endif /* CONFIG_HOTPLUG_CPU */ | 1554 | #endif /* CONFIG_HOTPLUG_CPU */ |
1552 | 1555 | ||
1553 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, | 1556 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, |
1554 | unsigned long action, void *hcpu) | 1557 | unsigned long action, void *hcpu) |
1555 | { | 1558 | { |
1556 | long cpu = (long)hcpu; | 1559 | long cpu = (long)hcpu; |
1557 | switch(action) { | 1560 | switch(action) { |
1558 | case CPU_UP_PREPARE: | 1561 | case CPU_UP_PREPARE: |
1559 | case CPU_UP_PREPARE_FROZEN: | 1562 | case CPU_UP_PREPARE_FROZEN: |
1560 | if (init_timers_cpu(cpu) < 0) | 1563 | if (init_timers_cpu(cpu) < 0) |
1561 | return NOTIFY_BAD; | 1564 | return NOTIFY_BAD; |
1562 | break; | 1565 | break; |
1563 | #ifdef CONFIG_HOTPLUG_CPU | 1566 | #ifdef CONFIG_HOTPLUG_CPU |
1564 | case CPU_DEAD: | 1567 | case CPU_DEAD: |
1565 | case CPU_DEAD_FROZEN: | 1568 | case CPU_DEAD_FROZEN: |
1566 | migrate_timers(cpu); | 1569 | migrate_timers(cpu); |
1567 | break; | 1570 | break; |
1568 | #endif | 1571 | #endif |
1569 | default: | 1572 | default: |
1570 | break; | 1573 | break; |
1571 | } | 1574 | } |
1572 | return NOTIFY_OK; | 1575 | return NOTIFY_OK; |
1573 | } | 1576 | } |
1574 | 1577 | ||
1575 | static struct notifier_block __cpuinitdata timers_nb = { | 1578 | static struct notifier_block __cpuinitdata timers_nb = { |
1576 | .notifier_call = timer_cpu_notify, | 1579 | .notifier_call = timer_cpu_notify, |
1577 | }; | 1580 | }; |
1578 | 1581 | ||
1579 | 1582 | ||
1580 | void __init init_timers(void) | 1583 | void __init init_timers(void) |
1581 | { | 1584 | { |
1582 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, | 1585 | int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, |
1583 | (void *)(long)smp_processor_id()); | 1586 | (void *)(long)smp_processor_id()); |
1584 | 1587 | ||
1585 | init_timer_stats(); | 1588 | init_timer_stats(); |
1586 | 1589 | ||
1587 | BUG_ON(err == NOTIFY_BAD); | 1590 | BUG_ON(err == NOTIFY_BAD); |
1588 | register_cpu_notifier(&timers_nb); | 1591 | register_cpu_notifier(&timers_nb); |
1589 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); | 1592 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq); |
1590 | } | 1593 | } |
1591 | 1594 | ||
1592 | /** | 1595 | /** |
1593 | * msleep - sleep safely even with waitqueue interruptions | 1596 | * msleep - sleep safely even with waitqueue interruptions |
1594 | * @msecs: Time in milliseconds to sleep for | 1597 | * @msecs: Time in milliseconds to sleep for |
1595 | */ | 1598 | */ |
1596 | void msleep(unsigned int msecs) | 1599 | void msleep(unsigned int msecs) |
1597 | { | 1600 | { |
1598 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | 1601 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; |
1599 | 1602 | ||
1600 | while (timeout) | 1603 | while (timeout) |
1601 | timeout = schedule_timeout_uninterruptible(timeout); | 1604 | timeout = schedule_timeout_uninterruptible(timeout); |
1602 | } | 1605 | } |
1603 | 1606 | ||
1604 | EXPORT_SYMBOL(msleep); | 1607 | EXPORT_SYMBOL(msleep); |
1605 | 1608 | ||
1606 | /** | 1609 | /** |
1607 | * msleep_interruptible - sleep waiting for signals | 1610 | * msleep_interruptible - sleep waiting for signals |
1608 | * @msecs: Time in milliseconds to sleep for | 1611 | * @msecs: Time in milliseconds to sleep for |
1609 | */ | 1612 | */ |
1610 | unsigned long msleep_interruptible(unsigned int msecs) | 1613 | unsigned long msleep_interruptible(unsigned int msecs) |
1611 | { | 1614 | { |
1612 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; | 1615 | unsigned long timeout = msecs_to_jiffies(msecs) + 1; |
1613 | 1616 | ||
1614 | while (timeout && !signal_pending(current)) | 1617 | while (timeout && !signal_pending(current)) |
1615 | timeout = schedule_timeout_interruptible(timeout); | 1618 | timeout = schedule_timeout_interruptible(timeout); |
1616 | return jiffies_to_msecs(timeout); | 1619 | return jiffies_to_msecs(timeout); |
1617 | } | 1620 | } |
1618 | 1621 | ||
1619 | EXPORT_SYMBOL(msleep_interruptible); | 1622 | EXPORT_SYMBOL(msleep_interruptible); |
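A hypothetical sketch of when each sleep flavour fits: msleep() when the delay must not be cut short, msleep_interruptible() when a signal should be allowed to end the wait early (both helper names are made up):

#include <linux/delay.h>

/* Hypothetical device-reset helper: the hardware needs ~50 ms. */
static void my_reset_wait(void)
{
        msleep(50);                     /* must not return early */
}

/* Hypothetical long wait that a signal may cut short. */
static unsigned long my_poll_wait(void)
{
        /* returns the milliseconds left if a signal arrived early, else 0 */
        return msleep_interruptible(1000);
}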
1620 | 1623 |