Commit 3691c5199e8a4be1c7a91b5ab925db5feb866e19
Committed by
Linus Torvalds
1 parent
5ce29646eb
Exists in
master
and in
39 other branches
[PATCH] kill __init_timer_base in favor of boot_tvec_bases
Commit a4a6198b80cf82eb8160603c98da218d1bd5e104 ("[PATCH] tvec_bases too large for per-cpu data") introduced "struct tvec_t_base_s boot_tvec_bases", which is visible at compile time. This means we can kill __init_timer_base and move timer_base_s's content into tvec_t_base_s. Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 2 changed files with 39 additions and 53 deletions Side-by-side Diff
include/linux/timer.h
... | ... | @@ -6,7 +6,7 @@ |
6 | 6 | #include <linux/spinlock.h> |
7 | 7 | #include <linux/stddef.h> |
8 | 8 | |
9 | -struct timer_base_s; | |
9 | +struct tvec_t_base_s; | |
10 | 10 | |
11 | 11 | struct timer_list { |
12 | 12 | struct list_head entry; |
13 | 13 | |
14 | 14 | |
... | ... | @@ -15,16 +15,16 @@ |
15 | 15 | void (*function)(unsigned long); |
16 | 16 | unsigned long data; |
17 | 17 | |
18 | - struct timer_base_s *base; | |
18 | + struct tvec_t_base_s *base; | |
19 | 19 | }; |
20 | 20 | |
21 | -extern struct timer_base_s __init_timer_base; | |
21 | +extern struct tvec_t_base_s boot_tvec_bases; | |
22 | 22 | |
23 | 23 | #define TIMER_INITIALIZER(_function, _expires, _data) { \ |
24 | 24 | .function = (_function), \ |
25 | 25 | .expires = (_expires), \ |
26 | 26 | .data = (_data), \ |
27 | - .base = &__init_timer_base, \ | |
27 | + .base = &boot_tvec_bases, \ | |
28 | 28 | } |
29 | 29 | |
30 | 30 | #define DEFINE_TIMER(_name, _function, _expires, _data) \ |
kernel/timer.c
... | ... | @@ -54,7 +54,6 @@ |
54 | 54 | /* |
55 | 55 | * per-CPU timer vector definitions: |
56 | 56 | */ |
57 | - | |
58 | 57 | #define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6) |
59 | 58 | #define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8) |
60 | 59 | #define TVN_SIZE (1 << TVN_BITS) |
... | ... | @@ -62,11 +61,6 @@ |
62 | 61 | #define TVN_MASK (TVN_SIZE - 1) |
63 | 62 | #define TVR_MASK (TVR_SIZE - 1) |
64 | 63 | |
65 | -struct timer_base_s { | |
66 | - spinlock_t lock; | |
67 | - struct timer_list *running_timer; | |
68 | -}; | |
69 | - | |
70 | 64 | typedef struct tvec_s { |
71 | 65 | struct list_head vec[TVN_SIZE]; |
72 | 66 | } tvec_t; |
... | ... | @@ -76,7 +70,8 @@ |
76 | 70 | } tvec_root_t; |
77 | 71 | |
78 | 72 | struct tvec_t_base_s { |
79 | - struct timer_base_s t_base; | |
73 | + spinlock_t lock; | |
74 | + struct timer_list *running_timer; | |
80 | 75 | unsigned long timer_jiffies; |
81 | 76 | tvec_root_t tv1; |
82 | 77 | tvec_t tv2; |
83 | 78 | |
... | ... | @@ -87,13 +82,14 @@ |
87 | 82 | |
88 | 83 | typedef struct tvec_t_base_s tvec_base_t; |
89 | 84 | static DEFINE_PER_CPU(tvec_base_t *, tvec_bases); |
90 | -static tvec_base_t boot_tvec_bases; | |
85 | +tvec_base_t boot_tvec_bases; | |
86 | +EXPORT_SYMBOL(boot_tvec_bases); | |
91 | 87 | |
92 | 88 | static inline void set_running_timer(tvec_base_t *base, |
93 | 89 | struct timer_list *timer) |
94 | 90 | { |
95 | 91 | #ifdef CONFIG_SMP |
96 | - base->t_base.running_timer = timer; | |
92 | + base->running_timer = timer; | |
97 | 93 | #endif |
98 | 94 | } |
99 | 95 | |
... | ... | @@ -139,15 +135,6 @@ |
139 | 135 | list_add_tail(&timer->entry, vec); |
140 | 136 | } |
141 | 137 | |
142 | -typedef struct timer_base_s timer_base_t; | |
143 | -/* | |
144 | - * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases) | |
145 | - * at compile time, and we need timer->base to lock the timer. | |
146 | - */ | |
147 | -timer_base_t __init_timer_base | |
148 | - ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED }; | |
149 | -EXPORT_SYMBOL(__init_timer_base); | |
150 | - | |
151 | 138 | /*** |
152 | 139 | * init_timer - initialize a timer. |
153 | 140 | * @timer: the timer to be initialized |
... | ... | @@ -158,7 +145,7 @@ |
158 | 145 | void fastcall init_timer(struct timer_list *timer) |
159 | 146 | { |
160 | 147 | timer->entry.next = NULL; |
161 | - timer->base = &per_cpu(tvec_bases, raw_smp_processor_id())->t_base; | |
148 | + timer->base = per_cpu(tvec_bases, raw_smp_processor_id()); | |
162 | 149 | } |
163 | 150 | EXPORT_SYMBOL(init_timer); |
164 | 151 | |
... | ... | @@ -174,7 +161,7 @@ |
174 | 161 | } |
175 | 162 | |
176 | 163 | /* |
177 | - * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock | |
164 | + * We are using hashed locking: holding per_cpu(tvec_bases).lock | |
178 | 165 | * means that all timers which are tied to this base via timer->base are |
179 | 166 | * locked, and the base itself is locked too. |
180 | 167 | * |
181 | 168 | |
... | ... | @@ -185,10 +172,10 @@ |
185 | 172 | * possible to set timer->base = NULL and drop the lock: the timer remains |
186 | 173 | * locked. |
187 | 174 | */ |
188 | -static timer_base_t *lock_timer_base(struct timer_list *timer, | |
175 | +static tvec_base_t *lock_timer_base(struct timer_list *timer, | |
189 | 176 | unsigned long *flags) |
190 | 177 | { |
191 | - timer_base_t *base; | |
178 | + tvec_base_t *base; | |
192 | 179 | |
193 | 180 | for (;;) { |
194 | 181 | base = timer->base; |
... | ... | @@ -205,8 +192,7 @@ |
205 | 192 | |
206 | 193 | int __mod_timer(struct timer_list *timer, unsigned long expires) |
207 | 194 | { |
208 | - timer_base_t *base; | |
209 | - tvec_base_t *new_base; | |
195 | + tvec_base_t *base, *new_base; | |
210 | 196 | unsigned long flags; |
211 | 197 | int ret = 0; |
212 | 198 | |
... | ... | @@ -221,7 +207,7 @@ |
221 | 207 | |
222 | 208 | new_base = __get_cpu_var(tvec_bases); |
223 | 209 | |
224 | - if (base != &new_base->t_base) { | |
210 | + if (base != new_base) { | |
225 | 211 | /* |
226 | 212 | * We are trying to schedule the timer on the local CPU. |
227 | 213 | * However we can't change timer's base while it is running, |
228 | 214 | |
229 | 215 | |
... | ... | @@ -231,19 +217,19 @@ |
231 | 217 | */ |
232 | 218 | if (unlikely(base->running_timer == timer)) { |
233 | 219 | /* The timer remains on a former base */ |
234 | - new_base = container_of(base, tvec_base_t, t_base); | |
220 | + new_base = base; | |
235 | 221 | } else { |
236 | 222 | /* See the comment in lock_timer_base() */ |
237 | 223 | timer->base = NULL; |
238 | 224 | spin_unlock(&base->lock); |
239 | - spin_lock(&new_base->t_base.lock); | |
240 | - timer->base = &new_base->t_base; | |
225 | + spin_lock(&new_base->lock); | |
226 | + timer->base = new_base; | |
241 | 227 | } |
242 | 228 | } |
243 | 229 | |
244 | 230 | timer->expires = expires; |
245 | 231 | internal_add_timer(new_base, timer); |
246 | - spin_unlock_irqrestore(&new_base->t_base.lock, flags); | |
232 | + spin_unlock_irqrestore(&new_base->lock, flags); | |
247 | 233 | |
248 | 234 | return ret; |
249 | 235 | } |
250 | 236 | |
... | ... | @@ -263,10 +249,10 @@ |
263 | 249 | unsigned long flags; |
264 | 250 | |
265 | 251 | BUG_ON(timer_pending(timer) || !timer->function); |
266 | - spin_lock_irqsave(&base->t_base.lock, flags); | |
267 | - timer->base = &base->t_base; | |
252 | + spin_lock_irqsave(&base->lock, flags); | |
253 | + timer->base = base; | |
268 | 254 | internal_add_timer(base, timer); |
269 | - spin_unlock_irqrestore(&base->t_base.lock, flags); | |
255 | + spin_unlock_irqrestore(&base->lock, flags); | |
270 | 256 | } |
271 | 257 | |
272 | 258 | |
... | ... | @@ -319,7 +305,7 @@ |
319 | 305 | */ |
320 | 306 | int del_timer(struct timer_list *timer) |
321 | 307 | { |
322 | - timer_base_t *base; | |
308 | + tvec_base_t *base; | |
323 | 309 | unsigned long flags; |
324 | 310 | int ret = 0; |
325 | 311 | |
... | ... | @@ -346,7 +332,7 @@ |
346 | 332 | */ |
347 | 333 | int try_to_del_timer_sync(struct timer_list *timer) |
348 | 334 | { |
349 | - timer_base_t *base; | |
335 | + tvec_base_t *base; | |
350 | 336 | unsigned long flags; |
351 | 337 | int ret = -1; |
352 | 338 | |
... | ... | @@ -410,7 +396,7 @@ |
410 | 396 | struct timer_list *tmp; |
411 | 397 | |
412 | 398 | tmp = list_entry(curr, struct timer_list, entry); |
413 | - BUG_ON(tmp->base != &base->t_base); | |
399 | + BUG_ON(tmp->base != base); | |
414 | 400 | curr = curr->next; |
415 | 401 | internal_add_timer(base, tmp); |
416 | 402 | } |
... | ... | @@ -432,7 +418,7 @@ |
432 | 418 | { |
433 | 419 | struct timer_list *timer; |
434 | 420 | |
435 | - spin_lock_irq(&base->t_base.lock); | |
421 | + spin_lock_irq(&base->lock); | |
436 | 422 | while (time_after_eq(jiffies, base->timer_jiffies)) { |
437 | 423 | struct list_head work_list = LIST_HEAD_INIT(work_list); |
438 | 424 | struct list_head *head = &work_list; |
... | ... | @@ -458,7 +444,7 @@ |
458 | 444 | |
459 | 445 | set_running_timer(base, timer); |
460 | 446 | detach_timer(timer, 1); |
461 | - spin_unlock_irq(&base->t_base.lock); | |
447 | + spin_unlock_irq(&base->lock); | |
462 | 448 | { |
463 | 449 | int preempt_count = preempt_count(); |
464 | 450 | fn(data); |
465 | 451 | |
... | ... | @@ -471,11 +457,11 @@ |
471 | 457 | BUG(); |
472 | 458 | } |
473 | 459 | } |
474 | - spin_lock_irq(&base->t_base.lock); | |
460 | + spin_lock_irq(&base->lock); | |
475 | 461 | } |
476 | 462 | } |
477 | 463 | set_running_timer(base, NULL); |
478 | - spin_unlock_irq(&base->t_base.lock); | |
464 | + spin_unlock_irq(&base->lock); | |
479 | 465 | } |
480 | 466 | |
481 | 467 | #ifdef CONFIG_NO_IDLE_HZ |
... | ... | @@ -506,7 +492,7 @@ |
506 | 492 | hr_expires += jiffies; |
507 | 493 | |
508 | 494 | base = __get_cpu_var(tvec_bases); |
509 | - spin_lock(&base->t_base.lock); | |
495 | + spin_lock(&base->lock); | |
510 | 496 | expires = base->timer_jiffies + (LONG_MAX >> 1); |
511 | 497 | list = NULL; |
512 | 498 | |
... | ... | @@ -554,7 +540,7 @@ |
554 | 540 | expires = nte->expires; |
555 | 541 | } |
556 | 542 | } |
557 | - spin_unlock(&base->t_base.lock); | |
543 | + spin_unlock(&base->lock); | |
558 | 544 | |
559 | 545 | if (time_before(hr_expires, expires)) |
560 | 546 | return hr_expires; |
... | ... | @@ -1262,7 +1248,7 @@ |
1262 | 1248 | } |
1263 | 1249 | per_cpu(tvec_bases, cpu) = base; |
1264 | 1250 | } |
1265 | - spin_lock_init(&base->t_base.lock); | |
1251 | + spin_lock_init(&base->lock); | |
1266 | 1252 | for (j = 0; j < TVN_SIZE; j++) { |
1267 | 1253 | INIT_LIST_HEAD(base->tv5.vec + j); |
1268 | 1254 | INIT_LIST_HEAD(base->tv4.vec + j); |
... | ... | @@ -1284,7 +1270,7 @@ |
1284 | 1270 | while (!list_empty(head)) { |
1285 | 1271 | timer = list_entry(head->next, struct timer_list, entry); |
1286 | 1272 | detach_timer(timer, 0); |
1287 | - timer->base = &new_base->t_base; | |
1273 | + timer->base = new_base; | |
1288 | 1274 | internal_add_timer(new_base, timer); |
1289 | 1275 | } |
1290 | 1276 | } |
1291 | 1277 | |
... | ... | @@ -1300,11 +1286,11 @@ |
1300 | 1286 | new_base = get_cpu_var(tvec_bases); |
1301 | 1287 | |
1302 | 1288 | local_irq_disable(); |
1303 | - spin_lock(&new_base->t_base.lock); | |
1304 | - spin_lock(&old_base->t_base.lock); | |
1289 | + spin_lock(&new_base->lock); | |
1290 | + spin_lock(&old_base->lock); | |
1305 | 1291 | |
1306 | - if (old_base->t_base.running_timer) | |
1307 | - BUG(); | |
1292 | + BUG_ON(old_base->running_timer); | |
1293 | + | |
1308 | 1294 | for (i = 0; i < TVR_SIZE; i++) |
1309 | 1295 | migrate_timer_list(new_base, old_base->tv1.vec + i); |
1310 | 1296 | for (i = 0; i < TVN_SIZE; i++) { |
... | ... | @@ -1314,8 +1300,8 @@ |
1314 | 1300 | migrate_timer_list(new_base, old_base->tv5.vec + i); |
1315 | 1301 | } |
1316 | 1302 | |
1317 | - spin_unlock(&old_base->t_base.lock); | |
1318 | - spin_unlock(&new_base->t_base.lock); | |
1303 | + spin_unlock(&old_base->lock); | |
1304 | + spin_unlock(&new_base->lock); | |
1319 | 1305 | local_irq_enable(); |
1320 | 1306 | put_cpu_var(tvec_bases); |
1321 | 1307 | } |