kernel/softirq.c
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding,
     though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
	"HI_SOFTIRQ", "TIMER_SOFTIRQ", "NET_TX_SOFTIRQ", "NET_RX_SOFTIRQ",
	"BLOCK_SOFTIRQ", "TASKLET_SOFTIRQ", "SCHED_SOFTIRQ", "HRTIMER_SOFTIRQ",
	"RCU_SOFTIRQ"
};
/*
 * We cannot loop indefinitely here, to avoid userspace starvation;
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into add_preempt_count and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	preempt_count() += SOFTIRQ_OFFSET;
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == SOFTIRQ_OFFSET)
		trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);
/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
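/*
 * Editor's note - a minimal usage sketch, not part of the original file:
 * local_bh_disable()/local_bh_enable() bracket process-context code that
 * shares data with softirq handlers on this CPU. The names my_stats and
 * my_stats_lock below are hypothetical.
 *
 *	spin_lock_bh(&my_stats_lock);	// disables bottom halves, then locks
 *	my_stats.packets++;		// cannot race with local softirqs now
 *	spin_unlock_bh(&my_stats_lock);	// may run pending softirqs on enable
 *
 * The _bh spinlock variants are built on exactly these
 * disable/enable primitives.
 */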
/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	lockdep_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			h->action(h);

			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %s %p "
				       "with preempt_count %08x,"
				       " exited with %08x?\n", h - softirq_vec,
				       softirq_to_name[h - softirq_vec],
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	lockdep_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif
/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	rcu_irq_enter();
	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	rcu_irq_exit();
	if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
		tick_nohz_stop_sched_tick(0);
#endif
	preempt_enable_no_resched();
}
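/*
 * Editor's note - an illustrative sketch, not part of the original file:
 * architecture interrupt entry code is expected to bracket handler
 * dispatch with irq_enter()/irq_exit(); the irq_exit() above is what
 * kicks off pending softirq processing on the way out. Roughly:
 *
 *	irq_enter();			// mark hardirq context, nudge NOHZ/RCU
 *	generic_handle_irq(irq);	// run the registered handler(s)
 *	irq_exit();			// may call invoke_softirq() here
 */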
/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
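/*
 * Editor's note - a usage sketch, not part of the original file: a
 * subsystem that owns one of the NR_SOFTIRQS slots registers its handler
 * once and then raises the softirq whenever there is work. MY_SOFTIRQ
 * and my_action are hypothetical names; compare softirq_init() below,
 * which does exactly this for the two tasklet softirqs.
 *
 *	static void my_action(struct softirq_action *h) { ... }
 *
 *	open_softirq(MY_SOFTIRQ, my_action);	// boot-time registration
 *	raise_softirq(MY_SOFTIRQ);		// later: request execution
 */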
/* Tasklets */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
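/*
 * Editor's note - a typical tasklet lifecycle, not part of the original
 * file; my_handler and my_tasklet are hypothetical driver names:
 *
 *	static void my_handler(unsigned long data) { ... }
 *	static DECLARE_TASKLET(my_tasklet, my_handler, 0);
 *
 *	tasklet_schedule(&my_tasklet);	// e.g. from the device's irq handler
 *	...
 *	tasklet_kill(&my_tasklet);	// on teardown, from process context
 */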
DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty. */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};
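/*
 * Editor's note - a usage sketch, not part of the original file: the
 * remote softirq infrastructure lets a producer hand work to a softirq
 * on another cpu (e.g. completing block I/O on the submitting cpu, the
 * kind of use this was added for). The req->csd embedding is
 * hypothetical; the call_single_data must stay live until the softirq
 * has run.
 *
 *	send_remote_softirq(&req->csd, target_cpu, BLOCK_SOFTIRQ);
 *
 * or, with interrupts already disabled:
 *
 *	__send_remote_softirq(&req->csd, target_cpu,
 *			      smp_processor_id(), BLOCK_SOFTIRQ);
 */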
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops cpu going offline.
			   If already offline, we'll be on wrong CPU:
			   don't process */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
			rcu_qsctr_inc((long)__bind_cpu);
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     cpumask_any(cpu_online_mask));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);
#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
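/*
 * Editor's note - a usage sketch, not part of the original file;
 * drain_local_counters is a hypothetical name:
 *
 *	static void drain_local_counters(void *unused) { ... }
 *
 *	// runs the function on every online cpu; wait=1 blocks until
 *	// all of them (including this one) have finished
 *	on_each_cpu(drain_local_counters, NULL, 1);
 */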
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int cpu)
{
	return 0;
}