Blame view
kernel/softirq.c
23.5 KB
767a67b0b treewide: Replace...
// SPDX-License-Identifier: GPL-2.0-only

1da177e4c Linux-2.6.12-rc2
/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
b10db7f0d time: more timer ...
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
1da177e4c Linux-2.6.12-rc2
 */

403227641 softirq: convert ...
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

9984de1a5 kernel: Map most ...
#include <linux/export.h>
1da177e4c Linux-2.6.12-rc2
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
8b1c04aca softirq: Make sof...
#include <linux/local_lock.h>
1da177e4c Linux-2.6.12-rc2
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
831441862 Freezer: make ker...
#include <linux/freezer.h>
1da177e4c Linux-2.6.12-rc2
#include <linux/kthread.h>
#include <linux/rcupdate.h>
7e49fcce1 trace, lockdep: m...
#include <linux/ftrace.h>
78eef01b0 [PATCH] on_each_c...
#include <linux/smp.h>
3e339b5da softirq: Use hotp...
#include <linux/smpboot.h>
79bf2bb33 [PATCH] tick-mana...
#include <linux/tick.h>
d532676cc softirq: Add linu...
#include <linux/irq.h>
da0447474 tasklets: Replace...
#include <linux/wait_bit.h>

db1cc7aed softirq: Move do_...
#include <asm/softirq_stack.h>
a0e39ed37 tracing: fix buil...
#define CREATE_TRACE_POINTS
ad8d75fff tracing/events: m...
#include <trace/events/irq.h>
1da177e4c Linux-2.6.12-rc2
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
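/*
 * Put differently: raise_softirq() only marks the vector pending in the
 * local CPU's bitmap, so a given vector never races with itself on one
 * CPU, but its handler must still synchronize against the same vector
 * running concurrently on other CPUs.
 */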
1da177e4c Linux-2.6.12-rc2
#ifndef __ARCH_IRQ_STAT
0f6f47bac softirq/core: Tur...
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
1da177e4c Linux-2.6.12-rc2
#endif

978b0116c softirq: allocate...
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

4dd53d891 softirqs: Free up...
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

ce85b4f2e softirq: use cons...
const char * const softirq_to_name[NR_SOFTIRQS] = {
f660f6066 softirq: Display ...
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
09223371d rcu: Use softirq ...
	"TASKLET", "SCHED", "HRTIMER", "RCU"
5d592b44b tracing: tracepoi...
};
1da177e4c Linux-2.6.12-rc2
/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
676cb02dc softirqs: Make wa...
static void wakeup_softirqd(void)
1da177e4c Linux-2.6.12-rc2
{
	/* Interrupts are disabled: no need to stop preemption */
909ea9646 core: Replace __g...
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

37aadc687 sched: Unbreak wa...
	if (tsk)
1da177e4c Linux-2.6.12-rc2
		wake_up_process(tsk);
}

/*
4cd13c21b softirq: Let ksof...
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
3c53776e2 Mark HI and TASKL...
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
4cd13c21b softirq: Let ksof...
 */
3c53776e2 Mark HI and TASKL...
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
4cd13c21b softirq: Let ksof...
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

3c53776e2 Mark HI and TASKL...
	if (pending & SOFTIRQ_NOW_MASK)
		return false;
b03fbd4ff sched: Introduce ...
	return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
4cd13c21b softirq: Let ksof...
}
ae9ef5899 softirq: Move rel...
#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_PER_CPU(int, hardirqs_enabled);
DEFINE_PER_CPU(int, hardirq_context);
EXPORT_PER_CPU_SYMBOL_GPL(hardirqs_enabled);
EXPORT_PER_CPU_SYMBOL_GPL(hardirq_context);
#endif

4cd13c21b softirq: Let ksof...
/*
8b1c04aca softirq: Make sof...
 * SOFTIRQ_OFFSET usage:
 *
 * On !RT kernels 'count' is the preempt counter, on RT kernels this applies
 * to a per CPU counter and to task::softirqs_disabled_cnt.
 *
 * - count is changed by SOFTIRQ_OFFSET on entering or leaving softirq
 *   processing.
 *
 * - count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
75e1056f5 sched: Fix softir...
 *   on local_bh_disable or local_bh_enable.
8b1c04aca softirq: Make sof...
 *
75e1056f5 sched: Fix softir...
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
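/*
 * Concretely, with the usual preempt_count layout (SOFTIRQ_SHIFT == 8):
 * SOFTIRQ_OFFSET is 0x100 and SOFTIRQ_DISABLE_OFFSET is 0x200, so softirq
 * processing toggles bit 8 while local_bh_disable() counts from bit 9
 * upwards - which is how in_serving_softirq() tells the two states apart.
 */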
8b1c04aca softirq: Make sof...
#ifdef CONFIG_PREEMPT_RT

/*
 * RT accounts for BH disabled sections in task::softirqs_disabled_cnt and
 * also in per CPU softirq_ctrl::cnt. This is necessary to allow tasks in a
 * softirq disabled section to be preempted.
 *
 * The per task counter is used for softirq_count(), in_softirq() and
 * in_serving_softirqs() because these counts are only valid when the task
 * holding softirq_ctrl::lock is running.
 *
 * The per CPU counter prevents pointless wakeups of ksoftirqd in case that
 * the task which is in a softirq disabled section is preempted or blocks.
 */
struct softirq_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(softirq_ctrl.lock),
};

47c218dca tick/sched: Preve...
/**
 * local_bh_blocked() - Check for idle whether BH processing is blocked
 *
 * Returns false if the per CPU softirq::cnt is 0, otherwise true.
 *
 * This is invoked from the idle task to guard against false positive
 * softirq pending warnings, which would happen when the task which holds
 * softirq_ctrl::lock was the only running task on the CPU and blocks on
 * some other lock.
 */
bool local_bh_blocked(void)
{
	return __this_cpu_read(softirq_ctrl.cnt) != 0;
}
8b1c04aca softirq: Make sof...
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;
	int newcnt;

	WARN_ON_ONCE(in_hardirq());

	/* First entry of a task into a BH disabled section? */
	if (!current->softirq_disable_cnt) {
		if (preemptible()) {
			local_lock(&softirq_ctrl.lock);
			/* Required to meet the RCU bottomhalf requirements. */
			rcu_read_lock();
		} else {
			DEBUG_LOCKS_WARN_ON(this_cpu_read(softirq_ctrl.cnt));
		}
	}

	/*
	 * Track the per CPU softirq disabled state. On RT this is per CPU
	 * state to allow preemption of bottom half disabled sections.
	 */
	newcnt = __this_cpu_add_return(softirq_ctrl.cnt, cnt);
	/*
	 * Reflect the result in the task state to prevent recursion on the
	 * local lock and to make softirq_count() & al work.
	 */
	current->softirq_disable_cnt = newcnt;

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && newcnt == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_off(ip);
		raw_local_irq_restore(flags);
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);

static void __local_bh_enable(unsigned int cnt, bool unlock)
{
	unsigned long flags;
	int newcnt;

	DEBUG_LOCKS_WARN_ON(current->softirq_disable_cnt !=
			    this_cpu_read(softirq_ctrl.cnt));

	if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS) && softirq_count() == cnt) {
		raw_local_irq_save(flags);
		lockdep_softirqs_on(_RET_IP_);
		raw_local_irq_restore(flags);
	}

	newcnt = __this_cpu_sub_return(softirq_ctrl.cnt, cnt);
	current->softirq_disable_cnt = newcnt;

	if (!newcnt && unlock) {
		rcu_read_unlock();
		local_unlock(&softirq_ctrl.lock);
	}
}

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	bool preempt_on = preemptible();
	unsigned long flags;
	u32 pending;
	int curcnt;

	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();

	local_irq_save(flags);
	curcnt = __this_cpu_read(softirq_ctrl.cnt);

	/*
	 * If this is not reenabling soft interrupts, no point in trying to
	 * run pending ones.
	 */
	if (curcnt != cnt)
		goto out;

	pending = local_softirq_pending();
	if (!pending || ksoftirqd_running(pending))
		goto out;

	/*
	 * If this was called from non preemptible context, wake up the
	 * softirq daemon.
	 */
	if (!preempt_on) {
		wakeup_softirqd();
		goto out;
	}

	/*
	 * Adjust softirq count to SOFTIRQ_OFFSET which makes
	 * in_serving_softirq() become true.
	 */
	cnt = SOFTIRQ_OFFSET;
	__local_bh_enable(cnt, false);
	__do_softirq();

out:
	__local_bh_enable(cnt, preempt_on);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__local_bh_enable_ip);
75e1056f5 sched: Fix softir...
/*
8b1c04aca softirq: Make sof...
 * Invoked from ksoftirqd_run() outside of the interrupt disabled section
 * to acquire the per CPU local lock for reentrancy protection.
 */
static inline void ksoftirqd_run_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	local_irq_disable();
}

/* Counterpart to ksoftirqd_run_begin() */
static inline void ksoftirqd_run_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET, true);
	WARN_ON_ONCE(in_interrupt());
	local_irq_enable();
}

static inline void softirq_handle_begin(void) { }
static inline void softirq_handle_end(void) { }

static inline bool should_wake_ksoftirqd(void)
{
	return !this_cpu_read(softirq_ctrl.cnt);
}

static inline void invoke_softirq(void)
{
	if (should_wake_ksoftirqd())
		wakeup_softirqd();
}

#else /* CONFIG_PREEMPT_RT */

/*
 * This one is for softirq.c-internal use, where hardirqs are disabled
ae9ef5899 softirq: Move rel...
 * legitimately:
de30a2b35 [PATCH] lockdep: ...
 */
8b1c04aca softirq: Make sof...
#ifdef CONFIG_TRACE_IRQFLAGS
0bd3a173d sched/preempt, lo...
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
de30a2b35 [PATCH] lockdep: ...
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
7e49fcce1 trace, lockdep: m...
	/*
bdb438065 sched: Extract th...
	 * The preempt tracer hooks into preempt_count_add and will break
7e49fcce1 trace, lockdep: m...
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
bdb438065 sched: Extract th...
	__preempt_count_add(cnt);
de30a2b35 [PATCH] lockdep: ...
	/*
	 * Were softirqs turned off above:
	 */
9ea4c3800 locking: Optimize...
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
0d38453c8 lockdep: Rename t...
		lockdep_softirqs_off(ip);
de30a2b35 [PATCH] lockdep: ...
	raw_local_irq_restore(flags);

0f1ba9a2c softirq/preempt: ...
	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
f904f5826 sched/debug: Fix ...
		current->preempt_disable_ip = get_lock_parent_ip();
0f1ba9a2c softirq/preempt: ...
#endif
f904f5826 sched/debug: Fix ...
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
0f1ba9a2c softirq/preempt: ...
	}
de30a2b35 [PATCH] lockdep: ...
}
0bd3a173d sched/preempt, lo...
EXPORT_SYMBOL(__local_bh_disable_ip);
3c829c367 [PATCH] Reducing ...
#endif /* CONFIG_TRACE_IRQFLAGS */

75e1056f5 sched: Fix softir...
static void __local_bh_enable(unsigned int cnt)
{
f71b74bca irq/softirqs: Use...
	lockdep_assert_irqs_disabled();

1a63dcd87 softirq: Reorder ...
	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
9ea4c3800 locking: Optimize...
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
0d38453c8 lockdep: Rename t...
		lockdep_softirqs_on(_RET_IP_);

1a63dcd87 softirq: Reorder ...
	__preempt_count_sub(cnt);
75e1056f5 sched: Fix softir...
}
de30a2b35 [PATCH] lockdep: ...
/*
c3442697c softirq: Eliminat...
 * Special-case - softirqs can safely be enabled by __do_softirq(),
de30a2b35 [PATCH] lockdep: ...
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
5d60d3e7c irq: Improve a bi...
	WARN_ON_ONCE(in_irq());
75e1056f5 sched: Fix softir...
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
de30a2b35 [PATCH] lockdep: ...
}
EXPORT_SYMBOL(_local_bh_enable);

0bd3a173d sched/preempt, lo...
void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
de30a2b35 [PATCH] lockdep: ...
{
f71b74bca irq/softirqs: Use...
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
3c829c367 [PATCH] Reducing ...
#ifdef CONFIG_TRACE_IRQFLAGS
0f476b6d9 softirq: remove i...
	local_irq_disable();
3c829c367 [PATCH] Reducing ...
#endif
de30a2b35 [PATCH] lockdep: ...
	/*
	 * Are softirqs going to be turned on now:
	 */
75e1056f5 sched: Fix softir...
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
0d38453c8 lockdep: Rename t...
		lockdep_softirqs_on(ip);
de30a2b35 [PATCH] lockdep: ...
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
ce85b4f2e softirq: use cons...
	 */
91ea62d58 softirq: Avoid ba...
	__preempt_count_sub(cnt - 1);

0bed698a3 irq: Justify the ...
	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
de30a2b35 [PATCH] lockdep: ...
		do_softirq();
0bed698a3 irq: Justify the ...
	}

bdb438065 sched: Extract th...
	preempt_count_dec();
3c829c367 [PATCH] Reducing ...
#ifdef CONFIG_TRACE_IRQFLAGS
0f476b6d9 softirq: remove i...
	local_irq_enable();
3c829c367 [PATCH] Reducing ...
#endif
de30a2b35 [PATCH] lockdep: ...
	preempt_check_resched();
}
0bd3a173d sched/preempt, lo...
EXPORT_SYMBOL(__local_bh_enable_ip);
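/*
 * Note the cnt - 1 in __local_bh_enable_ip() above: one unit of preempt
 * count is retained across the pending-softirq check, so the task cannot
 * be preempted between re-enabling BHs and draining the queue via
 * do_softirq(); the final preempt_count_dec() drops that last unit.
 */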

f02fc963e softirq: Move var...
static inline void softirq_handle_begin(void)
{
	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
}

static inline void softirq_handle_end(void)
{
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
}

static inline void ksoftirqd_run_begin(void)
{
	local_irq_disable();
}

static inline void ksoftirqd_run_end(void)
{
	local_irq_enable();
}

static inline bool should_wake_ksoftirqd(void)
{
	return true;
}

ae9ef5899 softirq: Move rel...
static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

91cc470e7 genirq: Change fo...
	if (!force_irqthreads() || !__this_cpu_read(ksoftirqd)) {
ae9ef5899 softirq: Move rel...
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent from any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

8b1c04aca softirq: Make sof...
#endif /* !CONFIG_PREEMPT_RT */
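/*
 * do_softirq_own_stack() (see asm/softirq_stack.h above) runs
 * __do_softirq() on a separate per CPU softirq stack where the
 * architecture provides one, and falls back to the current stack
 * otherwise.
 */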
de30a2b35 [PATCH] lockdep: ...
/*
34376a50f Fix lockup relate...
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
1da177e4c Linux-2.6.12-rc2
 *
c10d73671 softirq: reduce l...
 * These limits have been established via experimentation.
1da177e4c Linux-2.6.12-rc2
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
c10d73671 softirq: reduce l...
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
34376a50f Fix lockup relate...
#define MAX_SOFTIRQ_RESTART 10
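/*
 * Worked example: with HZ=1000, MAX_SOFTIRQ_TIME is msecs_to_jiffies(2) ==
 * 2 jiffies, so the restart loop in __do_softirq() below gives up and
 * defers to ksoftirqd after roughly 2ms, after 10 restarts, or as soon as
 * need_resched() is set - whichever comes first.
 */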

f1a83e652 lockdep: Correctl...
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not miss-qualify lock contexts and miss possible deadlocks.
 */

5c4853b60 lockdep: Simplify...
static inline bool lockdep_softirq_start(void)
f1a83e652 lockdep: Correctl...
{
5c4853b60 lockdep: Simplify...
	bool in_hardirq = false;

f9ad4a5f3 lockdep: Remove l...
	if (lockdep_hardirq_context()) {
5c4853b60 lockdep: Simplify...
		in_hardirq = true;
2502ec37a lockdep: Rename t...
		lockdep_hardirq_exit();
5c4853b60 lockdep: Simplify...
	}
f1a83e652 lockdep: Correctl...
	lockdep_softirq_enter();
5c4853b60 lockdep: Simplify...

	return in_hardirq;
f1a83e652 lockdep: Correctl...
}

5c4853b60 lockdep: Simplify...
static inline void lockdep_softirq_end(bool in_hardirq)
f1a83e652 lockdep: Correctl...
{
	lockdep_softirq_exit();
5c4853b60 lockdep: Simplify...
	if (in_hardirq)
2502ec37a lockdep: Rename t...
		lockdep_hardirq_enter();
f1a83e652 lockdep: Correctl...
}
#else
5c4853b60 lockdep: Simplify...
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
f1a83e652 lockdep: Correctl...
#endif
be7635e72 arch, ftrace: for...
asmlinkage __visible void __softirq_entry __do_softirq(void)
1da177e4c Linux-2.6.12-rc2
{
c10d73671 softirq: reduce l...
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
907aed48f mm: allow PF_MEMA...
	unsigned long old_flags = current->flags;
34376a50f Fix lockup relate...
	int max_restart = MAX_SOFTIRQ_RESTART;
f1a83e652 lockdep: Correctl...
	struct softirq_action *h;
5c4853b60 lockdep: Simplify...
	bool in_hardirq;
f1a83e652 lockdep: Correctl...
	__u32 pending;
2e702b9f6 softirq: use ffs(...
	int softirq_bit;

907aed48f mm: allow PF_MEMA...
	/*
e45506ac0 softirq: Fix typo...
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
	 * again if the socket is related to swapping.
907aed48f mm: allow PF_MEMA...
	 */
	current->flags &= ~PF_MEMALLOC;

1da177e4c Linux-2.6.12-rc2
	pending = local_softirq_pending();

f02fc963e softirq: Move var...
	softirq_handle_begin();
5c4853b60 lockdep: Simplify...
	in_hardirq = lockdep_softirq_start();
d3759e718 irqtime: Move irq...
	account_softirq_enter(current);

1da177e4c Linux-2.6.12-rc2
restart:
	/* Reset the pending bitmask before enabling irqs */
3f74478b5 [PATCH] x86-64: S...
	set_softirq_pending(0);

c70f5d661 [PATCH] revert bo...
	local_irq_enable();

1da177e4c Linux-2.6.12-rc2
	h = softirq_vec;

2e702b9f6 softirq: use ffs(...
	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
403227641 softirq: convert ...
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
2e702b9f6 softirq: use ffs(...
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
1da177e4c Linux-2.6.12-rc2
		}
		h++;
2e702b9f6 softirq: use ffs(...
		pending >>= softirq_bit;
	}

8b1c04aca softirq: Make sof...
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) &&
	    __this_cpu_read(ksoftirqd) == current)
d28139c4e rcu: Apply RCU-bh...
		rcu_softirq_qs();

c70f5d661 [PATCH] revert bo...
	local_irq_disable();

1da177e4c Linux-2.6.12-rc2
	pending = local_softirq_pending();
c10d73671 softirq: reduce l...
	if (pending) {
34376a50f Fix lockup relate...
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
c10d73671 softirq: reduce l...
			goto restart;

1da177e4c Linux-2.6.12-rc2
		wakeup_softirqd();
c10d73671 softirq: reduce l...
	}

d3759e718 irqtime: Move irq...
	account_softirq_exit(current);
5c4853b60 lockdep: Simplify...
	lockdep_softirq_end(in_hardirq);
f02fc963e softirq: Move var...
	softirq_handle_end();
717a94b5f sched/core: Remov...
	current_restore_flags(old_flags, PF_MEMALLOC);
1da177e4c Linux-2.6.12-rc2
}

8a6bc4787 genirq: Provide i...
/**
 * irq_enter_rcu - Enter an interrupt context with RCU watching
dde4b2b5f [PATCH] uninline ...
 */
8a6bc4787 genirq: Provide i...
void irq_enter_rcu(void)
dde4b2b5f [PATCH] uninline ...
{
d14ce74f1 irq: Call tick_ir...
	__irq_enter_raw();

	if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))
5acac1be4 tick: Rename tick...
		tick_irq_enter();

d14ce74f1 irq: Call tick_ir...
	account_hardirq_enter(current);
dde4b2b5f [PATCH] uninline ...
}

8a6bc4787 genirq: Provide i...
/**
 * irq_enter - Enter an interrupt context including RCU update
 */
void irq_enter(void)
{
	rcu_irq_enter();
	irq_enter_rcu();
}

67826eae8 nohz: Disable the...
static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
0a0e0829f nohz: Fix missing...
		if (!in_irq())
67826eae8 nohz: Disable the...
			tick_nohz_irq_exit();
	}
#endif
}
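/*
 * tick_irq_enter() in irq_enter_rcu() lets a CPU waking from NOHZ idle
 * catch up jiffies before any handler runs; tick_irq_exit() re-evaluates
 * on the way out whether the tick can be stopped again.
 */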
59bc300b7 x86/entry: Clarif...
static inline void __irq_exit_rcu(void)
1da177e4c Linux-2.6.12-rc2
{
74eed0163 irq: Ensure irq_e...
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
4cd5d1115 irq: Don't re-ena...
	local_irq_disable();
74eed0163 irq: Ensure irq_e...
#else
f71b74bca irq/softirqs: Use...
	lockdep_assert_irqs_disabled();
74eed0163 irq: Ensure irq_e...
#endif
d3759e718 irqtime: Move irq...
	account_hardirq_exit(current);
bdb438065 sched: Extract th...
	preempt_count_sub(HARDIRQ_OFFSET);
1da177e4c Linux-2.6.12-rc2
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

67826eae8 nohz: Disable the...
	tick_irq_exit();
8a6bc4787 genirq: Provide i...
}

/**
59bc300b7 x86/entry: Clarif...
 * irq_exit_rcu() - Exit an interrupt context without updating RCU
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit_rcu(void)
{
	__irq_exit_rcu();
	/* must be last! */
	lockdep_hardirq_exit();
}

/**
8a6bc4787 genirq: Provide i...
 * irq_exit - Exit an interrupt context, update RCU and lockdep
 *
 * Also processes softirqs if needed and possible.
 */
void irq_exit(void)
{
59bc300b7 x86/entry: Clarif...
	__irq_exit_rcu();
416eb33cd rcu: Fix early ca...
	rcu_irq_exit();
2502ec37a lockdep: Rename t...
	/* must be last! */
	lockdep_hardirq_exit();
1da177e4c Linux-2.6.12-rc2
}

/*
 * This function must run with irqs disabled!
 */
7ad5b3a50 kernel: remove fa...
inline void raise_softirq_irqoff(unsigned int nr)
1da177e4c Linux-2.6.12-rc2
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
f02fc963e softirq: Move var...
	if (!in_interrupt() && should_wake_ksoftirqd())
1da177e4c Linux-2.6.12-rc2
		wakeup_softirqd();
}

7ad5b3a50 kernel: remove fa...
void raise_softirq(unsigned int nr)
1da177e4c Linux-2.6.12-rc2
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

f069686e4 tracing/softirq: ...
void __raise_softirq_irqoff(unsigned int nr)
{
cdabce2e3 softirq: Add debu...
	lockdep_assert_irqs_disabled();
f069686e4 tracing/softirq: ...
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}
962cf36c5 Remove argument f...
void open_softirq(int nr, void (*action)(struct softirq_action *))
1da177e4c Linux-2.6.12-rc2
{
	softirq_vec[nr].action = action;
}
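A minimal registration/raise sketch for reference (MY_SOFTIRQ and the handler name below are hypothetical; the real vectors are the fixed NR_SOFTIRQS set listed above, and adding new ones is strongly discouraged):

	static void my_action(struct softirq_action *h)
	{
		/* runs in softirq context: IRQs on, preemption off */
	}

	/* boot-time registration, e.g. from an __init function */
	open_softirq(MY_SOFTIRQ, my_action);

	/* later, typically from a hardirq handler */
	raise_softirq(MY_SOFTIRQ);	/* marks the vector pending on this CPU */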
9ba5f005c softirq: introduc...
/*
 * Tasklets
 */
ce85b4f2e softirq: use cons...
struct tasklet_head {
48f20a9a9 tasklets: execute...
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
1da177e4c Linux-2.6.12-rc2
};

4620b49f7 softirq: remove i...
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

6498ddad3 softirq: Consolid...
static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
1da177e4c Linux-2.6.12-rc2
{
6498ddad3 softirq: Consolid...
	struct tasklet_head *head;
1da177e4c Linux-2.6.12-rc2
	unsigned long flags;

	local_irq_save(flags);
6498ddad3 softirq: Consolid...
	head = this_cpu_ptr(headp);
48f20a9a9 tasklets: execute...
	t->next = NULL;
6498ddad3 softirq: Consolid...
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
1da177e4c Linux-2.6.12-rc2
	local_irq_restore(flags);
}

6498ddad3 softirq: Consolid...
void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
1da177e4c Linux-2.6.12-rc2
EXPORT_SYMBOL(__tasklet_schedule);

7ad5b3a50 kernel: remove fa...
void __tasklet_hi_schedule(struct tasklet_struct *t)
1da177e4c Linux-2.6.12-rc2
{
6498ddad3 softirq: Consolid...
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
1da177e4c Linux-2.6.12-rc2
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

697d8c63c tasklets: Replace...
static bool tasklet_clear_sched(struct tasklet_struct *t)
6b2c339df softirq: s/BUG/WA...
{
697d8c63c tasklets: Replace...
	if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
		wake_up_var(&t->state);
6b2c339df softirq: s/BUG/WA...
		return true;
697d8c63c tasklets: Replace...
	}
6b2c339df softirq: s/BUG/WA...

	WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
		  t->use_callback ? "callback" : "func",
		  t->use_callback ? (void *)t->callback : (void *)t->func);

	return false;
}

82b691bed softirq: Consolid...
static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
1da177e4c Linux-2.6.12-rc2
{
	struct tasklet_struct *list;

	local_irq_disable();
82b691bed softirq: Consolid...
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
1da177e4c Linux-2.6.12-rc2
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
697d8c63c tasklets: Replace...
				if (tasklet_clear_sched(t)) {
6b2c339df softirq: s/BUG/WA...
					if (t->use_callback)
						t->callback(t);
					else
						t->func(t->data);
				}
1da177e4c Linux-2.6.12-rc2
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
48f20a9a9 tasklets: execute...
		t->next = NULL;
82b691bed softirq: Consolid...
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
1da177e4c Linux-2.6.12-rc2
		local_irq_enable();
	}
}

82b691bed softirq: Consolid...
static __latent_entropy void tasklet_action(struct softirq_action *a)
1da177e4c Linux-2.6.12-rc2
{
82b691bed softirq: Consolid...
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
1da177e4c Linux-2.6.12-rc2
}
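/*
 * Note the two bail-out paths in tasklet_action_common() above: if
 * tasklet_trylock() fails the tasklet is running on another CPU, and if
 * t->count is non-zero the tasklet is disabled; in both cases it is
 * requeued at the tail and the softirq re-raised instead of being spun on.
 */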

12cc923f1 tasklet: Introduc...
void tasklet_setup(struct tasklet_struct *t,
		   void (*callback)(struct tasklet_struct *))
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->callback = callback;
	t->use_callback = true;
	t->data = 0;
}
EXPORT_SYMBOL(tasklet_setup);

1da177e4c Linux-2.6.12-rc2
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
12cc923f1 tasklet: Introduc...
	t->use_callback = false;
1da177e4c Linux-2.6.12-rc2
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);
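A sketch of the two initialization styles from a driver's point of view (my_dev, my_cb and my_legacy_func are hypothetical; from_tasklet() is the container_of() helper that pairs with the callback style):

	struct my_dev {
		struct tasklet_struct tasklet;
		int pending_events;
	};

	static void my_cb(struct tasklet_struct *t)
	{
		struct my_dev *dev = from_tasklet(dev, t, tasklet);

		dev->pending_events = 0;	/* bottom-half work for dev */
	}

	/* modern style: the callback receives the tasklet itself */
	tasklet_setup(&dev->tasklet, my_cb);
	/* legacy style: an opaque unsigned long is passed through */
	tasklet_init(&dev->tasklet, my_legacy_func, (unsigned long)dev);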

eb2dafbba tasklets: Prevent...
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
/*
 * Do not use in new code. Waiting for tasklets from atomic contexts is
 * error prone and should be avoided.
 */
void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * Prevent a live lock when current preempted soft
			 * interrupt processing or prevents ksoftirqd from
			 * running. If the tasklet runs on a different CPU
			 * then this has no effect other than doing the BH
			 * disable/enable dance for nothing.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}
EXPORT_SYMBOL(tasklet_unlock_spin_wait);
#endif

1da177e4c Linux-2.6.12-rc2
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
403227641 softirq: convert ...
		pr_notice("Attempt to kill tasklet from interrupt\n");

697d8c63c tasklets: Replace...
	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));

1da177e4c Linux-2.6.12-rc2
	tasklet_unlock_wait(t);
697d8c63c tasklets: Replace...
	tasklet_clear_sched(t);
1da177e4c Linux-2.6.12-rc2
}
EXPORT_SYMBOL(tasklet_kill);

eb2dafbba tasklets: Prevent...
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
da0447474 tasklets: Replace...
void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_atomic();
	clear_bit(TASKLET_STATE_RUN, &t->state);
	smp_mb__after_atomic();
	wake_up_var(&t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);

void tasklet_unlock_wait(struct tasklet_struct *t)
{
	wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif

1da177e4c Linux-2.6.12-rc2
void __init softirq_init(void)
{
48f20a9a9 tasklets: execute...
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}
962cf36c5 Remove argument f...
	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
1da177e4c Linux-2.6.12-rc2
}

3e339b5da softirq: Use hotp...
static int ksoftirqd_should_run(unsigned int cpu)
1da177e4c Linux-2.6.12-rc2
{
3e339b5da softirq: Use hotp...
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
f02fc963e softirq: Move var...
	ksoftirqd_run_begin();
3e339b5da softirq: Use hotp...
	if (local_softirq_pending()) {
0bed698a3 irq: Justify the ...
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
3e339b5da softirq: Use hotp...
		__do_softirq();
f02fc963e softirq: Move var...
		ksoftirqd_run_end();
edf22f4ca softirq: Eliminat...
		cond_resched();
3e339b5da softirq: Use hotp...
		return;
1da177e4c Linux-2.6.12-rc2
	}
f02fc963e softirq: Move var...
	ksoftirqd_run_end();
1da177e4c Linux-2.6.12-rc2
}

#ifdef CONFIG_HOTPLUG_CPU
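/*
 * takeover_tasklets() below runs as the CPUHP_SOFTIRQ_DEAD teardown
 * callback on a surviving CPU once @cpu is dead: the dead CPU's tasklet
 * lists are spliced onto this CPU's lists and the softirqs re-raised here,
 * so no queued tasklet is lost across CPU hotplug.
 */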
c4544dbc7 kernel/softirq: C...
static int takeover_tasklets(unsigned int cpu)
1da177e4c Linux-2.6.12-rc2
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
e5e417232 Fix cpu hotplug p...
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
909ea9646 core: Replace __g...
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
8afecaa68 softirq: Use __th...
		__this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
e5e417232 Fix cpu hotplug p...
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
1da177e4c Linux-2.6.12-rc2
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

e5e417232 Fix cpu hotplug p...
	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
909ea9646 core: Replace __g...
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
e5e417232 Fix cpu hotplug p...
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
1da177e4c Linux-2.6.12-rc2
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
c4544dbc7 kernel/softirq: C...
	return 0;
1da177e4c Linux-2.6.12-rc2
}
c4544dbc7 kernel/softirq: C...
#else
#define takeover_tasklets	NULL
1da177e4c Linux-2.6.12-rc2
#endif /* CONFIG_HOTPLUG_CPU */

3e339b5da softirq: Use hotp...
static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

7babe8db9 Full conversion t...
static __init int spawn_ksoftirqd(void)
1da177e4c Linux-2.6.12-rc2
{
c4544dbc7 kernel/softirq: C...
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
3e339b5da softirq: Use hotp...
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
1da177e4c Linux-2.6.12-rc2

	return 0;
}
7babe8db9 Full conversion t...
early_initcall(spawn_ksoftirqd);
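/*
 * smpboot_register_percpu_thread() above spawns one ksoftirqd/%u kthread
 * per possible CPU (parked while the CPU is offline); each thread loops
 * calling ksoftirqd_should_run() and, when softirqs are pending,
 * run_ksoftirqd().
 */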

43a256322 sparseirq: move _...
/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */
int __init __weak early_irq_init(void)
{
	return 0;
}

4a046d175 x86: arch_probe_n...
int __init __weak arch_probe_nr_irqs(void)
{
b683de2b3 genirq: Query arc...
	return NR_IRQS_LEGACY;
4a046d175 x86: arch_probe_n...
}

43a256322 sparseirq: move _...
int __init __weak arch_early_irq_init(void)
{
	return 0;
}

62a08ae2a genirq: x86: Ensu...
unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}