/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it results in better locality
     or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
DEFINE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;
	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
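
/*
 * Worked example (offset values as defined in linux/preempt.h, quoted
 * here only for orientation): local_bh_disable() adds
 * SOFTIRQ_DISABLE_OFFSET (0x200), so softirq_count() becomes non-zero
 * while the SOFTIRQ_OFFSET (0x100) bit stays clear; __do_softirq() adds
 * SOFTIRQ_OFFSET, which is the bit in_serving_softirq() tests. Thus
 * preempt_count alone is enough to tell a bh-disabled section apart
 * from actual softirq execution.
 */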

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	lockdep_assert_irqs_disabled();

	if (preempt_count() == cnt)
		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);

	__preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);
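
/*
 * Typical pairing (illustrative only): callers normally use the
 * local_bh_disable()/local_bh_enable() wrappers from linux/bottom_half.h,
 * which funnel into the *_ip functions in this file:
 *
 *	local_bh_disable();
 *	... touch per-cpu state shared with softirq context ...
 *	local_bh_enable();	- may run pending softirqs on the way out
 */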

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq());
	lockdep_assert_irqs_enabled();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC, as the current task context is borrowed for
	 * the softirq. A softirq handler such as network RX might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack, which
		 * can be potentially deep already. So call softirq in its
		 * own stack to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	lockdep_assert_irqs_disabled();
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
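
/*
 * Usage sketch (illustrative only): a subsystem registers its handler
 * once at boot and signals it later, usually from hardirq context. The
 * networking core, for example, does:
 *
 *	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 *	...
 *	raise_softirq(NET_TX_SOFTIRQ);
 *
 * softirq_init() below does the same for the two tasklet vectors.
 */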

/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

static void __tasklet_schedule_common(struct tasklet_struct *t,
				      struct tasklet_head __percpu *headp,
				      unsigned int softirq_nr)
{
	struct tasklet_head *head;
	unsigned long flags;

	local_irq_save(flags);
	head = this_cpu_ptr(headp);
	t->next = NULL;
	*head->tail = t;
	head->tail = &(t->next);
	raise_softirq_irqoff(softirq_nr);
	local_irq_restore(flags);
}

void __tasklet_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_vec,
				  TASKLET_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	__tasklet_schedule_common(t, &tasklet_hi_vec,
				  HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action_common(struct softirq_action *a,
				  struct tasklet_head *tl_head,
				  unsigned int softirq_nr)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = tl_head->head;
	tl_head->head = NULL;
	tl_head->tail = &tl_head->head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*tl_head->tail = t;
		tl_head->tail = &t->next;
		__raise_softirq_irqoff(softirq_nr);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	tasklet_action_common(a, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
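
/*
 * Usage sketch (illustrative, not part of this file; my_func and my_dev
 * are hypothetical): a driver typically declares a tasklet, schedules it
 * from its hardirq handler, and kills it on teardown:
 *
 *	static void my_func(unsigned long data) { ... }
 *	static DECLARE_TASKLET(my_tasklet, my_func, (unsigned long)&my_dev);
 *	...
 *	tasklet_schedule(&my_tasklet);	- from the interrupt handler
 *	tasklet_kill(&my_tasklet);	- before the driver unloads
 */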

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
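
/*
 * Usage sketch (illustrative; my_cb and my_th are hypothetical): the
 * callback runs in softirq context once the underlying hrtimer fires;
 * the timer itself is armed with plain hrtimer_start() on the embedded
 * timer:
 *
 *	static enum hrtimer_restart my_cb(struct hrtimer *t) { ... }
 *	tasklet_hrtimer_init(&my_th, my_cb, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	hrtimer_start(&my_th.timer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */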

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on the inline stack, as we are
		 * not deep in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */
int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}