kernel/softirq.c
/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __this_cpu_read(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */

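/*
 * Editor's note - illustration only, not part of the original file.  With the
 * common SOFTIRQ_SHIFT == 8 layout, SOFTIRQ_OFFSET is 0x100 and
 * SOFTIRQ_DISABLE_OFFSET is 0x200, so:
 *
 *      local_bh_disable();             // softirq_count() == 0x200
 *      ...                             // softirq handlers cannot run here
 *      local_bh_enable();
 *
 * while a CPU that is actually executing __do_softirq() has the 0x100 bit set.
 * in_softirq() is true in both situations; in_serving_softirq() tests the
 * SOFTIRQ_OFFSET bit and is only true while a softirq handler is running.
 */
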
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into preempt_count_add and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        __preempt_count_add(cnt);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == (cnt & SOFTIRQ_MASK))
                trace_softirqs_on(_RET_IP_);
        preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        preempt_count_sub(cnt - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending())) {
                /*
                 * Run softirq if any pending. And do it in its own stack
                 * as we may be calling this deep in a task call stack already.
                 */
                do_softirq();
        }

        preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10
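
/*
 * Editor's note - illustration only, not part of the original file.  With
 * HZ=1000, msecs_to_jiffies(2) gives a two-jiffy budget; with HZ=100 it
 * rounds up to a single jiffy, so the restart loop in __do_softirq() stops
 * at the first jiffies increment.  If jiffies stops advancing altogether
 * (e.g. during stop_machine()), the MAX_SOFTIRQ_RESTART counter of 10
 * passes is what bounds the loop.
 */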

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
        bool in_hardirq = false;

        if (trace_hardirq_context(current)) {
                in_hardirq = true;
                trace_hardirq_exit();
        }

        lockdep_softirq_enter();

        return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
        lockdep_softirq_exit();

        if (in_hardirq)
                trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __do_softirq(void)
{
        unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
        unsigned long old_flags = current->flags;
        int max_restart = MAX_SOFTIRQ_RESTART;
        struct softirq_action *h;
        bool in_hardirq;
        __u32 pending;
        int softirq_bit;

        /*
         * Mask out PF_MEMALLOC as current task context is borrowed for the
         * softirq. A softirq handler such as network RX might set PF_MEMALLOC
         * again if the socket is related to swap.
         */
        current->flags &= ~PF_MEMALLOC;

        pending = local_softirq_pending();
        account_irq_enter_time(current);

        __local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
        in_hardirq = lockdep_softirq_start();

restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        while ((softirq_bit = ffs(pending))) {
                unsigned int vec_nr;
                int prev_count;

                h += softirq_bit - 1;

                vec_nr = h - softirq_vec;
                prev_count = preempt_count();

                kstat_incr_softirqs_this_cpu(vec_nr);

                trace_softirq_entry(vec_nr);
                h->action(h);
                trace_softirq_exit(vec_nr);
                if (unlikely(prev_count != preempt_count())) {
                        pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
                               vec_nr, softirq_to_name[vec_nr], h->action,
                               prev_count, preempt_count());
                        preempt_count_set(prev_count);
                }
                h++;
                pending >>= softirq_bit;
        }

        rcu_bh_qs();
        local_irq_disable();

        pending = local_softirq_pending();
        if (pending) {
                if (time_before(jiffies, end) && !need_resched() &&
                    --max_restart)
                        goto restart;

                wakeup_softirqd();
        }

        lockdep_softirq_end(in_hardirq);
        account_irq_exit_time(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
        WARN_ON_ONCE(in_interrupt());
        tsk_restore_flags(current, old_flags, PF_MEMALLOC);
}

asmlinkage __visible void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                do_softirq_own_stack();

        local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        rcu_irq_enter();
        if (is_idle_task(current) && !in_interrupt()) {
                /*
                 * Prevent raise_softirq from needlessly waking up ksoftirqd
                 * here, as softirq will be serviced on return from interrupt.
                 */
                local_bh_disable();
                tick_irq_enter();
                _local_bh_enable();
        }

        __irq_enter();
}

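/*
 * Editor's note - illustration only, not part of the original file.  A typical
 * low-level arch interrupt entry path brackets the handler roughly like this:
 *
 *      irq_enter();
 *      generic_handle_irq(irq);
 *      irq_exit();             // may run pending softirqs, see irq_exit() below
 */
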
static inline void invoke_softirq(void)
{
        if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
                /*
                 * We can safely execute softirq on the current stack if
                 * it is the irq stack, because it should be near empty
                 * at this stage.
                 */
                __do_softirq();
#else
                /*
                 * Otherwise, irq_exit() is called on the task stack that can
                 * be potentially deep already. So call softirq in its own stack
                 * to prevent from any overrun.
                 */
                do_softirq_own_stack();
#endif
        } else {
                wakeup_softirqd();
        }
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
        int cpu = smp_processor_id();

        /* Make sure that timer wheel updates are propagated */
        if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
                if (!in_interrupt())
                        tick_nohz_irq_exit();
        }
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
        local_irq_disable();
#else
        WARN_ON_ONCE(!irqs_disabled());
#endif

        account_irq_exit_time(current);
        preempt_count_sub(HARDIRQ_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        tick_irq_exit();
        rcu_irq_exit();
        trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
        trace_softirq_raise(nr);
        or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

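/*
 * Editor's note - illustrative usage only, not part of the original file.
 * A subsystem registers its handler once at boot and raises the softirq
 * whenever it has work; the networking core does roughly:
 *
 *      open_softirq(NET_TX_SOFTIRQ, net_tx_action);    // at init time
 *      ...
 *      raise_softirq_irqoff(NET_TX_SOFTIRQ);           // irqs already off
 *      raise_softirq(NET_RX_SOFTIRQ);                  // from any context
 *
 * The handler then runs from __do_softirq() above, either on hard interrupt
 * exit or in the per-CPU ksoftirqd thread.
 */
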
/*
 * Tasklets
 */
struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;
        __this_cpu_write(tasklet_vec.tail, &(t->next));
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_hi_vec.tail) = t;
        __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, t);
        __raise_softirq_irqoff(HI_SOFTIRQ);
}
EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_vec.head);
        __this_cpu_write(tasklet_vec.head, NULL);
        __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_vec.tail) = t;
                __this_cpu_write(tasklet_vec.tail, &(t->next));
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __this_cpu_read(tasklet_hi_vec.head);
        __this_cpu_write(tasklet_hi_vec.head, NULL);
        __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED,
                                                        &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__this_cpu_read(tasklet_hi_vec.tail) = t;
                __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                pr_notice("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);

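/*
 * Editor's note - illustrative driver-side usage, not part of the original
 * file; my_dev, my_tasklet_fn and dev->tasklet are made-up names:
 *
 *      static void my_tasklet_fn(unsigned long data)
 *      {
 *              struct my_dev *dev = (struct my_dev *)data;
 *              // deferred work: runs in softirq context with irqs enabled
 *      }
 *
 *      tasklet_init(&dev->tasklet, my_tasklet_fn, (unsigned long)dev);
 *      ...
 *      tasklet_schedule(&dev->tasklet);        // typically from the irq handler
 *      ...
 *      tasklet_kill(&dev->tasklet);            // on teardown, process context only
 */
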
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        tasklet_hi_schedule(&ttimer->tasklet);
        return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);

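/*
 * Editor's note - illustrative usage only, not part of the original file;
 * my_timer_cb and ttimer are made-up names:
 *
 *      static enum hrtimer_restart my_timer_cb(struct hrtimer *t)
 *      {
 *              // runs from HI_SOFTIRQ context rather than hardirq context
 *              return HRTIMER_NORESTART;
 *      }
 *
 *      tasklet_hrtimer_init(&ttimer, my_timer_cb,
 *                           CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *      tasklet_hrtimer_start(&ttimer, ms_to_ktime(10), HRTIMER_MODE_REL);
 */
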
void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
        }

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
        return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
        local_irq_disable();
        if (local_softirq_pending()) {
                /*
                 * We can safely run softirq on inline stack, as we are not deep
                 * in the task stack here.
                 */
                __do_softirq();
                rcu_note_context_switch(cpu);
                local_irq_enable();
                cond_resched();
                return;
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
                this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
                __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int cpu_callback(struct notifier_block *nfb, unsigned long action,
                        void *hcpu)
{
        switch (action) {
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                takeover_tasklets((unsigned long)hcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block cpu_nfb = {
        .notifier_call = cpu_callback
};

static struct smp_hotplug_thread softirq_threads = {
        .store                  = &ksoftirqd,
        .thread_should_run      = ksoftirqd_should_run,
        .thread_fn              = run_ksoftirqd,
        .thread_comm            = "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
        register_cpu_notifier(&cpu_nfb);

        BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

        return 0;
}
early_initcall(spawn_ksoftirqd);

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
        return from;
}