kernel/softirq.c

/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
#include <linux/tick.h>
#include <linux/irq.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>

/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only the local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether it will result in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

const char * const softirq_to_name[NR_SOFTIRQS] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * If ksoftirqd is scheduled, we do not want to process pending softirqs
 * right now. Let ksoftirqd handle this at its own rate, to get fairness,
 * unless we're doing some of the synchronous softirqs.
 */
#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
static bool ksoftirqd_running(unsigned long pending)
{
	struct task_struct *tsk = __this_cpu_read(ksoftirqd);

	if (pending & SOFTIRQ_NOW_MASK)
		return false;

	return tsk && (tsk->state == TASK_RUNNING);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
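/*
 * A worked example of that bookkeeping, as a stand-alone user-space sketch.
 * The numeric values below are assumptions mirroring the usual layout in
 * <linux/preempt.h> (SOFTIRQ count in bits 8-15); only the relationship
 * between the two offsets matters here.
 */
#include <stdio.h>

#define SOFTIRQ_SHIFT		8
#define SOFTIRQ_OFFSET		(1UL << SOFTIRQ_SHIFT)		/* 0x100 */
#define SOFTIRQ_DISABLE_OFFSET	(2UL * SOFTIRQ_OFFSET)		/* 0x200 */
#define SOFTIRQ_MASK		(0xffUL << SOFTIRQ_SHIFT)

int main(void)
{
	unsigned long pc;

	pc = SOFTIRQ_DISABLE_OFFSET;	/* task called local_bh_disable() */
	printf("bh disabled:     count=%#lx serving=%d\n",
	       pc & SOFTIRQ_MASK, !!(pc & SOFTIRQ_OFFSET));	/* 0x200, 0 */

	pc = SOFTIRQ_OFFSET;		/* __do_softirq() running a handler */
	printf("serving softirq: count=%#lx serving=%d\n",
	       pc & SOFTIRQ_MASK, !!(pc & SOFTIRQ_OFFSET));	/* 0x100, 1 */

	return 0;
}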
  
/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	/*
	 * The preempt tracer hooks into preempt_count_add and will break
	 * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
	 * is set and before current->softirq_enabled is cleared.
	 * We must manually increment preempt_count here and manually
	 * call the trace_preempt_off later.
	 */
	__preempt_count_add(cnt);
	/*
	 * Were softirqs turned off above:
	 */
	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);

	if (preempt_count() == cnt) {
#ifdef CONFIG_DEBUG_PREEMPT
		current->preempt_disable_ip = get_lock_parent_ip();
#endif
		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}
}
EXPORT_SYMBOL(__local_bh_disable_ip);
#endif /* CONFIG_TRACE_IRQFLAGS */

static void __local_bh_enable(unsigned int cnt)
{
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == (cnt & SOFTIRQ_MASK))
		trace_softirqs_on(_RET_IP_);
	preempt_count_sub(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	__local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}
EXPORT_SYMBOL(_local_bh_enable);

void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now:
	 */
	if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	preempt_count_sub(cnt - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending())) {
		/*
		 * Run softirq if any pending. And do it in its own stack
		 * as we may be calling this deep in a task call stack already.
		 */
		do_softirq();
	}

	preempt_count_dec();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}
EXPORT_SYMBOL(__local_bh_enable_ip);

/*
 * We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
 * but break the loop if need_resched() is set or after 2 ms.
 * The MAX_SOFTIRQ_TIME provides a nice upper bound in most cases, but in
 * certain cases, such as stop_machine(), jiffies may cease to
 * increment and so we need the MAX_SOFTIRQ_RESTART limit as
 * well to make sure we eventually return from this method.
 *
 * These limits have been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_TIME  msecs_to_jiffies(2)
#define MAX_SOFTIRQ_RESTART 10

#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * When we run softirqs from irq_exit() and thus on the hardirq stack we need
 * to keep the lockdep irq context tracking as tight as possible in order to
 * not mis-qualify lock contexts and miss possible deadlocks.
 */

static inline bool lockdep_softirq_start(void)
{
	bool in_hardirq = false;

	if (trace_hardirq_context(current)) {
		in_hardirq = true;
		trace_hardirq_exit();
	}

	lockdep_softirq_enter();

	return in_hardirq;
}

static inline void lockdep_softirq_end(bool in_hardirq)
{
	lockdep_softirq_exit();

	if (in_hardirq)
		trace_hardirq_enter();
}
#else
static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

asmlinkage __visible void __softirq_entry __do_softirq(void)
{
	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
	unsigned long old_flags = current->flags;
	int max_restart = MAX_SOFTIRQ_RESTART;
	struct softirq_action *h;
	bool in_hardirq;
	__u32 pending;
	int softirq_bit;

	/*
	 * Mask out PF_MEMALLOC as the current task context is borrowed for the
	 * softirq. A softirq handled here, such as network RX, might set
	 * PF_MEMALLOC again if the socket is related to swap.
	 */
	current->flags &= ~PF_MEMALLOC;

	pending = local_softirq_pending();
	account_irq_enter_time(current);

	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
	in_hardirq = lockdep_softirq_start();

restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	while ((softirq_bit = ffs(pending))) {
		unsigned int vec_nr;
		int prev_count;

		h += softirq_bit - 1;

		vec_nr = h - softirq_vec;
		prev_count = preempt_count();

		kstat_incr_softirqs_this_cpu(vec_nr);

		trace_softirq_entry(vec_nr);
		h->action(h);
		trace_softirq_exit(vec_nr);
		if (unlikely(prev_count != preempt_count())) {
			pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n",
			       vec_nr, softirq_to_name[vec_nr], h->action,
			       prev_count, preempt_count());
			preempt_count_set(prev_count);
		}
		h++;
		pending >>= softirq_bit;
	}

	rcu_bh_qs();
	local_irq_disable();

	pending = local_softirq_pending();
	if (pending) {
		if (time_before(jiffies, end) && !need_resched() &&
		    --max_restart)
			goto restart;

		wakeup_softirqd();
	}

	lockdep_softirq_end(in_hardirq);
	account_irq_exit_time(current);
	__local_bh_enable(SOFTIRQ_OFFSET);
	WARN_ON_ONCE(in_interrupt());
	current_restore_flags(old_flags, PF_MEMALLOC);
}
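/*
 * The dispatch loop above walks the pending bitmask with ffs(), skipping
 * straight to the next set bit instead of testing every vector. A
 * self-contained user-space sketch of the same walk; the handler table and
 * the pending mask below are made-up illustrations, and the 2 ms /
 * MAX_SOFTIRQ_RESTART budget is omitted.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static const char * const names[] = {
	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
	"TASKLET", "SCHED", "HRTIMER", "RCU"
};

int main(void)
{
	/* Pretend TIMER (bit 1), NET_RX (bit 3) and RCU (bit 9) are pending. */
	unsigned int pending = (1U << 1) | (1U << 3) | (1U << 9);
	const char * const *h = names;
	int softirq_bit;

	while ((softirq_bit = ffs(pending))) {
		h += softirq_bit - 1;		/* jump to the lowest set bit */
		printf("run vector %ld (%s)\n", (long)(h - names), *h);
		h++;				/* continue after this vector */
		pending >>= softirq_bit;	/* drop the bits just consumed */
	}
	return 0;
}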
asmlinkage __visible void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending && !ksoftirqd_running(pending))
		do_softirq_own_stack();

	local_irq_restore(flags);
}

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	rcu_irq_enter();
	if (is_idle_task(current) && !in_interrupt()) {
		/*
		 * Prevent raise_softirq from needlessly waking up ksoftirqd
		 * here, as softirq will be serviced on return from interrupt.
		 */
		local_bh_disable();
		tick_irq_enter();
		_local_bh_enable();
	}

	__irq_enter();
}

static inline void invoke_softirq(void)
{
	if (ksoftirqd_running(local_softirq_pending()))
		return;

	if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
		/*
		 * We can safely execute softirq on the current stack if
		 * it is the irq stack, because it should be near empty
		 * at this stage.
		 */
		__do_softirq();
#else
		/*
		 * Otherwise, irq_exit() is called on the task stack that can
		 * be potentially deep already. So call softirq in its own stack
		 * to prevent any overrun.
		 */
		do_softirq_own_stack();
#endif
	} else {
		wakeup_softirqd();
	}
}

static inline void tick_irq_exit(void)
{
#ifdef CONFIG_NO_HZ_COMMON
	int cpu = smp_processor_id();

	/* Make sure that timer wheel updates are propagated */
	if ((idle_cpu(cpu) && !need_resched()) || tick_nohz_full_cpu(cpu)) {
		if (!in_irq())
			tick_nohz_irq_exit();
	}
#endif
}

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
	local_irq_disable();
#else
	WARN_ON_ONCE(!irqs_disabled());
#endif
	account_irq_exit_time(current);
	preempt_count_sub(HARDIRQ_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

	tick_irq_exit();
	rcu_irq_exit();
	trace_hardirq_exit(); /* must be last! */
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void __raise_softirq_irqoff(unsigned int nr)
{
	trace_softirq_raise(nr);
	or_softirq_pending(1UL << nr);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
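/*
 * How a subsystem wires itself up to these two entry points: register an
 * action once at boot with open_softirq(), then mark the vector pending
 * (typically from hard-irq context) with raise_softirq(). A minimal sketch;
 * MY_SOFTIRQ, my_softirq_action() and my_irq_handler() are illustrative and
 * assume a vector added to the enum in <linux/interrupt.h> - in practice new
 * softirq vectors are almost never added, and tasklets, workqueues or
 * threaded IRQs are preferred.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/smp.h>

static void my_softirq_action(struct softirq_action *h)
{
	/* Runs in softirq context with hardirqs enabled. */
	pr_debug("softirq ran on CPU %d\n", smp_processor_id());
}

static int __init my_softirq_setup(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);	/* MY_SOFTIRQ: illustrative */
	return 0;
}
early_initcall(my_softirq_setup);

static irqreturn_t my_irq_handler(int irq, void *dev)
{
	raise_softirq(MY_SOFTIRQ);	/* runs on irq_exit() or in ksoftirqd */
	return IRQ_HANDLED;
}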
/*
 * Tasklets
 */
struct tasklet_head {
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_vec.tail) = t;
	__this_cpu_write(tasklet_vec.tail, &(t->next));
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__this_cpu_read(tasklet_hi_vec.tail) = t;
	__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(__tasklet_hi_schedule);
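/*
 * In both schedule paths above, ->tail always points at the link field that
 * should receive the next element (initially &head), so appending is two
 * stores and never walks the list. A stand-alone user-space sketch of that
 * representation; struct tlist and the node type are illustrative.
 */
#include <stdio.h>
#include <stddef.h>

struct node {
	struct node *next;
	int val;
};

struct tlist {
	struct node *head;
	struct node **tail;	/* &head when empty, else &last->next */
};

static void tlist_append(struct tlist *l, struct node *n)
{
	n->next = NULL;
	*l->tail = n;		/* hook onto the current end */
	l->tail = &n->next;	/* the next append writes here */
}

int main(void)
{
	struct tlist l = { .head = NULL };
	struct node a = { .val = 1 }, b = { .val = 2 };

	l.tail = &l.head;	/* empty list */
	tlist_append(&l, &a);
	tlist_append(&l, &b);

	for (struct node *n = l.head; n; n = n->next)
		printf("%d\n", n->val);
	return 0;
}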
static __latent_entropy void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_vec.head);
	__this_cpu_write(tasklet_vec.head, NULL);
	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_vec.tail) = t;
		__this_cpu_write(tasklet_vec.tail, &(t->next));
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __this_cpu_read(tasklet_hi_vec.head);
	__this_cpu_write(tasklet_hi_vec.head, NULL);
	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
							&t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__this_cpu_read(tasklet_hi_vec.tail) = t;
		__this_cpu_write(tasklet_hi_vec.tail, &(t->next));
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}

void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		pr_notice("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do {
			yield();
		} while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
EXPORT_SYMBOL(tasklet_kill);
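/*
 * Typical driver-side use of the exported tasklet API: initialise once,
 * schedule from the interrupt handler, kill on teardown. A hedged sketch;
 * struct my_dev, process_completed_buffers() and the probe/irq/remove hooks
 * are illustrative placeholders.
 */
#include <linux/interrupt.h>

static struct tasklet_struct my_tasklet;

static void my_tasklet_fn(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;

	/* Deferred work: softirq context, never concurrent with itself. */
	process_completed_buffers(dev);
}

static int my_probe(struct my_dev *dev)
{
	tasklet_init(&my_tasklet, my_tasklet_fn, (unsigned long)dev);
	return 0;
}

static irqreturn_t my_irq(int irq, void *cookie)
{
	tasklet_schedule(&my_tasklet);	/* sets SCHED bit, raises TASKLET_SOFTIRQ */
	return IRQ_HANDLED;
}

static void my_remove(struct my_dev *dev)
{
	tasklet_kill(&my_tasklet);	/* waits for a running instance to finish */
}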
/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. It schedules a tasklet
 * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
 * hrtimer callback, but from softirq context.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
	struct tasklet_hrtimer *ttimer =
		container_of(timer, struct tasklet_hrtimer, timer);

	tasklet_hi_schedule(&ttimer->tasklet);
	return HRTIMER_NORESTART;
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:	 tasklet_hrtimer which is initialized
 * @function:	 hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:	 hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
			  enum hrtimer_restart (*function)(struct hrtimer *),
			  clockid_t which_clock, enum hrtimer_mode mode)
{
	hrtimer_init(&ttimer->timer, which_clock, mode);
	ttimer->timer.function = __hrtimer_tasklet_trampoline;
	tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
		     (unsigned long)ttimer);
	ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
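/*
 * Using the combo above: the supplied callback fires from the HI tasklet
 * rather than hard-irq context. A sketch that assumes the companion helper
 * tasklet_hrtimer_start() in <linux/interrupt.h>; my_poll() and the 10 ms
 * period are illustrative.
 */
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer my_poll_timer;

static enum hrtimer_restart my_poll(struct hrtimer *t)
{
	/* Softirq context here, not hard-irq context. */
	return HRTIMER_NORESTART;	/* one-shot; rearm explicitly if needed */
}

static void my_poll_start(void)
{
	tasklet_hrtimer_init(&my_poll_timer, my_poll,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&my_poll_timer, ms_to_ktime(10),
			      HRTIMER_MODE_REL);
}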
void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
	}

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd_should_run(unsigned int cpu)
{
	return local_softirq_pending();
}

static void run_ksoftirqd(unsigned int cpu)
{
	local_irq_disable();
	if (local_softirq_pending()) {
		/*
		 * We can safely run softirq on inline stack, as we are not deep
		 * in the task stack here.
		 */
		__do_softirq();
		local_irq_enable();
		cond_resched_rcu_qs();
		return;
	}
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}
static int takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head;
		this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail);
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head;
		__this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail);
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
	return 0;
}
#else
#define takeover_tasklets	NULL
#endif /* CONFIG_HOTPLUG_CPU */

static struct smp_hotplug_thread softirq_threads = {
	.store			= &ksoftirqd,
	.thread_should_run	= ksoftirqd_should_run,
	.thread_fn		= run_ksoftirqd,
	.thread_comm		= "ksoftirqd/%u",
};

static __init int spawn_ksoftirqd(void)
{
	cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
				  takeover_tasklets);
	BUG_ON(smpboot_register_percpu_thread(&softirq_threads));

	return 0;
}
early_initcall(spawn_ksoftirqd);
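/*
 * ksoftirqd is one user of the smpboot per-CPU kthread machinery registered
 * above; another subsystem would fill in the same fields. A sketch using only
 * the fields that softirq_threads uses; my_worker, my_work_pending() and
 * my_process_work() are illustrative placeholders.
 */
#include <linux/percpu.h>
#include <linux/smpboot.h>
#include <linux/init.h>

static DEFINE_PER_CPU(struct task_struct *, my_worker);

static int my_should_run(unsigned int cpu)
{
	return my_work_pending(cpu);
}

static void my_thread_fn(unsigned int cpu)
{
	my_process_work(cpu);
}

static struct smp_hotplug_thread my_threads = {
	.store			= &my_worker,
	.thread_should_run	= my_should_run,
	.thread_fn		= my_thread_fn,
	.thread_comm		= "my_worker/%u",
};

static __init int my_spawn(void)
{
	return smpboot_register_percpu_thread(&my_threads);
}
early_initcall(my_spawn);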

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
	return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
	return NR_IRQS_LEGACY;
}

int __init __weak arch_early_irq_init(void)
{
	return 0;
}

unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
{
	return from;
}