kernel/irq_work.c
  /*
   * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   *
   * Provides a framework for enqueueing and running callbacks from hardirq
   * context. The enqueueing is NMI-safe.
   */
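
  /*
   * Example usage (an illustrative sketch; "my_work", "my_func" and the
   * message are made-up names, not part of this file):
   *
   *	static void my_func(struct irq_work *work)
   *	{
   *		pr_info("irq_work callback running in hardirq context\n");
   *	}
   *
   *	static struct irq_work my_work = { .func = my_func };
   *
   * Then, from any context, including NMI:
   *
   *	irq_work_queue(&my_work);
   */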
  #include <linux/bug.h>
  #include <linux/kernel.h>
  #include <linux/export.h>
  #include <linux/irq_work.h>
  #include <linux/percpu.h>
  #include <linux/hardirq.h>
  #include <linux/irqflags.h>
  #include <linux/sched.h>
  #include <linux/tick.h>
  #include <linux/cpu.h>
  #include <linux/notifier.h>
  #include <asm/processor.h>


  static DEFINE_PER_CPU(struct llist_head, irq_work_list);
  static DEFINE_PER_CPU(int, irq_work_raised);
  
  /*
   * Claim the entry so that no one else will poke at it.
   */
  static bool irq_work_claim(struct irq_work *work)
  {
  	unsigned long flags, oflags, nflags;

  	/*
  	 * Start with an optimistic guess at the flags as a premise, but
  	 * only trust flag values returned by cmpxchg().
  	 */
  	flags = work->flags & ~IRQ_WORK_PENDING;
  	for (;;) {
  		nflags = flags | IRQ_WORK_FLAGS;
  		oflags = cmpxchg(&work->flags, flags, nflags);
  		if (oflags == flags)
  			break;
  		if (oflags & IRQ_WORK_PENDING)
  			return false;
  		flags = oflags;
  		cpu_relax();
  	}
  
  	return true;
  }
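
  /*
   * Illustrative walk-through of the flag transitions handled above
   * (IRQ_WORK_FLAGS is IRQ_WORK_PENDING | IRQ_WORK_BUSY):
   *
   *	0              --claim-->  PENDING | BUSY  (queued, not yet run)
   *	PENDING | BUSY --run---->  BUSY            (callback executing)
   *	BUSY           --claim-->  PENDING | BUSY  (re-enqueued mid-callback)
   *	BUSY           --done--->  0               (final cmpxchg in __irq_work_run())
   *
   * A claim only fails while PENDING is set, which is what makes
   * irq_work_queue() a no-op for an already queued entry.
   */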
  void __weak arch_irq_work_raise(void)
  {
  	/*
  	 * Lame architectures will get the timer tick callback
  	 */
  }
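
  /*
   * Architectures that can send a self-interrupt override the weak stub
   * above. A rough sketch of the x86 variant (simplified from
   * arch/x86/kernel/irq_work.c, not verbatim):
   *
   *	void arch_irq_work_raise(void)
   *	{
   *		apic->send_IPI_self(IRQ_WORK_VECTOR);
   *		apic_wait_icr_idle();
   *	}
   *
   * The vector's interrupt handler then ends up calling irq_work_run().
   */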
  
  /*
   * Enqueue the irq_work @entry unless it's already pending
   * somewhere.
   *
   * Can be re-enqueued while the callback is still in progress.
   */
  bool irq_work_queue(struct irq_work *work)
  {
  	/* Only queue if not already pending */
  	if (!irq_work_claim(work))
  		return false;
  
  	/* Queue the entry and raise the IPI if needed. */
  	preempt_disable();

  	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
  
  	/*
  	 * If the work is not "lazy" or the tick is stopped, raise the irq
  	 * work interrupt (if supported by the arch); otherwise, just wait
  	 * for the next tick.
  	 */
  	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
  		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
  			arch_irq_work_raise();
  	}

  	preempt_enable();
  
  	return true;
  }
  EXPORT_SYMBOL_GPL(irq_work_queue);
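
  /*
   * Example of a "lazy" entry (illustrative; "my_lazy_work" reuses the
   * made-up my_func from the example near the top of this file). With
   * IRQ_WORK_LAZY set, the path above skips the self-interrupt while the
   * tick is running and lets the next tick process the work instead:
   *
   *	static struct irq_work my_lazy_work = {
   *		.flags	= IRQ_WORK_LAZY,
   *		.func	= my_func,
   *	};
   *
   *	irq_work_queue(&my_lazy_work);
   */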
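  /*
   * Used by the nohz code: a CPU with pending irq_work must keep its tick
   * running, otherwise lazy entries could sit unprocessed indefinitely.
   */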
  bool irq_work_needs_cpu(void)
  {
  	struct llist_head *this_list;
  
  	this_list = &__get_cpu_var(irq_work_list);
  	if (llist_empty(this_list))
  		return false;
  	/* All work should have been flushed before going offline */
  	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
  	return true;
  }
  static void __irq_work_run(void)
  {
  	unsigned long flags;
  	struct irq_work *work;
  	struct llist_head *this_list;
  	struct llist_node *llnode;

  
  	/*
  	 * Reset the "raised" state right before we check the list because
  	 * an NMI may enqueue after we find the list empty from the runner.
  	 */
  	__this_cpu_write(irq_work_raised, 0);
  	barrier();
  	this_list = &__get_cpu_var(irq_work_list);
  	if (llist_empty(this_list))
  		return;
  	BUG_ON(!irqs_disabled());
  	llnode = llist_del_all(this_list);
  	while (llnode != NULL) {
  		work = llist_entry(llnode, struct irq_work, llnode);

  		llnode = llist_next(llnode);
  
  		/*
  		 * Clear the PENDING bit; after this point the @work
  		 * can be re-used.
  		 * Make it immediately visible so that other CPUs trying
  		 * to claim that work don't rely on us to handle their data
  		 * while we are in the middle of the func.
  		 */
  		flags = work->flags & ~IRQ_WORK_PENDING;
  		xchg(&work->flags, flags);
  		work->func(work);
  		/*
  		 * Clear the BUSY bit and return to the free state if
  		 * no-one else claimed it meanwhile.
  		 */
  		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
  	}
  }
  
  /*
   * Run the irq_work entries on this cpu. Must be run from hardirq
   * context with local IRQs disabled.
   */
  void irq_work_run(void)
  {
  	BUG_ON(!in_irq());
  	__irq_work_run();
  }
  EXPORT_SYMBOL_GPL(irq_work_run);
  
  /*
   * Synchronize against the irq_work @entry; ensures the entry is not
   * currently in use.
   */
  void irq_work_sync(struct irq_work *work)
  {
  	WARN_ON_ONCE(irqs_disabled());
  	while (work->flags & IRQ_WORK_BUSY)
  		cpu_relax();
  }
  EXPORT_SYMBOL_GPL(irq_work_sync);
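
  /*
   * Example (an illustrative sketch; "obj" stands for any caller structure
   * that embeds an irq_work): wait for a possibly-running callback before
   * freeing the object it lives in.
   *
   *	irq_work_sync(&obj->work);
   *	kfree(obj);
   *
   * Callers need IRQs enabled: busy-waiting with IRQs disabled on the CPU
   * that still has to run the callback could spin forever, which is what
   * the WARN_ON_ONCE() above catches.
   */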
  
  #ifdef CONFIG_HOTPLUG_CPU
  static int irq_work_cpu_notify(struct notifier_block *self,
  			       unsigned long action, void *hcpu)
  {
  	long cpu = (long)hcpu;
  
  	switch (action) {
  	case CPU_DYING:
  		/* Called from stop_machine */
  		if (WARN_ON_ONCE(cpu != smp_processor_id()))
  			break;
  		__irq_work_run();
  		break;
  	default:
  		break;
  	}
  	return NOTIFY_OK;
  }
  
  static struct notifier_block cpu_notify;
  
  static __init int irq_work_init_cpu_notifier(void)
  {
  	cpu_notify.notifier_call = irq_work_cpu_notify;
  	cpu_notify.priority = 0;
  	register_cpu_notifier(&cpu_notify);
  	return 0;
  }
  device_initcall(irq_work_init_cpu_notifier);
  
  #endif /* CONFIG_HOTPLUG_CPU */