Blame view

kernel/irq_work.c 4.75 KB
457c89965   Thomas Gleixner   treewide: Add SPD...
1
  // SPDX-License-Identifier: GPL-2.0-only
e360adbe2   Peter Zijlstra   irq_work: Add gen...
2
  /*
90eec103b   Peter Zijlstra   treewide: Remove ...
3
   * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
e360adbe2   Peter Zijlstra   irq_work: Add gen...
4
5
6
7
   *
   * Provides a framework for enqueueing and running callbacks from hardirq
   * context. The enqueueing is NMI-safe.
   */
83e3fa6f0   Paul Gortmaker   irq_work: fix com...
8
  #include <linux/bug.h>
e360adbe2   Peter Zijlstra   irq_work: Add gen...
9
  #include <linux/kernel.h>
9984de1a5   Paul Gortmaker   kernel: Map most ...
10
  #include <linux/export.h>
e360adbe2   Peter Zijlstra   irq_work: Add gen...
11
  #include <linux/irq_work.h>
967d1f906   Paul Gortmaker   kernel: fix two i...
12
  #include <linux/percpu.h>
e360adbe2   Peter Zijlstra   irq_work: Add gen...
13
  #include <linux/hardirq.h>
ef1f09825   Chris Metcalf   irq_work: fix com...
14
  #include <linux/irqflags.h>
bc6679aef   Frederic Weisbecker   irq_work: Make se...
15
16
  #include <linux/sched.h>
  #include <linux/tick.h>
c0e980a4b   Steven Rostedt   irq_work: Flush w...
17
18
  #include <linux/cpu.h>
  #include <linux/notifier.h>
478850160   Frederic Weisbecker   irq_work: Impleme...
19
  #include <linux/smp.h>
967d1f906   Paul Gortmaker   kernel: fix two i...
20
  #include <asm/processor.h>
e360adbe2   Peter Zijlstra   irq_work: Add gen...
21

e360adbe2   Peter Zijlstra   irq_work: Add gen...
22

b93e0b8fa   Frederic Weisbecker   irq_work: Split r...
23
24
  static DEFINE_PER_CPU(struct llist_head, raised_list);
  static DEFINE_PER_CPU(struct llist_head, lazy_list);
e360adbe2   Peter Zijlstra   irq_work: Add gen...
25
26
27
28
  
/*
 * Claim the entry so that no one else will poke at it.
 *
 * Returns true when this caller won the claim (and may therefore
 * enqueue the work), false when the work is already pending.  Lockless
 * cmpxchg() loop, so it is safe to call from NMI context.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_CLAIMED;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		/* Raced with a concurrent claimer: if it set PENDING, we lost. */
		if (oflags & IRQ_WORK_PENDING)
			return false;
		/* Flags changed under us (e.g. BUSY cleared): retry with them. */
		flags = oflags;
		cpu_relax();
	}

	return true;
}
e360adbe2   Peter Zijlstra   irq_work: Add gen...
51
52
53
54
55
56
/*
 * Raise the irq_work interrupt on this CPU.  Weak default for
 * architectures without a dedicated irq_work interrupt; their pending
 * work is picked up from the timer tick instead.
 */
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
471ba0e68   Nicholas Piggin   irq_work: Do not ...
57
58
  /* Enqueue on current CPU, work must already be claimed and preempt disabled */
  static void __irq_work_queue_local(struct irq_work *work)
478850160   Frederic Weisbecker   irq_work: Impleme...
59
  {
471ba0e68   Nicholas Piggin   irq_work: Do not ...
60
61
62
63
64
65
66
67
68
69
  	/* If the work is "lazy", handle it from next tick if any */
  	if (work->flags & IRQ_WORK_LAZY) {
  		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
  		    tick_nohz_tick_stopped())
  			arch_irq_work_raise();
  	} else {
  		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
  			arch_irq_work_raise();
  	}
  }
478850160   Frederic Weisbecker   irq_work: Impleme...
70

471ba0e68   Nicholas Piggin   irq_work: Do not ...
71
72
73
/*
 * Enqueue the irq work @work on the current CPU.
 *
 * Returns true when @work was newly queued, false when it was already
 * pending (the claim failed).
 */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
478850160   Frederic Weisbecker   irq_work: Impleme...
85

471ba0e68   Nicholas Piggin   irq_work: Do not ...
86
87
88
89
90
91
92
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	/* On UP the only possible target is this CPU: queue locally. */
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Pin this CPU so smp_processor_id() stays valid below. */
	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backend aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		/*
		 * llist_add() returns true when the list was previously
		 * empty; only then is a fresh IPI needed to kick @cpu.
		 */
		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
			arch_send_call_function_single_ipi(cpu);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
471ba0e68   Nicholas Piggin   irq_work: Do not ...
118

e360adbe2   Peter Zijlstra   irq_work: Add gen...
119

00b429591   Frederic Weisbecker   irq_work: Don't s...
120
121
  bool irq_work_needs_cpu(void)
  {
b93e0b8fa   Frederic Weisbecker   irq_work: Split r...
122
  	struct llist_head *raised, *lazy;
00b429591   Frederic Weisbecker   irq_work: Don't s...
123

22127e93c   Christoph Lameter   time: Replace __g...
124
125
  	raised = this_cpu_ptr(&raised_list);
  	lazy = this_cpu_ptr(&lazy_list);
76a33061b   Frederic Weisbecker   irq_work: Force r...
126
127
128
129
  
  	if (llist_empty(raised) || arch_irq_work_has_interrupt())
  		if (llist_empty(lazy))
  			return false;
00b429591   Frederic Weisbecker   irq_work: Don't s...
130

8aa2accee   Steven Rostedt   irq_work: Warn if...
131
132
  	/* All work should have been flushed before going offline */
  	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
00b429591   Frederic Weisbecker   irq_work: Don't s...
133
134
  	return true;
  }
b93e0b8fa   Frederic Weisbecker   irq_work: Split r...
135
/*
 * Run and flush every entry on @list.  Interrupts must be disabled
 * (enforced by the BUG_ON below); the callbacks run from this context.
 */
static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	/* Detach the whole list atomically; new entries land on a fresh list. */
	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
c0e980a4b   Steven Rostedt   irq_work: Flush w...
164
165
  
  /*
a77353e5e   Peter Zijlstra   irq_work: Remove ...
166
167
   * hotplug calls this through:
   *  hotplug_cfd() -> flush_smp_call_function_queue()
c0e980a4b   Steven Rostedt   irq_work: Flush w...
168
169
170
   */
  void irq_work_run(void)
  {
22127e93c   Christoph Lameter   time: Replace __g...
171
172
  	irq_work_run_list(this_cpu_ptr(&raised_list));
  	irq_work_run_list(this_cpu_ptr(&lazy_list));
c0e980a4b   Steven Rostedt   irq_work: Flush w...
173
  }
e360adbe2   Peter Zijlstra   irq_work: Add gen...
174
  EXPORT_SYMBOL_GPL(irq_work_run);
76a33061b   Frederic Weisbecker   irq_work: Force r...
175
176
  void irq_work_tick(void)
  {
56e4dea81   Christoph Lameter   percpu: Convert r...
177
  	struct llist_head *raised = this_cpu_ptr(&raised_list);
76a33061b   Frederic Weisbecker   irq_work: Force r...
178
179
180
  
  	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
  		irq_work_run_list(raised);
56e4dea81   Christoph Lameter   percpu: Convert r...
181
  	irq_work_run_list(this_cpu_ptr(&lazy_list));
76a33061b   Frederic Weisbecker   irq_work: Force r...
182
  }
e360adbe2   Peter Zijlstra   irq_work: Add gen...
183
184
185
186
  /*
   * Synchronize against the irq_work @entry, ensures the entry is not
   * currently in use.
   */
38aaf8090   Huang Ying   irq_work: Use lli...
187
  void irq_work_sync(struct irq_work *work)
e360adbe2   Peter Zijlstra   irq_work: Add gen...
188
  {
3c7169a3b   Frederic Weisbecker   irq_work: Use loc...
189
  	lockdep_assert_irqs_enabled();
e360adbe2   Peter Zijlstra   irq_work: Add gen...
190

38aaf8090   Huang Ying   irq_work: Use lli...
191
  	while (work->flags & IRQ_WORK_BUSY)
e360adbe2   Peter Zijlstra   irq_work: Add gen...
192
193
194
  		cpu_relax();
  }
  EXPORT_SYMBOL_GPL(irq_work_sync);