kernel/irq_work.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_CLAIMED;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backend aren't NMI safe */
		WARN_ON_ONCE(in_nmi());
		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
			arch_send_call_function_single_ipi(cpu);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}


bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
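
For reference, below is a minimal, illustrative sketch of how a client typically drives this API: declare an irq_work with its callback, queue it from a context that cannot sleep (the enqueue path above is NMI-safe), and synchronize against it before the work item goes away. This example is not part of kernel/irq_work.c, and the example_* names are invented; it only assumes the declarations in <linux/irq_work.h> and <linux/module.h>.

/* Illustrative only -- not part of kernel/irq_work.c. */
#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/module.h>
#include <linux/printk.h>

static void example_irq_work_func(struct irq_work *work)
{
	/* Runs in hard interrupt context on the CPU that queued the work. */
	pr_info("irq_work callback ran\n");
}

static DEFINE_IRQ_WORK(example_work, example_irq_work_func);

static int __init example_init(void)
{
	/* NMI-safe enqueue; returns false if the work was already pending. */
	irq_work_queue(&example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* Wait for a possibly running callback before the work item goes away. */
	irq_work_sync(&example_work);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");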