kernel/irq_work.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>

static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
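
/*
 * Two queues per CPU: raised_list is serviced from the irq_work
 * interrupt itself (or from the timer tick on architectures that lack
 * one), while IRQ_WORK_LAZY entries on lazy_list are batched and only
 * run from the next timer tick.
 */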

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	int oflags;

	oflags = atomic_fetch_or(IRQ_WORK_CLAIMED | CSD_TYPE_IRQ_WORK, &work->flags);
	/*
	 * If the work is already pending, no need to raise the IPI.
	 * The pairing atomic_fetch_andnot() in irq_work_run() makes sure
	 * everything we did before is visible.
	 */
	if (oflags & IRQ_WORK_PENDING)
		return false;
	return true;
}
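
/*
 * Flag lifecycle (see <linux/irq_work.h>): IRQ_WORK_CLAIMED is
 * IRQ_WORK_PENDING | IRQ_WORK_BUSY. PENDING means the work is queued
 * and is cleared just before the callback runs, so a work can be
 * re-queued from its own callback; BUSY stays set until the callback
 * has finished. CSD_TYPE_IRQ_WORK tags the llist entry so the smp
 * call-function queue can tell irq_work entries apart from csd ones.
 */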

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
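
/*
 * Architectures with a dedicated irq_work interrupt override the weak
 * function above; x86, for example, sends a self-IPI on
 * IRQ_WORK_VECTOR. Without an override, raised work is picked up by
 * irq_work_tick() instead.
 */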

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
	/* If the work is "lazy", handle it from next tick if any */
	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}
}
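
/*
 * llist_add() returns true only when the list was previously empty, so
 * the interrupt (or tick) is raised at most once per batch of queued
 * works; irq_work_run_list() later consumes the whole batch.
 */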

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();
	__irq_work_queue_local(work);
	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
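
/*
 * Usage sketch (not part of this file; my_func/my_work are invented
 * names). A caller in NMI or hardirq context defers work like so:
 *
 *	static void my_func(struct irq_work *work)
 *	{
 *		pr_info("ran from a safe hardirq context\n");
 *	}
 *	static DEFINE_IRQ_WORK(my_work, my_func);
 *
 *	irq_work_queue(&my_work);  // NMI-safe; false if already pending
 */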

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
	return irq_work_queue(work);

#else /* CONFIG_SMP: */
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	preempt_disable();
	if (cpu != smp_processor_id()) {
		/* Arch remote IPI send/receive backends aren't NMI-safe */
		WARN_ON_ONCE(in_nmi());
		__smp_call_single_queue(cpu, &work->llnode);
	} else {
		__irq_work_queue_local(work);
	}
	preempt_enable();

	return true;
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
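
/*
 * Remote works share the per-CPU call_single_queue with regular csd
 * entries; flush_smp_call_function_queue() uses the CSD_TYPE_IRQ_WORK
 * tag set in irq_work_claim() to route them to irq_work_single().
 */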

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);

	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}
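
/*
 * Called by the nohz code when deciding whether the tick can be
 * stopped, so pending work never sits unserviced on an idle CPU.
 */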

void irq_work_single(void *arg)
{
	struct irq_work *work = arg;
	int flags;

	/*
	 * Clear the PENDING bit, after this point the @work
	 * can be re-used.
	 * Make it immediately visible so that other CPUs trying
	 * to claim that work don't rely on us to handle their data
	 * while we are in the middle of the func.
	 */
	flags = atomic_fetch_andnot(IRQ_WORK_PENDING, &work->flags);

	lockdep_irq_work_enter(work);
	work->func(work);
	lockdep_irq_work_exit(work);
	/*
	 * Clear the BUSY bit and return to the free state if
	 * no-one else claimed it meanwhile.
	 */
	flags &= ~IRQ_WORK_PENDING;
	(void)atomic_cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
}
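
/*
 * If the work was re-claimed while work->func() ran, work->flags no
 * longer matches the expected value and the final cmpxchg above fails
 * on purpose, leaving PENDING and BUSY set so the re-queued work stays
 * live.
 */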

static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode)
		irq_work_single(work);
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
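
/*
 * Summary of where each list runs: raised_list from the irq_work
 * interrupt via irq_work_run(), or from the tick above when the
 * architecture has no such interrupt; lazy_list always from the tick.
 */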

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	lockdep_assert_irqs_enabled();

	while (atomic_read(&work->flags) & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
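
A minimal caller-side sketch of how the API above is used (illustrative
only; my_func, my_work, my_nmi_path and my_teardown are invented names,
and the queueing site is assumed to be NMI or hardirq code):

#include <linux/irq_work.h>
#include <linux/printk.h>

static unsigned long hits;

/* Runs in hardirq context on the CPU that queued the work. */
static void my_func(struct irq_work *work)
{
	hits++;
	pr_info("irq_work ran, hits=%lu\n", hits);
}

static DEFINE_IRQ_WORK(my_work, my_func);

/* NMI-safe: duplicate queues coalesce while the work is PENDING. */
void my_nmi_path(void)
{
	irq_work_queue(&my_work);
}

/* Teardown: wait (with IRQs enabled) for a running callback to end. */
void my_teardown(void)
{
	irq_work_sync(&my_work);
}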