kernel/irq_work.c
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free      NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 *
 * We use the lower two bits of the next pointer to keep PENDING and BUSY
 * flags.
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL

static inline bool irq_work_is_set(struct irq_work *entry, int flags)
{
	return (unsigned long)entry->next & flags;
}

static inline struct irq_work *irq_work_next(struct irq_work *entry)
{
	unsigned long next = (unsigned long)entry->next;
	next &= ~IRQ_WORK_FLAGS;
	return (struct irq_work *)next;
}

static inline struct irq_work *next_flags(struct irq_work *entry, int flags)
{
	unsigned long next = (unsigned long)entry;
	next |= flags;
	return (struct irq_work *)next;
}

static DEFINE_PER_CPU(struct irq_work *, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *entry)
{
	struct irq_work *next, *nflags;

	do {
		next = entry->next;
		if ((unsigned long)next & IRQ_WORK_PENDING)
			return false;
		nflags = next_flags(next, IRQ_WORK_FLAGS);
	} while (cmpxchg(&entry->next, next, nflags) != next);

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *entry)
{
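	/*
	 * Lock-free push of @entry onto this CPU's irq_work_list; both
	 * low flag bits stay set in ->next the whole time (see the
	 * state table at the top of this file).
	 */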
	struct irq_work *next;

	preempt_disable();
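	/*
	 * Stay on this CPU: the self-interrupt below must be raised on
	 * the CPU whose list @entry is queued on.
	 */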

	do {
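		/*
		 * Retry if someone (e.g. an NMI on this CPU) changed
		 * the list head between the read and the cmpxchg
		 * below.
		 */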
		next = __this_cpu_read(irq_work_list);
		/* Can assign non-atomic because we keep the flags set. */
		entry->next = next_flags(next, IRQ_WORK_FLAGS);
	} while (this_cpu_cmpxchg(irq_work_list, next, entry) != next);

	/* The list was empty, raise self-interrupt to start processing. */
	if (!irq_work_next(entry))
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry; returns true on success, false when
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *entry)
{
	if (!irq_work_claim(entry)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(entry);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
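	/*
	 * Entries are pushed onto the list head and also run from the
	 * head, so callbacks execute in reverse order of enqueueing.
	 */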
	struct irq_work *list;

	if (this_cpu_read(irq_work_list) == NULL)
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	list = this_cpu_xchg(irq_work_list, NULL);
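	/*
	 * The xchg above detached the whole list atomically; entries
	 * queued from now on go onto a fresh, empty list and will
	 * raise their own interrupt.
	 */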

	while (list != NULL) {
		struct irq_work *entry = list;

		list = irq_work_next(list);

		/*
		 * Clear the PENDING bit, after this point the @entry
		 * can be re-used.
		 */
		entry->next = next_flags(NULL, IRQ_WORK_BUSY);
		entry->func(entry);

		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&entry->next,
			      next_flags(NULL, IRQ_WORK_BUSY),
			      NULL);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);

/*
 * Synchronize against the irq_work @entry, ensuring the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *entry)
{
	WARN_ON_ONCE(irqs_disabled());

	while (irq_work_is_set(entry, IRQ_WORK_BUSY))
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
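
/*
 * Example usage (an illustrative sketch; my_work_func and my_work are
 * placeholder names, and the initializer assumes struct irq_work
 * carries only the ->next and ->func members used above):
 *
 *	static void my_work_func(struct irq_work *work)
 *	{
 *		... runs later from hardirq context ...
 *	}
 *
 *	static struct irq_work my_work = { .func = my_work_func };
 *
 *	From NMI (or any other) context, queue the callback:
 *		irq_work_queue(&my_work);
 *
 *	From a context with IRQs enabled, wait until it has finished:
 *		irq_work_sync(&my_work);
 */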