Commit 950d0a10d12578a270f3dfa9fd76fe5c2deb343f
Exists in master and in 4 other branches
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  irq: Track the owner of irq descriptor
  irq: Always set IRQF_ONESHOT if no primary handler is specified
  genirq: Fix wrong bit operation
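Of the three patches pulled here, only the descriptor-owner change touches the headers shown below. For orientation, here is a hedged sketch (not part of this commit's diff, all "demo" identifiers invented) of the request pattern affected by the IRQF_ONESHOT patch: a threaded handler registered with a NULL primary handler, for which the core now forces IRQF_ONESHOT so a level-triggered line stays masked until the thread finishes.

    /*
     * Illustrative sketch only. After "irq: Always set IRQF_ONESHOT if
     * no primary handler is specified", requests like this one get
     * IRQF_ONESHOT set by the core even though the caller omitted it.
     */
    static irqreturn_t demo_thread_fn(int irq, void *dev_id)
    {
            /* runs in process context; the line is kept masked meanwhile */
            return IRQ_HANDLED;
    }

    static int demo_request(unsigned int irq, void *dev_id)
    {
            /* NULL primary handler: the core installs its default primary
             * handler and now implicitly sets IRQF_ONESHOT */
            return request_threaded_irq(irq, NULL, demo_thread_fn,
                                        IRQF_TRIGGER_LOW, "demo", dev_id);
    }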
Showing 5 changed files
include/linux/irq.h
1 | #ifndef _LINUX_IRQ_H | 1 | #ifndef _LINUX_IRQ_H |
2 | #define _LINUX_IRQ_H | 2 | #define _LINUX_IRQ_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Please do not include this file in generic code. There is currently | 5 | * Please do not include this file in generic code. There is currently |
6 | * no requirement for any architecture to implement anything held | 6 | * no requirement for any architecture to implement anything held |
7 | * within this file. | 7 | * within this file. |
8 | * | 8 | * |
9 | * Thanks. --rmk | 9 | * Thanks. --rmk |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/smp.h> | 12 | #include <linux/smp.h> |
13 | 13 | ||
14 | #ifndef CONFIG_S390 | 14 | #ifndef CONFIG_S390 |
15 | 15 | ||
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | #include <linux/cache.h> | 17 | #include <linux/cache.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/cpumask.h> | 19 | #include <linux/cpumask.h> |
20 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
21 | #include <linux/irqreturn.h> | 21 | #include <linux/irqreturn.h> |
22 | #include <linux/irqnr.h> | 22 | #include <linux/irqnr.h> |
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/topology.h> | 24 | #include <linux/topology.h> |
25 | #include <linux/wait.h> | 25 | #include <linux/wait.h> |
26 | #include <linux/module.h> | ||
26 | 27 | ||
27 | #include <asm/irq.h> | 28 | #include <asm/irq.h> |
28 | #include <asm/ptrace.h> | 29 | #include <asm/ptrace.h> |
29 | #include <asm/irq_regs.h> | 30 | #include <asm/irq_regs.h> |
30 | 31 | ||
31 | struct seq_file; | 32 | struct seq_file; |
32 | struct irq_desc; | 33 | struct irq_desc; |
33 | struct irq_data; | 34 | struct irq_data; |
34 | typedef void (*irq_flow_handler_t)(unsigned int irq, | 35 | typedef void (*irq_flow_handler_t)(unsigned int irq, |
35 | struct irq_desc *desc); | 36 | struct irq_desc *desc); |
36 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); | 37 | typedef void (*irq_preflow_handler_t)(struct irq_data *data); |
37 | 38 | ||
38 | /* | 39 | /* |
39 | * IRQ line status. | 40 | * IRQ line status. |
40 | * | 41 | * |
41 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h | 42 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h |
42 | * | 43 | * |
43 | * IRQ_TYPE_NONE - default, unspecified type | 44 | * IRQ_TYPE_NONE - default, unspecified type |
44 | * IRQ_TYPE_EDGE_RISING - rising edge triggered | 45 | * IRQ_TYPE_EDGE_RISING - rising edge triggered |
45 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered | 46 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered |
46 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered | 47 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered |
47 | * IRQ_TYPE_LEVEL_HIGH - high level triggered | 48 | * IRQ_TYPE_LEVEL_HIGH - high level triggered |
48 | * IRQ_TYPE_LEVEL_LOW - low level triggered | 49 | * IRQ_TYPE_LEVEL_LOW - low level triggered |
49 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits | 50 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits |
50 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits | 51 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits |
51 | * IRQ_TYPE_PROBE - Special flag for probing in progress | 52 | * IRQ_TYPE_PROBE - Special flag for probing in progress |
52 | * | 53 | * |
53 | * Bits which can be modified via irq_set/clear/modify_status_flags() | 54 | * Bits which can be modified via irq_set/clear/modify_status_flags() |
54 | * IRQ_LEVEL - Interrupt is level type. Will be also | 55 | * IRQ_LEVEL - Interrupt is level type. Will be also |
55 | * updated in the code when the above trigger | 56 | * updated in the code when the above trigger |
56 | * bits are modified via irq_set_irq_type() | 57 | * bits are modified via irq_set_irq_type() |
57 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect | 58 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect |
58 | * it from affinity setting | 59 | * it from affinity setting |
59 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing | 60 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing |
60 | * IRQ_NOREQUEST - Interrupt cannot be requested via | 61 | * IRQ_NOREQUEST - Interrupt cannot be requested via |
61 | * request_irq() | 62 | * request_irq() |
62 | * IRQ_NOTHREAD - Interrupt cannot be threaded | 63 | * IRQ_NOTHREAD - Interrupt cannot be threaded |
63 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in | 64 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in |
64 | * request/setup_irq() | 65 | * request/setup_irq() |
65 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) | 66 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
66 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context | 67 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context |
67 | * IRQ_NESTED_THREAD - Interrupt nests into another thread | 68 | * IRQ_NESTED_THREAD - Interrupt nests into another thread |
68 | */ | 69 | */ |
69 | enum { | 70 | enum { |
70 | IRQ_TYPE_NONE = 0x00000000, | 71 | IRQ_TYPE_NONE = 0x00000000, |
71 | IRQ_TYPE_EDGE_RISING = 0x00000001, | 72 | IRQ_TYPE_EDGE_RISING = 0x00000001, |
72 | IRQ_TYPE_EDGE_FALLING = 0x00000002, | 73 | IRQ_TYPE_EDGE_FALLING = 0x00000002, |
73 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), | 74 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), |
74 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, | 75 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, |
75 | IRQ_TYPE_LEVEL_LOW = 0x00000008, | 76 | IRQ_TYPE_LEVEL_LOW = 0x00000008, |
76 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), | 77 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), |
77 | IRQ_TYPE_SENSE_MASK = 0x0000000f, | 78 | IRQ_TYPE_SENSE_MASK = 0x0000000f, |
78 | 79 | ||
79 | IRQ_TYPE_PROBE = 0x00000010, | 80 | IRQ_TYPE_PROBE = 0x00000010, |
80 | 81 | ||
81 | IRQ_LEVEL = (1 << 8), | 82 | IRQ_LEVEL = (1 << 8), |
82 | IRQ_PER_CPU = (1 << 9), | 83 | IRQ_PER_CPU = (1 << 9), |
83 | IRQ_NOPROBE = (1 << 10), | 84 | IRQ_NOPROBE = (1 << 10), |
84 | IRQ_NOREQUEST = (1 << 11), | 85 | IRQ_NOREQUEST = (1 << 11), |
85 | IRQ_NOAUTOEN = (1 << 12), | 86 | IRQ_NOAUTOEN = (1 << 12), |
86 | IRQ_NO_BALANCING = (1 << 13), | 87 | IRQ_NO_BALANCING = (1 << 13), |
87 | IRQ_MOVE_PCNTXT = (1 << 14), | 88 | IRQ_MOVE_PCNTXT = (1 << 14), |
88 | IRQ_NESTED_THREAD = (1 << 15), | 89 | IRQ_NESTED_THREAD = (1 << 15), |
89 | IRQ_NOTHREAD = (1 << 16), | 90 | IRQ_NOTHREAD = (1 << 16), |
90 | }; | 91 | }; |
91 | 92 | ||
92 | #define IRQF_MODIFY_MASK \ | 93 | #define IRQF_MODIFY_MASK \ |
93 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | 94 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
94 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ | 95 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
95 | IRQ_PER_CPU | IRQ_NESTED_THREAD) | 96 | IRQ_PER_CPU | IRQ_NESTED_THREAD) |
96 | 97 | ||
97 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 98 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
98 | 99 | ||
99 | /* | 100 | /* |
100 | * Return value for chip->irq_set_affinity() | 101 | * Return value for chip->irq_set_affinity() |
101 | * | 102 | * |
102 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity | 103 | * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity |
103 | * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity | 104 | * IRQ_SET_MASK_OK_NOCOPY - OK, chip did update irq_data.affinity |
104 | */ | 105 | */ |
105 | enum { | 106 | enum { |
106 | IRQ_SET_MASK_OK = 0, | 107 | IRQ_SET_MASK_OK = 0, |
107 | IRQ_SET_MASK_OK_NOCOPY, | 108 | IRQ_SET_MASK_OK_NOCOPY, |
108 | }; | 109 | }; |
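To make the distinction between the two return values concrete, here is a hedged sketch of a chip callback that programs the interrupt routing itself and therefore returns IRQ_SET_MASK_OK_NOCOPY. The register helper and the route-to-first-CPU policy are invented, and the sketch assumes CONFIG_SMP so that irq_data.affinity exists.

    /* hypothetical hardware accessor, not a real kernel symbol */
    static void demo_write_route(unsigned int irq, unsigned int cpu);

    static int demo_set_affinity(struct irq_data *data,
                                 const struct cpumask *dest, bool force)
    {
            unsigned int cpu = cpumask_first(dest);

            if (cpu >= nr_cpu_ids)
                    return -EINVAL;

            demo_write_route(data->irq, cpu);
            cpumask_copy(data->affinity, cpumask_of(cpu));

            /* we updated irq_data.affinity ourselves; tell the core not to */
            return IRQ_SET_MASK_OK_NOCOPY;
    }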
109 | 110 | ||
110 | struct msi_desc; | 111 | struct msi_desc; |
111 | struct irq_domain; | 112 | struct irq_domain; |
112 | 113 | ||
113 | /** | 114 | /** |
114 | * struct irq_data - per irq and irq chip data passed down to chip functions | 115 | * struct irq_data - per irq and irq chip data passed down to chip functions |
115 | * @irq: interrupt number | 116 | * @irq: interrupt number |
116 | * @hwirq: hardware interrupt number, local to the interrupt domain | 117 | * @hwirq: hardware interrupt number, local to the interrupt domain |
117 | * @node: node index useful for balancing | 118 | * @node: node index useful for balancing |
118 | * @state_use_accessors: status information for irq chip functions. | 119 | * @state_use_accessors: status information for irq chip functions. |
119 | * Use accessor functions to deal with it | 120 | * Use accessor functions to deal with it |
120 | * @chip: low level interrupt hardware access | 121 | * @chip: low level interrupt hardware access |
121 | * @domain: Interrupt translation domain; responsible for mapping | 122 | * @domain: Interrupt translation domain; responsible for mapping |
122 | * between hwirq number and linux irq number. | 123 | * between hwirq number and linux irq number. |
123 | * @handler_data: per-IRQ data for the irq_chip methods | 124 | * @handler_data: per-IRQ data for the irq_chip methods |
124 | * @chip_data: platform-specific per-chip private data for the chip | 125 | * @chip_data: platform-specific per-chip private data for the chip |
125 | * methods, to allow shared chip implementations | 126 | * methods, to allow shared chip implementations |
126 | * @msi_desc: MSI descriptor | 127 | * @msi_desc: MSI descriptor |
127 | * @affinity: IRQ affinity on SMP | 128 | * @affinity: IRQ affinity on SMP |
128 | * | 129 | * |
129 | * The fields here need to overlay the ones in irq_desc until we have | 130 | * The fields here need to overlay the ones in irq_desc until we have |
130 | * cleaned up the direct references and switched everything over to | 131 | * cleaned up the direct references and switched everything over to |
131 | * irq_data. | 132 | * irq_data. |
132 | */ | 133 | */ |
133 | struct irq_data { | 134 | struct irq_data { |
134 | unsigned int irq; | 135 | unsigned int irq; |
135 | unsigned long hwirq; | 136 | unsigned long hwirq; |
136 | unsigned int node; | 137 | unsigned int node; |
137 | unsigned int state_use_accessors; | 138 | unsigned int state_use_accessors; |
138 | struct irq_chip *chip; | 139 | struct irq_chip *chip; |
139 | struct irq_domain *domain; | 140 | struct irq_domain *domain; |
140 | void *handler_data; | 141 | void *handler_data; |
141 | void *chip_data; | 142 | void *chip_data; |
142 | struct msi_desc *msi_desc; | 143 | struct msi_desc *msi_desc; |
143 | #ifdef CONFIG_SMP | 144 | #ifdef CONFIG_SMP |
144 | cpumask_var_t affinity; | 145 | cpumask_var_t affinity; |
145 | #endif | 146 | #endif |
146 | }; | 147 | }; |
147 | 148 | ||
148 | /* | 149 | /* |
149 | * Bit masks for irq_data.state | 150 | * Bit masks for irq_data.state |
150 | * | 151 | * |
151 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits | 152 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits |
152 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending | 153 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending |
153 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ | 154 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ |
154 | * IRQD_PER_CPU - Interrupt is per cpu | 155 | * IRQD_PER_CPU - Interrupt is per cpu |
155 | * IRQD_AFFINITY_SET - Interrupt affinity was set | 156 | * IRQD_AFFINITY_SET - Interrupt affinity was set |
156 | * IRQD_LEVEL - Interrupt is level triggered | 157 | * IRQD_LEVEL - Interrupt is level triggered |
157 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup | 158 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup |
158 | * from suspend | 159 | * from suspend |
159 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process | 160 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process |
160 | * context | 161 | * context |
161 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt | 162 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt |
162 | * IRQD_IRQ_MASKED - Masked state of the interrupt | 163 | * IRQD_IRQ_MASKED - Masked state of the interrupt |
163 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt | 164 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt |
164 | */ | 165 | */ |
165 | enum { | 166 | enum { |
166 | IRQD_TRIGGER_MASK = 0xf, | 167 | IRQD_TRIGGER_MASK = 0xf, |
167 | IRQD_SETAFFINITY_PENDING = (1 << 8), | 168 | IRQD_SETAFFINITY_PENDING = (1 << 8), |
168 | IRQD_NO_BALANCING = (1 << 10), | 169 | IRQD_NO_BALANCING = (1 << 10), |
169 | IRQD_PER_CPU = (1 << 11), | 170 | IRQD_PER_CPU = (1 << 11), |
170 | IRQD_AFFINITY_SET = (1 << 12), | 171 | IRQD_AFFINITY_SET = (1 << 12), |
171 | IRQD_LEVEL = (1 << 13), | 172 | IRQD_LEVEL = (1 << 13), |
172 | IRQD_WAKEUP_STATE = (1 << 14), | 173 | IRQD_WAKEUP_STATE = (1 << 14), |
173 | IRQD_MOVE_PCNTXT = (1 << 15), | 174 | IRQD_MOVE_PCNTXT = (1 << 15), |
174 | IRQD_IRQ_DISABLED = (1 << 16), | 175 | IRQD_IRQ_DISABLED = (1 << 16), |
175 | IRQD_IRQ_MASKED = (1 << 17), | 176 | IRQD_IRQ_MASKED = (1 << 17), |
176 | IRQD_IRQ_INPROGRESS = (1 << 18), | 177 | IRQD_IRQ_INPROGRESS = (1 << 18), |
177 | }; | 178 | }; |
178 | 179 | ||
179 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) | 180 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) |
180 | { | 181 | { |
181 | return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; | 182 | return d->state_use_accessors & IRQD_SETAFFINITY_PENDING; |
182 | } | 183 | } |
183 | 184 | ||
184 | static inline bool irqd_is_per_cpu(struct irq_data *d) | 185 | static inline bool irqd_is_per_cpu(struct irq_data *d) |
185 | { | 186 | { |
186 | return d->state_use_accessors & IRQD_PER_CPU; | 187 | return d->state_use_accessors & IRQD_PER_CPU; |
187 | } | 188 | } |
188 | 189 | ||
189 | static inline bool irqd_can_balance(struct irq_data *d) | 190 | static inline bool irqd_can_balance(struct irq_data *d) |
190 | { | 191 | { |
191 | return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); | 192 | return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING)); |
192 | } | 193 | } |
193 | 194 | ||
194 | static inline bool irqd_affinity_was_set(struct irq_data *d) | 195 | static inline bool irqd_affinity_was_set(struct irq_data *d) |
195 | { | 196 | { |
196 | return d->state_use_accessors & IRQD_AFFINITY_SET; | 197 | return d->state_use_accessors & IRQD_AFFINITY_SET; |
197 | } | 198 | } |
198 | 199 | ||
199 | static inline void irqd_mark_affinity_was_set(struct irq_data *d) | 200 | static inline void irqd_mark_affinity_was_set(struct irq_data *d) |
200 | { | 201 | { |
201 | d->state_use_accessors |= IRQD_AFFINITY_SET; | 202 | d->state_use_accessors |= IRQD_AFFINITY_SET; |
202 | } | 203 | } |
203 | 204 | ||
204 | static inline u32 irqd_get_trigger_type(struct irq_data *d) | 205 | static inline u32 irqd_get_trigger_type(struct irq_data *d) |
205 | { | 206 | { |
206 | return d->state_use_accessors & IRQD_TRIGGER_MASK; | 207 | return d->state_use_accessors & IRQD_TRIGGER_MASK; |
207 | } | 208 | } |
208 | 209 | ||
209 | /* | 210 | /* |
210 | * Must only be called inside irq_chip.irq_set_type() functions. | 211 | * Must only be called inside irq_chip.irq_set_type() functions. |
211 | */ | 212 | */ |
212 | static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) | 213 | static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) |
213 | { | 214 | { |
214 | d->state_use_accessors &= ~IRQD_TRIGGER_MASK; | 215 | d->state_use_accessors &= ~IRQD_TRIGGER_MASK; |
215 | d->state_use_accessors |= type & IRQD_TRIGGER_MASK; | 216 | d->state_use_accessors |= type & IRQD_TRIGGER_MASK; |
216 | } | 217 | } |
217 | 218 | ||
218 | static inline bool irqd_is_level_type(struct irq_data *d) | 219 | static inline bool irqd_is_level_type(struct irq_data *d) |
219 | { | 220 | { |
220 | return d->state_use_accessors & IRQD_LEVEL; | 221 | return d->state_use_accessors & IRQD_LEVEL; |
221 | } | 222 | } |
222 | 223 | ||
223 | static inline bool irqd_is_wakeup_set(struct irq_data *d) | 224 | static inline bool irqd_is_wakeup_set(struct irq_data *d) |
224 | { | 225 | { |
225 | return d->state_use_accessors & IRQD_WAKEUP_STATE; | 226 | return d->state_use_accessors & IRQD_WAKEUP_STATE; |
226 | } | 227 | } |
227 | 228 | ||
228 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) | 229 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) |
229 | { | 230 | { |
230 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; | 231 | return d->state_use_accessors & IRQD_MOVE_PCNTXT; |
231 | } | 232 | } |
232 | 233 | ||
233 | static inline bool irqd_irq_disabled(struct irq_data *d) | 234 | static inline bool irqd_irq_disabled(struct irq_data *d) |
234 | { | 235 | { |
235 | return d->state_use_accessors & IRQD_IRQ_DISABLED; | 236 | return d->state_use_accessors & IRQD_IRQ_DISABLED; |
236 | } | 237 | } |
237 | 238 | ||
238 | static inline bool irqd_irq_masked(struct irq_data *d) | 239 | static inline bool irqd_irq_masked(struct irq_data *d) |
239 | { | 240 | { |
240 | return d->state_use_accessors & IRQD_IRQ_MASKED; | 241 | return d->state_use_accessors & IRQD_IRQ_MASKED; |
241 | } | 242 | } |
242 | 243 | ||
243 | static inline bool irqd_irq_inprogress(struct irq_data *d) | 244 | static inline bool irqd_irq_inprogress(struct irq_data *d) |
244 | { | 245 | { |
245 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; | 246 | return d->state_use_accessors & IRQD_IRQ_INPROGRESS; |
246 | } | 247 | } |
247 | 248 | ||
248 | /* | 249 | /* |
249 | * Functions for chained handlers which can be enabled/disabled by the | 250 | * Functions for chained handlers which can be enabled/disabled by the |
250 | * standard disable_irq/enable_irq calls. Must be called with | 251 | * standard disable_irq/enable_irq calls. Must be called with |
251 | * irq_desc->lock held. | 252 | * irq_desc->lock held. |
252 | */ | 253 | */ |
253 | static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) | 254 | static inline void irqd_set_chained_irq_inprogress(struct irq_data *d) |
254 | { | 255 | { |
255 | d->state_use_accessors |= IRQD_IRQ_INPROGRESS; | 256 | d->state_use_accessors |= IRQD_IRQ_INPROGRESS; |
256 | } | 257 | } |
257 | 258 | ||
258 | static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) | 259 | static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d) |
259 | { | 260 | { |
260 | d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; | 261 | d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; |
261 | } | 262 | } |
262 | 263 | ||
263 | /** | 264 | /** |
264 | * struct irq_chip - hardware interrupt chip descriptor | 265 | * struct irq_chip - hardware interrupt chip descriptor |
265 | * | 266 | * |
266 | * @name: name for /proc/interrupts | 267 | * @name: name for /proc/interrupts |
267 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) | 268 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
268 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) | 269 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
269 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | 270 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) |
270 | * @irq_disable: disable the interrupt | 271 | * @irq_disable: disable the interrupt |
271 | * @irq_ack: start of a new interrupt | 272 | * @irq_ack: start of a new interrupt |
272 | * @irq_mask: mask an interrupt source | 273 | * @irq_mask: mask an interrupt source |
273 | * @irq_mask_ack: ack and mask an interrupt source | 274 | * @irq_mask_ack: ack and mask an interrupt source |
274 | * @irq_unmask: unmask an interrupt source | 275 | * @irq_unmask: unmask an interrupt source |
275 | * @irq_eoi: end of interrupt | 276 | * @irq_eoi: end of interrupt |
276 | * @irq_set_affinity: set the CPU affinity on SMP machines | 277 | * @irq_set_affinity: set the CPU affinity on SMP machines |
277 | * @irq_retrigger: resend an IRQ to the CPU | 278 | * @irq_retrigger: resend an IRQ to the CPU |
278 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | 279 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ |
279 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | 280 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ |
280 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | 281 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips |
281 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips | 282 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
282 | * @irq_cpu_online: configure an interrupt source for a secondary CPU | 283 | * @irq_cpu_online: configure an interrupt source for a secondary CPU |
283 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU | 284 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU |
284 | * @irq_suspend: function called from core code on suspend once per chip | 285 | * @irq_suspend: function called from core code on suspend once per chip |
285 | * @irq_resume: function called from core code on resume once per chip | 286 | * @irq_resume: function called from core code on resume once per chip |
286 | * @irq_pm_shutdown: function called from core code on shutdown once per chip | 287 | * @irq_pm_shutdown: function called from core code on shutdown once per chip |
287 | * @irq_print_chip: optional callback to print special chip info in show_interrupts | 288 | * @irq_print_chip: optional callback to print special chip info in show_interrupts |
288 | * @flags: chip specific flags | 289 | * @flags: chip specific flags |
289 | * | 290 | * |
290 | * @release: release function solely used by UML | 291 | * @release: release function solely used by UML |
291 | */ | 292 | */ |
292 | struct irq_chip { | 293 | struct irq_chip { |
293 | const char *name; | 294 | const char *name; |
294 | unsigned int (*irq_startup)(struct irq_data *data); | 295 | unsigned int (*irq_startup)(struct irq_data *data); |
295 | void (*irq_shutdown)(struct irq_data *data); | 296 | void (*irq_shutdown)(struct irq_data *data); |
296 | void (*irq_enable)(struct irq_data *data); | 297 | void (*irq_enable)(struct irq_data *data); |
297 | void (*irq_disable)(struct irq_data *data); | 298 | void (*irq_disable)(struct irq_data *data); |
298 | 299 | ||
299 | void (*irq_ack)(struct irq_data *data); | 300 | void (*irq_ack)(struct irq_data *data); |
300 | void (*irq_mask)(struct irq_data *data); | 301 | void (*irq_mask)(struct irq_data *data); |
301 | void (*irq_mask_ack)(struct irq_data *data); | 302 | void (*irq_mask_ack)(struct irq_data *data); |
302 | void (*irq_unmask)(struct irq_data *data); | 303 | void (*irq_unmask)(struct irq_data *data); |
303 | void (*irq_eoi)(struct irq_data *data); | 304 | void (*irq_eoi)(struct irq_data *data); |
304 | 305 | ||
305 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); | 306 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); |
306 | int (*irq_retrigger)(struct irq_data *data); | 307 | int (*irq_retrigger)(struct irq_data *data); |
307 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); | 308 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); |
308 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); | 309 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); |
309 | 310 | ||
310 | void (*irq_bus_lock)(struct irq_data *data); | 311 | void (*irq_bus_lock)(struct irq_data *data); |
311 | void (*irq_bus_sync_unlock)(struct irq_data *data); | 312 | void (*irq_bus_sync_unlock)(struct irq_data *data); |
312 | 313 | ||
313 | void (*irq_cpu_online)(struct irq_data *data); | 314 | void (*irq_cpu_online)(struct irq_data *data); |
314 | void (*irq_cpu_offline)(struct irq_data *data); | 315 | void (*irq_cpu_offline)(struct irq_data *data); |
315 | 316 | ||
316 | void (*irq_suspend)(struct irq_data *data); | 317 | void (*irq_suspend)(struct irq_data *data); |
317 | void (*irq_resume)(struct irq_data *data); | 318 | void (*irq_resume)(struct irq_data *data); |
318 | void (*irq_pm_shutdown)(struct irq_data *data); | 319 | void (*irq_pm_shutdown)(struct irq_data *data); |
319 | 320 | ||
320 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); | 321 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); |
321 | 322 | ||
322 | unsigned long flags; | 323 | unsigned long flags; |
323 | 324 | ||
324 | /* Currently used only by UML, might disappear one day. */ | 325 | /* Currently used only by UML, might disappear one day. */ |
325 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 326 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
326 | void (*release)(unsigned int irq, void *dev_id); | 327 | void (*release)(unsigned int irq, void *dev_id); |
327 | #endif | 328 | #endif |
328 | }; | 329 | }; |
329 | 330 | ||
330 | /* | 331 | /* |
331 | * irq_chip specific flags | 332 | * irq_chip specific flags |
332 | * | 333 | * |
333 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() | 334 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() |
334 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled | 335 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled |
335 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path | 336 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path |
336 | * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks | 337 | * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks |
337 | * when the irq is enabled | 338 | * when the irq is enabled |
338 | */ | 339 | */ |
339 | enum { | 340 | enum { |
340 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), | 341 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), |
341 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), | 342 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), |
342 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), | 343 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), |
343 | IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), | 344 | IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), |
344 | }; | 345 | }; |
345 | 346 | ||
346 | /* This include will go away once we have isolated irq_desc usage to core code */ | 347 | /* This include will go away once we have isolated irq_desc usage to core code */ |
347 | #include <linux/irqdesc.h> | 348 | #include <linux/irqdesc.h> |
348 | 349 | ||
349 | /* | 350 | /* |
350 | * Pick up the arch-dependent methods: | 351 | * Pick up the arch-dependent methods: |
351 | */ | 352 | */ |
352 | #include <asm/hw_irq.h> | 353 | #include <asm/hw_irq.h> |
353 | 354 | ||
354 | #ifndef NR_IRQS_LEGACY | 355 | #ifndef NR_IRQS_LEGACY |
355 | # define NR_IRQS_LEGACY 0 | 356 | # define NR_IRQS_LEGACY 0 |
356 | #endif | 357 | #endif |
357 | 358 | ||
358 | #ifndef ARCH_IRQ_INIT_FLAGS | 359 | #ifndef ARCH_IRQ_INIT_FLAGS |
359 | # define ARCH_IRQ_INIT_FLAGS 0 | 360 | # define ARCH_IRQ_INIT_FLAGS 0 |
360 | #endif | 361 | #endif |
361 | 362 | ||
362 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS | 363 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS |
363 | 364 | ||
364 | struct irqaction; | 365 | struct irqaction; |
365 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 366 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
366 | extern void remove_irq(unsigned int irq, struct irqaction *act); | 367 | extern void remove_irq(unsigned int irq, struct irqaction *act); |
367 | 368 | ||
368 | extern void irq_cpu_online(void); | 369 | extern void irq_cpu_online(void); |
369 | extern void irq_cpu_offline(void); | 370 | extern void irq_cpu_offline(void); |
370 | extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask); | 371 | extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask); |
371 | 372 | ||
372 | #ifdef CONFIG_GENERIC_HARDIRQS | 373 | #ifdef CONFIG_GENERIC_HARDIRQS |
373 | 374 | ||
374 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) | 375 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
375 | void irq_move_irq(struct irq_data *data); | 376 | void irq_move_irq(struct irq_data *data); |
376 | void irq_move_masked_irq(struct irq_data *data); | 377 | void irq_move_masked_irq(struct irq_data *data); |
377 | #else | 378 | #else |
378 | static inline void irq_move_irq(struct irq_data *data) { } | 379 | static inline void irq_move_irq(struct irq_data *data) { } |
379 | static inline void irq_move_masked_irq(struct irq_data *data) { } | 380 | static inline void irq_move_masked_irq(struct irq_data *data) { } |
380 | #endif | 381 | #endif |
381 | 382 | ||
382 | extern int no_irq_affinity; | 383 | extern int no_irq_affinity; |
383 | 384 | ||
384 | /* | 385 | /* |
385 | * Built-in IRQ handlers for various IRQ types, | 386 | * Built-in IRQ handlers for various IRQ types, |
386 | * callable via desc->handle_irq() | 387 | * callable via desc->handle_irq() |
387 | */ | 388 | */ |
388 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); | 389 | extern void handle_level_irq(unsigned int irq, struct irq_desc *desc); |
389 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); | 390 | extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc); |
390 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); | 391 | extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc); |
391 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); | 392 | extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc); |
392 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); | 393 | extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc); |
393 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | 394 | extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); |
394 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 395 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); |
395 | extern void handle_nested_irq(unsigned int irq); | 396 | extern void handle_nested_irq(unsigned int irq); |
396 | 397 | ||
397 | /* Handling of unhandled and spurious interrupts: */ | 398 | /* Handling of unhandled and spurious interrupts: */ |
398 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, | 399 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, |
399 | irqreturn_t action_ret); | 400 | irqreturn_t action_ret); |
400 | 401 | ||
401 | 402 | ||
402 | /* Enable/disable irq debugging output: */ | 403 | /* Enable/disable irq debugging output: */ |
403 | extern int noirqdebug_setup(char *str); | 404 | extern int noirqdebug_setup(char *str); |
404 | 405 | ||
405 | /* Checks whether the interrupt can be requested by request_irq(): */ | 406 | /* Checks whether the interrupt can be requested by request_irq(): */ |
406 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); | 407 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); |
407 | 408 | ||
408 | /* Dummy irq-chip implementations: */ | 409 | /* Dummy irq-chip implementations: */ |
409 | extern struct irq_chip no_irq_chip; | 410 | extern struct irq_chip no_irq_chip; |
410 | extern struct irq_chip dummy_irq_chip; | 411 | extern struct irq_chip dummy_irq_chip; |
411 | 412 | ||
412 | extern void | 413 | extern void |
413 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, | 414 | irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
414 | irq_flow_handler_t handle, const char *name); | 415 | irq_flow_handler_t handle, const char *name); |
415 | 416 | ||
416 | static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, | 417 | static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip, |
417 | irq_flow_handler_t handle) | 418 | irq_flow_handler_t handle) |
418 | { | 419 | { |
419 | irq_set_chip_and_handler_name(irq, chip, handle, NULL); | 420 | irq_set_chip_and_handler_name(irq, chip, handle, NULL); |
420 | } | 421 | } |
421 | 422 | ||
422 | extern void | 423 | extern void |
423 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 424 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
424 | const char *name); | 425 | const char *name); |
425 | 426 | ||
426 | static inline void | 427 | static inline void |
427 | irq_set_handler(unsigned int irq, irq_flow_handler_t handle) | 428 | irq_set_handler(unsigned int irq, irq_flow_handler_t handle) |
428 | { | 429 | { |
429 | __irq_set_handler(irq, handle, 0, NULL); | 430 | __irq_set_handler(irq, handle, 0, NULL); |
430 | } | 431 | } |
431 | 432 | ||
432 | /* | 433 | /* |
433 | * Set a highlevel chained flow handler for a given IRQ. | 434 | * Set a highlevel chained flow handler for a given IRQ. |
434 | * (a chained handler is automatically enabled and set to | 435 | * (a chained handler is automatically enabled and set to |
435 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) | 436 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) |
436 | */ | 437 | */ |
437 | static inline void | 438 | static inline void |
438 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) | 439 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) |
439 | { | 440 | { |
440 | __irq_set_handler(irq, handle, 1, NULL); | 441 | __irq_set_handler(irq, handle, 1, NULL); |
441 | } | 442 | } |
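For context, a hedged sketch of the typical consumer of this helper: a demultiplexing flow handler for a cascaded interrupt controller. demo_pending(), DEMO_IRQ_BASE and the 32-line bank are invented, and a real handler would also ack/eoi the parent line via its chip callbacks.

    #define DEMO_IRQ_BASE 64                 /* hypothetical child irq base */

    /* hypothetical read of the bank's pending-status register */
    static unsigned long demo_pending(void);

    static void demo_gpio_demux(unsigned int irq, struct irq_desc *desc)
    {
            unsigned long pending = demo_pending();
            unsigned long bit;

            /* hand each pending child line to its own flow handler */
            for_each_set_bit(bit, &pending, 32)
                    generic_handle_irq(DEMO_IRQ_BASE + bit);
    }

Wired up with irq_set_chained_handler(parent_irq, demo_gpio_demux), the parent line is then marked IRQ_NOREQUEST, IRQ_NOPROBE and IRQ_NOTHREAD as described above.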
442 | 443 | ||
443 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); | 444 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
444 | 445 | ||
445 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) | 446 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) |
446 | { | 447 | { |
447 | irq_modify_status(irq, 0, set); | 448 | irq_modify_status(irq, 0, set); |
448 | } | 449 | } |
449 | 450 | ||
450 | static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) | 451 | static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) |
451 | { | 452 | { |
452 | irq_modify_status(irq, clr, 0); | 453 | irq_modify_status(irq, clr, 0); |
453 | } | 454 | } |
454 | 455 | ||
455 | static inline void irq_set_noprobe(unsigned int irq) | 456 | static inline void irq_set_noprobe(unsigned int irq) |
456 | { | 457 | { |
457 | irq_modify_status(irq, 0, IRQ_NOPROBE); | 458 | irq_modify_status(irq, 0, IRQ_NOPROBE); |
458 | } | 459 | } |
459 | 460 | ||
460 | static inline void irq_set_probe(unsigned int irq) | 461 | static inline void irq_set_probe(unsigned int irq) |
461 | { | 462 | { |
462 | irq_modify_status(irq, IRQ_NOPROBE, 0); | 463 | irq_modify_status(irq, IRQ_NOPROBE, 0); |
463 | } | 464 | } |
464 | 465 | ||
465 | static inline void irq_set_nothread(unsigned int irq) | 466 | static inline void irq_set_nothread(unsigned int irq) |
466 | { | 467 | { |
467 | irq_modify_status(irq, 0, IRQ_NOTHREAD); | 468 | irq_modify_status(irq, 0, IRQ_NOTHREAD); |
468 | } | 469 | } |
469 | 470 | ||
470 | static inline void irq_set_thread(unsigned int irq) | 471 | static inline void irq_set_thread(unsigned int irq) |
471 | { | 472 | { |
472 | irq_modify_status(irq, IRQ_NOTHREAD, 0); | 473 | irq_modify_status(irq, IRQ_NOTHREAD, 0); |
473 | } | 474 | } |
474 | 475 | ||
475 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) | 476 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) |
476 | { | 477 | { |
477 | if (nest) | 478 | if (nest) |
478 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); | 479 | irq_set_status_flags(irq, IRQ_NESTED_THREAD); |
479 | else | 480 | else |
480 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); | 481 | irq_clear_status_flags(irq, IRQ_NESTED_THREAD); |
481 | } | 482 | } |
482 | 483 | ||
483 | /* Handle dynamic irq creation and destruction */ | 484 | /* Handle dynamic irq creation and destruction */ |
484 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | 485 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
485 | extern int create_irq(void); | 486 | extern int create_irq(void); |
486 | extern void destroy_irq(unsigned int irq); | 487 | extern void destroy_irq(unsigned int irq); |
487 | 488 | ||
488 | /* | 489 | /* |
489 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and | 490 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and |
490 | * irq_free_desc instead. | 491 | * irq_free_desc instead. |
491 | */ | 492 | */ |
492 | extern void dynamic_irq_cleanup(unsigned int irq); | 493 | extern void dynamic_irq_cleanup(unsigned int irq); |
493 | static inline void dynamic_irq_init(unsigned int irq) | 494 | static inline void dynamic_irq_init(unsigned int irq) |
494 | { | 495 | { |
495 | dynamic_irq_cleanup(irq); | 496 | dynamic_irq_cleanup(irq); |
496 | } | 497 | } |
497 | 498 | ||
498 | /* Set/get chip/data for an IRQ: */ | 499 | /* Set/get chip/data for an IRQ: */ |
499 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); | 500 | extern int irq_set_chip(unsigned int irq, struct irq_chip *chip); |
500 | extern int irq_set_handler_data(unsigned int irq, void *data); | 501 | extern int irq_set_handler_data(unsigned int irq, void *data); |
501 | extern int irq_set_chip_data(unsigned int irq, void *data); | 502 | extern int irq_set_chip_data(unsigned int irq, void *data); |
502 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); | 503 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); |
503 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); | 504 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); |
504 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | 505 | extern struct irq_data *irq_get_irq_data(unsigned int irq); |
505 | 506 | ||
506 | static inline struct irq_chip *irq_get_chip(unsigned int irq) | 507 | static inline struct irq_chip *irq_get_chip(unsigned int irq) |
507 | { | 508 | { |
508 | struct irq_data *d = irq_get_irq_data(irq); | 509 | struct irq_data *d = irq_get_irq_data(irq); |
509 | return d ? d->chip : NULL; | 510 | return d ? d->chip : NULL; |
510 | } | 511 | } |
511 | 512 | ||
512 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) | 513 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) |
513 | { | 514 | { |
514 | return d->chip; | 515 | return d->chip; |
515 | } | 516 | } |
516 | 517 | ||
517 | static inline void *irq_get_chip_data(unsigned int irq) | 518 | static inline void *irq_get_chip_data(unsigned int irq) |
518 | { | 519 | { |
519 | struct irq_data *d = irq_get_irq_data(irq); | 520 | struct irq_data *d = irq_get_irq_data(irq); |
520 | return d ? d->chip_data : NULL; | 521 | return d ? d->chip_data : NULL; |
521 | } | 522 | } |
522 | 523 | ||
523 | static inline void *irq_data_get_irq_chip_data(struct irq_data *d) | 524 | static inline void *irq_data_get_irq_chip_data(struct irq_data *d) |
524 | { | 525 | { |
525 | return d->chip_data; | 526 | return d->chip_data; |
526 | } | 527 | } |
527 | 528 | ||
528 | static inline void *irq_get_handler_data(unsigned int irq) | 529 | static inline void *irq_get_handler_data(unsigned int irq) |
529 | { | 530 | { |
530 | struct irq_data *d = irq_get_irq_data(irq); | 531 | struct irq_data *d = irq_get_irq_data(irq); |
531 | return d ? d->handler_data : NULL; | 532 | return d ? d->handler_data : NULL; |
532 | } | 533 | } |
533 | 534 | ||
534 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) | 535 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) |
535 | { | 536 | { |
536 | return d->handler_data; | 537 | return d->handler_data; |
537 | } | 538 | } |
538 | 539 | ||
539 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) | 540 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) |
540 | { | 541 | { |
541 | struct irq_data *d = irq_get_irq_data(irq); | 542 | struct irq_data *d = irq_get_irq_data(irq); |
542 | return d ? d->msi_desc : NULL; | 543 | return d ? d->msi_desc : NULL; |
543 | } | 544 | } |
544 | 545 | ||
545 | static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) | 546 | static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) |
546 | { | 547 | { |
547 | return d->msi_desc; | 548 | return d->msi_desc; |
548 | } | 549 | } |
549 | 550 | ||
550 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); | 551 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
552 | struct module *owner); | ||
553 | |||
554 | static inline int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, | ||
555 | int node) | ||
556 | { | ||
557 | return __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE); | ||
558 | } | ||
559 | |||
551 | void irq_free_descs(unsigned int irq, unsigned int cnt); | 560 | void irq_free_descs(unsigned int irq, unsigned int cnt); |
552 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | 561 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); |
553 | 562 | ||
554 | static inline int irq_alloc_desc(int node) | 563 | static inline int irq_alloc_desc(int node) |
555 | { | 564 | { |
556 | return irq_alloc_descs(-1, 0, 1, node); | 565 | return irq_alloc_descs(-1, 0, 1, node); |
557 | } | 566 | } |
558 | 567 | ||
559 | static inline int irq_alloc_desc_at(unsigned int at, int node) | 568 | static inline int irq_alloc_desc_at(unsigned int at, int node) |
560 | { | 569 | { |
561 | return irq_alloc_descs(at, at, 1, node); | 570 | return irq_alloc_descs(at, at, 1, node); |
562 | } | 571 | } |
563 | 572 | ||
564 | static inline int irq_alloc_desc_from(unsigned int from, int node) | 573 | static inline int irq_alloc_desc_from(unsigned int from, int node) |
565 | { | 574 | { |
566 | return irq_alloc_descs(-1, from, 1, node); | 575 | return irq_alloc_descs(-1, from, 1, node); |
567 | } | 576 | } |
568 | 577 | ||
569 | static inline void irq_free_desc(unsigned int irq) | 578 | static inline void irq_free_desc(unsigned int irq) |
570 | { | 579 | { |
571 | irq_free_descs(irq, 1); | 580 | irq_free_descs(irq, 1); |
572 | } | 581 | } |
573 | 582 | ||
574 | static inline int irq_reserve_irq(unsigned int irq) | 583 | static inline int irq_reserve_irq(unsigned int irq) |
575 | { | 584 | { |
576 | return irq_reserve_irqs(irq, 1); | 585 | return irq_reserve_irqs(irq, 1); |
577 | } | 586 | } |
578 | 587 | ||
579 | #ifndef irq_reg_writel | 588 | #ifndef irq_reg_writel |
580 | # define irq_reg_writel(val, addr) writel(val, addr) | 589 | # define irq_reg_writel(val, addr) writel(val, addr) |
581 | #endif | 590 | #endif |
582 | #ifndef irq_reg_readl | 591 | #ifndef irq_reg_readl |
583 | # define irq_reg_readl(addr) readl(addr) | 592 | # define irq_reg_readl(addr) readl(addr) |
584 | #endif | 593 | #endif |
585 | 594 | ||
586 | /** | 595 | /** |
587 | * struct irq_chip_regs - register offsets for struct irq_chip_generic | 596 | * struct irq_chip_regs - register offsets for struct irq_chip_generic |
588 | * @enable: Enable register offset to reg_base | 597 | * @enable: Enable register offset to reg_base |
589 | * @disable: Disable register offset to reg_base | 598 | * @disable: Disable register offset to reg_base |
590 | * @mask: Mask register offset to reg_base | 599 | * @mask: Mask register offset to reg_base |
591 | * @ack: Ack register offset to reg_base | 600 | * @ack: Ack register offset to reg_base |
592 | * @eoi: Eoi register offset to reg_base | 601 | * @eoi: Eoi register offset to reg_base |
593 | * @type: Type configuration register offset to reg_base | 602 | * @type: Type configuration register offset to reg_base |
594 | * @polarity: Polarity configuration register offset to reg_base | 603 | * @polarity: Polarity configuration register offset to reg_base |
595 | */ | 604 | */ |
596 | struct irq_chip_regs { | 605 | struct irq_chip_regs { |
597 | unsigned long enable; | 606 | unsigned long enable; |
598 | unsigned long disable; | 607 | unsigned long disable; |
599 | unsigned long mask; | 608 | unsigned long mask; |
600 | unsigned long ack; | 609 | unsigned long ack; |
601 | unsigned long eoi; | 610 | unsigned long eoi; |
602 | unsigned long type; | 611 | unsigned long type; |
603 | unsigned long polarity; | 612 | unsigned long polarity; |
604 | }; | 613 | }; |
605 | 614 | ||
606 | /** | 615 | /** |
607 | * struct irq_chip_type - Generic interrupt chip instance for a flow type | 616 | * struct irq_chip_type - Generic interrupt chip instance for a flow type |
608 | * @chip: The real interrupt chip which provides the callbacks | 617 | * @chip: The real interrupt chip which provides the callbacks |
609 | * @regs: Register offsets for this chip | 618 | * @regs: Register offsets for this chip |
610 | * @handler: Flow handler associated with this chip | 619 | * @handler: Flow handler associated with this chip |
611 | * @type: Chip can handle these flow types | 620 | * @type: Chip can handle these flow types |
612 | * | 621 | * |
613 | * An irq_chip_generic can have several instances of irq_chip_type when | 622 | * An irq_chip_generic can have several instances of irq_chip_type when |
614 | * it requires different functions and register offsets for different | 623 | * it requires different functions and register offsets for different |
615 | * flow types. | 624 | * flow types. |
616 | */ | 625 | */ |
617 | struct irq_chip_type { | 626 | struct irq_chip_type { |
618 | struct irq_chip chip; | 627 | struct irq_chip chip; |
619 | struct irq_chip_regs regs; | 628 | struct irq_chip_regs regs; |
620 | irq_flow_handler_t handler; | 629 | irq_flow_handler_t handler; |
621 | u32 type; | 630 | u32 type; |
622 | }; | 631 | }; |
623 | 632 | ||
624 | /** | 633 | /** |
625 | * struct irq_chip_generic - Generic irq chip data structure | 634 | * struct irq_chip_generic - Generic irq chip data structure |
626 | * @lock: Lock to protect register and cache data access | 635 | * @lock: Lock to protect register and cache data access |
627 | * @reg_base: Register base address (virtual) | 636 | * @reg_base: Register base address (virtual) |
628 | * @irq_base: Interrupt base nr for this chip | 637 | * @irq_base: Interrupt base nr for this chip |
629 | * @irq_cnt: Number of interrupts handled by this chip | 638 | * @irq_cnt: Number of interrupts handled by this chip |
630 | * @mask_cache: Cached mask register | 639 | * @mask_cache: Cached mask register |
631 | * @type_cache: Cached type register | 640 | * @type_cache: Cached type register |
632 | * @polarity_cache: Cached polarity register | 641 | * @polarity_cache: Cached polarity register |
633 | * @wake_enabled: Interrupt can wakeup from suspend | 642 | * @wake_enabled: Interrupt can wakeup from suspend |
634 | * @wake_active: Interrupt is marked as a wakeup-from-suspend source | 643 | * @wake_active: Interrupt is marked as a wakeup-from-suspend source |
635 | * @num_ct: Number of available irq_chip_type instances (usually 1) | 644 | * @num_ct: Number of available irq_chip_type instances (usually 1) |
636 | * @private: Private data for non generic chip callbacks | 645 | * @private: Private data for non generic chip callbacks |
637 | * @list: List head for keeping track of instances | 646 | * @list: List head for keeping track of instances |
638 | * @chip_types: Array of interrupt irq_chip_types | 647 | * @chip_types: Array of interrupt irq_chip_types |
639 | * | 648 | * |
640 | * Note that irq_chip_generic can have multiple irq_chip_type | 649 | * Note that irq_chip_generic can have multiple irq_chip_type |
641 | * implementations which can be associated with a particular irq line of | 650 | * implementations which can be associated with a particular irq line of |
642 | * an irq_chip_generic instance. That allows sharing and protecting | 651 | * an irq_chip_generic instance. That allows sharing and protecting |
643 | * state in an irq_chip_generic instance when we need to implement | 652 | * state in an irq_chip_generic instance when we need to implement |
644 | * different flow mechanisms (level/edge) for it. | 653 | * different flow mechanisms (level/edge) for it. |
645 | */ | 654 | */ |
646 | struct irq_chip_generic { | 655 | struct irq_chip_generic { |
647 | raw_spinlock_t lock; | 656 | raw_spinlock_t lock; |
648 | void __iomem *reg_base; | 657 | void __iomem *reg_base; |
649 | unsigned int irq_base; | 658 | unsigned int irq_base; |
650 | unsigned int irq_cnt; | 659 | unsigned int irq_cnt; |
651 | u32 mask_cache; | 660 | u32 mask_cache; |
652 | u32 type_cache; | 661 | u32 type_cache; |
653 | u32 polarity_cache; | 662 | u32 polarity_cache; |
654 | u32 wake_enabled; | 663 | u32 wake_enabled; |
655 | u32 wake_active; | 664 | u32 wake_active; |
656 | unsigned int num_ct; | 665 | unsigned int num_ct; |
657 | void *private; | 666 | void *private; |
658 | struct list_head list; | 667 | struct list_head list; |
659 | struct irq_chip_type chip_types[0]; | 668 | struct irq_chip_type chip_types[0]; |
660 | }; | 669 | }; |
661 | 670 | ||
662 | /** | 671 | /** |
663 | * enum irq_gc_flags - Initialization flags for generic irq chips | 672 | * enum irq_gc_flags - Initialization flags for generic irq chips |
664 | * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg | 673 | * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg |
665 | * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for | 674 | * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for |
666 | * irq chips which need to call irq_set_wake() on | 675 | * irq chips which need to call irq_set_wake() on |
667 | * the parent irq. Usually GPIO implementations. | 676 | * the parent irq. Usually GPIO implementations. |
668 | */ | 677 | */ |
669 | enum irq_gc_flags { | 678 | enum irq_gc_flags { |
670 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, | 679 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, |
671 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, | 680 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, |
672 | }; | 681 | }; |
673 | 682 | ||
674 | /* Generic chip callback functions */ | 683 | /* Generic chip callback functions */ |
675 | void irq_gc_noop(struct irq_data *d); | 684 | void irq_gc_noop(struct irq_data *d); |
676 | void irq_gc_mask_disable_reg(struct irq_data *d); | 685 | void irq_gc_mask_disable_reg(struct irq_data *d); |
677 | void irq_gc_mask_set_bit(struct irq_data *d); | 686 | void irq_gc_mask_set_bit(struct irq_data *d); |
678 | void irq_gc_mask_clr_bit(struct irq_data *d); | 687 | void irq_gc_mask_clr_bit(struct irq_data *d); |
679 | void irq_gc_unmask_enable_reg(struct irq_data *d); | 688 | void irq_gc_unmask_enable_reg(struct irq_data *d); |
680 | void irq_gc_ack_set_bit(struct irq_data *d); | 689 | void irq_gc_ack_set_bit(struct irq_data *d); |
681 | void irq_gc_ack_clr_bit(struct irq_data *d); | 690 | void irq_gc_ack_clr_bit(struct irq_data *d); |
682 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | 691 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); |
683 | void irq_gc_eoi(struct irq_data *d); | 692 | void irq_gc_eoi(struct irq_data *d); |
684 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | 693 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); |
685 | 694 | ||
686 | /* Setup functions for irq_chip_generic */ | 695 | /* Setup functions for irq_chip_generic */ |
687 | struct irq_chip_generic * | 696 | struct irq_chip_generic * |
688 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, | 697 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, |
689 | void __iomem *reg_base, irq_flow_handler_t handler); | 698 | void __iomem *reg_base, irq_flow_handler_t handler); |
690 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | 699 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, |
691 | enum irq_gc_flags flags, unsigned int clr, | 700 | enum irq_gc_flags flags, unsigned int clr, |
692 | unsigned int set); | 701 | unsigned int set); |
693 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); | 702 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); |
694 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | 703 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, |
695 | unsigned int clr, unsigned int set); | 704 | unsigned int clr, unsigned int set); |
696 | 705 | ||
697 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) | 706 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) |
698 | { | 707 | { |
699 | return container_of(d->chip, struct irq_chip_type, chip); | 708 | return container_of(d->chip, struct irq_chip_type, chip); |
700 | } | 709 | } |
701 | 710 | ||
702 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) | 711 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) |
703 | 712 | ||
704 | #ifdef CONFIG_SMP | 713 | #ifdef CONFIG_SMP |
705 | static inline void irq_gc_lock(struct irq_chip_generic *gc) | 714 | static inline void irq_gc_lock(struct irq_chip_generic *gc) |
706 | { | 715 | { |
707 | raw_spin_lock(&gc->lock); | 716 | raw_spin_lock(&gc->lock); |
708 | } | 717 | } |
709 | 718 | ||
710 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) | 719 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) |
711 | { | 720 | { |
712 | raw_spin_unlock(&gc->lock); | 721 | raw_spin_unlock(&gc->lock); |
713 | } | 722 | } |
714 | #else | 723 | #else |
715 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } | 724 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } |
716 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } | 725 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } |
717 | #endif | 726 | #endif |
718 | 727 | ||
719 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 728 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
720 | 729 | ||
721 | #endif /* !CONFIG_S390 */ | 730 | #endif /* !CONFIG_S390 */ |
722 | 731 | ||
723 | #endif /* _LINUX_IRQ_H */ | 732 | #endif /* _LINUX_IRQ_H */ |
724 | 733 |
include/linux/irqdesc.h
1 | #ifndef _LINUX_IRQDESC_H | 1 | #ifndef _LINUX_IRQDESC_H |
2 | #define _LINUX_IRQDESC_H | 2 | #define _LINUX_IRQDESC_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Core internal functions to deal with irq descriptors | 5 | * Core internal functions to deal with irq descriptors |
6 | * | 6 | * |
7 | * This include will move to kernel/irq once we have cleaned up the tree. | 7 | * This include will move to kernel/irq once we have cleaned up the tree. |
8 | * For now it's included from <linux/irq.h> | 8 | * For now it's included from <linux/irq.h> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | struct irq_affinity_notify; | 11 | struct irq_affinity_notify; |
12 | struct proc_dir_entry; | 12 | struct proc_dir_entry; |
13 | struct timer_rand_state; | 13 | struct timer_rand_state; |
14 | /** | 14 | /** |
15 | * struct irq_desc - interrupt descriptor | 15 | * struct irq_desc - interrupt descriptor |
16 | * @irq_data: per irq and chip data passed down to chip functions | 16 | * @irq_data: per irq and chip data passed down to chip functions |
17 | * @timer_rand_state: pointer to timer rand state struct | 17 | * @timer_rand_state: pointer to timer rand state struct |
18 | * @kstat_irqs: irq stats per cpu | 18 | * @kstat_irqs: irq stats per cpu |
19 | * @handle_irq: highlevel irq-events handler | 19 | * @handle_irq: highlevel irq-events handler |
20 | * @preflow_handler: handler called before the flow handler (currently used by sparc) | 20 | * @preflow_handler: handler called before the flow handler (currently used by sparc) |
21 | * @action: the irq action chain | 21 | * @action: the irq action chain |
22 | * @status: status information | 22 | * @status: status information |
23 | * @core_internal_state__do_not_mess_with_it: core internal status information | 23 | * @core_internal_state__do_not_mess_with_it: core internal status information |
24 | * @depth: disable-depth, for nested irq_disable() calls | 24 | * @depth: disable-depth, for nested irq_disable() calls |
25 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers | 25 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers |
26 | * @irq_count: stats field to detect stalled irqs | 26 | * @irq_count: stats field to detect stalled irqs |
27 | * @last_unhandled: aging timer for unhandled count | 27 | * @last_unhandled: aging timer for unhandled count |
28 | * @irqs_unhandled: stats field for spurious unhandled interrupts | 28 | * @irqs_unhandled: stats field for spurious unhandled interrupts |
29 | * @lock: locking for SMP | 29 | * @lock: locking for SMP |
30 | * @affinity_hint: hint to user space for preferred irq affinity | 30 | * @affinity_hint: hint to user space for preferred irq affinity |
31 | * @affinity_notify: context for notification of affinity changes | 31 | * @affinity_notify: context for notification of affinity changes |
32 | * @pending_mask: pending rebalanced interrupts | 32 | * @pending_mask: pending rebalanced interrupts |
33 | * @threads_oneshot: bitfield to handle shared oneshot threads | 33 | * @threads_oneshot: bitfield to handle shared oneshot threads |
34 | * @threads_active: number of irqaction threads currently running | 34 | * @threads_active: number of irqaction threads currently running |
35 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | 35 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers |
36 | * @dir: /proc/irq/ procfs entry | 36 | * @dir: /proc/irq/ procfs entry |
37 | * @name: flow handler name for /proc/interrupts output | 37 | * @name: flow handler name for /proc/interrupts output |
38 | */ | 38 | */ |
39 | struct irq_desc { | 39 | struct irq_desc { |
40 | struct irq_data irq_data; | 40 | struct irq_data irq_data; |
41 | struct timer_rand_state *timer_rand_state; | 41 | struct timer_rand_state *timer_rand_state; |
42 | unsigned int __percpu *kstat_irqs; | 42 | unsigned int __percpu *kstat_irqs; |
43 | irq_flow_handler_t handle_irq; | 43 | irq_flow_handler_t handle_irq; |
44 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | 44 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI |
45 | irq_preflow_handler_t preflow_handler; | 45 | irq_preflow_handler_t preflow_handler; |
46 | #endif | 46 | #endif |
47 | struct irqaction *action; /* IRQ action list */ | 47 | struct irqaction *action; /* IRQ action list */ |
48 | unsigned int status_use_accessors; | 48 | unsigned int status_use_accessors; |
49 | unsigned int core_internal_state__do_not_mess_with_it; | 49 | unsigned int core_internal_state__do_not_mess_with_it; |
50 | unsigned int depth; /* nested irq disables */ | 50 | unsigned int depth; /* nested irq disables */ |
51 | unsigned int wake_depth; /* nested wake enables */ | 51 | unsigned int wake_depth; /* nested wake enables */ |
52 | unsigned int irq_count; /* For detecting broken IRQs */ | 52 | unsigned int irq_count; /* For detecting broken IRQs */ |
53 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | 53 | unsigned long last_unhandled; /* Aging timer for unhandled count */ |
54 | unsigned int irqs_unhandled; | 54 | unsigned int irqs_unhandled; |
55 | raw_spinlock_t lock; | 55 | raw_spinlock_t lock; |
56 | #ifdef CONFIG_SMP | 56 | #ifdef CONFIG_SMP |
57 | const struct cpumask *affinity_hint; | 57 | const struct cpumask *affinity_hint; |
58 | struct irq_affinity_notify *affinity_notify; | 58 | struct irq_affinity_notify *affinity_notify; |
59 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 59 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
60 | cpumask_var_t pending_mask; | 60 | cpumask_var_t pending_mask; |
61 | #endif | 61 | #endif |
62 | #endif | 62 | #endif |
63 | unsigned long threads_oneshot; | 63 | unsigned long threads_oneshot; |
64 | atomic_t threads_active; | 64 | atomic_t threads_active; |
65 | wait_queue_head_t wait_for_threads; | 65 | wait_queue_head_t wait_for_threads; |
66 | #ifdef CONFIG_PROC_FS | 66 | #ifdef CONFIG_PROC_FS |
67 | struct proc_dir_entry *dir; | 67 | struct proc_dir_entry *dir; |
68 | #endif | 68 | #endif |
69 | struct module *owner; | ||
69 | const char *name; | 70 | const char *name; |
70 | } ____cacheline_internodealigned_in_smp; | 71 | } ____cacheline_internodealigned_in_smp; |
71 | 72 | ||
72 | #ifndef CONFIG_SPARSE_IRQ | 73 | #ifndef CONFIG_SPARSE_IRQ |
73 | extern struct irq_desc irq_desc[NR_IRQS]; | 74 | extern struct irq_desc irq_desc[NR_IRQS]; |
74 | #endif | 75 | #endif |
75 | 76 | ||
76 | #ifdef CONFIG_GENERIC_HARDIRQS | 77 | #ifdef CONFIG_GENERIC_HARDIRQS |
77 | 78 | ||
78 | static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) | 79 | static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) |
79 | { | 80 | { |
80 | return &desc->irq_data; | 81 | return &desc->irq_data; |
81 | } | 82 | } |
82 | 83 | ||
83 | static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) | 84 | static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) |
84 | { | 85 | { |
85 | return desc->irq_data.chip; | 86 | return desc->irq_data.chip; |
86 | } | 87 | } |
87 | 88 | ||
88 | static inline void *irq_desc_get_chip_data(struct irq_desc *desc) | 89 | static inline void *irq_desc_get_chip_data(struct irq_desc *desc) |
89 | { | 90 | { |
90 | return desc->irq_data.chip_data; | 91 | return desc->irq_data.chip_data; |
91 | } | 92 | } |
92 | 93 | ||
93 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) | 94 | static inline void *irq_desc_get_handler_data(struct irq_desc *desc) |
94 | { | 95 | { |
95 | return desc->irq_data.handler_data; | 96 | return desc->irq_data.handler_data; |
96 | } | 97 | } |
97 | 98 | ||
98 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) | 99 | static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) |
99 | { | 100 | { |
100 | return desc->irq_data.msi_desc; | 101 | return desc->irq_data.msi_desc; |
101 | } | 102 | } |
102 | 103 | ||
103 | /* | 104 | /* |
104 | * Architectures call this to let the generic IRQ layer | 105 | * Architectures call this to let the generic IRQ layer |
105 | * handle an interrupt. If the descriptor is attached to an | 106 | * handle an interrupt. If the descriptor is attached to an |
106 | * irqchip-style controller then we call the ->handle_irq() handler, | 107 | * irqchip-style controller then we call the ->handle_irq() handler, |
107 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | 108 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. |
108 | */ | 109 | */ |
109 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | 110 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) |
110 | { | 111 | { |
111 | desc->handle_irq(irq, desc); | 112 | desc->handle_irq(irq, desc); |
112 | } | 113 | } |
113 | 114 | ||
114 | int generic_handle_irq(unsigned int irq); | 115 | int generic_handle_irq(unsigned int irq); |
115 | 116 | ||
116 | /* Test to see if a driver has successfully requested an irq */ | 117 | /* Test to see if a driver has successfully requested an irq */ |
117 | static inline int irq_has_action(unsigned int irq) | 118 | static inline int irq_has_action(unsigned int irq) |
118 | { | 119 | { |
119 | struct irq_desc *desc = irq_to_desc(irq); | 120 | struct irq_desc *desc = irq_to_desc(irq); |
120 | return desc->action != NULL; | 121 | return desc->action != NULL; |
121 | } | 122 | } |
122 | 123 | ||
123 | /* caller has locked the irq_desc and both params are valid */ | 124 | /* caller has locked the irq_desc and both params are valid */ |
124 | static inline void __irq_set_handler_locked(unsigned int irq, | 125 | static inline void __irq_set_handler_locked(unsigned int irq, |
125 | irq_flow_handler_t handler) | 126 | irq_flow_handler_t handler) |
126 | { | 127 | { |
127 | struct irq_desc *desc; | 128 | struct irq_desc *desc; |
128 | 129 | ||
129 | desc = irq_to_desc(irq); | 130 | desc = irq_to_desc(irq); |
130 | desc->handle_irq = handler; | 131 | desc->handle_irq = handler; |
131 | } | 132 | } |
132 | 133 | ||
133 | /* caller has locked the irq_desc and both params are valid */ | 134 | /* caller has locked the irq_desc and both params are valid */ |
134 | static inline void | 135 | static inline void |
135 | __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, | 136 | __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, |
136 | irq_flow_handler_t handler, const char *name) | 137 | irq_flow_handler_t handler, const char *name) |
137 | { | 138 | { |
138 | struct irq_desc *desc; | 139 | struct irq_desc *desc; |
139 | 140 | ||
140 | desc = irq_to_desc(irq); | 141 | desc = irq_to_desc(irq); |
141 | irq_desc_get_irq_data(desc)->chip = chip; | 142 | irq_desc_get_irq_data(desc)->chip = chip; |
142 | desc->handle_irq = handler; | 143 | desc->handle_irq = handler; |
143 | desc->name = name; | 144 | desc->name = name; |
144 | } | 145 | } |
145 | 146 | ||
146 | static inline int irq_balancing_disabled(unsigned int irq) | 147 | static inline int irq_balancing_disabled(unsigned int irq) |
147 | { | 148 | { |
148 | struct irq_desc *desc; | 149 | struct irq_desc *desc; |
149 | 150 | ||
150 | desc = irq_to_desc(irq); | 151 | desc = irq_to_desc(irq); |
151 | return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; | 152 | return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; |
152 | } | 153 | } |
153 | 154 | ||
154 | static inline void | 155 | static inline void |
155 | irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) | 156 | irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class) |
156 | { | 157 | { |
157 | struct irq_desc *desc = irq_to_desc(irq); | 158 | struct irq_desc *desc = irq_to_desc(irq); |
158 | 159 | ||
159 | if (desc) | 160 | if (desc) |
160 | lockdep_set_class(&desc->lock, class); | 161 | lockdep_set_class(&desc->lock, class); |
161 | } | 162 | } |
162 | 163 | ||
163 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI | 164 | #ifdef CONFIG_IRQ_PREFLOW_FASTEOI |
164 | static inline void | 165 | static inline void |
165 | __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) | 166 | __irq_set_preflow_handler(unsigned int irq, irq_preflow_handler_t handler) |
166 | { | 167 | { |
167 | struct irq_desc *desc; | 168 | struct irq_desc *desc; |
168 | 169 | ||
169 | desc = irq_to_desc(irq); | 170 | desc = irq_to_desc(irq); |
170 | desc->preflow_handler = handler; | 171 | desc->preflow_handler = handler; |
171 | } | 172 | } |
172 | #endif | 173 | #endif |
173 | #endif | 174 | #endif |
174 | 175 | ||
175 | #endif | 176 | #endif |
176 | 177 |
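
The new owner field added to struct irq_desc above lets the core pin the module that provides a descriptor for as long as an irqaction is installed on it. The request/free hunks live in kernel/irq/manage.c, which is not part of this diff, so the sketch below only illustrates the intended pattern; the function shapes are assumptions:

/*
 * Sketch only: how desc->owner is meant to be used around action
 * setup/teardown. try_module_get(NULL) succeeds, so descriptors with no
 * owner (core-allocated ones) keep working unchanged.
 */
static int demo_setup_irq(struct irq_desc *desc)
{
	/* Pin the module that allocated the descriptor... */
	if (!try_module_get(desc->owner))
		return -ENODEV;
	/* ...then install the irqaction as before. */
	return 0;
}

static void demo_free_irq(struct irq_desc *desc)
{
	/* Remove the irqaction, then drop the reference taken above. */
	module_put(desc->owner);
}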
kernel/irq/generic-chip.c
1 | /* | 1 | /* |
2 | * Library implementing the most common irq chip callback functions | 2 | * Library implementing the most common irq chip callback functions |
3 | * | 3 | * |
4 | * Copyright (C) 2011, Thomas Gleixner | 4 | * Copyright (C) 2011, Thomas Gleixner |
5 | */ | 5 | */ |
6 | #include <linux/io.h> | 6 | #include <linux/io.h> |
7 | #include <linux/irq.h> | 7 | #include <linux/irq.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <linux/interrupt.h> | 9 | #include <linux/interrupt.h> |
10 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
11 | #include <linux/syscore_ops.h> | 11 | #include <linux/syscore_ops.h> |
12 | 12 | ||
13 | #include "internals.h" | 13 | #include "internals.h" |
14 | 14 | ||
15 | static LIST_HEAD(gc_list); | 15 | static LIST_HEAD(gc_list); |
16 | static DEFINE_RAW_SPINLOCK(gc_lock); | 16 | static DEFINE_RAW_SPINLOCK(gc_lock); |
17 | 17 | ||
18 | static inline struct irq_chip_regs *cur_regs(struct irq_data *d) | 18 | static inline struct irq_chip_regs *cur_regs(struct irq_data *d) |
19 | { | 19 | { |
20 | return &container_of(d->chip, struct irq_chip_type, chip)->regs; | 20 | return &container_of(d->chip, struct irq_chip_type, chip)->regs; |
21 | } | 21 | } |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * irq_gc_noop - NOOP function | 24 | * irq_gc_noop - NOOP function |
25 | * @d: irq_data | 25 | * @d: irq_data |
26 | */ | 26 | */ |
27 | void irq_gc_noop(struct irq_data *d) | 27 | void irq_gc_noop(struct irq_data *d) |
28 | { | 28 | { |
29 | } | 29 | } |
30 | 30 | ||
31 | /** | 31 | /** |
32 | * irq_gc_mask_disable_reg - Mask chip via disable register | 32 | * irq_gc_mask_disable_reg - Mask chip via disable register |
33 | * @d: irq_data | 33 | * @d: irq_data |
34 | * | 34 | * |
35 | * Chip has separate enable/disable registers instead of a single mask | 35 | * Chip has separate enable/disable registers instead of a single mask |
36 | * register. | 36 | * register. |
37 | */ | 37 | */ |
38 | void irq_gc_mask_disable_reg(struct irq_data *d) | 38 | void irq_gc_mask_disable_reg(struct irq_data *d) |
39 | { | 39 | { |
40 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 40 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
41 | u32 mask = 1 << (d->irq - gc->irq_base); | 41 | u32 mask = 1 << (d->irq - gc->irq_base); |
42 | 42 | ||
43 | irq_gc_lock(gc); | 43 | irq_gc_lock(gc); |
44 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable); | 44 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable); |
45 | gc->mask_cache &= ~mask; | 45 | gc->mask_cache &= ~mask; |
46 | irq_gc_unlock(gc); | 46 | irq_gc_unlock(gc); |
47 | } | 47 | } |
48 | 48 | ||
49 | /** | 49 | /** |
50 | * irq_gc_mask_set_bit - Mask chip via setting bit in mask register | 50 | * irq_gc_mask_set_bit - Mask chip via setting bit in mask register |
51 | * @d: irq_data | 51 | * @d: irq_data |
52 | * | 52 | * |
53 | * Chip has a single mask register. Values of this register are cached | 53 | * Chip has a single mask register. Values of this register are cached |
54 | * and protected by gc->lock | 54 | * and protected by gc->lock |
55 | */ | 55 | */ |
56 | void irq_gc_mask_set_bit(struct irq_data *d) | 56 | void irq_gc_mask_set_bit(struct irq_data *d) |
57 | { | 57 | { |
58 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 58 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
59 | u32 mask = 1 << (d->irq - gc->irq_base); | 59 | u32 mask = 1 << (d->irq - gc->irq_base); |
60 | 60 | ||
61 | irq_gc_lock(gc); | 61 | irq_gc_lock(gc); |
62 | gc->mask_cache |= mask; | 62 | gc->mask_cache |= mask; |
63 | irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); | 63 | irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); |
64 | irq_gc_unlock(gc); | 64 | irq_gc_unlock(gc); |
65 | } | 65 | } |
66 | 66 | ||
67 | /** | 67 | /** |
68 | * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register | 68 | * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register |
69 | * @d: irq_data | 69 | * @d: irq_data |
70 | * | 70 | * |
71 | * Chip has a single mask register. Values of this register are cached | 71 | * Chip has a single mask register. Values of this register are cached |
72 | * and protected by gc->lock | 72 | * and protected by gc->lock |
73 | */ | 73 | */ |
74 | void irq_gc_mask_clr_bit(struct irq_data *d) | 74 | void irq_gc_mask_clr_bit(struct irq_data *d) |
75 | { | 75 | { |
76 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 76 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
77 | u32 mask = 1 << (d->irq - gc->irq_base); | 77 | u32 mask = 1 << (d->irq - gc->irq_base); |
78 | 78 | ||
79 | irq_gc_lock(gc); | 79 | irq_gc_lock(gc); |
80 | gc->mask_cache &= ~mask; | 80 | gc->mask_cache &= ~mask; |
81 | irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); | 81 | irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask); |
82 | irq_gc_unlock(gc); | 82 | irq_gc_unlock(gc); |
83 | } | 83 | } |
84 | 84 | ||
85 | /** | 85 | /** |
86 | * irq_gc_unmask_enable_reg - Unmask chip via enable register | 86 | * irq_gc_unmask_enable_reg - Unmask chip via enable register |
87 | * @d: irq_data | 87 | * @d: irq_data |
88 | * | 88 | * |
89 | * Chip has separate enable/disable registers instead of a single mask | 89 | * Chip has separate enable/disable registers instead of a single mask |
90 | * register. | 90 | * register. |
91 | */ | 91 | */ |
92 | void irq_gc_unmask_enable_reg(struct irq_data *d) | 92 | void irq_gc_unmask_enable_reg(struct irq_data *d) |
93 | { | 93 | { |
94 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 94 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
95 | u32 mask = 1 << (d->irq - gc->irq_base); | 95 | u32 mask = 1 << (d->irq - gc->irq_base); |
96 | 96 | ||
97 | irq_gc_lock(gc); | 97 | irq_gc_lock(gc); |
98 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable); | 98 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable); |
99 | gc->mask_cache |= mask; | 99 | gc->mask_cache |= mask; |
100 | irq_gc_unlock(gc); | 100 | irq_gc_unlock(gc); |
101 | } | 101 | } |
102 | 102 | ||
103 | /** | 103 | /** |
104 | * irq_gc_ack_set_bit - Ack pending interrupt via setting bit | 104 | * irq_gc_ack_set_bit - Ack pending interrupt via setting bit |
105 | * @d: irq_data | 105 | * @d: irq_data |
106 | */ | 106 | */ |
107 | void irq_gc_ack_set_bit(struct irq_data *d) | 107 | void irq_gc_ack_set_bit(struct irq_data *d) |
108 | { | 108 | { |
109 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 109 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
110 | u32 mask = 1 << (d->irq - gc->irq_base); | 110 | u32 mask = 1 << (d->irq - gc->irq_base); |
111 | 111 | ||
112 | irq_gc_lock(gc); | 112 | irq_gc_lock(gc); |
113 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | 113 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); |
114 | irq_gc_unlock(gc); | 114 | irq_gc_unlock(gc); |
115 | } | 115 | } |
116 | 116 | ||
117 | /** | 117 | /** |
118 | * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit | 118 | * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit |
119 | * @d: irq_data | 119 | * @d: irq_data |
120 | */ | 120 | */ |
121 | void irq_gc_ack_clr_bit(struct irq_data *d) | 121 | void irq_gc_ack_clr_bit(struct irq_data *d) |
122 | { | 122 | { |
123 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 123 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
124 | u32 mask = ~(1 << (d->irq - gc->irq_base)); | 124 | u32 mask = ~(1 << (d->irq - gc->irq_base)); |
125 | 125 | ||
126 | irq_gc_lock(gc); | 126 | irq_gc_lock(gc); |
127 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | 127 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); |
128 | irq_gc_unlock(gc); | 128 | irq_gc_unlock(gc); |
129 | } | 129 | } |
130 | 130 | ||
131 | /** | 131 | /** |
132 | * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt | 132 | * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt |
133 | * @d: irq_data | 133 | * @d: irq_data |
134 | */ | 134 | */ |
135 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) | 135 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) |
136 | { | 136 | { |
137 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 137 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
138 | u32 mask = 1 << (d->irq - gc->irq_base); | 138 | u32 mask = 1 << (d->irq - gc->irq_base); |
139 | 139 | ||
140 | irq_gc_lock(gc); | 140 | irq_gc_lock(gc); |
141 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask); | 141 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask); |
142 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); | 142 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack); |
143 | irq_gc_unlock(gc); | 143 | irq_gc_unlock(gc); |
144 | } | 144 | } |
145 | 145 | ||
146 | /** | 146 | /** |
147 | * irq_gc_eoi - EOI interrupt | 147 | * irq_gc_eoi - EOI interrupt |
148 | * @d: irq_data | 148 | * @d: irq_data |
149 | */ | 149 | */ |
150 | void irq_gc_eoi(struct irq_data *d) | 150 | void irq_gc_eoi(struct irq_data *d) |
151 | { | 151 | { |
152 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 152 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
153 | u32 mask = 1 << (d->irq - gc->irq_base); | 153 | u32 mask = 1 << (d->irq - gc->irq_base); |
154 | 154 | ||
155 | irq_gc_lock(gc); | 155 | irq_gc_lock(gc); |
156 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi); | 156 | irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi); |
157 | irq_gc_unlock(gc); | 157 | irq_gc_unlock(gc); |
158 | } | 158 | } |
159 | 159 | ||
160 | /** | 160 | /** |
161 | * irq_gc_set_wake - Set/clr wake bit for an interrupt | 161 | * irq_gc_set_wake - Set/clr wake bit for an interrupt |
162 | * @d: irq_data | 162 | * @d: irq_data |
163 | * | 163 | * |
164 | * For chips where the wake from suspend functionality is not | 164 | * For chips where the wake from suspend functionality is not |
165 | * configured in a separate register and the wakeup active state is | 165 | * configured in a separate register and the wakeup active state is |
166 | * just stored in a bitmask. | 166 | * just stored in a bitmask. |
167 | */ | 167 | */ |
168 | int irq_gc_set_wake(struct irq_data *d, unsigned int on) | 168 | int irq_gc_set_wake(struct irq_data *d, unsigned int on) |
169 | { | 169 | { |
170 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 170 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
171 | u32 mask = 1 << (d->irq - gc->irq_base); | 171 | u32 mask = 1 << (d->irq - gc->irq_base); |
172 | 172 | ||
173 | if (!(mask & gc->wake_enabled)) | 173 | if (!(mask & gc->wake_enabled)) |
174 | return -EINVAL; | 174 | return -EINVAL; |
175 | 175 | ||
176 | irq_gc_lock(gc); | 176 | irq_gc_lock(gc); |
177 | if (on) | 177 | if (on) |
178 | gc->wake_active |= mask; | 178 | gc->wake_active |= mask; |
179 | else | 179 | else |
180 | gc->wake_active &= ~mask; | 180 | gc->wake_active &= ~mask; |
181 | irq_gc_unlock(gc); | 181 | irq_gc_unlock(gc); |
182 | return 0; | 182 | return 0; |
183 | } | 183 | } |
184 | 184 | ||
185 | /** | 185 | /** |
186 | * irq_alloc_generic_chip - Allocate a generic chip and initialize it | 186 | * irq_alloc_generic_chip - Allocate a generic chip and initialize it |
187 | * @name: Name of the irq chip | 187 | * @name: Name of the irq chip |
188 | * @num_ct: Number of irq_chip_type instances associated with this chip | 188 | * @num_ct: Number of irq_chip_type instances associated with this chip |
189 | * @irq_base: Interrupt base nr for this chip | 189 | * @irq_base: Interrupt base nr for this chip |
190 | * @reg_base: Register base address (virtual) | 190 | * @reg_base: Register base address (virtual) |
191 | * @handler: Default flow handler associated with this chip | 191 | * @handler: Default flow handler associated with this chip |
192 | * | 192 | * |
193 | * Returns an initialized irq_chip_generic structure. The chip defaults | 193 | * Returns an initialized irq_chip_generic structure. The chip defaults |
194 | * to the primary (index 0) irq_chip_type and @handler | 194 | * to the primary (index 0) irq_chip_type and @handler |
195 | */ | 195 | */ |
196 | struct irq_chip_generic * | 196 | struct irq_chip_generic * |
197 | irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base, | 197 | irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base, |
198 | void __iomem *reg_base, irq_flow_handler_t handler) | 198 | void __iomem *reg_base, irq_flow_handler_t handler) |
199 | { | 199 | { |
200 | struct irq_chip_generic *gc; | 200 | struct irq_chip_generic *gc; |
201 | unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); | 201 | unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type); |
202 | 202 | ||
203 | gc = kzalloc(sz, GFP_KERNEL); | 203 | gc = kzalloc(sz, GFP_KERNEL); |
204 | if (gc) { | 204 | if (gc) { |
205 | raw_spin_lock_init(&gc->lock); | 205 | raw_spin_lock_init(&gc->lock); |
206 | gc->num_ct = num_ct; | 206 | gc->num_ct = num_ct; |
207 | gc->irq_base = irq_base; | 207 | gc->irq_base = irq_base; |
208 | gc->reg_base = reg_base; | 208 | gc->reg_base = reg_base; |
209 | gc->chip_types->chip.name = name; | 209 | gc->chip_types->chip.name = name; |
210 | gc->chip_types->handler = handler; | 210 | gc->chip_types->handler = handler; |
211 | } | 211 | } |
212 | return gc; | 212 | return gc; |
213 | } | 213 | } |
214 | 214 | ||
215 | /* | 215 | /* |
216 | * Separate lockdep class for interrupt chips which can nest the irq_desc | 216 | * Separate lockdep class for interrupt chips which can nest the irq_desc |
217 | * lock. | 217 | * lock. |
218 | */ | 218 | */ |
219 | static struct lock_class_key irq_nested_lock_class; | 219 | static struct lock_class_key irq_nested_lock_class; |
220 | 220 | ||
221 | /** | 221 | /** |
222 | * irq_setup_generic_chip - Setup a range of interrupts with a generic chip | 222 | * irq_setup_generic_chip - Setup a range of interrupts with a generic chip |
223 | * @gc: Generic irq chip holding all data | 223 | * @gc: Generic irq chip holding all data |
224 | * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base | 224 | * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base |
225 | * @flags: Flags for initialization | 225 | * @flags: Flags for initialization |
226 | * @clr: IRQ_* bits to clear | 226 | * @clr: IRQ_* bits to clear |
227 | * @set: IRQ_* bits to set | 227 | * @set: IRQ_* bits to set |
228 | * | 228 | * |
229 | * Set up max. 32 interrupts starting from gc->irq_base. Note, this | 229 | * Set up max. 32 interrupts starting from gc->irq_base. Note, this |
230 | * initializes all interrupts to the primary irq_chip_type and its | 230 | * initializes all interrupts to the primary irq_chip_type and its |
231 | * associated handler. | 231 | * associated handler. |
232 | */ | 232 | */ |
233 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, | 233 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, |
234 | enum irq_gc_flags flags, unsigned int clr, | 234 | enum irq_gc_flags flags, unsigned int clr, |
235 | unsigned int set) | 235 | unsigned int set) |
236 | { | 236 | { |
237 | struct irq_chip_type *ct = gc->chip_types; | 237 | struct irq_chip_type *ct = gc->chip_types; |
238 | unsigned int i; | 238 | unsigned int i; |
239 | 239 | ||
240 | raw_spin_lock(&gc_lock); | 240 | raw_spin_lock(&gc_lock); |
241 | list_add_tail(&gc->list, &gc_list); | 241 | list_add_tail(&gc->list, &gc_list); |
242 | raw_spin_unlock(&gc_lock); | 242 | raw_spin_unlock(&gc_lock); |
243 | 243 | ||
244 | /* Init mask cache? */ | 244 | /* Init mask cache? */ |
245 | if (flags & IRQ_GC_INIT_MASK_CACHE) | 245 | if (flags & IRQ_GC_INIT_MASK_CACHE) |
246 | gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); | 246 | gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask); |
247 | 247 | ||
248 | for (i = gc->irq_base; msk; msk >>= 1, i++) { | 248 | for (i = gc->irq_base; msk; msk >>= 1, i++) { |
249 | if (!msk & 0x01) | 249 | if (!(msk & 0x01)) |
250 | continue; | 250 | continue; |
251 | 251 | ||
252 | if (flags & IRQ_GC_INIT_NESTED_LOCK) | 252 | if (flags & IRQ_GC_INIT_NESTED_LOCK) |
253 | irq_set_lockdep_class(i, &irq_nested_lock_class); | 253 | irq_set_lockdep_class(i, &irq_nested_lock_class); |
254 | 254 | ||
255 | irq_set_chip_and_handler(i, &ct->chip, ct->handler); | 255 | irq_set_chip_and_handler(i, &ct->chip, ct->handler); |
256 | irq_set_chip_data(i, gc); | 256 | irq_set_chip_data(i, gc); |
257 | irq_modify_status(i, clr, set); | 257 | irq_modify_status(i, clr, set); |
258 | } | 258 | } |
259 | gc->irq_cnt = i - gc->irq_base; | 259 | gc->irq_cnt = i - gc->irq_base; |
260 | } | 260 | } |
261 | 261 | ||
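
The parenthesization fix in the loop just above (and again in irq_remove_generic_chip() below) is the "genirq: Fix wrong bit operation" change: `!` binds tighter than `&`, so `!msk & 0x01` evaluates as `(!msk) & 0x01`, which is 0 for every non-zero msk. The `continue` was therefore never taken and interrupts whose bit was clear in the mask got initialized anyway. A standalone demonstration of the two forms:

#include <stdio.h>

int main(void)
{
	unsigned int msk = 0x05;	/* bits 0 and 2 set, bit 1 clear */
	int i;

	for (i = 0; msk; msk >>= 1, i++) {
		/* buggy:  (!msk) & 0x01 -> 0 while msk != 0, never skips */
		int buggy = !msk & 0x01;
		/* fixed:  skips positions whose bit is clear (bit 1 here) */
		int fixed = !(msk & 0x01);

		printf("bit %d: buggy-skip=%d fixed-skip=%d\n", i, buggy, fixed);
	}
	return 0;
}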
262 | /** | 262 | /** |
263 | * irq_setup_alt_chip - Switch to alternative chip | 263 | * irq_setup_alt_chip - Switch to alternative chip |
264 | * @d: irq_data for this interrupt | 264 | * @d: irq_data for this interrupt |
265 | * @type: Flow type to be initialized | 265 | * @type: Flow type to be initialized |
266 | * | 266 | * |
267 | * Only to be called from chip->irq_set_type() callbacks. | 267 | * Only to be called from chip->irq_set_type() callbacks. |
268 | */ | 268 | */ |
269 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type) | 269 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type) |
270 | { | 270 | { |
271 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 271 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
272 | struct irq_chip_type *ct = gc->chip_types; | 272 | struct irq_chip_type *ct = gc->chip_types; |
273 | unsigned int i; | 273 | unsigned int i; |
274 | 274 | ||
275 | for (i = 0; i < gc->num_ct; i++, ct++) { | 275 | for (i = 0; i < gc->num_ct; i++, ct++) { |
276 | if (ct->type & type) { | 276 | if (ct->type & type) { |
277 | d->chip = &ct->chip; | 277 | d->chip = &ct->chip; |
278 | irq_data_to_desc(d)->handle_irq = ct->handler; | 278 | irq_data_to_desc(d)->handle_irq = ct->handler; |
279 | return 0; | 279 | return 0; |
280 | } | 280 | } |
281 | } | 281 | } |
282 | return -EINVAL; | 282 | return -EINVAL; |
283 | } | 283 | } |
284 | 284 | ||
285 | /** | 285 | /** |
286 | * irq_remove_generic_chip - Remove a chip | 286 | * irq_remove_generic_chip - Remove a chip |
287 | * @gc: Generic irq chip holding all data | 287 | * @gc: Generic irq chip holding all data |
288 | * @msk: Bitmask holding the irqs to remove relative to gc->irq_base | 288 | * @msk: Bitmask holding the irqs to remove relative to gc->irq_base |
289 | * @clr: IRQ_* bits to clear | 289 | * @clr: IRQ_* bits to clear |
290 | * @set: IRQ_* bits to set | 290 | * @set: IRQ_* bits to set |
291 | * | 291 | * |
292 | * Remove up to 32 interrupts starting from gc->irq_base. | 292 | * Remove up to 32 interrupts starting from gc->irq_base. |
293 | */ | 293 | */ |
294 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, | 294 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, |
295 | unsigned int clr, unsigned int set) | 295 | unsigned int clr, unsigned int set) |
296 | { | 296 | { |
297 | unsigned int i = gc->irq_base; | 297 | unsigned int i = gc->irq_base; |
298 | 298 | ||
299 | raw_spin_lock(&gc_lock); | 299 | raw_spin_lock(&gc_lock); |
300 | list_del(&gc->list); | 300 | list_del(&gc->list); |
301 | raw_spin_unlock(&gc_lock); | 301 | raw_spin_unlock(&gc_lock); |
302 | 302 | ||
303 | for (; msk; msk >>= 1, i++) { | 303 | for (; msk; msk >>= 1, i++) { |
304 | if (!msk & 0x01) | 304 | if (!(msk & 0x01)) |
305 | continue; | 305 | continue; |
306 | 306 | ||
307 | /* Remove handler first. That will mask the irq line */ | 307 | /* Remove handler first. That will mask the irq line */ |
308 | irq_set_handler(i, NULL); | 308 | irq_set_handler(i, NULL); |
309 | irq_set_chip(i, &no_irq_chip); | 309 | irq_set_chip(i, &no_irq_chip); |
310 | irq_set_chip_data(i, NULL); | 310 | irq_set_chip_data(i, NULL); |
311 | irq_modify_status(i, clr, set); | 311 | irq_modify_status(i, clr, set); |
312 | } | 312 | } |
313 | } | 313 | } |
314 | 314 | ||
315 | #ifdef CONFIG_PM | 315 | #ifdef CONFIG_PM |
316 | static int irq_gc_suspend(void) | 316 | static int irq_gc_suspend(void) |
317 | { | 317 | { |
318 | struct irq_chip_generic *gc; | 318 | struct irq_chip_generic *gc; |
319 | 319 | ||
320 | list_for_each_entry(gc, &gc_list, list) { | 320 | list_for_each_entry(gc, &gc_list, list) { |
321 | struct irq_chip_type *ct = gc->chip_types; | 321 | struct irq_chip_type *ct = gc->chip_types; |
322 | 322 | ||
323 | if (ct->chip.irq_suspend) | 323 | if (ct->chip.irq_suspend) |
324 | ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base)); | 324 | ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base)); |
325 | } | 325 | } |
326 | return 0; | 326 | return 0; |
327 | } | 327 | } |
328 | 328 | ||
329 | static void irq_gc_resume(void) | 329 | static void irq_gc_resume(void) |
330 | { | 330 | { |
331 | struct irq_chip_generic *gc; | 331 | struct irq_chip_generic *gc; |
332 | 332 | ||
333 | list_for_each_entry(gc, &gc_list, list) { | 333 | list_for_each_entry(gc, &gc_list, list) { |
334 | struct irq_chip_type *ct = gc->chip_types; | 334 | struct irq_chip_type *ct = gc->chip_types; |
335 | 335 | ||
336 | if (ct->chip.irq_resume) | 336 | if (ct->chip.irq_resume) |
337 | ct->chip.irq_resume(irq_get_irq_data(gc->irq_base)); | 337 | ct->chip.irq_resume(irq_get_irq_data(gc->irq_base)); |
338 | } | 338 | } |
339 | } | 339 | } |
340 | #else | 340 | #else |
341 | #define irq_gc_suspend NULL | 341 | #define irq_gc_suspend NULL |
342 | #define irq_gc_resume NULL | 342 | #define irq_gc_resume NULL |
343 | #endif | 343 | #endif |
344 | 344 | ||
345 | static void irq_gc_shutdown(void) | 345 | static void irq_gc_shutdown(void) |
346 | { | 346 | { |
347 | struct irq_chip_generic *gc; | 347 | struct irq_chip_generic *gc; |
348 | 348 | ||
349 | list_for_each_entry(gc, &gc_list, list) { | 349 | list_for_each_entry(gc, &gc_list, list) { |
350 | struct irq_chip_type *ct = gc->chip_types; | 350 | struct irq_chip_type *ct = gc->chip_types; |
351 | 351 | ||
352 | if (ct->chip.irq_pm_shutdown) | 352 | if (ct->chip.irq_pm_shutdown) |
353 | ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base)); | 353 | ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base)); |
354 | } | 354 | } |
355 | } | 355 | } |
356 | 356 | ||
357 | static struct syscore_ops irq_gc_syscore_ops = { | 357 | static struct syscore_ops irq_gc_syscore_ops = { |
358 | .suspend = irq_gc_suspend, | 358 | .suspend = irq_gc_suspend, |
359 | .resume = irq_gc_resume, | 359 | .resume = irq_gc_resume, |
360 | .shutdown = irq_gc_shutdown, | 360 | .shutdown = irq_gc_shutdown, |
361 | }; | 361 | }; |
362 | 362 | ||
363 | static int __init irq_gc_init_ops(void) | 363 | static int __init irq_gc_init_ops(void) |
364 | { | 364 | { |
365 | register_syscore_ops(&irq_gc_syscore_ops); | 365 | register_syscore_ops(&irq_gc_syscore_ops); |
366 | return 0; | 366 | return 0; |
367 | } | 367 | } |
368 | device_initcall(irq_gc_init_ops); | 368 | device_initcall(irq_gc_init_ops); |
369 | 369 |
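
To see how the library in this file fits together, here is a hedged sketch of a driver registering eight interrupts through it. Only the library calls and helpers are real; demo_base, irq_base and the register offsets are invented:

/*
 * Sketch under assumptions: register offsets and names are hypothetical.
 * One irq_chip_type, level-triggered flow, mask cache primed from hardware.
 */
#include <linux/irq.h>

static void __init demo_init_irq(void __iomem *demo_base, unsigned int irq_base)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("DEMO", 1, irq_base, demo_base,
				    handle_level_irq);
	if (!gc)
		return;

	ct = gc->chip_types;
	ct->regs.mask = 0x04;			/* hypothetical offsets */
	ct->regs.ack = 0x08;
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_ack = irq_gc_ack_set_bit;

	/* IRQ_MSK(8) == 0xff: set up eight irqs starting at irq_base */
	irq_setup_generic_chip(gc, IRQ_MSK(8), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
}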
kernel/irq/irqdesc.c
1 | /* | 1 | /* |
2 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | 2 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
3 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | 3 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King |
4 | * | 4 | * |
5 | * This file contains the interrupt descriptor management code | 5 | * This file contains the interrupt descriptor management code |
6 | * | 6 | * |
7 | * Detailed information is available in Documentation/DocBook/genericirq | 7 | * Detailed information is available in Documentation/DocBook/genericirq |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/radix-tree.h> | 15 | #include <linux/radix-tree.h> |
16 | #include <linux/bitmap.h> | 16 | #include <linux/bitmap.h> |
17 | 17 | ||
18 | #include "internals.h" | 18 | #include "internals.h" |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * lockdep: we want to handle all irq_desc locks as a single lock-class: | 21 | * lockdep: we want to handle all irq_desc locks as a single lock-class: |
22 | */ | 22 | */ |
23 | static struct lock_class_key irq_desc_lock_class; | 23 | static struct lock_class_key irq_desc_lock_class; |
24 | 24 | ||
25 | #if defined(CONFIG_SMP) | 25 | #if defined(CONFIG_SMP) |
26 | static void __init init_irq_default_affinity(void) | 26 | static void __init init_irq_default_affinity(void) |
27 | { | 27 | { |
28 | alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); | 28 | alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); |
29 | cpumask_setall(irq_default_affinity); | 29 | cpumask_setall(irq_default_affinity); |
30 | } | 30 | } |
31 | #else | 31 | #else |
32 | static void __init init_irq_default_affinity(void) | 32 | static void __init init_irq_default_affinity(void) |
33 | { | 33 | { |
34 | } | 34 | } |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | #ifdef CONFIG_SMP | 37 | #ifdef CONFIG_SMP |
38 | static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) | 38 | static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) |
39 | { | 39 | { |
40 | if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) | 40 | if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) |
41 | return -ENOMEM; | 41 | return -ENOMEM; |
42 | 42 | ||
43 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 43 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
44 | if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { | 44 | if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { |
45 | free_cpumask_var(desc->irq_data.affinity); | 45 | free_cpumask_var(desc->irq_data.affinity); |
46 | return -ENOMEM; | 46 | return -ENOMEM; |
47 | } | 47 | } |
48 | #endif | 48 | #endif |
49 | return 0; | 49 | return 0; |
50 | } | 50 | } |
51 | 51 | ||
52 | static void desc_smp_init(struct irq_desc *desc, int node) | 52 | static void desc_smp_init(struct irq_desc *desc, int node) |
53 | { | 53 | { |
54 | desc->irq_data.node = node; | 54 | desc->irq_data.node = node; |
55 | cpumask_copy(desc->irq_data.affinity, irq_default_affinity); | 55 | cpumask_copy(desc->irq_data.affinity, irq_default_affinity); |
56 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 56 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
57 | cpumask_clear(desc->pending_mask); | 57 | cpumask_clear(desc->pending_mask); |
58 | #endif | 58 | #endif |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline int desc_node(struct irq_desc *desc) | 61 | static inline int desc_node(struct irq_desc *desc) |
62 | { | 62 | { |
63 | return desc->irq_data.node; | 63 | return desc->irq_data.node; |
64 | } | 64 | } |
65 | 65 | ||
66 | #else | 66 | #else |
67 | static inline int | 67 | static inline int |
68 | alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } | 68 | alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } |
69 | static inline void desc_smp_init(struct irq_desc *desc, int node) { } | 69 | static inline void desc_smp_init(struct irq_desc *desc, int node) { } |
70 | static inline int desc_node(struct irq_desc *desc) { return 0; } | 70 | static inline int desc_node(struct irq_desc *desc) { return 0; } |
71 | #endif | 71 | #endif |
72 | 72 | ||
73 | static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node) | 73 | static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, |
74 | struct module *owner) | ||
74 | { | 75 | { |
75 | int cpu; | 76 | int cpu; |
76 | 77 | ||
77 | desc->irq_data.irq = irq; | 78 | desc->irq_data.irq = irq; |
78 | desc->irq_data.chip = &no_irq_chip; | 79 | desc->irq_data.chip = &no_irq_chip; |
79 | desc->irq_data.chip_data = NULL; | 80 | desc->irq_data.chip_data = NULL; |
80 | desc->irq_data.handler_data = NULL; | 81 | desc->irq_data.handler_data = NULL; |
81 | desc->irq_data.msi_desc = NULL; | 82 | desc->irq_data.msi_desc = NULL; |
82 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); | 83 | irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); |
83 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); | 84 | irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); |
84 | desc->handle_irq = handle_bad_irq; | 85 | desc->handle_irq = handle_bad_irq; |
85 | desc->depth = 1; | 86 | desc->depth = 1; |
86 | desc->irq_count = 0; | 87 | desc->irq_count = 0; |
87 | desc->irqs_unhandled = 0; | 88 | desc->irqs_unhandled = 0; |
88 | desc->name = NULL; | 89 | desc->name = NULL; |
90 | desc->owner = owner; | ||
89 | for_each_possible_cpu(cpu) | 91 | for_each_possible_cpu(cpu) |
90 | *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; | 92 | *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; |
91 | desc_smp_init(desc, node); | 93 | desc_smp_init(desc, node); |
92 | } | 94 | } |
93 | 95 | ||
94 | int nr_irqs = NR_IRQS; | 96 | int nr_irqs = NR_IRQS; |
95 | EXPORT_SYMBOL_GPL(nr_irqs); | 97 | EXPORT_SYMBOL_GPL(nr_irqs); |
96 | 98 | ||
97 | static DEFINE_MUTEX(sparse_irq_lock); | 99 | static DEFINE_MUTEX(sparse_irq_lock); |
98 | static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); | 100 | static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); |
99 | 101 | ||
100 | #ifdef CONFIG_SPARSE_IRQ | 102 | #ifdef CONFIG_SPARSE_IRQ |
101 | 103 | ||
102 | static RADIX_TREE(irq_desc_tree, GFP_KERNEL); | 104 | static RADIX_TREE(irq_desc_tree, GFP_KERNEL); |
103 | 105 | ||
104 | static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) | 106 | static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) |
105 | { | 107 | { |
106 | radix_tree_insert(&irq_desc_tree, irq, desc); | 108 | radix_tree_insert(&irq_desc_tree, irq, desc); |
107 | } | 109 | } |
108 | 110 | ||
109 | struct irq_desc *irq_to_desc(unsigned int irq) | 111 | struct irq_desc *irq_to_desc(unsigned int irq) |
110 | { | 112 | { |
111 | return radix_tree_lookup(&irq_desc_tree, irq); | 113 | return radix_tree_lookup(&irq_desc_tree, irq); |
112 | } | 114 | } |
113 | 115 | ||
114 | static void delete_irq_desc(unsigned int irq) | 116 | static void delete_irq_desc(unsigned int irq) |
115 | { | 117 | { |
116 | radix_tree_delete(&irq_desc_tree, irq); | 118 | radix_tree_delete(&irq_desc_tree, irq); |
117 | } | 119 | } |
118 | 120 | ||
119 | #ifdef CONFIG_SMP | 121 | #ifdef CONFIG_SMP |
120 | static void free_masks(struct irq_desc *desc) | 122 | static void free_masks(struct irq_desc *desc) |
121 | { | 123 | { |
122 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 124 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
123 | free_cpumask_var(desc->pending_mask); | 125 | free_cpumask_var(desc->pending_mask); |
124 | #endif | 126 | #endif |
125 | free_cpumask_var(desc->irq_data.affinity); | 127 | free_cpumask_var(desc->irq_data.affinity); |
126 | } | 128 | } |
127 | #else | 129 | #else |
128 | static inline void free_masks(struct irq_desc *desc) { } | 130 | static inline void free_masks(struct irq_desc *desc) { } |
129 | #endif | 131 | #endif |
130 | 132 | ||
131 | static struct irq_desc *alloc_desc(int irq, int node) | 133 | static struct irq_desc *alloc_desc(int irq, int node, struct module *owner) |
132 | { | 134 | { |
133 | struct irq_desc *desc; | 135 | struct irq_desc *desc; |
134 | gfp_t gfp = GFP_KERNEL; | 136 | gfp_t gfp = GFP_KERNEL; |
135 | 137 | ||
136 | desc = kzalloc_node(sizeof(*desc), gfp, node); | 138 | desc = kzalloc_node(sizeof(*desc), gfp, node); |
137 | if (!desc) | 139 | if (!desc) |
138 | return NULL; | 140 | return NULL; |
139 | /* allocate based on nr_cpu_ids */ | 141 | /* allocate based on nr_cpu_ids */ |
140 | desc->kstat_irqs = alloc_percpu(unsigned int); | 142 | desc->kstat_irqs = alloc_percpu(unsigned int); |
141 | if (!desc->kstat_irqs) | 143 | if (!desc->kstat_irqs) |
142 | goto err_desc; | 144 | goto err_desc; |
143 | 145 | ||
144 | if (alloc_masks(desc, gfp, node)) | 146 | if (alloc_masks(desc, gfp, node)) |
145 | goto err_kstat; | 147 | goto err_kstat; |
146 | 148 | ||
147 | raw_spin_lock_init(&desc->lock); | 149 | raw_spin_lock_init(&desc->lock); |
148 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 150 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
149 | 151 | ||
150 | desc_set_defaults(irq, desc, node); | 152 | desc_set_defaults(irq, desc, node, owner); |
151 | 153 | ||
152 | return desc; | 154 | return desc; |
153 | 155 | ||
154 | err_kstat: | 156 | err_kstat: |
155 | free_percpu(desc->kstat_irqs); | 157 | free_percpu(desc->kstat_irqs); |
156 | err_desc: | 158 | err_desc: |
157 | kfree(desc); | 159 | kfree(desc); |
158 | return NULL; | 160 | return NULL; |
159 | } | 161 | } |
160 | 162 | ||
161 | static void free_desc(unsigned int irq) | 163 | static void free_desc(unsigned int irq) |
162 | { | 164 | { |
163 | struct irq_desc *desc = irq_to_desc(irq); | 165 | struct irq_desc *desc = irq_to_desc(irq); |
164 | 166 | ||
165 | unregister_irq_proc(irq, desc); | 167 | unregister_irq_proc(irq, desc); |
166 | 168 | ||
167 | mutex_lock(&sparse_irq_lock); | 169 | mutex_lock(&sparse_irq_lock); |
168 | delete_irq_desc(irq); | 170 | delete_irq_desc(irq); |
169 | mutex_unlock(&sparse_irq_lock); | 171 | mutex_unlock(&sparse_irq_lock); |
170 | 172 | ||
171 | free_masks(desc); | 173 | free_masks(desc); |
172 | free_percpu(desc->kstat_irqs); | 174 | free_percpu(desc->kstat_irqs); |
173 | kfree(desc); | 175 | kfree(desc); |
174 | } | 176 | } |
175 | 177 | ||
176 | static int alloc_descs(unsigned int start, unsigned int cnt, int node) | 178 | static int alloc_descs(unsigned int start, unsigned int cnt, int node, |
179 | struct module *owner) | ||
177 | { | 180 | { |
178 | struct irq_desc *desc; | 181 | struct irq_desc *desc; |
179 | int i; | 182 | int i; |
180 | 183 | ||
181 | for (i = 0; i < cnt; i++) { | 184 | for (i = 0; i < cnt; i++) { |
182 | desc = alloc_desc(start + i, node); | 185 | desc = alloc_desc(start + i, node, owner); |
183 | if (!desc) | 186 | if (!desc) |
184 | goto err; | 187 | goto err; |
185 | mutex_lock(&sparse_irq_lock); | 188 | mutex_lock(&sparse_irq_lock); |
186 | irq_insert_desc(start + i, desc); | 189 | irq_insert_desc(start + i, desc); |
187 | mutex_unlock(&sparse_irq_lock); | 190 | mutex_unlock(&sparse_irq_lock); |
188 | } | 191 | } |
189 | return start; | 192 | return start; |
190 | 193 | ||
191 | err: | 194 | err: |
192 | for (i--; i >= 0; i--) | 195 | for (i--; i >= 0; i--) |
193 | free_desc(start + i); | 196 | free_desc(start + i); |
194 | 197 | ||
195 | mutex_lock(&sparse_irq_lock); | 198 | mutex_lock(&sparse_irq_lock); |
196 | bitmap_clear(allocated_irqs, start, cnt); | 199 | bitmap_clear(allocated_irqs, start, cnt); |
197 | mutex_unlock(&sparse_irq_lock); | 200 | mutex_unlock(&sparse_irq_lock); |
198 | return -ENOMEM; | 201 | return -ENOMEM; |
199 | } | 202 | } |
200 | 203 | ||
201 | static int irq_expand_nr_irqs(unsigned int nr) | 204 | static int irq_expand_nr_irqs(unsigned int nr) |
202 | { | 205 | { |
203 | if (nr > IRQ_BITMAP_BITS) | 206 | if (nr > IRQ_BITMAP_BITS) |
204 | return -ENOMEM; | 207 | return -ENOMEM; |
205 | nr_irqs = nr; | 208 | nr_irqs = nr; |
206 | return 0; | 209 | return 0; |
207 | } | 210 | } |
208 | 211 | ||
209 | int __init early_irq_init(void) | 212 | int __init early_irq_init(void) |
210 | { | 213 | { |
211 | int i, initcnt, node = first_online_node; | 214 | int i, initcnt, node = first_online_node; |
212 | struct irq_desc *desc; | 215 | struct irq_desc *desc; |
213 | 216 | ||
214 | init_irq_default_affinity(); | 217 | init_irq_default_affinity(); |
215 | 218 | ||
216 | /* Let arch update nr_irqs and return the nr of preallocated irqs */ | 219 | /* Let arch update nr_irqs and return the nr of preallocated irqs */ |
217 | initcnt = arch_probe_nr_irqs(); | 220 | initcnt = arch_probe_nr_irqs(); |
218 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); | 221 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); |
219 | 222 | ||
220 | if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) | 223 | if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) |
221 | nr_irqs = IRQ_BITMAP_BITS; | 224 | nr_irqs = IRQ_BITMAP_BITS; |
222 | 225 | ||
223 | if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) | 226 | if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) |
224 | initcnt = IRQ_BITMAP_BITS; | 227 | initcnt = IRQ_BITMAP_BITS; |
225 | 228 | ||
226 | if (initcnt > nr_irqs) | 229 | if (initcnt > nr_irqs) |
227 | nr_irqs = initcnt; | 230 | nr_irqs = initcnt; |
228 | 231 | ||
229 | for (i = 0; i < initcnt; i++) { | 232 | for (i = 0; i < initcnt; i++) { |
230 | desc = alloc_desc(i, node); | 233 | desc = alloc_desc(i, node, NULL); |
231 | set_bit(i, allocated_irqs); | 234 | set_bit(i, allocated_irqs); |
232 | irq_insert_desc(i, desc); | 235 | irq_insert_desc(i, desc); |
233 | } | 236 | } |
234 | return arch_early_irq_init(); | 237 | return arch_early_irq_init(); |
235 | } | 238 | } |
236 | 239 | ||
237 | #else /* !CONFIG_SPARSE_IRQ */ | 240 | #else /* !CONFIG_SPARSE_IRQ */ |
238 | 241 | ||
239 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { | 242 | struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { |
240 | [0 ... NR_IRQS-1] = { | 243 | [0 ... NR_IRQS-1] = { |
241 | .handle_irq = handle_bad_irq, | 244 | .handle_irq = handle_bad_irq, |
242 | .depth = 1, | 245 | .depth = 1, |
243 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), | 246 | .lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock), |
244 | } | 247 | } |
245 | }; | 248 | }; |
246 | 249 | ||
247 | int __init early_irq_init(void) | 250 | int __init early_irq_init(void) |
248 | { | 251 | { |
249 | int count, i, node = first_online_node; | 252 | int count, i, node = first_online_node; |
250 | struct irq_desc *desc; | 253 | struct irq_desc *desc; |
251 | 254 | ||
252 | init_irq_default_affinity(); | 255 | init_irq_default_affinity(); |
253 | 256 | ||
254 | printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); | 257 | printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS); |
255 | 258 | ||
256 | desc = irq_desc; | 259 | desc = irq_desc; |
257 | count = ARRAY_SIZE(irq_desc); | 260 | count = ARRAY_SIZE(irq_desc); |
258 | 261 | ||
259 | for (i = 0; i < count; i++) { | 262 | for (i = 0; i < count; i++) { |
260 | desc[i].kstat_irqs = alloc_percpu(unsigned int); | 263 | desc[i].kstat_irqs = alloc_percpu(unsigned int); |
261 | alloc_masks(&desc[i], GFP_KERNEL, node); | 264 | alloc_masks(&desc[i], GFP_KERNEL, node); |
262 | raw_spin_lock_init(&desc[i].lock); | 265 | raw_spin_lock_init(&desc[i].lock); |
263 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); | 266 | lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); |
264 | desc_set_defaults(i, &desc[i], node); | 267 | desc_set_defaults(i, &desc[i], node, NULL); |
265 | } | 268 | } |
266 | return arch_early_irq_init(); | 269 | return arch_early_irq_init(); |
267 | } | 270 | } |
268 | 271 | ||
269 | struct irq_desc *irq_to_desc(unsigned int irq) | 272 | struct irq_desc *irq_to_desc(unsigned int irq) |
270 | { | 273 | { |
271 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; | 274 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; |
272 | } | 275 | } |
273 | 276 | ||
274 | static void free_desc(unsigned int irq) | 277 | static void free_desc(unsigned int irq) |
275 | { | 278 | { |
276 | dynamic_irq_cleanup(irq); | 279 | dynamic_irq_cleanup(irq); |
277 | } | 280 | } |
278 | 281 | ||
279 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node) | 282 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, |
283 | struct module *owner) | ||
280 | { | 284 | { |
285 | u32 i; | ||
286 | |||
287 | for (i = 0; i < cnt; i++) { | ||
288 | struct irq_desc *desc = irq_to_desc(start + i); | ||
289 | |||
290 | desc->owner = owner; | ||
291 | } | ||
281 | return start; | 292 | return start; |
282 | } | 293 | } |
283 | 294 | ||
284 | static int irq_expand_nr_irqs(unsigned int nr) | 295 | static int irq_expand_nr_irqs(unsigned int nr) |
285 | { | 296 | { |
286 | return -ENOMEM; | 297 | return -ENOMEM; |
287 | } | 298 | } |
288 | 299 | ||
289 | #endif /* !CONFIG_SPARSE_IRQ */ | 300 | #endif /* !CONFIG_SPARSE_IRQ */ |
290 | 301 | ||
291 | /** | 302 | /** |
292 | * generic_handle_irq - Invoke the handler for a particular irq | 303 | * generic_handle_irq - Invoke the handler for a particular irq |
293 | * @irq: The irq number to handle | 304 | * @irq: The irq number to handle |
294 | * | 305 | * |
295 | */ | 306 | */ |
296 | int generic_handle_irq(unsigned int irq) | 307 | int generic_handle_irq(unsigned int irq) |
297 | { | 308 | { |
298 | struct irq_desc *desc = irq_to_desc(irq); | 309 | struct irq_desc *desc = irq_to_desc(irq); |
299 | 310 | ||
300 | if (!desc) | 311 | if (!desc) |
301 | return -EINVAL; | 312 | return -EINVAL; |
302 | generic_handle_irq_desc(irq, desc); | 313 | generic_handle_irq_desc(irq, desc); |
303 | return 0; | 314 | return 0; |
304 | } | 315 | } |
305 | EXPORT_SYMBOL_GPL(generic_handle_irq); | 316 | EXPORT_SYMBOL_GPL(generic_handle_irq); |
306 | 317 | ||
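
generic_handle_irq() is what a chained demultiplexing handler typically calls once it has decoded which child line fired; such a handler would be installed with irq_set_chained_handler(). A hedged sketch, with an invented 8-bit status register at offset 0:

/*
 * Sketch: demux a hypothetical status register into child interrupts.
 * The register layout and handler-data wiring are assumptions.
 */
static void demo_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
	u32 pending = irq_reg_readl(gc->reg_base + 0x00);	/* status */

	while (pending) {
		unsigned int bit = __ffs(pending);

		generic_handle_irq(gc->irq_base + bit);
		pending &= ~(1U << bit);
	}
}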
307 | /* Dynamic interrupt handling */ | 318 | /* Dynamic interrupt handling */ |
308 | 319 | ||
309 | /** | 320 | /** |
310 | * irq_free_descs - free irq descriptors | 321 | * irq_free_descs - free irq descriptors |
311 | * @from: Start of descriptor range | 322 | * @from: Start of descriptor range |
312 | * @cnt: Number of consecutive irqs to free | 323 | * @cnt: Number of consecutive irqs to free |
313 | */ | 324 | */ |
314 | void irq_free_descs(unsigned int from, unsigned int cnt) | 325 | void irq_free_descs(unsigned int from, unsigned int cnt) |
315 | { | 326 | { |
316 | int i; | 327 | int i; |
317 | 328 | ||
318 | if (from >= nr_irqs || (from + cnt) > nr_irqs) | 329 | if (from >= nr_irqs || (from + cnt) > nr_irqs) |
319 | return; | 330 | return; |
320 | 331 | ||
321 | for (i = 0; i < cnt; i++) | 332 | for (i = 0; i < cnt; i++) |
322 | free_desc(from + i); | 333 | free_desc(from + i); |
323 | 334 | ||
324 | mutex_lock(&sparse_irq_lock); | 335 | mutex_lock(&sparse_irq_lock); |
325 | bitmap_clear(allocated_irqs, from, cnt); | 336 | bitmap_clear(allocated_irqs, from, cnt); |
326 | mutex_unlock(&sparse_irq_lock); | 337 | mutex_unlock(&sparse_irq_lock); |
327 | } | 338 | } |
328 | EXPORT_SYMBOL_GPL(irq_free_descs); | 339 | EXPORT_SYMBOL_GPL(irq_free_descs); |
329 | 340 | ||
330 | /** | 341 | /** |
331 | * irq_alloc_descs - allocate and initialize a range of irq descriptors | 342 | * irq_alloc_descs - allocate and initialize a range of irq descriptors |
332 | * @irq: Allocate for specific irq number if irq >= 0 | 343 | * @irq: Allocate for specific irq number if irq >= 0 |
333 | * @from: Start the search from this irq number | 344 | * @from: Start the search from this irq number |
334 | * @cnt: Number of consecutive irqs to allocate. | 345 | * @cnt: Number of consecutive irqs to allocate. |
335 | * @node: Preferred node on which the irq descriptor should be allocated | 346 | * @node: Preferred node on which the irq descriptor should be allocated |
336 | * | 347 | * |
337 | * Returns the first irq number or error code | 348 | * Returns the first irq number or error code |
338 | */ | 349 | */ |
339 | int __ref | 350 | int __ref |
340 | irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node) | 351 | __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
352 | struct module *owner) | ||
341 | { | 353 | { |
342 | int start, ret; | 354 | int start, ret; |
343 | 355 | ||
344 | if (!cnt) | 356 | if (!cnt) |
345 | return -EINVAL; | 357 | return -EINVAL; |
346 | 358 | ||
347 | if (irq >= 0) { | 359 | if (irq >= 0) { |
348 | if (from > irq) | 360 | if (from > irq) |
349 | return -EINVAL; | 361 | return -EINVAL; |
350 | from = irq; | 362 | from = irq; |
351 | } | 363 | } |
352 | 364 | ||
353 | mutex_lock(&sparse_irq_lock); | 365 | mutex_lock(&sparse_irq_lock); |
354 | 366 | ||
355 | start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS, | 367 | start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS, |
356 | from, cnt, 0); | 368 | from, cnt, 0); |
357 | ret = -EEXIST; | 369 | ret = -EEXIST; |
358 | if (irq >= 0 && start != irq) | 370 | if (irq >= 0 && start != irq) |
359 | goto err; | 371 | goto err; |
360 | 372 | ||
361 | if (start + cnt > nr_irqs) { | 373 | if (start + cnt > nr_irqs) { |
362 | ret = irq_expand_nr_irqs(start + cnt); | 374 | ret = irq_expand_nr_irqs(start + cnt); |
363 | if (ret) | 375 | if (ret) |
364 | goto err; | 376 | goto err; |
365 | } | 377 | } |
366 | 378 | ||
367 | bitmap_set(allocated_irqs, start, cnt); | 379 | bitmap_set(allocated_irqs, start, cnt); |
368 | mutex_unlock(&sparse_irq_lock); | 380 | mutex_unlock(&sparse_irq_lock); |
369 | return alloc_descs(start, cnt, node); | 381 | return alloc_descs(start, cnt, node, owner); |
370 | 382 | ||
371 | err: | 383 | err: |
372 | mutex_unlock(&sparse_irq_lock); | 384 | mutex_unlock(&sparse_irq_lock); |
373 | return ret; | 385 | return ret; |
374 | } | 386 | } |
375 | EXPORT_SYMBOL_GPL(irq_alloc_descs); | 387 | EXPORT_SYMBOL_GPL(__irq_alloc_descs); |
376 | 388 | ||
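
With the extra owner parameter, callers are expected to keep using an irq_alloc_descs() wrapper that supplies THIS_MODULE rather than calling __irq_alloc_descs() directly. The wrapper lives in include/linux/irq.h, outside the hunks shown, so its exact definition below is an assumption, as is the driver usage:

/*
 * Assumed wrapper (not in the hunks shown): existing callers keep their
 * signature and the owning module is recorded transparently.
 */
#define irq_alloc_descs(irq, from, cnt, node)	\
	__irq_alloc_descs(irq, from, cnt, node, THIS_MODULE)

/* Hypothetical driver usage: grab 4 consecutive descriptors on any node */
static int __init demo_grab_irqs(void)
{
	int base = irq_alloc_descs(-1, 0, 4, -1);

	if (base < 0)
		return base;
	/* ...set up chips/handlers for base..base+3, or roll back: */
	irq_free_descs(base, 4);
	return 0;
}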
377 | /** | 389 | /** |
378 | * irq_reserve_irqs - mark irqs allocated | 390 | * irq_reserve_irqs - mark irqs allocated |
379 | * @from: mark from irq number | 391 | * @from: mark from irq number |
380 | * @cnt: number of irqs to mark | 392 | * @cnt: number of irqs to mark |
381 | * | 393 | * |
382 | * Returns 0 on success or an appropriate error code | 394 | * Returns 0 on success or an appropriate error code |
383 | */ | 395 | */ |
384 | int irq_reserve_irqs(unsigned int from, unsigned int cnt) | 396 | int irq_reserve_irqs(unsigned int from, unsigned int cnt) |
385 | { | 397 | { |
386 | unsigned int start; | 398 | unsigned int start; |
387 | int ret = 0; | 399 | int ret = 0; |
388 | 400 | ||
389 | if (!cnt || (from + cnt) > nr_irqs) | 401 | if (!cnt || (from + cnt) > nr_irqs) |
390 | return -EINVAL; | 402 | return -EINVAL; |
391 | 403 | ||
392 | mutex_lock(&sparse_irq_lock); | 404 | mutex_lock(&sparse_irq_lock); |
393 | start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); | 405 | start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0); |
394 | if (start == from) | 406 | if (start == from) |
395 | bitmap_set(allocated_irqs, start, cnt); | 407 | bitmap_set(allocated_irqs, start, cnt); |
396 | else | 408 | else |
397 | ret = -EEXIST; | 409 | ret = -EEXIST; |
398 | mutex_unlock(&sparse_irq_lock); | 410 | mutex_unlock(&sparse_irq_lock); |
399 | return ret; | 411 | return ret; |
400 | } | 412 | } |
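A hedged sketch of the intended use: early arch code that owns a fixed legacy range can mark it busy so the allocator above skips it (the range 0-15 is illustrative):

	/* e.g. from architecture init code */
	if (irq_reserve_irqs(0, 16))
		pr_warn("legacy irq range 0-15 already in use\n");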
401 | 413 | ||
402 | /** | 414 | /** |
403 | * irq_get_next_irq - get next allocated irq number | 415 | * irq_get_next_irq - get next allocated irq number |
404 | * @offset: where to start the search | 416 | * @offset: where to start the search |
405 | * | 417 | * |
406 | * Returns next irq number after offset or nr_irqs if none is found. | 418 | * Returns next irq number after offset or nr_irqs if none is found. |
407 | */ | 419 | */ |
408 | unsigned int irq_get_next_irq(unsigned int offset) | 420 | unsigned int irq_get_next_irq(unsigned int offset) |
409 | { | 421 | { |
410 | return find_next_bit(allocated_irqs, nr_irqs, offset); | 422 | return find_next_bit(allocated_irqs, nr_irqs, offset); |
411 | } | 423 | } |
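Because it returns nr_irqs when nothing further is set, the natural caller pattern is a scan loop; a minimal sketch:

	unsigned int irq;

	for (irq = irq_get_next_irq(0); irq < nr_irqs;
	     irq = irq_get_next_irq(irq + 1)) {
		/* irq is an allocated interrupt number here */
	}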
412 | 424 | ||
413 | struct irq_desc * | 425 | struct irq_desc * |
414 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus) | 426 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus) |
415 | { | 427 | { |
416 | struct irq_desc *desc = irq_to_desc(irq); | 428 | struct irq_desc *desc = irq_to_desc(irq); |
417 | 429 | ||
418 | if (desc) { | 430 | if (desc) { |
419 | if (bus) | 431 | if (bus) |
420 | chip_bus_lock(desc); | 432 | chip_bus_lock(desc); |
421 | raw_spin_lock_irqsave(&desc->lock, *flags); | 433 | raw_spin_lock_irqsave(&desc->lock, *flags); |
422 | } | 434 | } |
423 | return desc; | 435 | return desc; |
424 | } | 436 | } |
425 | 437 | ||
426 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) | 438 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) |
427 | { | 439 | { |
428 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 440 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
429 | if (bus) | 441 | if (bus) |
430 | chip_bus_sync_unlock(desc); | 442 | chip_bus_sync_unlock(desc); |
431 | } | 443 | } |
432 | 444 | ||
433 | /** | 445 | /** |
434 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq | 446 | * dynamic_irq_cleanup - cleanup a dynamically allocated irq |
435 | * @irq: irq number to initialize | 447 | * @irq: irq number to initialize |
436 | */ | 448 | */ |
437 | void dynamic_irq_cleanup(unsigned int irq) | 449 | void dynamic_irq_cleanup(unsigned int irq) |
438 | { | 450 | { |
439 | struct irq_desc *desc = irq_to_desc(irq); | 451 | struct irq_desc *desc = irq_to_desc(irq); |
440 | unsigned long flags; | 452 | unsigned long flags; |
441 | 453 | ||
442 | raw_spin_lock_irqsave(&desc->lock, flags); | 454 | raw_spin_lock_irqsave(&desc->lock, flags); |
443 | desc_set_defaults(irq, desc, desc_node(desc)); | 455 | desc_set_defaults(irq, desc, desc_node(desc), NULL); |
444 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 456 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
445 | } | 457 | } |
446 | 458 | ||
447 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | 459 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) |
448 | { | 460 | { |
449 | struct irq_desc *desc = irq_to_desc(irq); | 461 | struct irq_desc *desc = irq_to_desc(irq); |
450 | 462 | ||
451 | return desc && desc->kstat_irqs ? | 463 | return desc && desc->kstat_irqs ? |
452 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; | 464 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; |
453 | } | 465 | } |
454 | 466 | ||
455 | unsigned int kstat_irqs(unsigned int irq) | 467 | unsigned int kstat_irqs(unsigned int irq) |
456 | { | 468 | { |
457 | struct irq_desc *desc = irq_to_desc(irq); | 469 | struct irq_desc *desc = irq_to_desc(irq); |
458 | int cpu; | 470 | int cpu; |
459 | int sum = 0; | 471 | int sum = 0; |
460 | 472 | ||
461 | if (!desc || !desc->kstat_irqs) | 473 | if (!desc || !desc->kstat_irqs) |
462 | return 0; | 474 | return 0; |
463 | for_each_possible_cpu(cpu) | 475 | for_each_possible_cpu(cpu) |
464 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); | 476 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); |
465 | return sum; | 477 | return sum; |
466 | } | 478 | } |
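These accessors back per-line interrupt accounting, e.g. /proc/interrupts-style output. A sketch, assuming it runs inside a seq_file show callback where m is the seq_file:

	seq_printf(m, "irq %u: %u total, %u on CPU0\n",
		   irq, kstat_irqs(irq), kstat_irqs_cpu(irq, 0));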
467 | 479 |
kernel/irq/manage.c
1 | /* | 1 | /* |
2 | * linux/kernel/irq/manage.c | 2 | * linux/kernel/irq/manage.c |
3 | * | 3 | * |
4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | 4 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
5 | * Copyright (C) 2005-2006 Thomas Gleixner | 5 | * Copyright (C) 2005-2006 Thomas Gleixner |
6 | * | 6 | * |
7 | * This file contains driver APIs to the irq subsystem. | 7 | * This file contains driver APIs to the irq subsystem. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
11 | #include <linux/kthread.h> | 11 | #include <linux/kthread.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/random.h> | 13 | #include <linux/random.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | 17 | ||
18 | #include "internals.h" | 18 | #include "internals.h" |
19 | 19 | ||
20 | #ifdef CONFIG_IRQ_FORCED_THREADING | 20 | #ifdef CONFIG_IRQ_FORCED_THREADING |
21 | __read_mostly bool force_irqthreads; | 21 | __read_mostly bool force_irqthreads; |
22 | 22 | ||
23 | static int __init setup_forced_irqthreads(char *arg) | 23 | static int __init setup_forced_irqthreads(char *arg) |
24 | { | 24 | { |
25 | force_irqthreads = true; | 25 | force_irqthreads = true; |
26 | return 0; | 26 | return 0; |
27 | } | 27 | } |
28 | early_param("threadirqs", setup_forced_irqthreads); | 28 | early_param("threadirqs", setup_forced_irqthreads); |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | /** | 31 | /** |
32 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) | 32 | * synchronize_irq - wait for pending IRQ handlers (on other CPUs) |
33 | * @irq: interrupt number to wait for | 33 | * @irq: interrupt number to wait for |
34 | * | 34 | * |
35 | * This function waits for any pending IRQ handlers for this interrupt | 35 | * This function waits for any pending IRQ handlers for this interrupt |
36 | * to complete before returning. If you use this function while | 36 | * to complete before returning. If you use this function while |
37 | * holding a resource the IRQ handler may need you will deadlock. | 37 | * holding a resource the IRQ handler may need you will deadlock. |
38 | * | 38 | * |
39 | * This function may be called - with care - from IRQ context. | 39 | * This function may be called - with care - from IRQ context. |
40 | */ | 40 | */ |
41 | void synchronize_irq(unsigned int irq) | 41 | void synchronize_irq(unsigned int irq) |
42 | { | 42 | { |
43 | struct irq_desc *desc = irq_to_desc(irq); | 43 | struct irq_desc *desc = irq_to_desc(irq); |
44 | bool inprogress; | 44 | bool inprogress; |
45 | 45 | ||
46 | if (!desc) | 46 | if (!desc) |
47 | return; | 47 | return; |
48 | 48 | ||
49 | do { | 49 | do { |
50 | unsigned long flags; | 50 | unsigned long flags; |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * Wait until we're out of the critical section. This might | 53 | * Wait until we're out of the critical section. This might |
54 | * give the wrong answer due to the lack of memory barriers. | 54 | * give the wrong answer due to the lack of memory barriers. |
55 | */ | 55 | */ |
56 | while (irqd_irq_inprogress(&desc->irq_data)) | 56 | while (irqd_irq_inprogress(&desc->irq_data)) |
57 | cpu_relax(); | 57 | cpu_relax(); |
58 | 58 | ||
59 | /* Ok, that indicated we're done: double-check carefully. */ | 59 | /* Ok, that indicated we're done: double-check carefully. */ |
60 | raw_spin_lock_irqsave(&desc->lock, flags); | 60 | raw_spin_lock_irqsave(&desc->lock, flags); |
61 | inprogress = irqd_irq_inprogress(&desc->irq_data); | 61 | inprogress = irqd_irq_inprogress(&desc->irq_data); |
62 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 62 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
63 | 63 | ||
64 | /* Oops, that failed? */ | 64 | /* Oops, that failed? */ |
65 | } while (inprogress); | 65 | } while (inprogress); |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * We made sure that no hardirq handler is running. Now verify | 68 | * We made sure that no hardirq handler is running. Now verify |
69 | * that no threaded handlers are active. | 69 | * that no threaded handlers are active. |
70 | */ | 70 | */ |
71 | wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); | 71 | wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); |
72 | } | 72 | } |
73 | EXPORT_SYMBOL(synchronize_irq); | 73 | EXPORT_SYMBOL(synchronize_irq); |
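As the comment warns, the classic caller is a device teardown path: quiesce the source first, synchronize, then free what the handler touches. A hedged sketch with hypothetical "foo" driver helpers:

	static void foo_shutdown(struct foo_dev *fd)
	{
		foo_hw_mask_irq(fd);		/* assumed helper: stop new interrupts */
		synchronize_irq(fd->irq);	/* wait out handlers already running */
		kfree(fd->dma_ring);		/* now safe: no handler can touch it */
	}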
74 | 74 | ||
75 | #ifdef CONFIG_SMP | 75 | #ifdef CONFIG_SMP |
76 | cpumask_var_t irq_default_affinity; | 76 | cpumask_var_t irq_default_affinity; |
77 | 77 | ||
78 | /** | 78 | /** |
79 | * irq_can_set_affinity - Check if the affinity of a given irq can be set | 79 | * irq_can_set_affinity - Check if the affinity of a given irq can be set |
80 | * @irq: Interrupt to check | 80 | * @irq: Interrupt to check |
81 | * | 81 | * |
82 | */ | 82 | */ |
83 | int irq_can_set_affinity(unsigned int irq) | 83 | int irq_can_set_affinity(unsigned int irq) |
84 | { | 84 | { |
85 | struct irq_desc *desc = irq_to_desc(irq); | 85 | struct irq_desc *desc = irq_to_desc(irq); |
86 | 86 | ||
87 | if (!desc || !irqd_can_balance(&desc->irq_data) || | 87 | if (!desc || !irqd_can_balance(&desc->irq_data) || |
88 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) | 88 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) |
89 | return 0; | 89 | return 0; |
90 | 90 | ||
91 | return 1; | 91 | return 1; |
92 | } | 92 | } |
93 | 93 | ||
94 | /** | 94 | /** |
95 | * irq_set_thread_affinity - Notify irq threads to adjust affinity | 95 | * irq_set_thread_affinity - Notify irq threads to adjust affinity |
96 | * @desc: irq descriptor which has affinity changed | 96 | * @desc: irq descriptor which has affinity changed
97 | * | 97 | * |
98 | * We just set IRQTF_AFFINITY and delegate the affinity setting | 98 | * We just set IRQTF_AFFINITY and delegate the affinity setting |
99 | * to the interrupt thread itself. We can not call | 99 | * to the interrupt thread itself. We can not call |
100 | * set_cpus_allowed_ptr() here as we hold desc->lock and this | 100 | * set_cpus_allowed_ptr() here as we hold desc->lock and this |
101 | * code can be called from hard interrupt context. | 101 | * code can be called from hard interrupt context. |
102 | */ | 102 | */ |
103 | void irq_set_thread_affinity(struct irq_desc *desc) | 103 | void irq_set_thread_affinity(struct irq_desc *desc) |
104 | { | 104 | { |
105 | struct irqaction *action = desc->action; | 105 | struct irqaction *action = desc->action; |
106 | 106 | ||
107 | while (action) { | 107 | while (action) { |
108 | if (action->thread) | 108 | if (action->thread) |
109 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | 109 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
110 | action = action->next; | 110 | action = action->next; |
111 | } | 111 | } |
112 | } | 112 | } |
113 | 113 | ||
114 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 114 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
115 | static inline bool irq_can_move_pcntxt(struct irq_data *data) | 115 | static inline bool irq_can_move_pcntxt(struct irq_data *data) |
116 | { | 116 | { |
117 | return irqd_can_move_in_process_context(data); | 117 | return irqd_can_move_in_process_context(data); |
118 | } | 118 | } |
119 | static inline bool irq_move_pending(struct irq_data *data) | 119 | static inline bool irq_move_pending(struct irq_data *data) |
120 | { | 120 | { |
121 | return irqd_is_setaffinity_pending(data); | 121 | return irqd_is_setaffinity_pending(data); |
122 | } | 122 | } |
123 | static inline void | 123 | static inline void |
124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) | 124 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) |
125 | { | 125 | { |
126 | cpumask_copy(desc->pending_mask, mask); | 126 | cpumask_copy(desc->pending_mask, mask); |
127 | } | 127 | } |
128 | static inline void | 128 | static inline void |
129 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) | 129 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) |
130 | { | 130 | { |
131 | cpumask_copy(mask, desc->pending_mask); | 131 | cpumask_copy(mask, desc->pending_mask); |
132 | } | 132 | } |
133 | #else | 133 | #else |
134 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } | 134 | static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } |
135 | static inline bool irq_move_pending(struct irq_data *data) { return false; } | 135 | static inline bool irq_move_pending(struct irq_data *data) { return false; } |
136 | static inline void | 136 | static inline void |
137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } | 137 | irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } |
138 | static inline void | 138 | static inline void |
139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } | 139 | irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } |
140 | #endif | 140 | #endif |
141 | 141 | ||
142 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) | 142 | int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) |
143 | { | 143 | { |
144 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 144 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
145 | struct irq_desc *desc = irq_data_to_desc(data); | 145 | struct irq_desc *desc = irq_data_to_desc(data); |
146 | int ret = 0; | 146 | int ret = 0; |
147 | 147 | ||
148 | if (!chip || !chip->irq_set_affinity) | 148 | if (!chip || !chip->irq_set_affinity) |
149 | return -EINVAL; | 149 | return -EINVAL; |
150 | 150 | ||
151 | if (irq_can_move_pcntxt(data)) { | 151 | if (irq_can_move_pcntxt(data)) { |
152 | ret = chip->irq_set_affinity(data, mask, false); | 152 | ret = chip->irq_set_affinity(data, mask, false); |
153 | switch (ret) { | 153 | switch (ret) { |
154 | case IRQ_SET_MASK_OK: | 154 | case IRQ_SET_MASK_OK: |
155 | cpumask_copy(data->affinity, mask); | 155 | cpumask_copy(data->affinity, mask); |
156 | case IRQ_SET_MASK_OK_NOCOPY: | 156 | case IRQ_SET_MASK_OK_NOCOPY: |
157 | irq_set_thread_affinity(desc); | 157 | irq_set_thread_affinity(desc); |
158 | ret = 0; | 158 | ret = 0; |
159 | } | 159 | } |
160 | } else { | 160 | } else { |
161 | irqd_set_move_pending(data); | 161 | irqd_set_move_pending(data); |
162 | irq_copy_pending(desc, mask); | 162 | irq_copy_pending(desc, mask); |
163 | } | 163 | } |
164 | 164 | ||
165 | if (desc->affinity_notify) { | 165 | if (desc->affinity_notify) { |
166 | kref_get(&desc->affinity_notify->kref); | 166 | kref_get(&desc->affinity_notify->kref); |
167 | schedule_work(&desc->affinity_notify->work); | 167 | schedule_work(&desc->affinity_notify->work); |
168 | } | 168 | } |
169 | irqd_set(data, IRQD_AFFINITY_SET); | 169 | irqd_set(data, IRQD_AFFINITY_SET); |
170 | 170 | ||
171 | return ret; | 171 | return ret; |
172 | } | 172 | } |
173 | 173 | ||
174 | /** | 174 | /** |
175 | * irq_set_affinity - Set the irq affinity of a given irq | 175 | * irq_set_affinity - Set the irq affinity of a given irq |
176 | * @irq: Interrupt to set affinity | 176 | * @irq: Interrupt to set affinity |
177 | * @mask: cpumask | 177 | * @mask: cpumask |
178 | * | 178 | * |
179 | */ | 179 | */ |
180 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) | 180 | int irq_set_affinity(unsigned int irq, const struct cpumask *mask) |
181 | { | 181 | { |
182 | struct irq_desc *desc = irq_to_desc(irq); | 182 | struct irq_desc *desc = irq_to_desc(irq); |
183 | unsigned long flags; | 183 | unsigned long flags; |
184 | int ret; | 184 | int ret; |
185 | 185 | ||
186 | if (!desc) | 186 | if (!desc) |
187 | return -EINVAL; | 187 | return -EINVAL; |
188 | 188 | ||
189 | raw_spin_lock_irqsave(&desc->lock, flags); | 189 | raw_spin_lock_irqsave(&desc->lock, flags); |
190 | ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); | 190 | ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); |
191 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 191 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
192 | return ret; | 192 | return ret; |
193 | } | 193 | } |
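A minimal caller sketch: check irq_can_set_affinity() first, since PER_CPU and NO_BALANCE interrupts refuse this, then pin the line (CPU 2 is arbitrary):

	if (irq_can_set_affinity(irq))
		irq_set_affinity(irq, cpumask_of(2));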
194 | 194 | ||
195 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | 195 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) |
196 | { | 196 | { |
197 | unsigned long flags; | 197 | unsigned long flags; |
198 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | 198 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); |
199 | 199 | ||
200 | if (!desc) | 200 | if (!desc) |
201 | return -EINVAL; | 201 | return -EINVAL; |
202 | desc->affinity_hint = m; | 202 | desc->affinity_hint = m; |
203 | irq_put_desc_unlock(desc, flags); | 203 | irq_put_desc_unlock(desc, flags); |
204 | return 0; | 204 | return 0; |
205 | } | 205 | } |
206 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | 206 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); |
207 | 207 | ||
208 | static void irq_affinity_notify(struct work_struct *work) | 208 | static void irq_affinity_notify(struct work_struct *work) |
209 | { | 209 | { |
210 | struct irq_affinity_notify *notify = | 210 | struct irq_affinity_notify *notify = |
211 | container_of(work, struct irq_affinity_notify, work); | 211 | container_of(work, struct irq_affinity_notify, work); |
212 | struct irq_desc *desc = irq_to_desc(notify->irq); | 212 | struct irq_desc *desc = irq_to_desc(notify->irq); |
213 | cpumask_var_t cpumask; | 213 | cpumask_var_t cpumask; |
214 | unsigned long flags; | 214 | unsigned long flags; |
215 | 215 | ||
216 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) | 216 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) |
217 | goto out; | 217 | goto out; |
218 | 218 | ||
219 | raw_spin_lock_irqsave(&desc->lock, flags); | 219 | raw_spin_lock_irqsave(&desc->lock, flags); |
220 | if (irq_move_pending(&desc->irq_data)) | 220 | if (irq_move_pending(&desc->irq_data)) |
221 | irq_get_pending(cpumask, desc); | 221 | irq_get_pending(cpumask, desc); |
222 | else | 222 | else |
223 | cpumask_copy(cpumask, desc->irq_data.affinity); | 223 | cpumask_copy(cpumask, desc->irq_data.affinity); |
224 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 224 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
225 | 225 | ||
226 | notify->notify(notify, cpumask); | 226 | notify->notify(notify, cpumask); |
227 | 227 | ||
228 | free_cpumask_var(cpumask); | 228 | free_cpumask_var(cpumask); |
229 | out: | 229 | out: |
230 | kref_put(¬ify->kref, notify->release); | 230 | kref_put(¬ify->kref, notify->release); |
231 | } | 231 | } |
232 | 232 | ||
233 | /** | 233 | /** |
234 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | 234 | * irq_set_affinity_notifier - control notification of IRQ affinity changes |
235 | * @irq: Interrupt for which to enable/disable notification | 235 | * @irq: Interrupt for which to enable/disable notification |
236 | * @notify: Context for notification, or %NULL to disable | 236 | * @notify: Context for notification, or %NULL to disable |
237 | * notification. Function pointers must be initialised; | 237 | * notification. Function pointers must be initialised; |
238 | * the other fields will be initialised by this function. | 238 | * the other fields will be initialised by this function. |
239 | * | 239 | * |
240 | * Must be called in process context. Notification may only be enabled | 240 | * Must be called in process context. Notification may only be enabled |
241 | * after the IRQ is allocated and must be disabled before the IRQ is | 241 | * after the IRQ is allocated and must be disabled before the IRQ is |
242 | * freed using free_irq(). | 242 | * freed using free_irq(). |
243 | */ | 243 | */ |
244 | int | 244 | int |
245 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | 245 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) |
246 | { | 246 | { |
247 | struct irq_desc *desc = irq_to_desc(irq); | 247 | struct irq_desc *desc = irq_to_desc(irq); |
248 | struct irq_affinity_notify *old_notify; | 248 | struct irq_affinity_notify *old_notify; |
249 | unsigned long flags; | 249 | unsigned long flags; |
250 | 250 | ||
251 | /* The release function is promised process context */ | 251 | /* The release function is promised process context */ |
252 | might_sleep(); | 252 | might_sleep(); |
253 | 253 | ||
254 | if (!desc) | 254 | if (!desc) |
255 | return -EINVAL; | 255 | return -EINVAL; |
256 | 256 | ||
257 | /* Complete initialisation of *notify */ | 257 | /* Complete initialisation of *notify */ |
258 | if (notify) { | 258 | if (notify) { |
259 | notify->irq = irq; | 259 | notify->irq = irq; |
260 | kref_init(¬ify->kref); | 260 | kref_init(¬ify->kref); |
261 | INIT_WORK(¬ify->work, irq_affinity_notify); | 261 | INIT_WORK(¬ify->work, irq_affinity_notify); |
262 | } | 262 | } |
263 | 263 | ||
264 | raw_spin_lock_irqsave(&desc->lock, flags); | 264 | raw_spin_lock_irqsave(&desc->lock, flags); |
265 | old_notify = desc->affinity_notify; | 265 | old_notify = desc->affinity_notify; |
266 | desc->affinity_notify = notify; | 266 | desc->affinity_notify = notify; |
267 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 267 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
268 | 268 | ||
269 | if (old_notify) | 269 | if (old_notify) |
270 | kref_put(&old_notify->kref, old_notify->release); | 270 | kref_put(&old_notify->kref, old_notify->release); |
271 | 271 | ||
272 | return 0; | 272 | return 0; |
273 | } | 273 | } |
274 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); | 274 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); |
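A hedged sketch of a consumer: the driver embeds the notify block in its own state, fills in the two callbacks, and registers it (all "foo" names are hypothetical):

	struct foo_dev {
		struct irq_affinity_notify affinity_notify;
		/* ... */
	};

	static void foo_affinity_changed(struct irq_affinity_notify *notify,
					 const cpumask_t *mask)
	{
		/* runs in process context; re-point per-CPU resources here */
	}

	static void foo_affinity_release(struct kref *ref)
	{
		/* the last kref_put() lands here; often empty when the notify
		 * block is embedded in a longer-lived structure */
	}

	static int foo_enable_notify(struct foo_dev *fd, unsigned int irq)
	{
		fd->affinity_notify.notify = foo_affinity_changed;
		fd->affinity_notify.release = foo_affinity_release;
		return irq_set_affinity_notifier(irq, &fd->affinity_notify);
	}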
275 | 275 | ||
276 | #ifndef CONFIG_AUTO_IRQ_AFFINITY | 276 | #ifndef CONFIG_AUTO_IRQ_AFFINITY |
277 | /* | 277 | /* |
278 | * Generic version of the affinity autoselector. | 278 | * Generic version of the affinity autoselector. |
279 | */ | 279 | */ |
280 | static int | 280 | static int |
281 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | 281 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) |
282 | { | 282 | { |
283 | struct irq_chip *chip = irq_desc_get_chip(desc); | 283 | struct irq_chip *chip = irq_desc_get_chip(desc); |
284 | struct cpumask *set = irq_default_affinity; | 284 | struct cpumask *set = irq_default_affinity; |
285 | int ret; | 285 | int ret; |
286 | 286 | ||
287 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | 287 | /* Excludes PER_CPU and NO_BALANCE interrupts */ |
288 | if (!irq_can_set_affinity(irq)) | 288 | if (!irq_can_set_affinity(irq)) |
289 | return 0; | 289 | return 0; |
290 | 290 | ||
291 | /* | 291 | /* |
292 | * Preserve a userspace affinity setup, but make sure that | 292 | * Preserve a userspace affinity setup, but make sure that
293 | * one of the targets is online. | 293 | * one of the targets is online. |
294 | */ | 294 | */ |
295 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { | 295 | if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { |
296 | if (cpumask_intersects(desc->irq_data.affinity, | 296 | if (cpumask_intersects(desc->irq_data.affinity, |
297 | cpu_online_mask)) | 297 | cpu_online_mask)) |
298 | set = desc->irq_data.affinity; | 298 | set = desc->irq_data.affinity; |
299 | else | 299 | else |
300 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); | 300 | irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); |
301 | } | 301 | } |
302 | 302 | ||
303 | cpumask_and(mask, cpu_online_mask, set); | 303 | cpumask_and(mask, cpu_online_mask, set); |
304 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); | 304 | ret = chip->irq_set_affinity(&desc->irq_data, mask, false); |
305 | switch (ret) { | 305 | switch (ret) { |
306 | case IRQ_SET_MASK_OK: | 306 | case IRQ_SET_MASK_OK: |
307 | cpumask_copy(desc->irq_data.affinity, mask); | 307 | cpumask_copy(desc->irq_data.affinity, mask); |
308 | case IRQ_SET_MASK_OK_NOCOPY: | 308 | case IRQ_SET_MASK_OK_NOCOPY: |
309 | irq_set_thread_affinity(desc); | 309 | irq_set_thread_affinity(desc); |
310 | } | 310 | } |
311 | return 0; | 311 | return 0; |
312 | } | 312 | } |
313 | #else | 313 | #else |
314 | static inline int | 314 | static inline int |
315 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) | 315 | setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) |
316 | { | 316 | { |
317 | return irq_select_affinity(irq); | 317 | return irq_select_affinity(irq); |
318 | } | 318 | } |
319 | #endif | 319 | #endif |
320 | 320 | ||
321 | /* | 321 | /* |
322 | * Called when affinity is set via /proc/irq | 322 | * Called when affinity is set via /proc/irq |
323 | */ | 323 | */ |
324 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) | 324 | int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) |
325 | { | 325 | { |
326 | struct irq_desc *desc = irq_to_desc(irq); | 326 | struct irq_desc *desc = irq_to_desc(irq); |
327 | unsigned long flags; | 327 | unsigned long flags; |
328 | int ret; | 328 | int ret; |
329 | 329 | ||
330 | raw_spin_lock_irqsave(&desc->lock, flags); | 330 | raw_spin_lock_irqsave(&desc->lock, flags); |
331 | ret = setup_affinity(irq, desc, mask); | 331 | ret = setup_affinity(irq, desc, mask); |
332 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 332 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
333 | return ret; | 333 | return ret; |
334 | } | 334 | } |
335 | 335 | ||
336 | #else | 336 | #else |
337 | static inline int | 337 | static inline int |
338 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | 338 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) |
339 | { | 339 | { |
340 | return 0; | 340 | return 0; |
341 | } | 341 | } |
342 | #endif | 342 | #endif |
343 | 343 | ||
344 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | 344 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) |
345 | { | 345 | { |
346 | if (suspend) { | 346 | if (suspend) { |
347 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | 347 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) |
348 | return; | 348 | return; |
349 | desc->istate |= IRQS_SUSPENDED; | 349 | desc->istate |= IRQS_SUSPENDED; |
350 | } | 350 | } |
351 | 351 | ||
352 | if (!desc->depth++) | 352 | if (!desc->depth++) |
353 | irq_disable(desc); | 353 | irq_disable(desc); |
354 | } | 354 | } |
355 | 355 | ||
356 | static int __disable_irq_nosync(unsigned int irq) | 356 | static int __disable_irq_nosync(unsigned int irq) |
357 | { | 357 | { |
358 | unsigned long flags; | 358 | unsigned long flags; |
359 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | 359 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); |
360 | 360 | ||
361 | if (!desc) | 361 | if (!desc) |
362 | return -EINVAL; | 362 | return -EINVAL; |
363 | __disable_irq(desc, irq, false); | 363 | __disable_irq(desc, irq, false); |
364 | irq_put_desc_busunlock(desc, flags); | 364 | irq_put_desc_busunlock(desc, flags); |
365 | return 0; | 365 | return 0; |
366 | } | 366 | } |
367 | 367 | ||
368 | /** | 368 | /** |
369 | * disable_irq_nosync - disable an irq without waiting | 369 | * disable_irq_nosync - disable an irq without waiting |
370 | * @irq: Interrupt to disable | 370 | * @irq: Interrupt to disable |
371 | * | 371 | * |
372 | * Disable the selected interrupt line. Disables and Enables are | 372 | * Disable the selected interrupt line. Disables and Enables are |
373 | * nested. | 373 | * nested. |
374 | * Unlike disable_irq(), this function does not ensure existing | 374 | * Unlike disable_irq(), this function does not ensure existing |
375 | * instances of the IRQ handler have completed before returning. | 375 | * instances of the IRQ handler have completed before returning. |
376 | * | 376 | * |
377 | * This function may be called from IRQ context. | 377 | * This function may be called from IRQ context. |
378 | */ | 378 | */ |
379 | void disable_irq_nosync(unsigned int irq) | 379 | void disable_irq_nosync(unsigned int irq) |
380 | { | 380 | { |
381 | __disable_irq_nosync(irq); | 381 | __disable_irq_nosync(irq); |
382 | } | 382 | } |
383 | EXPORT_SYMBOL(disable_irq_nosync); | 383 | EXPORT_SYMBOL(disable_irq_nosync); |
384 | 384 | ||
385 | /** | 385 | /** |
386 | * disable_irq - disable an irq and wait for completion | 386 | * disable_irq - disable an irq and wait for completion |
387 | * @irq: Interrupt to disable | 387 | * @irq: Interrupt to disable |
388 | * | 388 | * |
389 | * Disable the selected interrupt line. Enables and Disables are | 389 | * Disable the selected interrupt line. Enables and Disables are |
390 | * nested. | 390 | * nested. |
391 | * This function waits for any pending IRQ handlers for this interrupt | 391 | * This function waits for any pending IRQ handlers for this interrupt |
392 | * to complete before returning. If you use this function while | 392 | * to complete before returning. If you use this function while |
393 | * holding a resource the IRQ handler may need you will deadlock. | 393 | * holding a resource the IRQ handler may need you will deadlock. |
394 | * | 394 | * |
395 | * This function may be called - with care - from IRQ context. | 395 | * This function may be called - with care - from IRQ context. |
396 | */ | 396 | */ |
397 | void disable_irq(unsigned int irq) | 397 | void disable_irq(unsigned int irq) |
398 | { | 398 | { |
399 | if (!__disable_irq_nosync(irq)) | 399 | if (!__disable_irq_nosync(irq)) |
400 | synchronize_irq(irq); | 400 | synchronize_irq(irq); |
401 | } | 401 | } |
402 | EXPORT_SYMBOL(disable_irq); | 402 | EXPORT_SYMBOL(disable_irq); |
403 | 403 | ||
404 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 404 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
405 | { | 405 | { |
406 | if (resume) { | 406 | if (resume) { |
407 | if (!(desc->istate & IRQS_SUSPENDED)) { | 407 | if (!(desc->istate & IRQS_SUSPENDED)) { |
408 | if (!desc->action) | 408 | if (!desc->action) |
409 | return; | 409 | return; |
410 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | 410 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) |
411 | return; | 411 | return; |
412 | /* Pretend that it got disabled! */ | 412 | /* Pretend that it got disabled! */
413 | desc->depth++; | 413 | desc->depth++; |
414 | } | 414 | } |
415 | desc->istate &= ~IRQS_SUSPENDED; | 415 | desc->istate &= ~IRQS_SUSPENDED; |
416 | } | 416 | } |
417 | 417 | ||
418 | switch (desc->depth) { | 418 | switch (desc->depth) { |
419 | case 0: | 419 | case 0: |
420 | err_out: | 420 | err_out: |
421 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | 421 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); |
422 | break; | 422 | break; |
423 | case 1: { | 423 | case 1: { |
424 | if (desc->istate & IRQS_SUSPENDED) | 424 | if (desc->istate & IRQS_SUSPENDED) |
425 | goto err_out; | 425 | goto err_out; |
426 | /* Prevent probing on this irq: */ | 426 | /* Prevent probing on this irq: */ |
427 | irq_settings_set_noprobe(desc); | 427 | irq_settings_set_noprobe(desc); |
428 | irq_enable(desc); | 428 | irq_enable(desc); |
429 | check_irq_resend(desc, irq); | 429 | check_irq_resend(desc, irq); |
430 | /* fall-through */ | 430 | /* fall-through */ |
431 | } | 431 | } |
432 | default: | 432 | default: |
433 | desc->depth--; | 433 | desc->depth--; |
434 | } | 434 | } |
435 | } | 435 | } |
436 | 436 | ||
437 | /** | 437 | /** |
438 | * enable_irq - enable handling of an irq | 438 | * enable_irq - enable handling of an irq |
439 | * @irq: Interrupt to enable | 439 | * @irq: Interrupt to enable |
440 | * | 440 | * |
441 | * Undoes the effect of one call to disable_irq(). If this | 441 | * Undoes the effect of one call to disable_irq(). If this |
442 | * matches the last disable, processing of interrupts on this | 442 | * matches the last disable, processing of interrupts on this |
443 | * IRQ line is re-enabled. | 443 | * IRQ line is re-enabled. |
444 | * | 444 | * |
445 | * This function may be called from IRQ context only when | 445 | * This function may be called from IRQ context only when |
446 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | 446 | * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! |
447 | */ | 447 | */ |
448 | void enable_irq(unsigned int irq) | 448 | void enable_irq(unsigned int irq) |
449 | { | 449 | { |
450 | unsigned long flags; | 450 | unsigned long flags; |
451 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | 451 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); |
452 | 452 | ||
453 | if (!desc) | 453 | if (!desc) |
454 | return; | 454 | return; |
455 | if (WARN(!desc->irq_data.chip, | 455 | if (WARN(!desc->irq_data.chip, |
456 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | 456 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) |
457 | goto out; | 457 | goto out; |
458 | 458 | ||
459 | __enable_irq(desc, irq, false); | 459 | __enable_irq(desc, irq, false); |
460 | out: | 460 | out: |
461 | irq_put_desc_busunlock(desc, flags); | 461 | irq_put_desc_busunlock(desc, flags); |
462 | } | 462 | } |
463 | EXPORT_SYMBOL(enable_irq); | 463 | EXPORT_SYMBOL(enable_irq); |
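Because the depth count nests, paired disable/enable sections can be stacked freely; a minimal sketch of the bookkeeping:

	disable_irq(irq);	/* depth 0 -> 1: line masked, running handlers drained */
	disable_irq(irq);	/* depth 1 -> 2: no extra hardware work */
	enable_irq(irq);	/* depth 2 -> 1: line stays masked */
	enable_irq(irq);	/* depth 1 -> 0: line unmasked, pending irq resent */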
464 | 464 | ||
465 | static int set_irq_wake_real(unsigned int irq, unsigned int on) | 465 | static int set_irq_wake_real(unsigned int irq, unsigned int on) |
466 | { | 466 | { |
467 | struct irq_desc *desc = irq_to_desc(irq); | 467 | struct irq_desc *desc = irq_to_desc(irq); |
468 | int ret = -ENXIO; | 468 | int ret = -ENXIO; |
469 | 469 | ||
470 | if (desc->irq_data.chip->irq_set_wake) | 470 | if (desc->irq_data.chip->irq_set_wake) |
471 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); | 471 | ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); |
472 | 472 | ||
473 | return ret; | 473 | return ret; |
474 | } | 474 | } |
475 | 475 | ||
476 | /** | 476 | /** |
477 | * irq_set_irq_wake - control irq power management wakeup | 477 | * irq_set_irq_wake - control irq power management wakeup |
478 | * @irq: interrupt to control | 478 | * @irq: interrupt to control |
479 | * @on: enable/disable power management wakeup | 479 | * @on: enable/disable power management wakeup |
480 | * | 480 | * |
481 | * Enable/disable power management wakeup mode, which is | 481 | * Enable/disable power management wakeup mode, which is |
482 | * disabled by default. Enables and disables must match, | 482 | * disabled by default. Enables and disables must match, |
483 | * just as they match for non-wakeup mode support. | 483 | * just as they match for non-wakeup mode support. |
484 | * | 484 | * |
485 | * Wakeup mode lets this IRQ wake the system from sleep | 485 | * Wakeup mode lets this IRQ wake the system from sleep |
486 | * states like "suspend to RAM". | 486 | * states like "suspend to RAM". |
487 | */ | 487 | */ |
488 | int irq_set_irq_wake(unsigned int irq, unsigned int on) | 488 | int irq_set_irq_wake(unsigned int irq, unsigned int on) |
489 | { | 489 | { |
490 | unsigned long flags; | 490 | unsigned long flags; |
491 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); | 491 | struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); |
492 | int ret = 0; | 492 | int ret = 0; |
493 | 493 | ||
494 | if (!desc) | 494 | if (!desc) |
495 | return -EINVAL; | 495 | return -EINVAL; |
496 | 496 | ||
497 | /* wakeup-capable irqs can be shared between drivers that | 497 | /* wakeup-capable irqs can be shared between drivers that |
498 | * don't need to have the same sleep mode behaviors. | 498 | * don't need to have the same sleep mode behaviors. |
499 | */ | 499 | */ |
500 | if (on) { | 500 | if (on) { |
501 | if (desc->wake_depth++ == 0) { | 501 | if (desc->wake_depth++ == 0) { |
502 | ret = set_irq_wake_real(irq, on); | 502 | ret = set_irq_wake_real(irq, on); |
503 | if (ret) | 503 | if (ret) |
504 | desc->wake_depth = 0; | 504 | desc->wake_depth = 0; |
505 | else | 505 | else |
506 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); | 506 | irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); |
507 | } | 507 | } |
508 | } else { | 508 | } else { |
509 | if (desc->wake_depth == 0) { | 509 | if (desc->wake_depth == 0) { |
510 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); | 510 | WARN(1, "Unbalanced IRQ %d wake disable\n", irq); |
511 | } else if (--desc->wake_depth == 0) { | 511 | } else if (--desc->wake_depth == 0) { |
512 | ret = set_irq_wake_real(irq, on); | 512 | ret = set_irq_wake_real(irq, on); |
513 | if (ret) | 513 | if (ret) |
514 | desc->wake_depth = 1; | 514 | desc->wake_depth = 1; |
515 | else | 515 | else |
516 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); | 516 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
517 | } | 517 | } |
518 | } | 518 | } |
519 | irq_put_desc_busunlock(desc, flags); | 519 | irq_put_desc_busunlock(desc, flags); |
520 | return ret; | 520 | return ret; |
521 | } | 521 | } |
522 | EXPORT_SYMBOL(irq_set_irq_wake); | 522 | EXPORT_SYMBOL(irq_set_irq_wake); |
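The matched enable/disable pairing maps naturally onto a driver's PM callbacks; a hedged sketch (the "foo" names and the device_may_wakeup() gating are illustrative):

	static int foo_suspend(struct device *dev)
	{
		struct foo_dev *fd = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			irq_set_irq_wake(fd->irq, 1);	/* wake_depth 0 -> 1 */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo_dev *fd = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			irq_set_irq_wake(fd->irq, 0);	/* wake_depth 1 -> 0 */
		return 0;
	}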
523 | 523 | ||
524 | /* | 524 | /* |
525 | * Internal function that tells the architecture code whether a | 525 | * Internal function that tells the architecture code whether a |
526 | * particular irq has been exclusively allocated or is available | 526 | * particular irq has been exclusively allocated or is available |
527 | * for driver use. | 527 | * for driver use. |
528 | */ | 528 | */ |
529 | int can_request_irq(unsigned int irq, unsigned long irqflags) | 529 | int can_request_irq(unsigned int irq, unsigned long irqflags) |
530 | { | 530 | { |
531 | unsigned long flags; | 531 | unsigned long flags; |
532 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | 532 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); |
533 | int canrequest = 0; | 533 | int canrequest = 0; |
534 | 534 | ||
535 | if (!desc) | 535 | if (!desc) |
536 | return 0; | 536 | return 0; |
537 | 537 | ||
538 | if (irq_settings_can_request(desc)) { | 538 | if (irq_settings_can_request(desc)) { |
539 | if (desc->action) | 539 | if (desc->action) |
540 | if (irqflags & desc->action->flags & IRQF_SHARED) | 540 | if (irqflags & desc->action->flags & IRQF_SHARED) |
541 | canrequest = 1; | 541 | canrequest = 1;
542 | } | 542 | } |
543 | irq_put_desc_unlock(desc, flags); | 543 | irq_put_desc_unlock(desc, flags); |
544 | return canrequest; | 544 | return canrequest; |
545 | } | 545 | } |
546 | 546 | ||
547 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 547 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
548 | unsigned long flags) | 548 | unsigned long flags) |
549 | { | 549 | { |
550 | struct irq_chip *chip = desc->irq_data.chip; | 550 | struct irq_chip *chip = desc->irq_data.chip; |
551 | int ret, unmask = 0; | 551 | int ret, unmask = 0; |
552 | 552 | ||
553 | if (!chip || !chip->irq_set_type) { | 553 | if (!chip || !chip->irq_set_type) { |
554 | /* | 554 | /* |
555 | * IRQF_TRIGGER_* but the PIC does not support multiple | 555 | * IRQF_TRIGGER_* but the PIC does not support multiple |
556 | * flow-types? | 556 | * flow-types? |
557 | */ | 557 | */ |
558 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, | 558 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, |
559 | chip ? (chip->name ? : "unknown") : "unknown"); | 559 | chip ? (chip->name ? : "unknown") : "unknown"); |
560 | return 0; | 560 | return 0; |
561 | } | 561 | } |
562 | 562 | ||
563 | flags &= IRQ_TYPE_SENSE_MASK; | 563 | flags &= IRQ_TYPE_SENSE_MASK; |
564 | 564 | ||
565 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | 565 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { |
566 | if (!irqd_irq_masked(&desc->irq_data)) | 566 | if (!irqd_irq_masked(&desc->irq_data)) |
567 | mask_irq(desc); | 567 | mask_irq(desc); |
568 | if (!irqd_irq_disabled(&desc->irq_data)) | 568 | if (!irqd_irq_disabled(&desc->irq_data)) |
569 | unmask = 1; | 569 | unmask = 1; |
570 | } | 570 | } |
571 | 571 | ||
572 | /* caller masked out all except trigger mode flags */ | 572 | /* caller masked out all except trigger mode flags */ |
573 | ret = chip->irq_set_type(&desc->irq_data, flags); | 573 | ret = chip->irq_set_type(&desc->irq_data, flags); |
574 | 574 | ||
575 | switch (ret) { | 575 | switch (ret) { |
576 | case IRQ_SET_MASK_OK: | 576 | case IRQ_SET_MASK_OK: |
577 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | 577 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
578 | irqd_set(&desc->irq_data, flags); | 578 | irqd_set(&desc->irq_data, flags); |
579 | 579 | ||
580 | case IRQ_SET_MASK_OK_NOCOPY: | 580 | case IRQ_SET_MASK_OK_NOCOPY: |
581 | flags = irqd_get_trigger_type(&desc->irq_data); | 581 | flags = irqd_get_trigger_type(&desc->irq_data); |
582 | irq_settings_set_trigger_mask(desc, flags); | 582 | irq_settings_set_trigger_mask(desc, flags); |
583 | irqd_clear(&desc->irq_data, IRQD_LEVEL); | 583 | irqd_clear(&desc->irq_data, IRQD_LEVEL); |
584 | irq_settings_clr_level(desc); | 584 | irq_settings_clr_level(desc); |
585 | if (flags & IRQ_TYPE_LEVEL_MASK) { | 585 | if (flags & IRQ_TYPE_LEVEL_MASK) { |
586 | irq_settings_set_level(desc); | 586 | irq_settings_set_level(desc); |
587 | irqd_set(&desc->irq_data, IRQD_LEVEL); | 587 | irqd_set(&desc->irq_data, IRQD_LEVEL); |
588 | } | 588 | } |
589 | 589 | ||
590 | ret = 0; | 590 | ret = 0; |
591 | break; | 591 | break; |
592 | default: | 592 | default: |
593 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", | 593 | pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", |
594 | flags, irq, chip->irq_set_type); | 594 | flags, irq, chip->irq_set_type); |
595 | } | 595 | } |
596 | if (unmask) | 596 | if (unmask) |
597 | unmask_irq(desc); | 597 | unmask_irq(desc); |
598 | return ret; | 598 | return ret; |
599 | } | 599 | } |
600 | 600 | ||
601 | /* | 601 | /* |
602 | * Default primary interrupt handler for threaded interrupts. Is | 602 | * Default primary interrupt handler for threaded interrupts. Is |
603 | * assigned as primary handler when request_threaded_irq is called | 603 | * assigned as primary handler when request_threaded_irq is called |
604 | * with handler == NULL. Useful for oneshot interrupts. | 604 | * with handler == NULL. Useful for oneshot interrupts. |
605 | */ | 605 | */ |
606 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) | 606 | static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) |
607 | { | 607 | { |
608 | return IRQ_WAKE_THREAD; | 608 | return IRQ_WAKE_THREAD; |
609 | } | 609 | } |
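This is the primary handler a request like the following ends up with. One of the fixes in this merge makes the core force IRQF_ONESHOT on for such requests; spelling it out still documents the constraint. A sketch with hypothetical names:

	ret = request_threaded_irq(client->irq,
				   NULL,		/* core installs the default primary handler */
				   foo_thread_handler,	/* all work done in process context */
				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				   "foo", fd);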
610 | 610 | ||
611 | /* | 611 | /* |
612 | * Primary handler for nested threaded interrupts. Should never be | 612 | * Primary handler for nested threaded interrupts. Should never be |
613 | * called. | 613 | * called. |
614 | */ | 614 | */ |
615 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) | 615 | static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) |
616 | { | 616 | { |
617 | WARN(1, "Primary handler called for nested irq %d\n", irq); | 617 | WARN(1, "Primary handler called for nested irq %d\n", irq); |
618 | return IRQ_NONE; | 618 | return IRQ_NONE; |
619 | } | 619 | } |
620 | 620 | ||
621 | static int irq_wait_for_interrupt(struct irqaction *action) | 621 | static int irq_wait_for_interrupt(struct irqaction *action) |
622 | { | 622 | { |
623 | while (!kthread_should_stop()) { | 623 | while (!kthread_should_stop()) { |
624 | set_current_state(TASK_INTERRUPTIBLE); | 624 | set_current_state(TASK_INTERRUPTIBLE); |
625 | 625 | ||
626 | if (test_and_clear_bit(IRQTF_RUNTHREAD, | 626 | if (test_and_clear_bit(IRQTF_RUNTHREAD, |
627 | &action->thread_flags)) { | 627 | &action->thread_flags)) { |
628 | __set_current_state(TASK_RUNNING); | 628 | __set_current_state(TASK_RUNNING); |
629 | return 0; | 629 | return 0; |
630 | } | 630 | } |
631 | schedule(); | 631 | schedule(); |
632 | } | 632 | } |
633 | return -1; | 633 | return -1; |
634 | } | 634 | } |
635 | 635 | ||
636 | /* | 636 | /* |
637 | * Oneshot interrupts keep the irq line masked until the threaded | 637 | * Oneshot interrupts keep the irq line masked until the threaded |
638 | * handler has finished. Unmask if the interrupt has not been disabled and | 638 | * handler has finished. Unmask if the interrupt has not been disabled and
639 | * is marked MASKED. | 639 | * is marked MASKED. |
640 | */ | 640 | */ |
641 | static void irq_finalize_oneshot(struct irq_desc *desc, | 641 | static void irq_finalize_oneshot(struct irq_desc *desc, |
642 | struct irqaction *action, bool force) | 642 | struct irqaction *action, bool force) |
643 | { | 643 | { |
644 | if (!(desc->istate & IRQS_ONESHOT)) | 644 | if (!(desc->istate & IRQS_ONESHOT)) |
645 | return; | 645 | return; |
646 | again: | 646 | again: |
647 | chip_bus_lock(desc); | 647 | chip_bus_lock(desc); |
648 | raw_spin_lock_irq(&desc->lock); | 648 | raw_spin_lock_irq(&desc->lock); |
649 | 649 | ||
650 | /* | 650 | /* |
651 | * Implausible though it may be, we need to protect ourselves against | 651 | * Implausible though it may be, we need to protect ourselves against
652 | * the following scenario: | 652 | * the following scenario: |
653 | * | 653 | * |
654 | * The thread can finish before the hard interrupt handler | 654 | * The thread can finish before the hard interrupt handler
655 | * on the other CPU. If we unmask the irq line then the | 655 | * on the other CPU. If we unmask the irq line then the
656 | * interrupt can come in again, mask the line and leave due | 656 | * interrupt can come in again, mask the line and leave due
657 | * to IRQS_INPROGRESS, and the irq line stays masked forever. | 657 | * to IRQS_INPROGRESS, and the irq line stays masked forever.
658 | * | 658 | * |
659 | * This also serializes the state of shared oneshot handlers | 659 | * This also serializes the state of shared oneshot handlers |
660 | * versus "desc->threads_oneshot |= action->thread_mask;" in | 660 | * versus "desc->threads_oneshot |= action->thread_mask;" in
661 | * irq_wake_thread(). See the comment there which explains the | 661 | * irq_wake_thread(). See the comment there which explains the |
662 | * serialization. | 662 | * serialization. |
663 | */ | 663 | */ |
664 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { | 664 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { |
665 | raw_spin_unlock_irq(&desc->lock); | 665 | raw_spin_unlock_irq(&desc->lock); |
666 | chip_bus_sync_unlock(desc); | 666 | chip_bus_sync_unlock(desc); |
667 | cpu_relax(); | 667 | cpu_relax(); |
668 | goto again; | 668 | goto again; |
669 | } | 669 | } |
670 | 670 | ||
671 | /* | 671 | /* |
672 | * Now check again, whether the thread should run. Otherwise | 672 | * Now check again, whether the thread should run. Otherwise |
673 | * we would clear the threads_oneshot bit of this thread which | 673 | * we would clear the threads_oneshot bit of this thread which |
674 | * was just set. | 674 | * was just set. |
675 | */ | 675 | */ |
676 | if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) | 676 | if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) |
677 | goto out_unlock; | 677 | goto out_unlock; |
678 | 678 | ||
679 | desc->threads_oneshot &= ~action->thread_mask; | 679 | desc->threads_oneshot &= ~action->thread_mask; |
680 | 680 | ||
681 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && | 681 | if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && |
682 | irqd_irq_masked(&desc->irq_data)) | 682 | irqd_irq_masked(&desc->irq_data)) |
683 | unmask_irq(desc); | 683 | unmask_irq(desc); |
684 | 684 | ||
685 | out_unlock: | 685 | out_unlock: |
686 | raw_spin_unlock_irq(&desc->lock); | 686 | raw_spin_unlock_irq(&desc->lock); |
687 | chip_bus_sync_unlock(desc); | 687 | chip_bus_sync_unlock(desc); |
688 | } | 688 | } |
689 | 689 | ||
690 | #ifdef CONFIG_SMP | 690 | #ifdef CONFIG_SMP |
691 | /* | 691 | /* |
692 | * Check whether we need to change the affinity of the interrupt thread. | 692 | * Check whether we need to change the affinity of the interrupt thread.
693 | */ | 693 | */ |
694 | static void | 694 | static void |
695 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | 695 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) |
696 | { | 696 | { |
697 | cpumask_var_t mask; | 697 | cpumask_var_t mask; |
698 | 698 | ||
699 | if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) | 699 | if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) |
700 | return; | 700 | return; |
701 | 701 | ||
702 | /* | 702 | /* |
703 | * In case we are out of memory we set IRQTF_AFFINITY again and | 703 | * In case we are out of memory we set IRQTF_AFFINITY again and |
704 | * try again next time | 704 | * try again next time |
705 | */ | 705 | */ |
706 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | 706 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { |
707 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | 707 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
708 | return; | 708 | return; |
709 | } | 709 | } |
710 | 710 | ||
711 | raw_spin_lock_irq(&desc->lock); | 711 | raw_spin_lock_irq(&desc->lock); |
712 | cpumask_copy(mask, desc->irq_data.affinity); | 712 | cpumask_copy(mask, desc->irq_data.affinity); |
713 | raw_spin_unlock_irq(&desc->lock); | 713 | raw_spin_unlock_irq(&desc->lock); |
714 | 714 | ||
715 | set_cpus_allowed_ptr(current, mask); | 715 | set_cpus_allowed_ptr(current, mask); |
716 | free_cpumask_var(mask); | 716 | free_cpumask_var(mask); |
717 | } | 717 | } |
718 | #else | 718 | #else |
719 | static inline void | 719 | static inline void |
720 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } | 720 | irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } |
721 | #endif | 721 | #endif |
722 | 722 | ||
723 | /* | 723 | /* |
724 | * Interrupts which are not explicitely requested as threaded | 724 | * Interrupts which are not explicitely requested as threaded |
725 | * interrupts rely on the implicit bh/preempt disable of the hard irq | 725 | * interrupts rely on the implicit bh/preempt disable of the hard irq |
726 | * context. So we need to disable bh here to avoid deadlocks and other | 726 | * context. So we need to disable bh here to avoid deadlocks and other |
727 | * side effects. | 727 | * side effects. |
728 | */ | 728 | */ |
729 | static irqreturn_t | 729 | static irqreturn_t |
730 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) | 730 | irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) |
731 | { | 731 | { |
732 | irqreturn_t ret; | 732 | irqreturn_t ret; |
733 | 733 | ||
734 | local_bh_disable(); | 734 | local_bh_disable(); |
735 | ret = action->thread_fn(action->irq, action->dev_id); | 735 | ret = action->thread_fn(action->irq, action->dev_id); |
736 | irq_finalize_oneshot(desc, action, false); | 736 | irq_finalize_oneshot(desc, action, false); |
737 | local_bh_enable(); | 737 | local_bh_enable(); |
738 | return ret; | 738 | return ret; |
739 | } | 739 | } |
740 | 740 | ||
741 | /* | 741 | /* |
742 | * Interrupts explicitly requested as threaded interrupts want to be | 742 | * Interrupts explicitly requested as threaded interrupts want to be
743 | * preemptible - many of them need to sleep and wait for slow buses to | 743 | * preemptible - many of them need to sleep and wait for slow buses to
744 | * complete. | 744 | * complete. |
745 | */ | 745 | */ |
746 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, | 746 | static irqreturn_t irq_thread_fn(struct irq_desc *desc, |
747 | struct irqaction *action) | 747 | struct irqaction *action) |
748 | { | 748 | { |
749 | irqreturn_t ret; | 749 | irqreturn_t ret; |
750 | 750 | ||
751 | ret = action->thread_fn(action->irq, action->dev_id); | 751 | ret = action->thread_fn(action->irq, action->dev_id); |
752 | irq_finalize_oneshot(desc, action, false); | 752 | irq_finalize_oneshot(desc, action, false); |
753 | return ret; | 753 | return ret; |
754 | } | 754 | } |
755 | 755 | ||
756 | /* | 756 | /* |
757 | * Interrupt handler thread | 757 | * Interrupt handler thread |
758 | */ | 758 | */ |
759 | static int irq_thread(void *data) | 759 | static int irq_thread(void *data) |
760 | { | 760 | { |
761 | static const struct sched_param param = { | 761 | static const struct sched_param param = { |
762 | .sched_priority = MAX_USER_RT_PRIO/2, | 762 | .sched_priority = MAX_USER_RT_PRIO/2, |
763 | }; | 763 | }; |
764 | struct irqaction *action = data; | 764 | struct irqaction *action = data; |
765 | struct irq_desc *desc = irq_to_desc(action->irq); | 765 | struct irq_desc *desc = irq_to_desc(action->irq); |
766 | irqreturn_t (*handler_fn)(struct irq_desc *desc, | 766 | irqreturn_t (*handler_fn)(struct irq_desc *desc, |
767 | struct irqaction *action); | 767 | struct irqaction *action); |
768 | int wake; | 768 | int wake; |
769 | 769 | ||
770 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, | 770 | if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
771 | &action->thread_flags)) | 771 | &action->thread_flags)) |
772 | handler_fn = irq_forced_thread_fn; | 772 | handler_fn = irq_forced_thread_fn; |
773 | else | 773 | else |
774 | handler_fn = irq_thread_fn; | 774 | handler_fn = irq_thread_fn; |
775 | 775 | ||
776 | sched_setscheduler(current, SCHED_FIFO, ¶m); | 776 | sched_setscheduler(current, SCHED_FIFO, ¶m); |
777 | current->irqaction = action; | 777 | current->irqaction = action; |
778 | 778 | ||
779 | while (!irq_wait_for_interrupt(action)) { | 779 | while (!irq_wait_for_interrupt(action)) { |
780 | 780 | ||
781 | irq_thread_check_affinity(desc, action); | 781 | irq_thread_check_affinity(desc, action); |
782 | 782 | ||
783 | atomic_inc(&desc->threads_active); | 783 | atomic_inc(&desc->threads_active); |
784 | 784 | ||
785 | raw_spin_lock_irq(&desc->lock); | 785 | raw_spin_lock_irq(&desc->lock); |
786 | if (unlikely(irqd_irq_disabled(&desc->irq_data))) { | 786 | if (unlikely(irqd_irq_disabled(&desc->irq_data))) { |
787 | /* | 787 | /* |
788 | * CHECKME: We might need a dedicated | 788 | * CHECKME: We might need a dedicated |
789 | * IRQ_THREAD_PENDING flag here, which | 789 | * IRQ_THREAD_PENDING flag here, which |
790 | * retriggers the thread in check_irq_resend() | 790 | * retriggers the thread in check_irq_resend() |
791 | * but AFAICT IRQS_PENDING should be fine as it | 791 | * but AFAICT IRQS_PENDING should be fine as it |
792 | * retriggers the interrupt itself --- tglx | 792 | * retriggers the interrupt itself --- tglx |
793 | */ | 793 | */ |
794 | desc->istate |= IRQS_PENDING; | 794 | desc->istate |= IRQS_PENDING; |
795 | raw_spin_unlock_irq(&desc->lock); | 795 | raw_spin_unlock_irq(&desc->lock); |
796 | } else { | 796 | } else { |
797 | irqreturn_t action_ret; | 797 | irqreturn_t action_ret; |
798 | 798 | ||
799 | raw_spin_unlock_irq(&desc->lock); | 799 | raw_spin_unlock_irq(&desc->lock); |
800 | action_ret = handler_fn(desc, action); | 800 | action_ret = handler_fn(desc, action); |
801 | if (!noirqdebug) | 801 | if (!noirqdebug) |
802 | note_interrupt(action->irq, desc, action_ret); | 802 | note_interrupt(action->irq, desc, action_ret); |
803 | } | 803 | } |
804 | 804 | ||
805 | wake = atomic_dec_and_test(&desc->threads_active); | 805 | wake = atomic_dec_and_test(&desc->threads_active); |
806 | 806 | ||
807 | if (wake && waitqueue_active(&desc->wait_for_threads)) | 807 | if (wake && waitqueue_active(&desc->wait_for_threads)) |
808 | wake_up(&desc->wait_for_threads); | 808 | wake_up(&desc->wait_for_threads); |
809 | } | 809 | } |
810 | 810 | ||
811 | /* Prevent a stale desc->threads_oneshot */ | 811 | /* Prevent a stale desc->threads_oneshot */ |
812 | irq_finalize_oneshot(desc, action, true); | 812 | irq_finalize_oneshot(desc, action, true); |
813 | 813 | ||
814 | /* | 814 | /* |
815 | * Clear irqaction. Otherwise exit_irq_thread() would make | 815 | * Clear irqaction. Otherwise exit_irq_thread() would make |
816 | * a fuss about an active irq thread going into nirvana. | 816 | * a fuss about an active irq thread going into nirvana.
817 | */ | 817 | */ |
818 | current->irqaction = NULL; | 818 | current->irqaction = NULL; |
819 | return 0; | 819 | return 0; |
820 | } | 820 | } |
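The threads_active counter and wait_for_threads queue managed above are what synchronize_irq() sleeps on while threaded handlers are still running. Roughly, as a sketch of the waiter side only (the real code also spins while IRQD_IRQ_INPROGRESS is set):

	wait_event(desc->wait_for_threads,
		   !atomic_read(&desc->threads_active));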
821 | 821 | ||
822 | /* | 822 | /* |
823 | * Called from do_exit() | 823 | * Called from do_exit() |
824 | */ | 824 | */ |
825 | void exit_irq_thread(void) | 825 | void exit_irq_thread(void) |
826 | { | 826 | { |
827 | struct task_struct *tsk = current; | 827 | struct task_struct *tsk = current; |
828 | struct irq_desc *desc; | 828 | struct irq_desc *desc; |
829 | 829 | ||
830 | if (!tsk->irqaction) | 830 | if (!tsk->irqaction) |
831 | return; | 831 | return; |
832 | 832 | ||
833 | printk(KERN_ERR | 833 | printk(KERN_ERR |
834 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", | 834 | "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", |
835 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); | 835 | tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); |
836 | 836 | ||
837 | desc = irq_to_desc(tsk->irqaction->irq); | 837 | desc = irq_to_desc(tsk->irqaction->irq); |
838 | 838 | ||
839 | /* | 839 | /* |
840 | * Prevent a stale desc->threads_oneshot. Must be called | 840 | * Prevent a stale desc->threads_oneshot. Must be called |
841 | * before setting the IRQTF_DIED flag. | 841 | * before setting the IRQTF_DIED flag. |
842 | */ | 842 | */ |
843 | irq_finalize_oneshot(desc, tsk->irqaction, true); | 843 | irq_finalize_oneshot(desc, tsk->irqaction, true); |
844 | 844 | ||
845 | /* | 845 | /* |
846 | * Set the THREAD DIED flag to prevent further wakeups of the | 846 | * Set the THREAD DIED flag to prevent further wakeups of the |
847 | * soon to be gone threaded handler. | 847 | * soon to be gone threaded handler. |
848 | */ | 848 | */ |
849 | set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags); | 849 | set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags); |
850 | } | 850 | } |
851 | 851 | ||
852 | static void irq_setup_forced_threading(struct irqaction *new) | 852 | static void irq_setup_forced_threading(struct irqaction *new) |
853 | { | 853 | { |
854 | if (!force_irqthreads) | 854 | if (!force_irqthreads) |
855 | return; | 855 | return; |
856 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) | 856 | if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) |
857 | return; | 857 | return; |
858 | 858 | ||
859 | new->flags |= IRQF_ONESHOT; | 859 | new->flags |= IRQF_ONESHOT; |
860 | 860 | ||
861 | if (!new->thread_fn) { | 861 | if (!new->thread_fn) { |
862 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); | 862 | set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); |
863 | new->thread_fn = new->handler; | 863 | new->thread_fn = new->handler; |
864 | new->handler = irq_default_primary_handler; | 864 | new->handler = irq_default_primary_handler; |
865 | } | 865 | } |
866 | } | 866 | } |
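For context, a hedged sketch of what forced threading does to a plain registration when the "threadirqs" boot option is set: the primary handler is demoted to the thread function and the default primary handler is installed, so

	request_irq(irq, my_handler, 0, "mydev", dev);

behaves roughly like

	request_threaded_irq(irq, NULL, my_handler, IRQF_ONESHOT,
			     "mydev", dev);

where my_handler, "mydev" and dev are illustrative names, not from this file.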
867 | 867 | ||
868 | /* | 868 | /* |
869 | * Internal function to register an irqaction - typically used to | 869 | * Internal function to register an irqaction - typically used to |
870 | * allocate special interrupts that are part of the architecture. | 870 | * allocate special interrupts that are part of the architecture. |
871 | */ | 871 | */ |
872 | static int | 872 | static int |
873 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | 873 | __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) |
874 | { | 874 | { |
875 | struct irqaction *old, **old_ptr; | 875 | struct irqaction *old, **old_ptr; |
876 | const char *old_name = NULL; | 876 | const char *old_name = NULL; |
877 | unsigned long flags, thread_mask = 0; | 877 | unsigned long flags, thread_mask = 0; |
878 | int ret, nested, shared = 0; | 878 | int ret, nested, shared = 0; |
879 | cpumask_var_t mask; | 879 | cpumask_var_t mask; |
880 | 880 | ||
881 | if (!desc) | 881 | if (!desc) |
882 | return -EINVAL; | 882 | return -EINVAL; |
883 | 883 | ||
884 | if (desc->irq_data.chip == &no_irq_chip) | 884 | if (desc->irq_data.chip == &no_irq_chip) |
885 | return -ENOSYS; | 885 | return -ENOSYS; |
886 | if (!try_module_get(desc->owner)) | ||
887 | return -ENODEV; | ||
886 | /* | 888 | /* |
887 | * Some drivers like serial.c use request_irq() heavily, | 889 | * Some drivers like serial.c use request_irq() heavily, |
888 | * so we have to be careful not to interfere with a | 890 | * so we have to be careful not to interfere with a |
889 | * running system. | 891 | * running system. |
890 | */ | 892 | */ |
891 | if (new->flags & IRQF_SAMPLE_RANDOM) { | 893 | if (new->flags & IRQF_SAMPLE_RANDOM) { |
892 | /* | 894 | /* |
893 | * This function might sleep, so we want to call it first, | 895 | * This function might sleep, so we want to call it first, |
894 | * outside of the atomic block. | 896 | * outside of the atomic block. |
895 | * Yes, this might clear the entropy pool if the wrong | 897 | * Yes, this might clear the entropy pool if the wrong |
896 | * driver is attempted to be loaded without actually | 898 | * driver is attempted to be loaded without actually |
897 | * installing a new handler, but is this really a problem? | 899 | * installing a new handler, but is this really a problem? |
898 | * Only the sysadmin is able to do this. | 900 | * Only the sysadmin is able to do this. |
899 | */ | 901 | */ |
900 | rand_initialize_irq(irq); | 902 | rand_initialize_irq(irq); |
901 | } | 903 | } |
902 | 904 | ||
903 | /* | 905 | /* |
904 | * Check whether the interrupt nests into another interrupt | 906 | * Check whether the interrupt nests into another interrupt |
905 | * thread. | 907 | * thread. |
906 | */ | 908 | */ |
907 | nested = irq_settings_is_nested_thread(desc); | 909 | nested = irq_settings_is_nested_thread(desc); |
908 | if (nested) { | 910 | if (nested) { |
909 | if (!new->thread_fn) | 911 | if (!new->thread_fn) { |
910 | return -EINVAL; | 912 | ret = -EINVAL; |
913 | goto out_mput; | ||
914 | } | ||
911 | /* | 915 | /* |
912 | * Replace the primary handler which was provided from | 916 | * Replace the primary handler which was provided from |
913 | * the driver for non nested interrupt handling by the | 917 | * the driver for non nested interrupt handling by the |
914 | * dummy function which warns when called. | 918 | * dummy function which warns when called. |
915 | */ | 919 | */ |
916 | new->handler = irq_nested_primary_handler; | 920 | new->handler = irq_nested_primary_handler; |
917 | } else { | 921 | } else { |
918 | if (irq_settings_can_thread(desc)) | 922 | if (irq_settings_can_thread(desc)) |
919 | irq_setup_forced_threading(new); | 923 | irq_setup_forced_threading(new); |
920 | } | 924 | } |
921 | 925 | ||
922 | /* | 926 | /* |
923 | * Create a handler thread when a thread function is supplied | 927 | * Create a handler thread when a thread function is supplied |
924 | * and the interrupt does not nest into another interrupt | 928 | * and the interrupt does not nest into another interrupt |
925 | * thread. | 929 | * thread. |
926 | */ | 930 | */ |
927 | if (new->thread_fn && !nested) { | 931 | if (new->thread_fn && !nested) { |
928 | struct task_struct *t; | 932 | struct task_struct *t; |
929 | 933 | ||
930 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, | 934 | t = kthread_create(irq_thread, new, "irq/%d-%s", irq, |
931 | new->name); | 935 | new->name); |
932 | if (IS_ERR(t)) | 936 | if (IS_ERR(t)) { |
933 | return PTR_ERR(t); | 937 | ret = PTR_ERR(t); |
938 | goto out_mput; | ||
939 | } | ||
934 | /* | 940 | /* |
935 | * We keep the reference to the task struct even if | 941 | * We keep the reference to the task struct even if |
936 | * the thread dies to avoid that the interrupt code | 942 | * the thread dies to avoid that the interrupt code |
937 | * references an already freed task_struct. | 943 | * references an already freed task_struct. |
938 | */ | 944 | */ |
939 | get_task_struct(t); | 945 | get_task_struct(t); |
940 | new->thread = t; | 946 | new->thread = t; |
941 | } | 947 | } |
942 | 948 | ||
943 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { | 949 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { |
944 | ret = -ENOMEM; | 950 | ret = -ENOMEM; |
945 | goto out_thread; | 951 | goto out_thread; |
946 | } | 952 | } |
947 | 953 | ||
948 | /* | 954 | /* |
949 | * The following block of code has to be executed atomically | 955 | * The following block of code has to be executed atomically |
950 | */ | 956 | */ |
951 | raw_spin_lock_irqsave(&desc->lock, flags); | 957 | raw_spin_lock_irqsave(&desc->lock, flags); |
952 | old_ptr = &desc->action; | 958 | old_ptr = &desc->action; |
953 | old = *old_ptr; | 959 | old = *old_ptr; |
954 | if (old) { | 960 | if (old) { |
955 | /* | 961 | /* |
956 | * Can't share interrupts unless both agree to and are | 962 | * Can't share interrupts unless both agree to and are |
957 | * the same type (level, edge, polarity). So both flag | 963 | * the same type (level, edge, polarity). So both flag |
958 | * fields must have IRQF_SHARED set and the bits which | 964 | * fields must have IRQF_SHARED set and the bits which |
959 | * set the trigger type must match. Also all must | 965 | * set the trigger type must match. Also all must |
960 | * agree on ONESHOT. | 966 | * agree on ONESHOT. |
961 | */ | 967 | */ |
962 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 968 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
963 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || | 969 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || |
964 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) { | 970 | ((old->flags ^ new->flags) & IRQF_ONESHOT)) { |
965 | old_name = old->name; | 971 | old_name = old->name; |
966 | goto mismatch; | 972 | goto mismatch; |
967 | } | 973 | } |
968 | 974 | ||
969 | /* All handlers must agree on per-cpuness */ | 975 | /* All handlers must agree on per-cpuness */ |
970 | if ((old->flags & IRQF_PERCPU) != | 976 | if ((old->flags & IRQF_PERCPU) != |
971 | (new->flags & IRQF_PERCPU)) | 977 | (new->flags & IRQF_PERCPU)) |
972 | goto mismatch; | 978 | goto mismatch; |
973 | 979 | ||
974 | /* add new interrupt at end of irq queue */ | 980 | /* add new interrupt at end of irq queue */ |
975 | do { | 981 | do { |
976 | thread_mask |= old->thread_mask; | 982 | thread_mask |= old->thread_mask; |
977 | old_ptr = &old->next; | 983 | old_ptr = &old->next; |
978 | old = *old_ptr; | 984 | old = *old_ptr; |
979 | } while (old); | 985 | } while (old); |
980 | shared = 1; | 986 | shared = 1; |
981 | } | 987 | } |
982 | 988 | ||
983 | /* | 989 | /* |
984 | * Set up the thread mask for this irqaction. It is unlikely | 990 | * Set up the thread mask for this irqaction. It is unlikely |
985 | * that 32 (or, on 64-bit, 64) irqs share one line, but who knows. | 991 | * that 32 (or, on 64-bit, 64) irqs share one line, but who knows. |
986 | */ | 992 | */ |
987 | if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) { | 993 | if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) { |
988 | ret = -EBUSY; | 994 | ret = -EBUSY; |
989 | goto out_mask; | 995 | goto out_mask; |
990 | } | 996 | } |
991 | new->thread_mask = 1 << ffz(thread_mask); | 997 | new->thread_mask = 1 << ffz(thread_mask); |
992 | 998 | ||
993 | if (!shared) { | 999 | if (!shared) { |
994 | init_waitqueue_head(&desc->wait_for_threads); | 1000 | init_waitqueue_head(&desc->wait_for_threads); |
995 | 1001 | ||
996 | /* Set up the type (level, edge, polarity) if configured: */ | 1002 | /* Set up the type (level, edge, polarity) if configured: */ |
997 | if (new->flags & IRQF_TRIGGER_MASK) { | 1003 | if (new->flags & IRQF_TRIGGER_MASK) { |
998 | ret = __irq_set_trigger(desc, irq, | 1004 | ret = __irq_set_trigger(desc, irq, |
999 | new->flags & IRQF_TRIGGER_MASK); | 1005 | new->flags & IRQF_TRIGGER_MASK); |
1000 | 1006 | ||
1001 | if (ret) | 1007 | if (ret) |
1002 | goto out_mask; | 1008 | goto out_mask; |
1003 | } | 1009 | } |
1004 | 1010 | ||
1005 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ | 1011 | desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ |
1006 | IRQS_ONESHOT | IRQS_WAITING); | 1012 | IRQS_ONESHOT | IRQS_WAITING); |
1007 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); | 1013 | irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); |
1008 | 1014 | ||
1009 | if (new->flags & IRQF_PERCPU) { | 1015 | if (new->flags & IRQF_PERCPU) { |
1010 | irqd_set(&desc->irq_data, IRQD_PER_CPU); | 1016 | irqd_set(&desc->irq_data, IRQD_PER_CPU); |
1011 | irq_settings_set_per_cpu(desc); | 1017 | irq_settings_set_per_cpu(desc); |
1012 | } | 1018 | } |
1013 | 1019 | ||
1014 | if (new->flags & IRQF_ONESHOT) | 1020 | if (new->flags & IRQF_ONESHOT) |
1015 | desc->istate |= IRQS_ONESHOT; | 1021 | desc->istate |= IRQS_ONESHOT; |
1016 | 1022 | ||
1017 | if (irq_settings_can_autoenable(desc)) | 1023 | if (irq_settings_can_autoenable(desc)) |
1018 | irq_startup(desc); | 1024 | irq_startup(desc); |
1019 | else | 1025 | else |
1020 | /* Undo nested disables: */ | 1026 | /* Undo nested disables: */ |
1021 | desc->depth = 1; | 1027 | desc->depth = 1; |
1022 | 1028 | ||
1023 | /* Exclude IRQ from balancing if requested */ | 1029 | /* Exclude IRQ from balancing if requested */ |
1024 | if (new->flags & IRQF_NOBALANCING) { | 1030 | if (new->flags & IRQF_NOBALANCING) { |
1025 | irq_settings_set_no_balancing(desc); | 1031 | irq_settings_set_no_balancing(desc); |
1026 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); | 1032 | irqd_set(&desc->irq_data, IRQD_NO_BALANCING); |
1027 | } | 1033 | } |
1028 | 1034 | ||
1029 | /* Set default affinity mask once everything is set up */ | 1035 | /* Set default affinity mask once everything is set up */ |
1030 | setup_affinity(irq, desc, mask); | 1036 | setup_affinity(irq, desc, mask); |
1031 | 1037 | ||
1032 | } else if (new->flags & IRQF_TRIGGER_MASK) { | 1038 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
1033 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; | 1039 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
1034 | unsigned int omsk = irq_settings_get_trigger_mask(desc); | 1040 | unsigned int omsk = irq_settings_get_trigger_mask(desc); |
1035 | 1041 | ||
1036 | if (nmsk != omsk) | 1042 | if (nmsk != omsk) |
1037 | /* hope the handler works with current trigger mode */ | 1043 | /* hope the handler works with current trigger mode */ |
1038 | pr_warning("IRQ %d uses trigger mode %u; requested %u\n", | 1044 | pr_warning("IRQ %d uses trigger mode %u; requested %u\n", |
1039 | irq, omsk, nmsk); | 1045 | irq, omsk, nmsk); |
1040 | } | 1046 | } |
1041 | 1047 | ||
1042 | new->irq = irq; | 1048 | new->irq = irq; |
1043 | *old_ptr = new; | 1049 | *old_ptr = new; |
1044 | 1050 | ||
1045 | /* Reset broken irq detection when installing new handler */ | 1051 | /* Reset broken irq detection when installing new handler */ |
1046 | desc->irq_count = 0; | 1052 | desc->irq_count = 0; |
1047 | desc->irqs_unhandled = 0; | 1053 | desc->irqs_unhandled = 0; |
1048 | 1054 | ||
1049 | /* | 1055 | /* |
1050 | * Check whether we disabled the irq via the spurious handler | 1056 | * Check whether we disabled the irq via the spurious handler |
1051 | * before. Reenable it and give it another chance. | 1057 | * before. Reenable it and give it another chance. |
1052 | */ | 1058 | */ |
1053 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { | 1059 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
1054 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; | 1060 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
1055 | __enable_irq(desc, irq, false); | 1061 | __enable_irq(desc, irq, false); |
1056 | } | 1062 | } |
1057 | 1063 | ||
1058 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1064 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1059 | 1065 | ||
1060 | /* | 1066 | /* |
1061 | * Strictly no need to wake it up, but hung_task complains | 1067 | * Strictly no need to wake it up, but hung_task complains |
1062 | * when no hard interrupt wakes the thread up. | 1068 | * when no hard interrupt wakes the thread up. |
1063 | */ | 1069 | */ |
1064 | if (new->thread) | 1070 | if (new->thread) |
1065 | wake_up_process(new->thread); | 1071 | wake_up_process(new->thread); |
1066 | 1072 | ||
1067 | register_irq_proc(irq, desc); | 1073 | register_irq_proc(irq, desc); |
1068 | new->dir = NULL; | 1074 | new->dir = NULL; |
1069 | register_handler_proc(irq, new); | 1075 | register_handler_proc(irq, new); |
1070 | free_cpumask_var(mask); | 1076 | free_cpumask_var(mask); |
1071 | 1077 | ||
1072 | return 0; | 1078 | return 0; |
1073 | 1079 | ||
1074 | mismatch: | 1080 | mismatch: |
1075 | #ifdef CONFIG_DEBUG_SHIRQ | 1081 | #ifdef CONFIG_DEBUG_SHIRQ |
1076 | if (!(new->flags & IRQF_PROBE_SHARED)) { | 1082 | if (!(new->flags & IRQF_PROBE_SHARED)) { |
1077 | printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); | 1083 | printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); |
1078 | if (old_name) | 1084 | if (old_name) |
1079 | printk(KERN_ERR "current handler: %s\n", old_name); | 1085 | printk(KERN_ERR "current handler: %s\n", old_name); |
1080 | dump_stack(); | 1086 | dump_stack(); |
1081 | } | 1087 | } |
1082 | #endif | 1088 | #endif |
1083 | ret = -EBUSY; | 1089 | ret = -EBUSY; |
1084 | 1090 | ||
1085 | out_mask: | 1091 | out_mask: |
1086 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1092 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1087 | free_cpumask_var(mask); | 1093 | free_cpumask_var(mask); |
1088 | 1094 | ||
1089 | out_thread: | 1095 | out_thread: |
1090 | if (new->thread) { | 1096 | if (new->thread) { |
1091 | struct task_struct *t = new->thread; | 1097 | struct task_struct *t = new->thread; |
1092 | 1098 | ||
1093 | new->thread = NULL; | 1099 | new->thread = NULL; |
1094 | if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) | 1100 | if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) |
1095 | kthread_stop(t); | 1101 | kthread_stop(t); |
1096 | put_task_struct(t); | 1102 | put_task_struct(t); |
1097 | } | 1103 | } |
1104 | out_mput: | ||
1105 | module_put(desc->owner); | ||
1098 | return ret; | 1106 | return ret; |
1099 | } | 1107 | } |
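The sharing checks above mean a line can only be shared when every requester agrees on IRQF_SHARED, the trigger type, ONESHOT and per-cpuness. A sketch with illustrative names (LINE, a_isr, b_isr and the dev cookies are assumptions):

	/* Both requests agree on IRQF_SHARED and the trigger type: */
	request_irq(LINE, a_isr, IRQF_SHARED | IRQF_TRIGGER_HIGH, "a", a_dev);
	request_irq(LINE, b_isr, IRQF_SHARED | IRQF_TRIGGER_HIGH, "b", b_dev);

	/*
	 * A request without IRQF_SHARED, or with a different trigger such as
	 * IRQF_TRIGGER_LOW, takes the mismatch path above and gets -EBUSY.
	 */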
1100 | 1108 | ||
1101 | /** | 1109 | /** |
1102 | * setup_irq - setup an interrupt | 1110 | * setup_irq - setup an interrupt |
1103 | * @irq: Interrupt line to setup | 1111 | * @irq: Interrupt line to setup |
1104 | * @act: irqaction for the interrupt | 1112 | * @act: irqaction for the interrupt |
1105 | * | 1113 | * |
1106 | * Used to statically set up interrupts in the early boot process. | 1114 | * Used to statically set up interrupts in the early boot process. |
1107 | */ | 1115 | */ |
1108 | int setup_irq(unsigned int irq, struct irqaction *act) | 1116 | int setup_irq(unsigned int irq, struct irqaction *act) |
1109 | { | 1117 | { |
1110 | int retval; | 1118 | int retval; |
1111 | struct irq_desc *desc = irq_to_desc(irq); | 1119 | struct irq_desc *desc = irq_to_desc(irq); |
1112 | 1120 | ||
1113 | chip_bus_lock(desc); | 1121 | chip_bus_lock(desc); |
1114 | retval = __setup_irq(irq, desc, act); | 1122 | retval = __setup_irq(irq, desc, act); |
1115 | chip_bus_sync_unlock(desc); | 1123 | chip_bus_sync_unlock(desc); |
1116 | 1124 | ||
1117 | return retval; | 1125 | return retval; |
1118 | } | 1126 | } |
1119 | EXPORT_SYMBOL_GPL(setup_irq); | 1127 | EXPORT_SYMBOL_GPL(setup_irq); |
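A usage sketch: early boot code registers a static irqaction before the normal allocators are available. TIMER_IRQ and the handler below are illustrative, not from this tree:

	static irqreturn_t timer_interrupt(int irq, void *dev_id)
	{
		/* ... tick bookkeeping ... */
		return IRQ_HANDLED;
	}

	static struct irqaction timer_irqaction = {
		.handler	= timer_interrupt,
		.flags		= IRQF_TIMER,
		.name		= "timer",
	};

	void __init time_init_irq(void)
	{
		setup_irq(TIMER_IRQ, &timer_irqaction);
	}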
1120 | 1128 | ||
1121 | /* | 1129 | /* |
1122 | * Internal function to unregister an irqaction - used to free | 1130 | * Internal function to unregister an irqaction - used to free |
1123 | * regular and special interrupts that are part of the architecture. | 1131 | * regular and special interrupts that are part of the architecture. |
1124 | */ | 1132 | */ |
1125 | static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | 1133 | static struct irqaction *__free_irq(unsigned int irq, void *dev_id) |
1126 | { | 1134 | { |
1127 | struct irq_desc *desc = irq_to_desc(irq); | 1135 | struct irq_desc *desc = irq_to_desc(irq); |
1128 | struct irqaction *action, **action_ptr; | 1136 | struct irqaction *action, **action_ptr; |
1129 | unsigned long flags; | 1137 | unsigned long flags; |
1130 | 1138 | ||
1131 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); | 1139 | WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); |
1132 | 1140 | ||
1133 | if (!desc) | 1141 | if (!desc) |
1134 | return NULL; | 1142 | return NULL; |
1135 | 1143 | ||
1136 | raw_spin_lock_irqsave(&desc->lock, flags); | 1144 | raw_spin_lock_irqsave(&desc->lock, flags); |
1137 | 1145 | ||
1138 | /* | 1146 | /* |
1139 | * There can be multiple actions per IRQ descriptor, find the right | 1147 | * There can be multiple actions per IRQ descriptor, find the right |
1140 | * one based on the dev_id: | 1148 | * one based on the dev_id: |
1141 | */ | 1149 | */ |
1142 | action_ptr = &desc->action; | 1150 | action_ptr = &desc->action; |
1143 | for (;;) { | 1151 | for (;;) { |
1144 | action = *action_ptr; | 1152 | action = *action_ptr; |
1145 | 1153 | ||
1146 | if (!action) { | 1154 | if (!action) { |
1147 | WARN(1, "Trying to free already-free IRQ %d\n", irq); | 1155 | WARN(1, "Trying to free already-free IRQ %d\n", irq); |
1148 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1156 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1149 | 1157 | ||
1150 | return NULL; | 1158 | return NULL; |
1151 | } | 1159 | } |
1152 | 1160 | ||
1153 | if (action->dev_id == dev_id) | 1161 | if (action->dev_id == dev_id) |
1154 | break; | 1162 | break; |
1155 | action_ptr = &action->next; | 1163 | action_ptr = &action->next; |
1156 | } | 1164 | } |
1157 | 1165 | ||
1158 | /* Found it - now remove it from the list of entries: */ | 1166 | /* Found it - now remove it from the list of entries: */ |
1159 | *action_ptr = action->next; | 1167 | *action_ptr = action->next; |
1160 | 1168 | ||
1161 | /* Currently used only by UML, might disappear one day: */ | 1169 | /* Currently used only by UML, might disappear one day: */ |
1162 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 1170 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
1163 | if (desc->irq_data.chip->release) | 1171 | if (desc->irq_data.chip->release) |
1164 | desc->irq_data.chip->release(irq, dev_id); | 1172 | desc->irq_data.chip->release(irq, dev_id); |
1165 | #endif | 1173 | #endif |
1166 | 1174 | ||
1167 | /* If this was the last handler, shut down the IRQ line: */ | 1175 | /* If this was the last handler, shut down the IRQ line: */ |
1168 | if (!desc->action) | 1176 | if (!desc->action) |
1169 | irq_shutdown(desc); | 1177 | irq_shutdown(desc); |
1170 | 1178 | ||
1171 | #ifdef CONFIG_SMP | 1179 | #ifdef CONFIG_SMP |
1172 | /* make sure affinity_hint is cleaned up */ | 1180 | /* make sure affinity_hint is cleaned up */ |
1173 | if (WARN_ON_ONCE(desc->affinity_hint)) | 1181 | if (WARN_ON_ONCE(desc->affinity_hint)) |
1174 | desc->affinity_hint = NULL; | 1182 | desc->affinity_hint = NULL; |
1175 | #endif | 1183 | #endif |
1176 | 1184 | ||
1177 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1185 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
1178 | 1186 | ||
1179 | unregister_handler_proc(irq, action); | 1187 | unregister_handler_proc(irq, action); |
1180 | 1188 | ||
1181 | /* Make sure it's not being used on another CPU: */ | 1189 | /* Make sure it's not being used on another CPU: */ |
1182 | synchronize_irq(irq); | 1190 | synchronize_irq(irq); |
1183 | 1191 | ||
1184 | #ifdef CONFIG_DEBUG_SHIRQ | 1192 | #ifdef CONFIG_DEBUG_SHIRQ |
1185 | /* | 1193 | /* |
1186 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ | 1194 | * It's a shared IRQ -- the driver ought to be prepared for an IRQ |
1187 | * event to happen even now that it's being freed, so let's make sure that | 1195 | * event to happen even now that it's being freed, so let's make sure that |
1188 | * is so by doing an extra call to the handler .... | 1196 | * is so by doing an extra call to the handler .... |
1189 | * | 1197 | * |
1190 | * ( We do this after actually deregistering it, to make sure that a | 1198 | * ( We do this after actually deregistering it, to make sure that a |
1191 | * 'real' IRQ doesn't run in parallel with our fake. ) | 1199 | * 'real' IRQ doesn't run in parallel with our fake. ) |
1192 | */ | 1200 | */ |
1193 | if (action->flags & IRQF_SHARED) { | 1201 | if (action->flags & IRQF_SHARED) { |
1194 | local_irq_save(flags); | 1202 | local_irq_save(flags); |
1195 | action->handler(irq, dev_id); | 1203 | action->handler(irq, dev_id); |
1196 | local_irq_restore(flags); | 1204 | local_irq_restore(flags); |
1197 | } | 1205 | } |
1198 | #endif | 1206 | #endif |
1199 | 1207 | ||
1200 | if (action->thread) { | 1208 | if (action->thread) { |
1201 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) | 1209 | if (!test_bit(IRQTF_DIED, &action->thread_flags)) |
1202 | kthread_stop(action->thread); | 1210 | kthread_stop(action->thread); |
1203 | put_task_struct(action->thread); | 1211 | put_task_struct(action->thread); |
1204 | } | 1212 | } |
1205 | 1213 | ||
1214 | module_put(desc->owner); | ||
1206 | return action; | 1215 | return action; |
1207 | } | 1216 | } |
1208 | 1217 | ||
1209 | /** | 1218 | /** |
1210 | * remove_irq - free an interrupt | 1219 | * remove_irq - free an interrupt |
1211 | * @irq: Interrupt line to free | 1220 | * @irq: Interrupt line to free |
1212 | * @act: irqaction for the interrupt | 1221 | * @act: irqaction for the interrupt |
1213 | * | 1222 | * |
1214 | * Used to remove interrupts statically set up by the early boot process. | 1223 | * Used to remove interrupts statically set up by the early boot process. |
1215 | */ | 1224 | */ |
1216 | void remove_irq(unsigned int irq, struct irqaction *act) | 1225 | void remove_irq(unsigned int irq, struct irqaction *act) |
1217 | { | 1226 | { |
1218 | __free_irq(irq, act->dev_id); | 1227 | __free_irq(irq, act->dev_id); |
1219 | } | 1228 | } |
1220 | EXPORT_SYMBOL_GPL(remove_irq); | 1229 | EXPORT_SYMBOL_GPL(remove_irq); |
1221 | 1230 | ||
1222 | /** | 1231 | /** |
1223 | * free_irq - free an interrupt allocated with request_irq | 1232 | * free_irq - free an interrupt allocated with request_irq |
1224 | * @irq: Interrupt line to free | 1233 | * @irq: Interrupt line to free |
1225 | * @dev_id: Device identity to free | 1234 | * @dev_id: Device identity to free |
1226 | * | 1235 | * |
1227 | * Remove an interrupt handler. The handler is removed and if the | 1236 | * Remove an interrupt handler. The handler is removed and if the |
1228 | * interrupt line is no longer in use by any driver it is disabled. | 1237 | * interrupt line is no longer in use by any driver it is disabled. |
1229 | * On a shared IRQ the caller must ensure the interrupt is disabled | 1238 | * On a shared IRQ the caller must ensure the interrupt is disabled |
1230 | * on the card it drives before calling this function. The function | 1239 | * on the card it drives before calling this function. The function |
1231 | * does not return until any executing interrupts for this IRQ | 1240 | * does not return until any executing interrupts for this IRQ |
1232 | * have completed. | 1241 | * have completed. |
1233 | * | 1242 | * |
1234 | * This function must not be called from interrupt context. | 1243 | * This function must not be called from interrupt context. |
1235 | */ | 1244 | */ |
1236 | void free_irq(unsigned int irq, void *dev_id) | 1245 | void free_irq(unsigned int irq, void *dev_id) |
1237 | { | 1246 | { |
1238 | struct irq_desc *desc = irq_to_desc(irq); | 1247 | struct irq_desc *desc = irq_to_desc(irq); |
1239 | 1248 | ||
1240 | if (!desc) | 1249 | if (!desc) |
1241 | return; | 1250 | return; |
1242 | 1251 | ||
1243 | #ifdef CONFIG_SMP | 1252 | #ifdef CONFIG_SMP |
1244 | if (WARN_ON(desc->affinity_notify)) | 1253 | if (WARN_ON(desc->affinity_notify)) |
1245 | desc->affinity_notify = NULL; | 1254 | desc->affinity_notify = NULL; |
1246 | #endif | 1255 | #endif |
1247 | 1256 | ||
1248 | chip_bus_lock(desc); | 1257 | chip_bus_lock(desc); |
1249 | kfree(__free_irq(irq, dev_id)); | 1258 | kfree(__free_irq(irq, dev_id)); |
1250 | chip_bus_sync_unlock(desc); | 1259 | chip_bus_sync_unlock(desc); |
1251 | } | 1260 | } |
1252 | EXPORT_SYMBOL(free_irq); | 1261 | EXPORT_SYMBOL(free_irq); |
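A hedged sketch of the teardown order the comment above asks for on a shared line; struct mydev and the mydev_* helpers are illustrative:

	static void mydev_shutdown(struct mydev *dev)
	{
		/* Stop the card raising interrupts before the handler goes away. */
		mydev_disable_device_irqs(dev);

		/* Waits for any running hard and threaded handlers to complete. */
		free_irq(dev->irq, dev);
	}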
1253 | 1262 | ||
1254 | /** | 1263 | /** |
1255 | * request_threaded_irq - allocate an interrupt line | 1264 | * request_threaded_irq - allocate an interrupt line |
1256 | * @irq: Interrupt line to allocate | 1265 | * @irq: Interrupt line to allocate |
1257 | * @handler: Function to be called when the IRQ occurs. | 1266 | * @handler: Function to be called when the IRQ occurs. |
1258 | * Primary handler for threaded interrupts | 1267 | * Primary handler for threaded interrupts |
1259 | * If NULL and thread_fn != NULL the default | 1268 | * If NULL and thread_fn != NULL the default |
1260 | * primary handler is installed | 1269 | * primary handler is installed |
1261 | * @thread_fn: Function called from the irq handler thread | 1270 | * @thread_fn: Function called from the irq handler thread |
1262 | * If NULL, no irq thread is created | 1271 | * If NULL, no irq thread is created |
1263 | * @irqflags: Interrupt type flags | 1272 | * @irqflags: Interrupt type flags |
1264 | * @devname: An ascii name for the claiming device | 1273 | * @devname: An ascii name for the claiming device |
1265 | * @dev_id: A cookie passed back to the handler function | 1274 | * @dev_id: A cookie passed back to the handler function |
1266 | * | 1275 | * |
1267 | * This call allocates interrupt resources and enables the | 1276 | * This call allocates interrupt resources and enables the |
1268 | * interrupt line and IRQ handling. From the point this | 1277 | * interrupt line and IRQ handling. From the point this |
1269 | * call is made your handler function may be invoked. Since | 1278 | * call is made your handler function may be invoked. Since |
1270 | * your handler function must clear any interrupt the board | 1279 | * your handler function must clear any interrupt the board |
1271 | * raises, you must take care both to initialise your hardware | 1280 | * raises, you must take care both to initialise your hardware |
1272 | * and to set up the interrupt handler in the right order. | 1281 | * and to set up the interrupt handler in the right order. |
1273 | * | 1282 | * |
1274 | * If you want to set up a threaded irq handler for your device | 1283 | * If you want to set up a threaded irq handler for your device |
1275 | * then you need to supply @handler and @thread_fn. @handler is | 1284 | * then you need to supply @handler and @thread_fn. @handler is |
1276 | * still called in hard interrupt context and has to check | 1285 | * still called in hard interrupt context and has to check |
1277 | * whether the interrupt originates from the device. If yes it | 1286 | * whether the interrupt originates from the device. If yes it |
1278 | * needs to disable the interrupt on the device and return | 1287 | * needs to disable the interrupt on the device and return |
1279 | * IRQ_WAKE_THREAD which will wake up the handler thread and run | 1288 | * IRQ_WAKE_THREAD which will wake up the handler thread and run |
1280 | * @thread_fn. This split handler design is necessary to support | 1289 | * @thread_fn. This split handler design is necessary to support |
1281 | * shared interrupts. | 1290 | * shared interrupts. |
1282 | * | 1291 | * |
1283 | * Dev_id must be globally unique. Normally the address of the | 1292 | * Dev_id must be globally unique. Normally the address of the |
1284 | * device data structure is used as the cookie. Since the handler | 1293 | * device data structure is used as the cookie. Since the handler |
1285 | * receives this value it makes sense to use it. | 1294 | * receives this value it makes sense to use it. |
1286 | * | 1295 | * |
1287 | * If your interrupt is shared you must pass a non-NULL dev_id | 1296 | * If your interrupt is shared you must pass a non-NULL dev_id |
1288 | * as this is required when freeing the interrupt. | 1297 | * as this is required when freeing the interrupt. |
1289 | * | 1298 | * |
1290 | * Flags: | 1299 | * Flags: |
1291 | * | 1300 | * |
1292 | * IRQF_SHARED Interrupt is shared | 1301 | * IRQF_SHARED Interrupt is shared |
1293 | * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy | 1302 | * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy |
1294 | * IRQF_TRIGGER_* Specify active edge(s) or level | 1303 | * IRQF_TRIGGER_* Specify active edge(s) or level |
1295 | * | 1304 | * |
1296 | */ | 1305 | */ |
1297 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, | 1306 | int request_threaded_irq(unsigned int irq, irq_handler_t handler, |
1298 | irq_handler_t thread_fn, unsigned long irqflags, | 1307 | irq_handler_t thread_fn, unsigned long irqflags, |
1299 | const char *devname, void *dev_id) | 1308 | const char *devname, void *dev_id) |
1300 | { | 1309 | { |
1301 | struct irqaction *action; | 1310 | struct irqaction *action; |
1302 | struct irq_desc *desc; | 1311 | struct irq_desc *desc; |
1303 | int retval; | 1312 | int retval; |
1304 | 1313 | ||
1305 | /* | 1314 | /* |
1306 | * Sanity-check: shared interrupts must pass in a real dev-ID, | 1315 | * Sanity-check: shared interrupts must pass in a real dev-ID, |
1307 | * otherwise we'll have trouble later trying to figure out | 1316 | * otherwise we'll have trouble later trying to figure out |
1308 | * which interrupt is which (messes up the interrupt freeing | 1317 | * which interrupt is which (messes up the interrupt freeing |
1309 | * logic etc). | 1318 | * logic etc). |
1310 | */ | 1319 | */ |
1311 | if ((irqflags & IRQF_SHARED) && !dev_id) | 1320 | if ((irqflags & IRQF_SHARED) && !dev_id) |
1312 | return -EINVAL; | 1321 | return -EINVAL; |
1313 | 1322 | ||
1314 | desc = irq_to_desc(irq); | 1323 | desc = irq_to_desc(irq); |
1315 | if (!desc) | 1324 | if (!desc) |
1316 | return -EINVAL; | 1325 | return -EINVAL; |
1317 | 1326 | ||
1318 | if (!irq_settings_can_request(desc)) | 1327 | if (!irq_settings_can_request(desc)) |
1319 | return -EINVAL; | 1328 | return -EINVAL; |
1320 | 1329 | ||
1321 | if (!handler) { | 1330 | if (!handler) { |
1322 | if (!thread_fn) | 1331 | if (!thread_fn) |
1323 | return -EINVAL; | 1332 | return -EINVAL; |
1324 | handler = irq_default_primary_handler; | 1333 | handler = irq_default_primary_handler; |
1334 | irqflags |= IRQF_ONESHOT; | ||
1325 | } | 1335 | } |
1326 | 1336 | ||
1327 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | 1337 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); |
1328 | if (!action) | 1338 | if (!action) |
1329 | return -ENOMEM; | 1339 | return -ENOMEM; |
1330 | 1340 | ||
1331 | action->handler = handler; | 1341 | action->handler = handler; |
1332 | action->thread_fn = thread_fn; | 1342 | action->thread_fn = thread_fn; |
1333 | action->flags = irqflags; | 1343 | action->flags = irqflags; |
1334 | action->name = devname; | 1344 | action->name = devname; |
1335 | action->dev_id = dev_id; | 1345 | action->dev_id = dev_id; |
1336 | 1346 | ||
1337 | chip_bus_lock(desc); | 1347 | chip_bus_lock(desc); |
1338 | retval = __setup_irq(irq, desc, action); | 1348 | retval = __setup_irq(irq, desc, action); |
1339 | chip_bus_sync_unlock(desc); | 1349 | chip_bus_sync_unlock(desc); |
1340 | 1350 | ||
1341 | if (retval) | 1351 | if (retval) |
1342 | kfree(action); | 1352 | kfree(action); |
1343 | 1353 | ||
1344 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME | 1354 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1345 | if (!retval && (irqflags & IRQF_SHARED)) { | 1355 | if (!retval && (irqflags & IRQF_SHARED)) { |
1346 | /* | 1356 | /* |
1347 | * It's a shared IRQ -- the driver ought to be prepared for it | 1357 | * It's a shared IRQ -- the driver ought to be prepared for it |
1348 | * to happen immediately, so let's make sure.... | 1358 | * to happen immediately, so let's make sure.... |
1349 | * We disable the irq to make sure that a 'real' IRQ doesn't | 1359 | * We disable the irq to make sure that a 'real' IRQ doesn't |
1350 | * run in parallel with our fake. | 1360 | * run in parallel with our fake. |
1351 | */ | 1361 | */ |
1352 | unsigned long flags; | 1362 | unsigned long flags; |
1353 | 1363 | ||
1354 | disable_irq(irq); | 1364 | disable_irq(irq); |
1355 | local_irq_save(flags); | 1365 | local_irq_save(flags); |
1356 | 1366 | ||
1357 | handler(irq, dev_id); | 1367 | handler(irq, dev_id); |
1358 | 1368 | ||
1359 | local_irq_restore(flags); | 1369 | local_irq_restore(flags); |
1360 | enable_irq(irq); | 1370 | enable_irq(irq); |
1361 | } | 1371 | } |
1362 | #endif | 1372 | #endif |
1363 | return retval; | 1373 | return retval; |
1364 | } | 1374 | } |
1365 | EXPORT_SYMBOL(request_threaded_irq); | 1375 | EXPORT_SYMBOL(request_threaded_irq); |
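A hedged sketch of the split-handler pattern the kernel-doc above describes; struct mydev and the mydev_* helpers are assumptions for illustration:

	static irqreturn_t mydev_primary(int irq, void *dev_id)
	{
		struct mydev *dev = dev_id;

		/* Hard irq context: only check and quiesce the device. */
		if (!mydev_irq_pending(dev))
			return IRQ_NONE;	/* not ours on a shared line */

		mydev_mask_device_irq(dev);
		return IRQ_WAKE_THREAD;		/* defer the real work */
	}

	static irqreturn_t mydev_thread(int irq, void *dev_id)
	{
		struct mydev *dev = dev_id;

		mydev_handle_events(dev);	/* may sleep here */
		mydev_unmask_device_irq(dev);
		return IRQ_HANDLED;
	}

	err = request_threaded_irq(dev->irq, mydev_primary, mydev_thread,
				   IRQF_SHARED, "mydev", dev);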
1366 | 1376 | ||
1367 | /** | 1377 | /** |
1368 | * request_any_context_irq - allocate an interrupt line | 1378 | * request_any_context_irq - allocate an interrupt line |
1369 | * @irq: Interrupt line to allocate | 1379 | * @irq: Interrupt line to allocate |
1370 | * @handler: Function to be called when the IRQ occurs. | 1380 | * @handler: Function to be called when the IRQ occurs. |
1371 | * Threaded handler for threaded interrupts. | 1381 | * Threaded handler for threaded interrupts. |
1372 | * @flags: Interrupt type flags | 1382 | * @flags: Interrupt type flags |
1373 | * @name: An ascii name for the claiming device | 1383 | * @name: An ascii name for the claiming device |
1374 | * @dev_id: A cookie passed back to the handler function | 1384 | * @dev_id: A cookie passed back to the handler function |
1375 | * | 1385 | * |
1376 | * This call allocates interrupt resources and enables the | 1386 | * This call allocates interrupt resources and enables the |
1377 | * interrupt line and IRQ handling. It selects either a | 1387 | * interrupt line and IRQ handling. It selects either a |
1378 | * hardirq or threaded handling method depending on the | 1388 | * hardirq or threaded handling method depending on the |
1379 | * context. | 1389 | * context. |
1380 | * | 1390 | * |
1381 | * On failure, it returns a negative value. On success, | 1391 | * On failure, it returns a negative value. On success, |
1382 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. | 1392 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. |
1383 | */ | 1393 | */ |
1384 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, | 1394 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, |
1385 | unsigned long flags, const char *name, void *dev_id) | 1395 | unsigned long flags, const char *name, void *dev_id) |
1386 | { | 1396 | { |
1387 | struct irq_desc *desc = irq_to_desc(irq); | 1397 | struct irq_desc *desc = irq_to_desc(irq); |
1388 | int ret; | 1398 | int ret; |
1389 | 1399 | ||
1390 | if (!desc) | 1400 | if (!desc) |
1391 | return -EINVAL; | 1401 | return -EINVAL; |
1392 | 1402 | ||
1393 | if (irq_settings_is_nested_thread(desc)) { | 1403 | if (irq_settings_is_nested_thread(desc)) { |
1394 | ret = request_threaded_irq(irq, NULL, handler, | 1404 | ret = request_threaded_irq(irq, NULL, handler, |
1395 | flags, name, dev_id); | 1405 | flags, name, dev_id); |
1396 | return !ret ? IRQC_IS_NESTED : ret; | 1406 | return !ret ? IRQC_IS_NESTED : ret; |
1397 | } | 1407 | } |
1398 | 1408 | ||
1399 | ret = request_irq(irq, handler, flags, name, dev_id); | 1409 | ret = request_irq(irq, handler, flags, name, dev_id); |
1400 | return !ret ? IRQC_IS_HARDIRQ : ret; | 1410 | return !ret ? IRQC_IS_HARDIRQ : ret; |
1401 | } | 1411 | } |
1402 | EXPORT_SYMBOL_GPL(request_any_context_irq); | 1412 | EXPORT_SYMBOL_GPL(request_any_context_irq); |
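A sketch of typical call-site handling of the tri-state return value; mydev_isr, client and dev are illustrative:

	ret = request_any_context_irq(client->irq, mydev_isr,
				      IRQF_TRIGGER_LOW, "mydev", dev);
	if (ret < 0)
		return ret;
	/*
	 * On success ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; most callers
	 * only care that it is not negative.
	 */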
1403 | 1413 |