Blame view
include/linux/irq.h
11.8 KB
06fcb0c6f
|
#ifndef _LINUX_IRQ_H
#define _LINUX_IRQ_H

/*
 * Please do not include this file in generic code.  There is currently
 * no requirement for any architecture to implement anything held
 * within this file.
 *
 * Thanks. --rmk
 */
23f9b317e
|
#include <linux/smp.h>

#ifndef CONFIG_S390

#include <linux/linkage.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/errno.h>

#include <asm/irq.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
1da177e4c
|
25 |
|
57a58a943
|
struct irq_desc;

/*
 * Type of the high-level flow handlers (handle_level_irq & friends)
 * that are installed in irq_desc->handle_irq.
 */
typedef void fastcall (*irq_flow_handler_t)(unsigned int irq,
					    struct irq_desc *desc);
57a58a943
|
29 |
|
1da177e4c
|
/*
 * IRQ line status.
 *
 * Bits 0-7 are reserved for the IRQF_* bits in linux/interrupt.h
 *
 * IRQ types
 */
#define IRQ_TYPE_NONE		0x00000000	/* Default, unspecified type */
#define IRQ_TYPE_EDGE_RISING	0x00000001	/* Edge rising type */
#define IRQ_TYPE_EDGE_FALLING	0x00000002	/* Edge falling type */
#define IRQ_TYPE_EDGE_BOTH (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)
#define IRQ_TYPE_LEVEL_HIGH	0x00000004	/* Level high type */
#define IRQ_TYPE_LEVEL_LOW	0x00000008	/* Level low type */
#define IRQ_TYPE_SENSE_MASK	0x0000000f	/* Mask of the above */
#define IRQ_TYPE_PROBE		0x00000010	/* Probing in progress */

/* Internal flags */
950f4427c
|
#define IRQ_INPROGRESS		0x00000100	/* IRQ handler active - do not enter! */
#define IRQ_DISABLED		0x00000200	/* IRQ disabled - do not enter! */
#define IRQ_PENDING		0x00000400	/* IRQ pending - replay on enable */
#define IRQ_REPLAY		0x00000800	/* IRQ has been replayed but not acked yet */
#define IRQ_AUTODETECT		0x00001000	/* IRQ is being autodetected */
#define IRQ_WAITING		0x00002000	/* IRQ not yet seen - for autodetection */
#define IRQ_LEVEL		0x00004000	/* IRQ level triggered */
#define IRQ_MASKED		0x00008000	/* IRQ masked - shouldn't be seen again */
#define IRQ_PER_CPU		0x00010000	/* IRQ is per CPU */
#define IRQ_NOPROBE		0x00020000	/* IRQ is not valid for probing */
#define IRQ_NOREQUEST		0x00040000	/* IRQ cannot be requested */
#define IRQ_NOAUTOEN		0x00080000	/* IRQ will not be enabled on request irq */
#define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
#define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
#define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
950f4427c
|
62 |
|
0d7012a96
|
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
# define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
#else
# define CHECK_IRQ_PER_CPU(var) 0
# define IRQ_NO_BALANCING_MASK	IRQ_NO_BALANCING
#endif
1da177e4c
|
70 |
|
6a6de9ef5
|
struct proc_dir_entry;
struct msi_desc;
6a6de9ef5
|
73 |
|
8fee5c361
|
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @name:		name for /proc/interrupts
 * @startup:		start up the interrupt (defaults to ->enable if NULL)
 * @shutdown:		shut down the interrupt (defaults to ->disable if NULL)
 * @enable:		enable the interrupt (defaults to chip->unmask if NULL)
 * @disable:		disable the interrupt (defaults to chip->mask if NULL)
 * @ack:		start of a new interrupt
 * @mask:		mask an interrupt source
 * @mask_ack:		ack and mask an interrupt source
 * @unmask:		unmask an interrupt source
 * @eoi:		end of interrupt - chip level
 * @end:		end of interrupt - flow level
 * @set_affinity:	set the CPU affinity on SMP machines
 * @retrigger:		resend an IRQ to the CPU
 * @set_type:		set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
 * @set_wake:		enable/disable power-management wake-on of an IRQ
 *
 * @release:		release function solely used by UML
 * @typename:		obsoleted by name, kept as migration helper
 */
struct irq_chip {
	const char	*name;
	unsigned int	(*startup)(unsigned int irq);
	void		(*shutdown)(unsigned int irq);
	void		(*enable)(unsigned int irq);
	void		(*disable)(unsigned int irq);

	void		(*ack)(unsigned int irq);
	void		(*mask)(unsigned int irq);
	void		(*mask_ack)(unsigned int irq);
	void		(*unmask)(unsigned int irq);
	void		(*eoi)(unsigned int irq);

	void		(*end)(unsigned int irq);
	void		(*set_affinity)(unsigned int irq, cpumask_t dest);
	int		(*retrigger)(unsigned int irq);
	int		(*set_type)(unsigned int irq, unsigned int flow_type);
	int		(*set_wake)(unsigned int irq, unsigned int on);

	/* Currently used only by UML, might disappear one day.*/
#ifdef CONFIG_IRQ_RELEASE_METHOD
	void		(*release)(unsigned int irq, void *dev_id);
#endif
	/*
	 * For compatibility, ->typename is copied into ->name.
	 * Will disappear.
	 */
	const char	*typename;
};
8fee5c361
|
/**
 * struct irq_desc - interrupt descriptor
 *
 * @handle_irq:		highlevel irq-events handler [if NULL, __do_IRQ()]
 * @chip:		low level interrupt hardware access
 * @msi_desc:		MSI descriptor
 * @handler_data:	per-IRQ data for the irq_chip methods
 * @chip_data:		platform-specific per-chip private data for the chip
 *			methods, to allow shared chip implementations
 * @action:		the irq action chain
 * @status:		status information
 * @depth:		disable-depth, for nested irq_disable() calls
 * @wake_depth:		enable depth, for multiple set_irq_wake() callers
 * @irq_count:		stats field to detect stalled irqs
 * @irqs_unhandled:	stats field for spurious unhandled interrupts
 * @lock:		locking for SMP
 * @affinity:		IRQ affinity on SMP
 * @cpu:		cpu index useful for balancing
 * @pending_mask:	pending rebalanced interrupts
 * @dir:		/proc/irq/ procfs entry
 * @affinity_entry:	/proc/irq/smp_affinity procfs entry on SMP
 * @name:		flow handler name for /proc/interrupts output
 */
struct irq_desc {
	irq_flow_handler_t	handle_irq;
	struct irq_chip		*chip;
	struct msi_desc		*msi_desc;
	void			*handler_data;
	void			*chip_data;
	struct irqaction	*action;	/* IRQ action list */
	unsigned int		status;		/* IRQ status */

	unsigned int		depth;		/* nested irq disables */
	unsigned int		wake_depth;	/* nested wake enables */
	unsigned int		irq_count;	/* For detecting broken IRQs */
	unsigned int		irqs_unhandled;
	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
	spinlock_t		lock;
#ifdef CONFIG_SMP
	cpumask_t		affinity;
	unsigned int		cpu;
#endif
#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)
	cpumask_t		pending_mask;
#endif
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry	*dir;
#endif
	const char		*name;
} ____cacheline_internodealigned_in_smp;
1da177e4c
|
175 |
|
34ffdb723
|
extern struct irq_desc irq_desc[NR_IRQS];

/*
 * Migration helpers for obsolete names, they will go away:
 */
#define hw_interrupt_type	irq_chip
typedef struct irq_chip		hw_irq_controller;
#define no_irq_type		no_irq_chip
typedef struct irq_desc		irq_desc_t;

/*
 * Pick up the arch-dependent methods:
 */
#include <asm/hw_irq.h>
1da177e4c
|
190 |
|
06fcb0c6f
|
extern int setup_irq(unsigned int irq, struct irqaction *new);

#ifdef CONFIG_GENERIC_HARDIRQS

/* Architectures without dynamic tick support stub this out: */
#ifndef handle_dynamic_tick
# define handle_dynamic_tick(a)		do { } while (0)
#endif
54d5d4240
|
#ifdef CONFIG_SMP

#if defined(CONFIG_GENERIC_PENDING_IRQ) || defined(CONFIG_IRQBALANCE)

void set_pending_irq(unsigned int irq, cpumask_t mask);
void move_native_irq(int irq);
void move_masked_irq(int irq);

#else /* CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE */

/* Empty stubs when IRQ migration is not configured: */
static inline void move_irq(int irq)
{
}

static inline void move_native_irq(int irq)
{
}

static inline void move_masked_irq(int irq)
{
}

static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
{
}

#endif /* CONFIG_GENERIC_PENDING_IRQ */
54d5d4240
|
223 |
|
771ee3b04
|
extern int irq_set_affinity(unsigned int irq, cpumask_t cpumask);
extern int irq_can_set_affinity(unsigned int irq);

#else /* CONFIG_SMP */

#define move_native_irq(x)
#define move_masked_irq(x)

/* On UP there is no affinity to set: */
static inline int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
{
	return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
	return 0;
}

#endif /* CONFIG_SMP */
54d5d4240
|
238 |
|
1b61b910e
|
#ifdef CONFIG_IRQBALANCE
extern void set_balance_irq_affinity(unsigned int irq, cpumask_t mask);
#else
static inline void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
{
}
#endif

#ifdef CONFIG_AUTO_IRQ_AFFINITY
extern int select_smp_affinity(unsigned int irq);
#else
static inline int select_smp_affinity(unsigned int irq)
{
	return 1;
}
#endif

extern int no_irq_affinity;
1da177e4c
|
255 |
|
950f4427c
|
256 257 258 259 |
static inline int irq_balancing_disabled(unsigned int irq) { return irq_desc[irq].status & IRQ_NO_BALANCING_MASK; } |
6a6de9ef5
|
/* Handle irq action chains: */
extern int handle_IRQ_event(unsigned int irq, struct irqaction *action);

/*
 * Built-in IRQ handlers for various IRQ types,
 * callable via desc->chip->handle_irq()
 */
extern void fastcall handle_level_irq(unsigned int irq, struct irq_desc *desc);
extern void fastcall handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
extern void fastcall handle_edge_irq(unsigned int irq, struct irq_desc *desc);
extern void fastcall handle_simple_irq(unsigned int irq, struct irq_desc *desc);
extern void fastcall handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
extern void fastcall handle_bad_irq(unsigned int irq, struct irq_desc *desc);
6a6de9ef5
|
/*
 * Monolithic do_IRQ implementation.
 * (is an explicit fastcall, because i386 4KSTACKS calls it from assembly)
 */
#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
extern fastcall unsigned int __do_IRQ(unsigned int irq);
#endif
2e60bbb6d
|
281 |
|
dae862042
|
282 283 284 285 286 287 |
/* * Architectures call this to let the generic IRQ layer * handle an interrupt. If the descriptor is attached to an * irqchip-style controller then we call the ->handle_irq() handler, * and it calls __do_IRQ() if it's attached to an irqtype-style controller. */ |
7d12e780e
|
288 |
static inline void generic_handle_irq(unsigned int irq) |
dae862042
|
289 290 |
{ struct irq_desc *desc = irq_desc + irq; |
af8c65b57
|
291 |
#ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ |
7d12e780e
|
292 |
desc->handle_irq(irq, desc); |
af8c65b57
|
293 |
#else |
dae862042
|
294 |
if (likely(desc->handle_irq)) |
7d12e780e
|
295 |
desc->handle_irq(irq, desc); |
dae862042
|
296 |
else |
7d12e780e
|
297 |
__do_IRQ(irq); |
af8c65b57
|
298 |
#endif |
dae862042
|
299 |
} |
6a6de9ef5
|
/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
			   int action_ret);

/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);

/* Enable/disable irq debugging output: */
extern int noirqdebug_setup(char *str);

/* Checks whether the interrupt can be requested by request_irq(): */
extern int can_request_irq(unsigned int irq, unsigned long irqflags);
f8b5473fc
|
/* Dummy irq-chip implementations: */
extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 irq_flow_handler_t handle);
extern void
set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);
extern void
__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name);
1da177e4c
|
324 |
|
6a6de9ef5
|
/*
 * Set a highlevel flow handler for a given IRQ:
 */
static inline void
set_irq_handler(unsigned int irq, irq_flow_handler_t handle)
{
	__set_irq_handler(irq, handle, 0, NULL);
}

/*
 * Set a highlevel chained flow handler for a given IRQ.
 * (a chained handler is automatically enabled and set to
 *  IRQ_NOREQUEST and IRQ_NOPROBE)
 */
static inline void
set_irq_chained_handler(unsigned int irq,
			irq_flow_handler_t handle)
{
	__set_irq_handler(irq, handle, 1, NULL);
}
3a16d7136
|
/* Handle dynamic irq creation and destruction */
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);
1f80025e6
|
348 349 350 351 352 353 |
/* Test to see if a driver has successfully requested an irq */ static inline int irq_has_action(unsigned int irq) { struct irq_desc *desc = irq_desc + irq; return desc->action != NULL; } |
3a16d7136
|
/* Dynamic irq helper functions */
extern void dynamic_irq_init(unsigned int irq);
extern void dynamic_irq_cleanup(unsigned int irq);

/* Set/get chip/data for an IRQ: */
extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
extern int set_irq_data(unsigned int irq, void *data);
extern int set_irq_chip_data(unsigned int irq, void *data);
extern int set_irq_type(unsigned int irq, unsigned int type);
extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);

#define get_irq_chip(irq)	(irq_desc[irq].chip)
#define get_irq_chip_data(irq)	(irq_desc[irq].chip_data)
#define get_irq_data(irq)	(irq_desc[irq].handler_data)
#define get_irq_msi(irq)	(irq_desc[irq].msi_desc)
dd87eb3a2
|
369 |
|
6a6de9ef5
|
#endif /* CONFIG_GENERIC_HARDIRQS */

#endif /* !CONFIG_S390 */

#endif /* _LINUX_IRQ_H */