#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>


/* Wait for all in-flight handlers of @irq to complete (may sleep). */
extern void synchronize_irq(unsigned int irq);
/* Wait for in-flight hard-IRQ handlers only; returns whether threaded
 * handlers are still pending (see kernel/irq/manage.c for semantics). */
extern bool synchronize_hardirq(unsigned int irq);

#if defined(CONFIG_TINY_RCU)

/* Tiny RCU needs no NMI bookkeeping: empty stubs. */
static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
/* Tree RCU tracks NMI entry/exit for dyntick-idle accounting. */
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()					\
	do {						\
		account_irq_enter_time(current);	\
		preempt_count_add(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 * (teardown mirrors __irq_enter() in reverse order)
 */
#define __irq_exit()					\
	do {						\
		trace_hardirq_exit();			\
		account_irq_exit_time(current);		\
		preempt_count_sub(HARDIRQ_OFFSET);	\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

/*
 * nmi_enter() marks the CPU as being in NMI context.  Ordering matters:
 * lockdep is disabled first (NMIs can interrupt lock-held code), ftrace
 * is told before the preempt count changes, and RCU/IRQ tracing run
 * after the NMI/HARDIRQ bits are set so in_nmi() is already true.
 */
#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();			\
	} while (0)

/*
 * nmi_exit() undoes nmi_enter() in exactly the reverse order, so the
 * interrupted context's state is restored step by step.
 */
#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#endif /* LINUX_HARDIRQ_H */