Commit ac88ee3b6cbac80c32556a39fe16c4bbc55fcbc5
Exists in
ti-lsk-linux-4.1.y
and in
10 other branches
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq core fix from Thomas Gleixner: "A single fix plugging a long standing race between proc/stat and proc/interrupts access and freeing of interrupt descriptors" * 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: genirq: Prevent proc race against freeing of irq descriptors
Showing 5 changed files Inline Diff
fs/proc/stat.c
1 | #include <linux/cpumask.h> | 1 | #include <linux/cpumask.h> |
2 | #include <linux/fs.h> | 2 | #include <linux/fs.h> |
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | #include <linux/interrupt.h> | 4 | #include <linux/interrupt.h> |
5 | #include <linux/kernel_stat.h> | 5 | #include <linux/kernel_stat.h> |
6 | #include <linux/proc_fs.h> | 6 | #include <linux/proc_fs.h> |
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/seq_file.h> | 8 | #include <linux/seq_file.h> |
9 | #include <linux/slab.h> | 9 | #include <linux/slab.h> |
10 | #include <linux/time.h> | 10 | #include <linux/time.h> |
11 | #include <linux/irqnr.h> | 11 | #include <linux/irqnr.h> |
12 | #include <linux/cputime.h> | 12 | #include <linux/cputime.h> |
13 | #include <linux/tick.h> | 13 | #include <linux/tick.h> |
14 | 14 | ||
15 | #ifndef arch_irq_stat_cpu | 15 | #ifndef arch_irq_stat_cpu |
16 | #define arch_irq_stat_cpu(cpu) 0 | 16 | #define arch_irq_stat_cpu(cpu) 0 |
17 | #endif | 17 | #endif |
18 | #ifndef arch_irq_stat | 18 | #ifndef arch_irq_stat |
19 | #define arch_irq_stat() 0 | 19 | #define arch_irq_stat() 0 |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifdef arch_idle_time | 22 | #ifdef arch_idle_time |
23 | 23 | ||
24 | static cputime64_t get_idle_time(int cpu) | 24 | static cputime64_t get_idle_time(int cpu) |
25 | { | 25 | { |
26 | cputime64_t idle; | 26 | cputime64_t idle; |
27 | 27 | ||
28 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; | 28 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; |
29 | if (cpu_online(cpu) && !nr_iowait_cpu(cpu)) | 29 | if (cpu_online(cpu) && !nr_iowait_cpu(cpu)) |
30 | idle += arch_idle_time(cpu); | 30 | idle += arch_idle_time(cpu); |
31 | return idle; | 31 | return idle; |
32 | } | 32 | } |
33 | 33 | ||
34 | static cputime64_t get_iowait_time(int cpu) | 34 | static cputime64_t get_iowait_time(int cpu) |
35 | { | 35 | { |
36 | cputime64_t iowait; | 36 | cputime64_t iowait; |
37 | 37 | ||
38 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; | 38 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; |
39 | if (cpu_online(cpu) && nr_iowait_cpu(cpu)) | 39 | if (cpu_online(cpu) && nr_iowait_cpu(cpu)) |
40 | iowait += arch_idle_time(cpu); | 40 | iowait += arch_idle_time(cpu); |
41 | return iowait; | 41 | return iowait; |
42 | } | 42 | } |
43 | 43 | ||
44 | #else | 44 | #else |
45 | 45 | ||
46 | static u64 get_idle_time(int cpu) | 46 | static u64 get_idle_time(int cpu) |
47 | { | 47 | { |
48 | u64 idle, idle_time = -1ULL; | 48 | u64 idle, idle_time = -1ULL; |
49 | 49 | ||
50 | if (cpu_online(cpu)) | 50 | if (cpu_online(cpu)) |
51 | idle_time = get_cpu_idle_time_us(cpu, NULL); | 51 | idle_time = get_cpu_idle_time_us(cpu, NULL); |
52 | 52 | ||
53 | if (idle_time == -1ULL) | 53 | if (idle_time == -1ULL) |
54 | /* !NO_HZ or cpu offline so we can rely on cpustat.idle */ | 54 | /* !NO_HZ or cpu offline so we can rely on cpustat.idle */ |
55 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; | 55 | idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; |
56 | else | 56 | else |
57 | idle = usecs_to_cputime64(idle_time); | 57 | idle = usecs_to_cputime64(idle_time); |
58 | 58 | ||
59 | return idle; | 59 | return idle; |
60 | } | 60 | } |
61 | 61 | ||
62 | static u64 get_iowait_time(int cpu) | 62 | static u64 get_iowait_time(int cpu) |
63 | { | 63 | { |
64 | u64 iowait, iowait_time = -1ULL; | 64 | u64 iowait, iowait_time = -1ULL; |
65 | 65 | ||
66 | if (cpu_online(cpu)) | 66 | if (cpu_online(cpu)) |
67 | iowait_time = get_cpu_iowait_time_us(cpu, NULL); | 67 | iowait_time = get_cpu_iowait_time_us(cpu, NULL); |
68 | 68 | ||
69 | if (iowait_time == -1ULL) | 69 | if (iowait_time == -1ULL) |
70 | /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */ | 70 | /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */ |
71 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; | 71 | iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; |
72 | else | 72 | else |
73 | iowait = usecs_to_cputime64(iowait_time); | 73 | iowait = usecs_to_cputime64(iowait_time); |
74 | 74 | ||
75 | return iowait; | 75 | return iowait; |
76 | } | 76 | } |
77 | 77 | ||
78 | #endif | 78 | #endif |
79 | 79 | ||
80 | static int show_stat(struct seq_file *p, void *v) | 80 | static int show_stat(struct seq_file *p, void *v) |
81 | { | 81 | { |
82 | int i, j; | 82 | int i, j; |
83 | unsigned long jif; | 83 | unsigned long jif; |
84 | u64 user, nice, system, idle, iowait, irq, softirq, steal; | 84 | u64 user, nice, system, idle, iowait, irq, softirq, steal; |
85 | u64 guest, guest_nice; | 85 | u64 guest, guest_nice; |
86 | u64 sum = 0; | 86 | u64 sum = 0; |
87 | u64 sum_softirq = 0; | 87 | u64 sum_softirq = 0; |
88 | unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; | 88 | unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; |
89 | struct timespec boottime; | 89 | struct timespec boottime; |
90 | 90 | ||
91 | user = nice = system = idle = iowait = | 91 | user = nice = system = idle = iowait = |
92 | irq = softirq = steal = 0; | 92 | irq = softirq = steal = 0; |
93 | guest = guest_nice = 0; | 93 | guest = guest_nice = 0; |
94 | getboottime(&boottime); | 94 | getboottime(&boottime); |
95 | jif = boottime.tv_sec; | 95 | jif = boottime.tv_sec; |
96 | 96 | ||
97 | for_each_possible_cpu(i) { | 97 | for_each_possible_cpu(i) { |
98 | user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; | 98 | user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; |
99 | nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; | 99 | nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; |
100 | system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; | 100 | system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; |
101 | idle += get_idle_time(i); | 101 | idle += get_idle_time(i); |
102 | iowait += get_iowait_time(i); | 102 | iowait += get_iowait_time(i); |
103 | irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; | 103 | irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; |
104 | softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; | 104 | softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; |
105 | steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; | 105 | steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; |
106 | guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; | 106 | guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; |
107 | guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; | 107 | guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; |
108 | sum += kstat_cpu_irqs_sum(i); | 108 | sum += kstat_cpu_irqs_sum(i); |
109 | sum += arch_irq_stat_cpu(i); | 109 | sum += arch_irq_stat_cpu(i); |
110 | 110 | ||
111 | for (j = 0; j < NR_SOFTIRQS; j++) { | 111 | for (j = 0; j < NR_SOFTIRQS; j++) { |
112 | unsigned int softirq_stat = kstat_softirqs_cpu(j, i); | 112 | unsigned int softirq_stat = kstat_softirqs_cpu(j, i); |
113 | 113 | ||
114 | per_softirq_sums[j] += softirq_stat; | 114 | per_softirq_sums[j] += softirq_stat; |
115 | sum_softirq += softirq_stat; | 115 | sum_softirq += softirq_stat; |
116 | } | 116 | } |
117 | } | 117 | } |
118 | sum += arch_irq_stat(); | 118 | sum += arch_irq_stat(); |
119 | 119 | ||
120 | seq_puts(p, "cpu "); | 120 | seq_puts(p, "cpu "); |
121 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); | 121 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); |
122 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); | 122 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); |
123 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); | 123 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); |
124 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); | 124 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); |
125 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); | 125 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); |
126 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); | 126 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); |
127 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); | 127 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); |
128 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); | 128 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); |
129 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); | 129 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); |
130 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); | 130 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); |
131 | seq_putc(p, '\n'); | 131 | seq_putc(p, '\n'); |
132 | 132 | ||
133 | for_each_online_cpu(i) { | 133 | for_each_online_cpu(i) { |
134 | /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ | 134 | /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ |
135 | user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; | 135 | user = kcpustat_cpu(i).cpustat[CPUTIME_USER]; |
136 | nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; | 136 | nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE]; |
137 | system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; | 137 | system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; |
138 | idle = get_idle_time(i); | 138 | idle = get_idle_time(i); |
139 | iowait = get_iowait_time(i); | 139 | iowait = get_iowait_time(i); |
140 | irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; | 140 | irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; |
141 | softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; | 141 | softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; |
142 | steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; | 142 | steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; |
143 | guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; | 143 | guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; |
144 | guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; | 144 | guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; |
145 | seq_printf(p, "cpu%d", i); | 145 | seq_printf(p, "cpu%d", i); |
146 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); | 146 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user)); |
147 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); | 147 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice)); |
148 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); | 148 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system)); |
149 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); | 149 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle)); |
150 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); | 150 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait)); |
151 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); | 151 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq)); |
152 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); | 152 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq)); |
153 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); | 153 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal)); |
154 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); | 154 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest)); |
155 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); | 155 | seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice)); |
156 | seq_putc(p, '\n'); | 156 | seq_putc(p, '\n'); |
157 | } | 157 | } |
158 | seq_printf(p, "intr %llu", (unsigned long long)sum); | 158 | seq_printf(p, "intr %llu", (unsigned long long)sum); |
159 | 159 | ||
160 | /* sum again ? it could be updated? */ | 160 | /* sum again ? it could be updated? */ |
161 | for_each_irq_nr(j) | 161 | for_each_irq_nr(j) |
162 | seq_put_decimal_ull(p, ' ', kstat_irqs(j)); | 162 | seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j)); |
163 | 163 | ||
164 | seq_printf(p, | 164 | seq_printf(p, |
165 | "\nctxt %llu\n" | 165 | "\nctxt %llu\n" |
166 | "btime %lu\n" | 166 | "btime %lu\n" |
167 | "processes %lu\n" | 167 | "processes %lu\n" |
168 | "procs_running %lu\n" | 168 | "procs_running %lu\n" |
169 | "procs_blocked %lu\n", | 169 | "procs_blocked %lu\n", |
170 | nr_context_switches(), | 170 | nr_context_switches(), |
171 | (unsigned long)jif, | 171 | (unsigned long)jif, |
172 | total_forks, | 172 | total_forks, |
173 | nr_running(), | 173 | nr_running(), |
174 | nr_iowait()); | 174 | nr_iowait()); |
175 | 175 | ||
176 | seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq); | 176 | seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq); |
177 | 177 | ||
178 | for (i = 0; i < NR_SOFTIRQS; i++) | 178 | for (i = 0; i < NR_SOFTIRQS; i++) |
179 | seq_put_decimal_ull(p, ' ', per_softirq_sums[i]); | 179 | seq_put_decimal_ull(p, ' ', per_softirq_sums[i]); |
180 | seq_putc(p, '\n'); | 180 | seq_putc(p, '\n'); |
181 | 181 | ||
182 | return 0; | 182 | return 0; |
183 | } | 183 | } |
184 | 184 | ||
185 | static int stat_open(struct inode *inode, struct file *file) | 185 | static int stat_open(struct inode *inode, struct file *file) |
186 | { | 186 | { |
187 | size_t size = 1024 + 128 * num_online_cpus(); | 187 | size_t size = 1024 + 128 * num_online_cpus(); |
188 | 188 | ||
189 | /* minimum size to display an interrupt count : 2 bytes */ | 189 | /* minimum size to display an interrupt count : 2 bytes */ |
190 | size += 2 * nr_irqs; | 190 | size += 2 * nr_irqs; |
191 | return single_open_size(file, show_stat, NULL, size); | 191 | return single_open_size(file, show_stat, NULL, size); |
192 | } | 192 | } |
193 | 193 | ||
194 | static const struct file_operations proc_stat_operations = { | 194 | static const struct file_operations proc_stat_operations = { |
195 | .open = stat_open, | 195 | .open = stat_open, |
196 | .read = seq_read, | 196 | .read = seq_read, |
197 | .llseek = seq_lseek, | 197 | .llseek = seq_lseek, |
198 | .release = single_release, | 198 | .release = single_release, |
199 | }; | 199 | }; |
200 | 200 | ||
201 | static int __init proc_stat_init(void) | 201 | static int __init proc_stat_init(void) |
202 | { | 202 | { |
203 | proc_create("stat", 0, NULL, &proc_stat_operations); | 203 | proc_create("stat", 0, NULL, &proc_stat_operations); |
204 | return 0; | 204 | return 0; |
205 | } | 205 | } |
206 | fs_initcall(proc_stat_init); | 206 | fs_initcall(proc_stat_init); |
207 | 207 |
include/linux/kernel_stat.h
1 | #ifndef _LINUX_KERNEL_STAT_H | 1 | #ifndef _LINUX_KERNEL_STAT_H |
2 | #define _LINUX_KERNEL_STAT_H | 2 | #define _LINUX_KERNEL_STAT_H |
3 | 3 | ||
4 | #include <linux/smp.h> | 4 | #include <linux/smp.h> |
5 | #include <linux/threads.h> | 5 | #include <linux/threads.h> |
6 | #include <linux/percpu.h> | 6 | #include <linux/percpu.h> |
7 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
8 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/vtime.h> | 10 | #include <linux/vtime.h> |
11 | #include <asm/irq.h> | 11 | #include <asm/irq.h> |
12 | #include <linux/cputime.h> | 12 | #include <linux/cputime.h> |
13 | 13 | ||
14 | /* | 14 | /* |
15 | * 'kernel_stat.h' contains the definitions needed for doing | 15 | * 'kernel_stat.h' contains the definitions needed for doing |
16 | * some kernel statistics (CPU usage, context switches ...), | 16 | * some kernel statistics (CPU usage, context switches ...), |
17 | * used by rstatd/perfmeter | 17 | * used by rstatd/perfmeter |
18 | */ | 18 | */ |
19 | 19 | ||
20 | enum cpu_usage_stat { | 20 | enum cpu_usage_stat { |
21 | CPUTIME_USER, | 21 | CPUTIME_USER, |
22 | CPUTIME_NICE, | 22 | CPUTIME_NICE, |
23 | CPUTIME_SYSTEM, | 23 | CPUTIME_SYSTEM, |
24 | CPUTIME_SOFTIRQ, | 24 | CPUTIME_SOFTIRQ, |
25 | CPUTIME_IRQ, | 25 | CPUTIME_IRQ, |
26 | CPUTIME_IDLE, | 26 | CPUTIME_IDLE, |
27 | CPUTIME_IOWAIT, | 27 | CPUTIME_IOWAIT, |
28 | CPUTIME_STEAL, | 28 | CPUTIME_STEAL, |
29 | CPUTIME_GUEST, | 29 | CPUTIME_GUEST, |
30 | CPUTIME_GUEST_NICE, | 30 | CPUTIME_GUEST_NICE, |
31 | NR_STATS, | 31 | NR_STATS, |
32 | }; | 32 | }; |
33 | 33 | ||
34 | struct kernel_cpustat { | 34 | struct kernel_cpustat { |
35 | u64 cpustat[NR_STATS]; | 35 | u64 cpustat[NR_STATS]; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | struct kernel_stat { | 38 | struct kernel_stat { |
39 | unsigned long irqs_sum; | 39 | unsigned long irqs_sum; |
40 | unsigned int softirqs[NR_SOFTIRQS]; | 40 | unsigned int softirqs[NR_SOFTIRQS]; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | DECLARE_PER_CPU(struct kernel_stat, kstat); | 43 | DECLARE_PER_CPU(struct kernel_stat, kstat); |
44 | DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); | 44 | DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat); |
45 | 45 | ||
46 | /* Must have preemption disabled for this to be meaningful. */ | 46 | /* Must have preemption disabled for this to be meaningful. */ |
47 | #define kstat_this_cpu this_cpu_ptr(&kstat) | 47 | #define kstat_this_cpu this_cpu_ptr(&kstat) |
48 | #define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat) | 48 | #define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat) |
49 | #define kstat_cpu(cpu) per_cpu(kstat, cpu) | 49 | #define kstat_cpu(cpu) per_cpu(kstat, cpu) |
50 | #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) | 50 | #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu) |
51 | 51 | ||
52 | extern unsigned long long nr_context_switches(void); | 52 | extern unsigned long long nr_context_switches(void); |
53 | 53 | ||
54 | extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); | 54 | extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu); |
55 | extern void kstat_incr_irq_this_cpu(unsigned int irq); | 55 | extern void kstat_incr_irq_this_cpu(unsigned int irq); |
56 | 56 | ||
57 | static inline void kstat_incr_softirqs_this_cpu(unsigned int irq) | 57 | static inline void kstat_incr_softirqs_this_cpu(unsigned int irq) |
58 | { | 58 | { |
59 | __this_cpu_inc(kstat.softirqs[irq]); | 59 | __this_cpu_inc(kstat.softirqs[irq]); |
60 | } | 60 | } |
61 | 61 | ||
62 | static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) | 62 | static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu) |
63 | { | 63 | { |
64 | return kstat_cpu(cpu).softirqs[irq]; | 64 | return kstat_cpu(cpu).softirqs[irq]; |
65 | } | 65 | } |
66 | 66 | ||
67 | /* | 67 | /* |
68 | * Number of interrupts per specific IRQ source, since bootup | 68 | * Number of interrupts per specific IRQ source, since bootup |
69 | */ | 69 | */ |
70 | extern unsigned int kstat_irqs(unsigned int irq); | 70 | extern unsigned int kstat_irqs(unsigned int irq); |
71 | extern unsigned int kstat_irqs_usr(unsigned int irq); | ||
71 | 72 | ||
72 | /* | 73 | /* |
73 | * Number of interrupts per cpu, since bootup | 74 | * Number of interrupts per cpu, since bootup |
74 | */ | 75 | */ |
75 | static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) | 76 | static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu) |
76 | { | 77 | { |
77 | return kstat_cpu(cpu).irqs_sum; | 78 | return kstat_cpu(cpu).irqs_sum; |
78 | } | 79 | } |
79 | 80 | ||
80 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); | 81 | extern void account_user_time(struct task_struct *, cputime_t, cputime_t); |
81 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); | 82 | extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); |
82 | extern void account_steal_time(cputime_t); | 83 | extern void account_steal_time(cputime_t); |
83 | extern void account_idle_time(cputime_t); | 84 | extern void account_idle_time(cputime_t); |
84 | 85 | ||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* With native vtime accounting the tick just forwards to vtime. */
static inline void account_process_tick(struct task_struct *tsk, int user)
{
	vtime_account_user(tsk);
}
#else
extern void account_process_tick(struct task_struct *, int user);
#endif
93 | 94 | ||
94 | extern void account_steal_ticks(unsigned long ticks); | 95 | extern void account_steal_ticks(unsigned long ticks); |
95 | extern void account_idle_ticks(unsigned long ticks); | 96 | extern void account_idle_ticks(unsigned long ticks); |
96 | 97 | ||
97 | #endif /* _LINUX_KERNEL_STAT_H */ | 98 | #endif /* _LINUX_KERNEL_STAT_H */ |
98 | 99 |
kernel/irq/internals.h
1 | /* | 1 | /* |
2 | * IRQ subsystem internal functions and variables: | 2 | * IRQ subsystem internal functions and variables: |
3 | * | 3 | * |
4 | * Do not ever include this file from anything else than | 4 | * Do not ever include this file from anything else than |
5 | * kernel/irq/. Do not even think about using any information outside | 5 | * kernel/irq/. Do not even think about using any information outside |
6 | * of this file for your non core code. | 6 | * of this file for your non core code. |
7 | */ | 7 | */ |
8 | #include <linux/irqdesc.h> | 8 | #include <linux/irqdesc.h> |
9 | #include <linux/kernel_stat.h> | 9 | #include <linux/kernel_stat.h> |
10 | 10 | ||
11 | #ifdef CONFIG_SPARSE_IRQ | 11 | #ifdef CONFIG_SPARSE_IRQ |
12 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) | 12 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) |
13 | #else | 13 | #else |
14 | # define IRQ_BITMAP_BITS NR_IRQS | 14 | # define IRQ_BITMAP_BITS NR_IRQS |
15 | #endif | 15 | #endif |
16 | 16 | ||
17 | #define istate core_internal_state__do_not_mess_with_it | 17 | #define istate core_internal_state__do_not_mess_with_it |
18 | 18 | ||
19 | extern bool noirqdebug; | 19 | extern bool noirqdebug; |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * Bits used by threaded handlers: | 22 | * Bits used by threaded handlers: |
23 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run | 23 | * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run |
24 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed | 24 | * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed |
25 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity | 25 | * IRQTF_AFFINITY - irq thread is requested to adjust affinity |
26 | * IRQTF_FORCED_THREAD - irq action is force threaded | 26 | * IRQTF_FORCED_THREAD - irq action is force threaded |
27 | */ | 27 | */ |
28 | enum { | 28 | enum { |
29 | IRQTF_RUNTHREAD, | 29 | IRQTF_RUNTHREAD, |
30 | IRQTF_WARNED, | 30 | IRQTF_WARNED, |
31 | IRQTF_AFFINITY, | 31 | IRQTF_AFFINITY, |
32 | IRQTF_FORCED_THREAD, | 32 | IRQTF_FORCED_THREAD, |
33 | }; | 33 | }; |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Bit masks for desc->core_internal_state__do_not_mess_with_it | 36 | * Bit masks for desc->core_internal_state__do_not_mess_with_it |
37 | * | 37 | * |
38 | * IRQS_AUTODETECT - autodetection in progress | 38 | * IRQS_AUTODETECT - autodetection in progress |
39 | * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt | 39 | * IRQS_SPURIOUS_DISABLED - was disabled due to spurious interrupt |
40 | * detection | 40 | * detection |
41 | * IRQS_POLL_INPROGRESS - polling in progress | 41 | * IRQS_POLL_INPROGRESS - polling in progress |
42 | * IRQS_ONESHOT - irq is not unmasked in primary handler | 42 | * IRQS_ONESHOT - irq is not unmasked in primary handler |
43 | * IRQS_REPLAY - irq is replayed | 43 | * IRQS_REPLAY - irq is replayed |
44 | * IRQS_WAITING - irq is waiting | 44 | * IRQS_WAITING - irq is waiting |
45 | * IRQS_PENDING - irq is pending and replayed later | 45 | * IRQS_PENDING - irq is pending and replayed later |
46 | * IRQS_SUSPENDED - irq is suspended | 46 | * IRQS_SUSPENDED - irq is suspended |
47 | */ | 47 | */ |
48 | enum { | 48 | enum { |
49 | IRQS_AUTODETECT = 0x00000001, | 49 | IRQS_AUTODETECT = 0x00000001, |
50 | IRQS_SPURIOUS_DISABLED = 0x00000002, | 50 | IRQS_SPURIOUS_DISABLED = 0x00000002, |
51 | IRQS_POLL_INPROGRESS = 0x00000008, | 51 | IRQS_POLL_INPROGRESS = 0x00000008, |
52 | IRQS_ONESHOT = 0x00000020, | 52 | IRQS_ONESHOT = 0x00000020, |
53 | IRQS_REPLAY = 0x00000040, | 53 | IRQS_REPLAY = 0x00000040, |
54 | IRQS_WAITING = 0x00000080, | 54 | IRQS_WAITING = 0x00000080, |
55 | IRQS_PENDING = 0x00000200, | 55 | IRQS_PENDING = 0x00000200, |
56 | IRQS_SUSPENDED = 0x00000800, | 56 | IRQS_SUSPENDED = 0x00000800, |
57 | }; | 57 | }; |
58 | 58 | ||
59 | #include "debug.h" | 59 | #include "debug.h" |
60 | #include "settings.h" | 60 | #include "settings.h" |
61 | 61 | ||
62 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) | 62 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) |
63 | 63 | ||
64 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 64 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
65 | unsigned long flags); | 65 | unsigned long flags); |
66 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq); | 66 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq); |
67 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq); | 67 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq); |
68 | 68 | ||
69 | extern int irq_startup(struct irq_desc *desc, bool resend); | 69 | extern int irq_startup(struct irq_desc *desc, bool resend); |
70 | extern void irq_shutdown(struct irq_desc *desc); | 70 | extern void irq_shutdown(struct irq_desc *desc); |
71 | extern void irq_enable(struct irq_desc *desc); | 71 | extern void irq_enable(struct irq_desc *desc); |
72 | extern void irq_disable(struct irq_desc *desc); | 72 | extern void irq_disable(struct irq_desc *desc); |
73 | extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu); | 73 | extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu); |
74 | extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); | 74 | extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu); |
75 | extern void mask_irq(struct irq_desc *desc); | 75 | extern void mask_irq(struct irq_desc *desc); |
76 | extern void unmask_irq(struct irq_desc *desc); | 76 | extern void unmask_irq(struct irq_desc *desc); |
77 | extern void unmask_threaded_irq(struct irq_desc *desc); | 77 | extern void unmask_threaded_irq(struct irq_desc *desc); |
78 | 78 | ||
#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
/*
 * Serialize readers (e.g. /proc/stat, /proc/interrupts) against
 * freeing of sparse irq descriptors; no-ops when !SPARSE_IRQ since
 * descriptors are then never freed.
 */
extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);
#else
extern void irq_mark_irq(unsigned int irq);
static inline void irq_lock_sparse(void) { }
static inline void irq_unlock_sparse(void) { }
#endif
84 | 88 | ||
85 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); | 89 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
86 | 90 | ||
87 | irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); | 91 | irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); |
88 | irqreturn_t handle_irq_event(struct irq_desc *desc); | 92 | irqreturn_t handle_irq_event(struct irq_desc *desc); |
89 | 93 | ||
90 | /* Resending of interrupts :*/ | 94 | /* Resending of interrupts :*/ |
91 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | 95 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); |
92 | bool irq_wait_for_poll(struct irq_desc *desc); | 96 | bool irq_wait_for_poll(struct irq_desc *desc); |
93 | void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); | 97 | void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); |
94 | 98 | ||
95 | #ifdef CONFIG_PROC_FS | 99 | #ifdef CONFIG_PROC_FS |
96 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); | 100 | extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); |
97 | extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); | 101 | extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); |
98 | extern void register_handler_proc(unsigned int irq, struct irqaction *action); | 102 | extern void register_handler_proc(unsigned int irq, struct irqaction *action); |
99 | extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); | 103 | extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); |
100 | #else | 104 | #else |
101 | static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } | 105 | static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } |
102 | static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } | 106 | static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } |
103 | static inline void register_handler_proc(unsigned int irq, | 107 | static inline void register_handler_proc(unsigned int irq, |
104 | struct irqaction *action) { } | 108 | struct irqaction *action) { } |
105 | static inline void unregister_handler_proc(unsigned int irq, | 109 | static inline void unregister_handler_proc(unsigned int irq, |
106 | struct irqaction *action) { } | 110 | struct irqaction *action) { } |
107 | #endif | 111 | #endif |
108 | 112 | ||
109 | extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); | 113 | extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask); |
110 | 114 | ||
111 | extern void irq_set_thread_affinity(struct irq_desc *desc); | 115 | extern void irq_set_thread_affinity(struct irq_desc *desc); |
112 | 116 | ||
113 | extern int irq_do_set_affinity(struct irq_data *data, | 117 | extern int irq_do_set_affinity(struct irq_data *data, |
114 | const struct cpumask *dest, bool force); | 118 | const struct cpumask *dest, bool force); |
115 | 119 | ||
116 | /* Inline functions for support of irq chips on slow busses */ | 120 | /* Inline functions for support of irq chips on slow busses */ |
117 | static inline void chip_bus_lock(struct irq_desc *desc) | 121 | static inline void chip_bus_lock(struct irq_desc *desc) |
118 | { | 122 | { |
119 | if (unlikely(desc->irq_data.chip->irq_bus_lock)) | 123 | if (unlikely(desc->irq_data.chip->irq_bus_lock)) |
120 | desc->irq_data.chip->irq_bus_lock(&desc->irq_data); | 124 | desc->irq_data.chip->irq_bus_lock(&desc->irq_data); |
121 | } | 125 | } |
122 | 126 | ||
123 | static inline void chip_bus_sync_unlock(struct irq_desc *desc) | 127 | static inline void chip_bus_sync_unlock(struct irq_desc *desc) |
124 | { | 128 | { |
125 | if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) | 129 | if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) |
126 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); | 130 | desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); |
127 | } | 131 | } |
128 | 132 | ||
129 | #define _IRQ_DESC_CHECK (1 << 0) | 133 | #define _IRQ_DESC_CHECK (1 << 0) |
130 | #define _IRQ_DESC_PERCPU (1 << 1) | 134 | #define _IRQ_DESC_PERCPU (1 << 1) |
131 | 135 | ||
132 | #define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) | 136 | #define IRQ_GET_DESC_CHECK_GLOBAL (_IRQ_DESC_CHECK) |
133 | #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) | 137 | #define IRQ_GET_DESC_CHECK_PERCPU (_IRQ_DESC_CHECK | _IRQ_DESC_PERCPU) |
134 | 138 | ||
135 | struct irq_desc * | 139 | struct irq_desc * |
136 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, | 140 | __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus, |
137 | unsigned int check); | 141 | unsigned int check); |
138 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); | 142 | void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus); |
139 | 143 | ||
/* Look up and lock the descriptor for @irq, also taking the chip bus lock. */
static inline struct irq_desc *
irq_get_desc_buslock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, true, check);
}
145 | 149 | ||
/* Unlock a descriptor obtained with irq_get_desc_buslock(). */
static inline void
irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, true);
}
151 | 155 | ||
/* Look up and lock the descriptor for @irq without the chip bus lock. */
static inline struct irq_desc *
irq_get_desc_lock(unsigned int irq, unsigned long *flags, unsigned int check)
{
	return __irq_get_desc_lock(irq, flags, false, check);
}
157 | 161 | ||
/* Unlock a descriptor obtained with irq_get_desc_lock(). */
static inline void
irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
{
	__irq_put_desc_unlock(desc, flags, false);
}
163 | 167 | ||
164 | /* | 168 | /* |
165 | * Manipulation functions for irq_data.state | 169 | * Manipulation functions for irq_data.state |
166 | */ | 170 | */ |
/* Mark an affinity change as pending in the irq_data state word. */
static inline void irqd_set_move_pending(struct irq_data *d)
{
	d->state_use_accessors |= IRQD_SETAFFINITY_PENDING;
}
171 | 175 | ||
/* Clear a previously set pending-affinity-change mark. */
static inline void irqd_clr_move_pending(struct irq_data *d)
{
	d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
}
176 | 180 | ||
/* Clear the bits of @mask in the irq_data state word. */
static inline void irqd_clear(struct irq_data *d, unsigned int mask)
{
	d->state_use_accessors &= ~mask;
}
181 | 185 | ||
/* Set the bits of @mask in the irq_data state word. */
static inline void irqd_set(struct irq_data *d, unsigned int mask)
{
	d->state_use_accessors |= mask;
}
186 | 190 | ||
187 | static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) | 191 | static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) |
188 | { | 192 | { |
189 | return d->state_use_accessors & mask; | 193 | return d->state_use_accessors & mask; |
190 | } | 194 | } |
191 | 195 | ||
/*
 * Account one interrupt on the current CPU: bump the per-descriptor
 * per-cpu counter and the global per-cpu irqs_sum.
 * The @irq argument is unused here; kept for the callers' convenience.
 */
static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc)
{
	__this_cpu_inc(*desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}
197 | 201 | ||
/*
 * PM wakeup handling hooks. Real implementations live in the PM code
 * when CONFIG_PM_SLEEP is enabled; otherwise they collapse to no-ops
 * (and irq_pm_check_wakeup() reports "not a wakeup").
 */
#ifdef CONFIG_PM_SLEEP
bool irq_pm_check_wakeup(struct irq_desc *desc);
void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
#else
static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
static inline void
irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
static inline void
irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
#endif
209 | 213 |
kernel/irq/irqdesc.c
1 | /* | 1 | /* |
2 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar | 2 | * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar |
3 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King | 3 | * Copyright (C) 2005-2006, Thomas Gleixner, Russell King |
4 | * | 4 | * |
5 | * This file contains the interrupt descriptor management code | 5 | * This file contains the interrupt descriptor management code |
6 | * | 6 | * |
7 | * Detailed information is available in Documentation/DocBook/genericirq | 7 | * Detailed information is available in Documentation/DocBook/genericirq |
8 | * | 8 | * |
9 | */ | 9 | */ |
10 | #include <linux/irq.h> | 10 | #include <linux/irq.h> |
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/export.h> | 12 | #include <linux/export.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | #include <linux/radix-tree.h> | 15 | #include <linux/radix-tree.h> |
16 | #include <linux/bitmap.h> | 16 | #include <linux/bitmap.h> |
17 | #include <linux/irqdomain.h> | 17 | #include <linux/irqdomain.h> |
18 | 18 | ||
19 | #include "internals.h" | 19 | #include "internals.h" |
20 | 20 | ||
21 | /* | 21 | /* |
22 | * lockdep: we want to handle all irq_desc locks as a single lock-class: | 22 | * lockdep: we want to handle all irq_desc locks as a single lock-class: |
23 | */ | 23 | */ |
24 | static struct lock_class_key irq_desc_lock_class; | 24 | static struct lock_class_key irq_desc_lock_class; |
25 | 25 | ||
#if defined(CONFIG_SMP)
/*
 * Initialize the default affinity mask applied to new descriptors:
 * all CPUs allowed.
 */
static void __init init_irq_default_affinity(void)
{
	/*
	 * NOTE(review): the allocation result is not checked; presumably a
	 * GFP_NOWAIT failure this early in boot is unrecoverable anyway —
	 * confirm against callers before relying on it.
	 */
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
/* UP build: no affinity mask to set up. */
static void __init init_irq_default_affinity(void)
{
}
#endif
37 | 37 | ||
38 | #ifdef CONFIG_SMP | 38 | #ifdef CONFIG_SMP |
/*
 * Allocate the cpumasks attached to a descriptor (affinity and, with
 * GENERIC_PENDING_IRQ, the pending mask) on @node.
 * Returns 0 on success, -ENOMEM on failure with nothing leaked.
 */
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		/* Undo the first allocation so the caller sees a clean failure */
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}
52 | 52 | ||
/* Reset the SMP-related fields of a descriptor to their defaults. */
static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	/* Start from the system-wide default affinity */
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}
61 | 61 | ||
/* NUMA node the descriptor's irq_data is associated with. */
static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}
66 | 66 | ||
67 | #else | 67 | #else |
68 | static inline int | 68 | static inline int |
69 | alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } | 69 | alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } |
70 | static inline void desc_smp_init(struct irq_desc *desc, int node) { } | 70 | static inline void desc_smp_init(struct irq_desc *desc, int node) { } |
71 | static inline int desc_node(struct irq_desc *desc) { return 0; } | 71 | static inline int desc_node(struct irq_desc *desc) { return 0; } |
72 | #endif | 72 | #endif |
73 | 73 | ||
/*
 * Reset a descriptor to its pristine state: no chip, no handler data,
 * default settings, disabled, bad-irq handler installed, per-cpu kstat
 * counters zeroed and SMP fields reinitialized.
 * Used both when allocating a descriptor and when recycling one.
 */
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	/* Until a real handler is installed, any delivery is a bug */
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}
96 | 96 | ||
97 | int nr_irqs = NR_IRQS; | 97 | int nr_irqs = NR_IRQS; |
98 | EXPORT_SYMBOL_GPL(nr_irqs); | 98 | EXPORT_SYMBOL_GPL(nr_irqs); |
99 | 99 | ||
100 | static DEFINE_MUTEX(sparse_irq_lock); | 100 | static DEFINE_MUTEX(sparse_irq_lock); |
101 | static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); | 101 | static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); |
102 | 102 | ||
103 | #ifdef CONFIG_SPARSE_IRQ | 103 | #ifdef CONFIG_SPARSE_IRQ |
104 | 104 | ||
105 | static RADIX_TREE(irq_desc_tree, GFP_KERNEL); | 105 | static RADIX_TREE(irq_desc_tree, GFP_KERNEL); |
106 | 106 | ||
/*
 * Insert @desc into the sparse irq radix tree.
 * NOTE(review): the radix_tree_insert() return value is ignored —
 * presumably callers guarantee the slot is free and GFP_KERNEL keeps
 * allocation failures out of this path; verify before reuse.
 */
static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}
111 | 111 | ||
/* Map an irq number to its descriptor; NULL if none is allocated. */
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);
117 | 117 | ||
/* Remove @irq from the sparse irq radix tree. Caller frees the descriptor. */
static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}
122 | 122 | ||
#ifdef CONFIG_SMP
/* Release the cpumasks allocated by alloc_masks(). */
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
/* UP build: nothing was allocated. */
static inline void free_masks(struct irq_desc *desc) { }
#endif
134 | 134 | ||
/*
 * Take sparse_irq_lock from outside this file. Used by the /proc code
 * (show_interrupts(), kstat_irq_usr()) to keep a descriptor from being
 * freed while it is being inspected.
 */
void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}
139 | |||
/* Counterpart of irq_lock_sparse(). */
void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}
144 | |||
/*
 * Allocate and initialize one irq descriptor on @node.
 * Returns the descriptor or NULL on allocation failure; partial
 * allocations are unwound via the goto cleanup chain.
 */
static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	/* All descriptor locks share one lockdep class (see file header) */
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
164 | 174 | ||
/*
 * Tear down one irq descriptor: remove its /proc entries, unhook it
 * from the sparse tree under sparse_irq_lock, then free its memory.
 */
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock protects also show_interrupts() and
	 * kstat_irq_usr(). Once we deleted the descriptor from the
	 * sparse tree we can free it. Access in proc will fail to
	 * lookup the descriptor.
	 */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}
179 | 195 | ||
/*
 * Allocate @cnt descriptors for irqs [start, start+cnt) and publish each
 * in the sparse tree under sparse_irq_lock.
 * On failure, already-allocated descriptors are freed and the caller's
 * bitmap reservation is released; returns -ENOMEM.
 * On success returns @start.
 */
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	/* Unwind the descriptors allocated before the failure */
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}
205 | 221 | ||
206 | static int irq_expand_nr_irqs(unsigned int nr) | 222 | static int irq_expand_nr_irqs(unsigned int nr) |
207 | { | 223 | { |
208 | if (nr > IRQ_BITMAP_BITS) | 224 | if (nr > IRQ_BITMAP_BITS) |
209 | return -ENOMEM; | 225 | return -ENOMEM; |
210 | nr_irqs = nr; | 226 | nr_irqs = nr; |
211 | return 0; | 227 | return 0; |
212 | } | 228 | } |
213 | 229 | ||
/*
 * Boot-time setup for the sparse irq mode: clamp nr_irqs to what the
 * allocation bitmap can track and preallocate the descriptors the
 * architecture asked for, then hand off to arch_early_irq_init().
 */
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		/*
		 * NOTE(review): alloc_desc() can return NULL, which is
		 * inserted as-is; presumably an early-boot allocation
		 * failure is fatal anyway — confirm.
		 */
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}
241 | 257 | ||
242 | #else /* !CONFIG_SPARSE_IRQ */ | 258 | #else /* !CONFIG_SPARSE_IRQ */ |
243 | 259 | ||
/*
 * Non-sparse mode: the full descriptor array is statically allocated,
 * each entry disabled (depth 1) with the bad-irq handler installed.
 */
struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};
251 | 267 | ||
/*
 * Boot-time setup for the static descriptor array: allocate the per-cpu
 * kstat counters and cpumasks for every entry and reset it to defaults,
 * then hand off to arch_early_irq_init().
 */
int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}
273 | 289 | ||
274 | struct irq_desc *irq_to_desc(unsigned int irq) | 290 | struct irq_desc *irq_to_desc(unsigned int irq) |
275 | { | 291 | { |
276 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; | 292 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; |
277 | } | 293 | } |
278 | EXPORT_SYMBOL(irq_to_desc); | 294 | EXPORT_SYMBOL(irq_to_desc); |
279 | 295 | ||
/*
 * Non-sparse "free": the descriptor is static, so just reset it to its
 * default state under its lock.
 */
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
289 | 305 | ||
290 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, | 306 | static inline int alloc_descs(unsigned int start, unsigned int cnt, int node, |
291 | struct module *owner) | 307 | struct module *owner) |
292 | { | 308 | { |
293 | u32 i; | 309 | u32 i; |
294 | 310 | ||
295 | for (i = 0; i < cnt; i++) { | 311 | for (i = 0; i < cnt; i++) { |
296 | struct irq_desc *desc = irq_to_desc(start + i); | 312 | struct irq_desc *desc = irq_to_desc(start + i); |
297 | 313 | ||
298 | desc->owner = owner; | 314 | desc->owner = owner; |
299 | } | 315 | } |
300 | return start; | 316 | return start; |
301 | } | 317 | } |
302 | 318 | ||
/* The static array cannot grow; expansion always fails. */
static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}
307 | 323 | ||
/* Mark @irq as allocated in the bitmap (legacy/static descriptor mode). */
void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}
314 | 330 | ||
#ifdef CONFIG_GENERIC_IRQ_LEGACY
/* Re-initialize a legacy irq descriptor by resetting it to defaults. */
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif
321 | 337 | ||
322 | #endif /* !CONFIG_SPARSE_IRQ */ | 338 | #endif /* !CONFIG_SPARSE_IRQ */ |
323 | 339 | ||
324 | /** | 340 | /** |
325 | * generic_handle_irq - Invoke the handler for a particular irq | 341 | * generic_handle_irq - Invoke the handler for a particular irq |
326 | * @irq: The irq number to handle | 342 | * @irq: The irq number to handle |
327 | * | 343 | * |
328 | */ | 344 | */ |
329 | int generic_handle_irq(unsigned int irq) | 345 | int generic_handle_irq(unsigned int irq) |
330 | { | 346 | { |
331 | struct irq_desc *desc = irq_to_desc(irq); | 347 | struct irq_desc *desc = irq_to_desc(irq); |
332 | 348 | ||
333 | if (!desc) | 349 | if (!desc) |
334 | return -EINVAL; | 350 | return -EINVAL; |
335 | generic_handle_irq_desc(irq, desc); | 351 | generic_handle_irq_desc(irq, desc); |
336 | return 0; | 352 | return 0; |
337 | } | 353 | } |
338 | EXPORT_SYMBOL_GPL(generic_handle_irq); | 354 | EXPORT_SYMBOL_GPL(generic_handle_irq); |
339 | 355 | ||
340 | #ifdef CONFIG_HANDLE_DOMAIN_IRQ | 356 | #ifdef CONFIG_HANDLE_DOMAIN_IRQ |
341 | /** | 357 | /** |
342 | * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain | 358 | * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain |
343 | * @domain: The domain where to perform the lookup | 359 | * @domain: The domain where to perform the lookup |
344 | * @hwirq: The HW irq number to convert to a logical one | 360 | * @hwirq: The HW irq number to convert to a logical one |
345 | * @lookup: Whether to perform the domain lookup or not | 361 | * @lookup: Whether to perform the domain lookup or not |
346 | * @regs: Register file coming from the low-level handling code | 362 | * @regs: Register file coming from the low-level handling code |
347 | * | 363 | * |
348 | * Returns: 0 on success, or -EINVAL if conversion has failed | 364 | * Returns: 0 on success, or -EINVAL if conversion has failed |
349 | */ | 365 | */ |
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	/* Translate the HW number to the Linux irq number if requested */
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
379 | #endif | 395 | #endif |
380 | 396 | ||
381 | /* Dynamic interrupt handling */ | 397 | /* Dynamic interrupt handling */ |
382 | 398 | ||
383 | /** | 399 | /** |
384 | * irq_free_descs - free irq descriptors | 400 | * irq_free_descs - free irq descriptors |
385 | * @from: Start of descriptor range | 401 | * @from: Start of descriptor range |
386 | * @cnt: Number of consecutive irqs to free | 402 | * @cnt: Number of consecutive irqs to free |
387 | */ | 403 | */ |
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	/* Silently ignore ranges outside the currently valid irq space */
	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	/* Release the range in the allocation bitmap last */
	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
402 | EXPORT_SYMBOL_GPL(irq_free_descs); | 418 | EXPORT_SYMBOL_GPL(irq_free_descs); |
403 | 419 | ||
404 | /** | 420 | /** |
405 | * irq_alloc_descs - allocate and initialize a range of irq descriptors | 421 | * irq_alloc_descs - allocate and initialize a range of irq descriptors |
406 | * @irq: Allocate for specific irq number if irq >= 0 | 422 | * @irq: Allocate for specific irq number if irq >= 0 |
407 | * @from: Start the search from this irq number | 423 | * @from: Start the search from this irq number |
408 | * @cnt: Number of consecutive irqs to allocate. | 424 | * @cnt: Number of consecutive irqs to allocate. |
409 | * @node: Preferred node on which the irq descriptor should be allocated | 425 | * @node: Preferred node on which the irq descriptor should be allocated |
410 | * @owner: Owning module (can be NULL) | 426 | * @owner: Owning module (can be NULL) |
411 | * | 427 | * |
412 | * Returns the first irq number or error code | 428 | * Returns the first irq number or error code |
413 | */ | 429 | */ |
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		/* A specific irq was requested; search must start there */
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	/* A specific irq was requested but its slot is already taken */
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	/* Reserve the range before dropping the lock, then allocate */
	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
459 | 475 | ||
460 | #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ | 476 | #ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ |
461 | /** | 477 | /** |
462 | * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware | 478 | * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware |
463 | * @cnt: number of interrupts to allocate | 479 | * @cnt: number of interrupts to allocate |
464 | * @node: node on which to allocate | 480 | * @node: node on which to allocate |
465 | * | 481 | * |
466 | * Returns an interrupt number > 0 or 0, if the allocation fails. | 482 | * Returns an interrupt number > 0 or 0, if the allocation fails. |
467 | */ | 483 | */ |
468 | unsigned int irq_alloc_hwirqs(int cnt, int node) | 484 | unsigned int irq_alloc_hwirqs(int cnt, int node) |
469 | { | 485 | { |
470 | int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL); | 486 | int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL); |
471 | 487 | ||
472 | if (irq < 0) | 488 | if (irq < 0) |
473 | return 0; | 489 | return 0; |
474 | 490 | ||
475 | for (i = irq; cnt > 0; i++, cnt--) { | 491 | for (i = irq; cnt > 0; i++, cnt--) { |
476 | if (arch_setup_hwirq(i, node)) | 492 | if (arch_setup_hwirq(i, node)) |
477 | goto err; | 493 | goto err; |
478 | irq_clear_status_flags(i, _IRQ_NOREQUEST); | 494 | irq_clear_status_flags(i, _IRQ_NOREQUEST); |
479 | } | 495 | } |
480 | return irq; | 496 | return irq; |
481 | 497 | ||
482 | err: | 498 | err: |
483 | for (i--; i >= irq; i--) { | 499 | for (i--; i >= irq; i--) { |
484 | irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); | 500 | irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); |
485 | arch_teardown_hwirq(i); | 501 | arch_teardown_hwirq(i); |
486 | } | 502 | } |
487 | irq_free_descs(irq, cnt); | 503 | irq_free_descs(irq, cnt); |
488 | return 0; | 504 | return 0; |
489 | } | 505 | } |
490 | EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); | 506 | EXPORT_SYMBOL_GPL(irq_alloc_hwirqs); |
491 | 507 | ||
492 | /** | 508 | /** |
493 | * irq_free_hwirqs - Free irq descriptor and cleanup the hardware | 509 | * irq_free_hwirqs - Free irq descriptor and cleanup the hardware |
494 | * @from: Free from irq number | 510 | * @from: Free from irq number |
495 | * @cnt: number of interrupts to free | 511 | * @cnt: number of interrupts to free |
496 | * | 512 | * |
497 | */ | 513 | */ |
498 | void irq_free_hwirqs(unsigned int from, int cnt) | 514 | void irq_free_hwirqs(unsigned int from, int cnt) |
499 | { | 515 | { |
500 | int i, j; | 516 | int i, j; |
501 | 517 | ||
502 | for (i = from, j = cnt; j > 0; i++, j--) { | 518 | for (i = from, j = cnt; j > 0; i++, j--) { |
503 | irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); | 519 | irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); |
504 | arch_teardown_hwirq(i); | 520 | arch_teardown_hwirq(i); |
505 | } | 521 | } |
506 | irq_free_descs(from, cnt); | 522 | irq_free_descs(from, cnt); |
507 | } | 523 | } |
508 | EXPORT_SYMBOL_GPL(irq_free_hwirqs); | 524 | EXPORT_SYMBOL_GPL(irq_free_hwirqs); |
509 | #endif | 525 | #endif |
510 | 526 | ||
511 | /** | 527 | /** |
512 | * irq_get_next_irq - get next allocated irq number | 528 | * irq_get_next_irq - get next allocated irq number |
513 | * @offset: where to start the search | 529 | * @offset: where to start the search |
514 | * | 530 | * |
515 | * Returns next irq number after offset or nr_irqs if none is found. | 531 | * Returns next irq number after offset or nr_irqs if none is found. |
516 | */ | 532 | */ |
517 | unsigned int irq_get_next_irq(unsigned int offset) | 533 | unsigned int irq_get_next_irq(unsigned int offset) |
518 | { | 534 | { |
519 | return find_next_bit(allocated_irqs, nr_irqs, offset); | 535 | return find_next_bit(allocated_irqs, nr_irqs, offset); |
520 | } | 536 | } |
521 | 537 | ||
/*
 * __irq_get_desc_lock - look up an irq descriptor and lock it
 * @irq:	interrupt number
 * @flags:	filled with the saved irq state for the later unlock
 * @bus:	when true, also take the chip's slow bus lock
 * @check:	_IRQ_DESC_CHECK / _IRQ_DESC_PERCPU validation flags
 *
 * Returns the locked descriptor, or NULL when the irq does not exist or
 * the per-cpu-devid check fails.  Pair with __irq_put_desc_unlock().
 */
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			/* Caller requires a per-cpu-devid irq: reject others */
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			/* Caller requires a normal irq: reject per-cpu-devid */
			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		/* Bus lock first, then the desc lock with irqs disabled */
		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}
545 | 561 | ||
/*
 * Counterpart to __irq_get_desc_lock(): release the descriptor lock and,
 * when @bus is set, the chip bus lock, in reverse order of acquisition.
 */
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
552 | 568 | ||
553 | int irq_set_percpu_devid(unsigned int irq) | 569 | int irq_set_percpu_devid(unsigned int irq) |
554 | { | 570 | { |
555 | struct irq_desc *desc = irq_to_desc(irq); | 571 | struct irq_desc *desc = irq_to_desc(irq); |
556 | 572 | ||
557 | if (!desc) | 573 | if (!desc) |
558 | return -EINVAL; | 574 | return -EINVAL; |
559 | 575 | ||
560 | if (desc->percpu_enabled) | 576 | if (desc->percpu_enabled) |
561 | return -EINVAL; | 577 | return -EINVAL; |
562 | 578 | ||
563 | desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); | 579 | desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); |
564 | 580 | ||
565 | if (!desc->percpu_enabled) | 581 | if (!desc->percpu_enabled) |
566 | return -ENOMEM; | 582 | return -ENOMEM; |
567 | 583 | ||
568 | irq_set_percpu_devid_flags(irq); | 584 | irq_set_percpu_devid_flags(irq); |
569 | return 0; | 585 | return 0; |
570 | } | 586 | } |
571 | 587 | ||
/* Account one occurrence of @irq in this cpu's interrupt statistics. */
void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
}
576 | 592 | ||
593 | /** | ||
594 | * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu | ||
595 | * @irq: The interrupt number | ||
596 | * @cpu: The cpu number | ||
597 | * | ||
598 | * Returns the sum of interrupt counts on @cpu since boot for | ||
599 | * @irq. The caller must ensure that the interrupt is not removed | ||
600 | * concurrently. | ||
601 | */ | ||
577 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | 602 | unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) |
578 | { | 603 | { |
579 | struct irq_desc *desc = irq_to_desc(irq); | 604 | struct irq_desc *desc = irq_to_desc(irq); |
580 | 605 | ||
581 | return desc && desc->kstat_irqs ? | 606 | return desc && desc->kstat_irqs ? |
582 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; | 607 | *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; |
583 | } | 608 | } |
584 | 609 | ||
610 | /** | ||
611 | * kstat_irqs - Get the statistics for an interrupt | ||
612 | * @irq: The interrupt number | ||
613 | * | ||
614 | * Returns the sum of interrupt counts on all cpus since boot for | ||
615 | * @irq. The caller must ensure that the interrupt is not removed | ||
616 | * concurrently. | ||
617 | */ | ||
585 | unsigned int kstat_irqs(unsigned int irq) | 618 | unsigned int kstat_irqs(unsigned int irq) |
586 | { | 619 | { |
587 | struct irq_desc *desc = irq_to_desc(irq); | 620 | struct irq_desc *desc = irq_to_desc(irq); |
588 | int cpu; | 621 | int cpu; |
589 | int sum = 0; | 622 | int sum = 0; |
590 | 623 | ||
591 | if (!desc || !desc->kstat_irqs) | 624 | if (!desc || !desc->kstat_irqs) |
592 | return 0; | 625 | return 0; |
593 | for_each_possible_cpu(cpu) | 626 | for_each_possible_cpu(cpu) |
594 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); | 627 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); |
628 | return sum; | ||
629 | } | ||
630 | |||
/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	int sum;

	/* Hold the sparse irq lock so the descriptor cannot be freed */
	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
597 | 649 |
kernel/irq/proc.c
1 | /* | 1 | /* |
2 | * linux/kernel/irq/proc.c | 2 | * linux/kernel/irq/proc.c |
3 | * | 3 | * |
4 | * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar | 4 | * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar |
5 | * | 5 | * |
6 | * This file contains the /proc/irq/ handling code. | 6 | * This file contains the /proc/irq/ handling code. |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/gfp.h> | 10 | #include <linux/gfp.h> |
11 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
12 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/kernel_stat.h> | 14 | #include <linux/kernel_stat.h> |
15 | 15 | ||
16 | #include "internals.h" | 16 | #include "internals.h" |
17 | 17 | ||
18 | /* | ||
19 | * Access rules: | ||
20 | * | ||
21 | * procfs protects read/write of /proc/irq/N/ files against a | ||
22 | * concurrent free of the interrupt descriptor. remove_proc_entry() | ||
23 | * immediately prevents new read/writes to happen and waits for | ||
24 | * already running read/write functions to complete. | ||
25 | * | ||
26 | * We remove the proc entries first and then delete the interrupt | ||
27 | * descriptor from the radix tree and free it. So it is guaranteed | ||
28 | * that irq_to_desc(N) is valid as long as the read/writes are | ||
29 | * permitted by procfs. | ||
30 | * | ||
31 | * The read from /proc/interrupts is a different problem because there | ||
32 | * is no protection. So the lookup and the access to irqdesc | ||
33 | * information must be protected by sparse_irq_lock. | ||
34 | */ | ||
18 | static struct proc_dir_entry *root_irq_dir; | 35 | static struct proc_dir_entry *root_irq_dir; |
19 | 36 | ||
20 | #ifdef CONFIG_SMP | 37 | #ifdef CONFIG_SMP |
21 | 38 | ||
/*
 * Show the affinity mask of an irq: bitmask format when @type == 0
 * (smp_affinity), cpulist format otherwise (smp_affinity_list).
 */
static int show_irq_affinity(int type, struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask = desc->irq_data.affinity;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* A pending affinity change is what will take effect - show it */
	if (irqd_is_setaffinity_pending(&desc->irq_data))
		mask = desc->pending_mask;
#endif
	if (type)
		seq_cpumask_list(m, mask);
	else
		seq_cpumask(m, mask);
	seq_putc(m, '\n');
	return 0;
}
38 | 55 | ||
/*
 * Show /proc/irq/<irq>/affinity_hint.  Copies the driver-provided hint
 * under the descriptor lock; prints an empty (zeroed) mask when no hint
 * has been set.
 */
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* Snapshot the hint under the lock; it can change concurrently */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_cpumask(m, mask);
	seq_putc(m, '\n');
	free_cpumask_var(mask);

	return 0;
}
59 | 76 | ||
60 | #ifndef is_affinity_mask_valid | 77 | #ifndef is_affinity_mask_valid |
61 | #define is_affinity_mask_valid(val) 1 | 78 | #define is_affinity_mask_valid(val) 1 |
62 | #endif | 79 | #endif |
63 | 80 | ||
64 | int no_irq_affinity; | 81 | int no_irq_affinity; |
/* /proc/irq/<irq>/smp_affinity: bitmask format */
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(0, m, v);
}

/* /proc/irq/<irq>/smp_affinity_list: cpulist format */
static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(1, m, v);
}
74 | 91 | ||
75 | 92 | ||
/*
 * Parse a cpumask written to /proc/irq/<irq>/smp_affinity (@type == 0,
 * hex bitmask) or smp_affinity_list (@type != 0, cpulist) and apply it
 * as the irq's affinity.  Returns @count on success, negative errno on
 * failure.
 */
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity(irq) || no_irq_affinity)
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	/* Two input syntaxes, one for each proc file flavour */
	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
		err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
	} else {
		irq_set_affinity(irq, new_value);
		err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}
119 | 136 | ||
/* Write handler for /proc/irq/<irq>/smp_affinity (bitmask syntax) */
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

/* Write handler for /proc/irq/<irq>/smp_affinity_list (cpulist syntax) */
static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}
131 | 148 | ||
/* seq_file open helpers: single_open with the irq number as private data */
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE_DATA(inode));
}

/* /proc/irq/<irq>/smp_affinity: read/write */
static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

/* /proc/irq/<irq>/affinity_hint: read-only */
static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* /proc/irq/<irq>/smp_affinity_list: read/write */
static const struct file_operations irq_affinity_list_proc_fops = {
	.open		= irq_affinity_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_list_proc_write,
};
169 | 186 | ||
/* Show /proc/irq/default_smp_affinity as a hex cpu bitmask. */
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, irq_default_affinity);
	seq_putc(m, '\n');
	return 0;
}
176 | 193 | ||
/*
 * Parse a cpumask written to /proc/irq/default_smp_affinity and install
 * it as irq_default_affinity.  Returns @count on success, negative
 * errno on failure.
 */
static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}
212 | 229 | ||
/* seq_file open helper for /proc/irq/default_smp_affinity */
static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE_DATA(inode));
}

/* /proc/irq/default_smp_affinity: read/write */
static const struct file_operations default_affinity_proc_fops = {
	.open = default_affinity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = default_affinity_write,
};
225 | 242 | ||
/* Show /proc/irq/<irq>/node: the node stored in the irq descriptor. */
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", desc->irq_data.node);
	return 0;
}
233 | 250 | ||
/* seq_file open helper for /proc/irq/<irq>/node */
static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE_DATA(inode));
}

/* /proc/irq/<irq>/node: read-only */
static const struct file_operations irq_node_proc_fops = {
	.open = irq_node_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
245 | #endif | 262 | #endif |
246 | 263 | ||
247 | static int irq_spurious_proc_show(struct seq_file *m, void *v) | 264 | static int irq_spurious_proc_show(struct seq_file *m, void *v) |
248 | { | 265 | { |
249 | struct irq_desc *desc = irq_to_desc((long) m->private); | 266 | struct irq_desc *desc = irq_to_desc((long) m->private); |
250 | 267 | ||
251 | seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", | 268 | seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n", |
252 | desc->irq_count, desc->irqs_unhandled, | 269 | desc->irq_count, desc->irqs_unhandled, |
253 | jiffies_to_msecs(desc->last_unhandled)); | 270 | jiffies_to_msecs(desc->last_unhandled)); |
254 | return 0; | 271 | return 0; |
255 | } | 272 | } |
256 | 273 | ||
/* seq_file open helper for /proc/irq/<irq>/spurious */
static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE_DATA(inode));
}

/* /proc/irq/<irq>/spurious: read-only */
static const struct file_operations irq_spurious_proc_fops = {
	.open = irq_spurious_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
268 | 285 | ||
269 | #define MAX_NAMELEN 128 | 286 | #define MAX_NAMELEN 128 |
270 | 287 | ||
271 | static int name_unique(unsigned int irq, struct irqaction *new_action) | 288 | static int name_unique(unsigned int irq, struct irqaction *new_action) |
272 | { | 289 | { |
273 | struct irq_desc *desc = irq_to_desc(irq); | 290 | struct irq_desc *desc = irq_to_desc(irq); |
274 | struct irqaction *action; | 291 | struct irqaction *action; |
275 | unsigned long flags; | 292 | unsigned long flags; |
276 | int ret = 1; | 293 | int ret = 1; |
277 | 294 | ||
278 | raw_spin_lock_irqsave(&desc->lock, flags); | 295 | raw_spin_lock_irqsave(&desc->lock, flags); |
279 | for (action = desc->action ; action; action = action->next) { | 296 | for (action = desc->action ; action; action = action->next) { |
280 | if ((action != new_action) && action->name && | 297 | if ((action != new_action) && action->name && |
281 | !strcmp(new_action->name, action->name)) { | 298 | !strcmp(new_action->name, action->name)) { |
282 | ret = 0; | 299 | ret = 0; |
283 | break; | 300 | break; |
284 | } | 301 | } |
285 | } | 302 | } |
286 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 303 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
287 | return ret; | 304 | return ret; |
288 | } | 305 | } |
289 | 306 | ||
290 | void register_handler_proc(unsigned int irq, struct irqaction *action) | 307 | void register_handler_proc(unsigned int irq, struct irqaction *action) |
291 | { | 308 | { |
292 | char name [MAX_NAMELEN]; | 309 | char name [MAX_NAMELEN]; |
293 | struct irq_desc *desc = irq_to_desc(irq); | 310 | struct irq_desc *desc = irq_to_desc(irq); |
294 | 311 | ||
295 | if (!desc->dir || action->dir || !action->name || | 312 | if (!desc->dir || action->dir || !action->name || |
296 | !name_unique(irq, action)) | 313 | !name_unique(irq, action)) |
297 | return; | 314 | return; |
298 | 315 | ||
299 | memset(name, 0, MAX_NAMELEN); | 316 | memset(name, 0, MAX_NAMELEN); |
300 | snprintf(name, MAX_NAMELEN, "%s", action->name); | 317 | snprintf(name, MAX_NAMELEN, "%s", action->name); |
301 | 318 | ||
302 | /* create /proc/irq/1234/handler/ */ | 319 | /* create /proc/irq/1234/handler/ */ |
303 | action->dir = proc_mkdir(name, desc->dir); | 320 | action->dir = proc_mkdir(name, desc->dir); |
304 | } | 321 | } |
305 | 322 | ||
306 | #undef MAX_NAMELEN | 323 | #undef MAX_NAMELEN |
307 | 324 | ||
308 | #define MAX_NAMELEN 10 | 325 | #define MAX_NAMELEN 10 |
309 | 326 | ||
310 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) | 327 | void register_irq_proc(unsigned int irq, struct irq_desc *desc) |
311 | { | 328 | { |
312 | char name [MAX_NAMELEN]; | 329 | char name [MAX_NAMELEN]; |
313 | 330 | ||
314 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) | 331 | if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir) |
315 | return; | 332 | return; |
316 | 333 | ||
317 | memset(name, 0, MAX_NAMELEN); | 334 | memset(name, 0, MAX_NAMELEN); |
318 | sprintf(name, "%d", irq); | 335 | sprintf(name, "%d", irq); |
319 | 336 | ||
320 | /* create /proc/irq/1234 */ | 337 | /* create /proc/irq/1234 */ |
321 | desc->dir = proc_mkdir(name, root_irq_dir); | 338 | desc->dir = proc_mkdir(name, root_irq_dir); |
322 | if (!desc->dir) | 339 | if (!desc->dir) |
323 | return; | 340 | return; |
324 | 341 | ||
325 | #ifdef CONFIG_SMP | 342 | #ifdef CONFIG_SMP |
326 | /* create /proc/irq/<irq>/smp_affinity */ | 343 | /* create /proc/irq/<irq>/smp_affinity */ |
327 | proc_create_data("smp_affinity", 0644, desc->dir, | 344 | proc_create_data("smp_affinity", 0644, desc->dir, |
328 | &irq_affinity_proc_fops, (void *)(long)irq); | 345 | &irq_affinity_proc_fops, (void *)(long)irq); |
329 | 346 | ||
330 | /* create /proc/irq/<irq>/affinity_hint */ | 347 | /* create /proc/irq/<irq>/affinity_hint */ |
331 | proc_create_data("affinity_hint", 0444, desc->dir, | 348 | proc_create_data("affinity_hint", 0444, desc->dir, |
332 | &irq_affinity_hint_proc_fops, (void *)(long)irq); | 349 | &irq_affinity_hint_proc_fops, (void *)(long)irq); |
333 | 350 | ||
334 | /* create /proc/irq/<irq>/smp_affinity_list */ | 351 | /* create /proc/irq/<irq>/smp_affinity_list */ |
335 | proc_create_data("smp_affinity_list", 0644, desc->dir, | 352 | proc_create_data("smp_affinity_list", 0644, desc->dir, |
336 | &irq_affinity_list_proc_fops, (void *)(long)irq); | 353 | &irq_affinity_list_proc_fops, (void *)(long)irq); |
337 | 354 | ||
338 | proc_create_data("node", 0444, desc->dir, | 355 | proc_create_data("node", 0444, desc->dir, |
339 | &irq_node_proc_fops, (void *)(long)irq); | 356 | &irq_node_proc_fops, (void *)(long)irq); |
340 | #endif | 357 | #endif |
341 | 358 | ||
342 | proc_create_data("spurious", 0444, desc->dir, | 359 | proc_create_data("spurious", 0444, desc->dir, |
343 | &irq_spurious_proc_fops, (void *)(long)irq); | 360 | &irq_spurious_proc_fops, (void *)(long)irq); |
344 | } | 361 | } |
345 | 362 | ||
346 | void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) | 363 | void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) |
347 | { | 364 | { |
348 | char name [MAX_NAMELEN]; | 365 | char name [MAX_NAMELEN]; |
349 | 366 | ||
350 | if (!root_irq_dir || !desc->dir) | 367 | if (!root_irq_dir || !desc->dir) |
351 | return; | 368 | return; |
352 | #ifdef CONFIG_SMP | 369 | #ifdef CONFIG_SMP |
353 | remove_proc_entry("smp_affinity", desc->dir); | 370 | remove_proc_entry("smp_affinity", desc->dir); |
354 | remove_proc_entry("affinity_hint", desc->dir); | 371 | remove_proc_entry("affinity_hint", desc->dir); |
355 | remove_proc_entry("smp_affinity_list", desc->dir); | 372 | remove_proc_entry("smp_affinity_list", desc->dir); |
356 | remove_proc_entry("node", desc->dir); | 373 | remove_proc_entry("node", desc->dir); |
357 | #endif | 374 | #endif |
358 | remove_proc_entry("spurious", desc->dir); | 375 | remove_proc_entry("spurious", desc->dir); |
359 | 376 | ||
360 | memset(name, 0, MAX_NAMELEN); | 377 | memset(name, 0, MAX_NAMELEN); |
361 | sprintf(name, "%u", irq); | 378 | sprintf(name, "%u", irq); |
362 | remove_proc_entry(name, root_irq_dir); | 379 | remove_proc_entry(name, root_irq_dir); |
363 | } | 380 | } |
364 | 381 | ||
365 | #undef MAX_NAMELEN | 382 | #undef MAX_NAMELEN |
366 | 383 | ||
/**
 * unregister_handler_proc - remove an irqaction's proc entry
 * @irq:	interrupt number (unused here; kept for API symmetry)
 * @action:	handler whose proc directory (action->dir) is removed
 *
 * proc_remove() accepts NULL, so no guard is needed when the handler
 * never registered a proc entry.
 */
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}
371 | 388 | ||
/*
 * Create /proc/irq/default_smp_affinity, the writable mask applied to
 * newly requested interrupts.  Only meaningful on SMP; a no-op on UP
 * builds.  Creation failure is deliberately ignored: the entry is a
 * convenience, not required for interrupt operation.
 */
static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_fops);
#endif
}
379 | 396 | ||
/**
 * init_irq_proc - set up the /proc/irq/ hierarchy at boot
 *
 * Creates the /proc/irq root directory, the default_smp_affinity
 * entry, and one numbered subdirectory per already-allocated IRQ
 * descriptor.  If the root directory cannot be created, everything
 * else is skipped (all later helpers check root_irq_dir).
 */
void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc) {
		/*
		 * NOTE(review): presumably guards against holes in a
		 * sparse descriptor array — confirm whether
		 * for_each_irq_desc() can yield NULL in this config.
		 */
		if (!desc)
			continue;

		register_irq_proc(irq, desc);
	}
}
402 | 419 | ||
#ifdef CONFIG_GENERIC_IRQ_SHOW

/*
 * Default (empty) implementation of the per-architecture tail of
 * /proc/interrupts.  Architectures override this __weak stub to print
 * rows for interrupts not covered by the generic irq_desc machinery.
 * @prec is the column width computed by show_interrupts().
 */
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

/* Number of rows show_interrupts() iterates before the arch tail. */
#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif
/*
 * seq_file show() callback backing /proc/interrupts.  Called once per
 * position *v: row 0 also prints the CPU header, row ACTUAL_NR_IRQS
 * delegates to the architecture tail, anything beyond that ends the
 * sequence.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	/*
	 * Width of the IRQ-number column, computed on row 0 and reused
	 * for every subsequent row of the same read.  static: persists
	 * across the per-row invocations of this callback.
	 */
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	/*
	 * Hold the sparse-irq lock across the descriptor lookup and
	 * use: prevents the descriptor from being freed underneath us
	 * (the proc-vs-free_irq race this lock exists to close).
	 */
	irq_lock_sparse();
	desc = irq_to_desc(i);
	if (!desc)
		goto outsparse;

	raw_spin_lock_irqsave(&desc->lock, flags);
	/* OR per-CPU counts: cheap "has this IRQ ever fired" test. */
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	/* Skip rows for IRQs with no handler and no recorded activity. */
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

	/* Chip column: custom printer, chip name, or placeholder. */
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	/* Hardware irq number, when a domain maps linux irq -> hwirq. */
	if (desc->irq_data.domain)
		seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	/* Comma-separated list of all handlers sharing this IRQ. */
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	/* Unlock in reverse acquisition order: desc->lock, then sparse. */
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	irq_unlock_sparse();
	return 0;
}
484 | #endif | 504 | #endif |
485 | 505 |