Commit 495e0c79406fc0915fe80c7b1bbc006ef1370842

Authored by Thomas Gleixner
1 parent df31a0a06f

xtensa: Convert main irq_chip to new functions

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Zankel <chris@zankel.net>
LKML-Reference: <20110206211137.653005586@linutronix.de>

Showing 1 changed file with 21 additions and 22 deletions Inline Diff

arch/xtensa/kernel/irq.c
1 /* 1 /*
2 * linux/arch/xtensa/kernel/irq.c 2 * linux/arch/xtensa/kernel/irq.c
3 * 3 *
4 * Xtensa built-in interrupt controller and some generic functions copied 4 * Xtensa built-in interrupt controller and some generic functions copied
5 * from i386. 5 * from i386.
6 * 6 *
7 * Copyright (C) 2002 - 2006 Tensilica, Inc. 7 * Copyright (C) 2002 - 2006 Tensilica, Inc.
8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar 8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
9 * 9 *
10 * 10 *
11 * Chris Zankel <chris@zankel.net> 11 * Chris Zankel <chris@zankel.net>
12 * Kevin Chea 12 * Kevin Chea
13 * 13 *
14 */ 14 */
15 15
16 #include <linux/module.h> 16 #include <linux/module.h>
17 #include <linux/seq_file.h> 17 #include <linux/seq_file.h>
18 #include <linux/interrupt.h> 18 #include <linux/interrupt.h>
19 #include <linux/irq.h> 19 #include <linux/irq.h>
20 #include <linux/kernel_stat.h> 20 #include <linux/kernel_stat.h>
21 21
22 #include <asm/uaccess.h> 22 #include <asm/uaccess.h>
23 #include <asm/platform.h> 23 #include <asm/platform.h>
24 24
25 static unsigned int cached_irq_mask; 25 static unsigned int cached_irq_mask;
26 26
27 atomic_t irq_err_count; 27 atomic_t irq_err_count;
28 28
29 /* 29 /*
30 * do_IRQ handles all normal device IRQ's (the special 30 * do_IRQ handles all normal device IRQ's (the special
31 * SMP cross-CPU interrupts have their own specific 31 * SMP cross-CPU interrupts have their own specific
32 * handlers). 32 * handlers).
33 */ 33 */
34 34
35 asmlinkage void do_IRQ(int irq, struct pt_regs *regs) 35 asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
36 { 36 {
37 struct pt_regs *old_regs = set_irq_regs(regs); 37 struct pt_regs *old_regs = set_irq_regs(regs);
38 struct irq_desc *desc = irq_desc + irq;
39 38
40 if (irq >= NR_IRQS) { 39 if (irq >= NR_IRQS) {
41 printk(KERN_EMERG "%s: cannot handle IRQ %d\n", 40 printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
42 __func__, irq); 41 __func__, irq);
43 } 42 }
44 43
45 irq_enter(); 44 irq_enter();
46 45
47 #ifdef CONFIG_DEBUG_STACKOVERFLOW 46 #ifdef CONFIG_DEBUG_STACKOVERFLOW
48 /* Debugging check for stack overflow: is there less than 1KB free? */ 47 /* Debugging check for stack overflow: is there less than 1KB free? */
49 { 48 {
50 unsigned long sp; 49 unsigned long sp;
51 50
52 __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp)); 51 __asm__ __volatile__ ("mov %0, a1\n" : "=a" (sp));
53 sp &= THREAD_SIZE - 1; 52 sp &= THREAD_SIZE - 1;
54 53
55 if (unlikely(sp < (sizeof(thread_info) + 1024))) 54 if (unlikely(sp < (sizeof(thread_info) + 1024)))
56 printk("Stack overflow in do_IRQ: %ld\n", 55 printk("Stack overflow in do_IRQ: %ld\n",
57 sp - sizeof(struct thread_info)); 56 sp - sizeof(struct thread_info));
58 } 57 }
59 #endif 58 #endif
60 desc->handle_irq(irq, desc); 59 generic_handle_irq(irq);
61 60
62 irq_exit(); 61 irq_exit();
63 set_irq_regs(old_regs); 62 set_irq_regs(old_regs);
64 } 63 }
65 64
66 /* 65 /*
67 * Generic, controller-independent functions: 66 * Generic, controller-independent functions:
68 */ 67 */
69 68
70 int show_interrupts(struct seq_file *p, void *v) 69 int show_interrupts(struct seq_file *p, void *v)
71 { 70 {
72 int i = *(loff_t *) v, j; 71 int i = *(loff_t *) v, j;
73 struct irqaction * action; 72 struct irqaction * action;
74 unsigned long flags; 73 unsigned long flags;
75 74
76 if (i == 0) { 75 if (i == 0) {
77 seq_printf(p, " "); 76 seq_printf(p, " ");
78 for_each_online_cpu(j) 77 for_each_online_cpu(j)
79 seq_printf(p, "CPU%d ",j); 78 seq_printf(p, "CPU%d ",j);
80 seq_putc(p, '\n'); 79 seq_putc(p, '\n');
81 } 80 }
82 81
83 if (i < NR_IRQS) { 82 if (i < NR_IRQS) {
84 raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 83 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
85 action = irq_desc[i].action; 84 action = irq_desc[i].action;
86 if (!action) 85 if (!action)
87 goto skip; 86 goto skip;
88 seq_printf(p, "%3d: ",i); 87 seq_printf(p, "%3d: ",i);
89 #ifndef CONFIG_SMP 88 #ifndef CONFIG_SMP
90 seq_printf(p, "%10u ", kstat_irqs(i)); 89 seq_printf(p, "%10u ", kstat_irqs(i));
91 #else 90 #else
92 for_each_online_cpu(j) 91 for_each_online_cpu(j)
93 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 92 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
94 #endif 93 #endif
95 seq_printf(p, " %14s", irq_desc[i].chip->name); 94 seq_printf(p, " %14s", irq_desc[i].chip->name);
96 seq_printf(p, " %s", action->name); 95 seq_printf(p, " %s", action->name);
97 96
98 for (action=action->next; action; action = action->next) 97 for (action=action->next; action; action = action->next)
99 seq_printf(p, ", %s", action->name); 98 seq_printf(p, ", %s", action->name);
100 99
101 seq_putc(p, '\n'); 100 seq_putc(p, '\n');
102 skip: 101 skip:
103 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 102 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
104 } else if (i == NR_IRQS) { 103 } else if (i == NR_IRQS) {
105 seq_printf(p, "NMI: "); 104 seq_printf(p, "NMI: ");
106 for_each_online_cpu(j) 105 for_each_online_cpu(j)
107 seq_printf(p, "%10u ", nmi_count(j)); 106 seq_printf(p, "%10u ", nmi_count(j));
108 seq_putc(p, '\n'); 107 seq_putc(p, '\n');
109 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 108 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
110 } 109 }
111 return 0; 110 return 0;
112 } 111 }
113 112
114 static void xtensa_irq_mask(unsigned int irq) 113 static void xtensa_irq_mask(struct irq_data *d)
115 { 114 {
116 cached_irq_mask &= ~(1 << irq); 115 cached_irq_mask &= ~(1 << d->irq);
117 set_sr (cached_irq_mask, INTENABLE); 116 set_sr (cached_irq_mask, INTENABLE);
118 } 117 }
119 118
120 static void xtensa_irq_unmask(unsigned int irq) 119 static void xtensa_irq_unmask(struct irq_data *d)
121 { 120 {
122 cached_irq_mask |= 1 << irq; 121 cached_irq_mask |= 1 << d->irq;
123 set_sr (cached_irq_mask, INTENABLE); 122 set_sr (cached_irq_mask, INTENABLE);
124 } 123 }
125 124
126 static void xtensa_irq_enable(unsigned int irq) 125 static void xtensa_irq_enable(struct irq_data *d)
127 { 126 {
128 variant_irq_enable(irq); 127 variant_irq_enable(d->irq);
129 xtensa_irq_unmask(irq); 128 xtensa_irq_unmask(d);
130 } 129 }
131 130
132 static void xtensa_irq_disable(unsigned int irq) 131 static void xtensa_irq_disable(struct irq_data *d)
133 { 132 {
134 xtensa_irq_mask(irq); 133 xtensa_irq_mask(d);
135 variant_irq_disable(irq); 134 variant_irq_disable(d->irq);
136 } 135 }
137 136
138 static void xtensa_irq_ack(unsigned int irq) 137 static void xtensa_irq_ack(struct irq_data *d)
139 { 138 {
140 set_sr(1 << irq, INTCLEAR); 139 set_sr(1 << d->irq, INTCLEAR);
141 } 140 }
142 141
143 static int xtensa_irq_retrigger(unsigned int irq) 142 static int xtensa_irq_retrigger(struct irq_data *d)
144 { 143 {
145 set_sr (1 << irq, INTSET); 144 set_sr (1 << d->irq, INTSET);
146 return 1; 145 return 1;
147 } 146 }
148 147
149 148
150 static struct irq_chip xtensa_irq_chip = { 149 static struct irq_chip xtensa_irq_chip = {
151 .name = "xtensa", 150 .name = "xtensa",
152 .enable = xtensa_irq_enable, 151 .irq_enable = xtensa_irq_enable,
153 .disable = xtensa_irq_disable, 152 .irq_disable = xtensa_irq_disable,
154 .mask = xtensa_irq_mask, 153 .irq_mask = xtensa_irq_mask,
155 .unmask = xtensa_irq_unmask, 154 .irq_unmask = xtensa_irq_unmask,
156 .ack = xtensa_irq_ack, 155 .irq_ack = xtensa_irq_ack,
157 .retrigger = xtensa_irq_retrigger, 156 .irq_retrigger = xtensa_irq_retrigger,
158 }; 157 };
159 158
160 void __init init_IRQ(void) 159 void __init init_IRQ(void)
161 { 160 {
162 int index; 161 int index;
163 162
164 for (index = 0; index < XTENSA_NR_IRQS; index++) { 163 for (index = 0; index < XTENSA_NR_IRQS; index++) {
165 int mask = 1 << index; 164 int mask = 1 << index;
166 165
167 if (mask & XCHAL_INTTYPE_MASK_SOFTWARE) 166 if (mask & XCHAL_INTTYPE_MASK_SOFTWARE)
168 set_irq_chip_and_handler(index, &xtensa_irq_chip, 167 set_irq_chip_and_handler(index, &xtensa_irq_chip,
169 handle_simple_irq); 168 handle_simple_irq);
170 169
171 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE) 170 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_EDGE)
172 set_irq_chip_and_handler(index, &xtensa_irq_chip, 171 set_irq_chip_and_handler(index, &xtensa_irq_chip,
173 handle_edge_irq); 172 handle_edge_irq);
174 173
175 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL) 174 else if (mask & XCHAL_INTTYPE_MASK_EXTERN_LEVEL)
176 set_irq_chip_and_handler(index, &xtensa_irq_chip, 175 set_irq_chip_and_handler(index, &xtensa_irq_chip,
177 handle_level_irq); 176 handle_level_irq);
178 177
179 else if (mask & XCHAL_INTTYPE_MASK_TIMER) 178 else if (mask & XCHAL_INTTYPE_MASK_TIMER)
180 set_irq_chip_and_handler(index, &xtensa_irq_chip, 179 set_irq_chip_and_handler(index, &xtensa_irq_chip,
181 handle_edge_irq); 180 handle_edge_irq);
182 181
183 else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */ 182 else /* XCHAL_INTTYPE_MASK_WRITE_ERROR */
184 /* XCHAL_INTTYPE_MASK_NMI */ 183 /* XCHAL_INTTYPE_MASK_NMI */
185 184
186 set_irq_chip_and_handler(index, &xtensa_irq_chip, 185 set_irq_chip_and_handler(index, &xtensa_irq_chip,
187 handle_level_irq); 186 handle_level_irq);
188 } 187 }
189 188
190 cached_irq_mask = 0; 189 cached_irq_mask = 0;
191 190
192 variant_init_irq(); 191 variant_init_irq();
193 } 192 }
194 193