Commit 508996b6a0ef0c7aa7701995d137e71c56180752

Authored by Linus Torvalds

Merge branches 'irq-fixes-for-linus' and 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'irq-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  genirq: Fix incorrect unlock in __setup_irq()
  cris: Use generic show_interrupts()
  genirq: show_interrupts: Check desc->name before printing it blindly
  cris: Use accessor functions to set IRQ_PER_CPU flag
  cris: Fix irq conversion fallout

* 'sched-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sched, kernel-doc: Fix runqueue_is_locked() description
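
For readers coming from the shortlog: the show_interrupts fix above hardens the generic /proc/interrupts printer against a NULL desc->name. The sketch below is illustrative only, not the actual kernel/irq code; it assumes the 2.6.38-era genirq structures (irq_desc, irq_data, irqaction), and show_irq_line is a hypothetical helper name. It shows the shape of the per-IRQ line printer with the desc->name check in place.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>

/* Hypothetical helper, for illustration; not from the kernel tree. */
static void show_irq_line(struct seq_file *p, int irq, struct irq_desc *desc)
{
	struct irqaction *action = desc->action;
	int cpu;

	seq_printf(p, "%*d: ", 3, irq);
	for_each_online_cpu(cpu)
		seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
	seq_printf(p, " %8s", desc->irq_data.chip->name);
	if (desc->name)		/* the fix: never print a NULL name blindly */
		seq_printf(p, "-%-8s", desc->name);
	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}
	seq_putc(p, '\n');
}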

Showing 6 changed files (inline diff; changed lines are marked with "-"/"+")

arch/cris/Kconfig

config MMU
	bool
	default y

config ZONE_DMA
	bool
	default y

config RWSEM_GENERIC_SPINLOCK
	bool
	default y

config RWSEM_XCHGADD_ALGORITHM
	bool

config GENERIC_CMOS_UPDATE
	def_bool y

config ARCH_USES_GETTIMEOFFSET
	def_bool n

config GENERIC_IOMAP
	bool
	default y

config ARCH_HAS_ILOG2_U32
	bool
	default n

config ARCH_HAS_ILOG2_U64
	bool
	default n

config GENERIC_FIND_NEXT_BIT
	bool
	default y

config GENERIC_HWEIGHT
	bool
	default y

config GENERIC_CALIBRATE_DELAY
	bool
	default y

config NO_IOPORT
	def_bool y

config FORCE_MAX_ZONEORDER
	int
	default 6

config CRIS
	bool
	default y
	select HAVE_IDE
	select HAVE_GENERIC_HARDIRQS
	select GENERIC_HARDIRQS_NO_DEPRECATED
+	select GENERIC_IRQ_SHOW

config HZ
	int
	default 100

source "init/Kconfig"

source "kernel/Kconfig.freezer"

menu "General setup"

source "fs/Kconfig.binfmt"

config ETRAX_CMDLINE
	string "Kernel command line"
	default "root=/dev/mtdblock3"
	help
	  Pass additional commands to the kernel.

config ETRAX_WATCHDOG
	bool "Enable ETRAX watchdog"
	help
	  Enable the built-in watchdog timer support on ETRAX based embedded
	  network computers.

config ETRAX_WATCHDOG_NICE_DOGGY
	bool "Disable watchdog during Oops printouts"
	depends on ETRAX_WATCHDOG
	help
	  By enabling this you make sure that the watchdog does not bite while
	  printing oopses. Recommended for development systems but not for
	  production releases.

config ETRAX_FAST_TIMER
	bool "Enable ETRAX fast timer API"
	help
	  This options enables the API to a fast timer implementation using
	  timer1 to get sub jiffie resolution timers (primarily one-shot
	  timers).
	  This is needed if CONFIG_ETRAX_SERIAL_FAST_TIMER is enabled.

config ETRAX_KMALLOCED_MODULES
	bool "Enable module allocation with kmalloc"
	help
	  Enable module allocation with kmalloc instead of vmalloc.

config OOM_REBOOT
	bool "Enable reboot at out of memory"

source "kernel/Kconfig.preempt"

source mm/Kconfig

endmenu

menu "Hardware setup"

choice
	prompt "Processor type"
	default ETRAX100LX

config ETRAX100LX
	bool "ETRAX-100LX-v1"
	select ARCH_USES_GETTIMEOFFSET
	help
	  Support version 1 of the ETRAX 100LX.

config ETRAX100LX_V2
	bool "ETRAX-100LX-v2"
	select ARCH_USES_GETTIMEOFFSET
	help
	  Support version 2 of the ETRAX 100LX.

config SVINTO_SIM
	bool "ETRAX-100LX-for-xsim-simulator"
	select ARCH_USES_GETTIMEOFFSET
	help
	  Support the xsim ETRAX Simulator.

config ETRAXFS
	bool "ETRAX-FS-V32"
	help
	  Support CRIS V32.

config CRIS_MACH_ARTPEC3
	bool "ARTPEC-3"
	help
	  Support Axis ARTPEC-3.

endchoice

config ETRAX_VCS_SIM
	bool "VCS Simulator"
	help
	  Setup hardware to be run in the VCS simulator.

config ETRAX_ARCH_V10
	bool
	default y if ETRAX100LX || ETRAX100LX_V2
	default n if !(ETRAX100LX || ETRAX100LX_V2)

config ETRAX_ARCH_V32
	bool
	default y if (ETRAXFS || CRIS_MACH_ARTPEC3)
	default n if !(ETRAXFS || CRIS_MACH_ARTPEC3)

config ETRAX_DRAM_SIZE
	int "DRAM size (dec, in MB)"
	default "8"
	help
	  Size of DRAM (decimal in MB) typically 2, 8 or 16.

config ETRAX_VMEM_SIZE
	int "Video memory size (dec, in MB)"
	depends on ETRAX_ARCH_V32 && !ETRAXFS
	default 8 if !ETRAXFS
	help
	  Size of Video accessible memory (decimal, in MB).

config ETRAX_FLASH_BUSWIDTH
	int "Buswidth of NOR flash in bytes"
	default "2"
	help
	  Width in bytes of the NOR Flash bus (1, 2 or 4). Is usually 2.

config ETRAX_NANDFLASH_BUSWIDTH
	int "Buswidth of NAND flash in bytes"
	default "1"
	help
	  Width in bytes of the NAND flash (1 or 2).

config ETRAX_FLASH1_SIZE
	int "FLASH1 size (dec, in MB. 0 = Unknown)"
	default "0"

choice
	prompt "Product debug-port"
	default ETRAX_DEBUG_PORT0

config ETRAX_DEBUG_PORT0
	bool "Serial-0"
	help
	  Choose a serial port for the ETRAX debug console. Default to
	  port 0.

config ETRAX_DEBUG_PORT1
	bool "Serial-1"
	help
	  Use serial port 1 for the console.

config ETRAX_DEBUG_PORT2
	bool "Serial-2"
	help
	  Use serial port 2 for the console.

config ETRAX_DEBUG_PORT3
	bool "Serial-3"
	help
	  Use serial port 3 for the console.

config ETRAX_DEBUG_PORT_NULL
	bool "disabled"
	help
	  Disable serial-port debugging.

endchoice

choice
	prompt "Kernel GDB port"
	depends on ETRAX_KGDB
	default ETRAX_KGDB_PORT0
	help
	  Choose a serial port for kernel debugging. NOTE: This port should
	  not be enabled under Drivers for built-in interfaces (as it has its
	  own initialization code) and should not be the same as the debug port.

config ETRAX_KGDB_PORT0
	bool "Serial-0"
	help
	  Use serial port 0 for kernel debugging.

config ETRAX_KGDB_PORT1
	bool "Serial-1"
	help
	  Use serial port 1 for kernel debugging.

config ETRAX_KGDB_PORT2
	bool "Serial-2"
	help
	  Use serial port 2 for kernel debugging.

config ETRAX_KGDB_PORT3
	bool "Serial-3"
	help
	  Use serial port 3 for kernel debugging.

endchoice

source arch/cris/arch-v10/Kconfig
source arch/cris/arch-v32/Kconfig

endmenu

source "net/Kconfig"

# bring in ETRAX built-in drivers
menu "Drivers for built-in interfaces"
source arch/cris/arch-v10/drivers/Kconfig
source arch/cris/arch-v32/drivers/Kconfig

config ETRAX_AXISFLASHMAP
	bool "Axis flash-map support"
	select MTD
	select MTD_CFI
	select MTD_CFI_AMDSTD
	select MTD_JEDECPROBE if ETRAX_ARCH_V32
	select MTD_CHAR
	select MTD_BLOCK
	select MTD_PARTITIONS
	select MTD_CONCAT
	select MTD_COMPLEX_MAPPINGS
	help
	  This option enables MTD mapping of flash devices. Needed to use
	  flash memories. If unsure, say Y.

config ETRAX_RTC
	bool "Real Time Clock support"
	depends on ETRAX_I2C
	help
	  Enables drivers for the Real-Time Clock battery-backed chips on
	  some products. The kernel reads the time when booting, and
	  the date can be set using ioctl(fd, RTC_SET_TIME, &rt) with rt a
	  rtc_time struct (see <file:include/asm-cris/rtc.h>) on the /dev/rtc
	  device. You can check the time with cat /proc/rtc, but
	  normal time reading should be done using libc function time and
	  friends.

choice
	prompt "RTC chip"
	depends on ETRAX_RTC
	default ETRAX_PCF8563 if ETRAX_ARCH_V32
	default ETRAX_DS1302 if ETRAX_ARCH_V10

config ETRAX_DS1302
	depends on ETRAX_ARCH_V10
	bool "DS1302"
	help
	  Enables the driver for the DS1302 Real-Time Clock battery-backed
	  chip on some products.

config ETRAX_PCF8563
	bool "PCF8563"
	help
	  Enables the driver for the PCF8563 Real-Time Clock battery-backed
	  chip on some products.

endchoice

config ETRAX_SYNCHRONOUS_SERIAL
	bool "Synchronous serial-port support"
	help
	  Select this to enable the synchronous serial port driver.

config ETRAX_SYNCHRONOUS_SERIAL_PORT0
	bool "Synchronous serial port 0 enabled"
	depends on ETRAX_SYNCHRONOUS_SERIAL
	help
	  Enabled synchronous serial port 0.

config ETRAX_SYNCHRONOUS_SERIAL0_DMA
	bool "Enable DMA on synchronous serial port 0."
	depends on ETRAX_SYNCHRONOUS_SERIAL_PORT0
	help
	  A synchronous serial port can run in manual or DMA mode.
	  Selecting this option will make it run in DMA mode.

config ETRAX_SYNCHRONOUS_SERIAL_PORT1
	bool "Synchronous serial port 1 enabled"
	depends on ETRAX_SYNCHRONOUS_SERIAL && (ETRAXFS || ETRAX_ARCH_V10)
	help
	  Enabled synchronous serial port 1.

config ETRAX_SYNCHRONOUS_SERIAL1_DMA
	bool "Enable DMA on synchronous serial port 1."
	depends on ETRAX_SYNCHRONOUS_SERIAL_PORT1
	help
	  A synchronous serial port can run in manual or DMA mode.
	  Selecting this option will make it run in DMA mode.

choice
	prompt "Network LED behavior"
	depends on ETRAX_ETHERNET
	default ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY

config ETRAX_NETWORK_LED_ON_WHEN_LINK
	bool "LED_on_when_link"
	help
	  Selecting LED_on_when_link will light the LED when there is a
	  connection and will flash off when there is activity.

	  Selecting LED_on_when_activity will light the LED only when
	  there is activity.

	  This setting will also affect the behaviour of other activity LEDs
	  e.g. Bluetooth.

config ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY
	bool "LED_on_when_activity"
	help
	  Selecting LED_on_when_link will light the LED when there is a
	  connection and will flash off when there is activity.

	  Selecting LED_on_when_activity will light the LED only when
	  there is activity.

	  This setting will also affect the behaviour of other activity LEDs
	  e.g. Bluetooth.

endchoice

choice
	prompt "Ser0 DMA out channel"
	depends on ETRAX_SERIAL_PORT0
	default ETRAX_SERIAL_PORT0_DMA6_OUT if ETRAX_ARCH_V32
	default ETRAX_SERIAL_PORT0_NO_DMA_OUT if ETRAX_ARCH_V10

config ETRAX_SERIAL_PORT0_NO_DMA_OUT
	bool "Ser0 uses no DMA for output"
	help
	  Do not use DMA for ser0 output.

config ETRAX_SERIAL_PORT0_DMA6_OUT
	bool "Ser0 uses DMA6 for output"
	depends on ETRAXFS
	help
	  Enables the DMA6 output channel for ser0 (ttyS0).
	  If you do not enable DMA, an interrupt for each character will be
	  used when transmitting data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

config ETRAX_SERIAL_PORT0_DMA0_OUT
	bool "Ser0 uses DMA0 for output"
	depends on CRIS_MACH_ARTPEC3
	help
	  Enables the DMA0 output channel for ser0 (ttyS0).
	  If you do not enable DMA, an interrupt for each character will be
	  used when transmitting data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

endchoice

choice
	prompt "Ser0 DMA in channel "
	depends on ETRAX_SERIAL_PORT0
	default ETRAX_SERIAL_PORT0_NO_DMA_IN if ETRAX_ARCH_V32
	default ETRAX_SERIAL_PORT0_DMA7_IN if ETRAX_ARCH_V10
	help
	  What DMA channel to use for ser0.

config ETRAX_SERIAL_PORT0_NO_DMA_IN
	bool "Ser0 uses no DMA for input"
	help
	  Do not use DMA for ser0 input.

config ETRAX_SERIAL_PORT0_DMA7_IN
	bool "Ser0 uses DMA7 for input"
	depends on ETRAXFS
	help
	  Enables the DMA7 input channel for ser0 (ttyS0).
	  If you do not enable DMA, an interrupt for each character will be
	  used when receiving data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

config ETRAX_SERIAL_PORT0_DMA1_IN
	bool "Ser0 uses DMA1 for input"
	depends on CRIS_MACH_ARTPEC3
	help
	  Enables the DMA1 input channel for ser0 (ttyS0).
	  If you do not enable DMA, an interrupt for each character will be
	  used when receiving data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

endchoice

choice
	prompt "Ser1 DMA in channel "
	depends on ETRAX_SERIAL_PORT1
	default ETRAX_SERIAL_PORT1_NO_DMA_IN if ETRAX_ARCH_V32
	default ETRAX_SERIAL_PORT1_DMA9_IN if ETRAX_ARCH_V10
	help
	  What DMA channel to use for ser1.

config ETRAX_SERIAL_PORT1_NO_DMA_IN
	bool "Ser1 uses no DMA for input"
	help
	  Do not use DMA for ser1 input.

config ETRAX_SERIAL_PORT1_DMA5_IN
	bool "Ser1 uses DMA5 for input"
	depends on ETRAX_ARCH_V32
	help
	  Enables the DMA5 input channel for ser1 (ttyS1).
	  If you do not enable DMA, an interrupt for each character will be
	  used when receiving data.
	  Normally you want this on, unless you use the DMA channel for
	  something else.

config ETRAX_SERIAL_PORT1_DMA9_IN
	depends on ETRAX_ARCH_V10
	bool "Ser1 uses DMA9 for input"

endchoice


choice
	prompt "Ser1 DMA out channel"
	depends on ETRAX_SERIAL_PORT1
	default ETRAX_SERIAL_PORT1_NO_DMA_OUT if ETRAX_ARCH_V32
	default ETRAX_SERIAL_PORT1_DMA8_OUT if ETRAX_ARCH_V10
	help
	  What DMA channel to use for ser1.

config ETRAX_SERIAL_PORT1_NO_DMA_OUT
	bool "Ser1 uses no DMA for output"
	help
	  Do not use DMA for ser1 output.

config ETRAX_SERIAL_PORT1_DMA8_OUT
	depends on ETRAX_ARCH_V10
	bool "Ser1 uses DMA8 for output"

config ETRAX_SERIAL_PORT1_DMA4_OUT
	depends on ETRAX_ARCH_V32
	bool "Ser1 uses DMA4 for output"
	help
	  Enables the DMA4 output channel for ser1 (ttyS1).
	  If you do not enable DMA, an interrupt for each character will be
	  used when transmitting data.
	  Normally you want this on, unless you use the DMA channel for
	  something else.

endchoice

choice
	prompt "Ser2 DMA out channel"
	depends on ETRAX_SERIAL_PORT2
	default ETRAX_SERIAL_PORT2_NO_DMA_OUT if ETRAX_ARCH_V32
	default ETRAX_SERIAL_PORT2_DMA2_OUT if ETRAX_ARCH_V10

config ETRAX_SERIAL_PORT2_NO_DMA_OUT
	bool "Ser2 uses no DMA for output"
	help
	  Do not use DMA for ser2 output.

config ETRAX_SERIAL_PORT2_DMA2_OUT
	bool "Ser2 uses DMA2 for output"
	depends on ETRAXFS || ETRAX_ARCH_V10
	help
	  Enables the DMA2 output channel for ser2 (ttyS2).
	  If you do not enable DMA, an interrupt for each character will be
	  used when transmitting data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

config ETRAX_SERIAL_PORT2_DMA6_OUT
	bool "Ser2 uses DMA6 for output"
	depends on CRIS_MACH_ARTPEC3
	help
	  Enables the DMA6 output channel for ser2 (ttyS2).
	  If you do not enable DMA, an interrupt for each character will be
	  used when transmitting data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

endchoice

choice
	prompt "Ser2 DMA in channel"
	depends on ETRAX_SERIAL_PORT2
	default ETRAX_SERIAL_PORT2_NO_DMA_IN if ETRAX_ARCH_V32
	default ETRAX_SERIAL_PORT2_DMA3_IN if ETRAX_ARCH_V10
	help
	  What DMA channel to use for ser2.

config ETRAX_SERIAL_PORT2_NO_DMA_IN
	bool "Ser2 uses no DMA for input"
	help
	  Do not use DMA for ser2 input.

config ETRAX_SERIAL_PORT2_DMA3_IN
	bool "Ser2 uses DMA3 for input"
	depends on ETRAXFS || ETRAX_ARCH_V10
	help
	  Enables the DMA3 input channel for ser2 (ttyS2).
	  If you do not enable DMA, an interrupt for each character will be
	  used when receiving data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

config ETRAX_SERIAL_PORT2_DMA7_IN
	bool "Ser2 uses DMA7 for input"
	depends on CRIS_MACH_ARTPEC3
	help
	  Enables the DMA7 input channel for ser2 (ttyS2).
	  If you do not enable DMA, an interrupt for each character will be
	  used when receiving data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

endchoice

choice
	prompt "Ser3 DMA in channel"
	depends on ETRAX_SERIAL_PORT3
	default ETRAX_SERIAL_PORT3_NO_DMA_IN if ETRAX_ARCH_V32
	default ETRAX_SERIAL_PORT3_DMA5_IN if ETRAX_ARCH_V10
	help
	  What DMA channel to use for ser3.

config ETRAX_SERIAL_PORT3_NO_DMA_IN
	bool "Ser3 uses no DMA for input"
	help
	  Do not use DMA for ser3 input.

config ETRAX_SERIAL_PORT3_DMA5_IN
	depends on ETRAX_ARCH_V10
	bool "DMA 5"

config ETRAX_SERIAL_PORT3_DMA9_IN
	bool "Ser3 uses DMA9 for input"
	depends on ETRAXFS
	help
	  Enables the DMA9 input channel for ser3 (ttyS3).
	  If you do not enable DMA, an interrupt for each character will be
	  used when receiving data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

config ETRAX_SERIAL_PORT3_DMA3_IN
	bool "Ser3 uses DMA3 for input"
	depends on CRIS_MACH_ARTPEC3
	help
	  Enables the DMA3 input channel for ser3 (ttyS3).
	  If you do not enable DMA, an interrupt for each character will be
	  used when receiving data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

endchoice

choice
	prompt "Ser3 DMA out channel"
	depends on ETRAX_SERIAL_PORT3
	default ETRAX_SERIAL_PORT3_NO_DMA_OUT if ETRAX_ARCH_V32
	default ETRAX_SERIAL_PORT3_DMA4_OUT if ETRAX_ARCH_V10

config ETRAX_SERIAL_PORT3_NO_DMA_OUT
	bool "Ser3 uses no DMA for output"
	help
	  Do not use DMA for ser3 output.

config ETRAX_SERIAL_PORT3_DMA4_OUT
	depends on ETRAX_ARCH_V10
	bool "DMA 4"

config ETRAX_SERIAL_PORT3_DMA8_OUT
	bool "Ser3 uses DMA8 for output"
	depends on ETRAXFS
	help
	  Enables the DMA8 output channel for ser3 (ttyS3).
	  If you do not enable DMA, an interrupt for each character will be
	  used when transmitting data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

config ETRAX_SERIAL_PORT3_DMA2_OUT
	bool "Ser3 uses DMA2 for output"
	depends on CRIS_MACH_ARTPEC3
	help
	  Enables the DMA2 output channel for ser3 (ttyS3).
	  If you do not enable DMA, an interrupt for each character will be
	  used when transmitting data.
	  Normally you want to use DMA, unless you use the DMA channel for
	  something else.

endchoice

endmenu

source "drivers/base/Kconfig"

# standard linux drivers
source "drivers/mtd/Kconfig"

source "drivers/parport/Kconfig"

source "drivers/pnp/Kconfig"

source "drivers/block/Kconfig"

source "drivers/ide/Kconfig"

source "drivers/net/Kconfig"

source "drivers/i2c/Kconfig"

source "drivers/rtc/Kconfig"

#
# input before char - char/joystick depends on it. As does USB.
#
source "drivers/input/Kconfig"

source "drivers/char/Kconfig"

source "fs/Kconfig"

source "drivers/usb/Kconfig"

source "drivers/uwb/Kconfig"

source "drivers/staging/Kconfig"

source "arch/cris/Kconfig.debug"

source "security/Kconfig"

source "crypto/Kconfig"

source "lib/Kconfig"
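
The only change in this file is the new "select GENERIC_IRQ_SHOW" under config CRIS, matching the "cris: Use generic show_interrupts()" entry in the shortlog: the architecture stops providing its own /proc/interrupts printer and relies on the generic one. As a hedged sketch (an assumption about the surrounding genirq API of this era, not code from the cris tree), an arch that wants extra summary lines can still override the weak arch_show_interrupts() hook:

#include <linux/interrupt.h>
#include <linux/seq_file.h>

/* Optional per-arch extra lines for /proc/interrupts (illustrative only). */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10lu\n", prec, "ERR", 0UL /* e.g. spurious count */);
	return 0;
}
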
arch/cris/arch-v10/kernel/irq.c
/*
 * linux/arch/cris/kernel/irq.c
 *
 * Copyright (c) 2000-2002 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 * This file contains the interrupt vectors and some
 * helper functions
 *
 */

#include <asm/irq.h>
#include <asm/current.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/init.h>

#define crisv10_mask_irq(irq_nr) (*R_VECT_MASK_CLR = 1 << (irq_nr));
#define crisv10_unmask_irq(irq_nr) (*R_VECT_MASK_SET = 1 << (irq_nr));

/* don't use set_int_vector, it bypasses the linux interrupt handlers. it is
 * global just so that the kernel gdb can use it.
 */

void
set_int_vector(int n, irqvectptr addr)
{
	etrax_irv->v[n + 0x20] = (irqvectptr)addr;
}

/* the breakpoint vector is obviously not made just like the normal irq handlers
 * but needs to contain _code_ to jump to addr.
 *
 * the BREAK n instruction jumps to IBR + n * 8
 */

void
set_break_vector(int n, irqvectptr addr)
{
	unsigned short *jinstr = (unsigned short *)&etrax_irv->v[n*2];
	unsigned long *jaddr = (unsigned long *)(jinstr + 1);

	/* if you don't know what this does, do not touch it! */

	*jinstr = 0x0d3f;
	*jaddr = (unsigned long)addr;

	/* 00000026 <clrlop+1a> 3f0d82000000   jump  0x82 */
}

/*
 * This builds up the IRQ handler stubs using some ugly macros in irq.h
 *
 * These macros create the low-level assembly IRQ routines that do all
 * the operations that are needed. They are also written to be fast - and to
 * disable interrupts as little as humanly possible.
 *
 */

/* IRQ0 and 1 are special traps */
void hwbreakpoint(void);
void IRQ1_interrupt(void);
BUILD_TIMER_IRQ(2, 0x04)	/* the timer interrupt is somewhat special */
BUILD_IRQ(3, 0x08)
BUILD_IRQ(4, 0x10)
BUILD_IRQ(5, 0x20)
BUILD_IRQ(6, 0x40)
BUILD_IRQ(7, 0x80)
BUILD_IRQ(8, 0x100)
BUILD_IRQ(9, 0x200)
BUILD_IRQ(10, 0x400)
BUILD_IRQ(11, 0x800)
BUILD_IRQ(12, 0x1000)
BUILD_IRQ(13, 0x2000)
void mmu_bus_fault(void);	/* IRQ 14 is the bus fault interrupt */
void multiple_interrupt(void);	/* IRQ 15 is the multiple IRQ interrupt */
BUILD_IRQ(16, 0x10000 | 0x20000)	/* ethernet tx interrupt needs to block rx */
BUILD_IRQ(17, 0x20000 | 0x10000)	/* ...and vice versa */
BUILD_IRQ(18, 0x40000)
BUILD_IRQ(19, 0x80000)
BUILD_IRQ(20, 0x100000)
BUILD_IRQ(21, 0x200000)
BUILD_IRQ(22, 0x400000)
BUILD_IRQ(23, 0x800000)
BUILD_IRQ(24, 0x1000000)
BUILD_IRQ(25, 0x2000000)
/* IRQ 26-30 are reserved */
BUILD_IRQ(31, 0x80000000)

/*
 * Pointers to the low-level handlers
 */

static void (*interrupt[NR_IRQS])(void) = {
	NULL, NULL, IRQ2_interrupt, IRQ3_interrupt,
	IRQ4_interrupt, IRQ5_interrupt, IRQ6_interrupt, IRQ7_interrupt,
	IRQ8_interrupt, IRQ9_interrupt, IRQ10_interrupt, IRQ11_interrupt,
	IRQ12_interrupt, IRQ13_interrupt, NULL, NULL,
	IRQ16_interrupt, IRQ17_interrupt, IRQ18_interrupt, IRQ19_interrupt,
	IRQ20_interrupt, IRQ21_interrupt, IRQ22_interrupt, IRQ23_interrupt,
	IRQ24_interrupt, IRQ25_interrupt, NULL, NULL, NULL, NULL, NULL,
	IRQ31_interrupt
};

static void enable_crisv10_irq(struct irq_data *data)
{
	crisv10_unmask_irq(data->irq);
}

static void disable_crisv10_irq(struct irq_data *data)
{
	crisv10_mask_irq(data->irq);
}

static struct irq_chip crisv10_irq_type = {
	.name		= "CRISv10",
	.irq_shutdown	= disable_crisv10_irq,
	.irq_enable	= enable_crisv10_irq,
	.irq_disable	= disable_crisv10_irq,
};

void weird_irq(void);
void system_call(void); /* from entry.S */
void do_sigtrap(void); /* from entry.S */
void gdb_handle_breakpoint(void); /* from entry.S */

extern void do_IRQ(int irq, struct pt_regs * regs);

/* Handle multiple IRQs */
void do_multiple_IRQ(struct pt_regs* regs)
{
	int bit;
	unsigned masked;
	unsigned mask;
	unsigned ethmask = 0;

	/* Get interrupts to mask and handle */
	mask = masked = *R_VECT_MASK_RD;

	/* Never mask timer IRQ */
	mask &= ~(IO_MASK(R_VECT_MASK_RD, timer0));

	/*
	 * If either ethernet interrupt (rx or tx) is active then block
	 * the other one too. Unblock afterwards also.
	 */
	if (mask &
	    (IO_STATE(R_VECT_MASK_RD, dma0, active) |
	     IO_STATE(R_VECT_MASK_RD, dma1, active))) {
		ethmask = (IO_MASK(R_VECT_MASK_RD, dma0) |
			   IO_MASK(R_VECT_MASK_RD, dma1));
	}

	/* Block them */
	*R_VECT_MASK_CLR = (mask | ethmask);

	/* An extra irq_enter here to prevent softIRQs to run after
	 * each do_IRQ. This will decrease the interrupt latency.
	 */
	irq_enter();

	/* Handle all IRQs */
	for (bit = 2; bit < 32; bit++) {
		if (masked & (1 << bit)) {
			do_IRQ(bit, regs);
		}
	}

	/* This irq_exit() will trigger the soft IRQs. */
	irq_exit();

	/* Unblock the IRQs again */
	*R_VECT_MASK_SET = (masked | ethmask);
}

/* init_IRQ() is called by start_kernel and is responsible for fixing IRQ masks and
   setting the irq vector table.
*/

void __init
init_IRQ(void)
{
	int i;

	/* clear all interrupt masks */

#ifndef CONFIG_SVINTO_SIM
	*R_IRQ_MASK0_CLR = 0xffffffff;
	*R_IRQ_MASK1_CLR = 0xffffffff;
	*R_IRQ_MASK2_CLR = 0xffffffff;
#endif

	*R_VECT_MASK_CLR = 0xffffffff;

	for (i = 0; i < 256; i++)
		etrax_irv->v[i] = weird_irq;

	/* Initialize IRQ handler descriptors. */
	for(i = 2; i < NR_IRQS; i++) {
-		set_irq_desc_and_handler(i, &crisv10_irq_type,
+		irq_set_chip_and_handler(i, &crisv10_irq_type,
					 handle_simple_irq);
		set_int_vector(i, interrupt[i]);
	}

	/* the entries in the break vector contain actual code to be
	   executed by the associated break handler, rather than just a jump
	   address. therefore we need to setup a default breakpoint handler
	   for all breakpoints */

	for (i = 0; i < 16; i++)
		set_break_vector(i, do_sigtrap);

	/* except IRQ 15 which is the multiple-IRQ handler on Etrax100 */

	set_int_vector(15, multiple_interrupt);

	/* 0 and 1 which are special breakpoint/NMI traps */

	set_int_vector(0, hwbreakpoint);
	set_int_vector(1, IRQ1_interrupt);

	/* and irq 14 which is the mmu bus fault handler */

	set_int_vector(14, mmu_bus_fault);

	/* setup the system-call trap, which is reached by BREAK 13 */

	set_break_vector(13, system_call);

	/* setup a breakpoint handler for debugging used for both user and
	   kernel mode debugging (which is why it is not inside an ifdef
	   CONFIG_ETRAX_KGDB) */
	set_break_vector(8, gdb_handle_breakpoint);

#ifdef CONFIG_ETRAX_KGDB
	/* setup kgdb if its enabled, and break into the debugger */
	kgdb_init();
	breakpoint();
#endif
}
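
The one functional change above is in init_IRQ(): the leftover set_irq_desc_and_handler() call from the earlier irq conversion is replaced with the real genirq accessor irq_set_chip_and_handler(), per the "cris: Fix irq conversion fallout" shortlog entry. A minimal sketch of the accessor-based pattern follows (illustrative, not taken from the cris tree; example_init_one_irq is a hypothetical name); the second call ties in the "Use accessor functions to set IRQ_PER_CPU flag" entry, which replaces direct desc->status fiddling with an accessor:

#include <linux/init.h>
#include <linux/irq.h>

/* Hypothetical setup helper showing the accessor-based genirq pattern. */
static void __init example_init_one_irq(int irq, struct irq_chip *chip)
{
	irq_set_chip_and_handler(irq, chip, handle_simple_irq);
	irq_set_status_flags(irq, IRQ_PER_CPU);	/* mark as a per-CPU IRQ */
}
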
arch/cris/arch-v32/kernel/irq.c
/*
 * Copyright (C) 2003, Axis Communications AB.
 */

#include <asm/irq.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/intr_vect.h>
#include <hwregs/intr_vect_defs.h>

#define CPU_FIXED -1

/* IRQ masks (refer to comment for crisv32_do_multiple) */
#if TIMER0_INTR_VECT - FIRST_IRQ < 32
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ))
#undef TIMER_VECT1
#else
#define TIMER_MASK (1 << (TIMER0_INTR_VECT - FIRST_IRQ - 32))
#define TIMER_VECT1
#endif
#ifdef CONFIG_ETRAX_KGDB
#if defined(CONFIG_ETRAX_KGDB_PORT0)
#define IGNOREMASK (1 << (SER0_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT1)
#define IGNOREMASK (1 << (SER1_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGB_PORT2)
#define IGNOREMASK (1 << (SER2_INTR_VECT - FIRST_IRQ))
#elif defined(CONFIG_ETRAX_KGDB_PORT3)
#define IGNOREMASK (1 << (SER3_INTR_VECT - FIRST_IRQ))
#endif
#endif

DEFINE_SPINLOCK(irq_lock);

struct cris_irq_allocation
{
	int cpu; /* The CPU to which the IRQ is currently allocated. */
	cpumask_t mask; /* The CPUs to which the IRQ may be allocated. */
};

struct cris_irq_allocation irq_allocations[NR_REAL_IRQS] =
	{ [0 ... NR_REAL_IRQS - 1] = {0, CPU_MASK_ALL} };

static unsigned long irq_regs[NR_CPUS] =
{
	regi_irq,
#ifdef CONFIG_SMP
	regi_irq2,
#endif
};

#if NR_REAL_IRQS > 32
#define NBR_REGS 2
#else
#define NBR_REGS 1
#endif

unsigned long cpu_irq_counters[NR_CPUS];
unsigned long irq_counters[NR_REAL_IRQS];

/* From irq.c. */
extern void weird_irq(void);

/* From entry.S. */
extern void system_call(void);
extern void nmi_interrupt(void);
extern void multiple_interrupt(void);
extern void gdb_handle_exception(void);
extern void i_mmu_refill(void);
extern void i_mmu_invalid(void);
extern void i_mmu_access(void);
extern void i_mmu_execute(void);
extern void d_mmu_refill(void);
extern void d_mmu_invalid(void);
extern void d_mmu_access(void);
extern void d_mmu_write(void);

/* From kgdb.c. */
extern void kgdb_init(void);
extern void breakpoint(void);

/* From traps.c. */
extern void breakh_BUG(void);

/*
 * Build the IRQ handler stubs using macros from irq.h.
 */
#ifdef CONFIG_CRIS_MACH_ARTPEC3
BUILD_TIMER_IRQ(0x31, 0)
#else
BUILD_IRQ(0x31)
#endif
BUILD_IRQ(0x32)
BUILD_IRQ(0x33)
BUILD_IRQ(0x34)
BUILD_IRQ(0x35)
BUILD_IRQ(0x36)
BUILD_IRQ(0x37)
BUILD_IRQ(0x38)
BUILD_IRQ(0x39)
BUILD_IRQ(0x3a)
BUILD_IRQ(0x3b)
BUILD_IRQ(0x3c)
BUILD_IRQ(0x3d)
BUILD_IRQ(0x3e)
BUILD_IRQ(0x3f)
BUILD_IRQ(0x40)
BUILD_IRQ(0x41)
BUILD_IRQ(0x42)
BUILD_IRQ(0x43)
BUILD_IRQ(0x44)
BUILD_IRQ(0x45)
BUILD_IRQ(0x46)
BUILD_IRQ(0x47)
BUILD_IRQ(0x48)
BUILD_IRQ(0x49)
BUILD_IRQ(0x4a)
#ifdef CONFIG_ETRAXFS
BUILD_TIMER_IRQ(0x4b, 0)
#else
BUILD_IRQ(0x4b)
#endif
BUILD_IRQ(0x4c)
BUILD_IRQ(0x4d)
BUILD_IRQ(0x4e)
BUILD_IRQ(0x4f)
BUILD_IRQ(0x50)
#if MACH_IRQS > 32
BUILD_IRQ(0x51)
BUILD_IRQ(0x52)
BUILD_IRQ(0x53)
BUILD_IRQ(0x54)
BUILD_IRQ(0x55)
BUILD_IRQ(0x56)
BUILD_IRQ(0x57)
BUILD_IRQ(0x58)
BUILD_IRQ(0x59)
BUILD_IRQ(0x5a)
BUILD_IRQ(0x5b)
BUILD_IRQ(0x5c)
BUILD_IRQ(0x5d)
BUILD_IRQ(0x5e)
BUILD_IRQ(0x5f)
BUILD_IRQ(0x60)
BUILD_IRQ(0x61)
BUILD_IRQ(0x62)
BUILD_IRQ(0x63)
BUILD_IRQ(0x64)
BUILD_IRQ(0x65)
BUILD_IRQ(0x66)
BUILD_IRQ(0x67)
BUILD_IRQ(0x68)
BUILD_IRQ(0x69)
BUILD_IRQ(0x6a)
BUILD_IRQ(0x6b)
BUILD_IRQ(0x6c)
BUILD_IRQ(0x6d)
BUILD_IRQ(0x6e)
BUILD_IRQ(0x6f)
BUILD_IRQ(0x70)
#endif

/* Pointers to the low-level handlers. */
static void (*interrupt[MACH_IRQS])(void) = {
	IRQ0x31_interrupt, IRQ0x32_interrupt, IRQ0x33_interrupt,
	IRQ0x34_interrupt, IRQ0x35_interrupt, IRQ0x36_interrupt,
	IRQ0x37_interrupt, IRQ0x38_interrupt, IRQ0x39_interrupt,
	IRQ0x3a_interrupt, IRQ0x3b_interrupt, IRQ0x3c_interrupt,
181 IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt, 181 IRQ0x3d_interrupt, IRQ0x3e_interrupt, IRQ0x3f_interrupt,
182 IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt, 182 IRQ0x40_interrupt, IRQ0x41_interrupt, IRQ0x42_interrupt,
183 IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt, 183 IRQ0x43_interrupt, IRQ0x44_interrupt, IRQ0x45_interrupt,
184 IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt, 184 IRQ0x46_interrupt, IRQ0x47_interrupt, IRQ0x48_interrupt,
185 IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt, 185 IRQ0x49_interrupt, IRQ0x4a_interrupt, IRQ0x4b_interrupt,
186 IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt, 186 IRQ0x4c_interrupt, IRQ0x4d_interrupt, IRQ0x4e_interrupt,
187 IRQ0x4f_interrupt, IRQ0x50_interrupt, 187 IRQ0x4f_interrupt, IRQ0x50_interrupt,
188 #if MACH_IRQS > 32 188 #if MACH_IRQS > 32
189 IRQ0x51_interrupt, IRQ0x52_interrupt, IRQ0x53_interrupt, 189 IRQ0x51_interrupt, IRQ0x52_interrupt, IRQ0x53_interrupt,
190 IRQ0x54_interrupt, IRQ0x55_interrupt, IRQ0x56_interrupt, 190 IRQ0x54_interrupt, IRQ0x55_interrupt, IRQ0x56_interrupt,
191 IRQ0x57_interrupt, IRQ0x58_interrupt, IRQ0x59_interrupt, 191 IRQ0x57_interrupt, IRQ0x58_interrupt, IRQ0x59_interrupt,
192 IRQ0x5a_interrupt, IRQ0x5b_interrupt, IRQ0x5c_interrupt, 192 IRQ0x5a_interrupt, IRQ0x5b_interrupt, IRQ0x5c_interrupt,
193 IRQ0x5d_interrupt, IRQ0x5e_interrupt, IRQ0x5f_interrupt, 193 IRQ0x5d_interrupt, IRQ0x5e_interrupt, IRQ0x5f_interrupt,
194 IRQ0x60_interrupt, IRQ0x61_interrupt, IRQ0x62_interrupt, 194 IRQ0x60_interrupt, IRQ0x61_interrupt, IRQ0x62_interrupt,
195 IRQ0x63_interrupt, IRQ0x64_interrupt, IRQ0x65_interrupt, 195 IRQ0x63_interrupt, IRQ0x64_interrupt, IRQ0x65_interrupt,
196 IRQ0x66_interrupt, IRQ0x67_interrupt, IRQ0x68_interrupt, 196 IRQ0x66_interrupt, IRQ0x67_interrupt, IRQ0x68_interrupt,
197 IRQ0x69_interrupt, IRQ0x6a_interrupt, IRQ0x6b_interrupt, 197 IRQ0x69_interrupt, IRQ0x6a_interrupt, IRQ0x6b_interrupt,
198 IRQ0x6c_interrupt, IRQ0x6d_interrupt, IRQ0x6e_interrupt, 198 IRQ0x6c_interrupt, IRQ0x6d_interrupt, IRQ0x6e_interrupt,
199 IRQ0x6f_interrupt, IRQ0x70_interrupt, 199 IRQ0x6f_interrupt, IRQ0x70_interrupt,
200 #endif 200 #endif
201 }; 201 };
202 202
203 void 203 void
204 block_irq(int irq, int cpu) 204 block_irq(int irq, int cpu)
205 { 205 {
206 int intr_mask; 206 int intr_mask;
207 unsigned long flags; 207 unsigned long flags;
208 208
209 spin_lock_irqsave(&irq_lock, flags); 209 spin_lock_irqsave(&irq_lock, flags);
210 	/* Remember: 1 = let through, 0 = block. */   210 	/* Remember: 1 = let through, 0 = block. */
211 if (irq - FIRST_IRQ < 32) { 211 if (irq - FIRST_IRQ < 32) {
212 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], 212 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
213 rw_mask, 0); 213 rw_mask, 0);
214 intr_mask &= ~(1 << (irq - FIRST_IRQ)); 214 intr_mask &= ~(1 << (irq - FIRST_IRQ));
215 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 215 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
216 0, intr_mask); 216 0, intr_mask);
217 } else { 217 } else {
218 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], 218 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
219 rw_mask, 1); 219 rw_mask, 1);
220 intr_mask &= ~(1 << (irq - FIRST_IRQ - 32)); 220 intr_mask &= ~(1 << (irq - FIRST_IRQ - 32));
221 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 221 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
222 1, intr_mask); 222 1, intr_mask);
223 } 223 }
224 spin_unlock_irqrestore(&irq_lock, flags); 224 spin_unlock_irqrestore(&irq_lock, flags);
225 } 225 }
226 226
227 void 227 void
228 unblock_irq(int irq, int cpu) 228 unblock_irq(int irq, int cpu)
229 { 229 {
230 int intr_mask; 230 int intr_mask;
231 unsigned long flags; 231 unsigned long flags;
232 232
233 spin_lock_irqsave(&irq_lock, flags); 233 spin_lock_irqsave(&irq_lock, flags);
234 	/* Remember: 1 = let through, 0 = block. */   234 	/* Remember: 1 = let through, 0 = block. */
235 if (irq - FIRST_IRQ < 32) { 235 if (irq - FIRST_IRQ < 32) {
236 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], 236 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
237 rw_mask, 0); 237 rw_mask, 0);
238 intr_mask |= (1 << (irq - FIRST_IRQ)); 238 intr_mask |= (1 << (irq - FIRST_IRQ));
239 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 239 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
240 0, intr_mask); 240 0, intr_mask);
241 } else { 241 } else {
242 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], 242 intr_mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
243 rw_mask, 1); 243 rw_mask, 1);
244 intr_mask |= (1 << (irq - FIRST_IRQ - 32)); 244 intr_mask |= (1 << (irq - FIRST_IRQ - 32));
245 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, 245 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask,
246 1, intr_mask); 246 1, intr_mask);
247 } 247 }
248 spin_unlock_irqrestore(&irq_lock, flags); 248 spin_unlock_irqrestore(&irq_lock, flags);
249 } 249 }
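The mask bookkeeping above is plain bit arithmetic: IRQs 0-31 (relative to FIRST_IRQ) live in rw_mask register 0 and the rest in register 1. A minimal sketch of that mapping, assuming only the FIRST_IRQ constant used above (the helper name is illustrative, not part of this file):

	/* Illustrative helper: map an IRQ number to its rw_mask register
	 * and bit position, mirroring the arithmetic in (un)block_irq(). */
	static inline void irq_to_mask_pos(int irq, int *reg, int *bit)
	{
		int n = irq - FIRST_IRQ;	/* 0-based slot in the vector */

		*reg = n >> 5;			/* register 0 for slots 0-31, else 1 */
		*bit = n & 31;			/* bit inside that 32-bit register */
	}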
250 250
251 /* Find out which CPU the irq should be allocated to. */ 251 /* Find out which CPU the irq should be allocated to. */
252 static int irq_cpu(int irq) 252 static int irq_cpu(int irq)
253 { 253 {
254 int cpu; 254 int cpu;
255 unsigned long flags; 255 unsigned long flags;
256 256
257 spin_lock_irqsave(&irq_lock, flags); 257 spin_lock_irqsave(&irq_lock, flags);
258 cpu = irq_allocations[irq - FIRST_IRQ].cpu; 258 cpu = irq_allocations[irq - FIRST_IRQ].cpu;
259 259
260 /* Fixed interrupts stay on the local CPU. */ 260 /* Fixed interrupts stay on the local CPU. */
261 if (cpu == CPU_FIXED) 261 if (cpu == CPU_FIXED)
262 { 262 {
263 spin_unlock_irqrestore(&irq_lock, flags); 263 spin_unlock_irqrestore(&irq_lock, flags);
264 return smp_processor_id(); 264 return smp_processor_id();
265 } 265 }
266 266
267 267
268 	/* Let the interrupt stay on its current CPU if possible. */   268 	/* Let the interrupt stay on its current CPU if possible. */
269 if (cpu_isset(cpu, irq_allocations[irq - FIRST_IRQ].mask)) 269 if (cpu_isset(cpu, irq_allocations[irq - FIRST_IRQ].mask))
270 goto out; 270 goto out;
271 271
272 /* IRQ must be moved to another CPU. */ 272 /* IRQ must be moved to another CPU. */
273 cpu = first_cpu(irq_allocations[irq - FIRST_IRQ].mask); 273 cpu = first_cpu(irq_allocations[irq - FIRST_IRQ].mask);
274 irq_allocations[irq - FIRST_IRQ].cpu = cpu; 274 irq_allocations[irq - FIRST_IRQ].cpu = cpu;
275 out: 275 out:
276 spin_unlock_irqrestore(&irq_lock, flags); 276 spin_unlock_irqrestore(&irq_lock, flags);
277 return cpu; 277 return cpu;
278 } 278 }
279 279
280 void crisv32_mask_irq(int irq) 280 void crisv32_mask_irq(int irq)
281 { 281 {
282 int cpu; 282 int cpu;
283 283
284 for (cpu = 0; cpu < NR_CPUS; cpu++) 284 for (cpu = 0; cpu < NR_CPUS; cpu++)
285 block_irq(irq, cpu); 285 block_irq(irq, cpu);
286 } 286 }
287 287
288 void crisv32_unmask_irq(int irq) 288 void crisv32_unmask_irq(int irq)
289 { 289 {
290 unblock_irq(irq, irq_cpu(irq)); 290 unblock_irq(irq, irq_cpu(irq));
291 } 291 }
292 292
293 293
294 static void enable_crisv32_irq(struct irq_data *data) 294 static void enable_crisv32_irq(struct irq_data *data)
295 { 295 {
296 crisv32_unmask_irq(data->irq); 296 crisv32_unmask_irq(data->irq);
297 } 297 }
298 298
299 static void disable_crisv32_irq(struct irq_data *data) 299 static void disable_crisv32_irq(struct irq_data *data)
300 { 300 {
301 crisv32_mask_irq(data->irq); 301 crisv32_mask_irq(data->irq);
302 } 302 }
303 303
304 static int set_affinity_crisv32_irq(struct irq_data *data, 304 static int set_affinity_crisv32_irq(struct irq_data *data,
305 const struct cpumask *dest, bool force) 305 const struct cpumask *dest, bool force)
306 { 306 {
307 unsigned long flags; 307 unsigned long flags;
308 308
309 spin_lock_irqsave(&irq_lock, flags); 309 spin_lock_irqsave(&irq_lock, flags);
310 irq_allocations[data->irq - FIRST_IRQ].mask = *dest; 310 irq_allocations[data->irq - FIRST_IRQ].mask = *dest;
311 spin_unlock_irqrestore(&irq_lock, flags); 311 spin_unlock_irqrestore(&irq_lock, flags);
312 return 0; 312 return 0;
313 } 313 }
314 314
315 static struct irq_chip crisv32_irq_type = { 315 static struct irq_chip crisv32_irq_type = {
316 .name = "CRISv32", 316 .name = "CRISv32",
317 .irq_shutdown = disable_crisv32_irq, 317 .irq_shutdown = disable_crisv32_irq,
318 .irq_enable = enable_crisv32_irq, 318 .irq_enable = enable_crisv32_irq,
319 .irq_disable = disable_crisv32_irq, 319 .irq_disable = disable_crisv32_irq,
320 .irq_set_affinity = set_affinity_crisv32_irq, 320 .irq_set_affinity = set_affinity_crisv32_irq,
321 }; 321 };
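Under GENERIC_HARDIRQS_NO_DEPRECATED every chip method receives a struct irq_data rather than a raw IRQ number, and the number is recovered from data->irq, as the enable/disable hooks above do. As a hedged sketch only, a mask hook in the same style would look like this (hypothetical; this commit does not add one):

	/* Hypothetical .irq_mask callback, shown only to illustrate the
	 * irq_data-based signature; not part of this driver. */
	static void mask_crisv32_irq(struct irq_data *data)
	{
		crisv32_mask_irq(data->irq);	/* block on every CPU */
	}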
322 322
323 void 323 void
324 set_exception_vector(int n, irqvectptr addr) 324 set_exception_vector(int n, irqvectptr addr)
325 { 325 {
326 etrax_irv->v[n] = (irqvectptr) addr; 326 etrax_irv->v[n] = (irqvectptr) addr;
327 } 327 }
328 328
329 extern void do_IRQ(int irq, struct pt_regs * regs); 329 extern void do_IRQ(int irq, struct pt_regs * regs);
330 330
331 void 331 void
332 crisv32_do_IRQ(int irq, int block, struct pt_regs* regs) 332 crisv32_do_IRQ(int irq, int block, struct pt_regs* regs)
333 { 333 {
334 	/* Interrupts that may not be moved to another CPU and 334 	/* Interrupts that may not be moved to another CPU and
335 	 * that are IRQF_DISABLED may skip blocking. This is currently 335 	 * that are IRQF_DISABLED may skip blocking. This is currently
336 	 * valid only for the timer IRQ and the IPI, and is used 336 	 * valid only for the timer IRQ and the IPI, and is used
337 	 * by the timer interrupt to avoid watchdog starvation. 337 	 * by the timer interrupt to avoid watchdog starvation.
338 */ 338 */
339 if (!block) { 339 if (!block) {
340 do_IRQ(irq, regs); 340 do_IRQ(irq, regs);
341 return; 341 return;
342 } 342 }
343 343
344 block_irq(irq, smp_processor_id()); 344 block_irq(irq, smp_processor_id());
345 do_IRQ(irq, regs); 345 do_IRQ(irq, regs);
346 346
347 unblock_irq(irq, irq_cpu(irq)); 347 unblock_irq(irq, irq_cpu(irq));
348 } 348 }
349 349
350 /* If multiple interrupts occur simultaneously we get a multiple 350 /* If multiple interrupts occur simultaneously we get a multiple
351  * interrupt from the CPU and software has to sort out which 351  * interrupt from the CPU and software has to sort out which
352  * interrupts happened. There are two special cases here: 352  * interrupts happened. There are two special cases here:
353  * 353  *
354  * 1. Timer interrupts may never be blocked because of the 354  * 1. Timer interrupts may never be blocked because of the
355  *    watchdog (refer to the comment in include/asm/arch/irq.h). 355  *    watchdog (refer to the comment in include/asm/arch/irq.h).
356  * 2. GDB serial port IRQs are not handled here and will be handled 356  * 2. GDB serial port IRQs are not handled here and will be handled
357  *    as a single IRQ when they strike again, because the GDB 357  *    as a single IRQ when they strike again, because the GDB
358  *    stub wants to save the registers in its own fashion. 358  *    stub wants to save the registers in its own fashion.
359 */ 359 */
360 void 360 void
361 crisv32_do_multiple(struct pt_regs* regs) 361 crisv32_do_multiple(struct pt_regs* regs)
362 { 362 {
363 int cpu; 363 int cpu;
364 int mask; 364 int mask;
365 int masked[NBR_REGS]; 365 int masked[NBR_REGS];
366 int bit; 366 int bit;
367 int i; 367 int i;
368 368
369 cpu = smp_processor_id(); 369 cpu = smp_processor_id();
370 370
371 	/* An extra irq_enter here to prevent softIRQs from running after 371 	/* An extra irq_enter here to prevent softIRQs from running after
372 	 * each do_IRQ, which decreases the interrupt latency. 372 	 * each do_IRQ, which decreases the interrupt latency.
373 */ 373 */
374 irq_enter(); 374 irq_enter();
375 375
376 for (i = 0; i < NBR_REGS; i++) { 376 for (i = 0; i < NBR_REGS; i++) {
377 		/* Find out which IRQs happened. */ 377 		/* Find out which IRQs happened. */
378 masked[i] = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], 378 masked[i] = REG_RD_INT_VECT(intr_vect, irq_regs[cpu],
379 r_masked_vect, i); 379 r_masked_vect, i);
380 380
381 /* Calculate new IRQ mask with these IRQs disabled. */ 381 /* Calculate new IRQ mask with these IRQs disabled. */
382 mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i); 382 mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
383 mask &= ~masked[i]; 383 mask &= ~masked[i];
384 384
385 /* Timer IRQ is never masked */ 385 /* Timer IRQ is never masked */
386 #ifdef TIMER_VECT1 386 #ifdef TIMER_VECT1
387 if ((i == 1) && (masked[0] & TIMER_MASK)) 387 if ((i == 1) && (masked[0] & TIMER_MASK))
388 mask |= TIMER_MASK; 388 mask |= TIMER_MASK;
389 #else 389 #else
390 if ((i == 0) && (masked[0] & TIMER_MASK)) 390 if ((i == 0) && (masked[0] & TIMER_MASK))
391 mask |= TIMER_MASK; 391 mask |= TIMER_MASK;
392 #endif 392 #endif
393 /* Block all the IRQs */ 393 /* Block all the IRQs */
394 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask); 394 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
395 395
396 		/* Check for the timer IRQ and handle it specially. */ 396 		/* Check for the timer IRQ and handle it specially. */
397 #ifdef TIMER_VECT1 397 #ifdef TIMER_VECT1
398 if ((i == 1) && (masked[i] & TIMER_MASK)) { 398 if ((i == 1) && (masked[i] & TIMER_MASK)) {
399 masked[i] &= ~TIMER_MASK; 399 masked[i] &= ~TIMER_MASK;
400 do_IRQ(TIMER0_INTR_VECT, regs); 400 do_IRQ(TIMER0_INTR_VECT, regs);
401 } 401 }
402 #else 402 #else
403 if ((i == 0) && (masked[i] & TIMER_MASK)) { 403 if ((i == 0) && (masked[i] & TIMER_MASK)) {
404 masked[i] &= ~TIMER_MASK; 404 masked[i] &= ~TIMER_MASK;
405 do_IRQ(TIMER0_INTR_VECT, regs); 405 do_IRQ(TIMER0_INTR_VECT, regs);
406 } 406 }
407 #endif 407 #endif
408 } 408 }
409 409
410 #ifdef IGNOREMASK 410 #ifdef IGNOREMASK
411 	/* Remove IRQs that can't be handled as multiple. */ 411 	/* Remove IRQs that can't be handled as multiple. */
412 	masked[0] &= ~IGNOREMASK; 412 	masked[0] &= ~IGNOREMASK;
413 #endif 413 #endif
414 414
415 /* Handle the rest of the IRQs. */ 415 /* Handle the rest of the IRQs. */
416 for (i = 0; i < NBR_REGS; i++) { 416 for (i = 0; i < NBR_REGS; i++) {
417 for (bit = 0; bit < 32; bit++) { 417 for (bit = 0; bit < 32; bit++) {
418 if (masked[i] & (1 << bit)) 418 if (masked[i] & (1 << bit))
419 do_IRQ(bit + FIRST_IRQ + i*32, regs); 419 do_IRQ(bit + FIRST_IRQ + i*32, regs);
420 } 420 }
421 } 421 }
422 422
423 /* Unblock all the IRQs. */ 423 /* Unblock all the IRQs. */
424 for (i = 0; i < NBR_REGS; i++) { 424 for (i = 0; i < NBR_REGS; i++) {
425 mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i); 425 mask = REG_RD_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i);
426 mask |= masked[i]; 426 mask |= masked[i];
427 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask); 427 REG_WR_INT_VECT(intr_vect, irq_regs[cpu], rw_mask, i, mask);
428 } 428 }
429 429
430 /* This irq_exit() will trigger the soft IRQs. */ 430 /* This irq_exit() will trigger the soft IRQs. */
431 irq_exit(); 431 irq_exit();
432 } 432 }
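The dispatch loop recovers the global IRQ number as bit + FIRST_IRQ + i*32. As a worked example, assuming FIRST_IRQ is 0x31 as the stub table above suggests: bit 3 set in masked[1] gives 3 + 0x31 + 32 = 0x54, i.e. the line served by IRQ0x54_interrupt.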
433 433
434 /* 434 /*
435  * This is called by start_kernel. It fixes the IRQ masks and sets up the 435  * This is called by start_kernel. It fixes the IRQ masks and sets up the
436 * interrupt vector table to point to bad_interrupt pointers. 436 * interrupt vector table to point to bad_interrupt pointers.
437 */ 437 */
438 void __init 438 void __init
439 init_IRQ(void) 439 init_IRQ(void)
440 { 440 {
441 int i; 441 int i;
442 int j; 442 int j;
443 reg_intr_vect_rw_mask vect_mask = {0}; 443 reg_intr_vect_rw_mask vect_mask = {0};
444 444
445 	/* Clear all interrupt masks. */ 445 	/* Clear all interrupt masks. */
446 for (i = 0; i < NBR_REGS; i++) 446 for (i = 0; i < NBR_REGS; i++)
447 REG_WR_VECT(intr_vect, regi_irq, rw_mask, i, vect_mask); 447 REG_WR_VECT(intr_vect, regi_irq, rw_mask, i, vect_mask);
448 448
449 for (i = 0; i < 256; i++) 449 for (i = 0; i < 256; i++)
450 etrax_irv->v[i] = weird_irq; 450 etrax_irv->v[i] = weird_irq;
451 451
452 	/* Point all IRQs to bad handlers. */ 452 	/* Point all IRQs to bad handlers. */
453 for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) { 453 for (i = FIRST_IRQ, j = 0; j < NR_IRQS; i++, j++) {
454 set_irq_chip_and_handler(j, &crisv32_irq_type, 454 irq_set_chip_and_handler(j, &crisv32_irq_type,
455 handle_simple_irq); 455 handle_simple_irq);
456 set_exception_vector(i, interrupt[j]); 456 set_exception_vector(i, interrupt[j]);
457 } 457 }
458 458
459 /* Mark Timer and IPI IRQs as CPU local */ 459 /* Mark Timer and IPI IRQs as CPU local */
460 irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED; 460 irq_allocations[TIMER0_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
461 irq_desc[TIMER0_INTR_VECT].status |= IRQ_PER_CPU; 461 irq_set_status_flags(TIMER0_INTR_VECT, IRQ_PER_CPU);
462 irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED; 462 irq_allocations[IPI_INTR_VECT - FIRST_IRQ].cpu = CPU_FIXED;
463 irq_desc[IPI_INTR_VECT].status |= IRQ_PER_CPU; 463 irq_set_status_flags(IPI_INTR_VECT, IRQ_PER_CPU);
464 464
465 set_exception_vector(0x00, nmi_interrupt); 465 set_exception_vector(0x00, nmi_interrupt);
466 set_exception_vector(0x30, multiple_interrupt); 466 set_exception_vector(0x30, multiple_interrupt);
467 467
468 /* Set up handler for various MMU bus faults. */ 468 /* Set up handler for various MMU bus faults. */
469 set_exception_vector(0x04, i_mmu_refill); 469 set_exception_vector(0x04, i_mmu_refill);
470 set_exception_vector(0x05, i_mmu_invalid); 470 set_exception_vector(0x05, i_mmu_invalid);
471 set_exception_vector(0x06, i_mmu_access); 471 set_exception_vector(0x06, i_mmu_access);
472 set_exception_vector(0x07, i_mmu_execute); 472 set_exception_vector(0x07, i_mmu_execute);
473 set_exception_vector(0x08, d_mmu_refill); 473 set_exception_vector(0x08, d_mmu_refill);
474 set_exception_vector(0x09, d_mmu_invalid); 474 set_exception_vector(0x09, d_mmu_invalid);
475 set_exception_vector(0x0a, d_mmu_access); 475 set_exception_vector(0x0a, d_mmu_access);
476 set_exception_vector(0x0b, d_mmu_write); 476 set_exception_vector(0x0b, d_mmu_write);
477 477
478 #ifdef CONFIG_BUG 478 #ifdef CONFIG_BUG
479 /* Break 14 handler, used to implement cheap BUG(). */ 479 /* Break 14 handler, used to implement cheap BUG(). */
480 set_exception_vector(0x1e, breakh_BUG); 480 set_exception_vector(0x1e, breakh_BUG);
481 #endif 481 #endif
482 482
483 /* The system-call trap is reached by "break 13". */ 483 /* The system-call trap is reached by "break 13". */
484 set_exception_vector(0x1d, system_call); 484 set_exception_vector(0x1d, system_call);
485 485
486 /* Exception handlers for debugging, both user-mode and kernel-mode. */ 486 /* Exception handlers for debugging, both user-mode and kernel-mode. */
487 487
488 /* Break 8. */ 488 /* Break 8. */
489 set_exception_vector(0x18, gdb_handle_exception); 489 set_exception_vector(0x18, gdb_handle_exception);
490 /* Hardware single step. */ 490 /* Hardware single step. */
491 set_exception_vector(0x3, gdb_handle_exception); 491 set_exception_vector(0x3, gdb_handle_exception);
492 /* Hardware breakpoint. */ 492 /* Hardware breakpoint. */
493 set_exception_vector(0xc, gdb_handle_exception); 493 set_exception_vector(0xc, gdb_handle_exception);
494 494
495 #ifdef CONFIG_ETRAX_KGDB 495 #ifdef CONFIG_ETRAX_KGDB
496 kgdb_init(); 496 kgdb_init();
497 /* Everything is set up; now trap the kernel. */ 497 /* Everything is set up; now trap the kernel. */
498 breakpoint(); 498 breakpoint();
499 #endif 499 #endif
500 } 500 }
501 501
502 502
arch/cris/kernel/irq.c
1 /* 1 /*
2 * 2 *
3 * linux/arch/cris/kernel/irq.c 3 * linux/arch/cris/kernel/irq.c
4 * 4 *
5 * Copyright (c) 2000,2007 Axis Communications AB 5 * Copyright (c) 2000,2007 Axis Communications AB
6 * 6 *
7 * Authors: Bjorn Wesen (bjornw@axis.com) 7 * Authors: Bjorn Wesen (bjornw@axis.com)
8 * 8 *
9 * This file contains the code used by various IRQ handling routines: 9 * This file contains the code used by various IRQ handling routines:
10 * asking for different IRQs should be done through these routines 10 * asking for different IRQs should be done through these routines
11 * instead of just grabbing them. Thus setups with different IRQ numbers 11 * instead of just grabbing them. Thus setups with different IRQ numbers
12 * shouldn't result in any weird surprises, and installing new handlers 12 * shouldn't result in any weird surprises, and installing new handlers
13 * should be easier. 13 * should be easier.
14 * 14 *
15 */ 15 */
16 16
17 /* 17 /*
18 * IRQs are in fact implemented a bit like signal handlers for the kernel. 18 * IRQs are in fact implemented a bit like signal handlers for the kernel.
19 * Naturally it's not a 1:1 relation, but there are similarities. 19 * Naturally it's not a 1:1 relation, but there are similarities.
20 */ 20 */
21 21
22 #include <linux/module.h> 22 #include <linux/module.h>
23 #include <linux/ptrace.h> 23 #include <linux/ptrace.h>
24 #include <linux/irq.h> 24 #include <linux/irq.h>
25 25
26 #include <linux/kernel_stat.h> 26 #include <linux/kernel_stat.h>
27 #include <linux/signal.h> 27 #include <linux/signal.h>
28 #include <linux/sched.h> 28 #include <linux/sched.h>
29 #include <linux/ioport.h> 29 #include <linux/ioport.h>
30 #include <linux/interrupt.h> 30 #include <linux/interrupt.h>
31 #include <linux/timex.h> 31 #include <linux/timex.h>
32 #include <linux/random.h> 32 #include <linux/random.h>
33 #include <linux/init.h> 33 #include <linux/init.h>
34 #include <linux/seq_file.h> 34 #include <linux/seq_file.h>
35 #include <linux/errno.h> 35 #include <linux/errno.h>
36 #include <linux/spinlock.h> 36 #include <linux/spinlock.h>
37 37
38 #include <asm/io.h> 38 #include <asm/io.h>
39 39
40 int show_interrupts(struct seq_file *p, void *v)
41 {
42 int i = *(loff_t *) v, j;
43 struct irqaction * action;
44 unsigned long flags;
45
46 if (i == 0) {
47 seq_printf(p, " ");
48 for_each_online_cpu(j)
49 seq_printf(p, "CPU%d ",j);
50 seq_putc(p, '\n');
51 }
52
53 if (i < NR_IRQS) {
54 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
55 action = irq_desc[i].action;
56 if (!action)
57 goto skip;
58 seq_printf(p, "%3d: ",i);
59 #ifndef CONFIG_SMP
60 seq_printf(p, "%10u ", kstat_irqs(i));
61 #else
62 for_each_online_cpu(j)
63 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
64 #endif
65 seq_printf(p, " %14s", irq_desc[i].irq_data.chip->name);
66 seq_printf(p, " %s", action->name);
67
68 for (action=action->next; action; action = action->next)
69 seq_printf(p, ", %s", action->name);
70
71 seq_putc(p, '\n');
72 skip:
73 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
74 }
75 return 0;
76 }
77
78
79 /* called by the assembler IRQ entry functions defined in irq.h 40 /* called by the assembler IRQ entry functions defined in irq.h
80 * to dispatch the interrupts to registered handlers 41 * to dispatch the interrupts to registered handlers
81  * interrupts are disabled upon entry - depending on whether the 42  * interrupts are disabled upon entry - depending on whether the
82  * interrupt was registered with IRQF_DISABLED, interrupts 43  * interrupt was registered with IRQF_DISABLED, interrupts
83  * are re-enabled or not. 44  * are re-enabled or not.
84 */ 45 */
85 46
86 asmlinkage void do_IRQ(int irq, struct pt_regs * regs) 47 asmlinkage void do_IRQ(int irq, struct pt_regs * regs)
87 { 48 {
88 unsigned long sp; 49 unsigned long sp;
89 struct pt_regs *old_regs = set_irq_regs(regs); 50 struct pt_regs *old_regs = set_irq_regs(regs);
90 irq_enter(); 51 irq_enter();
91 sp = rdsp(); 52 sp = rdsp();
92 if (unlikely((sp & (PAGE_SIZE - 1)) < (PAGE_SIZE/8))) { 53 if (unlikely((sp & (PAGE_SIZE - 1)) < (PAGE_SIZE/8))) {
93 printk("do_IRQ: stack overflow: %lX\n", sp); 54 printk("do_IRQ: stack overflow: %lX\n", sp);
94 show_stack(NULL, (unsigned long *)sp); 55 show_stack(NULL, (unsigned long *)sp);
95 } 56 }
96 generic_handle_irq(irq); 57 generic_handle_irq(irq);
97 irq_exit(); 58 irq_exit();
98 set_irq_regs(old_regs); 59 set_irq_regs(old_regs);
99 } 60 }
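The overflow heuristic is arithmetic on the stack pointer's page offset: sp & (PAGE_SIZE - 1) is the position within the stack page, and the warning fires once less than an eighth of the page is left below sp. For example, if PAGE_SIZE were 4096, any sp with a page offset under 512 bytes would trip the check, since the stack grows downward toward offset 0.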
100 61
101 void weird_irq(void) 62 void weird_irq(void)
102 { 63 {
103 local_irq_disable(); 64 local_irq_disable();
104 printk("weird irq\n"); 65 printk("weird irq\n");
105 while(1); 66 while(1);
106 } 67 }
107 68
108 	 69 	
kernel/irq/manage.c
1 /* 1 /*
2 * linux/kernel/irq/manage.c 2 * linux/kernel/irq/manage.c
3 * 3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar 4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner 5 * Copyright (C) 2005-2006 Thomas Gleixner
6 * 6 *
7 * This file contains driver APIs to the irq subsystem. 7 * This file contains driver APIs to the irq subsystem.
8 */ 8 */
9 9
10 #include <linux/irq.h> 10 #include <linux/irq.h>
11 #include <linux/kthread.h> 11 #include <linux/kthread.h>
12 #include <linux/module.h> 12 #include <linux/module.h>
13 #include <linux/random.h> 13 #include <linux/random.h>
14 #include <linux/interrupt.h> 14 #include <linux/interrupt.h>
15 #include <linux/slab.h> 15 #include <linux/slab.h>
16 #include <linux/sched.h> 16 #include <linux/sched.h>
17 17
18 #include "internals.h" 18 #include "internals.h"
19 19
20 #ifdef CONFIG_IRQ_FORCED_THREADING 20 #ifdef CONFIG_IRQ_FORCED_THREADING
21 __read_mostly bool force_irqthreads; 21 __read_mostly bool force_irqthreads;
22 22
23 static int __init setup_forced_irqthreads(char *arg) 23 static int __init setup_forced_irqthreads(char *arg)
24 { 24 {
25 force_irqthreads = true; 25 force_irqthreads = true;
26 return 0; 26 return 0;
27 } 27 }
28 early_param("threadirqs", setup_forced_irqthreads); 28 early_param("threadirqs", setup_forced_irqthreads);
29 #endif 29 #endif
30 30
31 /** 31 /**
32 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 32 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
33 * @irq: interrupt number to wait for 33 * @irq: interrupt number to wait for
34 * 34 *
35 * This function waits for any pending IRQ handlers for this interrupt 35 * This function waits for any pending IRQ handlers for this interrupt
36 * to complete before returning. If you use this function while 36 * to complete before returning. If you use this function while
37 * holding a resource the IRQ handler may need you will deadlock. 37 * holding a resource the IRQ handler may need you will deadlock.
38 * 38 *
39 * This function may be called - with care - from IRQ context. 39 * This function may be called - with care - from IRQ context.
40 */ 40 */
41 void synchronize_irq(unsigned int irq) 41 void synchronize_irq(unsigned int irq)
42 { 42 {
43 struct irq_desc *desc = irq_to_desc(irq); 43 struct irq_desc *desc = irq_to_desc(irq);
44 unsigned int state; 44 unsigned int state;
45 45
46 if (!desc) 46 if (!desc)
47 return; 47 return;
48 48
49 do { 49 do {
50 unsigned long flags; 50 unsigned long flags;
51 51
52 /* 52 /*
53 * Wait until we're out of the critical section. This might 53 * Wait until we're out of the critical section. This might
54 * give the wrong answer due to the lack of memory barriers. 54 * give the wrong answer due to the lack of memory barriers.
55 */ 55 */
56 while (desc->istate & IRQS_INPROGRESS) 56 while (desc->istate & IRQS_INPROGRESS)
57 cpu_relax(); 57 cpu_relax();
58 58
59 /* Ok, that indicated we're done: double-check carefully. */ 59 /* Ok, that indicated we're done: double-check carefully. */
60 raw_spin_lock_irqsave(&desc->lock, flags); 60 raw_spin_lock_irqsave(&desc->lock, flags);
61 state = desc->istate; 61 state = desc->istate;
62 raw_spin_unlock_irqrestore(&desc->lock, flags); 62 raw_spin_unlock_irqrestore(&desc->lock, flags);
63 63
64 /* Oops, that failed? */ 64 /* Oops, that failed? */
65 } while (state & IRQS_INPROGRESS); 65 } while (state & IRQS_INPROGRESS);
66 66
67 /* 67 /*
68 * We made sure that no hardirq handler is running. Now verify 68 * We made sure that no hardirq handler is running. Now verify
69 * that no threaded handlers are active. 69 * that no threaded handlers are active.
70 */ 70 */
71 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active)); 71 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
72 } 72 }
73 EXPORT_SYMBOL(synchronize_irq); 73 EXPORT_SYMBOL(synchronize_irq);
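A typical caller is a driver tear-down path that must not release resources a handler might still be using. A minimal sketch, assuming a driver-private structure (all names here are illustrative):

	/* Illustrative ordering around synchronize_irq() on tear-down. */
	static void example_shutdown(struct example_dev *dev)
	{
		disable_irq_nosync(dev->irq);	/* stop new invocations */
		synchronize_irq(dev->irq);	/* wait out in-flight handlers */
		free_irq(dev->irq, dev);	/* now safe to release the line */
		kfree(dev->buf);		/* ...and anything the handler touched */
	}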
74 74
75 #ifdef CONFIG_SMP 75 #ifdef CONFIG_SMP
76 cpumask_var_t irq_default_affinity; 76 cpumask_var_t irq_default_affinity;
77 77
78 /** 78 /**
79 * irq_can_set_affinity - Check if the affinity of a given irq can be set 79 * irq_can_set_affinity - Check if the affinity of a given irq can be set
80 * @irq: Interrupt to check 80 * @irq: Interrupt to check
81 * 81 *
82 */ 82 */
83 int irq_can_set_affinity(unsigned int irq) 83 int irq_can_set_affinity(unsigned int irq)
84 { 84 {
85 struct irq_desc *desc = irq_to_desc(irq); 85 struct irq_desc *desc = irq_to_desc(irq);
86 86
87 if (!desc || !irqd_can_balance(&desc->irq_data) || 87 if (!desc || !irqd_can_balance(&desc->irq_data) ||
88 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) 88 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
89 return 0; 89 return 0;
90 90
91 return 1; 91 return 1;
92 } 92 }
93 93
94 /** 94 /**
95 * irq_set_thread_affinity - Notify irq threads to adjust affinity 95 * irq_set_thread_affinity - Notify irq threads to adjust affinity
96  *	@desc:		irq descriptor whose affinity changed 96  *	@desc:		irq descriptor whose affinity changed
97 * 97 *
98 * We just set IRQTF_AFFINITY and delegate the affinity setting 98 * We just set IRQTF_AFFINITY and delegate the affinity setting
99 * to the interrupt thread itself. We can not call 99 * to the interrupt thread itself. We can not call
100 * set_cpus_allowed_ptr() here as we hold desc->lock and this 100 * set_cpus_allowed_ptr() here as we hold desc->lock and this
101 * code can be called from hard interrupt context. 101 * code can be called from hard interrupt context.
102 */ 102 */
103 void irq_set_thread_affinity(struct irq_desc *desc) 103 void irq_set_thread_affinity(struct irq_desc *desc)
104 { 104 {
105 struct irqaction *action = desc->action; 105 struct irqaction *action = desc->action;
106 106
107 while (action) { 107 while (action) {
108 if (action->thread) 108 if (action->thread)
109 set_bit(IRQTF_AFFINITY, &action->thread_flags); 109 set_bit(IRQTF_AFFINITY, &action->thread_flags);
110 action = action->next; 110 action = action->next;
111 } 111 }
112 } 112 }
113 113
114 #ifdef CONFIG_GENERIC_PENDING_IRQ 114 #ifdef CONFIG_GENERIC_PENDING_IRQ
115 static inline bool irq_can_move_pcntxt(struct irq_desc *desc) 115 static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
116 { 116 {
117 return irq_settings_can_move_pcntxt(desc); 117 return irq_settings_can_move_pcntxt(desc);
118 } 118 }
119 static inline bool irq_move_pending(struct irq_desc *desc) 119 static inline bool irq_move_pending(struct irq_desc *desc)
120 { 120 {
121 return irqd_is_setaffinity_pending(&desc->irq_data); 121 return irqd_is_setaffinity_pending(&desc->irq_data);
122 } 122 }
123 static inline void 123 static inline void
124 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) 124 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
125 { 125 {
126 cpumask_copy(desc->pending_mask, mask); 126 cpumask_copy(desc->pending_mask, mask);
127 } 127 }
128 static inline void 128 static inline void
129 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) 129 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
130 { 130 {
131 cpumask_copy(mask, desc->pending_mask); 131 cpumask_copy(mask, desc->pending_mask);
132 } 132 }
133 #else 133 #else
134 static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; } 134 static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
135 static inline bool irq_move_pending(struct irq_desc *desc) { return false; } 135 static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
136 static inline void 136 static inline void
137 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } 137 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
138 static inline void 138 static inline void
139 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } 139 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
140 #endif 140 #endif
141 141
142 /** 142 /**
143 * irq_set_affinity - Set the irq affinity of a given irq 143 * irq_set_affinity - Set the irq affinity of a given irq
144 * @irq: Interrupt to set affinity 144 * @irq: Interrupt to set affinity
145 * @cpumask: cpumask 145 * @cpumask: cpumask
146 * 146 *
147 */ 147 */
148 int irq_set_affinity(unsigned int irq, const struct cpumask *mask) 148 int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
149 { 149 {
150 struct irq_desc *desc = irq_to_desc(irq); 150 struct irq_desc *desc = irq_to_desc(irq);
151 struct irq_chip *chip = desc->irq_data.chip; 151 struct irq_chip *chip = desc->irq_data.chip;
152 unsigned long flags; 152 unsigned long flags;
153 int ret = 0; 153 int ret = 0;
154 154
155 if (!chip->irq_set_affinity) 155 if (!chip->irq_set_affinity)
156 return -EINVAL; 156 return -EINVAL;
157 157
158 raw_spin_lock_irqsave(&desc->lock, flags); 158 raw_spin_lock_irqsave(&desc->lock, flags);
159 159
160 if (irq_can_move_pcntxt(desc)) { 160 if (irq_can_move_pcntxt(desc)) {
161 ret = chip->irq_set_affinity(&desc->irq_data, mask, false); 161 ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
162 switch (ret) { 162 switch (ret) {
163 case IRQ_SET_MASK_OK: 163 case IRQ_SET_MASK_OK:
164 cpumask_copy(desc->irq_data.affinity, mask); 164 cpumask_copy(desc->irq_data.affinity, mask);
165 case IRQ_SET_MASK_OK_NOCOPY: 165 case IRQ_SET_MASK_OK_NOCOPY:
166 irq_set_thread_affinity(desc); 166 irq_set_thread_affinity(desc);
167 ret = 0; 167 ret = 0;
168 } 168 }
169 } else { 169 } else {
170 irqd_set_move_pending(&desc->irq_data); 170 irqd_set_move_pending(&desc->irq_data);
171 irq_copy_pending(desc, mask); 171 irq_copy_pending(desc, mask);
172 } 172 }
173 173
174 if (desc->affinity_notify) { 174 if (desc->affinity_notify) {
175 kref_get(&desc->affinity_notify->kref); 175 kref_get(&desc->affinity_notify->kref);
176 schedule_work(&desc->affinity_notify->work); 176 schedule_work(&desc->affinity_notify->work);
177 } 177 }
178 irq_compat_set_affinity(desc); 178 irq_compat_set_affinity(desc);
179 irqd_set(&desc->irq_data, IRQD_AFFINITY_SET); 179 irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
180 raw_spin_unlock_irqrestore(&desc->lock, flags); 180 raw_spin_unlock_irqrestore(&desc->lock, flags);
181 return ret; 181 return ret;
182 } 182 }
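Callers hand in any cpumask; pinning an interrupt to a single CPU is the common case. A short sketch (the IRQ number is illustrative):

	/* Pin IRQ 42 to CPU 0; returns 0 or a negative errno. */
	if (irq_set_affinity(42, cpumask_of(0)))
		printk(KERN_WARNING "could not set affinity for irq 42\n");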
183 183
184 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) 184 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
185 { 185 {
186 unsigned long flags; 186 unsigned long flags;
187 struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 187 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
188 188
189 if (!desc) 189 if (!desc)
190 return -EINVAL; 190 return -EINVAL;
191 desc->affinity_hint = m; 191 desc->affinity_hint = m;
192 irq_put_desc_unlock(desc, flags); 192 irq_put_desc_unlock(desc, flags);
193 return 0; 193 return 0;
194 } 194 }
195 EXPORT_SYMBOL_GPL(irq_set_affinity_hint); 195 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
196 196
197 static void irq_affinity_notify(struct work_struct *work) 197 static void irq_affinity_notify(struct work_struct *work)
198 { 198 {
199 struct irq_affinity_notify *notify = 199 struct irq_affinity_notify *notify =
200 container_of(work, struct irq_affinity_notify, work); 200 container_of(work, struct irq_affinity_notify, work);
201 struct irq_desc *desc = irq_to_desc(notify->irq); 201 struct irq_desc *desc = irq_to_desc(notify->irq);
202 cpumask_var_t cpumask; 202 cpumask_var_t cpumask;
203 unsigned long flags; 203 unsigned long flags;
204 204
205 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) 205 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
206 goto out; 206 goto out;
207 207
208 raw_spin_lock_irqsave(&desc->lock, flags); 208 raw_spin_lock_irqsave(&desc->lock, flags);
209 if (irq_move_pending(desc)) 209 if (irq_move_pending(desc))
210 irq_get_pending(cpumask, desc); 210 irq_get_pending(cpumask, desc);
211 else 211 else
212 cpumask_copy(cpumask, desc->irq_data.affinity); 212 cpumask_copy(cpumask, desc->irq_data.affinity);
213 raw_spin_unlock_irqrestore(&desc->lock, flags); 213 raw_spin_unlock_irqrestore(&desc->lock, flags);
214 214
215 notify->notify(notify, cpumask); 215 notify->notify(notify, cpumask);
216 216
217 free_cpumask_var(cpumask); 217 free_cpumask_var(cpumask);
218 out: 218 out:
219 kref_put(&notify->kref, notify->release); 219 kref_put(&notify->kref, notify->release);
220 } 220 }
221 221
222 /** 222 /**
223 * irq_set_affinity_notifier - control notification of IRQ affinity changes 223 * irq_set_affinity_notifier - control notification of IRQ affinity changes
224 * @irq: Interrupt for which to enable/disable notification 224 * @irq: Interrupt for which to enable/disable notification
225 * @notify: Context for notification, or %NULL to disable 225 * @notify: Context for notification, or %NULL to disable
226 * notification. Function pointers must be initialised; 226 * notification. Function pointers must be initialised;
227 * the other fields will be initialised by this function. 227 * the other fields will be initialised by this function.
228 * 228 *
229 * Must be called in process context. Notification may only be enabled 229 * Must be called in process context. Notification may only be enabled
230 * after the IRQ is allocated and must be disabled before the IRQ is 230 * after the IRQ is allocated and must be disabled before the IRQ is
231 * freed using free_irq(). 231 * freed using free_irq().
232 */ 232 */
233 int 233 int
234 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) 234 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
235 { 235 {
236 struct irq_desc *desc = irq_to_desc(irq); 236 struct irq_desc *desc = irq_to_desc(irq);
237 struct irq_affinity_notify *old_notify; 237 struct irq_affinity_notify *old_notify;
238 unsigned long flags; 238 unsigned long flags;
239 239
240 /* The release function is promised process context */ 240 /* The release function is promised process context */
241 might_sleep(); 241 might_sleep();
242 242
243 if (!desc) 243 if (!desc)
244 return -EINVAL; 244 return -EINVAL;
245 245
246 /* Complete initialisation of *notify */ 246 /* Complete initialisation of *notify */
247 if (notify) { 247 if (notify) {
248 notify->irq = irq; 248 notify->irq = irq;
249 kref_init(&notify->kref); 249 kref_init(&notify->kref);
250 INIT_WORK(&notify->work, irq_affinity_notify); 250 INIT_WORK(&notify->work, irq_affinity_notify);
251 } 251 }
252 252
253 raw_spin_lock_irqsave(&desc->lock, flags); 253 raw_spin_lock_irqsave(&desc->lock, flags);
254 old_notify = desc->affinity_notify; 254 old_notify = desc->affinity_notify;
255 desc->affinity_notify = notify; 255 desc->affinity_notify = notify;
256 raw_spin_unlock_irqrestore(&desc->lock, flags); 256 raw_spin_unlock_irqrestore(&desc->lock, flags);
257 257
258 if (old_notify) 258 if (old_notify)
259 kref_put(&old_notify->kref, old_notify->release); 259 kref_put(&old_notify->kref, old_notify->release);
260 260
261 return 0; 261 return 0;
262 } 262 }
263 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); 263 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
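A notifier is a caller-owned object whose notify() and release() hooks must be filled in before registration; the core initialises the remaining fields and invokes notify() with the mask computed under desc->lock above. A hedged sketch (all names illustrative):

	/* Illustrative affinity notifier; only .notify and .release are
	 * supplied by the caller, the rest is set up by the core. */
	static void example_notify(struct irq_affinity_notify *notify,
				   const cpumask_var_t mask)
	{
		/* Re-steer per-CPU resources to the new mask here. */
	}

	static void example_release(struct kref *kref)
	{
		/* Last reference dropped; free the embedding object here. */
	}

	static struct irq_affinity_notify example_notifier = {
		.notify  = example_notify,
		.release = example_release,
	};

	/* ... irq_set_affinity_notifier(irq, &example_notifier); */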
264 264
265 #ifndef CONFIG_AUTO_IRQ_AFFINITY 265 #ifndef CONFIG_AUTO_IRQ_AFFINITY
266 /* 266 /*
267 * Generic version of the affinity autoselector. 267 * Generic version of the affinity autoselector.
268 */ 268 */
269 static int 269 static int
270 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) 270 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
271 { 271 {
272 struct irq_chip *chip = irq_desc_get_chip(desc); 272 struct irq_chip *chip = irq_desc_get_chip(desc);
273 struct cpumask *set = irq_default_affinity; 273 struct cpumask *set = irq_default_affinity;
274 int ret; 274 int ret;
275 275
276 /* Excludes PER_CPU and NO_BALANCE interrupts */ 276 /* Excludes PER_CPU and NO_BALANCE interrupts */
277 if (!irq_can_set_affinity(irq)) 277 if (!irq_can_set_affinity(irq))
278 return 0; 278 return 0;
279 279
280 /* 280 /*
281 * Preserve an userspace affinity setup, but make sure that 281 * Preserve an userspace affinity setup, but make sure that
282 * one of the targets is online. 282 * one of the targets is online.
283 */ 283 */
284 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { 284 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
285 if (cpumask_intersects(desc->irq_data.affinity, 285 if (cpumask_intersects(desc->irq_data.affinity,
286 cpu_online_mask)) 286 cpu_online_mask))
287 set = desc->irq_data.affinity; 287 set = desc->irq_data.affinity;
288 else { 288 else {
289 irq_compat_clr_affinity(desc); 289 irq_compat_clr_affinity(desc);
290 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); 290 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
291 } 291 }
292 } 292 }
293 293
294 cpumask_and(mask, cpu_online_mask, set); 294 cpumask_and(mask, cpu_online_mask, set);
295 ret = chip->irq_set_affinity(&desc->irq_data, mask, false); 295 ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
296 switch (ret) { 296 switch (ret) {
297 case IRQ_SET_MASK_OK: 297 case IRQ_SET_MASK_OK:
298 cpumask_copy(desc->irq_data.affinity, mask); 298 cpumask_copy(desc->irq_data.affinity, mask);
299 case IRQ_SET_MASK_OK_NOCOPY: 299 case IRQ_SET_MASK_OK_NOCOPY:
300 irq_set_thread_affinity(desc); 300 irq_set_thread_affinity(desc);
301 } 301 }
302 return 0; 302 return 0;
303 } 303 }
304 #else 304 #else
305 static inline int 305 static inline int
306 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) 306 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
307 { 307 {
308 return irq_select_affinity(irq); 308 return irq_select_affinity(irq);
309 } 309 }
310 #endif 310 #endif
311 311
312 /* 312 /*
313 * Called when affinity is set via /proc/irq 313 * Called when affinity is set via /proc/irq
314 */ 314 */
315 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask) 315 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
316 { 316 {
317 struct irq_desc *desc = irq_to_desc(irq); 317 struct irq_desc *desc = irq_to_desc(irq);
318 unsigned long flags; 318 unsigned long flags;
319 int ret; 319 int ret;
320 320
321 raw_spin_lock_irqsave(&desc->lock, flags); 321 raw_spin_lock_irqsave(&desc->lock, flags);
322 ret = setup_affinity(irq, desc, mask); 322 ret = setup_affinity(irq, desc, mask);
323 raw_spin_unlock_irqrestore(&desc->lock, flags); 323 raw_spin_unlock_irqrestore(&desc->lock, flags);
324 return ret; 324 return ret;
325 } 325 }
326 326
327 #else 327 #else
328 static inline int 328 static inline int
329 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) 329 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
330 { 330 {
331 return 0; 331 return 0;
332 } 332 }
333 #endif 333 #endif
334 334
335 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) 335 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
336 { 336 {
337 if (suspend) { 337 if (suspend) {
338 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) 338 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
339 return; 339 return;
340 desc->istate |= IRQS_SUSPENDED; 340 desc->istate |= IRQS_SUSPENDED;
341 } 341 }
342 342
343 if (!desc->depth++) 343 if (!desc->depth++)
344 irq_disable(desc); 344 irq_disable(desc);
345 } 345 }
346 346
347 static int __disable_irq_nosync(unsigned int irq) 347 static int __disable_irq_nosync(unsigned int irq)
348 { 348 {
349 unsigned long flags; 349 unsigned long flags;
350 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 350 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
351 351
352 if (!desc) 352 if (!desc)
353 return -EINVAL; 353 return -EINVAL;
354 __disable_irq(desc, irq, false); 354 __disable_irq(desc, irq, false);
355 irq_put_desc_busunlock(desc, flags); 355 irq_put_desc_busunlock(desc, flags);
356 return 0; 356 return 0;
357 } 357 }
358 358
359 /** 359 /**
360 * disable_irq_nosync - disable an irq without waiting 360 * disable_irq_nosync - disable an irq without waiting
361 * @irq: Interrupt to disable 361 * @irq: Interrupt to disable
362 * 362 *
363 * Disable the selected interrupt line. Disables and Enables are 363 * Disable the selected interrupt line. Disables and Enables are
364 * nested. 364 * nested.
365 * Unlike disable_irq(), this function does not ensure existing 365 * Unlike disable_irq(), this function does not ensure existing
366 * instances of the IRQ handler have completed before returning. 366 * instances of the IRQ handler have completed before returning.
367 * 367 *
368 * This function may be called from IRQ context. 368 * This function may be called from IRQ context.
369 */ 369 */
370 void disable_irq_nosync(unsigned int irq) 370 void disable_irq_nosync(unsigned int irq)
371 { 371 {
372 __disable_irq_nosync(irq); 372 __disable_irq_nosync(irq);
373 } 373 }
374 EXPORT_SYMBOL(disable_irq_nosync); 374 EXPORT_SYMBOL(disable_irq_nosync);
375 375
376 /** 376 /**
377 * disable_irq - disable an irq and wait for completion 377 * disable_irq - disable an irq and wait for completion
378 * @irq: Interrupt to disable 378 * @irq: Interrupt to disable
379 * 379 *
380 * Disable the selected interrupt line. Enables and Disables are 380 * Disable the selected interrupt line. Enables and Disables are
381 * nested. 381 * nested.
382 * This function waits for any pending IRQ handlers for this interrupt 382 * This function waits for any pending IRQ handlers for this interrupt
383 * to complete before returning. If you use this function while 383 * to complete before returning. If you use this function while
384 * holding a resource the IRQ handler may need you will deadlock. 384 * holding a resource the IRQ handler may need you will deadlock.
385 * 385 *
386 * This function may be called - with care - from IRQ context. 386 * This function may be called - with care - from IRQ context.
387 */ 387 */
388 void disable_irq(unsigned int irq) 388 void disable_irq(unsigned int irq)
389 { 389 {
390 if (!__disable_irq_nosync(irq)) 390 if (!__disable_irq_nosync(irq))
391 synchronize_irq(irq); 391 synchronize_irq(irq);
392 } 392 }
393 EXPORT_SYMBOL(disable_irq); 393 EXPORT_SYMBOL(disable_irq);
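Because disables nest, the line stays masked until every disable_irq() has been balanced by an enable_irq(). A sketch of the depth accounting:

	/* Illustrative nesting: the line only unmasks at depth 0 again. */
	static void example_nested_disable(unsigned int irq)
	{
		disable_irq(irq);	/* depth 0 -> 1: masked, handlers drained */
		disable_irq(irq);	/* depth 1 -> 2: still masked */
		enable_irq(irq);	/* depth 2 -> 1: still masked */
		enable_irq(irq);	/* depth 1 -> 0: unmasked, pending IRQs resent */
	}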
394 394
395 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) 395 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
396 { 396 {
397 if (resume) { 397 if (resume) {
398 if (!(desc->istate & IRQS_SUSPENDED)) { 398 if (!(desc->istate & IRQS_SUSPENDED)) {
399 if (!desc->action) 399 if (!desc->action)
400 return; 400 return;
401 if (!(desc->action->flags & IRQF_FORCE_RESUME)) 401 if (!(desc->action->flags & IRQF_FORCE_RESUME))
402 return; 402 return;
403 			/* Pretend that it got disabled! */ 403 			/* Pretend that it got disabled! */
404 desc->depth++; 404 desc->depth++;
405 } 405 }
406 desc->istate &= ~IRQS_SUSPENDED; 406 desc->istate &= ~IRQS_SUSPENDED;
407 } 407 }
408 408
409 switch (desc->depth) { 409 switch (desc->depth) {
410 case 0: 410 case 0:
411 err_out: 411 err_out:
412 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 412 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
413 break; 413 break;
414 case 1: { 414 case 1: {
415 if (desc->istate & IRQS_SUSPENDED) 415 if (desc->istate & IRQS_SUSPENDED)
416 goto err_out; 416 goto err_out;
417 /* Prevent probing on this irq: */ 417 /* Prevent probing on this irq: */
418 irq_settings_set_noprobe(desc); 418 irq_settings_set_noprobe(desc);
419 irq_enable(desc); 419 irq_enable(desc);
420 check_irq_resend(desc, irq); 420 check_irq_resend(desc, irq);
421 /* fall-through */ 421 /* fall-through */
422 } 422 }
423 default: 423 default:
424 desc->depth--; 424 desc->depth--;
425 } 425 }
426 } 426 }
427 427
428 /** 428 /**
429 * enable_irq - enable handling of an irq 429 * enable_irq - enable handling of an irq
430 * @irq: Interrupt to enable 430 * @irq: Interrupt to enable
431 * 431 *
432 * Undoes the effect of one call to disable_irq(). If this 432 * Undoes the effect of one call to disable_irq(). If this
433 * matches the last disable, processing of interrupts on this 433 * matches the last disable, processing of interrupts on this
434 * IRQ line is re-enabled. 434 * IRQ line is re-enabled.
435 * 435 *
436 * This function may be called from IRQ context only when 436 * This function may be called from IRQ context only when
437  *	desc->irq_data.chip->bus_lock and ->bus_sync_unlock are NULL! 437  *	desc->irq_data.chip->bus_lock and ->bus_sync_unlock are NULL!
438 */ 438 */
439 void enable_irq(unsigned int irq) 439 void enable_irq(unsigned int irq)
440 { 440 {
441 unsigned long flags; 441 unsigned long flags;
442 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 442 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
443 443
444 if (!desc) 444 if (!desc)
445 return; 445 return;
446 if (WARN(!desc->irq_data.chip, 446 if (WARN(!desc->irq_data.chip,
447 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) 447 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
448 goto out; 448 goto out;
449 449
450 __enable_irq(desc, irq, false); 450 __enable_irq(desc, irq, false);
451 out: 451 out:
452 irq_put_desc_busunlock(desc, flags); 452 irq_put_desc_busunlock(desc, flags);
453 } 453 }
454 EXPORT_SYMBOL(enable_irq); 454 EXPORT_SYMBOL(enable_irq);
455 455
456 static int set_irq_wake_real(unsigned int irq, unsigned int on) 456 static int set_irq_wake_real(unsigned int irq, unsigned int on)
457 { 457 {
458 struct irq_desc *desc = irq_to_desc(irq); 458 struct irq_desc *desc = irq_to_desc(irq);
459 int ret = -ENXIO; 459 int ret = -ENXIO;
460 460
461 if (desc->irq_data.chip->irq_set_wake) 461 if (desc->irq_data.chip->irq_set_wake)
462 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); 462 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
463 463
464 return ret; 464 return ret;
465 } 465 }
466 466
467 /** 467 /**
468 * irq_set_irq_wake - control irq power management wakeup 468 * irq_set_irq_wake - control irq power management wakeup
469 * @irq: interrupt to control 469 * @irq: interrupt to control
470 * @on: enable/disable power management wakeup 470 * @on: enable/disable power management wakeup
471 * 471 *
472 * Enable/disable power management wakeup mode, which is 472 * Enable/disable power management wakeup mode, which is
473 * disabled by default. Enables and disables must match, 473 * disabled by default. Enables and disables must match,
474 * just as they match for non-wakeup mode support. 474 * just as they match for non-wakeup mode support.
475 * 475 *
476 * Wakeup mode lets this IRQ wake the system from sleep 476 * Wakeup mode lets this IRQ wake the system from sleep
477 * states like "suspend to RAM". 477 * states like "suspend to RAM".
478 */ 478 */
479 int irq_set_irq_wake(unsigned int irq, unsigned int on) 479 int irq_set_irq_wake(unsigned int irq, unsigned int on)
480 { 480 {
481 unsigned long flags; 481 unsigned long flags;
482 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 482 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
483 int ret = 0; 483 int ret = 0;
484 484
485 /* wakeup-capable irqs can be shared between drivers that 485 /* wakeup-capable irqs can be shared between drivers that
486 * don't need to have the same sleep mode behaviors. 486 * don't need to have the same sleep mode behaviors.
487 */ 487 */
488 if (on) { 488 if (on) {
489 if (desc->wake_depth++ == 0) { 489 if (desc->wake_depth++ == 0) {
490 ret = set_irq_wake_real(irq, on); 490 ret = set_irq_wake_real(irq, on);
491 if (ret) 491 if (ret)
492 desc->wake_depth = 0; 492 desc->wake_depth = 0;
493 else 493 else
494 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); 494 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
495 } 495 }
496 } else { 496 } else {
497 if (desc->wake_depth == 0) { 497 if (desc->wake_depth == 0) {
498 WARN(1, "Unbalanced IRQ %d wake disable\n", irq); 498 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
499 } else if (--desc->wake_depth == 0) { 499 } else if (--desc->wake_depth == 0) {
500 ret = set_irq_wake_real(irq, on); 500 ret = set_irq_wake_real(irq, on);
501 if (ret) 501 if (ret)
502 desc->wake_depth = 1; 502 desc->wake_depth = 1;
503 else 503 else
504 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); 504 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
505 } 505 }
506 } 506 }
507 irq_put_desc_busunlock(desc, flags); 507 irq_put_desc_busunlock(desc, flags);
508 return ret; 508 return ret;
509 } 509 }
510 EXPORT_SYMBOL(irq_set_irq_wake); 510 EXPORT_SYMBOL(irq_set_irq_wake);
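A device driver typically arms its wake interrupt on suspend and balances it on resume, matching the depth accounting above. A minimal sketch, assuming a driver-private structure with a wake_irq field (names illustrative):

	/* Illustrative suspend/resume pair around irq_set_irq_wake(). */
	static int example_suspend(struct example_dev *edev)
	{
		irq_set_irq_wake(edev->wake_irq, 1);	/* arm as wake source */
		return 0;
	}

	static int example_resume(struct example_dev *edev)
	{
		irq_set_irq_wake(edev->wake_irq, 0);	/* balance the enable */
		return 0;
	}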
511 511
512 /* 512 /*
513 * Internal function that tells the architecture code whether a 513 * Internal function that tells the architecture code whether a
514 * particular irq has been exclusively allocated or is available 514 * particular irq has been exclusively allocated or is available
515 * for driver use. 515 * for driver use.
516 */ 516 */
517 int can_request_irq(unsigned int irq, unsigned long irqflags) 517 int can_request_irq(unsigned int irq, unsigned long irqflags)
518 { 518 {
519 unsigned long flags; 519 unsigned long flags;
520 struct irq_desc *desc = irq_get_desc_lock(irq, &flags); 520 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
521 int canrequest = 0; 521 int canrequest = 0;
522 522
523 if (!desc) 523 if (!desc)
524 return 0; 524 return 0;
525 525
526 if (irq_settings_can_request(desc)) { 526 if (irq_settings_can_request(desc)) {
527 if (desc->action) 527 if (desc->action)
528 if (irqflags & desc->action->flags & IRQF_SHARED) 528 if (irqflags & desc->action->flags & IRQF_SHARED)
529 canrequest = 1; 529 canrequest = 1;
530 } 530 }
531 irq_put_desc_unlock(desc, flags); 531 irq_put_desc_unlock(desc, flags);
532 return canrequest; 532 return canrequest;
533 } 533 }
534 534
535 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 535 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
536 unsigned long flags) 536 unsigned long flags)
537 { 537 {
538 struct irq_chip *chip = desc->irq_data.chip; 538 struct irq_chip *chip = desc->irq_data.chip;
539 int ret, unmask = 0; 539 int ret, unmask = 0;
540 540
541 if (!chip || !chip->irq_set_type) { 541 if (!chip || !chip->irq_set_type) {
542 /* 542 /*
543 * IRQF_TRIGGER_* but the PIC does not support multiple 543 * IRQF_TRIGGER_* but the PIC does not support multiple
544 * flow-types? 544 * flow-types?
545 */ 545 */
546 pr_debug("No set_type function for IRQ %d (%s)\n", irq, 546 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
547 chip ? (chip->name ? : "unknown") : "unknown"); 547 chip ? (chip->name ? : "unknown") : "unknown");
548 return 0; 548 return 0;
549 } 549 }
550 550
551 flags &= IRQ_TYPE_SENSE_MASK; 551 flags &= IRQ_TYPE_SENSE_MASK;
552 552
553 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { 553 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
554 if (!(desc->istate & IRQS_MASKED)) 554 if (!(desc->istate & IRQS_MASKED))
555 mask_irq(desc); 555 mask_irq(desc);
556 if (!(desc->istate & IRQS_DISABLED)) 556 if (!(desc->istate & IRQS_DISABLED))
557 unmask = 1; 557 unmask = 1;
558 } 558 }
559 559
560 /* caller masked out all except trigger mode flags */ 560 /* caller masked out all except trigger mode flags */
561 ret = chip->irq_set_type(&desc->irq_data, flags); 561 ret = chip->irq_set_type(&desc->irq_data, flags);
562 562
563 switch (ret) { 563 switch (ret) {
564 case IRQ_SET_MASK_OK: 564 case IRQ_SET_MASK_OK:
565 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); 565 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
566 irqd_set(&desc->irq_data, flags); 566 irqd_set(&desc->irq_data, flags);
567 567
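/* fall through - OK also updates the cached trigger settings */ /* fall through - OK also updates the cached trigger settings */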
568 case IRQ_SET_MASK_OK_NOCOPY: 568 case IRQ_SET_MASK_OK_NOCOPY:
569 flags = irqd_get_trigger_type(&desc->irq_data); 569 flags = irqd_get_trigger_type(&desc->irq_data);
570 irq_settings_set_trigger_mask(desc, flags); 570 irq_settings_set_trigger_mask(desc, flags);
571 irqd_clear(&desc->irq_data, IRQD_LEVEL); 571 irqd_clear(&desc->irq_data, IRQD_LEVEL);
572 irq_settings_clr_level(desc); 572 irq_settings_clr_level(desc);
573 if (flags & IRQ_TYPE_LEVEL_MASK) { 573 if (flags & IRQ_TYPE_LEVEL_MASK) {
574 irq_settings_set_level(desc); 574 irq_settings_set_level(desc);
575 irqd_set(&desc->irq_data, IRQD_LEVEL); 575 irqd_set(&desc->irq_data, IRQD_LEVEL);
576 } 576 }
577 577
578 if (chip != desc->irq_data.chip) 578 if (chip != desc->irq_data.chip)
579 irq_chip_set_defaults(desc->irq_data.chip); 579 irq_chip_set_defaults(desc->irq_data.chip);
580 ret = 0; 580 ret = 0;
581 break; 581 break;
582 default: 582 default:
583 pr_err("setting trigger mode %lu for irq %u failed (%pF)\n", 583 pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
584 flags, irq, chip->irq_set_type); 584 flags, irq, chip->irq_set_type);
585 } 585 }
586 if (unmask) 586 if (unmask)
587 unmask_irq(desc); 587 unmask_irq(desc);
588 return ret; 588 return ret;
589 } 589 }
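To illustrate the return-value contract that the switch above enforces, here is a hedged sketch of an irq_chip ->irq_set_type() callback; the MMIO base and register offsets are invented. Returning IRQ_SET_MASK_OK lets the core copy the new trigger flags into irq_data, IRQ_SET_MASK_OK_NOCOPY means the callback adjusted them itself, and anything else lands in the pr_err() default case:

#include <linux/irq.h>
#include <linux/io.h>

static void __iomem *my_chip_base;	/* invented MMIO mapping */

static int my_chip_set_type(struct irq_data *d, unsigned int type)
{
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		writel(1 << d->irq, my_chip_base + 0x10); /* invented edge reg */
		return IRQ_SET_MASK_OK;
	case IRQ_TYPE_LEVEL_HIGH:
		writel(1 << d->irq, my_chip_base + 0x14); /* invented level reg */
		return IRQ_SET_MASK_OK;
	default:
		/* unsupported trigger mode: reported by the caller */
		return -EINVAL;
	}
}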
590 590
591 /* 591 /*
592 * Default primary interrupt handler for threaded interrupts. Is 592 * Default primary interrupt handler for threaded interrupts. Is
593 * assigned as primary handler when request_threaded_irq is called 593 * assigned as primary handler when request_threaded_irq is called
594 * with handler == NULL. Useful for oneshot interrupts. 594 * with handler == NULL. Useful for oneshot interrupts.
595 */ 595 */
596 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id) 596 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
597 { 597 {
598 return IRQ_WAKE_THREAD; 598 return IRQ_WAKE_THREAD;
599 } 599 }
600 600
601 /* 601 /*
602 * Primary handler for nested threaded interrupts. Should never be 602 * Primary handler for nested threaded interrupts. Should never be
603 * called. 603 * called.
604 */ 604 */
605 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id) 605 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
606 { 606 {
607 WARN(1, "Primary handler called for nested irq %d\n", irq); 607 WARN(1, "Primary handler called for nested irq %d\n", irq);
608 return IRQ_NONE; 608 return IRQ_NONE;
609 } 609 }
610 610
611 static int irq_wait_for_interrupt(struct irqaction *action) 611 static int irq_wait_for_interrupt(struct irqaction *action)
612 { 612 {
613 while (!kthread_should_stop()) { 613 while (!kthread_should_stop()) {
614 set_current_state(TASK_INTERRUPTIBLE); 614 set_current_state(TASK_INTERRUPTIBLE);
615 615
616 if (test_and_clear_bit(IRQTF_RUNTHREAD, 616 if (test_and_clear_bit(IRQTF_RUNTHREAD,
617 &action->thread_flags)) { 617 &action->thread_flags)) {
618 __set_current_state(TASK_RUNNING); 618 __set_current_state(TASK_RUNNING);
619 return 0; 619 return 0;
620 } 620 }
621 schedule(); 621 schedule();
622 } 622 }
623 return -1; 623 return -1;
624 } 624 }
625 625
626 /* 626 /*
627 * Oneshot interrupts keep the irq line masked until the threaded 627 * Oneshot interrupts keep the irq line masked until the threaded
628 * handler has finished. Unmask if the interrupt has not been disabled and 628 * handler has finished. Unmask if the interrupt has not been disabled and
629 * is marked MASKED. 629 * is marked MASKED.
630 */ 630 */
631 static void irq_finalize_oneshot(struct irq_desc *desc, 631 static void irq_finalize_oneshot(struct irq_desc *desc,
632 struct irqaction *action, bool force) 632 struct irqaction *action, bool force)
633 { 633 {
634 if (!(desc->istate & IRQS_ONESHOT)) 634 if (!(desc->istate & IRQS_ONESHOT))
635 return; 635 return;
636 again: 636 again:
637 chip_bus_lock(desc); 637 chip_bus_lock(desc);
638 raw_spin_lock_irq(&desc->lock); 638 raw_spin_lock_irq(&desc->lock);
639 639
640 /* 640 /*
641 * Implausible though it may be, we need to protect against 641 * Implausible though it may be, we need to protect against
642 * the following scenario: 642 * the following scenario:
643 * 643 *
644 * The thread finishes faster than the hard interrupt handler 644 * The thread finishes faster than the hard interrupt handler
645 * on the other CPU. If we unmask the irq line then the 645 * on the other CPU. If we unmask the irq line then the
646 * interrupt can come in again, mask the line and leave due 646 * interrupt can come in again, mask the line and leave due
647 * to IRQS_INPROGRESS, and the irq line is masked forever. 647 * to IRQS_INPROGRESS, and the irq line is masked forever.
648 * 648 *
649 * This also serializes the state of shared oneshot handlers 649 * This also serializes the state of shared oneshot handlers
650 * versus "desc->threads_oneshot |= action->thread_mask;" in 650 * versus "desc->threads_oneshot |= action->thread_mask;" in
651 * irq_wake_thread(). See the comment there which explains the 651 * irq_wake_thread(). See the comment there which explains the
652 * serialization. 652 * serialization.
653 */ 653 */
654 if (unlikely(desc->istate & IRQS_INPROGRESS)) { 654 if (unlikely(desc->istate & IRQS_INPROGRESS)) {
655 raw_spin_unlock_irq(&desc->lock); 655 raw_spin_unlock_irq(&desc->lock);
656 chip_bus_sync_unlock(desc); 656 chip_bus_sync_unlock(desc);
657 cpu_relax(); 657 cpu_relax();
658 goto again; 658 goto again;
659 } 659 }
660 660
661 /* 661 /*
662 * Now check again whether the thread should run. Otherwise 662 * Now check again whether the thread should run. Otherwise
663 * we would clear the threads_oneshot bit of this thread which 663 * we would clear the threads_oneshot bit of this thread which
664 * was just set. 664 * was just set.
665 */ 665 */
666 if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags)) 666 if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
667 goto out_unlock; 667 goto out_unlock;
668 668
669 desc->threads_oneshot &= ~action->thread_mask; 669 desc->threads_oneshot &= ~action->thread_mask;
670 670
671 if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) && 671 if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
672 (desc->istate & IRQS_MASKED)) { 672 (desc->istate & IRQS_MASKED)) {
673 irq_compat_clr_masked(desc); 673 irq_compat_clr_masked(desc);
674 desc->istate &= ~IRQS_MASKED; 674 desc->istate &= ~IRQS_MASKED;
675 desc->irq_data.chip->irq_unmask(&desc->irq_data); 675 desc->irq_data.chip->irq_unmask(&desc->irq_data);
676 } 676 }
677 out_unlock: 677 out_unlock:
678 raw_spin_unlock_irq(&desc->lock); 678 raw_spin_unlock_irq(&desc->lock);
679 chip_bus_sync_unlock(desc); 679 chip_bus_sync_unlock(desc);
680 } 680 }
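The conditional unmask above is what makes IRQF_ONESHOT safe for level-triggered devices: the line stays masked until the thread finishes. A hedged request-side sketch (the thread function and device cookie are invented):

#include <linux/interrupt.h>

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	/* runs in process context; the line is still masked here */
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *my_dev)
{
	/* NULL primary handler: irq_default_primary_handler() below just
	 * returns IRQ_WAKE_THREAD; the line stays masked until
	 * my_thread_fn() returns and irq_finalize_oneshot() unmasks it. */
	return request_threaded_irq(irq, NULL, my_thread_fn,
				    IRQF_ONESHOT | IRQF_TRIGGER_LOW,
				    "my_dev", my_dev);
}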
681 681
682 #ifdef CONFIG_SMP 682 #ifdef CONFIG_SMP
683 /* 683 /*
684 * Check whether we need to change the affinity of the interrupt thread. 684 * Check whether we need to change the affinity of the interrupt thread.
685 */ 685 */
686 static void 686 static void
687 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) 687 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
688 { 688 {
689 cpumask_var_t mask; 689 cpumask_var_t mask;
690 690
691 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags)) 691 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
692 return; 692 return;
693 693
694 /* 694 /*
695 * If we are out of memory, set IRQTF_AFFINITY again and 695 * If we are out of memory, set IRQTF_AFFINITY again and
696 * retry on the next invocation. 696 * retry on the next invocation.
697 */ 697 */
698 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { 698 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
699 set_bit(IRQTF_AFFINITY, &action->thread_flags); 699 set_bit(IRQTF_AFFINITY, &action->thread_flags);
700 return; 700 return;
701 } 701 }
702 702
703 raw_spin_lock_irq(&desc->lock); 703 raw_spin_lock_irq(&desc->lock);
704 cpumask_copy(mask, desc->irq_data.affinity); 704 cpumask_copy(mask, desc->irq_data.affinity);
705 raw_spin_unlock_irq(&desc->lock); 705 raw_spin_unlock_irq(&desc->lock);
706 706
707 set_cpus_allowed_ptr(current, mask); 707 set_cpus_allowed_ptr(current, mask);
708 free_cpumask_var(mask); 708 free_cpumask_var(mask);
709 } 709 }
710 #else 710 #else
711 static inline void 711 static inline void
712 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } 712 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
713 #endif 713 #endif
714 714
715 /* 715 /*
716 * Interrupts which are not explicitly requested as threaded 716 * Interrupts which are not explicitly requested as threaded
717 * interrupts rely on the implicit bh/preempt disable of the hard irq 717 * interrupts rely on the implicit bh/preempt disable of the hard irq
718 * context. So we need to disable bh here to avoid deadlocks and other 718 * context. So we need to disable bh here to avoid deadlocks and other
719 * side effects. 719 * side effects.
720 */ 720 */
721 static void 721 static void
722 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) 722 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
723 { 723 {
724 local_bh_disable(); 724 local_bh_disable();
725 action->thread_fn(action->irq, action->dev_id); 725 action->thread_fn(action->irq, action->dev_id);
726 irq_finalize_oneshot(desc, action, false); 726 irq_finalize_oneshot(desc, action, false);
727 local_bh_enable(); 727 local_bh_enable();
728 } 728 }
729 729
730 /* 730 /*
731 * Interrupts explicitly requested as threaded interrupts want to be 731 * Interrupts explicitly requested as threaded interrupts want to be
732 * preemptible - many of them need to sleep and wait for slow buses to 732 * preemptible - many of them need to sleep and wait for slow buses to
733 * complete. 733 * complete.
734 */ 734 */
735 static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action) 735 static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
736 { 736 {
737 action->thread_fn(action->irq, action->dev_id); 737 action->thread_fn(action->irq, action->dev_id);
738 irq_finalize_oneshot(desc, action, false); 738 irq_finalize_oneshot(desc, action, false);
739 } 739 }
740 740
741 /* 741 /*
742 * Interrupt handler thread 742 * Interrupt handler thread
743 */ 743 */
744 static int irq_thread(void *data) 744 static int irq_thread(void *data)
745 { 745 {
746 static const struct sched_param param = { 746 static const struct sched_param param = {
747 .sched_priority = MAX_USER_RT_PRIO/2, 747 .sched_priority = MAX_USER_RT_PRIO/2,
748 }; 748 };
749 struct irqaction *action = data; 749 struct irqaction *action = data;
750 struct irq_desc *desc = irq_to_desc(action->irq); 750 struct irq_desc *desc = irq_to_desc(action->irq);
751 void (*handler_fn)(struct irq_desc *desc, struct irqaction *action); 751 void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
752 int wake; 752 int wake;
753 753
754 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, 754 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
755 &action->thread_flags)) 755 &action->thread_flags))
756 handler_fn = irq_forced_thread_fn; 756 handler_fn = irq_forced_thread_fn;
757 else 757 else
758 handler_fn = irq_thread_fn; 758 handler_fn = irq_thread_fn;
759 759
760 sched_setscheduler(current, SCHED_FIFO, &param); 760 sched_setscheduler(current, SCHED_FIFO, &param);
761 current->irqaction = action; 761 current->irqaction = action;
762 762
763 while (!irq_wait_for_interrupt(action)) { 763 while (!irq_wait_for_interrupt(action)) {
764 764
765 irq_thread_check_affinity(desc, action); 765 irq_thread_check_affinity(desc, action);
766 766
767 atomic_inc(&desc->threads_active); 767 atomic_inc(&desc->threads_active);
768 768
769 raw_spin_lock_irq(&desc->lock); 769 raw_spin_lock_irq(&desc->lock);
770 if (unlikely(desc->istate & IRQS_DISABLED)) { 770 if (unlikely(desc->istate & IRQS_DISABLED)) {
771 /* 771 /*
772 * CHECKME: We might need a dedicated 772 * CHECKME: We might need a dedicated
773 * IRQ_THREAD_PENDING flag here, which 773 * IRQ_THREAD_PENDING flag here, which
774 * retriggers the thread in check_irq_resend() 774 * retriggers the thread in check_irq_resend()
775 * but AFAICT IRQS_PENDING should be fine as it 775 * but AFAICT IRQS_PENDING should be fine as it
776 * retriggers the interrupt itself --- tglx 776 * retriggers the interrupt itself --- tglx
777 */ 777 */
778 irq_compat_set_pending(desc); 778 irq_compat_set_pending(desc);
779 desc->istate |= IRQS_PENDING; 779 desc->istate |= IRQS_PENDING;
780 raw_spin_unlock_irq(&desc->lock); 780 raw_spin_unlock_irq(&desc->lock);
781 } else { 781 } else {
782 raw_spin_unlock_irq(&desc->lock); 782 raw_spin_unlock_irq(&desc->lock);
783 handler_fn(desc, action); 783 handler_fn(desc, action);
784 } 784 }
785 785
786 wake = atomic_dec_and_test(&desc->threads_active); 786 wake = atomic_dec_and_test(&desc->threads_active);
787 787
788 if (wake && waitqueue_active(&desc->wait_for_threads)) 788 if (wake && waitqueue_active(&desc->wait_for_threads))
789 wake_up(&desc->wait_for_threads); 789 wake_up(&desc->wait_for_threads);
790 } 790 }
791 791
792 /* Prevent a stale desc->threads_oneshot */ 792 /* Prevent a stale desc->threads_oneshot */
793 irq_finalize_oneshot(desc, action, true); 793 irq_finalize_oneshot(desc, action, true);
794 794
795 /* 795 /*
796 * Clear irqaction. Otherwise exit_irq_thread() would complain 796 * Clear irqaction. Otherwise exit_irq_thread() would complain
797 * about an active irq thread going into nirvana. 797 * about an active irq thread going into nirvana.
798 */ 798 */
799 current->irqaction = NULL; 799 current->irqaction = NULL;
800 return 0; 800 return 0;
801 } 801 }
802 802
803 /* 803 /*
804 * Called from do_exit() 804 * Called from do_exit()
805 */ 805 */
806 void exit_irq_thread(void) 806 void exit_irq_thread(void)
807 { 807 {
808 struct task_struct *tsk = current; 808 struct task_struct *tsk = current;
809 struct irq_desc *desc; 809 struct irq_desc *desc;
810 810
811 if (!tsk->irqaction) 811 if (!tsk->irqaction)
812 return; 812 return;
813 813
814 printk(KERN_ERR 814 printk(KERN_ERR
815 "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n", 815 "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
816 tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq); 816 tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
817 817
818 desc = irq_to_desc(tsk->irqaction->irq); 818 desc = irq_to_desc(tsk->irqaction->irq);
819 819
820 /* 820 /*
821 * Prevent a stale desc->threads_oneshot. Must be called 821 * Prevent a stale desc->threads_oneshot. Must be called
822 * before setting the IRQTF_DIED flag. 822 * before setting the IRQTF_DIED flag.
823 */ 823 */
824 irq_finalize_oneshot(desc, tsk->irqaction, true); 824 irq_finalize_oneshot(desc, tsk->irqaction, true);
825 825
826 /* 826 /*
827 * Set the THREAD DIED flag to prevent further wakeups of the 827 * Set the THREAD DIED flag to prevent further wakeups of the
828 * soon to be gone threaded handler. 828 * soon to be gone threaded handler.
829 */ 829 */
830 set_bit(IRQTF_DIED, &tsk->irqaction->flags); 830 set_bit(IRQTF_DIED, &tsk->irqaction->flags);
831 } 831 }
832 832
833 static void irq_setup_forced_threading(struct irqaction *new) 833 static void irq_setup_forced_threading(struct irqaction *new)
834 { 834 {
835 if (!force_irqthreads) 835 if (!force_irqthreads)
836 return; 836 return;
837 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)) 837 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
838 return; 838 return;
839 839
840 new->flags |= IRQF_ONESHOT; 840 new->flags |= IRQF_ONESHOT;
841 841
842 if (!new->thread_fn) { 842 if (!new->thread_fn) {
843 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags); 843 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
844 new->thread_fn = new->handler; 844 new->thread_fn = new->handler;
845 new->handler = irq_default_primary_handler; 845 new->handler = irq_default_primary_handler;
846 } 846 }
847 } 847 }
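As the flag check above shows, a handler can opt out of forced threading with IRQF_NO_THREAD (IRQF_PERCPU and IRQF_ONESHOT also bypass the conversion). A hedged fragment with an invented handler name:

#include <linux/interrupt.h>

static irqreturn_t my_hardirq_only(int irq, void *dev_id)
{
	/* must run in hard interrupt context even when interrupt
	 * threading is forced system-wide */
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *dev)
{
	return request_irq(irq, my_hardirq_only, IRQF_NO_THREAD,
			   "my_dev", dev);
}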
848 848
849 /* 849 /*
850 * Internal function to register an irqaction - typically used to 850 * Internal function to register an irqaction - typically used to
851 * allocate special interrupts that are part of the architecture. 851 * allocate special interrupts that are part of the architecture.
852 */ 852 */
853 static int 853 static int
854 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) 854 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
855 { 855 {
856 struct irqaction *old, **old_ptr; 856 struct irqaction *old, **old_ptr;
857 const char *old_name = NULL; 857 const char *old_name = NULL;
858 unsigned long flags, thread_mask = 0; 858 unsigned long flags, thread_mask = 0;
859 int ret, nested, shared = 0; 859 int ret, nested, shared = 0;
860 cpumask_var_t mask; 860 cpumask_var_t mask;
861 861
862 if (!desc) 862 if (!desc)
863 return -EINVAL; 863 return -EINVAL;
864 864
865 if (desc->irq_data.chip == &no_irq_chip) 865 if (desc->irq_data.chip == &no_irq_chip)
866 return -ENOSYS; 866 return -ENOSYS;
867 /* 867 /*
868 * Some drivers like serial.c use request_irq() heavily, 868 * Some drivers like serial.c use request_irq() heavily,
869 * so we have to be careful not to interfere with a 869 * so we have to be careful not to interfere with a
870 * running system. 870 * running system.
871 */ 871 */
872 if (new->flags & IRQF_SAMPLE_RANDOM) { 872 if (new->flags & IRQF_SAMPLE_RANDOM) {
873 /* 873 /*
874 * This function might sleep, so we want to call it first, 874 * This function might sleep, so we want to call it first,
875 * outside of the atomic block. 875 * outside of the atomic block.
876 * Yes, this might clear the entropy pool if an attempt is 876 * Yes, this might clear the entropy pool if an attempt is
877 * made to load the wrong driver without actually installing 877 * made to load the wrong driver without actually installing
878 * a new handler, but this is hardly a problem, since only 878 * a new handler, but this is hardly a problem, since only
879 * the sysadmin is able to do this. 879 * the sysadmin is able to do this.
880 */ 880 */
881 rand_initialize_irq(irq); 881 rand_initialize_irq(irq);
882 } 882 }
883 883
884 /* 884 /*
885 * Check whether the interrupt nests into another interrupt 885 * Check whether the interrupt nests into another interrupt
886 * thread. 886 * thread.
887 */ 887 */
888 nested = irq_settings_is_nested_thread(desc); 888 nested = irq_settings_is_nested_thread(desc);
889 if (nested) { 889 if (nested) {
890 if (!new->thread_fn) 890 if (!new->thread_fn)
891 return -EINVAL; 891 return -EINVAL;
892 /* 892 /*
893 * Replace the primary handler, which the driver provided 893 * Replace the primary handler, which the driver provided
894 * for non-nested interrupt handling, with the dummy 894 * for non-nested interrupt handling, with the dummy
895 * function which warns when called. 895 * function which warns when called.
896 */ 896 */
897 new->handler = irq_nested_primary_handler; 897 new->handler = irq_nested_primary_handler;
898 } else { 898 } else {
899 irq_setup_forced_threading(new); 899 irq_setup_forced_threading(new);
900 } 900 }
901 901
902 /* 902 /*
903 * Create a handler thread when a thread function is supplied 903 * Create a handler thread when a thread function is supplied
904 * and the interrupt does not nest into another interrupt 904 * and the interrupt does not nest into another interrupt
905 * thread. 905 * thread.
906 */ 906 */
907 if (new->thread_fn && !nested) { 907 if (new->thread_fn && !nested) {
908 struct task_struct *t; 908 struct task_struct *t;
909 909
910 t = kthread_create(irq_thread, new, "irq/%d-%s", irq, 910 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
911 new->name); 911 new->name);
912 if (IS_ERR(t)) 912 if (IS_ERR(t))
913 return PTR_ERR(t); 913 return PTR_ERR(t);
914 /* 914 /*
915 * We keep the reference to the task struct even if 915 * We keep the reference to the task struct even if
916 * the thread dies, to prevent the interrupt code from 916 * the thread dies, to prevent the interrupt code from
917 * referencing an already freed task_struct. 917 * referencing an already freed task_struct.
918 */ 918 */
919 get_task_struct(t); 919 get_task_struct(t);
920 new->thread = t; 920 new->thread = t;
921 } 921 }
922 922
923 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) { 923 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
924 ret = -ENOMEM; 924 ret = -ENOMEM;
925 goto out_thread; 925 goto out_thread;
926 } 926 }
927 927
928 /* 928 /*
929 * The following block of code has to be executed atomically 929 * The following block of code has to be executed atomically
930 */ 930 */
931 raw_spin_lock_irqsave(&desc->lock, flags); 931 raw_spin_lock_irqsave(&desc->lock, flags);
932 old_ptr = &desc->action; 932 old_ptr = &desc->action;
933 old = *old_ptr; 933 old = *old_ptr;
934 if (old) { 934 if (old) {
935 /* 935 /*
936 * Can't share interrupts unless both agree to and are 936 * Can't share interrupts unless both agree to and are
937 * the same type (level, edge, polarity). So both flag 937 * the same type (level, edge, polarity). So both flag
938 * fields must have IRQF_SHARED set and the bits which 938 * fields must have IRQF_SHARED set and the bits which
939 * set the trigger type must match. Also all must 939 * set the trigger type must match. Also all must
940 * agree on ONESHOT. 940 * agree on ONESHOT.
941 */ 941 */
942 if (!((old->flags & new->flags) & IRQF_SHARED) || 942 if (!((old->flags & new->flags) & IRQF_SHARED) ||
943 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) || 943 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
944 ((old->flags ^ new->flags) & IRQF_ONESHOT)) { 944 ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
945 old_name = old->name; 945 old_name = old->name;
946 goto mismatch; 946 goto mismatch;
947 } 947 }
948 948
949 /* All handlers must agree on per-cpuness */ 949 /* All handlers must agree on per-cpuness */
950 if ((old->flags & IRQF_PERCPU) != 950 if ((old->flags & IRQF_PERCPU) !=
951 (new->flags & IRQF_PERCPU)) 951 (new->flags & IRQF_PERCPU))
952 goto mismatch; 952 goto mismatch;
953 953
954 /* add new interrupt at end of irq queue */ 954 /* add new interrupt at end of irq queue */
955 do { 955 do {
956 thread_mask |= old->thread_mask; 956 thread_mask |= old->thread_mask;
957 old_ptr = &old->next; 957 old_ptr = &old->next;
958 old = *old_ptr; 958 old = *old_ptr;
959 } while (old); 959 } while (old);
960 shared = 1; 960 shared = 1;
961 } 961 }
962 962
963 /* 963 /*
964 * Set up the thread mask for this irqaction. Unlikely to have 964 * Set up the thread mask for this irqaction. Unlikely to have
965 * 32 (or 64) irqs sharing one line, but who knows. 965 * 32 (or 64) irqs sharing one line, but who knows.
966 */ 966 */
967 if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) { 967 if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
968 ret = -EBUSY; 968 ret = -EBUSY;
969 goto out_mask; 969 goto out_mask;
970 } 970 }
971 new->thread_mask = 1 << ffz(thread_mask); 971 new->thread_mask = 1 << ffz(thread_mask);
972 972
973 if (!shared) { 973 if (!shared) {
974 irq_chip_set_defaults(desc->irq_data.chip); 974 irq_chip_set_defaults(desc->irq_data.chip);
975 975
976 init_waitqueue_head(&desc->wait_for_threads); 976 init_waitqueue_head(&desc->wait_for_threads);
977 977
978 /* Set up the type (level, edge, polarity) if configured: */ 978 /* Set up the type (level, edge, polarity) if configured: */
979 if (new->flags & IRQF_TRIGGER_MASK) { 979 if (new->flags & IRQF_TRIGGER_MASK) {
980 ret = __irq_set_trigger(desc, irq, 980 ret = __irq_set_trigger(desc, irq,
981 new->flags & IRQF_TRIGGER_MASK); 981 new->flags & IRQF_TRIGGER_MASK);
982 982
983 if (ret) 983 if (ret)
984 goto out_mask; 984 goto out_mask;
985 } 985 }
986 986
987 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ 987 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
988 IRQS_INPROGRESS | IRQS_ONESHOT | \ 988 IRQS_INPROGRESS | IRQS_ONESHOT | \
989 IRQS_WAITING); 989 IRQS_WAITING);
990 990
991 if (new->flags & IRQF_PERCPU) { 991 if (new->flags & IRQF_PERCPU) {
992 irqd_set(&desc->irq_data, IRQD_PER_CPU); 992 irqd_set(&desc->irq_data, IRQD_PER_CPU);
993 irq_settings_set_per_cpu(desc); 993 irq_settings_set_per_cpu(desc);
994 } 994 }
995 995
996 if (new->flags & IRQF_ONESHOT) 996 if (new->flags & IRQF_ONESHOT)
997 desc->istate |= IRQS_ONESHOT; 997 desc->istate |= IRQS_ONESHOT;
998 998
999 if (irq_settings_can_autoenable(desc)) 999 if (irq_settings_can_autoenable(desc))
1000 irq_startup(desc); 1000 irq_startup(desc);
1001 else 1001 else
1002 /* Undo nested disables: */ 1002 /* Undo nested disables: */
1003 desc->depth = 1; 1003 desc->depth = 1;
1004 1004
1005 /* Exclude IRQ from balancing if requested */ 1005 /* Exclude IRQ from balancing if requested */
1006 if (new->flags & IRQF_NOBALANCING) { 1006 if (new->flags & IRQF_NOBALANCING) {
1007 irq_settings_set_no_balancing(desc); 1007 irq_settings_set_no_balancing(desc);
1008 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); 1008 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1009 } 1009 }
1010 1010
1011 /* Set default affinity mask once everything is set up */ 1011 /* Set default affinity mask once everything is set up */
1012 setup_affinity(irq, desc, mask); 1012 setup_affinity(irq, desc, mask);
1013 1013
1014 } else if (new->flags & IRQF_TRIGGER_MASK) { 1014 } else if (new->flags & IRQF_TRIGGER_MASK) {
1015 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; 1015 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1016 unsigned int omsk = irq_settings_get_trigger_mask(desc); 1016 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1017 1017
1018 if (nmsk != omsk) 1018 if (nmsk != omsk)
1019 /* hope the handler works with current trigger mode */ 1019 /* hope the handler works with current trigger mode */
1020 pr_warning("IRQ %d uses trigger mode %u; requested %u\n", 1020 pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
1021 irq, omsk, nmsk); 1021 irq, omsk, nmsk);
1022 } 1022 }
1023 1023
1024 new->irq = irq; 1024 new->irq = irq;
1025 *old_ptr = new; 1025 *old_ptr = new;
1026 1026
1027 /* Reset broken irq detection when installing new handler */ 1027 /* Reset broken irq detection when installing new handler */
1028 desc->irq_count = 0; 1028 desc->irq_count = 0;
1029 desc->irqs_unhandled = 0; 1029 desc->irqs_unhandled = 0;
1030 1030
1031 /* 1031 /*
1032 * Check whether we disabled the irq via the spurious handler 1032 * Check whether we disabled the irq via the spurious handler
1033 * before. Reenable it and give it another chance. 1033 * before. Reenable it and give it another chance.
1034 */ 1034 */
1035 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { 1035 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1036 desc->istate &= ~IRQS_SPURIOUS_DISABLED; 1036 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1037 __enable_irq(desc, irq, false); 1037 __enable_irq(desc, irq, false);
1038 } 1038 }
1039 1039
1040 raw_spin_unlock_irqrestore(&desc->lock, flags); 1040 raw_spin_unlock_irqrestore(&desc->lock, flags);
1041 1041
1042 /* 1042 /*
1043 * Strictly no need to wake it up, but hung_task complains 1043 * Strictly no need to wake it up, but hung_task complains
1044 * when no hard interrupt wakes the thread up. 1044 * when no hard interrupt wakes the thread up.
1045 */ 1045 */
1046 if (new->thread) 1046 if (new->thread)
1047 wake_up_process(new->thread); 1047 wake_up_process(new->thread);
1048 1048
1049 register_irq_proc(irq, desc); 1049 register_irq_proc(irq, desc);
1050 new->dir = NULL; 1050 new->dir = NULL;
1051 register_handler_proc(irq, new); 1051 register_handler_proc(irq, new);
1052 1052
1053 return 0; 1053 return 0;
1054 1054
1055 mismatch: 1055 mismatch:
1056 #ifdef CONFIG_DEBUG_SHIRQ 1056 #ifdef CONFIG_DEBUG_SHIRQ
1057 if (!(new->flags & IRQF_PROBE_SHARED)) { 1057 if (!(new->flags & IRQF_PROBE_SHARED)) {
1058 printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); 1058 printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
1059 if (old_name) 1059 if (old_name)
1060 printk(KERN_ERR "current handler: %s\n", old_name); 1060 printk(KERN_ERR "current handler: %s\n", old_name);
1061 dump_stack(); 1061 dump_stack();
1062 } 1062 }
1063 #endif 1063 #endif
1064 ret = -EBUSY; 1064 ret = -EBUSY;
1065 1065
1066 out_mask: 1066 out_mask:
1067 raw_spin_unlock_irqrestore(&desc->lock, flags);
1067 free_cpumask_var(mask); 1068 free_cpumask_var(mask);
1068 1069
1069 out_thread: 1070 out_thread:
1070 raw_spin_unlock_irqrestore(&desc->lock, flags);
1071 if (new->thread) { 1071 if (new->thread) {
1072 struct task_struct *t = new->thread; 1072 struct task_struct *t = new->thread;
1073 1073
1074 new->thread = NULL; 1074 new->thread = NULL;
1075 if (likely(!test_bit(IRQTF_DIED, &new->thread_flags))) 1075 if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
1076 kthread_stop(t); 1076 kthread_stop(t);
1077 put_task_struct(t); 1077 put_task_struct(t);
1078 } 1078 }
1079 return ret; 1079 return ret;
1080 } 1080 }
1081 1081
1082 /** 1082 /**
1083 * setup_irq - setup an interrupt 1083 * setup_irq - setup an interrupt
1084 * @irq: Interrupt line to setup 1084 * @irq: Interrupt line to setup
1085 * @act: irqaction for the interrupt 1085 * @act: irqaction for the interrupt
1086 * 1086 *
1087 * Used to statically set up interrupts in the early boot process. 1087 * Used to statically set up interrupts in the early boot process.
1088 */ 1088 */
1089 int setup_irq(unsigned int irq, struct irqaction *act) 1089 int setup_irq(unsigned int irq, struct irqaction *act)
1090 { 1090 {
1091 int retval; 1091 int retval;
1092 struct irq_desc *desc = irq_to_desc(irq); 1092 struct irq_desc *desc = irq_to_desc(irq);
1093 1093
1094 chip_bus_lock(desc); 1094 chip_bus_lock(desc);
1095 retval = __setup_irq(irq, desc, act); 1095 retval = __setup_irq(irq, desc, act);
1096 chip_bus_sync_unlock(desc); 1096 chip_bus_sync_unlock(desc);
1097 1097
1098 return retval; 1098 return retval;
1099 } 1099 }
1100 EXPORT_SYMBOL_GPL(setup_irq); 1100 EXPORT_SYMBOL_GPL(setup_irq);
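A hedged sketch of the early-boot pattern this serves, loosely modeled on architecture timer code; all names except setup_irq() and the flags are invented. A static struct irqaction avoids the allocation that request_irq() would need before the slab allocator is up:

#include <linux/interrupt.h>
#include <linux/init.h>

static irqreturn_t board_timer_interrupt(int irq, void *dev_id)
{
	/* ack the timer, bump jiffies, etc. (hypothetical) */
	return IRQ_HANDLED;
}

static struct irqaction board_timer_irq = {
	.name		= "timer",
	.flags		= IRQF_TIMER,
	.handler	= board_timer_interrupt,
};

void __init board_time_init(void)
{
	setup_irq(TIMER_IRQ, &board_timer_irq);	/* TIMER_IRQ: invented */
}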
1101 1101
1102 /* 1102 /*
1103 * Internal function to unregister an irqaction - used to free 1103 * Internal function to unregister an irqaction - used to free
1104 * regular and special interrupts that are part of the architecture. 1104 * regular and special interrupts that are part of the architecture.
1105 */ 1105 */
1106 static struct irqaction *__free_irq(unsigned int irq, void *dev_id) 1106 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1107 { 1107 {
1108 struct irq_desc *desc = irq_to_desc(irq); 1108 struct irq_desc *desc = irq_to_desc(irq);
1109 struct irqaction *action, **action_ptr; 1109 struct irqaction *action, **action_ptr;
1110 unsigned long flags; 1110 unsigned long flags;
1111 1111
1112 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq); 1112 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1113 1113
1114 if (!desc) 1114 if (!desc)
1115 return NULL; 1115 return NULL;
1116 1116
1117 raw_spin_lock_irqsave(&desc->lock, flags); 1117 raw_spin_lock_irqsave(&desc->lock, flags);
1118 1118
1119 /* 1119 /*
1120 * There can be multiple actions per IRQ descriptor, find the right 1120 * There can be multiple actions per IRQ descriptor, find the right
1121 * one based on the dev_id: 1121 * one based on the dev_id:
1122 */ 1122 */
1123 action_ptr = &desc->action; 1123 action_ptr = &desc->action;
1124 for (;;) { 1124 for (;;) {
1125 action = *action_ptr; 1125 action = *action_ptr;
1126 1126
1127 if (!action) { 1127 if (!action) {
1128 WARN(1, "Trying to free already-free IRQ %d\n", irq); 1128 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1129 raw_spin_unlock_irqrestore(&desc->lock, flags); 1129 raw_spin_unlock_irqrestore(&desc->lock, flags);
1130 1130
1131 return NULL; 1131 return NULL;
1132 } 1132 }
1133 1133
1134 if (action->dev_id == dev_id) 1134 if (action->dev_id == dev_id)
1135 break; 1135 break;
1136 action_ptr = &action->next; 1136 action_ptr = &action->next;
1137 } 1137 }
1138 1138
1139 /* Found it - now remove it from the list of entries: */ 1139 /* Found it - now remove it from the list of entries: */
1140 *action_ptr = action->next; 1140 *action_ptr = action->next;
1141 1141
1142 /* Currently used only by UML, might disappear one day: */ 1142 /* Currently used only by UML, might disappear one day: */
1143 #ifdef CONFIG_IRQ_RELEASE_METHOD 1143 #ifdef CONFIG_IRQ_RELEASE_METHOD
1144 if (desc->irq_data.chip->release) 1144 if (desc->irq_data.chip->release)
1145 desc->irq_data.chip->release(irq, dev_id); 1145 desc->irq_data.chip->release(irq, dev_id);
1146 #endif 1146 #endif
1147 1147
1148 /* If this was the last handler, shut down the IRQ line: */ 1148 /* If this was the last handler, shut down the IRQ line: */
1149 if (!desc->action) 1149 if (!desc->action)
1150 irq_shutdown(desc); 1150 irq_shutdown(desc);
1151 1151
1152 #ifdef CONFIG_SMP 1152 #ifdef CONFIG_SMP
1153 /* make sure affinity_hint is cleaned up */ 1153 /* make sure affinity_hint is cleaned up */
1154 if (WARN_ON_ONCE(desc->affinity_hint)) 1154 if (WARN_ON_ONCE(desc->affinity_hint))
1155 desc->affinity_hint = NULL; 1155 desc->affinity_hint = NULL;
1156 #endif 1156 #endif
1157 1157
1158 raw_spin_unlock_irqrestore(&desc->lock, flags); 1158 raw_spin_unlock_irqrestore(&desc->lock, flags);
1159 1159
1160 unregister_handler_proc(irq, action); 1160 unregister_handler_proc(irq, action);
1161 1161
1162 /* Make sure it's not being used on another CPU: */ 1162 /* Make sure it's not being used on another CPU: */
1163 synchronize_irq(irq); 1163 synchronize_irq(irq);
1164 1164
1165 #ifdef CONFIG_DEBUG_SHIRQ 1165 #ifdef CONFIG_DEBUG_SHIRQ
1166 /* 1166 /*
1167 * It's a shared IRQ -- the driver ought to be prepared for an IRQ 1167 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1168 * event to happen even now that it's being freed, so let's make sure that 1168 * event to happen even now that it's being freed, so let's make sure that
1169 * is so by doing an extra call to the handler .... 1169 * is so by doing an extra call to the handler ....
1170 * 1170 *
1171 * ( We do this after actually deregistering it, to make sure that a 1171 * ( We do this after actually deregistering it, to make sure that a
1172 * 'real' IRQ doesn't run in parallel with our fake. ) 1172 * 'real' IRQ doesn't run in parallel with our fake. )
1173 */ 1173 */
1174 if (action->flags & IRQF_SHARED) { 1174 if (action->flags & IRQF_SHARED) {
1175 local_irq_save(flags); 1175 local_irq_save(flags);
1176 action->handler(irq, dev_id); 1176 action->handler(irq, dev_id);
1177 local_irq_restore(flags); 1177 local_irq_restore(flags);
1178 } 1178 }
1179 #endif 1179 #endif
1180 1180
1181 if (action->thread) { 1181 if (action->thread) {
1182 if (!test_bit(IRQTF_DIED, &action->thread_flags)) 1182 if (!test_bit(IRQTF_DIED, &action->thread_flags))
1183 kthread_stop(action->thread); 1183 kthread_stop(action->thread);
1184 put_task_struct(action->thread); 1184 put_task_struct(action->thread);
1185 } 1185 }
1186 1186
1187 return action; 1187 return action;
1188 } 1188 }
1189 1189
1190 /** 1190 /**
1191 * remove_irq - free an interrupt 1191 * remove_irq - free an interrupt
1192 * @irq: Interrupt line to free 1192 * @irq: Interrupt line to free
1193 * @act: irqaction for the interrupt 1193 * @act: irqaction for the interrupt
1194 * 1194 *
1195 * Used to remove interrupts statically set up by the early boot process. 1195 * Used to remove interrupts statically set up by the early boot process.
1196 */ 1196 */
1197 void remove_irq(unsigned int irq, struct irqaction *act) 1197 void remove_irq(unsigned int irq, struct irqaction *act)
1198 { 1198 {
1199 __free_irq(irq, act->dev_id); 1199 __free_irq(irq, act->dev_id);
1200 } 1200 }
1201 EXPORT_SYMBOL_GPL(remove_irq); 1201 EXPORT_SYMBOL_GPL(remove_irq);
1202 1202
1203 /** 1203 /**
1204 * free_irq - free an interrupt allocated with request_irq 1204 * free_irq - free an interrupt allocated with request_irq
1205 * @irq: Interrupt line to free 1205 * @irq: Interrupt line to free
1206 * @dev_id: Device identity to free 1206 * @dev_id: Device identity to free
1207 * 1207 *
1208 * Remove an interrupt handler. The handler is removed and if the 1208 * Remove an interrupt handler. The handler is removed and if the
1209 * interrupt line is no longer in use by any driver it is disabled. 1209 * interrupt line is no longer in use by any driver it is disabled.
1210 * On a shared IRQ the caller must ensure the interrupt is disabled 1210 * On a shared IRQ the caller must ensure the interrupt is disabled
1211 * on the card it drives before calling this function. The function 1211 * on the card it drives before calling this function. The function
1212 * does not return until any executing interrupts for this IRQ 1212 * does not return until any executing interrupts for this IRQ
1213 * have completed. 1213 * have completed.
1214 * 1214 *
1215 * This function must not be called from interrupt context. 1215 * This function must not be called from interrupt context.
1216 */ 1216 */
1217 void free_irq(unsigned int irq, void *dev_id) 1217 void free_irq(unsigned int irq, void *dev_id)
1218 { 1218 {
1219 struct irq_desc *desc = irq_to_desc(irq); 1219 struct irq_desc *desc = irq_to_desc(irq);
1220 1220
1221 if (!desc) 1221 if (!desc)
1222 return; 1222 return;
1223 1223
1224 #ifdef CONFIG_SMP 1224 #ifdef CONFIG_SMP
1225 if (WARN_ON(desc->affinity_notify)) 1225 if (WARN_ON(desc->affinity_notify))
1226 desc->affinity_notify = NULL; 1226 desc->affinity_notify = NULL;
1227 #endif 1227 #endif
1228 1228
1229 chip_bus_lock(desc); 1229 chip_bus_lock(desc);
1230 kfree(__free_irq(irq, dev_id)); 1230 kfree(__free_irq(irq, dev_id));
1231 chip_bus_sync_unlock(desc); 1231 chip_bus_sync_unlock(desc);
1232 } 1232 }
1233 EXPORT_SYMBOL(free_irq); 1233 EXPORT_SYMBOL(free_irq);
1234 1234
1235 /** 1235 /**
1236 * request_threaded_irq - allocate an interrupt line 1236 * request_threaded_irq - allocate an interrupt line
1237 * @irq: Interrupt line to allocate 1237 * @irq: Interrupt line to allocate
1238 * @handler: Function to be called when the IRQ occurs. 1238 * @handler: Function to be called when the IRQ occurs.
1239 * Primary handler for threaded interrupts 1239 * Primary handler for threaded interrupts
1240 * If NULL and thread_fn != NULL the default 1240 * If NULL and thread_fn != NULL the default
1241 * primary handler is installed 1241 * primary handler is installed
1242 * @thread_fn: Function called from the irq handler thread 1242 * @thread_fn: Function called from the irq handler thread
1243 * If NULL, no irq thread is created 1243 * If NULL, no irq thread is created
1244 * @irqflags: Interrupt type flags 1244 * @irqflags: Interrupt type flags
1245 * @devname: An ascii name for the claiming device 1245 * @devname: An ascii name for the claiming device
1246 * @dev_id: A cookie passed back to the handler function 1246 * @dev_id: A cookie passed back to the handler function
1247 * 1247 *
1248 * This call allocates interrupt resources and enables the 1248 * This call allocates interrupt resources and enables the
1249 * interrupt line and IRQ handling. From the point this 1249 * interrupt line and IRQ handling. From the point this
1250 * call is made your handler function may be invoked. Since 1250 * call is made your handler function may be invoked. Since
1251 * your handler function must clear any interrupt the board 1251 * your handler function must clear any interrupt the board
1252 * raises, you must take care both to initialise your hardware 1252 * raises, you must take care both to initialise your hardware
1253 * and to set up the interrupt handler in the right order. 1253 * and to set up the interrupt handler in the right order.
1254 * 1254 *
1255 * If you want to set up a threaded irq handler for your device 1255 * If you want to set up a threaded irq handler for your device
1256 * then you need to supply @handler and @thread_fn. @handler is 1256 * then you need to supply @handler and @thread_fn. @handler is
1257 * still called in hard interrupt context and has to check 1257 * still called in hard interrupt context and has to check
1258 * whether the interrupt originates from the device. If so, it 1258 * whether the interrupt originates from the device. If so, it
1259 * needs to disable the interrupt on the device and return 1259 * needs to disable the interrupt on the device and return
1260 * IRQ_WAKE_THREAD which will wake up the handler thread and run 1260 * IRQ_WAKE_THREAD which will wake up the handler thread and run
1261 * @thread_fn. This split handler design is necessary to support 1261 * @thread_fn. This split handler design is necessary to support
1262 * shared interrupts. 1262 * shared interrupts.
1263 * 1263 *
1264 * Dev_id must be globally unique. Normally the address of the 1264 * Dev_id must be globally unique. Normally the address of the
1265 * device data structure is used as the cookie. Since the handler 1265 * device data structure is used as the cookie. Since the handler
1266 * receives this value it makes sense to use it. 1266 * receives this value it makes sense to use it.
1267 * 1267 *
1268 * If your interrupt is shared you must pass a non NULL dev_id 1268 * If your interrupt is shared you must pass a non NULL dev_id
1269 * as this is required when freeing the interrupt. 1269 * as this is required when freeing the interrupt.
1270 * 1270 *
1271 * Flags: 1271 * Flags:
1272 * 1272 *
1273 * IRQF_SHARED Interrupt is shared 1273 * IRQF_SHARED Interrupt is shared
1274 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy 1274 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
1275 * IRQF_TRIGGER_* Specify active edge(s) or level 1275 * IRQF_TRIGGER_* Specify active edge(s) or level
1276 * 1276 *
1277 */ 1277 */
1278 int request_threaded_irq(unsigned int irq, irq_handler_t handler, 1278 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1279 irq_handler_t thread_fn, unsigned long irqflags, 1279 irq_handler_t thread_fn, unsigned long irqflags,
1280 const char *devname, void *dev_id) 1280 const char *devname, void *dev_id)
1281 { 1281 {
1282 struct irqaction *action; 1282 struct irqaction *action;
1283 struct irq_desc *desc; 1283 struct irq_desc *desc;
1284 int retval; 1284 int retval;
1285 1285
1286 /* 1286 /*
1287 * Sanity-check: shared interrupts must pass in a real dev-ID, 1287 * Sanity-check: shared interrupts must pass in a real dev-ID,
1288 * otherwise we'll have trouble later trying to figure out 1288 * otherwise we'll have trouble later trying to figure out
1289 * which interrupt is which (messes up the interrupt freeing 1289 * which interrupt is which (messes up the interrupt freeing
1290 * logic etc). 1290 * logic etc).
1291 */ 1291 */
1292 if ((irqflags & IRQF_SHARED) && !dev_id) 1292 if ((irqflags & IRQF_SHARED) && !dev_id)
1293 return -EINVAL; 1293 return -EINVAL;
1294 1294
1295 desc = irq_to_desc(irq); 1295 desc = irq_to_desc(irq);
1296 if (!desc) 1296 if (!desc)
1297 return -EINVAL; 1297 return -EINVAL;
1298 1298
1299 if (!irq_settings_can_request(desc)) 1299 if (!irq_settings_can_request(desc))
1300 return -EINVAL; 1300 return -EINVAL;
1301 1301
1302 if (!handler) { 1302 if (!handler) {
1303 if (!thread_fn) 1303 if (!thread_fn)
1304 return -EINVAL; 1304 return -EINVAL;
1305 handler = irq_default_primary_handler; 1305 handler = irq_default_primary_handler;
1306 } 1306 }
1307 1307
1308 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); 1308 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1309 if (!action) 1309 if (!action)
1310 return -ENOMEM; 1310 return -ENOMEM;
1311 1311
1312 action->handler = handler; 1312 action->handler = handler;
1313 action->thread_fn = thread_fn; 1313 action->thread_fn = thread_fn;
1314 action->flags = irqflags; 1314 action->flags = irqflags;
1315 action->name = devname; 1315 action->name = devname;
1316 action->dev_id = dev_id; 1316 action->dev_id = dev_id;
1317 1317
1318 chip_bus_lock(desc); 1318 chip_bus_lock(desc);
1319 retval = __setup_irq(irq, desc, action); 1319 retval = __setup_irq(irq, desc, action);
1320 chip_bus_sync_unlock(desc); 1320 chip_bus_sync_unlock(desc);
1321 1321
1322 if (retval) 1322 if (retval)
1323 kfree(action); 1323 kfree(action);
1324 1324
1325 #ifdef CONFIG_DEBUG_SHIRQ_FIXME 1325 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1326 if (!retval && (irqflags & IRQF_SHARED)) { 1326 if (!retval && (irqflags & IRQF_SHARED)) {
1327 /* 1327 /*
1328 * It's a shared IRQ -- the driver ought to be prepared for it 1328 * It's a shared IRQ -- the driver ought to be prepared for it
1329 * to happen immediately, so let's make sure.... 1329 * to happen immediately, so let's make sure....
1330 * We disable the irq to make sure that a 'real' IRQ doesn't 1330 * We disable the irq to make sure that a 'real' IRQ doesn't
1331 * run in parallel with our fake. 1331 * run in parallel with our fake.
1332 */ 1332 */
1333 unsigned long flags; 1333 unsigned long flags;
1334 1334
1335 disable_irq(irq); 1335 disable_irq(irq);
1336 local_irq_save(flags); 1336 local_irq_save(flags);
1337 1337
1338 handler(irq, dev_id); 1338 handler(irq, dev_id);
1339 1339
1340 local_irq_restore(flags); 1340 local_irq_restore(flags);
1341 enable_irq(irq); 1341 enable_irq(irq);
1342 } 1342 }
1343 #endif 1343 #endif
1344 return retval; 1344 return retval;
1345 } 1345 }
1346 EXPORT_SYMBOL(request_threaded_irq); 1346 EXPORT_SYMBOL(request_threaded_irq);
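A hedged sketch of the split-handler design described in the kernel-doc above; the my_dev structure and its helpers are invented, and stubbed here so the sketch is self-contained:

#include <linux/interrupt.h>

struct my_dev { unsigned int irq; /* ... */ };	/* invented device */

/* Invented device helpers, stubbed for the sketch. */
static bool my_dev_irq_pending(struct my_dev *dev) { return true; }
static void my_dev_mask_irq(struct my_dev *dev) { }
static void my_dev_unmask_irq(struct my_dev *dev) { }
static void my_dev_handle_events(struct my_dev *dev) { }

static irqreturn_t my_quick_check(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!my_dev_irq_pending(dev))	/* not ours on a shared line */
		return IRQ_NONE;

	my_dev_mask_irq(dev);		/* quiesce the device ... */
	return IRQ_WAKE_THREAD;		/* ... and defer to the thread */
}

static irqreturn_t my_slow_work(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	my_dev_handle_events(dev);	/* may sleep, e.g. slow bus I/O */
	my_dev_unmask_irq(dev);
	return IRQ_HANDLED;
}

static int my_probe(struct my_dev *dev)
{
	return request_threaded_irq(dev->irq, my_quick_check, my_slow_work,
				    IRQF_SHARED, "my_dev", dev);
}

On teardown, the matching free_irq(dev->irq, dev) removes the action and, per __free_irq() above, also stops the handler thread.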
1347 1347
1348 /** 1348 /**
1349 * request_any_context_irq - allocate an interrupt line 1349 * request_any_context_irq - allocate an interrupt line
1350 * @irq: Interrupt line to allocate 1350 * @irq: Interrupt line to allocate
1351 * @handler: Function to be called when the IRQ occurs. 1351 * @handler: Function to be called when the IRQ occurs.
1352 * Threaded handler for threaded interrupts. 1352 * Threaded handler for threaded interrupts.
1353 * @flags: Interrupt type flags 1353 * @flags: Interrupt type flags
1354 * @name: An ascii name for the claiming device 1354 * @name: An ascii name for the claiming device
1355 * @dev_id: A cookie passed back to the handler function 1355 * @dev_id: A cookie passed back to the handler function
1356 * 1356 *
1357 * This call allocates interrupt resources and enables the 1357 * This call allocates interrupt resources and enables the
1358 * interrupt line and IRQ handling. It selects either a 1358 * interrupt line and IRQ handling. It selects either a
1359 * hardirq or threaded handling method depending on the 1359 * hardirq or threaded handling method depending on the
1360 * context. 1360 * context.
1361 * 1361 *
1362 * On failure, it returns a negative value. On success, 1362 * On failure, it returns a negative value. On success,
1363 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. 1363 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1364 */ 1364 */
1365 int request_any_context_irq(unsigned int irq, irq_handler_t handler, 1365 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1366 unsigned long flags, const char *name, void *dev_id) 1366 unsigned long flags, const char *name, void *dev_id)
1367 { 1367 {
1368 struct irq_desc *desc = irq_to_desc(irq); 1368 struct irq_desc *desc = irq_to_desc(irq);
1369 int ret; 1369 int ret;
1370 1370
1371 if (!desc) 1371 if (!desc)
1372 return -EINVAL; 1372 return -EINVAL;
1373 1373
1374 if (irq_settings_is_nested_thread(desc)) { 1374 if (irq_settings_is_nested_thread(desc)) {
1375 ret = request_threaded_irq(irq, NULL, handler, 1375 ret = request_threaded_irq(irq, NULL, handler,
1376 flags, name, dev_id); 1376 flags, name, dev_id);
1377 return !ret ? IRQC_IS_NESTED : ret; 1377 return !ret ? IRQC_IS_NESTED : ret;
1378 } 1378 }
1379 1379
1380 ret = request_irq(irq, handler, flags, name, dev_id); 1380 ret = request_irq(irq, handler, flags, name, dev_id);
1381 return !ret ? IRQC_IS_HARDIRQ : ret; 1381 return !ret ? IRQC_IS_HARDIRQ : ret;
1382 } 1382 }
1383 EXPORT_SYMBOL_GPL(request_any_context_irq); 1383 EXPORT_SYMBOL_GPL(request_any_context_irq);
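A hedged caller-side sketch (handler and names invented) for a driver that may sit behind a nested/threaded irq chip, e.g. a GPIO expander. On success the return value tells which context the handler will run in; callers that do not care can treat any non-negative value as success:

#include <linux/interrupt.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_request(unsigned int irq, void *dev)
{
	int ret = request_any_context_irq(irq, my_handler,
					  IRQF_TRIGGER_LOW, "my_dev", dev);
	if (ret < 0)
		return ret;
	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED */
	return 0;
}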
1 /* 1 /*
2 * linux/kernel/irq/proc.c 2 * linux/kernel/irq/proc.c
3 * 3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar 4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
5 * 5 *
6 * This file contains the /proc/irq/ handling code. 6 * This file contains the /proc/irq/ handling code.
7 */ 7 */
8 8
9 #include <linux/irq.h> 9 #include <linux/irq.h>
10 #include <linux/gfp.h> 10 #include <linux/gfp.h>
11 #include <linux/proc_fs.h> 11 #include <linux/proc_fs.h>
12 #include <linux/seq_file.h> 12 #include <linux/seq_file.h>
13 #include <linux/interrupt.h> 13 #include <linux/interrupt.h>
14 #include <linux/kernel_stat.h> 14 #include <linux/kernel_stat.h>
15 15
16 #include "internals.h" 16 #include "internals.h"
17 17
18 static struct proc_dir_entry *root_irq_dir; 18 static struct proc_dir_entry *root_irq_dir;
19 19
20 #ifdef CONFIG_SMP 20 #ifdef CONFIG_SMP
21 21
22 static int irq_affinity_proc_show(struct seq_file *m, void *v) 22 static int irq_affinity_proc_show(struct seq_file *m, void *v)
23 { 23 {
24 struct irq_desc *desc = irq_to_desc((long)m->private); 24 struct irq_desc *desc = irq_to_desc((long)m->private);
25 const struct cpumask *mask = desc->irq_data.affinity; 25 const struct cpumask *mask = desc->irq_data.affinity;
26 26
27 #ifdef CONFIG_GENERIC_PENDING_IRQ 27 #ifdef CONFIG_GENERIC_PENDING_IRQ
28 if (irqd_is_setaffinity_pending(&desc->irq_data)) 28 if (irqd_is_setaffinity_pending(&desc->irq_data))
29 mask = desc->pending_mask; 29 mask = desc->pending_mask;
30 #endif 30 #endif
31 seq_cpumask(m, mask); 31 seq_cpumask(m, mask);
32 seq_putc(m, '\n'); 32 seq_putc(m, '\n');
33 return 0; 33 return 0;
34 } 34 }
35 35
36 static int irq_affinity_hint_proc_show(struct seq_file *m, void *v) 36 static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
37 { 37 {
38 struct irq_desc *desc = irq_to_desc((long)m->private); 38 struct irq_desc *desc = irq_to_desc((long)m->private);
39 unsigned long flags; 39 unsigned long flags;
40 cpumask_var_t mask; 40 cpumask_var_t mask;
41 41
42 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) 42 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
43 return -ENOMEM; 43 return -ENOMEM;
44 44
45 raw_spin_lock_irqsave(&desc->lock, flags); 45 raw_spin_lock_irqsave(&desc->lock, flags);
46 if (desc->affinity_hint) 46 if (desc->affinity_hint)
47 cpumask_copy(mask, desc->affinity_hint); 47 cpumask_copy(mask, desc->affinity_hint);
48 raw_spin_unlock_irqrestore(&desc->lock, flags); 48 raw_spin_unlock_irqrestore(&desc->lock, flags);
49 49
50 seq_cpumask(m, mask); 50 seq_cpumask(m, mask);
51 seq_putc(m, '\n'); 51 seq_putc(m, '\n');
52 free_cpumask_var(mask); 52 free_cpumask_var(mask);
53 53
54 return 0; 54 return 0;
55 } 55 }
56 56
57 #ifndef is_affinity_mask_valid 57 #ifndef is_affinity_mask_valid
58 #define is_affinity_mask_valid(val) 1 58 #define is_affinity_mask_valid(val) 1
59 #endif 59 #endif
60 60
61 int no_irq_affinity; 61 int no_irq_affinity;
62 static ssize_t irq_affinity_proc_write(struct file *file, 62 static ssize_t irq_affinity_proc_write(struct file *file,
63 const char __user *buffer, size_t count, loff_t *pos) 63 const char __user *buffer, size_t count, loff_t *pos)
64 { 64 {
65 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data; 65 unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
66 cpumask_var_t new_value; 66 cpumask_var_t new_value;
67 int err; 67 int err;
68 68
69 if (!irq_can_set_affinity(irq) || no_irq_affinity) 69 if (!irq_can_set_affinity(irq) || no_irq_affinity)
70 return -EIO; 70 return -EIO;
71 71
72 if (!alloc_cpumask_var(&new_value, GFP_KERNEL)) 72 if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
73 return -ENOMEM; 73 return -ENOMEM;
74 74
75 err = cpumask_parse_user(buffer, count, new_value); 75 err = cpumask_parse_user(buffer, count, new_value);
76 if (err) 76 if (err)
77 goto free_cpumask; 77 goto free_cpumask;
78 78
79 if (!is_affinity_mask_valid(new_value)) { 79 if (!is_affinity_mask_valid(new_value)) {
80 err = -EINVAL; 80 err = -EINVAL;
81 goto free_cpumask; 81 goto free_cpumask;
82 } 82 }
83 83
84 /* 84 /*
85 * Do not allow disabling IRQs completely - it's too easy a 85 * Do not allow disabling IRQs completely - it's too easy a
86 * way to accidentally make the system unusable :-) At least 86 * way to accidentally make the system unusable :-) At least
87 * one online CPU still has to be targeted. 87 * one online CPU still has to be targeted.
88 */ 88 */
89 if (!cpumask_intersects(new_value, cpu_online_mask)) { 89 if (!cpumask_intersects(new_value, cpu_online_mask)) {
90 /* Special case for empty set - allow the architecture 90 /* Special case for empty set - allow the architecture
91 code to set default SMP affinity. */ 91 code to set default SMP affinity. */
92 err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count; 92 err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
93 } else { 93 } else {
94 irq_set_affinity(irq, new_value); 94 irq_set_affinity(irq, new_value);
95 err = count; 95 err = count;
96 } 96 }
97 97
98 free_cpumask: 98 free_cpumask:
99 free_cpumask_var(new_value); 99 free_cpumask_var(new_value);
100 return err; 100 return err;
101 } 101 }
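The write handler above backs /proc/irq/<N>/smp_affinity, which takes a hex cpumask (parsed by cpumask_parse_user()). A small userspace sketch; the IRQ number and mask are examples only, the write needs root, and it fails with EIO if the IRQ's affinity cannot be set:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "1\n");	/* bit 0 => pin IRQ 19 to CPU0 */
	return fclose(f) ? 1 : 0;
}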

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
}

static const struct file_operations irq_affinity_proc_fops = {
	.open = irq_affinity_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
	.open = irq_affinity_hint_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, irq_default_affinity);
	seq_putc(m, '\n');
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable by accident :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE(inode)->data);
}

static const struct file_operations default_affinity_proc_fops = {
	.open = default_affinity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = default_affinity_write,
};
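
The text format accepted by cpumask_parse_user() in both writers above is comma-separated 32-bit hex words, most significant word first. A small sketch (pure illustration, no kernel interfaces involved) of the encoding for CPUs {0, 1, 34} on a 64-CPU machine:

	#include <stdio.h>

	int main(void)
	{
		/* set bits 0, 1 and 34 */
		unsigned long long cpus = (1ULL << 0) | (1ULL << 1) | (1ULL << 34);

		printf("%08llx,%08llx\n", cpus >> 32, cpus & 0xffffffffULL);
		/* prints: 00000004,00000003 */
		return 0;
	}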

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", desc->irq_data.node);
	return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE(inode)->data);
}

static const struct file_operations irq_node_proc_fops = {
	.open = irq_node_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
}

static const struct file_operations irq_spurious_proc_fops = {
	.open = irq_spurious_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
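
Reading the file back, userspace sees the three-line format that irq_spurious_proc_show() emits. A hedged sketch of a consumer (the IRQ number is again a placeholder):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/irq/19/spurious", "r");	/* hypothetical IRQ */
		unsigned int count, unhandled, last_ms;

		if (!f)
			return 1;
		/* matches "count %u\nunhandled %u\nlast_unhandled %u ms\n" */
		if (fscanf(f, "count %u unhandled %u last_unhandled %u ms",
			   &count, &unhandled, &last_ms) == 3)
			printf("%u interrupts, %u unhandled\n", count, unhandled);
		fclose(f);
		return 0;
	}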

#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action; action; action = action->next) {
		if ((action != new_action) && action->name &&
		    !strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name[MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
	    !name_unique(irq, action))
		return;

	memset(name, 0, MAX_NAMELEN);
	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}
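
The action->name used for the directory is simply the string a driver passed to request_irq(); name_unique() above keeps two same-named actions on a shared line from colliding. A hypothetical driver fragment (the handler, the "mydev" name and the dev pointer are invented for the example):

	#include <linux/interrupt.h>

	static irqreturn_t my_handler(int irq, void *dev_id)
	{
		/* acknowledge the hardware here */
		return IRQ_HANDLED;
	}

	static int my_setup_irq(unsigned int irq, void *dev)
	{
		/* "mydev" becomes action->name and, if unique on this line,
		 * names the /proc/irq/<irq>/mydev/ directory created above. */
		return request_irq(irq, my_handler, IRQF_SHARED, "mydev", dev);
	}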

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip) || desc->dir)
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%u", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		return;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0600, desc->dir,
			 &irq_affinity_proc_fops, (void *)(long)irq);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_data("affinity_hint", 0400, desc->dir,
			 &irq_affinity_hint_proc_fops, (void *)(long)irq);

	proc_create_data("node", 0444, desc->dir,
			 &irq_node_proc_fops, (void *)(long)irq);
#endif

	proc_create_data("spurious", 0444, desc->dir,
			 &irq_spurious_proc_fops, (void *)(long)irq);
}
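
Taken together, a successful registration for, say, IRQ 19 on an SMP build yields roughly this layout (illustrative, not verbatim output; mydev/ is the handler directory from the fragment above):

	/proc/irq/19/
		smp_affinity	(0600, read/write)
		affinity_hint	(0400, read-only)
		node		(0444, read-only)
		spurious	(0444, read-only)
		mydev/		(added later by register_handler_proc())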

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("node", desc->dir);
#endif
	remove_proc_entry("spurious", desc->dir);

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	if (action->dir) {
		struct irq_desc *desc = irq_to_desc(irq);

		remove_proc_entry(action->dir->name, desc->dir);
	}
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0600, NULL,
		    &default_affinity_proc_fops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc)
			continue;

		register_irq_proc(irq, desc);
	}
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;

	if (i == nr_irqs)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
	seq_printf(p, " %8s", desc->irq_data.chip->name);
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}
#endif
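
On an architecture that selects GENERIC_IRQ_SHOW (as CRIS now does), this routine backs /proc/interrupts. On a two-CPU machine a row looks roughly like the following; the spacing is approximate and the chip and device names are invented:

	           CPU0       CPU1
	  3:       1520          9   timer-chip  timer
	 19:         42          0   io-chip     eth0

Note the desc->name check added here: a descriptor registered without a name no longer gets a "-(null)" suffix printed blindly after the chip name.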