kernel/irq/manage.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>

#include "internals.h"

#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
__read_mostly bool force_irqthreads;
EXPORT_SYMBOL_GPL(force_irqthreads);

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif

static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
{
	struct irq_data *irqd = irq_desc_get_irq_data(desc);
	bool inprogress;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);

		/*
		 * If requested and supported, check at the chip whether it
		 * is in flight at the hardware level, i.e. already pending
		 * in a CPU and waiting for service and acknowledge.
		 */
		if (!inprogress && sync_chip) {
			/*
			 * Ignore the return code. inprogress is only updated
			 * when the chip supports it.
			 */
			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
						&inprogress);
		}
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);
}

/**
 * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this
 * function while holding a resource the IRQ handler may need you
 * will deadlock. It does not take associated threaded handlers
 * into account.
 *
 * Do not use this for shutdown scenarios where you must be sure
 * that all parts (hardirq and threaded handler) have completed.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 *
 * It does not check whether there is an interrupt in flight at the
 * hardware level, but not serviced yet, as this might deadlock when
 * called with interrupts disabled and the target CPU of the interrupt
 * is the current CPU.
 */
bool synchronize_hardirq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, false);
		return !atomic_read(&desc->threads_active);
	}

	return true;
}
EXPORT_SYMBOL(synchronize_hardirq);

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * Can only be called from preemptible code as it might sleep when
 * an interrupt thread is associated to @irq.
 *
 * It optionally makes sure (when the irq chip supports that method)
 * that the interrupt is not pending in any CPU and waiting for
 * service.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		__synchronize_hardirq(desc, true);
		/*
		 * We made sure that no hardirq handler is
		 * running. Now verify that no threaded handlers are
		 * active.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}
EXPORT_SYMBOL(synchronize_irq);

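/*
 * Usage sketch (hypothetical driver teardown; "dev" and its members are
 * made up for illustration): first stop the device from raising the
 * interrupt, then wait for handlers already in flight before freeing
 * state they might still touch:
 *
 *	writel(0, dev->regs + FOO_IRQ_ENABLE);	// device-side mask
 *	synchronize_irq(dev->irq);		// hardirq and thread done
 *	kfree(dev->dma_buf);
 */
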
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

static bool __irq_can_set_affinity(struct irq_desc *desc)
{
	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return false;
	return true;
}

/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq: Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
	return __irq_can_set_affinity(irq_to_desc(irq));
}

/**
 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 * @irq: Interrupt to check
 *
 * Like irq_can_set_affinity() above, but additionally checks for the
 * AFFINITY_MANAGED flag.
 */
bool irq_can_set_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __irq_can_set_affinity(desc) &&
		!irqd_affinity_is_managed(&desc->irq_data);
}

/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action;

	for_each_action_of_desc(desc, action)
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static void irq_validate_effective_affinity(struct irq_data *data)
{
	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);

	if (!cpumask_empty(m))
		return;
	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
		     chip->name, data->irq);
}

static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask)
{
	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
}
#else
static inline void irq_validate_effective_affinity(struct irq_data *data) { }
static inline void irq_init_effective_affinity(struct irq_data *data,
					       const struct cpumask *mask) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	/*
	 * If this is a managed interrupt and housekeeping is enabled on
	 * it, check whether the requested affinity mask intersects with
	 * a housekeeping CPU. If so, then remove the isolated CPUs from
	 * the mask and just keep the housekeeping CPU(s). This prevents
	 * the affinity setter from routing the interrupt to an isolated
	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
	 * interrupts on an isolated one.
	 *
	 * If the masks do not intersect or include online CPU(s) then
	 * keep the requested mask. The isolated target CPUs are only
	 * receiving interrupts when the I/O operation was submitted
	 * directly from them.
	 *
	 * If all housekeeping CPUs in the affinity mask are offline, the
	 * interrupt will be migrated by the CPU hotplug code once a
	 * housekeeping CPU which belongs to the affinity mask comes
	 * online.
	 */
	if (irqd_affinity_is_managed(data) &&
	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
		const struct cpumask *hk_mask, *prog_mask;

		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
		static struct cpumask tmp_mask;

		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);

		raw_spin_lock(&tmp_mask_lock);
		cpumask_and(&tmp_mask, mask, hk_mask);
		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
			prog_mask = mask;
		else
			prog_mask = &tmp_mask;
		ret = chip->irq_set_affinity(data, prog_mask, force);
		raw_spin_unlock(&tmp_mask_lock);
	} else {
		ret = chip->irq_set_affinity(data, mask, force);
	}
	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		cpumask_copy(desc->irq_common_data.affinity, mask);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_validate_effective_affinity(data);
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	irqd_set_move_pending(data);
	irq_copy_pending(desc, dest);
	return 0;
}
#else
static inline int irq_set_affinity_pending(struct irq_data *data,
					   const struct cpumask *dest)
{
	return -EBUSY;
}
#endif

static int irq_try_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	int ret = irq_do_set_affinity(data, dest, force);

	/*
	 * In case that the underlying vector management is busy and the
	 * architecture supports the generic pending mechanism then utilize
	 * this to avoid returning an error to user space.
	 */
	if (ret == -EBUSY && !force)
		ret = irq_set_affinity_pending(data, dest);
	return ret;
}

static bool irq_set_affinity_deactivated(struct irq_data *data,
					 const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);

	/*
	 * Handle irq chips which can handle affinity only in activated
	 * state correctly
	 *
	 * If the interrupt is not yet activated, just store the affinity
	 * mask and do not call the chip driver at all. On activation the
	 * driver has to make sure anyway that the interrupt is in a
	 * usable state so startup works.
	 */
	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
		return false;

	cpumask_copy(desc->irq_common_data.affinity, mask);
	irq_init_effective_affinity(data, mask);
	irqd_set(data, IRQD_AFFINITY_SET);
	return true;
}

int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
			    bool force)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_set_affinity_deactivated(data, mask, force))
		return 0;

	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
		ret = irq_try_set_affinity(data, mask, force);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		if (!schedule_work(&desc->affinity_notify->work)) {
			/* Work was already scheduled, drop our extra ref */
			kref_put(&desc->affinity_notify->kref,
				 desc->affinity_notify->release);
		}
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	/* set the initial affinity to prevent every interrupt being on CPU0 */
	if (m)
		__irq_set_affinity(irq, m, false);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);

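/*
 * Usage sketch (hypothetical multiqueue driver; "q" and "nr_queues" are
 * made up, and nr_queues is assumed not to exceed the online CPUs):
 * hint one CPU per queue so userspace irqbalance, which reads
 * /proc/irq/<n>/affinity_hint, can follow the driver's layout:
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(q[i].irq, cpumask_of(i));
 *
 * The hint must be cleared with irq_set_affinity_hint(irq, NULL) before
 * the cpumask it points to goes away or the irq is freed.
 */
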
static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_common_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq: Interrupt for which to enable/disable notification
 * @notify: Context for notification, or %NULL to disable
 *	    notification. Function pointers must be initialised;
 *	    the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc || desc->istate & IRQS_NMI)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify) {
		if (cancel_work_sync(&old_notify->work)) {
			/* Pending work had a ref, put that one too */
			kref_put(&old_notify->kref, old_notify->release);
		}
		kref_put(&old_notify->kref, old_notify->release);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
int irq_setup_affinity(struct irq_desc *desc)
{
	struct cpumask *set = irq_default_affinity;
	int ret, node = irq_desc_get_node(desc);

	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!__irq_can_set_affinity(desc))
		return 0;

	raw_spin_lock(&mask_lock);
	/*
	 * Preserve the managed affinity setting and a userspace affinity
	 * setup, but make sure that one of the targets is online.
	 */
	if (irqd_affinity_is_managed(&desc->irq_data) ||
	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_common_data.affinity,
				       cpu_online_mask))
			set = desc->irq_common_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(&mask, cpu_online_mask, set);
	if (cpumask_empty(&mask))
		cpumask_copy(&mask, cpu_online_mask);

	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(&mask, nodemask))
			cpumask_and(&mask, &mask, nodemask);
	}
	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
	raw_spin_unlock(&mask_lock);
	return ret;
}
#else
/* Wrapper for ALPHA specific affinity selector magic */
int irq_setup_affinity(struct irq_desc *desc)
{
	return irq_select_affinity(irq_desc_get_irq(desc));
}
#endif /* CONFIG_AUTO_IRQ_AFFINITY */
#endif /* CONFIG_SMP */

/**
 * irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 * @irq: interrupt number to set affinity
 * @vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 *	       specific data for percpu_devid interrupts
 *
 * This function uses the vCPU specific data to set the vCPU
 * affinity for an irq. The vCPU specific data is passed from
 * outside, such as KVM. One example code path is as below:
 * KVM -> IOMMU -> irq_set_vcpu_affinity().
 */
int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	struct irq_data *data;
	struct irq_chip *chip;
	int ret = -ENOSYS;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	do {
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_set_vcpu_affinity)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
	irq_put_desc_unlock(desc, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);

void __disable_irq(struct irq_desc *desc)
{
	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

/**
 * disable_hardirq - disables an irq and waits for hardirq completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending hard IRQ handlers for this
 * interrupt to complete before returning. If you use this function while
 * holding a resource the hard IRQ handler may need you will deadlock.
 *
 * When used to optimistically disable an interrupt from atomic context
 * the return value must be checked.
 *
 * Returns: false if a threaded handler is active.
 *
 * This function may be called - with care - from IRQ context.
 */
bool disable_hardirq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		return synchronize_hardirq(irq);

	return false;
}
EXPORT_SYMBOL_GPL(disable_hardirq);

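/*
 * Usage sketch: optimistic disable from atomic context, in the style of
 * a netpoll-like caller (names made up). Only proceed when no threaded
 * handler is active; checking the return value is mandatory here:
 *
 *	if (disable_hardirq(dev->irq))
 *		foo_poll_controller(dev);	// hardirq quiesced, thread idle
 *	enable_irq(dev->irq);
 */
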
/**
 * disable_nmi_nosync - disable an nmi without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and enables are
 * nested.
 * The interrupt to disable must have been requested through request_nmi.
 * Unlike disable_nmi(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 */
void disable_nmi_nosync(unsigned int irq)
{
	disable_irq_nosync(irq);
}

void __enable_irq(struct irq_desc *desc)
{
	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
		     irq_desc_get_irq(desc));
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		/*
		 * Call irq_startup() not irq_enable() here because the
		 * interrupt might be marked NOAUTOEN. So irq_startup()
		 * needs to be invoked when it gets enabled the first
		 * time. If it was already started up, then irq_startup()
		 * will invoke irq_enable() under the hood.
		 */
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
		break;
	}
	default:
		desc->depth--;
	}
}

/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

/**
 * enable_nmi - enable handling of an nmi
 * @irq: Interrupt to enable
 *
 * The interrupt to enable must have been requested through request_nmi.
 * Undoes the effect of one call to disable_nmi(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 */
void enable_nmi(unsigned int irq)
{
	enable_irq(irq);
}

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 *
 * Note: irq enable/disable state is completely orthogonal
 * to the enable/disable state of irq wake. An irq can be
 * disabled with disable_irq() and still wake the system as
 * long as the irq has wake enabled. If this does not hold,
 * then the underlying irq chip and the related driver need
 * to be investigated.
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* Don't use NMIs as wake up interrupts please */
	if (desc->istate & IRQS_NMI) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);

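/*
 * Usage sketch (hypothetical driver suspend hook; "foo" is made up):
 * arm the line as a system wakeup source before suspend and disarm it
 * symmetrically in resume, keeping enables and disables balanced:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 * with the matching irq_set_irq_wake(foo->irq, 0) in foo_resume().
 */
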
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (!desc->action ||
		    irqflags & desc->action->flags & IRQF_SHARED)
			canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n",
			 irq_desc_get_irq(desc),
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* Mask all flags except trigger mode */
	flags &= IRQ_TYPE_SENSE_MASK;
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
	case IRQ_SET_MASK_OK_DONE:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		fallthrough;
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

#ifdef CONFIG_HARDIRQS_SW_RESEND
int irq_set_parent(int irq, int parent_irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	desc->parent_irq = parent_irq;

	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_parent);
#endif

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

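/*
 * Usage sketch: a purely threaded request from a (hypothetical) slow-bus
 * device driver; "client" and "foo" are made up. With the primary handler
 * left NULL the core installs irq_default_primary_handler() above, and
 * IRQF_ONESHOT is then mandatory (see the check in __setup_irq()) so the
 * level-triggered line stays masked until foo_thread_fn() has silenced
 * the device:
 *
 *	ret = request_threaded_irq(client->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *				   "foo", foo);
 */
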
/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
{
	WARN(1, "Secondary action handler called for irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			/* may need to run one last time */
			if (test_and_clear_bit(IRQTF_RUNTHREAD,
					       &action->thread_flags)) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}
			__set_current_state(TASK_RUNNING);
			return -1;
		}

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler finished. unmask if the interrupt has not been disabled and
 * is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT) ||
	    action->handler == irq_forced_secondary_handler)
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread is faster done than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and masks the line, leaves due
	 * to IRQS_INPROGRESS and the irq line is masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_threaded_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;
	bool valid = true;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	/*
	 * This code is triggered unconditionally. Check the affinity
	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
	 */
	if (cpumask_available(desc->irq_common_data.affinity)) {
		const struct cpumask *m;

		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
		cpumask_copy(mask, m);
	} else {
		valid = false;
	}
	raw_spin_unlock_irq(&desc->lock);

	if (valid)
		set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct callback_head *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm, tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
{
	struct irqaction *secondary = action->secondary;

	if (WARN_ON_ONCE(!secondary))
		return;

	raw_spin_lock_irq(&desc->lock);
	__irq_wake_thread(desc, secondary);
	raw_spin_unlock_irq(&desc->lock);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct callback_head on_exit_work;
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	init_task_work(&on_exit_work, irq_thread_dtor);
	task_work_add(current, &on_exit_work, TWA_NONE);

	irq_thread_check_affinity(desc, action);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (action_ret == IRQ_WAKE_THREAD)
			irq_wake_secondary(desc, action);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

/**
 * irq_wake_thread - wake the irq thread for the action identified by dev_id
 * @irq: Interrupt line
 * @dev_id: Device identity for which the thread should be woken
 */
void irq_wake_thread(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if (action->dev_id == dev_id) {
			if (action->thread)
				__irq_wake_thread(desc, action);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
EXPORT_SYMBOL_GPL(irq_wake_thread);

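/*
 * Usage sketch (hypothetical; "foo" is made up): a primary handler
 * normally wakes its thread by returning IRQ_WAKE_THREAD. Code which
 * decides outside the hard interrupt - a timer callback, say - that the
 * threaded handler must run again can poke it directly; dev_id must
 * match the cookie passed to request_threaded_irq():
 *
 *	irq_wake_thread(foo->irq, foo);
 */
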
static int irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return 0;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return 0;

	/*
	 * No further action required for interrupts which are requested as
	 * threaded interrupts already
	 */
	if (new->handler == irq_default_primary_handler)
		return 0;

	new->flags |= IRQF_ONESHOT;

	/*
	 * Handle the case where we have a real primary handler and a
	 * thread handler. We force thread them as well by creating a
	 * secondary action.
	 */
	if (new->handler && new->thread_fn) {
		/* Allocate the secondary action */
		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
		if (!new->secondary)
			return -ENOMEM;
		new->secondary->handler = irq_forced_secondary_handler;
		new->secondary->thread_fn = new->thread_fn;
		new->secondary->dev_id = new->dev_id;
		new->secondary->irq = new->irq;
		new->secondary->name = new->name;
	}
	/* Deal with the primary handler */
	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
	new->thread_fn = new->handler;
	new->handler = irq_default_primary_handler;
	return 0;
}

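/*
 * Illustrative outcome of the transformation above (names made up):
 * booting with "threadirqs" and requesting
 *
 *	request_threaded_irq(irq, foo_quick_check, foo_slow_work, 0,
 *			     "foo", foo);
 *
 * leaves foo_quick_check() running from the irq/N-foo thread via
 * irq_forced_thread_fn() (with bh disabled around it) and
 * foo_slow_work() from the irq/N-s-foo secondary thread, both in
 * task context.
 */
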
static int irq_request_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
}

static void irq_release_resources(struct irq_desc *desc)
{
	struct irq_data *d = &desc->irq_data;
	struct irq_chip *c = d->chip;

	if (c->irq_release_resources)
		c->irq_release_resources(d);
}

static bool irq_supports_nmi(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	/* Only IRQs directly managed by the root irqchip can be set as NMI */
	if (d->parent_data)
		return false;
#endif
	/* Don't support NMIs for chips behind a slow bus */
	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
		return false;

	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
}

static int irq_nmi_setup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
}

static void irq_nmi_teardown(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct irq_chip *c = d->chip;

	if (c->irq_nmi_teardown)
		c->irq_nmi_teardown(d);
}

static int
setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
{
	struct task_struct *t;

	if (!secondary) {
		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
	} else {
		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
				   new->name);
	}

	if (IS_ERR(t))
		return PTR_ERR(t);

	sched_set_fifo(t);

	/*
	 * We keep the reference to the task struct even if
	 * the thread dies to avoid that the interrupt code
	 * references an already freed task_struct.
	 */
	new->thread = get_task_struct(t);
	/*
	 * Tell the thread to set its affinity. This is
	 * important for shared interrupt handlers as we do
	 * not invoke setup_affinity() for the secondary
	 * handlers as everything is already set up. Even for
	 * interrupts marked with IRQF_NO_BALANCE this is
	 * correct as we want the thread to move to the cpu(s)
	 * on which the requesting code placed the interrupt.
	 */
	set_bit(IRQTF_AFFINITY, &new->thread_flags);
	return 0;
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback,
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 * Interrupt lines used for NMIs cannot be shared.
		 */
		unsigned int oldtype;

		if (desc->istate & IRQS_NMI) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
			       new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody did set the configuration before, inherit
		 * the one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 resp 64 irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
		       new->name, irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);
69ab84943
|
1604 |
|
2c6927a38
|
1605 |
register_irq_proc(irq, desc); |
1da177e4c
|
1606 1607 |
new->dir = NULL; register_handler_proc(irq, new); |
1da177e4c
|
1608 |
return 0; |
f51634274
|
1609 1610 |
mismatch: |
3cca53b02
|
1611 |
if (!(new->flags & IRQF_PROBE_SHARED)) { |
97fd75b7b
|
1612 1613 |
pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s) ", |
f5d89470f
|
1614 1615 |
irq, new->flags, new->name, old->flags, old->name); #ifdef CONFIG_DEBUG_SHIRQ |
13e87ec68
|
1616 |
dump_stack(); |
3f0504471
|
1617 |
#endif |
f5d89470f
|
1618 |
} |
3aa551c9b
|
1619 |
ret = -EBUSY; |
cba4235e6
|
1620 |
out_unlock: |
1c389795c
|
1621 |
raw_spin_unlock_irqrestore(&desc->lock, flags); |
3b8249e75
|
1622 |
|
46e48e257
|
1623 1624 |
if (!desc->action) irq_release_resources(desc); |
19d39a381
|
1625 1626 |
out_bus_unlock: chip_bus_sync_unlock(desc); |
9114014cf
|
1627 |
mutex_unlock(&desc->request_mutex); |
3aa551c9b
|
1628 |
out_thread: |
3aa551c9b
|
1629 1630 1631 1632 |
if (new->thread) { struct task_struct *t = new->thread; new->thread = NULL; |
05d74efa3
|
1633 |
kthread_stop(t); |
3aa551c9b
|
1634 1635 |
put_task_struct(t); } |
2a1d3ab89
|
1636 1637 1638 1639 1640 1641 1642 |
if (new->secondary && new->secondary->thread) { struct task_struct *t = new->secondary->thread; new->secondary->thread = NULL; kthread_stop(t); put_task_struct(t); } |
b6873807a
|
1643 1644 |
out_mput: module_put(desc->owner); |
3aa551c9b
|
1645 |
return ret; |
1da177e4c
|
1646 |
} |
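
/*
 * Usage sketch for the ONESHOT rule enforced above: a request with no
 * primary handler must pass IRQF_ONESHOT, otherwise __setup_irq()
 * rejects it with -EINVAL. All foo_* names below are hypothetical.
 */
#if 0
static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/*
	 * Runs in the irq thread; sleeping is allowed here. With
	 * IRQF_ONESHOT the line stays masked until this returns.
	 */
	return IRQ_HANDLED;
}

static int foo_request(unsigned int irq, void *foo)
{
	/* handler == NULL installs irq_default_primary_handler */
	return request_threaded_irq(irq, NULL, foo_thread_fn,
				    IRQF_ONESHOT, "foo", foo);
}
#endif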
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned int irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown. Deactivate after synchronize_hardirq() */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Aside from that, the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/*
	 * Make sure it's not being used on another CPU and if the chip
	 * supports it also make sure that there is no (not yet serviced)
	 * interrupt in flight at the hardware level.
	 */
	__synchronize_hardirq(desc, true);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. Though request_mutex is
	 * held across this which prevents __setup_irq() from handing out
	 * the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		/*
		 * There is no interrupt on the fly anymore. Deactivate it
		 * completely.
		 */
		raw_spin_lock_irqsave(&desc->lock, flags);
		irq_domain_deactivate_irq(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 *
 * Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
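
/*
 * Usage sketch: teardown order for a shared line, per the free_irq()
 * rules above - quiesce the device first so the line cannot fire for
 * this card, then remove the handler. All foo_* names are hypothetical.
 */
#if 0
static void foo_shutdown(struct foo_dev *foo)
{
	foo_mask_device_irqs(foo);	/* disable the irq at the device */
	free_irq(foo->irq, foo);	/* sleeps; never from irq context */
}
#endif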
/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown_and_deactivate(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts.
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed.
 * @thread_fn: Function called from the irq handler thread.
 *	       If NULL, no irq thread is created.
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
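
/*
 * Usage sketch of the split handler design described above; all foo_*
 * names are hypothetical. The primary handler runs in hard irq context,
 * checks ownership (required on shared lines), masks the device and
 * hands the heavy lifting to the thread.
 */
#if 0
static irqreturn_t foo_primary(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!foo_irq_pending(foo))
		return IRQ_NONE;		/* not ours: shared line */

	foo_mask_device_irqs(foo);
	return IRQ_WAKE_THREAD;			/* wake foo_thread_fn() */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo);		/* may sleep */
	foo_unmask_device_irqs(foo);
	return IRQ_HANDLED;
}

static int foo_probe_irq(struct foo_dev *foo)
{
	return request_threaded_irq(foo->irq, foo_primary, foo_thread_fn,
				    IRQF_SHARED, "foo", foo);
}
#endif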
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
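
/*
 * Usage sketch: a driver that may sit behind either a normal irqchip or
 * a sleeping one (e.g. an i2c GPIO expander) can use the return value
 * to learn which flavour it got. All foo_* names are hypothetical.
 */
#if 0
static int foo_setup_irq(struct foo_dev *foo)
{
	int ret = request_any_context_irq(foo->irq, foo_handler, 0,
					  "foo", foo);
	if (ret < 0)
		return ret;

	foo->nested_irq = (ret == IRQC_IS_NESTED);
	return 0;
}
#endif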
/**
 * request_nmi - allocate an interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @irqflags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It sets up the IRQ line
 * to be handled as an NMI.
 *
 * An interrupt line delivering NMIs cannot be shared and IRQ handling
 * cannot be threaded.
 *
 * Interrupt lines requested for NMI delivery must produce per cpu
 * interrupts and have auto enabling disabled.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail and return a negative value.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* NMI cannot be shared, used for Polling */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;

	if (!(irqflags & IRQF_PERCPU))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || irq_settings_can_autoenable(desc) ||
	    !irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
	action->name = name;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Setup NMI state */
	desc->istate |= IRQS_NMI;
	retval = irq_nmi_setup(desc);
	if (retval) {
		__cleanup_nmi(irq, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		return -EINVAL;
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}
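
/*
 * Usage sketch, assuming an irqchip that supports NMI delivery (this is
 * typically an architecture or irqchip specific setup path): the line
 * must be exclusive, carry IRQF_PERCPU, and have autoenable disabled
 * beforehand. All foo_* names are hypothetical.
 */
#if 0
static int foo_setup_nmi(unsigned int irq, void *foo)
{
	irq_set_status_flags(irq, IRQ_NOAUTOEN);	/* no autoenable */
	return request_nmi(irq, foo_nmi_handler, IRQF_PERCPU,
			   "foo-nmi", foo);
}
#endif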
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
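
/*
 * Usage sketch: per-cpu irqs only affect the local CPU, so each CPU
 * enables and disables its own copy, typically from CPU hotplug
 * callbacks. All foo_* names are hypothetical.
 */
#if 0
static int foo_starting_cpu(unsigned int cpu)
{
	enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);	/* keep default type */
	return 0;
}

static int foo_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(foo_irq);
	return 0;
}
#endif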
void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
	enable_percpu_irq(irq, type);
}

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq: Linux irq number to check for
 *
 * Must be called from a non migratable context. Returns the enable
 * state of a per cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

void disable_percpu_nmi(unsigned int irq)
{
	disable_percpu_irq(irq);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	desc->istate &= ~IRQS_NMI;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 * remove_percpu_irq - free a per-cpu interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 * free_percpu_irq - free an interrupt allocated with request_percpu_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove a percpu interrupt handler. The handler is removed, but
 * the interrupt line is not disabled. This must be done on each
 * CPU before calling this function. The function does not return
 * until any executing interrupts for this IRQ have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);
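
/*
 * Usage sketch: unlike free_irq(), free_percpu_irq() does not disable
 * the line - every CPU must have run disable_percpu_irq() first, as the
 * comment above requires. All foo_* names are hypothetical.
 */
#if 0
static void foo_disable_local(void *unused)
{
	disable_percpu_irq(foo_irq);
}

static void foo_teardown(void)
{
	on_each_cpu(foo_disable_local, NULL, 1);
	free_percpu_irq(foo_irq, &foo_pcpu);
}
#endif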
void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		return;

	kfree(__free_percpu_irq(irq, dev_id));
}

/**
 * setup_percpu_irq - setup a per-cpu interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 * __request_percpu_irq - allocate a percpu interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @flags: Interrupt type flags (IRQF_TIMER only)
 * @devname: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt on the local CPU. If the interrupt is supposed to be
 * enabled on other CPUs, it has to be done on each CPU using
 * enable_percpu_irq().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
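
/*
 * Usage sketch of the per-cpu cookie contract documented above: the
 * handler receives the interrupted CPU's instance of the per-cpu
 * variable. All foo_* names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	struct foo_pcpu *p = dev_id;	/* this CPU's instance */

	p->count++;
	return IRQ_HANDLED;
}

static int foo_percpu_init(unsigned int irq)
{
	/*
	 * request_percpu_irq() wraps __request_percpu_irq() with
	 * flags == 0; each CPU still has to call enable_percpu_irq().
	 */
	return request_percpu_irq(irq, foo_percpu_handler, "foo",
				  &foo_pcpu);
}
#endif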
/**
 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 * @name: An ascii name for the claiming device
 * @dev_id: A percpu cookie passed back to the handler function
 *
 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
 * have to be setup on each CPU by calling prepare_percpu_nmi() before
 * being enabled on the same CPU by using enable_percpu_nmi().
 *
 * Dev_id must be globally unique. It is a per-cpu variable, and
 * the handler gets called with the interrupted CPU's instance of
 * that variable.
 *
 * Interrupt lines requested for NMI delivery should have auto enabling
 * disabled.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail, returning a negative value.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		       const char *name, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc) ||
	    irq_settings_can_autoenable(desc) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	/* The line cannot already be NMI */
	if (desc->istate & IRQS_NMI)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
		| IRQF_NOBALANCING;
	action->name = name;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->istate |= IRQS_NMI;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}

/**
 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
 * @irq: Interrupt line to prepare for NMI delivery
 *
 * This call prepares an interrupt line to deliver NMI on the current CPU,
 * before that interrupt line gets enabled with enable_percpu_nmi().
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 *
 * If the interrupt line cannot be used to deliver NMIs, the function
 * will fail, returning a negative value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}

/**
 * teardown_percpu_nmi - undoes NMI setup of IRQ line
 * @irq: Interrupt line from which CPU local NMI configuration should be
 *	 removed
 *
 * This call undoes the setup done by prepare_percpu_nmi().
 *
 * IRQ line should not be enabled for the current CPU.
 *
 * As a CPU local operation, this should be called from non-preemptible
 * context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}
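
/*
 * Usage sketch of the per-cpu NMI lifecycle spelled out above: request
 * once globally, then prepare and enable on each CPU from non-preemptible
 * context (e.g. cpuhp "starting" callbacks). All foo_* names are
 * hypothetical.
 */
#if 0
static int foo_nmi_starting_cpu(unsigned int cpu)
{
	if (!prepare_percpu_nmi(foo_irq))
		enable_percpu_nmi(foo_irq, IRQ_TYPE_NONE);
	return 0;
}

static int foo_nmi_dying_cpu(unsigned int cpu)
{
	disable_percpu_nmi(foo_irq);
	teardown_percpu_nmi(foo_irq);
	return 0;
}
#endif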
int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}

/**
 * irq_get_irqchip_state - returns the irqchip state of an interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: One of IRQCHIP_STATE_* the caller wants to know about
 * @state: a pointer to a boolean where the state is to be stored
 *
 * This call snapshots the internal irqchip state of an
 * interrupt, returning into @state the bit corresponding to
 * state @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	err = __irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
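
/*
 * Usage sketch in the style of VM-forwarding users: snapshot whether a
 * forwarded line is pending at the irqchip. foo_replay() is a
 * hypothetical caller-side helper.
 */
#if 0
static void foo_check_pending(unsigned int irq)
{
	bool pending = false;

	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending) &&
	    pending)
		foo_replay(irq);
}
#endif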
/**
 * irq_set_irqchip_state - set the state of a forwarded interrupt.
 * @irq: Interrupt line that is forwarded to a VM
 * @which: State to be restored (one of IRQCHIP_STATE_*)
 * @val: Value corresponding to @which
 *
 * This call sets the internal irqchip state of an interrupt,
 * depending on the value of @which.
 *
 * This function should be called with preemption disabled if the
 * interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
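
/*
 * Usage sketch, the counterpart of the snapshot above: re-assert a level
 * that was pending when the guest state was saved. Return value handling
 * is omitted for brevity; foo_* naming is hypothetical.
 */
#if 0
static void foo_restore_pending(unsigned int irq, bool was_pending)
{
	if (was_pending)
		irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, true);
}
#endif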