Blame view

kernel/irq/migration.c 1.8 KB
c777ac559   Andrew Morton   [PATCH] irq: unin...
1

d824e66a9   Christoph Hellwig   [PATCH] build ker...
2
  #include <linux/irq.h>
57b150cce   Yinghai Lu   irq: only update ...
3
4
5
  #include <linux/interrupt.h>
  
  #include "internals.h"
c777ac559   Andrew Morton   [PATCH] irq: unin...
6

a439520f8   Thomas Gleixner   genirq: Implement...
7
/*
 * irq_move_masked_irq - carry out a pending irq affinity change
 * @idata: irq data of the interrupt to move
 *
 * Called with desc->lock held (see the assert below) and, per the
 * comment further down, with the interrupt masked by the caller.
 * If an affinity change was queued in desc->pending_mask, program the
 * new affinity into the chip and propagate it to the irq thread(s).
 */
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	/* Fast path: no affinity change queued for this interrupt. */
	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	/*
	 * Consume the pending flag up front: it is cleared even when the
	 * checks below bail out without reprogramming the chip.
	 */
	irqd_clr_move_pending(&desc->irq_data);

	/* Nothing useful queued. */
	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	/* Chip cannot reprogram affinity at all. */
	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in a edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to mal-function.
	 * Being paranoid i guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
		   < nr_cpu_ids))
		/*
		 * Only update the stored affinity and the thread affinity
		 * when the chip accepted the new mask (returned 0).
		 */
		if (!chip->irq_set_affinity(&desc->irq_data,
					    desc->pending_mask, false)) {
			cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
			irq_set_thread_affinity(desc);
		}

	/* The request has been handled (or dropped); forget it either way. */
	cpumask_clear(desc->pending_mask);
}
e7b946e98   Eric W. Biederman   [PATCH] genirq: i...
50

a439520f8   Thomas Gleixner   genirq: Implement...
51
/*
 * irq_move_irq - carry out a pending irq affinity change from irq context
 * @idata: irq data of the interrupt
 *
 * Wrapper around irq_move_masked_irq() which masks the interrupt line
 * around the move if — and only if — it is not already masked.
 */
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	/* Fast path: no affinity change queued. */
	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	/* Don't touch a disabled interrupt; the move stays pending. */
	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	/* Only undo a mask we applied ourselves above. */
	if (!masked)
		idata->chip->irq_unmask(idata);
}