kernel/irq/migration.c
  // SPDX-License-Identifier: GPL-2.0

  #include <linux/irq.h>
  #include <linux/interrupt.h>
  
  #include "internals.h"

  /**
   * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
   * @desc:		Interrupt descriptor to clean up
   * @force_clear:	If set clear the move pending bit unconditionally.
   *			If not set, clear it only when the dying CPU is the
   *			last one in the pending mask.
   *
   * Returns true if the pending bit was set and the pending mask contains an
   * online CPU other than the dying CPU.
   */
  bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
  {
  	struct irq_data *data = irq_desc_get_irq_data(desc);
  
  	if (!irqd_is_setaffinity_pending(data))
  		return false;
  
  	/*
  	 * The outgoing CPU might be the last online target in a pending
  	 * interrupt move. If that's the case clear the pending move bit.
  	 */
  	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
  		irqd_clr_move_pending(data);
  		return false;
  	}
  	if (force_clear)
  		irqd_clr_move_pending(data);
  	return true;
  }
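
/*
 * Illustrative sketch only (not part of this file): one way a CPU-hotplug
 * migration path could consume irq_fixup_move_pending(). The function name
 * and surrounding flow below are hypothetical; only the helpers it calls
 * come from this file or the genirq core.
 *
 *	static void example_migrate_irq_off_dying_cpu(struct irq_desc *desc)
 *	{
 *		const struct cpumask *target = cpu_online_mask;
 *
 *		// Use the pending mask as the new target only if it still
 *		// contains an online CPU other than the dying one;
 *		// with force_clear the pending bit is dropped either way.
 *		if (irq_fixup_move_pending(desc, true))
 *			target = desc->pending_mask;
 *
 *		irq_do_set_affinity(irq_desc_get_irq_data(desc), target, false);
 *	}
 */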
  void irq_move_masked_irq(struct irq_data *idata)
  {
  	struct irq_desc *desc = irq_data_to_desc(idata);
  	struct irq_data *data = &desc->irq_data;
  	struct irq_chip *chip = data->chip;

  	if (likely(!irqd_is_setaffinity_pending(data)))
  		return;
  	irqd_clr_move_pending(data);

  	/*
  	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
  	 */
  	if (irqd_is_per_cpu(data)) {
  		WARN_ON(1);
  		return;
  	}
  	if (unlikely(cpumask_empty(desc->pending_mask)))
  		return;
  	if (!chip->irq_set_affinity)
  		return;
  	assert_raw_spin_locked(&desc->lock);

  	/*
  	 * If there was a valid mask to work with, please
  	 * do the disable, re-program, enable sequence.
  	 * This is *not* particularly important for level triggered
  	 * but in an edge trigger case, we might be setting rte
  	 * when an active trigger is coming in. This could
  	 * cause some ioapics to mal-function.
  	 * Being paranoid I guess!
  	 *
  	 * For correct operation this depends on the caller
  	 * masking the irqs.
  	 */
  	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
  		int ret;
  
  		ret = irq_do_set_affinity(data, desc->pending_mask, false);
  		/*
  		 * If there is a cleanup pending in the underlying
  		 * vector management, reschedule the move for the next
  		 * interrupt. Leave desc->pending_mask intact.
  		 */
  		if (ret == -EBUSY) {
  			irqd_set_move_pending(data);
  			return;
  		}
  	}
  	cpumask_clear(desc->pending_mask);
  }

  void __irq_move_irq(struct irq_data *idata)
  {
  	bool masked;

  	/*
  	 * Get top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is enabled,
  	 * and it should be optimized away when CONFIG_IRQ_DOMAIN_HIERARCHY is
  	 * disabled. So we avoid an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
  	 */
  	idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
  	if (unlikely(irqd_irq_disabled(idata)))
  		return;

  	/*
  	 * Be careful vs. already masked interrupts. If this is a
  	 * threaded interrupt with ONESHOT set, we can end up with an
  	 * interrupt storm.
  	 */
  	masked = irqd_irq_masked(idata);
  	if (!masked)
  		idata->chip->irq_mask(idata);
  	irq_move_masked_irq(idata);
  	if (!masked)
  		idata->chip->irq_unmask(idata);
  }
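
/*
 * Note: interrupt flow handlers are expected to reach __irq_move_irq() via
 * the irq_move_irq() wrapper rather than calling it directly. A minimal
 * sketch of such a wrapper (the real one lives in include/linux/irq.h and
 * may differ in detail) is:
 *
 *	static inline void irq_move_irq(struct irq_data *data)
 *	{
 *		// Skip the out-of-line call unless an affinity change
 *		// is actually pending for this interrupt.
 *		if (unlikely(irqd_is_setaffinity_pending(data)))
 *			__irq_move_irq(data);
 *	}
 */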