Commit 388d4359680b56dba82fe2ffca05871e9fd2b73e

Authored by Andre Przywara
Committed by Paolo Bonzini
1 parent 4c27625b7a

KVM: arm/arm64: Properly protect VGIC locks from IRQs

As Jan reported [1], lockdep complains about the VGIC not being
bulletproof. This seems to be due to two issues:
- When commit 006df0f34930 ("KVM: arm/arm64: Support calling
  vgic_update_irq_pending from irq context") promoted irq_lock and
  ap_list_lock to _irqsave, we forgot two instances of irq_lock.
  lockdep seems to pick those up.
- If a lock is taken with _irqsave, any other lock taken inside it
  must be IRQ-safe as well, so the lpi_list_lock needs to be promoted
  too.

This fixes both issues by simply making the remaining instances of
those locks _irqsave; a minimal sketch of the underlying deadlock
follows the sign-off tags below.
One irq_lock is addressed in a separate patch to simplify backporting.

[1] http://lists.infradead.org/pipermail/linux-arm-kernel/2018-May/575718.html

Cc: stable@vger.kernel.org
Fixes: 006df0f34930 ("KVM: arm/arm64: Support calling vgic_update_irq_pending from irq context")
Reported-by: Jan Glauber <jan.glauber@caviumnetworks.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
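
For illustration, here is a minimal sketch of the self-deadlock that
lockdep guards against here. This is not code from the patch; the lock
and function names are hypothetical. A lock that an interrupt handler
can take must be acquired with IRQs disabled in process context,
otherwise the handler can interrupt the critical section and spin
forever on the lock its own CPU already holds.

#include <linux/spinlock.h>

/* Hypothetical lock taken from both process and IRQ context. */
static DEFINE_SPINLOCK(some_lock);

/* BROKEN: IRQs stay enabled while some_lock is held. */
static void process_path_broken(void)
{
	spin_lock(&some_lock);
	/* An IRQ firing here runs irq_path() on this CPU... */
	spin_unlock(&some_lock);
}

/* Runs in hardirq context. */
static void irq_path(void)
{
	spin_lock(&some_lock);	/* ...and spins forever on the held lock. */
	spin_unlock(&some_lock);
}

/* FIXED: disable IRQs across the critical section, as the patch does. */
static void process_path_fixed(void)
{
	unsigned long flags;

	spin_lock_irqsave(&some_lock, flags);
	/* No IRQ can preempt us here and re-take some_lock. */
	spin_unlock_irqrestore(&some_lock, flags);
}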

Showing 3 changed files with 23 additions and 14 deletions

virt/kvm/arm/vgic/vgic-debug.c
... ... @@ -211,6 +211,7 @@
211 211 struct vgic_state_iter *iter = (struct vgic_state_iter *)v;
212 212 struct vgic_irq *irq;
213 213 struct kvm_vcpu *vcpu = NULL;
  214 + unsigned long flags;
214 215  
215 216 if (iter->dist_id == 0) {
216 217 print_dist_state(s, &kvm->arch.vgic);
217 218  
... ... @@ -227,9 +228,9 @@
227 228 irq = &kvm->arch.vgic.spis[iter->intid - VGIC_NR_PRIVATE_IRQS];
228 229 }
229 230  
230   - spin_lock(&irq->irq_lock);
  231 + spin_lock_irqsave(&irq->irq_lock, flags);
231 232 print_irq_state(s, irq, vcpu);
232   - spin_unlock(&irq->irq_lock);
  233 + spin_unlock_irqrestore(&irq->irq_lock, flags);
233 234  
234 235 return 0;
235 236 }
virt/kvm/arm/vgic/vgic-its.c
... ... @@ -52,6 +52,7 @@
52 52 {
53 53 struct vgic_dist *dist = &kvm->arch.vgic;
54 54 struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
  55 + unsigned long flags;
55 56 int ret;
56 57  
57 58 /* In this case there is no put, since we keep the reference. */
... ... @@ -71,7 +72,7 @@
71 72 irq->intid = intid;
72 73 irq->target_vcpu = vcpu;
73 74  
74   - spin_lock(&dist->lpi_list_lock);
  75 + spin_lock_irqsave(&dist->lpi_list_lock, flags);
75 76  
76 77 /*
77 78 * There could be a race with another vgic_add_lpi(), so we need to
... ... @@ -99,7 +100,7 @@
99 100 dist->lpi_list_count++;
100 101  
101 102 out_unlock:
102   - spin_unlock(&dist->lpi_list_lock);
  103 + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
103 104  
104 105 /*
105 106 * We "cache" the configuration table entries in our struct vgic_irq's.
... ... @@ -315,6 +316,7 @@
315 316 {
316 317 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
317 318 struct vgic_irq *irq;
  319 + unsigned long flags;
318 320 u32 *intids;
319 321 int irq_count, i = 0;
320 322  
... ... @@ -330,7 +332,7 @@
330 332 if (!intids)
331 333 return -ENOMEM;
332 334  
333   - spin_lock(&dist->lpi_list_lock);
  335 + spin_lock_irqsave(&dist->lpi_list_lock, flags);
334 336 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
335 337 if (i == irq_count)
336 338 break;
... ... @@ -339,7 +341,7 @@
339 341 continue;
340 342 intids[i++] = irq->intid;
341 343 }
342   - spin_unlock(&dist->lpi_list_lock);
  344 + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
343 345  
344 346 *intid_ptr = intids;
345 347 return i;
virt/kvm/arm/vgic/vgic.c
... ... @@ -43,10 +43,14 @@
43 43 * kvm->lock (mutex)
44 44 * its->cmd_lock (mutex)
45 45 * its->its_lock (mutex)
46   - * vgic_cpu->ap_list_lock
47   - * kvm->lpi_list_lock
48   - * vgic_irq->irq_lock
  46 + * vgic_cpu->ap_list_lock must be taken with IRQs disabled
  47 + * kvm->lpi_list_lock must be taken with IRQs disabled
  48 + * vgic_irq->irq_lock must be taken with IRQs disabled
49 49 *
  50 + * As the ap_list_lock might be taken from the timer interrupt handler,
  51 + * we have to disable IRQs before taking this lock and everything lower
  52 + * than it.
  53 + *
50 54 * If you need to take multiple locks, always take the upper lock first,
51 55 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
52 56 * If you are already holding a lock and need to take a higher one, you
53 57  
... ... @@ -72,8 +76,9 @@
72 76 {
73 77 struct vgic_dist *dist = &kvm->arch.vgic;
74 78 struct vgic_irq *irq = NULL;
  79 + unsigned long flags;
75 80  
76   - spin_lock(&dist->lpi_list_lock);
  81 + spin_lock_irqsave(&dist->lpi_list_lock, flags);
77 82  
78 83 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
79 84 if (irq->intid != intid)
... ... @@ -89,7 +94,7 @@
89 94 irq = NULL;
90 95  
91 96 out_unlock:
92   - spin_unlock(&dist->lpi_list_lock);
  97 + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
93 98  
94 99 return irq;
95 100 }
... ... @@ -134,19 +139,20 @@
134 139 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
135 140 {
136 141 struct vgic_dist *dist = &kvm->arch.vgic;
  142 + unsigned long flags;
137 143  
138 144 if (irq->intid < VGIC_MIN_LPI)
139 145 return;
140 146  
141   - spin_lock(&dist->lpi_list_lock);
  147 + spin_lock_irqsave(&dist->lpi_list_lock, flags);
142 148 if (!kref_put(&irq->refcount, vgic_irq_release)) {
143   - spin_unlock(&dist->lpi_list_lock);
  149 + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
144 150 return;
145 151 };
146 152  
147 153 list_del(&irq->lpi_list);
148 154 dist->lpi_list_count--;
149   - spin_unlock(&dist->lpi_list_lock);
  155 + spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
150 156  
151 157 kfree(irq);
152 158 }
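
To make the lock ordering documented in the vgic.c comment above
concrete, here is a minimal hypothetical sketch; the helper function is
not part of the patch, but the types and lock fields are the ones the
comment names. The outer ap_list_lock is taken with _irqsave, after
which the nested irq_lock can use plain spin_lock() because IRQs are
already disabled.

#include <linux/spinlock.h>
#include <kvm/arm_vgic.h>	/* struct vgic_cpu, struct vgic_irq */

static void sketch_ap_list_then_irq(struct vgic_cpu *vgic_cpu,
				    struct vgic_irq *irq)
{
	unsigned long flags;

	/* Upper lock first; IRQs off for the whole section. */
	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	/* Lower lock nested inside; plain spin_lock() suffices since
	 * IRQs are already disabled. */
	spin_lock(&irq->irq_lock);

	/* ... operate on the ap_list entry ... */

	spin_unlock(&irq->irq_lock);
	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	/* To take a higher lock while already holding a lower one,
	 * drop the lower lock and re-take both in the order above. */
}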