Commit 9103617df202d74e5c65f8af84a9aa727f812a06

Authored by Christoffer Dall
1 parent d35268da66

arm/arm64: KVM: vgic: Factor out level irq processing on guest exit

Currently vgic_process_maintenance() handles the processing of a
completed level-triggered interrupt directly, but we are soon going to
reuse this logic for level-triggered mapped interrupts with the HW bit
set, so move this logic into a separate static function.
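
To make the new shape concrete, here is a stand-alone sketch of the
structure this change introduces.  The types are stand-ins for struct
kvm_vcpu and struct vgic_lr, the bodies are stubs, and the second
caller is only the hypothetical reuse this commit prepares for; none of
it is the kernel implementation:

  /* Stand-in types; the real code uses struct kvm_vcpu / struct vgic_lr. */
  struct vcpu { int id; };
  struct lr_desc { int irq; int state; };

  /* Factored-out helper; returns 1 if the line is still high. */
  static int process_level_irq(struct vcpu *vcpu, int lr, struct lr_desc vlr)
  {
          (void)vcpu; (void)lr; (void)vlr;
          /* clear the LR, clear soft-pend/queued state, re-check the level */
          return 0;
  }

  /* Existing caller: the EOI loop in vgic_process_maintenance(). */
  static int eoi_loop(struct vcpu *vcpu, struct lr_desc *lrs, int nr_lr)
  {
          int lr, level_pending = 0;

          for (lr = 0; lr < nr_lr; lr++)
                  level_pending |= process_level_irq(vcpu, lr, lrs[lr]);
          return level_pending;
  }

  /* Future caller (hypothetical name): sync of HW-mapped level interrupts. */
  static int sync_hw_mapped(struct vcpu *vcpu, int lr, struct lr_desc vlr)
  {
          return process_level_irq(vcpu, lr, vlr);
  }

  int main(void)
  {
          struct vcpu v = { 0 };
          struct lr_desc lrs[1] = { { 48, 0 } };

          return eoi_loop(&v, lrs, 1) | sync_hw_mapped(&v, 0, lrs[0]);
  }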

Probably the scariest part of this commit is convincing yourself that
the new flow is safe compared to the old one.  In the following I try
to list the changes and why they are harmless (a short sketch
contrasting the two orderings follows the list):

  Move vgic_irq_clear_queued after kvm_notify_acked_irq:
    Harmless because the only potential effect of clearing the queued
    flag wrt.  kvm_set_irq is that vgic_update_irq_pending does not set
    the pending bit on the emulated CPU interface or in the
    pending_on_cpu bitmask if the function is called with level=1.
    However, the point of kvm_notify_acked_irq is to call kvm_set_irq
    with level=0, and we set the queued flag again in
    __kvm_vgic_sync_hwstate later on if the level is still high.

  Move vgic_set_lr after kvm_notify_acked_irq:
    Also harmless, because LR accesses are CPU-local operations and
    kvm_notify_acked_irq only affects distributor state.

  Move vgic_dist_irq_clear_soft_pend after kvm_notify_acked_irq:
    Also harmless, because now we check the level state in the
    clear_soft_pend function and lower the pending bits if the level is
    low.
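
As promised above, the following stand-alone sketch contrasts the old
and the new ordering.  The helpers are stubs standing in for
vgic_irq_clear_queued, vgic_set_lr, vgic_dist_irq_clear_soft_pend,
kvm_notify_acked_irq, the level re-check and vgic_sync_lr_elrsr, and a
pthread mutex stands in for dist->lock; only the call order and the
lock placement are taken from the diff below:

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t dist_lock = PTHREAD_MUTEX_INITIALIZER;

  static void clear_queued(int irq)    { printf("clear queued    %d\n", irq); }
  static void set_lr(int irq)          { printf("clear LR for    %d\n", irq); }
  static void clear_soft_pend(int irq) { printf("clear soft pend %d\n", irq); }
  static void notify_acked(int irq)    { printf("notify acked    %d\n", irq); }
  static void check_level(int irq)     { printf("re-check level  %d\n", irq); }
  static void sync_elrsr(int irq)      { printf("sync ELRSR      %d\n", irq); }

  /* Old flow: dist->lock is dropped and re-taken around the ack notification. */
  static void old_flow(int irq)
  {
          pthread_mutex_lock(&dist_lock);
          clear_queued(irq);
          set_lr(irq);
          clear_soft_pend(irq);
          pthread_mutex_unlock(&dist_lock);   /* released so kvm_set_irq can lock */
          notify_acked(irq);
          pthread_mutex_lock(&dist_lock);
          check_level(irq);                   /* set or clear pending state */
          pthread_mutex_unlock(&dist_lock);
          sync_elrsr(irq);
  }

  /* New flow: ack first, then one critical section around process_level_irq(). */
  static void new_flow(int irq)
  {
          notify_acked(irq);
          pthread_mutex_lock(&dist_lock);
          set_lr(irq);            /* CPU-local LR write */
          clear_soft_pend(irq);   /* now also drops pending state if level is low */
          clear_queued(irq);
          check_level(irq);
          sync_elrsr(irq);
          pthread_mutex_unlock(&dist_lock);
  }

  int main(void)
  {
          puts("old:"); old_flow(48);
          puts("new:"); new_flow(48);
          return 0;
  }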

Reviewed-by: Eric Auger <eric.auger@linaro.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>

Showing 1 changed file with 56 additions and 38 deletions

... ... @@ -107,6 +107,7 @@
107 107 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
108 108 static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
109 109 int virt_irq);
  110 +static int compute_pending_for_cpu(struct kvm_vcpu *vcpu);
110 111  
111 112 static const struct vgic_ops *vgic_ops;
112 113 static const struct vgic_params *vgic;
... ... @@ -357,6 +358,11 @@
357 358 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
358 359  
359 360 vgic_bitmap_set_irq_val(&dist->irq_soft_pend, vcpu->vcpu_id, irq, 0);
  361 + if (!vgic_dist_irq_get_level(vcpu, irq)) {
  362 + vgic_dist_irq_clear_pending(vcpu, irq);
  363 + if (!compute_pending_for_cpu(vcpu))
  364 + clear_bit(vcpu->vcpu_id, dist->irq_pending_on_cpu);
  365 + }
360 366 }
361 367  
362 368 static int vgic_dist_irq_is_pending(struct kvm_vcpu *vcpu, int irq)
... ... @@ -1338,12 +1344,56 @@
1338 1344 }
1339 1345 }
1340 1346  
  1347 +static int process_level_irq(struct kvm_vcpu *vcpu, int lr, struct vgic_lr vlr)
  1348 +{
  1349 + int level_pending = 0;
  1350 +
  1351 + vlr.state = 0;
  1352 + vlr.hwirq = 0;
  1353 + vgic_set_lr(vcpu, lr, vlr);
  1354 +
  1355 + /*
  1356 + * If the IRQ was EOIed (called from vgic_process_maintenance) or it
  1357 + * went from active to non-active (called from vgic_sync_hwirq) it was
  1358 + * also ACKed and we therefore assume we can clear the soft pending
  1359 + * state (should it have been set) for this interrupt.
  1360 + *
  1361 + * Note: if the IRQ soft pending state was set after the IRQ was
  1362 + * acked, it actually shouldn't be cleared, but we have no way of
  1363 + * knowing that unless we start trapping ACKs when the soft-pending
  1364 + * state is set.
  1365 + */
  1366 + vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
  1367 +
  1368 + /*
  1369 + * Tell the gic to start sampling the line of this interrupt again.
  1370 + */
  1371 + vgic_irq_clear_queued(vcpu, vlr.irq);
  1372 +
  1373 + /* Any additional pending interrupt? */
  1374 + if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
  1375 + vgic_cpu_irq_set(vcpu, vlr.irq);
  1376 + level_pending = 1;
  1377 + } else {
  1378 + vgic_dist_irq_clear_pending(vcpu, vlr.irq);
  1379 + vgic_cpu_irq_clear(vcpu, vlr.irq);
  1380 + }
  1381 +
  1382 + /*
  1383 + * Despite being EOIed, the LR may not have
  1384 + * been marked as empty.
  1385 + */
  1386 + vgic_sync_lr_elrsr(vcpu, lr, vlr);
  1387 +
  1388 + return level_pending;
  1389 +}
  1390 +
1341 1391 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
1342 1392 {
1343 1393 u32 status = vgic_get_interrupt_status(vcpu);
1344 1394 struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
1345   - bool level_pending = false;
1346 1395 struct kvm *kvm = vcpu->kvm;
  1396 + int level_pending = 0;
1347 1397  
1348 1398 kvm_debug("STATUS = %08x\n", status);
1349 1399  
... ... @@ -1358,54 +1408,22 @@
1358 1408  
1359 1409 for_each_set_bit(lr, eisr_ptr, vgic->nr_lr) {
1360 1410 struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
1361   - WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
1362 1411  
1363   - spin_lock(&dist->lock);
1364   - vgic_irq_clear_queued(vcpu, vlr.irq);
  1412 + WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
1365 1413 WARN_ON(vlr.state & LR_STATE_MASK);
1366   - vlr.state = 0;
1367   - vgic_set_lr(vcpu, lr, vlr);
1368 1414  
1369   - /*
1370   - * If the IRQ was EOIed it was also ACKed and we we
1371   - * therefore assume we can clear the soft pending
1372   - * state (should it had been set) for this interrupt.
1373   - *
1374   - * Note: if the IRQ soft pending state was set after
1375   - * the IRQ was acked, it actually shouldn't be
1376   - * cleared, but we have no way of knowing that unless
1377   - * we start trapping ACKs when the soft-pending state
1378   - * is set.
1379   - */
1380   - vgic_dist_irq_clear_soft_pend(vcpu, vlr.irq);
1381 1415  
1382 1416 /*
1383 1417 * kvm_notify_acked_irq calls kvm_set_irq()
1384   - * to reset the IRQ level. Need to release the
1385   - * lock for kvm_set_irq to grab it.
  1418 + * to reset the IRQ level, which grabs the dist->lock
  1419 + * so we call this before taking the dist->lock.
1386 1420 */
1387   - spin_unlock(&dist->lock);
1388   -
1389 1421 kvm_notify_acked_irq(kvm, 0,
1390 1422 vlr.irq - VGIC_NR_PRIVATE_IRQS);
1391   - spin_lock(&dist->lock);
1392 1423  
1393   - /* Any additional pending interrupt? */
1394   - if (vgic_dist_irq_get_level(vcpu, vlr.irq)) {
1395   - vgic_cpu_irq_set(vcpu, vlr.irq);
1396   - level_pending = true;
1397   - } else {
1398   - vgic_dist_irq_clear_pending(vcpu, vlr.irq);
1399   - vgic_cpu_irq_clear(vcpu, vlr.irq);
1400   - }
1401   -
  1424 + spin_lock(&dist->lock);
  1425 + level_pending |= process_level_irq(vcpu, lr, vlr);
1402 1426 spin_unlock(&dist->lock);
1403   -
1404   - /*
1405   - * Despite being EOIed, the LR may not have
1406   - * been marked as empty.
1407   - */
1408   - vgic_sync_lr_elrsr(vcpu, lr, vlr);
1409 1427 }
1410 1428 }
1411 1429